diff --git a/blockdb/blockdb.go b/blockdb/blockdb.go
index 6ef50d23..f31fa227 100644
--- a/blockdb/blockdb.go
+++ b/blockdb/blockdb.go
@@ -3,18 +3,24 @@ package blockdb
import (
"context"
+ "github.com/sirupsen/logrus"
+
"github.com/ethpandaops/dora/blockdb/pebble"
"github.com/ethpandaops/dora/blockdb/s3"
+ "github.com/ethpandaops/dora/blockdb/tiered"
"github.com/ethpandaops/dora/blockdb/types"
dtypes "github.com/ethpandaops/dora/types"
)
+// BlockDb is the main wrapper for block database operations.
type BlockDb struct {
engine types.BlockDbEngine
}
+// GlobalBlockDb is the global block database instance.
var GlobalBlockDb *BlockDb
+// InitWithPebble initializes the block database with Pebble (local) storage.
func InitWithPebble(config dtypes.PebbleBlockDBConfig) error {
engine, err := pebble.NewPebbleEngine(config)
if err != nil {
@@ -28,6 +34,7 @@ func InitWithPebble(config dtypes.PebbleBlockDBConfig) error {
return nil
}
+// InitWithS3 initializes the block database with S3 (remote) storage.
func InitWithS3(config dtypes.S3BlockDBConfig) error {
engine, err := s3.NewS3Engine(config)
if err != nil {
@@ -41,25 +48,77 @@ func InitWithS3(config dtypes.S3BlockDBConfig) error {
return nil
}
+// InitWithTiered initializes the block database with tiered storage (Pebble cache + S3 backend).
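+//
+// A minimal wiring sketch (pebbleCfg and s3Cfg are hypothetical placeholder
+// values; TieredBlockDBConfig nests the Pebble and S3 configs consumed by the
+// engines below):
+//
+//	cfg := dtypes.TieredBlockDBConfig{Pebble: pebbleCfg, S3: s3Cfg}
+//	if err := blockdb.InitWithTiered(cfg, logrus.StandardLogger()); err != nil {
+//		logrus.Fatalf("blockdb init failed: %v", err)
+//	}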
+func InitWithTiered(config dtypes.TieredBlockDBConfig, logger logrus.FieldLogger) error {
+ engine, err := tiered.NewTieredEngine(config, logger)
+ if err != nil {
+ return err
+ }
+
+ GlobalBlockDb = &BlockDb{
+ engine: engine,
+ }
+
+ return nil
+}
+
+// Close closes the block database.
func (db *BlockDb) Close() error {
return db.engine.Close()
}
-func (db *BlockDb) GetBlock(ctx context.Context, slot uint64, root []byte, parseBlock func(uint64, []byte) (interface{}, error)) (*types.BlockData, error) {
- return db.engine.GetBlock(ctx, slot, root, parseBlock)
+// GetBlock retrieves block data with selective loading based on flags.
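+//
+// For example, to load only the header and the raw body bytes (passing nil
+// parsers leaves BodyData/PayloadData unparsed), using the flag constants
+// from blockdb/types:
+//
+//	data, err := db.GetBlock(ctx, slot, root,
+//		types.BlockDataFlagHeader|types.BlockDataFlagBody,
+//		nil, nil)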
+func (db *BlockDb) GetBlock(
+ ctx context.Context,
+ slot uint64,
+ root []byte,
+ flags types.BlockDataFlags,
+ parseBlock func(uint64, []byte) (any, error),
+ parsePayload func(uint64, []byte) (any, error),
+) (*types.BlockData, error) {
+ return db.engine.GetBlock(ctx, slot, root, flags, parseBlock, parsePayload)
+}
+
+// GetStoredComponents returns which components exist for a block.
+func (db *BlockDb) GetStoredComponents(ctx context.Context, slot uint64, root []byte) (types.BlockDataFlags, error) {
+ return db.engine.GetStoredComponents(ctx, slot, root)
}
-func (db *BlockDb) AddBlock(ctx context.Context, slot uint64, root []byte, header_ver uint64, header_data []byte, body_ver uint64, body_data []byte) (bool, error) {
+// AddBlock stores block data. Returns (added, updated, error).
+func (db *BlockDb) AddBlock(
+ ctx context.Context,
+ slot uint64,
+ root []byte,
+ headerVer uint64,
+ headerData []byte,
+ bodyVer uint64,
+ bodyData []byte,
+ payloadVer uint64,
+ payloadData []byte,
+ balVer uint64,
+ balData []byte,
+) (bool, bool, error) {
return db.engine.AddBlock(ctx, slot, root, func() (*types.BlockData, error) {
return &types.BlockData{
- HeaderVersion: header_ver,
- HeaderData: header_data,
- BodyVersion: body_ver,
- BodyData: body_data,
+ HeaderVersion: headerVer,
+ HeaderData: headerData,
+ BodyVersion: bodyVer,
+ BodyData: bodyData,
+ PayloadVersion: payloadVer,
+ PayloadData: payloadData,
+ BalVersion: balVer,
+ BalData: balData,
}, nil
})
}
-func (db *BlockDb) AddBlockWithCallback(ctx context.Context, slot uint64, root []byte, dataCb func() (*types.BlockData, error)) (bool, error) {
+// AddBlockWithCallback stores block data using a callback for deferred data loading.
+// Returns (added, updated, error).
+func (db *BlockDb) AddBlockWithCallback(
+ ctx context.Context,
+ slot uint64,
+ root []byte,
+ dataCb func() (*types.BlockData, error),
+) (bool, bool, error) {
return db.engine.AddBlock(ctx, slot, root, dataCb)
}
diff --git a/blockdb/pebble/cleanup.go b/blockdb/pebble/cleanup.go
new file mode 100644
index 00000000..5a3cf787
--- /dev/null
+++ b/blockdb/pebble/cleanup.go
@@ -0,0 +1,439 @@
+package pebble
+
+import (
+ "context"
+ "encoding/binary"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/cockroachdb/pebble"
+ "github.com/sirupsen/logrus"
+
+ "github.com/ethpandaops/dora/blockdb/types"
+ dtypes "github.com/ethpandaops/dora/types"
+)
+
+const (
+ // KeyNamespaceLRU is the namespace for LRU tracking data.
+ KeyNamespaceLRU uint16 = 2
+
+ // LRU value format: [headerAccess (8B)] [bodyAccess (8B)] [payloadAccess (8B)] [balAccess (8B)]
+ // Each access time is a Unix nanosecond timestamp, 0 means never accessed.
+ lruValueSize = 32
+
+ // Maximum number of LRU updates to buffer before forcing a flush.
+ maxLRUBufferSize = 1000
+)
+
+// CacheCleanup manages background cleanup of cached data.
+type CacheCleanup struct {
+ engine *PebbleEngine
+ config dtypes.PebbleBlockDBConfig
+ logger logrus.FieldLogger
+
+ ctx context.Context
+ cancel context.CancelFunc
+ wg sync.WaitGroup
+
+ // LRU update buffer
+ lruMu sync.Mutex
+ lruBuffer map[string]*lruUpdate // root hex -> update
+}
+
+// lruUpdate holds pending LRU timestamp updates for a block.
+type lruUpdate struct {
+ root []byte
+ headerAccess int64 // Unix nano, 0 = no update
+ bodyAccess int64
+ payloadAccess int64
+ balAccess int64
+}
+
+// NewCacheCleanup creates a new cache cleanup manager.
+func NewCacheCleanup(engine *PebbleEngine, logger logrus.FieldLogger) *CacheCleanup {
+ ctx, cancel := context.WithCancel(context.Background())
+
+ return &CacheCleanup{
+ engine: engine,
+ config: engine.GetConfig(),
+ logger: logger.WithField("component", "pebble-cleanup"),
+ ctx: ctx,
+ cancel: cancel,
+ lruBuffer: make(map[string]*lruUpdate, 100),
+ }
+}
+
+// Start begins the background cleanup loop.
+func (c *CacheCleanup) Start() {
+ if c.config.CleanupInterval == 0 {
+ c.logger.Info("cleanup disabled (interval is 0)")
+ return
+ }
+
+ c.wg.Add(1)
+ go c.runCleanupLoop()
+}
+
+// Stop stops the background cleanup loop.
+func (c *CacheCleanup) Stop() {
+ c.cancel()
+ c.wg.Wait()
+
+ // Final flush of LRU buffer
+ c.FlushLRU()
+}
+
+// runCleanupLoop runs the periodic cleanup.
+func (c *CacheCleanup) runCleanupLoop() {
+ defer c.wg.Done()
+
+ ticker := time.NewTicker(c.config.CleanupInterval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-c.ctx.Done():
+ return
+ case <-ticker.C:
+ c.FlushLRU()
+ c.runCleanup()
+ }
+ }
+}
+
+// RecordAccess records an access for LRU tracking. Buffered until flush.
+func (c *CacheCleanup) RecordAccess(root []byte, flags types.BlockDataFlags) {
+ c.lruMu.Lock()
+ defer c.lruMu.Unlock()
+
+ key := string(root)
+ now := time.Now().UnixNano()
+
+ update, exists := c.lruBuffer[key]
+ if !exists {
+ rootCopy := make([]byte, len(root))
+ copy(rootCopy, root)
+ update = &lruUpdate{root: rootCopy}
+ c.lruBuffer[key] = update
+ }
+
+ if flags.Has(types.BlockDataFlagHeader) {
+ update.headerAccess = now
+ }
+ if flags.Has(types.BlockDataFlagBody) {
+ update.bodyAccess = now
+ }
+ if flags.Has(types.BlockDataFlagPayload) {
+ update.payloadAccess = now
+ }
+ if flags.Has(types.BlockDataFlagBal) {
+ update.balAccess = now
+ }
+
+ // Force flush if buffer is too large
+ if len(c.lruBuffer) >= maxLRUBufferSize {
+ c.flushLRULocked()
+ }
+}
+
+// FlushLRU flushes buffered LRU updates to Pebble.
+func (c *CacheCleanup) FlushLRU() {
+ c.lruMu.Lock()
+ defer c.lruMu.Unlock()
+ c.flushLRULocked()
+}
+
+// flushLRULocked flushes LRU buffer (must hold lruMu).
+func (c *CacheCleanup) flushLRULocked() {
+ if len(c.lruBuffer) == 0 {
+ return
+ }
+
+ db := c.engine.GetDB()
+ batch := db.NewBatch()
+
+ for _, update := range c.lruBuffer {
+ key := makeLRUKey(update.root)
+
+ // Read existing LRU data
+ existing := make([]byte, lruValueSize)
+ if res, closer, err := db.Get(key); err == nil {
+ if len(res) >= lruValueSize {
+ copy(existing, res)
+ }
+ closer.Close()
+ }
+
+ // Merge updates (only update non-zero values)
+ value := make([]byte, lruValueSize)
+ copy(value, existing)
+
+ if update.headerAccess > 0 {
+ binary.BigEndian.PutUint64(value[0:8], uint64(update.headerAccess))
+ }
+ if update.bodyAccess > 0 {
+ binary.BigEndian.PutUint64(value[8:16], uint64(update.bodyAccess))
+ }
+ if update.payloadAccess > 0 {
+ binary.BigEndian.PutUint64(value[16:24], uint64(update.payloadAccess))
+ }
+ if update.balAccess > 0 {
+ binary.BigEndian.PutUint64(value[24:32], uint64(update.balAccess))
+ }
+
+ batch.Set(key, value, nil)
+ }
+
+ if err := batch.Commit(nil); err != nil {
+ c.logger.Errorf("failed to flush LRU updates: %v", err)
+ }
+ batch.Close()
+
+ // Clear buffer
+ c.lruBuffer = make(map[string]*lruUpdate, 100)
+}
+
+// makeLRUKey creates the key for LRU data.
+func makeLRUKey(root []byte) []byte {
+ key := make([]byte, 2+len(root))
+ binary.BigEndian.PutUint16(key[:2], KeyNamespaceLRU)
+ copy(key[2:], root)
+ return key
+}
+
+// runCleanup performs cleanup for all configured component types.
+func (c *CacheCleanup) runCleanup() {
+ c.logger.Debug("starting cache cleanup")
+
+ componentConfigs := map[uint16]*dtypes.BlockDbRetentionConfig{
+ BlockTypeHeader: &c.config.HeaderRetention,
+ BlockTypeBody: &c.config.BodyRetention,
+ BlockTypePayload: &c.config.PayloadRetention,
+ BlockTypeBal: &c.config.BalRetention,
+ }
+
+ for blockType, config := range componentConfigs {
+ if config == nil || !config.Enabled {
+ continue
+ }
+
+ switch config.CleanupMode {
+ case "age":
+ c.cleanupByAge(blockType, config.RetentionTime)
+ case "lru":
+ c.cleanupByLRU(blockType, config.MaxSize*1024*1024) // Convert MB to bytes
+ }
+ }
+}
+
+// cleanupByAge removes entries older than the retention time based on storage timestamp.
+func (c *CacheCleanup) cleanupByAge(blockType uint16, retention time.Duration) {
+ if retention == 0 {
+ return
+ }
+
+ cutoff := time.Now().Add(-retention)
+ deleted := 0
+
+ db := c.engine.GetDB()
+ iter, err := db.NewIter(&pebble.IterOptions{})
+ if err != nil {
+ c.logger.Errorf("failed to create iterator: %v", err)
+ return
+ }
+ defer iter.Close()
+
+ batch := db.NewBatch()
+ defer batch.Close()
+
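+	// Staging deletes in the batch while iterating is safe: a Pebble iterator
+	// reads from a consistent snapshot and does not observe the pending batch.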
+ for iter.First(); iter.Valid(); iter.Next() {
+ key := iter.Key()
+
+ // Check if this key is in the block namespace
+ if len(key) < 36 { // 2 (namespace) + 32 (root) + 2 (type)
+ continue
+ }
+
+ namespace := binary.BigEndian.Uint16(key[:2])
+ if namespace != KeyNamespaceBlock {
+ continue
+ }
+
+ keyType := binary.BigEndian.Uint16(key[len(key)-2:])
+ if keyType != blockType {
+ continue
+ }
+
+ // Check timestamp from value (stored at offset 8)
+ value := iter.Value()
+ if len(value) < valueHeaderSize {
+ continue
+ }
+
+ timestamp := time.Unix(0, int64(binary.BigEndian.Uint64(value[8:16])))
+ if timestamp.Before(cutoff) {
+ keyCopy := make([]byte, len(key))
+ copy(keyCopy, key)
+ batch.Delete(keyCopy, nil)
+ deleted++
+ }
+ }
+
+ if deleted > 0 {
+ if err := batch.Commit(nil); err != nil {
+ c.logger.Errorf("failed to commit age cleanup batch: %v", err)
+ } else {
+ c.logger.Infof("cleaned up %d entries for block type %d (age-based)", deleted, blockType)
+ }
+ }
+}
+
+// lruEntry represents an entry for LRU cleanup sorting.
+type lruEntry struct {
+ root []byte
+ key []byte
+ size int64
+ lastAccess int64
+}
+
+// cleanupByLRU removes least recently used entries when size exceeds limit.
+func (c *CacheCleanup) cleanupByLRU(blockType uint16, maxSize int64) {
+ if maxSize == 0 {
+ return
+ }
+
+ db := c.engine.GetDB()
+
+ // First pass: collect all entries with their sizes and LRU timestamps
+ entries := make([]*lruEntry, 0, 1000)
+ var totalSize int64
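+	// Note: this keeps one record per cached component of this type in memory
+	// before sorting; fine for a bounded cache, O(n log n) in entry count.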
+
+ iter, err := db.NewIter(&pebble.IterOptions{})
+ if err != nil {
+ c.logger.Errorf("failed to create iterator: %v", err)
+ return
+ }
+
+ // Scan block entries
+ for iter.First(); iter.Valid(); iter.Next() {
+ key := iter.Key()
+
+ if len(key) < 36 {
+ continue
+ }
+
+ namespace := binary.BigEndian.Uint16(key[:2])
+ if namespace != KeyNamespaceBlock {
+ continue
+ }
+
+ keyType := binary.BigEndian.Uint16(key[len(key)-2:])
+ if keyType != blockType {
+ continue
+ }
+
+ // Extract root from key
+ root := key[2 : len(key)-2]
+ value := iter.Value()
+ size := int64(len(value))
+ totalSize += size
+
+ // Get LRU timestamp for this entry
+ lastAccess := c.getLRUTimestamp(db, root, blockType)
+
+ keyCopy := make([]byte, len(key))
+ copy(keyCopy, key)
+ rootCopy := make([]byte, len(root))
+ copy(rootCopy, root)
+
+ entries = append(entries, &lruEntry{
+ root: rootCopy,
+ key: keyCopy,
+ size: size,
+ lastAccess: lastAccess,
+ })
+ }
+ iter.Close()
+
+ // Check if we need to clean up
+ if totalSize <= maxSize {
+ return
+ }
+
+ // Sort by last access time (oldest first, 0 = never accessed = oldest)
+ sort.Slice(entries, func(i, j int) bool {
+ return entries[i].lastAccess < entries[j].lastAccess
+ })
+
+ // Delete oldest entries until we're under the limit
+ batch := db.NewBatch()
+ defer batch.Close()
+
+ deleted := 0
+ freedSize := int64(0)
+ targetFree := totalSize - maxSize
+
+ for _, entry := range entries {
+ if freedSize >= targetFree {
+ break
+ }
+
+ batch.Delete(entry.key, nil)
+ freedSize += entry.size
+ deleted++
+ }
+
+ if deleted > 0 {
+ if err := batch.Commit(nil); err != nil {
+ c.logger.Errorf("failed to commit LRU cleanup batch: %v", err)
+ } else {
+ c.logger.Infof("cleaned up %d entries for block type %d (LRU-based, freed %d bytes)",
+ deleted, blockType, freedSize)
+ }
+ }
+}
+
+// getLRUTimestamp retrieves the LRU timestamp for a specific component.
+func (c *CacheCleanup) getLRUTimestamp(db *pebble.DB, root []byte, blockType uint16) int64 {
+ key := makeLRUKey(root)
+
+ res, closer, err := db.Get(key)
+ if err != nil {
+ return 0 // Never accessed
+ }
+ defer closer.Close()
+
+ if len(res) < lruValueSize {
+ return 0
+ }
+
+ // Extract timestamp based on block type
+ var offset int
+ switch blockType {
+ case BlockTypeHeader:
+ offset = 0
+ case BlockTypeBody:
+ offset = 8
+ case BlockTypePayload:
+ offset = 16
+ case BlockTypeBal:
+ offset = 24
+ default:
+ return 0
+ }
+
+ return int64(binary.BigEndian.Uint64(res[offset : offset+8]))
+}
+
+// DeleteLRU removes LRU data for a block (call when deleting block data).
+func (c *CacheCleanup) DeleteLRU(root []byte) {
+ db := c.engine.GetDB()
+ key := makeLRUKey(root)
+ db.Delete(key, nil)
+
+ // Also remove from buffer
+ c.lruMu.Lock()
+ delete(c.lruBuffer, string(root))
+ c.lruMu.Unlock()
+}
diff --git a/blockdb/pebble/pebble.go b/blockdb/pebble/pebble.go
index d8619201..1da3d6b8 100644
--- a/blockdb/pebble/pebble.go
+++ b/blockdb/pebble/pebble.go
@@ -3,6 +3,8 @@ package pebble
import (
"context"
"encoding/binary"
+ "fmt"
+ "time"
"github.com/cockroachdb/pebble"
"github.com/ethpandaops/dora/blockdb/types"
@@ -14,12 +16,18 @@ const (
)
const (
- BlockTypeHeader uint16 = 1
- BlockTypeBody uint16 = 2
+ BlockTypeHeader uint16 = 1
+ BlockTypeBody uint16 = 2
+ BlockTypePayload uint16 = 3
+ BlockTypeBal uint16 = 4
)
+// Value format: [version (8 bytes)] [timestamp (8 bytes)] [data]
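+// The timestamp is the Unix-nano store time written by setComponent; age-based
+// cleanup reads it back from offset 8 to decide expiry.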
+const valueHeaderSize = 16
+
type PebbleEngine struct {
- db *pebble.DB
+ db *pebble.DB
+ config dtypes.PebbleBlockDBConfig
}
func NewPebbleEngine(config dtypes.PebbleBlockDBConfig) (types.BlockDbEngine, error) {
@@ -34,153 +42,259 @@ func NewPebbleEngine(config dtypes.PebbleBlockDBConfig) (types.BlockDbEngine, er
}
return &PebbleEngine{
- db: db,
+ db: db,
+ config: config,
}, nil
}
func (e *PebbleEngine) Close() error {
- err := e.db.Close()
- if err != nil {
- return err
- }
-
- return nil
+ return e.db.Close()
}
-func (e *PebbleEngine) getBlockHeader(root []byte) ([]byte, uint64, error) {
+// makeKey creates a key for the given root and block type.
+func makeKey(root []byte, blockType uint16) []byte {
key := make([]byte, 2+len(root)+2)
binary.BigEndian.PutUint16(key[:2], KeyNamespaceBlock)
copy(key[2:], root)
- binary.BigEndian.PutUint16(key[2+len(root):], BlockTypeHeader)
+ binary.BigEndian.PutUint16(key[2+len(root):], blockType)
+ return key
+}
+
+// getComponent retrieves a single component from the database.
+// Returns (data, version, timestamp, error). Returns nil data if not found.
+func (e *PebbleEngine) getComponent(root []byte, blockType uint16) ([]byte, uint64, time.Time, error) {
+ key := makeKey(root, blockType)
res, closer, err := e.db.Get(key)
- if err != nil && err != pebble.ErrNotFound {
- return nil, 0, err
+ if err == pebble.ErrNotFound {
+ return nil, 0, time.Time{}, nil
+ }
+ if err != nil {
+ return nil, 0, time.Time{}, err
}
defer closer.Close()
- if err == pebble.ErrNotFound || len(res) == 0 {
- return nil, 0, nil
+ if len(res) < valueHeaderSize {
+ return nil, 0, time.Time{}, nil
}
version := binary.BigEndian.Uint64(res[:8])
- header := make([]byte, len(res)-8)
- copy(header, res[8:])
+ timestamp := time.Unix(0, int64(binary.BigEndian.Uint64(res[8:16])))
- return header, version, nil
+ data := make([]byte, len(res)-valueHeaderSize)
+ copy(data, res[valueHeaderSize:])
+
+ return data, version, timestamp, nil
}
-func (e *PebbleEngine) getBlockBody(root []byte, parser func(uint64, []byte) (interface{}, error)) (interface{}, uint64, error) {
- key := make([]byte, 2+len(root)+2)
- binary.BigEndian.PutUint16(key[:2], KeyNamespaceBlock)
- copy(key[2:], root)
- binary.BigEndian.PutUint16(key[2+len(root):], BlockTypeBody)
+// setComponent stores a single component in the database.
+func (e *PebbleEngine) setComponent(root []byte, blockType uint16, version uint64, data []byte) error {
+ key := makeKey(root, blockType)
- res, closer, err := e.db.Get(key)
- if err != nil && err != pebble.ErrNotFound {
- return nil, 0, err
- }
- defer closer.Close()
+ value := make([]byte, valueHeaderSize+len(data))
+ binary.BigEndian.PutUint64(value[:8], version)
+ binary.BigEndian.PutUint64(value[8:16], uint64(time.Now().UnixNano()))
+ copy(value[valueHeaderSize:], data)
- if err == pebble.ErrNotFound || len(res) == 0 {
- return nil, 0, nil
+ return e.db.Set(key, value, nil)
+}
+
+// componentExists checks if a component exists in the database.
+func (e *PebbleEngine) componentExists(root []byte, blockType uint16) bool {
+ key := makeKey(root, blockType)
+
+	res, closer, err := e.db.Get(key)
+	if err != nil {
+		return false
}
+	defer closer.Close()
+	return len(res) >= valueHeaderSize
+}
- version := binary.BigEndian.Uint64(res[:8])
- block := res[8:]
+// GetStoredComponents returns which components exist for a block.
+func (e *PebbleEngine) GetStoredComponents(_ context.Context, _ uint64, root []byte) (types.BlockDataFlags, error) {
+ var flags types.BlockDataFlags
- body, err := parser(version, block)
- if err != nil {
- return nil, 0, err
+ if e.componentExists(root, BlockTypeHeader) {
+ flags |= types.BlockDataFlagHeader
+ }
+ if e.componentExists(root, BlockTypeBody) {
+ flags |= types.BlockDataFlagBody
+ }
+ if e.componentExists(root, BlockTypePayload) {
+ flags |= types.BlockDataFlagPayload
+ }
+ if e.componentExists(root, BlockTypeBal) {
+ flags |= types.BlockDataFlagBal
}
- return body, version, nil
+ return flags, nil
}
-func (e *PebbleEngine) GetBlock(_ context.Context, _ uint64, root []byte, parseBlock func(uint64, []byte) (interface{}, error)) (*types.BlockData, error) {
- header, header_ver, err := e.getBlockHeader(root)
- if err != nil {
- return nil, err
+// GetBlock retrieves block data with selective loading based on flags.
+// Note: LRU access tracking should be done by the caller via CacheCleanup.RecordAccess()
+// to avoid expensive read-modify-write operations on every access.
+func (e *PebbleEngine) GetBlock(
+ _ context.Context,
+ _ uint64,
+ root []byte,
+ flags types.BlockDataFlags,
+ parseBlock func(uint64, []byte) (any, error),
+ parsePayload func(uint64, []byte) (any, error),
+) (*types.BlockData, error) {
+ blockData := &types.BlockData{}
+
+ // Load header if requested
+ if flags.Has(types.BlockDataFlagHeader) {
+ data, version, _, err := e.getComponent(root, BlockTypeHeader)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get header: %w", err)
+ }
+ if data != nil {
+ blockData.HeaderVersion = version
+ blockData.HeaderData = data
+ }
}
- blockData := &types.BlockData{
- HeaderVersion: header_ver,
- HeaderData: header,
- }
+ // Load body if requested
+ if flags.Has(types.BlockDataFlagBody) {
+ data, version, _, err := e.getComponent(root, BlockTypeBody)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get body: %w", err)
+ }
- if parseBlock == nil {
- parseBlock = func(version uint64, block []byte) (interface{}, error) {
- blockData.BodyData = make([]byte, len(block))
- copy(blockData.BodyData, block)
- return nil, nil
+ if data != nil {
+ blockData.BodyVersion = version
+ if parseBlock != nil {
+ body, err := parseBlock(version, data)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse body: %w", err)
+ }
+ blockData.Body = body
+ } else {
+ blockData.BodyData = data
+ }
}
}
- body, body_ver, err := e.getBlockBody(root, parseBlock)
- if err != nil {
- return nil, err
- }
+ // Load payload if requested
+ if flags.Has(types.BlockDataFlagPayload) {
+ data, version, _, err := e.getComponent(root, BlockTypePayload)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get payload: %w", err)
+ }
- blockData.Body = body
- blockData.BodyVersion = body_ver
+ if data != nil {
+ blockData.PayloadVersion = version
+ if parsePayload != nil {
+ payload, err := parsePayload(version, data)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse payload: %w", err)
+ }
+ blockData.Payload = payload
+ } else {
+ blockData.PayloadData = data
+ }
+ }
+ }
- return blockData, nil
-}
+ // Load BAL if requested
+ if flags.Has(types.BlockDataFlagBal) {
+ data, version, _, err := e.getComponent(root, BlockTypeBal)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get BAL: %w", err)
+ }
-func (e *PebbleEngine) checkBlock(key []byte) bool {
- res, closer, err := e.db.Get(key)
- if err == nil && len(res) > 0 {
- closer.Close()
- return true
+ if data != nil {
+ blockData.BalVersion = version
+ blockData.BalData = data
+ }
}
- return false
+ return blockData, nil
}
-func (e *PebbleEngine) addBlockHeader(key []byte, version uint64, header []byte) error {
- data := make([]byte, 8+len(header))
- binary.BigEndian.PutUint64(data[:8], version)
+// AddBlock stores block data. Returns (added, updated, error).
+// - added: true if a new block was created
+// - updated: true if an existing block was updated with new components
+func (e *PebbleEngine) AddBlock(
+ _ context.Context,
+ _ uint64,
+ root []byte,
+ dataCb func() (*types.BlockData, error),
+) (bool, bool, error) {
+ // Check what components already exist
+ existingFlags, err := e.GetStoredComponents(context.Background(), 0, root)
+ if err != nil {
+ return false, false, fmt.Errorf("failed to check existing components: %w", err)
+ }
- return e.db.Set(key, data, nil)
-}
+ // Get the new data
+ blockData, err := dataCb()
+ if err != nil {
+ return false, false, err
+ }
-func (e *PebbleEngine) addBlockBody(root []byte, version uint64, block []byte) error {
- key := make([]byte, 2+len(root)+2)
- binary.BigEndian.PutUint16(key[:2], KeyNamespaceBlock)
- copy(key[2:], root)
- binary.BigEndian.PutUint16(key[2+len(root):], BlockTypeBody)
+ // Determine what new components we have
+ var newFlags types.BlockDataFlags
+ if len(blockData.HeaderData) > 0 {
+ newFlags |= types.BlockDataFlagHeader
+ }
+ if len(blockData.BodyData) > 0 {
+ newFlags |= types.BlockDataFlagBody
+ }
+ if blockData.PayloadVersion != 0 && len(blockData.PayloadData) > 0 {
+ newFlags |= types.BlockDataFlagPayload
+ }
+ if blockData.BalVersion != 0 && len(blockData.BalData) > 0 {
+ newFlags |= types.BlockDataFlagBal
+ }
- data := make([]byte, 8+len(block))
- binary.BigEndian.PutUint64(data[:8], version)
- copy(data[8:], block)
+ // Calculate components to add (new components not in existing)
+ toAdd := newFlags &^ existingFlags
- return e.db.Set(key, data, nil)
-}
+ if toAdd == 0 {
+ // Nothing new to add
+ return false, false, nil
+ }
-func (e *PebbleEngine) AddBlock(_ context.Context, _ uint64, root []byte, dataCb func() (*types.BlockData, error)) (bool, error) {
- key := make([]byte, 2+len(root)+2)
- binary.BigEndian.PutUint16(key[:2], KeyNamespaceBlock)
- copy(key[2:], root)
- binary.BigEndian.PutUint16(key[2+len(root):], BlockTypeHeader)
+ isNew := existingFlags == 0
+ isUpdated := !isNew
- if e.checkBlock(key) {
- return false, nil
+ // Store new components
+ if toAdd.Has(types.BlockDataFlagHeader) {
+ if err := e.setComponent(root, BlockTypeHeader, blockData.HeaderVersion, blockData.HeaderData); err != nil {
+ return false, false, fmt.Errorf("failed to store header: %w", err)
+ }
}
- blockData, err := dataCb()
- if err != nil {
- return false, err
+ if toAdd.Has(types.BlockDataFlagBody) {
+ if err := e.setComponent(root, BlockTypeBody, blockData.BodyVersion, blockData.BodyData); err != nil {
+ return false, false, fmt.Errorf("failed to store body: %w", err)
+ }
}
- err = e.addBlockHeader(key, blockData.HeaderVersion, blockData.HeaderData)
- if err != nil {
- return false, err
+ if toAdd.Has(types.BlockDataFlagPayload) {
+ if err := e.setComponent(root, BlockTypePayload, blockData.PayloadVersion, blockData.PayloadData); err != nil {
+ return false, false, fmt.Errorf("failed to store payload: %w", err)
+ }
}
- err = e.addBlockBody(root, blockData.BodyVersion, blockData.BodyData)
- if err != nil {
- return false, err
+ if toAdd.Has(types.BlockDataFlagBal) {
+ if err := e.setComponent(root, BlockTypeBal, blockData.BalVersion, blockData.BalData); err != nil {
+ return false, false, fmt.Errorf("failed to store BAL: %w", err)
+ }
}
- return true, nil
+ return isNew, isUpdated, nil
+}
+
+// GetDB returns the underlying Pebble database for cleanup operations.
+func (e *PebbleEngine) GetDB() *pebble.DB {
+ return e.db
+}
+
+// GetConfig returns the engine configuration.
+func (e *PebbleEngine) GetConfig() dtypes.PebbleBlockDBConfig {
+ return e.config
}
diff --git a/blockdb/s3/format.go b/blockdb/s3/format.go
new file mode 100644
index 00000000..79fa2c04
--- /dev/null
+++ b/blockdb/s3/format.go
@@ -0,0 +1,202 @@
+package s3
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/attestantio/go-eth2-client/spec"
+
+ "github.com/ethpandaops/dora/blockdb/types"
+)
+
+// Object format versions:
+// v1: header + body (pre-gloas blocks)
+// v2: header + body + payload + bal (gloas+ blocks, payload/BAL introduced in same fork)
+//
+// Note: Both payload and BAL may be empty (length 0), but body is always required.
+
+// Metadata sizes by version
+const (
+ metadataSizeV1 = 16 // 4 (version) + 4 (headerLen) + 4 (bodyVer) + 4 (bodyLen)
+ metadataSizeV2 = 32 // v1 + 4 (payloadVer) + 4 (payloadLen) + 4 (balVer) + 4 (balLen)
+
+ // Maximum metadata size for initial read
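+	// (64 bytes leaves headroom over the 32-byte v2 layout so a single
+	// fixed-size read covers any current or future metadata version.)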
+ maxMetadataSize = 64
+)
+
+// objectMetadata represents the metadata for all format versions.
+type objectMetadata struct {
+ ObjVersion uint32
+
+ // Header (always present)
+ HeaderLength uint32
+
+ // Body (always required)
+ BodyVersion uint32
+ BodyLength uint32
+
+ // Payload (v2+, may be empty)
+ PayloadVersion uint32
+ PayloadLength uint32
+
+ // BAL (v2+, may be empty)
+ BalVersion uint32
+ BalLength uint32
+}
+
+// metadataSize returns the metadata size for this object.
+func (m *objectMetadata) metadataSize() int {
+ switch m.ObjVersion {
+ case 1:
+ return metadataSizeV1
+ case 2:
+ return metadataSizeV2
+ default:
+ return metadataSizeV2
+ }
+}
+
+// headerOffset returns the byte offset of the header data.
+func (m *objectMetadata) headerOffset() int {
+ return m.metadataSize()
+}
+
+// bodyOffset returns the byte offset of the body data.
+func (m *objectMetadata) bodyOffset() int {
+ return m.metadataSize() + int(m.HeaderLength)
+}
+
+// payloadOffset returns the byte offset of the payload data.
+func (m *objectMetadata) payloadOffset() int {
+ return m.metadataSize() + int(m.HeaderLength) + int(m.BodyLength)
+}
+
+// balOffset returns the byte offset of the BAL data.
+func (m *objectMetadata) balOffset() int {
+ return m.metadataSize() + int(m.HeaderLength) + int(m.BodyLength) + int(m.PayloadLength)
+}
+
+// storedFlags returns which components are stored in this object.
+func (m *objectMetadata) storedFlags() types.BlockDataFlags {
+ var flags types.BlockDataFlags
+
+ if m.HeaderLength > 0 {
+ flags |= types.BlockDataFlagHeader
+ }
+ if m.BodyLength > 0 {
+ flags |= types.BlockDataFlagBody
+ }
+ if m.PayloadLength > 0 && m.ObjVersion >= 2 {
+ flags |= types.BlockDataFlagPayload
+ }
+ if m.BalLength > 0 && m.ObjVersion >= 2 {
+ flags |= types.BlockDataFlagBal
+ }
+
+ return flags
+}
+
+// readObjectMetadata reads metadata from any format version.
+func readObjectMetadata(data []byte) (*objectMetadata, error) {
+ if len(data) < 4 {
+ return nil, fmt.Errorf("data too short for metadata version")
+ }
+
+ version := binary.BigEndian.Uint32(data[:4])
+ meta := &objectMetadata{ObjVersion: version}
+
+ switch version {
+ case 1:
+ if len(data) < metadataSizeV1 {
+ return nil, fmt.Errorf("data too short for v1 metadata: need %d, got %d", metadataSizeV1, len(data))
+ }
+ meta.HeaderLength = binary.BigEndian.Uint32(data[4:8])
+ meta.BodyVersion = binary.BigEndian.Uint32(data[8:12])
+ meta.BodyLength = binary.BigEndian.Uint32(data[12:16])
+
+ case 2:
+ if len(data) < metadataSizeV2 {
+ return nil, fmt.Errorf("data too short for v2 metadata: need %d, got %d", metadataSizeV2, len(data))
+ }
+ meta.HeaderLength = binary.BigEndian.Uint32(data[4:8])
+ meta.BodyVersion = binary.BigEndian.Uint32(data[8:12])
+ meta.BodyLength = binary.BigEndian.Uint32(data[12:16])
+ meta.PayloadVersion = binary.BigEndian.Uint32(data[16:20])
+ meta.PayloadLength = binary.BigEndian.Uint32(data[20:24])
+ meta.BalVersion = binary.BigEndian.Uint32(data[24:28])
+ meta.BalLength = binary.BigEndian.Uint32(data[28:32])
+
+ default:
+ return nil, fmt.Errorf("unsupported object version: %d", version)
+ }
+
+ return meta, nil
+}
+
+// writeObjectMetadata creates metadata bytes for the given BlockData.
+// Uses v1 format for pre-gloas blocks, v2 for gloas+ blocks.
+func writeObjectMetadata(data *types.BlockData) []byte {
+ // Use v2 format only for gloas+ blocks (which can have payload/BAL)
+ if data.BodyVersion >= uint64(spec.DataVersionGloas) {
+ meta := make([]byte, metadataSizeV2)
+ binary.BigEndian.PutUint32(meta[0:4], 2)
+ binary.BigEndian.PutUint32(meta[4:8], uint32(len(data.HeaderData)))
+ binary.BigEndian.PutUint32(meta[8:12], uint32(data.BodyVersion))
+ binary.BigEndian.PutUint32(meta[12:16], uint32(len(data.BodyData)))
+ binary.BigEndian.PutUint32(meta[16:20], uint32(data.PayloadVersion))
+ binary.BigEndian.PutUint32(meta[20:24], uint32(len(data.PayloadData)))
+ binary.BigEndian.PutUint32(meta[24:28], uint32(data.BalVersion))
+ binary.BigEndian.PutUint32(meta[28:32], uint32(len(data.BalData)))
+ return meta
+ }
+
+ // Use v1 format for pre-gloas blocks
+ meta := make([]byte, metadataSizeV1)
+ binary.BigEndian.PutUint32(meta[0:4], 1)
+ binary.BigEndian.PutUint32(meta[4:8], uint32(len(data.HeaderData)))
+ binary.BigEndian.PutUint32(meta[8:12], uint32(data.BodyVersion))
+ binary.BigEndian.PutUint32(meta[12:16], uint32(len(data.BodyData)))
+ return meta
+}
+
+// getDataRange calculates the single byte range spanning all requested components.
+// Returns (start, end) where end is inclusive. Returns (-1, -1) if no data to fetch.
+func (m *objectMetadata) getDataRange(flags types.BlockDataFlags) (int64, int64) {
+ var start int64 = -1
+ var end int64 = -1
+
+ // Check each component in order (they're stored sequentially)
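+	// A single span may therefore also cover unrequested components sitting
+	// between two requested ones; those bytes are fetched and simply skipped
+	// during extraction.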
+ if flags.Has(types.BlockDataFlagHeader) && m.HeaderLength > 0 {
+ start = int64(m.headerOffset())
+ end = start + int64(m.HeaderLength) - 1
+ }
+
+ if flags.Has(types.BlockDataFlagBody) && m.BodyLength > 0 {
+ bodyStart := int64(m.bodyOffset())
+ bodyEnd := bodyStart + int64(m.BodyLength) - 1
+ if start < 0 {
+ start = bodyStart
+ }
+ end = bodyEnd
+ }
+
+ if flags.Has(types.BlockDataFlagPayload) && m.PayloadLength > 0 && m.ObjVersion >= 2 {
+ payloadStart := int64(m.payloadOffset())
+ payloadEnd := payloadStart + int64(m.PayloadLength) - 1
+ if start < 0 {
+ start = payloadStart
+ }
+ end = payloadEnd
+ }
+
+ if flags.Has(types.BlockDataFlagBal) && m.BalLength > 0 && m.ObjVersion >= 2 {
+ balStart := int64(m.balOffset())
+ balEnd := balStart + int64(m.BalLength) - 1
+ if start < 0 {
+ start = balStart
+ }
+ end = balEnd
+ }
+
+ return start, end
+}
diff --git a/blockdb/s3/s3store.go b/blockdb/s3/s3store.go
index 1110b4a6..6e4a32a2 100644
--- a/blockdb/s3/s3store.go
+++ b/blockdb/s3/s3store.go
@@ -3,13 +3,13 @@ package s3
import (
"bytes"
"context"
- "encoding/binary"
"encoding/hex"
"fmt"
"io"
"path"
"strings"
+ "github.com/attestantio/go-eth2-client/spec"
"github.com/ethpandaops/dora/blockdb/types"
dtypes "github.com/ethpandaops/dora/types"
"github.com/minio/minio-go/v7"
@@ -20,6 +20,10 @@ type S3Engine struct {
client *minio.Client
bucket string
pathPrefix string
+ config dtypes.S3BlockDBConfig
+
+ // Range request support (configured via EnableRangeRequests)
+ rangeRequestsEnabled bool
}
func NewS3Engine(config dtypes.S3BlockDBConfig) (types.BlockDbEngine, error) {
@@ -42,9 +46,11 @@ func NewS3Engine(config dtypes.S3BlockDBConfig) (types.BlockDbEngine, error) {
}
engine := &S3Engine{
- client: client,
- bucket: config.Bucket,
- pathPrefix: strings.TrimPrefix(config.Path, "/"),
+ client: client,
+ bucket: config.Bucket,
+ pathPrefix: strings.TrimPrefix(config.Path, "/"),
+ config: config,
+ rangeRequestsEnabled: config.EnableRangeRequests,
}
return engine, nil
@@ -59,158 +65,457 @@ func (e *S3Engine) getObjectKey(root []byte, slot uint64) string {
return path.Join(e.pathPrefix, fmt.Sprintf("%06d", slot/10000), fmt.Sprintf("%010d_%s", slot, rootHex))
}
-type objectMetadata struct {
- objVersion uint32
- headerLength uint32
- bodyVersion uint32
- bodyLength uint32
+// GetStoredComponents returns which components exist for a block by reading metadata.
+func (e *S3Engine) GetStoredComponents(ctx context.Context, slot uint64, root []byte) (types.BlockDataFlags, error) {
+ key := e.getObjectKey(root, slot)
+
+ // Read just the metadata
+ meta, err := e.readMetadata(ctx, key)
+ if err != nil {
+ return 0, err
+ }
+ if meta == nil {
+ return 0, nil
+ }
+
+ return meta.storedFlags(), nil
}
-func (e *S3Engine) readObjectMetadata(data []byte) (*objectMetadata, int, error) {
- metadataLength := 4
- metadata := &objectMetadata{
- objVersion: binary.BigEndian.Uint32(data[:4]),
+// readMetadata reads object metadata using range request if enabled, otherwise full read.
+func (e *S3Engine) readMetadata(ctx context.Context, key string) (*objectMetadata, error) {
+ if e.config.EnableRangeRequests {
+ meta, err := e.readMetadataWithRange(ctx, key)
+ if err == nil {
+ return meta, nil
+ }
+ // Fall through to full read on error
+ }
+
+ // Full read fallback
+ obj, err := e.client.GetObject(ctx, e.bucket, key, minio.GetObjectOptions{})
+ if err != nil {
+ errResp := minio.ToErrorResponse(err)
+ if errResp.Code == "NoSuchKey" {
+ return nil, nil
+ }
+ return nil, fmt.Errorf("failed to get object: %w", err)
}
+ defer obj.Close()
- switch metadata.objVersion {
- case 1:
- metadata.headerLength = binary.BigEndian.Uint32(data[4:8])
- metadata.bodyVersion = binary.BigEndian.Uint32(data[8:12])
- metadata.bodyLength = binary.BigEndian.Uint32(data[12:16])
- metadataLength += 12
+ buf := make([]byte, maxMetadataSize)
+	// Use io.ReadFull: a single Read on a streamed object may legally return
+	// fewer bytes than requested.
+	n, err := io.ReadFull(obj, buf)
+	if (err != nil && err != io.EOF && err != io.ErrUnexpectedEOF) || n == 0 {
+		return nil, fmt.Errorf("failed to read metadata: %w", err)
}
- return metadata, metadataLength, nil
+ return readObjectMetadata(buf[:n])
}
-func (e *S3Engine) writeObjectMetadata(metadata *objectMetadata) []byte {
- data := make([]byte, 4, 16)
- binary.BigEndian.PutUint32(data, metadata.objVersion)
+// readMetadataWithRange reads metadata using HTTP Range request.
+func (e *S3Engine) readMetadataWithRange(ctx context.Context, key string) (*objectMetadata, error) {
+ opts := minio.GetObjectOptions{}
+ if err := opts.SetRange(0, int64(maxMetadataSize-1)); err != nil {
+ return nil, err
+ }
- switch metadata.objVersion {
- case 1:
- data = binary.BigEndian.AppendUint32(data, metadata.headerLength)
- data = binary.BigEndian.AppendUint32(data, metadata.bodyVersion)
- data = binary.BigEndian.AppendUint32(data, metadata.bodyLength)
+ obj, err := e.client.GetObject(ctx, e.bucket, key, opts)
+ if err != nil {
+ errResp := minio.ToErrorResponse(err)
+ if errResp.Code == "NoSuchKey" {
+ return nil, nil
+ }
+ return nil, fmt.Errorf("failed to get object with range: %w", err)
+ }
+ defer obj.Close()
+
+ buf := make([]byte, maxMetadataSize)
+	n, err := io.ReadFull(obj, buf)
+	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+		return nil, fmt.Errorf("failed to read range: %w", err)
}
- return data
+ return readObjectMetadata(buf[:n])
}
-func (e *S3Engine) GetBlock(ctx context.Context, slot uint64, root []byte, parseBlock func(uint64, []byte) (interface{}, error)) (*types.BlockData, error) {
+// GetBlock retrieves block data with selective loading based on flags.
+func (e *S3Engine) GetBlock(
+ ctx context.Context,
+ slot uint64,
+ root []byte,
+ flags types.BlockDataFlags,
+ parseBlock func(uint64, []byte) (any, error),
+ parsePayload func(uint64, []byte) (any, error),
+) (*types.BlockData, error) {
key := e.getObjectKey(root, slot)
+ // Try range-based loading if enabled
+	if e.rangeRequestsEnabled {
+ data, err := e.getBlockWithRanges(ctx, key, flags, parseBlock, parsePayload)
+ if err == nil {
+ return data, nil
+ }
+ // Fall through to full read on error
+ }
+
+ // Full read fallback
+ return e.getBlockFull(ctx, key, flags, parseBlock, parsePayload)
+}
+
+// getBlockWithRanges uses a single range request for selective loading.
+// Makes exactly 2 GET requests: one for metadata, one for all requested data.
+func (e *S3Engine) getBlockWithRanges(
+ ctx context.Context,
+ key string,
+ flags types.BlockDataFlags,
+ parseBlock func(uint64, []byte) (any, error),
+ parsePayload func(uint64, []byte) (any, error),
+) (*types.BlockData, error) {
+ // First, get metadata (1 GET request)
+ meta, err := e.readMetadataWithRange(ctx, key)
+ if err != nil {
+ return nil, err
+ }
+ if meta == nil {
+ return nil, nil
+ }
+
+ // Calculate the single byte range spanning all requested components
+ rangeStart, rangeEnd := meta.getDataRange(flags)
+ if rangeStart < 0 {
+ // No data to fetch
+ return &types.BlockData{
+ HeaderVersion: uint64(meta.ObjVersion),
+ BodyVersion: uint64(meta.BodyVersion),
+ PayloadVersion: uint64(meta.PayloadVersion),
+ BalVersion: uint64(meta.BalVersion),
+ }, nil
+ }
+
+ // Fetch all requested data in a single GET request
+ opts := minio.GetObjectOptions{}
+ if err := opts.SetRange(rangeStart, rangeEnd); err != nil {
+ return nil, err
+ }
+
+ obj, err := e.client.GetObject(ctx, e.bucket, key, opts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get object range: %w", err)
+ }
+ defer obj.Close()
+
+ data, err := io.ReadAll(obj)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read object range: %w", err)
+ }
+
+ // Extract requested components from the fetched data
+ return e.extractComponents(meta, flags, data, rangeStart, parseBlock, parsePayload)
+}
+
+// extractComponents extracts requested components from fetched data.
+func (e *S3Engine) extractComponents(
+ meta *objectMetadata,
+ flags types.BlockDataFlags,
+ data []byte,
+ dataStartOffset int64,
+ parseBlock func(uint64, []byte) (any, error),
+ parsePayload func(uint64, []byte) (any, error),
+) (*types.BlockData, error) {
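+	// As in the legacy v1 layout, the object format version doubles as the
+	// header version; headers carry no separate version field in this format.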
+ blockData := &types.BlockData{
+ HeaderVersion: uint64(meta.ObjVersion),
+ BodyVersion: uint64(meta.BodyVersion),
+ PayloadVersion: uint64(meta.PayloadVersion),
+ BalVersion: uint64(meta.BalVersion),
+ }
+
+ // Extract header if requested
+ if flags.Has(types.BlockDataFlagHeader) && meta.HeaderLength > 0 {
+ start := int64(meta.headerOffset()) - dataStartOffset
+ end := start + int64(meta.HeaderLength)
+ if start >= 0 && end <= int64(len(data)) {
+ blockData.HeaderData = data[start:end]
+ }
+ }
+
+ // Extract body if requested
+ if flags.Has(types.BlockDataFlagBody) && meta.BodyLength > 0 {
+ start := int64(meta.bodyOffset()) - dataStartOffset
+ end := start + int64(meta.BodyLength)
+ if start >= 0 && end <= int64(len(data)) {
+ bodyData := data[start:end]
+ if parseBlock != nil {
+ body, err := parseBlock(uint64(meta.BodyVersion), bodyData)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse body: %w", err)
+ }
+ blockData.Body = body
+ } else {
+ blockData.BodyData = bodyData
+ }
+ }
+ }
+
+ // Extract payload if requested (v2+)
+ if flags.Has(types.BlockDataFlagPayload) && meta.PayloadLength > 0 && meta.ObjVersion >= 2 {
+ start := int64(meta.payloadOffset()) - dataStartOffset
+ end := start + int64(meta.PayloadLength)
+ if start >= 0 && end <= int64(len(data)) {
+ payloadData := data[start:end]
+ if parsePayload != nil {
+ payload, err := parsePayload(uint64(meta.PayloadVersion), payloadData)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse payload: %w", err)
+ }
+ blockData.Payload = payload
+ } else {
+ blockData.PayloadData = payloadData
+ }
+ }
+ }
+
+ // Extract BAL if requested (v2+)
+ if flags.Has(types.BlockDataFlagBal) && meta.BalLength > 0 && meta.ObjVersion >= 2 {
+ start := int64(meta.balOffset()) - dataStartOffset
+ end := start + int64(meta.BalLength)
+ if start >= 0 && end <= int64(len(data)) {
+ blockData.BalData = data[start:end]
+ }
+ }
+
+ return blockData, nil
+}
+
+// getBlockFull performs a full object read (fallback when range requests fail).
+func (e *S3Engine) getBlockFull(
+ ctx context.Context,
+ key string,
+ flags types.BlockDataFlags,
+ parseBlock func(uint64, []byte) (any, error),
+ parsePayload func(uint64, []byte) (any, error),
+) (*types.BlockData, error) {
obj, err := e.client.GetObject(ctx, e.bucket, key, minio.GetObjectOptions{})
if err != nil {
- if minio.ToErrorResponse(err).Code == "NoSuchKey" {
+ errResp := minio.ToErrorResponse(err)
+ if errResp.Code == "NoSuchKey" {
return nil, nil
}
return nil, fmt.Errorf("failed to get object: %w", err)
}
defer obj.Close()
- // read metadata
- buf := make([]byte, 1024)
- buflen, err := obj.Read(buf)
- if (err != nil && err != io.EOF) || buflen == 0 {
- return nil, fmt.Errorf("failed to read metadata: %w", err)
+ // Read entire object
+ data, err := io.ReadAll(obj)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read object: %w", err)
}
- metadata, metadataLength, err := e.readObjectMetadata(buf)
+ // Parse metadata
+ meta, err := readObjectMetadata(data)
if err != nil {
return nil, fmt.Errorf("failed to read metadata: %w", err)
}
- headerData := make([]byte, metadata.headerLength)
- headerOffset := 0
- if buflen > metadataLength {
- copy(headerData, buf[metadataLength:buflen])
- headerOffset = buflen - metadataLength
+ blockData := &types.BlockData{
+ HeaderVersion: uint64(meta.ObjVersion),
+ BodyVersion: uint64(meta.BodyVersion),
+ PayloadVersion: uint64(meta.PayloadVersion),
+ BalVersion: uint64(meta.BalVersion),
}
- if buflen < int(metadataLength)+int(metadata.headerLength) {
- _, err = obj.Read(headerData[headerOffset:])
- if err != nil {
- return nil, fmt.Errorf("failed to read header data: %w", err)
- }
- }
+ metaSize := meta.metadataSize()
- bodyData := make([]byte, metadata.bodyLength)
- bodyOffset := 0
- if buflen > int(metadataLength)+int(metadata.headerLength) {
- copy(bodyData, buf[int(metadataLength)+int(metadata.headerLength):buflen])
- bodyOffset = buflen - int(metadataLength) - int(metadata.headerLength)
+ // Extract header if requested
+ if flags.Has(types.BlockDataFlagHeader) && meta.HeaderLength > 0 {
+ headerEnd := metaSize + int(meta.HeaderLength)
+ if headerEnd <= len(data) {
+ blockData.HeaderData = data[metaSize:headerEnd]
+ }
}
- if buflen < int(metadataLength)+int(metadata.headerLength)+int(metadata.bodyLength) {
- _, err = obj.Read(bodyData[bodyOffset:])
- if err != nil {
- return nil, fmt.Errorf("failed to read body data: %w", err)
+ // Extract body if requested
+ if flags.Has(types.BlockDataFlagBody) && meta.BodyLength > 0 {
+ bodyStart := metaSize + int(meta.HeaderLength)
+ bodyEnd := bodyStart + int(meta.BodyLength)
+ if bodyEnd <= len(data) {
+ bodyData := data[bodyStart:bodyEnd]
+ if parseBlock != nil {
+ body, err := parseBlock(uint64(meta.BodyVersion), bodyData)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse body: %w", err)
+ }
+ blockData.Body = body
+ } else {
+ blockData.BodyData = bodyData
+ }
}
}
- blockData := &types.BlockData{
- HeaderVersion: uint64(metadata.objVersion),
- HeaderData: headerData,
- BodyVersion: uint64(metadata.bodyVersion),
+ // Extract payload if requested (v2+)
+ if flags.Has(types.BlockDataFlagPayload) && meta.PayloadLength > 0 && meta.ObjVersion >= 2 {
+ payloadStart := metaSize + int(meta.HeaderLength) + int(meta.BodyLength)
+ payloadEnd := payloadStart + int(meta.PayloadLength)
+ if payloadEnd <= len(data) {
+ payloadData := data[payloadStart:payloadEnd]
+ if parsePayload != nil {
+ payload, err := parsePayload(uint64(meta.PayloadVersion), payloadData)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse payload: %w", err)
+ }
+ blockData.Payload = payload
+ } else {
+ blockData.PayloadData = payloadData
+ }
+ }
}
- if parseBlock != nil {
- body, err := parseBlock(uint64(metadata.bodyVersion), bodyData)
- if err != nil {
- return nil, fmt.Errorf("failed to parse body: %w", err)
+	// Extract BAL if requested (v2+)
+ if flags.Has(types.BlockDataFlagBal) && meta.BalLength > 0 && meta.ObjVersion >= 2 {
+ balStart := metaSize + int(meta.HeaderLength) + int(meta.BodyLength) + int(meta.PayloadLength)
+ balEnd := balStart + int(meta.BalLength)
+ if balEnd <= len(data) {
+ blockData.BalData = data[balStart:balEnd]
}
-
- blockData.Body = body
- } else {
- blockData.BodyData = bodyData
}
return blockData, nil
}
-func (e *S3Engine) AddBlock(ctx context.Context, slot uint64, root []byte, dataCb func() (*types.BlockData, error)) (bool, error) {
+// AddBlock stores block data. Returns (added, updated, error).
+func (e *S3Engine) AddBlock(
+ ctx context.Context,
+ slot uint64,
+ root []byte,
+ dataCb func() (*types.BlockData, error),
+) (bool, bool, error) {
key := e.getObjectKey(root, slot)
- // Check if object already exists
- stat, err := e.client.StatObject(ctx, e.bucket, key, minio.StatObjectOptions{})
- if err == nil && stat.Size > 0 {
- return false, nil
+ // Check what components already exist
+	existingMeta, err := e.readMetadata(ctx, key)
+	if err != nil {
+		// readMetadata already reports a missing object as (nil, nil), so any
+		// error here is a real read failure; treat the object as absent and
+		// let the write below recreate it.
+		existingMeta = nil
}
+ // Get the new data
blockData, err := dataCb()
if err != nil {
- return false, fmt.Errorf("failed to get block data: %w", err)
+ return false, false, fmt.Errorf("failed to get block data: %w", err)
+ }
+
+ // Calculate what we already have
+ var existingFlags types.BlockDataFlags
+ if existingMeta != nil {
+ existingFlags = existingMeta.storedFlags()
+ }
+
+ // Calculate what the new data provides
+ var newFlags types.BlockDataFlags
+ if len(blockData.HeaderData) > 0 {
+ newFlags |= types.BlockDataFlagHeader
+ }
+ if len(blockData.BodyData) > 0 {
+ newFlags |= types.BlockDataFlagBody
+ }
+ if blockData.PayloadVersion != 0 && len(blockData.PayloadData) > 0 {
+ newFlags |= types.BlockDataFlagPayload
+ }
+ if blockData.BalVersion != 0 && len(blockData.BalData) > 0 {
+ newFlags |= types.BlockDataFlagBal
}
- metadata := &objectMetadata{
- objVersion: uint32(blockData.HeaderVersion),
- headerLength: uint32(len(blockData.HeaderData)),
- bodyVersion: uint32(blockData.BodyVersion),
- bodyLength: uint32(len(blockData.BodyData)),
+ // Check if we need to update (new data has more components)
+ needsUpdate := (newFlags &^ existingFlags) != 0
+ isNew := existingFlags == 0
+
+ if !isNew && !needsUpdate {
+ // Already have all the data
+ return false, false, nil
}
- metadataBytes := e.writeObjectMetadata(metadata)
- metadataLength := len(metadataBytes)
+ // If updating, merge with existing data
+ finalData := blockData
+ if !isNew && needsUpdate {
+		// Fetch existing data and merge. Abort on read failure: rewriting the
+		// object with only the new components would silently drop the rest.
+		existingData, err := e.GetBlock(ctx, slot, root, types.BlockDataFlagAll, nil, nil)
+		if err != nil {
+			return false, false, fmt.Errorf("failed to read existing block for merge: %w", err)
+		}
+		if existingData != nil {
+			finalData = mergeBlockData(existingData, blockData)
+		}
+ }
- // Prepare data with header and body versions and lengths
- data := make([]byte, metadataLength+int(metadata.headerLength)+int(metadata.bodyLength))
- copy(data[:metadataLength], metadataBytes)
- copy(data[metadataLength:metadataLength+int(metadata.headerLength)], blockData.HeaderData)
- copy(data[metadataLength+int(metadata.headerLength):], blockData.BodyData)
+ // Write object (v1 for pre-gloas, v2 for gloas+)
+ metaBytes := writeObjectMetadata(finalData)
- // Upload object
+ // Calculate total size and build reader chain (avoids copying to concatenated buffer)
+ totalSize := int64(len(metaBytes) + len(finalData.HeaderData) + len(finalData.BodyData))
+ readers := []io.Reader{
+ bytes.NewReader(metaBytes),
+ bytes.NewReader(finalData.HeaderData),
+ bytes.NewReader(finalData.BodyData),
+ }
+
+ if finalData.BodyVersion >= uint64(spec.DataVersionGloas) {
+ totalSize += int64(len(finalData.PayloadData) + len(finalData.BalData))
+ readers = append(readers,
+ bytes.NewReader(finalData.PayloadData),
+ bytes.NewReader(finalData.BalData),
+ )
+ }
+
+ // Upload object using MultiReader to stream without extra buffer allocation
_, err = e.client.PutObject(
ctx,
e.bucket,
key,
- bytes.NewReader(data),
- int64(len(data)),
+ io.MultiReader(readers...),
+ totalSize,
minio.PutObjectOptions{ContentType: "application/octet-stream"},
)
if err != nil {
- return false, fmt.Errorf("failed to upload block: %w", err)
+ return false, false, fmt.Errorf("failed to upload block: %w", err)
+ }
+
+ return isNew, !isNew && needsUpdate, nil
+}
+
+// mergeBlockData merges existing data with new data (new values take precedence for non-empty fields).
+func mergeBlockData(existing, newData *types.BlockData) *types.BlockData {
+	result := &types.BlockData{}
+
+	// Use new data if available, otherwise keep existing
+	if len(newData.HeaderData) > 0 {
+		result.HeaderVersion = newData.HeaderVersion
+		result.HeaderData = newData.HeaderData
+	} else {
+		result.HeaderVersion = existing.HeaderVersion
+		result.HeaderData = existing.HeaderData
+	}
+
+	if len(newData.BodyData) > 0 {
+		result.BodyVersion = newData.BodyVersion
+		result.BodyData = newData.BodyData
+	} else {
+		result.BodyVersion = existing.BodyVersion
+		result.BodyData = existing.BodyData
+	}
+
+	if newData.PayloadVersion != 0 && len(newData.PayloadData) > 0 {
+		result.PayloadVersion = newData.PayloadVersion
+		result.PayloadData = newData.PayloadData
+	} else {
+		result.PayloadVersion = existing.PayloadVersion
+		result.PayloadData = existing.PayloadData
+	}
+
+	if newData.BalVersion != 0 && len(newData.BalData) > 0 {
+		result.BalVersion = newData.BalVersion
+		result.BalData = newData.BalData
+	} else {
+		result.BalVersion = existing.BalVersion
+		result.BalData = existing.BalData
}
- return true, nil
+ return result
}
diff --git a/blockdb/tiered/tiered.go b/blockdb/tiered/tiered.go
new file mode 100644
index 00000000..04f05a16
--- /dev/null
+++ b/blockdb/tiered/tiered.go
@@ -0,0 +1,278 @@
+package tiered
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/ethpandaops/dora/blockdb/pebble"
+ "github.com/ethpandaops/dora/blockdb/s3"
+ "github.com/ethpandaops/dora/blockdb/types"
+ dtypes "github.com/ethpandaops/dora/types"
+)
+
+// TieredEngine combines Pebble (cache) and S3 (primary storage) in a tiered architecture.
+// Reads check cache first, then fall back to S3.
+// Writes go to both (write-through).
+type TieredEngine struct {
+ cache *pebble.PebbleEngine
+ primary *s3.S3Engine
+ cleanup *pebble.CacheCleanup
+ logger logrus.FieldLogger
+}
+
+// NewTieredEngine creates a new tiered storage engine.
+func NewTieredEngine(config dtypes.TieredBlockDBConfig, logger logrus.FieldLogger) (types.BlockDbEngine, error) {
+ // Initialize Pebble cache
+ cacheEngine, err := pebble.NewPebbleEngine(config.Pebble)
+ if err != nil {
+ return nil, fmt.Errorf("failed to initialize pebble cache: %w", err)
+ }
+
+ pebbleEngine, ok := cacheEngine.(*pebble.PebbleEngine)
+ if !ok {
+ return nil, fmt.Errorf("unexpected pebble engine type")
+ }
+
+ // Initialize S3 primary storage
+ primaryEngine, err := s3.NewS3Engine(config.S3)
+ if err != nil {
+ cacheEngine.Close()
+ return nil, fmt.Errorf("failed to initialize s3 primary storage: %w", err)
+ }
+
+ s3Engine, ok := primaryEngine.(*s3.S3Engine)
+ if !ok {
+ cacheEngine.Close()
+ return nil, fmt.Errorf("unexpected s3 engine type")
+ }
+
+ // Initialize cache cleanup
+ cleanup := pebble.NewCacheCleanup(pebbleEngine, logger)
+ cleanup.Start()
+
+ return &TieredEngine{
+ cache: pebbleEngine,
+ primary: s3Engine,
+ cleanup: cleanup,
+ logger: logger.WithField("component", "tiered-blockdb"),
+ }, nil
+}
+
+// Close closes both storage engines.
+func (e *TieredEngine) Close() error {
+ if e.cleanup != nil {
+ e.cleanup.Stop()
+ }
+
+ var errs []error
+ if err := e.cache.Close(); err != nil {
+ errs = append(errs, fmt.Errorf("cache close: %w", err))
+ }
+ if err := e.primary.Close(); err != nil {
+ errs = append(errs, fmt.Errorf("primary close: %w", err))
+ }
+
+	// errors.Join returns nil when there is nothing to report and preserves
+	// both failures when cache and primary each error.
+	return errors.Join(errs...)
+}
+
+// GetStoredComponents returns which components exist for a block.
+// Checks cache first, then S3.
+func (e *TieredEngine) GetStoredComponents(ctx context.Context, slot uint64, root []byte) (types.BlockDataFlags, error) {
+ // Check cache first
+ cacheFlags, err := e.cache.GetStoredComponents(ctx, slot, root)
+ if err != nil {
+ e.logger.Debugf("cache GetStoredComponents error: %v", err)
+ }
+
+ // If cache has all components, return early
+ if cacheFlags == types.BlockDataFlagAll {
+ return cacheFlags, nil
+ }
+
+ // Check S3 for additional components
+ s3Flags, err := e.primary.GetStoredComponents(ctx, slot, root)
+ if err != nil {
+ return cacheFlags, nil // Return cache result on S3 error
+ }
+
+ return cacheFlags | s3Flags, nil
+}
+
+// GetBlock retrieves block data with selective loading.
+// Checks cache first, fetches missing components from S3.
+func (e *TieredEngine) GetBlock(
+ ctx context.Context,
+ slot uint64,
+ root []byte,
+ flags types.BlockDataFlags,
+ parseBlock func(uint64, []byte) (any, error),
+ parsePayload func(uint64, []byte) (any, error),
+) (*types.BlockData, error) {
+ // Check what's in cache
+ cacheFlags, _ := e.cache.GetStoredComponents(ctx, slot, root)
+
+ // Determine what we can get from cache vs S3
+ cacheRequestFlags := flags & cacheFlags
+ s3RequestFlags := flags &^ cacheFlags
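+	// (&^ is Go's AND NOT: it keeps the requested flags that are not already
+	// satisfiable from the cache.)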
+
+ result := &types.BlockData{}
+
+ // Get from cache
+ if cacheRequestFlags != 0 {
+ cacheData, err := e.cache.GetBlock(ctx, slot, root, cacheRequestFlags, parseBlock, parsePayload)
+ if err != nil {
+ e.logger.Debugf("cache GetBlock error: %v", err)
+ } else if cacheData != nil {
+ mergeBlockDataInto(result, cacheData)
+
+ // Record LRU access
+ if e.cleanup != nil {
+ e.cleanup.RecordAccess(root, cacheRequestFlags)
+ }
+ }
+ }
+
+ // Get missing components from S3
+ if s3RequestFlags != 0 {
+ s3Data, err := e.primary.GetBlock(ctx, slot, root, s3RequestFlags, parseBlock, parsePayload)
+ if err != nil {
+ e.logger.Debugf("s3 GetBlock error: %v", err)
+ } else if s3Data != nil {
+ mergeBlockDataInto(result, s3Data)
+
+ // Cache the S3 data for future reads
+ e.cacheS3Data(ctx, slot, root, s3Data, s3RequestFlags)
+ }
+ }
+
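+	// Note: when no component is found in either tier, this returns an empty
+	// (non-nil) BlockData, whereas the S3 engine reports a missing object as
+	// (nil, nil).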
+ return result, nil
+}
+
+// cacheS3Data stores S3 data in the cache for future reads.
+func (e *TieredEngine) cacheS3Data(ctx context.Context, slot uint64, root []byte, data *types.BlockData, flags types.BlockDataFlags) {
+ // Build cache data with only the components we fetched from S3
+ cacheData := &types.BlockData{}
+
+ if flags.Has(types.BlockDataFlagHeader) && len(data.HeaderData) > 0 {
+ cacheData.HeaderVersion = data.HeaderVersion
+ cacheData.HeaderData = data.HeaderData
+ }
+ if flags.Has(types.BlockDataFlagBody) && len(data.BodyData) > 0 {
+ cacheData.BodyVersion = data.BodyVersion
+ cacheData.BodyData = data.BodyData
+ }
+ if flags.Has(types.BlockDataFlagPayload) && len(data.PayloadData) > 0 {
+ cacheData.PayloadVersion = data.PayloadVersion
+ cacheData.PayloadData = data.PayloadData
+ }
+ if flags.Has(types.BlockDataFlagBal) && len(data.BalData) > 0 {
+ cacheData.BalVersion = data.BalVersion
+ cacheData.BalData = data.BalData
+ }
+
+ // Add to cache (ignore errors - caching is best effort)
+ _, _, err := e.cache.AddBlock(ctx, slot, root, func() (*types.BlockData, error) {
+ return cacheData, nil
+ })
+ if err != nil {
+ e.logger.Debugf("failed to cache S3 data: %v", err)
+ }
+
+ // Flush LRU updates since we did a write
+ if e.cleanup != nil {
+ e.cleanup.FlushLRU()
+ }
+}
+
+// AddBlock stores block data using write-through to both cache and S3.
+// Returns (added, updated, error).
+func (e *TieredEngine) AddBlock(
+ ctx context.Context,
+ slot uint64,
+ root []byte,
+ dataCb func() (*types.BlockData, error),
+) (bool, bool, error) {
+ // Get the data once
+ data, err := dataCb()
+ if err != nil {
+ return false, false, err
+ }
+
+ // Check what components already exist (in cache or S3)
+ existingFlags, _ := e.GetStoredComponents(ctx, slot, root)
+
+ // Determine what new data provides
+ var newFlags types.BlockDataFlags
+ if len(data.HeaderData) > 0 {
+ newFlags |= types.BlockDataFlagHeader
+ }
+ if len(data.BodyData) > 0 {
+ newFlags |= types.BlockDataFlagBody
+ }
+ if data.PayloadVersion != 0 && len(data.PayloadData) > 0 {
+ newFlags |= types.BlockDataFlagPayload
+ }
+ if data.BalVersion != 0 && len(data.BalData) > 0 {
+ newFlags |= types.BlockDataFlagBal
+ }
+
+ // Check if we need to update
+ needsUpdate := (newFlags &^ existingFlags) != 0
+ isNew := existingFlags == 0
+
+ if !isNew && !needsUpdate {
+ return false, false, nil
+ }
+
+ // Write-through: write to S3 first (primary), then cache
+ // S3 handles merging with existing data
+ s3Added, s3Updated, err := e.primary.AddBlock(ctx, slot, root, func() (*types.BlockData, error) {
+ return data, nil
+ })
+ if err != nil {
+ return false, false, fmt.Errorf("failed to write to S3: %w", err)
+ }
+
+ // Write to cache
+ _, _, err = e.cache.AddBlock(ctx, slot, root, func() (*types.BlockData, error) {
+ return data, nil
+ })
+ if err != nil {
+ e.logger.Warnf("failed to write to cache: %v", err)
+ // Don't fail - S3 write succeeded
+ }
+
+ // Flush LRU updates after write
+ if e.cleanup != nil {
+ e.cleanup.FlushLRU()
+ }
+
+ return s3Added, s3Updated, nil
+}
+
+// mergeBlockDataInto merges source data into target (source values take precedence for non-empty fields).
+func mergeBlockDataInto(target, source *types.BlockData) {
+ if source.HeaderVersion != 0 || len(source.HeaderData) > 0 {
+ target.HeaderVersion = source.HeaderVersion
+ target.HeaderData = source.HeaderData
+ }
+ if source.BodyVersion != 0 || len(source.BodyData) > 0 {
+ target.BodyVersion = source.BodyVersion
+ target.BodyData = source.BodyData
+ target.Body = source.Body
+ }
+ if source.PayloadVersion != 0 || len(source.PayloadData) > 0 {
+ target.PayloadVersion = source.PayloadVersion
+ target.PayloadData = source.PayloadData
+ target.Payload = source.Payload
+ }
+ if source.BalVersion != 0 || len(source.BalData) > 0 {
+ target.BalVersion = source.BalVersion
+ target.BalData = source.BalData
+ }
+}
diff --git a/blockdb/types/engine.go b/blockdb/types/engine.go
index 8152b501..a10bdc66 100644
--- a/blockdb/types/engine.go
+++ b/blockdb/types/engine.go
@@ -2,15 +2,54 @@ package types
import "context"
+// BlockData contains all data components for a block.
type BlockData struct {
+ // Header data
HeaderVersion uint64
HeaderData []byte
- BodyVersion uint64
- BodyData []byte
- Body interface{}
+
+ // Body data
+ BodyVersion uint64
+ BodyData []byte
+ Body any // Parsed body (optional)
+
+ // Execution payload data (ePBS)
+ PayloadVersion uint64
+ PayloadData []byte
+ Payload any // Parsed payload (optional)
+
+ // Block access list data
+ BalVersion uint64
+ BalData []byte
}
+
+// BlockDbEngine defines the interface for block database engines.
type BlockDbEngine interface {
+ // Close closes the database engine.
Close() error
- GetBlock(ctx context.Context, slot uint64, root []byte, parseBlock func(uint64, []byte) (interface{}, error)) (*BlockData, error)
- AddBlock(ctx context.Context, slot uint64, root []byte, dataCb func() (*BlockData, error)) (bool, error)
+
+ // GetBlock retrieves block data with selective loading based on flags.
+ // If parseBlock is nil, raw body data is stored in BlockData.BodyData.
+ // If parsePayload is nil, raw payload data is stored in BlockData.PayloadData.
+ GetBlock(
+ ctx context.Context,
+ slot uint64,
+ root []byte,
+ flags BlockDataFlags,
+ parseBlock func(uint64, []byte) (any, error),
+ parsePayload func(uint64, []byte) (any, error),
+ ) (*BlockData, error)
+
+ // AddBlock stores block data. Returns:
+ // - added: true if a new block was created
+ // - updated: true if an existing block was updated with new components
+ AddBlock(
+ ctx context.Context,
+ slot uint64,
+ root []byte,
+ dataCb func() (*BlockData, error),
+ ) (added bool, updated bool, err error)
+
+ // GetStoredComponents returns which components exist for a block.
+ GetStoredComponents(ctx context.Context, slot uint64, root []byte) (BlockDataFlags, error)
}
diff --git a/blockdb/types/flags.go b/blockdb/types/flags.go
new file mode 100644
index 00000000..34aff4db
--- /dev/null
+++ b/blockdb/types/flags.go
@@ -0,0 +1,38 @@
+package types
+
+// BlockDataFlags specifies which components to load from storage.
+type BlockDataFlags uint8
+
+const (
+ // BlockDataFlagHeader requests the block header data.
+ BlockDataFlagHeader BlockDataFlags = 1 << iota // 0x01
+ // BlockDataFlagBody requests the block body data.
+ BlockDataFlagBody // 0x02
+ // BlockDataFlagPayload requests the execution payload data.
+ BlockDataFlagPayload // 0x04
+ // BlockDataFlagBal requests the block access list data.
+ BlockDataFlagBal // 0x08
+
+ // BlockDataFlagAll requests all block components.
+ BlockDataFlagAll = BlockDataFlagHeader | BlockDataFlagBody | BlockDataFlagPayload | BlockDataFlagBal
+)
+
+// Has returns true if the flag set contains the specified flag.
+func (f BlockDataFlags) Has(flag BlockDataFlags) bool {
+ return f&flag == flag
+}
+
+// HasAny returns true if the flag set contains any of the specified flags.
+func (f BlockDataFlags) HasAny(flags BlockDataFlags) bool {
+ return f&flags != 0
+}
+
+// Add returns a new flag set with the specified flag added.
+func (f BlockDataFlags) Add(flag BlockDataFlags) BlockDataFlags {
+ return f | flag
+}
+
+// Remove returns a new flag set with the specified flag removed.
+func (f BlockDataFlags) Remove(flag BlockDataFlags) BlockDataFlags {
+ return f &^ flag
+}
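+
+// Example combining and testing flags:
+//
+//	flags := BlockDataFlagHeader.Add(BlockDataFlagBody)
+//	flags.Has(BlockDataFlagHeader)                        // true
+//	flags.HasAny(BlockDataFlagPayload | BlockDataFlagBal) // false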
diff --git a/clients/consensus/chainspec.go b/clients/consensus/chainspec.go
index 8fa135c1..834de0ae 100644
--- a/clients/consensus/chainspec.go
+++ b/clients/consensus/chainspec.go
@@ -53,6 +53,8 @@ type ChainSpecConfig struct {
ElectraForkEpoch *uint64 `yaml:"ELECTRA_FORK_EPOCH" check-if-fork:"ElectraForkEpoch"`
FuluForkVersion phase0.Version `yaml:"FULU_FORK_VERSION" check-if-fork:"FuluForkEpoch"`
FuluForkEpoch *uint64 `yaml:"FULU_FORK_EPOCH" check-if-fork:"FuluForkEpoch"`
+ GloasForkVersion phase0.Version `yaml:"GLOAS_FORK_VERSION" check-if-fork:"GloasForkEpoch"`
+ GloasForkEpoch *uint64 `yaml:"GLOAS_FORK_EPOCH" check-if-fork:"GloasForkEpoch"`
// Time parameters
SecondsPerSlot uint64 `yaml:"SECONDS_PER_SLOT"`
@@ -118,6 +120,11 @@ type ChainSpecConfig struct {
ValidatorCustodyRequirement *uint64 `yaml:"VALIDATOR_CUSTODY_REQUIREMENT" check-if-fork:"FuluForkEpoch"`
BalancePerAdditionalCustodyGroup *uint64 `yaml:"BALANCE_PER_ADDITIONAL_CUSTODY_GROUP" check-if-fork:"FuluForkEpoch"`
BlobSchedule []BlobScheduleEntry `yaml:"BLOB_SCHEDULE" check-if-fork:"FuluForkEpoch"`
+
+ // Gloas
+ PtcSize uint64 `yaml:"PTC_SIZE" check-if-fork:"GloasForkEpoch"`
+ MaxPayloadAttestations uint64 `yaml:"MAX_PAYLOAD_ATTESTATIONS" check-if-fork:"GloasForkEpoch"`
+ DomainPtcAttester phase0.DomainType `yaml:"DOMAIN_PTC_ATTESTER" check-if-fork:"GloasForkEpoch"`
}
type ChainSpecPreset struct {
diff --git a/clients/consensus/chainstate.go b/clients/consensus/chainstate.go
index f96a59e7..a8bc252a 100644
--- a/clients/consensus/chainstate.go
+++ b/clients/consensus/chainstate.go
@@ -361,6 +361,34 @@ func (cs *ChainState) GetForkDigestForEpoch(epoch phase0.Epoch) phase0.ForkDiges
return cs.GetForkDigest(currentForkVersion, currentBlobParams)
}
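+// GetBlobScheduleForEpoch returns the blob schedule entry in effect at the given epoch.
+// BLOB_SCHEDULE entries override the Deneb/Electra defaults; the schedule is assumed
+// to be sorted by ascending activation epoch.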
+func (cs *ChainState) GetBlobScheduleForEpoch(epoch phase0.Epoch) *BlobScheduleEntry {
+ if cs.specs == nil {
+ return nil
+ }
+
+ var blobSchedule *BlobScheduleEntry
+
+ if cs.specs.ElectraForkEpoch != nil && epoch >= phase0.Epoch(*cs.specs.ElectraForkEpoch) {
+ blobSchedule = &BlobScheduleEntry{
+ Epoch: *cs.specs.ElectraForkEpoch,
+ MaxBlobsPerBlock: cs.specs.MaxBlobsPerBlockElectra,
+ }
+ } else if cs.specs.DenebForkEpoch != nil && epoch >= phase0.Epoch(*cs.specs.DenebForkEpoch) {
+ blobSchedule = &BlobScheduleEntry{
+ Epoch: *cs.specs.DenebForkEpoch,
+ MaxBlobsPerBlock: cs.specs.MaxBlobsPerBlock,
+ }
+ }
+
+ for i, blobScheduleEntry := range cs.specs.BlobSchedule {
+ if blobScheduleEntry.Epoch <= uint64(epoch) {
+ blobSchedule = &cs.specs.BlobSchedule[i]
+ }
+ }
+
+ return blobSchedule
+}
+
func (cs *ChainState) GetForkDigest(forkVersion phase0.Version, blobParams *BlobScheduleEntry) phase0.ForkDigest {
if cs.specs == nil || cs.genesis == nil {
return phase0.ForkDigest{}
@@ -444,6 +472,14 @@ func (cs *ChainState) GetValidatorChurnLimit(validatorCount uint64) uint64 {
return adaptable
}
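+// IsEip7732Enabled returns true if the Gloas fork (EIP-7732, ePBS) is active at the given epoch.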
+func (cs *ChainState) IsEip7732Enabled(epoch phase0.Epoch) bool {
+ if cs.specs == nil {
+ return false
+ }
+
+ return cs.specs.GloasForkEpoch != nil && phase0.Epoch(*cs.specs.GloasForkEpoch) <= epoch
+}
+
func (cs *ChainState) GetBalanceChurnLimit(totalActiveBalance uint64) uint64 {
if cs.specs == nil {
return 0
diff --git a/clients/consensus/client.go b/clients/consensus/client.go
index b28c8ba7..e641f022 100644
--- a/clients/consensus/client.go
+++ b/clients/consensus/client.go
@@ -6,6 +6,7 @@ import (
"time"
v1 "github.com/attestantio/go-eth2-client/api/v1"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/sirupsen/logrus"
@@ -23,37 +24,39 @@ type ClientConfig struct {
}
type Client struct {
- pool *Pool
- clientIdx uint16
- endpointConfig *ClientConfig
- clientCtx context.Context
- clientCtxCancel context.CancelFunc
- rpcClient *rpc.BeaconClient
- logger *logrus.Entry
- isOnline bool
- isSyncing bool
- isOptimistic bool
- versionStr string
- nodeIdentity *rpc.NodeIdentity
- clientType ClientType
- lastEvent time.Time
- retryCounter uint64
- lastError error
- headMutex sync.RWMutex
- headRoot phase0.Root
- headSlot phase0.Slot
- justifiedRoot phase0.Root
- justifiedEpoch phase0.Epoch
- finalizedRoot phase0.Root
- finalizedEpoch phase0.Epoch
- lastFinalityUpdateEpoch phase0.Epoch
- lastMetadataUpdateEpoch phase0.Epoch
- lastMetadataUpdateTime time.Time
- lastSyncUpdateEpoch phase0.Epoch
- peers []*v1.Peer
- blockDispatcher utils.Dispatcher[*v1.BlockEvent]
- headDispatcher utils.Dispatcher[*v1.HeadEvent]
- checkpointDispatcher utils.Dispatcher[*v1.Finality]
+ pool *Pool
+ clientIdx uint16
+ endpointConfig *ClientConfig
+ clientCtx context.Context
+ clientCtxCancel context.CancelFunc
+ rpcClient *rpc.BeaconClient
+ logger *logrus.Entry
+ isOnline bool
+ isSyncing bool
+ isOptimistic bool
+ versionStr string
+ nodeIdentity *rpc.NodeIdentity
+ clientType ClientType
+ lastEvent time.Time
+ retryCounter uint64
+ lastError error
+ headMutex sync.RWMutex
+ headRoot phase0.Root
+ headSlot phase0.Slot
+ justifiedRoot phase0.Root
+ justifiedEpoch phase0.Epoch
+ finalizedRoot phase0.Root
+ finalizedEpoch phase0.Epoch
+ lastFinalityUpdateEpoch phase0.Epoch
+ lastMetadataUpdateEpoch phase0.Epoch
+ lastMetadataUpdateTime time.Time
+ lastSyncUpdateEpoch phase0.Epoch
+ peers []*v1.Peer
+ blockDispatcher utils.Dispatcher[*v1.BlockEvent]
+ headDispatcher utils.Dispatcher[*v1.HeadEvent]
+ checkpointDispatcher utils.Dispatcher[*v1.Finality]
+ executionPayloadDispatcher utils.Dispatcher[*v1.ExecutionPayloadAvailableEvent]
+ executionPayloadBidDispatcher utils.Dispatcher[*gloas.SignedExecutionPayloadBid]
specWarnings []string // warnings from incomplete spec checks
specs map[string]interface{}
@@ -102,6 +105,14 @@ func (client *Client) SubscribeFinalizedEvent(capacity int) *utils.Subscription[
return client.checkpointDispatcher.Subscribe(capacity, false)
}
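+// SubscribeExecutionPayloadAvailableEvent subscribes to execution_payload_available events from this client's event stream.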
+func (client *Client) SubscribeExecutionPayloadAvailableEvent(capacity int, blocking bool) *utils.Subscription[*v1.ExecutionPayloadAvailableEvent] {
+ return client.executionPayloadDispatcher.Subscribe(capacity, blocking)
+}
+
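+// SubscribeExecutionPayloadBidEvent subscribes to execution_payload_bid events from this client's event stream.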
+func (client *Client) SubscribeExecutionPayloadBidEvent(capacity int, blocking bool) *utils.Subscription[*gloas.SignedExecutionPayloadBid] {
+ return client.executionPayloadBidDispatcher.Subscribe(capacity, blocking)
+}
+
func (client *Client) GetPool() *Pool {
return client.pool
}
diff --git a/clients/consensus/clientlogic.go b/clients/consensus/clientlogic.go
index 7e5b4a8c..59b938a6 100644
--- a/clients/consensus/clientlogic.go
+++ b/clients/consensus/clientlogic.go
@@ -8,6 +8,7 @@ import (
"time"
v1 "github.com/attestantio/go-eth2-client/api/v1"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/sirupsen/logrus"
@@ -133,7 +134,11 @@ func (client *Client) runClientLogic() error {
}
// start event stream
- blockStream := client.rpcClient.NewBlockStream(client.clientCtx, client.logger, rpc.StreamBlockEvent|rpc.StreamHeadEvent|rpc.StreamFinalizedEvent)
+ blockStream := client.rpcClient.NewBlockStream(
+ client.clientCtx,
+ client.logger,
+ rpc.StreamBlockEvent|rpc.StreamHeadEvent|rpc.StreamFinalizedEvent|rpc.StreamExecutionPayloadEvent,
+ )
defer blockStream.Close()
// process events
@@ -171,6 +176,12 @@ func (client *Client) runClientLogic() error {
if err != nil {
client.logger.Warnf("failed processing finalized event: %v", err)
}
+
+ case rpc.StreamExecutionPayloadEvent:
+ client.executionPayloadDispatcher.Fire(evt.Data.(*v1.ExecutionPayloadAvailableEvent))
+
+ case rpc.StreamExecutionPayloadBidEvent:
+ client.executionPayloadBidDispatcher.Fire(evt.Data.(*gloas.SignedExecutionPayloadBid))
}
client.logger.Tracef("event (%v) processing time: %v ms", evt.Event, time.Since(now).Milliseconds())
diff --git a/clients/consensus/rpc/beaconapi.go b/clients/consensus/rpc/beaconapi.go
index 6768091b..7435764e 100644
--- a/clients/consensus/rpc/beaconapi.go
+++ b/clients/consensus/rpc/beaconapi.go
@@ -19,6 +19,7 @@ import (
"github.com/attestantio/go-eth2-client/spec"
"github.com/attestantio/go-eth2-client/spec/capella"
"github.com/attestantio/go-eth2-client/spec/deneb"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/rs/zerolog"
"github.com/sirupsen/logrus"
@@ -406,6 +407,22 @@ func (bc *BeaconClient) GetBlockBodyByBlockroot(ctx context.Context, blockroot p
return result.Data, nil
}
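+// GetExecutionPayloadByBlockroot fetches the signed execution payload envelope (ePBS) for the given block root.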
+func (bc *BeaconClient) GetExecutionPayloadByBlockroot(ctx context.Context, blockroot phase0.Root) (*gloas.SignedExecutionPayloadEnvelope, error) {
+ provider, isProvider := bc.clientSvc.(eth2client.ExecutionPayloadProvider)
+ if !isProvider {
+ return nil, fmt.Errorf("get execution payload not supported")
+ }
+
+ result, err := provider.SignedExecutionPayloadEnvelope(ctx, &api.SignedExecutionPayloadEnvelopeOpts{
+ Block: fmt.Sprintf("0x%x", blockroot),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return result.Data, nil
+}
+
func (bc *BeaconClient) GetState(ctx context.Context, stateRef string) (*spec.VersionedBeaconState, error) {
provider, isProvider := bc.clientSvc.(eth2client.BeaconStateProvider)
if !isProvider {
diff --git a/clients/consensus/rpc/beaconstream.go b/clients/consensus/rpc/beaconstream.go
index be6fd92c..8c91b7ae 100644
--- a/clients/consensus/rpc/beaconstream.go
+++ b/clients/consensus/rpc/beaconstream.go
@@ -10,6 +10,7 @@ import (
"time"
v1 "github.com/attestantio/go-eth2-client/api/v1"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/donovanhide/eventsource"
"github.com/sirupsen/logrus"
@@ -17,9 +18,11 @@ import (
)
const (
- StreamBlockEvent uint16 = 0x01
- StreamHeadEvent uint16 = 0x02
- StreamFinalizedEvent uint16 = 0x04
+ StreamBlockEvent uint16 = 0x01
+ StreamHeadEvent uint16 = 0x02
+ StreamFinalizedEvent uint16 = 0x04
+ StreamExecutionPayloadEvent uint16 = 0x08
+ StreamExecutionPayloadBidEvent uint16 = 0x10
)
type BeaconStreamEvent struct {
@@ -87,6 +90,10 @@ func (bs *BeaconStream) startStream() {
bs.processHeadEvent(evt)
case "finalized_checkpoint":
bs.processFinalizedEvent(evt)
+ case "execution_payload_available":
+ bs.processExecutionPayloadAvailableEvent(evt)
+ case "execution_payload_bid":
+ bs.processExecutionPayloadBidEvent(evt)
}
case <-stream.Ready:
bs.ReadyChan <- &BeaconStreamStatus{
@@ -148,6 +155,26 @@ func (bs *BeaconStream) subscribeStream(endpoint string, events uint16) *eventst
topicsCount++
}
+ if events&StreamExecutionPayloadEvent > 0 {
+ if topicsCount > 0 {
+ fmt.Fprintf(&topics, ",")
+ }
+
+ fmt.Fprintf(&topics, "execution_payload_available")
+
+ topicsCount++
+ }
+
+ if events&StreamExecutionPayloadBidEvent > 0 {
+ if topicsCount > 0 {
+ fmt.Fprintf(&topics, ",")
+ }
+
+ fmt.Fprintf(&topics, "execution_payload_bid")
+
+ topicsCount++
+ }
+
if topicsCount == 0 {
return nil
}
@@ -225,6 +252,36 @@ func (bs *BeaconStream) processFinalizedEvent(evt eventsource.Event) {
}
}
+func (bs *BeaconStream) processExecutionPayloadAvailableEvent(evt eventsource.Event) {
+ var parsed v1.ExecutionPayloadAvailableEvent
+
+ err := json.Unmarshal([]byte(evt.Data()), &parsed)
+ if err != nil {
+ bs.logger.Warnf("beacon block stream failed to decode execution_payload event: %v", err)
+ return
+ }
+
+ bs.EventChan <- &BeaconStreamEvent{
+ Event: StreamExecutionPayloadEvent,
+ Data: &parsed,
+ }
+}
+
+func (bs *BeaconStream) processExecutionPayloadBidEvent(evt eventsource.Event) {
+ var parsed gloas.SignedExecutionPayloadBid
+
+ err := json.Unmarshal([]byte(evt.Data()), &parsed)
+ if err != nil {
+ bs.logger.Warnf("beacon block stream failed to decode execution_payload_bid event: %v", err)
+ return
+ }
+
+ bs.EventChan <- &BeaconStreamEvent{
+ Event: StreamExecutionPayloadBidEvent,
+ Data: &parsed,
+ }
+}
+
func getRedactedURL(requrl string) string {
var logurl string
diff --git a/cmd/dora-explorer/main.go b/cmd/dora-explorer/main.go
index 3bb577bb..eee977e4 100644
--- a/cmd/dora-explorer/main.go
+++ b/cmd/dora-explorer/main.go
@@ -231,6 +231,7 @@ func startFrontend(router *mux.Router) {
router.HandleFunc("/validators/submit_withdrawals", handlers.SubmitWithdrawal).Methods("GET")
router.HandleFunc("/validator/{idxOrPubKey}", handlers.Validator).Methods("GET")
router.HandleFunc("/validator/{index}/slots", handlers.ValidatorSlots).Methods("GET")
+ router.HandleFunc("/builders", handlers.Builders).Methods("GET")
if utils.Config.Frontend.Pprof {
// add pprof handler
diff --git a/cmd/dora-utils/blockdb_sync.go b/cmd/dora-utils/blockdb_sync.go
index fdebcb81..7905d7e9 100644
--- a/cmd/dora-utils/blockdb_sync.go
+++ b/cmd/dora-utils/blockdb_sync.go
@@ -271,7 +271,7 @@ func processSlot(ctx context.Context, pool *consensus.Pool, dynSsz *dynssz.DynSs
return slotResult{slot: slot, err: fmt.Errorf("failed to marshal block header for slot %d: %v", slot, err), time: time.Since(t1)}
}
- added, err := blockdb.GlobalBlockDb.AddBlockWithCallback(ctx, slot, blockHeader.Root[:], func() (*btypes.BlockData, error) {
+ added, _, err := blockdb.GlobalBlockDb.AddBlockWithCallback(ctx, slot, blockHeader.Root[:], func() (*btypes.BlockData, error) {
blockBody, err := client.GetRPCClient().GetBlockBodyByBlockroot(ctx, blockHeader.Root)
if err != nil {
return nil, fmt.Errorf("failed to get block body for slot %d: %v", slot, err)
@@ -282,11 +282,29 @@ func processSlot(ctx context.Context, pool *consensus.Pool, dynSsz *dynssz.DynSs
return nil, fmt.Errorf("failed to marshal block body for slot %d: %v", slot, err)
}
+ var payloadVersion uint64
+ var payloadBytes []byte
+
+ chainState := pool.GetChainState()
+ if chainState.IsEip7732Enabled(chainState.EpochOfSlot(phase0.Slot(slot))) {
+ blockPayload, err := client.GetRPCClient().GetExecutionPayloadByBlockroot(ctx, blockHeader.Root)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get block execution payload for slot %d: %v", slot, err)
+ }
+
+ payloadVersion, payloadBytes, err = beacon.MarshalVersionedSignedExecutionPayloadEnvelopeSSZ(dynSsz, blockPayload, true)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal block execution payload for slot %d: %v", slot, err)
+ }
+ }
+
return &btypes.BlockData{
- HeaderVersion: 1,
- HeaderData: headerBytes,
- BodyVersion: version,
- BodyData: bodyBytes,
+ HeaderVersion: 1,
+ HeaderData: headerBytes,
+ BodyVersion: version,
+ BodyData: bodyBytes,
+ PayloadVersion: payloadVersion,
+ PayloadData: payloadBytes,
}, nil
})
if err != nil {
diff --git a/db/block_bids.go b/db/block_bids.go
new file mode 100644
index 00000000..bfcfac47
--- /dev/null
+++ b/db/block_bids.go
@@ -0,0 +1,113 @@
+package db
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/ethpandaops/dora/dbtypes"
+ "github.com/jmoiron/sqlx"
+)
+
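+// InsertBids inserts or updates execution payload bids, keyed by
+// (parent_root, parent_hash, block_hash, builder_index).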
+func InsertBids(bids []*dbtypes.BlockBid, tx *sqlx.Tx) error {
+ var sql strings.Builder
+ fmt.Fprint(&sql,
+ EngineQuery(map[dbtypes.DBEngineType]string{
+ dbtypes.DBEnginePgsql: "INSERT INTO block_bids ",
+ dbtypes.DBEngineSqlite: "INSERT OR REPLACE INTO block_bids ",
+ }),
+ "(parent_root, parent_hash, block_hash, fee_recipient, gas_limit, builder_index, slot, value, el_payment)",
+ " VALUES ",
+ )
+ argIdx := 0
+ fieldCount := 9
+
+ args := make([]any, len(bids)*fieldCount)
+ for i, bid := range bids {
+ if i > 0 {
+ fmt.Fprintf(&sql, ", ")
+ }
+ fmt.Fprintf(&sql, "(")
+ for f := 0; f < fieldCount; f++ {
+ if f > 0 {
+ fmt.Fprintf(&sql, ", ")
+ }
+ fmt.Fprintf(&sql, "$%v", argIdx+f+1)
+ }
+ fmt.Fprintf(&sql, ")")
+
+ args[argIdx+0] = bid.ParentRoot
+ args[argIdx+1] = bid.ParentHash
+ args[argIdx+2] = bid.BlockHash
+ args[argIdx+3] = bid.FeeRecipient
+ args[argIdx+4] = bid.GasLimit
+ args[argIdx+5] = bid.BuilderIndex
+ args[argIdx+6] = bid.Slot
+ args[argIdx+7] = bid.Value
+ args[argIdx+8] = bid.ElPayment
+ argIdx += fieldCount
+ }
+ fmt.Fprint(&sql, EngineQuery(map[dbtypes.DBEngineType]string{
+ dbtypes.DBEnginePgsql: " ON CONFLICT (parent_root, parent_hash, block_hash, builder_index) DO UPDATE SET " +
+ "fee_recipient = excluded.fee_recipient, " +
+ "gas_limit = excluded.gas_limit, " +
+ "slot = excluded.slot, " +
+ "value = excluded.value, " +
+ "el_payment = excluded.el_payment",
+ dbtypes.DBEngineSqlite: "",
+ }))
+
+ _, err := tx.Exec(sql.String(), args...)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
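+// GetBidsForBlockRoot returns all bids building on the given parent block root, highest value first.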
+func GetBidsForBlockRoot(blockRoot []byte) []*dbtypes.BlockBid {
+ var sql strings.Builder
+ args := []any{
+ blockRoot,
+ }
+ fmt.Fprint(&sql, `
+ SELECT
+ parent_root, parent_hash, block_hash, fee_recipient, gas_limit, builder_index, slot, value, el_payment
+ FROM block_bids
+ WHERE parent_root = $1
+ ORDER BY value DESC
+ `)
+
+ bids := []*dbtypes.BlockBid{}
+ err := ReaderDb.Select(&bids, sql.String(), args...)
+ if err != nil {
+ logger.Errorf("Error while fetching bids for block root: %v", err)
+ return nil
+ }
+ return bids
+}
+
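+// GetBidsForSlotRange returns all bids with slot >= minSlot, ordered by slot (descending) and value (descending).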
+func GetBidsForSlotRange(minSlot uint64) []*dbtypes.BlockBid {
+ var sql strings.Builder
+ args := []any{
+ minSlot,
+ }
+ fmt.Fprint(&sql, `
+ SELECT
+ parent_root, parent_hash, block_hash, fee_recipient, gas_limit, builder_index, slot, value, el_payment
+ FROM block_bids
+ WHERE slot >= $1
+ ORDER BY slot DESC, value DESC
+ `)
+
+ bids := []*dbtypes.BlockBid{}
+ err := ReaderDb.Select(&bids, sql.String(), args...)
+ if err != nil {
+ logger.Errorf("Error while fetching bids for slot range: %v", err)
+ return nil
+ }
+ return bids
+}
+
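+// DeleteBidsBeforeSlot removes all bids below the given slot.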
+func DeleteBidsBeforeSlot(minSlot uint64, tx *sqlx.Tx) error {
+ _, err := tx.Exec(`DELETE FROM block_bids WHERE slot < $1`, minSlot)
+ return err
+}
diff --git a/db/builders.go b/db/builders.go
new file mode 100644
index 00000000..cb7d314d
--- /dev/null
+++ b/db/builders.go
@@ -0,0 +1,449 @@
+package db
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/ethpandaops/dora/dbtypes"
+ "github.com/jmoiron/sqlx"
+)
+
+// InsertBuilder inserts a single builder into the database
+func InsertBuilder(builder *dbtypes.Builder, tx *sqlx.Tx) error {
+ _, err := tx.Exec(EngineQuery(map[dbtypes.DBEngineType]string{
+ dbtypes.DBEnginePgsql: `
+ INSERT INTO builders (
+ pubkey, builder_index, version, execution_address,
+ deposit_epoch, withdrawable_epoch, superseded
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7)
+ ON CONFLICT (pubkey) DO UPDATE SET
+ builder_index = excluded.builder_index,
+ version = excluded.version,
+ execution_address = excluded.execution_address,
+ deposit_epoch = excluded.deposit_epoch,
+ withdrawable_epoch = excluded.withdrawable_epoch,
+ superseded = excluded.superseded`,
+ dbtypes.DBEngineSqlite: `
+ INSERT OR REPLACE INTO builders (
+ pubkey, builder_index, version, execution_address,
+ deposit_epoch, withdrawable_epoch, superseded
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7)`,
+ }),
+ builder.Pubkey,
+ builder.BuilderIndex,
+ builder.Version,
+ builder.ExecutionAddress,
+ builder.DepositEpoch,
+ builder.WithdrawableEpoch,
+ builder.Superseded)
+
+ if err != nil {
+ return fmt.Errorf("error inserting builder: %w", err)
+ }
+ return nil
+}
+
+// InsertBuilderBatch inserts multiple builders in a batch
+func InsertBuilderBatch(builders []*dbtypes.Builder, tx *sqlx.Tx) error {
+ if len(builders) == 0 {
+ return nil
+ }
+
+ valueStrings := make([]string, len(builders))
+ valueArgs := make([]any, 0, len(builders)*7)
+ for i, b := range builders {
+ valueStrings[i] = fmt.Sprintf("($%v, $%v, $%v, $%v, $%v, $%v, $%v)",
+ i*7+1, i*7+2, i*7+3, i*7+4, i*7+5, i*7+6, i*7+7)
+ valueArgs = append(valueArgs,
+ b.Pubkey,
+ b.BuilderIndex,
+ b.Version,
+ b.ExecutionAddress,
+ b.DepositEpoch,
+ b.WithdrawableEpoch,
+ b.Superseded)
+ }
+
+ stmt := fmt.Sprintf(EngineQuery(map[dbtypes.DBEngineType]string{
+ dbtypes.DBEnginePgsql: `
+ INSERT INTO builders (
+ pubkey, builder_index, version, execution_address,
+ deposit_epoch, withdrawable_epoch, superseded
+ ) VALUES %s
+ ON CONFLICT (pubkey) DO UPDATE SET
+ builder_index = excluded.builder_index,
+ version = excluded.version,
+ execution_address = excluded.execution_address,
+ deposit_epoch = excluded.deposit_epoch,
+ withdrawable_epoch = excluded.withdrawable_epoch,
+ superseded = excluded.superseded`,
+ dbtypes.DBEngineSqlite: `
+ INSERT OR REPLACE INTO builders (
+ pubkey, builder_index, version, execution_address,
+ deposit_epoch, withdrawable_epoch, superseded
+ ) VALUES %s`,
+ }), strings.Join(valueStrings, ","))
+
+ _, err := tx.Exec(stmt, valueArgs...)
+ if err != nil {
+ return fmt.Errorf("error inserting builder batch: %w", err)
+ }
+
+ return nil
+}
+
+// GetBuilderByPubkey returns a builder by pubkey (primary key)
+func GetBuilderByPubkey(pubkey []byte) *dbtypes.Builder {
+ builder := dbtypes.Builder{}
+ err := ReaderDb.Get(&builder, `
+ SELECT * FROM builders WHERE pubkey = $1
+ `, pubkey)
+ if err != nil {
+ return nil
+ }
+ return &builder
+}
+
+// GetActiveBuilderByIndex returns the active (non-superseded) builder for a given index
+func GetActiveBuilderByIndex(index uint64) *dbtypes.Builder {
+ builder := dbtypes.Builder{}
+ err := ReaderDb.Get(&builder, `
+ SELECT * FROM builders WHERE builder_index = $1 AND superseded = false
+ `, index)
+ if err != nil {
+ return nil
+ }
+ return &builder
+}
+
+// GetBuildersByIndex returns all builders (including superseded) for a given index
+func GetBuildersByIndex(index uint64) []*dbtypes.Builder {
+ builders := []*dbtypes.Builder{}
+ err := ReaderDb.Select(&builders, `
+ SELECT * FROM builders WHERE builder_index = $1 ORDER BY superseded ASC
+ `, index)
+ if err != nil {
+ logger.Errorf("Error while fetching builders by index: %v", err)
+ return nil
+ }
+ return builders
+}
+
+// GetBuilderRange returns builders in a given index range (only active builders)
+func GetBuilderRange(startIndex uint64, endIndex uint64) []*dbtypes.Builder {
+ builders := []*dbtypes.Builder{}
+ err := ReaderDb.Select(&builders, `
+ SELECT * FROM builders
+ WHERE builder_index >= $1 AND builder_index <= $2 AND superseded = false
+ ORDER BY builder_index ASC
+ `, startIndex, endIndex)
+ if err != nil {
+ logger.Errorf("Error while fetching builder range: %v", err)
+ return nil
+ }
+ return builders
+}
+
+// GetMaxBuilderIndex returns the highest builder index in the database
+func GetMaxBuilderIndex() (uint64, error) {
+ var maxIndex uint64
+ err := ReaderDb.Get(&maxIndex, "SELECT COALESCE(MAX(builder_index), 0) FROM builders")
+ if err != nil {
+ return 0, fmt.Errorf("error getting max builder index: %w", err)
+ }
+ return maxIndex, nil
+}
+
+// GetBuilderCount returns the count of builders (optionally only active)
+func GetBuilderCount(activeOnly bool) (uint64, error) {
+ var count uint64
+ var err error
+ if activeOnly {
+ err = ReaderDb.Get(&count, "SELECT COUNT(*) FROM builders WHERE superseded = false")
+ } else {
+ err = ReaderDb.Get(&count, "SELECT COUNT(*) FROM builders")
+ }
+ if err != nil {
+ return 0, fmt.Errorf("error getting builder count: %w", err)
+ }
+ return count, nil
+}
+
+// SetBuilderSuperseded marks a builder as superseded
+func SetBuilderSuperseded(pubkey []byte, tx *sqlx.Tx) error {
+ _, err := tx.Exec(`
+ UPDATE builders SET superseded = true WHERE pubkey = $1
+ `, pubkey)
+ if err != nil {
+ return fmt.Errorf("error setting builder superseded: %w", err)
+ }
+ return nil
+}
+
+// SetBuildersSuperseded marks multiple builders as superseded in a batch
+func SetBuildersSuperseded(pubkeys [][]byte, tx *sqlx.Tx) error {
+ if len(pubkeys) == 0 {
+ return nil
+ }
+
+ var sql strings.Builder
+ sql.WriteString("UPDATE builders SET superseded = true WHERE pubkey IN (")
+
+ args := make([]any, len(pubkeys))
+ for i, pk := range pubkeys {
+ if i > 0 {
+ sql.WriteString(", ")
+ }
+ fmt.Fprintf(&sql, "$%d", i+1)
+ args[i] = pk
+ }
+ sql.WriteString(")")
+
+ _, err := tx.Exec(sql.String(), args...)
+ if err != nil {
+ return fmt.Errorf("error setting builders superseded: %w", err)
+ }
+ return nil
+}
+
+// StreamBuildersByPubkeys streams builders by pubkeys in batches
+func StreamBuildersByPubkeys(pubkeys [][]byte, cb func(builder *dbtypes.Builder) bool) error {
+ const batchSize = 1000
+
+ for i := 0; i < len(pubkeys); i += batchSize {
+ end := min(i+batchSize, len(pubkeys))
+ batch := pubkeys[i:end]
+
+ var sql strings.Builder
+ fmt.Fprintf(&sql, `
+ SELECT
+ pubkey, builder_index, version, execution_address,
+ deposit_epoch, withdrawable_epoch, superseded
+ FROM builders
+ WHERE pubkey in (`)
+
+ args := make([]any, len(batch))
+ for j, pk := range batch {
+ if j > 0 {
+ fmt.Fprintf(&sql, ", ")
+ }
+ fmt.Fprintf(&sql, "$%v", j+1)
+ args[j] = pk
+ }
+ fmt.Fprintf(&sql, ")")
+
+ // Create pubkey map for ordering
+ pubkeyMap := make(map[string]int, len(batch))
+ for pos, pk := range batch {
+ pubkeyMap[string(pk)] = pos
+ }
+
+ // Fetch all builders for this batch
+ builders := make([]*dbtypes.Builder, len(batch))
+ rows, err := ReaderDb.Query(sql.String(), args...)
+ if err != nil {
+ return fmt.Errorf("error querying builders: %w", err)
+ }
+
+ for rows.Next() {
+ builder := &dbtypes.Builder{}
+ err := rows.Scan(
+ &builder.Pubkey,
+ &builder.BuilderIndex,
+ &builder.Version,
+ &builder.ExecutionAddress,
+ &builder.DepositEpoch,
+ &builder.WithdrawableEpoch,
+ &builder.Superseded,
+ )
+ if err != nil {
+ rows.Close()
+ return fmt.Errorf("error scanning builder: %w", err)
+ }
+ pos := pubkeyMap[string(builder.Pubkey)]
+ builders[pos] = builder
+ }
+
+ if err = rows.Err(); err != nil {
+ rows.Close()
+ return fmt.Errorf("error iterating rows: %w", err)
+ }
+
+ // Close explicitly rather than via defer: this query runs once per batch,
+ // and deferred closes would pile up until the function returns.
+ rows.Close()
+
+ // Stream in original order
+ for _, b := range builders {
+ if b != nil && !cb(b) {
+ return nil
+ }
+ }
+ }
+
+ return nil
+}
+
+// GetBuildersByExecutionAddress returns builders with a specific execution address
+func GetBuildersByExecutionAddress(address []byte) []*dbtypes.Builder {
+ builders := []*dbtypes.Builder{}
+ err := ReaderDb.Select(&builders, `
+ SELECT * FROM builders WHERE execution_address = $1 ORDER BY builder_index ASC
+ `, address)
+ if err != nil {
+ logger.Errorf("Error while fetching builders by execution address: %v", err)
+ return nil
+ }
+ return builders
+}
+
+// GetBuilderIndexesByFilter returns builder indexes matching a filter
+func GetBuilderIndexesByFilter(filter dbtypes.BuilderFilter, currentEpoch uint64) ([]uint64, error) {
+ var sql strings.Builder
+ args := []any{}
+ fmt.Fprint(&sql, `
+ SELECT
+ builder_index
+ FROM builders
+ `)
+
+ args = buildBuilderFilterSql(filter, currentEpoch, &sql, args)
+
+ switch filter.OrderBy {
+ case dbtypes.BuilderOrderIndexAsc:
+ fmt.Fprint(&sql, " ORDER BY builder_index ASC")
+ case dbtypes.BuilderOrderIndexDesc:
+ fmt.Fprint(&sql, " ORDER BY builder_index DESC")
+ case dbtypes.BuilderOrderPubKeyAsc:
+ fmt.Fprint(&sql, " ORDER BY pubkey ASC")
+ case dbtypes.BuilderOrderPubKeyDesc:
+ fmt.Fprint(&sql, " ORDER BY pubkey DESC")
+ case dbtypes.BuilderOrderDepositEpochAsc:
+ fmt.Fprint(&sql, " ORDER BY deposit_epoch ASC")
+ case dbtypes.BuilderOrderDepositEpochDesc:
+ fmt.Fprint(&sql, " ORDER BY deposit_epoch DESC")
+ case dbtypes.BuilderOrderWithdrawableEpochAsc:
+ fmt.Fprint(&sql, " ORDER BY withdrawable_epoch ASC")
+ case dbtypes.BuilderOrderWithdrawableEpochDesc:
+ fmt.Fprint(&sql, " ORDER BY withdrawable_epoch DESC")
+ }
+
+ builderIds := []uint64{}
+ err := ReaderDb.Select(&builderIds, sql.String(), args...)
+ if err != nil {
+ logger.Errorf("Error while fetching builders by filter: %v", err)
+ return nil, err
+ }
+
+ return builderIds, nil
+}
+
+func buildBuilderFilterSql(filter dbtypes.BuilderFilter, currentEpoch uint64, sql *strings.Builder, args []any) []any {
+ filterOp := "WHERE"
+
+ if filter.MinIndex != nil {
+ fmt.Fprintf(sql, " %v builder_index >= $%v", filterOp, len(args)+1)
+ args = append(args, *filter.MinIndex)
+ filterOp = "AND"
+ }
+ if filter.MaxIndex != nil {
+ fmt.Fprintf(sql, " %v builder_index <= $%v", filterOp, len(args)+1)
+ args = append(args, *filter.MaxIndex)
+ filterOp = "AND"
+ }
+ if len(filter.PubKey) > 0 {
+ fmt.Fprintf(sql, " %v pubkey LIKE $%v", filterOp, len(args)+1)
+ args = append(args, append(filter.PubKey, '%'))
+ filterOp = "AND"
+ }
+ if len(filter.ExecutionAddress) > 0 {
+ fmt.Fprintf(sql, " %v execution_address = $%v", filterOp, len(args)+1)
+ args = append(args, filter.ExecutionAddress)
+ filterOp = "AND"
+ }
+ if len(filter.Status) > 0 {
+ statusConditions := make([]string, 0, len(filter.Status))
+ for _, status := range filter.Status {
+ switch status {
+ case dbtypes.BuilderStatusActiveFilter:
+ statusConditions = append(statusConditions, fmt.Sprintf("(superseded = false AND withdrawable_epoch > $%v)", len(args)+1))
+ args = append(args, ConvertUint64ToInt64(currentEpoch))
+ case dbtypes.BuilderStatusExitedFilter:
+ statusConditions = append(statusConditions, fmt.Sprintf("(superseded = false AND withdrawable_epoch <= $%v)", len(args)+1))
+ args = append(args, ConvertUint64ToInt64(currentEpoch))
+ case dbtypes.BuilderStatusSupersededFilter:
+ statusConditions = append(statusConditions, "superseded = true")
+ }
+ }
+ if len(statusConditions) > 0 {
+ fmt.Fprintf(sql, " %v (%v)", filterOp, strings.Join(statusConditions, " OR "))
+ }
+ }
+
+ return args
+}
+
+// StreamBuildersByIndexes streams builders by indexes
+func StreamBuildersByIndexes(indexes []uint64, cb func(builder *dbtypes.Builder) bool) {
+ const batchSize = 1000
+
+ for i := 0; i < len(indexes); i += batchSize {
+ end := min(i+batchSize, len(indexes))
+ batch := indexes[i:end]
+
+ var sql strings.Builder
+ fmt.Fprint(&sql, `
+ SELECT
+ pubkey, builder_index, version, execution_address,
+ deposit_epoch, withdrawable_epoch, superseded
+ FROM builders
+ WHERE builder_index IN (`)
+
+ args := make([]any, len(batch))
+ for j, idx := range batch {
+ if j > 0 {
+ fmt.Fprint(&sql, ", ")
+ }
+ fmt.Fprintf(&sql, "$%v", j+1)
+ args[j] = idx
+ }
+ fmt.Fprint(&sql, ")")
+
+ // Create index map for ordering
+ indexMap := make(map[uint64]int, len(batch))
+ for pos, idx := range batch {
+ indexMap[idx] = pos
+ }
+
+ // Fetch all builders for this batch
+ builders := make([]*dbtypes.Builder, len(batch))
+ rows, err := ReaderDb.Query(sql.String(), args...)
+ if err != nil {
+ logger.Errorf("Error querying builders: %v", err)
+ return
+ }
+
+ for rows.Next() {
+ builder := &dbtypes.Builder{}
+ err := rows.Scan(
+ &builder.Pubkey,
+ &builder.BuilderIndex,
+ &builder.Version,
+ &builder.ExecutionAddress,
+ &builder.DepositEpoch,
+ &builder.WithdrawableEpoch,
+ &builder.Superseded,
+ )
+ if err != nil {
+ logger.Errorf("Error scanning builder: %v", err)
+ rows.Close()
+ return
+ }
+ pos := indexMap[builder.BuilderIndex]
+ builders[pos] = builder
+ }
+ if err = rows.Err(); err != nil {
+ logger.Errorf("Error iterating builders: %v", err)
+ }
+ rows.Close()
+
+ // Stream in original order
+ for _, b := range builders {
+ if b != nil && !cb(b) {
+ return
+ }
+ }
+ }
+}
diff --git a/db/epochs.go b/db/epochs.go
index eab3fd87..e3b86e8d 100644
--- a/db/epochs.go
+++ b/db/epochs.go
@@ -12,8 +12,8 @@ func InsertEpoch(epoch *dbtypes.Epoch, tx *sqlx.Tx) error {
epoch, validator_count, validator_balance, eligible, voted_target, voted_head, voted_total, block_count, orphaned_count,
attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count,
proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation, blob_count,
- eth_gas_used, eth_gas_limit
- ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22)
+ eth_gas_used, eth_gas_limit, payload_count
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23)
ON CONFLICT (epoch) DO UPDATE SET
validator_count = excluded.validator_count,
validator_balance = excluded.validator_balance,
@@ -35,18 +35,19 @@ func InsertEpoch(epoch *dbtypes.Epoch, tx *sqlx.Tx) error {
sync_participation = excluded.sync_participation,
blob_count = excluded.blob_count,
eth_gas_used = excluded.eth_gas_used,
- eth_gas_limit = excluded.eth_gas_limit`,
+ eth_gas_limit = excluded.eth_gas_limit,
+ payload_count = excluded.payload_count`,
dbtypes.DBEngineSqlite: `
INSERT OR REPLACE INTO epochs (
epoch, validator_count, validator_balance, eligible, voted_target, voted_head, voted_total, block_count, orphaned_count,
attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count,
proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation, blob_count,
- eth_gas_used, eth_gas_limit
- ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22)`,
+ eth_gas_used, eth_gas_limit, payload_count
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23)`,
}),
epoch.Epoch, epoch.ValidatorCount, epoch.ValidatorBalance, epoch.Eligible, epoch.VotedTarget, epoch.VotedHead, epoch.VotedTotal, epoch.BlockCount, epoch.OrphanedCount,
epoch.AttestationCount, epoch.DepositCount, epoch.ExitCount, epoch.WithdrawCount, epoch.WithdrawAmount, epoch.AttesterSlashingCount, epoch.ProposerSlashingCount,
- epoch.BLSChangeCount, epoch.EthTransactionCount, epoch.SyncParticipation, epoch.BlobCount, epoch.EthGasUsed, epoch.EthGasLimit)
+ epoch.BLSChangeCount, epoch.EthTransactionCount, epoch.SyncParticipation, epoch.BlobCount, epoch.EthGasUsed, epoch.EthGasLimit, epoch.PayloadCount)
if err != nil {
return err
}
@@ -69,7 +70,7 @@ func GetEpochs(firstEpoch uint64, limit uint32) []*dbtypes.Epoch {
epoch, validator_count, validator_balance, eligible, voted_target, voted_head, voted_total, block_count, orphaned_count,
attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count,
proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation, blob_count,
- eth_gas_used, eth_gas_limit
+ eth_gas_used, eth_gas_limit, payload_count
FROM epochs
WHERE epoch <= $1
ORDER BY epoch DESC
diff --git a/db/orphaned_blocks.go b/db/orphaned_blocks.go
index e983d829..30b4582d 100644
--- a/db/orphaned_blocks.go
+++ b/db/orphaned_blocks.go
@@ -9,15 +9,15 @@ func InsertOrphanedBlock(block *dbtypes.OrphanedBlock, tx *sqlx.Tx) error {
_, err := tx.Exec(EngineQuery(map[dbtypes.DBEngineType]string{
dbtypes.DBEnginePgsql: `
INSERT INTO orphaned_blocks (
- root, header_ver, header_ssz, block_ver, block_ssz, block_uid
- ) VALUES ($1, $2, $3, $4, $5, $6)
+ root, header_ver, header_ssz, block_ver, block_ssz, block_uid, payload_ver, payload_ssz
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
ON CONFLICT (root) DO NOTHING`,
dbtypes.DBEngineSqlite: `
INSERT OR IGNORE INTO orphaned_blocks (
- root, header_ver, header_ssz, block_ver, block_ssz, block_uid
- ) VALUES ($1, $2, $3, $4, $5, $6)`,
+ root, header_ver, header_ssz, block_ver, block_ssz, block_uid, payload_ver, payload_ssz
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`,
}),
- block.Root, block.HeaderVer, block.HeaderSSZ, block.BlockVer, block.BlockSSZ, block.BlockUid)
+ block.Root, block.HeaderVer, block.HeaderSSZ, block.BlockVer, block.BlockSSZ, block.BlockUid, block.PayloadVer, block.PayloadSSZ)
if err != nil {
return err
}
@@ -27,7 +27,7 @@ func InsertOrphanedBlock(block *dbtypes.OrphanedBlock, tx *sqlx.Tx) error {
func GetOrphanedBlock(root []byte) *dbtypes.OrphanedBlock {
block := dbtypes.OrphanedBlock{}
err := ReaderDb.Get(&block, `
- SELECT root, header_ver, header_ssz, block_ver, block_ssz, block_uid
+ SELECT root, header_ver, header_ssz, block_ver, block_ssz, block_uid, payload_ver, payload_ssz
FROM orphaned_blocks
WHERE root = $1
`, root)
diff --git a/db/schema/pgsql/20260108202212_epbs-payload.sql b/db/schema/pgsql/20260108202212_epbs-payload.sql
new file mode 100644
index 00000000..10de8df5
--- /dev/null
+++ b/db/schema/pgsql/20260108202212_epbs-payload.sql
@@ -0,0 +1,73 @@
+-- +goose Up
+-- +goose StatementBegin
+
+ALTER TABLE public."unfinalized_blocks" ADD
+ "payload_ver" int NOT NULL DEFAULT 0,
+ "payload_ssz" bytea NULL;
+
+ALTER TABLE public."orphaned_blocks" ADD
+ "payload_ver" int NOT NULL DEFAULT 0,
+ "payload_ssz" bytea NULL;
+
+ALTER TABLE public."slots" ADD
+ "payload_status" smallint NOT NULL DEFAULT 0;
+
+CREATE INDEX IF NOT EXISTS "slots_payload_status_idx"
+ ON public."slots"
+ ("payload_status" ASC NULLS LAST);
+
+ALTER TABLE public."epochs" ADD
+ "payload_count" int NOT NULL DEFAULT 0;
+
+ALTER TABLE public."unfinalized_epochs" ADD
+ "payload_count" int NOT NULL DEFAULT 0;
+
+CREATE TABLE IF NOT EXISTS public."block_bids" (
+ "parent_root" bytea NOT NULL,
+ "parent_hash" bytea NOT NULL,
+ "block_hash" bytea NOT NULL,
+ "fee_recipient" bytea NOT NULL,
+ "gas_limit" bigint NOT NULL,
+ "builder_index" bigint NOT NULL,
+ "slot" bigint NOT NULL,
+ "value" bigint NOT NULL,
+ "el_payment" bigint NOT NULL,
+ CONSTRAINT block_bids_pkey PRIMARY KEY (parent_root, parent_hash, block_hash, builder_index)
+);
+
+CREATE INDEX IF NOT EXISTS "block_bids_parent_root_idx"
+ ON public."block_bids"
+ ("parent_root" ASC NULLS LAST);
+
+CREATE INDEX IF NOT EXISTS "block_bids_builder_index_idx"
+ ON public."block_bids"
+ ("builder_index" ASC NULLS LAST);
+
+CREATE INDEX IF NOT EXISTS "block_bids_slot_idx"
+ ON public."block_bids"
+ ("slot" ASC NULLS LAST);
+
+CREATE TABLE IF NOT EXISTS public."builders" (
+ "pubkey" bytea NOT NULL,
+ "builder_index" bigint NOT NULL,
+ "version" smallint NOT NULL,
+ "execution_address" bytea NOT NULL,
+ "deposit_epoch" bigint NOT NULL,
+ "withdrawable_epoch" bigint NOT NULL,
+ "superseded" boolean NOT NULL DEFAULT false,
+ CONSTRAINT builders_pkey PRIMARY KEY (pubkey)
+);
+
+CREATE INDEX IF NOT EXISTS "builders_builder_index_idx"
+ ON public."builders"
+ ("builder_index" ASC NULLS LAST);
+
+CREATE INDEX IF NOT EXISTS "builders_execution_address_idx"
+ ON public."builders"
+ ("execution_address" ASC NULLS LAST);
+
+-- +goose StatementEnd
+-- +goose Down
+-- +goose StatementBegin
+SELECT 'NOT SUPPORTED';
+-- +goose StatementEnd
\ No newline at end of file
diff --git a/db/schema/sqlite/20260108202212_epbs-payload.sql b/db/schema/sqlite/20260108202212_epbs-payload.sql
new file mode 100644
index 00000000..4535d7cb
--- /dev/null
+++ b/db/schema/sqlite/20260108202212_epbs-payload.sql
@@ -0,0 +1,56 @@
+-- +goose Up
+-- +goose StatementBegin
+
+ALTER TABLE "unfinalized_blocks" ADD "payload_ver" int NOT NULL DEFAULT 0;
+ALTER TABLE "unfinalized_blocks" ADD "payload_ssz" BLOB NULL;
+
+ALTER TABLE "orphaned_blocks" ADD "payload_ver" int NOT NULL DEFAULT 0;
+ALTER TABLE "orphaned_blocks" ADD "payload_ssz" BLOB NULL;
+
+ALTER TABLE "slots" ADD "payload_status" smallint NOT NULL DEFAULT 0;
+
+CREATE INDEX IF NOT EXISTS "slots_payload_status_idx" ON "slots" ("payload_status" ASC);
+
+ALTER TABLE "epochs" ADD "payload_count" int NOT NULL DEFAULT 0;
+
+ALTER TABLE "unfinalized_epochs" ADD "payload_count" int NOT NULL DEFAULT 0;
+
+CREATE TABLE IF NOT EXISTS "block_bids" (
+ "parent_root" BLOB NOT NULL,
+ "parent_hash" BLOB NOT NULL,
+ "block_hash" BLOB NOT NULL,
+ "fee_recipient" BLOB NOT NULL,
+ "gas_limit" BIGINT NOT NULL,
+ "builder_index" BIGINT NOT NULL,
+ "slot" BIGINT NOT NULL,
+ "value" BIGINT NOT NULL,
+ "el_payment" BIGINT NOT NULL,
+ CONSTRAINT block_bids_pkey PRIMARY KEY (parent_root, parent_hash, block_hash, builder_index)
+);
+
+CREATE INDEX IF NOT EXISTS "block_bids_parent_root_idx" ON "block_bids" ("parent_root" ASC);
+
+CREATE INDEX IF NOT EXISTS "block_bids_builder_index_idx" ON "block_bids" ("builder_index" ASC);
+
+CREATE INDEX IF NOT EXISTS "block_bids_slot_idx" ON "block_bids" ("slot" ASC);
+
+CREATE TABLE IF NOT EXISTS "builders" (
+ "pubkey" BLOB NOT NULL,
+ "builder_index" BIGINT NOT NULL,
+ "version" SMALLINT NOT NULL,
+ "execution_address" BLOB NOT NULL,
+ "deposit_epoch" BIGINT NOT NULL,
+ "withdrawable_epoch" BIGINT NOT NULL,
+ "superseded" BOOLEAN NOT NULL DEFAULT false,
+ PRIMARY KEY (pubkey)
+);
+
+CREATE INDEX IF NOT EXISTS "builders_builder_index_idx" ON "builders" ("builder_index" ASC);
+
+CREATE INDEX IF NOT EXISTS "builders_execution_address_idx" ON "builders" ("execution_address" ASC);
+
+-- +goose StatementEnd
+-- +goose Down
+-- +goose StatementBegin
+SELECT 'NOT SUPPORTED';
+-- +goose StatementEnd
\ No newline at end of file
diff --git a/db/slots.go b/db/slots.go
index fffa8734..5f4884c8 100644
--- a/db/slots.go
+++ b/db/slots.go
@@ -20,30 +20,31 @@ func InsertSlot(slot *dbtypes.Slot, tx *sqlx.Tx) error {
attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count,
proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash,
eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used,
- eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times,
- block_uid
- ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34)
+ eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time,
+ exec_times, block_uid, payload_status
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34, $35)
ON CONFLICT (slot, root) DO UPDATE SET
status = excluded.status,
eth_block_extra = excluded.eth_block_extra,
eth_block_extra_text = excluded.eth_block_extra_text,
- fork_id = excluded.fork_id`,
+ fork_id = excluded.fork_id,
+ payload_status = excluded.payload_status`,
dbtypes.DBEngineSqlite: `
INSERT OR REPLACE INTO slots (
slot, proposer, status, root, parent_root, state_root, graffiti, graffiti_text,
attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count,
proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash,
eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used,
- eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times,
- block_uid
- ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34)`,
+ eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time,
+ exec_times, block_uid, payload_status
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34, $35)`,
}),
slot.Slot, slot.Proposer, slot.Status, slot.Root, slot.ParentRoot, slot.StateRoot, slot.Graffiti, slot.GraffitiText,
slot.AttestationCount, slot.DepositCount, slot.ExitCount, slot.WithdrawCount, slot.WithdrawAmount, slot.AttesterSlashingCount,
slot.ProposerSlashingCount, slot.BLSChangeCount, slot.EthTransactionCount, slot.EthBlockNumber, slot.EthBlockHash,
slot.EthBlockExtra, slot.EthBlockExtraText, slot.SyncParticipation, slot.ForkId, slot.BlobCount, slot.EthGasUsed,
slot.EthGasLimit, slot.EthBaseFee, slot.EthFeeRecipient, slot.BlockSize, slot.RecvDelay, slot.MinExecTime, slot.MaxExecTime,
- slot.ExecTimes, slot.BlockUid)
+ slot.ExecTimes, slot.BlockUid, slot.PayloadStatus)
if err != nil {
return err
}
@@ -99,8 +100,8 @@ func GetSlotsRange(firstSlot uint64, lastSlot uint64, withMissing bool, withOrph
"attestation_count", "deposit_count", "exit_count", "withdraw_count", "withdraw_amount", "attester_slashing_count",
"proposer_slashing_count", "bls_change_count", "eth_transaction_count", "eth_block_number", "eth_block_hash",
"eth_block_extra", "eth_block_extra_text", "sync_participation", "fork_id", "blob_count", "eth_gas_used",
- "eth_gas_limit", "eth_base_fee", "eth_fee_recipient", "block_size", "recv_delay", "min_exec_time", "max_exec_time", "exec_times",
- "block_uid",
+ "eth_gas_limit", "eth_base_fee", "eth_fee_recipient", "block_size", "recv_delay", "min_exec_time", "max_exec_time",
+ "exec_times", "block_uid", "payload_status",
}
for _, blockField := range blockFields {
fmt.Fprintf(&sql, ", slots.%v AS \"block.%v\"", blockField, blockField)
@@ -133,8 +134,8 @@ func GetSlotsByParentRoot(parentRoot []byte) []*dbtypes.Slot {
attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count,
proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash,
eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used,
- eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times,
- block_uid
+ eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time,
+ exec_times, block_uid, payload_status
FROM slots
WHERE parent_root = $1
ORDER BY slot DESC
@@ -154,8 +155,8 @@ func GetSlotByRoot(root []byte) *dbtypes.Slot {
attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count,
proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash,
eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used,
- eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times,
- block_uid
+ eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time,
+ exec_times, block_uid, payload_status
FROM slots
WHERE root = $1
`, root)
@@ -182,8 +183,8 @@ func GetSlotsByRoots(roots [][]byte) map[phase0.Root]*dbtypes.Slot {
attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count,
proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash,
eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used,
- eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times,
- block_uid
+ eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time,
+ exec_times, block_uid, payload_status
FROM slots
WHERE root IN (%v)
ORDER BY slot DESC`,
@@ -258,8 +259,8 @@ func GetSlotsByBlockHash(blockHash []byte) []*dbtypes.Slot {
attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count,
proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash,
eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used,
- eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times,
- block_uid
+ eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time,
+ exec_times, block_uid, payload_status
FROM slots
WHERE eth_block_hash = $1
ORDER BY slot DESC
@@ -320,8 +321,8 @@ func GetFilteredSlots(filter *dbtypes.BlockFilter, firstSlot uint64, offset uint
"attestation_count", "deposit_count", "exit_count", "withdraw_count", "withdraw_amount", "attester_slashing_count",
"proposer_slashing_count", "bls_change_count", "eth_transaction_count", "eth_block_number", "eth_block_hash",
"eth_block_extra", "eth_block_extra_text", "sync_participation", "fork_id", "blob_count", "eth_gas_used",
- "eth_gas_limit", "eth_base_fee", "eth_fee_recipient", "block_size", "recv_delay", "min_exec_time", "max_exec_time", "exec_times",
- "block_uid",
+ "eth_gas_limit", "eth_base_fee", "eth_fee_recipient", "block_size", "recv_delay", "min_exec_time", "max_exec_time",
+ "exec_times", "block_uid", "payload_status",
}
for _, blockField := range blockFields {
fmt.Fprintf(&sql, ", slots.%v AS \"block.%v\"", blockField, blockField)
diff --git a/db/unfinalized_blocks.go b/db/unfinalized_blocks.go
index 4d2e8efe..e816a7a4 100644
--- a/db/unfinalized_blocks.go
+++ b/db/unfinalized_blocks.go
@@ -12,18 +12,16 @@ func InsertUnfinalizedBlock(block *dbtypes.UnfinalizedBlock, tx *sqlx.Tx) error
_, err := tx.Exec(EngineQuery(map[dbtypes.DBEngineType]string{
dbtypes.DBEnginePgsql: `
INSERT INTO unfinalized_blocks (
- root, slot, header_ver, header_ssz, block_ver, block_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times,
- block_uid
- ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
+ root, slot, header_ver, header_ssz, block_ver, block_ssz, payload_ver, payload_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
ON CONFLICT (root) DO NOTHING`,
dbtypes.DBEngineSqlite: `
INSERT OR IGNORE INTO unfinalized_blocks (
- root, slot, header_ver, header_ssz, block_ver, block_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times,
- block_uid
- ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)`,
+ root, slot, header_ver, header_ssz, block_ver, block_ssz, payload_ver, payload_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)`,
}),
- block.Root, block.Slot, block.HeaderVer, block.HeaderSSZ, block.BlockVer, block.BlockSSZ, block.Status, block.ForkId, block.RecvDelay, block.MinExecTime, block.MaxExecTime,
- block.ExecTimes, block.BlockUid,
+ block.Root, block.Slot, block.HeaderVer, block.HeaderSSZ, block.BlockVer, block.BlockSSZ, block.PayloadVer, block.PayloadSSZ, block.Status, block.ForkId, block.RecvDelay,
+ block.MinExecTime, block.MaxExecTime, block.ExecTimes, block.BlockUid,
)
if err != nil {
return err
@@ -81,6 +79,14 @@ func UpdateUnfinalizedBlockForkId(roots [][]byte, forkId uint64, tx *sqlx.Tx) er
return nil
}
+func UpdateUnfinalizedBlockPayload(root []byte, payloadVer uint64, payloadSSZ []byte, tx *sqlx.Tx) error {
+ _, err := tx.Exec(`UPDATE unfinalized_blocks SET payload_ver = $1, payload_ssz = $2 WHERE root = $3`, payloadVer, payloadSSZ, root)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
func UpdateUnfinalizedBlockExecutionTimes(root []byte, minExecTime uint32, maxExecTime uint32, execTimes []byte, tx *sqlx.Tx) error {
_, err := tx.Exec(`UPDATE unfinalized_blocks SET min_exec_time = $1, max_exec_time = $2, exec_times = $3 WHERE root = $4`, minExecTime, maxExecTime, execTimes, root)
if err != nil {
@@ -132,7 +138,7 @@ func StreamUnfinalizedBlocks(slot uint64, cb func(block *dbtypes.UnfinalizedBloc
var sql strings.Builder
args := []any{slot}
- fmt.Fprint(&sql, `SELECT root, slot, header_ver, header_ssz, block_ver, block_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid FROM unfinalized_blocks WHERE slot >= $1`)
+ fmt.Fprint(&sql, `SELECT root, slot, header_ver, header_ssz, block_ver, block_ssz, payload_ver, payload_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid FROM unfinalized_blocks WHERE slot >= $1`)
rows, err := ReaderDb.Query(sql.String(), args...)
if err != nil {
@@ -143,7 +149,7 @@ func StreamUnfinalizedBlocks(slot uint64, cb func(block *dbtypes.UnfinalizedBloc
for rows.Next() {
block := dbtypes.UnfinalizedBlock{}
err := rows.Scan(
- &block.Root, &block.Slot, &block.HeaderVer, &block.HeaderSSZ, &block.BlockVer, &block.BlockSSZ, &block.Status, &block.ForkId, &block.RecvDelay,
+ &block.Root, &block.Slot, &block.HeaderVer, &block.HeaderSSZ, &block.BlockVer, &block.BlockSSZ, &block.PayloadVer, &block.PayloadSSZ, &block.Status, &block.ForkId, &block.RecvDelay,
&block.MinExecTime, &block.MaxExecTime, &block.ExecTimes, &block.BlockUid,
)
if err != nil {
@@ -156,13 +162,28 @@ func StreamUnfinalizedBlocks(slot uint64, cb func(block *dbtypes.UnfinalizedBloc
return nil
}
-func GetUnfinalizedBlock(root []byte) *dbtypes.UnfinalizedBlock {
+func GetUnfinalizedBlock(root []byte, withHeader bool, withBody bool, withPayload bool) *dbtypes.UnfinalizedBlock {
+ var sql strings.Builder
+ fmt.Fprint(&sql, `SELECT root, slot`)
+
+ if withHeader {
+ fmt.Fprint(&sql, `, header_ver, header_ssz`)
+ }
+
+ if withBody {
+ fmt.Fprint(&sql, `, block_ver, block_ssz`)
+ }
+
+ if withPayload {
+ fmt.Fprint(&sql, `, payload_ver, payload_ssz`)
+ }
+
+ fmt.Fprint(&sql, `, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid`)
+
+	fmt.Fprint(&sql, ` FROM unfinalized_blocks WHERE root = $1`)
+
block := dbtypes.UnfinalizedBlock{}
- err := ReaderDb.Get(&block, `
- SELECT root, slot, header_ver, header_ssz, block_ver, block_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid
- FROM unfinalized_blocks
- WHERE root = $1
- `, root)
+ err := ReaderDb.Get(&block, sql.String(), root)
if err != nil {
logger.Errorf("Error while fetching unfinalized block 0x%x: %v", root, err)
return nil
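Note: with the selective loader above, callers pull only the columns they need. A sketch of a body-only lookup and the SQL it assembles (illustrative, not part of this patch):

	// body only; header and payload SSZ stay unloaded
	dbBlock := db.GetUnfinalizedBlock(blockRoot, false, true, false)
	// assembled query, roughly:
	//   SELECT root, slot, block_ver, block_ssz, status, fork_id, recv_delay,
	//          min_exec_time, max_exec_time, exec_times, block_uid
	//   FROM unfinalized_blocks WHERE root = $1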
diff --git a/db/unfinalized_epochs.go b/db/unfinalized_epochs.go
index 27469196..3150f551 100644
--- a/db/unfinalized_epochs.go
+++ b/db/unfinalized_epochs.go
@@ -12,8 +12,8 @@ func InsertUnfinalizedEpoch(epoch *dbtypes.UnfinalizedEpoch, tx *sqlx.Tx) error
epoch, dependent_root, epoch_head_root, epoch_head_fork_id, validator_count, validator_balance, eligible, voted_target,
voted_head, voted_total, block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count,
withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation,
- blob_count, eth_gas_used, eth_gas_limit
- ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25)
+ blob_count, eth_gas_used, eth_gas_limit, payload_count
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26)
ON CONFLICT (epoch, dependent_root, epoch_head_root) DO UPDATE SET
epoch_head_fork_id = excluded.epoch_head_fork_id,
validator_count = excluded.validator_count,
@@ -36,19 +36,20 @@ func InsertUnfinalizedEpoch(epoch *dbtypes.UnfinalizedEpoch, tx *sqlx.Tx) error
sync_participation = excluded.sync_participation,
blob_count = excluded.blob_count,
eth_gas_used = excluded.eth_gas_used,
- eth_gas_limit = excluded.eth_gas_limit`,
+ eth_gas_limit = excluded.eth_gas_limit,
+ payload_count = excluded.payload_count`,
dbtypes.DBEngineSqlite: `
INSERT OR REPLACE INTO unfinalized_epochs (
epoch, dependent_root, epoch_head_root, epoch_head_fork_id, validator_count, validator_balance, eligible, voted_target,
voted_head, voted_total, block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count,
withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation,
- blob_count, eth_gas_used, eth_gas_limit
- ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25)`,
+ blob_count, eth_gas_used, eth_gas_limit, payload_count
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26)`,
}),
epoch.Epoch, epoch.DependentRoot, epoch.EpochHeadRoot, epoch.EpochHeadForkId, epoch.ValidatorCount, epoch.ValidatorBalance, epoch.Eligible, epoch.VotedTarget,
epoch.VotedHead, epoch.VotedTotal, epoch.BlockCount, epoch.OrphanedCount, epoch.AttestationCount, epoch.DepositCount, epoch.ExitCount, epoch.WithdrawCount,
epoch.WithdrawAmount, epoch.AttesterSlashingCount, epoch.ProposerSlashingCount, epoch.BLSChangeCount, epoch.EthTransactionCount, epoch.SyncParticipation,
- epoch.BlobCount, epoch.EthGasUsed, epoch.EthGasLimit,
+ epoch.BlobCount, epoch.EthGasUsed, epoch.EthGasLimit, epoch.PayloadCount,
)
if err != nil {
return err
@@ -62,7 +63,7 @@ func StreamUnfinalizedEpochs(epoch uint64, cb func(duty *dbtypes.UnfinalizedEpoc
epoch, dependent_root, epoch_head_root, epoch_head_fork_id, validator_count, validator_balance, eligible, voted_target,
voted_head, voted_total, block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count,
withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation,
- blob_count, eth_gas_used, eth_gas_limit
+ blob_count, eth_gas_used, eth_gas_limit, payload_count
FROM unfinalized_epochs
WHERE epoch >= $1`, epoch)
if err != nil {
@@ -76,7 +77,7 @@ func StreamUnfinalizedEpochs(epoch uint64, cb func(duty *dbtypes.UnfinalizedEpoc
&e.Epoch, &e.DependentRoot, &e.EpochHeadRoot, &e.EpochHeadForkId, &e.ValidatorCount, &e.ValidatorBalance, &e.Eligible, &e.VotedTarget,
&e.VotedHead, &e.VotedTotal, &e.BlockCount, &e.OrphanedCount, &e.AttestationCount, &e.DepositCount, &e.ExitCount, &e.WithdrawCount,
&e.WithdrawAmount, &e.AttesterSlashingCount, &e.ProposerSlashingCount, &e.BLSChangeCount, &e.EthTransactionCount, &e.SyncParticipation,
- &e.BlobCount, &e.EthGasUsed, &e.EthGasLimit,
+ &e.BlobCount, &e.EthGasUsed, &e.EthGasLimit, &e.PayloadCount,
)
if err != nil {
logger.Errorf("Error while scanning unfinalized epoch: %v", err)
@@ -95,7 +96,7 @@ func GetUnfinalizedEpoch(epoch uint64, headRoot []byte) *dbtypes.UnfinalizedEpoc
epoch, dependent_root, epoch_head_root, epoch_head_fork_id, validator_count, validator_balance, eligible, voted_target,
voted_head, voted_total, block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count,
withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation,
- blob_count, eth_gas_used, eth_gas_limit
+ blob_count, eth_gas_used, eth_gas_limit, payload_count
FROM unfinalized_epochs
WHERE epoch = $1 AND epoch_head_root = $2
`, epoch, headRoot)
diff --git a/dbtypes/dbtypes.go b/dbtypes/dbtypes.go
index 38a4b1d4..74b7daea 100644
--- a/dbtypes/dbtypes.go
+++ b/dbtypes/dbtypes.go
@@ -18,6 +18,14 @@ const (
Orphaned
)
+type PayloadStatus uint8
+
+const (
+ PayloadStatusMissing PayloadStatus = iota
+ PayloadStatusCanonical
+ PayloadStatusOrphaned
+)
+
type SlotHeader struct {
Slot uint64 `db:"slot"`
Proposer uint64 `db:"proposer"`
@@ -25,40 +33,41 @@ type SlotHeader struct {
}
type Slot struct {
- Slot uint64 `db:"slot"`
- Proposer uint64 `db:"proposer"`
- Status SlotStatus `db:"status"`
- Root []byte `db:"root"`
- ParentRoot []byte `db:"parent_root"`
- StateRoot []byte `db:"state_root"`
- Graffiti []byte `db:"graffiti"`
- GraffitiText string `db:"graffiti_text"`
- AttestationCount uint64 `db:"attestation_count"`
- DepositCount uint64 `db:"deposit_count"`
- ExitCount uint64 `db:"exit_count"`
- WithdrawCount uint64 `db:"withdraw_count"`
- WithdrawAmount uint64 `db:"withdraw_amount"`
- AttesterSlashingCount uint64 `db:"attester_slashing_count"`
- ProposerSlashingCount uint64 `db:"proposer_slashing_count"`
- BLSChangeCount uint64 `db:"bls_change_count"`
- EthTransactionCount uint64 `db:"eth_transaction_count"`
- BlobCount uint64 `db:"blob_count"`
- EthGasUsed uint64 `db:"eth_gas_used"`
- EthGasLimit uint64 `db:"eth_gas_limit"`
- EthBaseFee uint64 `db:"eth_base_fee"`
- EthFeeRecipient []byte `db:"eth_fee_recipient"`
- EthBlockNumber *uint64 `db:"eth_block_number"`
- EthBlockHash []byte `db:"eth_block_hash"`
- EthBlockExtra []byte `db:"eth_block_extra"`
- EthBlockExtraText string `db:"eth_block_extra_text"`
- SyncParticipation float32 `db:"sync_participation"`
- ForkId uint64 `db:"fork_id"`
- BlockSize uint64 `db:"block_size"`
- RecvDelay int32 `db:"recv_delay"`
- MinExecTime uint32 `db:"min_exec_time"`
- MaxExecTime uint32 `db:"max_exec_time"`
- ExecTimes []byte `db:"exec_times"`
- BlockUid uint64 `db:"block_uid"`
+ Slot uint64 `db:"slot"`
+ Proposer uint64 `db:"proposer"`
+ Status SlotStatus `db:"status"`
+ Root []byte `db:"root"`
+ ParentRoot []byte `db:"parent_root"`
+ StateRoot []byte `db:"state_root"`
+ Graffiti []byte `db:"graffiti"`
+ GraffitiText string `db:"graffiti_text"`
+ AttestationCount uint64 `db:"attestation_count"`
+ DepositCount uint64 `db:"deposit_count"`
+ ExitCount uint64 `db:"exit_count"`
+ WithdrawCount uint64 `db:"withdraw_count"`
+ WithdrawAmount uint64 `db:"withdraw_amount"`
+ AttesterSlashingCount uint64 `db:"attester_slashing_count"`
+ ProposerSlashingCount uint64 `db:"proposer_slashing_count"`
+ BLSChangeCount uint64 `db:"bls_change_count"`
+ EthTransactionCount uint64 `db:"eth_transaction_count"`
+ BlobCount uint64 `db:"blob_count"`
+ EthGasUsed uint64 `db:"eth_gas_used"`
+ EthGasLimit uint64 `db:"eth_gas_limit"`
+ EthBaseFee uint64 `db:"eth_base_fee"`
+ EthFeeRecipient []byte `db:"eth_fee_recipient"`
+ EthBlockNumber *uint64 `db:"eth_block_number"`
+ EthBlockHash []byte `db:"eth_block_hash"`
+ EthBlockExtra []byte `db:"eth_block_extra"`
+ EthBlockExtraText string `db:"eth_block_extra_text"`
+ SyncParticipation float32 `db:"sync_participation"`
+ ForkId uint64 `db:"fork_id"`
+ BlockSize uint64 `db:"block_size"`
+ RecvDelay int32 `db:"recv_delay"`
+ MinExecTime uint32 `db:"min_exec_time"`
+ MaxExecTime uint32 `db:"max_exec_time"`
+ ExecTimes []byte `db:"exec_times"`
+ PayloadStatus PayloadStatus `db:"payload_status"`
+ BlockUid uint64 `db:"block_uid"`
}
type Epoch struct {
@@ -84,15 +93,18 @@ type Epoch struct {
EthGasUsed uint64 `db:"eth_gas_used"`
EthGasLimit uint64 `db:"eth_gas_limit"`
SyncParticipation float32 `db:"sync_participation"`
+ PayloadCount uint64 `db:"payload_count"`
}
type OrphanedBlock struct {
- Root []byte `db:"root"`
- HeaderVer uint64 `db:"header_ver"`
- HeaderSSZ []byte `db:"header_ssz"`
- BlockVer uint64 `db:"block_ver"`
- BlockSSZ []byte `db:"block_ssz"`
- BlockUid uint64 `db:"block_uid"`
+ Root []byte `db:"root"`
+ HeaderVer uint64 `db:"header_ver"`
+ HeaderSSZ []byte `db:"header_ssz"`
+ BlockVer uint64 `db:"block_ver"`
+ BlockSSZ []byte `db:"block_ssz"`
+ PayloadVer uint64 `db:"payload_ver"`
+ PayloadSSZ []byte `db:"payload_ssz"`
+ BlockUid uint64 `db:"block_uid"`
}
type SlotAssignment struct {
@@ -121,6 +133,8 @@ type UnfinalizedBlock struct {
HeaderSSZ []byte `db:"header_ssz"`
BlockVer uint64 `db:"block_ver"`
BlockSSZ []byte `db:"block_ssz"`
+ PayloadVer uint64 `db:"payload_ver"`
+ PayloadSSZ []byte `db:"payload_ssz"`
Status UnfinalizedBlockStatus `db:"status"`
ForkId uint64 `db:"fork_id"`
RecvDelay int32 `db:"recv_delay"`
@@ -156,6 +170,7 @@ type UnfinalizedEpoch struct {
EthGasUsed uint64 `db:"eth_gas_used"`
EthGasLimit uint64 `db:"eth_gas_limit"`
SyncParticipation float32 `db:"sync_participation"`
+ PayloadCount uint64 `db:"payload_count"`
}
type OrphanedEpoch struct {
@@ -530,6 +545,30 @@ type ElTokenTransfer struct {
AmountRaw []byte `db:"amount_raw"`
}
+// ePBS types
+
+type BlockBid struct {
+ ParentRoot []byte `db:"parent_root"`
+ ParentHash []byte `db:"parent_hash"`
+ BlockHash []byte `db:"block_hash"`
+ FeeRecipient []byte `db:"fee_recipient"`
+ GasLimit uint64 `db:"gas_limit"`
+ BuilderIndex uint64 `db:"builder_index"`
+ Slot uint64 `db:"slot"`
+ Value uint64 `db:"value"`
+ ElPayment uint64 `db:"el_payment"`
+}
+
+type Builder struct {
+ Pubkey []byte `db:"pubkey"`
+ BuilderIndex uint64 `db:"builder_index"`
+ Version uint8 `db:"version"`
+ ExecutionAddress []byte `db:"execution_address"`
+ DepositEpoch int64 `db:"deposit_epoch"`
+ WithdrawableEpoch int64 `db:"withdrawable_epoch"`
+ Superseded bool `db:"superseded"`
+}
+
// Withdrawal types
const (
WithdrawalTypeBeaconWithdrawal = 0
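Note: PayloadStatus is a tri-state companion to SlotStatus for ePBS: under EIP-7732 a beacon block can itself be canonical while its execution payload was never revealed (missing) or was revealed on a losing fork (orphaned). A minimal mapping sketch; the helper name is hypothetical, not part of this patch:

	// payloadStatusLabel renders the ePBS payload state (hypothetical helper).
	func payloadStatusLabel(s dbtypes.PayloadStatus) string {
		switch s {
		case dbtypes.PayloadStatusCanonical:
			return "Canonical"
		case dbtypes.PayloadStatusOrphaned:
			return "Orphaned"
		default: // PayloadStatusMissing: block landed, payload never revealed
			return "Missing"
		}
	}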
diff --git a/dbtypes/other.go b/dbtypes/other.go
index d3755ec5..8871329f 100644
--- a/dbtypes/other.go
+++ b/dbtypes/other.go
@@ -211,6 +211,43 @@ type ValidatorFilter struct {
Offset uint64
}
+// Builder filter types
+
+type BuilderOrder uint8
+
+const (
+ BuilderOrderIndexAsc BuilderOrder = iota
+ BuilderOrderIndexDesc
+ BuilderOrderPubKeyAsc
+ BuilderOrderPubKeyDesc
+ BuilderOrderBalanceAsc
+ BuilderOrderBalanceDesc
+ BuilderOrderDepositEpochAsc
+ BuilderOrderDepositEpochDesc
+ BuilderOrderWithdrawableEpochAsc
+ BuilderOrderWithdrawableEpochDesc
+)
+
+type BuilderStatus uint8
+
+const (
+ BuilderStatusActiveFilter BuilderStatus = iota
+ BuilderStatusExitedFilter
+ BuilderStatusSupersededFilter
+)
+
+type BuilderFilter struct {
+ MinIndex *uint64
+ MaxIndex *uint64
+ PubKey []byte
+ ExecutionAddress []byte
+ Status []BuilderStatus
+
+ OrderBy BuilderOrder
+ Limit uint64
+ Offset uint64
+}
+
// EL Explorer filters
type ElTransactionFilter struct {
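Note: BuilderFilter follows the shape of the existing ValidatorFilter. A usage sketch against the service call wired up in handlers/builders.go below (exact-index lookups set MinIndex == MaxIndex):

	idx := uint64(42)
	filter := dbtypes.BuilderFilter{
		MinIndex: &idx,
		MaxIndex: &idx,
		Status:   []dbtypes.BuilderStatus{dbtypes.BuilderStatusActiveFilter},
		OrderBy:  dbtypes.BuilderOrderIndexAsc,
		Limit:    50,
	}
	builders, total := services.GlobalBeaconService.GetFilteredBuilderSet(&filter, true)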
diff --git a/go.mod b/go.mod
index 8a847bef..70dc2edc 100644
--- a/go.mod
+++ b/go.mod
@@ -259,3 +259,5 @@ require (
modernc.org/memory v1.11.0 // indirect
modernc.org/sqlite v1.38.2 // indirect
)
+
+replace github.com/attestantio/go-eth2-client => github.com/pk910/go-eth2-client v0.0.0-20260109010443-3742e71092e1
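Note: the replace directive pins go-eth2-client to a fork that presumably carries the gloas/ePBS types used throughout this patch (gloas.SignedBeaconBlock, gloas.SignedExecutionPayloadEnvelope, spec.DataVersionGloas) until they are available upstream.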
diff --git a/go.sum b/go.sum
index b5ea713f..be1ae350 100644
--- a/go.sum
+++ b/go.sum
@@ -36,8 +36,6 @@ github.com/VictoriaMetrics/fastcache v1.13.0/go.mod h1:hHXhl4DA2fTL2HTZDJFXWgW0L
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
-github.com/attestantio/go-eth2-client v0.28.0 h1:2zIIIMPvSD+g6h3TgVXsoda/Yw3e+wjo1e8CZEanORU=
-github.com/attestantio/go-eth2-client v0.28.0/go.mod h1:PO9sHFCq+1RiG+Eh3eOR2GYvYV64Qzg7idM3kLgCs5k=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -568,6 +566,8 @@ github.com/pion/webrtc/v4 v4.1.4 h1:/gK1ACGHXQmtyVVbJFQDxNoODg4eSRiFLB7t9r9pg8M=
github.com/pion/webrtc/v4 v4.1.4/go.mod h1:Oab9npu1iZtQRMic3K3toYq5zFPvToe/QBw7dMI2ok4=
github.com/pk910/dynamic-ssz v1.2.0 h1:25Kb7CQKKkh8r8mlj/exNJY5vSoe3wGm/8QpsfayRtM=
github.com/pk910/dynamic-ssz v1.2.0/go.mod h1:HXRWLNcgj3DL65Kznrb+RdL3DEKw2JBZ/6crooqGoII=
+github.com/pk910/go-eth2-client v0.0.0-20260109010443-3742e71092e1 h1:Obn5KbqFo+T0Sr8fRGapWbKU5c61twez6ei5LQqA0gE=
+github.com/pk910/go-eth2-client v0.0.0-20260109010443-3742e71092e1/go.mod h1:mKrNtB6iRgMN+gLXwvko19uSdivFVX4/mNYUlrCLwNQ=
github.com/pk910/hashtree-bindings v0.0.1 h1:Sw+UlPlrBle4LUg04kqLFybVQcfmamwKL1QsrR3GU0g=
github.com/pk910/hashtree-bindings v0.0.1/go.mod h1:eayIpxMFkWzMsydESu/5bV8wglZzSE/c9mq6DQdn204=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
diff --git a/handlers/api/network_forks_v1.go b/handlers/api/network_forks_v1.go
index 7a2c8229..0ddfcce3 100644
--- a/handlers/api/network_forks_v1.go
+++ b/handlers/api/network_forks_v1.go
@@ -112,7 +112,8 @@ func buildNetworkForks(chainState *consensus.ChainState) []*APINetworkForkInfo {
// Helper function to add consensus fork
addConsensusFork := func(name string, forkEpoch *uint64, forkVersion phase0.Version) {
if forkEpoch != nil && *forkEpoch < uint64(18446744073709551615) {
- forkDigest := chainState.GetForkDigest(forkVersion, nil)
+ blobParams := chainState.GetBlobScheduleForEpoch(phase0.Epoch(*forkEpoch))
+ forkDigest := chainState.GetForkDigest(forkVersion, blobParams)
version := fmt.Sprintf("0x%x", forkVersion)
epoch := *forkEpoch
forks = append(forks, &APINetworkForkInfo{
@@ -135,6 +136,7 @@ func buildNetworkForks(chainState *consensus.ChainState) []*APINetworkForkInfo {
addConsensusFork("Deneb", specs.DenebForkEpoch, specs.DenebForkVersion)
addConsensusFork("Electra", specs.ElectraForkEpoch, specs.ElectraForkVersion)
addConsensusFork("Fulu", specs.FuluForkEpoch, specs.FuluForkVersion)
+ addConsensusFork("Gloas", specs.GloasForkEpoch, specs.GloasForkVersion)
// Add BPO forks from BLOB_SCHEDULE
for i, blobSchedule := range specs.BlobSchedule {
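Note: since the blob-parameter-only fork mechanism (EIP-7892), the fork digest is no longer a function of fork version and genesis validators root alone; the blob schedule entry active at the fork epoch is mixed in as well. Passing nil here therefore produced wrong digests for epochs covered by a BPO override, which the two-step pattern above fixes:

	blobParams := chainState.GetBlobScheduleForEpoch(phase0.Epoch(*forkEpoch))
	forkDigest := chainState.GetForkDigest(forkVersion, blobParams)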
diff --git a/handlers/builders.go b/handlers/builders.go
new file mode 100644
index 00000000..ad1d9007
--- /dev/null
+++ b/handlers/builders.go
@@ -0,0 +1,294 @@
+package handlers
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/ethpandaops/dora/dbtypes"
+ "github.com/ethpandaops/dora/services"
+ "github.com/ethpandaops/dora/templates"
+ "github.com/ethpandaops/dora/types/models"
+ "github.com/sirupsen/logrus"
+)
+
+// Builders will return the main "builders" page using a go template
+func Builders(w http.ResponseWriter, r *http.Request) {
+ var buildersTemplateFiles = append(layoutTemplateFiles,
+ "builders/builders.html",
+ "_svg/professor.html",
+ )
+
+ var pageTemplate = templates.GetTemplate(buildersTemplateFiles...)
+ data := InitPageData(w, r, "builders", "/builders", "Builders", buildersTemplateFiles)
+
+ urlArgs := r.URL.Query()
+ var pageNumber uint64 = 1
+ if urlArgs.Has("p") {
+ pageNumber, _ = strconv.ParseUint(urlArgs.Get("p"), 10, 64)
+ }
+ var pageSize uint64 = 50
+ if urlArgs.Has("c") {
+ pageSize, _ = strconv.ParseUint(urlArgs.Get("c"), 10, 64)
+ }
+ if urlArgs.Has("json") && pageSize > 10000 {
+ pageSize = 10000
+ } else if !urlArgs.Has("json") && pageSize > 1000 {
+ pageSize = 1000
+ }
+
+ var filterPubKey string
+ var filterIndex string
+ var filterExecutionAddr string
+ var filterStatus string
+ if urlArgs.Has("f") {
+ if urlArgs.Has("f.pubkey") {
+ filterPubKey = urlArgs.Get("f.pubkey")
+ }
+ if urlArgs.Has("f.index") {
+ filterIndex = urlArgs.Get("f.index")
+ }
+ if urlArgs.Has("f.execution_addr") {
+ filterExecutionAddr = urlArgs.Get("f.execution_addr")
+ }
+ if urlArgs.Has("f.status") {
+ filterStatus = strings.Join(urlArgs["f.status"], ",")
+ }
+ }
+ var sortOrder string
+ if urlArgs.Has("o") {
+ sortOrder = urlArgs.Get("o")
+ }
+
+ var pageError error
+ pageError = services.GlobalCallRateLimiter.CheckCallLimit(r, 1)
+ if pageError == nil {
+ data.Data, pageError = getBuildersPageData(pageNumber, pageSize, sortOrder, filterPubKey, filterIndex, filterExecutionAddr, filterStatus)
+ }
+ if pageError != nil {
+ handlePageError(w, r, pageError)
+ return
+ }
+
+ if urlArgs.Has("json") {
+ w.Header().Set("Content-Type", "application/json")
+ err := json.NewEncoder(w).Encode(data.Data)
+ if err != nil {
+ logrus.WithError(err).Error("error encoding builders data")
+ http.Error(w, "Internal server error", http.StatusServiceUnavailable)
+ }
+ return
+ }
+
+ w.Header().Set("Content-Type", "text/html")
+ if handleTemplateError(w, r, "builders.go", "Builders", "", pageTemplate.ExecuteTemplate(w, "layout", data)) != nil {
+ return // an error has occurred and was processed
+ }
+}
+
+func getBuildersPageData(pageNumber uint64, pageSize uint64, sortOrder string, filterPubKey string, filterIndex string, filterExecutionAddr string, filterStatus string) (*models.BuildersPageData, error) {
+ pageData := &models.BuildersPageData{}
+ pageCacheKey := fmt.Sprintf("builders:%v:%v:%v:%v:%v:%v:%v", pageNumber, pageSize, sortOrder, filterPubKey, filterIndex, filterExecutionAddr, filterStatus)
+ pageRes, pageErr := services.GlobalFrontendCache.ProcessCachedPage(pageCacheKey, true, pageData, func(pageCall *services.FrontendCacheProcessingPage) interface{} {
+ pageData, cacheTimeout := buildBuildersPageData(pageNumber, pageSize, sortOrder, filterPubKey, filterIndex, filterExecutionAddr, filterStatus)
+ pageCall.CacheTimeout = cacheTimeout
+ return pageData
+ })
+ if pageErr == nil && pageRes != nil {
+ resData, resOk := pageRes.(*models.BuildersPageData)
+ if !resOk {
+ return nil, ErrInvalidPageModel
+ }
+ pageData = resData
+ }
+ return pageData, pageErr
+}
+
+func buildBuildersPageData(pageNumber uint64, pageSize uint64, sortOrder string, filterPubKey string, filterIndex string, filterExecutionAddr string, filterStatus string) (*models.BuildersPageData, time.Duration) {
+ logrus.Debugf("builders page called: %v:%v:%v:%v:%v:%v:%v", pageNumber, pageSize, sortOrder, filterPubKey, filterIndex, filterExecutionAddr, filterStatus)
+ pageData := &models.BuildersPageData{}
+ cacheTime := 10 * time.Minute
+
+ chainState := services.GlobalBeaconService.GetChainState()
+
+ builderFilter := dbtypes.BuilderFilter{
+ Limit: pageSize,
+ Offset: (pageNumber - 1) * pageSize,
+ }
+
+ filterArgs := url.Values{}
+ if filterPubKey != "" || filterIndex != "" || filterExecutionAddr != "" || filterStatus != "" {
+ if filterPubKey != "" {
+ pageData.FilterPubKey = filterPubKey
+ filterArgs.Add("f.pubkey", filterPubKey)
+ filterPubKeyVal, _ := hex.DecodeString(strings.Replace(filterPubKey, "0x", "", -1))
+ builderFilter.PubKey = filterPubKeyVal
+ }
+ if filterIndex != "" {
+ pageData.FilterIndex = filterIndex
+ filterArgs.Add("f.index", filterIndex)
+ filterIndexVal, _ := strconv.ParseUint(filterIndex, 10, 64)
+ builderFilter.MinIndex = &filterIndexVal
+ builderFilter.MaxIndex = &filterIndexVal
+ }
+ if filterExecutionAddr != "" {
+ pageData.FilterExecutionAddr = filterExecutionAddr
+ filterArgs.Add("f.execution_addr", filterExecutionAddr)
+ filterExecutionAddrVal, _ := hex.DecodeString(strings.Replace(filterExecutionAddr, "0x", "", -1))
+ builderFilter.ExecutionAddress = filterExecutionAddrVal
+ }
+ if filterStatus != "" {
+ pageData.FilterStatus = filterStatus
+ filterArgs.Add("f.status", filterStatus)
+ filterStatusVal := strings.Split(filterStatus, ",")
+ builderFilter.Status = make([]dbtypes.BuilderStatus, 0, len(filterStatusVal))
+ for _, status := range filterStatusVal {
+ switch status {
+ case "active":
+ builderFilter.Status = append(builderFilter.Status, dbtypes.BuilderStatusActiveFilter)
+ case "exited":
+ builderFilter.Status = append(builderFilter.Status, dbtypes.BuilderStatusExitedFilter)
+ case "superseded":
+ builderFilter.Status = append(builderFilter.Status, dbtypes.BuilderStatusSupersededFilter)
+ }
+ }
+ }
+ }
+
+ // apply sort order
+ switch sortOrder {
+ case "index-d":
+ builderFilter.OrderBy = dbtypes.BuilderOrderIndexDesc
+ case "pubkey":
+ builderFilter.OrderBy = dbtypes.BuilderOrderPubKeyAsc
+ case "pubkey-d":
+ builderFilter.OrderBy = dbtypes.BuilderOrderPubKeyDesc
+ case "balance":
+ builderFilter.OrderBy = dbtypes.BuilderOrderBalanceAsc
+ case "balance-d":
+ builderFilter.OrderBy = dbtypes.BuilderOrderBalanceDesc
+ case "deposit":
+ builderFilter.OrderBy = dbtypes.BuilderOrderDepositEpochAsc
+ case "deposit-d":
+ builderFilter.OrderBy = dbtypes.BuilderOrderDepositEpochDesc
+ case "withdrawable":
+ builderFilter.OrderBy = dbtypes.BuilderOrderWithdrawableEpochAsc
+ case "withdrawable-d":
+ builderFilter.OrderBy = dbtypes.BuilderOrderWithdrawableEpochDesc
+ default:
+ builderFilter.OrderBy = dbtypes.BuilderOrderIndexAsc
+ pageData.IsDefaultSorting = true
+ sortOrder = "index"
+ }
+ pageData.Sorting = sortOrder
+
+ // get latest builder set
+ builderSetRsp, builderSetLen := services.GlobalBeaconService.GetFilteredBuilderSet(&builderFilter, true)
+ if len(builderSetRsp) == 0 {
+ cacheTime = 5 * time.Minute
+ }
+
+ currentEpoch := chainState.CurrentEpoch()
+
+ // get status options
+ pageData.FilterStatusOpts = []models.BuildersPageDataStatusOption{
+ {Status: "active", Count: 0},
+ {Status: "exited", Count: 0},
+ {Status: "superseded", Count: 0},
+ }
+
+ totalPages := builderSetLen / pageSize
+ if (builderSetLen % pageSize) > 0 {
+ totalPages++
+ }
+ if pageNumber == 0 {
+ pageData.IsDefaultPage = true
+ } else if pageNumber >= totalPages {
+ if totalPages == 0 {
+ pageNumber = 0
+ } else {
+ pageNumber = totalPages
+ }
+ }
+
+ pageData.PageSize = pageSize
+ pageData.TotalPages = totalPages
+ pageData.CurrentPageIndex = pageNumber
+ if pageNumber > 1 {
+ pageData.PrevPageIndex = pageNumber - 1
+ }
+ if pageNumber < totalPages {
+ pageData.NextPageIndex = pageNumber + 1
+ }
+ if totalPages > 1 {
+ pageData.LastPageIndex = totalPages
+ }
+
+ // get builders
+ pageData.Builders = make([]*models.BuildersPageDataBuilder, 0, len(builderSetRsp))
+
+ for _, builder := range builderSetRsp {
+ if builder.Builder == nil {
+ continue
+ }
+
+ builderData := &models.BuildersPageDataBuilder{
+ Index: uint64(builder.Index),
+ PublicKey: builder.Builder.PublicKey[:],
+ ExecutionAddress: builder.Builder.ExecutionAddress[:],
+ Balance: uint64(builder.Builder.Balance),
+ }
+
+ // Determine state
+ if builder.Superseded {
+ builderData.State = "Superseded"
+ } else if builder.Builder.WithdrawableEpoch <= currentEpoch {
+ builderData.State = "Exited"
+ } else {
+ builderData.State = "Active"
+ }
+
+ // Deposit epoch
+ if builder.Builder.DepositEpoch < 18446744073709551615 {
+ builderData.ShowDeposit = true
+ builderData.DepositEpoch = uint64(builder.Builder.DepositEpoch)
+ builderData.DepositTs = chainState.EpochToTime(builder.Builder.DepositEpoch)
+ }
+
+ // Withdrawable epoch
+ if builder.Builder.WithdrawableEpoch < 18446744073709551615 {
+ builderData.ShowWithdrawable = true
+ builderData.WithdrawableEpoch = uint64(builder.Builder.WithdrawableEpoch)
+ builderData.WithdrawableTs = chainState.EpochToTime(builder.Builder.WithdrawableEpoch)
+ }
+
+ pageData.Builders = append(pageData.Builders, builderData)
+ }
+ pageData.BuilderCount = builderSetLen
+ pageData.FirstBuilder = pageNumber * pageSize
+ pageData.LastBuilder = pageData.FirstBuilder + uint64(len(pageData.Builders))
+
+ // Populate UrlParams for page jump functionality
+ pageData.UrlParams = make(map[string]string)
+ for key, values := range filterArgs {
+ if len(values) > 0 {
+ pageData.UrlParams[key] = values[0]
+ }
+ }
+ pageData.UrlParams["c"] = fmt.Sprintf("%v", pageData.PageSize)
+
+ pageData.FilteredPageLink = fmt.Sprintf("/builders?f&%v&c=%v", filterArgs.Encode(), pageData.PageSize)
+
+ // Sort status options alphabetically
+ sort.Slice(pageData.FilterStatusOpts, func(a, b int) bool {
+ return strings.Compare(pageData.FilterStatusOpts[a].Status, pageData.FilterStatusOpts[b].Status) < 0
+ })
+
+ return pageData, cacheTime
+}
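Note: the handler doubles as a JSON endpoint. A hedged request example built from the parameters wired above (f enables filtering, o picks the sort order, c the page size, capped at 10000 for JSON responses):

	GET /builders?f&f.status=active,exited&o=balance-d&c=100&json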
diff --git a/handlers/epoch.go b/handlers/epoch.go
index 21a34ff5..74145792 100644
--- a/handlers/epoch.go
+++ b/handlers/epoch.go
@@ -169,12 +169,18 @@ func buildEpochPageData(epoch uint64) (*models.EpochPageData, time.Duration) {
pageData.MissedCount++
}
+ payloadStatus := dbSlot.PayloadStatus
+ if !chainState.IsEip7732Enabled(phase0.Epoch(epoch)) {
+ payloadStatus = dbtypes.PayloadStatusCanonical
+ }
+
slotData := &models.EpochPageDataSlot{
Slot: slot,
Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(slot))),
Ts: chainState.SlotToTime(phase0.Slot(slot)),
Scheduled: slot >= uint64(currentSlot) && dbSlot.Status == dbtypes.Missing,
Status: uint8(dbSlot.Status),
+ PayloadStatus: uint8(payloadStatus),
Proposer: dbSlot.Proposer,
ProposerName: services.GlobalBeaconService.GetValidatorName(dbSlot.Proposer),
AttestationCount: dbSlot.AttestationCount,
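Note: the same pre-EIP-7732 defaulting recurs in index.go, slots.go, slots_filtered.go and validator_slots.go below. A hypothetical helper (not part of this patch) that would consolidate the repetition:

	// effectivePayloadStatus defaults to canonical before EIP-7732,
	// where blocks have no separate payload reveal (hypothetical helper).
	func effectivePayloadStatus(chainState *consensus.ChainState, epoch phase0.Epoch, status dbtypes.PayloadStatus) dbtypes.PayloadStatus {
		if !chainState.IsEip7732Enabled(epoch) {
			return dbtypes.PayloadStatusCanonical
		}
		return status
	}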
diff --git a/handlers/index.go b/handlers/index.go
index 60cb29bd..8c3f09b0 100644
--- a/handlers/index.go
+++ b/handlers/index.go
@@ -289,6 +289,19 @@ func buildIndexPageData() (*models.IndexPageData, time.Duration) {
ForkDigest: forkDigest[:],
})
}
+ if specs.GloasForkEpoch != nil && *specs.GloasForkEpoch < uint64(18446744073709551615) {
+ blobParams := chainState.GetBlobScheduleForEpoch(phase0.Epoch(*specs.GloasForkEpoch))
+ forkDigest := chainState.GetForkDigest(specs.GloasForkVersion, blobParams)
+ pageData.NetworkForks = append(pageData.NetworkForks, &models.IndexPageDataForks{
+ Name: "Gloas",
+ Epoch: *specs.GloasForkEpoch,
+ Version: specs.GloasForkVersion[:],
+ Time: uint64(chainState.EpochToTime(phase0.Epoch(*specs.GloasForkEpoch)).Unix()),
+ Active: uint64(currentEpoch) >= *specs.GloasForkEpoch,
+ Type: "consensus",
+ ForkDigest: forkDigest[:],
+ })
+ }
// Add BPO forks from BLOB_SCHEDULE
elBlobSchedule := services.GlobalBeaconService.GetExecutionChainState().GetFullBlobSchedule()
@@ -416,14 +429,23 @@ func buildIndexPageRecentBlocksData(pageData *models.IndexPageData, recentBlockC
if blockData == nil {
continue
}
+
+ epoch := chainState.EpochOfSlot(phase0.Slot(blockData.Slot))
+
+ payloadStatus := blockData.PayloadStatus
+ if !chainState.IsEip7732Enabled(epoch) {
+ payloadStatus = dbtypes.PayloadStatusCanonical
+ }
+
blockModel := &models.IndexPageDataBlocks{
- Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(blockData.Slot))),
- Slot: blockData.Slot,
- Ts: chainState.SlotToTime(phase0.Slot(blockData.Slot)),
- Proposer: blockData.Proposer,
- ProposerName: services.GlobalBeaconService.GetValidatorName(blockData.Proposer),
- Status: uint64(blockData.Status),
- BlockRoot: blockData.Root,
+ Epoch: uint64(epoch),
+ Slot: blockData.Slot,
+ Ts: chainState.SlotToTime(phase0.Slot(blockData.Slot)),
+ Proposer: blockData.Proposer,
+ ProposerName: services.GlobalBeaconService.GetValidatorName(blockData.Proposer),
+ Status: uint64(blockData.Status),
+ PayloadStatus: uint8(payloadStatus),
+ BlockRoot: blockData.Root,
}
if blockData.EthBlockNumber != nil {
blockModel.WithEthBlock = true
@@ -461,16 +483,24 @@ func buildIndexPageRecentSlotsData(pageData *models.IndexPageData, firstSlot pha
dbSlot := dbSlots[dbIdx]
dbIdx++
+ epoch := chainState.EpochOfSlot(phase0.Slot(dbSlot.Slot))
+
+ payloadStatus := dbSlot.PayloadStatus
+ if !chainState.IsEip7732Enabled(phase0.Epoch(epoch)) {
+ payloadStatus = dbtypes.PayloadStatusCanonical
+ }
+
slotData := &models.IndexPageDataSlots{
- Slot: slot,
- Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(dbSlot.Slot))),
- Ts: chainState.SlotToTime(phase0.Slot(slot)),
- Status: uint64(dbSlot.Status),
- Proposer: dbSlot.Proposer,
- ProposerName: services.GlobalBeaconService.GetValidatorName(dbSlot.Proposer),
- BlockRoot: dbSlot.Root,
- ParentRoot: dbSlot.ParentRoot,
- ForkGraph: make([]*models.IndexPageDataForkGraph, 0),
+ Slot: slot,
+ Epoch: uint64(epoch),
+ Ts: chainState.SlotToTime(phase0.Slot(slot)),
+ Status: uint64(dbSlot.Status),
+ PayloadStatus: uint8(payloadStatus),
+ Proposer: dbSlot.Proposer,
+ ProposerName: services.GlobalBeaconService.GetValidatorName(dbSlot.Proposer),
+ BlockRoot: dbSlot.Root,
+ ParentRoot: dbSlot.ParentRoot,
+ ForkGraph: make([]*models.IndexPageDataForkGraph, 0),
}
pageData.RecentSlots = append(pageData.RecentSlots, slotData)
blockCount++
diff --git a/handlers/slot.go b/handlers/slot.go
index b8f94e36..6a9ca2ef 100644
--- a/handlers/slot.go
+++ b/handlers/slot.go
@@ -45,6 +45,7 @@ func Slot(w http.ResponseWriter, r *http.Request) {
"slot/deposit_requests.html",
"slot/withdrawal_requests.html",
"slot/consolidation_requests.html",
+ "slot/bids.html",
)
var notfoundTemplateFiles = append(layoutTemplateFiles,
"slot/notfound.html",
@@ -776,10 +777,26 @@ func getSlotPageBlockData(blockData *services.CombinedBlockResponse, epochStatsV
}
}
- if requests, err := blockData.Block.ExecutionRequests(); err == nil && requests != nil {
- getSlotPageDepositRequests(pageData, requests.Deposits)
- getSlotPageWithdrawalRequests(pageData, requests.Withdrawals)
- getSlotPageConsolidationRequests(pageData, requests.Consolidations)
+ if specs.ElectraForkEpoch != nil && uint64(epoch) >= *specs.ElectraForkEpoch {
+ var requests *electra.ExecutionRequests
+ if blockData.Block.Version >= spec.DataVersionGloas {
+ if blockData.Payload != nil {
+ requests = blockData.Payload.Message.ExecutionRequests
+ }
+ } else {
+ requests, _ = blockData.Block.ExecutionRequests()
+ }
+
+ if requests != nil {
+ getSlotPageDepositRequests(pageData, requests.Deposits)
+ getSlotPageWithdrawalRequests(pageData, requests.Withdrawals)
+ getSlotPageConsolidationRequests(pageData, requests.Consolidations)
+ }
+ }
+
+ // Load execution payload bids for ePBS (gloas+) blocks
+ if blockData.Block.Version >= spec.DataVersionGloas {
+ getSlotPageBids(pageData)
}
return pageData
@@ -983,6 +1000,60 @@ func getSlotPageConsolidationRequests(pageData *models.SlotPageBlockData, consol
pageData.ConsolidationRequestsCount = uint64(len(pageData.ConsolidationRequests))
}
+func getSlotPageBids(pageData *models.SlotPageBlockData) {
+ beaconIndexer := services.GlobalBeaconService.GetBeaconIndexer()
+ bids := beaconIndexer.GetBlockBids(phase0.Root(pageData.ParentRoot))
+
+ pageData.Bids = make([]*models.SlotPageBid, 0, len(bids))
+
+ // Get the winning block hash for comparison
+ var winningBlockHash []byte
+ if pageData.ExecutionData != nil {
+ winningBlockHash = pageData.ExecutionData.BlockHash
+ }
+
+ for _, bid := range bids {
+ bidData := &models.SlotPageBid{
+ ParentRoot: bid.ParentRoot,
+ ParentHash: bid.ParentHash,
+ BlockHash: bid.BlockHash,
+ FeeRecipient: bid.FeeRecipient,
+ GasLimit: bid.GasLimit,
+ BuilderIndex: bid.BuilderIndex,
+ BuilderName: services.GlobalBeaconService.GetValidatorName(bid.BuilderIndex),
+ Slot: bid.Slot,
+ Value: bid.Value,
+ ElPayment: bid.ElPayment,
+ TotalValue: bid.Value + bid.ElPayment,
+ }
+
+ // Check if this is the winning bid
+ if winningBlockHash != nil && len(bid.BlockHash) == len(winningBlockHash) {
+ isWinning := true
+ for i := range bid.BlockHash {
+ if bid.BlockHash[i] != winningBlockHash[i] {
+ isWinning = false
+ break
+ }
+ }
+ bidData.IsWinning = isWinning
+ }
+
+ pageData.Bids = append(pageData.Bids, bidData)
+ }
+
+ // Sort by total value (value + el_payment) descending
+ for i := 0; i < len(pageData.Bids)-1; i++ {
+ for j := i + 1; j < len(pageData.Bids); j++ {
+ if pageData.Bids[j].TotalValue > pageData.Bids[i].TotalValue {
+ pageData.Bids[i], pageData.Bids[j] = pageData.Bids[j], pageData.Bids[i]
+ }
+ }
+ }
+
+ pageData.BidsCount = uint64(len(pageData.Bids))
+}
+
func handleSlotDownload(ctx context.Context, w http.ResponseWriter, blockSlot int64, blockRoot []byte, downloadType string) error {
chainState := services.GlobalBeaconService.GetChainState()
currentSlot := chainState.CurrentSlot()
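Note: the manual byte-compare loop and the O(n²) swap sort in getSlotPageBids are behavior-equivalent to the standard library versions; a sketch, assuming "bytes" and "sort" are imported in slot.go:

	bidData.IsWinning = winningBlockHash != nil && bytes.Equal(bid.BlockHash, winningBlockHash)

	sort.Slice(pageData.Bids, func(i, j int) bool {
		return pageData.Bids[i].TotalValue > pageData.Bids[j].TotalValue
	})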
diff --git a/handlers/slots.go b/handlers/slots.go
index be0a610c..b9406fc0 100644
--- a/handlers/slots.go
+++ b/handlers/slots.go
@@ -253,12 +253,19 @@ func buildSlotsPageData(firstSlot uint64, pageSize uint64, displayColumns uint64
dbSlot := dbSlots[dbIdx]
dbIdx++
+ epoch := chainState.EpochOfSlot(phase0.Slot(slot))
+ payloadStatus := dbSlot.PayloadStatus
+ if !chainState.IsEip7732Enabled(phase0.Epoch(epoch)) {
+ payloadStatus = dbtypes.PayloadStatusCanonical
+ }
+
slotData := &models.SlotsPageDataSlot{
Slot: slot,
- Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(slot))),
+ Epoch: uint64(epoch),
Ts: chainState.SlotToTime(phase0.Slot(slot)),
Finalized: finalized,
Status: uint8(dbSlot.Status),
+ PayloadStatus: uint8(payloadStatus),
Scheduled: slot >= uint64(currentSlot) && dbSlot.Status == dbtypes.Missing,
Synchronized: dbSlot.SyncParticipation != -1,
Proposer: dbSlot.Proposer,
diff --git a/handlers/slots_filtered.go b/handlers/slots_filtered.go
index 0a7c2192..31003f4b 100644
--- a/handlers/slots_filtered.go
+++ b/handlers/slots_filtered.go
@@ -436,12 +436,13 @@ func buildFilteredSlotsPageData(pageIdx uint64, pageSize uint64, graffiti string
break
}
slot := phase0.Slot(dbBlock.Slot)
+ epoch := chainState.EpochOfSlot(slot)
slotData := &models.SlotsFilteredPageDataSlot{
Slot: uint64(slot),
- Epoch: uint64(chainState.EpochOfSlot(slot)),
+ Epoch: uint64(epoch),
Ts: chainState.SlotToTime(slot),
- Finalized: finalizedEpoch >= chainState.EpochOfSlot(slot),
+ Finalized: finalizedEpoch >= epoch,
Synchronized: true,
Scheduled: slot >= currentSlot,
Proposer: dbBlock.Proposer,
@@ -473,6 +474,12 @@ func buildFilteredSlotsPageData(pageIdx uint64, pageSize uint64, graffiti string
slotData.EthBlockNumber = *dbBlock.Block.EthBlockNumber
}
+ payloadStatus := dbBlock.Block.PayloadStatus
+ if !chainState.IsEip7732Enabled(epoch) {
+ payloadStatus = dbtypes.PayloadStatusCanonical
+ }
+ slotData.PayloadStatus = uint8(payloadStatus)
+
if pageData.DisplayMevBlock && dbBlock.Block.EthBlockHash != nil {
if mevBlock, exists := mevBlocksMap[fmt.Sprintf("%x", dbBlock.Block.EthBlockHash)]; exists {
slotData.IsMevBlock = true
diff --git a/handlers/validator_slots.go b/handlers/validator_slots.go
index fe3307a4..8c39e7d0 100644
--- a/handlers/validator_slots.go
+++ b/handlers/validator_slots.go
@@ -112,12 +112,13 @@ func buildValidatorSlotsPageData(validator uint64, pageIdx uint64, pageSize uint
break
}
slot := blockAssignment.Slot
+ epoch := chainState.EpochOfSlot(phase0.Slot(slot))
slotData := &models.ValidatorSlotsPageDataSlot{
Slot: slot,
- Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(slot))),
+ Epoch: uint64(epoch),
Ts: chainState.SlotToTime(phase0.Slot(slot)),
- Finalized: finalizedEpoch >= chainState.EpochOfSlot(phase0.Slot(slot)),
+ Finalized: finalizedEpoch >= epoch,
Status: uint8(0),
Proposer: validator,
ProposerName: pageData.Name,
@@ -140,6 +141,12 @@ func buildValidatorSlotsPageData(validator uint64, pageIdx uint64, pageSize uint
slotData.WithEthBlock = true
slotData.EthBlockNumber = *dbBlock.EthBlockNumber
}
+
+ payloadStatus := dbBlock.PayloadStatus
+ if !chainState.IsEip7732Enabled(epoch) {
+ payloadStatus = dbtypes.PayloadStatusCanonical
+ }
+ slotData.PayloadStatus = uint8(payloadStatus)
}
pageData.Slots = append(pageData.Slots, slotData)
}
diff --git a/indexer/beacon/bidcache.go b/indexer/beacon/bidcache.go
new file mode 100644
index 00000000..ce4c5ac6
--- /dev/null
+++ b/indexer/beacon/bidcache.go
@@ -0,0 +1,211 @@
+package beacon
+
+import (
+ "sync"
+
+ "github.com/attestantio/go-eth2-client/spec/phase0"
+ "github.com/ethpandaops/dora/db"
+ "github.com/ethpandaops/dora/dbtypes"
+ "github.com/jmoiron/sqlx"
+)
+
+const (
+	// bidCacheMaxSlots is the maximum age (in slots) of incoming bids relative to the newest cached bid
+ bidCacheMaxSlots = 15
+ // bidCacheFlushThreshold is the slot span that triggers a flush
+ bidCacheFlushThreshold = 15
+ // bidCacheRetainSlots is the number of slots to retain after a flush
+ bidCacheRetainSlots = 10
+)
+
+// bidCacheKey uniquely identifies a bid in the cache
+type bidCacheKey struct {
+ ParentRoot phase0.Root
+ ParentHash phase0.Hash32
+ BlockHash phase0.Hash32
+ BuilderIndex uint64
+}
+
+// blockBidCache caches execution payload bids for recent blocks.
+// Bids for older slots are ignored. The cache is flushed to DB on shutdown
+// or when the slot span exceeds the threshold.
+type blockBidCache struct {
+ indexer *Indexer
+ cacheMutex sync.RWMutex
+ bids map[bidCacheKey]*dbtypes.BlockBid
+ minSlot phase0.Slot
+ maxSlot phase0.Slot
+}
+
+// newBlockBidCache creates a new instance of blockBidCache.
+func newBlockBidCache(indexer *Indexer) *blockBidCache {
+ return &blockBidCache{
+ indexer: indexer,
+ bids: make(map[bidCacheKey]*dbtypes.BlockBid, 64),
+ }
+}
+
+// loadFromDB loads bids from the last N slots from the database.
+func (cache *blockBidCache) loadFromDB(currentSlot phase0.Slot) {
+ cache.cacheMutex.Lock()
+ defer cache.cacheMutex.Unlock()
+
+ minSlot := phase0.Slot(0)
+ if currentSlot > bidCacheRetainSlots {
+ minSlot = currentSlot - bidCacheRetainSlots
+ }
+
+ dbBids := db.GetBidsForSlotRange(uint64(minSlot))
+ for _, bid := range dbBids {
+ key := bidCacheKey{
+ ParentRoot: phase0.Root(bid.ParentRoot),
+ ParentHash: phase0.Hash32(bid.ParentHash),
+ BlockHash: phase0.Hash32(bid.BlockHash),
+ BuilderIndex: bid.BuilderIndex,
+ }
+ cache.bids[key] = bid
+
+ slot := phase0.Slot(bid.Slot)
+ if cache.minSlot == 0 || slot < cache.minSlot {
+ cache.minSlot = slot
+ }
+ if slot > cache.maxSlot {
+ cache.maxSlot = slot
+ }
+ }
+
+ if len(dbBids) > 0 {
+ cache.indexer.logger.Infof("loaded %d bids from DB (slots %d-%d)", len(dbBids), cache.minSlot, cache.maxSlot)
+ }
+}
+
+// AddBid adds a bid to the cache. Returns true if the bid was added,
+// false if it was ignored (too old) or already exists.
+func (cache *blockBidCache) AddBid(bid *dbtypes.BlockBid) bool {
+ cache.cacheMutex.Lock()
+ defer cache.cacheMutex.Unlock()
+
+ slot := phase0.Slot(bid.Slot)
+
+ // Ignore bids for slots that are too old
+ if cache.maxSlot > 0 && slot+bidCacheMaxSlots < cache.maxSlot {
+ return false
+ }
+
+ key := bidCacheKey{
+ ParentRoot: phase0.Root(bid.ParentRoot),
+ ParentHash: phase0.Hash32(bid.ParentHash),
+ BlockHash: phase0.Hash32(bid.BlockHash),
+ BuilderIndex: bid.BuilderIndex,
+ }
+
+ // Check if bid already exists
+ if _, exists := cache.bids[key]; exists {
+ return false
+ }
+
+ cache.bids[key] = bid
+
+ // Update slot bounds
+ if cache.minSlot == 0 || slot < cache.minSlot {
+ cache.minSlot = slot
+ }
+ if slot > cache.maxSlot {
+ cache.maxSlot = slot
+ }
+
+ return true
+}
+
+// GetBidsForBlockRoot returns all bids for a given parent block root.
+func (cache *blockBidCache) GetBidsForBlockRoot(blockRoot phase0.Root) []*dbtypes.BlockBid {
+ cache.cacheMutex.RLock()
+ defer cache.cacheMutex.RUnlock()
+
+ result := make([]*dbtypes.BlockBid, 0)
+ for key, bid := range cache.bids {
+ if key.ParentRoot == blockRoot {
+ result = append(result, bid)
+ }
+ }
+
+ return result
+}
+
+// checkAndFlush checks if the cache needs to be flushed and performs the flush if necessary.
+// This should be called periodically (e.g., on each new block).
+func (cache *blockBidCache) checkAndFlush() error {
+ cache.cacheMutex.Lock()
+
+ // Check if we need to flush
+ if cache.maxSlot == 0 || cache.maxSlot-cache.minSlot < bidCacheFlushThreshold {
+ cache.cacheMutex.Unlock()
+ return nil
+ }
+
+ // Calculate the cutoff slot - we'll flush bids older than this
+ cutoffSlot := cache.maxSlot - bidCacheRetainSlots
+
+ // Collect bids to flush (from minSlot to cutoffSlot)
+ bidsToFlush := make([]*dbtypes.BlockBid, 0)
+ for key, bid := range cache.bids {
+ if phase0.Slot(bid.Slot) < cutoffSlot {
+ bidsToFlush = append(bidsToFlush, bid)
+ delete(cache.bids, key)
+ }
+ }
+
+ // Update minSlot
+ cache.minSlot = cutoffSlot
+
+ cache.cacheMutex.Unlock()
+
+ // Write to DB outside of lock
+ if len(bidsToFlush) > 0 {
+ err := db.RunDBTransaction(func(tx *sqlx.Tx) error {
+ return db.InsertBids(bidsToFlush, tx)
+ })
+ if err != nil {
+ cache.indexer.logger.Errorf("error flushing bids to db: %v", err)
+ return err
+ }
+ cache.indexer.logger.Debugf("flushed %d bids to DB (slots < %d)", len(bidsToFlush), cutoffSlot)
+ }
+
+ return nil
+}
+
+// flushAll flushes all cached bids to the database.
+// This should be called on shutdown.
+func (cache *blockBidCache) flushAll() error {
+ cache.cacheMutex.Lock()
+
+ if len(cache.bids) == 0 {
+ cache.cacheMutex.Unlock()
+ return nil
+ }
+
+ bidsToFlush := make([]*dbtypes.BlockBid, 0, len(cache.bids))
+ for _, bid := range cache.bids {
+ bidsToFlush = append(bidsToFlush, bid)
+ }
+
+ // Clear the cache
+ cache.bids = make(map[bidCacheKey]*dbtypes.BlockBid, 64)
+ cache.minSlot = 0
+ cache.maxSlot = 0
+
+ cache.cacheMutex.Unlock()
+
+ // Write to DB outside of lock
+ err := db.RunDBTransaction(func(tx *sqlx.Tx) error {
+ return db.InsertBids(bidsToFlush, tx)
+ })
+ if err != nil {
+ cache.indexer.logger.Errorf("error flushing all bids to db: %v", err)
+ return err
+ }
+
+ cache.indexer.logger.Infof("flushed %d bids to DB on shutdown", len(bidsToFlush))
+ return nil
+}
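Note: the intended lifecycle, pieced together from the methods above (call sites are assumptions; the actual wiring lives elsewhere in the indexer):

	cache := newBlockBidCache(indexer)
	cache.loadFromDB(chainState.CurrentSlot()) // warm with the last 10 slots from DB

	if cache.AddBid(bid) { // dedups and drops bids more than 15 slots behind the newest
		_ = cache.checkAndFlush() // once the span reaches 15 slots, spill all but the newest 10 to DB
	}

	bids := cache.GetBidsForBlockRoot(parentRoot) // all cached bids building on one block

	_ = cache.flushAll() // on shutdown: persist everything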
diff --git a/indexer/beacon/block.go b/indexer/beacon/block.go
index abbae64b..4a7a5d21 100644
--- a/indexer/beacon/block.go
+++ b/indexer/beacon/block.go
@@ -8,6 +8,7 @@ import (
"time"
"github.com/attestantio/go-eth2-client/spec"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/ethpandaops/dora/blockdb"
btypes "github.com/ethpandaops/dora/blockdb/types"
@@ -20,36 +21,40 @@ import (
// Block represents a beacon block.
type Block struct {
- Root phase0.Root
- Slot phase0.Slot
- BlockUID uint64
- dynSsz *dynssz.DynSsz
- parentRoot *phase0.Root
- dependentRoot *phase0.Root
- forkId ForkKey
- forkChecked bool
- headerMutex sync.Mutex
- headerChan chan bool
- header *phase0.SignedBeaconBlockHeader
- blockMutex sync.Mutex
- blockChan chan bool
- block *spec.VersionedSignedBeaconBlock
- blockIndex *BlockBodyIndex
- recvDelay int32
- executionTimes []ExecutionTime // execution times from snooper clients
- minExecutionTime uint16
- maxExecutionTime uint16
- execTimeUpdate *time.Ticker
- executionTimesMux sync.RWMutex
- isInFinalizedDb bool // block is in finalized table (slots)
- isInUnfinalizedDb bool // block is in unfinalized table (unfinalized_blocks)
- isDisposed bool // block is disposed
- processingStatus dbtypes.UnfinalizedBlockStatus
- seenMutex sync.RWMutex
- seenMap map[uint16]*Client
- processedActivity uint8
- blockResults [][]uint8
- blockResultsMutex sync.Mutex
+ Root phase0.Root
+ Slot phase0.Slot
+ BlockUID uint64
+ dynSsz *dynssz.DynSsz
+ parentRoot *phase0.Root
+ dependentRoot *phase0.Root
+ forkId ForkKey
+ forkChecked bool
+ headerMutex sync.Mutex
+ headerChan chan bool
+ header *phase0.SignedBeaconBlockHeader
+ blockMutex sync.Mutex
+ blockChan chan bool
+ block *spec.VersionedSignedBeaconBlock
+ executionPayloadMutex sync.Mutex
+ executionPayloadChan chan bool
+ executionPayload *gloas.SignedExecutionPayloadEnvelope
+ blockIndex *BlockBodyIndex
+ recvDelay int32
+ executionTimes []ExecutionTime // execution times from snooper clients
+ minExecutionTime uint16
+ maxExecutionTime uint16
+ execTimeUpdate *time.Ticker
+ executionTimesMux sync.RWMutex
+ isInFinalizedDb bool // block is in finalized table (slots)
+ isInUnfinalizedDb bool // block is in unfinalized table (unfinalized_blocks)
+ hasExecutionPayload bool // block has an execution payload (either in cache or db)
+ isDisposed bool // block is disposed
+ processingStatus dbtypes.UnfinalizedBlockStatus
+ seenMutex sync.RWMutex
+ seenMap map[uint16]*Client
+ processedActivity uint8
+ blockResults [][]uint8
+ blockResultsMutex sync.Mutex
}
// BlockBodyIndex holds important block properties that are used as index for cache lookups.
@@ -66,21 +71,16 @@ type BlockBodyIndex struct {
// newBlock creates a new Block instance.
func newBlock(dynSsz *dynssz.DynSsz, root phase0.Root, slot phase0.Slot, blockUID uint64) *Block {
- if blockUID == 0 {
- // use highest possible block UID as default
- blockUID = (uint64(slot) << 16) | 0xffff
+ return &Block{
+ Root: root,
+ Slot: slot,
+ BlockUID: blockUID,
+ dynSsz: dynSsz,
+ seenMap: make(map[uint16]*Client),
+ headerChan: make(chan bool),
+ blockChan: make(chan bool),
+ executionPayloadChan: make(chan bool),
}
- block := &Block{
- Root: root,
- Slot: slot,
- BlockUID: blockUID,
- dynSsz: dynSsz,
- seenMap: make(map[uint16]*Client),
- headerChan: make(chan bool),
- blockChan: make(chan bool),
- }
-
- return block
}
func (block *Block) Dispose() {
@@ -167,7 +167,7 @@ func (block *Block) GetBlock() *spec.VersionedSignedBeaconBlock {
}
if block.isInUnfinalizedDb {
- dbBlock := db.GetUnfinalizedBlock(block.Root[:])
+ dbBlock := db.GetUnfinalizedBlock(block.Root[:], false, true, false)
if dbBlock != nil {
blockBody, err := UnmarshalVersionedSignedBeaconBlockSSZ(block.dynSsz, dbBlock.BlockVer, dbBlock.BlockSSZ)
if err == nil {
@@ -198,6 +198,44 @@ func (block *Block) AwaitBlock(ctx context.Context, timeout time.Duration) *spec
return block.block
}
+// GetExecutionPayload returns the execution payload of this block.
+func (block *Block) GetExecutionPayload() *gloas.SignedExecutionPayloadEnvelope {
+ if block.executionPayload != nil {
+ return block.executionPayload
+ }
+
+ if block.hasExecutionPayload && block.isInUnfinalizedDb {
+ dbBlock := db.GetUnfinalizedBlock(block.Root[:], false, false, true)
+ if dbBlock != nil {
+ payload, err := UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(block.dynSsz, dbBlock.PayloadVer, dbBlock.PayloadSSZ)
+ if err == nil {
+ return payload
+ }
+ }
+ }
+
+ return nil
+}
+
+// AwaitExecutionPayload waits for the execution payload of this block to be available.
+func (block *Block) AwaitExecutionPayload(ctx context.Context, timeout time.Duration) *gloas.SignedExecutionPayloadEnvelope {
+	if block.executionPayload != nil {
+		return block.executionPayload // fast path: the channel is nil once closed, so the select below would wait out the full timeout
+	}
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+ select {
+ case <-block.executionPayloadChan:
+ case <-time.After(timeout):
+ case <-ctx.Done():
+ }
+
+ return block.executionPayload
+}
+
// GetParentRoot returns the parent root of this block.
func (block *Block) GetParentRoot() *phase0.Root {
if block.isDisposed {
@@ -261,7 +295,7 @@ func (block *Block) SetBlock(body *spec.VersionedSignedBeaconBlock) {
return
}
- block.setBlockIndex(body)
+ block.setBlockIndex(body, nil)
block.block = body
if block.blockChan != nil {
@@ -292,7 +326,7 @@ func (block *Block) EnsureBlock(loadBlock func() (*spec.VersionedSignedBeaconBlo
return false, err
}
- block.setBlockIndex(blockBody)
+ block.setBlockIndex(blockBody, nil)
block.block = blockBody
if block.blockChan != nil {
close(block.blockChan)
@@ -302,25 +336,86 @@ func (block *Block) EnsureBlock(loadBlock func() (*spec.VersionedSignedBeaconBlo
return true, nil
}
-// setBlockIndex sets the block index of this block.
-func (block *Block) setBlockIndex(body *spec.VersionedSignedBeaconBlock) {
- blockIndex := &BlockBodyIndex{}
- blockIndex.Graffiti, _ = body.Graffiti()
+// SetExecutionPayload sets the execution payload of this block.
+func (block *Block) SetExecutionPayload(payload *gloas.SignedExecutionPayloadEnvelope) {
+ block.setBlockIndex(block.block, payload)
+ block.executionPayload = payload
+ block.hasExecutionPayload = true
+
+ if block.executionPayloadChan != nil {
+ close(block.executionPayloadChan)
+ block.executionPayloadChan = nil
+ }
+}
+
+// EnsureExecutionPayload ensures that the execution payload of this block is available.
+func (block *Block) EnsureExecutionPayload(loadExecutionPayload func() (*gloas.SignedExecutionPayloadEnvelope, error)) (bool, error) {
+ if block.executionPayload != nil {
+ return false, nil
+ }
+
+ if block.hasExecutionPayload {
+ return false, nil
+ }
+
+ block.executionPayloadMutex.Lock()
+ defer block.executionPayloadMutex.Unlock()
+
+ if block.executionPayload != nil {
+ return false, nil
+ }
+
+ payload, err := loadExecutionPayload()
+ if err != nil {
+ return false, err
+ }
+
+ if payload == nil {
+ return false, nil
+ }
+
+ block.setBlockIndex(block.block, payload)
+ block.executionPayload = payload
+ block.hasExecutionPayload = true
+ if block.executionPayloadChan != nil {
+ close(block.executionPayloadChan)
+ block.executionPayloadChan = nil
+ }
+
+ return true, nil
+}
- executionPayload, _ := body.ExecutionPayload()
- if executionPayload != nil {
- blockIndex.ExecutionExtraData, _ = executionPayload.ExtraData()
- blockIndex.ExecutionHash, _ = executionPayload.BlockHash()
- blockIndex.ExecutionNumber, _ = executionPayload.BlockNumber()
+// setBlockIndex sets the block index of this block.
+func (block *Block) setBlockIndex(body *spec.VersionedSignedBeaconBlock, payload *gloas.SignedExecutionPayloadEnvelope) {
+ blockIndex := block.blockIndex
+ if blockIndex == nil {
+ blockIndex = &BlockBodyIndex{}
+ }
+
+ if body != nil {
+ blockIndex.Graffiti, _ = body.Graffiti()
+ blockIndex.ExecutionExtraData, _ = getBlockExecutionExtraData(body)
+ blockIndex.ExecutionHash, _ = body.ExecutionBlockHash()
+ if execNumber, err := body.ExecutionBlockNumber(); err == nil {
+ blockIndex.ExecutionNumber = uint64(execNumber)
+ }
+ if transactions, err := body.ExecutionTransactions(); err == nil {
+ blockIndex.EthTransactionCount = uint64(len(transactions))
+ }
+ if blobKzgCommitments, err := body.BlobKZGCommitments(); err == nil {
+ blockIndex.BlobCount = uint64(len(blobKzgCommitments))
+ }
+ }
+ if payload != nil {
+ blockIndex.ExecutionNumber = uint64(payload.Message.Payload.BlockNumber)
// Calculate transaction count
- executionTransactions, _ := executionPayload.Transactions()
+ executionTransactions := payload.Message.Payload.Transactions
blockIndex.EthTransactionCount = uint64(len(executionTransactions))
// Calculate blob count
- blobKzgCommitments, _ := body.BlobKZGCommitments()
+ blobKzgCommitments := payload.Message.BlobKZGCommitments
blockIndex.BlobCount = uint64(len(blobKzgCommitments))
-
}
// Calculate sync participation
@@ -353,7 +448,7 @@ func (block *Block) GetBlockIndex() *BlockBodyIndex {
blockBody := block.GetBlock()
if blockBody != nil {
- block.setBlockIndex(blockBody)
+ block.setBlockIndex(blockBody, block.GetExecutionPayload())
}
return block.blockIndex
@@ -413,14 +508,25 @@ func (block *Block) buildOrphanedBlock(compress bool) (*dbtypes.OrphanedBlock, e
return nil, fmt.Errorf("marshal block ssz failed: %v", err)
}
- return &dbtypes.OrphanedBlock{
+ orphanedBlock := &dbtypes.OrphanedBlock{
Root: block.Root[:],
HeaderVer: 1,
HeaderSSZ: headerSSZ,
BlockVer: blockVer,
BlockSSZ: blockSSZ,
BlockUid: block.BlockUID,
- }, nil
+ }
+
+ if block.executionPayload != nil {
+ payloadVer, payloadSSZ, err := MarshalVersionedSignedExecutionPayloadEnvelopeSSZ(block.dynSsz, block.executionPayload, compress)
+ if err != nil {
+ return nil, fmt.Errorf("marshal execution payload ssz failed: %v", err)
+ }
+ orphanedBlock.PayloadVer = payloadVer
+ orphanedBlock.PayloadSSZ = payloadSSZ
+ }
+
+ return orphanedBlock, nil
}
func (block *Block) writeToBlockDb() error {
@@ -428,7 +534,7 @@ func (block *Block) writeToBlockDb() error {
return nil
}
- _, err := blockdb.GlobalBlockDb.AddBlockWithCallback(context.Background(), uint64(block.Slot), block.Root[:], func() (*btypes.BlockData, error) {
+ _, _, err := blockdb.GlobalBlockDb.AddBlockWithCallback(context.Background(), uint64(block.Slot), block.Root[:], func() (*btypes.BlockData, error) {
headerSSZ, err := block.header.MarshalSSZ()
if err != nil {
return nil, fmt.Errorf("marshal header ssz failed: %v", err)
@@ -459,9 +565,12 @@ func (block *Block) unpruneBlockBody() {
return
}
- dbBlock := db.GetUnfinalizedBlock(block.Root[:])
+ dbBlock := db.GetUnfinalizedBlock(block.Root[:], false, true, true)
if dbBlock != nil {
block.block, _ = UnmarshalVersionedSignedBeaconBlockSSZ(block.dynSsz, dbBlock.BlockVer, dbBlock.BlockSSZ)
+ if len(dbBlock.PayloadSSZ) > 0 {
+ block.executionPayload, _ = UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(block.dynSsz, dbBlock.PayloadVer, dbBlock.PayloadSSZ)
+ }
}
}
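Note: typical consumption of the new payload plumbing, hedged; fetchPayloadEnvelope is a hypothetical stand-in for whatever client call retrieves the envelope:

	loaded, err := block.EnsureExecutionPayload(func() (*gloas.SignedExecutionPayloadEnvelope, error) {
		return fetchPayloadEnvelope(ctx, block.Root) // hypothetical fetch via a CL client
	})
	if err == nil && loaded {
		// first sighting: setBlockIndex refreshed exec number, tx and blob counts
	}

	payload := block.GetExecutionPayload()                    // non-blocking read (cache or unfinalized DB)
	payload = block.AwaitExecutionPayload(ctx, 4*time.Second) // bounded wait
	_ = payload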
diff --git a/indexer/beacon/block_helper.go b/indexer/beacon/block_helper.go
index c943ede1..bafec412 100644
--- a/indexer/beacon/block_helper.go
+++ b/indexer/beacon/block_helper.go
@@ -10,6 +10,7 @@ import (
"github.com/attestantio/go-eth2-client/spec/capella"
"github.com/attestantio/go-eth2-client/spec/deneb"
"github.com/attestantio/go-eth2-client/spec/electra"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/ethpandaops/dora/utils"
dynssz "github.com/pk910/dynamic-ssz"
@@ -47,6 +48,9 @@ func MarshalVersionedSignedBeaconBlockSSZ(dynSsz *dynssz.DynSsz, block *spec.Ver
case spec.DataVersionFulu:
version = uint64(block.Version)
ssz, err = dynSsz.MarshalSSZ(block.Fulu)
+ case spec.DataVersionGloas:
+ version = uint64(block.Version)
+ ssz, err = dynSsz.MarshalSSZ(block.Gloas)
default:
err = fmt.Errorf("unknown block version")
}
@@ -118,6 +122,11 @@ func UnmarshalVersionedSignedBeaconBlockSSZ(dynSsz *dynssz.DynSsz, version uint6
if err := dynSsz.UnmarshalSSZ(block.Fulu, ssz); err != nil {
return nil, fmt.Errorf("failed to decode fulu signed beacon block: %v", err)
}
+ case spec.DataVersionGloas:
+ block.Gloas = &gloas.SignedBeaconBlock{}
+ if err := dynSsz.UnmarshalSSZ(block.Gloas, ssz); err != nil {
+ return nil, fmt.Errorf("failed to decode gloas signed beacon block: %v", err)
+ }
default:
return nil, fmt.Errorf("unknown block version")
}
@@ -148,6 +157,9 @@ func MarshalVersionedSignedBeaconBlockJson(block *spec.VersionedSignedBeaconBloc
case spec.DataVersionFulu:
version = uint64(block.Version)
jsonRes, err = block.Fulu.MarshalJSON()
+ case spec.DataVersionGloas:
+ version = uint64(block.Version)
+ jsonRes, err = block.Gloas.MarshalJSON()
default:
err = fmt.Errorf("unknown block version")
}
@@ -201,12 +213,127 @@ func unmarshalVersionedSignedBeaconBlockJson(version uint64, ssz []byte) (*spec.
if err := block.Fulu.UnmarshalJSON(ssz); err != nil {
return nil, fmt.Errorf("failed to decode fulu signed beacon block: %v", err)
}
+ case spec.DataVersionGloas:
+ block.Gloas = &gloas.SignedBeaconBlock{}
+ if err := block.Gloas.UnmarshalJSON(ssz); err != nil {
+ return nil, fmt.Errorf("failed to decode gloas signed beacon block: %v", err)
+ }
default:
return nil, fmt.Errorf("unknown block version")
}
return block, nil
}
+// MarshalVersionedSignedExecutionPayloadEnvelopeSSZ marshals a signed execution payload envelope using SSZ encoding.
+func MarshalVersionedSignedExecutionPayloadEnvelopeSSZ(dynSsz *dynssz.DynSsz, payload *gloas.SignedExecutionPayloadEnvelope, compress bool) (version uint64, ssz []byte, err error) {
+ if utils.Config.KillSwitch.DisableSSZEncoding {
+ // SSZ encoding disabled, use json instead
+ version, ssz, err = marshalVersionedSignedExecutionPayloadEnvelopeJson(payload)
+ } else {
+ // SSZ encoding
+ version = uint64(spec.DataVersionGloas)
+ ssz, err = dynSsz.MarshalSSZ(payload)
+ }
+
+ if err != nil {
+ return
+ }
+
+ if compress {
+ ssz = compressBytes(ssz)
+ version |= compressionFlag
+ }
+
+ return
+}
+
+// UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ unmarshals a versioned signed execution payload envelope using SSZ encoding.
+func UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(dynSsz *dynssz.DynSsz, version uint64, ssz []byte) (*gloas.SignedExecutionPayloadEnvelope, error) {
+ if (version & compressionFlag) != 0 {
+ // decompress
+ if d, err := decompressBytes(ssz); err != nil {
+ return nil, fmt.Errorf("failed to decompress: %v", err)
+ } else {
+ ssz = d
+ version &= ^compressionFlag
+ }
+ }
+
+ if (version & jsonVersionFlag) != 0 {
+ // JSON encoding
+ return unmarshalVersionedSignedExecutionPayloadEnvelopeJson(version, ssz)
+ }
+
+ if version != uint64(spec.DataVersionGloas) {
+ return nil, fmt.Errorf("unknown version")
+ }
+
+ // SSZ encoding
+ payload := &gloas.SignedExecutionPayloadEnvelope{}
+ if err := dynSsz.UnmarshalSSZ(payload, ssz); err != nil {
+ return nil, fmt.Errorf("failed to decode gloas signed execution payload envelope: %v", err)
+ }
+
+ return payload, nil
+}
+
+// marshalVersionedSignedExecutionPayloadEnvelopeJson marshals a versioned signed execution payload envelope using JSON encoding.
+func marshalVersionedSignedExecutionPayloadEnvelopeJson(payload *gloas.SignedExecutionPayloadEnvelope) (version uint64, jsonRes []byte, err error) {
+ version = uint64(spec.DataVersionGloas)
+ jsonRes, err = payload.MarshalJSON()
+
+ version |= jsonVersionFlag
+
+ return
+}
+
+// unmarshalVersionedSignedExecutionPayloadEnvelopeJson unmarshals a versioned signed execution payload envelope using JSON encoding.
+func unmarshalVersionedSignedExecutionPayloadEnvelopeJson(version uint64, ssz []byte) (*gloas.SignedExecutionPayloadEnvelope, error) {
+ if version&jsonVersionFlag == 0 {
+ return nil, fmt.Errorf("no json encoding")
+ }
+
+ if version&^jsonVersionFlag != uint64(spec.DataVersionGloas) {
+ return nil, fmt.Errorf("unknown version")
+ }
+
+ payload := &gloas.SignedExecutionPayloadEnvelope{}
+ if err := payload.UnmarshalJSON(ssz); err != nil {
+ return nil, fmt.Errorf("failed to decode gloas signed execution payload envelope: %v", err)
+ }
+ return payload, nil
+}
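
The version word doubles as an encoding descriptor: the low bits carry the fork's DataVersion while compressionFlag and jsonVersionFlag mark compression and the JSON fallback. A minimal standalone sketch of the flag arithmetic follows; the concrete bit positions are hypothetical and only the set/test/clear mechanics match the code above.

```go
// Sketch of the versioned-encoding scheme used by the marshal/unmarshal pair
// above. The flag bit positions are hypothetical; only the mechanics of
// setting, testing and clearing the flags match the code.
package main

import "fmt"

const (
	jsonVersionFlag = uint64(1) << 62 // hypothetical bit position
	compressionFlag = uint64(1) << 63 // hypothetical bit position
)

func main() {
	forkVersion := uint64(12) // e.g. the DataVersionGloas ordinal

	// encode: fork version in the low bits, encoding flags in the high bits
	stored := forkVersion | jsonVersionFlag | compressionFlag

	// decode: peel the flags off before dispatching to the right decoder
	isCompressed := stored&compressionFlag != 0
	isJSON := stored&jsonVersionFlag != 0
	fork := stored &^ (compressionFlag | jsonVersionFlag)

	fmt.Println(isCompressed, isJSON, fork) // true true 12
}
```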
+
+// getBlockExecutionExtraData returns the extra data from the execution payload of a versioned signed beacon block.
+func getBlockExecutionExtraData(v *spec.VersionedSignedBeaconBlock) ([]byte, error) {
+ switch v.Version {
+ case spec.DataVersionBellatrix:
+ if v.Bellatrix == nil || v.Bellatrix.Message == nil || v.Bellatrix.Message.Body == nil || v.Bellatrix.Message.Body.ExecutionPayload == nil {
+ return nil, errors.New("no bellatrix block")
+ }
+
+ return v.Bellatrix.Message.Body.ExecutionPayload.ExtraData, nil
+ case spec.DataVersionCapella:
+ if v.Capella == nil || v.Capella.Message == nil || v.Capella.Message.Body == nil || v.Capella.Message.Body.ExecutionPayload == nil {
+ return nil, errors.New("no capella block")
+ }
+
+ return v.Capella.Message.Body.ExecutionPayload.ExtraData, nil
+ case spec.DataVersionDeneb:
+ if v.Deneb == nil || v.Deneb.Message == nil || v.Deneb.Message.Body == nil || v.Deneb.Message.Body.ExecutionPayload == nil {
+ return nil, errors.New("no deneb block")
+ }
+
+ return v.Deneb.Message.Body.ExecutionPayload.ExtraData, nil
+ case spec.DataVersionElectra:
+ if v.Electra == nil || v.Electra.Message == nil || v.Electra.Message.Body == nil || v.Electra.Message.Body.ExecutionPayload == nil {
+ return nil, errors.New("no electra block")
+ }
+
+ return v.Electra.Message.Body.ExecutionPayload.ExtraData, nil
+ case spec.DataVersionGloas:
+ // post EIP-7732, the execution payload (and its extra data) is carried in
+ // the separate signed execution payload envelope, not in the block body
+ return nil, nil
+ default:
+ return nil, errors.New("unknown version")
+ }
+}
+
// getStateRandaoMixes returns the RANDAO mixes from a versioned beacon state.
func getStateRandaoMixes(v *spec.VersionedBeaconState) ([]phase0.Root, error) {
switch v.Version {
@@ -252,6 +379,12 @@ func getStateRandaoMixes(v *spec.VersionedBeaconState) ([]phase0.Root, error) {
}
return v.Fulu.RANDAOMixes, nil
+ case spec.DataVersionGloas:
+ if v.Gloas == nil || v.Gloas.RANDAOMixes == nil {
+ return nil, errors.New("no gloas block")
+ }
+
+ return v.Gloas.RANDAOMixes, nil
default:
return nil, errors.New("unknown version")
}
@@ -274,6 +407,8 @@ func getStateDepositIndex(state *spec.VersionedBeaconState) uint64 {
return state.Electra.ETH1DepositIndex
case spec.DataVersionFulu:
return state.Fulu.ETH1DepositIndex
+ case spec.DataVersionGloas:
+ return state.Gloas.ETH1DepositIndex
}
return 0
}
@@ -319,6 +454,12 @@ func getStateCurrentSyncCommittee(v *spec.VersionedBeaconState) ([]phase0.BLSPub
}
return v.Fulu.CurrentSyncCommittee.Pubkeys, nil
+ case spec.DataVersionGloas:
+ if v.Gloas == nil || v.Gloas.CurrentSyncCommittee == nil {
+ return nil, errors.New("no gloas block")
+ }
+
+ return v.Gloas.CurrentSyncCommittee.Pubkeys, nil
default:
return nil, errors.New("unknown version")
}
@@ -349,6 +490,12 @@ func getStateDepositBalanceToConsume(v *spec.VersionedBeaconState) (phase0.Gwei,
}
return v.Fulu.DepositBalanceToConsume, nil
+ case spec.DataVersionGloas:
+ if v.Gloas == nil {
+ return 0, errors.New("no gloas block")
+ }
+
+ return v.Gloas.DepositBalanceToConsume, nil
default:
return 0, errors.New("unknown version")
}
@@ -379,6 +526,12 @@ func getStatePendingDeposits(v *spec.VersionedBeaconState) ([]*electra.PendingDe
}
return v.Fulu.PendingDeposits, nil
+ case spec.DataVersionGloas:
+ if v.Gloas == nil || v.Gloas.PendingDeposits == nil {
+ return nil, errors.New("no gloas block")
+ }
+
+ return v.Gloas.PendingDeposits, nil
default:
return nil, errors.New("unknown version")
}
@@ -409,6 +562,12 @@ func getStatePendingWithdrawals(v *spec.VersionedBeaconState) ([]*electra.Pendin
}
return v.Fulu.PendingPartialWithdrawals, nil
+ case spec.DataVersionGloas:
+ if v.Gloas == nil || v.Gloas.PendingPartialWithdrawals == nil {
+ return nil, errors.New("no gloas block")
+ }
+
+ return v.Gloas.PendingPartialWithdrawals, nil
default:
return nil, errors.New("unknown version")
}
@@ -439,6 +598,12 @@ func getStatePendingConsolidations(v *spec.VersionedBeaconState) ([]*electra.Pen
}
return v.Fulu.PendingConsolidations, nil
+ case spec.DataVersionGloas:
+ if v.Gloas == nil || v.Gloas.PendingConsolidations == nil {
+ return nil, errors.New("no gloas block")
+ }
+
+ return v.Gloas.PendingConsolidations, nil
default:
return nil, errors.New("unknown version")
}
@@ -465,6 +630,12 @@ func getStateProposerLookahead(v *spec.VersionedBeaconState) ([]phase0.Validator
}
return v.Fulu.ProposerLookahead, nil
+ case spec.DataVersionGloas:
+ if v.Gloas == nil || v.Gloas.ProposerLookahead == nil {
+ return nil, errors.New("no gloas block")
+ }
+
+ return v.Gloas.ProposerLookahead, nil
default:
return nil, errors.New("unknown version")
}
@@ -487,6 +658,8 @@ func getBlockSize(dynSsz *dynssz.DynSsz, block *spec.VersionedSignedBeaconBlock)
return dynSsz.SizeSSZ(block.Electra)
case spec.DataVersionFulu:
return dynSsz.SizeSSZ(block.Fulu)
+ case spec.DataVersionGloas:
+ return dynSsz.SizeSSZ(block.Gloas)
default:
return 0, errors.New("unknown version")
}
diff --git a/indexer/beacon/buildercache.go b/indexer/beacon/buildercache.go
new file mode 100644
index 00000000..41f9c12a
--- /dev/null
+++ b/indexer/beacon/buildercache.go
@@ -0,0 +1,751 @@
+package beacon
+
+import (
+ "bytes"
+ "fmt"
+ "hash/crc64"
+ "math"
+ "runtime/debug"
+ "sync"
+ "time"
+
+ "github.com/attestantio/go-eth2-client/spec/gloas"
+ "github.com/attestantio/go-eth2-client/spec/phase0"
+ "github.com/jmoiron/sqlx"
+
+ "github.com/ethpandaops/dora/db"
+ "github.com/ethpandaops/dora/dbtypes"
+)
+
+// BuilderIndexFlag separates builder indices from validator indices in the pubkey cache
+const BuilderIndexFlag = uint64(1 << 40)
+
+// Builder status flag constants representing different builder states
+const (
+ BuilderStatusExited uint16 = 1 << iota // Builder has exited (withdrawable_epoch reached)
+ BuilderStatusSuperseded // Builder index was reused, this pubkey is no longer active
+)
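
Builders share the pubkey cache with validators, so builder indices are tagged with bit 40 before insertion to keep the two keyspaces apart (this assumes validator indices stay below 2^40). A small sketch of the tagging scheme:

```go
// Tagging builder indices with bit 40 so they can share one uint64 keyspace
// with validator indices, as the pubkey cache above does.
package main

import "fmt"

const BuilderIndexFlag = uint64(1 << 40)

func main() {
	builderIndex := uint64(7)
	tagged := builderIndex | BuilderIndexFlag // value stored in the pubkey cache

	if tagged&BuilderIndexFlag != 0 {
		fmt.Println("builder index:", tagged&^BuilderIndexFlag) // builder index: 7
	} else {
		fmt.Println("validator index:", tagged)
	}
}
```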
+
+// builderCache manages the in-memory cache of builder states and handles updates
+type builderCache struct {
+ indexer *Indexer
+ builderSetCache []*builderEntry
+ cacheMutex sync.RWMutex
+ triggerDbUpdate chan bool
+}
+
+// builderEntry represents a single builder's state in the cache
+type builderEntry struct {
+ builderDiffs []*builderDiff
+ finalChecksum uint64
+ finalBuilder *gloas.Builder
+ activeData *BuilderData
+ statusFlags uint16
+}
+
+// BuilderData contains the essential builder state information for active builders.
+// Only WithdrawableEpoch can change during a builder's lifetime; all other fields are static.
+type BuilderData struct {
+ WithdrawableEpoch phase0.Epoch
+}
+
+// builderDiff represents an updated builder entry in the builder set cache.
+type builderDiff struct {
+ epoch phase0.Epoch
+ dependentRoot phase0.Root
+ builder *gloas.Builder
+}
+
+// newBuilderCache initializes a new builder cache instance and starts the persist loop
+func newBuilderCache(indexer *Indexer) *builderCache {
+ cache := &builderCache{
+ indexer: indexer,
+ triggerDbUpdate: make(chan bool, 1),
+ }
+
+ go cache.runPersistLoop()
+
+ return cache
+}
+
+// updateBuilderSet processes builder set updates and maintains the cache state
+func (cache *builderCache) updateBuilderSet(slot phase0.Slot, dependentRoot phase0.Root, builders []*gloas.Builder) {
+ chainState := cache.indexer.consensusPool.GetChainState()
+ epoch := chainState.EpochOfSlot(slot)
+ currentEpoch := chainState.CurrentEpoch()
+ finalizedEpoch, finalizedRoot := chainState.GetFinalizedCheckpoint()
+ cutOffEpoch := phase0.Epoch(0)
+ if currentEpoch > phase0.Epoch(cache.indexer.inMemoryEpochs) {
+ cutOffEpoch = currentEpoch - phase0.Epoch(cache.indexer.inMemoryEpochs)
+ }
+ if cutOffEpoch > finalizedEpoch {
+ cutOffEpoch = finalizedEpoch
+ }
+
+ if epoch < cutOffEpoch {
+ cache.indexer.logger.Infof("ignoring old builder set update for epoch %d", epoch)
+ return
+ }
+
+ isFinalizedBuilderSet := false
+ if slot == 0 {
+ isFinalizedBuilderSet = true // genesis
+ } else if epoch <= finalizedEpoch {
+ finalizedBlock := cache.indexer.blockCache.getBlockByRoot(finalizedRoot)
+ if finalizedBlock != nil {
+ finalizedDependentBlock := cache.indexer.blockCache.getDependentBlock(chainState, finalizedBlock, nil)
+ if finalizedDependentBlock != nil && bytes.Equal(finalizedDependentBlock.Root[:], dependentRoot[:]) {
+ isFinalizedBuilderSet = true
+ }
+ }
+ }
+
+ cache.cacheMutex.Lock()
+ defer cache.cacheMutex.Unlock()
+
+ t1 := time.Now()
+
+ if len(cache.builderSetCache) < len(builders) {
+ if len(builders) > cap(cache.builderSetCache) {
+ newCache := make([]*builderEntry, len(builders), len(builders)+1000)
+ copy(newCache, cache.builderSetCache)
+ cache.builderSetCache = newCache
+ } else {
+ cache.builderSetCache = cache.builderSetCache[:len(builders)]
+ }
+ }
+
+ isParentMap := map[phase0.Root]bool{}
+ isAheadMap := map[phase0.Root]bool{}
+ updatedCount := uint64(0)
+
+ for i := range builders {
+ var parentChecksum uint64
+ var parentBuilder *gloas.Builder
+ parentEpoch := phase0.Epoch(0)
+
+ aheadDiffIdx := 0
+ foundAhead := false
+ aheadEpoch := phase0.Epoch(math.MaxInt64)
+
+ cachedBuilder := cache.builderSetCache[i]
+ if cachedBuilder == nil {
+ cachedBuilder = &builderEntry{}
+ cache.builderSetCache[i] = cachedBuilder
+
+ cache.indexer.pubkeyCache.Add(builders[i].PublicKey, phase0.ValidatorIndex(uint64(i)|BuilderIndexFlag))
+ } else {
+ parentBuilder = cachedBuilder.finalBuilder
+ parentChecksum = cachedBuilder.finalChecksum
+ }
+
+ deleteKeys := []int{}
+
+ if !isFinalizedBuilderSet {
+ for diffkey, diff := range cachedBuilder.builderDiffs {
+ if diff.epoch < cutOffEpoch {
+ deleteKeys = append(deleteKeys, diffkey)
+ continue
+ }
+
+ if diff.epoch < epoch {
+ isParent, checkedParent := isParentMap[diff.dependentRoot]
+ if !checkedParent {
+ isParent = cache.indexer.blockCache.isCanonicalBlock(diff.dependentRoot, dependentRoot)
+ isParentMap[diff.dependentRoot] = isParent
+ }
+
+ if isParent && diff.epoch > parentEpoch {
+ parentBuilder = diff.builder
+ parentEpoch = diff.epoch
+ }
+ }
+
+ if diff.epoch > epoch {
+ isAhead, checkedAhead := isAheadMap[diff.dependentRoot]
+ if !checkedAhead {
+ isAhead = cache.indexer.blockCache.isCanonicalBlock(dependentRoot, diff.dependentRoot)
+ isAheadMap[diff.dependentRoot] = isAhead
+ }
+
+ if isAhead && diff.epoch < aheadEpoch {
+ aheadDiffIdx = diffkey
+ aheadEpoch = diff.epoch
+ foundAhead = true
+ }
+ }
+ }
+
+ if parentBuilder != nil {
+ parentChecksum = calculateBuilderChecksum(parentBuilder)
+ }
+ }
+
+ checksum := calculateBuilderChecksum(builders[i])
+ if checksum == parentChecksum {
+ continue
+ }
+
+ if isFinalizedBuilderSet {
+ cachedBuilder.finalBuilder = builders[i]
+ cachedBuilder.finalChecksum = checksum
+ cachedBuilder.statusFlags = GetBuilderStatusFlags(builders[i])
+ updatedCount++
+
+ activeData := &BuilderData{
+ WithdrawableEpoch: builders[i].WithdrawableEpoch,
+ }
+ if cache.isActiveBuilder(activeData) {
+ cachedBuilder.activeData = activeData
+ }
+ }
+
+ if foundAhead && cache.checkBuilderEqual(cachedBuilder.builderDiffs[aheadDiffIdx].builder, builders[i]) {
+ if isFinalizedBuilderSet {
+ deleteKeys = append(deleteKeys, aheadDiffIdx)
+ } else {
+ diff := cachedBuilder.builderDiffs[aheadDiffIdx]
+ diff.epoch = epoch
+ diff.dependentRoot = dependentRoot
+ cachedBuilder.builderDiffs[aheadDiffIdx] = diff
+ }
+ } else if isFinalizedBuilderSet {
+ // finalized state is already stored in finalBuilder above, no diff needed
+ } else if len(deleteKeys) == 0 {
+ cachedBuilder.builderDiffs = append(cachedBuilder.builderDiffs, &builderDiff{
+ epoch: epoch,
+ dependentRoot: dependentRoot,
+ builder: builders[i],
+ })
+ } else {
+ cachedBuilder.builderDiffs[deleteKeys[0]] = &builderDiff{
+ epoch: epoch,
+ dependentRoot: dependentRoot,
+ builder: builders[i],
+ }
+ deleteKeys = deleteKeys[1:]
+ }
+
+ if len(deleteKeys) > 0 {
+ lastIdx := len(cachedBuilder.builderDiffs) - 1
+ delLen := len(deleteKeys)
+ for delIdx := 0; delIdx < delLen; delIdx++ {
+ for delLen > 0 && deleteKeys[delLen-1] == lastIdx {
+ lastIdx--
+ delLen--
+ }
+ if delLen == 0 {
+ break
+ }
+ cachedBuilder.builderDiffs[deleteKeys[delIdx]] = cachedBuilder.builderDiffs[lastIdx]
+ lastIdx--
+ }
+
+ cachedBuilder.builderDiffs = cachedBuilder.builderDiffs[:lastIdx+1]
+ }
+ }
+
+ if updatedCount > 0 {
+ select {
+ case cache.triggerDbUpdate <- true:
+ default:
+ }
+ }
+
+ isFinalizedStr := ""
+ if isFinalizedBuilderSet {
+ isFinalizedStr = "finalized "
+ }
+ cache.indexer.logger.Infof("processed %vbuilder set update for epoch %d in %v", isFinalizedStr, epoch, time.Since(t1))
+}
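
The core of updateBuilderSet is change detection: a new diff is only recorded when a builder actually differs from its state on the update's parent chain, and that comparison runs on checksums rather than full struct equality. A simplified sketch of the parent-state resolution, with pared-down stand-in types:

```go
// Pared-down parent-state resolution from updateBuilderSet: the reference
// checksum is the finalized state, overridden by the newest diff that lies
// on the update's parent chain. A matching checksum means no new diff.
package main

import "fmt"

type builderDiff struct {
	epoch    uint64
	checksum uint64
}

func resolveParentChecksum(finalChecksum uint64, diffs []builderDiff, isCanonical func(builderDiff) bool) uint64 {
	parent := finalChecksum
	parentEpoch := uint64(0)
	for _, d := range diffs {
		if isCanonical(d) && d.epoch > parentEpoch {
			parent = d.checksum
			parentEpoch = d.epoch
		}
	}
	return parent
}

func main() {
	diffs := []builderDiff{{epoch: 5, checksum: 111}, {epoch: 9, checksum: 222}}
	parent := resolveParentChecksum(42, diffs, func(builderDiff) bool { return true })

	updateChecksum := uint64(222)
	fmt.Println("store new diff:", updateChecksum != parent) // store new diff: false
}
```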
+
+// checkBuilderEqual compares two builder states for equality
+func (cache *builderCache) checkBuilderEqual(builder1 *gloas.Builder, builder2 *gloas.Builder) bool {
+ if builder1 == nil && builder2 == nil {
+ return true
+ }
+ if builder1 == nil || builder2 == nil {
+ return false
+ }
+ return bytes.Equal(builder1.PublicKey[:], builder2.PublicKey[:]) &&
+ builder1.Version == builder2.Version &&
+ bytes.Equal(builder1.ExecutionAddress[:], builder2.ExecutionAddress[:]) &&
+ builder1.DepositEpoch == builder2.DepositEpoch &&
+ builder1.WithdrawableEpoch == builder2.WithdrawableEpoch
+}
+
+// GetBuilderStatusFlags calculates the status flags for a builder
+func GetBuilderStatusFlags(builder *gloas.Builder) uint16 {
+ flags := uint16(0)
+ if builder.WithdrawableEpoch != FarFutureEpoch {
+ flags |= BuilderStatusExited
+ }
+ return flags
+}
+
+// getBuilderSetSize returns the current number of builders in the builder set
+func (cache *builderCache) getBuilderSetSize() uint64 {
+ cache.cacheMutex.RLock()
+ defer cache.cacheMutex.RUnlock()
+
+ return uint64(len(cache.builderSetCache))
+}
+
+// getBuilderFlags returns the status flags for a specific builder
+func (cache *builderCache) getBuilderFlags(builderIndex gloas.BuilderIndex) uint16 {
+ cache.cacheMutex.RLock()
+ defer cache.cacheMutex.RUnlock()
+
+ if uint64(builderIndex) >= uint64(len(cache.builderSetCache)) || cache.builderSetCache[builderIndex] == nil {
+ return 0
+ }
+
+ return cache.builderSetCache[builderIndex].statusFlags
+}
+
+// setFinalizedEpoch updates the builder cache when a new epoch is finalized
+func (cache *builderCache) setFinalizedEpoch(epoch phase0.Epoch, nextEpochDependentRoot phase0.Root) {
+ cache.cacheMutex.Lock()
+ defer cache.cacheMutex.Unlock()
+
+ updatedCount := uint64(0)
+
+ for _, cachedBuilder := range cache.builderSetCache {
+ if cachedBuilder == nil {
+ continue
+ }
+
+ // Find the finalized builder state
+ for _, diff := range cachedBuilder.builderDiffs {
+ if diff.dependentRoot == nextEpochDependentRoot {
+ cachedBuilder.finalBuilder = diff.builder
+ cachedBuilder.finalChecksum = calculateBuilderChecksum(diff.builder)
+ cachedBuilder.statusFlags = GetBuilderStatusFlags(diff.builder)
+ updatedCount++
+
+ cachedBuilder.activeData = &BuilderData{
+ WithdrawableEpoch: diff.builder.WithdrawableEpoch,
+ }
+ break
+ }
+ }
+
+ // Clean up old diffs
+ newDiffs := make([]*builderDiff, 0)
+ for _, diff := range cachedBuilder.builderDiffs {
+ if diff.epoch > epoch {
+ newDiffs = append(newDiffs, diff)
+ }
+ }
+ cachedBuilder.builderDiffs = newDiffs
+
+ // Clear old active data
+ if cachedBuilder.activeData != nil {
+ if !cache.isActiveBuilder(cachedBuilder.activeData) {
+ cachedBuilder.activeData = nil
+ }
+ }
+ }
+
+ if updatedCount > 0 {
+ select {
+ case cache.triggerDbUpdate <- true:
+ default:
+ }
+ }
+}
+
+// BuilderSetStreamer is a callback for streaming builder data
+type BuilderSetStreamer func(index gloas.BuilderIndex, flags uint16, activeData *BuilderData, builder *gloas.Builder) error
+
+// streamBuilderSetForRoot streams the builder set for a given blockRoot
+func (cache *builderCache) streamBuilderSetForRoot(blockRoot phase0.Root, onlyActive bool, epoch *phase0.Epoch, cb BuilderSetStreamer) error {
+ cache.cacheMutex.RLock()
+ defer cache.cacheMutex.RUnlock()
+
+ isParentMap := map[phase0.Root]bool{}
+ isAheadMap := map[phase0.Root]bool{}
+
+ for index, cachedBuilder := range cache.builderSetCache {
+ if cachedBuilder == nil {
+ continue
+ }
+
+ latestBuilder := cachedBuilder.finalBuilder
+ builderData := cachedBuilder.activeData
+ builderEpoch := phase0.Epoch(0)
+
+ var aheadBuilder *gloas.Builder
+ aheadEpoch := phase0.Epoch(math.MaxInt64)
+
+ for _, diff := range cachedBuilder.builderDiffs {
+ isParent, checkedParent := isParentMap[diff.dependentRoot]
+ if !checkedParent {
+ isParent = cache.indexer.blockCache.isCanonicalBlock(diff.dependentRoot, blockRoot)
+ isParentMap[diff.dependentRoot] = isParent
+ }
+
+ if isParent && diff.epoch >= builderEpoch {
+ builderData = &BuilderData{
+ WithdrawableEpoch: diff.builder.WithdrawableEpoch,
+ }
+ builderEpoch = diff.epoch
+ latestBuilder = diff.builder
+ }
+
+ if !isParent && builderData == nil {
+ isAhead, checkedAhead := isAheadMap[diff.dependentRoot]
+ if !checkedAhead {
+ isAhead = cache.indexer.blockCache.isCanonicalBlock(blockRoot, diff.dependentRoot)
+ isAheadMap[diff.dependentRoot] = isAhead
+ }
+
+ if isAhead && diff.epoch < aheadEpoch {
+ aheadBuilder = diff.builder
+ aheadEpoch = diff.epoch
+ }
+ }
+ }
+
+ if builderData == nil && aheadBuilder != nil {
+ builderData = &BuilderData{
+ WithdrawableEpoch: aheadBuilder.WithdrawableEpoch,
+ }
+ latestBuilder = aheadBuilder
+ }
+
+ if onlyActive && (builderData == nil || (epoch != nil && builderData.WithdrawableEpoch <= *epoch)) {
+ continue
+ }
+
+ builderFlags := cachedBuilder.statusFlags
+ if latestBuilder != nil {
+ builderFlags = GetBuilderStatusFlags(latestBuilder)
+ }
+
+ err := cb(gloas.BuilderIndex(index), builderFlags, builderData, latestBuilder)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// UnwrapDbBuilder converts a dbtypes.Builder to a gloas.Builder
+func UnwrapDbBuilder(dbBuilder *dbtypes.Builder) *gloas.Builder {
+ builder := &gloas.Builder{
+ Version: dbBuilder.Version,
+ Balance: 0, // Balance not persisted
+ DepositEpoch: phase0.Epoch(db.ConvertInt64ToUint64(dbBuilder.DepositEpoch)),
+ WithdrawableEpoch: phase0.Epoch(db.ConvertInt64ToUint64(dbBuilder.WithdrawableEpoch)),
+ }
+ copy(builder.PublicKey[:], dbBuilder.Pubkey)
+ copy(builder.ExecutionAddress[:], dbBuilder.ExecutionAddress)
+ return builder
+}
+
+// isActiveBuilder determines if a builder is currently active
+func (cache *builderCache) isActiveBuilder(builder *BuilderData) bool {
+ currentEpoch := cache.indexer.consensusPool.GetChainState().CurrentEpoch()
+
+ // keep builders active for a 10 epoch grace period past their withdrawable epoch
+ cutOffEpoch := phase0.Epoch(0)
+ if currentEpoch > 10 {
+ cutOffEpoch = currentEpoch - 10
+ }
+
+ return builder.WithdrawableEpoch > cutOffEpoch
+}
+
+// getBuilderByIndex returns the builder by index for a given forkId
+func (cache *builderCache) getBuilderByIndex(index gloas.BuilderIndex, overrideForkId *ForkKey) *gloas.Builder {
+ canonicalHead := cache.indexer.GetCanonicalHead(overrideForkId)
+ if canonicalHead == nil {
+ return nil
+ }
+
+ return cache.getBuilderByIndexAndRoot(index, canonicalHead.Root)
+}
+
+// getBuilderByIndexAndRoot returns the builder by index for a given blockRoot
+func (cache *builderCache) getBuilderByIndexAndRoot(index gloas.BuilderIndex, blockRoot phase0.Root) *gloas.Builder {
+ cache.cacheMutex.RLock()
+ defer cache.cacheMutex.RUnlock()
+
+ if uint64(index) >= uint64(len(cache.builderSetCache)) {
+ return nil
+ }
+
+ cachedBuilder := cache.builderSetCache[index]
+ if cachedBuilder == nil {
+ return nil
+ }
+
+ builder := cachedBuilder.finalBuilder
+ builderEpoch := phase0.Epoch(0)
+
+ // Find the latest valid diff
+ for _, diff := range cachedBuilder.builderDiffs {
+ if cache.indexer.blockCache.isCanonicalBlock(diff.dependentRoot, blockRoot) && diff.epoch >= builderEpoch {
+ builder = diff.builder
+ builderEpoch = diff.epoch
+ }
+ }
+
+ // Fallback to db if builder is not found in cache
+ if builder == nil {
+ if dbBuilder := db.GetActiveBuilderByIndex(uint64(index)); dbBuilder != nil {
+ builder = UnwrapDbBuilder(dbBuilder)
+ }
+ } else {
+ // Return a copy
+ builder = &gloas.Builder{
+ PublicKey: builder.PublicKey,
+ Version: builder.Version,
+ ExecutionAddress: builder.ExecutionAddress,
+ Balance: builder.Balance,
+ DepositEpoch: builder.DepositEpoch,
+ WithdrawableEpoch: builder.WithdrawableEpoch,
+ }
+ }
+
+ return builder
+}
+
+// calculateBuilderChecksum generates a CRC64 checksum of all builder fields (except balance)
+func calculateBuilderChecksum(b *gloas.Builder) uint64 {
+ if b == nil {
+ return 0
+ }
+
+ data := make([]byte, 0, 80)
+ data = append(data, b.PublicKey[:]...)
+ data = append(data, b.Version)
+ data = append(data, b.ExecutionAddress[:]...)
+ data = append(data, uint64ToBytes(uint64(b.DepositEpoch))...)
+ data = append(data, uint64ToBytes(uint64(b.WithdrawableEpoch))...)
+
+ return crc64.Checksum(data, crc64Table)
+}
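
Balance changes constantly, so it is deliberately excluded from the checksum; hashing it would turn every epoch transition into a spurious diff. A self-contained sketch of that property follows; the big-endian byte order of uint64ToBytes and the crc64.ECMA polynomial behind crc64Table are assumptions here, not values taken from the code.

```go
// Property of the checksum above: every field except Balance is hashed, so a
// balance-only change yields the same value. Byte order and polynomial are
// assumptions for this sketch.
package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc64"
)

var table = crc64.MakeTable(crc64.ECMA)

type builder struct {
	pubkey            [48]byte
	version           byte
	executionAddress  [20]byte
	balance           uint64 // intentionally never hashed
	depositEpoch      uint64
	withdrawableEpoch uint64
}

func checksum(b builder) uint64 {
	data := make([]byte, 0, 85)
	data = append(data, b.pubkey[:]...)
	data = append(data, b.version)
	data = append(data, b.executionAddress[:]...)
	data = binary.BigEndian.AppendUint64(data, b.depositEpoch)
	data = binary.BigEndian.AppendUint64(data, b.withdrawableEpoch)
	return crc64.Checksum(data, table)
}

func main() {
	a := builder{version: 1, balance: 5, depositEpoch: 10, withdrawableEpoch: 20}
	b := a
	b.balance = 999 // balance changes every epoch

	fmt.Println(checksum(a) == checksum(b)) // true: no spurious diff
}
```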
+
+// prepopulateFromDB pre-populates the builder set cache from the database
+func (cache *builderCache) prepopulateFromDB() (uint64, error) {
+ cache.cacheMutex.Lock()
+ defer cache.cacheMutex.Unlock()
+
+ maxIndex, err := db.GetMaxBuilderIndex()
+ if err != nil {
+ return 0, fmt.Errorf("error getting max builder index: %w", err)
+ }
+
+ if maxIndex == 0 {
+ return 0, nil
+ }
+
+ cache.builderSetCache = make([]*builderEntry, maxIndex+1, maxIndex+1+1000)
+
+ restoreCount := uint64(0)
+
+ batchSize := uint64(10000)
+ for start := uint64(0); start <= maxIndex; start += batchSize {
+ end := min(start+batchSize, maxIndex)
+
+ builders := db.GetBuilderRange(start, end)
+ for _, dbBuilder := range builders {
+ if dbBuilder.Superseded {
+ continue
+ }
+
+ builder := UnwrapDbBuilder(dbBuilder)
+ builderEntry := &builderEntry{
+ finalChecksum: calculateBuilderChecksum(builder),
+ }
+ builderData := &BuilderData{
+ WithdrawableEpoch: phase0.Epoch(db.ConvertInt64ToUint64(dbBuilder.WithdrawableEpoch)),
+ }
+ if cache.isActiveBuilder(builderData) {
+ builderEntry.activeData = builderData
+ }
+ builderEntry.statusFlags = GetBuilderStatusFlags(builder)
+
+ cache.builderSetCache[dbBuilder.BuilderIndex] = builderEntry
+
+ cache.indexer.pubkeyCache.Add(builder.PublicKey, phase0.ValidatorIndex(dbBuilder.BuilderIndex|BuilderIndexFlag))
+
+ restoreCount++
+ }
+ }
+
+ return restoreCount, nil
+}
+
+// runPersistLoop handles the background persistence of builder states to the database
+func (cache *builderCache) runPersistLoop() {
+ defer func() {
+ if err := recover(); err != nil {
+ // recovered value is not guaranteed to implement error, so format it instead
+ cache.indexer.logger.Errorf(
+ "uncaught panic in indexer.beacon.builderCache.runPersistLoop subroutine: %v, stack: %v",
+ err, string(debug.Stack()))
+ time.Sleep(10 * time.Second)
+
+ go cache.runPersistLoop()
+ }
+ }()
+
+ for range cache.triggerDbUpdate {
+ time.Sleep(2 * time.Second)
+ err := db.RunDBTransaction(func(tx *sqlx.Tx) error {
+ hasMore, err := cache.persistBuilders(tx)
+ if hasMore {
+ select {
+ case cache.triggerDbUpdate <- true:
+ default:
+ }
+ }
+ return err
+ })
+ if err != nil {
+ cache.indexer.logger.WithError(err).Errorf("error persisting builders")
+ }
+ }
+}
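
The size-1 buffered channel with non-blocking sends makes triggerDbUpdate a coalescing signal: any number of update notifications arriving while a persist run is pending collapse into a single trigger. A standalone sketch of the pattern:

```go
// The coalescing trigger behind triggerDbUpdate: a 1-buffered channel where
// senders never block and redundant notifications are dropped on the floor.
package main

import (
	"fmt"
	"time"
)

func main() {
	trigger := make(chan bool, 1)

	notify := func() {
		select {
		case trigger <- true: // schedule a persist run
		default: // one is already pending, coalesce
		}
	}

	go func() {
		for range trigger {
			time.Sleep(100 * time.Millisecond) // debounce, then persist
			fmt.Println("persist batch")
		}
	}()

	for i := 0; i < 10; i++ {
		notify() // ten notifications collapse into at most two runs
	}
	time.Sleep(300 * time.Millisecond)
}
```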
+
+// persistBuilders writes a batch of builder states to the database
+func (cache *builderCache) persistBuilders(tx *sqlx.Tx) (bool, error) {
+ cache.cacheMutex.RLock()
+ defer cache.cacheMutex.RUnlock()
+
+ const batchSize = 1000
+ const maxPerRun = 10000
+
+ batch := make([]*dbtypes.Builder, 0, batchSize)
+ batchIndices := make([]uint64, 0, batchSize)
+ supersededPubkeys := make([][]byte, 0)
+ persisted := 0
+ firstIndex := uint64(0)
+ lastIndex := uint64(0)
+ hasMore := false
+
+ for index, entry := range cache.builderSetCache {
+ if entry == nil || entry.finalBuilder == nil {
+ continue
+ }
+
+ if persisted == 0 && len(batch) == 0 {
+ firstIndex = uint64(index)
+ }
+ lastIndex = uint64(index)
+
+ dbBuilder := &dbtypes.Builder{
+ Pubkey: entry.finalBuilder.PublicKey[:],
+ BuilderIndex: uint64(index),
+ Version: entry.finalBuilder.Version,
+ ExecutionAddress: entry.finalBuilder.ExecutionAddress[:],
+ DepositEpoch: db.ConvertUint64ToInt64(uint64(entry.finalBuilder.DepositEpoch)),
+ WithdrawableEpoch: db.ConvertUint64ToInt64(uint64(entry.finalBuilder.WithdrawableEpoch)),
+ Superseded: false,
+ }
+
+ batch = append(batch, dbBuilder)
+ batchIndices = append(batchIndices, uint64(index))
+
+ if len(batch) >= batchSize {
+ superseded, err := cache.persistBuilderBatch(tx, batch, batchIndices)
+ if err != nil {
+ return false, err
+ }
+ supersededPubkeys = append(supersededPubkeys, superseded...)
+
+ // Clear finalBuilder for persisted entries
+ for _, idx := range batchIndices {
+ if cache.builderSetCache[idx] != nil {
+ cache.builderSetCache[idx].finalBuilder = nil
+ }
+ }
+
+ batch = batch[:0]
+ batchIndices = batchIndices[:0]
+ persisted += batchSize
+
+ if persisted >= maxPerRun {
+ hasMore = true
+ break
+ }
+ }
+ }
+
+ // Persist remaining batch
+ if len(batch) > 0 {
+ superseded, err := cache.persistBuilderBatch(tx, batch, batchIndices)
+ if err != nil {
+ return false, err
+ }
+ supersededPubkeys = append(supersededPubkeys, superseded...)
+
+ // Clear finalBuilder for persisted entries
+ for _, idx := range batchIndices {
+ if cache.builderSetCache[idx] != nil {
+ cache.builderSetCache[idx].finalBuilder = nil
+ }
+ }
+
+ persisted += len(batch)
+ }
+
+ // Batch mark superseded builders
+ if len(supersededPubkeys) > 0 {
+ err := db.SetBuildersSuperseded(supersededPubkeys, tx)
+ if err != nil {
+ return false, fmt.Errorf("error marking builders as superseded: %w", err)
+ }
+ }
+
+ if persisted > 0 || len(supersededPubkeys) > 0 {
+ cache.indexer.logger.Infof("persisted %d builders to db [%d-%d], marked %d as superseded",
+ persisted, firstIndex, lastIndex, len(supersededPubkeys))
+ }
+
+ return hasMore, nil
+}
+
+// persistBuilderBatch persists a batch of builders and returns pubkeys that were superseded
+func (cache *builderCache) persistBuilderBatch(tx *sqlx.Tx, batch []*dbtypes.Builder, indices []uint64) ([][]byte, error) {
+ if len(batch) == 0 {
+ return nil, nil
+ }
+
+ // Get range for this batch
+ minIndex := indices[0]
+ maxIndex := indices[0]
+ for _, idx := range indices[1:] {
+ if idx < minIndex {
+ minIndex = idx
+ }
+ if idx > maxIndex {
+ maxIndex = idx
+ }
+ }
+
+ // Fetch existing builders in this batch's range
+ existingBuilders := db.GetBuilderRange(minIndex, maxIndex)
+ existingByIndex := make(map[uint64]*dbtypes.Builder, len(existingBuilders))
+ for _, b := range existingBuilders {
+ existingByIndex[b.BuilderIndex] = b
+ }
+
+ // Find superseded pubkeys
+ supersededPubkeys := make([][]byte, 0)
+ for i, dbBuilder := range batch {
+ if existing, ok := existingByIndex[indices[i]]; ok {
+ if !bytes.Equal(existing.Pubkey, dbBuilder.Pubkey) {
+ supersededPubkeys = append(supersededPubkeys, existing.Pubkey)
+ }
+ }
+ }
+
+ // Insert batch
+ err := db.InsertBuilderBatch(batch, tx)
+ if err != nil {
+ return nil, fmt.Errorf("error persisting builder batch: %w", err)
+ }
+
+ return supersededPubkeys, nil
+}
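
Builder indices can be reused when a builder exits and a later deposit claims the slot; persistBuilderBatch catches this by comparing the pubkey already stored at each index against the incoming one, and the displaced pubkey's row is then flagged as superseded. A minimal sketch of the detection, with maps standing in for the builders table:

```go
// Index-reuse detection as in persistBuilderBatch: a pubkey change at an
// already-occupied index means the old pubkey's row must be marked superseded.
package main

import "fmt"

func detectSuperseded(existing, incoming map[uint64]string) []string {
	superseded := []string{}
	for idx, newPubkey := range incoming {
		if oldPubkey, ok := existing[idx]; ok && oldPubkey != newPubkey {
			superseded = append(superseded, oldPubkey)
		}
	}
	return superseded
}

func main() {
	existing := map[uint64]string{1: "0xaaa", 2: "0xbbb"}
	incoming := map[uint64]string{1: "0xaaa", 2: "0xccc"} // index 2 was reused

	fmt.Println(detectSuperseded(existing, incoming)) // [0xbbb]
}
```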
diff --git a/indexer/beacon/client.go b/indexer/beacon/client.go
index 332bcb94..895e8c6d 100644
--- a/indexer/beacon/client.go
+++ b/indexer/beacon/client.go
@@ -10,6 +10,7 @@ import (
v1 "github.com/attestantio/go-eth2-client/api/v1"
"github.com/attestantio/go-eth2-client/spec"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/ethereum/go-ethereum/common"
"github.com/ethpandaops/dora/clients/consensus"
@@ -32,8 +33,10 @@ type Client struct {
archive bool
skipValidators bool
- blockSubscription *utils.Subscription[*v1.BlockEvent]
- headSubscription *utils.Subscription[*v1.HeadEvent]
+ blockSubscription *utils.Subscription[*v1.BlockEvent]
+ headSubscription *utils.Subscription[*v1.HeadEvent]
+ executionPayloadSubscription *utils.Subscription[*v1.ExecutionPayloadAvailableEvent]
+ executionPayloadBidSubscription *utils.Subscription[*gloas.SignedExecutionPayloadBid]
headRoot phase0.Root
}
@@ -81,6 +84,8 @@ func (c *Client) startIndexing() {
// blocking block subscription with a buffer to ensure no blocks are missed
c.blockSubscription = c.client.SubscribeBlockEvent(100, true)
c.headSubscription = c.client.SubscribeHeadEvent(100, true)
+ c.executionPayloadSubscription = c.client.SubscribeExecutionPayloadAvailableEvent(100, true)
+ c.executionPayloadBidSubscription = c.client.SubscribeExecutionPayloadBidEvent(100, true)
go c.startClientLoop()
}
@@ -145,7 +150,7 @@ func (c *Client) runClientLoop() error {
c.headRoot = headRoot
- headBlock, isNew, processingTimes, err := c.processBlock(headSlot, headRoot, nil, false)
+ headBlock, isNew, processingTimes, err := c.processBlock(headSlot, headRoot, nil, false, true)
if err != nil {
return fmt.Errorf("failed processing head block: %v", err)
}
@@ -179,6 +184,16 @@ func (c *Client) runClientLoop() error {
if err != nil {
c.logger.Errorf("failed processing head %v (%v): %v", headEvent.Slot, headEvent.Block.String(), err)
}
+ case executionPayloadEvent := <-c.executionPayloadSubscription.Channel():
+ err := c.processExecutionPayloadAvailableEvent(executionPayloadEvent)
+ if err != nil {
+ c.logger.Errorf("failed processing execution payload %v (%v): %v", executionPayloadEvent.Slot, executionPayloadEvent.BlockRoot.String(), err)
+ }
+ case executionPayloadBidEvent := <-c.executionPayloadBidSubscription.Channel():
+ err := c.processExecutionPayloadBidEvent(executionPayloadBidEvent)
+ if err != nil {
+ c.logger.Errorf("failed processing execution payload bid %v (%v): %v", executionPayloadBidEvent.Message.Slot, executionPayloadBidEvent.Message.ParentBlockRoot.String(), err)
+ }
}
}
@@ -291,7 +306,7 @@ func (c *Client) processHeadEvent(headEvent *v1.HeadEvent) error {
// processStreamBlock processes a block received from the stream (either via block or head events).
func (c *Client) processStreamBlock(slot phase0.Slot, root phase0.Root) (*Block, error) {
- block, isNew, processingTimes, err := c.processBlock(slot, root, nil, true)
+ block, isNew, processingTimes, err := c.processBlock(slot, root, nil, true, false)
if err != nil {
return nil, err
}
@@ -345,7 +360,7 @@ func (c *Client) processReorg(oldHead *Block, newHead *Block) error {
}
// processBlock processes a block (from stream & polling).
-func (c *Client) processBlock(slot phase0.Slot, root phase0.Root, header *phase0.SignedBeaconBlockHeader, trackRecvDelay bool) (block *Block, isNew bool, processingTimes []time.Duration, err error) {
+func (c *Client) processBlock(slot phase0.Slot, root phase0.Root, header *phase0.SignedBeaconBlockHeader, trackRecvDelay bool, loadPayload bool) (block *Block, isNew bool, processingTimes []time.Duration, err error) {
chainState := c.client.GetPool().GetChainState()
finalizedSlot := chainState.GetFinalizedSlot()
processingTimes = make([]time.Duration, 3)
@@ -403,6 +418,25 @@ func (c *Client) processBlock(slot phase0.Slot, root phase0.Root, header *phase0
return
}
+ if loadPayload {
+ newPayload, _ := block.EnsureExecutionPayload(func() (*gloas.SignedExecutionPayloadEnvelope, error) {
+ t1 := time.Now()
+ defer func() {
+ processingTimes[0] += time.Since(t1)
+ }()
+
+ return LoadExecutionPayload(c.getContext(), c, root)
+ })
+
+ if !isNew && newPayload {
+ // write payload to db
+ err = c.persistExecutionPayload(block)
+ if err != nil {
+ return
+ }
+ }
+ }
+
if slot >= finalizedSlot && isNew {
c.indexer.blockCache.addBlockToParentMap(block)
c.indexer.blockCache.addBlockToExecBlockMap(block)
@@ -526,7 +560,7 @@ func (c *Client) backfillParentBlocks(headBlock *Block) error {
if parentBlock == nil {
var err error
- parentBlock, isNewBlock, processingTimes, err = c.processBlock(parentSlot, parentRoot, parentHead, false)
+ parentBlock, isNewBlock, processingTimes, err = c.processBlock(parentSlot, parentRoot, parentHead, false, true)
if err != nil {
return fmt.Errorf("could not process block [0x%x]: %v", parentRoot, err)
}
@@ -553,3 +587,87 @@ func (c *Client) backfillParentBlocks(headBlock *Block) error {
}
return nil
}
+
+// processExecutionPayloadAvailableEvent processes an execution payload available event from the event stream.
+func (c *Client) processExecutionPayloadAvailableEvent(executionPayloadEvent *v1.ExecutionPayloadAvailableEvent) error {
+ if c.client.GetStatus() != consensus.ClientStatusOnline && c.client.GetStatus() != consensus.ClientStatusOptimistic {
+ // client is not ready, skip
+ return nil
+ }
+
+ chainState := c.client.GetPool().GetChainState()
+ finalizedSlot := chainState.GetFinalizedSlot()
+
+ var block *Block
+
+ if executionPayloadEvent.Slot < finalizedSlot {
+ // payload for a block in a finalized epoch: either a known block or a new orphaned block.
+ // don't add it to the cache, process this block right after loading the details
+ block = newBlock(c.indexer.dynSsz, executionPayloadEvent.BlockRoot, executionPayloadEvent.Slot, 0)
+
+ dbBlockHead := db.GetBlockHeadByRoot(executionPayloadEvent.BlockRoot[:])
+ if dbBlockHead != nil {
+ block.isInFinalizedDb = true
+ block.parentRoot = (*phase0.Root)(dbBlockHead.ParentRoot)
+ }
+
+ } else {
+ block = c.indexer.blockCache.getBlockByRoot(executionPayloadEvent.BlockRoot)
+ }
+
+ if block == nil {
+ c.logger.Warnf("execution payload event for unknown block %v:%v [0x%x]", chainState.EpochOfSlot(executionPayloadEvent.Slot), executionPayloadEvent.Slot, executionPayloadEvent.BlockRoot)
+ return nil
+ }
+
+ newPayload, err := block.EnsureExecutionPayload(func() (*gloas.SignedExecutionPayloadEnvelope, error) {
+ return LoadExecutionPayload(c.getContext(), c, executionPayloadEvent.BlockRoot)
+ })
+ if err != nil {
+ return err
+ }
+
+ if newPayload {
+ // write payload to db
+ err = c.persistExecutionPayload(block)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// persistExecutionPayload marshals the block's execution payload envelope and stores it with the unfinalized block in the database.
+func (c *Client) persistExecutionPayload(block *Block) error {
+ payloadVer, payloadSSZ, err := MarshalVersionedSignedExecutionPayloadEnvelopeSSZ(block.dynSsz, block.executionPayload, c.indexer.blockCompression)
+ if err != nil {
+ return fmt.Errorf("marshal execution payload ssz failed: %v", err)
+ }
+
+ return db.RunDBTransaction(func(tx *sqlx.Tx) error {
+ return db.UpdateUnfinalizedBlockPayload(block.Root[:], payloadVer, payloadSSZ, tx)
+ })
+}
+
+// processExecutionPayloadBidEvent converts an execution payload bid from the event stream and adds it to the block bid cache.
+func (c *Client) processExecutionPayloadBidEvent(executionPayloadBidEvent *gloas.SignedExecutionPayloadBid) error {
+ bid := &dbtypes.BlockBid{
+ ParentRoot: executionPayloadBidEvent.Message.ParentBlockRoot[:],
+ ParentHash: executionPayloadBidEvent.Message.ParentBlockHash[:],
+ BlockHash: executionPayloadBidEvent.Message.BlockHash[:],
+ FeeRecipient: executionPayloadBidEvent.Message.FeeRecipient[:],
+ GasLimit: uint64(executionPayloadBidEvent.Message.GasLimit),
+ BuilderIndex: uint64(executionPayloadBidEvent.Message.BuilderIndex),
+ Slot: uint64(executionPayloadBidEvent.Message.Slot),
+ Value: uint64(executionPayloadBidEvent.Message.Value),
+ ElPayment: uint64(executionPayloadBidEvent.Message.ExecutionPayment),
+ }
+ c.indexer.blockBidCache.AddBid(bid)
+ return nil
+}
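
EnsureExecutionPayload (defined on Block, outside this diff) is used throughout as a load-once guard: the loader callback only runs when no payload is attached yet, and the returned boolean tells the caller whether a freshly fetched payload needs persisting. A sketch of the assumed contract, with stand-in types:

```go
// Assumed contract of the EnsureExecutionPayload guard: run the loader only
// when no payload is attached yet, and report whether one was newly set so
// the caller knows to persist it.
package main

import "fmt"

type payload struct{ blockNumber uint64 }

type block struct{ executionPayload *payload }

func (b *block) ensureExecutionPayload(load func() (*payload, error)) (bool, error) {
	if b.executionPayload != nil {
		return false, nil // already present, loader never runs
	}
	p, err := load()
	if err != nil || p == nil {
		return false, err
	}
	b.executionPayload = p
	return true, nil // freshly loaded, caller should persist
}

func main() {
	b := &block{}
	isNew, _ := b.ensureExecutionPayload(func() (*payload, error) {
		return &payload{blockNumber: 100}, nil
	})
	fmt.Println(isNew) // true on first load, false on every later call
}
```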
diff --git a/indexer/beacon/epochstate.go b/indexer/beacon/epochstate.go
index 4d696f48..db65ec13 100644
--- a/indexer/beacon/epochstate.go
+++ b/indexer/beacon/epochstate.go
@@ -25,6 +25,7 @@ type epochState struct {
stateSlot phase0.Slot
validatorBalances []phase0.Gwei
+ builderBalances []phase0.Gwei
randaoMixes []phase0.Root
depositIndex uint64
syncCommittee []phase0.ValidatorIndex
@@ -161,6 +162,20 @@ func (s *epochState) processState(state *spec.VersionedBeaconState, cache *epoch
cache.indexer.validatorCache.updateValidatorSet(slot, s.slotRoot, validatorList)
}
+ // Process builder set for Gloas
+ if state.Version >= spec.DataVersionGloas && state.Gloas != nil {
+ if cache != nil {
+ cache.indexer.builderCache.updateBuilderSet(slot, s.slotRoot, state.Gloas.Builders)
+ }
+
+ // Extract builder balances
+ builderBalances := make([]phase0.Gwei, len(state.Gloas.Builders))
+ for i, builder := range state.Gloas.Builders {
+ builderBalances[i] = builder.Balance
+ }
+ s.builderBalances = builderBalances
+ }
+
validatorPubkeyMap := make(map[phase0.BLSPubKey]phase0.ValidatorIndex)
for i, v := range validatorList {
validatorPubkeyMap[v.PublicKey] = phase0.ValidatorIndex(i)
diff --git a/indexer/beacon/finalization.go b/indexer/beacon/finalization.go
index 266e0831..1a360453 100644
--- a/indexer/beacon/finalization.go
+++ b/indexer/beacon/finalization.go
@@ -10,6 +10,7 @@ import (
v1 "github.com/attestantio/go-eth2-client/api/v1"
"github.com/attestantio/go-eth2-client/spec"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/ethpandaops/dora/blockdb"
"github.com/ethpandaops/dora/db"
@@ -150,6 +151,15 @@ func (indexer *Indexer) finalizeEpoch(epoch phase0.Epoch, justifiedRoot phase0.R
if block.block == nil {
return true, fmt.Errorf("missing block body for canonical block %v (%v)", block.Slot, block.Root.String())
}
+
+ if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) {
+ if _, err := block.EnsureExecutionPayload(func() (*gloas.SignedExecutionPayloadEnvelope, error) {
+ return LoadExecutionPayload(client.getContext(), client, block.Root)
+ }); err != nil {
+ client.logger.Warnf("failed loading finalized execution payload %v (%v): %v", block.Slot, block.Root.String(), err)
+ }
+ }
+
canonicalBlocks = append(canonicalBlocks, block)
} else {
if block.block == nil {
@@ -532,6 +542,7 @@ func (indexer *Indexer) finalizeEpoch(epoch phase0.Epoch, justifiedRoot phase0.R
// update validator cache
if len(canonicalBlocks) > 0 {
indexer.validatorCache.setFinalizedEpoch(epoch, canonicalBlocks[len(canonicalBlocks)-1].Root)
+ indexer.builderCache.setFinalizedEpoch(epoch, canonicalBlocks[len(canonicalBlocks)-1].Root)
}
// clean fork cache
diff --git a/indexer/beacon/indexer.go b/indexer/beacon/indexer.go
index 2dcd15c2..89141c95 100644
--- a/indexer/beacon/indexer.go
+++ b/indexer/beacon/indexer.go
@@ -46,6 +46,8 @@ type Indexer struct {
pubkeyCache *pubkeyCache
validatorCache *validatorCache
validatorActivity *validatorActivityCache
+ blockBidCache *blockBidCache
+ builderCache *builderCache
// indexer state
clients []*Client
@@ -116,6 +118,8 @@ func NewIndexer(logger logrus.FieldLogger, consensusPool *consensus.Pool) *Index
indexer.pubkeyCache = newPubkeyCache(indexer, utils.Config.Indexer.PubkeyCachePath)
indexer.validatorCache = newValidatorCache(indexer)
indexer.validatorActivity = newValidatorActivityCache(indexer)
+ indexer.blockBidCache = newBlockBidCache(indexer)
+ indexer.builderCache = newBuilderCache(indexer)
indexer.dbWriter = newDbWriter(indexer)
badChainRoots := utils.Config.Indexer.BadChainRoots
@@ -273,6 +277,14 @@ func (indexer *Indexer) StartIndexer() {
indexer.logger.Infof("restored %v validators from DB (%.3f sec)", validatorCount, time.Since(t1).Seconds())
}
+ // restore finalized builder set from db
+ t1 = time.Now()
+ if builderCount, err := indexer.builderCache.prepopulateFromDB(); err != nil {
+ indexer.logger.WithError(err).Errorf("failed loading builder set")
+ } else if builderCount > 0 {
+ indexer.logger.Infof("restored %v builders from DB (%.3f sec)", builderCount, time.Since(t1).Seconds())
+ }
+
// restore unfinalized epoch stats from db
restoredEpochStats := 0
t1 = time.Now()
@@ -338,6 +350,7 @@ func (indexer *Indexer) StartIndexer() {
// restore unfinalized blocks from db
restoredBlockCount := 0
restoredBodyCount := 0
+ restoredPayloadCount := 0
t1 = time.Now()
err = db.StreamUnfinalizedBlocks(uint64(finalizedSlot), func(dbBlock *dbtypes.UnfinalizedBlock) {
block, _ := indexer.blockCache.createOrGetBlock(phase0.Root(dbBlock.Root), phase0.Slot(dbBlock.Slot))
@@ -375,10 +388,23 @@ func (indexer *Indexer) StartIndexer() {
block.SetBlock(blockBody)
restoredBodyCount++
} else {
- block.setBlockIndex(blockBody)
+ block.setBlockIndex(blockBody, nil)
block.isInFinalizedDb = true
}
+ if len(dbBlock.PayloadSSZ) > 0 {
+ blockPayload, err := UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(indexer.dynSsz, dbBlock.PayloadVer, dbBlock.PayloadSSZ)
+ if err != nil {
+ indexer.logger.Warnf("could not restore unfinalized block payload %v [%x] from db: %v", dbBlock.Slot, dbBlock.Root, err)
+ } else if block.processingStatus == 0 {
+ block.SetExecutionPayload(blockPayload)
+ restoredPayloadCount++
+ } else {
+ block.setBlockIndex(blockBody, blockPayload)
+ block.hasExecutionPayload = true
+ }
+ }
+
indexer.blockCache.addBlockToExecBlockMap(block)
blockFork := indexer.forkCache.getForkById(block.forkId)
@@ -402,6 +428,9 @@ func (indexer *Indexer) StartIndexer() {
indexer.logger.Infof("restored %v unfinalized blocks from DB (%v with bodies, %.3f sec)", restoredBlockCount, restoredBodyCount, time.Since(t1).Seconds())
}
+ // restore block bids from db
+ indexer.blockBidCache.loadFromDB(chainState.CurrentSlot())
+
// start indexing for all clients
for _, client := range indexer.clients {
client.startIndexing()
@@ -436,6 +465,11 @@ func (indexer *Indexer) StartIndexer() {
}
func (indexer *Indexer) StopIndexer() {
+ // flush block bids to db before shutdown
+ if err := indexer.blockBidCache.flushAll(); err != nil {
+ indexer.logger.WithError(err).Errorf("error flushing block bids on shutdown")
+ }
+
indexer.pubkeyCache.Close()
}
@@ -487,6 +521,11 @@ func (indexer *Indexer) runIndexerLoop() {
slotIndex := chainState.SlotToSlotIndex(phase0.Slot(slotEvent.Number()))
slotProgress := uint8(100 / chainState.GetSpecs().SlotsPerEpoch * uint64(slotIndex))
+ // flush old block bids if needed
+ if err := indexer.blockBidCache.checkAndFlush(); err != nil {
+ indexer.logger.WithError(err).Errorf("failed flushing block bids")
+ }
+
// precalc next canonical duties on epoch start
if epoch >= indexer.lastPrecalcRunEpoch {
err := indexer.precalcNextEpochStats(epoch)
diff --git a/indexer/beacon/indexer_getter.go b/indexer/beacon/indexer_getter.go
index 883fc288..678664f1 100644
--- a/indexer/beacon/indexer_getter.go
+++ b/indexer/beacon/indexer_getter.go
@@ -12,6 +12,7 @@ import (
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/ethpandaops/dora/clients/consensus"
"github.com/ethpandaops/dora/db"
+ "github.com/ethpandaops/dora/dbtypes"
dynssz "github.com/pk910/dynamic-ssz"
)
@@ -222,6 +223,14 @@ func (indexer *Indexer) GetOrphanedBlockByRoot(blockRoot phase0.Root) (*Block, e
block.SetHeader(header)
block.SetBlock(blockBody)
+ if len(orphanedBlock.PayloadSSZ) > 0 {
+ payload, err := UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(indexer.dynSsz, orphanedBlock.PayloadVer, orphanedBlock.PayloadSSZ)
+ if err != nil {
+ return nil, fmt.Errorf("could not restore orphaned block payload %v [%x] from db: %v", header.Message.Slot, orphanedBlock.Root, err)
+ }
+ block.SetExecutionPayload(payload)
+ }
+
return block, nil
}
@@ -499,3 +508,66 @@ func (indexer *Indexer) GetFullValidatorByIndex(validatorIndex phase0.ValidatorI
return validatorData
}
+
+// GetBlockBids returns the execution payload bids for a given parent block root.
+// It first checks the in-memory cache, then falls back to the database.
+func (indexer *Indexer) GetBlockBids(parentBlockRoot phase0.Root) []*dbtypes.BlockBid {
+ // First check the in-memory cache
+ bids := indexer.blockBidCache.GetBidsForBlockRoot(parentBlockRoot)
+ if len(bids) > 0 {
+ return bids
+ }
+
+ // Fall back to database
+ return db.GetBidsForBlockRoot(parentBlockRoot[:])
+}
+
+// StreamActiveBuilderDataForRoot streams the available builder set data for a given blockRoot.
+func (indexer *Indexer) StreamActiveBuilderDataForRoot(blockRoot phase0.Root, activeOnly bool, epoch *phase0.Epoch, cb BuilderSetStreamer) error {
+ return indexer.builderCache.streamBuilderSetForRoot(blockRoot, activeOnly, epoch, cb)
+}
+
+// GetBuilderSetSize returns the size of the builder set cache.
+func (indexer *Indexer) GetBuilderSetSize() uint64 {
+ return indexer.builderCache.getBuilderSetSize()
+}
+
+// GetRecentBuilderBalances returns the most recent builder balances for the given fork.
+func (indexer *Indexer) GetRecentBuilderBalances(overrideForkId *ForkKey) []phase0.Gwei {
+ chainState := indexer.consensusPool.GetChainState()
+
+ canonicalHead := indexer.GetCanonicalHead(overrideForkId)
+ if canonicalHead == nil {
+ return nil
+ }
+
+ headEpoch := chainState.EpochOfSlot(canonicalHead.Slot)
+
+ var epochStats *EpochStats
+ for {
+ cEpoch := chainState.EpochOfSlot(canonicalHead.Slot)
+ // stop after walking back more than 2 epochs without a usable state
+ if headEpoch-cEpoch > 2 {
+ return nil
+ }
+
+ dependentBlock := indexer.blockCache.getDependentBlock(chainState, canonicalHead, nil)
+ if dependentBlock == nil {
+ return nil
+ }
+ canonicalHead = dependentBlock
+
+ stats := indexer.epochCache.getEpochStats(cEpoch, dependentBlock.Root)
+ if cEpoch > 0 && (stats == nil || stats.dependentState == nil || stats.dependentState.loadingStatus != 2) {
+ continue // retry previous state
+ }
+
+ epochStats = stats
+ break
+ }
+
+ if epochStats == nil || epochStats.dependentState == nil {
+ return nil
+ }
+
+ return epochStats.dependentState.builderBalances
+}
diff --git a/indexer/beacon/pruning.go b/indexer/beacon/pruning.go
index f2d640d4..94896ba7 100644
--- a/indexer/beacon/pruning.go
+++ b/indexer/beacon/pruning.go
@@ -258,8 +258,9 @@ func (indexer *Indexer) processEpochPruning(pruneEpoch phase0.Epoch) (uint64, ui
for _, block := range pruningBlocks {
block.isInFinalizedDb = true
block.processingStatus = dbtypes.UnfinalizedBlockStatusPruned
- block.setBlockIndex(block.block)
+ block.setBlockIndex(block.block, block.executionPayload)
block.block = nil
+ block.executionPayload = nil
block.blockResults = nil
}
diff --git a/indexer/beacon/requests.go b/indexer/beacon/requests.go
index df6ec6fb..f8182a79 100644
--- a/indexer/beacon/requests.go
+++ b/indexer/beacon/requests.go
@@ -6,6 +6,7 @@ import (
"time"
"github.com/attestantio/go-eth2-client/spec"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
)
@@ -18,6 +19,9 @@ const beaconBodyRequestTimeout time.Duration = 30 * time.Second
// BeaconStateRequestTimeout is the timeout duration for beacon state requests.
const beaconStateRequestTimeout time.Duration = 600 * time.Second
+// ExecutionPayloadRequestTimeout is the timeout duration for execution payload requests.
+const executionPayloadRequestTimeout time.Duration = 30 * time.Second
+
const beaconStateRetryCount = 10
// LoadBeaconHeader loads the block header from the client.
@@ -75,3 +79,16 @@ func LoadBeaconState(ctx context.Context, client *Client, root phase0.Root) (*sp
return resState, nil
}
+
+// LoadExecutionPayload loads the execution payload from the client.
+func LoadExecutionPayload(ctx context.Context, client *Client, root phase0.Root) (*gloas.SignedExecutionPayloadEnvelope, error) {
+ ctx, cancel := context.WithTimeout(ctx, executionPayloadRequestTimeout)
+ defer cancel()
+
+ payload, err := client.client.GetRPCClient().GetExecutionPayloadByBlockroot(ctx, root)
+ if err != nil {
+ return nil, err
+ }
+
+ return payload, nil
+}
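
Each Load* helper derives a bounded child context from the caller's context, so a stalled client can never hold an indexer goroutine past the per-request timeout. A standalone sketch of the pattern, with a stub fetch standing in for the RPC call:

```go
// Per-request timeout pattern shared by the Load* helpers: wrap the caller's
// context, always cancel, and surface deadline hits as plain errors.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

const requestTimeout = 30 * time.Second

func loadWithTimeout(ctx context.Context, fetch func(context.Context) (string, error)) (string, error) {
	ctx, cancel := context.WithTimeout(ctx, requestTimeout)
	defer cancel() // release the timer even on the success path

	res, err := fetch(ctx)
	if errors.Is(err, context.DeadlineExceeded) {
		return "", fmt.Errorf("request timed out after %v", requestTimeout)
	}
	return res, err
}

func main() {
	res, err := loadWithTimeout(context.Background(), func(ctx context.Context) (string, error) {
		return "payload", nil // a real fetch would honor ctx cancellation
	})
	fmt.Println(res, err)
}
```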
diff --git a/indexer/beacon/synchronizer.go b/indexer/beacon/synchronizer.go
index 92a17433..3336b171 100644
--- a/indexer/beacon/synchronizer.go
+++ b/indexer/beacon/synchronizer.go
@@ -10,6 +10,7 @@ import (
"time"
"github.com/attestantio/go-eth2-client/spec"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/ethpandaops/dora/blockdb"
"github.com/ethpandaops/dora/clients/consensus"
@@ -264,11 +265,17 @@ func (s *synchronizer) loadBlockHeader(client *Client, slot phase0.Slot) (*phase
}
func (s *synchronizer) loadBlockBody(client *Client, root phase0.Root) (*spec.VersionedSignedBeaconBlock, error) {
- ctx, cancel := context.WithTimeout(s.syncCtx, beaconHeaderRequestTimeout)
+ ctx, cancel := context.WithTimeout(s.syncCtx, beaconBodyRequestTimeout)
defer cancel()
return LoadBeaconBlock(ctx, client, root)
}
+func (s *synchronizer) loadBlockPayload(client *Client, root phase0.Root) (*gloas.SignedExecutionPayloadEnvelope, error) {
+ ctx, cancel := context.WithTimeout(s.syncCtx, executionPayloadRequestTimeout)
+ defer cancel()
+ return LoadExecutionPayload(ctx, client, root)
+}
+
func (s *synchronizer) syncEpoch(syncEpoch phase0.Epoch, client *Client, lastTry bool) (bool, error) {
if !utils.Config.Indexer.ResyncForceUpdate && db.IsEpochSynchronized(uint64(syncEpoch)) {
return true, nil
@@ -327,6 +334,17 @@ func (s *synchronizer) syncEpoch(syncEpoch phase0.Epoch, client *Client, lastTry
block.SetBlock(blockBody)
}
+ if slot > 0 && chainState.IsEip7732Enabled(chainState.EpochOfSlot(slot)) {
+ blockPayload, err := s.loadBlockPayload(client, phase0.Root(blockRoot))
+ if err != nil && !lastTry {
+ return false, fmt.Errorf("error fetching slot %v execution payload: %v", slot, err)
+ }
+
+ if blockPayload != nil {
+ block.SetExecutionPayload(blockPayload)
+ }
+ }
+
s.cachedBlocks[slot] = block
}
diff --git a/indexer/beacon/writedb.go b/indexer/beacon/writedb.go
index 19a1fc4d..d5e9f608 100644
--- a/indexer/beacon/writedb.go
+++ b/indexer/beacon/writedb.go
@@ -7,6 +7,7 @@ import (
"github.com/attestantio/go-eth2-client/spec"
"github.com/attestantio/go-eth2-client/spec/bellatrix"
"github.com/attestantio/go-eth2-client/spec/capella"
+ "github.com/attestantio/go-eth2-client/spec/deneb"
"github.com/attestantio/go-eth2-client/spec/electra"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/ethpandaops/dora/clients/consensus"
@@ -237,6 +238,8 @@ func (dbw *dbWriter) buildDbBlock(block *Block, epochStats *EpochStats, override
epochStatsValues = epochStats.GetValues(true)
}
+ chainState := dbw.indexer.consensusPool.GetChainState()
+
graffiti, _ := blockBody.Graffiti()
attestations, _ := blockBody.Attestations()
deposits, _ := blockBody.Deposits()
@@ -245,28 +248,43 @@ func (dbw *dbWriter) buildDbBlock(block *Block, epochStats *EpochStats, override
proposerSlashings, _ := blockBody.ProposerSlashings()
blsToExecChanges, _ := blockBody.BLSToExecutionChanges()
syncAggregate, _ := blockBody.SyncAggregate()
- blobKzgCommitments, _ := blockBody.BlobKZGCommitments()
+ executionBlockHash, _ := blockBody.ExecutionBlockHash()
- var executionExtraData []byte
var executionBlockNumber uint64
- var executionBlockHash phase0.Hash32
+ var executionExtraData []byte
var executionTransactions []bellatrix.Transaction
var executionWithdrawals []*capella.Withdrawal
-
- executionPayload, _ := blockBody.ExecutionPayload()
- if executionPayload != nil {
- executionExtraData, _ = executionPayload.ExtraData()
- executionBlockHash, _ = executionPayload.BlockHash()
- executionBlockNumber, _ = executionPayload.BlockNumber()
- executionTransactions, _ = executionPayload.Transactions()
- executionWithdrawals, _ = executionPayload.Withdrawals()
- }
-
var depositRequests []*electra.DepositRequest
-
- executionRequests, _ := blockBody.ExecutionRequests()
- if executionRequests != nil {
- depositRequests = executionRequests.Deposits
+ var blobKzgCommitments []deneb.KZGCommitment
+ var payloadStatus dbtypes.PayloadStatus
+
+ if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) {
+ blockPayload := block.GetExecutionPayload()
+ if blockPayload != nil {
+ executionBlockNumber = blockPayload.Message.Payload.BlockNumber
+ executionExtraData = blockPayload.Message.Payload.ExtraData
+ executionTransactions = blockPayload.Message.Payload.Transactions
+ executionWithdrawals = blockPayload.Message.Payload.Withdrawals
+ depositRequests = blockPayload.Message.ExecutionRequests.Deposits
+ blobKzgCommitments = blockPayload.Message.BlobKZGCommitments
+ payloadStatus = dbtypes.PayloadStatusCanonical
+ } else {
+ payloadStatus = dbtypes.PayloadStatusMissing
+ }
+ } else {
+ payloadStatus = dbtypes.PayloadStatusCanonical
+ executionBlockNumber, _ = blockBody.ExecutionBlockNumber()
+ executionPayload, _ := blockBody.ExecutionPayload()
+ if executionPayload != nil {
+ executionExtraData, _ = executionPayload.ExtraData()
+ executionTransactions, _ = executionPayload.Transactions()
+ executionWithdrawals, _ = executionPayload.Withdrawals()
+ }
+ blobKzgCommitments, _ = blockBody.BlobKZGCommitments()
+ executionRequests, _ := blockBody.ExecutionRequests()
+ if executionRequests != nil {
+ depositRequests = executionRequests.Deposits
+ }
}
dbBlock := dbtypes.Slot{
@@ -287,6 +305,7 @@ func (dbw *dbWriter) buildDbBlock(block *Block, epochStats *EpochStats, override
BLSChangeCount: uint64(len(blsToExecChanges)),
BlobCount: uint64(len(blobKzgCommitments)),
RecvDelay: block.recvDelay,
+ PayloadStatus: payloadStatus,
BlockUid: block.BlockUID,
}
@@ -401,6 +420,15 @@ func (dbw *dbWriter) buildDbBlock(block *Block, epochStats *EpochStats, override
dbBlock.EthBaseFee = utils.GetBaseFeeAsUint64(payload.BaseFeePerGas)
dbBlock.EthFeeRecipient = payload.FeeRecipient[:]
}
+ case spec.DataVersionGloas:
+ blockPayload := block.GetExecutionPayload()
+ if blockPayload != nil {
+ payload := blockPayload.Message.Payload
+ dbBlock.EthGasUsed = payload.GasUsed
+ dbBlock.EthGasLimit = payload.GasLimit
+ dbBlock.EthBaseFee = utils.GetBaseFeeAsUint64(payload.BaseFeePerGas)
+ dbBlock.EthFeeRecipient = payload.FeeRecipient[:]
+ }
}
}
@@ -474,15 +502,29 @@ func (dbw *dbWriter) buildDbEpoch(epoch phase0.Epoch, blocks []*Block, epochStat
proposerSlashings, _ := blockBody.ProposerSlashings()
blsToExecChanges, _ := blockBody.BLSToExecutionChanges()
syncAggregate, _ := blockBody.SyncAggregate()
- executionTransactions, _ := blockBody.ExecutionTransactions()
- executionWithdrawals, _ := blockBody.Withdrawals()
- blobKzgCommitments, _ := blockBody.BlobKZGCommitments()
+ var executionTransactions []bellatrix.Transaction
+ var executionWithdrawals []*capella.Withdrawal
var depositRequests []*electra.DepositRequest
-
- executionRequests, _ := blockBody.ExecutionRequests()
- if executionRequests != nil {
- depositRequests = executionRequests.Deposits
+ var blobKzgCommitments []deneb.KZGCommitment
+
+ if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) {
+ blockPayload := block.GetExecutionPayload()
+ if blockPayload != nil {
+ dbEpoch.PayloadCount++
+ executionTransactions = blockPayload.Message.Payload.Transactions
+ executionWithdrawals = blockPayload.Message.Payload.Withdrawals
+ depositRequests = blockPayload.Message.ExecutionRequests.Deposits
+ blobKzgCommitments = blockPayload.Message.BlobKZGCommitments
+ }
+ } else {
+ executionTransactions, _ = blockBody.ExecutionTransactions()
+ executionWithdrawals, _ = blockBody.Withdrawals()
+ blobKzgCommitments, _ = blockBody.BlobKZGCommitments()
+ executionRequests, _ := blockBody.ExecutionRequests()
+ if executionRequests != nil {
+ depositRequests = executionRequests.Deposits
+ }
}
dbEpoch.AttestationCount += uint64(len(attestations))
@@ -556,6 +598,13 @@ func (dbw *dbWriter) buildDbEpoch(epoch phase0.Epoch, blocks []*Block, epochStat
dbEpoch.EthGasUsed += payload.GasUsed
dbEpoch.EthGasLimit += payload.GasLimit
}
+ case spec.DataVersionGloas:
+ blockPayload := block.GetExecutionPayload()
+ if blockPayload != nil {
+ payload := blockPayload.Message.Payload
+ dbEpoch.EthGasUsed += payload.GasUsed
+ dbEpoch.EthGasLimit += payload.GasLimit
+ }
}
}
}
@@ -644,14 +693,26 @@ func (dbw *dbWriter) persistBlockDepositRequests(tx *sqlx.Tx, block *Block, orph
}
func (dbw *dbWriter) buildDbDepositRequests(block *Block, orphaned bool, overrideForkId *ForkKey) []*dbtypes.Deposit {
- blockBody := block.GetBlock()
- if blockBody == nil {
- return nil
+ chainState := dbw.indexer.consensusPool.GetChainState()
+
+ var requests *electra.ExecutionRequests
+
+ if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) {
+ payload := block.GetExecutionPayload()
+ if payload != nil {
+ requests = payload.Message.ExecutionRequests
+ }
+ } else {
+ blockBody := block.GetBlock()
+ if blockBody == nil {
+ return nil
+ }
+
+ requests, _ = blockBody.ExecutionRequests()
}
- requests, err := blockBody.ExecutionRequests()
- if err != nil {
- return nil
+ if requests == nil {
+ return []*dbtypes.Deposit{}
}
deposits := requests.Deposits
@@ -831,14 +892,29 @@ func (dbw *dbWriter) persistBlockConsolidationRequests(tx *sqlx.Tx, block *Block
}
func (dbw *dbWriter) buildDbConsolidationRequests(block *Block, orphaned bool, overrideForkId *ForkKey, sim *stateSimulator) []*dbtypes.ConsolidationRequest {
- blockBody := block.GetBlock()
- if blockBody == nil {
- return nil
+ chainState := dbw.indexer.consensusPool.GetChainState()
+
+ var requests *electra.ExecutionRequests
+ var blockNumber uint64
+
+ if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) {
+ payload := block.GetExecutionPayload()
+ if payload != nil {
+ requests = payload.Message.ExecutionRequests
+ blockNumber = payload.Message.Payload.BlockNumber
+ }
+ } else {
+ blockBody := block.GetBlock()
+ if blockBody == nil {
+ return nil
+ }
+
+ requests, _ = blockBody.ExecutionRequests()
+ blockNumber, _ = blockBody.ExecutionBlockNumber()
}
- requests, err := blockBody.ExecutionRequests()
- if err != nil {
- return nil
+ if requests == nil {
+ return []*dbtypes.ConsolidationRequest{}
}
if sim == nil {
@@ -860,8 +936,6 @@ func (dbw *dbWriter) buildDbConsolidationRequests(block *Block, orphaned bool, o
blockResults = sim.replayBlockResults(block)
}
- blockNumber, _ := blockBody.ExecutionBlockNumber()
-
dbConsolidations := make([]*dbtypes.ConsolidationRequest, len(consolidations))
for idx, consolidation := range consolidations {
dbConsolidation := &dbtypes.ConsolidationRequest{
@@ -912,14 +986,29 @@ func (dbw *dbWriter) persistBlockWithdrawalRequests(tx *sqlx.Tx, block *Block, o
}
func (dbw *dbWriter) buildDbWithdrawalRequests(block *Block, orphaned bool, overrideForkId *ForkKey, sim *stateSimulator) []*dbtypes.WithdrawalRequest {
- blockBody := block.GetBlock()
- if blockBody == nil {
- return nil
+ chainState := dbw.indexer.consensusPool.GetChainState()
+
+ var requests *electra.ExecutionRequests
+ var blockNumber uint64
+
+ if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) {
+ payload := block.GetExecutionPayload()
+ if payload != nil {
+ requests = payload.Message.ExecutionRequests
+ blockNumber = payload.Message.Payload.BlockNumber
+ }
+ } else {
+ blockBody := block.GetBlock()
+ if blockBody == nil {
+ return nil
+ }
+
+ requests, _ = blockBody.ExecutionRequests()
+ blockNumber, _ = blockBody.ExecutionBlockNumber()
}
- requests, err := blockBody.ExecutionRequests()
- if err != nil {
- return nil
+ if requests == nil {
+ return []*dbtypes.WithdrawalRequest{}
}
if sim == nil {
@@ -941,8 +1030,6 @@ func (dbw *dbWriter) buildDbWithdrawalRequests(block *Block, orphaned bool, over
blockResults = sim.replayBlockResults(block)
}
- blockNumber, _ := blockBody.ExecutionBlockNumber()
-
dbWithdrawalRequests := make([]*dbtypes.WithdrawalRequest, len(withdrawalRequests))
for idx, withdrawalRequest := range withdrawalRequests {
dbWithdrawalRequest := &dbtypes.WithdrawalRequest{
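Note: the buildDb* helpers changed above all follow the same source-selection pattern: once EIP-7732 (ePBS) is active, execution-layer data is read from the separately delivered payload envelope instead of the beacon block body. A minimal sketch of that pattern, assuming the accessors named in the diff (IsEip7732Enabled, GetExecutionPayload, ExecutionRequests); the helper name and the ChainStateReader parameter type are illustrative, not part of the patch:

    func executionRequestsFor(chainState ChainStateReader, block *Block) *electra.ExecutionRequests {
        if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) {
            // ePBS: requests travel in the execution payload envelope
            if payload := block.GetExecutionPayload(); payload != nil {
                return payload.Message.ExecutionRequests
            }
            return nil // payload not (yet) known for this block
        }
        // pre-ePBS: requests are embedded in the beacon block body
        if body := block.GetBlock(); body != nil {
            requests, _ := body.ExecutionRequests()
            return requests
        }
        return nil
    }
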
diff --git a/services/chainservice.go b/services/chainservice.go
index 634d5626..8677cf7f 100644
--- a/services/chainservice.go
+++ b/services/chainservice.go
@@ -252,6 +252,13 @@ func (cs *ChainService) StartService() error {
return fmt.Errorf("failed initializing s3 blockdb: %v", err)
}
cs.logger.Infof("S3 blockdb initialized at %v", utils.Config.BlockDb.S3.Bucket)
+ case "tiered":
+ err := blockdb.InitWithTiered(utils.Config.BlockDb.Tiered, cs.logger)
+ if err != nil {
+ return fmt.Errorf("failed initializing tiered blockdb: %v", err)
+ }
+ cs.logger.Infof("Tiered blockdb initialized (Pebble cache: %v, S3: %v)",
+ utils.Config.BlockDb.Tiered.Pebble.Path, utils.Config.BlockDb.Tiered.S3.Bucket)
default:
cs.logger.Infof("Blockdb disabled")
}
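Note: a hedged sketch of what selecting the new "tiered" engine implies for configuration. The nested field names are inferred from the log statement above (Tiered.Pebble.Path, Tiered.S3.Bucket); the exact struct layout may differ:

    // hypothetical wiring, mirroring the "tiered" switch case above
    cfg := utils.Config.BlockDb.Tiered // expects a Pebble (hot cache) and an S3 (cold store) section
    if err := blockdb.InitWithTiered(cfg, cs.logger); err != nil {
        return fmt.Errorf("failed initializing tiered blockdb: %v", err)
    }
    cs.logger.Infof("Tiered blockdb initialized (Pebble cache: %v, S3: %v)",
        cfg.Pebble.Path, cfg.S3.Bucket)
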
diff --git a/services/chainservice_blocks.go b/services/chainservice_blocks.go
index 27f4cfcc..1cce3312 100644
--- a/services/chainservice_blocks.go
+++ b/services/chainservice_blocks.go
@@ -9,9 +9,11 @@ import (
"github.com/attestantio/go-eth2-client/spec"
"github.com/attestantio/go-eth2-client/spec/deneb"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/ethpandaops/dora/blockdb"
+ btypes "github.com/ethpandaops/dora/blockdb/types"
"github.com/ethpandaops/dora/db"
"github.com/ethpandaops/dora/dbtypes"
"github.com/ethpandaops/dora/indexer/beacon"
@@ -22,6 +24,7 @@ type CombinedBlockResponse struct {
Root phase0.Root
Header *phase0.SignedBeaconBlockHeader
Block *spec.VersionedSignedBeaconBlock
+ Payload *gloas.SignedExecutionPayloadEnvelope
Orphaned bool
}
@@ -103,6 +106,7 @@ func (bs *ChainService) GetSlotDetailsByBlockroot(ctx context.Context, blockroot
Root: blockInfo.Root,
Header: blockInfo.GetHeader(),
Block: blockInfo.GetBlock(),
+ Payload: blockInfo.GetExecutionPayload(),
Orphaned: !bs.beaconIndexer.IsCanonicalBlock(blockInfo, nil),
}
}
@@ -115,6 +119,7 @@ func (bs *ChainService) GetSlotDetailsByBlockroot(ctx context.Context, blockroot
Root: blockInfo.Root,
Header: blockInfo.GetHeader(),
Block: blockInfo.GetBlock(),
+ Payload: blockInfo.GetExecutionPayload(),
Orphaned: true,
}
}
@@ -127,18 +132,34 @@ func (bs *ChainService) GetSlotDetailsByBlockroot(ctx context.Context, blockroot
}
var block *spec.VersionedSignedBeaconBlock
+ var payload *gloas.SignedExecutionPayloadEnvelope
bodyRetry := 0
for ; bodyRetry < 3; bodyRetry++ {
client := clients[bodyRetry%len(clients)]
- block, err = beacon.LoadBeaconBlock(ctx, client, blockroot)
- if block != nil {
- break
- } else if err != nil {
- log := logrus.WithError(err)
- if client != nil {
- log = log.WithField("client", client.GetClient().GetName())
+ if block == nil {
+ block, err = beacon.LoadBeaconBlock(ctx, client, blockroot)
+ if err != nil {
+ log := logrus.WithError(err)
+ if client != nil {
+ log = log.WithField("client", client.GetClient().GetName())
+ }
+ log.Warnf("Error loading block body for root 0x%x", blockroot)
}
- log.Warnf("Error loading block body for root 0x%x", blockroot)
+ }
+
+ if block != nil && block.Version >= spec.DataVersionGloas {
+ payload, err = beacon.LoadExecutionPayload(ctx, client, blockroot)
+ if payload != nil {
+ break
+ } else if err != nil {
+ log := logrus.WithError(err)
+ if client != nil {
+ log = log.WithField("client", client.GetClient().GetName())
+ }
+ log.Warnf("Error loading block payload for root 0x%x", blockroot)
+ }
+ } else if block != nil {
+ break
}
}
if err == nil && block != nil {
@@ -146,6 +167,7 @@ func (bs *ChainService) GetSlotDetailsByBlockroot(ctx context.Context, blockroot
Root: blockroot,
Header: header,
Block: block,
+ Payload: payload,
Orphaned: false,
}
}
@@ -153,10 +175,14 @@ func (bs *ChainService) GetSlotDetailsByBlockroot(ctx context.Context, blockroot
// try loading from block db
if result == nil && header != nil && blockdb.GlobalBlockDb != nil {
- blockData, err := blockdb.GlobalBlockDb.GetBlock(ctx, uint64(header.Message.Slot), blockroot[:], func(version uint64, block []byte) (interface{}, error) {
- return beacon.UnmarshalVersionedSignedBeaconBlockSSZ(bs.beaconIndexer.GetDynSSZ(), version, block)
- })
- if err == nil && blockData != nil {
+ blockData, err := blockdb.GlobalBlockDb.GetBlock(ctx, uint64(header.Message.Slot), blockroot[:],
+ btypes.BlockDataFlagBody|btypes.BlockDataFlagPayload,
+ func(version uint64, block []byte) (any, error) {
+ return beacon.UnmarshalVersionedSignedBeaconBlockSSZ(bs.beaconIndexer.GetDynSSZ(), version, block)
+ }, func(version uint64, payload []byte) (any, error) {
+ return beacon.UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(bs.beaconIndexer.GetDynSSZ(), version, payload)
+ })
+ if err == nil && blockData != nil && blockData.Body != nil {
result = &CombinedBlockResponse{
Root: blockroot,
Header: header,
@@ -232,6 +258,7 @@ func (bs *ChainService) GetSlotDetailsBySlot(ctx context.Context, slot phase0.Sl
Root: cachedBlock.Root,
Header: blockHeader,
Block: blockBody,
+ Payload: cachedBlock.GetExecutionPayload(),
Orphaned: isOrphaned,
}
}
@@ -248,25 +275,40 @@ func (bs *ChainService) GetSlotDetailsBySlot(ctx context.Context, slot phase0.Sl
var err error
var block *spec.VersionedSignedBeaconBlock
+ var payload *gloas.SignedExecutionPayloadEnvelope
bodyRetry := 0
for ; bodyRetry < 3; bodyRetry++ {
client := clients[bodyRetry%len(clients)]
block, err = beacon.LoadBeaconBlock(ctx, client, blockRoot)
- if block != nil {
- break
- } else if err != nil {
+ if err != nil {
log := logrus.WithError(err)
if client != nil {
log = log.WithField("client", client.GetClient().GetName())
}
log.Warnf("Error loading block body for slot %v", slot)
}
+
+ if block != nil && block.Version >= spec.DataVersionGloas {
+ payload, err = beacon.LoadExecutionPayload(ctx, client, blockRoot)
+ if payload != nil {
+ break
+ } else if err != nil {
+ log := logrus.WithError(err)
+ if client != nil {
+ log = log.WithField("client", client.GetClient().GetName())
+ }
+ log.Warnf("Error loading block payload for root 0x%x", blockRoot)
+ }
+ } else if block != nil {
+ break
+ }
}
if err == nil && block != nil {
result = &CombinedBlockResponse{
Root: blockRoot,
Header: header,
Block: block,
+ Payload: payload,
Orphaned: orphaned,
}
}
@@ -274,10 +316,14 @@ func (bs *ChainService) GetSlotDetailsBySlot(ctx context.Context, slot phase0.Sl
// try loading from block db
if result == nil && header != nil && blockdb.GlobalBlockDb != nil {
- blockData, err := blockdb.GlobalBlockDb.GetBlock(ctx, uint64(slot), blockRoot[:], func(version uint64, block []byte) (interface{}, error) {
- return beacon.UnmarshalVersionedSignedBeaconBlockSSZ(bs.beaconIndexer.GetDynSSZ(), version, block)
- })
- if err == nil && blockData != nil {
+ blockData, err := blockdb.GlobalBlockDb.GetBlock(ctx, uint64(slot), blockRoot[:],
+ btypes.BlockDataFlagHeader|btypes.BlockDataFlagBody|btypes.BlockDataFlagPayload,
+ func(version uint64, block []byte) (any, error) {
+ return beacon.UnmarshalVersionedSignedBeaconBlockSSZ(bs.beaconIndexer.GetDynSSZ(), version, block)
+ }, func(version uint64, payload []byte) (any, error) {
+ return beacon.UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(bs.beaconIndexer.GetDynSSZ(), version, payload)
+ })
+ if err == nil && blockData != nil && blockData.Body != nil {
header := &phase0.SignedBeaconBlockHeader{}
err = header.UnmarshalSSZ(blockData.HeaderData)
if err != nil {
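Note: the updated lookups above illustrate the selective-loading pattern of the new GetBlock API: a flag set names the components to fetch, and one parser callback is supplied per component class (block body, execution payload). A condensed usage sketch; parseBodySSZ and parsePayloadSSZ are hypothetical placeholders:

    // request only body and payload; the header is already known at this call site
    blockData, err := blockdb.GlobalBlockDb.GetBlock(ctx, slot, root[:],
        btypes.BlockDataFlagBody|btypes.BlockDataFlagPayload,
        parseBodySSZ,    // invoked with (bodyVersion, bodyBytes) when the body flag is set
        parsePayloadSSZ, // invoked with (payloadVersion, payloadBytes) when the payload flag is set
    )
    if err == nil && blockData != nil && blockData.Body != nil {
        // blockData.Body holds the parsed block; a Payload counterpart is
        // assumed here for post-Gloas blocks
    }
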
diff --git a/services/chainservice_builder.go b/services/chainservice_builder.go
new file mode 100644
index 00000000..33819ca9
--- /dev/null
+++ b/services/chainservice_builder.go
@@ -0,0 +1,253 @@
+package services
+
+import (
+ "bytes"
+ "slices"
+ "sort"
+
+ "github.com/attestantio/go-eth2-client/spec/gloas"
+ "github.com/attestantio/go-eth2-client/spec/phase0"
+
+ "github.com/ethpandaops/dora/db"
+ "github.com/ethpandaops/dora/dbtypes"
+ "github.com/ethpandaops/dora/indexer/beacon"
+)
+
+type BuilderWithIndex struct {
+ Index gloas.BuilderIndex
+ Builder *gloas.Builder
+ Superseded bool
+}
+
+// GetFilteredBuilderSet returns builders matching the filter criteria
+func (bs *ChainService) GetFilteredBuilderSet(filter *dbtypes.BuilderFilter, withBalance bool) ([]BuilderWithIndex, uint64) {
+ var overrideForkId *beacon.ForkKey
+
+ canonicalHead := bs.beaconIndexer.GetCanonicalHead(overrideForkId)
+ if canonicalHead == nil {
+ return nil, 0
+ }
+
+ var balances []phase0.Gwei
+ if withBalance {
+ balances = bs.beaconIndexer.GetRecentBuilderBalances(overrideForkId)
+ }
+ currentEpoch := bs.consensusPool.GetChainState().CurrentEpoch()
+
+ cachedResults := make([]BuilderWithIndex, 0, 1000)
+ cachedIndexes := map[uint64]bool{}
+
+ // Get matching entries from cached builders
+ bs.beaconIndexer.StreamActiveBuilderDataForRoot(canonicalHead.Root, false, &currentEpoch, func(index gloas.BuilderIndex, flags uint16, activeData *beacon.BuilderData, builder *gloas.Builder) error {
+ if builder == nil {
+ return nil
+ }
+ if filter.MinIndex != nil && uint64(index) < *filter.MinIndex {
+ return nil
+ }
+ if filter.MaxIndex != nil && uint64(index) > *filter.MaxIndex {
+ return nil
+ }
+ if len(filter.PubKey) > 0 {
+ pubkeylen := min(len(filter.PubKey), 48)
+ if !bytes.Equal(builder.PublicKey[:pubkeylen], filter.PubKey) {
+ return nil
+ }
+ }
+ if len(filter.ExecutionAddress) > 0 {
+ if !bytes.Equal(builder.ExecutionAddress[:], filter.ExecutionAddress) {
+ return nil
+ }
+ }
+
+ if len(filter.Status) > 0 {
+ builderStatus := getBuilderStatus(builder, currentEpoch, false)
+ if !slices.Contains(filter.Status, builderStatus) {
+ return nil
+ }
+ }
+
+ cachedResults = append(cachedResults, BuilderWithIndex{
+ Index: index,
+ Builder: builder,
+ })
+ cachedIndexes[uint64(index)] = true
+
+ return nil
+ })
+
+ // Get matching entries from DB
+ dbIndexes, err := db.GetBuilderIndexesByFilter(*filter, uint64(currentEpoch))
+ if err != nil {
+ bs.logger.Warnf("error getting builder indexes by filter: %v", err)
+ return nil, 0
+ }
+
+ // Sort results
+ var sortFn func(builderA, builderB BuilderWithIndex) bool
+ switch filter.OrderBy {
+ case dbtypes.BuilderOrderIndexAsc:
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return builderA.Index < builderB.Index
+ }
+ case dbtypes.BuilderOrderIndexDesc:
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return builderA.Index > builderB.Index
+ }
+ case dbtypes.BuilderOrderPubKeyAsc:
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return bytes.Compare(builderA.Builder.PublicKey[:], builderB.Builder.PublicKey[:]) < 0
+ }
+ case dbtypes.BuilderOrderPubKeyDesc:
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return bytes.Compare(builderA.Builder.PublicKey[:], builderB.Builder.PublicKey[:]) > 0
+ }
+ case dbtypes.BuilderOrderBalanceAsc:
+ if balances == nil {
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return builderA.Builder.Balance < builderB.Builder.Balance
+ }
+ } else {
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return balances[builderA.Index] < balances[builderB.Index]
+ }
+ sort.Slice(dbIndexes, func(i, j int) bool {
+ if dbIndexes[i] >= uint64(len(balances)) || dbIndexes[j] >= uint64(len(balances)) {
+ return dbIndexes[i] < dbIndexes[j]
+ }
+ return balances[dbIndexes[i]] < balances[dbIndexes[j]]
+ })
+ }
+ case dbtypes.BuilderOrderBalanceDesc:
+ if balances == nil {
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return builderA.Builder.Balance > builderB.Builder.Balance
+ }
+ } else {
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return balances[builderA.Index] > balances[builderB.Index]
+ }
+ sort.Slice(dbIndexes, func(i, j int) bool {
+ if dbIndexes[i] >= uint64(len(balances)) || dbIndexes[j] >= uint64(len(balances)) {
+ return dbIndexes[i] > dbIndexes[j]
+ }
+ return balances[dbIndexes[i]] > balances[dbIndexes[j]]
+ })
+ }
+ case dbtypes.BuilderOrderDepositEpochAsc:
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return builderA.Builder.DepositEpoch < builderB.Builder.DepositEpoch
+ }
+ case dbtypes.BuilderOrderDepositEpochDesc:
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return builderA.Builder.DepositEpoch > builderB.Builder.DepositEpoch
+ }
+ case dbtypes.BuilderOrderWithdrawableEpochAsc:
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return builderA.Builder.WithdrawableEpoch < builderB.Builder.WithdrawableEpoch
+ }
+ case dbtypes.BuilderOrderWithdrawableEpochDesc:
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return builderA.Builder.WithdrawableEpoch > builderB.Builder.WithdrawableEpoch
+ }
+ }
+
+ sort.Slice(cachedResults, func(i, j int) bool {
+ return sortFn(cachedResults[i], cachedResults[j])
+ })
+
+ // Stream builder set from db and merge cached results
+ resCap := filter.Limit
+ if resCap == 0 {
+ resCap = uint64(len(cachedResults) + len(dbIndexes))
+ }
+ result := make([]BuilderWithIndex, 0, resCap)
+ cachedIndex := 0
+ matchingCount := uint64(0)
+ resultCount := uint64(0)
+ dbEntryCount := uint64(0)
+
+ db.StreamBuildersByIndexes(dbIndexes, func(dbBuilder *dbtypes.Builder) bool {
+ dbEntryCount++
+ builderWithIndex := BuilderWithIndex{
+ Index: gloas.BuilderIndex(dbBuilder.BuilderIndex),
+ Builder: beacon.UnwrapDbBuilder(dbBuilder),
+ Superseded: dbBuilder.Superseded,
+ }
+
+ for cachedIndex < len(cachedResults) && (cachedResults[cachedIndex].Index == builderWithIndex.Index || sortFn(cachedResults[cachedIndex], builderWithIndex)) {
+ if matchingCount >= filter.Offset {
+ resultBuilder := cachedResults[cachedIndex]
+ if balances != nil && uint64(resultBuilder.Index) < uint64(len(balances)) {
+ resultBuilder.Builder.Balance = balances[resultBuilder.Index]
+ }
+ result = append(result, resultBuilder)
+ resultCount++
+ }
+ matchingCount++
+ cachedIndex++
+
+ if filter.Limit > 0 && resultCount >= filter.Limit {
+ return false // stop streaming
+ }
+ }
+
+ if cachedIndexes[dbBuilder.BuilderIndex] {
+ return true // skip this index, cache entry is newer
+ }
+
+ if matchingCount >= filter.Offset {
+ if !builderWithIndex.Superseded && balances != nil && dbBuilder.BuilderIndex < uint64(len(balances)) {
+ builderWithIndex.Builder.Balance = balances[dbBuilder.BuilderIndex]
+ }
+ result = append(result, builderWithIndex)
+ resultCount++
+ }
+ matchingCount++
+
+ if filter.Limit > 0 && resultCount >= filter.Limit {
+ return false // stop streaming
+ }
+
+ return true // get more from db
+ })
+
+ for cachedIndex < len(cachedResults) && (filter.Limit == 0 || resultCount < filter.Limit) {
+ if matchingCount >= filter.Offset {
+ resultBuilder := cachedResults[cachedIndex]
+ if balances != nil && uint64(resultBuilder.Index) < uint64(len(balances)) {
+ resultBuilder.Builder.Balance = balances[resultBuilder.Index]
+ }
+ result = append(result, resultBuilder)
+ resultCount++
+ }
+ matchingCount++
+ cachedIndex++
+ }
+
+ // Count remaining cached results (past the requested page) toward the total
+ matchingCount += uint64(len(cachedResults) - cachedIndex)
+
+ // Count remaining db results that are not shadowed by cache entries
+ remainingDbCount := uint64(0)
+ for i := dbEntryCount; i < uint64(len(dbIndexes)); i++ {
+ if cachedIndexes[dbIndexes[i]] {
+ continue
+ }
+ remainingDbCount++
+ }
+ matchingCount += remainingDbCount
+
+ return result, matchingCount
+}
+
+// getBuilderStatus determines the status of a builder
+func getBuilderStatus(builder *gloas.Builder, currentEpoch phase0.Epoch, superseded bool) dbtypes.BuilderStatus {
+ if superseded {
+ return dbtypes.BuilderStatusSupersededFilter
+ }
+ if builder.WithdrawableEpoch <= currentEpoch {
+ return dbtypes.BuilderStatusExitedFilter
+ }
+ return dbtypes.BuilderStatusActiveFilter
+}
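Note: GetFilteredBuilderSet merges two sorted sources: builders still present in the in-memory cache (preferred, since they reflect the most recent state) and builders streamed from the database, with cache-shadowed DB rows skipped. A hypothetical caller, using only filter fields referenced in the function above:

    filter := &dbtypes.BuilderFilter{
        OrderBy: dbtypes.BuilderOrderIndexAsc,
        Limit:   50,
        Offset:  0,
    }
    // true: overlay live balances from the canonical head onto the results
    builders, total := bs.GetFilteredBuilderSet(filter, true)
    bs.logger.Infof("showing %v of %v matching builders", len(builders), total)
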
diff --git a/static/css/layout.css b/static/css/layout.css
index e0df61b7..665ee04d 100644
--- a/static/css/layout.css
+++ b/static/css/layout.css
@@ -329,6 +329,26 @@ span.validator-label {
padding: 1px .25rem;
}
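+/* two-tone badge: left half keeps the underlying badge background, right half
+   is filled with the Bootstrap "warning" yellow */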
+.badge.split-warning {
+ background: linear-gradient(
+ 90deg,
+ rgba(255,255,255,0) 0%,
+ rgba(255,255,255,0) 50%,
+ rgba(255,193,7,1) 50%,
+ rgba(255,193,7,1) 100%
+ );
+}
+
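+/* same split layout, with the Bootstrap "info" cyan on the right half */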
+.badge.split-info {
+ background: linear-gradient(
+ 90deg,
+ rgba(255,255,255,0) 0%,
+ rgba(255,255,255,0) 50%,
+ rgba(13,202,240,1) 50%,
+ rgba(13,202,240,1) 100%
+ );
+}
+
.text-monospace {
font-family: var(--bs-font-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace) !important;
}
diff --git a/templates/builders/builders.html b/templates/builders/builders.html
new file mode 100644
index 00000000..25869e1a
--- /dev/null
+++ b/templates/builders/builders.html
@@ -0,0 +1,303 @@
+{{ define "page" }}
+
+
+
Builders Overview
+
+
+
+
+
+
+
+
+
+ {{ if gt .TotalPages 1 }}
+
+ {{ end }}
+
+
+
+
+{{ end }}
+{{ define "js" }}
+
+
+{{ end }}
+{{ define "css" }}
+
+
+{{ end }}
diff --git a/templates/epoch/epoch.html b/templates/epoch/epoch.html
index f047ee43..8f6a8b27 100644
--- a/templates/epoch/epoch.html
+++ b/templates/epoch/epoch.html
@@ -177,15 +177,15 @@