
Commit 298f62e

feat(tools): create cache analyzer tool (#2723)
# Cache Analyzer

> The cache analyzer currently only works for sync nodes.

This small program is designed to analyze the cache of a sync node. It is useful for debugging when a sync node is downloading from DA but not advancing, which usually means the `DA_START_HEIGHT` is too late. This tool makes it easy to identify the first height fetched from DA.

## Usage

```sh
go install github.com/evstack/ev-node/tools/cache-analyzer@main
cache-analyzer -data-dir ~/.appd/data/ -summary
cache-analyzer -data-dir ~/.appd/data/ -limit 50
```
1 parent 3b5792c commit 298f62e

File tree

2 files changed, +249 −0 lines


tools/cache-analyzer/README.md

Lines changed: 15 additions & 0 deletions
@@ -0,0 +1,15 @@
# Cache Analyzer

> The cache analyzer currently only works for sync nodes.

This small program is designed to analyze the cache of a sync node.
It is useful for debugging when a sync node is downloading from DA but not advancing.
This usually means the `DA_START_HEIGHT` is too late. This tool makes it easy to identify the first height fetched from DA.

## Usage

```sh
go install github.com/evstack/ev-node/tools/cache-analyzer@main
cache-analyzer -data-dir ~/.appd/data/ -summary
cache-analyzer -data-dir ~/.appd/data/ -limit 50
```
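
For reference, running with `-summary` prints only the summary block produced by `printSummary` in `main.go` below; the layout is taken from its format strings, with placeholders standing in for actual values:

```text
Cache Summary:
 Total Events: <number of pending events>
 Height Range: <lowest block height> - <highest block height>
 DA Height Range: <DA height of lowest block> - <DA height of highest block>
```

Without `-summary`, the summary is followed by a table of up to `-limit` rows showing each pending block height, the DA height it was fetched from, and its header/data hashes.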

tools/cache-analyzer/main.go

Lines changed: 234 additions & 0 deletions
@@ -0,0 +1,234 @@
package main

import (
	"bytes"
	"encoding/gob"
	"flag"
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"strings"

	"github.com/evstack/ev-node/types"
)

// DataHashForEmptyTxs is the hash of an empty block data.
var DataHashForEmptyTxs = []byte{110, 52, 11, 156, 255, 179, 122, 152, 156, 165, 68, 230, 187, 120, 10, 44, 120, 144, 29, 63, 179, 55, 56, 118, 133, 17, 163, 6, 23, 175, 160, 29}

const (
	pendingEventsCacheDir = "cache/pending_da_events"
	itemsByHeightFilename = "items_by_height.gob"
)

// DAHeightEvent represents a DA event for caching (copied from internal package)
type DAHeightEvent struct {
	Header *types.SignedHeader
	Data   *types.Data
	// DaHeight corresponds to the highest DA included height between the Header and Data.
	DaHeight uint64
}

// registerGobTypes registers types needed for decoding
func registerGobTypes() {
	gob.Register(&types.SignedHeader{})
	gob.Register(&types.Data{})
	gob.Register(&DAHeightEvent{})
}

// loadEventsFromDisk loads pending events from the cache directory
func loadEventsFromDisk(dataDir string) (map[uint64]*DAHeightEvent, error) {
	registerGobTypes()

	cachePath := filepath.Join(dataDir, pendingEventsCacheDir, itemsByHeightFilename)

	file, err := os.Open(cachePath)
	if err != nil {
		if os.IsNotExist(err) {
			return make(map[uint64]*DAHeightEvent), nil
		}
		return nil, fmt.Errorf("failed to open cache file %s: %w", cachePath, err)
	}
	defer file.Close()

	var events map[uint64]*DAHeightEvent
	decoder := gob.NewDecoder(file)
	if err := decoder.Decode(&events); err != nil {
		return nil, fmt.Errorf("failed to decode cache file: %w", err)
	}

	return events, nil
}

// formatTable creates a formatted table string
func formatTable(events []eventEntry) string {
	if len(events) == 0 {
		return "No pending events found.\n"
	}

	var sb strings.Builder

	// Header
	sb.WriteString("┌─────────────┬──────────────┬─────────────────────────────────────────────────────────────────────┐\n")
	sb.WriteString("│ Height      │ DA Height    │ Hash/Details                                                        │\n")
	sb.WriteString("├─────────────┼──────────────┼─────────────────────────────────────────────────────────────────────┤\n")

	for _, entry := range events {
		heightStr := fmt.Sprintf("%d", entry.Height)
		daHeightStr := fmt.Sprintf("%d", entry.DAHeight)

		// Format each row
		sb.WriteString(fmt.Sprintf("│ %-11s │ %-12s │ %-67s │\n",
			heightStr, daHeightStr, truncateString(entry.Details, 67)))
	}

	sb.WriteString("└─────────────┴──────────────┴─────────────────────────────────────────────────────────────────────┘\n")

	return sb.String()
}

// truncateString truncates a string to maxLen, adding "..." if needed
func truncateString(s string, maxLen int) string {
	if len(s) <= maxLen {
		return s
	}
	if maxLen <= 3 {
		return "..."
	}
	return s[:maxLen-3] + "..."
}

// eventEntry represents a table row
type eventEntry struct {
	Height   uint64
	DAHeight uint64
	Details  string
}

// analyzeEvents processes the events and creates table entries
func analyzeEvents(events map[uint64]*DAHeightEvent, limit int) []eventEntry {
	var entries []eventEntry

	// Convert map to sorted slice by height
	heights := make([]uint64, 0, len(events))
	for height := range events {
		heights = append(heights, height)
	}
	sort.Slice(heights, func(i, j int) bool {
		return heights[i] < heights[j]
	})

	// Apply limit
	if limit > 0 && len(heights) > limit {
		heights = heights[:limit]
	}

	for _, height := range heights {
		event := events[height]
		if event == nil {
			continue
		}

		headerHash := "N/A"
		if event.Header.Hash() != nil {
			headerHash = fmt.Sprintf("%.8x", event.Header.Hash())
		}
		dataHash := "N/A"
		if event.Data.DACommitment() != nil {
			if bytes.Equal(event.Data.DACommitment(), DataHashForEmptyTxs) {
				dataHash = "Hash for empty transactions"
			} else {
				dataHash = fmt.Sprintf("%.8x", event.Data.DACommitment())
			}
		}
		txCount := 0
		if event.Data.Txs != nil {
			txCount = len(event.Data.Txs)
		}
		details := fmt.Sprintf("H:%s D:%s TxCount:%d", headerHash, dataHash, txCount)

		entries = append(entries, eventEntry{
			Height:   height,
			DAHeight: event.DaHeight,
			Details:  details,
		})
	}

	return entries
}

// printSummary prints a summary of the cache contents
func printSummary(events map[uint64]*DAHeightEvent) {
	if len(events) == 0 {
		fmt.Println("Cache Summary: No events found")
		return
	}

	var minHeight, maxHeight uint64
	var minDaHeight, maxDaHeight uint64
	first := true

	for height, event := range events {
		if first {
			minHeight = height
			maxHeight = height
			minDaHeight = event.DaHeight
			maxDaHeight = event.DaHeight
			first = false
		} else {
			if height < minHeight {
				minHeight = height
				minDaHeight = event.DaHeight
			}
			if height > maxHeight {
				maxHeight = height
				maxDaHeight = event.DaHeight
			}
		}
	}

	fmt.Printf("Cache Summary:\n")
	fmt.Printf(" Total Events: %d\n", len(events))
	fmt.Printf(" Height Range: %d - %d\n", minHeight, maxHeight)
	fmt.Printf(" DA Height Range: %d - %d\n", minDaHeight, maxDaHeight)
	fmt.Printf("\n")
}

func main() {
	var (
		dataDir = flag.String("data-dir", "data", "Path to the data directory containing cache")
		limit   = flag.Int("limit", 10, "Maximum number of events to display (0 for no limit)")
		summary = flag.Bool("summary", false, "Show only summary without table")
	)
	flag.Parse()

	// Load events from cache
	events, err := loadEventsFromDisk(*dataDir)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error loading cache: %v\n", err)
		os.Exit(1)
	}

	// Print summary
	printSummary(events)

	// Exit if only summary requested
	if *summary {
		return
	}

	// Analyze and display events
	entries := analyzeEvents(events, *limit)

	if len(entries) == 0 {
		fmt.Println("No pending events to display.")
		return
	}

	if *limit > 0 && len(events) > *limit {
		fmt.Printf("Showing first %d of %d events:\n\n", len(entries), len(events))
	}

	fmt.Print(formatTable(entries))
}
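
The commit's stated purpose is identifying the first height fetched from DA. As a minimal sketch (not part of this commit), the events map returned by `loadEventsFromDisk` above could be reduced to exactly that figure; `firstPendingHeight` is a hypothetical helper name:

```go
// firstPendingHeight is a hypothetical helper (not in the commit) that scans the
// decoded cache and returns the lowest pending block height together with the DA
// height it was fetched from. It reuses the DAHeightEvent type defined in main.go.
func firstPendingHeight(events map[uint64]*DAHeightEvent) (height, daHeight uint64, found bool) {
	for h, ev := range events {
		if ev == nil {
			continue
		}
		if !found || h < height {
			height = h
			daHeight = ev.DaHeight
			found = true
		}
	}
	return height, daHeight, found
}
```

Comparing that DA height against the configured `DA_START_HEIGHT` is the check the README describes: it shows whether the start height was set past blocks the node still needs.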
