@@ -2,6 +2,7 @@ package neutrino_test
 
 import (
 	"bytes"
+	"compress/bzip2"
 	"context"
 	"encoding/hex"
 	"fmt"
@@ -12,6 +13,7 @@ import (
 	"runtime"
 	"strings"
 	"sync"
+	"syscall"
 	"testing"
 	"time"
 
@@ -1369,6 +1371,220 @@ func TestNeutrinoSyncWithoutHeadersImport(t *testing.T) {
 	}
 }
 
+// BenchmarkHeadersImport benchmarks the headers import functionality. It
+// decompresses the block and filter headers from the testdata directory and
+// imports them into a temporary database, then measures the import time for
+// different batch sizes.
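+//
+// A typical invocation that runs only this benchmark, once per batch size
+// (these are standard "go test" flags):
+//
+//	go test -run=^$ -bench=BenchmarkHeadersImport -benchtime=1x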
+func BenchmarkHeadersImport(b *testing.B) {
+	blockHeadersPath, cleanupBlock, err := decompressBZ2ToTempFile(
+		"testdata/block-headers-mainnet-1-100_000.bz2",
+	)
+	require.NoError(b, err)
+	defer cleanupBlock()
+
+	filterHeadersPath, cleanupFilter, err := decompressBZ2ToTempFile(
+		"testdata/filter-headers-mainnet-1-100_000.bz2",
+	)
+	require.NoError(b, err)
+	defer cleanupFilter()
+
+	expectedHeaders := 100_000
+
+	validateFileSize(
+		b, blockHeadersPath, expectedHeaders, headerfs.Block,
+	)
+
+	validateFileSize(
+		b, filterHeadersPath, expectedHeaders, headerfs.RegularFilter,
+	)
+
+	for exp := 10; exp <= 19; exp++ {
+		// Calculate the batch size, which is 2^exp.
+		batch := 1 << exp
+		b.Run(fmt.Sprintf("batchSize=%d", batch), func(b *testing.B) {
+			var memStats runtime.MemStats
+			var peakMemory uint64
+
+			// Take an initial memory snapshot before starting the
+			// benchmark. This establishes a baseline for memory
+			// usage measurements.
+			runtime.GC()
+			runtime.ReadMemStats(&memStats)
+			initialHeapInUse := memStats.HeapInuse
+
+			tempDir := b.TempDir()
+			dbPath := filepath.Join(tempDir, "test.db")
+			db, err := walletdb.Create(
+				"bdb", dbPath, true, time.Second*10,
+			)
+			require.NoError(b, err)
+			defer db.Close()
+
+			blockStore, err := headerfs.NewBlockHeaderStore(
+				tempDir, db, &chaincfg.MainNetParams,
+			)
+			require.NoError(b, err)
+
+			filterStore, err := headerfs.NewFilterHeaderStore(
+				tempDir, db, headerfs.RegularFilter,
+				&chaincfg.MainNetParams, nil,
+			)
+			require.NoError(b, err)
+
+			opts := &chainimport.ImportOptions{
+				TargetChainParams:       chaincfg.MainNetParams,
+				TargetBlockHeaderStore:  blockStore,
+				TargetFilterHeaderStore: filterStore,
+				BlockHeadersSource:      blockHeadersPath,
+				FilterHeadersSource:     filterHeadersPath,
+				WriteBatchSizePerRegion: batch,
+			}
+
+			// Get the initial CPU time for measuring CPU usage
+			// during the import.
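+			// Note that syscall.Getrusage and syscall.Rusage are
+			// only available on Unix-like platforms, so this
+			// benchmark does not build on Windows; a build tag
+			// (for example //go:build unix) may be needed there.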
+			var rusageStart, rusageEnd syscall.Rusage
+			syscall.Getrusage(syscall.RUSAGE_SELF, &rusageStart)
+
+			// Start a memory monitoring goroutine to track peak
+			// memory usage. It samples memory stats every 10ms to
+			// capture the highest usage.
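+			// Allocation spikes shorter than the sampling
+			// interval can still be missed, so the reported peak
+			// is a lower bound.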
+			stopMemMonitor := make(chan struct{})
+			memMonitorDone := make(chan struct{})
+			go func() {
+				ticker := time.NewTicker(10 * time.Millisecond)
+				defer ticker.Stop()
+				defer close(memMonitorDone)
+
+				for {
+					select {
+					case <-ticker.C:
+						runtime.ReadMemStats(&memStats)
+						heapInUse := memStats.HeapInuse
+						if heapInUse > peakMemory {
+							peakMemory = heapInUse
+						}
+					case <-stopMemMonitor:
+						return
+					}
+				}
+			}()
+
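+			// Reset the timer so that the setup above (DB
+			// creation and store initialization) is excluded
+			// from the measurement. Note the import runs once
+			// per sub-benchmark invocation; the metrics reported
+			// below do not depend on b.N.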
+			b.ResetTimer()
+			startTime := time.Now()
+			ctx := context.Background()
+
+			importer, err := chainimport.NewHeadersImport(opts)
+			require.NoError(b, err)
+
+			_, err = importer.Import(ctx)
+			require.NoError(b, err)
+
+			elapsed := time.Since(startTime)
+			b.StopTimer()
+
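+			// Stop the sampling goroutine and wait for it to
+			// exit, which guarantees that the read of peakMemory
+			// below is race-free.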
+			close(stopMemMonitor)
+			<-memMonitorDone
+
+			// Get the final CPU usage after import completion.
+			syscall.Getrusage(syscall.RUSAGE_SELF, &rusageEnd)
+
+			// Calculate total CPU time (user + system) in seconds.
+			rEndUtime := rusageEnd.Utime
+			rStartUtime := rusageStart.Utime
+			userTimeSec := float64(rEndUtime.Sec-rStartUtime.Sec) +
+				float64(rEndUtime.Usec-rStartUtime.Usec)/1e6
+
+			rEndStime := rusageEnd.Stime
+			rStartStime := rusageStart.Stime
+			sysTimeSec := float64(rEndStime.Sec-rStartStime.Sec) +
+				float64(rEndStime.Usec-rStartStime.Usec)/1e6
+
+			cpuTimeSec := userTimeSec + sysTimeSec
+
+			// Calculate import throughput in headers per second.
+			hPS := float64(expectedHeaders) / elapsed.Seconds()
+
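+			// This assumes the import ran long enough (>10ms)
+			// for the monitor to record at least one sample
+			// above the baseline; otherwise the unsigned
+			// subtraction below would wrap around.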
+			peakMemoryB := float64(peakMemory - initialHeapInUse)
+			peakMemoryMB := peakMemoryB / (1024 * 1024)
+			b.ReportMetric(peakMemoryMB, "MB_peak")
+
+			b.ReportMetric(hPS, "headers/s")
+			b.ReportMetric(cpuTimeSec, "cpu_s")
+		})
+	}
+}
+
+// decompressBZ2ToTempFile decompresses a bz2 file to a temporary file,
+// returning the temp file path and a cleanup function that removes it.
+func decompressBZ2ToTempFile(srcPath string) (string, func(), error) {
+	f, err := os.Open(srcPath)
+	if err != nil {
+		return "", nil, err
+	}
+	defer f.Close()
+
+	bz2r := bzip2.NewReader(f)
+	tempFile, err := os.CreateTemp(
+		"", filepath.Base(srcPath)+"-decompressed-*",
+	)
+	if err != nil {
+		return "", nil, err
+	}
+
+	if _, err := io.Copy(tempFile, bz2r); err != nil {
+		tempFile.Close()
+		os.Remove(tempFile.Name())
+		return "", nil, err
+	}
+	tempFile.Close()
+
+	cleanup := func() { os.Remove(tempFile.Name()) }
+	return tempFile.Name(), cleanup, nil
+}
+
+// validateFileSize is a helper that asserts a file contains exactly the
+// expected number of headers of the given type, plus the constant-size
+// import metadata at the front of the file.
+func validateFileSize(b *testing.B, filePath string, nHeaders int,
+	hType headerfs.HeaderType) {
+
+	hSize, err := hType.Size()
+	require.NoError(b, err)
+
+	fileInfo, err := os.Stat(filePath)
+	require.NoError(b, err)
+	expectedSize := int64(nHeaders*hSize + chainimport.ImportMetadataSize)
+
+	require.Equal(b, expectedSize, fileInfo.Size(),
+		"%s file should contain exactly %d headers (%d bytes) + %d "+
+			"bytes metadata", hType, nHeaders, nHeaders*hSize,
+		chainimport.ImportMetadataSize)
+}
+
 // csd does a connect-sync-disconnect between nodes in order to support
 // reorg testing. It brings up and tears down a temporary node, otherwise the
 // nodes try to reconnect to each other which results in unintended reorgs.