@@ -13,6 +13,11 @@ import (
const (
	// Fully-qualified gRPC method names on the Cosmos SDK tx service.
	blockMethodFullName = "cosmos.tx.v1beta1.Service.GetBlockWithTxs"
	txMethodFullName    = "cosmos.tx.v1beta1.Service.GetTx"

	// maxPrunedNodeRecoveryAttempts caps how many times the start height may be
	// adjusted in response to pruned-node errors before giving up. This prevents
	// an infinite loop if the node's pruning boundary keeps advancing during
	// extraction.
	maxPrunedNodeRecoveryAttempts = 10
)
1722
1823// Extract extracts blocks and transactions from a gRPC server.
@@ -30,6 +35,24 @@ func Extract(gRPCClient *client.GRPCClient, outputHandler output.OutputHandler,
3035 }
3136 }
3237
38+ // Warm-up: validate start height against potential load balancer inconsistencies
39+ validatedStart , err := warmUpStartHeight (gRPCClient , config .BlockStart , outputHandler , config .MaxConcurrency , config .MaxRetries )
40+ if err != nil {
41+ return err
42+ }
43+ if validatedStart != config .BlockStart {
44+ slog .Info ("Start height adjusted after warm-up" ,
45+ "original" , config .BlockStart ,
46+ "validated" , validatedStart )
47+ config .BlockStart = validatedStart
48+ }
49+
50+ // In batch mode, verify the adjusted start doesn't exceed the stop block
51+ if ! config .LiveMonitoring && config .BlockStart > config .BlockStop {
52+ return fmt .Errorf ("pruned node boundary (%d) exceeds requested stop block (%d): requested range is unavailable" ,
53+ config .BlockStart , config .BlockStop )
54+ }
55+
3356 if config .LiveMonitoring {
3457 slog .Info ("Starting live extraction" , "block_time" , config .BlockTime )
3558 err := extractLiveBlocksAndTransactions (gRPCClient , config .BlockStart , outputHandler , config .BlockTime , config .MaxConcurrency , config .MaxRetries )
@@ -47,36 +70,46 @@ func Extract(gRPCClient *client.GRPCClient, outputHandler output.OutputHandler,
4770 return nil
4871}
4972
50- // setBlockRange sets correct the block range based on the configuration.
51- // If the start block is not set, it will be set to the latest block in the database.
52- // If the stop block is not set, it will be set to the latest block in the gRPC server.
53- // If the start block is greater than the stop block, an error will be returned.
73+ // setBlockRange sets the block range based on the configuration.
74+ // If the start block is not set, it will be set to the latest block in the database + 1.
75+ // If the database is empty, it queries the node for the earliest available block.
76+ // If the stop block is not set, it will be set to the latest block on the node.
77+ // Returns an error if the start block is greater than the stop block.
5478func setBlockRange (gRPCClient * client.GRPCClient , outputHandler output.OutputHandler , cfg * config.ExtractConfig ) error {
5579 if cfg .ReIndex {
5680 slog .Info ("Reindexing entire database..." )
57- // TODO: Get the earliest block from the gRPC server
58- // See https://github.com/manifest-network/yaci/issues/28
59- cfg .BlockStart = 1
6081 earliestLocalBlock , err := outputHandler .GetEarliestBlock (gRPCClient .Ctx )
6182 if err != nil {
6283 return fmt .Errorf ("failed to get the earliest local block: %w" , err )
6384 }
6485 if earliestLocalBlock != nil {
6586 cfg .BlockStart = earliestLocalBlock .ID
87+ } else {
88+ // Fresh DB with reindex - probe for earliest available
89+ earliestAvailable , err := utils .GetEarliestBlockHeight (gRPCClient , cfg .MaxRetries )
90+ if err != nil {
91+ return fmt .Errorf ("failed to determine earliest available block: %w" , err )
92+ }
93+ cfg .BlockStart = earliestAvailable
6694 }
6795 cfg .BlockStop = 0
6896 }
6997
7098 if cfg .BlockStart == 0 {
71- // TODO: Get the earliest block from the gRPC server
72- // See https://github.com/manifest-network/yaci/issues/28
73- cfg .BlockStart = 1
7499 latestLocalBlock , err := outputHandler .GetLatestBlock (gRPCClient .Ctx )
75100 if err != nil {
76101 return fmt .Errorf ("failed to get the latest block: %w" , err )
77102 }
78103 if latestLocalBlock != nil {
104+ // Resume from existing DB - no probe needed
79105 cfg .BlockStart = latestLocalBlock .ID + 1
106+ } else {
107+ // Fresh DB - probe to find earliest available block on node
108+ earliestAvailable , err := utils .GetEarliestBlockHeight (gRPCClient , cfg .MaxRetries )
109+ if err != nil {
110+ return fmt .Errorf ("failed to determine earliest available block: %w" , err )
111+ }
112+ cfg .BlockStart = earliestAvailable
80113 }
81114 }
82115
@@ -99,3 +132,42 @@ func setBlockRange(gRPCClient *client.GRPCClient, outputHandler output.OutputHan
99132func shouldSkipMissingBlockCheck (cfg config.ExtractConfig ) bool {
100133 return (cfg .BlockStart != 0 && cfg .BlockStop != 0 ) || cfg .ReIndex
101134}
135+
136+ // warmUpStartHeight validates that the start height is available by attempting to
137+ // fetch a single block. If the node returns a pruned error with a higher boundary,
138+ // it adjusts the start height and retries. This handles load balancer scenarios
139+ // where different nodes may have different pruning boundaries.
140+ func warmUpStartHeight (
141+ gRPCClient * client.GRPCClient ,
142+ start uint64 ,
143+ outputHandler output.OutputHandler ,
144+ maxConcurrency , maxRetries uint ,
145+ ) (uint64 , error ) {
146+ currentStart := start
147+
148+ for attempt := 0 ; attempt <= maxPrunedNodeRecoveryAttempts ; attempt ++ {
149+ // Try to fetch just the start block
150+ err := extractBlocksAndTransactions (gRPCClient , currentStart , currentStart , outputHandler , maxConcurrency , maxRetries )
151+ if err == nil {
152+ return currentStart , nil
153+ }
154+
155+ // Check if error is due to pruned node with higher boundary
156+ newStart := utils .ParseLowestHeightFromError (err .Error ())
157+ if newStart > currentStart {
158+ slog .Warn ("Warm-up: adjusting start height due to pruned node" ,
159+ "original_start" , currentStart ,
160+ "new_start" , newStart ,
161+ "skipped_blocks" , newStart - currentStart ,
162+ "attempt" , attempt + 1 )
163+ currentStart = newStart
164+ continue
165+ }
166+
167+ // Non-recoverable error
168+ return 0 , fmt .Errorf ("warm-up failed: %w" , err )
169+ }
170+
171+ return 0 , fmt .Errorf ("warm-up exceeded maximum attempts (%d): pruning boundary keeps changing" ,
172+ maxPrunedNodeRecoveryAttempts )
173+ }
0 commit comments