Skip to content

Commit cc81319

Browse files
committed
log updates
1 parent 8ab3050 commit cc81319

File tree

2 files changed

+11
-46
lines changed

2 files changed

+11
-46
lines changed

internal/committer/reorg.go

Lines changed: 0 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -225,19 +225,6 @@ func handleReorgForRange(startBlock uint64, endBlock uint64) error {
225225
return fmt.Errorf("handleReorgForRange: failed to get old block data: %w", err)
226226
}
227227

228-
nonNilOldBlocks := 0
229-
for _, bd := range oldblockDataArray {
230-
if bd != nil {
231-
nonNilOldBlocks++
232-
}
233-
}
234-
log.Debug().
235-
Uint64("start_block", startBlock).
236-
Uint64("end_block", endBlock).
237-
Int("requested_old_blocks", len(oldblockDataArray)).
238-
Int("non_nil_old_blocks", nonNilOldBlocks).
239-
Msg("handleReorgForRange: loaded old block data from ClickHouse")
240-
241228
if err := libs.KafkaPublisherV2.PublishBlockDataReorg(newblockDataArray, oldblockDataArray); err != nil {
242229
log.Error().
243230
Err(err).

internal/storage/kafka_publisher.go

Lines changed: 11 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -268,41 +268,19 @@ func (p *KafkaPublisher) createBlockDataMessage(block *common.BlockData, isDelet
268268
return nil, fmt.Errorf("failed to marshal block data: %v", err)
269269
}
270270

271-
log.Debug().
272-
Uint64("chain_id", data.ChainId).
273-
Uint64("block_number", block.Block.Number.Uint64()).
274-
Int("tx_count", len(block.Transactions)).
275-
Int("log_count", len(block.Logs)).
276-
Int("trace_count", len(block.Traces)).
277-
Bool("is_deleted", isDeleted).
278-
Bool("is_reorg", isReorg).
279-
Msg("KafkaPublisher Message: Block metadata")
280-
281-
return p.createRecord(data.GetType(), data.ChainId, block.Block.Number.Uint64(), timestamp, isDeleted, isReorg, msgJson)
282-
}
283-
284-
func (p *KafkaPublisher) createBlockRevertMessage(chainId uint64, blockNumber uint64) (*kgo.Record, error) {
285-
timestamp := time.Now()
286-
287-
data := PublishableMessageRevert{
288-
ChainId: chainId,
289-
BlockNumber: blockNumber,
290-
IsDeleted: 0,
291-
InsertTimestamp: timestamp,
292-
}
293-
294-
msg := PublishableMessagePayload{
295-
Data: data,
296-
Type: data.GetType(),
297-
Timestamp: timestamp,
298-
}
299-
300-
msgJson, err := json.Marshal(msg)
301-
if err != nil {
302-
return nil, fmt.Errorf("failed to marshal block data: %v", err)
271+
if isReorg {
272+
log.Debug().
273+
Uint64("chain_id", data.ChainId).
274+
Uint64("block_number", block.Block.Number.Uint64()).
275+
Int("tx_count", len(block.Transactions)).
276+
Int("log_count", len(block.Logs)).
277+
Int("trace_count", len(block.Traces)).
278+
Bool("is_deleted", isDeleted).
279+
Bool("is_reorg", isReorg).
280+
Msg("KafkaPublisher Message Reorg: Block metadata")
303281
}
304282

305-
return p.createRecord(data.GetType(), chainId, blockNumber, timestamp, false, false, msgJson)
283+
return p.createRecord(data.GetType(), data.ChainId, block.Block.Number.Uint64(), timestamp, isDeleted, isReorg, msgJson)
306284
}
307285

308286
func (p *KafkaPublisher) createRecord(msgType MessageType, chainId uint64, blockNumber uint64, timestamp time.Time, isDeleted bool, isReorg bool, msgJson []byte) (*kgo.Record, error) {

0 commit comments

Comments (0)