
Commit e62503b

refactor: send RequestNext in batches for chainsync pipelining (#843)
We now send pipelined RequestNext messages in batches instead of an initial batch followed by single messages. This potentially improves performance and reduces pressure on mini-protocol buffers.
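The idea is easier to see outside the diff: rather than topping the pipeline up with one RequestNext per reply, the sync loop now drains a counter of outstanding requests and only sends again once the whole batch has been consumed, at which point it sends a full batch. Below is a minimal, self-contained sketch of that pattern; the sendRequestNext helper and batchSize value are illustrative stand-ins, not the library's API.

	package main

	import "fmt"

	// sendRequestNext stands in for building and sending a RequestNext message.
	func sendRequestNext(i int) { fmt.Println("send RequestNext", i) }

	func main() {
		const batchSize = 5 // stand-in for Config.PipelineLimit
		const replies = 12  // pretend the peer sends 12 RollForward replies

		outstanding := 0
		for r := 1; r <= replies; r++ {
			if outstanding == 0 {
				// Pipeline a whole batch of RequestNext messages at once
				for i := 0; i < batchSize; i++ {
					sendRequestNext(i)
				}
				outstanding = batchSize
			}
			// Consume one reply against the outstanding batch
			outstanding--
			fmt.Printf("handled reply %d (outstanding: %d)\n", r, outstanding)
		}
	}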
1 parent: 4810693 · commit: e62503b

protocol/chainsync/client.go

Lines changed: 31 additions & 22 deletions
@@ -28,12 +28,13 @@ import (
 // Client implements the ChainSync client
 type Client struct {
 	*protocol.Protocol
-	config                *Config
-	callbackContext       CallbackContext
-	busyMutex             sync.Mutex
-	readyForNextBlockChan chan bool
-	onceStart             sync.Once
-	onceStop              sync.Once
+	config                   *Config
+	callbackContext          CallbackContext
+	busyMutex                sync.Mutex
+	readyForNextBlockChan    chan bool
+	onceStart                sync.Once
+	onceStop                 sync.Once
+	syncPipelinedRequestNext int
 
 	// waitingForCurrentTipChan will process all the requests for the current tip until the channel
 	// is empty.
@@ -404,8 +405,8 @@ func (c *Client) Sync(intersectPoints []common.Point) error {
 	}
 
 	intersectResultChan, cancel := c.wantIntersectFound()
-	msg := NewMsgFindIntersect(intersectPoints)
-	if err := c.SendMessage(msg); err != nil {
+	msgFindIntersect := NewMsgFindIntersect(intersectPoints)
+	if err := c.SendMessage(msgFindIntersect); err != nil {
 		cancel()
 		return err
 	}
@@ -418,14 +419,14 @@ func (c *Client) Sync(intersectPoints []common.Point) error {
 		}
 	}
 
-	// Pipeline the initial block requests to speed things up a bit
-	// Using a value higher than 10 seems to cause problems with NtN
-	for i := 0; i <= c.config.PipelineLimit; i++ {
-		msg := NewMsgRequestNext()
-		if err := c.SendMessage(msg); err != nil {
-			return err
-		}
+	// Send initial RequestNext
+	msgRequestNext := NewMsgRequestNext()
+	if err := c.SendMessage(msgRequestNext); err != nil {
+		return err
 	}
+	// Reset pipelined message counter
+	c.syncPipelinedRequestNext = 0
+	// Start sync loop
 	go c.syncLoop()
 	return nil
 }
@@ -441,15 +442,23 @@ func (c *Client) syncLoop() {
 			return
 		}
 		c.busyMutex.Lock()
-		// Request the next block
-		// In practice we already have multiple block requests pipelined
-		// and this just adds another one to the pile
-		msg := NewMsgRequestNext()
-		if err := c.SendMessage(msg); err != nil {
-			c.SendError(err)
+		// Wait for next block if we have pipelined messages
+		if c.syncPipelinedRequestNext > 0 {
+			c.syncPipelinedRequestNext--
 			c.busyMutex.Unlock()
-			return
+			continue
+		}
+		// Request the next block(s)
+		msgCount := max(c.config.PipelineLimit, 1)
+		for i := 0; i < msgCount; i++ {
+			msg := NewMsgRequestNext()
+			if err := c.SendMessage(msg); err != nil {
+				c.SendError(err)
+				c.busyMutex.Unlock()
+				return
+			}
 		}
+		c.syncPipelinedRequestNext = msgCount - 1
 		c.busyMutex.Unlock()
 	}
 }
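Two details worth noting about the new loop: msgCount := max(c.config.PipelineLimit, 1) relies on the built-in max added in Go 1.21, and the lower bound of 1 means a single RequestNext is still sent even when pipelining is disabled (PipelineLimit of 0). The batch size itself comes from the client configuration; a hedged sketch of setting it follows (the chainsync.NewConfig and WithPipelineLimit names are assumed here, check the package's option functions for the exact spelling):

	// Assumed option name: configures the RequestNext batch size used by syncLoop
	cfg := chainsync.NewConfig(
		chainsync.WithPipelineLimit(10),
	)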
