@@ -73,6 +73,9 @@ type BufferedSender struct {
7373 // Note that lockedMuxStream wraps the underlying grpc server stream, ensuring
7474 // thread safety.
7575 sender ServerStreamSender
76+ // sendBufSize is the number of items we pop from the queue at a time. Exposed for
77+ // testing.
78+ sendBufSize int
7679
7780 // queueMu protects the buffer queue.
7881 queueMu struct {
@@ -96,6 +99,8 @@ type BufferedSender struct {
9699 metrics * BufferedSenderMetrics
97100}
98101
102+ const defaultSendBufSize = 64
103+
99104type streamState int64
100105
101106const (
@@ -119,8 +124,9 @@ func NewBufferedSender(
119124 sender ServerStreamSender , settings * cluster.Settings , bsMetrics * BufferedSenderMetrics ,
120125) * BufferedSender {
121126 bs := & BufferedSender {
122- sender : sender ,
123- metrics : bsMetrics ,
127+ sendBufSize : defaultSendBufSize ,
128+ sender : sender ,
129+ metrics : bsMetrics ,
124130 }
125131 bs .notifyDataC = make (chan struct {}, 1 )
126132 bs .queueMu .buffer = newEventQueue ()
@@ -219,6 +225,8 @@ func (bs *BufferedSender) sendUnbuffered(ev *kvpb.MuxRangeFeedEvent) error {
219225func (bs * BufferedSender ) run (
220226 ctx context.Context , stopper * stop.Stopper , onError func (streamID int64 ),
221227) error {
228+ eventsBuf := make ([]sharedMuxEvent , 0 , bs .sendBufSize )
229+
222230 for {
223231 select {
224232 case <- ctx .Done ():
@@ -231,39 +239,47 @@ func (bs *BufferedSender) run(
231239 return nil
232240 case <- bs .notifyDataC :
233241 for {
234- e , success := bs .popFront ( )
235- if ! success {
242+ eventsBuf = bs .popEvents ( eventsBuf [: 0 ], bs .sendBufSize )
243+ if len ( eventsBuf ) == 0 {
236244 break
237245 }
238246
239- bs .metrics .BufferedSenderQueueSize .Dec (1 )
240- err := bs .sender .Send (e .ev )
241- e .alloc .Release (ctx )
242- if e .ev .Error != nil {
243- onError (e .ev .StreamID )
244- }
245- if err != nil {
246- return err
247+ bs .metrics .BufferedSenderQueueSize .Dec (int64 (len (eventsBuf )))
248+ for _ , evt := range eventsBuf {
249+ // TODO(ssd): This might be another location where we could transform
250+ // multiple events into BulkEvents. We can't just throw them all in a
251+ // bulk event though since we are processing events for different
252+ // streams here.
253+ err := bs .sender .Send (evt .ev )
254+ evt .alloc .Release (ctx )
255+ if evt .ev .Error != nil {
256+ onError (evt .ev .StreamID )
257+ }
258+ if err != nil {
259+ return err
260+ }
247261 }
262+ clear (eventsBuf ) // Clear so referenced MuxRangeFeedEvents can be GC'd.
248263 }
249264 }
250265 }
251266}
252267
253- // popFront pops the front event from the buffer queue. It returns the event and
254- // a boolean indicating if the event was successfully popped.
255- func (bs * BufferedSender ) popFront () (e sharedMuxEvent , success bool ) {
268+ // popEvents appends up to eventsToPop events into dest, returning the appended slice.
269+ func (bs * BufferedSender ) popEvents (dest []sharedMuxEvent , eventsToPop int ) []sharedMuxEvent {
256270 bs .queueMu .Lock ()
257271 defer bs .queueMu .Unlock ()
258- event , ok := bs .queueMu .buffer .popFront ()
259- if ok {
272+ dest = bs .queueMu .buffer .popFrontInto (dest , eventsToPop )
273+
274+ // Update accounting for everything we popped.
275+ for _ , event := range dest {
260276 state , streamFound := bs .queueMu .byStream [event .ev .StreamID ]
261277 if streamFound {
262278 state .queueItems --
263279 bs .queueMu .byStream [event .ev .StreamID ] = state
264280 }
265281 }
266- return event , ok
282+ return dest
267283}
268284
269285// addStream initializes the per-stream tracking for the given streamID.
0 commit comments