@@ -311,8 +311,13 @@ func (rs ProduceResults) First() (*Record, error) {
311311// ProduceSync is a synchronous produce. See the [Produce] documentation for an
312312// in depth description of how producing works.
313313//
314- // This function simply produces all records in one range loop and waits for
315- // them all to be produced before returning.
314+ // This function produces all records and waits for them all to be produced
315+ // before returning. If the client has a non-zero linger configured, after all
316+ // records are enqueued, this function stops lingering and triggers an immediate
317+ // drain on all partitions that records were produced to. This avoids
318+ // unnecessarily waiting for linger timers when the caller is synchronously
319+ // waiting for results. Partitions that are lingering due to concurrent
320+ // [Produce] calls are not affected.
316321func (cl * Client ) ProduceSync (ctx context.Context , rs ... * Record ) ProduceResults {
317322 var (
318323 wg sync.WaitGroup
@@ -324,9 +329,76 @@ func (cl *Client) ProduceSync(ctx context.Context, rs ...*Record) ProduceResults
324329 )
325330
326331 wg .Add (len (rs ))
332+
333+ // After each Produce call for a known topic, the record's Partition
334+ // field is already set (see bufferRecord), allowing us to collect
335+ // which recBufs to unlinger without a second pass over the records.
336+ // We use a [16] base array to avoid heap allocation in the common
337+ // case, and linear dedup since the number of unique partitions is
338+ // typically small.
339+ //
340+ // We load partition data BEFORE calling Produce to avoid a data
341+ // race on r.Partition. If partitions exist before Produce,
342+ // partitionsForTopicProduce will also see them (partition counts
343+ // are monotonically increasing) and will partition the record
344+ // synchronously in bufferRecord, making r.Partition safe to read
345+ // after Produce returns. If pd is nil, we never read r.Partition,
346+ // avoiding a race with the metadata goroutine which partitions
347+ // unknownTopics records asynchronously.
348+ var (
349+ buf [16 ]* recBuf
350+ unlinger = buf [:0 ]
351+ topics topicsPartitionsData
352+
353+ lastTopic string
354+ lastPD * topicPartitionsData
355+ )
356+ if cl .cfg .linger > 0 {
357+ topics = cl .producer .topics .load ()
358+ }
359+
327360 for _ , r := range rs {
361+ var pd * topicPartitionsData
362+ if topics != nil {
363+ if r .Topic == "" || cl .cfg .defaultProduceTopicAlways {
364+ r .Topic = cl .cfg .defaultProduceTopic
365+ }
366+ if r .Topic == lastTopic {
367+ pd = lastPD
368+ } else if parts , ok := topics [r .Topic ]; ok {
369+ if v := parts .load (); len (v .partitions ) > 0 {
370+ pd = v
371+ }
372+ lastTopic = r .Topic
373+ lastPD = pd
374+ }
375+ }
376+
328377 cl .Produce (ctx , r , promise )
378+
379+ if pd == nil {
380+ continue
381+ }
382+ if int (r .Partition ) >= len (pd .partitions ) {
383+ continue
384+ }
385+ rb := pd .partitions [r .Partition ].records
386+ var seen bool
387+ for _ , have := range unlinger {
388+ if have == rb {
389+ seen = true
390+ break
391+ }
392+ }
393+ if ! seen {
394+ unlinger = append (unlinger , rb )
395+ }
329396 }
397+
398+ for _ , rb := range unlinger {
399+ rb .unlingerAndManuallyDrain ()
400+ }
401+
330402 wg .Wait ()
331403
332404 return results
@@ -594,7 +666,6 @@ type batchPromise struct {
594666 epoch int16
595667 attrs RecordAttrs
596668 beforeBuf bool
597- partition int32
598669 recs []promisedRec
599670 err error
600671}
@@ -632,7 +703,6 @@ start:
632703 } else {
633704 pr .Offset = b .baseOffset + int64 (i )
634705 }
635- pr .Partition = b .partition
636706 pr .ProducerID = b .pid
637707 pr .ProducerEpoch = b .epoch
638708 pr .Attrs = b .attrs