@@ -328,6 +328,127 @@ static isoal_status_t isoal_rx_allocate_sdu(struct isoal_sink *sink,
 	return err;
 }
 
+/**
+ * @brief  Depending on whether buffering (ISOAL_BUFFER_RX_SDUS_ENABLE) is
+ *         enabled, this will either buffer and collate information for the
+ *         SDU across all fragments before emitting the batch of fragments,
+ *         or immediately release the fragment.
+ * @param  sink       Points to the sink context structure
+ * @param  end_of_sdu Indicates if this is the end fragment of an SDU or a
+ *                    forced release on an error
+ * @return            Status of the operation
+ */
+static isoal_status_t isoal_rx_buffered_emit_sdu(struct isoal_sink *sink, bool end_of_sdu)
+{
+	struct isoal_emitted_sdu_frag sdu_frag;
+	struct isoal_emitted_sdu sdu_status;
+	struct isoal_sink_session *session;
+	struct isoal_sdu_production *sp;
+	struct isoal_sdu_produced *sdu;
+	bool emit_sdu_current;
+	isoal_status_t err;
+
+	err = ISOAL_STATUS_OK;
+	session = &sink->session;
+	sp = &sink->sdu_production;
+	sdu = &sp->sdu;
+
+	/* Initialize current SDU fragment buffer */
+	sdu_frag.sdu_state = sp->sdu_state;
+	sdu_frag.sdu_frag_size = sp->sdu_written;
+	sdu_frag.sdu = *sdu;
+
+	sdu_status.total_sdu_size = sdu_frag.sdu_frag_size;
+	sdu_status.collated_status = sdu_frag.sdu.status;
+	emit_sdu_current = true;
+
+#if defined(ISOAL_BUFFER_RX_SDUS_ENABLE)
+	uint16_t next_write_indx;
+	bool sdu_list_empty;
+	bool emit_sdu_list;
+	bool sdu_list_max;
+	bool sdu_list_err;
+
+	next_write_indx = sp->sdu_list.next_write_indx;
+	sdu_list_max = (next_write_indx >= CONFIG_BT_CTLR_ISO_RX_SDU_BUFFERS);
+	sdu_list_empty = (next_write_indx == 0);
+
+	/* There is an error in the sequence of SDUs if the current SDU
+	 * fragment is not an end fragment and either the list is at capacity
+	 * or the current fragment is not a continuation (i.e. it is the start
+	 * of a new SDU).
+	 */
+	sdu_list_err = !end_of_sdu &&
+		       (sdu_list_max ||
+			(!sdu_list_empty && sdu_frag.sdu_state != BT_ISO_CONT));
+
+	/* Release the current fragment if it is the end of the SDU or if it
+	 * is not the starting fragment of a multi-fragment SDU.
+	 */
+	emit_sdu_current = end_of_sdu || (sdu_list_empty && sdu_frag.sdu_state != BT_ISO_START);
+
+	/* Flush the buffered SDUs if this is an end fragment, either on
+	 * account of reaching the end of the SDU or on account of an error,
+	 * or if there is an error in the sequence of buffered fragments.
+	 */
+	emit_sdu_list = emit_sdu_current || sdu_list_err;
+
+	/* Total size is cleared if the current fragment is not being emitted
+	 * or if there is an error in the sequence of fragments.
+	 */
+	if (!emit_sdu_current || sdu_list_err) {
+		sdu_status.total_sdu_size = 0;
+		sdu_status.collated_status = (sdu_list_err ? ISOAL_SDU_STATUS_LOST_DATA :
+							     ISOAL_SDU_STATUS_VALID);
+	}
+
+	if (emit_sdu_list && next_write_indx > 0) {
+		if (!sdu_list_err) {
+			/* Collated information is not reliable if there is an
+			 * error in the sequence of the fragments.
+			 */
+			for (uint8_t i = 0; i < next_write_indx; i++) {
+				sdu_status.total_sdu_size +=
+					sp->sdu_list.list[i].sdu_frag_size;
+				if (sp->sdu_list.list[i].sdu.status == ISOAL_SDU_STATUS_LOST_DATA ||
+				    sdu_status.collated_status == ISOAL_SDU_STATUS_LOST_DATA) {
+					sdu_status.collated_status = ISOAL_SDU_STATUS_LOST_DATA;
+				} else {
+					sdu_status.collated_status |=
+						sp->sdu_list.list[i].sdu.status;
+				}
+			}
+		}
+
+		for (uint8_t i = 0; i < next_write_indx; i++) {
+			err |= session->sdu_emit(sink, &sp->sdu_list.list[i],
+						 &sdu_status);
+		}
+
+		next_write_indx = sp->sdu_list.next_write_indx = 0;
+	}
+#endif /* ISOAL_BUFFER_RX_SDUS_ENABLE */
+
+	if (emit_sdu_current) {
+		if (sdu_frag.sdu_state == BT_ISO_SINGLE) {
+			sdu_status.total_sdu_size = sdu_frag.sdu_frag_size;
+			sdu_status.collated_status = sdu_frag.sdu.status;
+		}
+
+		err |= session->sdu_emit(sink, &sdu_frag, &sdu_status);
+
+#if defined(ISOAL_BUFFER_RX_SDUS_ENABLE)
+	} else if (next_write_indx < CONFIG_BT_CTLR_ISO_RX_SDU_BUFFERS) {
+		sp->sdu_list.list[next_write_indx++] = sdu_frag;
+		sp->sdu_list.next_write_indx = next_write_indx;
+#endif /* ISOAL_BUFFER_RX_SDUS_ENABLE */
+	} else {
+		/* Unreachable */
+		LL_ASSERT(0);
+	}
+
+	return err;
+}
+
 static isoal_status_t isoal_rx_try_emit_sdu(struct isoal_sink *sink, bool end_of_sdu)
 {
 	struct isoal_sdu_production *sp;
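
Note on the collation above: when the buffered list is flushed, the fragment sizes are summed into total_sdu_size and the per-fragment statuses are merged into collated_status, with ISOAL_SDU_STATUS_LOST_DATA dominating any other value and the remaining status bits simply OR-ed together. A minimal standalone sketch of that merging rule, using stand-in status codes rather than the controller's ISOAL_SDU_STATUS_* definitions:

/* Illustrative only: mirrors the status/size collation loop above with
 * stand-in status codes; the real ISOAL_SDU_STATUS_* values live in the
 * controller headers.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define STATUS_VALID     0x00 /* stand-in for ISOAL_SDU_STATUS_VALID */
#define STATUS_ERRORS    0x01 /* stand-in for a recoverable error status */
#define STATUS_LOST_DATA 0x02 /* stand-in for ISOAL_SDU_STATUS_LOST_DATA */

int main(void)
{
	const uint8_t frag_status[] = { STATUS_VALID, STATUS_ERRORS, STATUS_VALID };
	const uint16_t frag_size[] = { 40, 40, 23 };
	uint8_t collated = STATUS_VALID;
	uint32_t total = 0;

	for (size_t i = 0; i < 3; i++) {
		total += frag_size[i];
		if (frag_status[i] == STATUS_LOST_DATA || collated == STATUS_LOST_DATA) {
			/* Lost data dominates every other status */
			collated = STATUS_LOST_DATA;
		} else {
			/* Otherwise error bits accumulate across fragments */
			collated |= frag_status[i];
		}
	}

	printf("total=%" PRIu32 " collated=0x%02x\n", total, (unsigned int)collated);

	return 0;
}

With the sample fragments this prints total=103 collated=0x01; replacing any entry with STATUS_LOST_DATA pins the collated status to lost data.
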
@@ -369,9 +490,8 @@ static isoal_status_t isoal_rx_try_emit_sdu(struct isoal_sink *sink, bool end_of
 		break;
 	}
 	sdu->status = sp->sdu_status;
-	struct isoal_sink_session *session = &sink->session;
 
-	err = session->sdu_emit(sink, sdu);
+	err = isoal_rx_buffered_emit_sdu(sink, end_of_sdu);
 	sp->sdu_allocated = false;
 
 	/* update next state */
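
For context, the second hunk routes isoal_rx_try_emit_sdu through isoal_rx_buffered_emit_sdu instead of calling session->sdu_emit directly, so the session's emit callback now receives the released fragment (struct isoal_emitted_sdu_frag) together with the collated SDU information (struct isoal_emitted_sdu). A rough sketch of a sink-side callback shaped after the call sites above; only the parameter types come from the diff, while the function name, const qualifiers and body are assumptions, and it presumes the ISO-AL header is in scope:

/* Sketch only: parameter shape is taken from the call sites above
 * (session->sdu_emit(sink, &sdu_frag, &sdu_status)); everything else,
 * including the name and const qualifiers, is assumed.
 */
static isoal_status_t example_sdu_emit(const struct isoal_sink *sink,
				       const struct isoal_emitted_sdu_frag *sdu_frag,
				       const struct isoal_emitted_sdu *sdu_status)
{
	(void)sink; /* not needed in this sketch */

	/* Per-fragment data: sdu_frag->sdu_frag_size bytes described by
	 * sdu_frag->sdu; whole-SDU view: sdu_status->total_sdu_size and
	 * sdu_status->collated_status.
	 */
	if (sdu_status->collated_status != ISOAL_SDU_STATUS_VALID) {
		/* e.g. drop or flag the SDU once its collated status reports
		 * lost data or errors
		 */
	}

	return ISOAL_STATUS_OK;
}

Such a callback would be the one registered when the sink is set up; the registration path itself is outside this change.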