@@ -398,10 +398,11 @@ void canard_refcount_dec(canard_t* const self, const canard_bytes_t obj)
398398// hence it would not affect the ordering.
399399#define CAN_ID_MSb_BITS (29U - 7U)
400400
401- // The struct is manually packed to ensure it fits into a 128-byte O1Heap block in common embedded configurations.
401+ // The struct must fit into a 128-byte O1Heap block in common embedded configurations.
402402struct canard_txfer_t
403403{
404404 canard_tree_t index_pending [CANARD_IFACE_COUNT ];
405+ canard_tree_t index_deadline ;
405406 canard_listed_t list_agewise ;
406407
407408 // Constant transfer properties supplied by the client.
@@ -418,8 +419,7 @@ struct canard_txfer_t
418419 // Application context.
419420 canard_user_context_t user_context ;
420421};
421- static_assert ((CANARD_IFACE_COUNT > 2 ) || (sizeof (void * ) > 4 ) || (sizeof (void (* )(void )) > 4 ) ||
422- (sizeof (canard_txfer_t ) <= 120 ),
422+ static_assert ((CANARD_IFACE_COUNT > 2 ) || (sizeof (void * ) > 4 ) || (sizeof (canard_txfer_t ) <= 120 ),
423423 "On a 32-bit platform with a half-fit heap, the TX transfer object should fit in a 128-byte block" );
424424
425425static canard_txfer_t * txfer_new (canard_t * const self ,
@@ -434,12 +434,13 @@ static canard_txfer_t* txfer_new(canard_t* const self,
434434 FOREACH_IFACE (i ) {
435435 tr -> index_pending [i ] = TREE_NULL ;
436436 }
437- tr -> list_agewise = LIST_NULL ;
438- tr -> deadline = deadline ;
439- tr -> seqno = self -> tx .seqno ++ ;
440- tr -> transfer_id = transfer_id & CANARD_TRANSFER_ID_MAX ;
441- tr -> can_id_msb = (can_id_template >> (29U - CAN_ID_MSb_BITS )) & ((1U << CAN_ID_MSb_BITS ) - 1U );
442- tr -> fd = fd ? 1U : 0U ;
437+ tr -> index_deadline = TREE_NULL ;
438+ tr -> list_agewise = LIST_NULL ;
439+ tr -> deadline = deadline ;
440+ tr -> seqno = self -> tx .seqno ++ ;
441+ tr -> transfer_id = transfer_id & CANARD_TRANSFER_ID_MAX ;
442+ tr -> can_id_msb = (can_id_template >> (29U - CAN_ID_MSb_BITS )) & ((1U << CAN_ID_MSb_BITS ) - 1U );
443+ tr -> fd = fd ? 1U : 0U ;
443444 FOREACH_IFACE (i ) {
444445 tr -> head [i ] = NULL ;
445446 tr -> cursor [i ] = NULL ;
@@ -449,17 +450,11 @@ static canard_txfer_t* txfer_new(canard_t* const self,
449450 return tr ;
450451}
451452
452- static canard_prio_t txfer_prio (const canard_txfer_t * const tr )
453- {
454- return (canard_prio_t )((((unsigned )tr -> can_id_msb ) >> (CAN_ID_MSb_BITS - 3U )) & 7U );
455- }
456-
457453static bool txfer_is_pending (const canard_t * const self , const canard_txfer_t * const tr )
458454{
459455 FOREACH_IFACE (i ) {
460456 if (cavl2_is_inserted (self -> tx .pending [i ], & tr -> index_pending [i ])) {
461- CANARD_ASSERT (tr -> head [i ] != NULL );
462- CANARD_ASSERT (tr -> cursor [i ] != NULL );
457+ CANARD_ASSERT ((tr -> head [i ] != NULL ) && (tr -> cursor [i ] != NULL ));
463458 return true;
464459 }
465460 }
@@ -491,6 +486,16 @@ static int32_t tx_cavl_compare_pending_order(const void* const user, const canar
491486 return (lhs -> seqno < rhs -> seqno ) ? -1 : +1 ; // clang-format on
492487}
493488
489+ // Soonest to expire (smallest deadline) on the left, then smaller seqno on the left.
490+ static int32_t tx_cavl_compare_deadline (const void * const user , const canard_tree_t * const node )
491+ {
492+ const canard_txfer_t * const lhs = (const canard_txfer_t * )user ;
493+ const canard_txfer_t * const rhs = CAVL2_TO_OWNER (node , canard_txfer_t , index_deadline ); // clang-format off
494+ if (lhs -> deadline < rhs -> deadline ) { return -1 ; }
495+ if (lhs -> deadline > rhs -> deadline ) { return +1 ; }
496+ return (lhs -> seqno < rhs -> seqno ) ? -1 : +1 ; // clang-format on
497+ }
498+
494499static void tx_make_pending (canard_t * const self , canard_txfer_t * const tr )
495500{
496501 FOREACH_IFACE (i ) { // Enqueue for transmission unless it's there already (stalled interface?)
@@ -507,15 +512,12 @@ static void tx_make_pending(canard_t* const self, canard_txfer_t* const tr)
507512// Retire one transfer and release its resources.
508513static void txfer_retire (canard_t * const self , canard_txfer_t * const tr )
509514{
510- if (self -> tx .iter == tr ) {
511- self -> tx .iter = LIST_NEXT (tr , canard_txfer_t , list_agewise ); // May be NULL, is OK.
512- }
513515 FOREACH_IFACE (i ) {
514516 (void )cavl2_remove_if (& self -> tx .pending [i ], & tr -> index_pending [i ]);
515517 }
518+ CANARD_ASSERT (cavl2_is_inserted (self -> tx .deadline , & tr -> index_deadline ));
519+ cavl2_remove (& self -> tx .deadline , & tr -> index_deadline );
516520 delist (& self -> tx .agewise , & tr -> list_agewise );
517-
518- // Free the memory. The payload memory may already be empty depending on where we were invoked from.
519521 txfer_free_payload (self , tr );
520522 mem_free (self -> mem .tx_transfer , sizeof (canard_txfer_t ), tr );
521523}
@@ -714,6 +716,18 @@ static size_t tx_predict_frame_count(const size_t transfer_size, const size_t mt
714716 return ((transfer_size + CRC_SIZE_BYTES + bytes_per_frame ) - 1U ) / bytes_per_frame ; // rounding up
715717}
716718
719+ static void tx_expire (canard_t * const self , const canard_us_t now )
720+ {
721+ canard_txfer_t * tr = CAVL2_TO_OWNER (cavl2_min (self -> tx .deadline ), canard_txfer_t , index_deadline );
722+ while ((tr != NULL ) && (now > tr -> deadline )) {
723+ canard_txfer_t * const tr_next =
724+ CAVL2_TO_OWNER (cavl2_next_greater (& tr -> index_deadline ), canard_txfer_t , index_deadline );
725+ txfer_retire (self , tr );
726+ self -> err .tx_expiration ++ ;
727+ tr = tr_next ;
728+ }
729+ }
730+
717731// Enqueues a transfer for transmission.
718732static bool tx_push (canard_t * const self ,
719733 canard_txfer_t * const tr ,
@@ -726,6 +740,11 @@ static bool tx_push(canard_t* const self,
726740 CANARD_ASSERT ((!tr -> fd ) || !v0 ); // The caller must ensure this.
727741 CANARD_ASSERT (iface_bitmap != 0 );
728742
743+ const canard_us_t now = self -> vtable -> now (self );
744+
745+ // Expire old transfers first to free up queue space.
746+ tx_expire (self , now );
747+
729748 // Ensure the queue has enough space. v0 transfers always use Classic CAN regardless of tr->fd.
730749 const size_t mtu = tr -> fd ? CANARD_MTU_CAN_FD : CANARD_MTU_CAN_CLASSIC ;
731750 const size_t size = bytes_chain_size (payload );
@@ -770,6 +789,10 @@ static bool tx_push(canard_t* const self,
770789 }
771790
772791 // Register the transfer and schedule for transmission.
792+ const canard_tree_t * const deadline_tree = cavl2_find_or_insert (
793+ & self -> tx .deadline , tr , tx_cavl_compare_deadline , & tr -> index_deadline , cavl2_trivial_factory );
794+ CANARD_ASSERT (deadline_tree == & tr -> index_deadline );
795+ (void )deadline_tree ;
773796 enlist_tail (& self -> tx .agewise , & tr -> list_agewise );
774797 tx_make_pending (self , tr );
775798 return true;
@@ -781,21 +804,6 @@ static canard_txfer_t* tx_pending_node_to_transfer(const canard_tree_t* const no
781804 node , offsetof(canard_txfer_t , index_pending ) + (((size_t )iface_index ) * sizeof (canard_tree_t )));
782805}
783806
784- static void tx_expire_iterative (canard_t * const self , const canard_us_t now )
785- {
786- if (self -> tx .iter == NULL ) {
787- self -> tx .iter = LIST_HEAD (self -> tx .agewise , canard_txfer_t , list_agewise );
788- }
789- if (self -> tx .iter != NULL ) {
790- canard_txfer_t * const tr = self -> tx .iter ;
791- self -> tx .iter = LIST_NEXT (tr , canard_txfer_t , list_agewise );
792- if (now > tr -> deadline ) {
793- txfer_retire (self , tr );
794- self -> err .tx_expiration ++ ;
795- }
796- }
797- }
798-
799807static void tx_eject_pending (canard_t * const self , const byte_t iface_index )
800808{
801809 while (true) {
@@ -838,8 +846,9 @@ static void tx_eject_pending(canard_t* const self, const byte_t iface_index)
838846void canard_poll (canard_t * const self , const uint_least8_t tx_ready_iface_bitmap )
839847{
840848 if (self != NULL ) {
841- tx_expire_iterative (self , self -> vtable -> now (self )); // deadline maintenance first to keep queue pressure bounded
842- FOREACH_IFACE (i ) { // submit queued frames through all currently writable interfaces
849+ const canard_us_t now = self -> vtable -> now (self );
850+ tx_expire (self , now ); // deadline maintenance first to keep queue pressure bounded
851+ FOREACH_IFACE (i ) { // submit queued frames through all currently writable interfaces
843852 if ((tx_ready_iface_bitmap & (1U << i )) != 0U ) {
844853 tx_eject_pending (self , (byte_t )i );
845854 }