 static int init_reset(void);
 static inline struct ll_sync_set *sync_acquire(void);
-static void timeout_cleanup(struct ll_sync_set *sync);
+static void sync_ticker_cleanup(struct ll_sync_set *sync, ticker_op_func stop_of_cb);
 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
 		      uint32_t remainder, uint16_t lazy, uint8_t force,
 		      void *param);
@@ -72,6 +72,9 @@ static void *sync_free;
 static struct k_sem sem_ticker_cb;
 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
 
+static memq_link_t link_lll_prepare;
+static struct mayfly mfy_lll_prepare = {0, 0, &link_lll_prepare, NULL, lll_sync_prepare};
+
 uint8_t ll_sync_create(uint8_t options, uint8_t sid, uint8_t adv_addr_type,
 		       uint8_t *adv_addr, uint16_t skip,
 		       uint16_t sync_timeout, uint8_t sync_cte_type)
@@ -125,14 +128,10 @@ uint8_t ll_sync_create(uint8_t options, uint8_t sid, uint8_t adv_addr_type,
 		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
 	}
 
-	node_rx->link = link_sync_estab;
-	scan->per_scan.node_rx_estab = node_rx;
 	scan->per_scan.state = LL_SYNC_STATE_IDLE;
 	scan->per_scan.filter_policy = options & BIT(0);
 	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
 		scan_coded->per_scan.state = LL_SYNC_STATE_IDLE;
-		scan_coded->per_scan.node_rx_estab =
-			scan->per_scan.node_rx_estab;
 		scan_coded->per_scan.filter_policy =
 			scan->per_scan.filter_policy;
 	}
@@ -154,9 +153,9 @@ uint8_t ll_sync_create(uint8_t options, uint8_t sid, uint8_t adv_addr_type,
 	sync->skip = skip;
 	sync->timeout = sync_timeout;
 
-	/* TODO: Support for CTE type */
-
 	/* Initialize sync context */
+	node_rx->link = link_sync_estab;
+	sync->node_rx_sync_estab = node_rx;
 	sync->timeout_reload = 0U;
 	sync->timeout_expire = 0U;
@@ -171,6 +170,8 @@ uint8_t ll_sync_create(uint8_t options, uint8_t sid, uint8_t adv_addr_type,
 	lll_sync->skip_event = 0U;
 	lll_sync->window_widening_prepare_us = 0U;
 	lll_sync->window_widening_event_us = 0U;
+	lll_sync->cte_type = sync_cte_type;
+	lll_sync->filter_policy = scan->per_scan.filter_policy;
 
 	/* Reporting initially enabled/disabled */
 	lll_sync->is_rx_enabled = options & BIT(1);
@@ -235,7 +236,7 @@ uint8_t ll_sync_create_cancel(void **rx)
 		return BT_HCI_ERR_CMD_DISALLOWED;
 	}
 
-	node_rx = (void *)scan->per_scan.node_rx_estab;
+	node_rx = (void *)sync->node_rx_sync_estab;
 	link_sync_estab = node_rx->hdr.link;
 	link_sync_lost = sync->node_rx_lost.hdr.link;
@@ -465,19 +466,15 @@ void ull_sync_setup(struct ll_scan_set *scan, struct ll_scan_aux_set *aux,
 	sync_handle = ull_sync_handle_get(sync);
 
 	/* Prepare and dispatch sync notification */
-	rx = (void *)scan->per_scan.node_rx_estab;
+	rx = (void *)sync->node_rx_sync_estab;
 	rx->hdr.type = NODE_RX_TYPE_SYNC;
 	rx->hdr.handle = sync_handle;
 	rx->hdr.rx_ftr.param = scan;
 	se = (void *)rx->pdu;
-	se->status = BT_HCI_ERR_SUCCESS;
 	se->interval = interval;
 	se->phy = lll->phy;
 	se->sca = sca;
 
-	ll_rx_put(rx->hdr.link, rx);
-	ll_rx_sched();
-
 	/* Calculate offset and schedule sync radio events */
 	ftr = &node_rx->rx_ftr;
 	pdu = (void *)((struct node_rx_pdu *)node_rx)->pdu;
@@ -515,6 +512,8 @@ void ull_sync_setup(struct ll_scan_set *scan, struct ll_scan_aux_set *aux,
 	}
 	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);
 
+	mfy_lll_prepare.fp = lll_sync_create_prepare;
+
 	ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
 			   (TICKER_ID_SCAN_SYNC_BASE + sync_handle),
 			   ftr->ticks_anchor - ticks_slot_offset,
@@ -528,6 +527,52 @@ void ull_sync_setup(struct ll_scan_set *scan, struct ll_scan_aux_set *aux,
 		  (ret == TICKER_STATUS_BUSY));
 }
 
+void ull_sync_established_report(memq_link_t *link, struct node_rx_hdr *rx)
+{
+	struct node_rx_pdu *rx_establ;
+	struct ll_sync_set *ull_sync;
+	struct node_rx_ftr *ftr;
+	struct node_rx_sync *se;
+	struct lll_sync *lll;
+
+	ftr = &rx->rx_ftr;
+
+	/* Send the periodic advertising sync established report when the sync has the correct
+	 * CTE type, or when the CTE type is incorrect and the filter policy disallows continued scanning.
+	 */
+	if (ftr->sync_status != SYNC_STAT_READY) {
+		/* Set the sync handle corresponding to the LLL context passed in the node rx
+		 * footer field.
+		 */
+		lll = ftr->param;
+		ull_sync = HDR_LLL2ULL(lll);
+
+		/* Prepare and dispatch sync notification */
+		rx_establ = (void *)ull_sync->node_rx_sync_estab;
+		rx_establ->hdr.type = NODE_RX_TYPE_SYNC;
+		se = (void *)rx_establ->pdu;
+		se->status = (ftr->sync_status == SYNC_STAT_TERM) ?
+			     BT_HCI_ERR_UNSUPP_REMOTE_FEATURE :
+			     BT_HCI_ERR_SUCCESS;
+
+		ll_rx_put(rx_establ->hdr.link, rx_establ);
+		ll_rx_sched();
+	}
+
+	/* Handle the periodic advertising PDU and send the periodic advertising scan report
+	 * when the sync was found or was established in the past. The report is not sent if
+	 * scanning is terminated due to a wrong CTE type.
+	 */
+	if (ftr->sync_status != SYNC_STAT_TERM) {
+		/* Switch the sync event prepare function to the one responsible for regular PDU reception */
+		mfy_lll_prepare.fp = lll_sync_prepare;
+
+		/* Change the node type to appropriately handle the periodic advertising PDU report */
+		rx->type = NODE_RX_TYPE_SYNC_REPORT;
+		ull_scan_aux_setup(link, rx);
+	}
+}
+
 void ull_sync_done(struct node_rx_event_done *done)
 {
 	uint32_t ticks_drift_minus;
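Reviewer note: the two independent `if` checks in `ull_sync_established_report()` (rather than an `if`/`else`) give three distinct outcomes depending on `ftr->sync_status`. A minimal standalone sketch of that dispatch, assuming a third "just established" value alongside the `SYNC_STAT_READY` and `SYNC_STAT_TERM` values visible in this diff:

#include <stdio.h>

/* Hypothetical mirror of the controller's sync_status values; only READY and
 * TERM appear in this diff, SYNC_STAT_ESTAB is assumed for the first-sync case.
 */
enum sync_status { SYNC_STAT_ESTAB, SYNC_STAT_READY, SYNC_STAT_TERM };

static void established_report(enum sync_status status)
{
	/* Not READY: the host has not been notified yet, so send the sync
	 * established report (failure status when scanning is terminated).
	 */
	if (status != SYNC_STAT_READY) {
		printf("sync established report: %s\n",
		       (status == SYNC_STAT_TERM) ? "failure" : "success");
	}

	/* Not TERM: the received PDU is still reported to the host */
	if (status != SYNC_STAT_TERM) {
		printf("periodic advertising scan report\n");
	}
}

int main(void)
{
	established_report(SYNC_STAT_ESTAB); /* both notifications */
	established_report(SYNC_STAT_READY); /* scan report only */
	established_report(SYNC_STAT_TERM);  /* failure report only */
	return 0;
}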
@@ -543,84 +588,79 @@ void ull_sync_done(struct node_rx_event_done *done)
 	sync = CONTAINER_OF(done->param, struct ll_sync_set, ull);
 	lll = &sync->lll;
 
-	/* Events elapsed used in timeout checks below */
-	skip_event = lll->skip_event;
-	elapsed_event = skip_event + 1;
-
-	/* Sync drift compensation and new skip calculation
-	 */
-	ticks_drift_plus = 0U;
-	ticks_drift_minus = 0U;
-	if (done->extra.trx_cnt) {
-		/* Calculate drift in ticks unit */
-		ull_drift_ticks_get(done, &ticks_drift_plus,
-				    &ticks_drift_minus);
-
-		/* Enforce skip */
-		lll->skip_event = sync->skip;
-	}
-
-	/* Reset supervision countdown */
-	if (done->extra.crc_valid) {
-		sync->timeout_expire = 0U;
-	}
+	if (done->extra.sync_term) {
+		/* Stop the periodic advertising scan ticker */
+		sync_ticker_cleanup(sync, NULL);
+	} else {
+		/* Events elapsed used in timeout checks below */
+		skip_event = lll->skip_event;
+		elapsed_event = skip_event + 1;
+
+		/* Sync drift compensation and new skip calculation */
+		ticks_drift_plus = 0U;
+		ticks_drift_minus = 0U;
+		if (done->extra.trx_cnt) {
+			/* Calculate drift in ticks unit */
+			ull_drift_ticks_get(done, &ticks_drift_plus, &ticks_drift_minus);
+
+			/* Enforce skip */
+			lll->skip_event = sync->skip;
+		}
 
-	/* if anchor point not sync-ed, start timeout countdown, and break
-	 * skip if any.
-	 */
-	else {
-		if (!sync->timeout_expire) {
+		/* Reset supervision countdown */
+		if (done->extra.crc_valid) {
+			sync->timeout_expire = 0U;
+		}
+		/* If anchor point not sync-ed, start timeout countdown, and break skip if any */
+		else if (!sync->timeout_expire) {
 			sync->timeout_expire = sync->timeout_reload;
 		}
-	}
 
-	/* check timeout */
-	force = 0U;
-	if (sync->timeout_expire) {
-		if (sync->timeout_expire > elapsed_event) {
-			sync->timeout_expire -= elapsed_event;
+		/* check timeout */
+		force = 0U;
+		if (sync->timeout_expire) {
+			if (sync->timeout_expire > elapsed_event) {
+				sync->timeout_expire -= elapsed_event;
 
-			/* break skip */
-			lll->skip_event = 0U;
+				/* break skip */
+				lll->skip_event = 0U;
 
-			if (skip_event) {
-				force = 1U;
-			}
-		} else {
-			timeout_cleanup(sync);
+				if (skip_event) {
+					force = 1U;
+				}
+			} else {
+				sync_ticker_cleanup(sync, ticker_stop_op_cb);
 
-			return;
+				return;
+			}
 		}
-	}
 
-	/* check if skip needs update */
-	lazy = 0U;
-	if ((force) || (skip_event != lll->skip_event)) {
-		lazy = lll->skip_event + 1U;
-	}
+		/* Check if skip needs update */
+		lazy = 0U;
+		if ((force) || (skip_event != lll->skip_event)) {
+			lazy = lll->skip_event + 1U;
+		}
 
-	/* Update Sync ticker instance */
-	if (ticks_drift_plus || ticks_drift_minus || lazy || force) {
-		uint16_t sync_handle = ull_sync_handle_get(sync);
-		uint32_t ticker_status;
+		/* Update Sync ticker instance */
+		if (ticks_drift_plus || ticks_drift_minus || lazy || force) {
+			uint16_t sync_handle = ull_sync_handle_get(sync);
+			uint32_t ticker_status;
 
-		/* Call to ticker_update can fail under the race
-		 * condition where in the periodic sync role is being stopped
-		 * but at the same time it is preempted by periodic sync event
-		 * that gets into close state. Accept failure when periodic sync
-		 * role is being stopped.
-		 */
-		ticker_status = ticker_update(TICKER_INSTANCE_ID_CTLR,
-					      TICKER_USER_ID_ULL_HIGH,
-					      (TICKER_ID_SCAN_SYNC_BASE +
-					       sync_handle),
-					      ticks_drift_plus,
-					      ticks_drift_minus, 0, 0,
-					      lazy, force,
-					      ticker_update_sync_op_cb, sync);
-		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
-			  (ticker_status == TICKER_STATUS_BUSY) ||
-			  ((void *)sync == ull_disable_mark_get()));
+			/* A call to ticker_update can fail under the race condition
+			 * where the periodic sync role is being stopped but at the
+			 * same time is preempted by a periodic sync event that gets
+			 * into the close state. Accept failure when the periodic
+			 * sync role is being stopped.
+			 */
+			ticker_status =
+				ticker_update(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
+					      (TICKER_ID_SCAN_SYNC_BASE + sync_handle),
+					      ticks_drift_plus, ticks_drift_minus, 0, 0, lazy,
+					      force, ticker_update_sync_op_cb, sync);
+			LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
+				  (ticker_status == TICKER_STATUS_BUSY) ||
+				  ((void *)sync == ull_disable_mark_get()));
+		}
 	}
 }
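Reviewer note: the supervision-timeout handling that moves into the `else` branch above is unchanged in substance — the countdown runs in units of elapsed sync events, is armed when an anchor point is missed, and is disarmed by any CRC-valid reception. A self-contained model of just that countdown (names are illustrative, not the controller's):

#include <stdbool.h>
#include <stdint.h>

struct sync_timeout_model {
	uint16_t timeout_reload; /* supervision timeout, in periodic sync events */
	uint16_t timeout_expire; /* 0 while the countdown is not running */
};

/* Returns false once the sync must be considered lost (the real code then
 * stops the ticker via sync_ticker_cleanup()).
 */
static bool sync_event_done(struct sync_timeout_model *s, bool crc_valid,
			    uint16_t elapsed_event)
{
	if (crc_valid) {
		/* Reset supervision countdown */
		s->timeout_expire = 0U;
	} else if (!s->timeout_expire) {
		/* Anchor point not sync-ed: arm the countdown */
		s->timeout_expire = s->timeout_reload;
	}

	if (s->timeout_expire) {
		if (s->timeout_expire > elapsed_event) {
			s->timeout_expire -= elapsed_event;
		} else {
			return false; /* supervision timeout elapsed */
		}
	}

	return true;
}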
@@ -762,15 +802,14 @@ static inline struct ll_sync_set *sync_acquire(void)
 	return mem_acquire(&sync_free);
 }
 
-static void timeout_cleanup(struct ll_sync_set *sync)
+static void sync_ticker_cleanup(struct ll_sync_set *sync, ticker_op_func stop_of_cb)
 {
 	uint16_t sync_handle = ull_sync_handle_get(sync);
 	uint32_t ret;
 
 	/* Stop Periodic Sync Ticker */
 	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
-			  TICKER_ID_SCAN_SYNC_BASE + sync_handle,
-			  ticker_stop_op_cb, (void *)sync);
+			  TICKER_ID_SCAN_SYNC_BASE + sync_handle, stop_of_cb, (void *)sync);
 	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
 		  (ret == TICKER_STATUS_BUSY));
 }
@@ -779,8 +818,6 @@ static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
 		      uint32_t remainder, uint16_t lazy, uint8_t force,
 		      void *param)
 {
-	static memq_link_t link;
-	static struct mayfly mfy = {0, 0, &link, NULL, lll_sync_prepare};
 	static struct lll_prepare_param p;
 	struct ll_sync_set *sync = param;
 	struct lll_sync *lll;
@@ -801,11 +838,10 @@ static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
 	p.lazy = lazy;
 	p.force = force;
 	p.param = lll;
-	mfy.param = &p;
+	mfy_lll_prepare.param = &p;
 
 	/* Kick LLL prepare */
-	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
-			     TICKER_USER_ID_LLL, 0, &mfy);
+	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0, &mfy_lll_prepare);
 	LL_ASSERT(!ret);
 
 	DEBUG_RADIO_PREPARE_O(1);
@@ -814,7 +850,6 @@ static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
 static void ticker_op_cb(uint32_t status, void *param)
 {
 	ARG_UNUSED(param);
-
 	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
 }
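Reviewer note: taken together, the mayfly changes replace the function-local work item in `ticker_cb()` with the file-scope `mfy_lll_prepare`, so `ull_sync_setup()` can point it at `lll_sync_create_prepare` for the first sync event and `ull_sync_established_report()` can retarget it to `lll_sync_prepare` once the sync is established. A minimal sketch of that retargetable-work-item pattern, with a plain function call standing in for `mayfly_enqueue()` and simplified types in place of the controller's:

#include <stddef.h>
#include <stdio.h>

/* Reduced stand-in for struct mayfly: a one-shot work item carrying a
 * callback and its parameter.
 */
struct work_item {
	void (*fp)(void *param);
	void *param;
};

static void create_prepare(void *param)
{
	(void)param;
	printf("first event: lll_sync_create_prepare\n");
}

static void regular_prepare(void *param)
{
	(void)param;
	printf("later events: lll_sync_prepare\n");
}

/* File-scope, like mfy_lll_prepare: allocated once, retargeted between events */
static struct work_item prepare_work = { create_prepare, NULL };

static void on_ticker_expiry(void *lll)
{
	prepare_work.param = lll;
	prepare_work.fp(prepare_work.param); /* mayfly_enqueue() in the real code */
}

int main(void)
{
	on_ticker_expiry(NULL);            /* scheduled by ull_sync_setup() */
	prepare_work.fp = regular_prepare; /* done in ull_sync_established_report() */
	on_ticker_expiry(NULL);
	return 0;
}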