@@ -117,6 +117,9 @@ static void init_set(struct ll_adv_set *adv);
 
 static struct ll_adv_set ll_adv[BT_CTLR_ADV_SET];
 
+static uint8_t ticker_update_req;
+static uint8_t ticker_update_ack;
+
 #if defined(CONFIG_BT_TICKER_EXT)
 static struct ticker_ext ll_adv_ticker_ext[BT_CTLR_ADV_SET];
 #endif /* CONFIG_BT_TICKER_EXT */
@@ -1965,9 +1968,6 @@ static uint32_t ticker_update_rand(struct ll_adv_set *adv, uint32_t ticks_delay_
 		  (ret == TICKER_STATUS_BUSY) ||
 		  (fp_op_func == NULL));
 
-#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
-	adv->delay = random_delay;
-#endif
 	return random_delay;
 }
 
@@ -2017,8 +2017,9 @@ void ull_adv_done(struct node_rx_event_done *done)
 
 	/* Check if we have enough time to re-schedule */
 	if (delay_remain > prepare_overhead) {
-		uint32_t ticks_adjust_minus;
 		uint32_t interval_us = adv->interval * ADV_INT_UNIT_US;
+		uint32_t ticks_adjust_minus;
+		uint32_t random_delay;
 
 		/* Get negative ticker adjustment needed to pull back ADV one
 		 * interval plus the randomized delay. This means that the ticker
@@ -2043,10 +2044,12 @@ void ull_adv_done(struct node_rx_event_done *done)
 		 * ticker_stop, e.g. from ull_periph_setup. This is not a problem
 		 * and we can safely ignore the operation result.
 		 */
-		ticker_update_rand(adv, delay_remain - prepare_overhead,
-				   prepare_overhead, ticks_adjust_minus, NULL);
+		random_delay = ticker_update_rand(adv, delay_remain - prepare_overhead,
+						  prepare_overhead, ticks_adjust_minus,
+						  NULL);
 
 		/* Delay from ticker_update_rand is in addition to the last random delay */
+		adv->delay = random_delay;
 		adv->delay += adv->delay_at_expire;
 
 		/* Score of the event was increased due to the result, but since
@@ -2430,11 +2433,31 @@ static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
 	if (!lll->is_hdcd)
 #endif /* CONFIG_BT_PERIPHERAL */
 	{
-		/* Apply random delay in range [0..ULL_ADV_RANDOM_DELAY] */
-		random_delay = ticker_update_rand(adv, ULL_ADV_RANDOM_DELAY,
-						  0, 0, ticker_update_op_cb);
+		if (IS_ENABLED(CONFIG_BT_CTLR_JIT_SCHEDULING) ||
+		    (ticker_update_req == ticker_update_ack)) {
+			/* Ticker update requested */
+			ticker_update_req++;
+
+			/* Apply random delay in range [0..ULL_ADV_RANDOM_DELAY] */
+			random_delay = ticker_update_rand(adv, ULL_ADV_RANDOM_DELAY, 0U, 0U,
+							  ticker_update_op_cb);
+#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
+			adv->delay = random_delay;
+#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
+		} else {
+			random_delay = 0U;
+		}
 
 #if defined(CONFIG_BT_CTLR_ADV_EXT)
+		uint16_t event_counter_inc;
+
+		if (lazy == TICKER_LAZY_MUST_EXPIRE) {
+			lazy = 0U;
+			event_counter_inc = 0U;
+		} else {
+			event_counter_inc = (lazy + 1U);
+		}
+
 		if (adv->remain_duration_us && adv->event_counter > 0U) {
 #if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
 			/* ticks_drift is always 0 with JIT scheduling, populate manually */
@@ -2453,7 +2476,7 @@ static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
 			}
 		}
 
-		adv->event_counter += (lazy + 1U);
+		adv->event_counter += event_counter_inc;
 #endif /* CONFIG_BT_CTLR_ADV_EXT */
 	}
 
@@ -2462,6 +2485,9 @@ static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
 
 static void ticker_update_op_cb(uint32_t status, void *param)
 {
+	/* Reset update requested */
+	ticker_update_ack = ticker_update_req;
+
 #if defined(CONFIG_BT_PERIPHERAL) && (defined(CONFIG_BT_ASSERT) || defined(CONFIG_ASSERT))
 	struct ll_adv_set *adv = param;
 	struct pdu_adv *pdu = lll_adv_data_peek(&adv->lll);
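
The diff introduces a request/acknowledge counter pair (ticker_update_req/ticker_update_ack): ticker_cb() only issues a new ticker_update_rand() when the previous asynchronous update has been acknowledged in ticker_update_op_cb(), and otherwise uses a random_delay of 0. The TICKER_LAZY_MUST_EXPIRE handling likewise adds event_counter_inc (zero in the must-expire case) instead of (lazy + 1U), so a forced expiry does not advance adv->event_counter. Below is a minimal, self-contained sketch of the req/ack guard pattern only; it is not the Zephyr code itself, and schedule_update()/update_done() are hypothetical stand-ins for ticker_update_rand() and ticker_update_op_cb().

/* Sketch of the request/acknowledge guard used above (assumed simplification,
 * not the actual controller code).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t ticker_update_req;
static uint8_t ticker_update_ack;

/* Caller side: only request a new update when the previous one was acked. */
static bool schedule_update(void)
{
	if (ticker_update_req != ticker_update_ack) {
		/* Previous update still in flight, skip this request */
		return false;
	}

	ticker_update_req++;
	/* ... issue the asynchronous ticker update here ... */
	return true;
}

/* Completion callback side: mark the outstanding request as done. */
static void update_done(void)
{
	ticker_update_ack = ticker_update_req;
}

int main(void)
{
	printf("first request accepted: %d\n", schedule_update());  /* prints 1 */
	printf("second request accepted: %d\n", schedule_update()); /* prints 0 */
	update_done();
	printf("third request accepted: %d\n", schedule_update());  /* prints 1 */
	return 0;
}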