|
73 | 73 | #include "hal/debug.h" |
74 | 74 |
|
75 | 75 | static int init_reset(void); |
| 76 | +#if defined(CONFIG_BT_CTLR_RX_ENQUEUE_HOLD) |
| 77 | +static bool rx_hold_is_done(struct ll_conn *conn); |
| 78 | +static void rx_hold_flush(struct ll_conn *conn); |
| 79 | +#endif /* CONFIG_BT_CTLR_RX_ENQUEUE_HOLD */ |
76 | 80 | #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL) |
77 | 81 | static void tx_demux_sched(struct ll_conn *conn); |
78 | 82 | #endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */ |
@@ -1010,6 +1014,12 @@ int ull_conn_rx(memq_link_t *link, struct node_rx_pdu **rx) |
1010 | 1014 | return 0; |
1011 | 1015 | } |
1012 | 1016 |
|
| 1017 | +#if defined(CONFIG_BT_CTLR_RX_ENQUEUE_HOLD) |
| 1018 | + if (conn->llcp_rx_hold && rx_hold_is_done(conn)) { |
| 1019 | + rx_hold_flush(conn); |
| 1020 | + } |
| 1021 | +#endif /* CONFIG_BT_CTLR_RX_ENQUEUE_HOLD */ |
| 1022 | + |
1013 | 1023 | pdu_rx = (void *)(*rx)->pdu; |
1014 | 1024 |
|
1015 | 1025 | switch (pdu_rx->ll_id) { |
@@ -1379,6 +1389,16 @@ void ull_conn_done(struct node_rx_event_done *done) |
1379 | 1389 | return; |
1380 | 1390 | } |
1381 | 1391 |
|
| 1392 | +#if defined(CONFIG_BT_CTLR_RX_ENQUEUE_HOLD) |
| 1393 | + if (conn->llcp_rx_hold && rx_hold_is_done(conn)) { |
| 1394 | + rx_hold_flush(conn); |
| 1395 | + |
| 1396 | +#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL) |
| 1397 | + ll_rx_sched(); |
| 1398 | +#endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL */ |
| 1399 | + } |
| 1400 | +#endif /* CONFIG_BT_CTLR_RX_ENQUEUE_HOLD */ |
| 1401 | + |
1382 | 1402 | #if defined(CONFIG_BT_CTLR_LE_ENC) |
1383 | 1403 | /* Check authenticated payload expiry or MIC failure */ |
1384 | 1404 | switch (done->extra.mic_state) { |
@@ -1936,6 +1956,12 @@ void ull_conn_tx_ack(uint16_t handle, memq_link_t *link, struct node_tx *tx) |
1936 | 1956 | if (handle != LLL_HANDLE_INVALID) { |
1937 | 1957 | struct ll_conn *conn = ll_conn_get(handle); |
1938 | 1958 |
|
| 1959 | +#if defined(CONFIG_BT_CTLR_RX_ENQUEUE_HOLD) |
| 1960 | + if (conn->llcp_rx_hold && rx_hold_is_done(conn)) { |
| 1961 | + rx_hold_flush(conn); |
| 1962 | + } |
| 1963 | +#endif /* CONFIG_BT_CTLR_RX_ENQUEUE_HOLD */ |
| 1964 | + |
1939 | 1965 | #if defined(CONFIG_BT_LL_SW_LLCP_LEGACY) |
1940 | 1966 | ctrl_tx_ack(conn, &tx, pdu_tx); |
1941 | 1967 | #else /* CONFIG_BT_LL_SW_LLCP_LEGACY */ |
@@ -1964,6 +1990,14 @@ void ull_conn_tx_ack(uint16_t handle, memq_link_t *link, struct node_tx *tx) |
1964 | 1990 | pdu_tx->ll_id = PDU_DATA_LLID_RESV; |
1965 | 1991 | } else { |
1966 | 1992 | LL_ASSERT(handle != LLL_HANDLE_INVALID); |
| 1993 | + |
| 1994 | +#if defined(CONFIG_BT_CTLR_RX_ENQUEUE_HOLD) |
| 1995 | + struct ll_conn *conn = ll_conn_get(handle); |
| 1996 | + |
| 1997 | + if (conn->llcp_rx_hold && rx_hold_is_done(conn)) { |
| 1998 | + rx_hold_flush(conn); |
| 1999 | + } |
| 2000 | +#endif /* CONFIG_BT_CTLR_RX_ENQUEUE_HOLD */ |
1967 | 2001 | } |
1968 | 2002 |
|
1969 | 2003 | ll_tx_ack_put(handle, tx); |
@@ -2154,6 +2188,72 @@ static int init_reset(void) |
2154 | 2188 | return 0; |
2155 | 2189 | } |
2156 | 2190 |
|
| 2191 | +#if defined(CONFIG_BT_LL_SW_LLCP_LEGACY) |
/* Queue an rx node so its delivery towards the Thread context is
 * deferred ("held") until the control procedure's instant has been
 * reached, instead of enqueuing it immediately.
 *
 * conn - connection owning the hold list
 * link - memq link associated with the rx node
 * rx   - rx node to hold (forwarded immediately via ll_rx_put() when
 *        holding is disabled at build time)
 *
 * NOTE(review): definition is inside CONFIG_BT_LL_SW_LLCP_LEGACY while
 * some call sites appear guarded only by CONFIG_BT_CTLR_RX_ENQUEUE_HOLD —
 * presumably Kconfig enforces the dependency; confirm.
 */
static void rx_hold_put(struct ll_conn *conn, memq_link_t *link,
			struct node_rx_pdu *rx)
{
#if defined(CONFIG_BT_CTLR_RX_ENQUEUE_HOLD)
	struct node_rx_pdu *rx_last;
	struct lll_conn *lll;

	/* Terminate the node: link->mem doubles as the "next" pointer of
	 * the singly linked hold list.
	 */
	link->mem = NULL;
	rx->hdr.link = link;

	/* Walk to the tail of the current hold list, if any */
	rx_last = conn->llcp_rx_hold;
	while (rx_last && rx_last->hdr.link && rx_last->hdr.link->mem) {
		rx_last = rx_last->hdr.link->mem;
	}

	/* Append at the tail, or start a new hold list */
	if (rx_last) {
		rx_last->hdr.link->mem = rx;
	} else {
		conn->llcp_rx_hold = rx;
	}

	/* Request LLL to signal when the hold may be released; bump the
	 * request only when no request is already outstanding.
	 */
	lll = &conn->lll;
	if (lll->rx_hold_req == lll->rx_hold_ack) {
		lll->rx_hold_req++;
	}

#else /* !CONFIG_BT_CTLR_RX_ENQUEUE_HOLD */
	ARG_UNUSED(conn);

	/* Holding disabled: enqueue towards Thread immediately */
	ll_rx_put(link, rx);
#endif /* !CONFIG_BT_CTLR_RX_ENQUEUE_HOLD */
}
| 2224 | + |
| 2225 | +#if defined(CONFIG_BT_CTLR_RX_ENQUEUE_HOLD) |
| 2226 | +static bool rx_hold_is_done(struct ll_conn *conn) |
| 2227 | +{ |
| 2228 | + return ((conn->lll.rx_hold_req - |
| 2229 | + conn->lll.rx_hold_ack) & RX_HOLD_MASK) == RX_HOLD_ACK; |
| 2230 | +} |
| 2231 | + |
/* Flush every held rx node towards the Thread context and reset the
 * hold request/ack handshake so a new hold can be requested.
 *
 * Precondition: conn->llcp_rx_hold is non-NULL — the do-while
 * dereferences the list head unconditionally; all callers check
 * `conn->llcp_rx_hold && rx_hold_is_done(conn)` before invoking.
 */
static void rx_hold_flush(struct ll_conn *conn)
{
	struct node_rx_pdu *rx;
	struct lll_conn *lll;

	rx = conn->llcp_rx_hold;
	do {
		struct node_rx_hdr *hdr;

		/* traverse to next rx node; hdr->link->mem is the "next"
		 * pointer set up by rx_hold_put()
		 */
		hdr = &rx->hdr;
		rx = hdr->link->mem;

		/* enqueue rx node towards Thread */
		ll_rx_put(hdr->link, hdr);
	} while (rx);

	/* Hold list fully consumed; clear head and handshake state */
	conn->llcp_rx_hold = NULL;
	lll = &conn->lll;
	lll->rx_hold_req = 0U;
	lll->rx_hold_ack = 0U;
}
| 2254 | +#endif /* CONFIG_BT_CTLR_RX_ENQUEUE_HOLD */ |
| 2255 | +#endif /* CONFIG_BT_LL_SW_LLCP_LEGACY */ |
| 2256 | + |
2157 | 2257 | #if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL) |
2158 | 2258 | static void tx_demux_sched(struct ll_conn *conn) |
2159 | 2259 | { |
@@ -3121,14 +3221,21 @@ static inline int event_conn_upd_prep(struct ll_conn *conn, uint16_t lazy, |
3121 | 3221 | cu->interval = conn->llcp_cu.interval; |
3122 | 3222 | cu->latency = conn->llcp_cu.latency; |
3123 | 3223 | cu->timeout = conn->llcp_cu.timeout; |
| 3224 | + |
| 3225 | + /* hold node rx until the instant's anchor point sync */ |
| 3226 | + rx_hold_put(conn, rx->hdr.link, rx); |
| 3227 | + |
| 3228 | + if (!IS_ENABLED(CONFIG_BT_CTLR_RX_ENQUEUE_HOLD)) { |
| 3229 | + ll_rx_sched(); |
| 3230 | + } |
3124 | 3231 | } else { |
3125 | 3232 | /* Mark for buffer for release */ |
3126 | 3233 | rx->hdr.type = NODE_RX_TYPE_RELEASE; |
3127 | | - } |
3128 | 3234 |
|
3129 | | - /* enqueue rx node towards Thread */ |
3130 | | - ll_rx_put(rx->hdr.link, rx); |
3131 | | - ll_rx_sched(); |
| 3235 | + /* enqueue rx node towards Thread */ |
| 3236 | + ll_rx_put(rx->hdr.link, rx); |
| 3237 | + ll_rx_sched(); |
| 3238 | + } |
3132 | 3239 |
|
3133 | 3240 | #if defined(CONFIG_BT_CTLR_XTAL_ADVANCED) |
3134 | 3241 | /* restore to normal prepare */ |
@@ -4672,8 +4779,8 @@ static inline void event_phy_upd_ind_prep(struct ll_conn *conn, |
4672 | 4779 | upd->tx = lll->phy_tx; |
4673 | 4780 | upd->rx = lll->phy_rx; |
4674 | 4781 |
|
4675 | | - /* enqueue rx node towards Thread */ |
4676 | | - ll_rx_put(rx->hdr.link, rx); |
| 4782 | + /* hold node rx until the instant's anchor point sync */ |
| 4783 | + rx_hold_put(conn, rx->hdr.link, rx); |
4677 | 4784 |
|
4678 | 4785 | #if defined(CONFIG_BT_CTLR_DATA_LENGTH) |
4679 | 4786 | /* get a rx node for ULL->LL */ |
@@ -4716,11 +4823,13 @@ static inline void event_phy_upd_ind_prep(struct ll_conn *conn, |
4716 | 4823 | lr->max_rx_time = sys_cpu_to_le16(lll->max_rx_time); |
4717 | 4824 | lr->max_tx_time = sys_cpu_to_le16(lll->max_tx_time); |
4718 | 4825 |
|
4719 | | - /* enqueue rx node towards Thread */ |
4720 | | - ll_rx_put(rx->hdr.link, rx); |
| 4826 | + /* hold node rx until the instant's anchor point sync */ |
| 4827 | + rx_hold_put(conn, rx->hdr.link, rx); |
4721 | 4828 | #endif /* CONFIG_BT_CTLR_DATA_LENGTH */ |
4722 | 4829 |
|
4723 | | - ll_rx_sched(); |
| 4830 | + if (!IS_ENABLED(CONFIG_BT_CTLR_RX_ENQUEUE_HOLD)) { |
| 4831 | + ll_rx_sched(); |
| 4832 | + } |
4724 | 4833 | } |
4725 | 4834 | } |
4726 | 4835 | #endif /* CONFIG_BT_CTLR_PHY */ |
|
0 commit comments