diff --git a/CODEOWNERS b/CODEOWNERS
index aef6aafc43..b644a8b1b3 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -43,6 +43,7 @@
 /lib/ble_qwr/ @nrfconnect/ncs-bm
 /lib/ble_racp/ @nrfconnect/ncs-bm
 /lib/bm_buttons/ @nrfconnect/ncs-bm
+/lib/bm_queue/ @nrfconnect/ncs-bm
 /lib/bm_timer/ @nrfconnect/ncs-bm
 /lib/boot_banner/ @nrfconnect/ncs-bm
 /lib/event_scheduler/ @nrfconnect/ncs-bm
diff --git a/include/bm_queue.h b/include/bm_queue.h
new file mode 100644
index 0000000000..b56e4ca2f5
--- /dev/null
+++ b/include/bm_queue.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2016, Wind River Systems, Inc.
+ * Copyright (c) 2025, Nordic Semiconductor ASA
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <zephyr/sys/slist.h>
+#include <zephyr/sys/sflist.h>
+
+/**
+ * @brief Queue.
+ */
+struct bm_queue {
+	sys_sflist_t data_q;
+};
+
+/**
+ * @brief Initialize a queue.
+ *
+ * This routine initializes a queue, prior to its first use.
+ *
+ * @param queue Queue.
+ */
+void bm_queue_init(struct bm_queue *queue);
+
+/**
+ * @brief Append an element to the end of a queue.
+ *
+ * This routine appends a data item to @a queue. A queue data item must be
+ * aligned on a word boundary, and the first word of the item is reserved
+ * for internal use. The data is not copied.
+ *
+ * @funcprops \isr_ok
+ *
+ * @param queue Queue.
+ * @param data Data item.
+ */
+void bm_queue_append(struct bm_queue *queue, void *data);
+
+/**
+ * @brief Allocate an element and append it to the queue.
+ *
+ * This routine appends a data item to @a queue. There is an implicit memory
+ * allocation on the system heap to create an additional temporary bookkeeping
+ * data structure, which is automatically freed when the item is removed.
+ * The data itself is not copied.
+ *
+ * @funcprops \isr_ok
+ *
+ * @param queue Queue.
+ * @param data Data item.
+ *
+ * @retval 0 on success.
+ * @retval -ENOMEM if there isn't sufficient RAM in the system heap.
+ */
+int32_t bm_queue_alloc_append(struct bm_queue *queue, void *data);
+
+/**
+ * @brief Prepend an element to the queue.
+ *
+ * This routine prepends a data item to @a queue. A queue data item must be
+ * aligned on a word boundary, and the first word of the item is reserved
+ * for internal use. The data is not copied.
+ *
+ * @funcprops \isr_ok
+ *
+ * @param queue Queue.
+ * @param data Data item.
+ */
+void bm_queue_prepend(struct bm_queue *queue, void *data);
+
+/**
+ * @brief Allocate an element and prepend it to a queue.
+ *
+ * This routine prepends a data item to @a queue. There is an implicit memory
+ * allocation to create an additional temporary bookkeeping data structure from
+ * the system heap, which is automatically freed when the item is removed.
+ * The data itself is not copied.
+ *
+ * @funcprops \isr_ok
+ *
+ * @param queue Queue.
+ * @param data Data item.
+ *
+ * @retval 0 on success.
+ * @retval -ENOMEM if there isn't sufficient RAM in the system heap.
+ */
+int32_t bm_queue_alloc_prepend(struct bm_queue *queue, void *data);
+
+/**
+ * @brief Insert an element at a given position in the queue.
+ *
+ * This routine inserts a data item into @a queue after the @a prev item.
+ * A queue data item must be aligned on a word boundary, and the first word
+ * of the item is reserved for internal use.
+ *
+ * @funcprops \isr_ok
+ *
+ * @param queue Queue.
+ * @param prev Previous element.
+ * @param data Data item.
+ */
+void bm_queue_insert(struct bm_queue *queue, void *prev, void *data);
+
+/**
+ * @brief Atomically append a list of elements to a queue.
+ *
+ * This routine adds a list of data items to @a queue in one operation.
+ * The data items must be in a singly-linked list, with the first word
+ * in each data item pointing to the next data item; the list must be
+ * NULL-terminated.
+ *
+ * @funcprops \isr_ok
+ *
+ * @param queue Queue.
+ * @param head Pointer to the first node of the singly-linked list.
+ * @param tail Pointer to the last node of the singly-linked list.
+ *
+ * @retval 0 on success.
+ * @retval -EINVAL if @a head or @a tail is NULL.
+ */
+int bm_queue_append_list(struct bm_queue *queue, void *head, void *tail);
+
+/**
+ * @brief Atomically add a list of elements to a queue.
+ *
+ * This routine adds a list of data items to @a queue in one operation.
+ * The data items must be in a singly-linked list implemented using a
+ * sys_slist_t object. Upon completion, the original list is empty.
+ *
+ * @funcprops \isr_ok
+ *
+ * @param queue Queue.
+ * @param list Pointer to sys_slist_t object.
+ *
+ * @retval 0 on success.
+ * @retval -EINVAL if @a list is empty.
+ */
+int bm_queue_merge_slist(struct bm_queue *queue, sys_slist_t *list);
+
+/**
+ * @brief Get an element from a queue.
+ *
+ * This routine removes the first data item from @a queue. The first word of
+ * the data item is reserved for internal use.
+ *
+ * @funcprops \isr_ok
+ *
+ * @param queue Queue.
+ *
+ * @return Address of the data item if successful; NULL otherwise.
+ */
+void *bm_queue_get(struct bm_queue *queue);
+
+/**
+ * @brief Remove an element from a queue.
+ *
+ * This routine removes the specified data item from @a queue. The first word
+ * of the data item is reserved for internal use. Removal relies on
+ * sys_sflist_find_and_remove(), which is not a constant-time operation.
+ *
+ * @funcprops \isr_ok
+ *
+ * @param queue Queue.
+ * @param data Data item.
+ *
+ * @return true if the data item was removed, false otherwise.
+ */
+bool bm_queue_remove(struct bm_queue *queue, void *data);
+
+/**
+ * @brief Append an element to a queue only if it's not present already.
+ *
+ * This routine appends the data item to @a queue. The first word of the data
+ * item is reserved for internal use. The duplicate check is a linear scan of
+ * the queue, which is not a constant-time operation.
+ *
+ * @funcprops \isr_ok
+ *
+ * @param queue Queue.
+ * @param data Data item.
+ *
+ * @return true if the data item was added, false if it was already present.
+ */
+bool bm_queue_unique_append(struct bm_queue *queue, void *data);
+
+/**
+ * @brief Query a queue to see if it has data available.
+ *
+ * @funcprops \isr_ok
+ *
+ * @param queue Queue.
+ *
+ * @return true if the queue is empty.
+ * @return false if data is available.
+ */
+static inline bool bm_queue_is_empty(struct bm_queue *queue)
+{
+	return sys_sflist_is_empty(&queue->data_q);
+}
+
+/**
+ * @brief Peek the element at the head of the queue.
+ *
+ * Return the element at the head of @a queue without removing it.
+ *
+ * @param queue Queue.
+ *
+ * @return Head element, or NULL if the queue is empty.
+ */
+void *bm_queue_peek_head(struct bm_queue *queue);
+
+/**
+ * @brief Peek the element at the tail of the queue.
+ *
+ * Return the element at the tail of @a queue without removing it.
+ *
+ * @param queue Queue.
+ *
+ * @return Tail element, or NULL if the queue is empty.
+ */
+void *bm_queue_peek_tail(struct bm_queue *queue);
diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
index 86603fc141..e0294580a8 100644
--- a/lib/CMakeLists.txt
+++ b/lib/CMakeLists.txt
@@ -11,6 +11,7 @@ add_subdirectory_ifdef(CONFIG_BLE_RACP ble_racp)
 add_subdirectory_ifdef(CONFIG_EVENT_SCHEDULER event_scheduler)
 add_subdirectory_ifdef(CONFIG_BM_BUTTONS bm_buttons)
 add_subdirectory_ifdef(CONFIG_BM_TIMER bm_timer)
+add_subdirectory_ifdef(CONFIG_BM_QUEUE bm_queue)
 add_subdirectory_ifdef(CONFIG_BLE_QWR ble_qwr)
 add_subdirectory_ifdef(CONFIG_SENSORSIM sensorsim)
 add_subdirectory_ifdef(CONFIG_NCS_BARE_METAL_BOOT_BANNER boot_banner)
diff --git a/lib/Kconfig b/lib/Kconfig
index f077394c80..d1318b628f 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -12,6 +12,7 @@ rsource "ble_racp/Kconfig"
 rsource "event_scheduler/Kconfig"
 rsource "bm_buttons/Kconfig"
 rsource "bm_timer/Kconfig"
+rsource "bm_queue/Kconfig"
 rsource "ble_qwr/Kconfig"
 rsource "sensorsim/Kconfig"
 rsource "boot_banner/Kconfig"
diff --git a/lib/bm_queue/CMakeLists.txt b/lib/bm_queue/CMakeLists.txt
new file mode 100644
index 0000000000..a41a52d7fe
--- /dev/null
+++ b/lib/bm_queue/CMakeLists.txt
@@ -0,0 +1,7 @@
+#
+# Copyright (c) 2025 Nordic Semiconductor ASA
+#
+# SPDX-License-Identifier: LicenseRef-Nordic-5-Clause
+#
+zephyr_library()
+zephyr_library_sources(bm_queue.c)
diff --git a/lib/bm_queue/Kconfig b/lib/bm_queue/Kconfig
new file mode 100644
index 0000000000..81067ab23f
--- /dev/null
+++ b/lib/bm_queue/Kconfig
@@ -0,0 +1,24 @@
+#
+# Copyright (c) 2025 Nordic Semiconductor
+#
+# SPDX-License-Identifier: LicenseRef-Nordic-5-Clause
+#
+menuconfig BM_QUEUE
+	bool "Queue library"
+	help
+	  A simple, dynamic queue using singly linked lists.
+
+if BM_QUEUE
+
+config BM_QUEUE_K_QUEUE_COMPAT
+	bool "Export k_queue compatible API"
+	depends on !MULTITHREADING
+	help
+	  Export a k_queue-compatible API so that Zephyr subsystems which use
+	  k_queue can run on top of bm_queue.
+
+module=BM_QUEUE
+module-dep=LOG
+module-str=Queue library
+source "${ZEPHYR_BASE}/subsys/logging/Kconfig.template.log_config"
+
+endif
diff --git a/lib/bm_queue/bm_queue.c b/lib/bm_queue/bm_queue.c
new file mode 100644
index 0000000000..da1e8df7e1
--- /dev/null
+++ b/lib/bm_queue/bm_queue.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2010-2016 Wind River Systems, Inc.
+ * Copyright (c) 2025, Nordic Semiconductor ASA
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/**
+ * @brief Dynamically sized queue based on a singly linked list.
+ */
+
+#include <stddef.h>
+#include <errno.h>
+#include <zephyr/toolchain.h>
+#include <zephyr/sys/sflist.h>
+#include <zephyr/sys/util.h>
+#include <bm_queue.h>
+#if CONFIG_CPU_ARM_M
+#include <cmsis_core.h> /* PRIMASK */
+#endif
+
+/* TODO: perhaps we should use sys_heap instead */
+extern void *k_malloc(size_t n);
+extern void k_free(void *p);
+
+/* SoftDevice IRQs are defined as zero-latency IRQs, which Zephyr's
+ * irq_lock() does not mask, so mask them with PRIMASK instead.
+ */
+#if CONFIG_CPU_ARM_M
+static int primask;
+#endif
+
+static void bm_queue_critical_region_enter(void)
+{
+#if CONFIG_CPU_ARM_M
+	/* Save PRIMASK */
+	primask = __get_PRIMASK();
+	__disable_irq();
+#endif
+}
+
+static void bm_queue_critical_region_exit(void)
+{
+#if CONFIG_CPU_ARM_M
+	/* Restore PRIMASK */
+	__set_PRIMASK(primask);
+#endif
+}
+
+struct alloc_node {
+	sys_sfnode_t node;
+	void *data;
+};
+
+static void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free)
+{
+	void *ret;
+
+	if ((node != NULL) && (sys_sfnode_flags_get(node) != (uint8_t)0)) {
+		/* If the flag is set, then the enqueue operation for this item
+		 * did a behind-the-scenes memory allocation of an alloc_node
+		 * struct, which is what got put in the queue.
+		 * Free it and pass back the data pointer.
+		 */
+		struct alloc_node *anode;
+
+		anode = CONTAINER_OF(node, struct alloc_node, node);
+		ret = anode->data;
+		if (needs_free) {
+			k_free(anode);
+		}
+	} else {
+		/* Data was placed directly in the queue, with the first word
+		 * reserved for the linked list.
+		 */
+		ret = (void *)node;
+	}
+
+	return ret;
+}
+
+static int32_t queue_insert(struct bm_queue *queue, void *prev, void *data,
+			    bool alloc, bool is_append)
+{
+	bm_queue_critical_region_enter();
+
+	if (is_append) {
+		prev = sys_sflist_peek_tail(&queue->data_q);
+	}
+
+	/* Wrap the data in an allocated bookkeeping node if requested. */
+	if (alloc) {
+		struct alloc_node *anode;
+
+		anode = k_malloc(sizeof(*anode));
+		if (anode == NULL) {
+			bm_queue_critical_region_exit();
+			return -ENOMEM;
+		}
+		anode->data = data;
+		sys_sfnode_init(&anode->node, 0x1);
+		data = anode;
+	} else {
+		sys_sfnode_init(data, 0x0);
+	}
+
+	sys_sflist_insert(&queue->data_q, prev, data);
+
+	bm_queue_critical_region_exit();
+
+	return 0;
+}
+
+
+void bm_queue_init(struct bm_queue *queue)
+{
+	sys_sflist_init(&queue->data_q);
+}
+
+void bm_queue_insert(struct bm_queue *queue, void *prev, void *data)
+{
+	(void)queue_insert(queue, prev, data, false, false);
+}
+
+void bm_queue_append(struct bm_queue *queue, void *data)
+{
+	(void)queue_insert(queue, NULL, data, false, true);
+}
+
+void bm_queue_prepend(struct bm_queue *queue, void *data)
+{
+	(void)queue_insert(queue, NULL, data, false, false);
+}
+
+int32_t bm_queue_alloc_append(struct bm_queue *queue, void *data)
+{
+	return queue_insert(queue, NULL, data, true, true);
+}
+
+int32_t bm_queue_alloc_prepend(struct bm_queue *queue, void *data)
+{
+	return queue_insert(queue, NULL, data, true, false);
+}
+
+int bm_queue_append_list(struct bm_queue *queue, void *head, void *tail)
+{
+	/* invalid head or tail of list */
+	if ((head == NULL) || (tail == NULL)) {
+		return -EINVAL;
+	}
+
+	bm_queue_critical_region_enter();
+	sys_sflist_append_list(&queue->data_q, head, tail);
+	bm_queue_critical_region_exit();
+
+	return 0;
+}
+
+int bm_queue_merge_slist(struct bm_queue *queue, sys_slist_t *list)
+{
+	int ret;
+
+	/* list must not be empty */
+	if (sys_slist_is_empty(list)) {
+		return -EINVAL;
+	}
+
+	/*
+	 * note: this works as long as:
+	 * - the slist implementation keeps the next pointer as the first
+	 *   field of the node object type
+	 * - list->tail->next = NULL.
+	 * - sflist implementation only differs from slist by stuffing
+	 *   flag bytes in the lower order bits of the data pointer
+	 * - source list is really an slist and not an sflist with flags set
+	 */
+	ret = bm_queue_append_list(queue, list->head, list->tail);
+	if (ret != 0) {
+		return ret;
+	}
+	sys_slist_init(list);
+
+	return 0;
+}
+
+void *bm_queue_get(struct bm_queue *queue)
+{
+	void *data = NULL;
+
+	bm_queue_critical_region_enter();
+	if (likely(!sys_sflist_is_empty(&queue->data_q))) {
+		sys_sfnode_t *node;
+
+		node = sys_sflist_get_not_empty(&queue->data_q);
+		data = z_queue_node_peek(node, true);
+	}
+
+	bm_queue_critical_region_exit();
+	return data;
+}
+
+bool bm_queue_remove(struct bm_queue *queue, void *data)
+{
+	bool removed;
+
+	bm_queue_critical_region_enter();
+	removed = sys_sflist_find_and_remove(&queue->data_q, (sys_sfnode_t *)data);
+	bm_queue_critical_region_exit();
+
+	return removed;
+}
+
+bool bm_queue_unique_append(struct bm_queue *queue, void *data)
+{
+	bool unique = true;
+	sys_sfnode_t *test;
+
+	bm_queue_critical_region_enter();
+	SYS_SFLIST_FOR_EACH_NODE(&queue->data_q, test) {
+		if (test == (sys_sfnode_t *)data) {
+			unique = false;
+			break;
+		}
+	}
+
+	if (unique) {
+		/* Append directly instead of calling bm_queue_append(), which
+		 * would nest the critical region and overwrite the saved
+		 * PRIMASK value, leaving interrupts disabled on exit.
+		 */
+		sys_sfnode_init(data, 0x0);
+		sys_sflist_append(&queue->data_q, data);
+	}
+
+	bm_queue_critical_region_exit();
+	return unique;
+}
+
+void *bm_queue_peek_head(struct bm_queue *queue)
+{
+	return z_queue_node_peek(sys_sflist_peek_head(&queue->data_q), false);
+}
+
+void *bm_queue_peek_tail(struct bm_queue *queue)
+{
+	return z_queue_node_peek(sys_sflist_peek_tail(&queue->data_q), false);
+}
+
+
+/* Export a k_queue-compatible API */
+#if CONFIG_BM_QUEUE_K_QUEUE_COMPAT
+
+#include <stddef.h>
+#include <zephyr/kernel.h>
+#include <zephyr/sys/__assert.h>
+
+BUILD_ASSERT(
+	offsetof(struct bm_queue, data_q) == offsetof(struct k_queue, data_q),
+	"Incompatible queues");
+
+
+inline void z_impl_k_queue_init(struct k_queue *queue)
+{
+	bm_queue_init((struct bm_queue *)queue);
+}
+
+inline void k_queue_insert(struct k_queue *queue, void *prev, void *data)
+{
+	bm_queue_insert((struct bm_queue *)queue, prev, data);
+}
+
+inline void k_queue_append(struct k_queue *queue, void *data)
+{
+	bm_queue_append((struct bm_queue *)queue, data);
+}
+
+inline void k_queue_prepend(struct k_queue *queue, void *data)
+{
+	bm_queue_prepend((struct bm_queue *)queue, data);
+}
+
+inline int32_t z_impl_k_queue_alloc_append(struct k_queue *queue, void *data)
+{
+	return bm_queue_alloc_append((struct bm_queue *)queue, data);
+}
+
+inline int32_t z_impl_k_queue_alloc_prepend(struct k_queue *queue, void *data)
+{
+	return bm_queue_alloc_prepend((struct bm_queue *)queue, data);
+}
+
+inline int k_queue_append_list(struct k_queue *queue, void *head, void *tail)
+{
+	return bm_queue_append_list((struct bm_queue *)queue, head, tail);
+}
+
+inline int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
+{
+	return bm_queue_merge_slist((struct bm_queue *)queue, list);
+}
+
+inline void *z_impl_k_queue_get(struct k_queue *queue, k_timeout_t timeout)
+{
+	ARG_UNUSED(timeout);
+	__ASSERT_NO_MSG(K_TIMEOUT_EQ(timeout, K_NO_WAIT));
+
+	return bm_queue_get((struct bm_queue *)queue);
+}
+
+inline bool k_queue_remove(struct k_queue *queue, void *data)
+{
+	return bm_queue_remove((struct bm_queue *)queue, data);
+}
+
+inline bool k_queue_unique_append(struct k_queue *queue, void *data)
+{
+	return bm_queue_unique_append((struct bm_queue *)queue, data);
+}
+
+inline void *z_impl_k_queue_peek_head(struct k_queue *queue)
+{
+	return bm_queue_peek_head((struct bm_queue *)queue);
+}
+
+inline void *z_impl_k_queue_peek_tail(struct k_queue *queue)
+{
+	return bm_queue_peek_tail((struct bm_queue *)queue);
+}
+
+#endif
diff --git a/samples/bluetooth/hello_softdevice/prj.conf b/samples/bluetooth/hello_softdevice/prj.conf
index 5d40247713..afc3bd439d 100644
--- a/samples/bluetooth/hello_softdevice/prj.conf
+++ b/samples/bluetooth/hello_softdevice/prj.conf
@@ -3,3 +3,6 @@
 CONFIG_LOG_BACKEND_BM_UARTE=y
 CONFIG_SOFTDEVICE=y
 CONFIG_NRF_SDH=y
+
+CONFIG_BM_QUEUE=y
+CONFIG_BM_QUEUE_K_QUEUE_COMPAT=y
diff --git a/samples/bluetooth/hello_softdevice/src/main.c b/samples/bluetooth/hello_softdevice/src/main.c
index 67dd548d87..29d97452ad 100644
--- a/samples/bluetooth/hello_softdevice/src/main.c
+++ b/samples/bluetooth/hello_softdevice/src/main.c
@@ -4,6 +4,7 @@
  * SPDX-License-Identifier: LicenseRef-Nordic-5-Clause
  */
 
+#include <stdbool.h>
 #include
 #include
 #include
@@ -11,6 +12,12 @@
 #include /* USEC_PER_SEC */
 #include
 #include
+#include <zephyr/sys/sflist.h>
+
+struct item {
+	sys_sfnode_t reserved;
+	int foo;
+};
 
 static void on_ble_evt(const ble_evt_t *evt, void *ctx)
 {
@@ -30,6 +37,9 @@
 }
 NRF_SDH_STATE_EVT_OBSERVER(sdh_state, on_state_change, NULL, 0);
 
+#include <zephyr/kernel.h>
+#include <zephyr/sys/printk.h>
+
 int main(void)
 {
 	int err;
@@ -71,5 +81,42 @@
 		/* Empty. */
 	}
 
+	struct k_queue k;
+	struct item *element;
+	struct item i = {
+		.foo = 0xDEADBEEF,
+	};
+	bool empty;
+
+	k_queue_init(&k);
+	element = k_queue_peek_head(&k);
+	printk("Head is %p\n", element);
+	empty = k_queue_is_empty(&k);
+	if (empty) {
+		printk("Queue is empty\n");
+	} else {
+		printk("Queue is not empty\n");
+	}
+
+	k_queue_append(&k, &i);
+	while (!k_queue_is_empty(&k)) {
+		element = k_queue_get(&k, K_NO_WAIT);
+		printk("Element is %x\n", element->foo);
+	}
+
+#if DONTCOMPILE
+	k_queue_prepend(&k, &i);
+	k_queue_alloc_append(&k, &i);
+	k_queue_alloc_prepend(&k, &i);
+	k_queue_append_list(&k, NULL, NULL);
+	k_queue_merge_slist(&k, NULL);
+	k_queue_get(&k, K_NO_WAIT);
+	k_queue_remove(&k, &i);
+	k_queue_unique_append(&k, &i);
+	k_queue_peek_head(&k);
+	k_queue_peek_tail(&k);
+	k_queue_is_empty(&k);
+#endif
+
 	return 0;
 }
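
Usage note (not part of the patch): the sample above only exercises the k_queue compatibility layer. Below is a minimal sketch of driving bm_queue directly, without CONFIG_BM_QUEUE_K_QUEUE_COMPAT. It assumes <bm_queue.h> is on the include path as set up by this patch; the work_item, work_q, producer, and consumer names are illustrative only, not part of the library.

#include <zephyr/sys/printk.h>
#include <zephyr/sys/sflist.h>
#include <bm_queue.h>

struct work_item {
	sys_sfnode_t reserved; /* First word is reserved for the queue. */
	uint32_t id;
};

static struct bm_queue work_q;
static struct work_item item_a = { .id = 1 };
static struct work_item item_b = { .id = 2 };

void producer(void)
{
	bm_queue_init(&work_q);

	/* Statically allocated item: the queue links it in place. */
	bm_queue_append(&work_q, &item_a);

	/* Duplicate-free insert: returns false because item_a is already queued. */
	(void)bm_queue_unique_append(&work_q, &item_a);

	/* Heap-backed variant: the library allocates a small bookkeeping node,
	 * so the reserved word of item_b is left untouched.
	 */
	if (bm_queue_alloc_append(&work_q, &item_b) != 0) {
		printk("out of heap\n");
	}
}

void consumer(void)
{
	struct work_item *item;

	/* bm_queue_get() returns NULL once the queue is empty. */
	while ((item = bm_queue_get(&work_q)) != NULL) {
		printk("got item %u\n", item->id);
	}
}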
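
A second sketch (also not part of the patch) illustrates the bm_queue_append_list() contract documented in bm_queue.h: items are chained through their reserved first words, the chain is NULL-terminated, and the whole batch is handed over in one critical section. The evt and evt_q names are illustrative, and the one-time bm_queue_init() call is shown inline for brevity.

#include <bm_queue.h>

struct evt {
	void *next; /* Reserved first word; doubles as the link while batching. */
	int code;
};

static struct bm_queue evt_q;
static struct evt evts[3] = { { .code = 10 }, { .code = 11 }, { .code = 12 } };

void post_batch(void)
{
	bm_queue_init(&evt_q);

	/* Chain the items through their reserved first words, NULL-terminated. */
	evts[0].next = &evts[1];
	evts[1].next = &evts[2];
	evts[2].next = NULL;

	/* Append the whole chain atomically; -EINVAL is returned only when
	 * head or tail is NULL.
	 */
	(void)bm_queue_append_list(&evt_q, &evts[0], &evts[2]);
}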