/*
 * Copyright (c) 2024 Croxel Inc.
 * Copyright (c) 2025 Croxel Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
67
#include <zephyr/rtio/work.h>
#include <zephyr/kernel.h>
910
10- #define RTIO_WORKQ_PRIO_MED CONFIG_RTIO_WORKQ_PRIO_MED
11- #define RTIO_WORKQ_PRIO_HIGH RTIO_WORKQ_PRIO_MED - 1
12- #define RTIO_WORKQ_PRIO_LOW RTIO_WORKQ_PRIO_MED + 1
13-
1411K_MEM_SLAB_DEFINE_STATIC (rtio_work_items_slab ,
1512 sizeof (struct rtio_work_req ),
1613 CONFIG_RTIO_WORKQ_POOL_ITEMS ,
1714 4 );
18-
19- static void rtio_work_req_done_handler (struct k_p4wq_work * work )
20- {
21- struct rtio_work_req * req = CONTAINER_OF (work ,
22- struct rtio_work_req ,
23- work );
24- k_mem_slab_free (& rtio_work_items_slab , req );
25- }
26-
27- K_P4WQ_DEFINE_WITH_DONE_HANDLER (rtio_workq ,
28- CONFIG_RTIO_WORKQ_THREADS_POOL ,
29- CONFIG_RTIO_WORKQ_STACK_SIZE ,
30- rtio_work_req_done_handler );
31-
32- static void rtio_work_handler (struct k_p4wq_work * work )
33- {
34- struct rtio_work_req * req = CONTAINER_OF (work ,
35- struct rtio_work_req ,
36- work );
37- struct rtio_iodev_sqe * iodev_sqe = req -> iodev_sqe ;
38-
39- req -> handler (iodev_sqe );
40- }
15+ static K_THREAD_STACK_ARRAY_DEFINE (rtio_workq_threads_stack ,
16+ CONFIG_RTIO_WORKQ_THREADS_POOL ,
17+ CONFIG_RTIO_WORKQ_THREADS_POOL_STACK_SIZE ) ;
18+ static struct k_thread rtio_work_threads [CONFIG_RTIO_WORKQ_THREADS_POOL ];
19+ static K_QUEUE_DEFINE (rtio_workq );
4120
4221struct rtio_work_req * rtio_work_req_alloc (void )
4322{
@@ -49,12 +28,6 @@ struct rtio_work_req *rtio_work_req_alloc(void)
4928 return NULL ;
5029 }
5130
52- /** Initialize work item before using it as it comes
53- * from a Memory slab (no-init region).
54- */
55- req -> work .thread = NULL ;
56- (void )k_sem_init (& req -> work .done_sem , 1 , 1 );
57-
5831 return req ;
5932}
6033
@@ -71,31 +44,52 @@ void rtio_work_req_submit(struct rtio_work_req *req,
7144 return ;
7245 }
7346
74- struct k_p4wq_work * work = & req -> work ;
75- struct rtio_sqe * sqe = & iodev_sqe -> sqe ;
76-
77- /** Link the relevant info so that we can get it on the k_p4wq_work work item.
78- */
7947 req -> iodev_sqe = iodev_sqe ;
8048 req -> handler = handler ;
8149
82- /** Set the required information to handle the action */
83- work -> handler = rtio_work_handler ;
84- work -> deadline = 0 ;
85-
86- if (sqe -> prio == RTIO_PRIO_LOW ) {
87- work -> priority = RTIO_WORKQ_PRIO_LOW ;
88- } else if (sqe -> prio == RTIO_PRIO_HIGH ) {
89- work -> priority = RTIO_WORKQ_PRIO_HIGH ;
90- } else {
91- work -> priority = RTIO_WORKQ_PRIO_MED ;
92- }
93-
94- /** Decoupling action: Let the P4WQ execute the action. */
95- k_p4wq_submit (& rtio_workq , work );
50+ /** For now we're simply treating this as a FIFO queue. It may be
51+ * desirable to expand this to handle queue ordering based on RTIO
52+ * SQE priority.
53+ */
54+ k_queue_append (& rtio_workq , req );
9655}
9756
9857uint32_t rtio_work_req_used_count_get (void )
9958{
10059 return k_mem_slab_num_used_get (& rtio_work_items_slab );
10160}
61+
62+ static void rtio_workq_thread_fn (void * arg1 , void * arg2 , void * arg3 )
63+ {
64+ ARG_UNUSED (arg1 );
65+ ARG_UNUSED (arg2 );
66+ ARG_UNUSED (arg3 );
67+
68+ while (true) {
69+ struct rtio_work_req * req = k_queue_get (& rtio_workq , K_FOREVER );
70+
71+ if (req != NULL ) {
72+ req -> handler (req -> iodev_sqe );
73+
74+ k_mem_slab_free (& rtio_work_items_slab , req );
75+ }
76+ }
77+ }
78+
79+ static int static_init (void )
80+ {
81+ for (size_t i = 0 ; i < ARRAY_SIZE (rtio_work_threads ) ; i ++ ) {
82+ k_thread_create (& rtio_work_threads [i ],
83+ rtio_workq_threads_stack [i ],
84+ CONFIG_RTIO_WORKQ_THREADS_POOL_STACK_SIZE ,
85+ rtio_workq_thread_fn ,
86+ NULL , NULL , NULL ,
87+ CONFIG_RTIO_WORKQ_THREADS_POOL_PRIO ,
88+ 0 ,
89+ K_NO_WAIT );
90+ }
91+
92+ return 0 ;
93+ }
94+
95+ SYS_INIT (static_init , POST_KERNEL , 1 );