1313#define RTIO_IODEV_TEST_H_
1414
/* Per-instance state for the test iodev. */
struct rtio_iodev_test_data {
	/* One-shot k_timer used to simulate an asynchronous hardware request */
	struct k_timer timer;

	/* Currently executing sqe; NULL when no request is in flight */
	struct rtio_iodev_sqe *iodev_sqe;

	/* Count of submit calls */
	atomic_t submit_count;

	/* Lock around kicking off the next timer (single-consumer pop of iodev_sq) */
	struct k_spinlock lock;
};
2628
/*
 * Service the next queued submission, if any.
 *
 * If no request is currently in flight, pops the next sqe off the iodev's
 * MPSC submission queue and starts the one-shot timer that stands in for
 * asynchronous hardware completion. Safe to call from both the submit path
 * and the timer callback.
 *
 * @param iodev Test iodev whose submission queue should be serviced
 */
static void rtio_iodev_test_next(struct rtio_iodev *iodev)
{
	struct rtio_iodev_test_data *data = iodev->data;

	/* The next section must be serialized to ensure single consumer semantics */
	k_spinlock_key_t key = k_spin_lock(&data->lock);

	/* A request is already in flight; its timer callback will call back in here */
	if (data->iodev_sqe != NULL) {
		goto out;
	}

	struct rtio_mpsc_node *next = rtio_mpsc_pop(&iodev->iodev_sq);

	if (next != NULL) {
		struct rtio_iodev_sqe *next_sqe = CONTAINER_OF(next, struct rtio_iodev_sqe, q);

		/* Claim the request, then simulate async hardware with a one-shot timer */
		data->iodev_sqe = next_sqe;
		k_timer_start(&data->timer, K_MSEC(10), K_NO_WAIT);
	}

out:
	k_spin_unlock(&data->lock, key);
}
4652
53+ static void rtio_iodev_timer_fn (struct k_timer * tm )
54+ {
55+ struct rtio_iodev_test_data * data = CONTAINER_OF (tm , struct rtio_iodev_test_data , timer );
56+ struct rtio_iodev_sqe * iodev_sqe = data -> iodev_sqe ;
57+ struct rtio_iodev * iodev = (struct rtio_iodev * )iodev_sqe -> sqe -> iodev ;
58+
59+ /* Complete the request with Ok and a result, clear the current task */
4760 rtio_iodev_sqe_ok (iodev_sqe , 0 );
61+ data -> iodev_sqe = NULL ;
62+
63+ rtio_iodev_test_next (iodev );
4864}
4965
5066static void rtio_iodev_test_submit (struct rtio_iodev_sqe * iodev_sqe )
5167{
5268 struct rtio_iodev * iodev = (struct rtio_iodev * )iodev_sqe -> sqe -> iodev ;
5369 struct rtio_iodev_test_data * data = iodev -> data ;
5470
55- /*
56- * If a task is already going queue up the next request in the mpsc.
57- */
58- if (!atomic_ptr_cas (& data -> iodev_sqe , NULL , iodev_sqe )) {
59- TC_PRINT ("adding queued sqe\n" );
60- rtio_mpsc_push (& iodev -> iodev_sq , & iodev_sqe -> q );
61- }
71+ atomic_inc (& data -> submit_count );
72+
73+ /* The only safe operation is enqueuing */
74+ rtio_mpsc_push (& iodev -> iodev_sq , & iodev_sqe -> q );
6275
63- /*
64- * Simulate an async hardware request with a one shot timer
65- *
66- * In reality the time to complete might have some significant variance
67- * but this is proof enough of a working API flow.
68- */
69- TC_PRINT ("starting one shot\n" );
70- k_timer_start (& data -> timer , K_MSEC (10 ), K_NO_WAIT );
76+ rtio_iodev_test_next (iodev );
7177}
7278
7379const struct rtio_iodev_api rtio_iodev_test_api = {
@@ -79,7 +85,7 @@ void rtio_iodev_test_init(struct rtio_iodev *test)
7985 struct rtio_iodev_test_data * data = test -> data ;
8086
8187 rtio_mpsc_init (& test -> iodev_sq );
82- atomic_ptr_set ( & data -> iodev_sqe , NULL ) ;
88+ data -> iodev_sqe = NULL ;
8389 k_timer_init (& data -> timer , rtio_iodev_timer_fn , NULL );
8490}
8591
0 commit comments