/*
 * Copyright (c) 2024 Croxel Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/ztest.h>
#include <zephyr/kernel.h>
#include <zephyr/rtio/rtio.h>
#include <zephyr/rtio/work.h>

/** Used to validate/control test execution flow */
K_SEM_DEFINE(work_handler_sem_1, 0, 1);
K_SEM_DEFINE(work_handler_sem_2, 0, 1);
K_SEM_DEFINE(work_handler_sem_3, 0, 1);
static int work_handler_called;

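/*
 * Work handler shared by all tests: counts invocations, then blocks on the
 * semaphore passed through the SQE's userdata so each test controls exactly
 * when the work item is allowed to complete.
 */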
static void work_handler(struct rtio_iodev_sqe *iodev_sqe)
{
	struct rtio_sqe *sqe = &iodev_sqe->sqe;
	struct k_sem *sem = (struct k_sem *)sqe->userdata;

	work_handler_called++;
	printk("\t- %s() called!: %d\n", __func__, work_handler_called);

	k_sem_take(sem, K_FOREVER);

	rtio_executor_ok(iodev_sqe, 0);
}

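/*
 * Iodev submit hook: hands the SQE off to the RTIO work queue so it is
 * serviced by work_handler() on a work queue thread rather than in the
 * submitter's context.
 */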
static void dummy_submit(struct rtio_iodev_sqe *iodev_sqe)
{
	struct rtio_work_req *req = rtio_work_req_alloc();

	/* Allocation is expected to succeed here: the pool holds more items
	 * than these tests ever keep in flight at once.
	 */
	rtio_work_req_submit(req, iodev_sqe, work_handler);
}

static const struct rtio_iodev_api r_iodev_test_api = {
	.submit = dummy_submit,
};

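/* Three identical dummy iodevs, so each RTIO context has its own target */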
RTIO_IODEV_DEFINE(dummy_iodev, &r_iodev_test_api, NULL);
RTIO_IODEV_DEFINE(dummy_iodev_2, &r_iodev_test_api, NULL);
RTIO_IODEV_DEFINE(dummy_iodev_3, &r_iodev_test_api, NULL);

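/* One RTIO context per priority level exercised by the preemption test */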
RTIO_DEFINE(r_test, 3, 3);
RTIO_DEFINE(r_test_2, 3, 3);
RTIO_DEFINE(r_test_3, 3, 3);

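/* Reset all shared state so every test starts with empty queues and counters */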
static void before(void *unused)
{
	ARG_UNUSED(unused);

	rtio_sqe_drop_all(&r_test);
	rtio_sqe_drop_all(&r_test_2);
	rtio_sqe_drop_all(&r_test_3);

	k_sem_init(&work_handler_sem_1, 0, 1);
	k_sem_init(&work_handler_sem_2, 0, 1);
	k_sem_init(&work_handler_sem_3, 0, 1);

	work_handler_called = 0;
}

ZTEST_SUITE(rtio_work, NULL, NULL, before, NULL, NULL);

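/*
 * Submitting a work-based SQE must not tie up the submitter: the handler
 * runs (and blocks) on the work queue thread while rtio_submit() returns
 * to the caller.
 */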
ZTEST(rtio_work, test_work_decouples_submission)
{
	struct rtio_sqe *sqe;
	struct rtio_cqe *cqe;

	sqe = rtio_sqe_acquire(&r_test);
	rtio_sqe_prep_nop(sqe, &dummy_iodev, &work_handler_sem_1);
	sqe->prio = RTIO_PRIO_NORM;

	zassert_equal(0, work_handler_called);
	zassert_equal(0, rtio_work_req_used_count_get());

	zassert_ok(rtio_submit(&r_test, 0));

	/* The handler has started on the work queue and is blocked on its
	 * semaphore, holding one work item in use.
	 */
	zassert_equal(1, work_handler_called);
	zassert_equal(1, rtio_work_req_used_count_get());

	/* Unblock the handler: the work item completes and is released */
	k_sem_give(&work_handler_sem_1);
	zassert_equal(0, rtio_work_req_used_count_get());

	/** Clean-up */
	cqe = rtio_cqe_consume_block(&r_test);
	rtio_cqe_release(&r_test, cqe);
}

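/*
 * Several SQEs queued on one context ahead of a single rtio_submit() call
 * should each get their own work item and all run to completion.
 */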
ZTEST(rtio_work, test_work_supports_batching_submissions)
{
	struct rtio_sqe *sqe_a;
	struct rtio_sqe *sqe_b;
	struct rtio_sqe *sqe_c;
	struct rtio_cqe *cqe;

	sqe_a = rtio_sqe_acquire(&r_test);
	rtio_sqe_prep_nop(sqe_a, &dummy_iodev, &work_handler_sem_1);
	sqe_a->prio = RTIO_PRIO_NORM;

	sqe_b = rtio_sqe_acquire(&r_test);
	rtio_sqe_prep_nop(sqe_b, &dummy_iodev, &work_handler_sem_2);
	sqe_b->prio = RTIO_PRIO_NORM;

	sqe_c = rtio_sqe_acquire(&r_test);
	rtio_sqe_prep_nop(sqe_c, &dummy_iodev, &work_handler_sem_3);
	sqe_c->prio = RTIO_PRIO_NORM;

	zassert_ok(rtio_submit(&r_test, 0));

	/* Release each blocked handler in turn */
	k_sem_give(&work_handler_sem_1);
	k_sem_give(&work_handler_sem_2);
	k_sem_give(&work_handler_sem_3);

	zassert_equal(3, work_handler_called);
	zassert_equal(0, rtio_work_req_used_count_get());

	/** Clean-up */
	cqe = rtio_cqe_consume_block(&r_test);
	rtio_cqe_release(&r_test, cqe);
	cqe = rtio_cqe_consume_block(&r_test);
	rtio_cqe_release(&r_test, cqe);
	cqe = rtio_cqe_consume_block(&r_test);
	rtio_cqe_release(&r_test, cqe);
}

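/*
 * Work items submitted from separate contexts at LOW/NORM/HIGH priority
 * should all be serviced concurrently: a blocked lower-priority work item
 * must not prevent higher-priority submissions from running.
 */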
ZTEST(rtio_work, test_work_supports_preempting_on_higher_prio_submissions)
{
	struct rtio_sqe *sqe_a;
	struct rtio_sqe *sqe_b;
	struct rtio_sqe *sqe_c;
	struct rtio_cqe *cqe;

	sqe_a = rtio_sqe_acquire(&r_test);
	rtio_sqe_prep_nop(sqe_a, &dummy_iodev, &work_handler_sem_1);
	sqe_a->prio = RTIO_PRIO_LOW;

	sqe_b = rtio_sqe_acquire(&r_test_2);
	rtio_sqe_prep_nop(sqe_b, &dummy_iodev_2, &work_handler_sem_2);
	sqe_b->prio = RTIO_PRIO_NORM;

	sqe_c = rtio_sqe_acquire(&r_test_3);
	rtio_sqe_prep_nop(sqe_c, &dummy_iodev_3, &work_handler_sem_3);
	sqe_c->prio = RTIO_PRIO_HIGH;

	zassert_ok(rtio_submit(&r_test, 0));
	zassert_ok(rtio_submit(&r_test_2, 0));
	zassert_ok(rtio_submit(&r_test_3, 0));

	/* All three handlers have started even though each earlier, lower
	 * priority handler is still blocked: three work items are in use.
	 */
	zassert_equal(3, work_handler_called);
	zassert_equal(3, rtio_work_req_used_count_get());

	k_sem_give(&work_handler_sem_1);
	k_sem_give(&work_handler_sem_2);
	k_sem_give(&work_handler_sem_3);

	zassert_equal(3, work_handler_called);
	zassert_equal(0, rtio_work_req_used_count_get());

	/** Clean-up */
	cqe = rtio_cqe_consume_block(&r_test);
	rtio_cqe_release(&r_test, cqe);
	cqe = rtio_cqe_consume_block(&r_test_2);
	rtio_cqe_release(&r_test_2, cqe);
	cqe = rtio_cqe_consume_block(&r_test_3);
	rtio_cqe_release(&r_test_3, cqe);
}

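/*
 * The used-count API tracks allocations and releases one-to-one, and
 * allocation fails once the pool is exhausted. The asserts below imply a
 * pool of 4 items in this test's configuration (presumably set through
 * CONFIG_RTIO_WORKQ_POOL_ITEMS in its prj.conf).
 */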
ZTEST(rtio_work, test_used_count_keeps_track_of_alloc_items)
{
	struct rtio_work_req *req_a = NULL;
	struct rtio_work_req *req_b = NULL;
	struct rtio_work_req *req_c = NULL;
	struct rtio_work_req *req_d = NULL;
	struct rtio_work_req *req_e = NULL;

	zassert_equal(0, rtio_work_req_used_count_get());

	/** Each allocation should return a valid item and bump the used count */
	req_a = rtio_work_req_alloc();
	zassert_not_null(req_a);
	zassert_equal(1, rtio_work_req_used_count_get());

	req_b = rtio_work_req_alloc();
	zassert_not_null(req_b);
	zassert_equal(2, rtio_work_req_used_count_get());

	req_c = rtio_work_req_alloc();
	zassert_not_null(req_c);
	zassert_equal(3, rtio_work_req_used_count_get());

	req_d = rtio_work_req_alloc();
	zassert_not_null(req_d);
	zassert_equal(4, rtio_work_req_used_count_get());

	/** The pool is now exhausted, so this allocation should fail */
	req_e = rtio_work_req_alloc();
	zassert_is_null(req_e);
	zassert_equal(4, rtio_work_req_used_count_get());

	/** Flush the requests: each submit releases its item back to the pool */
	rtio_work_req_submit(req_a, NULL, NULL);
	zassert_equal(3, rtio_work_req_used_count_get());

	rtio_work_req_submit(req_b, NULL, NULL);
	zassert_equal(2, rtio_work_req_used_count_get());

	rtio_work_req_submit(req_c, NULL, NULL);
	zassert_equal(1, rtio_work_req_used_count_get());

	rtio_work_req_submit(req_d, NULL, NULL);
	zassert_equal(0, rtio_work_req_used_count_get());
}