
Commit 7b4644c

ubieda authored and fabiobaltieri committed
rtio: executor: split handling of one-shot and multi-shot completions
Their handling is not that similar, and mixing both hurts code readability. This patch does not affect functionality and does not break any testsuite under the RTIO API.

Signed-off-by: Luis Ubieda <[email protected]>
1 parent 5505182 commit 7b4644c
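
For context: a multi-shot SQE is submitted once and keeps generating completions until it is canceled, whereas a one-shot SQE produces at most one completion and is then released. The sketch below shows the consumer side of the multi-shot path using the mempool-backed read helpers from zephyr/rtio/rtio.h. It is a minimal illustration, not part of this commit; the iodev name my_iodev is hypothetical and the mempool sizing is arbitrary.

#include <zephyr/rtio/rtio.h>

/* RTIO context: 4 SQEs, 4 CQEs, and a mempool of 16 blocks of 64 bytes
 * (4-byte aligned). Sizes are illustrative only.
 */
RTIO_DEFINE_WITH_MEMPOOL(r, 4, 4, 16, 64, 4);

extern struct rtio_iodev my_iodev; /* hypothetical iodev providing RX data */

void multishot_stream(void)
{
        struct rtio_sqe *sqe = rtio_sqe_acquire(&r);

        /* One submission, many completions: on each completion the executor
         * re-queues this SQE instead of freeing it (the multi-shot path).
         */
        rtio_sqe_prep_read_multishot(sqe, &my_iodev, RTIO_PRIO_NORM, NULL);
        rtio_submit(&r, 0);

        for (int i = 0; i < 8; i++) {
                struct rtio_cqe *cqe = rtio_cqe_consume_block(&r);
                uint8_t *buf;
                uint32_t buf_len;

                /* Each completion carries a fresh mempool buffer, which the
                 * consumer must release once done with it.
                 */
                if (cqe->result >= 0 &&
                    rtio_cqe_get_mempool_buffer(&r, cqe, &buf, &buf_len) == 0) {
                        /* ... consume buf_len bytes ... */
                        rtio_release_buffer(&r, buf, buf_len);
                }
                rtio_cqe_release(&r, cqe);
        }
}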

File tree

1 file changed: +62 -35 lines


subsys/rtio/rtio_executor.c

Lines changed: 62 additions & 35 deletions
@@ -125,71 +125,98 @@ void rtio_executor_submit(struct rtio *r)
 /**
  * @brief Handle common logic when :c:macro:`RTIO_SQE_MULTISHOT` is set
  *
- * @param[in] r RTIO context
- * @param[in] curr Current IODev SQE that's being marked for finished.
- * @param[in] is_canceled Whether or not the SQE is canceled
+ * @param[in] iodev_sqe IODEV SQE that's being marked as finished.
+ * @param[in] result The result of the latest request iteration
+ * @param[in] is_ok Whether or not the SQE's result was successful
  */
-static inline void rtio_executor_handle_multishot(struct rtio *r, struct rtio_iodev_sqe *curr,
-                                                  bool is_canceled)
+static inline void rtio_executor_handle_multishot(struct rtio_iodev_sqe *iodev_sqe,
+                                                  int result, bool is_ok)
 {
-        /* Reset the mempool if needed */
-        if (curr->sqe.op == RTIO_OP_RX && FIELD_GET(RTIO_SQE_MEMPOOL_BUFFER, curr->sqe.flags)) {
-                if (is_canceled) {
-                        /* Free the memory first since no CQE will be generated */
-                        LOG_DBG("Releasing memory @%p size=%u", (void *)curr->sqe.rx.buf,
-                                curr->sqe.rx.buf_len);
-                        rtio_release_buffer(r, curr->sqe.rx.buf, curr->sqe.rx.buf_len);
-                }
+        struct rtio *r = iodev_sqe->r;
+        const bool is_canceled = FIELD_GET(RTIO_SQE_CANCELED, iodev_sqe->sqe.flags) == 1;
+        const bool uses_mempool = FIELD_GET(RTIO_SQE_MEMPOOL_BUFFER, iodev_sqe->sqe.flags) == 1;
+        const bool requires_response = FIELD_GET(RTIO_SQE_NO_RESPONSE, iodev_sqe->sqe.flags) == 0;
+        uint32_t cqe_flags = rtio_cqe_compute_flags(iodev_sqe);
+        void *userdata = iodev_sqe->sqe.userdata;
+
+        if (iodev_sqe->sqe.op == RTIO_OP_RX && uses_mempool) {
                 /* Reset the buffer info so the next request can get a new one */
-                curr->sqe.rx.buf = NULL;
-                curr->sqe.rx.buf_len = 0;
+                iodev_sqe->sqe.rx.buf = NULL;
+                iodev_sqe->sqe.rx.buf_len = 0;
         }
-        if (!is_canceled) {
+
+        if (is_canceled) {
+                LOG_DBG("Releasing memory @%p size=%u", (void *)iodev_sqe->sqe.rx.buf,
+                        iodev_sqe->sqe.rx.buf_len);
+                rtio_release_buffer(r, iodev_sqe->sqe.rx.buf, iodev_sqe->sqe.rx.buf_len);
+                rtio_sqe_pool_free(r->sqe_pool, iodev_sqe);
+        } else {
                 /* Request was not canceled, put the SQE back in the queue */
-                mpsc_push(&r->sq, &curr->q);
+                mpsc_push(&r->sq, &iodev_sqe->q);
                 rtio_executor_submit(r);
         }
+
+        if (requires_response) {
+                rtio_cqe_submit(r, result, userdata, cqe_flags);
+        }
 }
 
-static inline void rtio_executor_done(struct rtio_iodev_sqe *iodev_sqe, int result, bool is_ok)
+/**
+ * @brief Handle common logic one-shot items
+ *
+ * @param[in] iodev_sqe IODEV SQE that's being marked as finished.
+ * @param[in] result The result of the latest request iteration
+ * @param[in] is_ok Whether or not the SQE's result was successful
+ */
+static inline void rtio_executor_handle_oneshot(struct rtio_iodev_sqe *iodev_sqe,
+                                                int result, bool is_ok)
 {
-        const bool is_multishot = FIELD_GET(RTIO_SQE_MULTISHOT, iodev_sqe->sqe.flags) == 1;
         const bool is_canceled = FIELD_GET(RTIO_SQE_CANCELED, iodev_sqe->sqe.flags) == 1;
+        struct rtio_iodev_sqe *curr = iodev_sqe;
         struct rtio *r = iodev_sqe->r;
-        struct rtio_iodev_sqe *curr = iodev_sqe, *next;
-        void *userdata;
-        uint32_t sqe_flags, cqe_flags;
+        uint32_t sqe_flags;
 
+        /** Single-shot items may be linked as transactions or be chained together.
+         * Untangle the set of SQEs and act accordingly on each one.
+         */
         do {
-                userdata = curr->sqe.userdata;
+                void *userdata = curr->sqe.userdata;
+                uint32_t cqe_flags = rtio_cqe_compute_flags(iodev_sqe);
+                struct rtio_iodev_sqe *next = rtio_iodev_sqe_next(curr);
+
                 sqe_flags = curr->sqe.flags;
-                cqe_flags = rtio_cqe_compute_flags(iodev_sqe);
 
-                next = rtio_iodev_sqe_next(curr);
-                if (is_multishot) {
-                        rtio_executor_handle_multishot(r, curr, is_canceled);
-                }
-                if (!is_multishot || is_canceled) {
-                        /* SQE is no longer needed, release it */
-                        rtio_sqe_pool_free(r->sqe_pool, curr);
-                }
                 if (!is_canceled && FIELD_GET(RTIO_SQE_NO_RESPONSE, sqe_flags) == 0) {
-                        /* Request was not canceled, generate a CQE */
+                        /* Generate a result back to the client if need be.*/
                         rtio_cqe_submit(r, result, userdata, cqe_flags);
                 }
+
+                rtio_sqe_pool_free(r->sqe_pool, curr);
                 curr = next;
+
                 if (!is_ok) {
                         /* This is an error path, so cancel any chained SQEs */
                         result = -ECANCELED;
                 }
-        } while (sqe_flags & RTIO_SQE_TRANSACTION);
+        } while (FIELD_GET(RTIO_SQE_TRANSACTION, sqe_flags) == 1);
 
         /* curr should now be the last sqe in the transaction if that is what completed */
-        if (sqe_flags & RTIO_SQE_CHAINED) {
+        if (FIELD_GET(RTIO_SQE_CHAINED, sqe_flags) == 1) {
                 rtio_iodev_submit(curr);
         }
 }
 
+static inline void rtio_executor_done(struct rtio_iodev_sqe *iodev_sqe, int result, bool is_ok)
+{
+        const bool is_multishot = FIELD_GET(RTIO_SQE_MULTISHOT, iodev_sqe->sqe.flags) == 1;
+
+        if (is_multishot) {
+                rtio_executor_handle_multishot(iodev_sqe, result, is_ok);
+        } else {
+                rtio_executor_handle_oneshot(iodev_sqe, result, is_ok);
+        }
+}
+
 /**
  * @brief Callback from an iodev describing success
  */
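
After this change, rtio_executor_done() is a thin dispatcher and each handler owns its SQE's lifetime: one-shot SQEs are always returned to the pool once their CQE is generated, while multi-shot SQEs are re-queued and only freed on cancellation. Below is a minimal sketch of driving that cancellation path from application code, assuming the rtio_sqe_copy_in_get_handles() and rtio_sqe_cancel() helpers from zephyr/rtio/rtio.h; the stream_handle bookkeeping and function names are illustrative, not part of this commit.

#include <zephyr/rtio/rtio.h>

/* Keep a handle to the in-flight SQE so it can be canceled later */
static struct rtio_sqe *stream_handle;

int start_stream(struct rtio *r, const struct rtio_iodev *iodev)
{
        struct rtio_sqe sqe;
        int rc;

        rtio_sqe_prep_read_multishot(&sqe, iodev, RTIO_PRIO_NORM, NULL);

        /* Copy the SQE into the context's pool and remember its handle */
        rc = rtio_sqe_copy_in_get_handles(r, &sqe, &stream_handle, 1);
        if (rc == 0) {
                rc = rtio_submit(r, 0);
        }
        return rc;
}

void stop_stream(void)
{
        /* Flags the SQE as RTIO_SQE_CANCELED; on the next completion the
         * executor releases its buffer and returns it to the SQE pool
         * instead of re-queuing it (the canceled branch in the diff above).
         */
        rtio_sqe_cancel(stream_handle);
}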
