@@ -125,71 +125,98 @@ void rtio_executor_submit(struct rtio *r)
 /**
  * @brief Handle common logic when :c:macro:`RTIO_SQE_MULTISHOT` is set
  *
- * @param[in] r RTIO context
- * @param[in] curr Current IODev SQE that's being marked for finished.
- * @param[in] is_canceled Whether or not the SQE is canceled
+ * @param[in] iodev_sqe IODEV SQE that's being marked as finished.
+ * @param[in] result The result of the latest request iteration
+ * @param[in] is_ok Whether or not the SQE's result was successful
  */
-static inline void rtio_executor_handle_multishot(struct rtio *r, struct rtio_iodev_sqe *curr,
-						   bool is_canceled)
+static inline void rtio_executor_handle_multishot(struct rtio_iodev_sqe *iodev_sqe,
+						   int result, bool is_ok)
 {
-	/* Reset the mempool if needed */
-	if (curr->sqe.op == RTIO_OP_RX && FIELD_GET(RTIO_SQE_MEMPOOL_BUFFER, curr->sqe.flags)) {
-		if (is_canceled) {
-			/* Free the memory first since no CQE will be generated */
-			LOG_DBG("Releasing memory @%p size=%u", (void *)curr->sqe.rx.buf,
-				curr->sqe.rx.buf_len);
-			rtio_release_buffer(r, curr->sqe.rx.buf, curr->sqe.rx.buf_len);
-		}
+	struct rtio *r = iodev_sqe->r;
+	const bool is_canceled = FIELD_GET(RTIO_SQE_CANCELED, iodev_sqe->sqe.flags) == 1;
+	const bool uses_mempool = FIELD_GET(RTIO_SQE_MEMPOOL_BUFFER, iodev_sqe->sqe.flags) == 1;
+	const bool requires_response = FIELD_GET(RTIO_SQE_NO_RESPONSE, iodev_sqe->sqe.flags) == 0;
+	uint32_t cqe_flags = rtio_cqe_compute_flags(iodev_sqe);
+	void *userdata = iodev_sqe->sqe.userdata;
+
+	if (iodev_sqe->sqe.op == RTIO_OP_RX && uses_mempool) {
 		/* Reset the buffer info so the next request can get a new one */
-		curr->sqe.rx.buf = NULL;
-		curr->sqe.rx.buf_len = 0;
+		iodev_sqe->sqe.rx.buf = NULL;
+		iodev_sqe->sqe.rx.buf_len = 0;
 	}
-	if (!is_canceled) {
+
+	if (is_canceled) {
+		LOG_DBG("Releasing memory @%p size=%u", (void *)iodev_sqe->sqe.rx.buf,
+			iodev_sqe->sqe.rx.buf_len);
+		rtio_release_buffer(r, iodev_sqe->sqe.rx.buf, iodev_sqe->sqe.rx.buf_len);
+		rtio_sqe_pool_free(r->sqe_pool, iodev_sqe);
+	} else {
 		/* Request was not canceled, put the SQE back in the queue */
-		mpsc_push(&r->sq, &curr->q);
+		mpsc_push(&r->sq, &iodev_sqe->q);
 		rtio_executor_submit(r);
 	}
+
+	if (requires_response) {
+		rtio_cqe_submit(r, result, userdata, cqe_flags);
+	}
 }
 
-static inline void rtio_executor_done(struct rtio_iodev_sqe *iodev_sqe, int result, bool is_ok)
+/**
+ * @brief Handle common logic for one-shot items
+ *
+ * @param[in] iodev_sqe IODEV SQE that's being marked as finished.
+ * @param[in] result The result of the latest request iteration
+ * @param[in] is_ok Whether or not the SQE's result was successful
+ */
+static inline void rtio_executor_handle_oneshot(struct rtio_iodev_sqe *iodev_sqe,
+						int result, bool is_ok)
 {
-	const bool is_multishot = FIELD_GET(RTIO_SQE_MULTISHOT, iodev_sqe->sqe.flags) == 1;
 	const bool is_canceled = FIELD_GET(RTIO_SQE_CANCELED, iodev_sqe->sqe.flags) == 1;
+	struct rtio_iodev_sqe *curr = iodev_sqe;
 	struct rtio *r = iodev_sqe->r;
-	struct rtio_iodev_sqe *curr = iodev_sqe, *next;
-	void *userdata;
-	uint32_t sqe_flags, cqe_flags;
+	uint32_t sqe_flags;
 
+	/** Single-shot items may be linked as transactions or be chained together.
+	 * Untangle the set of SQEs and act accordingly on each one.
+	 */
 	do {
-		userdata = curr->sqe.userdata;
+		void *userdata = curr->sqe.userdata;
+		uint32_t cqe_flags = rtio_cqe_compute_flags(iodev_sqe);
+		struct rtio_iodev_sqe *next = rtio_iodev_sqe_next(curr);
+
 		sqe_flags = curr->sqe.flags;
-		cqe_flags = rtio_cqe_compute_flags(iodev_sqe);
 
-		next = rtio_iodev_sqe_next(curr);
-		if (is_multishot) {
-			rtio_executor_handle_multishot(r, curr, is_canceled);
-		}
-		if (!is_multishot || is_canceled) {
-			/* SQE is no longer needed, release it */
-			rtio_sqe_pool_free(r->sqe_pool, curr);
-		}
 		if (!is_canceled && FIELD_GET(RTIO_SQE_NO_RESPONSE, sqe_flags) == 0) {
-			/* Request was not canceled, generate a CQE */
+			/* Generate a result back to the client if need be. */
 			rtio_cqe_submit(r, result, userdata, cqe_flags);
 		}
+
+		rtio_sqe_pool_free(r->sqe_pool, curr);
 		curr = next;
+
 		if (!is_ok) {
 			/* This is an error path, so cancel any chained SQEs */
 			result = -ECANCELED;
 		}
-	} while (sqe_flags & RTIO_SQE_TRANSACTION);
+	} while (FIELD_GET(RTIO_SQE_TRANSACTION, sqe_flags) == 1);
 
 	/* curr should now be the last sqe in the transaction if that is what completed */
-	if (sqe_flags & RTIO_SQE_CHAINED) {
+	if (FIELD_GET(RTIO_SQE_CHAINED, sqe_flags) == 1) {
 		rtio_iodev_submit(curr);
 	}
 }
 
+static inline void rtio_executor_done(struct rtio_iodev_sqe *iodev_sqe, int result, bool is_ok)
+{
+	const bool is_multishot = FIELD_GET(RTIO_SQE_MULTISHOT, iodev_sqe->sqe.flags) == 1;
+
+	if (is_multishot) {
+		rtio_executor_handle_multishot(iodev_sqe, result, is_ok);
+	} else {
+		rtio_executor_handle_oneshot(iodev_sqe, result, is_ok);
+	}
+}
+
 /**
  * @brief Callback from an iodev describing success
  */
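
For context, here is a minimal usage sketch of the multishot receive path this change reworks. It is illustrative only and not part of the commit: the iodev handle my_iodev and the pool geometry are hypothetical placeholders, and the calls assume the public Zephyr RTIO API (rtio_sqe_acquire, rtio_sqe_prep_read_with_pool, rtio_submit and the CQE consumption helpers).

#include <zephyr/rtio/rtio.h>

/* RTIO context with a small mempool backing RX buffers (sizes are arbitrary). */
RTIO_DEFINE_WITH_MEMPOOL(my_rtio, 4, 4, 8, 64, 4);

extern struct rtio_iodev my_iodev; /* hypothetical iodev provided by a driver */

void multishot_read_example(void)
{
	struct rtio_sqe *sqe = rtio_sqe_acquire(&my_rtio);

	/* RX request with a mempool buffer; the executor clears sqe->rx.buf after
	 * each iteration so a fresh block is claimed on the next run.
	 */
	rtio_sqe_prep_read_with_pool(sqe, &my_iodev, RTIO_PRIO_NORM, NULL);
	sqe->flags |= RTIO_SQE_MULTISHOT;

	rtio_submit(&my_rtio, 0);

	/* Each iteration produces one CQE unless RTIO_SQE_NO_RESPONSE is set. */
	for (int i = 0; i < 3; i++) {
		struct rtio_cqe *cqe = rtio_cqe_consume_block(&my_rtio);
		uint8_t *buf;
		uint32_t buf_len;

		rtio_cqe_get_mempool_buffer(&my_rtio, cqe, &buf, &buf_len);
		/* ... use buf[0..buf_len) ... */
		rtio_release_buffer(&my_rtio, buf, buf_len);
		rtio_cqe_release(&my_rtio, cqe);
	}
}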