@@ -175,10 +175,17 @@ struct rtio_sqe {
175175 rtio_callback_t callback ;
176176 void * arg0 ; /**< Last argument given to callback */
177177 };
178+
179+ /** OP_TXRX */
180+ struct {
181+ uint32_t txrx_buf_len ;
182+ uint8_t * tx_buf ;
183+ uint8_t * rx_buf ;
184+ };
185+
178186 };
179187};
180188
181-
182189/** @cond ignore */
183190/* Ensure the rtio_sqe never grows beyond a common cacheline size of 64 bytes */
184191BUILD_ASSERT (sizeof (struct rtio_sqe ) <= 64 );
@@ -365,6 +372,8 @@ struct rtio_iodev {
365372/** An operation that calls a given function (callback) */
366373#define RTIO_OP_CALLBACK (RTIO_OP_TINY_TX+1)
367374
375+ /** An operation that transceives (reads and writes simultaneously) */
376+ #define RTIO_OP_TXRX (RTIO_OP_CALLBACK+1)
368377
369378
370379/**
@@ -468,6 +477,27 @@ static inline void rtio_sqe_prep_callback(struct rtio_sqe *sqe,
468477 sqe -> userdata = userdata ;
469478}
470479
480+ /**
481+ * @brief Prepare a transceive op submission
482+ */
483+ static inline void rtio_sqe_prep_transceive (struct rtio_sqe * sqe ,
484+ const struct rtio_iodev * iodev ,
485+ int8_t prio ,
486+ uint8_t * tx_buf ,
487+ uint8_t * rx_buf ,
488+ uint32_t buf_len ,
489+ void * userdata )
490+ {
491+ sqe -> op = RTIO_OP_TXRX ;
492+ sqe -> prio = prio ;
493+ sqe -> flags = 0 ;
494+ sqe -> iodev = iodev ;
495+ sqe -> txrx_buf_len = buf_len ;
496+ sqe -> tx_buf = tx_buf ;
497+ sqe -> rx_buf = rx_buf ;
498+ sqe -> userdata = userdata ;
499+ }
500+
471501/**
472502 * @brief Statically define and initialize a fixed length submission queue.
473503 *
@@ -647,6 +677,16 @@ static inline struct rtio_cqe *rtio_cqe_consume_block(struct rtio *r)
647677 return cqe ;
648678}
649679
/**
 * @brief Release consumed completion queue event
 *
 * Hands one consumed completion event back to the context's completion
 * SPSC queue (r->cq) so its slot may be reused for future completions.
 * Call this once the event obtained from a consume call is no longer
 * needed. NOTE(review): presumably events are released in the order they
 * were consumed — confirm against rtio_spsc_release() semantics.
 *
 * @param r RTIO context
 */
static inline void rtio_cqe_release(struct rtio *r)
{
	rtio_spsc_release(r->cq);
}
689+
650690/**
651691 * @brief Release all consumed completion queue events
652692 *
0 commit comments