2525#include <zephyr/dt-bindings/spi/spi.h>
2626#include <zephyr/drivers/gpio.h>
2727#include <zephyr/kernel.h>
28+ #include <zephyr/sys/__assert.h>
29+ #include <zephyr/rtio/rtio.h>
2830
2931#ifdef __cplusplus
3032extern "C" {
@@ -480,6 +482,16 @@ typedef int (*spi_api_io_async)(const struct device *dev,
480482 spi_callback_t cb ,
481483 void * userdata );
482484
#if defined(CONFIG_SPI_RTIO) || defined(DOXYGEN)

/**
 * @typedef spi_api_iodev_submit
 * @brief Callback API for submitting work to a SPI device with RTIO
 *
 * @param dev SPI device the submission is directed at
 * @param iodev_sqe Submission queue entry to be processed by the driver
 */
typedef void (*spi_api_iodev_submit)(const struct device *dev,
				     struct rtio_iodev_sqe *iodev_sqe);
#endif /* CONFIG_SPI_RTIO */
494+
483495/**
484496 * @typedef spi_api_release
485497 * @brief Callback API for unlocking SPI device.
@@ -498,6 +510,9 @@ __subsystem struct spi_driver_api {
498510#ifdef CONFIG_SPI_ASYNC
499511 spi_api_io_async transceive_async ;
500512#endif /* CONFIG_SPI_ASYNC */
513+ #ifdef CONFIG_SPI_RTIO
514+ spi_api_iodev_submit iodev_submit ;
515+ #endif /* CONFIG_SPI_RTIO */
501516 spi_api_release release ;
502517};
503518
@@ -545,6 +560,7 @@ static inline bool spi_is_ready_dt(const struct spi_dt_spec *spec)
545560 }
546561 return true;
547562}
563+
548564/**
549565 * @brief Read/write the specified amount of data from the SPI driver.
550566 *
@@ -878,6 +894,209 @@ __deprecated static inline int spi_write_async(const struct device *dev,
878894
879895#endif /* CONFIG_SPI_ASYNC */
880896
897+
898+ #if defined(CONFIG_SPI_RTIO ) || defined(DOXYGEN )
899+
900+ /**
901+ * @brief Submit a SPI device with a request
902+ *
903+ * @param dev SPI device
904+ * @param iodev_sqe Prepared submissions queue entry connected to an iodev
905+ * defined by SPI_IODEV_DEFINE.
906+ * Must live as long as the request is in flight.
907+ *
908+ * @retval 0 If successful.
909+ * @retval -errno Negative errno code on failure.
910+ */
911+ static inline void spi_iodev_submit (struct rtio_iodev_sqe * iodev_sqe )
912+ {
913+ const struct spi_dt_spec * dt_spec = iodev_sqe -> sqe -> iodev -> data ;
914+ const struct device * dev = dt_spec -> bus ;
915+ const struct spi_driver_api * api = (const struct spi_driver_api * )dev -> api ;
916+
917+ api -> iodev_submit (dt_spec -> bus , iodev_sqe );
918+ }
919+
920+ extern const struct rtio_iodev_api spi_iodev_api ;
921+
/**
 * @brief Define an iodev for a given dt node on the bus
 *
 * These do not need to be shared globally but doing so
 * will save a small amount of memory.
 *
 * @param name Symbolic name by which the defined iodev may be referenced
 * @param node_id Devicetree node identifier of the SPI device on the bus
 * @param operation_ SPI operation flags (forwarded to SPI_DT_SPEC_GET)
 * @param delay_ Chip-select delay (forwarded to SPI_DT_SPEC_GET)
 */
#define SPI_DT_IODEV_DEFINE(name, node_id, operation_, delay_)		\
	const struct spi_dt_spec _spi_dt_spec_##name =			\
		SPI_DT_SPEC_GET(node_id, operation_, delay_);		\
	RTIO_IODEV_DEFINE(name, &spi_iodev_api, (void *)&_spi_dt_spec_##name)
934+
935+ /**
936+ * @brief Validate that SPI bus (and CS gpio if defined) is ready.
937+ *
938+ * @param spi_iodev SPI iodev defined with SPI_DT_IODEV_DEFINE
939+ *
940+ * @retval true if the SPI bus is ready for use.
941+ * @retval false if the SPI bus (or the CS gpio defined) is not ready for use.
942+ */
943+ static inline bool spi_is_ready_iodev (const struct rtio_iodev * spi_iodev )
944+ {
945+ struct spi_dt_spec * spec = spi_iodev -> data ;
946+
947+ return spi_is_ready_dt (spec );
948+ }
949+
950+ /**
951+ * @brief Copy the tx_bufs and rx_bufs into a set of RTIO requests
952+ *
953+ * @param r RTIO context
954+ * @param tx_bufs Transmit buffer set
955+ * @param rx_bufs Receive buffer set
956+ *
957+ * @retval sqe Last submission in the queue added
958+ * @retval NULL Not enough memory in the context to copy the requests
959+ */
960+ static inline struct rtio_sqe * spi_rtio_copy (struct rtio * r ,
961+ struct rtio_iodev * iodev ,
962+ const struct spi_buf_set * tx_bufs ,
963+ const struct spi_buf_set * rx_bufs )
964+ {
965+ struct rtio_sqe * sqe = NULL ;
966+ size_t tx_count = tx_bufs ? tx_bufs -> count : 0 ;
967+ size_t rx_count = rx_bufs ? rx_bufs -> count : 0 ;
968+
969+ uint32_t tx = 0 , tx_len = 0 ;
970+ uint32_t rx = 0 , rx_len = 0 ;
971+ uint8_t * tx_buf , * rx_buf ;
972+
973+ if (tx < tx_count ) {
974+ tx_buf = tx_bufs -> buffers [tx ].buf ;
975+ tx_len = tx_bufs -> buffers [tx ].len ;
976+ } else {
977+ tx_buf = NULL ;
978+ tx_len = rx_bufs -> buffers [rx ].len ;
979+ }
980+
981+ if (rx < rx_count ) {
982+ rx_buf = rx_bufs -> buffers [rx ].buf ;
983+ rx_len = rx_bufs -> buffers [rx ].len ;
984+ } else {
985+ rx_buf = NULL ;
986+ rx_len = tx_bufs -> buffers [tx ].len ;
987+ }
988+
989+
990+ while ((tx < tx_count || rx < rx_count ) && (tx_len > 0 || rx_len > 0 )) {
991+ sqe = rtio_sqe_acquire (r );
992+
993+ if (sqe == NULL ) {
994+ rtio_spsc_drop_all (r -> sq );
995+ return NULL ;
996+ }
997+
998+ /* If tx/rx len are same, we can do a simple transceive */
999+ if (tx_len == rx_len ) {
1000+ if (tx_buf == NULL ) {
1001+ rtio_sqe_prep_read (sqe , iodev , RTIO_PRIO_NORM ,
1002+ rx_buf , rx_len , NULL );
1003+ } else if (rx_buf == NULL ) {
1004+ rtio_sqe_prep_write (sqe , iodev , RTIO_PRIO_NORM ,
1005+ tx_buf , tx_len , NULL );
1006+ } else {
1007+ rtio_sqe_prep_transceive (sqe , iodev , RTIO_PRIO_NORM ,
1008+ tx_buf , rx_buf , rx_len , NULL );
1009+ }
1010+ tx ++ ;
1011+ rx ++ ;
1012+ if (rx < rx_count ) {
1013+ rx_buf = rx_bufs -> buffers [rx ].buf ;
1014+ rx_len = rx_bufs -> buffers [rx ].len ;
1015+ } else {
1016+ rx_buf = NULL ;
1017+ rx_len = 0 ;
1018+ }
1019+ if (tx < tx_count ) {
1020+ tx_buf = tx_bufs -> buffers [tx ].buf ;
1021+ tx_len = tx_bufs -> buffers [tx ].len ;
1022+ } else {
1023+ tx_buf = NULL ;
1024+ tx_len = 0 ;
1025+ }
1026+ } else if (tx_len == 0 ) {
1027+ rtio_sqe_prep_read (sqe , iodev , RTIO_PRIO_NORM ,
1028+ (uint8_t * )rx_buf ,
1029+ (uint32_t )rx_len ,
1030+ NULL );
1031+ rx ++ ;
1032+ if (rx < rx_count ) {
1033+ rx_buf = rx_bufs -> buffers [rx ].buf ;
1034+ rx_len = rx_bufs -> buffers [rx ].len ;
1035+ } else {
1036+ rx_buf = NULL ;
1037+ rx_len = 0 ;
1038+ }
1039+ } else if (rx_len == 0 ) {
1040+ rtio_sqe_prep_write (sqe , iodev , RTIO_PRIO_NORM ,
1041+ (uint8_t * )tx_buf ,
1042+ (uint32_t )tx_len ,
1043+ NULL );
1044+ tx ++ ;
1045+ if (tx < tx_count ) {
1046+ tx_buf = rx_bufs -> buffers [rx ].buf ;
1047+ tx_len = rx_bufs -> buffers [rx ].len ;
1048+ } else {
1049+ tx_buf = NULL ;
1050+ tx_len = 0 ;
1051+ }
1052+ } else if (tx_len > rx_len ) {
1053+ rtio_sqe_prep_transceive (sqe , iodev , RTIO_PRIO_NORM ,
1054+ (uint8_t * )tx_buf ,
1055+ (uint8_t * )rx_buf ,
1056+ (uint32_t )rx_len ,
1057+ NULL );
1058+ tx_len -= rx_len ;
1059+ tx_buf += rx_len ;
1060+ rx ++ ;
1061+ if (rx < rx_count ) {
1062+ rx_buf = rx_bufs -> buffers [rx ].buf ;
1063+ rx_len = rx_bufs -> buffers [rx ].len ;
1064+ } else {
1065+ rx_buf = NULL ;
1066+ rx_len = tx_len ;
1067+ }
1068+ } else if (rx_len > tx_len ) {
1069+ rtio_sqe_prep_transceive (sqe , iodev , RTIO_PRIO_NORM ,
1070+ (uint8_t * )tx_buf ,
1071+ (uint8_t * )rx_buf ,
1072+ (uint32_t )tx_len ,
1073+ NULL );
1074+ rx_len -= tx_len ;
1075+ rx_buf += tx_len ;
1076+ tx ++ ;
1077+ if (tx < tx_count ) {
1078+ tx_buf = tx_bufs -> buffers [tx ].buf ;
1079+ tx_len = tx_bufs -> buffers [tx ].len ;
1080+ } else {
1081+ tx_buf = NULL ;
1082+ tx_len = rx_len ;
1083+ }
1084+ } else {
1085+ __ASSERT_NO_MSG ("Invalid spi_rtio_copy state" );
1086+ }
1087+
1088+ sqe -> flags = RTIO_SQE_TRANSACTION ;
1089+ }
1090+
1091+ if (sqe != NULL ) {
1092+ sqe -> flags = 0 ;
1093+ }
1094+
1095+ return sqe ;
1096+ }
1097+
1098+ #endif /* CONFIG_SPI_RTIO */
1099+
8811100/**
8821101 * @brief Release the SPI device locked on and/or the CS by the current config
8831102 *
0 commit comments