@@ -22,6 +22,7 @@ static void spi_rtio_iodev_default_submit_sync(struct rtio_iodev_sqe *iodev_sqe)
 {
 	struct spi_dt_spec *dt_spec = iodev_sqe->sqe.iodev->data;
 	const struct device *dev = dt_spec->bus;
+	uint8_t num_msgs = 0;
 	int err = 0;
 
 	LOG_DBG("Sync RTIO work item for: %p", (void *)dev);
@@ -33,67 +34,103 @@ static void spi_rtio_iodev_default_submit_sync(struct rtio_iodev_sqe *iodev_sqe)
 	struct rtio_iodev_sqe *txn_head = iodev_sqe;
 	struct rtio_iodev_sqe *txn_curr = iodev_sqe;
 
+	/* We allocate the spi_buf's on the stack, to do so
+	 * the count of messages needs to be determined to
+	 * ensure we don't go over the statically sized array.
+	 */
 	do {
-		struct rtio_sqe *sqe = &txn_curr->sqe;
-		struct spi_buf tx_buf = {0};
-		struct spi_buf_set tx_buf_set = {
-			.buffers = &tx_buf,
-		};
+		switch (txn_curr->sqe.op) {
+		case RTIO_OP_RX:
+		case RTIO_OP_TX:
+		case RTIO_OP_TINY_TX:
+		case RTIO_OP_TXRX:
+			num_msgs++;
+			break;
+		default:
+			LOG_ERR("Invalid op code %d for submission %p", txn_curr->sqe.op,
+				(void *)&txn_curr->sqe);
+			err = -EIO;
+			break;
+		}
+		txn_curr = rtio_txn_next(txn_curr);
+	} while (err == 0 && txn_curr != NULL);
 
-		struct spi_buf rx_buf = {0};
-		struct spi_buf_set rx_buf_set = {
-			.buffers = &rx_buf,
-		};
+	if (err != 0) {
+		rtio_iodev_sqe_err(txn_head, err);
+		return;
+	}
+
+	/* Allocate msgs on the stack, MISRA doesn't like VLAs so we need a statically
+	 * sized array here. It's pretty unlikely we have more than 4 spi messages
+	 * in a transaction as we typically would only have 2, one to write a
+	 * register address, and another to read/write the register into an array
+	 */
+	if (num_msgs > CONFIG_SPI_RTIO_FALLBACK_MSGS) {
+		LOG_ERR("At most CONFIG_SPI_RTIO_FALLBACK_MSGS"
+			" submissions in a transaction are"
+			" allowed in the default handler");
+		rtio_iodev_sqe_err(txn_head, -ENOMEM);
+		return;
+	}
+
+	struct spi_buf tx_bufs[CONFIG_SPI_RTIO_FALLBACK_MSGS];
+	struct spi_buf rx_bufs[CONFIG_SPI_RTIO_FALLBACK_MSGS];
+	struct spi_buf_set tx_buf_set = {
+		.buffers = tx_bufs,
+		.count = num_msgs,
+	};
+	struct spi_buf_set rx_buf_set = {
+		.buffers = rx_bufs,
+		.count = num_msgs,
+	};
 
-		LOG_DBG("Preparing transfer: %p", txn_curr);
+	txn_curr = txn_head;
+
+	for (size_t i = 0; i < num_msgs; i++) {
+		struct rtio_sqe *sqe = &txn_curr->sqe;
 
 		switch (sqe->op) {
 		case RTIO_OP_RX:
-			rx_buf.buf = sqe->rx.buf;
-			rx_buf.len = sqe->rx.buf_len;
-			rx_buf_set.count = 1;
+			rx_bufs[i].buf = sqe->rx.buf;
+			rx_bufs[i].len = sqe->rx.buf_len;
+			tx_bufs[i].buf = NULL;
+			tx_bufs[i].len = sqe->rx.buf_len;
 			break;
 		case RTIO_OP_TX:
-			tx_buf.buf = (uint8_t *)sqe->tx.buf;
-			tx_buf.len = sqe->tx.buf_len;
-			tx_buf_set.count = 1;
+			rx_bufs[i].buf = NULL;
+			rx_bufs[i].len = sqe->tx.buf_len;
+			tx_bufs[i].buf = (uint8_t *)sqe->tx.buf;
+			tx_bufs[i].len = sqe->tx.buf_len;
 			break;
 		case RTIO_OP_TINY_TX:
-			tx_buf.buf = (uint8_t *)sqe->tiny_tx.buf;
-			tx_buf.len = sqe->tiny_tx.buf_len;
-			tx_buf_set.count = 1;
+			rx_bufs[i].buf = NULL;
+			rx_bufs[i].len = sqe->tiny_tx.buf_len;
+			tx_bufs[i].buf = (uint8_t *)sqe->tiny_tx.buf;
+			tx_bufs[i].len = sqe->tiny_tx.buf_len;
 			break;
 		case RTIO_OP_TXRX:
-			rx_buf.buf = sqe->txrx.rx_buf;
-			rx_buf.len = sqe->txrx.buf_len;
-			tx_buf.buf = (uint8_t *)sqe->txrx.tx_buf;
-			tx_buf.len = sqe->txrx.buf_len;
-			rx_buf_set.count = 1;
-			tx_buf_set.count = 1;
+			rx_bufs[i].buf = sqe->txrx.rx_buf;
+			rx_bufs[i].len = sqe->txrx.buf_len;
+			tx_bufs[i].buf = (uint8_t *)sqe->txrx.tx_buf;
+			tx_bufs[i].len = sqe->txrx.buf_len;
 			break;
 		default:
-			LOG_ERR("Invalid op code %d for submission %p\n", sqe->op, (void *)sqe);
 			err = -EIO;
 			break;
 		}
 
-		if (!err) {
-			struct spi_buf_set *tx_buf_ptr = tx_buf_set.count > 0 ? &tx_buf_set : NULL;
-			struct spi_buf_set *rx_buf_ptr = rx_buf_set.count > 0 ? &rx_buf_set : NULL;
-
-			err = spi_transceive_dt(dt_spec, tx_buf_ptr, rx_buf_ptr);
+		txn_curr = rtio_txn_next(txn_curr);
+	}
 
-			/* NULL if this submission is not a transaction */
-			txn_curr = rtio_txn_next(txn_curr);
-		}
-	} while (err >= 0 && txn_curr != NULL);
+	if (err == 0) {
+		__ASSERT_NO_MSG(num_msgs > 0);
+		err = spi_transceive_dt(dt_spec, &tx_buf_set, &rx_buf_set);
+	}
 
-	if (err < 0) {
-		LOG_ERR("Transfer failed: %d", err);
+	if (err != 0) {
 		rtio_iodev_sqe_err(txn_head, err);
 	} else {
-		LOG_DBG("Transfer OK: %d", err);
-		rtio_iodev_sqe_ok(txn_head, err);
+		rtio_iodev_sqe_ok(txn_head, 0);
 	}
 }
 
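For reference, the two-message register read described in the comment above (a tiny write of the register address followed by a read) is the case that typically yields `num_msgs == 2` in this fallback path. The sketch below is illustrative only and not part of this patch; it assumes the usual Zephyr RTIO helpers (`rtio_sqe_acquire()`, `rtio_sqe_prep_tiny_write()`, `rtio_sqe_prep_read()`, `RTIO_SQE_TRANSACTION`, `rtio_submit()`) and uses hypothetical names for the RTIO context and SPI iodev, which a real driver would define elsewhere.

```c
#include <errno.h>
#include <zephyr/rtio/rtio.h>

/* Illustrative sketch only -- not part of this patch.
 * `r` is assumed to be an RTIO context and `spi_iodev` an SPI RTIO iodev
 * bound to the target device; both names are placeholders.
 */
static int read_reg(struct rtio *r, struct rtio_iodev *spi_iodev,
		    uint8_t reg_addr, uint8_t *buf, uint32_t len)
{
	struct rtio_sqe *wr = rtio_sqe_acquire(r);
	struct rtio_sqe *rd = rtio_sqe_acquire(r);

	if (wr == NULL || rd == NULL) {
		rtio_sqe_drop_all(r);
		return -ENOMEM;
	}

	/* Message 1: the register address (handled as RTIO_OP_TINY_TX). */
	rtio_sqe_prep_tiny_write(wr, spi_iodev, RTIO_PRIO_NORM, &reg_addr, 1, NULL);
	/* Chain both SQEs into one transaction so the handler above walks
	 * them with rtio_txn_next() and counts num_msgs == 2.
	 */
	wr->flags |= RTIO_SQE_TRANSACTION;

	/* Message 2: read the register contents back (handled as RTIO_OP_RX). */
	rtio_sqe_prep_read(rd, spi_iodev, RTIO_PRIO_NORM, buf, len, NULL);

	/* Submit and wait for both completions; the caller still consumes
	 * the CQEs afterwards.
	 */
	return rtio_submit(r, 2);
}
```

A transaction that genuinely needs more messages than the default limit would presumably be accommodated by raising `CONFIG_SPI_RTIO_FALLBACK_MSGS` in the project configuration rather than by changing this handler.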