@@ -1,5 +1,5 @@
 /*
- * Copyright 2018, 2024 NXP
+ * Copyright 2018, 2024-2025 NXP
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -12,6 +12,19 @@ LOG_MODULE_DECLARE(spi_lpspi, CONFIG_SPI_LOG_LEVEL);
 #include <zephyr/drivers/dma.h>
 #include "spi_nxp_lpspi_priv.h"
 
+/* These states track the RX and TX completion status, and the synchronization
+ * of the next DMA transfer's size between the two channels.
+ */
+typedef enum {
+        LPSPI_TRANSFER_STATE_NULL,
+        LPSPI_TRANSFER_STATE_ONGOING,
+        LPSPI_TRANSFER_STATE_NEXT_DMA_SIZE_UPDATED,
+        LPSPI_TRANSFER_STATE_TX_DONE,
+        LPSPI_TRANSFER_STATE_RX_DONE,
+        LPSPI_TRANSFER_STATE_RX_TX_DONE,
+        LPSPI_TRANSFER_STATE_INVALID = 0xFFFFFFFFUL,
+} lpspi_transfer_state_t;
+
 /* dummy memory used for transferring NOP when tx buf is null */
 static uint32_t tx_nop_val; /* check compliance says no init to 0, but should be 0 in bss */
 /* dummy memory for transferring to when RX buf is null */
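
Aside (not part of the commit): the intended lifecycle of these states can be modeled in a few lines of standalone C. In this sketch, on_dma_done and the shortened state names are invented for illustration. The first channel to finish a chunk advances ONGOING to NEXT_DMA_SIZE_UPDATED, the second returns the pair to ONGOING, and on the final chunk the first finisher parks in TX_DONE or RX_DONE until the other channel closes the transfer with RX_TX_DONE.

    #include <assert.h>
    #include <stdbool.h>

    /* Illustrative mirror of lpspi_transfer_state_t. */
    enum xfer_state { ONGOING, NEXT_DMA_SIZE_UPDATED, TX_DONE, RX_DONE, RX_TX_DONE };

    /* One DMA-complete event; 'more' says whether another chunk remains. */
    static enum xfer_state on_dma_done(enum xfer_state s, bool is_tx, bool more)
    {
            switch (s) {
            case ONGOING:
                    /* First channel to finish the current chunk. */
                    return more ? NEXT_DMA_SIZE_UPDATED : (is_tx ? TX_DONE : RX_DONE);
            case NEXT_DMA_SIZE_UPDATED:
                    /* Second channel reloads with the already-computed size. */
                    return ONGOING;
            case TX_DONE:
            case RX_DONE:
                    /* The other channel just finished too: transfer complete. */
                    return RX_TX_DONE;
            default:
                    return s;
            }
    }

    int main(void)
    {
            enum xfer_state s = ONGOING;

            s = on_dma_done(s, true, true);   /* chunk 1: TX done first */
            s = on_dma_done(s, false, true);  /* chunk 1: RX done second */
            s = on_dma_done(s, true, false);  /* chunk 2 (last): TX done first */
            s = on_dma_done(s, false, false); /* chunk 2 (last): RX done second */
            assert(s == RX_TX_DONE);
            return 0;
    }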
@@ -22,17 +35,58 @@ struct spi_dma_stream {
         uint32_t channel;
         struct dma_config dma_cfg;
         struct dma_block_config dma_blk_cfg;
-        bool chunk_done;
 };
 
 struct spi_nxp_dma_data {
         struct spi_dma_stream dma_rx;
         struct spi_dma_stream dma_tx;
+
+        lpspi_transfer_state_t state;
+        /* This DMA size is used in the RX and TX callbacks to update the context.
+         * Due to an old LPSPI IP limitation, RX completion depends on the next TX
+         * DMA transfer starting, so TX and RX do not always start at the same time,
+         * yet the DMA size can only be calculated once for both buffer updates.
+         */
+        size_t synchronize_dma_size;
 };
 
+/*
+ * Issue a TCR (Transmit Command Register) command to properly end RX DMA transfers
+ * on certain LPSPI versions. The behavior depends on:
+ *
+ * 1. LPSPI hardware version:
+ *    - Version 1 (RT1170, RT10xx, Kinetis K series): TCR issue always required
+ *    - Version 2 (RT1180, MCXN, RT700, K32W, S32K3xx, MCXL10): TCR issue not needed
+ *
+ * 2. SPI_HOLD_ON_CS configuration:
+ *    - If enabled: keeps chip select (PCS) asserted between transfers
+ *    - If disabled: deasserts PCS after each transfer
+ *
+ * This function checks the LPSPI version and the SPI_HOLD_ON_CS setting to determine
+ * whether a TCR command is needed. For version 1, TCR is always issued. For version 2,
+ * TCR is only issued if SPI_HOLD_ON_CS is not set. Consequently, SPI_HOLD_ON_CS is
+ * not supported on version 1.
+ *
+ * The LPSPI version can be read from the VERID register, which is typically the
+ * first register in the memory map.
+ */
+static void spi_mcux_issue_TCR(const struct device *dev)
+{
+        LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
+        const struct spi_config *spi_cfg = DEV_DATA(dev)->ctx.config;
+        uint8_t major_ver = (base->VERID & LPSPI_VERID_MAJOR_MASK) >> LPSPI_VERID_MAJOR_SHIFT;
+
+        /* On old LPSPI versions we always have to issue TCR, or the transaction will
+         * never end. On newer versions, only issue TCR when hold-on-CS is disabled.
+         */
+        if (major_ver < 2 || !(spi_cfg->operation & SPI_HOLD_ON_CS)) {
+                base->TCR &= ~LPSPI_TCR_CONTC_MASK;
+        }
+}
+
 static struct dma_block_config *lpspi_dma_common_load(struct spi_dma_stream *stream,
-                                                      const struct device *dev,
-                                                      const uint8_t *buf, size_t len)
+                                                      const struct device *dev, const uint8_t *buf,
+                                                      size_t len)
 {
         struct dma_block_config *blk_cfg = &stream->dma_blk_cfg;
 
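
The version gate above can be exercised in isolation. A minimal sketch, assuming the VERID MAJOR field occupies bits 31:24 (as described in NXP LPSPI reference manuals; verify for your part) — needs_tcr_issue and the sample register values are invented for illustration, not part of the driver:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layout of LPSPI VERID: the MAJOR field sits in bits 31:24. */
    #define VERID_MAJOR_SHIFT 24u
    #define VERID_MAJOR_MASK  (0xFFu << VERID_MAJOR_SHIFT)

    /* Mirrors the gate in spi_mcux_issue_TCR(): version 1 always issues TCR,
     * version 2 only when the caller did not request hold-on-CS. */
    static bool needs_tcr_issue(uint32_t verid, bool hold_on_cs)
    {
            uint8_t major = (uint8_t)((verid & VERID_MAJOR_MASK) >> VERID_MAJOR_SHIFT);

            return (major < 2) || !hold_on_cs;
    }

    int main(void)
    {
            printf("%d\n", needs_tcr_issue(0x01000004u, true));  /* 1: v1 always issues */
            printf("%d\n", needs_tcr_issue(0x02000004u, true));  /* 0: v2 + hold-on-CS skips */
            printf("%d\n", needs_tcr_issue(0x02000004u, false)); /* 1: v2 without hold-on-CS */
            return 0;
    }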
@@ -91,30 +145,35 @@ static int lpspi_dma_rx_load(const struct device *dev, uint8_t *buf, size_t len)
         return dma_config(stream->dma_dev, stream->channel, &stream->dma_cfg);
 }
 
-static inline int lpspi_dma_rxtx_load(const struct device *dev)
+/* Return values:
+ * positive: a data chunk was loaded successfully; the value is the loaded chunk size;
+ * negative: an error occurred; the value is the error code;
+ * 0: no data was loaded.
+ */
+static int lpspi_dma_rxtx_load(const struct device *dev)
 {
         struct lpspi_data *data = dev->data;
         struct spi_nxp_dma_data *dma_data = (struct spi_nxp_dma_data *)data->driver_data;
         struct spi_dma_stream *rx = &dma_data->dma_rx;
         struct spi_dma_stream *tx = &dma_data->dma_tx;
         struct spi_context *ctx = &data->ctx;
-        size_t next_chunk_size = spi_context_max_continuous_chunk(ctx);
+        size_t dma_size = spi_context_max_continuous_chunk(ctx);
         int ret = 0;
 
-        if (next_chunk_size == 0) {
+        if (dma_size == 0) {
                 /* In case both buffers are 0 length, we should not even be here
                  * and attempting to set up a DMA transfer like this will cause
                  * errors that lock up the system in some cases with eDMA.
                  */
-                return -ENODATA;
+                return 0;
         }
 
-        ret = lpspi_dma_tx_load(dev, ctx->tx_buf, next_chunk_size);
+        ret = lpspi_dma_tx_load(dev, ctx->tx_buf, dma_size);
         if (ret != 0) {
                 return ret;
         }
 
-        ret = lpspi_dma_rx_load(dev, ctx->rx_buf, next_chunk_size);
+        ret = lpspi_dma_rx_load(dev, ctx->rx_buf, dma_size);
         if (ret != 0) {
                 return ret;
         }
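
The three-way return convention matters to the caller, which must treat zero as "complete immediately" rather than as an error. A caller-side sketch (illustration only; rxtx_load_model is a hypothetical stand-in, not the driver function):

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for lpspi_dma_rxtx_load(): positive = loaded chunk size,
     * 0 = nothing to transfer, negative = error code. */
    static int rxtx_load_model(int pending_bytes)
    {
            if (pending_bytes < 0) {
                    return -EIO; /* e.g. dma_config() or dma_start() failed */
            }
            return pending_bytes; /* 0 means "no data", >0 is the chunk size */
    }

    int main(void)
    {
            const int cases[] = { 16, 0, -1 };

            for (int i = 0; i < 3; i++) {
                    int ret = rxtx_load_model(cases[i]);

                    if (ret < 0) {
                            printf("error %d: abort the transfer\n", ret);
                    } else if (ret == 0) {
                            printf("nothing to do: release and return\n");
                    } else {
                            printf("loaded %d bytes: wait for DMA callbacks\n", ret);
                    }
            }
            return 0;
    }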
@@ -124,88 +183,116 @@ static inline int lpspi_dma_rxtx_load(const struct device *dev)
                 return ret;
         }
 
-        return dma_start(tx->dma_dev, tx->channel);
-}
-
-static int lpspi_dma_next_fill(const struct device *dev)
-{
-        struct lpspi_data *data = (struct lpspi_data *)dev->data;
-        struct spi_nxp_dma_data *dma_data = (struct spi_nxp_dma_data *)data->driver_data;
-        struct spi_dma_stream *rx = &dma_data->dma_rx;
-        struct spi_dma_stream *tx = &dma_data->dma_tx;
-
-        rx->chunk_done = false;
-        tx->chunk_done = false;
+        ret = dma_start(tx->dma_dev, tx->channel);
+        if (ret != 0) {
+                return ret;
+        }
 
-        return lpspi_dma_rxtx_load(dev);
+        return dma_size;
 }
 
 static void spi_mcux_dma_callback(const struct device *dev, void *arg, uint32_t channel, int status)
 {
+        /* arg directly holds the spi device */
         const struct device *spi_dev = arg;
         LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(spi_dev, reg_base);
         struct lpspi_data *data = (struct lpspi_data *)spi_dev->data;
         struct spi_nxp_dma_data *dma_data = (struct spi_nxp_dma_data *)data->driver_data;
         struct spi_dma_stream *rx = &dma_data->dma_rx;
         struct spi_dma_stream *tx = &dma_data->dma_tx;
         struct spi_context *ctx = &data->ctx;
-        char debug_char;
+        char debug_char = (channel == dma_data->dma_tx.channel) ? 'T' : 'R';
+        int ret = 0;
 
         if (status < 0) {
+                ret = status;
                 goto error;
-        } else {
-                /* don't care about positive values, normalize to "okay" = 0 */
-                status = 0;
         }
 
-        if (channel == rx->channel) {
-                spi_context_update_rx(ctx, 1, rx->dma_blk_cfg.block_size);
-                debug_char = 'R';
-                rx->chunk_done = true;
-        } else if (channel == tx->channel) {
-                spi_context_update_tx(ctx, 1, tx->dma_blk_cfg.block_size);
-                debug_char = 'T';
-                tx->chunk_done = true;
-        } else {
-                /* invalid channel */
-                status = -EIO;
+        if (channel != dma_data->dma_tx.channel && channel != dma_data->dma_rx.channel) {
+                ret = -EIO;
                 goto error;
         }
 
-        LOG_DBG("DMA %cX Block Complete", debug_char);
-
-        /* wait for the other channel to finish if needed */
-        if (!rx->chunk_done || !tx->chunk_done) {
-                return;
-        }
-
-
-        while ((IS_ENABLED(CONFIG_SOC_FAMILY_NXP_IMXRT) ||
-                IS_ENABLED(CONFIG_SOC_FAMILY_KINETIS)) &&
-               (base->SR & LPSPI_SR_MBF_MASK)) {
-                /* wait until module is idle */
-        }
-
-        if (spi_context_max_continuous_chunk(ctx) == 0) {
-                goto done;
-        }
-
-        status = lpspi_dma_next_fill(spi_dev);
-        if (status) {
+        switch (dma_data->state) {
+        case LPSPI_TRANSFER_STATE_ONGOING:
+                spi_context_update_tx(ctx, 1, tx->dma_blk_cfg.block_size);
+                spi_context_update_rx(ctx, 1, rx->dma_blk_cfg.block_size);
+                /* Calculate the next DMA transfer size */
+                dma_data->synchronize_dma_size = spi_context_max_continuous_chunk(ctx);
+                LOG_DBG("tx len:%d rx len:%d next dma size:%d", ctx->tx_len, ctx->rx_len,
+                        dma_data->synchronize_dma_size);
+                if (dma_data->synchronize_dma_size > 0) {
+                        ret = (channel == dma_data->dma_tx.channel)
+                                      ? lpspi_dma_tx_load(spi_dev, ctx->tx_buf,
+                                                          dma_data->synchronize_dma_size)
+                                      : lpspi_dma_rx_load(spi_dev, ctx->rx_buf,
+                                                          dma_data->synchronize_dma_size);
+
+                        if (ret != 0) {
+                                goto error;
+                        }
+
+                        ret = dma_start(dev, channel);
+                        if (ret != 0) {
+                                goto error;
+                        }
+                        dma_data->state = LPSPI_TRANSFER_STATE_NEXT_DMA_SIZE_UPDATED;
+                } else {
+                        ret = dma_stop(dev, channel);
+                        if (ret != 0) {
+                                goto error;
+                        }
+                        /* This is the end of the transfer. */
+                        if (channel == dma_data->dma_tx.channel) {
+                                spi_mcux_issue_TCR(spi_dev);
+                                dma_data->state = LPSPI_TRANSFER_STATE_TX_DONE;
+                                base->DER &= ~LPSPI_DER_TDDE_MASK;
+                        } else {
+                                dma_data->state = LPSPI_TRANSFER_STATE_RX_DONE;
+                                base->DER &= ~LPSPI_DER_RDDE_MASK;
+                        }
+                }
+                break;
+        case LPSPI_TRANSFER_STATE_NEXT_DMA_SIZE_UPDATED:
+                ret = (channel == dma_data->dma_tx.channel)
+                              ? lpspi_dma_tx_load(spi_dev, ctx->tx_buf,
+                                                  dma_data->synchronize_dma_size)
+                              : lpspi_dma_rx_load(spi_dev, ctx->rx_buf,
+                                                  dma_data->synchronize_dma_size);
+                dma_data->synchronize_dma_size = 0;
+
+                if (ret != 0) {
+                        goto error;
+                }
+
+                ret = dma_start(dev, channel);
+                if (ret != 0) {
+                        goto error;
+                }
+                dma_data->state = LPSPI_TRANSFER_STATE_ONGOING;
+                break;
+
+        case LPSPI_TRANSFER_STATE_TX_DONE:
+        case LPSPI_TRANSFER_STATE_RX_DONE:
+                dma_data->state = LPSPI_TRANSFER_STATE_RX_TX_DONE;
+                /* TX and RX are both done here. */
+                spi_context_complete(ctx, spi_dev, 0);
+                spi_context_cs_control(ctx, false);
+                break;
+
+        default:
+                LOG_ERR("unknown spi transfer state:%d", dma_data->state);
+                ret = -EIO;
                 goto error;
         }
 
+        LOG_DBG("DMA %cX Block Complete", debug_char);
         return;
 error:
-        LOG_ERR("DMA callback error with channel %d err %d.", channel, status);
-done:
-        base->DER &= ~(LPSPI_DER_TDDE_MASK | LPSPI_DER_RDDE_MASK);
-        base->TCR &= ~LPSPI_TCR_CONT_MASK;
-        lpspi_wait_tx_fifo_empty(spi_dev);
+        LOG_ERR("DMA callback error with channel %d.", channel);
+        spi_context_complete(ctx, spi_dev, ret);
         spi_context_cs_control(ctx, false);
-        base->CR |= LPSPI_CR_RTF_MASK | LPSPI_CR_RRF_MASK;
-        spi_context_complete(ctx, spi_dev, status);
-        spi_context_release(ctx, status);
 }
 
 static int transceive_dma(const struct device *dev, const struct spi_config *spi_cfg,
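
The switch above is easiest to see as a ping-pong between the two channels' callbacks: whichever channel finishes a chunk first computes the next size once and stores it in synchronize_dma_size; the second callback reuses the stored size, so both channels always reload identical lengths. A standalone model of that handoff (illustration only; all names invented, and the driver's context bookkeeping is reduced to a single byte counter):

    #include <stdio.h>

    #define MAX_CHUNK 16u

    static unsigned int remaining = 40u; /* total bytes left in the transfer    */
    static unsigned int saved_size;      /* stands in for synchronize_dma_size  */
    static int size_is_saved;            /* stands in for the state flag        */

    static void on_chunk_done(char ch)
    {
            unsigned int len;

            if (!size_is_saved) {
                    /* First completer: consume the finished chunk, plan the next. */
                    remaining -= (remaining < MAX_CHUNK) ? remaining : MAX_CHUNK;
                    saved_size = (remaining < MAX_CHUNK) ? remaining : MAX_CHUNK;
                    size_is_saved = 1;
                    len = saved_size;
            } else {
                    /* Second completer: reuse the size the first one computed. */
                    len = saved_size;
                    size_is_saved = 0;
            }

            if (len > 0) {
                    printf("%cX reloads %u bytes\n", ch, len);
            } else {
                    printf("%cX done\n", ch);
            }
    }

    int main(void)
    {
            /* 40 bytes in 16-byte chunks; callbacks arrive interleaved, TX first. */
            for (int i = 0; i < 3; i++) {
                    on_chunk_done('T');
                    on_chunk_done('R');
            }
            return 0;
    }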
@@ -214,7 +301,9 @@ static int transceive_dma(const struct device *dev, const struct spi_config *spi
 {
         LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
         struct lpspi_data *data = dev->data;
+        struct spi_nxp_dma_data *dma_data = (struct spi_nxp_dma_data *)data->driver_data;
         struct spi_context *ctx = &data->ctx;
+        uint8_t major_ver = (base->VERID & LPSPI_VERID_MAJOR_MASK) >> LPSPI_VERID_MAJOR_SHIFT;
         int ret;
 
         spi_context_lock(ctx, asynchronous, cb, userdata, spi_cfg);
@@ -224,30 +313,41 @@ static int transceive_dma(const struct device *dev, const struct spi_config *spi
                 goto out;
         }
 
-        spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1);
-
-        ret = lpspi_dma_next_fill(dev);
-        if (ret == -ENODATA) {
-                /* No transfer to do? So just exit */
-                ret = 0;
-                goto out;
-        } else if (ret) {
-                goto out;
+        /* Check the CS hold-on feature for DMA mode; it is not supported on some platforms. */
+        if ((spi_cfg->operation & SPI_HOLD_ON_CS) && major_ver < 2) {
+                LOG_ERR("SPI CS hold on feature is not supported on this platform.");
+                return -ENOTSUP;
         }
 
-        if (!(IS_ENABLED(CONFIG_SOC_FAMILY_NXP_IMXRT) || IS_ENABLED(CONFIG_SOC_FAMILY_KINETIS))) {
-                base->TCR |= LPSPI_TCR_CONT_MASK;
-        }
+        /* Always use continuous mode to satisfy SPI API requirements. */
+        base->TCR |= LPSPI_TCR_CONT_MASK | LPSPI_TCR_CONTC_MASK;
 
-        spi_context_cs_control(ctx, true);
+        /* Set both watermarks to 0: there are synchronization requirements between
+         * RX and TX on the RT platform, and the TX and RX DMA callbacks must be
+         * called in interleaved order; a non-zero TX watermark may break this.
+         */
+        base->FCR = LPSPI_FCR_TXWATER(0) | LPSPI_FCR_RXWATER(0);
+        spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
 
-        base->CR |= LPSPI_CR_RTF_MASK | LPSPI_CR_RRF_MASK;
+        /* Mark the next DMA size as not yet computed. */
+        dma_data->synchronize_dma_size = 0;
+        dma_data->state = LPSPI_TRANSFER_STATE_NULL;
 
+        /* Load the DMA blocks */
+        ret = lpspi_dma_rxtx_load(dev);
+        if (ret <= 0) {
+                goto out;
+        }
+
+        dma_data->state = LPSPI_TRANSFER_STATE_ONGOING;
+        /* Assert the CS line just before the DMA transfer. */
+        spi_context_cs_control(ctx, true);
+        /* Enable DMA requests */
         base->DER |= LPSPI_DER_TDDE_MASK | LPSPI_DER_RDDE_MASK;
 
         ret = spi_context_wait_for_completion(ctx);
-        if (ret >= 0) {
-                return ret;
+        if (ret) {
+                spi_context_cs_control(ctx, false);
         }
 out:
         spi_context_release(ctx, ret);