Skip to content

Commit 1c23cb9

Browse files
Raymond0225nordicjm
authored and committed
[nrf fromtree] drivers: spi: nxp: fix PCS broken issue and PCS_HOLD_ON support
Different LPSPI IPs are used for RT11xx and MCXN. On a older version of LPSPI, a transmit command or a TX data need to be issued to end a frame. On a new version, no such requirement. Based on above information, we have to make DMA transfers "cascade" in the DMA ISR to keep CS asserted during the whole SPI transfer. PCS_HOLD_ON is a feature to keep CS asserted during multi SPI transfers. It is implemented and supported on new LPSPI IP but it is impossible to be supported on an older version like RT11xx. Signed-off-by: Raymond Lei <[email protected]> (cherry picked from commit 0d29352)
1 parent 499ef10 commit 1c23cb9

File tree

3 files changed

+185
-83
lines changed

3 files changed

+185
-83
lines changed

drivers/spi/spi_nxp_lpspi/spi_nxp_lpspi_dma.c

Lines changed: 182 additions & 82 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright 2018, 2024 NXP
2+
* Copyright 2018, 2024-2025 NXP
33
*
44
* SPDX-License-Identifier: Apache-2.0
55
*/
@@ -12,6 +12,19 @@ LOG_MODULE_DECLARE(spi_lpspi, CONFIG_SPI_LOG_LEVEL);
1212
#include <zephyr/drivers/dma.h>
1313
#include "spi_nxp_lpspi_priv.h"
1414

15+
/* These states indicate what's the status of RX and TX, also synchronization
16+
* status of DMA size of the next DMA transfer.
17+
*/
18+
typedef enum {
19+
LPSPI_TRANSFER_STATE_NULL,
20+
LPSPI_TRANSFER_STATE_ONGOING,
21+
LPSPI_TRANSFER_STATE_NEXT_DMA_SIZE_UPDATED,
22+
LPSPI_TRANSFER_STATE_TX_DONE,
23+
LPSPI_TRANSFER_STATE_RX_DONE,
24+
LPSPI_TRANSFER_STATE_RX_TX_DONE,
25+
LPSPI_TRANSFER_STATE_INVALID = 0xFFFFFFFFUL,
26+
} lpspi_transfer_state_t;
27+
1528
/* dummy memory used for transferring NOP when tx buf is null */
1629
static uint32_t tx_nop_val; /* check compliance says no init to 0, but should be 0 in bss */
1730
/* dummy memory for transferring to when RX buf is null */
@@ -22,17 +35,58 @@ struct spi_dma_stream {
2235
uint32_t channel;
2336
struct dma_config dma_cfg;
2437
struct dma_block_config dma_blk_cfg;
25-
bool chunk_done;
2638
};
2739

2840
struct spi_nxp_dma_data {
2941
struct spi_dma_stream dma_rx;
3042
struct spi_dma_stream dma_tx;
43+
44+
lpspi_transfer_state_t state;
45+
/* This DMA size is used in callback function for RX and TX context update.
46+
* because of old LPSPI IP limitation, RX complete depend on next TX DMA transfer start,
47+
* so TX and RX not always start at the same time while we can only calculate DMA transfer
48+
* size once and update the buffer pointers at the same time.
49+
*/
50+
size_t synchronize_dma_size;
3151
};
3252

53+
/*
54+
* Issue a TCR (Transmit Command Register) command to properly end RX DMA transfers
55+
* on certain LPSPI versions. The behavior depends on:
56+
*
57+
* 1. LPSPI Hardware Version:
58+
* - Version 1 (RT1170, RT10xx, Kinetis K series): TCR issue always required
59+
* - Version 2 (RT1180, MCXN, RT700, K32W, S32K3xx, MCXL10): TCR issue not needed
60+
*
61+
* 2. SPI_HOLD_ON_CS Configuration:
62+
* - If enabled: Keeps chip select (PCS) asserted between transfers
63+
* - If disabled: Deasserts PCS after each transfer
64+
*
65+
* This function checks the LPSPI version and SPI_HOLD_ON_CS setting to determine
66+
* if a TCR command is needed. For version 1, TCR is always issued. For version 2,
67+
* TCR is only issued if SPI_HOLD_ON_CS is not set. Therefore, SPI_HOLD_ON_CS is not
68+
* supported for version 1.
69+
*
70+
* The LPSPI version can be read from the VERID register, which is typically the
71+
* first register in the memory map.
72+
*/
73+
static void spi_mcux_issue_TCR(const struct device *dev)
74+
{
75+
LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
76+
const struct spi_config *spi_cfg = DEV_DATA(dev)->ctx.config;
77+
uint8_t major_ver = (base->VERID & LPSPI_VERID_MAJOR_MASK) >> LPSPI_VERID_MAJOR_SHIFT;
78+
79+
/* On old LPSPI versions, we always have to issue TCR, or transaction will never end.
80+
* On a newer LPSPI version, only issue TCR when hold on CS feature is disabled.
81+
*/
82+
if (major_ver < 2 || !(spi_cfg->operation & SPI_HOLD_ON_CS)) {
83+
base->TCR &= ~LPSPI_TCR_CONTC_MASK;
84+
}
85+
}
86+
3387
static struct dma_block_config *lpspi_dma_common_load(struct spi_dma_stream *stream,
34-
const struct device *dev,
35-
const uint8_t *buf, size_t len)
88+
const struct device *dev, const uint8_t *buf,
89+
size_t len)
3690
{
3791
struct dma_block_config *blk_cfg = &stream->dma_blk_cfg;
3892

@@ -91,30 +145,35 @@ static int lpspi_dma_rx_load(const struct device *dev, uint8_t *buf, size_t len)
91145
return dma_config(stream->dma_dev, stream->channel, &stream->dma_cfg);
92146
}
93147

94-
static inline int lpspi_dma_rxtx_load(const struct device *dev)
148+
/* Return values:
149+
* positive value if a data chunk is loaded successfully and return the data chunk size loaded;
150+
* negative value if error happens and return the error code;
151+
* 0 if no data is loaded;
152+
*/
153+
static int lpspi_dma_rxtx_load(const struct device *dev)
95154
{
96155
struct lpspi_data *data = dev->data;
97156
struct spi_nxp_dma_data *dma_data = (struct spi_nxp_dma_data *)data->driver_data;
98157
struct spi_dma_stream *rx = &dma_data->dma_rx;
99158
struct spi_dma_stream *tx = &dma_data->dma_tx;
100159
struct spi_context *ctx = &data->ctx;
101-
size_t next_chunk_size = spi_context_max_continuous_chunk(ctx);
160+
size_t dma_size = spi_context_max_continuous_chunk(ctx);
102161
int ret = 0;
103162

104-
if (next_chunk_size == 0) {
163+
if (dma_size == 0) {
105164
/* In case both buffers are 0 length, we should not even be here
106165
* and attempting to set up a DMA transfer like this will cause
107166
* errors that lock up the system in some cases with eDMA.
108167
*/
109-
return -ENODATA;
168+
return 0;
110169
}
111170

112-
ret = lpspi_dma_tx_load(dev, ctx->tx_buf, next_chunk_size);
171+
ret = lpspi_dma_tx_load(dev, ctx->tx_buf, dma_size);
113172
if (ret != 0) {
114173
return ret;
115174
}
116175

117-
ret = lpspi_dma_rx_load(dev, ctx->rx_buf, next_chunk_size);
176+
ret = lpspi_dma_rx_load(dev, ctx->rx_buf, dma_size);
118177
if (ret != 0) {
119178
return ret;
120179
}
@@ -124,88 +183,116 @@ static inline int lpspi_dma_rxtx_load(const struct device *dev)
124183
return ret;
125184
}
126185

127-
return dma_start(tx->dma_dev, tx->channel);
128-
}
129-
130-
static int lpspi_dma_next_fill(const struct device *dev)
131-
{
132-
struct lpspi_data *data = (struct lpspi_data *)dev->data;
133-
struct spi_nxp_dma_data *dma_data = (struct spi_nxp_dma_data *)data->driver_data;
134-
struct spi_dma_stream *rx = &dma_data->dma_rx;
135-
struct spi_dma_stream *tx = &dma_data->dma_tx;
136-
137-
rx->chunk_done = false;
138-
tx->chunk_done = false;
186+
ret = dma_start(tx->dma_dev, tx->channel);
187+
if (ret != 0) {
188+
return ret;
189+
}
139190

140-
return lpspi_dma_rxtx_load(dev);
191+
return dma_size;
141192
}
142193

143194
static void spi_mcux_dma_callback(const struct device *dev, void *arg, uint32_t channel, int status)
144195
{
196+
/* arg directly holds the spi device */
145197
const struct device *spi_dev = arg;
146198
LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(spi_dev, reg_base);
147199
struct lpspi_data *data = (struct lpspi_data *)spi_dev->data;
148200
struct spi_nxp_dma_data *dma_data = (struct spi_nxp_dma_data *)data->driver_data;
149201
struct spi_dma_stream *rx = &dma_data->dma_rx;
150202
struct spi_dma_stream *tx = &dma_data->dma_tx;
151203
struct spi_context *ctx = &data->ctx;
152-
char debug_char;
204+
char debug_char = (channel == dma_data->dma_tx.channel) ? 'T' : 'R';
205+
int ret = 0;
153206

154207
if (status < 0) {
208+
ret = status;
155209
goto error;
156-
} else {
157-
/* don't care about positive values, normalize to "okay" = 0 */
158-
status = 0;
159210
}
160211

161-
if (channel == rx->channel) {
162-
spi_context_update_rx(ctx, 1, rx->dma_blk_cfg.block_size);
163-
debug_char = 'R';
164-
rx->chunk_done = true;
165-
} else if (channel == tx->channel) {
166-
spi_context_update_tx(ctx, 1, tx->dma_blk_cfg.block_size);
167-
debug_char = 'T';
168-
tx->chunk_done = true;
169-
} else {
170-
/* invalid channel */
171-
status = -EIO;
212+
if (channel != dma_data->dma_tx.channel && channel != dma_data->dma_rx.channel) {
213+
ret = -EIO;
172214
goto error;
173215
}
174216

175-
LOG_DBG("DMA %cX Block Complete", debug_char);
176-
177-
/* wait for the other channel to finish if needed */
178-
if (!rx->chunk_done || !tx->chunk_done) {
179-
return;
180-
}
181-
182-
183-
while ((IS_ENABLED(CONFIG_SOC_FAMILY_NXP_IMXRT) ||
184-
IS_ENABLED(CONFIG_SOC_FAMILY_KINETIS)) &&
185-
(base->SR & LPSPI_SR_MBF_MASK)) {
186-
/* wait until module is idle */
187-
}
188-
189-
if (spi_context_max_continuous_chunk(ctx) == 0) {
190-
goto done;
191-
}
192-
193-
status = lpspi_dma_next_fill(spi_dev);
194-
if (status) {
217+
switch (dma_data->state) {
218+
case LPSPI_TRANSFER_STATE_ONGOING:
219+
spi_context_update_tx(ctx, 1, tx->dma_blk_cfg.block_size);
220+
spi_context_update_rx(ctx, 1, rx->dma_blk_cfg.block_size);
221+
/* Calculate next DMA transfer size */
222+
dma_data->synchronize_dma_size = spi_context_max_continuous_chunk(ctx);
223+
LOG_DBG("tx len:%d rx len:%d next dma size:%d", ctx->tx_len, ctx->rx_len,
224+
dma_data->synchronize_dma_size);
225+
if (dma_data->synchronize_dma_size > 0) {
226+
ret = (channel == dma_data->dma_tx.channel)
227+
? lpspi_dma_tx_load(spi_dev, ctx->tx_buf,
228+
dma_data->synchronize_dma_size)
229+
: lpspi_dma_rx_load(spi_dev, ctx->rx_buf,
230+
dma_data->synchronize_dma_size);
231+
232+
if (ret != 0) {
233+
goto error;
234+
}
235+
236+
ret = dma_start(dev, channel);
237+
if (ret != 0) {
238+
goto error;
239+
}
240+
dma_data->state = LPSPI_TRANSFER_STATE_NEXT_DMA_SIZE_UPDATED;
241+
} else {
242+
ret = dma_stop(dev, channel);
243+
if (ret != 0) {
244+
goto error;
245+
}
246+
/* This is the end of the transfer. */
247+
if (channel == dma_data->dma_tx.channel) {
248+
spi_mcux_issue_TCR(spi_dev);
249+
dma_data->state = LPSPI_TRANSFER_STATE_TX_DONE;
250+
base->DER &= ~LPSPI_DER_TDDE_MASK;
251+
} else {
252+
dma_data->state = LPSPI_TRANSFER_STATE_RX_DONE;
253+
base->DER &= ~LPSPI_DER_RDDE_MASK;
254+
}
255+
}
256+
break;
257+
case LPSPI_TRANSFER_STATE_NEXT_DMA_SIZE_UPDATED:
258+
ret = (channel == dma_data->dma_tx.channel)
259+
? lpspi_dma_tx_load(spi_dev, ctx->tx_buf,
260+
dma_data->synchronize_dma_size)
261+
: lpspi_dma_rx_load(spi_dev, ctx->rx_buf,
262+
dma_data->synchronize_dma_size);
263+
dma_data->synchronize_dma_size = 0;
264+
265+
if (ret != 0) {
266+
goto error;
267+
}
268+
269+
ret = dma_start(dev, channel);
270+
if (ret != 0) {
271+
goto error;
272+
}
273+
dma_data->state = LPSPI_TRANSFER_STATE_ONGOING;
274+
break;
275+
276+
case LPSPI_TRANSFER_STATE_TX_DONE:
277+
case LPSPI_TRANSFER_STATE_RX_DONE:
278+
dma_data->state = LPSPI_TRANSFER_STATE_RX_TX_DONE;
279+
/* TX and RX both done here. */
280+
spi_context_complete(ctx, spi_dev, 0);
281+
spi_context_cs_control(ctx, false);
282+
break;
283+
284+
default:
285+
LOG_ERR("unknown spi stransfer state:%d", dma_data->state);
286+
ret = -EIO;
195287
goto error;
196288
}
197289

290+
LOG_DBG("DMA %cX Block Complete", debug_char);
198291
return;
199292
error:
200-
LOG_ERR("DMA callback error with channel %d err %d.", channel, status);
201-
done:
202-
base->DER &= ~(LPSPI_DER_TDDE_MASK | LPSPI_DER_RDDE_MASK);
203-
base->TCR &= ~LPSPI_TCR_CONT_MASK;
204-
lpspi_wait_tx_fifo_empty(spi_dev);
293+
LOG_ERR("DMA callback error with channel %d.", channel);
294+
spi_context_complete(ctx, spi_dev, ret);
205295
spi_context_cs_control(ctx, false);
206-
base->CR |= LPSPI_CR_RTF_MASK | LPSPI_CR_RRF_MASK;
207-
spi_context_complete(ctx, spi_dev, status);
208-
spi_context_release(ctx, status);
209296
}
210297

211298
static int transceive_dma(const struct device *dev, const struct spi_config *spi_cfg,
@@ -214,7 +301,9 @@ static int transceive_dma(const struct device *dev, const struct spi_config *spi
214301
{
215302
LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
216303
struct lpspi_data *data = dev->data;
304+
struct spi_nxp_dma_data *dma_data = (struct spi_nxp_dma_data *)data->driver_data;
217305
struct spi_context *ctx = &data->ctx;
306+
uint8_t major_ver = (base->VERID & LPSPI_VERID_MAJOR_MASK) >> LPSPI_VERID_MAJOR_SHIFT;
218307
int ret;
219308

220309
spi_context_lock(ctx, asynchronous, cb, userdata, spi_cfg);
@@ -224,30 +313,41 @@ static int transceive_dma(const struct device *dev, const struct spi_config *spi
224313
goto out;
225314
}
226315

227-
spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1);
228-
229-
ret = lpspi_dma_next_fill(dev);
230-
if (ret == -ENODATA) {
231-
/* No transfer to do? So just exit */
232-
ret = 0;
233-
goto out;
234-
} else if (ret) {
235-
goto out;
316+
/* Check CS hold on feature for DMA mode, it is not supported on some platform. */
317+
if ((spi_cfg->operation & SPI_HOLD_ON_CS) && major_ver < 2) {
318+
LOG_ERR("SPI CS hold on feature is not supported on this platform.");
319+
return -ENOTSUP;
236320
}
237321

238-
if (!(IS_ENABLED(CONFIG_SOC_FAMILY_NXP_IMXRT) || IS_ENABLED(CONFIG_SOC_FAMILY_KINETIS))) {
239-
base->TCR |= LPSPI_TCR_CONT_MASK;
240-
}
322+
/* Always use continuous mode to satisfy SPI API requirements. */
323+
base->TCR |= LPSPI_TCR_CONT_MASK | LPSPI_TCR_CONTC_MASK;
241324

242-
spi_context_cs_control(ctx, true);
325+
/* Please set both watermarks as 0 because there are some synchronize requirements
326+
* between RX and TX on RT platform. TX and RX DMA callback must be called in interleaved
327+
* mode, a none-zero TX watermark may break this.
328+
*/
329+
base->FCR = LPSPI_FCR_TXWATER(0) | LPSPI_FCR_RXWATER(0);
330+
spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
243331

244-
base->CR |= LPSPI_CR_RTF_MASK | LPSPI_CR_RRF_MASK;
332+
/* Set next dma size is invalid. */
333+
dma_data->synchronize_dma_size = 0;
334+
dma_data->state = LPSPI_TRANSFER_STATE_NULL;
245335

336+
/* Load dma block */
337+
ret = lpspi_dma_rxtx_load(dev);
338+
if (ret <= 0) {
339+
goto out;
340+
}
341+
342+
dma_data->state = LPSPI_TRANSFER_STATE_ONGOING;
343+
/* Set CS line just before DMA transfer. */
344+
spi_context_cs_control(ctx, true);
345+
/* Enable DMA Requests */
246346
base->DER |= LPSPI_DER_TDDE_MASK | LPSPI_DER_RDDE_MASK;
247347

248348
ret = spi_context_wait_for_completion(ctx);
249-
if (ret >= 0) {
250-
return ret;
349+
if (ret) {
350+
spi_context_cs_control(ctx, false);
251351
}
252352
out:
253353
spi_context_release(ctx, ret);

tests/drivers/spi/spi_loopback/boards/mimxrt1060_evk_mimxrt1062_qspi_C.overlay

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
* SPDX-License-Identifier: Apache-2.0
55
*/
66

7+
/* Short J17-pin4 and J17-pin5, populate R356, R350,R346, R362 to enable Pins for SPI1. */
78
&lpspi1 {
89
slow@0 {
910
compatible = "test-spi-loopback-slow";

tests/drivers/spi/spi_loopback/boards/mimxrt1170_evk_mimxrt1176_cm7_A.overlay

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,10 @@
11
/*
2-
* Copyright 2023 NXP
2+
* Copyright 2023, 2025 NXP
33
*
44
* SPDX-License-Identifier: Apache-2.0
55
*/
66

7+
/* Short J10-pin8 and J10-pin10. */
78
&lpspi1 {
89
dmas = <&edma0 0 36>, <&edma0 1 37>;
910
dma-names = "rx", "tx";

0 commit comments

Comments
 (0)