@@ -86,9 +86,7 @@ struct spi_mcux_data {
 	struct spi_dma_stream dma_rx;
 	struct spi_dma_stream dma_tx;
 	/* dummy value used for transferring NOP when tx buf is null */
-	uint32_t dummy_tx_buffer;
-	/* dummy value used to read RX data into when rx buf is null */
-	uint32_t dummy_rx_buffer;
+	uint32_t dummy_buffer;
 #endif
 };
 
@@ -287,231 +285,245 @@ static void spi_mcux_dma_callback(const struct device *dev, void *arg, uint32_t
 	spi_context_complete(&data->ctx, spi_dev, 0);
 }
 
-static int spi_mcux_dma_tx_load(const struct device *dev, const uint8_t *buf, size_t len)
+static struct dma_block_config *spi_mcux_dma_common_load(struct spi_dma_stream *stream,
+							 const struct device *dev,
+							 const uint8_t *buf, size_t len)
 {
 	struct spi_mcux_data *data = dev->data;
-	struct dma_block_config *blk_cfg;
-	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
-
-	/* remember active TX DMA channel (used in callback) */
-	struct spi_dma_stream *stream = &data->dma_tx;
-
-	blk_cfg = &stream->dma_blk_cfg;
+	struct dma_block_config *blk_cfg = &stream->dma_blk_cfg;
 
 	/* prepare the block for this TX DMA channel */
 	memset(blk_cfg, 0, sizeof(struct dma_block_config));
 
+	blk_cfg->block_size = len;
+
 	if (buf == NULL) {
-		/* Treat the transfer as a peripheral to peripheral one, so that DMA
-		 * reads from this address each time
-		 */
-		blk_cfg->source_address = (uint32_t)&data->dummy_tx_buffer;
+		blk_cfg->source_address = (uint32_t)&data->dummy_buffer;
+		blk_cfg->dest_address = (uint32_t)&data->dummy_buffer;
+		/* pretend it is peripheral xfer so DMA just xfer to dummy buf */
 		stream->dma_cfg.channel_direction = PERIPHERAL_TO_PERIPHERAL;
 	} else {
-		/* tx direction has memory as source and periph as dest. */
 		blk_cfg->source_address = (uint32_t)buf;
+		blk_cfg->dest_address = (uint32_t)buf;
+	}
+
+	/* Transfer 1 byte each DMA loop */
+	stream->dma_cfg.source_burst_length = 1;
+	stream->dma_cfg.user_data = (void *)dev;
+	stream->dma_cfg.head_block = blk_cfg;
+
+	return blk_cfg;
+}
+
+static int spi_mcux_dma_tx_load(const struct device *dev, const uint8_t *buf, size_t len)
+{
+	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
+	struct spi_mcux_data *data = dev->data;
+	/* remember active TX DMA channel (used in callback) */
+	struct spi_dma_stream *stream = &data->dma_tx;
+	struct dma_block_config *blk_cfg = spi_mcux_dma_common_load(stream, dev, buf, len);
+
+	if (buf != NULL) {
+		/* tx direction has memory as source and periph as dest. */
 		stream->dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL;
 	}
-	/* Enable scatter/gather */
-	blk_cfg->source_gather_en = 1;
+
 	/* Dest is LPSPI tx fifo */
 	blk_cfg->dest_address = LPSPI_GetTxRegisterAddress(base);
-	blk_cfg->block_size = len;
-	/* Transfer 1 byte each DMA loop */
-	stream->dma_cfg.source_burst_length = 1;
 
-	stream->dma_cfg.head_block = &stream->dma_blk_cfg;
 	/* give the client dev as arg, as the callback comes from the dma */
-	stream->dma_cfg.user_data = (struct device *)dev;
 	/* pass our client origin to the dma: data->dma_tx.dma_channel */
-	return dma_config(data->dma_tx.dma_dev, data->dma_tx.channel, &stream->dma_cfg);
+	return dma_config(stream->dma_dev, stream->channel, &stream->dma_cfg);
 }
 
 static int spi_mcux_dma_rx_load(const struct device *dev, uint8_t *buf, size_t len)
 {
-	struct spi_mcux_data *data = dev->data;
-	struct dma_block_config *blk_cfg;
 	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
-
+	struct spi_mcux_data *data = dev->data;
 	/* retrieve active RX DMA channel (used in callback) */
 	struct spi_dma_stream *stream = &data->dma_rx;
+	struct dma_block_config *blk_cfg = spi_mcux_dma_common_load(stream, dev, buf, len);
 
-	blk_cfg = &stream->dma_blk_cfg;
-
-	/* prepare the block for this RX DMA channel */
-	memset(blk_cfg, 0, sizeof(struct dma_block_config));
-
-	if (buf == NULL) {
-		/* Treat the transfer as a peripheral to peripheral one, so that DMA
-		 * reads from this address each time
-		 */
-		blk_cfg->dest_address = (uint32_t)&data->dummy_rx_buffer;
-		stream->dma_cfg.channel_direction = PERIPHERAL_TO_PERIPHERAL;
-	} else {
+	if (buf != NULL) {
 		/* rx direction has periph as source and mem as dest. */
-		blk_cfg->dest_address = (uint32_t)buf;
 		stream->dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
 	}
-	blk_cfg->block_size = len;
-	/* Enable scatter/gather */
-	blk_cfg->dest_scatter_en = 1;
+
 	/* Source is LPSPI rx fifo */
 	blk_cfg->source_address = LPSPI_GetRxRegisterAddress(base);
-	stream->dma_cfg.source_burst_length = 1;
-
-	stream->dma_cfg.head_block = blk_cfg;
-	stream->dma_cfg.user_data = (struct device *)dev;
 
 	/* pass our client origin to the dma: data->dma_rx.channel */
-	return dma_config(data->dma_rx.dma_dev, data->dma_rx.channel, &stream->dma_cfg);
+	return dma_config(stream->dma_dev, stream->channel, &stream->dma_cfg);
 }
 
 static int wait_dma_rx_tx_done(const struct device *dev)
 {
 	struct spi_mcux_data *data = dev->data;
-	int ret = -1;
+	int ret;
 
-	while (1) {
+	do {
 		ret = spi_context_wait_for_completion(&data->ctx);
 		if (ret) {
 			LOG_DBG("Timed out waiting for SPI context to complete");
 			return ret;
-		}
-		if (data->status_flags & LPSPI_DMA_ERROR_FLAG) {
+		} else if (data->status_flags & LPSPI_DMA_ERROR_FLAG) {
 			return -EIO;
 		}
+	} while (!((data->status_flags & LPSPI_DMA_DONE_FLAG) == LPSPI_DMA_DONE_FLAG));
 
-		if ((data->status_flags & LPSPI_DMA_DONE_FLAG) == LPSPI_DMA_DONE_FLAG) {
-			LOG_DBG("DMA block completed");
-			return 0;
-		}
-	}
+	LOG_DBG("DMA block completed");
+	return 0;
 }
 
 static inline int spi_mcux_dma_rxtx_load(const struct device *dev, size_t *dma_size)
 {
-	struct spi_mcux_data *lpspi_data = dev->data;
+	struct spi_mcux_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
 	int ret = 0;
 
 	/* Clear status flags */
-	lpspi_data->status_flags = 0U;
+	data->status_flags = 0U;
+
 	/* Load dma blocks of equal length */
-	*dma_size = MIN(lpspi_data->ctx.tx_len, lpspi_data->ctx.rx_len);
-	if (*dma_size == 0) {
-		*dma_size = MAX(lpspi_data->ctx.tx_len, lpspi_data->ctx.rx_len);
-	}
+	*dma_size = spi_context_max_continuous_chunk(ctx);
 
-	ret = spi_mcux_dma_tx_load(dev, lpspi_data->ctx.tx_buf, *dma_size);
+	ret = spi_mcux_dma_tx_load(dev, ctx->tx_buf, *dma_size);
 	if (ret != 0) {
 		return ret;
 	}
 
-	ret = spi_mcux_dma_rx_load(dev, lpspi_data->ctx.rx_buf, *dma_size);
+	ret = spi_mcux_dma_rx_load(dev, ctx->rx_buf, *dma_size);
 	if (ret != 0) {
 		return ret;
 	}
 
 	/* Start DMA */
-	ret = dma_start(lpspi_data->dma_tx.dma_dev, lpspi_data->dma_tx.channel);
+	ret = dma_start(data->dma_tx.dma_dev, data->dma_tx.channel);
 	if (ret != 0) {
 		return ret;
 	}
 
-	ret = dma_start(lpspi_data->dma_rx.dma_dev, lpspi_data->dma_rx.channel);
+	ret = dma_start(data->dma_rx.dma_dev, data->dma_rx.channel);
 	return ret;
 }
 
-static int transceive_dma(const struct device *dev, const struct spi_config *spi_cfg,
-			  const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs,
-			  bool asynchronous, spi_callback_t cb, void *userdata)
+#ifdef CONFIG_SPI_ASYNC
+static int transceive_dma_async(const struct device *dev, spi_callback_t cb, void *userdata)
 {
 	struct spi_mcux_data *data = dev->data;
 	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
-	int ret;
+	struct spi_context *ctx = &data->ctx;
 	size_t dma_size;
+	int ret;
 
-	if (!asynchronous) {
-		spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);
-	}
+	ctx->asynchronous = true;
+	ctx->callback = cb;
+	ctx->callback_data = userdata;
 
-	ret = spi_mcux_configure(dev, spi_cfg);
+	ret = spi_mcux_dma_rxtx_load(dev, &dma_size);
 	if (ret) {
-		if (!asynchronous) {
-			spi_context_release(&data->ctx, ret);
-		}
 		return ret;
 	}
 
-#ifdef CONFIG_SOC_SERIES_MCXN
-	base->TCR |= LPSPI_TCR_CONT_MASK;
-#endif
+	/* Enable DMA Requests */
+	LPSPI_EnableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable);
 
-	/* DMA is fast enough watermarks are not required */
-	LPSPI_SetFifoWatermarks(base, 0U, 0U);
+	return 0;
+}
+#else
+#define transceive_dma_async(...) 0
+#endif /* CONFIG_SPI_ASYNC */
 
-	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
+static int transceive_dma_sync(const struct device *dev)
+{
+	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
+	struct spi_mcux_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+	size_t dma_size;
+	int ret;
 
-	if (!asynchronous) {
-		spi_context_cs_control(&data->ctx, true);
+	spi_context_cs_control(ctx, true);
 
-		/* Send each spi buf via DMA, updating context as DMA completes */
-		while (data->ctx.rx_len > 0 || data->ctx.tx_len > 0) {
-			/* Load dma block */
-			ret = spi_mcux_dma_rxtx_load(dev, &dma_size);
-			if (ret != 0) {
-				goto out;
-			}
+	/* Send each spi buf via DMA, updating context as DMA completes */
+	while (ctx->rx_len > 0 || ctx->tx_len > 0) {
+		/* Load dma block */
+		ret = spi_mcux_dma_rxtx_load(dev, &dma_size);
+		if (ret) {
+			return ret;
+		}
 
 #ifdef CONFIG_SOC_SERIES_MCXN
-			while (!(LPSPI_GetStatusFlags(base) & kLPSPI_TxDataRequestFlag)) {
-				/* wait until previous tx finished */
-			}
+		while (!(LPSPI_GetStatusFlags(base) & kLPSPI_TxDataRequestFlag)) {
+			/* wait until previous tx finished */
+		}
 #endif
 
-			/* Enable DMA Requests */
-			LPSPI_EnableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable);
+		/* Enable DMA Requests */
+		LPSPI_EnableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable);
 
-			/* Wait for DMA to finish */
-			ret = wait_dma_rx_tx_done(dev);
-			if (ret != 0) {
-				goto out;
-			}
+		/* Wait for DMA to finish */
+		ret = wait_dma_rx_tx_done(dev);
+		if (ret) {
+			return ret;
+		}
 
 #ifndef CONFIG_SOC_SERIES_MCXN
-			while ((LPSPI_GetStatusFlags(base) & kLPSPI_ModuleBusyFlag)) {
-				/* wait until module is idle */
-			}
+		while ((LPSPI_GetStatusFlags(base) & kLPSPI_ModuleBusyFlag)) {
+			/* wait until module is idle */
+		}
 #endif
 
-			/* Disable DMA */
-			LPSPI_DisableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable);
-
-			/* Update SPI contexts with amount of data we just sent */
-			spi_context_update_tx(&data->ctx, 1, dma_size);
-			spi_context_update_rx(&data->ctx, 1, dma_size);
-		}
-		spi_context_cs_control(&data->ctx, false);
-		base->TCR = 0;
+		/* Disable DMA */
+		LPSPI_DisableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable);
 
-out:
-	spi_context_release(&data->ctx, ret);
+		/* Update SPI contexts with amount of data we just sent */
+		spi_context_update_tx(ctx, 1, dma_size);
+		spi_context_update_rx(ctx, 1, dma_size);
 	}
-#if CONFIG_SPI_ASYNC
-	else {
-		data->ctx.asynchronous = asynchronous;
-		data->ctx.callback = cb;
-		data->ctx.callback_data = userdata;
 
-		ret = spi_mcux_dma_rxtx_load(dev, &dma_size);
-		if (ret != 0) {
-			goto out;
-		}
+	spi_context_cs_control(ctx, false);
 
-		/* Enable DMA Requests */
-		LPSPI_EnableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable);
+	base->TCR = 0;
+
+	return 0;
+}
+
+static int transceive_dma(const struct device *dev, const struct spi_config *spi_cfg,
+			  const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs,
+			  bool asynchronous, spi_callback_t cb, void *userdata)
+{
+	struct spi_mcux_data *data = dev->data;
+	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
+	int ret;
+
+	if (!asynchronous) {
+		spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);
+	}
+
+	ret = spi_mcux_configure(dev, spi_cfg);
+	if (ret && !asynchronous) {
+		goto out;
+	} else if (ret) {
+		return ret;
 	}
+
+#ifdef CONFIG_SOC_SERIES_MCXN
+	base->TCR |= LPSPI_TCR_CONT_MASK;
 #endif
 
+	/* DMA is fast enough watermarks are not required */
+	LPSPI_SetFifoWatermarks(base, 0U, 0U);
+
+	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
+
+	if (asynchronous) {
+		ret = transceive_dma_async(dev, cb, userdata);
+	} else {
+		ret = transceive_dma_sync(dev);
+	}
+
+out:
+	spi_context_release(&data->ctx, ret);
 	return ret;
 }
 #else