#include "drivers/DigitalOut.h"
#include "platform/SingletonPtr.h"
#include "platform/NonCopyable.h"
+ #include "platform/CacheAlignedBuffer.h"

#if defined MBED_CONF_DRIVERS_SPI_COUNT_MAX && DEVICE_SPI_COUNT > MBED_CONF_DRIVERS_SPI_COUNT_MAX
#define SPI_PERIPHERALS_USED MBED_CONF_DRIVERS_SPI_COUNT_MAX
@@ -194,6 +195,11 @@ const use_gpio_ssel_t use_gpio_ssel;
* the transfer but others can execute). Here's a sample of how to send the same data as above
* using the blocking async API:</p>
*
+ * <p>Note that when using the asynchronous API, you must use the CacheAlignedBuffer class when declaring the
+ * receive buffer. This is because some processors' async SPI implementations require the received buffer to
+ * be at an address which is aligned to the processor cache line size. CacheAlignedBuffer takes care of this
+ * for you and provides functions (data(), begin(), end()) to access the underlying data in the buffer.</p>
+ *
* @code
* #include "mbed.h"
*
@@ -203,8 +209,8 @@ const use_gpio_ssel_t use_gpio_ssel;
* device.format(8, 0);
*
* uint8_t command[2] = {0x0A, 0x0B};
- * uint8_t response[2];
- * int result = device.transfer_and_wait(command, sizeof(command), response, sizeof(response));
+ * CacheAlignedBuffer<uint8_t, 2> response;
+ * int result = device.transfer_and_wait(command, sizeof(command), response, sizeof(command));
* }
* @endcode
*
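(For illustration, not part of this diff: once transfer_and_wait() returns, the received bytes can be pulled out of the CacheAlignedBuffer through data(), or iterated with begin()/end(). A minimal sketch; the SPI pins and two-byte response size are assumptions.)

```cpp
// Sketch only: mirrors the doc-comment example above. The SPI pins (D11, D12, D13)
// and the two-byte response size are assumptions made for this illustration.
#include "mbed.h"

SPI device(D11, D12, D13);

int main()
{
    device.format(8, 0);

    uint8_t command[2] = {0x0A, 0x0B};
    CacheAlignedBuffer<uint8_t, 2> response;
    device.transfer_and_wait(command, sizeof(command), response, sizeof(command));

    // The received bytes live behind data(); begin()/end() allow iteration as well.
    printf("Response: 0x%02X 0x%02X\r\n", response.data()[0], response.data()[1]);
}
```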
@@ -458,8 +464,9 @@ class SPI : private NonCopyable<SPI> {
* @param tx_buffer The TX buffer with data to be transferred. If NULL is passed,
* the default %SPI value is sent.
* @param tx_length The length of TX buffer in bytes.
- * @param rx_buffer The RX buffer which is used for received data. If NULL is passed,
- * received data are ignored.
+ * @param rx_buffer The RX buffer which is used for received data. Rather than a C array, a CacheAlignedBuffer
+ * structure must be passed so that cache alignment can be handled for data received from DMA.
+ * May be nullptr if rx_length is 0.
* @param rx_length The length of RX buffer in bytes.
* @param callback The event callback function.
* @param event The logical OR of events to subscribe to. May be #SPI_EVENT_ALL, or some combination
@@ -471,16 +478,18 @@ class SPI : private NonCopyable<SPI> {
*/
template <typename WordT>
typename std::enable_if<std::is_integral<WordT>::value, int>::type
- transfer(const WordT *tx_buffer, int tx_length, WordT *rx_buffer, int rx_length, const event_callback_t &callback, int event = SPI_EVENT_COMPLETE)
+ transfer(const WordT *tx_buffer, int tx_length, CacheAlignedBuffer<WordT> &rx_buffer, int rx_length, const event_callback_t &callback, int event = SPI_EVENT_COMPLETE)
{
- return transfer_internal(tx_buffer, tx_length, rx_buffer, rx_length, callback, event);
+ MBED_ASSERT(rx_length <= static_cast<int>(rx_buffer.capacity()));
+ return transfer_internal(tx_buffer, tx_length, rx_buffer.data(), rx_length, callback, event);
}

// Overloads of the above to support passing nullptr
template <typename WordT>
typename std::enable_if<std::is_integral<WordT>::value, int>::type
- transfer(const std::nullptr_t *tx_buffer, int tx_length, WordT *rx_buffer, int rx_length, const event_callback_t &callback, int event = SPI_EVENT_COMPLETE)
+ transfer(const std::nullptr_t *tx_buffer, int tx_length, CacheAlignedBuffer<WordT> &rx_buffer, int rx_length, const event_callback_t &callback, int event = SPI_EVENT_COMPLETE)
{
+ MBED_ASSERT(rx_length <= static_cast<int>(rx_buffer.capacity()));
return transfer_internal(tx_buffer, tx_length, rx_buffer, rx_length, callback, event);
}
template <typename WordT>
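(For illustration, not part of this diff: a minimal sketch of driving the callback-based overload above with a CacheAlignedBuffer. The pins, buffer size, and callback body are assumptions; the point is that rx_length must not exceed rx_buffer.capacity(), which the overload now asserts.)

```cpp
// Sketch only -- assumed pins and sizes; adapt to the target board.
#include "mbed.h"

SPI spi(D11, D12, D13, D10, use_gpio_ssel);

CacheAlignedBuffer<uint8_t, 16> rx_buf;
uint8_t tx_buf[16] = {0};

void transfer_done(int event)
{
    if (event & SPI_EVENT_COMPLETE) {
        // rx_buf.data() now holds the bytes clocked in during the transfer.
    }
}

int main()
{
    spi.format(8, 0);

    // Starts the transfer and returns immediately; transfer_done() runs on completion.
    // rx_length (here sizeof(tx_buf)) must be <= rx_buf.capacity().
    spi.transfer(tx_buf, sizeof(tx_buf), rx_buf, sizeof(tx_buf), callback(transfer_done), SPI_EVENT_COMPLETE);

    while (true) {
        ThisThread::sleep_for(100ms);
    }
}
```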
@@ -502,8 +511,10 @@ class SPI : private NonCopyable<SPI> {
* Internally, the chip vendor may implement this function using either DMA or interrupts.
*
* @param tx_buffer The TX buffer with data to be transferred. May be nullptr if tx_length is 0.
- * @param tx_length The length of TX buffer in bytes. If 0, no transmission is done.
- * @param rx_buffer The RX buffer, which is used for received data. May be nullptr if tx_length is 0.
+ * @param tx_length The length of TX buffer in bytes. If 0, the default %SPI data value is sent when receiving data.
+ * @param rx_buffer The RX buffer which is used for received data. Rather than a C array, a CacheAlignedBuffer
+ * structure must be passed so that cache alignment can be handled for data received from DMA.
+ * May be nullptr if rx_length is 0.
* @param rx_length The length of RX buffer in bytes If 0, no reception is done.
* @param timeout timeout value. Use #rtos::Kernel::wait_for_u32_forever to wait forever (the default).
*
@@ -515,17 +526,19 @@ class SPI : private NonCopyable<SPI> {
*/
template <typename WordT>
typename std::enable_if<std::is_integral<WordT>::value, int>::type
- transfer_and_wait(const WordT *tx_buffer, int tx_length, WordT *rx_buffer, int rx_length, rtos::Kernel::Clock::duration_u32 timeout = rtos::Kernel::wait_for_u32_forever)
+ transfer_and_wait(const WordT *tx_buffer, int tx_length, CacheAlignedBuffer<WordT> &rx_buffer, int rx_length, rtos::Kernel::Clock::duration_u32 timeout = rtos::Kernel::wait_for_u32_forever)
{
- return transfer_and_wait_internal(tx_buffer, tx_length, rx_buffer, rx_length, timeout);
+ MBED_ASSERT(rx_length <= static_cast<int>(rx_buffer.capacity()));
+ return transfer_and_wait_internal(tx_buffer, tx_length, rx_buffer.data(), rx_length, timeout);
}

// Overloads of the above to support passing nullptr
template <typename WordT>
typename std::enable_if<std::is_integral<WordT>::value, int>::type
- transfer_and_wait(const std::nullptr_t *tx_buffer, int tx_length, WordT *rx_buffer, int rx_length, rtos::Kernel::Clock::duration_u32 timeout = rtos::Kernel::wait_for_u32_forever)
+ transfer_and_wait(const std::nullptr_t *tx_buffer, int tx_length, CacheAlignedBuffer<WordT> &rx_buffer, int rx_length, rtos::Kernel::Clock::duration_u32 timeout = rtos::Kernel::wait_for_u32_forever)
{
- return transfer_and_wait_internal(tx_buffer, tx_length, rx_buffer, rx_length, timeout);
+ MBED_ASSERT(rx_length <= static_cast<int>(rx_buffer.capacity()));
+ return transfer_and_wait_internal(tx_buffer, tx_length, rx_buffer.data(), rx_length, timeout);
}
template <typename WordT>
typename std::enable_if<std::is_integral<WordT>::value, int>::type
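(For illustration, not part of this diff: the nullptr overload above allows receive-only blocking transfers without naming a TX buffer type. A minimal sketch; pins and length are assumptions.)

```cpp
// Sketch only -- receive-only use of the nullptr-TX overload; pins and length are assumptions.
#include "mbed.h"

SPI spi(D11, D12, D13);

int main()
{
    spi.format(8, 0);

    // No TX data: the default SPI fill value is clocked out while 4 bytes are received
    // into the cache-aligned buffer.
    CacheAlignedBuffer<uint8_t, 4> rx;
    spi.transfer_and_wait(nullptr, 0, rx, 4);
}
```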
@@ -574,7 +587,8 @@ class SPI : private NonCopyable<SPI> {
* the default SPI value is sent
* @param tx_length The length of TX buffer in bytes.
* @param rx_buffer The RX buffer which is used for received data. If NULL is passed,
- * received data are ignored.
+ * received data are ignored. This buffer is guaranteed to be cache aligned
+ * if the MCU has a cache.
* @param rx_length The length of RX buffer in bytes.
* @param callback The event callback function.
* @param event The event mask of events to modify.
@@ -599,6 +613,7 @@ class SPI : private NonCopyable<SPI> {
* @param tx_buffer The TX buffer with data to be transferred. May be nullptr if tx_length is 0.
* @param tx_length The length of TX buffer in bytes. If 0, no transmission is done.
* @param rx_buffer The RX buffer, which is used for received data. May be nullptr if tx_length is 0.
+ * This buffer is guaranteed to be cache aligned if the MCU has a cache.
* @param rx_length The length of RX buffer in bytes If 0, no reception is done.
* @param timeout timeout value. Use #rtos::Kernel::wait_for_u32_forever to wait forever (the default).
*
@@ -722,6 +737,18 @@ class SPI : private NonCopyable<SPI> {
* iff start_transfer() has been called and the chip has been selected but irq_handler_asynch()
* has NOT been called yet. */
volatile bool _transfer_in_progress = false;
+
+ // If there is a transfer in progress, this indicates whether it used DMA and therefore requires a cache
+ // flush at the end
+ bool _transfer_in_progress_uses_dma;
+
+ #if __DCACHE_PRESENT
+ // These variables store the location and length in bytes of the Rx buffer if an async transfer
+ // is in progress. They are used for invalidating the cache after the transfer completes.
+ void *_transfer_in_progress_rx_buffer;
+ size_t _transfer_in_progress_rx_len;
+ #endif
+
/* Event flags used for transfer_and_wait() */
rtos::EventFlags _transfer_and_wait_flags;
#endif // DEVICE_SPI_ASYNCH
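(The members added above only record whether DMA was used and where the RX buffer lives; the cache maintenance itself happens in SPI.cpp, which is outside this hunk. A rough sketch of the pattern they enable on a data-cache-equipped MCU follows. This is a hypothetical helper, not the commit's code; the CMSIS call is shown for illustration only.)

```cpp
#include "cmsis.h"

// Hypothetical helper, not the code from this commit: after a DMA transfer completes,
// stale cache lines covering the RX buffer must be invalidated before the CPU reads it.
static void invalidate_rx_after_dma(void *rx_buffer, size_t rx_len)
{
#if __DCACHE_PRESENT
    // CacheAlignedBuffer keeps the RX region cache-line aligned, so invalidating these
    // lines cannot discard unrelated data that shares a line with the buffer.
    SCB_InvalidateDCache_by_Addr(rx_buffer, static_cast<int32_t>(rx_len));
#else
    (void) rx_buffer;
    (void) rx_len;
#endif
}
```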