@@ -16,6 +16,7 @@
 #ifdef CONFIG_SOC_NRF54H20_GPD
 #include <nrf/gpd.h>
 #endif
+#include <dmm.h>
 #ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
 #include <nrfx_ppi.h>
 #endif
@@ -123,9 +124,6 @@ struct spi_nrfx_config {
 #endif
 	uint32_t wake_pin;
 	nrfx_gpiote_t wake_gpiote;
-#ifdef CONFIG_DCACHE
-	uint32_t mem_attr;
-#endif
 #ifdef USE_CLOCK_REQUESTS
 	const struct device *clk_dev;
 	struct nrf_clock_spec clk_spec;
@@ -134,6 +132,7 @@ struct spi_nrfx_config {
 	bool cross_domain;
 	int8_t default_port;
 #endif
+	void *mem_reg;
 };
 
 static void event_handler(const nrfx_spim_evt_t *p_event, void *p_context);
@@ -504,11 +503,6 @@ static void transfer_next_chunk(const struct device *dev)
 		}
 
 		memcpy(dev_data->tx_buffer, tx_buf, chunk_len);
-#ifdef CONFIG_DCACHE
-		if (dev_config->mem_attr & DT_MEM_CACHEABLE) {
-			sys_cache_data_flush_range(dev_data->tx_buffer, chunk_len);
-		}
-#endif
 		tx_buf = dev_data->tx_buffer;
 	}
 
@@ -525,10 +519,20 @@ static void transfer_next_chunk(const struct device *dev)
 
 		dev_data->chunk_len = chunk_len;
 
-		xfer.p_tx_buffer = tx_buf;
-		xfer.tx_length = spi_context_tx_buf_on(ctx) ? chunk_len : 0;
-		xfer.p_rx_buffer = rx_buf;
-		xfer.rx_length = spi_context_rx_buf_on(ctx) ? chunk_len : 0;
+		xfer.tx_length = spi_context_tx_buf_on(ctx) ? chunk_len : 0;
+		xfer.rx_length = spi_context_rx_buf_on(ctx) ? chunk_len : 0;
+
+		error = dmm_buffer_out_prepare(dev_config->mem_reg, tx_buf, xfer.tx_length,
+					       (void **)&xfer.p_tx_buffer);
+		if (error != 0) {
+			goto out_alloc_failed;
+		}
+
+		error = dmm_buffer_in_prepare(dev_config->mem_reg, rx_buf, xfer.rx_length,
+					      (void **)&xfer.p_rx_buffer);
+		if (error != 0) {
+			goto in_alloc_failed;
+		}
 
 #ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
 		if (xfer.rx_length == 1 && xfer.tx_length <= 1) {
@@ -551,18 +555,23 @@ static void transfer_next_chunk(const struct device *dev)
 			anomaly_58_workaround_clear(dev_data);
 #endif
 		}
+
+		/* On nrfx_spim_xfer() error */
+		dmm_buffer_in_release(dev_config->mem_reg, rx_buf, xfer.rx_length,
+				      (void **)&xfer.p_rx_buffer);
+in_alloc_failed:
+		dmm_buffer_out_release(dev_config->mem_reg, (void **)&xfer.p_tx_buffer);
 	}
 
+out_alloc_failed:
 	finish_transaction(dev, error);
 }
 
 static void event_handler(const nrfx_spim_evt_t *p_event, void *p_context)
 {
 	const struct device *dev = p_context;
 	struct spi_nrfx_data *dev_data = dev->data;
-#ifdef CONFIG_DCACHE
 	const struct spi_nrfx_config *dev_config = dev->config;
-#endif
 
 	if (p_event->type == NRFX_SPIM_EVENT_DONE) {
 		/* Chunk length is set to 0 when a transaction is aborted
@@ -576,15 +585,21 @@ static void event_handler(const nrfx_spim_evt_t *p_event, void *p_context)
 #ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
 		anomaly_58_workaround_clear(dev_data);
 #endif
+
+		if (spi_context_tx_buf_on(&dev_data->ctx)) {
+			dmm_buffer_out_release(dev_config->mem_reg,
+					       (void **)p_event->xfer_desc.p_tx_buffer);
+		}
+
+		if (spi_context_rx_buf_on(&dev_data->ctx)) {
+			dmm_buffer_in_release(dev_config->mem_reg, dev_data->ctx.rx_buf,
+					      dev_data->chunk_len, p_event->xfer_desc.p_rx_buffer);
+		}
+
 #ifdef SPI_BUFFER_IN_RAM
 		if (spi_context_rx_buf_on(&dev_data->ctx) &&
 		    p_event->xfer_desc.p_rx_buffer != NULL &&
 		    p_event->xfer_desc.p_rx_buffer != dev_data->ctx.rx_buf) {
-#ifdef CONFIG_DCACHE
-			if (dev_config->mem_attr & DT_MEM_CACHEABLE) {
-				sys_cache_data_invd_range(dev_data->rx_buffer, dev_data->chunk_len);
-			}
-#endif
 			(void)memcpy(dev_data->ctx.rx_buf,
 				     dev_data->rx_buffer,
 				     dev_data->chunk_len);
@@ -878,8 +893,6 @@ static int spi_nrfx_deinit(const struct device *dev)
 	return 0;
 }
 
-#define SPIM_MEM_REGION(idx) DT_PHANDLE(SPIM(idx), memory_regions)
-
 #define SPI_NRFX_SPIM_EXTENDED_CONFIG(idx)				\
 	IF_ENABLED(NRFX_SPIM_EXTENDED_ENABLED,				\
 		   (.dcx_pin = NRF_SPIM_PIN_NOT_CONNECTED,		\
@@ -888,13 +901,6 @@ static int spi_nrfx_deinit(const struct device *dev)
 		    ())							\
 	))
 
-#define SPIM_GET_MEM_ATTR(idx)						\
-	COND_CODE_1(SPIM_HAS_PROP(idx, memory_regions),			\
-		(COND_CODE_1(DT_NODE_HAS_PROP(SPIM_MEM_REGION(idx), zephyr_memory_attr), \
-			(DT_PROP(SPIM_MEM_REGION(idx), zephyr_memory_attr)),	\
-			(0))),						\
-		(0))
-
 /* Fast instances depend on the global HSFLL clock controller (as they need
  * to request the highest frequency from it to operate correctly), so they
  * must be initialized after that controller driver, hence the default SPI
@@ -921,10 +927,10 @@ static int spi_nrfx_deinit(const struct device *dev)
 	IF_ENABLED(SPI_BUFFER_IN_RAM,					\
 		(static uint8_t spim_##idx##_tx_buffer			\
 			[CONFIG_SPI_NRFX_RAM_BUFFER_SIZE]		\
-			SPIM_MEMORY_SECTION(idx);			\
+			DMM_MEMORY_SECTION(SPIM(idx));			\
 		 static uint8_t spim_##idx##_rx_buffer			\
 			[CONFIG_SPI_NRFX_RAM_BUFFER_SIZE]		\
-			SPIM_MEMORY_SECTION(idx);))			\
+			DMM_MEMORY_SECTION(SPIM(idx));))		\
 	static struct spi_nrfx_data spi_##idx##_data = {		\
 		IF_ENABLED(CONFIG_MULTITHREADING,			\
 			(SPI_CONTEXT_INIT_LOCK(spi_##idx##_data, ctx),)) \
@@ -961,8 +967,6 @@ static int spi_nrfx_deinit(const struct device *dev)
 		.wake_pin = NRF_DT_GPIOS_TO_PSEL_OR(SPIM(idx), wake_gpios,	\
 						    WAKE_PIN_NOT_USED),	\
 		.wake_gpiote = WAKE_GPIOTE_INSTANCE(SPIM(idx)),		\
-		IF_ENABLED(CONFIG_DCACHE,				\
-			(.mem_attr = SPIM_GET_MEM_ATTR(idx),))		\
 		IF_ENABLED(USE_CLOCK_REQUESTS,				\
 			(.clk_dev = SPIM_REQUESTS_CLOCK(SPIM(idx))	\
 				  ? DEVICE_DT_GET(DT_CLOCKS_CTLR(SPIM(idx))) \
@@ -975,6 +979,7 @@ static int spi_nrfx_deinit(const struct device *dev)
 			.default_port =					\
 				DT_PROP_OR(DT_PHANDLE(SPIM(idx),	\
 					default_gpio_port), port, -1),)) \
+		.mem_reg = DMM_DEV_TO_REG(SPIM(idx)),			\
 	};								\
 	BUILD_ASSERT(!SPIM_HAS_PROP(idx, wake_gpios) ||			\
 		     !(DT_GPIO_FLAGS(SPIM(idx), wake_gpios) & GPIO_ACTIVE_LOW),\
@@ -989,12 +994,6 @@ static int spi_nrfx_deinit(const struct device *dev)
 		     POST_KERNEL, SPIM_INIT_PRIORITY(idx),		\
 		     &spi_nrfx_driver_api)
 
-#define SPIM_MEMORY_SECTION(idx)					\
-	COND_CODE_1(SPIM_HAS_PROP(idx, memory_regions),			\
-		(__attribute__((__section__(LINKER_DT_NODE_REGION_NAME(	\
-			SPIM_MEM_REGION(idx)))))),			\
-		())
-
 #define COND_NRF_SPIM_DEVICE(unused, prefix, i, _)			\
 	IF_ENABLED(CONFIG_HAS_HW_NRF_SPIM##prefix##i, (SPI_NRFX_SPIM_DEFINE(prefix##i);))
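For context, the DMM calls added above follow a strict prepare/release pairing: dmm_buffer_out_prepare() yields a DMA-safe alias of the TX data, dmm_buffer_in_prepare() allocates a DMA-safe RX destination, and the matching release calls (in reverse order) copy RX data back and free the aliases. Below is a minimal sketch of that lifecycle, assuming Zephyr's nRF DMM API from <dmm.h>; dmm_transfer_sketch() and do_dma_transfer() are hypothetical names (the latter stands in for nrfx_spim_xfer()), and mem_reg would come from DMM_DEV_TO_REG() as in the driver's config struct.

#include <stddef.h>
#include <stdint.h>
#include <dmm.h>

/* Hypothetical DMA transfer stand-in for nrfx_spim_xfer(). */
static int do_dma_transfer(void *tx, size_t tx_len, void *rx, size_t rx_len)
{
	(void)tx; (void)tx_len; (void)rx; (void)rx_len;
	return 0;
}

static int dmm_transfer_sketch(void *mem_reg, const uint8_t *tx, size_t tx_len,
			       uint8_t *rx, size_t rx_len)
{
	void *dma_tx;
	void *dma_rx;
	int err;

	/* Get a DMA-safe alias of the TX data; DMM copies it into the
	 * device's memory region only when the original is not reachable
	 * by the peripheral's DMA.
	 */
	err = dmm_buffer_out_prepare(mem_reg, tx, tx_len, &dma_tx);
	if (err != 0) {
		return err;
	}

	/* Allocate a DMA-safe destination for the RX data. */
	err = dmm_buffer_in_prepare(mem_reg, rx, rx_len, &dma_rx);
	if (err != 0) {
		goto release_tx;
	}

	err = do_dma_transfer(dma_tx, tx_len, dma_rx, rx_len);

	/* Copy RX data back to the user buffer if a bounce buffer was
	 * used, then free the aliases; releases mirror prepares in
	 * reverse order, as in the driver's goto unwinding above.
	 */
	(void)dmm_buffer_in_release(mem_reg, rx, rx_len, dma_rx);
release_tx:
	(void)dmm_buffer_out_release(mem_reg, dma_tx);

	return err;
}

Compared with the CONFIG_DCACHE/mem_attr path this patch removes, the DMM layer now owns both buffer placement (DMM_MEMORY_SECTION instead of the hand-rolled SPIM_MEMORY_SECTION) and any cache maintenance, so the driver no longer calls sys_cache_data_flush_range()/sys_cache_data_invd_range() directly.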