@@ -97,7 +97,223 @@ static inline int vendor_specific_xip_disable(const struct device *dev,
9797}
9898#endif /* defined(CONFIG_MSPI_XIP) */
9999
100- #else
100+ #elif DT_HAS_COMPAT_STATUS_OKAY (nordic_nrf_mspi )
101+ #define MSPI_DT_DRV_COMPAT nordic_nrf_mspi
102+ #include <nrf.h>
103+
104+ static inline void vendor_specific_init (const struct device * dev )
105+ {
106+ const struct mspi_dw_config * config = dev -> config ;
107+ NRF_QSPI_Type * preg = (NRF_QSPI_Type * )config -> wrapper_regs ;
108+
109+ preg -> EVENTS_CORE = 0 ;
110+ preg -> EVENTS_DMA .DONE = 0 ;
111+
112+ preg -> INTENSET = BIT (QSPI_INTENSET_CORE_Pos )
113+ | BIT (QSPI_INTENSET_DMADONE_Pos );
114+ }
115+
116+ static inline void vendor_specific_suspend (const struct device * dev )
117+ {
118+ const struct mspi_dw_config * config = dev -> config ;
119+ NRF_QSPI_Type * preg = (NRF_QSPI_Type * )config -> wrapper_regs ;
120+
121+ preg -> ENABLE = 0 ;
122+ }
123+
124+ static inline void vendor_specific_resume (const struct device * dev )
125+ {
126+ const struct mspi_dw_config * config = dev -> config ;
127+ NRF_QSPI_Type * preg = (NRF_QSPI_Type * )config -> wrapper_regs ;
128+
129+ preg -> ENABLE = 1 ;
130+
131+ }
132+
133+ static inline void vendor_specific_irq_clear (const struct device * dev )
134+ {
135+ const struct mspi_dw_config * config = dev -> config ;
136+ NRF_QSPI_Type * preg = (NRF_QSPI_Type * )config -> wrapper_regs ;
137+
138+ preg -> EVENTS_CORE = 0 ;
139+ preg -> EVENTS_DMA .DONE = 0 ;
140+ }
141+
/* DMA support */

/* Layout of an EVDMA job attribute word:
 * bits [23:0] transfer length, bits [29:24] attribute flags,
 * bit 30 32-bit AXI access, bit 31 event generation.
 */
#define EVDMA_ATTR_LEN_Pos (0UL)
#define EVDMA_ATTR_LEN_Msk (0x00FFFFFFUL)

#define EVDMA_ATTR_ATTR_Pos (24UL)
#define EVDMA_ATTR_ATTR_Msk (0x3FUL << EVDMA_ATTR_ATTR_Pos)

#define EVDMA_ATTR_32AXI_Pos (30UL)
#define EVDMA_ATTR_32AXI_Msk (0x1UL << EVDMA_ATTR_32AXI_Pos)

#define EVDMA_ATTR_EVENTS_Pos (31UL)
#define EVDMA_ATTR_EVENTS_Msk (0x1UL << EVDMA_ATTR_EVENTS_Pos)

/* Bit positions of the individual flags within the attribute field. */
typedef enum {
	EVDMA_BYTE_SWAP = 0,
	EVDMA_JOBLIST = 1,
	EVDMA_BUFFER_FILL = 2,
	EVDMA_FIXED_ATTR = 3,
	EVDMA_STATIC_ADDR = 4,
	EVDMA_PLAIN_DATA_BUF_WR = 5,
} EVDMA_ATTR_Type;

/* Setup EVDMA attribute with the following configuration.
 * NOTE(review): this sets every attribute flag at once -- confirm against
 * the EVDMA documentation that these flags are not mutually exclusive.
 */
#define EVDMA_ATTRIBUTE (BIT(EVDMA_BYTE_SWAP) | BIT(EVDMA_JOBLIST) | \
			 BIT(EVDMA_BUFFER_FILL) | BIT(EVDMA_FIXED_ATTR) | \
			 BIT(EVDMA_STATIC_ADDR) | BIT(EVDMA_PLAIN_DATA_BUF_WR))


/* One EVDMA job-list entry: buffer address plus packed attribute word. */
typedef struct {
	uint8_t *addr;
	uint32_t attr;
} EVDMA_JOB_Type;

/* Build one job entry. Arguments are parenthesized (so expression arguments
 * such as "a + b" or "x | y" expand correctly) and masked against the field
 * masks above (so an oversized SIZE cannot corrupt the attribute bits).
 */
#define EVDMA_JOB(BUFFER, SIZE, ATTR)						\
	(EVDMA_JOB_Type) {							\
		.addr = (uint8_t *)(BUFFER),					\
		.attr = ((((uint32_t)(ATTR)) << EVDMA_ATTR_ATTR_Pos) &		\
			 EVDMA_ATTR_ATTR_Msk) |					\
			(((uint32_t)(SIZE)) & EVDMA_ATTR_LEN_Msk),		\
	}
/* Terminating entry: every job list must end with one of these. */
#define EVDMA_NULL_JOB() \
	(EVDMA_JOB_Type) { .addr = (uint8_t *)0, .attr = 0 }
/* Pair of job-list pointers describing one QSPI transfer. */
typedef struct {
	EVDMA_JOB_Type *tx_job;
	EVDMA_JOB_Type *rx_job;
} QSPI_TRANSFER_LIST_Type;
184+
/* Number of jobs needed for a transmit transaction:
 * command + address + data + terminating null job.
 */
#define MAX_NUM_JOBS 4
/* Just support 1 transaction for each peripheral as concurrent transactions aren't supported yet */
#define MAX_CONCURR_TRANSACTIONS 1

/* Static allocation macros for DMA transfer lists (one set per instance). */
#define MSPI_DW_DMA_XFER_LIST_DEFINE(inst) \
	static QSPI_TRANSFER_LIST_Type mspi_dw_##inst##_transfer_list; \
	static EVDMA_JOB_Type mspi_dw_##inst##_joblist[MAX_NUM_JOBS * MAX_CONCURR_TRANSACTIONS]

#define MSPI_DW_DMA_XFER_LIST_GET(inst) &mspi_dw_##inst##_transfer_list
/* NOTE(review): "&array" yields a pointer-to-array, not EVDMA_JOB_Type *.
 * The consumer casts it back so the address is correct, but dropping the
 * "&" (letting the array decay) would be type-cleaner -- confirm against
 * the users of dma_joblist.
 */
#define MSPI_DW_DMA_JOBLIST_GET(inst) &mspi_dw_##inst##_joblist
197+
198+ static inline void vendor_specific_start_dma_xfer (const struct device * dev )
199+ {
200+ const struct mspi_dw_config * config = dev -> config ;
201+ NRF_QSPI_Type * preg = (NRF_QSPI_Type * )config -> wrapper_regs ;
202+
203+ preg -> TASKS_START = 1 ;
204+ }
205+
/* Temporarily hard-coded as not in MDK yet */
#define QSPI_TMOD_OFFSET (0x490UL)
#define QSPI_TMOD_RX_ONLY (0x2)
/*
 * Program the wrapper's EVDMA engine for one transfer packet.
 *
 * For TX, builds a job list of up to three entries (command, address, data)
 * followed by a mandatory null terminator; for RX, a single data job. Then
 * configures burst lengths, the EVDMA list pointer and the DMA-done
 * interrupt. The transfer is not started here -- see
 * vendor_specific_start_dma_xfer().
 *
 * Returns 0 on success, -ENODEV if no transfer list was allocated for this
 * instance.
 */
static inline int vendor_specific_setup_dma_xfer(const struct device *dev,
						 const struct mspi_xfer_packet *packet,
						 const struct mspi_xfer *xfer)
{
	struct mspi_dw_data *dev_data = dev->data;
	const struct mspi_dw_config *config = dev->config;
	NRF_QSPI_Type *preg = (NRF_QSPI_Type *)config->wrapper_regs;

	/* Use static allocation from config */
	QSPI_TRANSFER_LIST_Type *transfer_list = (QSPI_TRANSFER_LIST_Type *)
		config->dma_transfer_list;

	if (!transfer_list) {
		LOG_ERR("DMA transfer list not available");
		return -ENODEV;
	}

	/* Get joblist from static allocation in config */
	EVDMA_JOB_Type *joblist = (EVDMA_JOB_Type *)config->dma_joblist;

	int tmod = 0;
	int job_idx = 0;

	if (packet->dir == MSPI_TX) {
		/* No RX expected for a pure transmit. */
		preg->CONFIG.RXTRANSFERLENGTH = 0;

		/* Setting up EVDMA joblist depending on cmd, addr and data */

		/*
		 * Command address will always have a length of 4 from the DMA's perspective,
		 * QSPI peripheral will use length of data specified in core registers
		 */
		if (xfer->cmd_length > 0) {
			joblist[job_idx++] = EVDMA_JOB(&packet->cmd, 4, EVDMA_ATTRIBUTE);
		}
		if (xfer->addr_length > 0) {
			joblist[job_idx++] = EVDMA_JOB(&packet->address, 4, EVDMA_ATTRIBUTE);
		}
		if (packet->num_bytes > 0) {
			joblist[job_idx++] = EVDMA_JOB(packet->data_buf, packet->num_bytes,
						       EVDMA_ATTRIBUTE);
		}
		/* Always terminate with null job */
		joblist[job_idx] = EVDMA_NULL_JOB();
		/* tx_job should point to first valid job, or null if none */
		if (job_idx > 0) {
			transfer_list->tx_job = &joblist[0];
		} else {
			transfer_list->tx_job = &joblist[job_idx];
		}

		/* rx_job always EVDMA_NULL_JOB() for transmit */
		transfer_list->rx_job = &joblist[job_idx];
		tmod = 0;
	} else {
		/* RX length is counted in frames, hence the shift by the
		 * bytes-per-frame exponent; the register holds length - 1.
		 */
		preg->CONFIG.RXTRANSFERLENGTH = ((packet->num_bytes + xfer->addr_length +
						  xfer->cmd_length) >>
						 dev_data->bytes_per_frame_exp) - 1;
		joblist[0] = EVDMA_JOB(packet->data_buf, packet->num_bytes, EVDMA_ATTRIBUTE);
		joblist[1] = EVDMA_NULL_JOB();
		/* tx_job points at the null terminator: nothing to transmit. */
		transfer_list->tx_job = &joblist[1];
		transfer_list->rx_job = &joblist[0];

		tmod = QSPI_TMOD_RX_ONLY;
	}

	/*
	 * In slave mode, a tmod register in the wrapper also needs to be set. Currently
	 * the address not in MDK so temp fix.
	 */
	uintptr_t tmod_addr = (uintptr_t)preg + QSPI_TMOD_OFFSET;

	sys_write32(tmod, tmod_addr);

	/* Burst lengths derived from the FIFO depth and DMA data levels. */
	preg->CONFIG.TXBURSTLENGTH = (config->tx_fifo_depth_minus_1 + 1) - config->dma_tx_data_level;
	preg->CONFIG.RXBURSTLENGTH = config->dma_rx_data_level + 1;
	preg->DMA.CONFIG.LISTPTR = (uint32_t)transfer_list;
	/* TODO: QSPI_INTEN_CORE or using INTENSET bricks the system. Needed for XIP? */

	preg->INTEN = BIT(QSPI_INTEN_DMADONE_Pos);

	return 0;
}
292+
293+ static inline bool vendor_specific_dma_accessible_check (const struct device * dev ,
294+ const uint8_t * data_buf )
295+ {
296+ const struct mspi_dw_config * config = dev -> config ;
297+ NRF_QSPI_Type * preg = (NRF_QSPI_Type * )config -> wrapper_regs ;
298+
299+ return nrf_dma_accessible_check (preg , data_buf );
300+ }
301+
302+ static inline bool vendor_specific_read_dma_irq (const struct device * dev )
303+ {
304+ const struct mspi_dw_config * config = dev -> config ;
305+ NRF_QSPI_Type * preg = (NRF_QSPI_Type * )config -> wrapper_regs ;
306+
307+ return (bool ) preg -> EVENTS_DMA .DONE ;
308+ }
309+
#else /* Supply empty vendor specific macros for generic case */

/* Empty macros for generic case - no DMA support; the GET macros expand to
 * NULL so the per-instance config fields are simply unset.
 */
#define MSPI_DW_DMA_XFER_LIST_DEFINE(inst)
#define MSPI_DW_DMA_XFER_LIST_GET(inst) NULL
#define MSPI_DW_DMA_JOBLIST_GET(inst) NULL
316+
101317static inline void vendor_specific_init (const struct device * dev )
102318{
103319 ARG_UNUSED (dev );
@@ -134,4 +350,33 @@ static inline int vendor_specific_xip_disable(const struct device *dev,
134350
135351 return 0 ;
136352}
137- #endif /* DT_HAS_COMPAT_STATUS_OKAY(nordic_nrf_exmif) */
353+ #if defined(CONFIG_MSPI_DMA )
/* No-op: generic builds have no vendor DMA engine to start. */
static inline void vendor_specific_start_dma_xfer(const struct device *dev)
{
	(void)dev;
}
/* No-op: generic builds cannot program a DMA transfer; reports success. */
static inline int vendor_specific_setup_dma_xfer(const struct device *dev,
						 const struct mspi_xfer_packet *packet,
						 const struct mspi_xfer *xfer)
{
	(void)dev;
	(void)packet;
	(void)xfer;

	return 0;
}
/* Generic fallback: assume every buffer is DMA-accessible.
 *
 * The data_buf parameter is const-qualified to match the signature of the
 * nordic_nrf_mspi variant of this function; without the qualifier, common
 * call sites passing a "const uint8_t *" (as that variant requires) would
 * fail to compile in the generic configuration.
 */
static inline bool vendor_specific_dma_accessible_check(const struct device *dev,
							const uint8_t *data_buf)
{
	(void)dev;
	(void)data_buf;

	return true;
}
/* Generic fallback: report the DMA interrupt as always pending. */
static inline bool vendor_specific_read_dma_irq(const struct device *dev)
{
	(void)dev;

	return true;
}
381+ #endif /* defined(CONFIG_MSPI_DMA) */
382+ #endif /* Empty vendor specific macros */
0 commit comments