11
11
#include <zephyr/sys/sys_io.h>
12
12
#include <zephyr/logging/log.h>
13
13
#include <zephyr/irq.h>
14
+ #include <zephyr/kernel.h>
14
15
LOG_MODULE_REGISTER (xlnx_quadspi , CONFIG_SPI_LOG_LEVEL );
15
16
16
17
#include "spi_context.h"
@@ -94,6 +95,7 @@ struct xlnx_quadspi_config {
94
95
95
96
/* Per-instance driver state. */
struct xlnx_quadspi_data {
	struct spi_context ctx;
	/* Posted from the ISR when the transmit data register (DTR) FIFO has
	 * drained (IPIXR_DTR_EMPTY); the synchronous transceive path waits on
	 * this event instead of doing FIFO work in interrupt context.
	 */
	struct k_event dtr_empty;
};
98
100
99
101
static inline uint32_t xlnx_quadspi_read32 (const struct device * dev ,
@@ -227,7 +229,7 @@ static int xlnx_quadspi_configure(const struct device *dev,
227
229
return 0 ;
228
230
}
229
231
230
- static void xlnx_quadspi_start_tx (const struct device * dev )
232
+ static bool xlnx_quadspi_start_tx (const struct device * dev )
231
233
{
232
234
const struct xlnx_quadspi_config * config = dev -> config ;
233
235
struct xlnx_quadspi_data * data = dev -> data ;
@@ -237,6 +239,7 @@ static void xlnx_quadspi_start_tx(const struct device *dev)
237
239
uint32_t spisr ;
238
240
uint32_t dtr = 0U ;
239
241
uint32_t fifo_avail_words = config -> fifo_size ? config -> fifo_size : 1 ;
242
+ bool complete = false;
240
243
241
244
if (!spi_context_tx_on (ctx ) && !spi_context_rx_on (ctx )) {
242
245
/* All done, de-assert slave select */
@@ -250,7 +253,8 @@ static void xlnx_quadspi_start_tx(const struct device *dev)
250
253
}
251
254
252
255
spi_context_complete (ctx , dev , 0 );
253
- return ;
256
+ complete = true;
257
+ return complete ;
254
258
}
255
259
256
260
if (!IS_ENABLED (CONFIG_SPI_SLAVE ) || !spi_context_is_slave (ctx )) {
@@ -318,13 +322,55 @@ static void xlnx_quadspi_start_tx(const struct device *dev)
318
322
SPICR_OFFSET );
319
323
320
324
spi_context_complete (ctx , dev , - ENOTSUP );
325
+ complete = true;
321
326
}
322
327
323
328
if (!IS_ENABLED (CONFIG_SPI_SLAVE ) || !spi_context_is_slave (ctx )) {
324
329
/* Uninhibit master transaction */
325
330
spicr &= ~(SPICR_MASTER_XFER_INH );
326
331
xlnx_quadspi_write32 (dev , spicr , SPICR_OFFSET );
327
332
}
333
+ return complete ;
334
+ }
335
+
336
+ static void xlnx_quadspi_read_fifo (const struct device * dev )
337
+ {
338
+ const struct xlnx_quadspi_config * config = dev -> config ;
339
+ struct xlnx_quadspi_data * data = dev -> data ;
340
+ struct spi_context * ctx = & data -> ctx ;
341
+ uint32_t spisr = xlnx_quadspi_read32 (dev , SPISR_OFFSET );
342
+ /* RX FIFO occupancy register only exists if FIFO is implemented */
343
+ uint32_t rx_fifo_words = config -> fifo_size ?
344
+ xlnx_quadspi_read32 (dev , SPI_RX_FIFO_OCR_OFFSET ) + 1 : 1 ;
345
+
346
+ /* Read RX data */
347
+ while (!(spisr & SPISR_RX_EMPTY )) {
348
+ uint32_t drr = xlnx_quadspi_read32 (dev , SPI_DRR_OFFSET );
349
+
350
+ if (spi_context_rx_buf_on (ctx )) {
351
+ switch (config -> num_xfer_bytes ) {
352
+ case 1 :
353
+ UNALIGNED_PUT (drr , (uint8_t * )ctx -> rx_buf );
354
+ break ;
355
+ case 2 :
356
+ UNALIGNED_PUT (drr , (uint16_t * )ctx -> rx_buf );
357
+ break ;
358
+ case 4 :
359
+ UNALIGNED_PUT (drr , (uint32_t * )ctx -> rx_buf );
360
+ break ;
361
+ default :
362
+ __ASSERT (0 , "unsupported num_xfer_bytes" );
363
+ }
364
+ }
365
+
366
+ spi_context_update_rx (ctx , config -> num_xfer_bytes , 1 );
367
+
368
+ if (-- rx_fifo_words == 0 ) {
369
+ spisr = xlnx_quadspi_read32 (dev , SPISR_OFFSET );
370
+ rx_fifo_words = config -> fifo_size ?
371
+ xlnx_quadspi_read32 (dev , SPI_RX_FIFO_OCR_OFFSET ) + 1 : 1 ;
372
+ }
373
+ }
328
374
}
329
375
330
376
static int xlnx_quadspi_transceive (const struct device * dev ,
@@ -352,7 +398,27 @@ static int xlnx_quadspi_transceive(const struct device *dev,
352
398
353
399
xlnx_quadspi_cs_control (dev , true);
354
400
355
- xlnx_quadspi_start_tx (dev );
401
+ while (true) {
402
+ k_event_clear (& data -> dtr_empty , 1 );
403
+ bool complete = xlnx_quadspi_start_tx (dev );
404
+
405
+ if (complete || async ) {
406
+ break ;
407
+ }
408
+
409
+ /**
410
+ * 20ms should be long enough for 256 byte FIFO at any
411
+ * reasonable clock speed.
412
+ */
413
+ if (!k_event_wait (& data -> dtr_empty , 1 , false,
414
+ K_MSEC (20 + CONFIG_SPI_COMPLETION_TIMEOUT_TOLERANCE ))) {
415
+ /* Timeout */
416
+ LOG_ERR ("DTR empty timeout" );
417
+ spi_context_complete (ctx , dev , - ETIMEDOUT );
418
+ break ;
419
+ }
420
+ xlnx_quadspi_read_fifo (dev );
421
+ }
356
422
357
423
ret = spi_context_wait_for_completion (ctx );
358
424
out :
@@ -405,56 +471,28 @@ static int xlnx_quadspi_release(const struct device *dev,
405
471
406
472
static void xlnx_quadspi_isr (const struct device * dev )
407
473
{
408
- const struct xlnx_quadspi_config * config = dev -> config ;
409
474
struct xlnx_quadspi_data * data = dev -> data ;
410
- struct spi_context * ctx = & data -> ctx ;
411
475
uint32_t ipisr ;
412
476
413
477
/* Acknowledge interrupt */
414
478
ipisr = xlnx_quadspi_read32 (dev , IPISR_OFFSET );
415
479
xlnx_quadspi_write32 (dev , ipisr , IPISR_OFFSET );
416
480
417
481
if (ipisr & IPIXR_DTR_EMPTY ) {
418
- uint32_t spisr = xlnx_quadspi_read32 (dev , SPISR_OFFSET );
419
- /* RX FIFO occupancy register only exists if FIFO is implemented */
420
- uint32_t rx_fifo_words = config -> fifo_size ?
421
- xlnx_quadspi_read32 (dev , SPI_RX_FIFO_OCR_OFFSET ) + 1 : 1 ;
422
-
423
- /* Read RX data */
424
- while (!(spisr & SPISR_RX_EMPTY )) {
425
- uint32_t drr = xlnx_quadspi_read32 (dev , SPI_DRR_OFFSET );
426
-
427
- if (spi_context_rx_buf_on (ctx )) {
428
- switch (config -> num_xfer_bytes ) {
429
- case 1 :
430
- UNALIGNED_PUT (drr ,
431
- (uint8_t * )ctx -> rx_buf );
432
- break ;
433
- case 2 :
434
- UNALIGNED_PUT (drr ,
435
- (uint16_t * )ctx -> rx_buf );
436
- break ;
437
- case 4 :
438
- UNALIGNED_PUT (drr ,
439
- (uint32_t * )ctx -> rx_buf );
440
- break ;
441
- default :
442
- __ASSERT (0 ,
443
- "unsupported num_xfer_bytes" );
444
- }
445
- }
446
-
447
- spi_context_update_rx (ctx , config -> num_xfer_bytes , 1 );
448
-
449
- if (-- rx_fifo_words == 0 ) {
450
- spisr = xlnx_quadspi_read32 (dev , SPISR_OFFSET );
451
- rx_fifo_words = config -> fifo_size ?
452
- xlnx_quadspi_read32 (dev , SPI_RX_FIFO_OCR_OFFSET ) + 1 : 1 ;
453
- }
482
+ /**
483
+ * For async mode, we need to read the RX FIFO and refill the TX FIFO
484
+ * if needed here.
485
+ * For sync mode, we do this in the caller's context to avoid doing too much
486
+ * work in the ISR, so just post the event.
487
+ */
488
+ #ifdef CONFIG_SPI_ASYNC
489
+ if (ctx -> asynchronous ) {
490
+ xlnx_quadspi_read_fifo (dev );
491
+ xlnx_quadspi_start_tx (dev );
492
+ return ;
454
493
}
455
-
456
- /* Start next TX */
457
- xlnx_quadspi_start_tx (dev );
494
+ #endif
495
+ k_event_post (& data -> dtr_empty , 1 );
458
496
} else {
459
497
LOG_WRN ("unhandled interrupt, ipisr = 0x%08x" , ipisr );
460
498
}
@@ -514,6 +552,8 @@ static int xlnx_quadspi_init(const struct device *dev)
514
552
const struct xlnx_quadspi_config * config = dev -> config ;
515
553
struct xlnx_quadspi_data * data = dev -> data ;
516
554
555
+ k_event_init (& data -> dtr_empty );
556
+
517
557
/* Reset controller */
518
558
xlnx_quadspi_write32 (dev , SRR_SOFTRESET_MAGIC , SRR_OFFSET );
519
559
0 commit comments