@@ -303,6 +303,16 @@ static void dma_silabs_irq_handler(const struct device *dev, uint32_t id)
 			atomic_clear(&chan->busy);
 		}
 
+		/*
+		 * In the case that the transfer is done but a new descriptor has been
+		 * appended, we need to manually load the next descriptor.
+		 */
+		if (LDMA_TransferDone(chnum) &&
+		    (LDMA->CH[chnum].LINK & _LDMA_CH_LINK_LINK_MASK)) {
+			sys_clear_bit((mem_addr_t)&LDMA->CHDONE, chnum);
+			LDMA->LINKLOAD = BIT(chnum);
+		}
+
 		if (chan->cb) {
 			chan->cb(dev, chan->user_data, chnum, status);
 		}
@@ -495,6 +505,64 @@ static DEVICE_API(dma, dma_funcs) = {
 	.get_status = dma_silabs_get_status
 };
 
+int silabs_ldma_append_block(const struct device *dev, uint32_t channel, struct dma_config *config)
+{
+	const struct dma_silabs_data *data = dev->data;
+	struct dma_silabs_channel *chan_conf = &data->dma_chan_table[channel];
+	struct dma_block_config *block_config = config->head_block;
+	LDMA_Descriptor_t *desc = data->dma_chan_table[channel].desc;
+	unsigned int key;
+	int ret;
+
+	__ASSERT(!((uintptr_t)desc & ~_LDMA_CH_LINK_LINKADDR_MASK),
+		 "DMA descriptor is not 32-bit aligned");
+
+	if (channel >= data->dma_ctx.dma_channels) {
+		return -EINVAL;
+	}
+
+	if (!atomic_test_bit(data->dma_ctx.atomic, channel)) {
+		return -EINVAL;
+	}
+
+	/* The DMA channel has already loaded a descriptor with a linkaddr, so a new
+	 * block can't be appended right after the current transfer. Appending a
+	 * descriptor list is not supported either. This check is here to prevent
+	 * misuse of the function.
+	 */
+	if (desc->xfer.linkAddr || config->head_block->next_block) {
+		return -EINVAL;
+	}
+
+	/* A link is already set by a previous call to this function */
+	if (sys_test_bit((mem_addr_t)&LDMA->CH[channel].LINK, _LDMA_CH_LINK_LINK_SHIFT)) {
+		return -EINVAL;
+	}
+
+	ret = dma_silabs_block_to_descriptor(config, chan_conf, block_config, desc);
+	if (ret) {
+		return ret;
+	}
+
+	key = irq_lock();
+	if (!LDMA_TransferDone(channel)) {
+		/*
+		 * These two writes are deliberately split so that the link address is
+		 * written before the link bit. This way, a valid linkAddr is always in
+		 * place when the link bit is set.
+		 */
+		sys_write32((uintptr_t)desc, (mem_addr_t)&LDMA->CH[channel].LINK);
+		sys_set_bit((mem_addr_t)&LDMA->CH[channel].LINK, _LDMA_CH_LINK_LINK_SHIFT);
+		irq_unlock(key);
+
+	} else {
+		irq_unlock(key);
+		LDMA_StartTransfer(channel, &chan_conf->xfer_config, desc);
+	}
+
+	return 0;
+}
+
 #define SILABS_DMA_IRQ_CONNECT(n, inst) \
 	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(inst, n, irq), DT_INST_IRQ_BY_IDX(inst, n, priority), \
 		    dma_silabs_irq_handler, DEVICE_DT_INST_GET(inst), 0); \
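
For context, here is a minimal usage sketch of the new append API (not part of the patch). It assumes a memory-to-memory setup on a DMA channel already allocated to the caller; the buffer names, the start_and_append() wrapper, and the extern prototype are placeholders standing in for whatever header ends up declaring silabs_ldma_append_block.

/*
 * Hypothetical example: buffer names, the wrapper function and the extern
 * prototype below are illustrative only and not part of this patch.
 */
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

extern int silabs_ldma_append_block(const struct device *dev, uint32_t channel,
				    struct dma_config *config);

static uint8_t src_a[64], src_b[64], dst[128];

int start_and_append(const struct device *dma_dev, uint32_t channel)
{
	struct dma_block_config blk_a = {
		.source_address = (uint32_t)(uintptr_t)src_a,
		.dest_address = (uint32_t)(uintptr_t)dst,
		.block_size = sizeof(src_a),
	};
	struct dma_block_config blk_b = {
		.source_address = (uint32_t)(uintptr_t)src_b,
		.dest_address = (uint32_t)(uintptr_t)(dst + sizeof(src_a)),
		.block_size = sizeof(src_b),
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.source_data_size = 1,
		.dest_data_size = 1,
		.block_count = 1,
		.head_block = &blk_a,
	};
	int ret;

	/* Configure and start the first transfer through the generic DMA API. */
	ret = dma_config(dma_dev, channel, &cfg);
	if (ret) {
		return ret;
	}
	ret = dma_start(dma_dev, channel);
	if (ret) {
		return ret;
	}

	/*
	 * Append a single follow-up block: one descriptor, no next_block. If the
	 * first transfer is still running, it is linked behind it; if it already
	 * finished, the driver starts it immediately.
	 */
	cfg.head_block = &blk_b;

	return silabs_ldma_append_block(dma_dev, channel, &cfg);
}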