@@ -542,6 +542,43 @@ static void xdma_synchronize(struct dma_chan *chan)
 	vchan_synchronize(&xdma_chan->vchan);
 }
 
+/**
+ * xdma_fill_descs - Fill hardware descriptors with contiguous memory block addresses
+ * @sw_desc: Tx descriptor state container
+ * @src_addr: Value for the ->src_addr field of the first descriptor
+ * @dst_addr: Value for the ->dst_addr field of the first descriptor
+ * @size: Total size of the contiguous memory block
+ * @filled_descs_num: Number of hardware descriptors already filled for the corresponding sw_desc
+ */
+static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
+				  u64 dst_addr, u32 size, u32 filled_descs_num)
+{
+	u32 left = size, len, desc_num = filled_descs_num;
+	struct xdma_desc_block *dblk;
+	struct xdma_hw_desc *desc;
+
+	dblk = sw_desc->desc_blocks + (desc_num / XDMA_DESC_ADJACENT);
+	desc = dblk->virt_addr;
+	desc += desc_num & XDMA_DESC_ADJACENT_MASK;
+	do {
+		len = min_t(u32, left, XDMA_DESC_BLEN_MAX);
+		/* set hardware descriptor */
+		desc->bytes = cpu_to_le32(len);
+		desc->src_addr = cpu_to_le64(src_addr);
+		desc->dst_addr = cpu_to_le64(dst_addr);
+		if (!(++desc_num & XDMA_DESC_ADJACENT_MASK))
+			desc = (++dblk)->virt_addr;
+		else
+			desc++;
+
+		src_addr += len;
+		dst_addr += len;
+		left -= len;
+	} while (left);
+
+	return desc_num - filled_descs_num;
+}
+
 /**
  * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
  * @chan: DMA channel pointer
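A quick illustration of the indexing scheme used by xdma_fill_descs() above: the flat hardware-descriptor index is split into a descriptor-block index (desc_num / XDMA_DESC_ADJACENT) and a slot within that block (desc_num & XDMA_DESC_ADJACENT_MASK). The sketch below is standalone user-space C, not driver code; DESC_ADJACENT is an assumed example value standing in for XDMA_DESC_ADJACENT.

/*
 * Standalone sketch (not driver code): how a flat hardware-descriptor index
 * maps onto the "descriptor block + adjacent slot" layout used by the helper.
 * DESC_ADJACENT mirrors XDMA_DESC_ADJACENT; 32 is an assumed example value.
 */
#include <stdio.h>
#include <stdint.h>

#define DESC_ADJACENT      32			/* descriptors per block (assumed) */
#define DESC_ADJACENT_MASK (DESC_ADJACENT - 1)

int main(void)
{
	uint32_t desc_num;

	for (desc_num = 30; desc_num < 35; desc_num++) {
		uint32_t block = desc_num / DESC_ADJACENT;	/* which desc_blocks[] entry */
		uint32_t slot  = desc_num & DESC_ADJACENT_MASK;	/* offset inside that block */

		printf("desc %u -> block %u, slot %u\n", desc_num, block, slot);
	}
	return 0;
}

With the assumed 32 descriptors per block, index 31 is the last slot of block 0 and index 32 wraps to slot 0 of block 1, which is the boundary the (++desc_num & XDMA_DESC_ADJACENT_MASK) test in the helper detects before stepping to the next block.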
@@ -558,13 +595,10 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
 {
 	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
 	struct dma_async_tx_descriptor *tx_desc;
-	u32 desc_num = 0, i, len, rest;
-	struct xdma_desc_block *dblk;
-	struct xdma_hw_desc *desc;
 	struct xdma_desc *sw_desc;
-	u64 dev_addr, *src, *dst;
+	u32 desc_num = 0, i;
+	u64 addr, dev_addr, *src, *dst;
 	struct scatterlist *sg;
-	u64 addr;
 
 	for_each_sg(sgl, sg, sg_len, i)
 		desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
@@ -584,32 +618,11 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		dst = &addr;
 	}
 
-	dblk = sw_desc->desc_blocks;
-	desc = dblk->virt_addr;
-	desc_num = 1;
+	desc_num = 0;
 	for_each_sg(sgl, sg, sg_len, i) {
 		addr = sg_dma_address(sg);
-		rest = sg_dma_len(sg);
-
-		do {
-			len = min_t(u32, rest, XDMA_DESC_BLEN_MAX);
-			/* set hardware descriptor */
-			desc->bytes = cpu_to_le32(len);
-			desc->src_addr = cpu_to_le64(*src);
-			desc->dst_addr = cpu_to_le64(*dst);
-
-			if (!(desc_num & XDMA_DESC_ADJACENT_MASK)) {
-				dblk++;
-				desc = dblk->virt_addr;
-			} else {
-				desc++;
-			}
-
-			desc_num++;
-			dev_addr += len;
-			addr += len;
-			rest -= len;
-		} while (rest);
+		desc_num += xdma_fill_descs(sw_desc, *src, *dst, sg_dma_len(sg), desc_num);
+		dev_addr += sg_dma_len(sg);
 	}
 
 	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
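To see why the helper takes and returns a descriptor count, here is a standalone sketch (plain C, not driver code) of how the running total is threaded through one call per contiguous chunk, matching the DIV_ROUND_UP() sizing done earlier in xdma_prep_device_sg(). BLEN_MAX and the buffer lengths are assumed example values, not the driver's XDMA_DESC_BLEN_MAX.

/*
 * Standalone sketch (not driver code): the running descriptor count is passed
 * into each call and advanced by the call's return value, one call per
 * contiguous chunk. BLEN_MAX is an assumed stand-in for XDMA_DESC_BLEN_MAX.
 */
#include <stdio.h>
#include <stdint.h>

#define BLEN_MAX (1u << 20)	/* max bytes one hw descriptor can carry (assumed) */

/* Mimics the helper's return value: descriptors consumed by 'size' bytes. */
static uint32_t fill_descs(uint32_t size, uint32_t filled)
{
	uint32_t used = (size + BLEN_MAX - 1) / BLEN_MAX;	/* DIV_ROUND_UP */

	printf("chunk of %u bytes: descriptors %u..%u\n",
	       size, filled, filled + used - 1);
	return used;
}

int main(void)
{
	uint32_t lengths[] = { 3u << 20, 512u << 10, 5u << 20 };	/* fake sg lengths */
	uint32_t desc_num = 0;
	unsigned int i;

	for (i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++)
		desc_num += fill_descs(lengths[i], desc_num);

	printf("total descriptors: %u\n", desc_num);
	return 0;
}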
@@ -643,9 +656,9 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
 	struct xdma_device *xdev = xdma_chan->xdev_hdl;
 	unsigned int periods = size / period_size;
 	struct dma_async_tx_descriptor *tx_desc;
-	struct xdma_desc_block *dblk;
-	struct xdma_hw_desc *desc;
 	struct xdma_desc *sw_desc;
+	u64 addr, dev_addr, *src, *dst;
+	u32 desc_num;
 	unsigned int i;
 
 	/*
@@ -670,21 +683,21 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
 	sw_desc->period_size = period_size;
 	sw_desc->dir = dir;
 
-	dblk = sw_desc->desc_blocks;
-	desc = dblk->virt_addr;
+	addr = address;
+	if (dir == DMA_MEM_TO_DEV) {
+		dev_addr = xdma_chan->cfg.dst_addr;
+		src = &addr;
+		dst = &dev_addr;
+	} else {
+		dev_addr = xdma_chan->cfg.src_addr;
+		src = &dev_addr;
+		dst = &addr;
+	}
 
-	/* fill hardware descriptor */
+	desc_num = 0;
 	for (i = 0; i < periods; i++) {
-		desc->bytes = cpu_to_le32(period_size);
-		if (dir == DMA_MEM_TO_DEV) {
-			desc->src_addr = cpu_to_le64(address + i * period_size);
-			desc->dst_addr = cpu_to_le64(xdma_chan->cfg.dst_addr);
-		} else {
-			desc->src_addr = cpu_to_le64(xdma_chan->cfg.src_addr);
-			desc->dst_addr = cpu_to_le64(address + i * period_size);
-		}
-
-		desc++;
+		desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
+		addr += i * period_size;
 	}
 
 	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
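For context, a hedged sketch of how a client driver might exercise the cyclic path that xdma_prep_dma_cyclic() implements, using the generic dmaengine API. The channel name, device FIFO address, and buffer parameters are hypothetical placeholders and are not part of this patch.

/*
 * Hedged usage sketch (not part of this patch): a client driving a cyclic
 * transfer through the standard dmaengine API. Channel name, device address
 * and buffer sizes are hypothetical.
 */
#include <linux/dmaengine.h>
#include <linux/err.h>

static void example_period_done(void *arg)
{
	/* Called as each period completes. */
}

static int example_start_cyclic(struct device *dev, dma_addr_t buf,
				size_t buf_len, size_t period_len)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = 0x1000,		/* hypothetical device FIFO address */
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret;

	chan = dma_request_chan(dev, "tx");	/* hypothetical channel name */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto release;

	tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -EINVAL;
		goto release;
	}

	tx->callback = example_period_done;
	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		ret = -EIO;
		goto release;
	}

	dma_async_issue_pending(chan);
	return 0;

release:
	dma_release_channel(chan);
	return ret;
}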