@@ -83,8 +83,10 @@ struct xdma_chan {
  * @desc_num: Number of hardware descriptors
  * @completed_desc_num: Completed hardware descriptors
  * @cyclic: Cyclic transfer vs. scatter-gather
+ * @interleaved_dma: Interleaved DMA transfer
  * @periods: Number of periods in the cyclic transfer
  * @period_size: Size of a period in bytes in cyclic transfers
+ * @frames_left: Number of frames left in interleaved DMA transfer
  * @error: tx error flag
  */
 struct xdma_desc {
@@ -96,8 +98,10 @@ struct xdma_desc {
 	u32			desc_num;
 	u32			completed_desc_num;
 	bool			cyclic;
+	bool			interleaved_dma;
 	u32			periods;
 	u32			period_size;
+	u32			frames_left;
 	bool			error;
 };
 
@@ -607,6 +611,8 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	if (!sw_desc)
 		return NULL;
 	sw_desc->dir = dir;
+	sw_desc->cyclic = false;
+	sw_desc->interleaved_dma = false;
 
 	if (dir == DMA_MEM_TO_DEV) {
 		dev_addr = xdma_chan->cfg.dst_addr;
@@ -682,6 +688,7 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
 	sw_desc->periods = periods;
 	sw_desc->period_size = period_size;
 	sw_desc->dir = dir;
+	sw_desc->interleaved_dma = false;
 
 	addr = address;
 	if (dir == DMA_MEM_TO_DEV) {
@@ -712,6 +719,57 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
 	return NULL;
 }
 
+/**
+ * xdma_prep_interleaved_dma - Prepare virtual descriptor for interleaved DMA transfers
+ * @chan: DMA channel
+ * @xt: DMA transfer template
+ * @flags: tx flags
+ */
+static struct dma_async_tx_descriptor *
+xdma_prep_interleaved_dma(struct dma_chan *chan,
+			  struct dma_interleaved_template *xt,
+			  unsigned long flags)
+{
+	int i;
+	u32 desc_num = 0, period_size = 0;
+	struct dma_async_tx_descriptor *tx_desc;
+	struct xdma_chan *xchan = to_xdma_chan(chan);
+	struct xdma_desc *sw_desc;
+	u64 src_addr, dst_addr;
+
+	for (i = 0; i < xt->frame_size; ++i)
+		desc_num += DIV_ROUND_UP(xt->sgl[i].size, XDMA_DESC_BLEN_MAX);
+
+	sw_desc = xdma_alloc_desc(xchan, desc_num, false);
+	if (!sw_desc)
+		return NULL;
+	sw_desc->dir = xt->dir;
+	sw_desc->interleaved_dma = true;
+	sw_desc->cyclic = flags & DMA_PREP_REPEAT;
+	sw_desc->frames_left = xt->numf;
+	sw_desc->periods = xt->numf;
+
+	desc_num = 0;
+	src_addr = xt->src_start;
+	dst_addr = xt->dst_start;
+	for (i = 0; i < xt->frame_size; ++i) {
+		desc_num += xdma_fill_descs(sw_desc, src_addr, dst_addr, xt->sgl[i].size, desc_num);
+		src_addr += dmaengine_get_src_icg(xt, &xt->sgl[i]) +
+			    (xt->src_inc ? xt->sgl[i].size : 0);
+		dst_addr += dmaengine_get_dst_icg(xt, &xt->sgl[i]) +
+			    (xt->dst_inc ? xt->sgl[i].size : 0);
+		period_size += xt->sgl[i].size;
+	}
+	sw_desc->period_size = period_size;
+
+	tx_desc = vchan_tx_prep(&xchan->vchan, &sw_desc->vdesc, flags);
+	if (tx_desc)
+		return tx_desc;
+
+	xdma_free_desc(&sw_desc->vdesc);
+	return NULL;
+}
+
 /**
  * xdma_device_config - Configure the DMA channel
  * @chan: DMA channel
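
For context, a hedged consumer-side sketch (not part of this patch) of driving the new prep callback through the generic dmaengine API. The channel, the DMA-mapped addresses, and the helper name are assumptions; the template fields and flags are the standard dmaengine ones:

#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/sizes.h>

/* Submit one period of 8 frames, 4 KiB each, skipping 256 bytes between
 * source frames, and let the engine repeat it until a descriptor flagged
 * DMA_PREP_LOAD_EOT is queued. "chan" comes from dma_request_chan();
 * src_dma/dst_dma are hypothetical DMA-mapped addresses. */
static int example_submit_repeated_xfer(struct dma_chan *chan,
					dma_addr_t src_dma, dma_addr_t dst_dma)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *tx;

	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->dir = DMA_MEM_TO_DEV;
	xt->src_start = src_dma;
	xt->dst_start = dst_dma;
	xt->src_inc = true;	/* advance the source by sgl[0].size per frame */
	xt->dst_inc = false;	/* fixed device address */
	xt->src_sgl = true;	/* sgl[0].icg is a gap on the source side */
	xt->dst_sgl = false;
	xt->numf = 8;		/* frames per period */
	xt->frame_size = 1;	/* one chunk per frame */
	xt->sgl[0].size = SZ_4K;
	xt->sgl[0].icg = 256;

	tx = dmaengine_prep_interleaved_dma(chan, xt,
					    DMA_PREP_REPEAT | DMA_CTRL_ACK);
	kfree(xt);	/* the template is consumed by the prep call */
	if (!tx)
		return -EIO;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}
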
@@ -811,11 +869,12 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
 {
 	struct xdma_chan *xchan = dev_id;
 	u32 complete_desc_num = 0;
-	struct xdma_device *xdev;
-	struct virt_dma_desc *vd;
+	struct xdma_device *xdev = xchan->xdev_hdl;
+	struct virt_dma_desc *vd, *next_vd;
 	struct xdma_desc *desc;
 	int ret;
 	u32 st;
+	bool repeat_tx;
 
 	spin_lock(&xchan->vchan.lock);
 
@@ -824,9 +883,6 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
 	if (!vd)
 		goto out;
 
-	desc = to_xdma_desc(vd);
-	xdev = xchan->xdev_hdl;
-
 	/* Clear-on-read the status register */
 	ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st);
 	if (ret)
@@ -845,10 +901,36 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
 	if (ret)
 		goto out;
 
-	if (desc->cyclic) {
-		desc->completed_desc_num = complete_desc_num;
-		vchan_cyclic_callback(vd);
-	} else {
+	desc = to_xdma_desc(vd);
+	if (desc->interleaved_dma) {
+		xchan->busy = false;
+		desc->completed_desc_num += complete_desc_num;
+		if (complete_desc_num == XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) {
+			xdma_xfer_start(xchan);
+			goto out;
+		}
+
+		/* last desc of any frame */
+		desc->frames_left--;
+		if (desc->frames_left)
+			goto out;
+
+		/* last desc of the last frame */
+		repeat_tx = vd->tx.flags & DMA_PREP_REPEAT;
+		next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node);
+		if (next_vd)
+			repeat_tx = repeat_tx && !(next_vd->tx.flags & DMA_PREP_LOAD_EOT);
+		if (repeat_tx) {
+			desc->frames_left = desc->periods;
+			desc->completed_desc_num = 0;
+			vchan_cyclic_callback(vd);
+		} else {
+			list_del(&vd->node);
+			vchan_cookie_complete(vd);
+		}
+		/* start (or continue) the tx of a first desc on the vc.desc_issued list, if any */
+		xdma_xfer_start(xchan);
+	} else if (!desc->cyclic) {
 		xchan->busy = false;
 		desc->completed_desc_num += complete_desc_num;
 
@@ -865,6 +947,9 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
 
 		/* transfer the rest of data */
 		xdma_xfer_start(xchan);
+	} else {
+		desc->completed_desc_num = complete_desc_num;
+		vchan_cyclic_callback(vd);
 	}
 
 out:
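
The ISR above only stops a repeated transfer when the descriptor queued behind it carries DMA_PREP_LOAD_EOT; otherwise it rewinds frames_left and loops. A hedged sketch (not part of this patch) of replacing the looping transfer at a period boundary, assuming "chan" and a second template "next_xt" prepared as in the earlier example:

static int example_replace_repeated_xfer(struct dma_chan *chan,
					 struct dma_interleaved_template *next_xt)
{
	struct dma_async_tx_descriptor *tx;

	/* LOAD_EOT ends the current loop; REPEAT makes this one loop in turn */
	tx = dmaengine_prep_interleaved_dma(chan, next_xt,
					    DMA_PREP_REPEAT | DMA_PREP_LOAD_EOT |
					    DMA_CTRL_ACK);
	if (!tx)
		return -EIO;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}
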
@@ -1163,6 +1248,9 @@ static int xdma_probe(struct platform_device *pdev)
 	dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
 	dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
 	dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
+	dma_cap_set(DMA_INTERLEAVE, xdev->dma_dev.cap_mask);
+	dma_cap_set(DMA_REPEAT, xdev->dma_dev.cap_mask);
+	dma_cap_set(DMA_LOAD_EOT, xdev->dma_dev.cap_mask);
 
 	xdev->dma_dev.dev = &pdev->dev;
 	xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
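
Since the dmaengine core is expected to reject DMA_PREP_REPEAT on devices that do not advertise DMA_REPEAT, a consumer can probe the capability mask before relying on the flags this hunk enables; a hedged one-liner sketch:

	/* Sketch: bail out early if hardware-repeated transfers are unsupported */
	if (!dma_has_cap(DMA_REPEAT, chan->device->cap_mask) ||
	    !dma_has_cap(DMA_LOAD_EOT, chan->device->cap_mask))
		return -EOPNOTSUPP;
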
@@ -1178,6 +1266,7 @@ static int xdma_probe(struct platform_device *pdev)
 	xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
 	xdev->dma_dev.filter.fn = xdma_filter_fn;
 	xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
+	xdev->dma_dev.device_prep_interleaved_dma = xdma_prep_interleaved_dma;
 
 	ret = dma_async_device_register(&xdev->dma_dev);
 	if (ret) {