@@ -83,8 +83,10 @@ struct xdma_chan {
  * @desc_num: Number of hardware descriptors
  * @completed_desc_num: Completed hardware descriptors
  * @cyclic: Cyclic transfer vs. scatter-gather
+ * @interleaved_dma: Interleaved DMA transfer
  * @periods: Number of periods in the cyclic transfer
  * @period_size: Size of a period in bytes in cyclic transfers
+ * @frames_left: Number of frames left in interleaved DMA transfer
  * @error: tx error flag
  */
 struct xdma_desc {
@@ -96,8 +98,10 @@ struct xdma_desc {
         u32 desc_num;
         u32 completed_desc_num;
         bool cyclic;
+        bool interleaved_dma;
         u32 periods;
         u32 period_size;
+        u32 frames_left;
         bool error;
 };
 
@@ -607,6 +611,8 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
         if (!sw_desc)
                 return NULL;
         sw_desc->dir = dir;
+        sw_desc->cyclic = false;
+        sw_desc->interleaved_dma = false;
 
         if (dir == DMA_MEM_TO_DEV) {
                 dev_addr = xdma_chan->cfg.dst_addr;
@@ -682,6 +688,7 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
         sw_desc->periods = periods;
         sw_desc->period_size = period_size;
         sw_desc->dir = dir;
+        sw_desc->interleaved_dma = false;
 
         addr = address;
         if (dir == DMA_MEM_TO_DEV) {
@@ -712,6 +719,57 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
         return NULL;
 }
 
+/**
+ * xdma_prep_interleaved_dma - Prepare virtual descriptor for interleaved DMA transfers
+ * @chan: DMA channel
+ * @xt: DMA transfer template
+ * @flags: tx flags
+ */
+struct dma_async_tx_descriptor *
+xdma_prep_interleaved_dma(struct dma_chan *chan,
+                          struct dma_interleaved_template *xt,
+                          unsigned long flags)
+{
+        int i;
+        u32 desc_num = 0, period_size = 0;
+        struct dma_async_tx_descriptor *tx_desc;
+        struct xdma_chan *xchan = to_xdma_chan(chan);
+        struct xdma_desc *sw_desc;
+        u64 src_addr, dst_addr;
+
+        for (i = 0; i < xt->frame_size; ++i)
+                desc_num += DIV_ROUND_UP(xt->sgl[i].size, XDMA_DESC_BLEN_MAX);
+
+        sw_desc = xdma_alloc_desc(xchan, desc_num, false);
+        if (!sw_desc)
+                return NULL;
+        sw_desc->dir = xt->dir;
+        sw_desc->interleaved_dma = true;
+        sw_desc->cyclic = flags & DMA_PREP_REPEAT;
+        sw_desc->frames_left = xt->numf;
+        sw_desc->periods = xt->numf;
+
+        desc_num = 0;
+        src_addr = xt->src_start;
+        dst_addr = xt->dst_start;
+        for (i = 0; i < xt->frame_size; ++i) {
+                desc_num += xdma_fill_descs(sw_desc, src_addr, dst_addr, xt->sgl[i].size, desc_num);
+                src_addr += dmaengine_get_src_icg(xt, &xt->sgl[i]) + (xt->src_inc ?
+                                                                      xt->sgl[i].size : 0);
+                dst_addr += dmaengine_get_dst_icg(xt, &xt->sgl[i]) + (xt->dst_inc ?
+                                                                      xt->sgl[i].size : 0);
+                period_size += xt->sgl[i].size;
+        }
+        sw_desc->period_size = period_size;
+
+        tx_desc = vchan_tx_prep(&xchan->vchan, &sw_desc->vdesc, flags);
+        if (tx_desc)
+                return tx_desc;
+
+        xdma_free_desc(&sw_desc->vdesc);
+        return NULL;
+}
+
 /**
  * xdma_device_config - Configure the DMA channel
  * @chan: DMA channel
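
A client reaches this new prep callback through the generic dmaengine_prep_interleaved_dma() helper, which among other things refuses DMA_PREP_REPEAT on channels that do not advertise the DMA_REPEAT capability set in probe below. The following is a minimal, hypothetical consumer sketch: the channel name, frame geometry and function names are invented for illustration; only the dmaengine calls and the dma_interleaved_template fields are taken from the kernel API, and freeing the template right after prep assumes (as this driver does) that the geometry is copied into the software descriptor at prep time.

#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/slab.h>

/*
 * Hypothetical consumer sketch: start one MEM_TO_DEV template of @nframes
 * back-to-back frames of @frame_len bytes each, repeated until replaced.
 * The channel name "tx-frames" and all identifiers here are illustrative.
 */
static int start_repeated_frames(struct device *dev, dma_addr_t buf_phys,
                                 size_t frame_len, unsigned int nframes,
                                 dma_async_tx_callback frame_done_cb)
{
        struct dma_interleaved_template *xt;
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *chan;
        int ret;

        chan = dma_request_chan(dev, "tx-frames");      /* assumed channel name */
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        /* One contiguous chunk per frame, hence frame_size = 1 */
        xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
        if (!xt) {
                ret = -ENOMEM;
                goto release;
        }

        xt->dir = DMA_MEM_TO_DEV;
        xt->src_start = buf_phys;       /* DMA address of the memory buffer */
        xt->src_inc = true;             /* walk through memory frame by frame */
        xt->dst_inc = false;            /* device side stays at a fixed address */
        xt->numf = nframes;             /* frames per repetition */
        xt->frame_size = 1;             /* chunks per frame */
        xt->sgl[0].size = frame_len;
        xt->sgl[0].icg = 0;             /* no gap between frames */

        /*
         * DMA_PREP_REPEAT re-arms the whole template when it completes; it
         * keeps running until a later descriptor flagged DMA_PREP_LOAD_EOT
         * is queued on the channel (see the interrupt handler changes).
         */
        tx = dmaengine_prep_interleaved_dma(chan, xt,
                                            DMA_PREP_INTERRUPT |
                                            DMA_PREP_REPEAT |
                                            DMA_PREP_LOAD_EOT);
        kfree(xt);      /* assumption: xdma copies the geometry at prep time */
        if (!tx) {
                ret = -EINVAL;
                goto release;
        }

        tx->callback = frame_done_cb;   /* invoked once per completed repetition */
        dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        return 0;

release:
        dma_release_channel(chan);
        return ret;
}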
@@ -811,11 +869,12 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
 {
         struct xdma_chan *xchan = dev_id;
         u32 complete_desc_num = 0;
-        struct xdma_device *xdev;
-        struct virt_dma_desc *vd;
+        struct xdma_device *xdev = xchan->xdev_hdl;
+        struct virt_dma_desc *vd, *next_vd;
         struct xdma_desc *desc;
         int ret;
         u32 st;
+        bool repeat_tx;
 
         spin_lock(&xchan->vchan.lock);
 
@@ -824,9 +883,6 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
         if (!vd)
                 goto out;
 
-        desc = to_xdma_desc(vd);
-        xdev = xchan->xdev_hdl;
-
         /* Clear-on-read the status register */
         ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st);
         if (ret)
@@ -845,10 +901,36 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
         if (ret)
                 goto out;
 
-        if (desc->cyclic) {
-                desc->completed_desc_num = complete_desc_num;
-                vchan_cyclic_callback(vd);
-        } else {
+        desc = to_xdma_desc(vd);
+        if (desc->interleaved_dma) {
+                xchan->busy = false;
+                desc->completed_desc_num += complete_desc_num;
+                if (complete_desc_num == XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) {
+                        xdma_xfer_start(xchan);
+                        goto out;
+                }
+
+                /* last desc of any frame */
+                desc->frames_left--;
+                if (desc->frames_left)
+                        goto out;
+
+                /* last desc of the last frame */
+                repeat_tx = vd->tx.flags & DMA_PREP_REPEAT;
+                next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node);
+                if (next_vd)
+                        repeat_tx = repeat_tx && !(next_vd->tx.flags & DMA_PREP_LOAD_EOT);
+                if (repeat_tx) {
+                        desc->frames_left = desc->periods;
+                        desc->completed_desc_num = 0;
+                        vchan_cyclic_callback(vd);
+                } else {
+                        list_del(&vd->node);
+                        vchan_cookie_complete(vd);
+                }
+                /* start (or continue) the tx of a first desc on the vc.desc_issued list, if any */
+                xdma_xfer_start(xchan);
+        } else if (!desc->cyclic) {
                 xchan->busy = false;
                 desc->completed_desc_num += complete_desc_num;
 
@@ -865,6 +947,9 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
 
                 /* transfer the rest of data */
                 xdma_xfer_start(xchan);
+        } else {
+                desc->completed_desc_num = complete_desc_num;
+                vchan_cyclic_callback(vd);
         }
 
 out:
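
The repeat path above keeps re-issuing the same descriptor until it finds a follow-up descriptor carrying DMA_PREP_LOAD_EOT queued on the channel. A hedged sketch of that client-side hand-over is shown below; the function and variable names are invented, only the flag semantics and the dmaengine calls are real.

#include <linux/dmaengine.h>

/*
 * Hypothetical sketch: swap in a new interleaved template at the next
 * repetition boundary of an already running, repeated transfer.
 */
static int replace_repeated_frames(struct dma_chan *chan,
                                   struct dma_interleaved_template *new_xt)
{
        struct dma_async_tx_descriptor *tx;

        /*
         * DMA_PREP_LOAD_EOT lets this descriptor take over from the one
         * currently repeating; DMA_PREP_REPEAT makes the replacement
         * repeat in turn.
         */
        tx = dmaengine_prep_interleaved_dma(chan, new_xt,
                                            DMA_PREP_INTERRUPT |
                                            DMA_PREP_REPEAT |
                                            DMA_PREP_LOAD_EOT);
        if (!tx)
                return -EINVAL;

        /*
         * After the last frame of the old descriptor, the ISR above sees
         * this one as next_vd with DMA_PREP_LOAD_EOT set, completes the old
         * descriptor and starts this one via xdma_xfer_start().
         */
        dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        return 0;
}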
@@ -1163,6 +1248,9 @@ static int xdma_probe(struct platform_device *pdev)
         dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
         dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
         dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
+        dma_cap_set(DMA_INTERLEAVE, xdev->dma_dev.cap_mask);
+        dma_cap_set(DMA_REPEAT, xdev->dma_dev.cap_mask);
+        dma_cap_set(DMA_LOAD_EOT, xdev->dma_dev.cap_mask);
 
         xdev->dma_dev.dev = &pdev->dev;
         xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
@@ -1178,6 +1266,7 @@ static int xdma_probe(struct platform_device *pdev)
         xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
         xdev->dma_dev.filter.fn = xdma_filter_fn;
         xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
+        xdev->dma_dev.device_prep_interleaved_dma = xdma_prep_interleaved_dma;
 
         ret = dma_async_device_register(&xdev->dma_dev);
         if (ret) {