/* PDMA controller register offsets */
#define DCSR		0x0000
#define DALGN		0x00a0
#define DINT		0x00f0
/*
 * Per-channel descriptor/address registers: each channel's register bank
 * is 16 bytes wide, so index by (n << 4). These must be function-like
 * macros — no space between the macro name and '(' — otherwise the
 * preprocessor defines an object-like macro and DDADR(n) stops expanding
 * as a call.
 */
#define DDADR(n)	(0x0200 + ((n) << 4))
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DCMD		0x020c
@@ -120,12 +120,55 @@ struct mmp_pdma_phy {
120120 struct mmp_pdma_chan * vchan ;
121121};
122122
123+ /**
124+ * struct mmp_pdma_ops - Operations for the MMP PDMA controller
125+ *
126+ * Hardware Register Operations (read/write hardware registers):
127+ * @write_next_addr: Function to program address of next descriptor into
128+ * DDADR/DDADRH
129+ * @read_src_addr: Function to read the source address from DSADR/DSADRH
130+ * @read_dst_addr: Function to read the destination address from DTADR/DTADRH
131+ *
132+ * Descriptor Memory Operations (manipulate descriptor structs in memory):
133+ * @set_desc_next_addr: Function to set next descriptor address in descriptor
134+ * @set_desc_src_addr: Function to set the source address in descriptor
135+ * @set_desc_dst_addr: Function to set the destination address in descriptor
136+ * @get_desc_src_addr: Function to get the source address from descriptor
137+ * @get_desc_dst_addr: Function to get the destination address from descriptor
138+ *
139+ * Controller Configuration:
140+ * @run_bits: Control bits in DCSR register for channel start/stop
141+ * @dma_mask: DMA addressing capability of controller. 0 to use OF/platform
142+ * settings, or explicit mask like DMA_BIT_MASK(32/64)
143+ */
144+ struct mmp_pdma_ops {
145+ /* Hardware Register Operations */
146+ void (* write_next_addr )(struct mmp_pdma_phy * phy , dma_addr_t addr );
147+ u64 (* read_src_addr )(struct mmp_pdma_phy * phy );
148+ u64 (* read_dst_addr )(struct mmp_pdma_phy * phy );
149+
150+ /* Descriptor Memory Operations */
151+ void (* set_desc_next_addr )(struct mmp_pdma_desc_hw * desc ,
152+ dma_addr_t addr );
153+ void (* set_desc_src_addr )(struct mmp_pdma_desc_hw * desc ,
154+ dma_addr_t addr );
155+ void (* set_desc_dst_addr )(struct mmp_pdma_desc_hw * desc ,
156+ dma_addr_t addr );
157+ u64 (* get_desc_src_addr )(const struct mmp_pdma_desc_hw * desc );
158+ u64 (* get_desc_dst_addr )(const struct mmp_pdma_desc_hw * desc );
159+
160+ /* Controller Configuration */
161+ u32 run_bits ;
162+ u64 dma_mask ;
163+ };
164+
123165struct mmp_pdma_device {
124166 int dma_channels ;
125167 void __iomem * base ;
126168 struct device * dev ;
127169 struct dma_device device ;
128170 struct mmp_pdma_phy * phy ;
171+ const struct mmp_pdma_ops * ops ;
129172 spinlock_t phy_lock ; /* protect alloc/free phy channels */
130173};
131174
@@ -138,24 +181,61 @@ struct mmp_pdma_device {
/* Map the embedded struct dma_device back to its mmp_pdma_device. */
#define to_mmp_pdma_dev(dmadev) \
	container_of(dmadev, struct mmp_pdma_device, device)
140183
141- static int mmp_pdma_config_write (struct dma_chan * dchan ,
142- struct dma_slave_config * cfg ,
143- enum dma_transfer_direction direction );
184+ /* For 32-bit PDMA */
185+ static void write_next_addr_32 (struct mmp_pdma_phy * phy , dma_addr_t addr )
186+ {
187+ writel (addr , phy -> base + DDADR (phy -> idx ));
188+ }
189+
190+ static u64 read_src_addr_32 (struct mmp_pdma_phy * phy )
191+ {
192+ return readl (phy -> base + DSADR (phy -> idx ));
193+ }
194+
195+ static u64 read_dst_addr_32 (struct mmp_pdma_phy * phy )
196+ {
197+ return readl (phy -> base + DTADR (phy -> idx ));
198+ }
199+
200+ static void set_desc_next_addr_32 (struct mmp_pdma_desc_hw * desc , dma_addr_t addr )
201+ {
202+ desc -> ddadr = addr ;
203+ }
204+
205+ static void set_desc_src_addr_32 (struct mmp_pdma_desc_hw * desc , dma_addr_t addr )
206+ {
207+ desc -> dsadr = addr ;
208+ }
144209
145- static void set_desc (struct mmp_pdma_phy * phy , dma_addr_t addr )
210+ static void set_desc_dst_addr_32 (struct mmp_pdma_desc_hw * desc , dma_addr_t addr )
146211{
147- u32 reg = (phy -> idx << 4 ) + DDADR ;
212+ desc -> dtadr = addr ;
213+ }
148214
149- writel (addr , phy -> base + reg );
215+ static u64 get_desc_src_addr_32 (const struct mmp_pdma_desc_hw * desc )
216+ {
217+ return desc -> dsadr ;
150218}
151219
220+ static u64 get_desc_dst_addr_32 (const struct mmp_pdma_desc_hw * desc )
221+ {
222+ return desc -> dtadr ;
223+ }
224+
/* Forward declaration: defined later, needed by the prep callbacks above it. */
static int mmp_pdma_config_write(struct dma_chan *dchan,
				 struct dma_slave_config *cfg,
				 enum dma_transfer_direction direction);
228+
152229static void enable_chan (struct mmp_pdma_phy * phy )
153230{
154231 u32 reg , dalgn ;
232+ struct mmp_pdma_device * pdev ;
155233
156234 if (!phy -> vchan )
157235 return ;
158236
237+ pdev = to_mmp_pdma_dev (phy -> vchan -> chan .device );
238+
159239 reg = DRCMR (phy -> vchan -> drcmr );
160240 writel (DRCMR_MAPVLD | phy -> idx , phy -> base + reg );
161241
@@ -167,18 +247,29 @@ static void enable_chan(struct mmp_pdma_phy *phy)
167247 writel (dalgn , phy -> base + DALGN );
168248
169249 reg = (phy -> idx << 2 ) + DCSR ;
170- writel (readl (phy -> base + reg ) | DCSR_RUN , phy -> base + reg );
250+ writel (readl (phy -> base + reg ) | pdev -> ops -> run_bits ,
251+ phy -> base + reg );
171252}
172253
173254static void disable_chan (struct mmp_pdma_phy * phy )
174255{
175- u32 reg ;
256+ u32 reg , dcsr ;
176257
177258 if (!phy )
178259 return ;
179260
180261 reg = (phy -> idx << 2 ) + DCSR ;
181- writel (readl (phy -> base + reg ) & ~DCSR_RUN , phy -> base + reg );
262+ dcsr = readl (phy -> base + reg );
263+
264+ if (phy -> vchan ) {
265+ struct mmp_pdma_device * pdev ;
266+
267+ pdev = to_mmp_pdma_dev (phy -> vchan -> chan .device );
268+ writel (dcsr & ~pdev -> ops -> run_bits , phy -> base + reg );
269+ } else {
270+ /* If no vchan, just clear the RUN bit */
271+ writel (dcsr & ~DCSR_RUN , phy -> base + reg );
272+ }
182273}
183274
184275static int clear_chan_irq (struct mmp_pdma_phy * phy )
@@ -297,6 +388,7 @@ static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
297388static void start_pending_queue (struct mmp_pdma_chan * chan )
298389{
299390 struct mmp_pdma_desc_sw * desc ;
391+ struct mmp_pdma_device * pdev = to_mmp_pdma_dev (chan -> chan .device );
300392
301393 /* still in running, irq will start the pending list */
302394 if (!chan -> idle ) {
@@ -331,7 +423,7 @@ static void start_pending_queue(struct mmp_pdma_chan *chan)
331423 * Program the descriptor's address into the DMA controller,
332424 * then start the DMA transaction
333425 */
334- set_desc (chan -> phy , desc -> async_tx .phys );
426+ pdev -> ops -> write_next_addr (chan -> phy , desc -> async_tx .phys );
335427 enable_chan (chan -> phy );
336428 chan -> idle = false;
337429}
@@ -447,15 +539,14 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
447539 size_t len , unsigned long flags )
448540{
449541 struct mmp_pdma_chan * chan ;
542+ struct mmp_pdma_device * pdev ;
450543 struct mmp_pdma_desc_sw * first = NULL , * prev = NULL , * new ;
451544 size_t copy = 0 ;
452545
453- if (!dchan )
454- return NULL ;
455-
456- if (!len )
546+ if (!dchan || !len )
457547 return NULL ;
458548
549+ pdev = to_mmp_pdma_dev (dchan -> device );
459550 chan = to_mmp_pdma_chan (dchan );
460551 chan -> byte_align = false;
461552
@@ -478,13 +569,14 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
478569 chan -> byte_align = true;
479570
480571 new -> desc .dcmd = chan -> dcmd | (DCMD_LENGTH & copy );
481- new -> desc . dsadr = dma_src ;
482- new -> desc . dtadr = dma_dst ;
572+ pdev -> ops -> set_desc_src_addr ( & new -> desc , dma_src ) ;
573+ pdev -> ops -> set_desc_dst_addr ( & new -> desc , dma_dst ) ;
483574
484575 if (!first )
485576 first = new ;
486577 else
487- prev -> desc .ddadr = new -> async_tx .phys ;
578+ pdev -> ops -> set_desc_next_addr (& prev -> desc ,
579+ new -> async_tx .phys );
488580
489581 new -> async_tx .cookie = 0 ;
490582 async_tx_ack (& new -> async_tx );
@@ -528,6 +620,7 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
528620 unsigned long flags , void * context )
529621{
530622 struct mmp_pdma_chan * chan = to_mmp_pdma_chan (dchan );
623+ struct mmp_pdma_device * pdev = to_mmp_pdma_dev (dchan -> device );
531624 struct mmp_pdma_desc_sw * first = NULL , * prev = NULL , * new = NULL ;
532625 size_t len , avail ;
533626 struct scatterlist * sg ;
@@ -559,17 +652,18 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
559652
560653 new -> desc .dcmd = chan -> dcmd | (DCMD_LENGTH & len );
561654 if (dir == DMA_MEM_TO_DEV ) {
562- new -> desc . dsadr = addr ;
655+ pdev -> ops -> set_desc_src_addr ( & new -> desc , addr ) ;
563656 new -> desc .dtadr = chan -> dev_addr ;
564657 } else {
565658 new -> desc .dsadr = chan -> dev_addr ;
566- new -> desc . dtadr = addr ;
659+ pdev -> ops -> set_desc_dst_addr ( & new -> desc , addr ) ;
567660 }
568661
569662 if (!first )
570663 first = new ;
571664 else
572- prev -> desc .ddadr = new -> async_tx .phys ;
665+ pdev -> ops -> set_desc_next_addr (& prev -> desc ,
666+ new -> async_tx .phys );
573667
574668 new -> async_tx .cookie = 0 ;
575669 async_tx_ack (& new -> async_tx );
@@ -609,12 +703,15 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
609703 unsigned long flags )
610704{
611705 struct mmp_pdma_chan * chan ;
706+ struct mmp_pdma_device * pdev ;
612707 struct mmp_pdma_desc_sw * first = NULL , * prev = NULL , * new ;
613708 dma_addr_t dma_src , dma_dst ;
614709
615710 if (!dchan || !len || !period_len )
616711 return NULL ;
617712
713+ pdev = to_mmp_pdma_dev (dchan -> device );
714+
618715 /* the buffer length must be a multiple of period_len */
619716 if (len % period_len != 0 )
620717 return NULL ;
@@ -651,13 +748,14 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
651748
652749 new -> desc .dcmd = (chan -> dcmd | DCMD_ENDIRQEN |
653750 (DCMD_LENGTH & period_len ));
654- new -> desc . dsadr = dma_src ;
655- new -> desc . dtadr = dma_dst ;
751+ pdev -> ops -> set_desc_src_addr ( & new -> desc , dma_src ) ;
752+ pdev -> ops -> set_desc_dst_addr ( & new -> desc , dma_dst ) ;
656753
657754 if (!first )
658755 first = new ;
659756 else
660- prev -> desc .ddadr = new -> async_tx .phys ;
757+ pdev -> ops -> set_desc_next_addr (& prev -> desc ,
758+ new -> async_tx .phys );
661759
662760 new -> async_tx .cookie = 0 ;
663761 async_tx_ack (& new -> async_tx );
@@ -678,7 +776,7 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
678776 first -> async_tx .cookie = - EBUSY ;
679777
680778 /* make the cyclic link */
681- new -> desc . ddadr = first -> async_tx .phys ;
779+ pdev -> ops -> set_desc_next_addr ( & new -> desc , first -> async_tx .phys ) ;
682780 chan -> cyclic_first = first ;
683781
684782 return & first -> async_tx ;
@@ -764,7 +862,9 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
764862 dma_cookie_t cookie )
765863{
766864 struct mmp_pdma_desc_sw * sw ;
767- u32 curr , residue = 0 ;
865+ struct mmp_pdma_device * pdev = to_mmp_pdma_dev (chan -> chan .device );
866+ u64 curr ;
867+ u32 residue = 0 ;
768868 bool passed = false;
769869 bool cyclic = chan -> cyclic_first != NULL ;
770870
@@ -776,17 +876,18 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
776876 return 0 ;
777877
778878 if (chan -> dir == DMA_DEV_TO_MEM )
779- curr = readl ( chan -> phy -> base + DTADR (chan -> phy -> idx ) );
879+ curr = pdev -> ops -> read_dst_addr (chan -> phy );
780880 else
781- curr = readl ( chan -> phy -> base + DSADR (chan -> phy -> idx ) );
881+ curr = pdev -> ops -> read_src_addr (chan -> phy );
782882
783883 list_for_each_entry (sw , & chan -> chain_running , node ) {
784- u32 start , end , len ;
884+ u64 start , end ;
885+ u32 len ;
785886
786887 if (chan -> dir == DMA_DEV_TO_MEM )
787- start = sw -> desc . dtadr ;
888+ start = pdev -> ops -> get_desc_dst_addr ( & sw -> desc ) ;
788889 else
789- start = sw -> desc . dsadr ;
890+ start = pdev -> ops -> get_desc_src_addr ( & sw -> desc ) ;
790891
791892 len = sw -> desc .dcmd & DCMD_LENGTH ;
792893 end = start + len ;
@@ -802,7 +903,7 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
802903 if (passed ) {
803904 residue += len ;
804905 } else if (curr >= start && curr <= end ) {
805- residue += end - curr ;
906+ residue += ( u32 )( end - curr ) ;
806907 passed = true;
807908 }
808909
@@ -996,9 +1097,26 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
9961097 return 0 ;
9971098}
9981099
1100+ static const struct mmp_pdma_ops marvell_pdma_v1_ops = {
1101+ .write_next_addr = write_next_addr_32 ,
1102+ .read_src_addr = read_src_addr_32 ,
1103+ .read_dst_addr = read_dst_addr_32 ,
1104+ .set_desc_next_addr = set_desc_next_addr_32 ,
1105+ .set_desc_src_addr = set_desc_src_addr_32 ,
1106+ .set_desc_dst_addr = set_desc_dst_addr_32 ,
1107+ .get_desc_src_addr = get_desc_src_addr_32 ,
1108+ .get_desc_dst_addr = get_desc_dst_addr_32 ,
1109+ .run_bits = (DCSR_RUN ),
1110+ .dma_mask = 0 , /* let OF/platform set DMA mask */
1111+ };
1112+
9991113static const struct of_device_id mmp_pdma_dt_ids [] = {
1000- { .compatible = "marvell,pdma-1.0" , },
1001- {}
1114+ {
1115+ .compatible = "marvell,pdma-1.0" ,
1116+ .data = & marvell_pdma_v1_ops
1117+ }, {
1118+ /* sentinel */
1119+ }
10021120};
10031121MODULE_DEVICE_TABLE (of , mmp_pdma_dt_ids );
10041122
@@ -1050,6 +1168,10 @@ static int mmp_pdma_probe(struct platform_device *op)
10501168 if (IS_ERR (rst ))
10511169 return PTR_ERR (rst );
10521170
1171+ pdev -> ops = of_device_get_match_data (& op -> dev );
1172+ if (!pdev -> ops )
1173+ return - ENODEV ;
1174+
10531175 if (pdev -> dev -> of_node ) {
10541176 /* Parse new and deprecated dma-channels properties */
10551177 if (of_property_read_u32 (pdev -> dev -> of_node , "dma-channels" ,
@@ -1111,7 +1233,10 @@ static int mmp_pdma_probe(struct platform_device *op)
11111233 pdev -> device .directions = BIT (DMA_MEM_TO_DEV ) | BIT (DMA_DEV_TO_MEM );
11121234 pdev -> device .residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR ;
11131235
1114- if (pdev -> dev -> coherent_dma_mask )
1236+ /* Set DMA mask based on ops->dma_mask, or OF/platform */
1237+ if (pdev -> ops -> dma_mask )
1238+ dma_set_mask (pdev -> dev , pdev -> ops -> dma_mask );
1239+ else if (pdev -> dev -> coherent_dma_mask )
11151240 dma_set_mask (pdev -> dev , pdev -> dev -> coherent_dma_mask );
11161241 else
11171242 dma_set_mask (pdev -> dev , DMA_BIT_MASK (64 ));
0 commit comments