97
97
/* The maximum ID allocated by the hardware is 31 */
98
98
#define AXI_DMAC_SG_UNUSED 32U
99
99
100
/*
 * In-memory hardware descriptor for the AXI DMAC scatter-gather engine.
 *
 * This layout is consumed directly by the DMA controller (descriptors are
 * placed in DMA-coherent memory), so field order, widths and the trailing
 * padding must not be changed.  Total size is 64 bytes.
 */
struct axi_dmac_hw_desc {
	u32 flags;	/* descriptor control flags */
	u32 id;		/* hardware transfer ID; AXI_DMAC_SG_UNUSED while idle */
	u64 dest_addr;	/* destination bus address */
	u64 src_addr;	/* source bus address */
	u64 __unused;	/* reserved slot in the hardware layout */
	u32 y_len;	/* row count minus one (0 for 1D transfers) */
	u32 x_len;	/* bytes per row minus one */
	u32 src_stride;	/* source row stride for 2D transfers */
	u32 dst_stride;	/* destination row stride for 2D transfers */
	u64 __pad[2];	/* pad the descriptor out to 64 bytes */
};
112
+
100
113
struct axi_dmac_sg {
101
- dma_addr_t src_addr ;
102
- dma_addr_t dest_addr ;
103
- unsigned int x_len ;
104
- unsigned int y_len ;
105
- unsigned int dest_stride ;
106
- unsigned int src_stride ;
107
- unsigned int id ;
108
114
unsigned int partial_len ;
109
115
bool schedule_when_free ;
116
+
117
+ struct axi_dmac_hw_desc * hw ;
118
+ dma_addr_t hw_phys ;
110
119
};
111
120
112
121
struct axi_dmac_desc {
113
122
struct virt_dma_desc vdesc ;
123
+ struct axi_dmac_chan * chan ;
124
+
114
125
bool cyclic ;
115
126
bool have_partial_xfer ;
116
127
@@ -229,7 +240,7 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
229
240
sg = & desc -> sg [desc -> num_submitted ];
230
241
231
242
/* Already queued in cyclic mode. Wait for it to finish */
232
- if (sg -> id != AXI_DMAC_SG_UNUSED ) {
243
+ if (sg -> hw -> id != AXI_DMAC_SG_UNUSED ) {
233
244
sg -> schedule_when_free = true;
234
245
return ;
235
246
}
@@ -246,16 +257,16 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
246
257
chan -> next_desc = desc ;
247
258
}
248
259
249
- sg -> id = axi_dmac_read (dmac , AXI_DMAC_REG_TRANSFER_ID );
260
+ sg -> hw -> id = axi_dmac_read (dmac , AXI_DMAC_REG_TRANSFER_ID );
250
261
251
262
if (axi_dmac_dest_is_mem (chan )) {
252
- axi_dmac_write (dmac , AXI_DMAC_REG_DEST_ADDRESS , sg -> dest_addr );
253
- axi_dmac_write (dmac , AXI_DMAC_REG_DEST_STRIDE , sg -> dest_stride );
263
+ axi_dmac_write (dmac , AXI_DMAC_REG_DEST_ADDRESS , sg -> hw -> dest_addr );
264
+ axi_dmac_write (dmac , AXI_DMAC_REG_DEST_STRIDE , sg -> hw -> dst_stride );
254
265
}
255
266
256
267
if (axi_dmac_src_is_mem (chan )) {
257
- axi_dmac_write (dmac , AXI_DMAC_REG_SRC_ADDRESS , sg -> src_addr );
258
- axi_dmac_write (dmac , AXI_DMAC_REG_SRC_STRIDE , sg -> src_stride );
268
+ axi_dmac_write (dmac , AXI_DMAC_REG_SRC_ADDRESS , sg -> hw -> src_addr );
269
+ axi_dmac_write (dmac , AXI_DMAC_REG_SRC_STRIDE , sg -> hw -> src_stride );
259
270
}
260
271
261
272
/*
@@ -270,8 +281,8 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
270
281
if (chan -> hw_partial_xfer )
271
282
flags |= AXI_DMAC_FLAG_PARTIAL_REPORT ;
272
283
273
- axi_dmac_write (dmac , AXI_DMAC_REG_X_LENGTH , sg -> x_len - 1 );
274
- axi_dmac_write (dmac , AXI_DMAC_REG_Y_LENGTH , sg -> y_len - 1 );
284
+ axi_dmac_write (dmac , AXI_DMAC_REG_X_LENGTH , sg -> hw -> x_len );
285
+ axi_dmac_write (dmac , AXI_DMAC_REG_Y_LENGTH , sg -> hw -> y_len );
275
286
axi_dmac_write (dmac , AXI_DMAC_REG_FLAGS , flags );
276
287
axi_dmac_write (dmac , AXI_DMAC_REG_START_TRANSFER , 1 );
277
288
}
@@ -286,9 +297,9 @@ static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
286
297
struct axi_dmac_sg * sg )
287
298
{
288
299
if (chan -> hw_2d )
289
- return sg -> x_len * sg -> y_len ;
300
+ return ( sg -> hw -> x_len + 1 ) * ( sg -> hw -> y_len + 1 ) ;
290
301
else
291
- return sg -> x_len ;
302
+ return ( sg -> hw -> x_len + 1 ) ;
292
303
}
293
304
294
305
static void axi_dmac_dequeue_partial_xfers (struct axi_dmac_chan * chan )
@@ -307,9 +318,9 @@ static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
307
318
list_for_each_entry (desc , & chan -> active_descs , vdesc .node ) {
308
319
for (i = 0 ; i < desc -> num_sgs ; i ++ ) {
309
320
sg = & desc -> sg [i ];
310
- if (sg -> id == AXI_DMAC_SG_UNUSED )
321
+ if (sg -> hw -> id == AXI_DMAC_SG_UNUSED )
311
322
continue ;
312
- if (sg -> id == id ) {
323
+ if (sg -> hw -> id == id ) {
313
324
desc -> have_partial_xfer = true;
314
325
sg -> partial_len = len ;
315
326
found_sg = true;
@@ -376,12 +387,12 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
376
387
377
388
do {
378
389
sg = & active -> sg [active -> num_completed ];
379
- if (sg -> id == AXI_DMAC_SG_UNUSED ) /* Not yet submitted */
390
+ if (sg -> hw -> id == AXI_DMAC_SG_UNUSED ) /* Not yet submitted */
380
391
break ;
381
- if (!(BIT (sg -> id ) & completed_transfers ))
392
+ if (!(BIT (sg -> hw -> id ) & completed_transfers ))
382
393
break ;
383
394
active -> num_completed ++ ;
384
- sg -> id = AXI_DMAC_SG_UNUSED ;
395
+ sg -> hw -> id = AXI_DMAC_SG_UNUSED ;
385
396
if (sg -> schedule_when_free ) {
386
397
sg -> schedule_when_free = false;
387
398
start_next = true;
@@ -476,22 +487,52 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
476
487
spin_unlock_irqrestore (& chan -> vchan .lock , flags );
477
488
}
478
489
479
- static struct axi_dmac_desc * axi_dmac_alloc_desc (unsigned int num_sgs )
490
+ static struct axi_dmac_desc *
491
+ axi_dmac_alloc_desc (struct axi_dmac_chan * chan , unsigned int num_sgs )
480
492
{
493
+ struct axi_dmac * dmac = chan_to_axi_dmac (chan );
494
+ struct device * dev = dmac -> dma_dev .dev ;
495
+ struct axi_dmac_hw_desc * hws ;
481
496
struct axi_dmac_desc * desc ;
497
+ dma_addr_t hw_phys ;
482
498
unsigned int i ;
483
499
484
500
desc = kzalloc (struct_size (desc , sg , num_sgs ), GFP_NOWAIT );
485
501
if (!desc )
486
502
return NULL ;
487
503
desc -> num_sgs = num_sgs ;
504
+ desc -> chan = chan ;
488
505
489
- for (i = 0 ; i < num_sgs ; i ++ )
490
- desc -> sg [i ].id = AXI_DMAC_SG_UNUSED ;
506
+ hws = dma_alloc_coherent (dev , PAGE_ALIGN (num_sgs * sizeof (* hws )),
507
+ & hw_phys , GFP_ATOMIC );
508
+ if (!hws ) {
509
+ kfree (desc );
510
+ return NULL ;
511
+ }
512
+
513
+ for (i = 0 ; i < num_sgs ; i ++ ) {
514
+ desc -> sg [i ].hw = & hws [i ];
515
+ desc -> sg [i ].hw_phys = hw_phys + i * sizeof (* hws );
516
+
517
+ hws [i ].id = AXI_DMAC_SG_UNUSED ;
518
+ hws [i ].flags = 0 ;
519
+ }
491
520
492
521
return desc ;
493
522
}
494
523
524
/*
 * Free a descriptor allocated by axi_dmac_alloc_desc().
 *
 * The hardware descriptors were allocated as a single contiguous coherent
 * block; sg[0] holds the block's base CPU and bus addresses, so freeing
 * through the first segment releases the whole array.
 */
static void axi_dmac_free_desc(struct axi_dmac_desc *desc)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(desc->chan);
	struct device *dev = dmac->dma_dev.dev;
	struct axi_dmac_hw_desc *hw = desc->sg[0].hw;
	dma_addr_t hw_phys = desc->sg[0].hw_phys;

	dma_free_coherent(dev, PAGE_ALIGN(desc->num_sgs * sizeof(*hw)),
			  hw, hw_phys);
	kfree(desc);
}
535
+
495
536
static struct axi_dmac_sg * axi_dmac_fill_linear_sg (struct axi_dmac_chan * chan ,
496
537
enum dma_transfer_direction direction , dma_addr_t addr ,
497
538
unsigned int num_periods , unsigned int period_len ,
@@ -510,21 +551,22 @@ static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
510
551
for (i = 0 ; i < num_periods ; i ++ ) {
511
552
for (len = period_len ; len > segment_size ; sg ++ ) {
512
553
if (direction == DMA_DEV_TO_MEM )
513
- sg -> dest_addr = addr ;
554
+ sg -> hw -> dest_addr = addr ;
514
555
else
515
- sg -> src_addr = addr ;
516
- sg -> x_len = segment_size ;
517
- sg -> y_len = 1 ;
556
+ sg -> hw -> src_addr = addr ;
557
+ sg -> hw -> x_len = segment_size - 1 ;
558
+ sg -> hw -> y_len = 0 ;
559
+ sg -> hw -> flags = 0 ;
518
560
addr += segment_size ;
519
561
len -= segment_size ;
520
562
}
521
563
522
564
if (direction == DMA_DEV_TO_MEM )
523
- sg -> dest_addr = addr ;
565
+ sg -> hw -> dest_addr = addr ;
524
566
else
525
- sg -> src_addr = addr ;
526
- sg -> x_len = len ;
527
- sg -> y_len = 1 ;
567
+ sg -> hw -> src_addr = addr ;
568
+ sg -> hw -> x_len = len - 1 ;
569
+ sg -> hw -> y_len = 0 ;
528
570
sg ++ ;
529
571
addr += len ;
530
572
}
@@ -551,7 +593,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
551
593
for_each_sg (sgl , sg , sg_len , i )
552
594
num_sgs += DIV_ROUND_UP (sg_dma_len (sg ), chan -> max_length );
553
595
554
- desc = axi_dmac_alloc_desc (num_sgs );
596
+ desc = axi_dmac_alloc_desc (chan , num_sgs );
555
597
if (!desc )
556
598
return NULL ;
557
599
@@ -560,7 +602,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
560
602
for_each_sg (sgl , sg , sg_len , i ) {
561
603
if (!axi_dmac_check_addr (chan , sg_dma_address (sg )) ||
562
604
!axi_dmac_check_len (chan , sg_dma_len (sg ))) {
563
- kfree (desc );
605
+ axi_dmac_free_desc (desc );
564
606
return NULL ;
565
607
}
566
608
@@ -595,7 +637,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
595
637
num_periods = buf_len / period_len ;
596
638
num_segments = DIV_ROUND_UP (period_len , chan -> max_length );
597
639
598
- desc = axi_dmac_alloc_desc (num_periods * num_segments );
640
+ desc = axi_dmac_alloc_desc (chan , num_periods * num_segments );
599
641
if (!desc )
600
642
return NULL ;
601
643
@@ -650,26 +692,26 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
650
692
return NULL ;
651
693
}
652
694
653
- desc = axi_dmac_alloc_desc (1 );
695
+ desc = axi_dmac_alloc_desc (chan , 1 );
654
696
if (!desc )
655
697
return NULL ;
656
698
657
699
if (axi_dmac_src_is_mem (chan )) {
658
- desc -> sg [0 ].src_addr = xt -> src_start ;
659
- desc -> sg [0 ].src_stride = xt -> sgl [0 ].size + src_icg ;
700
+ desc -> sg [0 ].hw -> src_addr = xt -> src_start ;
701
+ desc -> sg [0 ].hw -> src_stride = xt -> sgl [0 ].size + src_icg ;
660
702
}
661
703
662
704
if (axi_dmac_dest_is_mem (chan )) {
663
- desc -> sg [0 ].dest_addr = xt -> dst_start ;
664
- desc -> sg [0 ].dest_stride = xt -> sgl [0 ].size + dst_icg ;
705
+ desc -> sg [0 ].hw -> dest_addr = xt -> dst_start ;
706
+ desc -> sg [0 ].hw -> dst_stride = xt -> sgl [0 ].size + dst_icg ;
665
707
}
666
708
667
709
if (chan -> hw_2d ) {
668
- desc -> sg [0 ].x_len = xt -> sgl [0 ].size ;
669
- desc -> sg [0 ].y_len = xt -> numf ;
710
+ desc -> sg [0 ].hw -> x_len = xt -> sgl [0 ].size - 1 ;
711
+ desc -> sg [0 ].hw -> y_len = xt -> numf - 1 ;
670
712
} else {
671
- desc -> sg [0 ].x_len = xt -> sgl [0 ].size * xt -> numf ;
672
- desc -> sg [0 ].y_len = 1 ;
713
+ desc -> sg [0 ].hw -> x_len = xt -> sgl [0 ].size * xt -> numf - 1 ;
714
+ desc -> sg [0 ].hw -> y_len = 0 ;
673
715
}
674
716
675
717
if (flags & DMA_CYCLIC )
@@ -685,7 +727,7 @@ static void axi_dmac_free_chan_resources(struct dma_chan *c)
685
727
686
728
/*
 * virt-dma free_desc callback: release a completed or terminated
 * descriptor, including its coherent hardware descriptor array.
 */
static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	axi_dmac_free_desc(to_axi_dmac_desc(vdesc));
}
690
732
691
733
static bool axi_dmac_regmap_rdwr (struct device * dev , unsigned int reg )
0 commit comments