@@ -231,7 +231,6 @@ struct udma_chan {
 	struct udma_tx_drain tx_drain;
 
 	u32 bcnt; /* number of bytes completed since the start of the channel */
-	u32 in_ring_cnt; /* number of descriptors in flight */
 
 	/* Channel configuration parameters */
 	struct udma_chan_config config;
@@ -574,7 +573,6 @@ static int udma_push_to_ring(struct udma_chan *uc, int idx)
 	struct udma_desc *d = uc->desc;
 	struct k3_ring *ring = NULL;
 	dma_addr_t paddr;
-	int ret;
 
 	switch (uc->config.dir) {
 	case DMA_DEV_TO_MEM:
@@ -598,11 +596,7 @@ static int udma_push_to_ring(struct udma_chan *uc, int idx)
 		udma_sync_for_device(uc, idx);
 	}
 
-	ret = k3_ringacc_ring_push(ring, &paddr);
-	if (!ret)
-		uc->in_ring_cnt++;
-
-	return ret;
+	return k3_ringacc_ring_push(ring, &paddr);
 }
 
 static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
@@ -655,9 +649,6 @@ static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
 					d->hwdesc[0].cppi5_desc_size,
 					DMA_FROM_DEVICE);
 		rmb(); /* Ensure that reads are not moved before this point */
-
-		if (!ret)
-			uc->in_ring_cnt--;
 	}
 
 	return ret;
@@ -697,8 +688,6 @@ static void udma_reset_rings(struct udma_chan *uc)
 		udma_desc_free(&uc->terminated_desc->vd);
 		uc->terminated_desc = NULL;
 	}
-
-	uc->in_ring_cnt = 0;
 }
 
 static void udma_reset_counters(struct udma_chan *uc)
@@ -1073,9 +1062,6 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
 
 	/* Teardown completion message */
 	if (cppi5_desc_is_tdcm(paddr)) {
-		/* Compensate our internal pop/push counter */
-		uc->in_ring_cnt++;
-
 		complete_all(&uc->teardown_completed);
 
 		if (uc->terminated_desc) {