
Commit a892493

pbrkr authored and Paolo Abeni committed
net: ravb: Allow RX loop to move past DMA mapping errors
The RX loops in ravb_rx_gbeth() and ravb_rx_rcar() skip to the next loop iteration if a zero-length descriptor is seen (indicating a DMA mapping error). However, the current RX descriptor index `priv->cur_rx[q]` was incremented at the end of the loop and so would not be incremented when we skip to the next loop iteration. This would cause the loop to keep seeing the same zero-length descriptor instead of moving on to the next descriptor.

As the loop counter `i` still increments, the loop would eventually terminate, so there is no risk of being stuck here forever, but we should still fix this to avoid wasting cycles.

To fix this, the RX descriptor index is incremented at the top of the loop, in the for statement itself. The assignments of `entry` and `desc` are brought inside the loop to avoid the need for duplication.

Fixes: d8b4891 ("ravb: fix ring memory allocation")
Signed-off-by: Paul Barker <[email protected]>
Reviewed-by: Sergey Shtylyov <[email protected]>
Signed-off-by: Paolo Abeni <[email protected]>
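For illustration, the following is a minimal standalone sketch of the loop pattern the fix adopts. It is not the driver code: `fake_desc`, `NUM_RX_RING`, `ring` and `cur_rx` are simplified stand-ins for the real descriptor ring state. It only demonstrates why advancing the ring index in the for statement keeps a `continue` taken for a zero-length descriptor from re-reading the same slot.

/* Sketch of the fixed loop shape: the ring index advances in the for
 * statement itself, so skipped (zero-length) descriptors do not stall
 * the index the way the old bottom-of-loop increment did.
 */
#include <stdio.h>

#define NUM_RX_RING 4

struct fake_desc {
	unsigned short len;	/* 0 marks a DMA mapping error, as in the commit message */
};

int main(void)
{
	/* One descriptor ring with a zero-length ("mapping error") slot. */
	struct fake_desc ring[NUM_RX_RING] = { { 64 }, { 0 }, { 128 }, { 256 } };
	unsigned int cur_rx = 0;	/* stands in for priv->cur_rx[q] */
	unsigned int limit = NUM_RX_RING;
	unsigned int i, entry;

	for (i = 0; i < limit; i++, cur_rx++) {
		entry = cur_rx % NUM_RX_RING;
		if (!ring[entry].len)
			continue;	/* skip the bad slot; cur_rx still advances */
		printf("processed descriptor %u, len %u\n", entry, ring[entry].len);
	}

	return 0;
}

Run as-is, this prints only the three non-zero slots; with the pre-fix pattern (index incremented at the bottom of the loop body), the `continue` would skip that increment and the remaining iterations would keep recomputing the same `entry`.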
1 parent def52db commit a892493

File tree

1 file changed: +13, -12 lines


drivers/net/ethernet/renesas/ravb_main.c

Lines changed: 13 additions & 12 deletions
@@ -775,12 +775,15 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 	int limit;
 	int i;
 
-	entry = priv->cur_rx[q] % priv->num_rx_ring[q];
 	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
 	stats = &priv->stats[q];
 
-	desc = &priv->rx_ring[q].desc[entry];
-	for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
+	for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+		desc = &priv->rx_ring[q].desc[entry];
+		if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+			break;
+
 		/* Descriptor type must be checked before all other reads */
 		dma_rmb();
 		desc_status = desc->msc;
@@ -848,9 +851,6 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 				break;
 			}
 		}
-
-		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
-		desc = &priv->rx_ring[q].desc[entry];
 	}
 
 	/* Refill the RX ring buffers. */
@@ -891,7 +891,6 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
 	const struct ravb_hw_info *info = priv->info;
-	int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
 	struct net_device_stats *stats = &priv->stats[q];
 	struct ravb_ex_rx_desc *desc;
 	unsigned int limit, i;
@@ -901,10 +900,15 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 	int rx_packets = 0;
 	u8 desc_status;
 	u16 pkt_len;
+	int entry;
 
 	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
-	desc = &priv->rx_ring[q].ex_desc[entry];
-	for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
+	for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+		desc = &priv->rx_ring[q].ex_desc[entry];
+		if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+			break;
+
 		/* Descriptor type must be checked before all other reads */
 		dma_rmb();
 		desc_status = desc->msc;
@@ -958,9 +962,6 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 		rx_packets++;
 		stats->rx_bytes += pkt_len;
 	}
-
-		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
-		desc = &priv->rx_ring[q].ex_desc[entry];
 	}
 
 	/* Refill the RX ring buffers. */
