Skip to content

Commit 27717f8

Browse files
sriramyanguy11
authored and committed
igb: Always call igb_xdp_ring_update_tail() under Tx lock
Always call igb_xdp_ring_update_tail() under __netif_tx_lock, add a comment and lockdep assert to indicate that. This is needed to share the same TX ring between XDP, XSK and slow paths. Furthermore, the current XDP implementation is racy on tail updates. Fixes: 9cbc948 ("igb: add XDP support") Signed-off-by: Sriram Yagnaraman <[email protected]> [Kurt: Add lockdep assert and fixes tag] Signed-off-by: Kurt Kanzenbach <[email protected]> Acked-by: Maciej Fijalkowski <[email protected]> Tested-by: George Kuruvinakunnel <[email protected]> Signed-off-by: Tony Nguyen <[email protected]>
1 parent d294000 commit 27717f8

File tree

1 file changed

+13
-4
lines changed

1 file changed

+13
-4
lines changed

drivers/net/ethernet/intel/igb/igb_main.c

Lines changed: 13 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@
3333
#include <linux/bpf_trace.h>
3434
#include <linux/pm_runtime.h>
3535
#include <linux/etherdevice.h>
36+
#include <linux/lockdep.h>
3637
#ifdef CONFIG_IGB_DCA
3738
#include <linux/dca.h>
3839
#endif
@@ -2914,8 +2915,11 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
29142915
}
29152916
}
29162917

2918+
/* This function assumes __netif_tx_lock is held by the caller. */
29172919
static void igb_xdp_ring_update_tail(struct igb_ring *ring)
29182920
{
2921+
lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
2922+
29192923
/* Force memory writes to complete before letting h/w know there
29202924
* are new descriptors to fetch.
29212925
*/
@@ -3000,11 +3004,11 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
30003004
nxmit++;
30013005
}
30023006

3003-
__netif_tx_unlock(nq);
3004-
30053007
if (unlikely(flags & XDP_XMIT_FLUSH))
30063008
igb_xdp_ring_update_tail(tx_ring);
30073009

3010+
__netif_tx_unlock(nq);
3011+
30083012
return nxmit;
30093013
}
30103014

@@ -8864,12 +8868,14 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
88648868

88658869
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
88668870
{
8871+
unsigned int total_bytes = 0, total_packets = 0;
88678872
struct igb_adapter *adapter = q_vector->adapter;
88688873
struct igb_ring *rx_ring = q_vector->rx.ring;
8869-
struct sk_buff *skb = rx_ring->skb;
8870-
unsigned int total_bytes = 0, total_packets = 0;
88718874
u16 cleaned_count = igb_desc_unused(rx_ring);
8875+
struct sk_buff *skb = rx_ring->skb;
8876+
int cpu = smp_processor_id();
88728877
unsigned int xdp_xmit = 0;
8878+
struct netdev_queue *nq;
88738879
struct xdp_buff xdp;
88748880
u32 frame_sz = 0;
88758881
int rx_buf_pgcnt;
@@ -8997,7 +9003,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
89979003
if (xdp_xmit & IGB_XDP_TX) {
89989004
struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
89999005

9006+
nq = txring_txq(tx_ring);
9007+
__netif_tx_lock(nq, cpu);
90009008
igb_xdp_ring_update_tail(tx_ring);
9009+
__netif_tx_unlock(nq);
90019010
}
90029011

90039012
u64_stats_update_begin(&rx_ring->rx_syncp);

0 commit comments

Comments
 (0)