
Commit 7292af0

Brian Vazquez authored and anguy11 committed
idpf: fix a race in txq wakeup
Add a helper function to correctly handle the lockless synchronization when the sender needs to block. The paradigm is

	if (no_resources()) {
		stop_queue();
		barrier();
		if (!no_resources())
			restart_queue();
	}

netif_subqueue_maybe_stop already handles this paradigm correctly, but the code split the resource check into three parts: the first one (descriptors) followed the protocol, while the other two (completions and tx_buf) only performed the first step and were therefore race prone.

Luckily, the netif_subqueue_maybe_stop macro already allows a function to be used to evaluate the start/stop conditions, so the fix only requires the right helper function to evaluate all the conditions at once.

The patch removes idpf_tx_maybe_stop_common since it is no longer needed, and instead adjusts the conditions for singleq and splitq separately.

Note that idpf_tx_buf_hw_update does not need to check for resources since that is covered in idpf_tx_splitq_frame.

To reproduce:

Reduce the threshold for pending completions to increase the chances of hitting this pause by changing your kernel:

drivers/net/ethernet/intel/idpf/idpf_txrx.h

-#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq)	((txcq)->desc_count >> 1)
+#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq)	((txcq)->desc_count >> 4)

Use pktgen to force the host to push small packets very aggressively:

./pktgen_sample02_multiqueue.sh -i eth1 -s 100 -6 -d $IP -m $MAC \
  -p 10000-10000 -t 16 -n 0 -v -x -c 64

Fixes: 6818c4d ("idpf: add splitq start_xmit")
Reviewed-by: Jacob Keller <[email protected]>
Reviewed-by: Madhu Chittim <[email protected]>
Signed-off-by: Josh Hay <[email protected]>
Signed-off-by: Brian Vazquez <[email protected]>
Signed-off-by: Luigi Rizzo <[email protected]>
Reviewed-by: Simon Horman <[email protected]>
Tested-by: Samuel Salin <[email protected]>
Signed-off-by: Tony Nguyen <[email protected]>
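For readers unfamiliar with the pattern, a minimal sketch of the stop/restart paradigm described above is shown here. It is illustrative only: no_resources(), stop_queue() and restart_queue() are hypothetical stand-ins, not the driver's actual helpers; in the fixed driver the whole sequence is delegated to netif_subqueue_maybe_stop() with idpf_txq_has_room() (splitq) or the unused descriptor count (singleq) as the resource predicate.

	/* Illustrative sketch only -- hypothetical helpers, not idpf code. */
	static bool no_resources(void);		/* all resource checks at once */
	static void stop_queue(void);		/* e.g. netif_stop_subqueue() */
	static void restart_queue(void);	/* e.g. netif_start_subqueue() */

	static void tx_maybe_stop(void)
	{
		if (no_resources()) {
			stop_queue();
			smp_mb();	/* the barrier() in the paradigm above */
			/* Recheck: a completion running on another CPU may have
			 * freed resources between the check and the stop.
			 */
			if (!no_resources())
				restart_queue();
		}
	}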
1 parent 73145e6 commit 7292af0

File tree

3 files changed: +22 -40 lines changed


drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c

Lines changed: 5 additions & 4 deletions
@@ -362,17 +362,18 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
 {
 	struct idpf_tx_offload_params offload = { };
 	struct idpf_tx_buf *first;
+	int csum, tso, needed;
 	unsigned int count;
 	__be16 protocol;
-	int csum, tso;
 
 	count = idpf_tx_desc_count_required(tx_q, skb);
 	if (unlikely(!count))
 		return idpf_tx_drop_skb(tx_q, skb);
 
-	if (idpf_tx_maybe_stop_common(tx_q,
-				      count + IDPF_TX_DESCS_PER_CACHE_LINE +
-				      IDPF_TX_DESCS_FOR_CTX)) {
+	needed = count + IDPF_TX_DESCS_PER_CACHE_LINE + IDPF_TX_DESCS_FOR_CTX;
+	if (!netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+				       IDPF_DESC_UNUSED(tx_q),
+				       needed, needed)) {
 		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
 
 		u64_stats_update_begin(&tx_q->stats_sync);

drivers/net/ethernet/intel/idpf/idpf_txrx.c

Lines changed: 17 additions & 28 deletions
@@ -2184,6 +2184,19 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
 	desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
 }
 
+/* Global conditions to tell whether the txq (and related resources)
+ * has room to allow the use of "size" descriptors.
+ */
+static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size)
+{
+	if (IDPF_DESC_UNUSED(tx_q) < size ||
+	    IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
+		IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
+	    IDPF_TX_BUF_RSV_LOW(tx_q))
+		return 0;
+	return 1;
+}
+
 /**
  * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
  * @tx_q: the queue to be checked
@@ -2194,29 +2207,11 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
 static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
 				     unsigned int descs_needed)
 {
-	if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
-		goto out;
-
-	/* If there are too many outstanding completions expected on the
-	 * completion queue, stop the TX queue to give the device some time to
-	 * catch up
-	 */
-	if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
-		     IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq)))
-		goto splitq_stop;
-
-	/* Also check for available book keeping buffers; if we are low, stop
-	 * the queue to wait for more completions
-	 */
-	if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q)))
-		goto splitq_stop;
-
-	return 0;
-
-splitq_stop:
-	netif_stop_subqueue(tx_q->netdev, tx_q->idx);
+	if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+				      idpf_txq_has_room(tx_q, descs_needed),
+				      1, 1))
+		return 0;
 
-out:
 	u64_stats_update_begin(&tx_q->stats_sync);
 	u64_stats_inc(&tx_q->q_stats.q_busy);
 	u64_stats_update_end(&tx_q->stats_sync);
@@ -2242,12 +2237,6 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
 	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
 	tx_q->next_to_use = val;
 
-	if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) {
-		u64_stats_update_begin(&tx_q->stats_sync);
-		u64_stats_inc(&tx_q->q_stats.q_busy);
-		u64_stats_update_end(&tx_q->stats_sync);
-	}
-
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch. (Only
 	 * applicable for weak-ordered memory model archs,
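With the helper in place, idpf_tx_maybe_stop_splitq() leans entirely on netif_subqueue_maybe_stop() for the stop/barrier/recheck sequence. As a rough, simplified sketch (not the core implementation; it assumes the macro re-evaluates its resource expression after stopping, as the commit message describes), passing idpf_txq_has_room() with stop/start thresholds of 1 behaves approximately like:

	if (!idpf_txq_has_room(tx_q, descs_needed)) {
		netif_stop_subqueue(tx_q->netdev, tx_q->idx);
		smp_mb();	/* order the stop against the recheck */
		if (idpf_txq_has_room(tx_q, descs_needed))
			netif_start_subqueue(tx_q->netdev, tx_q->idx);
	}

Only when the queue remains stopped does the function fall through to the q_busy accounting shown in the hunk above.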

drivers/net/ethernet/intel/idpf/idpf_txrx.h

Lines changed: 0 additions & 8 deletions
@@ -1049,12 +1049,4 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
 				      u16 cleaned_count);
 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
 
-static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q,
-					     u32 needed)
-{
-	return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
-					  IDPF_DESC_UNUSED(tx_q),
-					  needed, needed);
-}
-
 #endif /* !_IDPF_TXRX_H_ */
