
Commit e4b398d

michalQb authored and anguy11 committed
idpf: fix netdev Tx queue stop/wake
netif_txq_maybe_stop() returns -1, 0, or 1, while
idpf_tx_maybe_stop_common() says it returns 0 or -EBUSY. As a result,
Tx queue timeout warnings are sometimes triggered even though the queue
is empty or there is at least enough space to restart it.

Make idpf_tx_maybe_stop_common() inline and have it return true or
false, handling the return value of netif_txq_maybe_stop() properly.
Use a correct goto in idpf_tx_maybe_stop_splitq() to avoid stopping
the queue or incrementing the stops counter twice.

Fixes: 6818c4d ("idpf: add splitq start_xmit")
Fixes: a5ab9ee ("idpf: add singleq start_xmit and napi poll")
Cc: [email protected] # 6.7+
Signed-off-by: Michal Kubiak <[email protected]>
Reviewed-by: Przemek Kitszel <[email protected]>
Signed-off-by: Alexander Lobakin <[email protected]>
Signed-off-by: Tony Nguyen <[email protected]>
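As a side note (not part of the patch), the mismatch is easiest to see as a mapping from netif_txq_maybe_stop()'s tri-state return to the boolean the idpf callers actually need. The helper below is purely illustrative and its name is made up; the value meanings follow the commit message and the netdev_queues.h helpers:

/* Illustration only: idpf_txq_was_stopped() is a hypothetical name. */
static inline bool idpf_txq_was_stopped(int maybe_stop_ret)
{
	/*
	 * netif_txq_maybe_stop() returns:
	 *    1  - enough descriptors, queue left running
	 *    0  - queue was stopped
	 *   -1  - queue was stopped but immediately restarted
	 *
	 * Only 0 means the caller has to back off, so returning the raw
	 * value and testing it with "if (ret)" wrongly treats 1 and -1
	 * as busy conditions too.
	 */
	return maybe_stop_ret == 0;
}

This is the same mapping the new inline idpf_tx_maybe_stop_common() performs below by negating netif_subqueue_maybe_stop().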
1 parent 24eb35b commit e4b398d

3 files changed: 21 additions & 27 deletions

drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c

Lines changed: 4 additions & 0 deletions

@@ -375,6 +375,10 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
 				      IDPF_TX_DESCS_FOR_CTX)) {
 		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
 
+		u64_stats_update_begin(&tx_q->stats_sync);
+		u64_stats_inc(&tx_q->q_stats.q_busy);
+		u64_stats_update_end(&tx_q->stats_sync);
+
 		return NETDEV_TX_BUSY;
 	}
 
drivers/net/ethernet/intel/idpf/idpf_txrx.c

Lines changed: 9 additions & 26 deletions

@@ -2132,29 +2132,6 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
 	desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
 }
 
-/**
- * idpf_tx_maybe_stop_common - 1st level check for common Tx stop conditions
- * @tx_q: the queue to be checked
- * @size: number of descriptors we want to assure is available
- *
- * Returns 0 if stop is not needed
- */
-int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size)
-{
-	struct netdev_queue *nq;
-
-	if (likely(IDPF_DESC_UNUSED(tx_q) >= size))
-		return 0;
-
-	u64_stats_update_begin(&tx_q->stats_sync);
-	u64_stats_inc(&tx_q->q_stats.q_busy);
-	u64_stats_update_end(&tx_q->stats_sync);
-
-	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
-
-	return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size);
-}
-
 /**
  * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
  * @tx_q: the queue to be checked
@@ -2166,7 +2143,7 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
 				     unsigned int descs_needed)
 {
 	if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
-		goto splitq_stop;
+		goto out;
 
 	/* If there are too many outstanding completions expected on the
 	 * completion queue, stop the TX queue to give the device some time to
@@ -2185,10 +2162,12 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
 	return 0;
 
 splitq_stop:
+	netif_stop_subqueue(tx_q->netdev, tx_q->idx);
+
+out:
 	u64_stats_update_begin(&tx_q->stats_sync);
 	u64_stats_inc(&tx_q->q_stats.q_busy);
 	u64_stats_update_end(&tx_q->stats_sync);
-	netif_stop_subqueue(tx_q->netdev, tx_q->idx);
 
 	return -EBUSY;
 }
@@ -2211,7 +2190,11 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
 	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
 	tx_q->next_to_use = val;
 
-	idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED);
+	if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) {
+		u64_stats_update_begin(&tx_q->stats_sync);
+		u64_stats_inc(&tx_q->q_stats.q_busy);
+		u64_stats_update_end(&tx_q->stats_sync);
+	}
 
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch. (Only
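Read together, the two idpf_tx_maybe_stop_splitq() hunks above give the stop path the shape sketched below. This is a condensed illustration only: the splitq-specific checks between the common check and the labels are not shown in the diff context and are replaced here by a placeholder condition.

/* Condensed sketch of the resulting control flow; the splitq-specific
 * conditions are elided and represented by a placeholder.
 */
static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
				     unsigned int descs_needed)
{
	/* The common helper has already stopped the subqueue when it
	 * returns true, so only the q_busy accounting is left to do.
	 */
	if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
		goto out;

	/* Placeholder for the splitq-specific checks (e.g. completion
	 * queue pressure); these paths still stop the subqueue themselves.
	 */
	if (false /* elided conditions */)
		goto splitq_stop;

	return 0;

splitq_stop:
	netif_stop_subqueue(tx_q->netdev, tx_q->idx);

out:
	u64_stats_update_begin(&tx_q->stats_sync);
	u64_stats_inc(&tx_q->q_stats.q_busy);
	u64_stats_update_end(&tx_q->stats_sync);

	return -EBUSY;
}

With "goto out" on the common path, the queue is no longer stopped a second time after idpf_tx_maybe_stop_common() has already done so, and the q_busy counter is bumped exactly once, which is what "stopping the queue or incrementing the stops counter twice" in the commit message refers to.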

drivers/net/ethernet/intel/idpf/idpf_txrx.h

Lines changed: 8 additions & 1 deletion

@@ -1018,7 +1018,6 @@ void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
 			   struct idpf_tx_buf *first, u16 ring_idx);
 unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
 					 struct sk_buff *skb);
-int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size);
 void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
 netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
 				  struct idpf_tx_queue *tx_q);
@@ -1027,4 +1026,12 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
 				      u16 cleaned_count);
 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
 
+static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q,
+					     u32 needed)
+{
+	return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+					  IDPF_DESC_UNUSED(tx_q),
+					  needed, needed);
+}
+
 #endif /* !_IDPF_TXRX_H_ */
