Skip to content

Commit f2d18e1

Browse files
jahay1 authored and anguy11 committed
idpf: improve when to set RE bit logic
Track the gap between next_to_use and the last RE index. Set RE again if the gap is large enough to ensure RE bit is set frequently. This is critical before removing the stashing mechanisms because the opportunistic descriptor ring cleaning from the out-of-order completions will go away. Previously the descriptors would be "cleaned" by both the descriptor (RE) completion and the out-of-order completions. Without the latter, we must ensure the RE bit is set more frequently. Otherwise, it's theoretically possible for the descriptor ring next_to_clean to never advance. The previous implementation was dependent on the start of a packet falling on a 64th index in the descriptor ring, which is not guaranteed with large packets. Signed-off-by: Luigi Rizzo <[email protected]> Signed-off-by: Brian Vazquez <[email protected]> Signed-off-by: Joshua Hay <[email protected]> Reviewed-by: Madhu Chittim <[email protected]> Tested-by: Samuel Salin <[email protected]> Signed-off-by: Tony Nguyen <[email protected]>
1 parent cb83b55 commit f2d18e1

File tree

2 files changed

+23
-3
lines changed

2 files changed

+23
-3
lines changed

drivers/net/ethernet/intel/idpf/idpf_txrx.c

Lines changed: 19 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -294,6 +294,8 @@ static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
294294
*/
295295
idpf_queue_change(GEN_CHK, refillq);
296296

297+
tx_q->last_re = tx_q->desc_count - IDPF_TX_SPLITQ_RE_MIN_GAP;
298+
297299
return 0;
298300

299301
err_alloc:
@@ -2912,6 +2914,21 @@ static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
29122914
{ }
29132915
#endif /* CONFIG_PTP_1588_CLOCK */
29142916

2917+
/**
2918+
* idpf_tx_splitq_need_re - check whether RE bit needs to be set
2919+
* @tx_q: pointer to Tx queue
2920+
*
2921+
* Return: true if RE bit needs to be set, false otherwise
2922+
*/
2923+
static bool idpf_tx_splitq_need_re(struct idpf_tx_queue *tx_q)
2924+
{
2925+
int gap = tx_q->next_to_use - tx_q->last_re;
2926+
2927+
gap += (gap < 0) ? tx_q->desc_count : 0;
2928+
2929+
return gap >= IDPF_TX_SPLITQ_RE_MIN_GAP;
2930+
}
2931+
29152932
/**
29162933
* idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
29172934
* @skb: send buffer
@@ -2998,9 +3015,10 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
29983015
* MIN_RING size to ensure it will be set at least once each
29993016
* time around the ring.
30003017
*/
3001-
if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
3018+
if (idpf_tx_splitq_need_re(tx_q)) {
30023019
tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
30033020
tx_q->txq_grp->num_completions_pending++;
3021+
tx_q->last_re = tx_q->next_to_use;
30043022
}
30053023

30063024
if (skb->ip_summed == CHECKSUM_PARTIAL)

drivers/net/ethernet/intel/idpf/idpf_txrx.h

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -610,6 +610,8 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
610610
* @netdev: &net_device corresponding to this queue
611611
* @next_to_use: Next descriptor to use
612612
* @next_to_clean: Next descriptor to clean
613+
* @last_re: last descriptor index that RE bit was set
614+
* @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
613615
* @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
614616
* the TX completion queue, it can be for any TXQ associated
615617
* with that completion queue. This means we can clean up to
@@ -620,7 +622,6 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
620622
* only once at the end of the cleaning routine.
621623
* @clean_budget: singleq only, queue cleaning budget
622624
* @cleaned_pkts: Number of packets cleaned for the above said case
623-
* @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
624625
* @stash: Tx buffer stash for Flow-based scheduling mode
625626
* @refillq: Pointer to refill queue
626627
* @compl_tag_bufid_m: Completion tag buffer id mask
@@ -663,14 +664,15 @@ struct idpf_tx_queue {
663664
__cacheline_group_begin_aligned(read_write);
664665
u16 next_to_use;
665666
u16 next_to_clean;
667+
u16 last_re;
668+
u16 tx_max_bufs;
666669

667670
union {
668671
u32 cleaned_bytes;
669672
u32 clean_budget;
670673
};
671674
u16 cleaned_pkts;
672675

673-
u16 tx_max_bufs;
674676
struct idpf_txq_stash *stash;
675677
struct idpf_sw_queue *refillq;
676678

0 commit comments

Comments
 (0)