
Commit c61ac2e

Merge branch '200GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue
Tony Nguyen says:

====================
idpf: replace Tx flow scheduling buffer ring with buffer pool

Joshua Hay says:

This series fixes a stability issue in the flow scheduling Tx send/clean
path that results in a Tx timeout. The existing guardrails in the Tx path
were not sufficient to prevent the driver from reusing completion tags
that were still in flight (held by the HW). This collision would cause
the driver to erroneously clean the wrong packet, thus leaving the
descriptor ring in a bad state.

The main point of this fix is to replace the flow scheduling buffer ring
with a large pool/array of buffers. The completion tag then simply is the
index into this array. The driver tracks the free tags and pulls the next
free one from a refillq. The cleaning routines simply use the completion
tag from the completion descriptor to index into the array to quickly
find the buffers to clean.

All of the code to support this is added first to ensure traffic still
passes with each patch. The final patch then removes all of the obsolete
stashing code.

* '200GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
  idpf: remove obsolete stashing code
  idpf: stop Tx if there are insufficient buffer resources
  idpf: replace flow scheduling buffer ring with buffer pool
  idpf: simplify and fix splitq Tx packet rollback error path
  idpf: improve when to set RE bit logic
  idpf: add support for Tx refillqs in flow scheduling mode
====================

Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
2 parents a64494a + 6c4e684 commit c61ac2e
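
The scheme the cover letter describes is straightforward to picture: the completion tag is nothing more than an index into a buffer array, and a refill queue hands out free tags. Below is a minimal, self-contained C sketch of that idea; all names here (tag_pool, refillq, tag_pool_get, ...) are illustrative and are not the actual idpf structures or API.

#include <stdint.h>
#include <stdbool.h>

#define POOL_SIZE 1024	/* arbitrary power-of-two pool size for the example */

struct tx_buf {
	void *skb;	/* stand-in for the real DMA/length bookkeeping */
};

struct tag_pool {
	struct tx_buf bufs[POOL_SIZE];	/* completion tag == array index */
	uint16_t refillq[POOL_SIZE];	/* ring of currently free tags */
	uint16_t head, tail;		/* free-running consumer/producer */
};

static void tag_pool_init(struct tag_pool *p)
{
	for (uint16_t i = 0; i < POOL_SIZE; i++)
		p->refillq[i] = i;
	p->head = 0;
	p->tail = POOL_SIZE;	/* every tag starts out free */
}

/* Send path: pull the next free tag; it names the buffer slot directly. */
static bool tag_pool_get(struct tag_pool *p, uint16_t *tag)
{
	if (p->head == p->tail)		/* refillq empty: stop the queue */
		return false;
	*tag = p->refillq[p->head++ % POOL_SIZE];
	return true;
}

/* Clean path: the completion tag from the descriptor indexes the pool in
 * O(1); the tag only returns to the refillq here, so it cannot collide
 * with a tag still held by the HW.
 */
static struct tx_buf *tag_pool_clean(struct tag_pool *p, uint16_t tag)
{
	p->refillq[p->tail++ % POOL_SIZE] = tag;
	return &p->bufs[tag];
}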

File tree

3 files changed (+356, -515 lines)


drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c

Lines changed: 57 additions & 4 deletions
@@ -179,6 +179,58 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb,
 	return 1;
 }
 
+/**
+ * idpf_tx_singleq_dma_map_error - handle TX DMA map errors
+ * @txq: queue to send buffer on
+ * @skb: send buffer
+ * @first: original first buffer info buffer for packet
+ * @idx: starting point on ring to unwind
+ */
+static void idpf_tx_singleq_dma_map_error(struct idpf_tx_queue *txq,
+					  struct sk_buff *skb,
+					  struct idpf_tx_buf *first, u16 idx)
+{
+	struct libeth_sq_napi_stats ss = { };
+	struct libeth_cq_pp cp = {
+		.dev	= txq->dev,
+		.ss	= &ss,
+	};
+
+	u64_stats_update_begin(&txq->stats_sync);
+	u64_stats_inc(&txq->q_stats.dma_map_errs);
+	u64_stats_update_end(&txq->stats_sync);
+
+	/* clear dma mappings for failed tx_buf map */
+	for (;;) {
+		struct idpf_tx_buf *tx_buf;
+
+		tx_buf = &txq->tx_buf[idx];
+		libeth_tx_complete(tx_buf, &cp);
+		if (tx_buf == first)
+			break;
+		if (idx == 0)
+			idx = txq->desc_count;
+		idx--;
+	}
+
+	if (skb_is_gso(skb)) {
+		union idpf_tx_flex_desc *tx_desc;
+
+		/* If we failed a DMA mapping for a TSO packet, we will have
+		 * used one additional descriptor for a context
+		 * descriptor. Reset that here.
+		 */
+		tx_desc = &txq->flex_tx[idx];
+		memset(tx_desc, 0, sizeof(*tx_desc));
+		if (idx == 0)
+			idx = txq->desc_count;
+		idx--;
+	}
+
+	/* Update tail in case netdev_xmit_more was previously true */
+	idpf_tx_buf_hw_update(txq, idx, false);
+}
+
 /**
  * idpf_tx_singleq_map - Build the Tx base descriptor
  * @tx_q: queue to send buffer on
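
The unwind loop added above walks the descriptor ring backwards from the failing buffer to the packet's first buffer, wrapping from slot 0 back to the last slot. The wraparound step, pulled out as a standalone helper for clarity (hypothetical, not part of the patch):

#include <stdint.h>

/* Previous slot on a ring of ring_size entries: slot 0 is preceded by
 * the last slot, matching the "if (idx == 0) idx = desc_count; idx--;"
 * pattern in the function above.
 */
static inline uint16_t ring_prev(uint16_t idx, uint16_t ring_size)
{
	if (idx == 0)
		idx = ring_size;
	return idx - 1;
}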
@@ -219,8 +271,9 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
 		unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
 
-		if (dma_mapping_error(tx_q->dev, dma))
-			return idpf_tx_dma_map_error(tx_q, skb, first, i);
+		if (unlikely(dma_mapping_error(tx_q->dev, dma)))
+			return idpf_tx_singleq_dma_map_error(tx_q, skb,
+							     first, i);
 
 		/* record length, and DMA address */
 		dma_unmap_len_set(tx_buf, len, size);
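
This hunk also wraps dma_mapping_error() in unlikely(), the kernel's branch-prediction annotation; in include/linux/compiler.h it expands to __builtin_expect(!!(x), 0), telling the compiler to lay the error path out as the cold branch. A plain userspace sketch of the same hint (names here are illustrative):

#define my_unlikely(x)	__builtin_expect(!!(x), 0)

int send_frame(int map_err)
{
	if (my_unlikely(map_err))
		return -1;	/* cold: error unwind path */
	return 0;		/* hot: normal transmit path */
}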
@@ -362,11 +415,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
 {
 	struct idpf_tx_offload_params offload = { };
 	struct idpf_tx_buf *first;
+	u32 count, buf_count = 1;
 	int csum, tso, needed;
-	unsigned int count;
 	__be16 protocol;
 
-	count = idpf_tx_desc_count_required(tx_q, skb);
+	count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
 	if (unlikely(!count))
 		return idpf_tx_drop_skb(tx_q, skb);
 
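The final hunk switches from idpf_tx_desc_count_required() to idpf_tx_res_count_required(), which additionally reports the number of buffer-pool entries needed through &buf_count; per the "stop Tx if there are insufficient buffer resources" patch in this series, a frame may only be sent when both enough descriptors and enough free tags are available. A sketch of that dual check, with illustrative names only (not the actual idpf helpers):

#include <stdint.h>
#include <stdbool.h>

/* Illustrative resource counters, not the idpf queue structures. */
struct txq_res {
	uint32_t descs_free;	/* free ring descriptors */
	uint32_t tags_free;	/* free completion tags on the refillq */
};

/* Both resources must be available before the frame is mapped;
 * otherwise the caller stops the queue and retries after the next
 * clean cycle replenishes descriptors and tags.
 */
static bool tx_res_available(const struct txq_res *q, uint32_t desc_count,
			     uint32_t buf_count)
{
	return q->descs_free >= desc_count && q->tags_free >= buf_count;
}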