
Commit b61dfa9

jahay1 authored and anguy11 committed
idpf: simplify and fix splitq Tx packet rollback error path
Move (and rename) the existing rollback logic to singleq.c since that will be
the only consumer.

Create a simplified splitq-specific rollback function to loop through and
unmap tx_bufs based on the completion tag. This is critical before replacing
the Tx buffer ring with the buffer pool, since the previous rollback indexing
will not work to unmap the chained buffers from the pool.

Cache the next_to_use index before any portion of the packet is put on the
descriptor ring. In case of an error, the rollback will bump tail to the
correct next_to_use value. Because the splitq path now supports different
types of context descriptors (and potentially multiple in the future), this
will take care of rolling back any and all context descriptors encoded on the
ring for the erroneous packet. The previous rollback logic was broken for PTP
packets since it would not account for the PTP context descriptor.

Fixes: 1a49cf8 ("idpf: add Tx timestamp flows")
Signed-off-by: Joshua Hay <[email protected]>
Reviewed-by: Madhu Chittim <[email protected]>
Tested-by: Samuel Salin <[email protected]>
Signed-off-by: Tony Nguyen <[email protected]>
1 parent f2d18e1 commit b61dfa9
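
For orientation before the diff, here is a small, self-contained C sketch of the rollback flow this commit introduces. It is not the driver code: rollback_pkt(), fake_tx_buf, RING_SIZE and the rest are hypothetical stand-ins that only mirror the control flow of the new idpf_tx_splitq_pkt_err_unmap() shown below, i.e. complete every buffer that carries the failed packet's completion tag, then restore the tail from the next_to_use value cached in prev_ntu before the packet touched the ring.

#include <stdio.h>

#define RING_SIZE 8

/* Hypothetical stand-in for an idpf Tx buffer ring entry. */
struct fake_tx_buf {
        unsigned int compl_tag;  /* tag shared by all buffers of one packet */
        int mapped;              /* nonzero while this slot holds a DMA mapping */
};

/* Unwind every buffer of the failed packet, identified by its completion tag,
 * then point the software tail back at the next_to_use value cached before any
 * part of the packet was placed on the ring (params->prev_ntu in the driver).
 */
static void rollback_pkt(struct fake_tx_buf *ring, unsigned int ntc,
                         unsigned int compl_tag, unsigned int prev_ntu,
                         unsigned int *tail)
{
        do {
                ring[ntc].mapped = 0;            /* stands in for libeth_tx_complete() */
                ntc = (ntc + 1) % RING_SIZE;     /* stands in for the ntc bump helper */
        } while (ring[ntc].compl_tag == compl_tag);

        *tail = prev_ntu;                        /* bump tail to the cached next_to_use */
}

int main(void)
{
        struct fake_tx_buf ring[RING_SIZE] = { 0 };
        unsigned int prev_ntu = 2, tail = 5, tag = 7;

        /* Pretend three chained buffers of one packet were mapped before a DMA error. */
        for (unsigned int i = prev_ntu; i < prev_ntu + 3; i++)
                ring[i] = (struct fake_tx_buf){ .compl_tag = tag, .mapped = 1 };

        rollback_pkt(ring, prev_ntu, tag, prev_ntu, &tail);
        printf("tail restored to %u, buf[4].mapped = %d\n", tail, ring[4].mapped);
        return 0;
}

The real function additionally counts the event in q_stats.dma_map_errs under u64_stats protection and calls idpf_tx_buf_hw_update() to move tail, as the idpf_txrx.c hunk below shows.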

File tree

3 files changed: +95, -58 lines

drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c

Lines changed: 55 additions & 2 deletions
@@ -179,6 +179,58 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb,
         return 1;
 }
 
+/**
+ * idpf_tx_singleq_dma_map_error - handle TX DMA map errors
+ * @txq: queue to send buffer on
+ * @skb: send buffer
+ * @first: original first buffer info buffer for packet
+ * @idx: starting point on ring to unwind
+ */
+static void idpf_tx_singleq_dma_map_error(struct idpf_tx_queue *txq,
+                                          struct sk_buff *skb,
+                                          struct idpf_tx_buf *first, u16 idx)
+{
+        struct libeth_sq_napi_stats ss = { };
+        struct libeth_cq_pp cp = {
+                .dev = txq->dev,
+                .ss = &ss,
+        };
+
+        u64_stats_update_begin(&txq->stats_sync);
+        u64_stats_inc(&txq->q_stats.dma_map_errs);
+        u64_stats_update_end(&txq->stats_sync);
+
+        /* clear dma mappings for failed tx_buf map */
+        for (;;) {
+                struct idpf_tx_buf *tx_buf;
+
+                tx_buf = &txq->tx_buf[idx];
+                libeth_tx_complete(tx_buf, &cp);
+                if (tx_buf == first)
+                        break;
+                if (idx == 0)
+                        idx = txq->desc_count;
+                idx--;
+        }
+
+        if (skb_is_gso(skb)) {
+                union idpf_tx_flex_desc *tx_desc;
+
+                /* If we failed a DMA mapping for a TSO packet, we will have
+                 * used one additional descriptor for a context
+                 * descriptor. Reset that here.
+                 */
+                tx_desc = &txq->flex_tx[idx];
+                memset(tx_desc, 0, sizeof(*tx_desc));
+                if (idx == 0)
+                        idx = txq->desc_count;
+                idx--;
+        }
+
+        /* Update tail in case netdev_xmit_more was previously true */
+        idpf_tx_buf_hw_update(txq, idx, false);
+}
+
 /**
  * idpf_tx_singleq_map - Build the Tx base descriptor
  * @tx_q: queue to send buffer on
@@ -219,8 +271,9 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
         for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
                 unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
 
-                if (dma_mapping_error(tx_q->dev, dma))
-                        return idpf_tx_dma_map_error(tx_q, skb, first, i);
+                if (unlikely(dma_mapping_error(tx_q->dev, dma)))
+                        return idpf_tx_singleq_dma_map_error(tx_q, skb,
+                                                             first, i);
 
                 /* record length, and DMA address */
                 dma_unmap_len_set(tx_buf, len, size);

drivers/net/ethernet/intel/idpf/idpf_txrx.c

Lines changed: 37 additions & 54 deletions
@@ -2339,57 +2339,6 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
         return count;
 }
 
-/**
- * idpf_tx_dma_map_error - handle TX DMA map errors
- * @txq: queue to send buffer on
- * @skb: send buffer
- * @first: original first buffer info buffer for packet
- * @idx: starting point on ring to unwind
- */
-void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
-                           struct idpf_tx_buf *first, u16 idx)
-{
-        struct libeth_sq_napi_stats ss = { };
-        struct libeth_cq_pp cp = {
-                .dev = txq->dev,
-                .ss = &ss,
-        };
-
-        u64_stats_update_begin(&txq->stats_sync);
-        u64_stats_inc(&txq->q_stats.dma_map_errs);
-        u64_stats_update_end(&txq->stats_sync);
-
-        /* clear dma mappings for failed tx_buf map */
-        for (;;) {
-                struct idpf_tx_buf *tx_buf;
-
-                tx_buf = &txq->tx_buf[idx];
-                libeth_tx_complete(tx_buf, &cp);
-                if (tx_buf == first)
-                        break;
-                if (idx == 0)
-                        idx = txq->desc_count;
-                idx--;
-        }
-
-        if (skb_is_gso(skb)) {
-                union idpf_tx_flex_desc *tx_desc;
-
-                /* If we failed a DMA mapping for a TSO packet, we will have
-                 * used one additional descriptor for a context
-                 * descriptor. Reset that here.
-                 */
-                tx_desc = &txq->flex_tx[idx];
-                memset(tx_desc, 0, sizeof(*tx_desc));
-                if (idx == 0)
-                        idx = txq->desc_count;
-                idx--;
-        }
-
-        /* Update tail in case netdev_xmit_more was previously true */
-        idpf_tx_buf_hw_update(txq, idx, false);
-}
-
 /**
  * idpf_tx_splitq_bump_ntu - adjust NTU and generation
  * @txq: the tx ring to wrap
@@ -2438,6 +2387,37 @@ static bool idpf_tx_get_free_buf_id(struct idpf_sw_queue *refillq,
         return true;
 }
 
+/**
+ * idpf_tx_splitq_pkt_err_unmap - Unmap buffers and bump tail in case of error
+ * @txq: Tx queue to unwind
+ * @params: pointer to splitq params struct
+ * @first: starting buffer for packet to unmap
+ */
+static void idpf_tx_splitq_pkt_err_unmap(struct idpf_tx_queue *txq,
+                                         struct idpf_tx_splitq_params *params,
+                                         struct idpf_tx_buf *first)
+{
+        struct libeth_sq_napi_stats ss = { };
+        struct idpf_tx_buf *tx_buf = first;
+        struct libeth_cq_pp cp = {
+                .dev = txq->dev,
+                .ss = &ss,
+        };
+        u32 idx = 0;
+
+        u64_stats_update_begin(&txq->stats_sync);
+        u64_stats_inc(&txq->q_stats.dma_map_errs);
+        u64_stats_update_end(&txq->stats_sync);
+
+        do {
+                libeth_tx_complete(tx_buf, &cp);
+                idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
+        } while (idpf_tx_buf_compl_tag(tx_buf) == params->compl_tag);
+
+        /* Update tail in case netdev_xmit_more was previously true. */
+        idpf_tx_buf_hw_update(txq, params->prev_ntu, false);
+}
+
 /**
  * idpf_tx_splitq_map - Build the Tx flex descriptor
  * @tx_q: queue to send buffer on
@@ -2482,8 +2462,9 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
         for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
                 unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
 
-                if (dma_mapping_error(tx_q->dev, dma))
-                        return idpf_tx_dma_map_error(tx_q, skb, first, i);
+                if (unlikely(dma_mapping_error(tx_q->dev, dma)))
+                        return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
+                                                            first);
 
                 first->nr_frags++;
                 idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
@@ -2939,7 +2920,9 @@ static bool idpf_tx_splitq_need_re(struct idpf_tx_queue *tx_q)
 static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
                                         struct idpf_tx_queue *tx_q)
 {
-        struct idpf_tx_splitq_params tx_params = { };
+        struct idpf_tx_splitq_params tx_params = {
+                .prev_ntu = tx_q->next_to_use,
+        };
         union idpf_flex_tx_ctx_desc *ctx_desc;
         struct idpf_tx_buf *first;
         unsigned int count;

drivers/net/ethernet/intel/idpf/idpf_txrx.h

Lines changed: 3 additions & 2 deletions
@@ -196,6 +196,7 @@ struct idpf_tx_offload_params {
  * @compl_tag: Associated tag for completion
  * @td_tag: Descriptor tunneling tag
  * @offload: Offload parameters
+ * @prev_ntu: stored TxQ next_to_use in case of rollback
  */
 struct idpf_tx_splitq_params {
         enum idpf_tx_desc_dtype_value dtype;
@@ -206,6 +207,8 @@ struct idpf_tx_splitq_params {
         };
 
         struct idpf_tx_offload_params offload;
+
+        u16 prev_ntu;
 };
 
 enum idpf_tx_ctx_desc_eipt_offload {
@@ -1042,8 +1045,6 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
                            bool xmit_more);
 unsigned int idpf_size_to_txd_count(unsigned int size);
 netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
-void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
-                           struct idpf_tx_buf *first, u16 ring_idx);
 unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
                                          struct sk_buff *skb);
 void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
