@@ -160,60 +160,6 @@ static __le32 wx_test_staterr(union wx_rx_desc *rx_desc,
 	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
 }
 
-static bool wx_can_reuse_rx_page(struct wx_rx_buffer *rx_buffer,
-				 int rx_buffer_pgcnt)
-{
-	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
-	struct page *page = rx_buffer->page;
-
-	/* avoid re-using remote and pfmemalloc pages */
-	if (!dev_page_is_reusable(page))
-		return false;
-
-#if (PAGE_SIZE < 8192)
-	/* if we are only owner of page we can reuse it */
-	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
-		return false;
-#endif
-
-	/* If we have drained the page fragment pool we need to update
-	 * the pagecnt_bias and page count so that we fully restock the
-	 * number of references the driver holds.
-	 */
-	if (unlikely(pagecnt_bias == 1)) {
-		page_ref_add(page, USHRT_MAX - 1);
-		rx_buffer->pagecnt_bias = USHRT_MAX;
-	}
-
-	return true;
-}
-
-/**
- * wx_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
- * @old_buff: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- **/
-static void wx_reuse_rx_page(struct wx_ring *rx_ring,
-			     struct wx_rx_buffer *old_buff)
-{
-	u16 nta = rx_ring->next_to_alloc;
-	struct wx_rx_buffer *new_buff;
-
-	new_buff = &rx_ring->rx_buffer_info[nta];
-
-	/* update, and store next to alloc */
-	nta++;
-	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
-	/* transfer page from old buffer to new buffer */
-	new_buff->page = old_buff->page;
-	new_buff->page_dma = old_buff->page_dma;
-	new_buff->page_offset = old_buff->page_offset;
-	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
-}
-
 static void wx_dma_sync_frag(struct wx_ring *rx_ring,
 			     struct wx_rx_buffer *rx_buffer)
 {
@@ -270,8 +216,6 @@ static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
 				      size,
 				      DMA_FROM_DEVICE);
 skip_sync:
-	rx_buffer->pagecnt_bias--;
-
 	return rx_buffer;
 }
 
@@ -280,19 +224,9 @@ static void wx_put_rx_buffer(struct wx_ring *rx_ring,
 			     struct sk_buff *skb,
 			     int rx_buffer_pgcnt)
 {
-	if (wx_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
-		/* hand second half of page back to the ring */
-		wx_reuse_rx_page(rx_ring, rx_buffer);
-	} else {
-		if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
-			/* the page has been released from the ring */
-			WX_CB(skb)->page_released = true;
-		else
-			page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
-
-		__page_frag_cache_drain(rx_buffer->page,
-					rx_buffer->pagecnt_bias);
-	}
+	if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
+		/* the page has been released from the ring */
+		WX_CB(skb)->page_released = true;
 
 	/* clear contents of rx_buffer */
 	rx_buffer->page = NULL;
@@ -335,11 +269,12 @@ static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring,
 	if (size <= WX_RXBUFFER_256) {
 		memcpy(__skb_put(skb, size), page_addr,
 		       ALIGN(size, sizeof(long)));
-		rx_buffer->pagecnt_bias++;
-
+		page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, true);
 		return skb;
 	}
 
+	skb_mark_for_recycle(skb);
+
 	if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP))
 		WX_CB(skb)->dma = rx_buffer->dma;
 
@@ -382,8 +317,6 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
 	bi->page_dma = dma;
 	bi->page = page;
 	bi->page_offset = 0;
-	page_ref_add(page, USHRT_MAX - 1);
-	bi->pagecnt_bias = USHRT_MAX;
 
 	return true;
 }
@@ -723,7 +656,6 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector,
 		/* exit if we failed to retrieve a buffer */
 		if (!skb) {
 			rx_ring->rx_stats.alloc_rx_buff_failed++;
-			rx_buffer->pagecnt_bias++;
 			break;
 		}
 
@@ -2248,8 +2180,6 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)
 
 		/* free resources associated with mapping */
 		page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
-		__page_frag_cache_drain(rx_buffer->page,
-					rx_buffer->pagecnt_bias);
 
 		i++;
 		rx_buffer++;
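Note on the pattern above: the patch drops the driver's hand-rolled page-reuse bookkeeping (pagecnt_bias, wx_can_reuse_rx_page(), wx_reuse_rx_page()) and leans on page_pool recycling instead. The copy-break path hands the page straight back with page_pool_put_full_page(), and larger frames are tagged with skb_mark_for_recycle() so the stack returns the page to the pool when the skb is freed. Below is a minimal sketch of that receive pattern, assuming one page per buffer and NAPI context; demo_build_rx_skb and its parameters are illustrative names, not libwx code, and a real driver would also reserve headroom before skb_put().

	/* Illustrative sketch, not libwx code. The page_pool declarations
	 * live in <net/page_pool.h> on older kernels and in
	 * <net/page_pool/helpers.h> after the 6.6 header split.
	 */
	static struct sk_buff *demo_build_rx_skb(struct page_pool *pool,
						 struct page *page,
						 unsigned int size)
	{
		struct sk_buff *skb;

		/* build the skb around the pool-owned page */
		skb = build_skb(page_address(page), PAGE_SIZE);
		if (!skb) {
			/* no skb: return the page to the pool ourselves;
			 * allow_direct=true is only safe in NAPI context
			 */
			page_pool_put_full_page(pool, page, true);
			return NULL;
		}
		skb_put(skb, size);

		/* let consume_skb()/kfree_skb() recycle the page back into
		 * 'pool' instead of freeing it to the page allocator
		 */
		skb_mark_for_recycle(skb);
		return skb;
	}

Because the pool then owns every page reference, error paths that used to re-increment pagecnt_bias (e.g., the !skb case in wx_clean_rx_irq()) no longer need any fixup, which is exactly what the hunks above delete.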