Skip to content

Commit 738b54b

Browse files
duanqiangwen and davem330
authored and committed
net: libwx: fix memory leak on free page
ifconfig ethx up will set page->refcount to a value larger than 1; then ifconfig ethx down calls __page_frag_cache_drain() to free the pages, which is not compatible with the page pool. So delete the code that changes page->refcount. Fixes: 3c47e8a ("net: libwx: Support to receive packets in NAPI") Signed-off-by: duanqiangwen <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 0225191 commit 738b54b

File tree

2 files changed

+6
-77
lines changed

2 files changed

+6
-77
lines changed

drivers/net/ethernet/wangxun/libwx/wx_lib.c

Lines changed: 6 additions & 76 deletions
Original file line numberDiff line numberDiff line change
@@ -160,60 +160,6 @@ static __le32 wx_test_staterr(union wx_rx_desc *rx_desc,
160160
return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
161161
}
162162

163-
static bool wx_can_reuse_rx_page(struct wx_rx_buffer *rx_buffer,
164-
int rx_buffer_pgcnt)
165-
{
166-
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
167-
struct page *page = rx_buffer->page;
168-
169-
/* avoid re-using remote and pfmemalloc pages */
170-
if (!dev_page_is_reusable(page))
171-
return false;
172-
173-
#if (PAGE_SIZE < 8192)
174-
/* if we are only owner of page we can reuse it */
175-
if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
176-
return false;
177-
#endif
178-
179-
/* If we have drained the page fragment pool we need to update
180-
* the pagecnt_bias and page count so that we fully restock the
181-
* number of references the driver holds.
182-
*/
183-
if (unlikely(pagecnt_bias == 1)) {
184-
page_ref_add(page, USHRT_MAX - 1);
185-
rx_buffer->pagecnt_bias = USHRT_MAX;
186-
}
187-
188-
return true;
189-
}
190-
191-
/**
192-
* wx_reuse_rx_page - page flip buffer and store it back on the ring
193-
* @rx_ring: rx descriptor ring to store buffers on
194-
* @old_buff: donor buffer to have page reused
195-
*
196-
* Synchronizes page for reuse by the adapter
197-
**/
198-
static void wx_reuse_rx_page(struct wx_ring *rx_ring,
199-
struct wx_rx_buffer *old_buff)
200-
{
201-
u16 nta = rx_ring->next_to_alloc;
202-
struct wx_rx_buffer *new_buff;
203-
204-
new_buff = &rx_ring->rx_buffer_info[nta];
205-
206-
/* update, and store next to alloc */
207-
nta++;
208-
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
209-
210-
/* transfer page from old buffer to new buffer */
211-
new_buff->page = old_buff->page;
212-
new_buff->page_dma = old_buff->page_dma;
213-
new_buff->page_offset = old_buff->page_offset;
214-
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
215-
}
216-
217163
static void wx_dma_sync_frag(struct wx_ring *rx_ring,
218164
struct wx_rx_buffer *rx_buffer)
219165
{
@@ -270,8 +216,6 @@ static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
270216
size,
271217
DMA_FROM_DEVICE);
272218
skip_sync:
273-
rx_buffer->pagecnt_bias--;
274-
275219
return rx_buffer;
276220
}
277221

@@ -280,19 +224,9 @@ static void wx_put_rx_buffer(struct wx_ring *rx_ring,
280224
struct sk_buff *skb,
281225
int rx_buffer_pgcnt)
282226
{
283-
if (wx_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
284-
/* hand second half of page back to the ring */
285-
wx_reuse_rx_page(rx_ring, rx_buffer);
286-
} else {
287-
if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
288-
/* the page has been released from the ring */
289-
WX_CB(skb)->page_released = true;
290-
else
291-
page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
292-
293-
__page_frag_cache_drain(rx_buffer->page,
294-
rx_buffer->pagecnt_bias);
295-
}
227+
if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
228+
/* the page has been released from the ring */
229+
WX_CB(skb)->page_released = true;
296230

297231
/* clear contents of rx_buffer */
298232
rx_buffer->page = NULL;
@@ -335,11 +269,12 @@ static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring,
335269
if (size <= WX_RXBUFFER_256) {
336270
memcpy(__skb_put(skb, size), page_addr,
337271
ALIGN(size, sizeof(long)));
338-
rx_buffer->pagecnt_bias++;
339-
272+
page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, true);
340273
return skb;
341274
}
342275

276+
skb_mark_for_recycle(skb);
277+
343278
if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP))
344279
WX_CB(skb)->dma = rx_buffer->dma;
345280

@@ -382,8 +317,6 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
382317
bi->page_dma = dma;
383318
bi->page = page;
384319
bi->page_offset = 0;
385-
page_ref_add(page, USHRT_MAX - 1);
386-
bi->pagecnt_bias = USHRT_MAX;
387320

388321
return true;
389322
}
@@ -723,7 +656,6 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector,
723656
/* exit if we failed to retrieve a buffer */
724657
if (!skb) {
725658
rx_ring->rx_stats.alloc_rx_buff_failed++;
726-
rx_buffer->pagecnt_bias++;
727659
break;
728660
}
729661

@@ -2248,8 +2180,6 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)
22482180

22492181
/* free resources associated with mapping */
22502182
page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
2251-
__page_frag_cache_drain(rx_buffer->page,
2252-
rx_buffer->pagecnt_bias);
22532183

22542184
i++;
22552185
rx_buffer++;

drivers/net/ethernet/wangxun/libwx/wx_type.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -787,7 +787,6 @@ struct wx_rx_buffer {
787787
dma_addr_t page_dma;
788788
struct page *page;
789789
unsigned int page_offset;
790-
u16 pagecnt_bias;
791790
};
792791

793792
struct wx_queue_stats {

0 commit comments

Comments (0)