Skip to content

Commit c89cca3

Browse files
kuba-moo authored and davem330 committed
net: skbuff: sprinkle more __GFP_NOWARN on ingress allocs
build_skb() and frag allocations done with GFP_ATOMIC will fail in real life, when the system is under memory pressure, and there's nothing we can do about that. So there is no point printing warnings.

Signed-off-by: Jakub Kicinski <[email protected]>
Reviewed-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 3608d6a commit c89cca3

File tree

1 file changed

+9
-6
lines changed

1 file changed

+9
-6
lines changed

net/core/skbuff.c

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -314,8 +314,8 @@ void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
314314
fragsz = SKB_DATA_ALIGN(fragsz);
315315

316316
local_lock_nested_bh(&napi_alloc_cache.bh_lock);
317-
data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
318-
align_mask);
317+
data = __page_frag_alloc_align(&nc->page, fragsz,
318+
GFP_ATOMIC | __GFP_NOWARN, align_mask);
319319
local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
320320
return data;
321321

@@ -330,7 +330,8 @@ void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
330330
struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
331331

332332
fragsz = SKB_DATA_ALIGN(fragsz);
333-
data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC,
333+
data = __page_frag_alloc_align(nc, fragsz,
334+
GFP_ATOMIC | __GFP_NOWARN,
334335
align_mask);
335336
} else {
336337
local_bh_disable();
@@ -349,7 +350,7 @@ static struct sk_buff *napi_skb_cache_get(void)
349350
local_lock_nested_bh(&napi_alloc_cache.bh_lock);
350351
if (unlikely(!nc->skb_count)) {
351352
nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
352-
GFP_ATOMIC,
353+
GFP_ATOMIC | __GFP_NOWARN,
353354
NAPI_SKB_CACHE_BULK,
354355
nc->skb_cache);
355356
if (unlikely(!nc->skb_count)) {
@@ -418,7 +419,8 @@ struct sk_buff *slab_build_skb(void *data)
418419
struct sk_buff *skb;
419420
unsigned int size;
420421

421-
skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
422+
skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
423+
GFP_ATOMIC | __GFP_NOWARN);
422424
if (unlikely(!skb))
423425
return NULL;
424426

@@ -469,7 +471,8 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
469471
{
470472
struct sk_buff *skb;
471473

472-
skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
474+
skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
475+
GFP_ATOMIC | __GFP_NOWARN);
473476
if (unlikely(!skb))
474477
return NULL;
475478

0 commit comments

Comments (0)