
Commit 5ad37f2

tgupdate: merge t/DO-NOT-MERGE-mptcp-enabled-by-default base into t/DO-NOT-MERGE-mptcp-enabled-by-default
2 parents: 10482fb + 4e14423

4 files changed: +35 −110 lines changed

include/linux/netdevice.h

Lines changed: 0 additions & 1 deletion
@@ -4116,7 +4116,6 @@ void netif_receive_skb_list(struct list_head *head);
 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
 struct sk_buff *napi_get_frags(struct napi_struct *napi);
-void napi_get_frags_check(struct napi_struct *napi);
 gro_result_t napi_gro_frags(struct napi_struct *napi);
 
 static inline void napi_free_frags(struct napi_struct *napi)

net/core/dev.c

Lines changed: 17 additions & 0 deletions
@@ -6925,6 +6925,23 @@ netif_napi_dev_list_add(struct net_device *dev, struct napi_struct *napi)
 	list_add_rcu(&napi->dev_list, higher); /* adds after higher */
 }
 
+/* Double check that napi_get_frags() allocates skbs with
+ * skb->head being backed by slab, not a page fragment.
+ * This is to make sure bug fixed in 3226b158e67c
+ * ("net: avoid 32 x truesize under-estimation for tiny skbs")
+ * does not accidentally come back.
+ */
+static void napi_get_frags_check(struct napi_struct *napi)
+{
+	struct sk_buff *skb;
+
+	local_bh_disable();
+	skb = napi_get_frags(napi);
+	WARN_ON_ONCE(skb && skb->head_frag);
+	napi_free_frags(napi);
+	local_bh_enable();
+}
+
 void netif_napi_add_weight_locked(struct net_device *dev,
 				  struct napi_struct *napi,
 				  int (*poll)(struct napi_struct *, int),
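
Note (not part of the patch): with napi_get_frags_check() now static in net/core/dev.c and its declaration dropped from netdevice.h, the check can only be invoked from within dev.c, presumably from the NAPI registration path whose signature closes the hunk above. A minimal sketch of such a call site follows; the function name and its first three parameters are taken from the hunk context, while the final parameter and the body are assumptions, not the actual implementation:

void netif_napi_add_weight_locked(struct net_device *dev,
				  struct napi_struct *napi,
				  int (*poll)(struct napi_struct *, int),
				  int weight)
{
	/* ... regular NAPI initialisation elided ... */

	/* warns once if napi_get_frags() ever returns a page-frag backed head */
	napi_get_frags_check(napi);
}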

net/core/fib_rules.c

Lines changed: 13 additions & 11 deletions
@@ -37,8 +37,8 @@ static const struct fib_kuid_range fib_kuid_range_unset = {
 
 bool fib_rule_matchall(const struct fib_rule *rule)
 {
-	if (rule->iifindex || rule->oifindex || rule->mark || rule->tun_id ||
-	    rule->flags)
+	if (READ_ONCE(rule->iifindex) || READ_ONCE(rule->oifindex) ||
+	    rule->mark || rule->tun_id || rule->flags)
 		return false;
 	if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1)
 		return false;
@@ -261,12 +261,14 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
 			  struct flowi *fl, int flags,
 			  struct fib_lookup_arg *arg)
 {
-	int ret = 0;
+	int iifindex, oifindex, ret = 0;
 
-	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
+	iifindex = READ_ONCE(rule->iifindex);
+	if (iifindex && (iifindex != fl->flowi_iif))
 		goto out;
 
-	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
+	oifindex = READ_ONCE(rule->oifindex);
+	if (oifindex && (oifindex != fl->flowi_oif))
 		goto out;
 
 	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
@@ -1041,14 +1043,14 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
 	if (rule->iifname[0]) {
 		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
 			goto nla_put_failure;
-		if (rule->iifindex == -1)
+		if (READ_ONCE(rule->iifindex) == -1)
 			frh->flags |= FIB_RULE_IIF_DETACHED;
 	}
 
 	if (rule->oifname[0]) {
 		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
 			goto nla_put_failure;
-		if (rule->oifindex == -1)
+		if (READ_ONCE(rule->oifindex) == -1)
 			frh->flags |= FIB_RULE_OIF_DETACHED;
 	}
 
@@ -1220,10 +1222,10 @@ static void attach_rules(struct list_head *rules, struct net_device *dev)
 	list_for_each_entry(rule, rules, list) {
 		if (rule->iifindex == -1 &&
 		    strcmp(dev->name, rule->iifname) == 0)
-			rule->iifindex = dev->ifindex;
+			WRITE_ONCE(rule->iifindex, dev->ifindex);
 		if (rule->oifindex == -1 &&
 		    strcmp(dev->name, rule->oifname) == 0)
-			rule->oifindex = dev->ifindex;
+			WRITE_ONCE(rule->oifindex, dev->ifindex);
 	}
 }
 
@@ -1233,9 +1235,9 @@ static void detach_rules(struct list_head *rules, struct net_device *dev)
 
 	list_for_each_entry(rule, rules, list) {
 		if (rule->iifindex == dev->ifindex)
-			rule->iifindex = -1;
+			WRITE_ONCE(rule->iifindex, -1);
 		if (rule->oifindex == dev->ifindex)
-			rule->oifindex = -1;
+			WRITE_ONCE(rule->oifindex, -1);
 	}
 }
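
The fib_rules.c hunks pair WRITE_ONCE() on the control path (attach_rules()/detach_rules(), which rewrite iifindex/oifindex when a device registers or unregisters) with READ_ONCE() on the lockless readers (fib_rule_matchall(), fib_rule_match(), fib_nl_fill_rule()), so each field is stored and loaded exactly once, without tearing or repeated loads. A minimal, self-contained sketch of the same annotation pattern; the struct and function names here are illustrative, not from the patch:

#include <linux/compiler.h>	/* READ_ONCE() / WRITE_ONCE() */
#include <linux/types.h>

struct demo_rule {
	int ifindex;	/* updated by the control path, read locklessly */
};

/* control path: may run while fast-path readers are in flight */
static void demo_attach(struct demo_rule *rule, int ifindex)
{
	WRITE_ONCE(rule->ifindex, ifindex);	/* single, non-torn store */
}

/* fast path: no lock held, so load the field once and reuse the local copy */
static bool demo_match(const struct demo_rule *rule, int flowi_iif)
{
	int ifindex = READ_ONCE(rule->ifindex);

	return !ifindex || ifindex == flowi_iif;
}

As in fib_rule_match() above, caching the READ_ONCE() result in a local also guarantees that the "is it set" test and the comparison against the flow key see the same value.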

net/core/skbuff.c

Lines changed: 5 additions & 98 deletions
@@ -220,67 +220,9 @@ static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
 #define NAPI_SKB_CACHE_BULK	16
 #define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)
 
-#if PAGE_SIZE == SZ_4K
-
-#define NAPI_HAS_SMALL_PAGE_FRAG	1
-#define NAPI_SMALL_PAGE_PFMEMALLOC(nc)	((nc).pfmemalloc)
-
-/* specialized page frag allocator using a single order 0 page
- * and slicing it into 1K sized fragment. Constrained to systems
- * with a very limited amount of 1K fragments fitting a single
- * page - to avoid excessive truesize underestimation
- */
-
-struct page_frag_1k {
-	void *va;
-	u16 offset;
-	bool pfmemalloc;
-};
-
-static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp)
-{
-	struct page *page;
-	int offset;
-
-	offset = nc->offset - SZ_1K;
-	if (likely(offset >= 0))
-		goto use_frag;
-
-	page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
-	if (!page)
-		return NULL;
-
-	nc->va = page_address(page);
-	nc->pfmemalloc = page_is_pfmemalloc(page);
-	offset = PAGE_SIZE - SZ_1K;
-	page_ref_add(page, offset / SZ_1K);
-
-use_frag:
-	nc->offset = offset;
-	return nc->va + offset;
-}
-#else
-
-/* the small page is actually unused in this build; add dummy helpers
- * to please the compiler and avoid later preprocessor's conditionals
- */
-#define NAPI_HAS_SMALL_PAGE_FRAG	0
-#define NAPI_SMALL_PAGE_PFMEMALLOC(nc)	false
-
-struct page_frag_1k {
-};
-
-static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask)
-{
-	return NULL;
-}
-
-#endif
-
 struct napi_alloc_cache {
 	local_lock_t bh_lock;
 	struct page_frag_cache page;
-	struct page_frag_1k page_small;
 	unsigned int skb_count;
 	void *skb_cache[NAPI_SKB_CACHE_SIZE];
 };
@@ -290,23 +232,6 @@ static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache) = {
 	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
 };
 
-/* Double check that napi_get_frags() allocates skbs with
- * skb->head being backed by slab, not a page fragment.
- * This is to make sure bug fixed in 3226b158e67c
- * ("net: avoid 32 x truesize under-estimation for tiny skbs")
- * does not accidentally come back.
- */
-void napi_get_frags_check(struct napi_struct *napi)
-{
-	struct sk_buff *skb;
-
-	local_bh_disable();
-	skb = napi_get_frags(napi);
-	WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag);
-	napi_free_frags(napi);
-	local_bh_enable();
-}
-
 void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
@@ -813,10 +738,8 @@ struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
 
 	/* If requested length is either too small or too big,
 	 * we use kmalloc() for skb->head allocation.
-	 * When the small frag allocator is available, prefer it over kmalloc
-	 * for small fragments
 	 */
-	if ((!NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) ||
+	if (len <= SKB_WITH_OVERHEAD(1024) ||
 	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
 	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
 		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
@@ -826,32 +749,16 @@ struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
 		goto skb_success;
 	}
 
+	len = SKB_HEAD_ALIGN(len);
+
 	if (sk_memalloc_socks())
 		gfp_mask |= __GFP_MEMALLOC;
 
 	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
 	nc = this_cpu_ptr(&napi_alloc_cache);
-	if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) {
-		/* we are artificially inflating the allocation size, but
-		 * that is not as bad as it may look like, as:
-		 * - 'len' less than GRO_MAX_HEAD makes little sense
-		 * - On most systems, larger 'len' values lead to fragment
-		 *   size above 512 bytes
-		 * - kmalloc would use the kmalloc-1k slab for such values
-		 * - Builds with smaller GRO_MAX_HEAD will very likely do
-		 *   little networking, as that implies no WiFi and no
-		 *   tunnels support, and 32 bits arches.
-		 */
-		len = SZ_1K;
 
-		data = page_frag_alloc_1k(&nc->page_small, gfp_mask);
-		pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small);
-	} else {
-		len = SKB_HEAD_ALIGN(len);
-
-		data = page_frag_alloc(&nc->page, len, gfp_mask);
-		pfmemalloc = page_frag_cache_is_pfmemalloc(&nc->page);
-	}
+	data = page_frag_alloc(&nc->page, len, gfp_mask);
+	pfmemalloc = page_frag_cache_is_pfmemalloc(&nc->page);
 	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
 
 	if (unlikely(!data))
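
With the 1K page-frag allocator removed, napi_alloc_skb() is left with two head-allocation strategies instead of three: kmalloc-backed heads for requests that are very small, larger than a page, or special (GFP_DMA / __GFP_DIRECT_RECLAIM), and the shared per-CPU page_frag_cache for everything in between. A condensed sketch of the resulting control flow, pieced together from the hunks above (locking, error handling and skb construction are elided):

	/* small, oversized or special requests: slab-backed skb->head */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
				  NUMA_NO_NODE);
		goto skb_success;
	}

	/* everything else: the per-CPU page fragment cache backs skb->head */
	len = SKB_HEAD_ALIGN(len);
	data = page_frag_alloc(&nc->page, len, gfp_mask);
	pfmemalloc = page_frag_cache_is_pfmemalloc(&nc->page);
	/* ... build the skb around 'data', propagating 'pfmemalloc' ... */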
