
Commit 08a0063

Paul Blakey authored and kuba-moo committed
net/sched: flower: Move filter handle initialization earlier

To support miss to action during hardware offload, the filter's handle is needed when setting up the actions (tcf_exts_init()), and before offloading.

Move filter handle initialization earlier.

Signed-off-by: Paul Blakey <[email protected]>
Reviewed-by: Jiri Pirko <[email protected]>
Reviewed-by: Simon Horman <[email protected]>
Reviewed-by: Marcelo Ricardo Leitner <[email protected]>
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 80cd22c commit 08a0063
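
In short, fl_change() now reserves the filter handle in head->handle_idr and records it in fnew->handle before the extensions (actions) are initialized, so the handle is already valid when the actions are set up and before the filter is offloaded. A condensed sketch of the new ordering (locking, the replace path, and some error handling trimmed; see the full diff below for the verbatim change):

        if (!fold) {
                if (!handle) {
                        /* no handle given: auto-allocate one in [1, INT_MAX] */
                        handle = 1;
                        err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
                                            INT_MAX, GFP_ATOMIC);
                } else {
                        /* claim exactly the handle the user requested */
                        err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
                                            handle, GFP_ATOMIC);
                }
                if (err)
                        goto errout;
        }
        fnew->handle = handle;

        /* only now are the actions initialized */
        err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
        if (err < 0)
                goto errout_idr;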

File tree

1 file changed: +35 additions, -27 deletions


net/sched/cls_flower.c

Lines changed: 35 additions & 27 deletions
@@ -2187,10 +2187,6 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
         INIT_LIST_HEAD(&fnew->hw_list);
         refcount_set(&fnew->refcnt, 1);
 
-        err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
-        if (err < 0)
-                goto errout;
-
         if (tb[TCA_FLOWER_FLAGS]) {
                 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
 
@@ -2200,15 +2196,45 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
                 }
         }
 
+        if (!fold) {
+                spin_lock(&tp->lock);
+                if (!handle) {
+                        handle = 1;
+                        err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+                                            INT_MAX, GFP_ATOMIC);
+                } else {
+                        err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+                                            handle, GFP_ATOMIC);
+
+                        /* Filter with specified handle was concurrently
+                         * inserted after initial check in cls_api. This is not
+                         * necessarily an error if NLM_F_EXCL is not set in
+                         * message flags. Returning EAGAIN will cause cls_api to
+                         * try to update concurrently inserted rule.
+                         */
+                        if (err == -ENOSPC)
+                                err = -EAGAIN;
+                }
+                spin_unlock(&tp->lock);
+
+                if (err)
+                        goto errout;
+        }
+        fnew->handle = handle;
+
+        err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
+        if (err < 0)
+                goto errout_idr;
+
         err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
                            tp->chain->tmplt_priv, flags, fnew->flags,
                            extack);
         if (err)
-                goto errout;
+                goto errout_idr;
 
         err = fl_check_assign_mask(head, fnew, fold, mask);
         if (err)
-                goto errout;
+                goto errout_idr;
 
         err = fl_ht_insert_unique(fnew, fold, &in_ht);
         if (err)
@@ -2274,29 +2300,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
                 refcount_dec(&fold->refcnt);
                 __fl_put(fold);
         } else {
-                if (handle) {
-                        /* user specifies a handle and it doesn't exist */
-                        err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-                                            handle, GFP_ATOMIC);
-
-                        /* Filter with specified handle was concurrently
-                         * inserted after initial check in cls_api. This is not
-                         * necessarily an error if NLM_F_EXCL is not set in
-                         * message flags. Returning EAGAIN will cause cls_api to
-                         * try to update concurrently inserted rule.
-                         */
-                        if (err == -ENOSPC)
-                                err = -EAGAIN;
-                } else {
-                        handle = 1;
-                        err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-                                            INT_MAX, GFP_ATOMIC);
-                }
-                if (err)
-                        goto errout_hw;
+                idr_replace(&head->handle_idr, fnew, fnew->handle);
 
                 refcount_inc(&fnew->refcnt);
-                fnew->handle = handle;
                 list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
                 spin_unlock(&tp->lock);
         }
@@ -2319,6 +2325,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
                                        fnew->mask->filter_ht_params);
 errout_mask:
         fl_mask_put(head, fnew->mask);
+errout_idr:
+        idr_remove(&head->handle_idr, fnew->handle);
 errout:
         __fl_put(fnew);
 errout_tb:
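
A note on the IDR calls used above, with an illustrative helper (the helper name and signature are invented for this note and are not part of the patch): idr_alloc_u32() searches for a free ID in the range [*id, max], binds it to the given pointer, writes the chosen ID back through *id, and returns 0 on success or an error such as -ENOSPC when nothing in that range is free. Passing min == max therefore claims one specific handle or fails, which is why fl_change() remaps -ENOSPC to -EAGAIN for user-specified handles.

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/limits.h>
#include <linux/types.h>

/* Illustrative only: mirrors the two idr_alloc_u32() call patterns
 * used by fl_change() to reserve a filter handle.
 */
static int example_reserve_handle(struct idr *handle_idr, void *filter,
                                  u32 requested, u32 *out_handle)
{
        u32 handle;
        int err;

        if (!requested) {
                /* auto-allocate the lowest free handle in [1, INT_MAX] */
                handle = 1;
                err = idr_alloc_u32(handle_idr, filter, &handle,
                                    INT_MAX, GFP_ATOMIC);
        } else {
                /* min == max == requested: claim exactly that handle,
                 * or fail with -ENOSPC because it is already in use
                 */
                handle = requested;
                err = idr_alloc_u32(handle_idr, filter, &handle,
                                    requested, GFP_ATOMIC);
        }
        if (!err)
                *out_handle = handle;
        return err;
}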
