Skip to content

Commit 16085e4

Browse files
vbnogueira authored and davem330 committed
net/sched: act_mirred: Create function tcf_mirred_to_dev and improve readability
As a preparation for adding a block ID to mirred, separate the part of mirred that redirects/mirrors to a dev into a specific function so that it can be called by blockcast for each dev. Also improve readability; e.g. rename use_reinsert to dont_clone and skb2 to skb_to_send. Co-developed-by: Jamal Hadi Salim <[email protected]> Signed-off-by: Jamal Hadi Salim <[email protected]> Co-developed-by: Pedro Tammela <[email protected]> Signed-off-by: Pedro Tammela <[email protected]> Signed-off-by: Victor Nogueira <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent a7042cf commit 16085e4

File tree

1 file changed

+72
-57
lines changed

1 file changed

+72
-57
lines changed

net/sched/act_mirred.c

Lines changed: 72 additions & 57 deletions
Original file line number | Diff line number | Diff line change
@@ -225,110 +225,125 @@ static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
225225
return err;
226226
}
227227

228-
TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
229-
const struct tc_action *a,
230-
struct tcf_result *res)
228+
static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m,
229+
struct net_device *dev,
230+
const bool m_mac_header_xmit, int m_eaction,
231+
int retval)
231232
{
232-
struct tcf_mirred *m = to_mirred(a);
233-
struct sk_buff *skb2 = skb;
234-
bool m_mac_header_xmit;
235-
struct net_device *dev;
236-
unsigned int nest_level;
237-
int retval, err = 0;
238-
bool use_reinsert;
233+
struct sk_buff *skb_to_send = skb;
239234
bool want_ingress;
240235
bool is_redirect;
241236
bool expects_nh;
242237
bool at_ingress;
243-
int m_eaction;
238+
bool dont_clone;
244239
int mac_len;
245240
bool at_nh;
241+
int err;
246242

247-
nest_level = __this_cpu_inc_return(mirred_nest_level);
248-
if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
249-
net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
250-
netdev_name(skb->dev));
251-
__this_cpu_dec(mirred_nest_level);
252-
return TC_ACT_SHOT;
253-
}
254-
255-
tcf_lastuse_update(&m->tcf_tm);
256-
tcf_action_update_bstats(&m->common, skb);
257-
258-
m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
259-
m_eaction = READ_ONCE(m->tcfm_eaction);
260-
retval = READ_ONCE(m->tcf_action);
261-
dev = rcu_dereference_bh(m->tcfm_dev);
262-
if (unlikely(!dev)) {
263-
pr_notice_once("tc mirred: target device is gone\n");
264-
goto out;
265-
}
266-
243+
is_redirect = tcf_mirred_is_act_redirect(m_eaction);
267244
if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
268245
net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
269246
dev->name);
247+
err = -ENODEV;
270248
goto out;
271249
}
272250

273251
/* we could easily avoid the clone only if called by ingress and clsact;
274252
* since we can't easily detect the clsact caller, skip clone only for
275253
* ingress - that covers the TC S/W datapath.
276254
*/
277-
is_redirect = tcf_mirred_is_act_redirect(m_eaction);
278255
at_ingress = skb_at_tc_ingress(skb);
279-
use_reinsert = at_ingress && is_redirect &&
280-
tcf_mirred_can_reinsert(retval);
281-
if (!use_reinsert) {
282-
skb2 = skb_clone(skb, GFP_ATOMIC);
283-
if (!skb2)
256+
dont_clone = skb_at_tc_ingress(skb) && is_redirect &&
257+
tcf_mirred_can_reinsert(retval);
258+
if (!dont_clone) {
259+
skb_to_send = skb_clone(skb, GFP_ATOMIC);
260+
if (!skb_to_send) {
261+
err = -ENOMEM;
284262
goto out;
263+
}
285264
}
286265

287266
want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
288267

289268
/* All mirred/redirected skbs should clear previous ct info */
290-
nf_reset_ct(skb2);
269+
nf_reset_ct(skb_to_send);
291270
if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
292-
skb_dst_drop(skb2);
271+
skb_dst_drop(skb_to_send);
293272

294273
expects_nh = want_ingress || !m_mac_header_xmit;
295274
at_nh = skb->data == skb_network_header(skb);
296275
if (at_nh != expects_nh) {
297-
mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
276+
mac_len = at_ingress ? skb->mac_len :
298277
skb_network_offset(skb);
299278
if (expects_nh) {
300279
/* target device/action expect data at nh */
301-
skb_pull_rcsum(skb2, mac_len);
280+
skb_pull_rcsum(skb_to_send, mac_len);
302281
} else {
303282
/* target device/action expect data at mac */
304-
skb_push_rcsum(skb2, mac_len);
283+
skb_push_rcsum(skb_to_send, mac_len);
305284
}
306285
}
307286

308-
skb2->skb_iif = skb->dev->ifindex;
309-
skb2->dev = dev;
287+
skb_to_send->skb_iif = skb->dev->ifindex;
288+
skb_to_send->dev = dev;
310289

311-
/* mirror is always swallowed */
312290
if (is_redirect) {
313-
skb_set_redirected(skb2, skb2->tc_at_ingress);
314-
315-
/* let's the caller reinsert the packet, if possible */
316-
if (use_reinsert) {
317-
err = tcf_mirred_forward(want_ingress, skb);
318-
if (err)
319-
tcf_action_inc_overlimit_qstats(&m->common);
320-
__this_cpu_dec(mirred_nest_level);
321-
return TC_ACT_CONSUMED;
322-
}
291+
if (skb == skb_to_send)
292+
retval = TC_ACT_CONSUMED;
293+
294+
skb_set_redirected(skb_to_send, skb_to_send->tc_at_ingress);
295+
296+
err = tcf_mirred_forward(want_ingress, skb_to_send);
297+
} else {
298+
err = tcf_mirred_forward(want_ingress, skb_to_send);
323299
}
324300

325-
err = tcf_mirred_forward(want_ingress, skb2);
326301
if (err) {
327302
out:
328303
tcf_action_inc_overlimit_qstats(&m->common);
329-
if (tcf_mirred_is_act_redirect(m_eaction))
304+
if (is_redirect)
330305
retval = TC_ACT_SHOT;
331306
}
307+
308+
return retval;
309+
}
310+
311+
TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
312+
const struct tc_action *a,
313+
struct tcf_result *res)
314+
{
315+
struct tcf_mirred *m = to_mirred(a);
316+
int retval = READ_ONCE(m->tcf_action);
317+
unsigned int nest_level;
318+
bool m_mac_header_xmit;
319+
struct net_device *dev;
320+
int m_eaction;
321+
322+
nest_level = __this_cpu_inc_return(mirred_nest_level);
323+
if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
324+
net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
325+
netdev_name(skb->dev));
326+
retval = TC_ACT_SHOT;
327+
goto dec_nest_level;
328+
}
329+
330+
tcf_lastuse_update(&m->tcf_tm);
331+
tcf_action_update_bstats(&m->common, skb);
332+
333+
dev = rcu_dereference_bh(m->tcfm_dev);
334+
if (unlikely(!dev)) {
335+
pr_notice_once("tc mirred: target device is gone\n");
336+
tcf_action_inc_overlimit_qstats(&m->common);
337+
goto dec_nest_level;
338+
}
339+
340+
m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
341+
m_eaction = READ_ONCE(m->tcfm_eaction);
342+
343+
retval = tcf_mirred_to_dev(skb, m, dev, m_mac_header_xmit, m_eaction,
344+
retval);
345+
346+
dec_nest_level:
332347
__this_cpu_dec(mirred_nest_level);
333348

334349
return retval;

0 commit comments

Comments (0)