29 | 29 | static LIST_HEAD(mirred_list); |
30 | 30 | static DEFINE_SPINLOCK(mirred_list_lock); |
31 | 31 |
32 | | -#define MIRRED_NEST_LIMIT 4 |
33 | | - |
34 | | -#ifndef CONFIG_PREEMPT_RT |
35 | | -static u8 tcf_mirred_nest_level_inc_return(void) |
36 | | -{ |
37 | | - return __this_cpu_inc_return(softnet_data.xmit.sched_mirred_nest); |
38 | | -} |
39 | | - |
40 | | -static void tcf_mirred_nest_level_dec(void) |
41 | | -{ |
42 | | - __this_cpu_dec(softnet_data.xmit.sched_mirred_nest); |
43 | | -} |
44 | | - |
45 | | -#else |
46 | | -static u8 tcf_mirred_nest_level_inc_return(void) |
47 | | -{ |
48 | | - return current->net_xmit.sched_mirred_nest++; |
49 | | -} |
50 | | - |
51 | | -static void tcf_mirred_nest_level_dec(void) |
52 | | -{ |
53 | | - current->net_xmit.sched_mirred_nest--; |
54 | | -} |
55 | | -#endif |
56 | | - |
57 | 32 | static bool tcf_mirred_is_act_redirect(int action) |
58 | 33 | { |
59 | 34 | return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR; |
@@ -439,44 +414,53 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb, |
439 | 414 | { |
440 | 415 | struct tcf_mirred *m = to_mirred(a); |
441 | 416 | int retval = READ_ONCE(m->tcf_action); |
442 | | - unsigned int nest_level; |
| 417 | + struct netdev_xmit *xmit; |
443 | 418 | bool m_mac_header_xmit; |
444 | 419 | struct net_device *dev; |
445 | | - int m_eaction; |
| 420 | + int i, m_eaction; |
446 | 421 | u32 blockid; |
447 | 422 |
448 | | - nest_level = tcf_mirred_nest_level_inc_return(); |
449 | | - if (unlikely(nest_level > MIRRED_NEST_LIMIT)) { |
| 423 | +#ifdef CONFIG_PREEMPT_RT |
| 424 | + xmit = &current->net_xmit; |
| 425 | +#else |
| 426 | + xmit = this_cpu_ptr(&softnet_data.xmit); |
| 427 | +#endif |
| 428 | + if (unlikely(xmit->sched_mirred_nest >= MIRRED_NEST_LIMIT)) { |
450 | 429 | net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n", |
451 | 430 | netdev_name(skb->dev)); |
452 | | - retval = TC_ACT_SHOT; |
453 | | - goto dec_nest_level; |
| 431 | + return TC_ACT_SHOT; |
454 | 432 | } |
455 | 433 |
456 | 434 | tcf_lastuse_update(&m->tcf_tm); |
457 | 435 | tcf_action_update_bstats(&m->common, skb); |
458 | 436 |
459 | 437 | blockid = READ_ONCE(m->tcfm_blockid); |
460 | | - if (blockid) { |
461 | | - retval = tcf_blockcast(skb, m, blockid, res, retval); |
462 | | - goto dec_nest_level; |
463 | | - } |
| 438 | + if (blockid) |
| 439 | + return tcf_blockcast(skb, m, blockid, res, retval); |
464 | 440 |
465 | 441 | dev = rcu_dereference_bh(m->tcfm_dev); |
466 | 442 | if (unlikely(!dev)) { |
467 | 443 | pr_notice_once("tc mirred: target device is gone\n"); |
468 | 444 | tcf_action_inc_overlimit_qstats(&m->common); |
469 | | - goto dec_nest_level; |
| 445 | + return retval; |
470 | 446 | } |
| 447 | + for (i = 0; i < xmit->sched_mirred_nest; i++) { |
| 448 | + if (xmit->sched_mirred_dev[i] != dev) |
| 449 | + continue; |
| 450 | + pr_notice_once("tc mirred: loop on device %s\n", |
| 451 | + netdev_name(dev)); |
| 452 | + tcf_action_inc_overlimit_qstats(&m->common); |
| 453 | + return retval; |
| 454 | + } |
| 455 | + |
| 456 | + xmit->sched_mirred_dev[xmit->sched_mirred_nest++] = dev; |
471 | 457 |
472 | 458 | m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit); |
473 | 459 | m_eaction = READ_ONCE(m->tcfm_eaction); |
474 | 460 |
475 | 461 | retval = tcf_mirred_to_dev(skb, m, dev, m_mac_header_xmit, m_eaction, |
476 | 462 | retval); |
477 | | - |
478 | | -dec_nest_level: |
479 | | - tcf_mirred_nest_level_dec(); |
| 463 | + xmit->sched_mirred_nest--; |
480 | 464 |
481 | 465 | return retval; |
482 | 466 | } |
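
For context, a minimal sketch of the struct netdev_xmit shape the new hunk appears to rely on. The hunk drops the local MIRRED_NEST_LIMIT define yet still tests against it, and it indexes xmit->sched_mirred_dev[] by nesting level, so both presumably move to the shared netdev_xmit definition (likely include/linux/netdevice.h) in another patch of the same series. The two field names come straight from the diff, and u8 for sched_mirred_nest matches the return type of the removed helpers; the placement, the surrounding fields, and the limit keeping its old value of 4 are assumptions.

	/*
	 * Sketch only, not the actual header change from this series.
	 * Only the two fields used by tcf_mirred_act() above are shown;
	 * the real struct netdev_xmit carries additional per-CPU/per-task
	 * transmit state that is omitted here.
	 */
	#define MIRRED_NEST_LIMIT	4	/* assumed to keep its old value */

	struct netdev_xmit {
		/* ... existing fields ... */
		u8			sched_mirred_nest;	/* current mirred depth */
		struct net_device	*sched_mirred_dev[MIRRED_NEST_LIMIT];
							/* devices already traversed */
	};

Sizing the array with MIRRED_NEST_LIMIT is what makes the early bound check sufficient: once "sched_mirred_nest >= MIRRED_NEST_LIMIT" returns TC_ACT_SHOT, the store at xmit->sched_mirred_dev[xmit->sched_mirred_nest++] can never index past the end of the array, and the loop-detection scan only reads entries below sched_mirred_nest.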