@@ -85,6 +85,7 @@ static void tcf_mirred_release(struct tc_action *a)
 
 static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
 	[TCA_MIRRED_PARMS]	= { .len = sizeof(struct tc_mirred) },
+	[TCA_MIRRED_BLOCKID]	= NLA_POLICY_MIN(NLA_U32, 1),
 };
 
 static struct tc_action_ops act_mirred_ops;
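
(For context: a minimal sketch of how this policy is assumed to be applied in tcf_mirred_init(); the parse call itself is outside this diff. With NLA_POLICY_MIN(NLA_U32, 1), a TCA_MIRRED_BLOCKID of 0 is rejected at parse time, which matters because 0 is reserved below to mean "no block bound".)

	struct nlattr *tb[TCA_MIRRED_MAX + 1];
	int err;

	/* Assumed parse site (not part of this hunk): mirred_policy is
	 * enforced here, so a zero block ID never reaches the init logic.
	 */
	err = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
					  mirred_policy, extack);
	if (err < 0)
		return err;
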
@@ -136,6 +137,17 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 	if (exists && bind)
 		return 0;
 
+	if (tb[TCA_MIRRED_BLOCKID] && parm->ifindex) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Cannot specify Block ID and dev simultaneously");
+		if (exists)
+			tcf_idr_release(*a, bind);
+		else
+			tcf_idr_cleanup(tn, index);
+
+		return -EINVAL;
+	}
+
 	switch (parm->eaction) {
 	case TCA_EGRESS_MIRROR:
 	case TCA_EGRESS_REDIR:
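
(Note on the error path above: if the action already existed, the earlier lookup took a reference that must be dropped with tcf_idr_release(); if the index was merely reserved for a new action, tcf_idr_cleanup() releases the reservation instead. That is why the two exits differ.)
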
@@ -152,9 +164,10 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 	}
 
 	if (!exists) {
-		if (!parm->ifindex) {
+		if (!parm->ifindex && !tb[TCA_MIRRED_BLOCKID]) {
 			tcf_idr_cleanup(tn, index);
-			NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Must specify device or block");
 			return -EINVAL;
 		}
 		ret = tcf_idr_create_from_flags(tn, index, est, a,
@@ -192,6 +205,11 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 		tcf_mirred_replace_dev(m, ndev);
 		netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC);
 		m->tcfm_mac_header_xmit = mac_header_xmit;
+		m->tcfm_blockid = 0;
+	} else if (tb[TCA_MIRRED_BLOCKID]) {
+		tcf_mirred_replace_dev(m, NULL);
+		m->tcfm_mac_header_xmit = false;
+		m->tcfm_blockid = nla_get_u32(tb[TCA_MIRRED_BLOCKID]);
 	}
 	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
 	m->tcfm_eaction = parm->eaction;
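
(For context: tcf_mirred_replace_dev() is defined earlier in act_mirred.c and not shown in this diff. A sketch of its assumed shape, based on the calls above: it swaps the RCU-managed target device under tcf_lock and drops the tracked reference on the old one.)

	/* Assumed helper, simplified: publish the new target device and
	 * release the tracked reference to the previous one.
	 */
	static void tcf_mirred_replace_dev(struct tcf_mirred *m,
					   struct net_device *ndev)
	{
		struct net_device *odev;

		odev = rcu_replace_pointer(m->tcfm_dev, ndev,
					   lockdep_is_held(&m->tcf_lock));
		netdev_put(odev, &m->tcfm_dev_tracker);
	}
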
@@ -316,6 +334,89 @@ static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m,
 	return retval;
 }
 
+static int tcf_blockcast_redir(struct sk_buff *skb, struct tcf_mirred *m,
+			       struct tcf_block *block, int m_eaction,
+			       const u32 exception_ifindex, int retval)
+{
+	struct net_device *dev_prev = NULL;
+	struct net_device *dev = NULL;
+	unsigned long index;
+	int mirred_eaction;
+
+	mirred_eaction = tcf_mirred_act_wants_ingress(m_eaction) ?
+		TCA_INGRESS_MIRROR : TCA_EGRESS_MIRROR;
+
+	xa_for_each(&block->ports, index, dev) {
+		if (index == exception_ifindex)
+			continue;
+
+		if (!dev_prev)
+			goto assign_prev;
+
+		tcf_mirred_to_dev(skb, m, dev_prev,
+				  dev_is_mac_header_xmit(dev),
+				  mirred_eaction, retval);
+assign_prev:
+		dev_prev = dev;
+	}
+
+	if (dev_prev)
+		return tcf_mirred_to_dev(skb, m, dev_prev,
+					 dev_is_mac_header_xmit(dev_prev),
+					 m_eaction, retval);
+
+	return retval;
+}
+
+static int tcf_blockcast_mirror(struct sk_buff *skb, struct tcf_mirred *m,
+				struct tcf_block *block, int m_eaction,
+				const u32 exception_ifindex, int retval)
+{
+	struct net_device *dev = NULL;
+	unsigned long index;
+
+	xa_for_each(&block->ports, index, dev) {
+		if (index == exception_ifindex)
+			continue;
+
+		tcf_mirred_to_dev(skb, m, dev,
+				  dev_is_mac_header_xmit(dev),
+				  m_eaction, retval);
+	}
+
+	return retval;
+}
+
+static int tcf_blockcast(struct sk_buff *skb, struct tcf_mirred *m,
+			 const u32 blockid, struct tcf_result *res,
+			 int retval)
+{
+	const u32 exception_ifindex = skb->dev->ifindex;
+	struct tcf_block *block;
+	bool is_redirect;
+	int m_eaction;
+
+	m_eaction = READ_ONCE(m->tcfm_eaction);
+	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
+
+	/* we are already under rcu protection, so can call block lookup
+	 * directly.
+	 */
+	block = tcf_block_lookup(dev_net(skb->dev), blockid);
+	if (!block || xa_empty(&block->ports)) {
+		tcf_action_inc_overlimit_qstats(&m->common);
+		return retval;
+	}
+
+	if (is_redirect)
+		return tcf_blockcast_redir(skb, m, block, m_eaction,
+					   exception_ifindex, retval);
+
+	/* If it's not redirect, it is mirror */
+	return tcf_blockcast_mirror(skb, m, block, m_eaction, exception_ifindex,
+				    retval);
+}
+
 TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
 				     const struct tc_action *a,
 				     struct tcf_result *res)
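
(Why tcf_blockcast_redir() trails the iterator by one element: a redirect consumes the skb, so every port except the last must receive a non-consuming mirror copy, and only the final port gets the original, consuming action. A standalone userspace sketch of that defer-the-last pattern, with hypothetical names, follows.)

	#include <stdio.h>

	/* Illustration only: act non-destructively on all but the final
	 * element, then apply the consuming action once, to the last one.
	 */
	static void copy_to(int port)    { printf("mirror copy -> port %d\n", port); }
	static void consume_at(int port) { printf("redirect    -> port %d\n", port); }

	int main(void)
	{
		int ports[] = { 1, 2, 3, 4 };
		int prev = -1;

		for (size_t i = 0; i < sizeof(ports) / sizeof(ports[0]); i++) {
			if (prev != -1)
				copy_to(prev);	/* all but the last get a copy */
			prev = ports[i];
		}
		if (prev != -1)
			consume_at(prev);	/* last port gets the original */

		return 0;
	}
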
@@ -326,6 +427,7 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
 	bool m_mac_header_xmit;
 	struct net_device *dev;
 	int m_eaction;
+	u32 blockid;
 
 	nest_level = __this_cpu_inc_return(mirred_nest_level);
 	if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
@@ -338,6 +440,12 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
 	tcf_lastuse_update(&m->tcf_tm);
 	tcf_action_update_bstats(&m->common, skb);
 
+	blockid = READ_ONCE(m->tcfm_blockid);
+	if (blockid) {
+		retval = tcf_blockcast(skb, m, blockid, res, retval);
+		goto dec_nest_level;
+	}
+
 	dev = rcu_dereference_bh(m->tcfm_dev);
 	if (unlikely(!dev)) {
 		pr_notice_once("tc mirred: target device is gone\n");
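
(The datapath samples tcfm_blockid locklessly, hence the READ_ONCE(); the writers in tcf_mirred_init() and mirred_device_event() update it with tcf_lock held, and a value of 0 means "no block bound", so the existing single-device path runs unchanged. A hypothetical, simplified pairing of the two sides, not the actual kernel code:)

	/* Control path: serialized writer under the action lock. */
	static void mirred_set_block(struct tcf_mirred *m, u32 blockid)
	{
		spin_lock_bh(&m->tcf_lock);
		m->tcfm_blockid = blockid;
		spin_unlock_bh(&m->tcf_lock);
	}

	/* Datapath: lockless reader; READ_ONCE() keeps the load untorn. */
	static u32 mirred_get_block(const struct tcf_mirred *m)
	{
		return READ_ONCE(m->tcfm_blockid);
	}
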
@@ -379,6 +487,7 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
 	};
 	struct net_device *dev;
 	struct tcf_t t;
+	u32 blockid;
 
 	spin_lock_bh(&m->tcf_lock);
 	opt.action = m->tcf_action;
@@ -390,6 +499,10 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
 	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
 
+	blockid = m->tcfm_blockid;
+	if (blockid && nla_put_u32(skb, TCA_MIRRED_BLOCKID, blockid))
+		goto nla_put_failure;
+
 	tcf_tm_dump(&t, &m->tcf_tm);
 	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
 		goto nla_put_failure;
@@ -420,6 +533,8 @@ static int mirred_device_event(struct notifier_block *unused,
 	 * net_device are already rcu protected.
 	 */
 	RCU_INIT_POINTER(m->tcfm_dev, NULL);
+	} else if (m->tcfm_blockid) {
+		m->tcfm_blockid = 0;
 	}
 	spin_unlock_bh(&m->tcf_lock);
 }
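
(End-to-end, this lets a single mirred action fan out over every port bound to a tc block instead of a single device: for example, after binding several devices to ingress block 22 with something like `tc qdisc add dev ens7 ingress_block 22 clsact` (hypothetical interface names and syntax sketch), a filter on that block using `action mirred egress mirror blockid 22` mirrors each matching packet to all ports of block 22 except the one it arrived on.)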