11
11
#include "ice_tc_lib.h"
12
12
13
13
/**
14
- * ice_eswitch_add_vf_sp_rule - add adv rule with VF 's VSI index
14
+ * ice_eswitch_add_sp_rule - add adv rule with device 's VSI index
15
15
* @pf: pointer to PF struct
16
- * @vf : pointer to VF struct
16
+ * @repr : pointer to the repr struct
17
17
*
18
18
* This function adds advanced rule that forwards packets with
19
- * VF 's VSI index to the corresponding eswitch ctrl VSI queue.
19
+ * device 's VSI index to the corresponding eswitch ctrl VSI queue.
20
20
*/
21
21
static int
22
- ice_eswitch_add_vf_sp_rule (struct ice_pf * pf , struct ice_vf * vf )
22
+ ice_eswitch_add_sp_rule (struct ice_pf * pf , struct ice_repr * repr )
23
23
{
24
24
struct ice_vsi * ctrl_vsi = pf -> eswitch .control_vsi ;
25
25
struct ice_adv_rule_info rule_info = { 0 };
@@ -38,35 +38,32 @@ ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf)
38
38
rule_info .sw_act .vsi_handle = ctrl_vsi -> idx ;
39
39
rule_info .sw_act .fltr_act = ICE_FWD_TO_Q ;
40
40
rule_info .sw_act .fwd_id .q_id = hw -> func_caps .common_cap .rxq_first_id +
41
- ctrl_vsi -> rxq_map [vf -> repr -> q_id ];
41
+ ctrl_vsi -> rxq_map [repr -> q_id ];
42
42
rule_info .flags_info .act |= ICE_SINGLE_ACT_LB_ENABLE ;
43
43
rule_info .flags_info .act_valid = true;
44
44
rule_info .tun_type = ICE_SW_TUN_AND_NON_TUN ;
45
- rule_info .src_vsi = vf -> lan_vsi_idx ;
45
+ rule_info .src_vsi = repr -> src_vsi -> idx ;
46
46
47
47
err = ice_add_adv_rule (hw , list , lkups_cnt , & rule_info ,
48
- & vf -> repr -> sp_rule );
48
+ & repr -> sp_rule );
49
49
if (err )
50
- dev_err (ice_pf_to_dev (pf ), "Unable to add VF slow-path rule in switchdev mode for VF %d" ,
51
- vf -> vf_id );
50
+ dev_err (ice_pf_to_dev (pf ), "Unable to add slow-path rule in switchdev mode" );
52
51
53
52
kfree (list );
54
53
return err ;
55
54
}
56
55
57
56
/**
 * ice_eswitch_del_sp_rule - delete adv rule with device's VSI index
 * @pf: pointer to the PF struct
 * @repr: pointer to the repr struct
 *
 * Delete the advanced rule that was used to forward packets with the device's
 * VSI index to the corresponding eswitch ctrl VSI queue.
 */
static void ice_eswitch_del_sp_rule(struct ice_pf *pf, struct ice_repr *repr)
{
	/* sp_rule was stored on the repr when ice_eswitch_add_sp_rule()
	 * installed it; removing by ID tears down exactly that rule.
	 */
	ice_rem_adv_rule_by_id(&pf->hw, &repr->sp_rule);
}
71
68
72
69
/**
@@ -193,26 +190,24 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
193
190
/**
 * ice_eswitch_release_reprs - undo per-representor switchdev setup
 * @pf: pointer to PF structure
 *
 * For every configured representor in the eswitch xarray: restore the
 * antispoof setting on the source VSI, free the metadata dst, remove the
 * slow-path rule, re-add the MAC/broadcast filter and delete the NAPI
 * handle. Statement order mirrors the reverse of setup and should not be
 * changed.
 */
static void
ice_eswitch_release_reprs(struct ice_pf *pf)
{
	struct ice_repr *repr;
	unsigned long id;

	xa_for_each(&pf->eswitch.reprs, id, repr) {
		struct ice_vsi *vsi = repr->src_vsi;

		/* Skip representors that aren't configured */
		if (!repr->dst)
			continue;

		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
		metadata_dst_free(repr->dst);
		repr->dst = NULL;
		ice_eswitch_del_sp_rule(pf, repr);
		/* restore the normal forwarding path for this MAC */
		ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
					       ICE_FWD_TO_VSI);

		netif_napi_del(&repr->q_vector->napi);
	}
}
218
213
@@ -223,56 +218,53 @@ ice_eswitch_release_reprs(struct ice_pf *pf)
223
218
static int ice_eswitch_setup_reprs (struct ice_pf * pf )
224
219
{
225
220
struct ice_vsi * ctrl_vsi = pf -> eswitch .control_vsi ;
226
- struct ice_vf * vf ;
227
- unsigned int bkt ;
228
-
229
- lockdep_assert_held (& pf -> vfs .table_lock );
221
+ struct ice_repr * repr ;
222
+ unsigned long id ;
230
223
231
- ice_for_each_vf ( pf , bkt , vf ) {
232
- struct ice_vsi * vsi = vf -> repr -> src_vsi ;
224
+ xa_for_each ( & pf -> eswitch . reprs , id , repr ) {
225
+ struct ice_vsi * vsi = repr -> src_vsi ;
233
226
234
227
ice_remove_vsi_fltr (& pf -> hw , vsi -> idx );
235
- vf -> repr -> dst = metadata_dst_alloc (0 , METADATA_HW_PORT_MUX ,
236
- GFP_KERNEL );
237
- if (!vf -> repr -> dst ) {
238
- ice_fltr_add_mac_and_broadcast (vsi , vf -> hw_lan_addr ,
228
+ repr -> dst = metadata_dst_alloc (0 , METADATA_HW_PORT_MUX ,
229
+ GFP_KERNEL );
230
+ if (!repr -> dst ) {
231
+ ice_fltr_add_mac_and_broadcast (vsi , repr -> parent_mac ,
239
232
ICE_FWD_TO_VSI );
240
233
goto err ;
241
234
}
242
235
243
- if (ice_eswitch_add_vf_sp_rule (pf , vf )) {
244
- ice_fltr_add_mac_and_broadcast (vsi , vf -> hw_lan_addr ,
236
+ if (ice_eswitch_add_sp_rule (pf , repr )) {
237
+ ice_fltr_add_mac_and_broadcast (vsi , repr -> parent_mac ,
245
238
ICE_FWD_TO_VSI );
246
239
goto err ;
247
240
}
248
241
249
242
if (ice_vsi_update_security (vsi , ice_vsi_ctx_clear_antispoof )) {
250
- ice_fltr_add_mac_and_broadcast (vsi , vf -> hw_lan_addr ,
243
+ ice_fltr_add_mac_and_broadcast (vsi , repr -> parent_mac ,
251
244
ICE_FWD_TO_VSI );
252
- ice_eswitch_del_vf_sp_rule ( vf );
253
- metadata_dst_free (vf -> repr -> dst );
254
- vf -> repr -> dst = NULL ;
245
+ ice_eswitch_del_sp_rule ( pf , repr );
246
+ metadata_dst_free (repr -> dst );
247
+ repr -> dst = NULL ;
255
248
goto err ;
256
249
}
257
250
258
251
if (ice_vsi_add_vlan_zero (vsi )) {
259
- ice_fltr_add_mac_and_broadcast (vsi , vf -> hw_lan_addr ,
252
+ ice_fltr_add_mac_and_broadcast (vsi , repr -> parent_mac ,
260
253
ICE_FWD_TO_VSI );
261
- ice_eswitch_del_vf_sp_rule ( vf );
262
- metadata_dst_free (vf -> repr -> dst );
263
- vf -> repr -> dst = NULL ;
254
+ ice_eswitch_del_sp_rule ( pf , repr );
255
+ metadata_dst_free (repr -> dst );
256
+ repr -> dst = NULL ;
264
257
ice_vsi_update_security (vsi , ice_vsi_ctx_set_antispoof );
265
258
goto err ;
266
259
}
267
260
268
- netif_napi_add (vf -> repr -> netdev , & vf -> repr -> q_vector -> napi ,
261
+ netif_napi_add (repr -> netdev , & repr -> q_vector -> napi ,
269
262
ice_napi_poll );
270
263
271
- netif_keep_dst (vf -> repr -> netdev );
264
+ netif_keep_dst (repr -> netdev );
272
265
}
273
266
274
- ice_for_each_vf (pf , bkt , vf ) {
275
- struct ice_repr * repr = vf -> repr ;
267
+ xa_for_each (& pf -> eswitch .reprs , id , repr ) {
276
268
struct ice_vsi * vsi = repr -> src_vsi ;
277
269
struct metadata_dst * dst ;
278
270
@@ -291,7 +283,7 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
291
283
}
292
284
293
285
/**
294
- * ice_eswitch_update_repr - reconfigure VF port representor
286
+ * ice_eswitch_update_repr - reconfigure port representor
295
287
* @vsi: VF VSI for which port representor is configured
296
288
*/
297
289
void ice_eswitch_update_repr (struct ice_vsi * vsi )
@@ -420,47 +412,41 @@ ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
420
412
421
413
/**
422
414
* ice_eswitch_napi_del - remove NAPI handle for all port representors
423
- * @pf: pointer to PF structure
415
+ * @reprs: xarray of reprs
424
416
*/
425
- static void ice_eswitch_napi_del (struct ice_pf * pf )
417
+ static void ice_eswitch_napi_del (struct xarray * reprs )
426
418
{
427
- struct ice_vf * vf ;
428
- unsigned int bkt ;
429
-
430
- lockdep_assert_held (& pf -> vfs .table_lock );
419
+ struct ice_repr * repr ;
420
+ unsigned long id ;
431
421
432
- ice_for_each_vf ( pf , bkt , vf )
433
- netif_napi_del (& vf -> repr -> q_vector -> napi );
422
+ xa_for_each ( reprs , id , repr )
423
+ netif_napi_del (& repr -> q_vector -> napi );
434
424
}
435
425
436
426
/**
437
427
* ice_eswitch_napi_enable - enable NAPI for all port representors
438
- * @pf: pointer to PF structure
428
+ * @reprs: xarray of reprs
439
429
*/
440
- static void ice_eswitch_napi_enable (struct ice_pf * pf )
430
+ static void ice_eswitch_napi_enable (struct xarray * reprs )
441
431
{
442
- struct ice_vf * vf ;
443
- unsigned int bkt ;
444
-
445
- lockdep_assert_held (& pf -> vfs .table_lock );
432
+ struct ice_repr * repr ;
433
+ unsigned long id ;
446
434
447
- ice_for_each_vf ( pf , bkt , vf )
448
- napi_enable (& vf -> repr -> q_vector -> napi );
435
+ xa_for_each ( reprs , id , repr )
436
+ napi_enable (& repr -> q_vector -> napi );
449
437
}
450
438
451
439
/**
452
440
* ice_eswitch_napi_disable - disable NAPI for all port representors
453
- * @pf: pointer to PF structure
441
+ * @reprs: xarray of reprs
454
442
*/
455
- static void ice_eswitch_napi_disable (struct ice_pf * pf )
443
+ static void ice_eswitch_napi_disable (struct xarray * reprs )
456
444
{
457
- struct ice_vf * vf ;
458
- unsigned int bkt ;
459
-
460
- lockdep_assert_held (& pf -> vfs .table_lock );
445
+ struct ice_repr * repr ;
446
+ unsigned long id ;
461
447
462
- ice_for_each_vf ( pf , bkt , vf )
463
- napi_disable (& vf -> repr -> q_vector -> napi );
448
+ xa_for_each ( reprs , id , repr )
449
+ napi_disable (& repr -> q_vector -> napi );
464
450
}
465
451
466
452
/**
@@ -505,7 +491,7 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
505
491
if (ice_eswitch_br_offloads_init (pf ))
506
492
goto err_br_offloads ;
507
493
508
- ice_eswitch_napi_enable (pf );
494
+ ice_eswitch_napi_enable (& pf -> eswitch . reprs );
509
495
510
496
return 0 ;
511
497
@@ -528,7 +514,7 @@ static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
528
514
{
529
515
struct ice_vsi * ctrl_vsi = pf -> eswitch .control_vsi ;
530
516
531
- ice_eswitch_napi_disable (pf );
517
+ ice_eswitch_napi_disable (& pf -> eswitch . reprs );
532
518
ice_eswitch_br_offloads_deinit (pf );
533
519
ice_eswitch_release_env (pf );
534
520
ice_eswitch_release_reprs (pf );
@@ -561,6 +547,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
561
547
case DEVLINK_ESWITCH_MODE_LEGACY :
562
548
dev_info (ice_pf_to_dev (pf ), "PF %d changed eswitch mode to legacy" ,
563
549
pf -> hw .pf_id );
550
+ xa_destroy (& pf -> eswitch .reprs );
564
551
NL_SET_ERR_MSG_MOD (extack , "Changed eswitch mode to legacy" );
565
552
break ;
566
553
case DEVLINK_ESWITCH_MODE_SWITCHDEV :
@@ -573,6 +560,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
573
560
574
561
dev_info (ice_pf_to_dev (pf ), "PF %d changed eswitch mode to switchdev" ,
575
562
pf -> hw .pf_id );
563
+ xa_init_flags (& pf -> eswitch .reprs , XA_FLAGS_ALLOC );
576
564
NL_SET_ERR_MSG_MOD (extack , "Changed eswitch mode to switchdev" );
577
565
break ;
578
566
}
@@ -649,18 +637,14 @@ int ice_eswitch_configure(struct ice_pf *pf)
649
637
*/
650
638
static void ice_eswitch_start_all_tx_queues (struct ice_pf * pf )
651
639
{
652
- struct ice_vf * vf ;
653
- unsigned int bkt ;
654
-
655
- lockdep_assert_held (& pf -> vfs .table_lock );
640
+ struct ice_repr * repr ;
641
+ unsigned long id ;
656
642
657
643
if (test_bit (ICE_DOWN , pf -> state ))
658
644
return ;
659
645
660
- ice_for_each_vf (pf , bkt , vf ) {
661
- if (vf -> repr )
662
- ice_repr_start_tx_queues (vf -> repr );
663
- }
646
+ xa_for_each (& pf -> eswitch .reprs , id , repr )
647
+ ice_repr_start_tx_queues (repr );
664
648
}
665
649
666
650
/**
@@ -669,18 +653,14 @@ static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
669
653
*/
670
654
void ice_eswitch_stop_all_tx_queues (struct ice_pf * pf )
671
655
{
672
- struct ice_vf * vf ;
673
- unsigned int bkt ;
674
-
675
- lockdep_assert_held (& pf -> vfs .table_lock );
656
+ struct ice_repr * repr ;
657
+ unsigned long id ;
676
658
677
659
if (test_bit (ICE_DOWN , pf -> state ))
678
660
return ;
679
661
680
- ice_for_each_vf (pf , bkt , vf ) {
681
- if (vf -> repr )
682
- ice_repr_stop_tx_queues (vf -> repr );
683
- }
662
+ xa_for_each (& pf -> eswitch .reprs , id , repr )
663
+ ice_repr_stop_tx_queues (repr );
684
664
}
685
665
686
666
/**
@@ -692,8 +672,8 @@ int ice_eswitch_rebuild(struct ice_pf *pf)
692
672
struct ice_vsi * ctrl_vsi = pf -> eswitch .control_vsi ;
693
673
int status ;
694
674
695
- ice_eswitch_napi_disable (pf );
696
- ice_eswitch_napi_del (pf );
675
+ ice_eswitch_napi_disable (& pf -> eswitch . reprs );
676
+ ice_eswitch_napi_del (& pf -> eswitch . reprs );
697
677
698
678
status = ice_eswitch_setup_env (pf );
699
679
if (status )
@@ -711,7 +691,7 @@ int ice_eswitch_rebuild(struct ice_pf *pf)
711
691
if (status )
712
692
return status ;
713
693
714
- ice_eswitch_napi_enable (pf );
694
+ ice_eswitch_napi_enable (& pf -> eswitch . reprs );
715
695
ice_eswitch_start_all_tx_queues (pf );
716
696
717
697
return 0 ;
0 commit comments