@@ -2707,6 +2707,60 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
	bpf_prog_put(old_prog);
}

+static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
+{
+	struct ice_q_vector *q_vector;
+	struct ice_tx_ring *ring;
+
+	if (static_key_enabled(&ice_xdp_locking_key))
+		return vsi->xdp_rings[qid % vsi->num_xdp_txq];
+
+	q_vector = vsi->rx_rings[qid]->q_vector;
+	ice_for_each_tx_ring(ring, q_vector->tx)
+		if (ice_ring_is_xdp(ring))
+			return ring;
+
+	return NULL;
+}
+
+/**
+ * ice_map_xdp_rings - Map XDP rings to interrupt vectors
+ * @vsi: the VSI with XDP rings being configured
+ *
+ * Map XDP rings to interrupt vectors and perform the configuration steps
+ * dependent on the mapping.
+ */
+void ice_map_xdp_rings(struct ice_vsi *vsi)
+{
+	int xdp_rings_rem = vsi->num_xdp_txq;
+	int v_idx, q_idx;
+
+	/* follow the logic from ice_vsi_map_rings_to_vectors */
+	ice_for_each_q_vector(vsi, v_idx) {
+		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+		int xdp_rings_per_v, q_id, q_base;
+
+		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+					       vsi->num_q_vectors - v_idx);
+		q_base = vsi->num_xdp_txq - xdp_rings_rem;
+
+		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
+
+			xdp_ring->q_vector = q_vector;
+			xdp_ring->next = q_vector->tx.tx_ring;
+			q_vector->tx.tx_ring = xdp_ring;
+		}
+		xdp_rings_rem -= xdp_rings_per_v;
+	}
+
+	ice_for_each_rxq(vsi, q_idx) {
+		vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
+								       q_idx);
+		ice_tx_xsk_pool(vsi, q_idx);
+	}
+}
+

/**
 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
 * @vsi: VSI to bring up Tx rings used by XDP
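[Aside: the hunk above adds two helpers. As a quick standalone illustration, not driver code, here is the qid % num_xdp_txq sharing that ice_xdp_ring_from_qid() uses when ice_xdp_locking_key is enabled, i.e. when there are fewer XDP Tx rings than Rx queues; the queue and ring counts below are made-up example values:

#include <stdio.h>

int main(void)
{
	int num_rxq = 8;      /* example Rx queue count */
	int num_xdp_txq = 3;  /* example XDP Tx ring count, fewer than Rx */
	int qid;

	/* several Rx queues land on the same XDP Tx ring */
	for (qid = 0; qid < num_rxq; qid++)
		printf("rx queue %d -> xdp ring %d\n", qid, qid % num_xdp_txq);
	return 0;
}

Because multiple Rx queues, and hence potentially multiple CPUs, share one Tx ring in this mode, transmissions on it need locking; with enough rings, the helper instead walks the Rx queue's own vector to find its dedicated XDP ring.]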
@@ -2719,7 +2773,6 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
			  enum ice_xdp_cfg cfg_type)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
-	int xdp_rings_rem = vsi->num_xdp_txq;
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg xdp_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
@@ -2732,8 +2785,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	struct device *dev;
-	int i, v_idx;
-	int status;
+	int status, i;

	dev = ice_pf_to_dev(pf);
	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
@@ -2752,49 +2804,15 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
	if (ice_xdp_alloc_setup_rings(vsi))
		goto clear_xdp_rings;

-	/* follow the logic from ice_vsi_map_rings_to_vectors */
-	ice_for_each_q_vector(vsi, v_idx) {
-		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
-		int xdp_rings_per_v, q_id, q_base;
-
-		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
-					       vsi->num_q_vectors - v_idx);
-		q_base = vsi->num_xdp_txq - xdp_rings_rem;
-
-		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
-			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
-
-			xdp_ring->q_vector = q_vector;
-			xdp_ring->next = q_vector->tx.tx_ring;
-			q_vector->tx.tx_ring = xdp_ring;
-		}
-		xdp_rings_rem -= xdp_rings_per_v;
-	}
-
-	ice_for_each_rxq(vsi, i) {
-		if (static_key_enabled(&ice_xdp_locking_key)) {
-			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
-		} else {
-			struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
-			struct ice_tx_ring *ring;
-
-			ice_for_each_tx_ring(ring, q_vector->tx) {
-				if (ice_ring_is_xdp(ring)) {
-					vsi->rx_rings[i]->xdp_ring = ring;
-					break;
-				}
-			}
-		}
-		ice_tx_xsk_pool(vsi, i);
-	}
-
	/* omit the scheduler update if in reset path; XDP queues will be
	 * taken into account at the end of ice_vsi_rebuild, where
	 * ice_cfg_vsi_lan is being called
	 */
	if (cfg_type == ICE_XDP_CFG_PART)
		return 0;

+	ice_map_xdp_rings(vsi);
+
	/* tell the Tx scheduler that right now we have
	 * additional queues
	 */
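[Aside: for readers unfamiliar with the distribution scheme, here is a minimal standalone sketch, plain userspace C rather than driver code, of the arithmetic ice_map_xdp_rings() uses to spread XDP Tx rings across interrupt vectors. The ring and vector counts are made-up example values, and DIV_ROUND_UP is redefined locally to mirror the kernel macro. Applying it to the *remaining* rings and vectors at each step hands out contiguous, nearly even blocks:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int num_xdp_txq = 10;  /* example XDP Tx ring count */
	int num_q_vectors = 4; /* example interrupt vector count */
	int xdp_rings_rem = num_xdp_txq;
	int v_idx;

	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		/* ceil(remaining rings / remaining vectors) per vector */
		int per_v = DIV_ROUND_UP(xdp_rings_rem,
					 num_q_vectors - v_idx);
		int q_base = num_xdp_txq - xdp_rings_rem;

		printf("vector %d gets rings [%d..%d]\n",
		       v_idx, q_base, q_base + per_v - 1);
		xdp_rings_rem -= per_v;
	}
	return 0;
}

With 10 rings over 4 vectors this prints blocks of 3/3/2/2, matching the contiguous mapping the driver builds before ice_xdp_ring_from_qid() resolves each Rx queue's Tx ring.]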