Commit f3df404

walking-machine authored and kuba-moo committed
ice: map XDP queues to vectors in ice_vsi_map_rings_to_vectors()
ice_pf_dcb_recfg() re-maps queues to vectors with ice_vsi_map_rings_to_vectors(), which does not restore the previous state for XDP queues. This leads to no AF_XDP traffic after rebuild.

Map XDP queues to vectors in ice_vsi_map_rings_to_vectors(). Also, move the code around, so XDP queues are mapped independently only through .ndo_bpf().

Fixes: 6624e78 ("ice: split ice_vsi_setup into smaller functions")
Reviewed-by: Przemek Kitszel <[email protected]>
Signed-off-by: Larysa Zaremba <[email protected]>
Reviewed-by: Simon Horman <[email protected]>
Tested-by: Chandan Kumar Rout <[email protected]>
Signed-off-by: Jacob Keller <[email protected]>
Link: https://lore.kernel.org/r/20240603-net-2024-05-30-intel-net-fixes-v2-5-e3563aa89b0c@intel.com
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 744d197 commit f3df404
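
For reference, the queue-to-vector distribution performed by the new ice_map_xdp_rings() (see the ice_main.c hunk below) spreads the XDP Tx queues as evenly as possible across the VSI's interrupt vectors. The following is a minimal standalone sketch of just that arithmetic, using hypothetical queue and vector counts, not the driver code itself:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		int num_q_vectors = 3;           /* hypothetical vector count  */
		int num_xdp_txq = 8;             /* hypothetical XDP Tx queues */
		int xdp_rings_rem = num_xdp_txq;

		for (int v_idx = 0; v_idx < num_q_vectors; v_idx++) {
			/* hand the remaining rings to the remaining vectors as
			 * evenly as possible: 8 rings over 3 vectors -> 3, 3, 2
			 */
			int per_v = DIV_ROUND_UP(xdp_rings_rem, num_q_vectors - v_idx);
			int q_base = num_xdp_txq - xdp_rings_rem;

			for (int q_id = q_base; q_id < q_base + per_v; q_id++)
				printf("xdp_rings[%d] -> q_vector %d\n", q_id, v_idx);

			xdp_rings_rem -= per_v;
		}
		return 0;
	}

Because this mapping now also runs from ice_vsi_map_rings_to_vectors(), a DCB-triggered remap via ice_pf_dcb_recfg() re-establishes the XDP queue assignments instead of silently dropping them.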

File tree: 4 files changed (+68, -46 lines changed)

drivers/net/ethernet/intel/ice/ice.h

Lines changed: 1 addition & 0 deletions
@@ -940,6 +940,7 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
			  enum ice_xdp_cfg cfg_type);
 int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
+void ice_map_xdp_rings(struct ice_vsi *vsi);
 int
 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags);

drivers/net/ethernet/intel/ice/ice_base.c

Lines changed: 3 additions & 0 deletions
@@ -842,6 +842,9 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
 		}
 		rx_rings_rem -= rx_rings_per_v;
 	}
+
+	if (ice_is_xdp_ena_vsi(vsi))
+		ice_map_xdp_rings(vsi);
 }
 
 /**

drivers/net/ethernet/intel/ice/ice_lib.c

Lines changed: 7 additions & 7 deletions
@@ -2274,13 +2274,6 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
 	if (ret)
 		goto unroll_vector_base;
 
-	ice_vsi_map_rings_to_vectors(vsi);
-
-	/* Associate q_vector rings to napi */
-	ice_vsi_set_napi_queues(vsi);
-
-	vsi->stat_offsets_loaded = false;
-
 	if (ice_is_xdp_ena_vsi(vsi)) {
 		ret = ice_vsi_determine_xdp_res(vsi);
 		if (ret)
@@ -2291,6 +2284,13 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
 			goto unroll_vector_base;
 	}
 
+	ice_vsi_map_rings_to_vectors(vsi);
+
+	/* Associate q_vector rings to napi */
+	ice_vsi_set_napi_queues(vsi);
+
+	vsi->stat_offsets_loaded = false;
+
 	/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
 	if (vsi->type != ICE_VSI_CTRL)
 		/* Do not exit if configuring RSS had an issue, at

drivers/net/ethernet/intel/ice/ice_main.c

Lines changed: 57 additions & 39 deletions
@@ -2707,6 +2707,60 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
 	bpf_prog_put(old_prog);
 }
 
+static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
+{
+	struct ice_q_vector *q_vector;
+	struct ice_tx_ring *ring;
+
+	if (static_key_enabled(&ice_xdp_locking_key))
+		return vsi->xdp_rings[qid % vsi->num_xdp_txq];
+
+	q_vector = vsi->rx_rings[qid]->q_vector;
+	ice_for_each_tx_ring(ring, q_vector->tx)
+		if (ice_ring_is_xdp(ring))
+			return ring;
+
+	return NULL;
+}
+
+/**
+ * ice_map_xdp_rings - Map XDP rings to interrupt vectors
+ * @vsi: the VSI with XDP rings being configured
+ *
+ * Map XDP rings to interrupt vectors and perform the configuration steps
+ * dependent on the mapping.
+ */
+void ice_map_xdp_rings(struct ice_vsi *vsi)
+{
+	int xdp_rings_rem = vsi->num_xdp_txq;
+	int v_idx, q_idx;
+
+	/* follow the logic from ice_vsi_map_rings_to_vectors */
+	ice_for_each_q_vector(vsi, v_idx) {
+		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+		int xdp_rings_per_v, q_id, q_base;
+
+		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+					       vsi->num_q_vectors - v_idx);
+		q_base = vsi->num_xdp_txq - xdp_rings_rem;
+
+		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
+
+			xdp_ring->q_vector = q_vector;
+			xdp_ring->next = q_vector->tx.tx_ring;
+			q_vector->tx.tx_ring = xdp_ring;
+		}
+		xdp_rings_rem -= xdp_rings_per_v;
+	}
+
+	ice_for_each_rxq(vsi, q_idx) {
+		vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
+								       q_idx);
+		ice_tx_xsk_pool(vsi, q_idx);
+	}
+}
+
 /**
  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
  * @vsi: VSI to bring up Tx rings used by XDP
@@ -2719,7 +2773,6 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
			  enum ice_xdp_cfg cfg_type)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
-	int xdp_rings_rem = vsi->num_xdp_txq;
 	struct ice_pf *pf = vsi->back;
 	struct ice_qs_cfg xdp_qs_cfg = {
 		.qs_mutex = &pf->avail_q_mutex,
@@ -2732,8 +2785,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
 		.mapping_mode = ICE_VSI_MAP_CONTIG
 	};
 	struct device *dev;
-	int i, v_idx;
-	int status;
+	int status, i;
 
 	dev = ice_pf_to_dev(pf);
 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
@@ -2752,49 +2804,15 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
 	if (ice_xdp_alloc_setup_rings(vsi))
 		goto clear_xdp_rings;
 
-	/* follow the logic from ice_vsi_map_rings_to_vectors */
-	ice_for_each_q_vector(vsi, v_idx) {
-		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
-		int xdp_rings_per_v, q_id, q_base;
-
-		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
-					       vsi->num_q_vectors - v_idx);
-		q_base = vsi->num_xdp_txq - xdp_rings_rem;
-
-		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
-			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
-
-			xdp_ring->q_vector = q_vector;
-			xdp_ring->next = q_vector->tx.tx_ring;
-			q_vector->tx.tx_ring = xdp_ring;
-		}
-		xdp_rings_rem -= xdp_rings_per_v;
-	}
-
-	ice_for_each_rxq(vsi, i) {
-		if (static_key_enabled(&ice_xdp_locking_key)) {
-			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
-		} else {
-			struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
-			struct ice_tx_ring *ring;
-
-			ice_for_each_tx_ring(ring, q_vector->tx) {
-				if (ice_ring_is_xdp(ring)) {
-					vsi->rx_rings[i]->xdp_ring = ring;
-					break;
-				}
-			}
-		}
-		ice_tx_xsk_pool(vsi, i);
-	}
-
 	/* omit the scheduler update if in reset path; XDP queues will be
 	 * taken into account at the end of ice_vsi_rebuild, where
 	 * ice_cfg_vsi_lan is being called
 	 */
 	if (cfg_type == ICE_XDP_CFG_PART)
 		return 0;
 
+	ice_map_xdp_rings(vsi);
+
 	/* tell the Tx scheduler that right now we have
 	 * additional queues
 	 */
