Skip to content

Commit 5899c88

Browse files
committed
Merge branch 'intel-wired-lan-driver-updates-2024-05-29-ice-igc'
Jacob Keller says: ==================== Intel Wired LAN Driver Updates 2024-05-29 (ice, igc) This series includes fixes for the ice driver as well as a fix for the igc driver. Jacob fixes two issues in the ice driver with reading the NVM for providing firmware data via devlink info. First, fix an off-by-one error when reading the Preserved Fields Area, resolving an infinite loop triggered on some NVMs which lack certain data in the NVM. Second, fix the reading of the NVM Shadow RAM on newer E830 and E825-C devices which have a variable sized CSS header rather than assuming this header is always the same fixed size as in the E810 devices. Larysa fixes three issues with the ice driver XDP logic that could occur if the number of queues is changed after enabling an XDP program. First, the af_xdp_zc_qps bitmap is removed and replaced by simpler logic to track whether queues are in zero-copy mode. Second, the reset and .ndo_bpf flows are distinguished to avoid potential races with a PF reset occurring simultaneously with the .ndo_bpf callback from userspace. Third, the logic for mapping XDP queues to vectors is fixed so that XDP state is restored for XDP queues after a reconfiguration. Sasha fixes reporting of Energy Efficient Ethernet support via ethtool in the igc driver. v1: https://lore.kernel.org/r/20240530-net-2024-05-30-intel-net-fixes-v1-0-8b11c8c9bff8@intel.com ==================== Link: https://lore.kernel.org/r/20240603-net-2024-05-30-intel-net-fixes-v2-0-e3563aa89b0c@intel.com Signed-off-by: Jakub Kicinski <[email protected]>
2 parents 886bf91 + 7d67d11 commit 5899c88

File tree

9 files changed

+244
-104
lines changed

9 files changed

+244
-104
lines changed

drivers/net/ethernet/intel/ice/ice.h

Lines changed: 31 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -409,7 +409,6 @@ struct ice_vsi {
409409
struct ice_tc_cfg tc_cfg;
410410
struct bpf_prog *xdp_prog;
411411
struct ice_tx_ring **xdp_rings; /* XDP ring array */
412-
unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
413412
u16 num_xdp_txq; /* Used XDP queues */
414413
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
415414

@@ -746,6 +745,25 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
746745
ring->flags |= ICE_TX_FLAGS_RING_XDP;
747746
}
748747

748+
/**
749+
* ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
750+
* @vsi: pointer to VSI
751+
* @qid: index of a queue to look at XSK buff pool presence
752+
*
753+
* Return: A pointer to xsk_buff_pool structure if there is a buffer pool
754+
* attached and configured as zero-copy, NULL otherwise.
755+
*/
756+
static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
757+
u16 qid)
758+
{
759+
struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
760+
761+
if (!ice_is_xdp_ena_vsi(vsi))
762+
return NULL;
763+
764+
return (pool && pool->dev) ? pool : NULL;
765+
}
766+
749767
/**
750768
* ice_xsk_pool - get XSK buffer pool bound to a ring
751769
* @ring: Rx ring to use
@@ -758,10 +776,7 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
758776
struct ice_vsi *vsi = ring->vsi;
759777
u16 qid = ring->q_index;
760778

761-
if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
762-
return NULL;
763-
764-
return xsk_get_pool_from_qid(vsi->netdev, qid);
779+
return ice_get_xp_from_qid(vsi, qid);
765780
}
766781

767782
/**
@@ -786,12 +801,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
786801
if (!ring)
787802
return;
788803

789-
if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
790-
ring->xsk_pool = NULL;
791-
return;
792-
}
793-
794-
ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
804+
ring->xsk_pool = ice_get_xp_from_qid(vsi, qid);
795805
}
796806

797807
/**
@@ -920,9 +930,17 @@ int ice_down(struct ice_vsi *vsi);
920930
int ice_down_up(struct ice_vsi *vsi);
921931
int ice_vsi_cfg_lan(struct ice_vsi *vsi);
922932
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
933+
934+
enum ice_xdp_cfg {
935+
ICE_XDP_CFG_FULL, /* Fully apply new config in .ndo_bpf() */
936+
ICE_XDP_CFG_PART, /* Save/use part of config in VSI rebuild */
937+
};
938+
923939
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
924-
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
925-
int ice_destroy_xdp_rings(struct ice_vsi *vsi);
940+
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
941+
enum ice_xdp_cfg cfg_type);
942+
int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
943+
void ice_map_xdp_rings(struct ice_vsi *vsi);
926944
int
927945
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
928946
u32 flags);

drivers/net/ethernet/intel/ice/ice_base.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -842,6 +842,9 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
842842
}
843843
rx_rings_rem -= rx_rings_per_v;
844844
}
845+
846+
if (ice_is_xdp_ena_vsi(vsi))
847+
ice_map_xdp_rings(vsi);
845848
}
846849

847850
/**

drivers/net/ethernet/intel/ice/ice_lib.c

Lines changed: 10 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -114,14 +114,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
114114
if (!vsi->q_vectors)
115115
goto err_vectors;
116116

117-
vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
118-
if (!vsi->af_xdp_zc_qps)
119-
goto err_zc_qps;
120-
121117
return 0;
122118

123-
err_zc_qps:
124-
devm_kfree(dev, vsi->q_vectors);
125119
err_vectors:
126120
devm_kfree(dev, vsi->rxq_map);
127121
err_rxq_map:
@@ -309,8 +303,6 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
309303

310304
dev = ice_pf_to_dev(pf);
311305

312-
bitmap_free(vsi->af_xdp_zc_qps);
313-
vsi->af_xdp_zc_qps = NULL;
314306
/* free the ring and vector containers */
315307
devm_kfree(dev, vsi->q_vectors);
316308
vsi->q_vectors = NULL;
@@ -2282,22 +2274,23 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
22822274
if (ret)
22832275
goto unroll_vector_base;
22842276

2285-
ice_vsi_map_rings_to_vectors(vsi);
2286-
2287-
/* Associate q_vector rings to napi */
2288-
ice_vsi_set_napi_queues(vsi);
2289-
2290-
vsi->stat_offsets_loaded = false;
2291-
22922277
if (ice_is_xdp_ena_vsi(vsi)) {
22932278
ret = ice_vsi_determine_xdp_res(vsi);
22942279
if (ret)
22952280
goto unroll_vector_base;
2296-
ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
2281+
ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
2282+
ICE_XDP_CFG_PART);
22972283
if (ret)
22982284
goto unroll_vector_base;
22992285
}
23002286

2287+
ice_vsi_map_rings_to_vectors(vsi);
2288+
2289+
/* Associate q_vector rings to napi */
2290+
ice_vsi_set_napi_queues(vsi);
2291+
2292+
vsi->stat_offsets_loaded = false;
2293+
23012294
/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
23022295
if (vsi->type != ICE_VSI_CTRL)
23032296
/* Do not exit if configuring RSS had an issue, at
@@ -2437,7 +2430,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
24372430
/* return value check can be skipped here, it always returns
24382431
* 0 if reset is in progress
24392432
*/
2440-
ice_destroy_xdp_rings(vsi);
2433+
ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
24412434

24422435
ice_vsi_clear_rings(vsi);
24432436
ice_vsi_free_q_vectors(vsi);

drivers/net/ethernet/intel/ice/ice_main.c

Lines changed: 69 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -2707,17 +2707,72 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
27072707
bpf_prog_put(old_prog);
27082708
}
27092709

2710+
static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
2711+
{
2712+
struct ice_q_vector *q_vector;
2713+
struct ice_tx_ring *ring;
2714+
2715+
if (static_key_enabled(&ice_xdp_locking_key))
2716+
return vsi->xdp_rings[qid % vsi->num_xdp_txq];
2717+
2718+
q_vector = vsi->rx_rings[qid]->q_vector;
2719+
ice_for_each_tx_ring(ring, q_vector->tx)
2720+
if (ice_ring_is_xdp(ring))
2721+
return ring;
2722+
2723+
return NULL;
2724+
}
2725+
2726+
/**
2727+
* ice_map_xdp_rings - Map XDP rings to interrupt vectors
2728+
* @vsi: the VSI with XDP rings being configured
2729+
*
2730+
* Map XDP rings to interrupt vectors and perform the configuration steps
2731+
* dependent on the mapping.
2732+
*/
2733+
void ice_map_xdp_rings(struct ice_vsi *vsi)
2734+
{
2735+
int xdp_rings_rem = vsi->num_xdp_txq;
2736+
int v_idx, q_idx;
2737+
2738+
/* follow the logic from ice_vsi_map_rings_to_vectors */
2739+
ice_for_each_q_vector(vsi, v_idx) {
2740+
struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2741+
int xdp_rings_per_v, q_id, q_base;
2742+
2743+
xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2744+
vsi->num_q_vectors - v_idx);
2745+
q_base = vsi->num_xdp_txq - xdp_rings_rem;
2746+
2747+
for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2748+
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2749+
2750+
xdp_ring->q_vector = q_vector;
2751+
xdp_ring->next = q_vector->tx.tx_ring;
2752+
q_vector->tx.tx_ring = xdp_ring;
2753+
}
2754+
xdp_rings_rem -= xdp_rings_per_v;
2755+
}
2756+
2757+
ice_for_each_rxq(vsi, q_idx) {
2758+
vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
2759+
q_idx);
2760+
ice_tx_xsk_pool(vsi, q_idx);
2761+
}
2762+
}
2763+
27102764
/**
27112765
* ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
27122766
* @vsi: VSI to bring up Tx rings used by XDP
27132767
* @prog: bpf program that will be assigned to VSI
2768+
* @cfg_type: create from scratch or restore the existing configuration
27142769
*
27152770
* Return 0 on success and negative value on error
27162771
*/
2717-
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2772+
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
2773+
enum ice_xdp_cfg cfg_type)
27182774
{
27192775
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2720-
int xdp_rings_rem = vsi->num_xdp_txq;
27212776
struct ice_pf *pf = vsi->back;
27222777
struct ice_qs_cfg xdp_qs_cfg = {
27232778
.qs_mutex = &pf->avail_q_mutex,
@@ -2730,8 +2785,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
27302785
.mapping_mode = ICE_VSI_MAP_CONTIG
27312786
};
27322787
struct device *dev;
2733-
int i, v_idx;
2734-
int status;
2788+
int status, i;
27352789

27362790
dev = ice_pf_to_dev(pf);
27372791
vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
@@ -2750,49 +2804,15 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
27502804
if (ice_xdp_alloc_setup_rings(vsi))
27512805
goto clear_xdp_rings;
27522806

2753-
/* follow the logic from ice_vsi_map_rings_to_vectors */
2754-
ice_for_each_q_vector(vsi, v_idx) {
2755-
struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2756-
int xdp_rings_per_v, q_id, q_base;
2757-
2758-
xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2759-
vsi->num_q_vectors - v_idx);
2760-
q_base = vsi->num_xdp_txq - xdp_rings_rem;
2761-
2762-
for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2763-
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2764-
2765-
xdp_ring->q_vector = q_vector;
2766-
xdp_ring->next = q_vector->tx.tx_ring;
2767-
q_vector->tx.tx_ring = xdp_ring;
2768-
}
2769-
xdp_rings_rem -= xdp_rings_per_v;
2770-
}
2771-
2772-
ice_for_each_rxq(vsi, i) {
2773-
if (static_key_enabled(&ice_xdp_locking_key)) {
2774-
vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
2775-
} else {
2776-
struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
2777-
struct ice_tx_ring *ring;
2778-
2779-
ice_for_each_tx_ring(ring, q_vector->tx) {
2780-
if (ice_ring_is_xdp(ring)) {
2781-
vsi->rx_rings[i]->xdp_ring = ring;
2782-
break;
2783-
}
2784-
}
2785-
}
2786-
ice_tx_xsk_pool(vsi, i);
2787-
}
2788-
27892807
/* omit the scheduler update if in reset path; XDP queues will be
27902808
* taken into account at the end of ice_vsi_rebuild, where
27912809
* ice_cfg_vsi_lan is being called
27922810
*/
2793-
if (ice_is_reset_in_progress(pf->state))
2811+
if (cfg_type == ICE_XDP_CFG_PART)
27942812
return 0;
27952813

2814+
ice_map_xdp_rings(vsi);
2815+
27962816
/* tell the Tx scheduler that right now we have
27972817
* additional queues
27982818
*/
@@ -2842,22 +2862,21 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
28422862
/**
28432863
* ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
28442864
* @vsi: VSI to remove XDP rings
2865+
* @cfg_type: disable XDP permanently or allow it to be restored later
28452866
*
28462867
* Detach XDP rings from irq vectors, clean up the PF bitmap and free
28472868
* resources
28482869
*/
2849-
int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2870+
int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
28502871
{
28512872
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
28522873
struct ice_pf *pf = vsi->back;
28532874
int i, v_idx;
28542875

28552876
/* q_vectors are freed in reset path so there's no point in detaching
2856-
* rings; in case of rebuild being triggered not from reset bits
2857-
* in pf->state won't be set, so additionally check first q_vector
2858-
* against NULL
2877+
* rings
28592878
*/
2860-
if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2879+
if (cfg_type == ICE_XDP_CFG_PART)
28612880
goto free_qmap;
28622881

28632882
ice_for_each_q_vector(vsi, v_idx) {
@@ -2898,7 +2917,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
28982917
if (static_key_enabled(&ice_xdp_locking_key))
28992918
static_branch_dec(&ice_xdp_locking_key);
29002919

2901-
if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2920+
if (cfg_type == ICE_XDP_CFG_PART)
29022921
return 0;
29032922

29042923
ice_vsi_assign_bpf_prog(vsi, NULL);
@@ -3009,7 +3028,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
30093028
if (xdp_ring_err) {
30103029
NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
30113030
} else {
3012-
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
3031+
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
3032+
ICE_XDP_CFG_FULL);
30133033
if (xdp_ring_err)
30143034
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
30153035
}
@@ -3020,7 +3040,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
30203040
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
30213041
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
30223042
xdp_features_clear_redirect_target(vsi->netdev);
3023-
xdp_ring_err = ice_destroy_xdp_rings(vsi);
3043+
xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
30243044
if (xdp_ring_err)
30253045
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
30263046
/* reallocate Rx queues that were used for zero-copy */

0 commit comments

Comments
 (0)