
Commit 7ae0d51

tgupdate: merge t/upstream base into t/upstream

2 parents 16bc369 + a7de282

36 files changed: +619 -381 lines changed

Documentation/networking/net_cachelines/snmp.rst
Lines changed: 1 addition & 0 deletions

@@ -36,6 +36,7 @@ unsigned_long LINUX_MIB_TIMEWAITRECYCLED
 unsigned_long LINUX_MIB_TIMEWAITKILLED
 unsigned_long LINUX_MIB_PAWSACTIVEREJECTED
 unsigned_long LINUX_MIB_PAWSESTABREJECTED
+unsigned_long LINUX_MIB_TSECR_REJECTED
 unsigned_long LINUX_MIB_DELAYEDACKLOST
 unsigned_long LINUX_MIB_LISTENOVERFLOWS
 unsigned_long LINUX_MIB_LISTENDROPS

Documentation/networking/scaling.rst
Lines changed: 4 additions & 2 deletions

@@ -434,8 +434,10 @@ rps_dev_flow_table. The stack consults a CPU to hardware queue map which
 is maintained by the NIC driver. This is an auto-generated reverse map of
 the IRQ affinity table shown by /proc/interrupts. Drivers can use
 functions in the cpu_rmap (“CPU affinity reverse map”) kernel library
-to populate the map. For each CPU, the corresponding queue in the map is
-set to be one whose processing CPU is closest in cache locality.
+to populate the map. Alternatively, drivers can delegate the cpu_rmap
+management to the Kernel by calling netif_enable_cpu_rmap(). For each CPU,
+the corresponding queue in the map is set to be one whose processing CPU is
+closest in cache locality.
 
 
 Accelerated RFS Configuration
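
Note on the documentation change above: netif_enable_cpu_rmap() takes the net_device and the number of IO IRQs, as the ena and ice hunks below show. The following is a minimal, hypothetical driver sketch (foo_priv, foo_setup_cpu_rmap, and the field names are placeholders, not part of this commit; only the helper call and the warning text come from the diff; assumes <linux/netdevice.h>):

        /* Hypothetical driver snippet: delegate ARFS cpu_rmap handling to the core
         * instead of open-coding alloc_irq_cpu_rmap()/irq_cpu_rmap_add().
         */
        static void foo_setup_cpu_rmap(struct foo_priv *priv)
        {
                /* One rmap slot per IO queue; the core keeps the reverse map
                 * in sync with IRQ affinities on the driver's behalf.
                 */
                if (netif_enable_cpu_rmap(priv->netdev, priv->num_io_queues))
                        netdev_warn(priv->netdev, "Failed to map IRQs to CPUs\n");
        }

The matching driver-side free_irq_cpu_rmap() calls are dropped in the ena and ice hunks below, which suggests teardown is left to the core rather than to each driver.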

drivers/net/bonding/bond_main.c
Lines changed: 1 addition & 1 deletion

@@ -2548,7 +2548,7 @@ static int __bond_release_one(struct net_device *bond_dev,
 
        RCU_INIT_POINTER(bond->current_arp_slave, NULL);
 
-       if (!all && (!bond->params.fail_over_mac ||
+       if (!all && (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
                     BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
                if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
                    bond_has_slaves(bond))

drivers/net/ethernet/amazon/ena/ena_netdev.c
Lines changed: 1 addition & 42 deletions

@@ -5,9 +5,6 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#ifdef CONFIG_RFS_ACCEL
-#include <linux/cpu_rmap.h>
-#endif /* CONFIG_RFS_ACCEL */
 #include <linux/ethtool.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -162,30 +159,6 @@ int ena_xmit_common(struct ena_adapter *adapter,
        return 0;
 }
 
-static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
-{
-#ifdef CONFIG_RFS_ACCEL
-       u32 i;
-       int rc;
-
-       adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
-       if (!adapter->netdev->rx_cpu_rmap)
-               return -ENOMEM;
-       for (i = 0; i < adapter->num_io_queues; i++) {
-               int irq_idx = ENA_IO_IRQ_IDX(i);
-
-               rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
-                                     pci_irq_vector(adapter->pdev, irq_idx));
-               if (rc) {
-                       free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
-                       adapter->netdev->rx_cpu_rmap = NULL;
-                       return rc;
-               }
-       }
-#endif /* CONFIG_RFS_ACCEL */
-       return 0;
-}
-
 static void ena_init_io_rings_common(struct ena_adapter *adapter,
                                      struct ena_ring *ring, u16 qid)
 {
@@ -1596,7 +1569,7 @@ static int ena_enable_msix(struct ena_adapter *adapter)
                adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
        }
 
-       if (ena_init_rx_cpu_rmap(adapter))
+       if (netif_enable_cpu_rmap(adapter->netdev, adapter->num_io_queues))
                netif_warn(adapter, probe, adapter->netdev,
                           "Failed to map IRQs to CPUs\n");
 
@@ -1742,13 +1715,6 @@ static void ena_free_io_irq(struct ena_adapter *adapter)
        struct ena_irq *irq;
        int i;
 
-#ifdef CONFIG_RFS_ACCEL
-       if (adapter->msix_vecs >= 1) {
-               free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
-               adapter->netdev->rx_cpu_rmap = NULL;
-       }
-#endif /* CONFIG_RFS_ACCEL */
-
        for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
                irq = &adapter->irq_tbl[i];
                irq_set_affinity_hint(irq->vector, NULL);
@@ -4131,13 +4097,6 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
        ena_dev = adapter->ena_dev;
        netdev = adapter->netdev;
 
-#ifdef CONFIG_RFS_ACCEL
-       if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
-               free_irq_cpu_rmap(netdev->rx_cpu_rmap);
-               netdev->rx_cpu_rmap = NULL;
-       }
-
-#endif /* CONFIG_RFS_ACCEL */
        /* Make sure timer and reset routine won't be called after
         * freeing device resources.
         */

drivers/net/ethernet/engleder/tsnep_main.c
Lines changed: 2 additions & 2 deletions

@@ -852,8 +852,8 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
                        struct skb_shared_hwtstamps hwtstamps;
                        u64 timestamp;
 
-                       if (skb_shinfo(entry->skb)->tx_flags &
-                           SKBTX_HW_TSTAMP_USE_CYCLES)
+                       if (entry->skb->sk &&
+                           READ_ONCE(entry->skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC)
                                timestamp =
                                        __le64_to_cpu(entry->desc_wb->counter);
                        else

drivers/net/ethernet/intel/ice/ice.h
Lines changed: 0 additions & 3 deletions

@@ -475,9 +475,6 @@ struct ice_q_vector {
        struct ice_ring_container rx;
        struct ice_ring_container tx;
 
-       cpumask_t affinity_mask;
-       struct irq_affinity_notify affinity_notify;
-
        struct ice_channel *ch;
 
        char name[ICE_INT_NAME_STR_LEN];

drivers/net/ethernet/intel/ice/ice_arfs.c
Lines changed: 1 addition & 32 deletions

@@ -570,25 +570,6 @@ void ice_clear_arfs(struct ice_vsi *vsi)
        vsi->arfs_fltr_cntrs = NULL;
 }
 
-/**
- * ice_free_cpu_rx_rmap - free setup CPU reverse map
- * @vsi: the VSI to be forwarded to
- */
-void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
-{
-       struct net_device *netdev;
-
-       if (!vsi || vsi->type != ICE_VSI_PF)
-               return;
-
-       netdev = vsi->netdev;
-       if (!netdev || !netdev->rx_cpu_rmap)
-               return;
-
-       free_irq_cpu_rmap(netdev->rx_cpu_rmap);
-       netdev->rx_cpu_rmap = NULL;
-}
-
 /**
  * ice_set_cpu_rx_rmap - setup CPU reverse map for each queue
  * @vsi: the VSI to be forwarded to
@@ -597,7 +578,6 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
 {
        struct net_device *netdev;
        struct ice_pf *pf;
-       int i;
 
        if (!vsi || vsi->type != ICE_VSI_PF)
                return 0;
@@ -610,18 +590,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
        netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
                   vsi->type, netdev->name, vsi->num_q_vectors);
 
-       netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
-       if (unlikely(!netdev->rx_cpu_rmap))
-               return -EINVAL;
-
-       ice_for_each_q_vector(vsi, i)
-               if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
-                                    vsi->q_vectors[i]->irq.virq)) {
-                       ice_free_cpu_rx_rmap(vsi);
-                       return -EINVAL;
-               }
-
-       return 0;
+       return netif_enable_cpu_rmap(netdev, vsi->num_q_vectors);
 }
 
 /**

drivers/net/ethernet/intel/ice/ice_arfs.h
Lines changed: 0 additions & 2 deletions

@@ -45,7 +45,6 @@ int
 ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
                   u16 rxq_idx, u32 flow_id);
 void ice_clear_arfs(struct ice_vsi *vsi);
-void ice_free_cpu_rx_rmap(struct ice_vsi *vsi);
 void ice_init_arfs(struct ice_vsi *vsi);
 void ice_sync_arfs_fltrs(struct ice_pf *pf);
 int ice_set_cpu_rx_rmap(struct ice_vsi *vsi);
@@ -56,7 +55,6 @@ ice_is_arfs_using_perfect_flow(struct ice_hw *hw,
                                enum ice_fltr_ptype flow_type);
 #else
 static inline void ice_clear_arfs(struct ice_vsi *vsi) { }
-static inline void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) { }
 static inline void ice_init_arfs(struct ice_vsi *vsi) { }
 static inline void ice_sync_arfs_fltrs(struct ice_pf *pf) { }
 static inline void ice_remove_arfs(struct ice_pf *pf) { }

drivers/net/ethernet/intel/ice/ice_base.c
Lines changed: 2 additions & 5 deletions

@@ -147,10 +147,6 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
        q_vector->reg_idx = q_vector->irq.index;
        q_vector->vf_reg_idx = q_vector->irq.index;
 
-       /* only set affinity_mask if the CPU is online */
-       if (cpu_online(v_idx))
-               cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
-
        /* This will not be called in the driver load path because the netdev
         * will not be created yet. All other cases with register the NAPI
         * handler here (i.e. resume, reset/rebuild, etc.)
@@ -276,7 +272,8 @@ static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
        if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
                return;
 
-       netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
+       netif_set_xps_queue(ring->netdev,
+                           &ring->q_vector->napi.config->affinity_mask,
                            ring->q_index);
 }
 

drivers/net/ethernet/intel/ice/ice_lib.c
Lines changed: 8 additions & 8 deletions

@@ -2592,7 +2592,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
                return;
 
        vsi->irqs_ready = false;
-       ice_free_cpu_rx_rmap(vsi);
 
        ice_for_each_q_vector(vsi, i) {
                int irq_num;
@@ -2605,12 +2604,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
                              vsi->q_vectors[i]->num_ring_rx))
                        continue;
 
-               /* clear the affinity notifier in the IRQ descriptor */
-               if (!IS_ENABLED(CONFIG_RFS_ACCEL))
-                       irq_set_affinity_notifier(irq_num, NULL);
-
-               /* clear the affinity_hint in the IRQ descriptor */
-               irq_update_affinity_hint(irq_num, NULL);
                synchronize_irq(irq_num);
                devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
        }
@@ -2765,11 +2758,18 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
 void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
 {
        struct net_device *netdev = vsi->netdev;
-       int q_idx;
+       int q_idx, v_idx;
 
        if (!netdev)
                return;
 
+       /* Clear the NAPI's interrupt number */
+       ice_for_each_q_vector(vsi, v_idx) {
+               struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+
+               netif_napi_set_irq(&q_vector->napi, -1);
+       }
+
        ice_for_each_txq(vsi, q_idx)
                netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL);
 