Skip to content

Commit bd475ee

Browse files
committed
Merge branch '200GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue
Tony Nguyen says: ==================== Intel Wired LAN Driver Updates 2025-07-01 (idpf, igc) For idpf: Michal returns 0 for key size when RSS is not supported. Ahmed changes control queue to a spinlock due to sleeping calls. For igc: Vitaly disables L1.2 PCI-E link substate on I226 devices to resolve performance issues. * '200GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue: igc: disable L1.2 PCI-E link substate to avoid performance issue idpf: convert control queue mutex to a spinlock idpf: return 0 size for RSS key if not supported ==================== Link: https://patch.msgid.link/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
2 parents c2a2ff6 + 0325143 commit bd475ee

File tree

5 files changed

+32
-19
lines changed

5 files changed

+32
-19
lines changed

drivers/net/ethernet/intel/idpf/idpf_controlq.c

Lines changed: 11 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -96,16 +96,15 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
9696
*/
9797
static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
9898
{
99-
mutex_lock(&cq->cq_lock);
99+
spin_lock(&cq->cq_lock);
100100

101101
/* free ring buffers and the ring itself */
102102
idpf_ctlq_dealloc_ring_res(hw, cq);
103103

104104
/* Set ring_size to 0 to indicate uninitialized queue */
105105
cq->ring_size = 0;
106106

107-
mutex_unlock(&cq->cq_lock);
108-
mutex_destroy(&cq->cq_lock);
107+
spin_unlock(&cq->cq_lock);
109108
}
110109

111110
/**
@@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
173172

174173
idpf_ctlq_init_regs(hw, cq, is_rxq);
175174

176-
mutex_init(&cq->cq_lock);
175+
spin_lock_init(&cq->cq_lock);
177176

178177
list_add(&cq->cq_list, &hw->cq_list_head);
179178

@@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
272271
int err = 0;
273272
int i;
274273

275-
mutex_lock(&cq->cq_lock);
274+
spin_lock(&cq->cq_lock);
276275

277276
/* Ensure there are enough descriptors to send all messages */
278277
num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
@@ -332,7 +331,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
332331
wr32(hw, cq->reg.tail, cq->next_to_use);
333332

334333
err_unlock:
335-
mutex_unlock(&cq->cq_lock);
334+
spin_unlock(&cq->cq_lock);
336335

337336
return err;
338337
}
@@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
364363
if (*clean_count > cq->ring_size)
365364
return -EBADR;
366365

367-
mutex_lock(&cq->cq_lock);
366+
spin_lock(&cq->cq_lock);
368367

369368
ntc = cq->next_to_clean;
370369

@@ -397,7 +396,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
397396

398397
cq->next_to_clean = ntc;
399398

400-
mutex_unlock(&cq->cq_lock);
399+
spin_unlock(&cq->cq_lock);
401400

402401
/* Return number of descriptors actually cleaned */
403402
*clean_count = i;
@@ -435,7 +434,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
435434
if (*buff_count > 0)
436435
buffs_avail = true;
437436

438-
mutex_lock(&cq->cq_lock);
437+
spin_lock(&cq->cq_lock);
439438

440439
if (tbp >= cq->ring_size)
441440
tbp = 0;
@@ -524,7 +523,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
524523
wr32(hw, cq->reg.tail, cq->next_to_post);
525524
}
526525

527-
mutex_unlock(&cq->cq_lock);
526+
spin_unlock(&cq->cq_lock);
528527

529528
/* return the number of buffers that were not posted */
530529
*buff_count = *buff_count - i;
@@ -552,7 +551,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
552551
u16 i;
553552

554553
/* take the lock before we start messing with the ring */
555-
mutex_lock(&cq->cq_lock);
554+
spin_lock(&cq->cq_lock);
556555

557556
ntc = cq->next_to_clean;
558557

@@ -614,7 +613,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
614613

615614
cq->next_to_clean = ntc;
616615

617-
mutex_unlock(&cq->cq_lock);
616+
spin_unlock(&cq->cq_lock);
618617

619618
*num_q_msg = i;
620619
if (*num_q_msg == 0)

drivers/net/ethernet/intel/idpf/idpf_controlq_api.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -99,7 +99,7 @@ struct idpf_ctlq_info {
9999

100100
enum idpf_ctlq_type cq_type;
101101
int q_id;
102-
struct mutex cq_lock; /* control queue lock */
102+
spinlock_t cq_lock; /* control queue lock */
103103
/* used for interrupt processing */
104104
u16 next_to_use;
105105
u16 next_to_clean;

drivers/net/ethernet/intel/idpf/idpf_ethtool.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ static u32 idpf_get_rxfh_key_size(struct net_device *netdev)
4747
struct idpf_vport_user_config_data *user_config;
4848

4949
if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
50-
return -EOPNOTSUPP;
50+
return 0;
5151

5252
user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
5353

@@ -66,7 +66,7 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
6666
struct idpf_vport_user_config_data *user_config;
6767

6868
if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
69-
return -EOPNOTSUPP;
69+
return 0;
7070

7171
user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
7272

drivers/net/ethernet/intel/idpf/idpf_lib.c

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2314,8 +2314,12 @@ void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
23142314
struct idpf_adapter *adapter = hw->back;
23152315
size_t sz = ALIGN(size, 4096);
23162316

2317-
mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz,
2318-
&mem->pa, GFP_KERNEL);
2317+
/* The control queue resources are freed under a spinlock, contiguous
2318+
* pages will avoid IOMMU remapping and the use of vmap() (and vunmap()
2319+
* in the dma_free_*() path).
2320+
*/
2321+
mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa,
2322+
GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
23192323
mem->size = sz;
23202324

23212325
return mem->va;
@@ -2330,8 +2334,8 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
23302334
{
23312335
struct idpf_adapter *adapter = hw->back;
23322336

2333-
dma_free_coherent(&adapter->pdev->dev, mem->size,
2334-
mem->va, mem->pa);
2337+
dma_free_attrs(&adapter->pdev->dev, mem->size,
2338+
mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS);
23352339
mem->size = 0;
23362340
mem->va = NULL;
23372341
mem->pa = 0;

drivers/net/ethernet/intel/igc/igc_main.c

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7115,6 +7115,10 @@ static int igc_probe(struct pci_dev *pdev,
71157115
adapter->port_num = hw->bus.func;
71167116
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
71177117

7118+
/* Disable ASPM L1.2 on I226 devices to avoid packet loss */
7119+
if (igc_is_device_id_i226(hw))
7120+
pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
7121+
71187122
err = pci_save_state(pdev);
71197123
if (err)
71207124
goto err_ioremap;
@@ -7500,6 +7504,9 @@ static int __igc_resume(struct device *dev, bool rpm)
75007504
pci_enable_wake(pdev, PCI_D3hot, 0);
75017505
pci_enable_wake(pdev, PCI_D3cold, 0);
75027506

7507+
if (igc_is_device_id_i226(hw))
7508+
pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
7509+
75037510
if (igc_init_interrupt_scheme(adapter, true)) {
75047511
netdev_err(netdev, "Unable to allocate memory for queues\n");
75057512
return -ENOMEM;
@@ -7625,6 +7632,9 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
76257632
pci_enable_wake(pdev, PCI_D3hot, 0);
76267633
pci_enable_wake(pdev, PCI_D3cold, 0);
76277634

7635+
if (igc_is_device_id_i226(hw))
7636+
pci_disable_link_state_locked(pdev, PCIE_LINK_STATE_L1_2);
7637+
76287638
/* In case of PCI error, adapter loses its HW address
76297639
* so we should re-assign it here.
76307640
*/

0 commit comments

Comments (0)