Commit bfc5cc8

jahay1 authored and anguy11 committed
idpf: use reserved RDMA vectors from control plane
Fetch the number of reserved RDMA vectors from the control plane. Adjust the number of reserved LAN vectors if necessary. Adjust the minimum number of vectors the OS should reserve to include RDMA, and fail if the OS cannot reserve enough vectors for the minimum number of LAN and RDMA vectors required. Create a separate MSIX table for the reserved RDMA vectors, which will simply be handed off to the RDMA core device to do with as it will.

Reviewed-by: Madhu Chittim <[email protected]>
Signed-off-by: Joshua Hay <[email protected]>
Signed-off-by: Tatyana Nikolova <[email protected]>
Signed-off-by: Tony Nguyen <[email protected]>
1 parent 19272b3 commit bfc5cc8
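The "handed off" table mentioned in the commit message is simply an array of struct msix_entry. As a minimal sketch (not part of this patch) of what a consumer such as the RDMA core device driver might do with it: the helper name idpf_example_dump_rdma_vecs is hypothetical and would have to be built inside the idpf driver against the struct idpf_adapter fields added below.

/* Hypothetical helper: walk the RDMA MSIX table populated by
 * idpf_intr_req() and log the Linux IRQ mapped to each reserved entry.
 * Assumes the idpf_adapter fields introduced by this patch.
 */
#include "idpf.h"

static void idpf_example_dump_rdma_vecs(struct idpf_adapter *adapter)
{
	u16 i;

	for (i = 0; i < adapter->num_rdma_msix_entries; i++)
		dev_info(&adapter->pdev->dev, "RDMA MSIX entry %u -> irq %u\n",
			 adapter->rdma_msix_entries[i].entry,
			 adapter->rdma_msix_entries[i].vector);
}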

File tree: 4 files changed, +98 -21 lines

drivers/net/ethernet/intel/idpf/idpf.h

Lines changed: 26 additions & 2 deletions
@@ -507,10 +507,11 @@ struct idpf_vc_xn_manager;
  * @flags: See enum idpf_flags
  * @reset_reg: See struct idpf_reset_reg
  * @hw: Device access data
- * @num_req_msix: Requested number of MSIX vectors
  * @num_avail_msix: Available number of MSIX vectors
  * @num_msix_entries: Number of entries in MSIX table
  * @msix_entries: MSIX table
+ * @num_rdma_msix_entries: Available number of MSIX vectors for RDMA
+ * @rdma_msix_entries: RDMA MSIX table
  * @req_vec_chunks: Requested vector chunk data
  * @mb_vector: Mailbox vector data
  * @vector_stack: Stack to store the msix vector indexes
@@ -561,10 +562,11 @@ struct idpf_adapter {
 	DECLARE_BITMAP(flags, IDPF_FLAGS_NBITS);
 	struct idpf_reset_reg reset_reg;
 	struct idpf_hw hw;
-	u16 num_req_msix;
 	u16 num_avail_msix;
 	u16 num_msix_entries;
 	struct msix_entry *msix_entries;
+	u16 num_rdma_msix_entries;
+	struct msix_entry *rdma_msix_entries;
 	struct virtchnl2_alloc_vectors *req_vec_chunks;
 	struct idpf_q_vector mb_vector;
 	struct idpf_vector_lifo vector_stack;
@@ -630,6 +632,17 @@ static inline int idpf_is_queue_model_split(u16 q_model)
 bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
 			    enum idpf_cap_field field, u64 flag);
 
+/**
+ * idpf_is_rdma_cap_ena - Determine if RDMA is supported
+ * @adapter: private data struct
+ *
+ * Return: true if RDMA capability is enabled, false otherwise
+ */
+static inline bool idpf_is_rdma_cap_ena(struct idpf_adapter *adapter)
+{
+	return idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_RDMA);
+}
+
 #define IDPF_CAP_RSS (\
 	VIRTCHNL2_CAP_RSS_IPV4_TCP	|\
 	VIRTCHNL2_CAP_RSS_IPV4_TCP	|\
@@ -682,6 +695,17 @@ static inline u16 idpf_get_reserved_vecs(struct idpf_adapter *adapter)
 	return le16_to_cpu(adapter->caps.num_allocated_vectors);
 }
 
+/**
+ * idpf_get_reserved_rdma_vecs - Get reserved RDMA vectors
+ * @adapter: private data struct
+ *
+ * Return: number of vectors reserved for RDMA
+ */
+static inline u16 idpf_get_reserved_rdma_vecs(struct idpf_adapter *adapter)
+{
+	return le16_to_cpu(adapter->caps.num_rdma_allocated_vectors);
+}
+
 /**
  * idpf_get_default_vports - Get default number of vports
  * @adapter: private data struct

drivers/net/ethernet/intel/idpf/idpf_lib.c

Lines changed: 67 additions & 18 deletions
@@ -88,6 +88,8 @@ void idpf_intr_rel(struct idpf_adapter *adapter)
 	idpf_deinit_vector_stack(adapter);
 	kfree(adapter->msix_entries);
 	adapter->msix_entries = NULL;
+	kfree(adapter->rdma_msix_entries);
+	adapter->rdma_msix_entries = NULL;
 }
 
 /**
@@ -299,13 +301,33 @@ int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
  */
 int idpf_intr_req(struct idpf_adapter *adapter)
 {
+	u16 num_lan_vecs, min_lan_vecs, num_rdma_vecs = 0, min_rdma_vecs = 0;
 	u16 default_vports = idpf_get_default_vports(adapter);
 	int num_q_vecs, total_vecs, num_vec_ids;
-	int min_vectors, v_actual, err;
+	int min_vectors, actual_vecs, err;
 	unsigned int vector;
 	u16 *vecids;
+	int i;
 
 	total_vecs = idpf_get_reserved_vecs(adapter);
+	num_lan_vecs = total_vecs;
+	if (idpf_is_rdma_cap_ena(adapter)) {
+		num_rdma_vecs = idpf_get_reserved_rdma_vecs(adapter);
+		min_rdma_vecs = IDPF_MIN_RDMA_VEC;
+
+		if (!num_rdma_vecs) {
+			/* If idpf_get_reserved_rdma_vecs is 0, vectors are
+			 * pulled from the LAN pool.
+			 */
+			num_rdma_vecs = min_rdma_vecs;
+		} else if (num_rdma_vecs < min_rdma_vecs) {
+			dev_err(&adapter->pdev->dev,
+				"Not enough vectors reserved for RDMA (min: %u, current: %u)\n",
+				min_rdma_vecs, num_rdma_vecs);
+			return -EINVAL;
+		}
+	}
+
 	num_q_vecs = total_vecs - IDPF_MBX_Q_VEC;
 
 	err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs);
@@ -316,52 +338,76 @@ int idpf_intr_req(struct idpf_adapter *adapter)
 		return -EAGAIN;
 	}
 
-	min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
-	v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
-					 total_vecs, PCI_IRQ_MSIX);
-	if (v_actual < min_vectors) {
-		dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n",
-			v_actual);
-		err = -EAGAIN;
+	min_lan_vecs = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
+	min_vectors = min_lan_vecs + min_rdma_vecs;
+	actual_vecs = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
+					    total_vecs, PCI_IRQ_MSIX);
+	if (actual_vecs < 0) {
+		dev_err(&adapter->pdev->dev, "Failed to allocate minimum MSIX vectors required: %d\n",
			min_vectors);
+		err = actual_vecs;
 		goto send_dealloc_vecs;
 	}
 
-	adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry),
-					GFP_KERNEL);
+	if (idpf_is_rdma_cap_ena(adapter)) {
+		if (actual_vecs < total_vecs) {
+			dev_warn(&adapter->pdev->dev,
+				 "Warning: %d vectors requested, only %d available. Defaulting to minimum (%d) for RDMA and remaining for LAN.\n",
+				 total_vecs, actual_vecs, IDPF_MIN_RDMA_VEC);
+			num_rdma_vecs = IDPF_MIN_RDMA_VEC;
+		}
 
+		adapter->rdma_msix_entries = kcalloc(num_rdma_vecs,
+						     sizeof(struct msix_entry),
+						     GFP_KERNEL);
+		if (!adapter->rdma_msix_entries) {
+			err = -ENOMEM;
+			goto free_irq;
+		}
+	}
+
+	num_lan_vecs = actual_vecs - num_rdma_vecs;
+	adapter->msix_entries = kcalloc(num_lan_vecs, sizeof(struct msix_entry),
+					GFP_KERNEL);
 	if (!adapter->msix_entries) {
 		err = -ENOMEM;
-		goto free_irq;
+		goto free_rdma_msix;
 	}
 
 	adapter->mb_vector.v_idx = le16_to_cpu(adapter->caps.mailbox_vector_id);
 
-	vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
+	vecids = kcalloc(actual_vecs, sizeof(u16), GFP_KERNEL);
 	if (!vecids) {
 		err = -ENOMEM;
 		goto free_msix;
 	}
 
-	num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
+	num_vec_ids = idpf_get_vec_ids(adapter, vecids, actual_vecs,
 				       &adapter->req_vec_chunks->vchunks);
-	if (num_vec_ids < v_actual) {
+	if (num_vec_ids < actual_vecs) {
 		err = -EINVAL;
 		goto free_vecids;
 	}
 
-	for (vector = 0; vector < v_actual; vector++) {
+	for (vector = 0; vector < num_lan_vecs; vector++) {
 		adapter->msix_entries[vector].entry = vecids[vector];
 		adapter->msix_entries[vector].vector =
			pci_irq_vector(adapter->pdev, vector);
 	}
+	for (i = 0; i < num_rdma_vecs; vector++, i++) {
+		adapter->rdma_msix_entries[i].entry = vecids[vector];
+		adapter->rdma_msix_entries[i].vector =
			pci_irq_vector(adapter->pdev, vector);
+	}
 
-	adapter->num_req_msix = total_vecs;
-	adapter->num_msix_entries = v_actual;
 	/* 'num_avail_msix' is used to distribute excess vectors to the vports
	 * after considering the minimum vectors required per each default
	 * vport
	 */
-	adapter->num_avail_msix = v_actual - min_vectors;
+	adapter->num_avail_msix = num_lan_vecs - min_lan_vecs;
+	adapter->num_msix_entries = num_lan_vecs;
+	if (idpf_is_rdma_cap_ena(adapter))
+		adapter->num_rdma_msix_entries = num_rdma_vecs;
 
 	/* Fill MSIX vector lifo stack with vector indexes */
 	err = idpf_init_vector_stack(adapter);
@@ -383,6 +429,9 @@ int idpf_intr_req(struct idpf_adapter *adapter)
 free_msix:
 	kfree(adapter->msix_entries);
 	adapter->msix_entries = NULL;
+free_rdma_msix:
+	kfree(adapter->rdma_msix_entries);
+	adapter->rdma_msix_entries = NULL;
 free_irq:
 	pci_free_irq_vectors(adapter->pdev);
 send_dealloc_vecs:
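To make the vector accounting in idpf_intr_req() above concrete, here is a small illustrative sketch with made-up numbers (not taken from the patch): min_lan_vecs covers the mailbox vector plus one queue vector per default vport, min_rdma_vecs is IDPF_MIN_RDMA_VEC, and when the OS grants fewer vectors than requested the RDMA share falls back to its minimum with LAN taking the remainder. The snippet is standalone userspace C, not driver code.

#include <stdio.h>

#define IDPF_MBX_Q_VEC    1
#define IDPF_MIN_Q_VEC    1
#define IDPF_MIN_RDMA_VEC 2

int main(void)
{
	/* Example numbers only: 2 default vports, 16 vectors reserved in
	 * total, 6 of them reserved for RDMA by the control plane.
	 */
	unsigned int default_vports = 2, total_vecs = 16, num_rdma_vecs = 6;
	unsigned int min_lan_vecs = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
	unsigned int min_vectors = min_lan_vecs + IDPF_MIN_RDMA_VEC;

	/* Pretend the OS could only allocate 10 of the 16 requested vectors. */
	unsigned int actual_vecs = 10;

	if (actual_vecs < total_vecs)
		num_rdma_vecs = IDPF_MIN_RDMA_VEC; /* RDMA falls back to its minimum */

	printf("min_vectors=%u lan=%u rdma=%u\n",
	       min_vectors, actual_vecs - num_rdma_vecs, num_rdma_vecs);
	/* prints: min_vectors=5 lan=8 rdma=2 */
	return 0;
}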

drivers/net/ethernet/intel/idpf/idpf_txrx.h

Lines changed: 1 addition & 0 deletions
@@ -57,6 +57,7 @@
 /* Default vector sharing */
 #define IDPF_MBX_Q_VEC		1
 #define IDPF_MIN_Q_VEC		1
+#define IDPF_MIN_RDMA_VEC	2
 
 #define IDPF_DFLT_TX_Q_DESC_COUNT		512
 #define IDPF_DFLT_TX_COMPLQ_DESC_COUNT		512

drivers/net/ethernet/intel/idpf/virtchnl2.h

Lines changed: 4 additions & 1 deletion
@@ -483,6 +483,8 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
  *	segment offload.
  * @max_hdr_buf_per_lso: Max number of header buffers that can be used for
  *			 an LSO.
+ * @num_rdma_allocated_vectors: Maximum number of allocated RDMA vectors for
+ *				 the device.
  * @pad1: Padding for future extensions.
  *
  * Dataplane driver sends this message to CP to negotiate capabilities and
@@ -530,7 +532,8 @@ struct virtchnl2_get_capabilities {
 	__le32 device_type;
 	u8 min_sso_packet_len;
 	u8 max_hdr_buf_per_lso;
-	u8 pad1[10];
+	__le16 num_rdma_allocated_vectors;
+	u8 pad1[8];
 };
 VIRTCHNL2_CHECK_STRUCT_LEN(80, virtchnl2_get_capabilities);
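One detail worth noting in the hunk above: the new __le16 field consumes two bytes of the former ten-byte pad, so the 80-byte size enforced by VIRTCHNL2_CHECK_STRUCT_LEN is preserved. Below is a standalone C sketch of that tail-layout check using hypothetical stand-in types (not the real virtchnl2 structures).

#include <stdint.h>
#include <assert.h>	/* static_assert (C11) */

/* Stand-ins for the tail of struct virtchnl2_get_capabilities before and
 * after this patch: 2 bytes are carved out of the 10-byte pad for the
 * RDMA vector count, leaving the overall size unchanged.
 */
struct example_caps_tail_old {
	uint8_t  min_sso_packet_len;
	uint8_t  max_hdr_buf_per_lso;
	uint8_t  pad1[10];
};

struct example_caps_tail_new {
	uint8_t  min_sso_packet_len;
	uint8_t  max_hdr_buf_per_lso;
	uint16_t num_rdma_allocated_vectors;	/* __le16 on the wire */
	uint8_t  pad1[8];
};

static_assert(sizeof(struct example_caps_tail_old) ==
	      sizeof(struct example_caps_tail_new),
	      "RDMA vector count must fit inside the former pad");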
