
Commit 2dec50d

Paolo Abeni authored and committed
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/linux
Tony Nguyen says:

====================
Add RDMA support for Intel IPU E2000 in idpf

Tatyana Nikolova says:

This idpf patch series is the second part of the staged submission for
introducing RDMA RoCEv2 support for the IPU E2000 line of products,
referred to as GEN3.

To support RDMA GEN3 devices, the idpf driver uses common definitions
of the IIDC interface and implements specific device functionality in
iidc_rdma_idpf.h.

The IPU model can host one or more logical network endpoints called
vPorts per PCI function that are flexibly associated with a physical
port or an internal communication port.

Other features pertaining to GEN3 devices include:
* MMIO learning
* RDMA capability negotiation
* RDMA vectors discovery between idpf and control plane

These patches are split from the submission "Add RDMA support for Intel
IPU E2000 (GEN3)" [1]. The patches have been tested on a range of hosts
and platforms with a variety of general RDMA applications, which include
standalone verbs (rping, perftest, etc.), storage and HPC applications.

Signed-off-by: Tony Nguyen <[email protected]>

[1] https://lore.kernel.org/all/[email protected]/

IWL reviews:
v3: https://lore.kernel.org/all/[email protected]/
v2: https://lore.kernel.org/all/[email protected]/
v1 (split from previous series): https://lore.kernel.org/all/[email protected]/
v3: https://lore.kernel.org/all/[email protected]/
RFC v2: https://lore.kernel.org/all/[email protected]/
RFC: https://lore.kernel.org/all/[email protected]/

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/linux:
  idpf: implement get LAN MMIO memory regions
  idpf: implement IDC vport aux driver MTU change handler
  idpf: implement remaining IDC RDMA core callbacks and handlers
  idpf: implement RDMA vport auxiliary dev create, init, and destroy
  idpf: implement core RDMA auxiliary dev create, init, and destroy
  idpf: use reserved RDMA vectors from control plane
====================

Link: https://patch.msgid.link/[email protected]
Signed-off-by: Paolo Abeni <[email protected]>
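The "core" and "vport" auxiliary devices named in the patch titles follow the standard kernel auxiliary-bus pattern: the LAN driver registers an auxiliary device and the RDMA driver binds to it. A minimal sketch of that pattern follows; the my_rdma_adev type, the "rdma_core" device name, and my_plug_rdma_aux_dev() are illustrative placeholders, not the identifiers idpf actually uses.

#include <linux/auxiliary_bus.h>
#include <linux/slab.h>

struct my_rdma_adev {
	struct auxiliary_device adev;
};

static void my_rdma_adev_release(struct device *dev)
{
	struct auxiliary_device *adev = container_of(dev, struct auxiliary_device, dev);

	kfree(container_of(adev, struct my_rdma_adev, adev));
}

/* Create and register the auxiliary device the RDMA driver will probe. */
static int my_plug_rdma_aux_dev(struct device *parent)
{
	struct my_rdma_adev *rdev;
	int err;

	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev)
		return -ENOMEM;

	rdev->adev.name = "rdma_core";	/* matched against the RDMA driver's id_table */
	rdev->adev.id = 0;
	rdev->adev.dev.parent = parent;
	rdev->adev.dev.release = my_rdma_adev_release;

	err = auxiliary_device_init(&rdev->adev);
	if (err) {
		kfree(rdev);
		return err;
	}

	err = auxiliary_device_add(&rdev->adev);
	if (err)
		auxiliary_device_uninit(&rdev->adev);	/* release() frees rdev */

	return err;
}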
2 parents cd03135 + 6aa53e8 commit 2dec50d

File tree: 15 files changed, +1107 −74 lines

drivers/net/ethernet/intel/idpf/Makefile

Lines changed: 1 addition & 0 deletions
@@ -10,6 +10,7 @@ idpf-y := \
 	idpf_controlq_setup.o \
 	idpf_dev.o \
 	idpf_ethtool.o \
+	idpf_idc.o \
 	idpf_lib.o \
 	idpf_main.o \
 	idpf_txrx.o \

drivers/net/ethernet/intel/idpf/idpf.h

Lines changed: 111 additions & 5 deletions
@@ -12,12 +12,16 @@ struct idpf_vport_max_q;
 #include <net/pkt_sched.h>
 #include <linux/aer.h>
 #include <linux/etherdevice.h>
+#include <linux/ioport.h>
 #include <linux/pci.h>
 #include <linux/bitfield.h>
 #include <linux/sctp.h>
 #include <linux/ethtool_netlink.h>
 #include <net/gro.h>
 
+#include <linux/net/intel/iidc_rdma.h>
+#include <linux/net/intel/iidc_rdma_idpf.h>
+
 #include "virtchnl2.h"
 #include "idpf_txrx.h"
 #include "idpf_controlq.h"
@@ -194,7 +198,8 @@ struct idpf_vport_max_q {
  * @ptp_reg_init: PTP register initialization
  */
 struct idpf_reg_ops {
-	void (*ctlq_reg_init)(struct idpf_ctlq_create_info *cq);
+	void (*ctlq_reg_init)(struct idpf_adapter *adapter,
+			      struct idpf_ctlq_create_info *cq);
 	int (*intr_reg_init)(struct idpf_vport *vport);
 	void (*mb_intr_reg_init)(struct idpf_adapter *adapter);
 	void (*reset_reg_init)(struct idpf_adapter *adapter);
@@ -203,12 +208,25 @@ struct idpf_reg_ops {
 	void (*ptp_reg_init)(const struct idpf_adapter *adapter);
 };
 
+#define IDPF_MMIO_REG_NUM_STATIC	2
+#define IDPF_PF_MBX_REGION_SZ		4096
+#define IDPF_PF_RSTAT_REGION_SZ		2048
+#define IDPF_VF_MBX_REGION_SZ		10240
+#define IDPF_VF_RSTAT_REGION_SZ		2048
+
 /**
  * struct idpf_dev_ops - Device specific operations
  * @reg_ops: Register operations
+ * @idc_init: IDC initialization
+ * @static_reg_info: array of mailbox and rstat register info
  */
 struct idpf_dev_ops {
 	struct idpf_reg_ops reg_ops;
+
+	int (*idc_init)(struct idpf_adapter *adapter);
+
+	/* static_reg_info[0] is mailbox region, static_reg_info[1] is rstat */
+	struct resource static_reg_info[IDPF_MMIO_REG_NUM_STATIC];
 };
 
 /**
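Since static_reg_info reuses the generic struct resource, a device-specific ops table can describe its fixed mailbox and reset-status windows with DEFINE_RES_MEM(). The sketch below is illustrative only: my_pf_dev_ops and the 0x08400000/0x08406000 base offsets are placeholders, not the real PF register map.

#include <linux/ioport.h>
#include "idpf.h"

static const struct idpf_dev_ops my_pf_dev_ops = {
	/* .reg_ops would carry the usual register-init callbacks */
	.idc_init = idpf_idc_init,
	/* [0] is the mailbox window, [1] is the reset-status (rstat) window */
	.static_reg_info[0] = DEFINE_RES_MEM(0x08400000, IDPF_PF_MBX_REGION_SZ),
	.static_reg_info[1] = DEFINE_RES_MEM(0x08406000, IDPF_PF_RSTAT_REGION_SZ),
};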
@@ -275,6 +293,7 @@ struct idpf_port_stats {
  * group will yield total number of RX queues.
  * @rxq_model: Splitq queue or single queue queuing model
  * @rx_ptype_lkup: Lookup table for ptypes on RX
+ * @vdev_info: IDC vport device info pointer
  * @adapter: back pointer to associated adapter
  * @netdev: Associated net_device. Each vport should have one and only one
  * associated netdev.
@@ -320,6 +339,8 @@ struct idpf_vport {
 	u32 rxq_model;
 	struct libeth_rx_pt *rx_ptype_lkup;
 
+	struct iidc_rdma_vport_dev_info *vdev_info;
+
 	struct idpf_adapter *adapter;
 	struct net_device *netdev;
 	DECLARE_BITMAP(flags, IDPF_VPORT_FLAGS_NBITS);
@@ -507,10 +528,11 @@ struct idpf_vc_xn_manager;
  * @flags: See enum idpf_flags
  * @reset_reg: See struct idpf_reset_reg
  * @hw: Device access data
- * @num_req_msix: Requested number of MSIX vectors
  * @num_avail_msix: Available number of MSIX vectors
  * @num_msix_entries: Number of entries in MSIX table
  * @msix_entries: MSIX table
+ * @num_rdma_msix_entries: Available number of MSIX vectors for RDMA
+ * @rdma_msix_entries: RDMA MSIX table
  * @req_vec_chunks: Requested vector chunk data
  * @mb_vector: Mailbox vector data
  * @vector_stack: Stack to store the msix vector indexes
@@ -539,6 +561,7 @@ struct idpf_vc_xn_manager;
  * @caps: Negotiated capabilities with device
  * @vcxn_mngr: Virtchnl transaction manager
  * @dev_ops: See idpf_dev_ops
+ * @cdev_info: IDC core device info pointer
  * @num_vfs: Number of allocated VFs through sysfs. PF does not directly talk
  * to VFs but is used to initialize them
  * @crc_enable: Enable CRC insertion offload
@@ -561,10 +584,11 @@ struct idpf_adapter {
 	DECLARE_BITMAP(flags, IDPF_FLAGS_NBITS);
 	struct idpf_reset_reg reset_reg;
 	struct idpf_hw hw;
-	u16 num_req_msix;
 	u16 num_avail_msix;
 	u16 num_msix_entries;
 	struct msix_entry *msix_entries;
+	u16 num_rdma_msix_entries;
+	struct msix_entry *rdma_msix_entries;
 	struct virtchnl2_alloc_vectors *req_vec_chunks;
 	struct idpf_q_vector mb_vector;
 	struct idpf_vector_lifo vector_stack;
@@ -597,6 +621,7 @@ struct idpf_adapter {
 	struct idpf_vc_xn_manager *vcxn_mngr;
 
 	struct idpf_dev_ops dev_ops;
+	struct iidc_rdma_core_dev_info *cdev_info;
 	int num_vfs;
 	bool crc_enable;
 	bool req_tx_splitq;
@@ -630,6 +655,17 @@ static inline int idpf_is_queue_model_split(u16 q_model)
 bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
 			    enum idpf_cap_field field, u64 flag);
 
+/**
+ * idpf_is_rdma_cap_ena - Determine if RDMA is supported
+ * @adapter: private data struct
+ *
+ * Return: true if RDMA capability is enabled, false otherwise
+ */
+static inline bool idpf_is_rdma_cap_ena(struct idpf_adapter *adapter)
+{
+	return idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_RDMA);
+}
+
 #define IDPF_CAP_RSS (\
 	VIRTCHNL2_CAP_RSS_IPV4_TCP |\
 	VIRTCHNL2_CAP_RSS_IPV4_TCP |\
@@ -682,6 +718,17 @@ static inline u16 idpf_get_reserved_vecs(struct idpf_adapter *adapter)
 	return le16_to_cpu(adapter->caps.num_allocated_vectors);
 }
 
+/**
+ * idpf_get_reserved_rdma_vecs - Get reserved RDMA vectors
+ * @adapter: private data struct
+ *
+ * Return: number of vectors reserved for RDMA
+ */
+static inline u16 idpf_get_reserved_rdma_vecs(struct idpf_adapter *adapter)
+{
+	return le16_to_cpu(adapter->caps.num_rdma_allocated_vectors);
+}
+
 /**
  * idpf_get_default_vports - Get default number of vports
  * @adapter: private data struct
@@ -720,6 +767,34 @@ static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter)
 	return pkt_len ? pkt_len : IDPF_TX_MIN_PKT_LEN;
 }
 
+/**
+ * idpf_get_mbx_reg_addr - Get BAR0 mailbox register address
+ * @adapter: private data struct
+ * @reg_offset: register offset value
+ *
+ * Return: BAR0 mailbox register address based on register offset.
+ */
+static inline void __iomem *idpf_get_mbx_reg_addr(struct idpf_adapter *adapter,
+						   resource_size_t reg_offset)
+{
+	return adapter->hw.mbx.vaddr + reg_offset;
+}
+
+/**
+ * idpf_get_rstat_reg_addr - Get BAR0 rstat register address
+ * @adapter: private data struct
+ * @reg_offset: register offset value
+ *
+ * Return: BAR0 rstat register address based on register offset.
+ */
+static inline void __iomem *idpf_get_rstat_reg_addr(struct idpf_adapter *adapter,
+						     resource_size_t reg_offset)
+{
+	reg_offset -= adapter->dev_ops.static_reg_info[1].start;
+
+	return adapter->hw.rstat.vaddr + reg_offset;
+}
+
 /**
  * idpf_get_reg_addr - Get BAR0 register address
  * @adapter: private data struct
@@ -730,7 +805,30 @@ static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter)
 static inline void __iomem *idpf_get_reg_addr(struct idpf_adapter *adapter,
 					      resource_size_t reg_offset)
 {
-	return (void __iomem *)(adapter->hw.hw_addr + reg_offset);
+	struct idpf_hw *hw = &adapter->hw;
+
+	for (int i = 0; i < hw->num_lan_regs; i++) {
+		struct idpf_mmio_reg *region = &hw->lan_regs[i];
+
+		if (reg_offset >= region->addr_start &&
+		    reg_offset < (region->addr_start + region->addr_len)) {
+			/* Convert the offset so that it is relative to the
+			 * start of the region. Then add the base address of
+			 * the region to get the final address.
+			 */
+			reg_offset -= region->addr_start;
+
+			return region->vaddr + reg_offset;
+		}
+	}
+
+	/* It's impossible to hit this case with offsets from the CP. But if we
+	 * do for any other reason, the kernel will panic on that register
+	 * access. Might as well do it here to make it clear what's happening.
+	 */
+	BUG();
+
+	return NULL;
 }
 
 /**
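This lookup is the core of the "MMIO learning" feature from the cover letter: BAR0 is no longer mapped as one flat window, so an absolute register offset must first be matched to the region that contains it and then rebased onto that region's own mapping (idpf_get_rstat_reg_addr() above does the same rebase against static_reg_info[1]). A stand-alone C model of the arithmetic, with made-up region offsets:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mmio_region {
	uint8_t *vaddr;		/* start of the mapped window */
	uint64_t addr_start;	/* absolute BAR offset the window begins at */
	uint64_t addr_len;	/* window length in bytes */
};

static void *reg_addr(const struct mmio_region *regs, int nregs, uint64_t off)
{
	for (int i = 0; i < nregs; i++) {
		const struct mmio_region *r = &regs[i];

		if (off >= r->addr_start && off < r->addr_start + r->addr_len)
			return r->vaddr + (off - r->addr_start);
	}

	return NULL;	/* the kernel version BUG()s here instead */
}

int main(void)
{
	static uint8_t win_a[0x1000], win_b[0x2000];
	const struct mmio_region regs[] = {
		{ win_a, 0x00100000, sizeof(win_a) },
		{ win_b, 0x00800000, sizeof(win_b) },
	};

	/* 0x00800010 falls inside the second window, 0x10 bytes in */
	printf("%td\n", (uint8_t *)reg_addr(regs, 2, 0x00800010) - win_b);

	return 0;
}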
@@ -744,7 +842,7 @@ static inline bool idpf_is_reset_detected(struct idpf_adapter *adapter)
 	if (!adapter->hw.arq)
 		return true;
 
-	return !(readl(idpf_get_reg_addr(adapter, adapter->hw.arq->reg.len)) &
+	return !(readl(idpf_get_mbx_reg_addr(adapter, adapter->hw.arq->reg.len)) &
 		 adapter->hw.arq->reg.len_mask);
 }
 
@@ -853,5 +951,13 @@ int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);
 
 u8 idpf_vport_get_hsplit(const struct idpf_vport *vport);
 bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val);
+int idpf_idc_init(struct idpf_adapter *adapter);
+int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter,
+			       enum iidc_function_type ftype);
+void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info);
+void idpf_idc_deinit_vport_aux_device(struct iidc_rdma_vport_dev_info *vdev_info);
+void idpf_idc_issue_reset_event(struct iidc_rdma_core_dev_info *cdev_info);
+void idpf_idc_vdev_mtu_event(struct iidc_rdma_vport_dev_info *vdev_info,
+			     enum iidc_rdma_event_type event_type);
 
 #endif /* !_IDPF_H_ */
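Tying the new header pieces together, a caller would gate RDMA bring-up on the negotiated capability and size its MSI-X request from both reserved pools. The following is a hedged sketch of such a caller, not the driver's actual interrupt-init path; my_request_vectors() and its error handling are illustrative:

static int my_request_vectors(struct idpf_adapter *adapter, struct pci_dev *pdev)
{
	u16 num_lan = idpf_get_reserved_vecs(adapter);
	u16 num_rdma = 0;
	int got;

	if (idpf_is_rdma_cap_ena(adapter))
		num_rdma = idpf_get_reserved_rdma_vecs(adapter);

	got = pci_alloc_irq_vectors(pdev, 1, num_lan + num_rdma, PCI_IRQ_MSIX);
	if (got < 0)
		return got;

	/* Whatever was granted beyond the LAN share is what can be handed to
	 * the RDMA auxiliary driver via num_rdma_msix_entries/rdma_msix_entries.
	 */
	adapter->num_rdma_msix_entries = got > num_lan ? got - num_lan : 0;

	return 0;
}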

drivers/net/ethernet/intel/idpf/idpf_controlq.c

Lines changed: 7 additions & 7 deletions
@@ -36,19 +36,19 @@ static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 {
 	/* Update tail to post pre-allocated buffers for rx queues */
 	if (is_rxq)
-		wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));
+		idpf_mbx_wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));
 
 	/* For non-Mailbox control queues only TAIL need to be set */
 	if (cq->q_id != -1)
 		return;
 
 	/* Clear Head for both send or receive */
-	wr32(hw, cq->reg.head, 0);
+	idpf_mbx_wr32(hw, cq->reg.head, 0);
 
 	/* set starting point */
-	wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));
-	wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));
-	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+	idpf_mbx_wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));
+	idpf_mbx_wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));
+	idpf_mbx_wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
 }
 
 /**
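idpf_mbx_wr32() itself is introduced elsewhere in the series and is not shown in this hunk; conceptually it is the old wr32() helper routed through the dedicated mailbox mapping instead of a flat hw_addr. A plausible sketch (the macro body is assumed, not quoted from the driver):

#define my_mbx_wr32(hw, reg, value)	writel((value), (hw)->mbx.vaddr + (reg))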
@@ -328,7 +328,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	 */
 	dma_wmb();
 
-	wr32(hw, cq->reg.tail, cq->next_to_use);
+	idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_use);
 
 err_unlock:
 	spin_unlock(&cq->cq_lock);
@@ -520,7 +520,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 
 		dma_wmb();
 
-		wr32(hw, cq->reg.tail, cq->next_to_post);
+		idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_post);
 	}
 
 	spin_unlock(&cq->cq_lock);

drivers/net/ethernet/intel/idpf/idpf_controlq.h

Lines changed: 16 additions & 2 deletions
@@ -94,12 +94,26 @@ struct idpf_mbxq_desc {
 	u32 pf_vf_id;		/* used by CP when sending to PF */
 };
 
+/* Max number of MMIO regions not including the mailbox and rstat regions in
+ * the fallback case when the whole bar is mapped.
+ */
+#define IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING	3
+
+struct idpf_mmio_reg {
+	void __iomem *vaddr;
+	resource_size_t addr_start;
+	resource_size_t addr_len;
+};
+
 /* Define the driver hardware struct to replace other control structs as needed
  * Align to ctlq_hw_info
  */
 struct idpf_hw {
-	void __iomem *hw_addr;
-	resource_size_t hw_addr_len;
+	struct idpf_mmio_reg mbx;
+	struct idpf_mmio_reg rstat;
+	/* Array of remaining LAN BAR regions */
+	int num_lan_regs;
+	struct idpf_mmio_reg *lan_regs;
 
 	struct idpf_adapter *back;
 
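With struct idpf_hw now holding per-region mappings instead of a single hw_addr, each idpf_mmio_reg can be backed by a partial BAR0 mapping learned from the control plane. A hedged sketch using the standard pci_iomap_range() helper; my_map_lan_region() and the assumption that all regions live in BAR0 are illustrative:

#include <linux/pci.h>

static int my_map_lan_region(struct pci_dev *pdev, struct idpf_mmio_reg *reg,
			     resource_size_t start, resource_size_t len)
{
	/* Map only the advertised sub-range of BAR0 rather than the whole BAR */
	reg->vaddr = pci_iomap_range(pdev, 0, start, len);
	if (!reg->vaddr)
		return -ENOMEM;

	reg->addr_start = start;
	reg->addr_len = len;

	return 0;
}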