Skip to content

Commit 2c20e20

Browse files
longlimsft authored and rleon committed
RDMA/mana_ib: query device capabilities
With the RDMA device registered, use it to query the hardware capabilities and cache this information for future query requests to the driver. Signed-off-by: Long Li <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Leon Romanovsky <[email protected]>
1 parent a7f0636 commit 2c20e20

File tree

6 files changed

+113
-16
lines changed

6 files changed

+113
-16
lines changed

drivers/infiniband/hw/mana/cq.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
2626
return err;
2727
}
2828

29-
if (attr->cqe > MAX_SEND_BUFFERS_PER_QUEUE) {
29+
if (attr->cqe > mdev->adapter_caps.max_qp_wr) {
3030
ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
3131
return -EINVAL;
3232
}

drivers/infiniband/hw/mana/device.c

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,13 @@ static int mana_ib_probe(struct auxiliary_device *adev,
8585
}
8686
dev->gdma_dev = &mdev->gdma_context->mana_ib;
8787

88+
ret = mana_ib_gd_query_adapter_caps(dev);
89+
if (ret) {
90+
ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d",
91+
ret);
92+
goto deregister_device;
93+
}
94+
8895
ret = ib_register_device(&dev->ib_dev, "mana_%d",
8996
mdev->gdma_context->dev);
9097
if (ret)

drivers/infiniband/hw/mana/main.c

Lines changed: 52 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -486,20 +486,17 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
486486
int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
487487
struct ib_udata *uhw)
488488
{
489-
props->max_qp = MANA_MAX_NUM_QUEUES;
490-
props->max_qp_wr = MAX_SEND_BUFFERS_PER_QUEUE;
491-
492-
/*
493-
* max_cqe could be potentially much bigger.
494-
* As this version of driver only support RAW QP, set it to the same
495-
* value as max_qp_wr
496-
*/
497-
props->max_cqe = MAX_SEND_BUFFERS_PER_QUEUE;
498-
489+
struct mana_ib_dev *dev = container_of(ibdev,
490+
struct mana_ib_dev, ib_dev);
491+
492+
props->max_qp = dev->adapter_caps.max_qp_count;
493+
props->max_qp_wr = dev->adapter_caps.max_qp_wr;
494+
props->max_cq = dev->adapter_caps.max_cq_count;
495+
props->max_cqe = dev->adapter_caps.max_qp_wr;
496+
props->max_mr = dev->adapter_caps.max_mr_count;
499497
props->max_mr_size = MANA_IB_MAX_MR_SIZE;
500-
props->max_mr = MANA_IB_MAX_MR;
501-
props->max_send_sge = MAX_TX_WQE_SGL_ENTRIES;
502-
props->max_recv_sge = MAX_RX_WQE_SGL_ENTRIES;
498+
props->max_send_sge = dev->adapter_caps.max_send_sge_count;
499+
props->max_recv_sge = dev->adapter_caps.max_recv_sge_count;
503500

504501
return 0;
505502
}
@@ -521,3 +518,45 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
521518
void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
522519
{
523520
}
521+
522+
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
523+
{
524+
struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
525+
struct mana_ib_query_adapter_caps_resp resp = {};
526+
struct mana_ib_query_adapter_caps_req req = {};
527+
int err;
528+
529+
mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP, sizeof(req),
530+
sizeof(resp));
531+
req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
532+
req.hdr.dev_id = dev->gdma_dev->dev_id;
533+
534+
err = mana_gd_send_request(dev->gdma_dev->gdma_context, sizeof(req),
535+
&req, sizeof(resp), &resp);
536+
537+
if (err) {
538+
ibdev_err(&dev->ib_dev,
539+
"Failed to query adapter caps err %d", err);
540+
return err;
541+
}
542+
543+
caps->max_sq_id = resp.max_sq_id;
544+
caps->max_rq_id = resp.max_rq_id;
545+
caps->max_cq_id = resp.max_cq_id;
546+
caps->max_qp_count = resp.max_qp_count;
547+
caps->max_cq_count = resp.max_cq_count;
548+
caps->max_mr_count = resp.max_mr_count;
549+
caps->max_pd_count = resp.max_pd_count;
550+
caps->max_inbound_read_limit = resp.max_inbound_read_limit;
551+
caps->max_outbound_read_limit = resp.max_outbound_read_limit;
552+
caps->mw_count = resp.mw_count;
553+
caps->max_srq_count = resp.max_srq_count;
554+
caps->max_qp_wr = min_t(u32,
555+
resp.max_requester_sq_size / GDMA_MAX_SQE_SIZE,
556+
resp.max_requester_rq_size / GDMA_MAX_RQE_SIZE);
557+
caps->max_inline_data_size = resp.max_inline_data_size;
558+
caps->max_send_sge_count = resp.max_send_sge_count;
559+
caps->max_recv_sge_count = resp.max_recv_sge_count;
560+
561+
return 0;
562+
}

drivers/infiniband/hw/mana/mana_ib.h

Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,9 +27,28 @@
2727
*/
2828
#define MANA_IB_MAX_MR 0xFFFFFFu
2929

30+
struct mana_ib_adapter_caps {
31+
u32 max_sq_id;
32+
u32 max_rq_id;
33+
u32 max_cq_id;
34+
u32 max_qp_count;
35+
u32 max_cq_count;
36+
u32 max_mr_count;
37+
u32 max_pd_count;
38+
u32 max_inbound_read_limit;
39+
u32 max_outbound_read_limit;
40+
u32 mw_count;
41+
u32 max_srq_count;
42+
u32 max_qp_wr;
43+
u32 max_send_sge_count;
44+
u32 max_recv_sge_count;
45+
u32 max_inline_data_size;
46+
};
47+
3048
struct mana_ib_dev {
3149
struct ib_device ib_dev;
3250
struct gdma_dev *gdma_dev;
51+
struct mana_ib_adapter_caps adapter_caps;
3352
};
3453

3554
struct mana_ib_wq {
@@ -92,6 +111,36 @@ struct mana_ib_rwq_ind_table {
92111
struct ib_rwq_ind_table ib_ind_table;
93112
};
94113

114+
/* GDMA message codes used by the mana_ib driver. */
enum mana_ib_command_code {
	MANA_IB_GET_ADAPTER_CAP = 0x30001,
};
117+
118+
struct mana_ib_query_adapter_caps_req {
119+
struct gdma_req_hdr hdr;
120+
}; /*HW Data */
121+
122+
struct mana_ib_query_adapter_caps_resp {
123+
struct gdma_resp_hdr hdr;
124+
u32 max_sq_id;
125+
u32 max_rq_id;
126+
u32 max_cq_id;
127+
u32 max_qp_count;
128+
u32 max_cq_count;
129+
u32 max_mr_count;
130+
u32 max_pd_count;
131+
u32 max_inbound_read_limit;
132+
u32 max_outbound_read_limit;
133+
u32 mw_count;
134+
u32 max_srq_count;
135+
u32 max_requester_sq_size;
136+
u32 max_responder_sq_size;
137+
u32 max_requester_rq_size;
138+
u32 max_responder_rq_size;
139+
u32 max_send_sge_count;
140+
u32 max_recv_sge_count;
141+
u32 max_inline_data_size;
142+
}; /* HW Data */
143+
95144
int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
96145
mana_handle_t *gdma_region);
97146

@@ -159,4 +208,5 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
159208

160209
void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);
161210

211+
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev);
162212
#endif

drivers/infiniband/hw/mana/qp.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -130,7 +130,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
130130
return ret;
131131
}
132132

133-
if (attr->cap.max_recv_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
133+
if (attr->cap.max_recv_wr > mdev->adapter_caps.max_qp_wr) {
134134
ibdev_dbg(&mdev->ib_dev,
135135
"Requested max_recv_wr %d exceeding limit\n",
136136
attr->cap.max_recv_wr);
@@ -296,7 +296,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
296296
if (port < 1 || port > mc->num_ports)
297297
return -EINVAL;
298298

299-
if (attr->cap.max_send_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
299+
if (attr->cap.max_send_wr > mdev->adapter_caps.max_qp_wr) {
300300
ibdev_dbg(&mdev->ib_dev,
301301
"Requested max_send_wr %d exceeding limit\n",
302302
attr->cap.max_send_wr);

include/net/mana/gdma.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -150,6 +150,7 @@ struct gdma_general_req {
150150

151151
#define GDMA_MESSAGE_V1 1
152152
#define GDMA_MESSAGE_V2 2
153+
#define GDMA_MESSAGE_V3 3
153154

154155
struct gdma_general_resp {
155156
struct gdma_resp_hdr hdr;

0 commit comments

Comments (0)