Skip to content

Commit 2fe8d4b

Browse files
aharonl-nvidia authored and jgunthorpe committed
RDMA/mlx5: Fail QP creation if the device can not support the CQE TS
On ConnectX-6 Dx devices, HW can work in real-time timestamp mode according to the device capabilities per RQ/SQ/QP. When the flag IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION is set, the user expects to get timestamps on the CQEs in free-running format, so we need to fail the QP creation if the current mode of the device doesn't support it. Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Aharon Landau <[email protected]> Signed-off-by: Maor Gottlieb <[email protected]> Signed-off-by: Leon Romanovsky <[email protected]> Signed-off-by: Jason Gunthorpe <[email protected]>
1 parent a6a217d commit 2fe8d4b

File tree

1 file changed

+96
-8
lines changed
  • drivers/infiniband/hw/mlx5

1 file changed

+96
-8
lines changed

drivers/infiniband/hw/mlx5/qp.c

Lines changed: 96 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1078,6 +1078,7 @@ static int _create_kernel_qp(struct mlx5_ib_dev *dev,
10781078

10791079
qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
10801080
MLX5_SET(qpc, qpc, uar_page, uar_index);
1081+
MLX5_SET(qpc, qpc, ts_format, MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT);
10811082
MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
10821083

10831084
/* Set "fast registration enabled" for all kernel QPs */
@@ -1172,10 +1173,72 @@ static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq)
11721173
sq->flow_rule = NULL;
11731174
}
11741175

1176+
static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
1177+
{
1178+
bool fr_supported =
1179+
MLX5_CAP_GEN(dev->mdev, rq_ts_format) ==
1180+
MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
1181+
MLX5_CAP_GEN(dev->mdev, rq_ts_format) ==
1182+
MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
1183+
1184+
if (send_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) {
1185+
if (!fr_supported) {
1186+
mlx5_ib_dbg(dev, "Free running TS format is not supported\n");
1187+
return -EOPNOTSUPP;
1188+
}
1189+
return MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING;
1190+
}
1191+
return MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT;
1192+
}
1193+
1194+
static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
1195+
{
1196+
bool fr_supported =
1197+
MLX5_CAP_GEN(dev->mdev, sq_ts_format) ==
1198+
MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
1199+
MLX5_CAP_GEN(dev->mdev, sq_ts_format) ==
1200+
MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
1201+
1202+
if (send_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) {
1203+
if (!fr_supported) {
1204+
mlx5_ib_dbg(dev, "Free running TS format is not supported\n");
1205+
return -EOPNOTSUPP;
1206+
}
1207+
return MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING;
1208+
}
1209+
return MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT;
1210+
}
1211+
1212+
static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq,
1213+
struct mlx5_ib_cq *recv_cq)
1214+
{
1215+
bool fr_supported =
1216+
MLX5_CAP_ROCE(dev->mdev, qp_ts_format) ==
1217+
MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
1218+
MLX5_CAP_ROCE(dev->mdev, qp_ts_format) ==
1219+
MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
1220+
int ts_format = MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
1221+
1222+
if (recv_cq &&
1223+
recv_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION)
1224+
ts_format = MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING;
1225+
1226+
if (send_cq &&
1227+
send_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION)
1228+
ts_format = MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING;
1229+
1230+
if (ts_format == MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING &&
1231+
!fr_supported) {
1232+
mlx5_ib_dbg(dev, "Free running TS format is not supported\n");
1233+
return -EOPNOTSUPP;
1234+
}
1235+
return ts_format;
1236+
}
1237+
11751238
static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
11761239
struct ib_udata *udata,
11771240
struct mlx5_ib_sq *sq, void *qpin,
1178-
struct ib_pd *pd)
1241+
struct ib_pd *pd, struct mlx5_ib_cq *cq)
11791242
{
11801243
struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer;
11811244
__be64 *pas;
@@ -1187,6 +1250,11 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
11871250
int err;
11881251
unsigned int page_offset_quantized;
11891252
unsigned long page_size;
1253+
int ts_format;
1254+
1255+
ts_format = get_sq_ts_format(dev, cq);
1256+
if (ts_format < 0)
1257+
return ts_format;
11901258

11911259
sq->ubuffer.umem = ib_umem_get(&dev->ib_dev, ubuffer->buf_addr,
11921260
ubuffer->buf_size, 0);
@@ -1215,6 +1283,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
12151283
if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
12161284
MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1);
12171285
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
1286+
MLX5_SET(sqc, sqc, ts_format, ts_format);
12181287
MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
12191288
MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
12201289
MLX5_SET(sqc, sqc, tis_lst_sz, 1);
@@ -1263,7 +1332,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
12631332

12641333
static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
12651334
struct mlx5_ib_rq *rq, void *qpin,
1266-
struct ib_pd *pd)
1335+
struct ib_pd *pd, struct mlx5_ib_cq *cq)
12671336
{
12681337
struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
12691338
__be64 *pas;
@@ -1274,9 +1343,14 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
12741343
struct ib_umem *umem = rq->base.ubuffer.umem;
12751344
unsigned int page_offset_quantized;
12761345
unsigned long page_size = 0;
1346+
int ts_format;
12771347
size_t inlen;
12781348
int err;
12791349

1350+
ts_format = get_rq_ts_format(dev, cq);
1351+
if (ts_format < 0)
1352+
return ts_format;
1353+
12801354
page_size = mlx5_umem_find_best_quantized_pgoff(umem, wq, log_wq_pg_sz,
12811355
MLX5_ADAPTER_PAGE_SHIFT,
12821356
page_offset, 64,
@@ -1296,6 +1370,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
12961370
MLX5_SET(rqc, rqc, vsd, 1);
12971371
MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
12981372
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
1373+
MLX5_SET(rqc, rqc, ts_format, ts_format);
12991374
MLX5_SET(rqc, rqc, flush_in_error_en, 1);
13001375
MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
13011376
MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));
@@ -1393,10 +1468,10 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
13931468
}
13941469

13951470
static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
1396-
u32 *in, size_t inlen,
1397-
struct ib_pd *pd,
1471+
u32 *in, size_t inlen, struct ib_pd *pd,
13981472
struct ib_udata *udata,
1399-
struct mlx5_ib_create_qp_resp *resp)
1473+
struct mlx5_ib_create_qp_resp *resp,
1474+
struct ib_qp_init_attr *init_attr)
14001475
{
14011476
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
14021477
struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
@@ -1415,7 +1490,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
14151490
if (err)
14161491
return err;
14171492

1418-
err = create_raw_packet_qp_sq(dev, udata, sq, in, pd);
1493+
err = create_raw_packet_qp_sq(dev, udata, sq, in, pd,
1494+
to_mcq(init_attr->send_cq));
14191495
if (err)
14201496
goto err_destroy_tis;
14211497

@@ -1437,7 +1513,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
14371513
rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
14381514
if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING)
14391515
rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
1440-
err = create_raw_packet_qp_rq(dev, rq, in, pd);
1516+
err = create_raw_packet_qp_rq(dev, rq, in, pd,
1517+
to_mcq(init_attr->recv_cq));
14411518
if (err)
14421519
goto err_destroy_sq;
14431520

@@ -1907,6 +1984,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
19071984
struct mlx5_ib_cq *recv_cq;
19081985
unsigned long flags;
19091986
struct mlx5_ib_qp_base *base;
1987+
int ts_format;
19101988
int mlx5_st;
19111989
void *qpc;
19121990
u32 *in;
@@ -1944,6 +2022,13 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
19442022
if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
19452023
return -EINVAL;
19462024

2025+
if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
2026+
ts_format = get_qp_ts_format(dev, to_mcq(init_attr->send_cq),
2027+
to_mcq(init_attr->recv_cq));
2028+
if (ts_format < 0)
2029+
return ts_format;
2030+
}
2031+
19472032
err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
19482033
&inlen, base, ucmd);
19492034
if (err)
@@ -1992,6 +2077,9 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
19922077
MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
19932078
}
19942079

2080+
if (init_attr->qp_type != IB_QPT_RAW_PACKET)
2081+
MLX5_SET(qpc, qpc, ts_format, ts_format);
2082+
19952083
MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
19962084

19972085
if (qp->sq.wqe_cnt) {
@@ -2046,7 +2134,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
20462134
qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd->sq_buf_addr;
20472135
raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
20482136
err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
2049-
&params->resp);
2137+
&params->resp, init_attr);
20502138
} else
20512139
err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
20522140

0 commit comments

Comments
 (0)