@@ -1078,6 +1078,7 @@ static int _create_kernel_qp(struct mlx5_ib_dev *dev,
 
 	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
 	MLX5_SET(qpc, qpc, uar_page, uar_index);
+	MLX5_SET(qpc, qpc, ts_format, MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT);
 	MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
 
 	/* Set "fast registration enabled" for all kernel QPs */
@@ -1172,10 +1173,72 @@ static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq)
 	sq->flow_rule = NULL;
 }
 
+static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
+{
+	bool fr_supported =
+		MLX5_CAP_GEN(dev->mdev, rq_ts_format) ==
+			MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
+		MLX5_CAP_GEN(dev->mdev, rq_ts_format) ==
+			MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+
+	if (send_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) {
+		if (!fr_supported) {
+			mlx5_ib_dbg(dev, "Free running TS format is not supported\n");
+			return -EOPNOTSUPP;
+		}
+		return MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING;
+	}
+	return MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT;
+}
+
+static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
+{
+	bool fr_supported =
+		MLX5_CAP_GEN(dev->mdev, sq_ts_format) ==
+			MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
+		MLX5_CAP_GEN(dev->mdev, sq_ts_format) ==
+			MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+
+	if (send_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) {
+		if (!fr_supported) {
+			mlx5_ib_dbg(dev, "Free running TS format is not supported\n");
+			return -EOPNOTSUPP;
+		}
+		return MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING;
+	}
+	return MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT;
+}
+
+static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq,
+			    struct mlx5_ib_cq *recv_cq)
+{
+	bool fr_supported =
+		MLX5_CAP_ROCE(dev->mdev, qp_ts_format) ==
+			MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
+		MLX5_CAP_ROCE(dev->mdev, qp_ts_format) ==
+			MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+	int ts_format = MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
+
+	if (recv_cq &&
+	    recv_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION)
+		ts_format = MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING;
+
+	if (send_cq &&
+	    send_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION)
+		ts_format = MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING;
+
+	if (ts_format == MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING &&
+	    !fr_supported) {
+		mlx5_ib_dbg(dev, "Free running TS format is not supported\n");
+		return -EOPNOTSUPP;
+	}
+	return ts_format;
+}
+
 static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
 				   struct ib_udata *udata,
 				   struct mlx5_ib_sq *sq, void *qpin,
-				   struct ib_pd *pd)
+				   struct ib_pd *pd, struct mlx5_ib_cq *cq)
 {
 	struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer;
 	__be64 *pas;
@@ -1187,6 +1250,11 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
 	int err;
 	unsigned int page_offset_quantized;
 	unsigned long page_size;
+	int ts_format;
+
+	ts_format = get_sq_ts_format(dev, cq);
+	if (ts_format < 0)
+		return ts_format;
 
 	sq->ubuffer.umem = ib_umem_get(&dev->ib_dev, ubuffer->buf_addr,
 				       ubuffer->buf_size, 0);
@@ -1215,6 +1283,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
 	if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
 		MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1);
 	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+	MLX5_SET(sqc, sqc, ts_format, ts_format);
 	MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
 	MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
 	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
@@ -1263,7 +1332,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
 
 static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
 				   struct mlx5_ib_rq *rq, void *qpin,
-				   struct ib_pd *pd)
+				   struct ib_pd *pd, struct mlx5_ib_cq *cq)
 {
 	struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
 	__be64 *pas;
@@ -1274,9 +1343,14 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
 	struct ib_umem *umem = rq->base.ubuffer.umem;
 	unsigned int page_offset_quantized;
 	unsigned long page_size = 0;
+	int ts_format;
 	size_t inlen;
 	int err;
 
+	ts_format = get_rq_ts_format(dev, cq);
+	if (ts_format < 0)
+		return ts_format;
+
 	page_size = mlx5_umem_find_best_quantized_pgoff(umem, wq, log_wq_pg_sz,
 							MLX5_ADAPTER_PAGE_SHIFT,
 							page_offset, 64,
@@ -1296,6 +1370,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
 	MLX5_SET(rqc, rqc, vsd, 1);
 	MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
 	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+	MLX5_SET(rqc, rqc, ts_format, ts_format);
 	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
 	MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
 	MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));
@@ -1393,10 +1468,10 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 }
 
 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
-				u32 *in, size_t inlen,
-				struct ib_pd *pd,
+				u32 *in, size_t inlen, struct ib_pd *pd,
 				struct ib_udata *udata,
-				struct mlx5_ib_create_qp_resp *resp)
+				struct mlx5_ib_create_qp_resp *resp,
+				struct ib_qp_init_attr *init_attr)
 {
 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
 	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
@@ -1415,7 +1490,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 		if (err)
 			return err;
 
-		err = create_raw_packet_qp_sq(dev, udata, sq, in, pd);
+		err = create_raw_packet_qp_sq(dev, udata, sq, in, pd,
+					      to_mcq(init_attr->send_cq));
 		if (err)
 			goto err_destroy_tis;
 
@@ -1437,7 +1513,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 			rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
 		if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING)
 			rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
-		err = create_raw_packet_qp_rq(dev, rq, in, pd);
+		err = create_raw_packet_qp_rq(dev, rq, in, pd,
+					      to_mcq(init_attr->recv_cq));
 		if (err)
 			goto err_destroy_sq;
 
@@ -1907,6 +1984,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	struct mlx5_ib_cq *recv_cq;
 	unsigned long flags;
 	struct mlx5_ib_qp_base *base;
+	int ts_format;
 	int mlx5_st;
 	void *qpc;
 	u32 *in;
@@ -1944,6 +2022,13 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
 		return -EINVAL;
 
+	if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+		ts_format = get_qp_ts_format(dev, to_mcq(init_attr->send_cq),
+					     to_mcq(init_attr->recv_cq));
+		if (ts_format < 0)
+			return ts_format;
+	}
+
 	err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
 			      &inlen, base, ucmd);
 	if (err)
@@ -1992,6 +2077,9 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
 	}
 
+	if (init_attr->qp_type != IB_QPT_RAW_PACKET)
+		MLX5_SET(qpc, qpc, ts_format, ts_format);
+
 	MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
 
 	if (qp->sq.wqe_cnt) {
@@ -2046,7 +2134,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd->sq_buf_addr;
 		raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
 		err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
-					   &params->resp);
+					   &params->resp, init_attr);
 	} else
 		err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
 
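Note: the three helpers above (get_rq_ts_format, get_sq_ts_format, get_qp_ts_format) all follow the same pattern: read the device timestamp-format capability, and only select the free-running format when the attached CQ asked for completion timestamps, otherwise fall back to the default format or fail with -EOPNOTSUPP. The snippet below is a minimal standalone sketch of that selection logic, not kernel code: pick_ts_format and the ts_cap/ts_format enums are hypothetical stand-ins for the mlx5 capability bits and MLX5_*_TIMESTAMP_FORMAT_* selectors, kept only to illustrate the decision flow.

/* Standalone model of the ts_format selection pattern (hypothetical names). */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum ts_cap {                          /* stand-in for the device capability field */
	TS_CAP_DEFAULT_ONLY,
	TS_CAP_FREE_RUNNING,
	TS_CAP_FREE_RUNNING_AND_REAL_TIME,
};

enum ts_format {                       /* stand-in for the context format selector */
	TS_FORMAT_DEFAULT,
	TS_FORMAT_FREE_RUNNING,
};

/* Completion timestamps require free-running support; otherwise use the default. */
static int pick_ts_format(enum ts_cap cap, bool cq_wants_timestamps)
{
	bool fr_supported = cap == TS_CAP_FREE_RUNNING ||
			    cap == TS_CAP_FREE_RUNNING_AND_REAL_TIME;

	if (cq_wants_timestamps) {
		if (!fr_supported)
			return -EOPNOTSUPP;
		return TS_FORMAT_FREE_RUNNING;
	}
	return TS_FORMAT_DEFAULT;
}

int main(void)
{
	printf("%d\n", pick_ts_format(TS_CAP_FREE_RUNNING, true));  /* FREE_RUNNING */
	printf("%d\n", pick_ts_format(TS_CAP_DEFAULT_ONLY, true));  /* -EOPNOTSUPP  */
	printf("%d\n", pick_ts_format(TS_CAP_DEFAULT_ONLY, false)); /* DEFAULT      */
	return 0;
}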