
Commit ae2911d

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma fixes from Jason Gunthorpe:
 "Two more merge window regressions, a corruption bug in hfi1 and a few
  other small fixes.

   - Missing user input validation regression in ucma

   - Disallowing a previously allowed user combination regression in mlx5

   - ODP prefetch memory leaking triggerable by userspace

   - Memory corruption in hfi1 due to faulty ring buffer logic

   - Missed mutex initialization crash in mlx5

   - Two small defects with RDMA DIM"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/core: Free DIM memory in error unwind
  RDMA/core: Stop DIM before destroying CQ
  RDMA/mlx5: Initialize QP mutex for the debug kernels
  IB/rdmavt: Fix RQ counting issues causing use of an invalid RWQE
  RDMA/mlx5: Allow providing extra scatter CQE QP flag
  RDMA/mlx5: Fix prefetch memory leak if get_prefetchable_mr fails
  RDMA/cm: Add min length checks to user structure copies
2 parents 78431ab + fb448ce commit ae2911d


7 files changed: +57 additions, -51 deletions


drivers/infiniband/core/cq.c

Lines changed: 11 additions & 3 deletions
@@ -72,6 +72,15 @@ static void rdma_dim_init(struct ib_cq *cq)
         INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
 }
 
+static void rdma_dim_destroy(struct ib_cq *cq)
+{
+        if (!cq->dim)
+                return;
+
+        cancel_work_sync(&cq->dim->work);
+        kfree(cq->dim);
+}
+
 static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
 {
         int rc;
@@ -266,6 +275,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
         return cq;
 
 out_destroy_cq:
+        rdma_dim_destroy(cq);
         rdma_restrack_del(&cq->res);
         cq->device->ops.destroy_cq(cq, udata);
 out_free_wc:
@@ -331,12 +341,10 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
                 WARN_ON_ONCE(1);
         }
 
+        rdma_dim_destroy(cq);
         trace_cq_free(cq);
         rdma_restrack_del(&cq->res);
         cq->device->ops.destroy_cq(cq, udata);
-        if (cq->dim)
-                cancel_work_sync(&cq->dim->work);
-        kfree(cq->dim);
         kfree(cq->wc);
         kfree(cq);
 }
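Editor's note: the two DIM fixes fold the cleanup into one rdma_dim_destroy() helper and call it both in the __ib_alloc_cq_user() error unwind and in ib_free_cq_user() before ops.destroy_cq(), so the DIM work item is cancelled and its memory freed before the CQ it references goes away. Below is a minimal user-space sketch of that "cancel deferred work before freeing its context" pattern, with pthreads standing in for the kernel workqueue; every name in it is hypothetical, not kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct dim_ctx {
        pthread_t worker;
        atomic_bool stop;       /* set by the destroyer, read by the worker */
};

static void *dim_work(void *arg)
{
        struct dim_ctx *ctx = arg;

        while (!atomic_load(&ctx->stop))
                usleep(1000);   /* pretend to re-tune interrupt moderation */
        return NULL;
}

/* Analogue of rdma_dim_destroy(): stop the work, wait for it, then free. */
static void dim_destroy(struct dim_ctx *ctx)
{
        if (!ctx)
                return;
        atomic_store(&ctx->stop, true);
        pthread_join(ctx->worker, NULL);        /* like cancel_work_sync() */
        free(ctx);
}

int main(void)
{
        struct dim_ctx *ctx = calloc(1, sizeof(*ctx));

        if (!ctx || pthread_create(&ctx->worker, NULL, dim_work, ctx))
                return 1;
        dim_destroy(ctx);       /* no work can run after the context is freed */
        printf("worker stopped before its context was freed\n");
        return 0;
}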

drivers/infiniband/core/ucma.c

Lines changed: 4 additions & 0 deletions
@@ -1084,6 +1084,8 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
         size_t in_size;
         int ret;
 
+        if (in_len < offsetofend(typeof(cmd), reserved))
+                return -EINVAL;
         in_size = min_t(size_t, in_len, sizeof(cmd));
         if (copy_from_user(&cmd, inbuf, in_size))
                 return -EFAULT;
@@ -1141,6 +1143,8 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
         size_t in_size;
         int ret;
 
+        if (in_len < offsetofend(typeof(cmd), reserved))
+                return -EINVAL;
         in_size = min_t(size_t, in_len, sizeof(cmd));
         if (copy_from_user(&cmd, inbuf, in_size))
                 return -EFAULT;
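Editor's note: ucma_connect() and ucma_accept() now require in_len to cover every field up to and including 'reserved' before the min_t()-bounded copy_from_user(); a shorter buffer would otherwise leave the tail of the on-stack command undefined while the handler still reads it. A user-space sketch of the offsetofend() idea follows, using a made-up struct (not the real rdma_ucm uAPI layout).

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define offsetofend(type, member) \
        (offsetof(type, member) + sizeof(((type *)0)->member))

struct fake_cmd {
        uint64_t response;
        uint32_t id;
        uint32_t reserved;
        uint8_t  payload[64];   /* optional tail, may legitimately be absent */
};

static int handle_cmd(const void *inbuf, size_t in_len)
{
        struct fake_cmd cmd = {0};

        /* Everything up to and including 'reserved' is mandatory. */
        if (in_len < offsetofend(struct fake_cmd, reserved))
                return -1;      /* -EINVAL in the kernel */

        memcpy(&cmd, inbuf, in_len < sizeof(cmd) ? in_len : sizeof(cmd));
        return (int)cmd.id;
}

int main(void)
{
        unsigned char buf[sizeof(struct fake_cmd)] = {0};

        printf("short buffer -> %d\n", handle_cmd(buf, 4));   /* rejected */
        printf("full header  -> %d\n",
               handle_cmd(buf, offsetofend(struct fake_cmd, reserved)));
        return 0;
}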

drivers/infiniband/hw/mlx5/odp.c

Lines changed: 2 additions & 3 deletions
@@ -1797,9 +1797,7 @@ static bool init_prefetch_work(struct ib_pd *pd,
                 work->frags[i].mr =
                         get_prefetchable_mr(pd, advice, sg_list[i].lkey);
                 if (!work->frags[i].mr) {
-                        work->num_sge = i - 1;
-                        if (i)
-                                destroy_prefetch_work(work);
+                        work->num_sge = i;
                         return false;
                 }
 
@@ -1865,6 +1863,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
         srcu_key = srcu_read_lock(&dev->odp_srcu);
         if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
                 srcu_read_unlock(&dev->odp_srcu, srcu_key);
+                destroy_prefetch_work(work);
                 return -EINVAL;
         }
         queue_work(system_unbound_wq, &work->work);
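Editor's note: init_prefetch_work() now records num_sge = i, exactly the number of MRs it actually obtained, and no longer frees the work itself; the caller always calls destroy_prefetch_work() on failure, closing the leak named in the commit "RDMA/mlx5: Fix prefetch memory leak if get_prefetchable_mr fails". A small sketch of that "record progress, single cleanup owner" pattern, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct frag { void *buf; };

struct work {
        unsigned int num;       /* number of successfully initialized frags */
        struct frag frags[8];
};

static void destroy_work(struct work *w)
{
        for (unsigned int i = 0; i < w->num; i++)
                free(w->frags[i].buf);
        free(w);
}

/* Returns 0 on success; on failure w->num still reflects what is held. */
static int init_work(struct work *w, unsigned int wanted, unsigned int fail_at)
{
        for (unsigned int i = 0; i < wanted; i++) {
                w->frags[i].buf = (i == fail_at) ? NULL : malloc(64);
                if (!w->frags[i].buf) {
                        w->num = i;     /* count only what we actually hold */
                        return -1;
                }
                w->num = i + 1;
        }
        return 0;
}

int main(void)
{
        struct work *w = calloc(1, sizeof(*w));

        if (!w)
                return 1;
        if (init_work(w, 6, 3))
                printf("init failed, releasing %u frags\n", w->num);
        destroy_work(w);        /* the caller owns cleanup on every path */
        return 0;
}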

drivers/infiniband/hw/mlx5/qp.c

Lines changed: 16 additions & 13 deletions
@@ -1766,15 +1766,14 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 }
 
 static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
+                                         struct mlx5_ib_qp *qp,
                                          struct ib_qp_init_attr *init_attr,
-                                         struct mlx5_ib_create_qp *ucmd,
                                          void *qpc)
 {
         int scqe_sz;
         bool allow_scat_cqe = false;
 
-        if (ucmd)
-                allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
+        allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
 
         if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
                 return;
@@ -1853,8 +1852,6 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
         u32 *in;
         int err;
 
-        mutex_init(&qp->mutex);
-
         if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
                 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
 
@@ -1938,7 +1935,6 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
         u32 *in;
         int err;
 
-        mutex_init(&qp->mutex);
         spin_lock_init(&qp->sq.lock);
         spin_lock_init(&qp->rq.lock);
 
@@ -2012,7 +2008,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
         }
         if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
             (qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC))
-                configure_requester_scat_cqe(dev, init_attr, ucmd, qpc);
+                configure_requester_scat_cqe(dev, qp, init_attr, qpc);
 
         if (qp->rq.wqe_cnt) {
                 MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
@@ -2129,7 +2125,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
         u32 *in;
         int err;
 
-        mutex_init(&qp->mutex);
         spin_lock_init(&qp->sq.lock);
         spin_lock_init(&qp->rq.lock);
 
@@ -2543,13 +2538,18 @@ static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
                 return;
         }
 
-        if (flag == MLX5_QP_FLAG_SCATTER_CQE) {
+        switch (flag) {
+        case MLX5_QP_FLAG_SCATTER_CQE:
+        case MLX5_QP_FLAG_ALLOW_SCATTER_CQE:
                 /*
-                 * We don't return error if this flag was provided,
-                 * and mlx5 doesn't have right capability.
-                 */
-                *flags &= ~MLX5_QP_FLAG_SCATTER_CQE;
+                 * We don't return error if these flags were provided,
+                 * and mlx5 doesn't have right capability.
+                 */
+                *flags &= ~(MLX5_QP_FLAG_SCATTER_CQE |
+                            MLX5_QP_FLAG_ALLOW_SCATTER_CQE);
                 return;
+        default:
+                break;
         }
         mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
 }
@@ -2589,6 +2589,8 @@ static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
         process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
         process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
                             MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
+        process_vendor_flag(dev, &flags, MLX5_QP_FLAG_ALLOW_SCATTER_CQE,
+                            MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
 
         if (qp->type == IB_QPT_RAW_PACKET) {
                 cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||
@@ -2963,6 +2965,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
                 goto free_ucmd;
         }
 
+        mutex_init(&qp->mutex);
         qp->type = type;
         if (udata) {
                 err = process_vendor_flags(dev, qp, params.ucmd, attr);
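Editor's note: two independent fixes land in this file. qp->mutex is now initialized once in mlx5_ib_create_qp(), before any type-specific create path can take it (the "missed mutex initialization crash" from the merge summary), and the MLX5_QP_FLAG_ALLOW_SCATTER_CQE vendor flag is accepted by process_vendor_flags() and read back from qp->flags_en instead of the raw user command. The sketch below illustrates only the first idea, initializing a lock in the one common constructor so every later path may lock it safely; the names are hypothetical, not mlx5 code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

enum qp_kind { KIND_KERNEL, KIND_USER, KIND_XRC_TGT };

struct qp {
        pthread_mutex_t mutex;  /* used by every kind of QP */
        enum qp_kind kind;
};

static int create_user_qp(struct qp *qp)    { qp->kind = KIND_USER;    return 0; }
static int create_kernel_qp(struct qp *qp)  { qp->kind = KIND_KERNEL;  return 0; }
static int create_xrc_tgt_qp(struct qp *qp) { qp->kind = KIND_XRC_TGT; return 0; }

static struct qp *create_qp(enum qp_kind kind)
{
        struct qp *qp = calloc(1, sizeof(*qp));

        if (!qp)
                return NULL;
        /* Common init: no path below can take an uninitialized mutex. */
        pthread_mutex_init(&qp->mutex, NULL);

        switch (kind) {
        case KIND_USER:    create_user_qp(qp);    break;
        case KIND_KERNEL:  create_kernel_qp(qp);  break;
        case KIND_XRC_TGT: create_xrc_tgt_qp(qp); break;
        }
        return qp;
}

int main(void)
{
        struct qp *qp = create_qp(KIND_XRC_TGT);

        if (!qp)
                return 1;
        pthread_mutex_lock(&qp->mutex);         /* safe for every QP kind */
        pthread_mutex_unlock(&qp->mutex);
        printf("kind=%d created with an initialized mutex\n", qp->kind);
        free(qp);
        return 0;
}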

drivers/infiniband/sw/rdmavt/qp.c

Lines changed: 4 additions & 29 deletions
@@ -901,8 +901,6 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
         qp->s_tail_ack_queue = 0;
         qp->s_acked_ack_queue = 0;
         qp->s_num_rd_atomic = 0;
-        if (qp->r_rq.kwq)
-                qp->r_rq.kwq->count = qp->r_rq.size;
         qp->r_sge.num_sge = 0;
         atomic_set(&qp->s_reserved_used, 0);
 }
@@ -2366,31 +2364,6 @@ static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
         return 0;
 }
 
-/**
- * get_count - count numbers of request work queue entries
- * in circular buffer
- * @rq: data structure for request queue entry
- * @tail: tail indices of the circular buffer
- * @head: head indices of the circular buffer
- *
- * Return - total number of entries in the circular buffer
- */
-static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head)
-{
-        u32 count;
-
-        count = head;
-
-        if (count >= rq->size)
-                count = 0;
-        if (count < tail)
-                count += rq->size - tail;
-        else
-                count -= tail;
-
-        return count;
-}
-
 /**
  * get_rvt_head - get head indices of the circular buffer
  * @rq: data structure for request queue entry
@@ -2465,7 +2438,7 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
 
         if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
                 head = get_rvt_head(rq, ip);
-                kwq->count = get_count(rq, tail, head);
+                kwq->count = rvt_get_rq_count(rq, head, tail);
         }
         if (unlikely(kwq->count == 0)) {
                 ret = 0;
@@ -2500,7 +2473,9 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
          * the number of remaining WQEs.
          */
         if (kwq->count < srq->limit) {
-                kwq->count = get_count(rq, tail, get_rvt_head(rq, ip));
+                kwq->count =
+                        rvt_get_rq_count(rq,
+                                         get_rvt_head(rq, ip), tail);
                 if (kwq->count < srq->limit) {
                         struct ib_event ev;
 
drivers/infiniband/sw/rdmavt/rc.c

Lines changed: 1 addition & 3 deletions
@@ -127,9 +127,7 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp)
                  * not atomic, which is OK, since the fuzziness is
                  * resolved as further ACKs go out.
                  */
-                credits = head - tail;
-                if ((int)credits < 0)
-                        credits += qp->r_rq.size;
+                credits = rvt_get_rq_count(&qp->r_rq, head, tail);
         }
         /*
          * Binary search the credit table to find the code to

include/rdma/rdmavt_qp.h

Lines changed: 19 additions & 0 deletions
@@ -305,6 +305,25 @@ struct rvt_rq {
         spinlock_t lock ____cacheline_aligned_in_smp;
 };
 
+/**
+ * rvt_get_rq_count - count numbers of request work queue entries
+ * in circular buffer
+ * @rq: data structure for request queue entry
+ * @head: head indices of the circular buffer
+ * @tail: tail indices of the circular buffer
+ *
+ * Return - total number of entries in the Receive Queue
+ */
+
+static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
+{
+        u32 count = head - tail;
+
+        if ((s32)count < 0)
+                count += rq->size;
+        return count;
+}
+
 /*
  * This structure holds the information that the send tasklet needs
  * to send a RDMA read response or atomic operation.
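Editor's note: rvt_get_rq_count() replaces the open-coded get_count() removed from rdmavt/qp.c and rc.c: receive-queue occupancy is simply head - tail, wrapped by rq->size when the subtraction goes negative. A tiny user-space check of that arithmetic, with hypothetical ring sizes and indices:

#include <stdint.h>
#include <stdio.h>

struct ring { uint32_t size; };

static uint32_t ring_count(const struct ring *rq, uint32_t head, uint32_t tail)
{
        uint32_t count = head - tail;

        if ((int32_t)count < 0)
                count += rq->size;      /* head wrapped past the end */
        return count;
}

int main(void)
{
        struct ring rq = { .size = 32 };

        /* No wrap: producer at 10, consumer at 4 -> 6 entries queued. */
        printf("%u\n", ring_count(&rq, 10, 4));
        /* Wrapped: producer back at 3, consumer still at 28 -> 7 entries. */
        printf("%u\n", ring_count(&rq, 3, 28));
        /* Empty ring. */
        printf("%u\n", ring_count(&rq, 17, 17));
        return 0;
}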
