
Commit 6c52d4d

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma fixes from Jason Gunthorpe:

 - Put the QP netlink dump back in cxgb4, fixes a user visible regression

 - Don't change the rounding style in mlx5 for user provided rd_atomic values

 - Resolve a race in bnxt_re around the qp-handle table array

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/bnxt_re: synchronize the qp-handle table array
  RDMA/bnxt_re: Fix the usage of control path spin locks
  RDMA/mlx5: Round max_rd_atomic/max_dest_rd_atomic up instead of down
  RDMA/cxgb4: Dump vendor specific QP details
2 parents 5635f18 + 76d3ddf commit 6c52d4d

File tree

5 files changed, +28 −21 lines

drivers/infiniband/hw/bnxt_re/qplib_fp.c

Lines changed: 4 additions & 0 deletions
@@ -1532,9 +1532,11 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
 	u32 tbl_indx;
 	int rc;
 
+	spin_lock_bh(&rcfw->tbl_lock);
 	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
 	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
 	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
+	spin_unlock_bh(&rcfw->tbl_lock);
 
 	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
 				 CMDQ_BASE_OPCODE_DESTROY_QP,
@@ -1545,8 +1547,10 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
 				 sizeof(resp), 0);
 	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc) {
+		spin_lock_bh(&rcfw->tbl_lock);
 		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
 		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
+		spin_unlock_bh(&rcfw->tbl_lock);
 		return rc;
 	}
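The race these hunks close: bnxt_qplib_destroy_qp() clears the qp_tbl entry for a QP while the CREQ tasklet can concurrently look up that same entry to deliver a QP error notification, so the handler could dereference a handle that is being torn down. Below is a minimal user-space analogue of the pattern, assuming nothing beyond POSIX threads; the table, struct qp, and the mutex standing in for rcfw->tbl_lock are illustrative, not driver code.

/* Sketch only: a mutex-protected handle table, mirroring how rcfw->tbl_lock
 * now covers both the destroy path and the error-notification lookup. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define TBL_SIZE 8

struct qp { int id; };

static struct qp *qp_tbl[TBL_SIZE];
static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;

/* "Destroy" path: invalidate the entry and free the handle under the lock. */
static void destroy_qp(int idx)
{
	pthread_mutex_lock(&tbl_lock);
	free(qp_tbl[idx]);
	qp_tbl[idx] = NULL;
	pthread_mutex_unlock(&tbl_lock);
}

/* "Error notification" path: look up and use the handle under the same lock,
 * so it cannot be freed between the lookup and the use. */
static void handle_qp_error(int idx)
{
	pthread_mutex_lock(&tbl_lock);
	struct qp *qp = qp_tbl[idx];
	if (qp)
		printf("error reported on qp %d\n", qp->id);
	pthread_mutex_unlock(&tbl_lock);
}

static void *destroyer(void *arg) { (void)arg; destroy_qp(0); return NULL; }
static void *notifier(void *arg)  { (void)arg; handle_qp_error(0); return NULL; }

int main(void)
{
	pthread_t a, b;

	qp_tbl[0] = calloc(1, sizeof(*qp_tbl[0]));
	qp_tbl[0]->id = 1;

	pthread_create(&a, NULL, destroyer, NULL);
	pthread_create(&b, NULL, notifier, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Without the lock, notifier() could read a pointer that destroyer() is freeing; with it, the lookup either sees NULL or a handle that stays valid for the duration of the critical section.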

drivers/infiniband/hw/bnxt_re/qplib_rcfw.c

Lines changed: 19 additions & 19 deletions
@@ -290,7 +290,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
 	struct bnxt_qplib_hwq *hwq;
 	u32 sw_prod, cmdq_prod;
 	struct pci_dev *pdev;
-	unsigned long flags;
 	u16 cookie;
 	u8 *preq;
 
@@ -301,7 +300,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
 	/* Cmdq are in 16-byte units, each request can consume 1 or more
 	 * cmdqe
 	 */
-	spin_lock_irqsave(&hwq->lock, flags);
+	spin_lock_bh(&hwq->lock);
 	required_slots = bnxt_qplib_get_cmd_slots(msg->req);
 	free_slots = HWQ_FREE_SLOTS(hwq);
 	cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
@@ -311,7 +310,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
 		dev_info_ratelimited(&pdev->dev,
 				     "CMDQ is full req/free %d/%d!",
 				     required_slots, free_slots);
-		spin_unlock_irqrestore(&hwq->lock, flags);
+		spin_unlock_bh(&hwq->lock);
 		return -EAGAIN;
 	}
 	if (msg->block)
@@ -367,7 +366,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
 	wmb();
 	writel(cmdq_prod, cmdq->cmdq_mbox.prod);
 	writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
-	spin_unlock_irqrestore(&hwq->lock, flags);
+	spin_unlock_bh(&hwq->lock);
 	/* Return the CREQ response pointer */
 	return 0;
 }
@@ -486,7 +485,6 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
 {
 	struct creq_qp_event *evnt = (struct creq_qp_event *)msg->resp;
 	struct bnxt_qplib_crsqe *crsqe;
-	unsigned long flags;
 	u16 cookie;
 	int rc;
 	u8 opcode;
@@ -512,12 +510,12 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
 	rc = __poll_for_resp(rcfw, cookie);
 
 	if (rc) {
-		spin_lock_irqsave(&rcfw->cmdq.hwq.lock, flags);
+		spin_lock_bh(&rcfw->cmdq.hwq.lock);
 		crsqe = &rcfw->crsqe_tbl[cookie];
 		crsqe->is_waiter_alive = false;
 		if (rc == -ENODEV)
 			set_bit(FIRMWARE_STALL_DETECTED, &rcfw->cmdq.flags);
-		spin_unlock_irqrestore(&rcfw->cmdq.hwq.lock, flags);
+		spin_unlock_bh(&rcfw->cmdq.hwq.lock);
 		return -ETIMEDOUT;
 	}
 
@@ -628,7 +626,6 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
 	u16 cookie, blocked = 0;
 	bool is_waiter_alive;
 	struct pci_dev *pdev;
-	unsigned long flags;
 	u32 wait_cmds = 0;
 	int rc = 0;
 
@@ -637,17 +634,21 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
 	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
 		err_event = (struct creq_qp_error_notification *)qp_event;
 		qp_id = le32_to_cpu(err_event->xid);
+		spin_lock(&rcfw->tbl_lock);
 		tbl_indx = map_qp_id_to_tbl_indx(qp_id, rcfw);
 		qp = rcfw->qp_tbl[tbl_indx].qp_handle;
+		if (!qp) {
+			spin_unlock(&rcfw->tbl_lock);
+			break;
+		}
+		bnxt_qplib_mark_qp_error(qp);
+		rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp);
+		spin_unlock(&rcfw->tbl_lock);
 		dev_dbg(&pdev->dev, "Received QP error notification\n");
 		dev_dbg(&pdev->dev,
 			"qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
 			qp_id, err_event->req_err_state_reason,
 			err_event->res_err_state_reason);
-		if (!qp)
-			break;
-		bnxt_qplib_mark_qp_error(qp);
-		rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp);
 		break;
 	default:
 		/*
@@ -659,8 +660,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
 		 *
 		 */
 
-		spin_lock_irqsave_nested(&hwq->lock, flags,
-					 SINGLE_DEPTH_NESTING);
+		spin_lock_nested(&hwq->lock, SINGLE_DEPTH_NESTING);
 		cookie = le16_to_cpu(qp_event->cookie);
 		blocked = cookie & RCFW_CMD_IS_BLOCKING;
 		cookie &= RCFW_MAX_COOKIE_VALUE;
@@ -672,7 +672,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
 			dev_info(&pdev->dev,
 				 "rcfw timedout: cookie = %#x, free_slots = %d",
 				 cookie, crsqe->free_slots);
-			spin_unlock_irqrestore(&hwq->lock, flags);
+			spin_unlock(&hwq->lock);
 			return rc;
 		}
 
@@ -720,7 +720,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
 			__destroy_timedout_ah(rcfw,
 					      (struct creq_create_ah_resp *)
 					      qp_event);
-		spin_unlock_irqrestore(&hwq->lock, flags);
+		spin_unlock(&hwq->lock);
 	}
 	*num_wait += wait_cmds;
 	return rc;
@@ -734,12 +734,11 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t)
 	u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
 	struct bnxt_qplib_hwq *hwq = &creq->hwq;
 	struct creq_base *creqe;
-	unsigned long flags;
 	u32 num_wakeup = 0;
 	u32 hw_polled = 0;
 
 	/* Service the CREQ until budget is over */
-	spin_lock_irqsave(&hwq->lock, flags);
+	spin_lock_bh(&hwq->lock);
 	while (budget > 0) {
 		creqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
 		if (!CREQ_CMP_VALID(creqe, creq->creq_db.dbinfo.flags))
@@ -782,7 +781,7 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t)
 	if (hw_polled)
 		bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo,
 				      rcfw->res->cctx, true);
-	spin_unlock_irqrestore(&hwq->lock, flags);
+	spin_unlock_bh(&hwq->lock);
 	if (num_wakeup)
 		wake_up_nr(&rcfw->cmdq.waitq, num_wakeup);
 }
@@ -978,6 +977,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
 			       GFP_KERNEL);
 	if (!rcfw->qp_tbl)
 		goto fail;
+	spin_lock_init(&rcfw->tbl_lock);
 
 	rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout;
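The hwq->lock conversions above are possible because the lock is only taken from process context and from the CREQ tasklet (bnxt_qplib_service_creq), never from a hard-IRQ handler: process context only has to keep the softirq from sneaking in, which spin_lock_bh() does, and inside the tasklet softirqs are already disabled, so plain spin_lock()/spin_lock_nested() is sufficient. A hypothetical toy module sketching that split; demo_lock, demo_tasklet and the counter are invented for illustration, not part of the patch.

/* Sketch under the stated assumption: the lock is shared only between
 * process context and a tasklet, so the _irqsave variants are unnecessary. */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static struct tasklet_struct demo_tasklet;
static int shared_counter;

static void demo_tasklet_fn(struct tasklet_struct *t)
{
	/* Softirq context: bottom halves are already disabled here, so a
	 * plain spin_lock() is enough (this mirrors the CREQ tasklet). */
	spin_lock(&demo_lock);
	shared_counter++;
	spin_unlock(&demo_lock);
}

static int __init demo_init(void)
{
	tasklet_setup(&demo_tasklet, demo_tasklet_fn);

	/* Process context: spin_lock_bh() keeps the tasklet from preempting
	 * us on this CPU while we hold the lock; no hard-IRQ handler ever
	 * takes demo_lock, so disabling interrupts would be wasted work. */
	spin_lock_bh(&demo_lock);
	shared_counter = 0;
	spin_unlock_bh(&demo_lock);

	tasklet_schedule(&demo_tasklet);
	return 0;
}

static void __exit demo_exit(void)
{
	tasklet_kill(&demo_tasklet);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");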

drivers/infiniband/hw/bnxt_re/qplib_rcfw.h

Lines changed: 2 additions & 0 deletions
@@ -224,6 +224,8 @@ struct bnxt_qplib_rcfw {
 	struct bnxt_qplib_crsqe *crsqe_tbl;
 	int qp_tbl_size;
 	struct bnxt_qplib_qp_node *qp_tbl;
+	/* To synchronize the qp-handle hash table */
+	spinlock_t tbl_lock;
 	u64 oos_prev;
 	u32 init_oos_stats;
 	u32 cmdq_depth;

drivers/infiniband/hw/cxgb4/provider.c

Lines changed: 1 addition & 0 deletions
@@ -473,6 +473,7 @@ static const struct ib_device_ops c4iw_dev_ops = {
 	.fill_res_cq_entry = c4iw_fill_res_cq_entry,
 	.fill_res_cm_id_entry = c4iw_fill_res_cm_id_entry,
 	.fill_res_mr_entry = c4iw_fill_res_mr_entry,
+	.fill_res_qp_entry = c4iw_fill_res_qp_entry,
 	.get_dev_fw_str = get_dev_fw_str,
 	.get_dma_mr = c4iw_get_dma_mr,
 	.get_hw_stats = c4iw_get_mib,

drivers/infiniband/hw/mlx5/qp.c

Lines changed: 2 additions & 2 deletions
@@ -4268,14 +4268,14 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 		MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
 
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic)
-		MLX5_SET(qpc, qpc, log_sra_max, ilog2(attr->max_rd_atomic));
+		MLX5_SET(qpc, qpc, log_sra_max, fls(attr->max_rd_atomic - 1));
 
 	if (attr_mask & IB_QP_SQ_PSN)
 		MLX5_SET(qpc, qpc, next_send_psn, attr->sq_psn);
 
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic)
 		MLX5_SET(qpc, qpc, log_rra_max,
-			 ilog2(attr->max_dest_rd_atomic));
+			 fls(attr->max_dest_rd_atomic - 1));
 
 	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
 		err = set_qpc_atomic_flags(qp, attr, attr_mask, qpc);
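The mlx5 change replaces ilog2(x), which rounds a non-power-of-two request down, with fls(x - 1), which rounds it up, so the device is never configured for fewer outstanding RDMA read/atomic operations than userspace asked for. A small stand-alone demo of the two roundings; floor_log2() and fls_demo() are simplified re-implementations for illustration, not the kernel macros.

#include <stdio.h>

/* floor(log2(x)) for x > 0, like the kernel's ilog2() (the old behaviour). */
static unsigned int floor_log2(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

/* Position of the highest set bit, 1-based, like the kernel's fls();
 * fls(0) is 0, so fls(x - 1) gives ceil(log2(x)) for x > 0. */
static unsigned int fls_demo(unsigned int x)
{
	unsigned int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	/* For a requested value of 5: ilog2(5) = 2 -> limit 4 (rounded down),
	 * fls(5 - 1) = 3 -> limit 8 (rounded up, never below the request). */
	for (unsigned int v = 1; v <= 8; v++)
		printf("requested %u: rounded down %u, rounded up %u\n",
		       v, 1u << floor_log2(v), 1u << fls_demo(v - 1));
	return 0;
}

For a request of 5, the old expression programs log 2 (a hardware limit of 4); the new one programs log 3 (a limit of 8).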
