Commit 3d51520

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma updates from Jason Gunthorpe:
 "Usual collection of small improvements and fixes:

  - Bug fixes and minor improvements in efa, irdma, mlx4, mlx5, rxe, hfi1, qib, ocrdma

  - bnxt_re support for MSN, which is a new retransmit logic

  - Initial mana support for RC QPs

  - Use-after-free bug and cleanups in iwcm

  - Reduce resource usage in mlx5 when RDMA verbs features are not used

  - New verb to drain shared receive queues, similar to normal receive queues. This is necessary to allow ULPs a clean shutdown. Used in the iSCSI RDMA target

  - mlx5 support for more than 16 bits of doorbell indexes

  - Doorbell moderation support for bnxt_re

  - IB multi-plane support for mlx5

  - New EFA adaptor PCI IDs

  - RDMA_NAME_ASSIGN_TYPE_USER to hint to userspace that it shouldn't rename the device

  - A collection of hns bug fixes

  - Fix a long-standing bug in bnxt_re with incorrect endian handling of immediate data"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (65 commits)
  IB/hfi1: Constify struct flag_table
  RDMA/mana_ib: Set correct device into ib
  bnxt_re: Fix imm_data endianness
  RDMA: Fix netdev tracker in ib_device_set_netdev
  RDMA/hns: Fix mbx timing out before CMD execution is completed
  RDMA/hns: Fix insufficient extend DB for VFs.
  RDMA/hns: Fix undifined behavior caused by invalid max_sge
  RDMA/hns: Fix shift-out-bounds when max_inline_data is 0
  RDMA/hns: Fix missing pagesize and alignment check in FRMR
  RDMA/hns: Fix unmatch exception handling when init eq table fails
  RDMA/hns: Fix soft lockup under heavy CEQE load
  RDMA/hns: Check atomic wr length
  RDMA/ocrdma: Don't inline statistics functions
  RDMA/core: Introduce "name_assign_type" for an IB device
  RDMA/qib: Fix truncation compilation warnings in qib_verbs.c
  RDMA/qib: Fix truncation compilation warnings in qib_init.c
  RDMA/efa: Add EFA 0xefa3 PCI ID
  RDMA/mlx5: Support per-plane port IB counters by querying PPCNT register
  net/mlx5: mlx5_ifc update for accessing ppcnt register of plane ports
  RDMA/mlx5: Add plane index support when querying PTYS registers
  ...
2 parents ef7c8f2 + 887cd30 commit 3d51520


106 files changed (+1785, -414 lines). Only a subset of the changed files is shown below.
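
One fix called out in the summary above is the bnxt_re immediate-data endianness bug. As a generic illustration (a sketch, not the bnxt_re patch itself), the core verbs structures carry immediate data as __be32, so producers and consumers are expected to convert explicitly; the example_ helper names below are hypothetical:

#include <rdma/ib_verbs.h>

/* Generic sketch, not the bnxt_re patch: ib_wc.ex.imm_data and
 * ib_send_wr.ex.imm_data are __be32, i.e. already in wire (big-endian)
 * order, so conversions must be explicit in both directions. */
static inline u32 example_imm_from_wc(const struct ib_wc *wc)
{
        return be32_to_cpu(wc->ex.imm_data);    /* wire -> CPU order */
}

static inline void example_imm_to_wr(struct ib_send_wr *wr, u32 imm)
{
        wr->ex.imm_data = cpu_to_be32(imm);     /* CPU -> wire order */
}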

MAINTAINERS (1 addition, 1 deletion)

@@ -11207,7 +11207,7 @@ F: include/linux/net/intel/iidc.h
 INTEL ETHERNET PROTOCOL DRIVER FOR RDMA
 M:	Mustafa Ismail <[email protected]>
-M:	Shiraz Saleem <shiraz.saleem@intel.com>
+M:	Tatyana Nikolova <tatyana.e.nikolova@intel.com>
 S:	Supported
 F:	drivers/infiniband/hw/irdma/

drivers/infiniband/core/agent.c (22 additions, 10 deletions)

@@ -59,7 +59,16 @@ __ib_get_agent_port(const struct ib_device *device, int port_num)
 	struct ib_agent_port_private *entry;
 
 	list_for_each_entry(entry, &ib_agent_port_list, port_list) {
-		if (entry->agent[1]->device == device &&
+		/* Need to check both agent[0] and agent[1], as an agent port
+		 * may only have one of them
+		 */
+		if (entry->agent[0] &&
+		    entry->agent[0]->device == device &&
+		    entry->agent[0]->port_num == port_num)
+			return entry;
+
+		if (entry->agent[1] &&
+		    entry->agent[1]->device == device &&
 		    entry->agent[1]->port_num == port_num)
 			return entry;
 	}
@@ -172,14 +181,16 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 		}
 	}
 
-	/* Obtain send only MAD agent for GSI QP */
-	port_priv->agent[1] = ib_register_mad_agent(device, port_num,
-						    IB_QPT_GSI, NULL, 0,
-						    &agent_send_handler,
-						    NULL, NULL, 0);
-	if (IS_ERR(port_priv->agent[1])) {
-		ret = PTR_ERR(port_priv->agent[1]);
-		goto error3;
+	if (rdma_cap_ib_cm(device, port_num)) {
+		/* Obtain send only MAD agent for GSI QP */
+		port_priv->agent[1] = ib_register_mad_agent(device, port_num,
+							    IB_QPT_GSI, NULL, 0,
+							    &agent_send_handler,
+							    NULL, NULL, 0);
+		if (IS_ERR(port_priv->agent[1])) {
+			ret = PTR_ERR(port_priv->agent[1]);
+			goto error3;
+		}
 	}
 
 	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
@@ -212,7 +223,8 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
 	list_del(&port_priv->port_list);
 	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
 
-	ib_unregister_mad_agent(port_priv->agent[1]);
+	if (port_priv->agent[1])
+		ib_unregister_mad_agent(port_priv->agent[1]);
 	if (port_priv->agent[0])
 		ib_unregister_mad_agent(port_priv->agent[0]);
 
drivers/infiniband/core/cache.c (5 additions, 9 deletions)

@@ -794,7 +794,6 @@ static struct ib_gid_table *alloc_gid_table(int sz)
 static void release_gid_table(struct ib_device *device,
 			      struct ib_gid_table *table)
 {
-	bool leak = false;
 	int i;
 
 	if (!table)
@@ -803,15 +802,12 @@ static void release_gid_table(struct ib_device *device,
 	for (i = 0; i < table->sz; i++) {
 		if (is_gid_entry_free(table->data_vec[i]))
 			continue;
-		if (kref_read(&table->data_vec[i]->kref) > 1) {
-			dev_err(&device->dev,
-				"GID entry ref leak for index %d ref=%u\n", i,
-				kref_read(&table->data_vec[i]->kref));
-			leak = true;
-		}
+
+		WARN_ONCE(true,
+			  "GID entry ref leak for dev %s index %d ref=%u\n",
+			  dev_name(&device->dev), i,
+			  kref_read(&table->data_vec[i]->kref));
 	}
-	if (leak)
-		return;
 
 	mutex_destroy(&table->lock);
 	kfree(table->data_vec);
drivers/infiniband/core/device.c (74 additions, 9 deletions)

@@ -503,6 +503,7 @@ static void ib_device_release(struct device *device)
 			  rcu_head);
 	}
 
+	mutex_destroy(&dev->subdev_lock);
 	mutex_destroy(&dev->unregistration_lock);
 	mutex_destroy(&dev->compat_devs_mutex);
 
@@ -641,6 +642,11 @@ struct ib_device *_ib_alloc_device(size_t size)
 			BIT_ULL(IB_USER_VERBS_CMD_REG_MR) |
 			BIT_ULL(IB_USER_VERBS_CMD_REREG_MR) |
 			BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ);
+
+	mutex_init(&device->subdev_lock);
+	INIT_LIST_HEAD(&device->subdev_list_head);
+	INIT_LIST_HEAD(&device->subdev_list);
+
 	return device;
 }
 EXPORT_SYMBOL(_ib_alloc_device);
@@ -1461,6 +1467,18 @@ EXPORT_SYMBOL(ib_register_device);
 /* Callers must hold a get on the device. */
 static void __ib_unregister_device(struct ib_device *ib_dev)
 {
+	struct ib_device *sub, *tmp;
+
+	mutex_lock(&ib_dev->subdev_lock);
+	list_for_each_entry_safe_reverse(sub, tmp,
+					 &ib_dev->subdev_list_head,
+					 subdev_list) {
+		list_del(&sub->subdev_list);
+		ib_dev->ops.del_sub_dev(sub);
+		ib_device_put(ib_dev);
+	}
+	mutex_unlock(&ib_dev->subdev_lock);
+
 	/*
 	 * We have a registration lock so that all the calls to unregister are
 	 * fully fenced, once any unregister returns the device is truely
@@ -2146,6 +2164,9 @@ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
 	unsigned long flags;
 	int ret;
 
+	if (!rdma_is_port_valid(ib_dev, port))
+		return -EINVAL;
+
 	/*
 	 * Drivers wish to call this before ib_register_driver, so we have to
 	 * setup the port data early.
@@ -2154,9 +2175,6 @@ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
 	if (ret)
 		return ret;
 
-	if (!rdma_is_port_valid(ib_dev, port))
-		return -EINVAL;
-
 	pdata = &ib_dev->port_data[port];
 	spin_lock_irqsave(&pdata->netdev_lock, flags);
 	old_ndev = rcu_dereference_protected(
@@ -2166,16 +2184,12 @@ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
 		return 0;
 	}
 
-	if (old_ndev)
-		netdev_tracker_free(ndev, &pdata->netdev_tracker);
-	if (ndev)
-		netdev_hold(ndev, &pdata->netdev_tracker, GFP_ATOMIC);
 	rcu_assign_pointer(pdata->netdev, ndev);
+	netdev_put(old_ndev, &pdata->netdev_tracker);
+	netdev_hold(ndev, &pdata->netdev_tracker, GFP_ATOMIC);
 	spin_unlock_irqrestore(&pdata->netdev_lock, flags);
 
 	add_ndev_hash(pdata);
-	__dev_put(old_ndev);
-
 	return 0;
 }
 EXPORT_SYMBOL(ib_device_set_netdev);
@@ -2597,6 +2611,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
 		ops->uverbs_no_driver_id_binding;
 
 	SET_DEVICE_OP(dev_ops, add_gid);
+	SET_DEVICE_OP(dev_ops, add_sub_dev);
 	SET_DEVICE_OP(dev_ops, advise_mr);
 	SET_DEVICE_OP(dev_ops, alloc_dm);
 	SET_DEVICE_OP(dev_ops, alloc_hw_device_stats);
@@ -2631,6 +2646,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
 	SET_DEVICE_OP(dev_ops, dealloc_ucontext);
 	SET_DEVICE_OP(dev_ops, dealloc_xrcd);
 	SET_DEVICE_OP(dev_ops, del_gid);
+	SET_DEVICE_OP(dev_ops, del_sub_dev);
 	SET_DEVICE_OP(dev_ops, dereg_mr);
 	SET_DEVICE_OP(dev_ops, destroy_ah);
 	SET_DEVICE_OP(dev_ops, destroy_counters);
@@ -2727,6 +2743,55 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
 }
 EXPORT_SYMBOL(ib_set_device_ops);
 
+int ib_add_sub_device(struct ib_device *parent,
+		      enum rdma_nl_dev_type type,
+		      const char *name)
+{
+	struct ib_device *sub;
+	int ret = 0;
+
+	if (!parent->ops.add_sub_dev || !parent->ops.del_sub_dev)
+		return -EOPNOTSUPP;
+
+	if (!ib_device_try_get(parent))
+		return -EINVAL;
+
+	sub = parent->ops.add_sub_dev(parent, type, name);
+	if (IS_ERR(sub)) {
+		ib_device_put(parent);
+		return PTR_ERR(sub);
+	}
+
+	sub->type = type;
+	sub->parent = parent;
+
+	mutex_lock(&parent->subdev_lock);
+	list_add_tail(&parent->subdev_list_head, &sub->subdev_list);
+	mutex_unlock(&parent->subdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(ib_add_sub_device);
+
+int ib_del_sub_device_and_put(struct ib_device *sub)
+{
+	struct ib_device *parent = sub->parent;
+
+	if (!parent)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&parent->subdev_lock);
+	list_del(&sub->subdev_list);
+	mutex_unlock(&parent->subdev_lock);
+
+	ib_device_put(sub);
+	parent->ops.del_sub_dev(sub);
+	ib_device_put(parent);
+
+	return 0;
+}
+EXPORT_SYMBOL(ib_del_sub_device_and_put);
+
 #ifdef CONFIG_INFINIBAND_VIRT_DMA
 int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents)
 {
drivers/infiniband/core/iwcm.c (18 additions, 23 deletions)

@@ -143,8 +143,8 @@ static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
 
 	if (list_empty(&cm_id_priv->work_free_list))
 		return NULL;
-	work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
-			  free_list);
+	work = list_first_entry(&cm_id_priv->work_free_list, struct iwcm_work,
+				free_list);
 	list_del_init(&work->free_list);
 	return work;
 }
@@ -206,17 +206,17 @@ static void free_cm_id(struct iwcm_id_private *cm_id_priv)
 
 /*
  * Release a reference on cm_id. If the last reference is being
- * released, free the cm_id and return 1.
+ * released, free the cm_id and return 'true'.
  */
-static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
+static bool iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
 {
 	if (refcount_dec_and_test(&cm_id_priv->refcount)) {
 		BUG_ON(!list_empty(&cm_id_priv->work_list));
 		free_cm_id(cm_id_priv);
-		return 1;
+		return true;
 	}
 
-	return 0;
+	return false;
 }
 
 static void add_ref(struct iw_cm_id *cm_id)
@@ -368,8 +368,10 @@ EXPORT_SYMBOL(iw_cm_disconnect);
  *
  * Clean up all resources associated with the connection and release
  * the initial reference taken by iw_create_cm_id.
+ *
+ * Returns true if and only if the last cm_id_priv reference has been dropped.
  */
-static void destroy_cm_id(struct iw_cm_id *cm_id)
+static bool destroy_cm_id(struct iw_cm_id *cm_id)
 {
 	struct iwcm_id_private *cm_id_priv;
 	struct ib_qp *qp;
@@ -439,7 +441,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
 		iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
 	}
 
-	(void)iwcm_deref_id(cm_id_priv);
+	return iwcm_deref_id(cm_id_priv);
 }
 
 /*
@@ -450,7 +452,8 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
  */
 void iw_destroy_cm_id(struct iw_cm_id *cm_id)
 {
-	destroy_cm_id(cm_id);
+	if (!destroy_cm_id(cm_id))
+		flush_workqueue(iwcm_wq);
 }
 EXPORT_SYMBOL(iw_destroy_cm_id);
 
@@ -1017,30 +1020,25 @@ static void cm_work_handler(struct work_struct *_work)
 	struct iw_cm_event levent;
 	struct iwcm_id_private *cm_id_priv = work->cm_id;
 	unsigned long flags;
-	int empty;
 	int ret = 0;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
-	empty = list_empty(&cm_id_priv->work_list);
-	while (!empty) {
-		work = list_entry(cm_id_priv->work_list.next,
-				  struct iwcm_work, list);
+	while (!list_empty(&cm_id_priv->work_list)) {
+		work = list_first_entry(&cm_id_priv->work_list,
					struct iwcm_work, list);
 		list_del_init(&work->list);
-		empty = list_empty(&cm_id_priv->work_list);
 		levent = work->event;
 		put_work(work);
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
 		if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
 			ret = process_event(cm_id_priv, &levent);
 			if (ret)
-				destroy_cm_id(&cm_id_priv->id);
+				WARN_ON_ONCE(destroy_cm_id(&cm_id_priv->id));
 		} else
 			pr_debug("dropping event %d\n", levent.event);
 		if (iwcm_deref_id(cm_id_priv))
 			return;
-		if (empty)
-			return;
 		spin_lock_irqsave(&cm_id_priv->lock, flags);
 	}
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -1093,11 +1091,8 @@ static int cm_event_handler(struct iw_cm_id *cm_id,
 	}
 
 	refcount_inc(&cm_id_priv->refcount);
-	if (list_empty(&cm_id_priv->work_list)) {
-		list_add_tail(&work->list, &cm_id_priv->work_list);
-		queue_work(iwcm_wq, &work->work);
-	} else
-		list_add_tail(&work->list, &cm_id_priv->work_list);
+	list_add_tail(&work->list, &cm_id_priv->work_list);
+	queue_work(iwcm_wq, &work->work);
 out:
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 	return ret;
drivers/infiniband/core/mad.c (6 additions, 3 deletions)

@@ -2983,9 +2983,12 @@ static int ib_mad_port_open(struct ib_device *device,
 		if (ret)
 			goto error6;
 	}
-	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
-	if (ret)
-		goto error7;
+
+	if (rdma_cap_ib_cm(device, port_num)) {
+		ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
+		if (ret)
+			goto error7;
+	}
 
 	snprintf(name, sizeof(name), "ib_mad%u", port_num);
 	port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);