
Commit a926a90

Bob Pearson authored and jgunthorpe committed
RDMA/rxe: Do not call dev_mc_add/del() under a spinlock
These routines were not intended to be called under a spinlock and will throw debugging warnings:

  raw_local_irq_restore() called with IRQs enabled
  WARNING: CPU: 13 PID: 3107 at kernel/locking/irqflag-debug.c:10 warn_bogus_irq_restore+0x2f/0x50
  CPU: 13 PID: 3107 Comm: python3 Tainted: G E 5.18.0-rc1+ #7
  Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006
  RIP: 0010:warn_bogus_irq_restore+0x2f/0x50
  Call Trace:
   <TASK>
   _raw_spin_unlock_irqrestore+0x75/0x80
   rxe_attach_mcast+0x304/0x480 [rdma_rxe]
   ib_attach_mcast+0x88/0xa0 [ib_core]
   ib_uverbs_attach_mcast+0x186/0x1e0 [ib_uverbs]
   ib_uverbs_handler_UVERBS_METHOD_INVOKE_WRITE+0xcd/0x140 [ib_uverbs]
   ib_uverbs_cmd_verbs+0xdb0/0xea0 [ib_uverbs]
   ib_uverbs_ioctl+0xd2/0x160 [ib_uverbs]
   do_syscall_64+0x5c/0x80
   entry_SYSCALL_64_after_hwframe+0x44/0xae

Move them out of the spinlock; it is OK if there are some races setting up the MC reception at the ethernet layer with rbtree lookups.

Fixes: 6090a0c ("RDMA/rxe: Cleanup rxe_mcast.c")
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Bob Pearson <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
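For context, the pattern the fix applies is generic: take the spinlock only around the data-structure update, and call dev_mc_add() only after the lock has been dropped, since dev_mc_add()/dev_mc_del() use BH locking on the netdev address list internally and may return with IRQs re-enabled, which is what the warn_bogus_irq_restore() check above catches. Below is a minimal sketch of that pattern; my_dev, my_obj, my_lookup(), my_insert(), and my_remove() are illustrative placeholders rather than rxe symbols, and only dev_mc_add() is the real netdev API.

/*
 * Minimal sketch of "side effects outside the lock"; NOT rxe code.
 * my_dev/my_obj/my_lookup/my_insert/my_remove are placeholders.
 */
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
	unsigned char ll_addr[ETH_ALEN];
};

struct my_dev {
	struct net_device *ndev;
	spinlock_t lock;
};

struct my_obj *my_lookup(struct my_dev *dev, const u8 *addr);
void my_insert(struct my_dev *dev, struct my_obj *obj);
void my_remove(struct my_dev *dev, struct my_obj *obj);

static struct my_obj *my_get(struct my_dev *dev, const u8 *addr)
{
	struct my_obj *obj, *tmp;
	unsigned long flags;
	int err;

	/* sleeping allocation happens before the lock is taken */
	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);
	ether_addr_copy(obj->ll_addr, addr);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = my_lookup(dev, addr);	/* re-check under the lock */
	if (tmp) {
		spin_unlock_irqrestore(&dev->lock, flags);
		kfree(obj);		/* lost the race; reuse the winner */
		return tmp;
	}
	my_insert(dev, obj);		/* data-structure update only */
	spin_unlock_irqrestore(&dev->lock, flags);

	/*
	 * dev_mc_add() uses BH locking internally, so it must run only
	 * after spin_unlock_irqrestore(), never inside the critical
	 * section.
	 */
	err = dev_mc_add(dev->ndev, obj->ll_addr);
	if (err) {
		spin_lock_irqsave(&dev->lock, flags);
		my_remove(dev, obj);	/* unwind under the lock */
		spin_unlock_irqrestore(&dev->lock, flags);
		kfree(obj);
		return ERR_PTR(err);
	}
	return obj;
}

The rxe_get_mcg() and rxe_destroy_mcg() changes in the diff below follow this same shape.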
1 parent ef91271 commit a926a90

File tree: 1 file changed (+23, -28 lines)

drivers/infiniband/sw/rxe/rxe_mcast.c

Lines changed: 23 additions & 28 deletions
@@ -38,13 +38,13 @@ static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
 }

 /**
- * rxe_mcast_delete - delete multicast address from rxe device
+ * rxe_mcast_del - delete multicast address from rxe device
  * @rxe: rxe device object
  * @mgid: multicast address as a gid
  *
  * Returns 0 on success else an error
  */
-static int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
+static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid)
 {
 	unsigned char ll_addr[ETH_ALEN];

@@ -159,17 +159,10 @@ struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
  * @mcg: new mcg object
  *
  * Context: caller should hold rxe->mcg lock
- * Returns: 0 on success else an error
  */
-static int __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
-			  struct rxe_mcg *mcg)
+static void __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
+			   struct rxe_mcg *mcg)
 {
-	int err;
-
-	err = rxe_mcast_add(rxe, mgid);
-	if (unlikely(err))
-		return err;
-
 	kref_init(&mcg->ref_cnt);
 	memcpy(&mcg->mgid, mgid, sizeof(mcg->mgid));
 	INIT_LIST_HEAD(&mcg->qp_list);
@@ -184,8 +177,6 @@ static int __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
 	 */
 	kref_get(&mcg->ref_cnt);
 	__rxe_insert_mcg(mcg);
-
-	return 0;
 }

 /**
@@ -209,6 +200,12 @@ static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
 	if (mcg)
 		return mcg;

+	/* check to see if we have reached limit */
+	if (atomic_inc_return(&rxe->mcg_num) > rxe->attr.max_mcast_grp) {
+		err = -ENOMEM;
+		goto err_dec;
+	}
+
 	/* speculative alloc of new mcg */
 	mcg = kzalloc(sizeof(*mcg), GFP_KERNEL);
 	if (!mcg)
@@ -218,27 +215,23 @@ static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
 	/* re-check to see if someone else just added it */
 	tmp = __rxe_lookup_mcg(rxe, mgid);
 	if (tmp) {
+		spin_unlock_irqrestore(&rxe->mcg_lock, flags);
+		atomic_dec(&rxe->mcg_num);
 		kfree(mcg);
-		mcg = tmp;
-		goto out;
-	}
-
-	if (atomic_inc_return(&rxe->mcg_num) > rxe->attr.max_mcast_grp) {
-		err = -ENOMEM;
-		goto err_dec;
+		return tmp;
 	}

-	err = __rxe_init_mcg(rxe, mgid, mcg);
-	if (err)
-		goto err_dec;
-out:
+	__rxe_init_mcg(rxe, mgid, mcg);
 	spin_unlock_irqrestore(&rxe->mcg_lock, flags);
-	return mcg;

+	/* add mcast address outside of lock */
+	err = rxe_mcast_add(rxe, mgid);
+	if (!err)
+		return mcg;
+
+	kfree(mcg);
 err_dec:
 	atomic_dec(&rxe->mcg_num);
-	spin_unlock_irqrestore(&rxe->mcg_lock, flags);
-	kfree(mcg);
 	return ERR_PTR(err);
 }
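Taken together, the two rxe_get_mcg() hunks above reorder the slow path: the mcg_num limit check moves ahead of the speculative kzalloc(); the re-check under mcg_lock now backs out fully (unlock, decrement, kfree) before returning a concurrently added group; __rxe_init_mcg() only initializes and inserts the object; and rxe_mcast_add() runs after spin_unlock_irqrestore(), with its failure path falling through to err_dec outside the lock. The invariant the patch establishes is that dev_mc_add()/dev_mc_del(), reached via rxe_mcast_add()/rxe_mcast_del(), never execute inside the mcg_lock critical section.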

@@ -268,7 +261,6 @@ static void __rxe_destroy_mcg(struct rxe_mcg *mcg)
 	__rxe_remove_mcg(mcg);
 	kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);

-	rxe_mcast_delete(mcg->rxe, &mcg->mgid);
 	atomic_dec(&rxe->mcg_num);
 }

@@ -282,6 +274,9 @@ static void rxe_destroy_mcg(struct rxe_mcg *mcg)
 {
 	unsigned long flags;

+	/* delete mcast address outside of lock */
+	rxe_mcast_del(mcg->rxe, &mcg->mgid);
+
 	spin_lock_irqsave(&mcg->rxe->mcg_lock, flags);
 	__rxe_destroy_mcg(mcg);
 	spin_unlock_irqrestore(&mcg->rxe->mcg_lock, flags);
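The teardown path mirrors the setup path: rxe_destroy_mcg() now issues rxe_mcast_del() before taking mcg_lock instead of inside __rxe_destroy_mcg(), so the ethernet-layer multicast filter and the rbtree can briefly disagree. As the commit message notes, such races between MC reception setup at the ethernet layer and rbtree lookups are acceptable.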
