Skip to content

Commit f427f4d

Browse files
Divya Indi
authored and Jason Gunthorpe committed
IB/sa: Resolve use-after-free in ib_nl_make_request()
There is a race condition where ib_nl_make_request() inserts the request data into the linked list but the timer in ib_nl_request_timeout() can see it and destroy it before ib_nl_send_msg() is done touching it. This could happen, for instance, if there is a long delay allocating memory during nlmsg_new(). This causes a use-after-free in the send_mad() thread: [<ffffffffa02f43cb>] ? ib_pack+0x17b/0x240 [ib_core] [<ffffffffa032aef1>] ib_sa_path_rec_get+0x181/0x200 [ib_sa] [<ffffffffa0379db0>] rdma_resolve_route+0x3c0/0x8d0 [rdma_cm] [<ffffffffa0374450>] ? cma_bind_port+0xa0/0xa0 [rdma_cm] [<ffffffffa040f850>] ? rds_rdma_cm_event_handler_cmn+0x850/0x850 [rds_rdma] [<ffffffffa040f22c>] rds_rdma_cm_event_handler_cmn+0x22c/0x850 [rds_rdma] [<ffffffffa040f860>] rds_rdma_cm_event_handler+0x10/0x20 [rds_rdma] [<ffffffffa037778e>] addr_handler+0x9e/0x140 [rdma_cm] [<ffffffffa026cdb4>] process_req+0x134/0x190 [ib_addr] [<ffffffff810a02f9>] process_one_work+0x169/0x4a0 [<ffffffff810a0b2b>] worker_thread+0x5b/0x560 [<ffffffff810a0ad0>] ? flush_delayed_work+0x50/0x50 [<ffffffff810a68fb>] kthread+0xcb/0xf0 [<ffffffff816ec49a>] ? __schedule+0x24a/0x810 [<ffffffff816ec49a>] ? __schedule+0x24a/0x810 [<ffffffff810a6830>] ? kthread_create_on_node+0x180/0x180 [<ffffffff816f25a7>] ret_from_fork+0x47/0x90 [<ffffffff810a6830>] ? kthread_create_on_node+0x180/0x180 The ownership rule is that once the request is on the list, ownership transfers to the list and the local thread can't touch it any more, just like for the normal MAD case in send_mad(). Thus, instead of adding before send and then trying to delete after on errors, move the entire thing under the spinlock so that the send and update of the lists are atomic to the concurrent threads. Lightly reorganize things so spinlock-safe memory allocations are done in the final NL send path and the rest of the setup work is done before and outside the lock.
Fixes: 3ebd2fd ("IB/sa: Put netlink request into the request list before sending") Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Divya Indi <[email protected]> Signed-off-by: Jason Gunthorpe <[email protected]>
1 parent 2315ec1 commit f427f4d

File tree

1 file changed

+17
-21
lines changed

1 file changed

+17
-21
lines changed

drivers/infiniband/core/sa_query.c

Lines changed: 17 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -829,13 +829,20 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
829829
return len;
830830
}
831831

832-
static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
832+
static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
833833
{
834834
struct sk_buff *skb = NULL;
835835
struct nlmsghdr *nlh;
836836
void *data;
837837
struct ib_sa_mad *mad;
838838
int len;
839+
unsigned long flags;
840+
unsigned long delay;
841+
gfp_t gfp_flag;
842+
int ret;
843+
844+
INIT_LIST_HEAD(&query->list);
845+
query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
839846

840847
mad = query->mad_buf->mad;
841848
len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
@@ -860,36 +867,25 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
860867
/* Repair the nlmsg header length */
861868
nlmsg_end(skb, nlh);
862869

863-
return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
864-
}
870+
gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
871+
GFP_NOWAIT;
865872

866-
static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
867-
{
868-
unsigned long flags;
869-
unsigned long delay;
870-
int ret;
873+
spin_lock_irqsave(&ib_nl_request_lock, flags);
874+
ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);
871875

872-
INIT_LIST_HEAD(&query->list);
873-
query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
876+
if (ret)
877+
goto out;
874878

875-
/* Put the request on the list first.*/
876-
spin_lock_irqsave(&ib_nl_request_lock, flags);
879+
/* Put the request on the list.*/
877880
delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
878881
query->timeout = delay + jiffies;
879882
list_add_tail(&query->list, &ib_nl_request_list);
880883
/* Start the timeout if this is the only request */
881884
if (ib_nl_request_list.next == &query->list)
882885
queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
883-
spin_unlock_irqrestore(&ib_nl_request_lock, flags);
884886

885-
ret = ib_nl_send_msg(query, gfp_mask);
886-
if (ret) {
887-
ret = -EIO;
888-
/* Remove the request */
889-
spin_lock_irqsave(&ib_nl_request_lock, flags);
890-
list_del(&query->list);
891-
spin_unlock_irqrestore(&ib_nl_request_lock, flags);
892-
}
887+
out:
888+
spin_unlock_irqrestore(&ib_nl_request_lock, flags);
893889

894890
return ret;
895891
}

0 commit comments

Comments
 (0)