
Commit 7e7cd79

scsi: iscsi: Fix deadlock on recovery path during GFP_IO reclaim
iSCSI suffers from a deadlock in case a management command submitted via the netlink socket sleeps on an allocation while holding the rx_queue_mutex, if that allocation causes a memory reclaim that writes back to a failed iSCSI device. The recovery procedure can never make progress to recover the failed disk or abort outstanding IO operations to complete the reclaim (since rx_queue_mutex is locked), thus locking up the system.

Nevertheless, just marking all allocations under rx_queue_mutex as GFP_NOIO (or tagging the userspace process with something like PF_MEMALLOC_NOIO) is not enough, since the iSCSI command code relies on other subsystems that try to grab locked mutexes whose holders may themselves be doing GFP_IO allocations, leading to the same deadlock. One instance where this situation can be observed is in the backtraces below, stitched together from multiple bug reports, involving the kobj uevent sent when a session is created.

The root of the problem is not the fact that iSCSI does GFP_IO allocations; that is acceptable. The actual problem is that rx_queue_mutex has a very large granularity, covering every unrelated netlink command execution at the same time as the error recovery path.

The proposed fix leverages the recently added mechanism to stop failed connections from the kernel, by enabling it to execute even though a management command from the netlink socket is being run (rx_queue_mutex is held), provided that the command is known to be safe. It splits the rx_queue_mutex into two mutexes: one protecting against concurrent command execution from the netlink socket, and one protecting stop_conn from racing with other connection management operations that might conflict with it.

It is not very pretty, but it is the simplest way to resolve the deadlock. I considered making it a lock per connection, but some external mutex would still be needed to deal with iscsi_if_destroy_conn.

The patch was tested by forcing a memory shrinker (unrelated, but it used dm-bufio/dm-verity) to reclaim iSCSI pages every time ISCSI_UEVENT_CREATE_SESSION happens, which is a reasonable way to simulate reclaims that might happen with GFP_KERNEL on that path. Then, a faulty hung target causes a connection to fail during intensive IO, at the same time a new session is added by iscsid.

The following stack traces, stitched together from several bug reports, show a case where the deadlock can happen.
iSCSI-write
        holding: rx_queue_mutex
        waiting: uevent_sock_mutex

        kobject_uevent_env+0x1bd/0x419
        kobject_uevent+0xb/0xd
        device_add+0x48a/0x678
        scsi_add_host_with_dma+0xc5/0x22d
        iscsi_host_add+0x53/0x55
        iscsi_sw_tcp_session_create+0xa6/0x129
        iscsi_if_rx+0x100/0x1247
        netlink_unicast+0x213/0x4f0
        netlink_sendmsg+0x230/0x3c0

iscsi_fail iscsi_conn_failure
        waiting: rx_queue_mutex

        schedule_preempt_disabled+0x325/0x734
        __mutex_lock_slowpath+0x18b/0x230
        mutex_lock+0x22/0x40
        iscsi_conn_failure+0x42/0x149
        worker_thread+0x24a/0xbc0

EventManager_
        holding: uevent_sock_mutex
        waiting: dm_bufio_client->lock

        dm_bufio_lock+0xe/0x10
        shrink+0x34/0xf7
        shrink_slab+0x177/0x5d0
        do_try_to_free_pages+0x129/0x470
        try_to_free_mem_cgroup_pages+0x14f/0x210
        memcg_kmem_newpage_charge+0xa6d/0x13b0
        __alloc_pages_nodemask+0x4a3/0x1a70
        fallback_alloc+0x1b2/0x36c
        __kmalloc_node_track_caller+0xb9/0x10d0
        __alloc_skb+0x83/0x2f0
        kobject_uevent_env+0x26b/0x419
        dm_kobject_uevent+0x70/0x79
        dev_suspend+0x1a9/0x1e7
        ctl_ioctl+0x3e9/0x411
        dm_ctl_ioctl+0x13/0x17
        do_vfs_ioctl+0xb3/0x460
        SyS_ioctl+0x5e/0x90

MemcgReclaimerD
        holding: dm_bufio_client->lock
        waiting: stuck io to finish (needs iscsi_fail thread to progress)

        schedule at ffffffffbd603618
        io_schedule at ffffffffbd603ba4
        do_io_schedule at ffffffffbdaf0d94
        __wait_on_bit at ffffffffbd6008a6
        out_of_line_wait_on_bit at ffffffffbd600960
        wait_on_bit.constprop.10 at ffffffffbdaf0f17
        __make_buffer_clean at ffffffffbdaf18ba
        __cleanup_old_buffer at ffffffffbdaf192f
        shrink at ffffffffbdaf19fd
        do_shrink_slab at ffffffffbd6ec000
        shrink_slab at ffffffffbd6ec24a
        do_try_to_free_pages at ffffffffbd6eda09
        try_to_free_mem_cgroup_pages at ffffffffbd6ede7e
        mem_cgroup_resize_limit at ffffffffbd7024c0
        mem_cgroup_write at ffffffffbd703149
        cgroup_file_write at ffffffffbd6d9c6e
        sys_write at ffffffffbd6662ea
        system_call_fastpath at ffffffffbdbc34a2

Link: https://lore.kernel.org/r/[email protected]
Reported-by: Khazhismel Kumykov <[email protected]>
Reviewed-by: Lee Duncan <[email protected]>
Signed-off-by: Gabriel Krisman Bertazi <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]>
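To make the resulting locking scheme concrete, here is a minimal userspace sketch (plain pthreads, not kernel code) of the two-mutex split described above: netlink-driven management commands serialize on rx_queue_mutex and take conn_mutex around the connection operation, while the kernel recovery worker takes only conn_mutex and therefore cannot be blocked by a netlink command that holds rx_queue_mutex while sleeping in reclaim. The thread bodies and flag values are illustrative stand-ins; only the lock ordering mirrors the patch.

/* Hypothetical model: pthread mutexes stand in for the kernel mutexes. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rx_queue_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t conn_mutex = PTHREAD_MUTEX_INITIALIZER;

enum { STOP_CONN_TERM = 0x1, STOP_CONN_RECOVER = 0x2 }; /* stand-in values */

/* Stand-in for conn->transport->stop_conn(). */
static void transport_stop_conn(int flag)
{
	printf("stop_conn(flag=0x%x)\n", flag);
}

/* Mirrors iscsi_if_stop_conn(): only conn_mutex is needed, so it is
 * callable from both the netlink path and the recovery worker. */
static void iscsi_if_stop_conn(int flag)
{
	pthread_mutex_lock(&conn_mutex);
	transport_stop_conn(flag);
	pthread_mutex_unlock(&conn_mutex);
}

/* Kernel recovery worker: never touches rx_queue_mutex, so it can stop a
 * broken connection even while a netlink command holds that lock and
 * sleeps in memory reclaim. */
static void *stop_conn_work_fn(void *arg)
{
	(void)arg;
	iscsi_if_stop_conn(STOP_CONN_RECOVER);
	return NULL;
}

/* Netlink command path: rx_queue_mutex serializes commands against each
 * other; conn_mutex (taken inside iscsi_if_stop_conn) serializes them
 * against the recovery worker. */
static void *netlink_stop_conn(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&rx_queue_mutex);
	iscsi_if_stop_conn(STOP_CONN_TERM);
	pthread_mutex_unlock(&rx_queue_mutex);
	return NULL;
}

int main(void)
{
	pthread_t worker, netlink;

	pthread_create(&worker, NULL, stop_conn_work_fn, NULL);
	pthread_create(&netlink, NULL, netlink_stop_conn, NULL);
	pthread_join(worker, NULL);
	pthread_join(netlink, NULL);
	return 0;
}

In the actual patch the same pattern appears in iscsi_if_stop_conn(), stop_conn_work_fn(), and the ISCSI_UEVENT_* handlers in the diff below.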


drivers/scsi/scsi_transport_iscsi.c

Lines changed: 47 additions & 17 deletions
@@ -1616,6 +1616,12 @@ static DECLARE_TRANSPORT_CLASS(iscsi_connection_class,
 static struct sock *nls;
 static DEFINE_MUTEX(rx_queue_mutex);
 
+/*
+ * conn_mutex protects the {start,bind,stop,destroy}_conn from racing
+ * against the kernel stop_connection recovery mechanism
+ */
+static DEFINE_MUTEX(conn_mutex);
+
 static LIST_HEAD(sesslist);
 static LIST_HEAD(sessdestroylist);
 static DEFINE_SPINLOCK(sesslock);
@@ -2445,6 +2451,32 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
 }
 EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
 
+/*
+ * This can be called without the rx_queue_mutex, if invoked by the kernel
+ * stop work. But, in that case, it is guaranteed not to race with
+ * iscsi_destroy by conn_mutex.
+ */
+static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
+{
+	/*
+	 * It is important that this path doesn't rely on
+	 * rx_queue_mutex, otherwise, a thread doing allocation on a
+	 * start_session/start_connection could sleep waiting on a
+	 * writeback to a failed iscsi device, that cannot be recovered
+	 * because the lock is held.  If we don't hold it here, the
+	 * kernel stop_conn_work_fn has a chance to stop the broken
+	 * session and resolve the allocation.
+	 *
+	 * Still, the user invoked .stop_conn() needs to be serialized
+	 * with stop_conn_work_fn by a private mutex.  Not pretty, but
+	 * it works.
+	 */
+	mutex_lock(&conn_mutex);
+	conn->transport->stop_conn(conn, flag);
+	mutex_unlock(&conn_mutex);
+
+}
+
 static void stop_conn_work_fn(struct work_struct *work)
 {
 	struct iscsi_cls_conn *conn, *tmp;
@@ -2463,30 +2495,17 @@ static void stop_conn_work_fn(struct work_struct *work)
 		uint32_t sid = iscsi_conn_get_sid(conn);
 		struct iscsi_cls_session *session;
 
-		mutex_lock(&rx_queue_mutex);
-
 		session = iscsi_session_lookup(sid);
 		if (session) {
 			if (system_state != SYSTEM_RUNNING) {
 				session->recovery_tmo = 0;
-				conn->transport->stop_conn(conn,
-							   STOP_CONN_TERM);
+				iscsi_if_stop_conn(conn, STOP_CONN_TERM);
 			} else {
-				conn->transport->stop_conn(conn,
-							   STOP_CONN_RECOVER);
+				iscsi_if_stop_conn(conn, STOP_CONN_RECOVER);
 			}
 		}
 
 		list_del_init(&conn->conn_list_err);
-
-		mutex_unlock(&rx_queue_mutex);
-
-		/* we don't want to hold rx_queue_mutex for too long,
-		 * for instance if many conns failed at the same time,
-		 * since this stall other iscsi maintenance operations.
-		 * Give other users a chance to proceed.
-		 */
-		cond_resched();
 	}
 }
 
@@ -2846,8 +2865,11 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev
 	spin_unlock_irqrestore(&connlock, flags);
 
 	ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n");
+
+	mutex_lock(&conn_mutex);
 	if (transport->destroy_conn)
 		transport->destroy_conn(conn);
+	mutex_unlock(&conn_mutex);
 
 	return 0;
 }
@@ -3689,9 +3711,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
 			break;
 		}
 
+		mutex_lock(&conn_mutex);
 		ev->r.retcode = transport->bind_conn(session, conn,
 						ev->u.b_conn.transport_eph,
 						ev->u.b_conn.is_leading);
+		mutex_unlock(&conn_mutex);
+
 		if (ev->r.retcode || !transport->ep_connect)
 			break;
 
@@ -3713,27 +3738,32 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
 	case ISCSI_UEVENT_START_CONN:
 		conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid);
 		if (conn) {
+			mutex_lock(&conn_mutex);
 			ev->r.retcode = transport->start_conn(conn);
 			if (!ev->r.retcode)
 				conn->state = ISCSI_CONN_UP;
+			mutex_unlock(&conn_mutex);
 		}
 		else
 			err = -EINVAL;
 		break;
 	case ISCSI_UEVENT_STOP_CONN:
 		conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
 		if (conn)
-			transport->stop_conn(conn, ev->u.stop_conn.flag);
+			iscsi_if_stop_conn(conn, ev->u.stop_conn.flag);
 		else
 			err = -EINVAL;
 		break;
 	case ISCSI_UEVENT_SEND_PDU:
 		conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
-		if (conn)
+		if (conn) {
+			mutex_lock(&conn_mutex);
 			ev->r.retcode = transport->send_pdu(conn,
 				(struct iscsi_hdr*)((char*)ev + sizeof(*ev)),
 				(char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
 				ev->u.send_pdu.data_size);
+			mutex_unlock(&conn_mutex);
+		}
 		else
 			err = -EINVAL;
 		break;
