Commit cf26a23

Merge tag 'block-5.17-2022-02-11' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:

 - NVMe pull request:
     - nvme-tcp: fix bogus request completion when failing to send AER
       (Sagi Grimberg)
     - add the missing nvme_complete_req tracepoint for batched
       completion (Bean Huo)

 - Revert of the loop "make autoclear operation asynchronous" change
   that has continued to plague us this release. A few patchsets exist
   to improve this, but they are too invasive to be considered at this
   point (Tetsuo)

* tag 'block-5.17-2022-02-11' of git://git.kernel.dk/linux-block:
  loop: revert "make autoclear operation asynchronous"
  nvme-tcp: fix bogus request completion when failing to send AER
  nvme: add nvme_complete_req tracepoint for batched completion
2 parents: 199b7f8 + bf23747

4 files changed: +39 -38 lines

drivers/block/loop.c
Lines changed: 29 additions & 36 deletions

@@ -1082,7 +1082,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
         return error;
 }
 
-static void __loop_clr_fd(struct loop_device *lo)
+static void __loop_clr_fd(struct loop_device *lo, bool release)
 {
         struct file *filp;
         gfp_t gfp = lo->old_gfp_mask;
@@ -1144,59 +1144,53 @@ static void __loop_clr_fd(struct loop_device *lo)
         /* let user-space know about this change */
         kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
         mapping_set_gfp_mask(filp->f_mapping, gfp);
+        /* This is safe: open() is still holding a reference. */
+        module_put(THIS_MODULE);
         blk_mq_unfreeze_queue(lo->lo_queue);
 
         disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
 
         if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
                 int err;
 
-                mutex_lock(&lo->lo_disk->open_mutex);
+                /*
+                 * open_mutex has been held already in release path, so don't
+                 * acquire it if this function is called in such case.
+                 *
+                 * If the reread partition isn't from release path, lo_refcnt
+                 * must be at least one and it can only become zero when the
+                 * current holder is released.
+                 */
+                if (!release)
+                        mutex_lock(&lo->lo_disk->open_mutex);
                 err = bdev_disk_changed(lo->lo_disk, false);
-                mutex_unlock(&lo->lo_disk->open_mutex);
+                if (!release)
+                        mutex_unlock(&lo->lo_disk->open_mutex);
                 if (err)
                         pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
                                 __func__, lo->lo_number, err);
                 /* Device is gone, no point in returning error */
         }
 
+        /*
+         * lo->lo_state is set to Lo_unbound here after above partscan has
+         * finished. There cannot be anybody else entering __loop_clr_fd() as
+         * Lo_rundown state protects us from all the other places trying to
+         * change the 'lo' device.
+         */
         lo->lo_flags = 0;
         if (!part_shift)
                 lo->lo_disk->flags |= GENHD_FL_NO_PART;
-
-        fput(filp);
-}
-
-static void loop_rundown_completed(struct loop_device *lo)
-{
         mutex_lock(&lo->lo_mutex);
         lo->lo_state = Lo_unbound;
         mutex_unlock(&lo->lo_mutex);
-        module_put(THIS_MODULE);
-}
-
-static void loop_rundown_workfn(struct work_struct *work)
-{
-        struct loop_device *lo = container_of(work, struct loop_device,
-                                              rundown_work);
-        struct block_device *bdev = lo->lo_device;
-        struct gendisk *disk = lo->lo_disk;
-
-        __loop_clr_fd(lo);
-        kobject_put(&bdev->bd_device.kobj);
-        module_put(disk->fops->owner);
-        loop_rundown_completed(lo);
-}
 
-static void loop_schedule_rundown(struct loop_device *lo)
-{
-        struct block_device *bdev = lo->lo_device;
-        struct gendisk *disk = lo->lo_disk;
-
-        __module_get(disk->fops->owner);
-        kobject_get(&bdev->bd_device.kobj);
-        INIT_WORK(&lo->rundown_work, loop_rundown_workfn);
-        queue_work(system_long_wq, &lo->rundown_work);
+        /*
+         * Need not hold lo_mutex to fput backing file. Calling fput holding
+         * lo_mutex triggers a circular lock dependency possibility warning as
+         * fput can take open_mutex which is usually taken before lo_mutex.
+         */
+        fput(filp);
 }
 
 static int loop_clr_fd(struct loop_device *lo)
@@ -1228,8 +1222,7 @@ static int loop_clr_fd(struct loop_device *lo)
         lo->lo_state = Lo_rundown;
         mutex_unlock(&lo->lo_mutex);
 
-        __loop_clr_fd(lo);
-        loop_rundown_completed(lo);
+        __loop_clr_fd(lo, false);
         return 0;
 }
 
@@ -1754,7 +1747,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
                  * In autoclear mode, stop the loop thread
                  * and remove configuration after last close.
                  */
-                loop_schedule_rundown(lo);
+                __loop_clr_fd(lo, true);
                 return;
         } else if (lo->lo_state == Lo_bound) {
                 /*
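
For readers tracing the locking story: the new release flag exists because __loop_clr_fd() is reachable from two contexts with different lock state. Below is a minimal illustrative sketch of the two call paths, paraphrased from the code above; it is not an excerpt of the patch.

/*
 * Illustrative sketch only; paraphrased from drivers/block/loop.c.
 * The release argument records whether the caller already holds
 * lo_disk->open_mutex.
 */

/* ioctl(LOOP_CLR_FD) path: open_mutex is not held on entry, so
 * __loop_clr_fd() may take it itself around the partition rescan. */
static int loop_clr_fd(struct loop_device *lo)
{
        /* ... validate state, set lo->lo_state = Lo_rundown ... */
        __loop_clr_fd(lo, false);
        return 0;
}

/* Last-close path: the block layer calls ->release() with open_mutex
 * already held, so passing true makes __loop_clr_fd() skip taking the
 * lock and avoids deadlocking against itself on the rescan. */
static void lo_release(struct gendisk *disk, fmode_t mode)
{
        struct loop_device *lo = disk->private_data;

        /* ... refcount drop, autoclear check ... */
        __loop_clr_fd(lo, true);
}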

drivers/block/loop.h
Lines changed: 0 additions & 1 deletion

@@ -56,7 +56,6 @@ struct loop_device {
         struct gendisk          *lo_disk;
         struct mutex            lo_mutex;
         bool                    idr_visible;
-        struct work_struct      rundown_work;
 };
 
 struct loop_cmd {

drivers/nvme/host/core.c
Lines changed: 1 addition & 0 deletions

@@ -368,6 +368,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_rq);
 
 void nvme_complete_batch_req(struct request *req)
 {
+        trace_nvme_complete_rq(req);
         nvme_cleanup_cmd(req);
         nvme_end_req_zoned(req);
 }
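
Context for this one-liner: nvme_complete_rq() already calls trace_nvme_complete_rq(), but batched completions bypass it and instead invoke nvme_complete_batch_req() per request, so batched I/O was invisible to the tracepoint. A rough sketch of the batch iteration helper in drivers/nvme/host/nvme.h, quoted from memory of the 5.17-era source (details may differ):

static inline void nvme_complete_batch(struct io_comp_batch *iob,
                void (*fn)(struct request *rq))
{
        struct request *req;

        rq_list_for_each(&iob->req_list, req) {
                fn(req);                        /* transport-specific cleanup */
                nvme_complete_batch_req(req);   /* now emits the tracepoint */
        }
        blk_mq_end_request_batch(iob);
}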

drivers/nvme/host/tcp.c
Lines changed: 9 additions & 1 deletion

@@ -913,7 +913,15 @@ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
 
 static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
 {
-        nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
+        if (nvme_tcp_async_req(req)) {
+                union nvme_result res = {};
+
+                nvme_complete_async_event(&req->queue->ctrl->ctrl,
+                                cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
+        } else {
+                nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
+                                NVME_SC_HOST_PATH_ERROR);
+        }
 }
 
 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
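
Why the removed line was bogus: an AER (Asynchronous Event Request) command is driver-internal and embedded in the controller structure rather than allocated from a blk-mq tagset, so calling blk_mq_rq_from_pdu() on it computes a pointer to a struct request that was never allocated. The fix completes the failed AER through nvme_complete_async_event() instead. The nvme_tcp_async_req() check relies on a helper already present in tcp.c, which (paraphrased here as an assumption, not quoted from the patch) amounts to a pointer comparison:

/* Paraphrased sketch: the controller keeps a single internal AER
 * request embedded in nvme_tcp_ctrl, so an identity comparison is
 * enough to tell it apart from real blk-mq requests. */
static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
        return req == &req->queue->ctrl->async_req;
}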
