@@ -1082,7 +1082,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
 	return error;
 }
 
-static void __loop_clr_fd(struct loop_device *lo)
+static void __loop_clr_fd(struct loop_device *lo, bool release)
 {
 	struct file *filp;
 	gfp_t gfp = lo->old_gfp_mask;
@@ -1144,59 +1144,53 @@ static void __loop_clr_fd(struct loop_device *lo)
 	/* let user-space know about this change */
 	kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
 	mapping_set_gfp_mask(filp->f_mapping, gfp);
+	/* This is safe: open() is still holding a reference. */
+	module_put(THIS_MODULE);
 	blk_mq_unfreeze_queue(lo->lo_queue);
 
 	disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
 
 	if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
 		int err;
 
-		mutex_lock(&lo->lo_disk->open_mutex);
+		/*
+		 * open_mutex has been held already in release path, so don't
+		 * acquire it if this function is called in such case.
+		 *
+		 * If the reread partition isn't from release path, lo_refcnt
+		 * must be at least one and it can only become zero when the
+		 * current holder is released.
+		 */
+		if (!release)
+			mutex_lock(&lo->lo_disk->open_mutex);
 		err = bdev_disk_changed(lo->lo_disk, false);
-		mutex_unlock(&lo->lo_disk->open_mutex);
+		if (!release)
+			mutex_unlock(&lo->lo_disk->open_mutex);
 		if (err)
 			pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
 				__func__, lo->lo_number, err);
 		/* Device is gone, no point in returning error */
 	}
 
+	/*
+	 * lo->lo_state is set to Lo_unbound here after above partscan has
+	 * finished. There cannot be anybody else entering __loop_clr_fd() as
+	 * Lo_rundown state protects us from all the other places trying to
+	 * change the 'lo' device.
+	 */
 	lo->lo_flags = 0;
 	if (!part_shift)
 		lo->lo_disk->flags |= GENHD_FL_NO_PART;
-
-	fput(filp);
-}
-
-static void loop_rundown_completed(struct loop_device *lo)
-{
 	mutex_lock(&lo->lo_mutex);
 	lo->lo_state = Lo_unbound;
 	mutex_unlock(&lo->lo_mutex);
-	module_put(THIS_MODULE);
-}
-
-static void loop_rundown_workfn(struct work_struct *work)
-{
-	struct loop_device *lo = container_of(work, struct loop_device,
-					      rundown_work);
-	struct block_device *bdev = lo->lo_device;
-	struct gendisk *disk = lo->lo_disk;
-
-	__loop_clr_fd(lo);
-	kobject_put(&bdev->bd_device.kobj);
-	module_put(disk->fops->owner);
-	loop_rundown_completed(lo);
-}
 
-static void loop_schedule_rundown(struct loop_device *lo)
-{
-	struct block_device *bdev = lo->lo_device;
-	struct gendisk *disk = lo->lo_disk;
-
-	__module_get(disk->fops->owner);
-	kobject_get(&bdev->bd_device.kobj);
-	INIT_WORK(&lo->rundown_work, loop_rundown_workfn);
-	queue_work(system_long_wq, &lo->rundown_work);
+	/*
+	 * Need not hold lo_mutex to fput backing file. Calling fput holding
+	 * lo_mutex triggers a circular lock dependency possibility warning as
+	 * fput can take open_mutex which is usually taken before lo_mutex.
+	 */
+	fput(filp);
 }
 
 static int loop_clr_fd(struct loop_device *lo)
@@ -1228,8 +1222,7 @@ static int loop_clr_fd(struct loop_device *lo)
 	lo->lo_state = Lo_rundown;
 	mutex_unlock(&lo->lo_mutex);
 
-	__loop_clr_fd(lo);
-	loop_rundown_completed(lo);
+	__loop_clr_fd(lo, false);
 	return 0;
 }
 
@@ -1754,7 +1747,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
 		 * In autoclear mode, stop the loop thread
 		 * and remove configuration after last close.
 		 */
-		loop_schedule_rundown(lo);
+		__loop_clr_fd(lo, true);
 		return;
 	} else if (lo->lo_state == Lo_bound) {
 		/*
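The `release` parameter implements a conditional-locking pattern: `__loop_clr_fd()` skips taking `open_mutex` when the caller (the release path) already holds it, and the final `fput()` is deliberately issued only after `lo_mutex` has been dropped, so the usual `open_mutex` before `lo_mutex` ordering is never inverted. The following is a minimal userspace sketch of that pattern using pthreads, not kernel code; the names (`device_teardown`, `dev_lock`, `owner_lock`, `put_backing_file`) are hypothetical and exist only for illustration.

```c
/* Standalone sketch (assumed names, not the kernel patch itself) of the
 * conditional-locking and "drop the file reference outside our own lock"
 * pattern used by __loop_clr_fd(). Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t owner_lock = PTHREAD_MUTEX_INITIALIZER; /* role of open_mutex */
static pthread_mutex_t dev_lock   = PTHREAD_MUTEX_INITIALIZER; /* role of lo_mutex   */

static void put_backing_file(void)
{
	/* Stand-in for fput(): imagine it may need owner_lock internally,
	 * so calling it while holding dev_lock could invert the usual
	 * owner_lock -> dev_lock ordering. */
	printf("backing file reference dropped\n");
}

static void device_teardown(bool caller_holds_owner_lock)
{
	/* Take owner_lock only if the caller does not already hold it. */
	if (!caller_holds_owner_lock)
		pthread_mutex_lock(&owner_lock);

	/* ... rescan partitions, clear flags, etc. ... */

	if (!caller_holds_owner_lock)
		pthread_mutex_unlock(&owner_lock);

	/* Update device state under dev_lock ... */
	pthread_mutex_lock(&dev_lock);
	/* state = UNBOUND; */
	pthread_mutex_unlock(&dev_lock);

	/* ... and only then drop the file reference, outside dev_lock. */
	put_backing_file();
}

int main(void)
{
	/* ioctl-style caller: does not hold owner_lock. */
	device_teardown(false);

	/* release-style caller: already holds owner_lock. */
	pthread_mutex_lock(&owner_lock);
	device_teardown(true);
	pthread_mutex_unlock(&owner_lock);
	return 0;
}
```

As in the patch, passing the caller's locking context explicitly keeps the lock ordering auditable instead of relying on recursive locking or deferring the work to a workqueue.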