@@ -116,10 +116,26 @@ static void nvme_queue_scan(struct nvme_ctrl *ctrl)
116
116
/*
117
117
* Only new queue scan work when admin and IO queues are both alive
118
118
*/
119
- if (ctrl -> state == NVME_CTRL_LIVE )
119
+ if (ctrl -> state == NVME_CTRL_LIVE && ctrl -> tagset )
120
120
queue_work (nvme_wq , & ctrl -> scan_work );
121
121
}
122
122
123
+ /*
124
+ * Use this function to proceed with scheduling reset_work for a controller
125
+ * that had previously been set to the resetting state. This is intended for
126
+ * code paths that can't be interrupted by other reset attempts. A hot removal
127
+ * may prevent this from succeeding.
128
+ */
129
+ int nvme_try_sched_reset (struct nvme_ctrl * ctrl )
130
+ {
131
+ if (ctrl -> state != NVME_CTRL_RESETTING )
132
+ return - EBUSY ;
133
+ if (!queue_work (nvme_reset_wq , & ctrl -> reset_work ))
134
+ return - EBUSY ;
135
+ return 0 ;
136
+ }
137
+ EXPORT_SYMBOL_GPL (nvme_try_sched_reset );
138
+
123
139
int nvme_reset_ctrl (struct nvme_ctrl * ctrl )
124
140
{
125
141
if (!nvme_change_ctrl_state (ctrl , NVME_CTRL_RESETTING ))
@@ -137,8 +153,7 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
137
153
ret = nvme_reset_ctrl (ctrl );
138
154
if (!ret ) {
139
155
flush_work (& ctrl -> reset_work );
140
- if (ctrl -> state != NVME_CTRL_LIVE &&
141
- ctrl -> state != NVME_CTRL_ADMIN_ONLY )
156
+ if (ctrl -> state != NVME_CTRL_LIVE )
142
157
ret = - ENETRESET ;
143
158
}
144
159
@@ -315,15 +330,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
315
330
316
331
old_state = ctrl -> state ;
317
332
switch (new_state ) {
318
- case NVME_CTRL_ADMIN_ONLY :
319
- switch (old_state ) {
320
- case NVME_CTRL_CONNECTING :
321
- changed = true;
322
- /* FALLTHRU */
323
- default :
324
- break ;
325
- }
326
- break ;
327
333
case NVME_CTRL_LIVE :
328
334
switch (old_state ) {
329
335
case NVME_CTRL_NEW :
@@ -339,7 +345,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
339
345
switch (old_state ) {
340
346
case NVME_CTRL_NEW :
341
347
case NVME_CTRL_LIVE :
342
- case NVME_CTRL_ADMIN_ONLY :
343
348
changed = true;
344
349
/* FALLTHRU */
345
350
default :
@@ -359,7 +364,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
359
364
case NVME_CTRL_DELETING :
360
365
switch (old_state ) {
361
366
case NVME_CTRL_LIVE :
362
- case NVME_CTRL_ADMIN_ONLY :
363
367
case NVME_CTRL_RESETTING :
364
368
case NVME_CTRL_CONNECTING :
365
369
changed = true;
@@ -381,8 +385,10 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
381
385
break ;
382
386
}
383
387
384
- if (changed )
388
+ if (changed ) {
385
389
ctrl -> state = new_state ;
390
+ wake_up_all (& ctrl -> state_wq );
391
+ }
386
392
387
393
spin_unlock_irqrestore (& ctrl -> lock , flags );
388
394
if (changed && ctrl -> state == NVME_CTRL_LIVE )
@@ -391,6 +397,39 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
391
397
}
392
398
EXPORT_SYMBOL_GPL (nvme_change_ctrl_state );
393
399
400
+ /*
401
+ * Returns true for sink states that can't ever transition back to live.
402
+ */
403
+ static bool nvme_state_terminal (struct nvme_ctrl * ctrl )
404
+ {
405
+ switch (ctrl -> state ) {
406
+ case NVME_CTRL_NEW :
407
+ case NVME_CTRL_LIVE :
408
+ case NVME_CTRL_RESETTING :
409
+ case NVME_CTRL_CONNECTING :
410
+ return false;
411
+ case NVME_CTRL_DELETING :
412
+ case NVME_CTRL_DEAD :
413
+ return true;
414
+ default :
415
+ WARN_ONCE (1 , "Unhandled ctrl state:%d" , ctrl -> state );
416
+ return true;
417
+ }
418
+ }
419
+
420
+ /*
421
+ * Waits for the controller state to be resetting, or returns false if it is
422
+ * not possible to ever transition to that state.
423
+ */
424
+ bool nvme_wait_reset (struct nvme_ctrl * ctrl )
425
+ {
426
+ wait_event (ctrl -> state_wq ,
427
+ nvme_change_ctrl_state (ctrl , NVME_CTRL_RESETTING ) ||
428
+ nvme_state_terminal (ctrl ));
429
+ return ctrl -> state == NVME_CTRL_RESETTING ;
430
+ }
431
+ EXPORT_SYMBOL_GPL (nvme_wait_reset );
432
+
394
433
static void nvme_free_ns_head (struct kref * ref )
395
434
{
396
435
struct nvme_ns_head * head =
@@ -1306,8 +1345,6 @@ static void nvme_update_formats(struct nvme_ctrl *ctrl)
1306
1345
if (ns -> disk && nvme_revalidate_disk (ns -> disk ))
1307
1346
nvme_set_queue_dying (ns );
1308
1347
up_read (& ctrl -> namespaces_rwsem );
1309
-
1310
- nvme_remove_invalid_namespaces (ctrl , NVME_NSID_ALL );
1311
1348
}
1312
1349
1313
1350
static void nvme_passthru_end (struct nvme_ctrl * ctrl , u32 effects )
@@ -1323,6 +1360,7 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
1323
1360
nvme_unfreeze (ctrl );
1324
1361
nvme_mpath_unfreeze (ctrl -> subsys );
1325
1362
mutex_unlock (& ctrl -> subsys -> lock );
1363
+ nvme_remove_invalid_namespaces (ctrl , NVME_NSID_ALL );
1326
1364
mutex_unlock (& ctrl -> scan_lock );
1327
1365
}
1328
1366
if (effects & NVME_CMD_EFFECTS_CCC )
@@ -2874,7 +2912,6 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
2874
2912
2875
2913
switch (ctrl -> state ) {
2876
2914
case NVME_CTRL_LIVE :
2877
- case NVME_CTRL_ADMIN_ONLY :
2878
2915
break ;
2879
2916
default :
2880
2917
return - EWOULDBLOCK ;
@@ -3168,7 +3205,6 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
3168
3205
static const char * const state_name [] = {
3169
3206
[NVME_CTRL_NEW ] = "new" ,
3170
3207
[NVME_CTRL_LIVE ] = "live" ,
3171
- [NVME_CTRL_ADMIN_ONLY ] = "only-admin" ,
3172
3208
[NVME_CTRL_RESETTING ] = "resetting" ,
3173
3209
[NVME_CTRL_CONNECTING ] = "connecting" ,
3174
3210
[NVME_CTRL_DELETING ] = "deleting" ,
@@ -3679,11 +3715,10 @@ static void nvme_scan_work(struct work_struct *work)
3679
3715
struct nvme_id_ctrl * id ;
3680
3716
unsigned nn ;
3681
3717
3682
- if (ctrl -> state != NVME_CTRL_LIVE )
3718
+ /* No tagset on a live ctrl means IO queues could not be created */
3719
+ if (ctrl -> state != NVME_CTRL_LIVE || !ctrl -> tagset )
3683
3720
return ;
3684
3721
3685
- WARN_ON_ONCE (!ctrl -> tagset );
3686
-
3687
3722
if (test_and_clear_bit (NVME_AER_NOTICE_NS_CHANGED , & ctrl -> events )) {
3688
3723
dev_info (ctrl -> device , "rescanning namespaces.\n" );
3689
3724
nvme_clear_changed_ns_log (ctrl );
@@ -3844,13 +3879,13 @@ static void nvme_fw_act_work(struct work_struct *work)
3844
3879
if (time_after (jiffies , fw_act_timeout )) {
3845
3880
dev_warn (ctrl -> device ,
3846
3881
"Fw activation timeout, reset controller\n" );
3847
- nvme_reset_ctrl (ctrl );
3848
- break ;
3882
+ nvme_try_sched_reset (ctrl );
3883
+ return ;
3849
3884
}
3850
3885
msleep (100 );
3851
3886
}
3852
3887
3853
- if (ctrl -> state != NVME_CTRL_LIVE )
3888
+ if (! nvme_change_ctrl_state ( ctrl , NVME_CTRL_LIVE ) )
3854
3889
return ;
3855
3890
3856
3891
nvme_start_queues (ctrl );
@@ -3870,7 +3905,13 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
3870
3905
nvme_queue_scan (ctrl );
3871
3906
break ;
3872
3907
case NVME_AER_NOTICE_FW_ACT_STARTING :
3873
- queue_work (nvme_wq , & ctrl -> fw_act_work );
3908
+ /*
3909
+ * We are (ab)using the RESETTING state to prevent subsequent
3910
+ * recovery actions from interfering with the controller's
3911
+ * firmware activation.
3912
+ */
3913
+ if (nvme_change_ctrl_state (ctrl , NVME_CTRL_RESETTING ))
3914
+ queue_work (nvme_wq , & ctrl -> fw_act_work );
3874
3915
break ;
3875
3916
#ifdef CONFIG_NVME_MULTIPATH
3876
3917
case NVME_AER_NOTICE_ANA :
@@ -3993,6 +4034,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
3993
4034
INIT_WORK (& ctrl -> async_event_work , nvme_async_event_work );
3994
4035
INIT_WORK (& ctrl -> fw_act_work , nvme_fw_act_work );
3995
4036
INIT_WORK (& ctrl -> delete_work , nvme_delete_ctrl_work );
4037
+ init_waitqueue_head (& ctrl -> state_wq );
3996
4038
3997
4039
INIT_DELAYED_WORK (& ctrl -> ka_work , nvme_keep_alive_work );
3998
4040
memset (& ctrl -> ka_cmd , 0 , sizeof (ctrl -> ka_cmd ));
0 commit comments