@@ -446,13 +446,6 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
 		return -ENOMEM;

	fc_host->dev_loss_tmo = fc_dev_loss_tmo;
-	fc_host->devloss_work_q = alloc_workqueue("fc_dl_%d", 0, 0,
-					shost->host_no);
-	if (!fc_host->devloss_work_q) {
-		destroy_workqueue(fc_host->work_q);
-		fc_host->work_q = NULL;
-		return -ENOMEM;
-	}

	fc_bsg_hostadd(shost, fc_host);
	/* ignore any bsg add error - we just can't do sgio */
@@ -2821,10 +2814,10 @@ fc_flush_work(struct Scsi_Host *shost)
 *	1 on success / 0 already queued / < 0 for error
 */
static int
-fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
-				unsigned long delay)
+fc_queue_devloss_work(struct Scsi_Host *shost, struct fc_rport *rport,
+		      struct delayed_work *work, unsigned long delay)
{
-	if (unlikely(!fc_host_devloss_work_q(shost))) {
+	if (unlikely(!rport->devloss_work_q)) {
		printk(KERN_ERR
			"ERROR: FC host '%s' attempted to queue work, "
			"when no workqueue created.\n", shost->hostt->name);
@@ -2833,25 +2826,25 @@ fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
		return -EINVAL;
	}

-	return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
+	return queue_delayed_work(rport->devloss_work_q, work, delay);
}

/**
 * fc_flush_devloss - Flush a fc_host's devloss workqueue.
 * @shost:	Pointer to Scsi_Host bound to fc_host.
 */
static void
-fc_flush_devloss(struct Scsi_Host *shost)
+fc_flush_devloss(struct Scsi_Host *shost, struct fc_rport *rport)
{
-	if (!fc_host_devloss_work_q(shost)) {
+	if (unlikely(!rport->devloss_work_q)) {
		printk(KERN_ERR
			"ERROR: FC host '%s' attempted to flush work, "
			"when no workqueue created.\n", shost->hostt->name);
		dump_stack();
		return;
	}

-	flush_workqueue(fc_host_devloss_work_q(shost));
+	flush_workqueue(rport->devloss_work_q);
}

@@ -2913,13 +2906,6 @@ fc_remove_host(struct Scsi_Host *shost)
		fc_host->work_q = NULL;
		destroy_workqueue(work_q);
	}
-
-	/* flush all devloss work items, then kill it */
-	if (fc_host->devloss_work_q) {
-		work_q = fc_host->devloss_work_q;
-		fc_host->devloss_work_q = NULL;
-		destroy_workqueue(work_q);
-	}
}
EXPORT_SYMBOL(fc_remove_host);

@@ -2967,6 +2953,7 @@ fc_rport_final_delete(struct work_struct *work)
	struct device *dev = &rport->dev;
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct fc_internal *i = to_fc_internal(shost->transportt);
+	struct workqueue_struct *work_q;
	unsigned long flags;
	int do_callback = 0;

@@ -2988,9 +2975,9 @@ fc_rport_final_delete(struct work_struct *work)
	if (rport->flags & FC_RPORT_DEVLOSS_PENDING) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		if (!cancel_delayed_work(&rport->fail_io_work))
-			fc_flush_devloss(shost);
+			fc_flush_devloss(shost, rport);
		if (!cancel_delayed_work(&rport->dev_loss_work))
-			fc_flush_devloss(shost);
+			fc_flush_devloss(shost, rport);
		cancel_work_sync(&rport->scan_work);
		spin_lock_irqsave(shost->host_lock, flags);
		rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
@@ -3021,6 +3008,12 @@ fc_rport_final_delete(struct work_struct *work)

	fc_bsg_remove(rport->rqst_q);

+	if (rport->devloss_work_q) {
+		work_q = rport->devloss_work_q;
+		rport->devloss_work_q = NULL;
+		destroy_workqueue(work_q);
+	}
+
	transport_remove_device(dev);
	device_del(dev);
	transport_destroy_device(dev);
@@ -3093,6 +3086,22 @@ fc_remote_port_create(struct Scsi_Host *shost, int channel,

	spin_unlock_irqrestore(shost->host_lock, flags);

+	rport->devloss_work_q = alloc_workqueue("fc_dl_%d_%d", 0, 0,
+				shost->host_no, rport->number);
+	if (!rport->devloss_work_q) {
+		printk(KERN_ERR "FC Remote Port alloc_workqueue failed\n");
+		/*
+		 * Note that we have not yet called device_initialize() /
+		 * get_device(). Cannot reclaim incremented rport->number
+		 * because we released host_lock.
+		 */
+		spin_lock_irqsave(shost->host_lock, flags);
+		list_del(&rport->peers);
+		scsi_host_put(shost);	/* for fc_host->rport list */
+		spin_unlock_irqrestore(shost->host_lock, flags);
+		kfree(rport);
+		return NULL;
+	}
+
	dev = &rport->dev;
	device_initialize(dev);			/* takes self reference */
	dev->parent = get_device(&shost->shost_gendev);	/* parent reference */
@@ -3255,9 +3264,9 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
				 * be checked and will NOOP the function.
				 */
				if (!cancel_delayed_work(&rport->fail_io_work))
-					fc_flush_devloss(shost);
+					fc_flush_devloss(shost, rport);
				if (!cancel_delayed_work(&rport->dev_loss_work))
-					fc_flush_devloss(shost);
+					fc_flush_devloss(shost, rport);

				spin_lock_irqsave(shost->host_lock, flags);

@@ -3451,11 +3460,12 @@ fc_remote_port_delete(struct fc_rport *rport)
	/* see if we need to kill io faster than waiting for device loss */
	if ((rport->fast_io_fail_tmo != -1) &&
	    (rport->fast_io_fail_tmo < timeout))
-		fc_queue_devloss_work(shost, &rport->fail_io_work,
-			rport->fast_io_fail_tmo * HZ);
+		fc_queue_devloss_work(shost, rport, &rport->fail_io_work,
+				      rport->fast_io_fail_tmo * HZ);

	/* cap the length the devices can be blocked until they are deleted */
-	fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ);
+	fc_queue_devloss_work(shost, rport, &rport->dev_loss_work,
+			      timeout * HZ);
}
EXPORT_SYMBOL(fc_remote_port_delete);

@@ -3514,9 +3524,9 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
		 * transaction.
		 */
		if (!cancel_delayed_work(&rport->fail_io_work))
-			fc_flush_devloss(shost);
+			fc_flush_devloss(shost, rport);
		if (!cancel_delayed_work(&rport->dev_loss_work))
-			fc_flush_devloss(shost);
+			fc_flush_devloss(shost, rport);

		spin_lock_irqsave(shost->host_lock, flags);
		rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |