@@ -667,24 +667,28 @@ static inline struct mddev *mddev_get(struct mddev *mddev)
 
 static void mddev_delayed_delete(struct work_struct *ws);
 
+static void __mddev_put(struct mddev *mddev)
+{
+	if (mddev->raid_disks || !list_empty(&mddev->disks) ||
+	    mddev->ctime || mddev->hold_active)
+		return;
+
+	/* Array is not configured at all, and not held active, so destroy it */
+	set_bit(MD_DELETED, &mddev->flags);
+
+	/*
+	 * Call queue_work inside the spinlock so that flush_workqueue() after
+	 * mddev_find will succeed in waiting for the work to be done.
+	 */
+	INIT_WORK(&mddev->del_work, mddev_delayed_delete);
+	queue_work(md_misc_wq, &mddev->del_work);
+}
+
 void mddev_put(struct mddev *mddev)
 {
 	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
 		return;
-	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
-	    mddev->ctime == 0 && !mddev->hold_active) {
-		/* Array is not configured at all, and not held active,
-		 * so destroy it */
-		set_bit(MD_DELETED, &mddev->flags);
-
-		/*
-		 * Call queue_work inside the spinlock so that
-		 * flush_workqueue() after mddev_find will succeed in waiting
-		 * for the work to be done.
-		 */
-		INIT_WORK(&mddev->del_work, mddev_delayed_delete);
-		queue_work(md_misc_wq, &mddev->del_work);
-	}
+	__mddev_put(mddev);
 	spin_unlock(&all_mddevs_lock);
 }
 
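For readers unfamiliar with the idiom this diff refactors, the sketch below is a minimal userspace C analogue, not the kernel code: all names here (obj_put, __obj_put, dec_and_lock, all_objs_lock, struct obj) are hypothetical, and dec_and_lock() only emulates the semantics of the kernel's atomic_dec_and_lock(), which takes the lock solely when the final reference is dropped. It also mirrors the shape of the refactor above: the unlocked fast path stays in obj_put(), while the lock-held teardown logic lives in a separate __obj_put() helper.

/*
 * Userspace analogue (hypothetical names throughout) of the
 * "drop reference, destroy under a lock" pattern in mddev_put().
 * Build with: gcc -std=c11 -pthread refcount.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int active;	/* reference count */
	bool configured;	/* analogue of the raid_disks/ctime checks */
};

static pthread_mutex_t all_objs_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Emulated atomic_dec_and_lock(): decrement the count; return true
 * with the lock held only if this was the last reference.
 */
static bool dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
	int old = atomic_load(cnt);

	/* Fast path: not the last reference, never touch the lock. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(cnt, &old, old - 1))
			return false;
	}

	/* Slow path: take the lock, then drop the final reference. */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(cnt, 1) == 1)
		return true;	/* caller must unlock */
	pthread_mutex_unlock(lock);
	return false;
}

/* Analogue of __mddev_put(): runs entirely with all_objs_lock held. */
static void __obj_put(struct obj *o)
{
	if (o->configured)
		return;
	printf("destroying unconfigured object\n");
	free(o);
}

/* Analogue of mddev_put(). */
static void obj_put(struct obj *o)
{
	if (!dec_and_lock(&o->active, &all_objs_lock))
		return;
	__obj_put(o);
	pthread_mutex_unlock(&all_objs_lock);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	atomic_init(&o->active, 2);
	obj_put(o);	/* count 2 -> 1: lock never taken */
	obj_put(o);	/* last reference: destroyed under the lock */
	return 0;
}

The split keeps the lock-free common case cheap while guaranteeing that everything __obj_put() does, like the queue_work() call in the kernel version, happens inside the spinlock.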