@@ -4961,15 +4961,10 @@ action_show(struct mddev *mddev, char *page)
  * @locked: if set, reconfig_mutex will still be held after this function
  *          return; if not set, reconfig_mutex will be released after this
  *          function return.
- * @check_seq: if set, only wait for curent running sync_thread to stop, noted
- *             that new sync_thread can still start.
  */
-static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq)
+static void stop_sync_thread(struct mddev *mddev, bool locked)
 {
-        int sync_seq;
-
-        if (check_seq)
-                sync_seq = atomic_read(&mddev->sync_seq);
+        int sync_seq = atomic_read(&mddev->sync_seq);

         if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
                 if (!locked)
@@ -4990,7 +4985,8 @@ static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq)

         wait_event(resync_wait,
                    !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
-                   (check_seq && sync_seq != atomic_read(&mddev->sync_seq)));
+                   (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery) &&
+                    sync_seq != atomic_read(&mddev->sync_seq)));

         if (locked)
                 mddev_lock_nointr(mddev);
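Read together, the two hunks above turn stop_sync_thread() into a two-argument helper: sync_seq is now sampled unconditionally, and whether a bumped sync_seq counts as "done" is decided by MD_RECOVERY_FROZEN at wake-up time rather than by the caller-supplied check_seq flag. A minimal sketch of the resulting shape is below; the body between the early return and the wait is not touched by this patch and is elided here.

```c
/* Sketch of stop_sync_thread() after this patch; the middle of the
 * function is untouched by the diff and is only summarized in comments.
 */
static void stop_sync_thread(struct mddev *mddev, bool locked)
{
        /* always sample the sequence number now that @check_seq is gone */
        int sync_seq = atomic_read(&mddev->sync_seq);

        if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
                if (!locked)
                        mddev_unlock(mddev);
                return;
        }

        /* ... unchanged: the running sync_thread is signalled to stop ... */

        /*
         * Wait until no sync_thread is running, or, if the array is not
         * frozen, until a new sync_thread has advanced sync_seq.
         */
        wait_event(resync_wait,
                   !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
                   (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery) &&
                    sync_seq != atomic_read(&mddev->sync_seq)));

        if (locked)
                mddev_lock_nointr(mddev);
        /* ... unchanged cleanup for the !locked case ... */
}
```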
@@ -5001,7 +4997,7 @@ void md_idle_sync_thread(struct mddev *mddev)
         lockdep_assert_held(&mddev->reconfig_mutex);

         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-        stop_sync_thread(mddev, true, true);
+        stop_sync_thread(mddev, true);
 }
 EXPORT_SYMBOL_GPL(md_idle_sync_thread);

@@ -5010,7 +5006,7 @@ void md_frozen_sync_thread(struct mddev *mddev)
         lockdep_assert_held(&mddev->reconfig_mutex);

         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-        stop_sync_thread(mddev, true, false);
+        stop_sync_thread(mddev, true);
 }
 EXPORT_SYMBOL_GPL(md_frozen_sync_thread);

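With check_seq gone, the two exported helpers above express the caller's intent purely through MD_RECOVERY_FROZEN: md_idle_sync_thread() clears the bit before calling stop_sync_thread(), so the wait can also be satisfied by a newly started sync_thread advancing sync_seq, while md_frozen_sync_thread() sets the bit, so the wait only ends once no sync_thread is running. A hypothetical caller sketch follows; example_quiesce() and allow_new_sync are illustrative names, not part of this patch.

```c
/*
 * Hypothetical illustration only: example_quiesce() and allow_new_sync are
 * invented for this sketch. The md_*_sync_thread() helpers are the ones in
 * the hunks above and must be called with reconfig_mutex held, as their
 * lockdep assertions require.
 */
static void example_quiesce(struct mddev *mddev, bool allow_new_sync)
{
        lockdep_assert_held(&mddev->reconfig_mutex);

        if (allow_new_sync)
                /* stop the current sync_thread; a new one may start later
                 * and satisfy the wait by bumping sync_seq */
                md_idle_sync_thread(mddev);
        else
                /* freeze: the wait only returns once no sync_thread runs */
                md_frozen_sync_thread(mddev);
}
```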
@@ -5035,7 +5031,7 @@ static void idle_sync_thread(struct mddev *mddev)
                 return;
         }

-        stop_sync_thread(mddev, false, true);
+        stop_sync_thread(mddev, false);
         mutex_unlock(&mddev->sync_mutex);
 }

@@ -5049,7 +5045,7 @@ static void frozen_sync_thread(struct mddev *mddev)
                 return;
         }

-        stop_sync_thread(mddev, false, false);
+        stop_sync_thread(mddev, false);
         mutex_unlock(&mddev->sync_mutex);
 }

@@ -6544,7 +6540,7 @@ void md_stop_writes(struct mddev *mddev)
 {
         mddev_lock_nointr(mddev);
         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-        stop_sync_thread(mddev, true, false);
+        stop_sync_thread(mddev, true);
         __md_stop_writes(mddev);
         mddev_unlock(mddev);
 }
@@ -6612,7 +6608,7 @@ static int md_set_readonly(struct mddev *mddev)
                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
         }

-        stop_sync_thread(mddev, false, false);
+        stop_sync_thread(mddev, false);
         wait_event(mddev->sb_wait,
                    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
         mddev_lock_nointr(mddev);
@@ -6658,7 +6654,7 @@ static int do_md_stop(struct mddev *mddev, int mode)
                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
         }

-        stop_sync_thread(mddev, true, false);
+        stop_sync_thread(mddev, true);

         if (mddev->sysfs_active ||
             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {