@@ -85,8 +85,7 @@ static DEFINE_SPINLOCK(pers_lock);
 
 static const struct kobj_type md_ktype;
 
-const struct md_cluster_operations *md_cluster_ops;
-EXPORT_SYMBOL(md_cluster_ops);
+static const struct md_cluster_operations *md_cluster_ops;
 static struct module *md_cluster_mod;
 
 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
@@ -2663,11 +2662,11 @@ void md_update_sb(struct mddev *mddev, int force_change)
 			force_change = 1;
 		if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
 			nospares = 1;
-		ret = md_cluster_ops->metadata_update_start(mddev);
+		ret = mddev->cluster_ops->metadata_update_start(mddev);
 		/* Has someone else has updated the sb */
 		if (!does_sb_need_changing(mddev)) {
 			if (ret == 0)
-				md_cluster_ops->metadata_update_cancel(mddev);
+				mddev->cluster_ops->metadata_update_cancel(mddev);
 			bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
 					 BIT(MD_SB_CHANGE_DEVS) |
 					 BIT(MD_SB_CHANGE_CLEAN));
@@ -2807,7 +2806,7 @@ void md_update_sb(struct mddev *mddev, int force_change)
 	/* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
 
 	if (mddev_is_clustered(mddev) && ret == 0)
-		md_cluster_ops->metadata_update_finish(mddev);
+		mddev->cluster_ops->metadata_update_finish(mddev);
 
 	if (mddev->in_sync != sync_req ||
 	    !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
@@ -2966,7 +2965,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
 		else {
 			err = 0;
 			if (mddev_is_clustered(mddev))
-				err = md_cluster_ops->remove_disk(mddev, rdev);
+				err = mddev->cluster_ops->remove_disk(mddev, rdev);
 
 			if (err == 0) {
 				md_kick_rdev_from_array(rdev);
@@ -3076,7 +3075,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
 			 * by this node eventually
 			 */
 			if (!mddev_is_clustered(rdev->mddev) ||
-			    (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
+			    (err = mddev->cluster_ops->gather_bitmaps(rdev)) == 0) {
 				clear_bit(Faulty, &rdev->flags);
 				err = add_bound_rdev(rdev);
 			}
@@ -6994,7 +6993,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
 			set_bit(Candidate, &rdev->flags);
 		else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
 			/* --add initiated by this node */
-			err = md_cluster_ops->add_new_disk(mddev, rdev);
+			err = mddev->cluster_ops->add_new_disk(mddev, rdev);
 			if (err) {
 				export_rdev(rdev, mddev);
 				return err;
@@ -7011,14 +7010,14 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
 	if (mddev_is_clustered(mddev)) {
 		if (info->state & (1 << MD_DISK_CANDIDATE)) {
 			if (!err) {
-				err = md_cluster_ops->new_disk_ack(mddev,
-					err == 0);
+				err = mddev->cluster_ops->new_disk_ack(
+						mddev, err == 0);
 				if (err)
 					md_kick_rdev_from_array(rdev);
 			}
 		} else {
 			if (err)
-				md_cluster_ops->add_new_disk_cancel(mddev);
+				mddev->cluster_ops->add_new_disk_cancel(mddev);
 			else
 				err = add_bound_rdev(rdev);
 		}
@@ -7098,10 +7097,9 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
 		goto busy;
 
 kick_rdev:
-	if (mddev_is_clustered(mddev)) {
-		if (md_cluster_ops->remove_disk(mddev, rdev))
-			goto busy;
-	}
+	if (mddev_is_clustered(mddev) &&
+	    mddev->cluster_ops->remove_disk(mddev, rdev))
+		goto busy;
 
 	md_kick_rdev_from_array(rdev);
 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
@@ -7404,7 +7402,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
 	rv = mddev->pers->resize(mddev, num_sectors);
 	if (!rv) {
 		if (mddev_is_clustered(mddev))
-			md_cluster_ops->update_size(mddev, old_dev_sectors);
+			mddev->cluster_ops->update_size(mddev, old_dev_sectors);
 		else if (!mddev_is_dm(mddev))
 			set_capacity_and_notify(mddev->gendisk,
 						mddev->array_sectors);
@@ -7452,6 +7450,27 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
 	return rv;
 }
 
+static int get_cluster_ops(struct mddev *mddev)
+{
+	spin_lock(&pers_lock);
+	mddev->cluster_ops = md_cluster_ops;
+	if (mddev->cluster_ops && !try_module_get(md_cluster_mod))
+		mddev->cluster_ops = NULL;
+	spin_unlock(&pers_lock);
+
+	return mddev->cluster_ops == NULL ? -ENOENT : 0;
+}
+
+static void put_cluster_ops(struct mddev *mddev)
+{
+	if (!mddev->cluster_ops)
+		return;
+
+	mddev->cluster_ops->leave(mddev);
+	module_put(md_cluster_mod);
+	mddev->cluster_ops = NULL;
+}
+
 /*
  * update_array_info is used to change the configuration of an
  * on-line array.
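
[Note] The two helpers added in the hunk above centralize how an mddev obtains and drops its private cluster_ops pointer: the global table is snapshotted under pers_lock, the md-cluster module is pinned with try_module_get() before the pointer may be used, and release runs leave(), drops the module reference, and clears the pointer. As a rough illustration only, the following standalone userspace sketch mirrors that acquire/release shape with a pthread mutex and a plain reference counter; every name in it (provider_lock, provider_refcnt, acquire_ops, release_ops, the dummy provider) is invented for the example and is not part of the md code.

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

/* Illustrative stand-ins for the kernel structures involved. */
struct cluster_ops {
	int (*join)(void *ctx, int nodes);
	void (*leave)(void *ctx);
};

struct device {
	const struct cluster_ops *ops;	/* per-device copy, like mddev->cluster_ops */
};

static pthread_mutex_t provider_lock = PTHREAD_MUTEX_INITIALIZER;
static const struct cluster_ops *registered_ops;	/* like the global md_cluster_ops */
static int provider_refcnt;				/* stands in for the module refcount */

/* Acquire: snapshot the registered table and take a reference, all under the lock. */
static int acquire_ops(struct device *dev)
{
	pthread_mutex_lock(&provider_lock);
	dev->ops = registered_ops;
	if (dev->ops)
		provider_refcnt++;	/* try_module_get() analogue; assumed to succeed here */
	pthread_mutex_unlock(&provider_lock);

	return dev->ops ? 0 : -ENOENT;
}

/* Release: tell the provider to leave, drop the reference, clear the per-device pointer. */
static void release_ops(struct device *dev)
{
	if (!dev->ops)
		return;

	dev->ops->leave(dev);
	pthread_mutex_lock(&provider_lock);
	provider_refcnt--;
	pthread_mutex_unlock(&provider_lock);
	dev->ops = NULL;
}

/* Dummy provider used only to exercise the helpers. */
static int dummy_join(void *ctx, int nodes) { (void)ctx; (void)nodes; return 0; }
static void dummy_leave(void *ctx) { (void)ctx; }
static const struct cluster_ops dummy_ops = { .join = dummy_join, .leave = dummy_leave };

int main(void)
{
	struct device dev = { NULL };

	registered_ops = &dummy_ops;	/* "module loaded and registered" */
	if (acquire_ops(&dev) == 0) {
		dev.ops->join(&dev, 2);
		release_ops(&dev);
	}
	return provider_refcnt;		/* 0 if acquire and release balanced */
}

The intent of the kernel version is the same: once get_cluster_ops() has succeeded, md_update_sb(), md_do_sync() and the other callers below can dereference mddev->cluster_ops without consulting the global pointer, and the md-cluster module cannot be unloaded behind their back until put_cluster_ops() runs.
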
@@ -7560,16 +7579,15 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
 
 			if (mddev->bitmap_info.nodes) {
 				/* hold PW on all the bitmap lock */
-				if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
+				if (mddev->cluster_ops->lock_all_bitmaps(mddev) <= 0) {
 					pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
 					rv = -EPERM;
-					md_cluster_ops->unlock_all_bitmaps(mddev);
+					mddev->cluster_ops->unlock_all_bitmaps(mddev);
 					goto err;
 				}
 
 				mddev->bitmap_info.nodes = 0;
-				md_cluster_ops->leave(mddev);
-				module_put(md_cluster_mod);
+				put_cluster_ops(mddev);
 				mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
 			}
 			mddev->bitmap_ops->destroy(mddev);
@@ -7853,7 +7871,7 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
 
 	case CLUSTERED_DISK_NACK:
 		if (mddev_is_clustered(mddev))
-			md_cluster_ops->new_disk_ack(mddev, false);
+			mddev->cluster_ops->new_disk_ack(mddev, false);
 		else
 			err = -EINVAL;
 		goto unlock;
@@ -8568,30 +8586,28 @@ EXPORT_SYMBOL(unregister_md_cluster_operations);
 
 int md_setup_cluster(struct mddev *mddev, int nodes)
 {
-	int ret;
-	if (!md_cluster_ops)
+	int ret = get_cluster_ops(mddev);
+
+	if (ret) {
 		request_module("md-cluster");
-	spin_lock(&pers_lock);
+		ret = get_cluster_ops(mddev);
+	}
+
 	/* ensure module won't be unloaded */
-	if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
+	if (ret) {
 		pr_warn("can't find md-cluster module or get its reference.\n");
-		spin_unlock(&pers_lock);
-		return -ENOENT;
+		return ret;
 	}
-	spin_unlock(&pers_lock);
 
-	ret = md_cluster_ops->join(mddev, nodes);
+	ret = mddev->cluster_ops->join(mddev, nodes);
 	if (!ret)
 		mddev->safemode_delay = 0;
 	return ret;
 }
 
 void md_cluster_stop(struct mddev *mddev)
 {
-	if (!md_cluster_ops)
-		return;
-	md_cluster_ops->leave(mddev);
-	module_put(md_cluster_mod);
+	put_cluster_ops(mddev);
 }
 
 static int is_mddev_idle(struct mddev *mddev, int init)
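
[Note] The rewritten md_setup_cluster() above reduces to a common load-on-demand shape: try get_cluster_ops(), call request_module("md-cluster") if no provider is registered yet, then try exactly once more before giving up. Below is a small self-contained sketch of that shape; load_provider(), get_ops(), the demo ops table and the "md-cluster" string used as a provider name are all invented for illustration, with loading simulated by direct registration rather than a real module load.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct cluster_ops {
	int (*join)(void *ctx, int nodes);
};

static const struct cluster_ops *registered_ops;	/* NULL until the provider "loads" */

static int demo_join(void *ctx, int nodes)
{
	(void)ctx;
	printf("joined, %d nodes\n", nodes);
	return 0;
}
static const struct cluster_ops demo_ops = { .join = demo_join };

/* request_module() analogue: loading the provider makes it register its ops table. */
static void load_provider(const char *name)
{
	if (strcmp(name, "md-cluster") == 0)
		registered_ops = &demo_ops;
}

static int get_ops(const struct cluster_ops **out)
{
	*out = registered_ops;
	return *out ? 0 : -ENOENT;
}

/* Same shape as the reworked md_setup_cluster(): try, load on demand, retry once. */
static int setup_cluster(void *ctx, int nodes)
{
	const struct cluster_ops *ops;
	int ret = get_ops(&ops);

	if (ret) {
		load_provider("md-cluster");
		ret = get_ops(&ops);
	}
	if (ret) {
		fprintf(stderr, "can't find cluster provider\n");
		return ret;
	}
	return ops->join(ctx, nodes);
}

int main(void)
{
	return setup_cluster(NULL, 2);
}

md_cluster_stop() correspondingly collapses to put_cluster_ops(), so teardown no longer needs to reference md_cluster_mod or the global ops table directly.
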
@@ -8984,7 +9000,7 @@ void md_do_sync(struct md_thread *thread)
 	}
 
 	if (mddev_is_clustered(mddev)) {
-		ret = md_cluster_ops->resync_start(mddev);
+		ret = mddev->cluster_ops->resync_start(mddev);
 		if (ret)
 			goto skip;
 
@@ -9011,7 +9027,7 @@ void md_do_sync(struct md_thread *thread)
 	 *
 	 */
 	if (mddev_is_clustered(mddev))
-		md_cluster_ops->resync_start_notify(mddev);
+		mddev->cluster_ops->resync_start_notify(mddev);
 	do {
 		int mddev2_minor = -1;
 		mddev->curr_resync = MD_RESYNC_DELAYED;
@@ -9795,21 +9811,21 @@ void md_reap_sync_thread(struct mddev *mddev)
 	 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
 	 * clustered raid */
 	if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
-		md_cluster_ops->resync_finish(mddev);
+		mddev->cluster_ops->resync_finish(mddev);
 	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
 	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
 	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
 	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
 	/*
-	 * We call md_cluster_ops->update_size here because sync_size could
+	 * We call mddev->cluster_ops->update_size here because sync_size could
 	 * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
 	 * so it is time to update size across cluster.
 	 */
 	if (mddev_is_clustered(mddev) && is_reshaped
 				      && !test_bit(MD_CLOSING, &mddev->flags))
-		md_cluster_ops->update_size(mddev, old_dev_sectors);
+		mddev->cluster_ops->update_size(mddev, old_dev_sectors);
 	/* flag recovery needed just to double check */
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	sysfs_notify_dirent_safe(mddev->sysfs_completed);
@@ -10035,7 +10051,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
 		if (rdev2->raid_disk == -1 && role != MD_DISK_ROLE_SPARE &&
 		    !(le32_to_cpu(sb->feature_map) &
 		      MD_FEATURE_RESHAPE_ACTIVE) &&
-		    !md_cluster_ops->resync_status_get(mddev)) {
+		    !mddev->cluster_ops->resync_status_get(mddev)) {
 			/*
 			 * -1 to make raid1_add_disk() set conf->fullsync
 			 * to 1. This could avoid skipping sync when the