Skip to content

Commit c594de0

Browse files
YuKuai-huawei (Yu Kuai)
authored and committed
md: don't export md_cluster_ops
Add a new field 'cluster_ops' and initialize it in md_setup_cluster(), so that the global variable 'md_cluster_ops' doesn't need to be exported. Also prepare to switch md-cluster to use md_submod_head. Link: https://lore.kernel.org/linux-raid/[email protected] Signed-off-by: Yu Kuai <[email protected]> Reviewed-by: Su Yue <[email protected]>
1 parent ff84e1b commit c594de0

File tree

6 files changed

+76
-59
lines changed

6 files changed

+76
-59
lines changed

drivers/md/md-bitmap.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -944,7 +944,7 @@ static int md_bitmap_read_sb(struct bitmap *bitmap)
944944
bmname(bitmap), err);
945945
goto out_no_sb;
946946
}
947-
bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
947+
bitmap->cluster_slot = bitmap->mddev->cluster_ops->slot_number(bitmap->mddev);
948948
goto re_read;
949949
}
950950

@@ -2023,7 +2023,7 @@ static void md_bitmap_free(void *data)
20232023
sysfs_put(bitmap->sysfs_can_clear);
20242024

20252025
if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
2026-
bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
2026+
bitmap->cluster_slot == bitmap->mddev->cluster_ops->slot_number(bitmap->mddev))
20272027
md_cluster_stop(bitmap->mddev);
20282028

20292029
/* Shouldn't be needed - but just in case.... */
@@ -2231,7 +2231,7 @@ static int bitmap_load(struct mddev *mddev)
22312231
mddev_create_serial_pool(mddev, rdev);
22322232

22332233
if (mddev_is_clustered(mddev))
2234-
md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
2234+
mddev->cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
22352235

22362236
/* Clear out old bitmap info first: Either there is none, or we
22372237
* are resuming after someone else has possibly changed things,

drivers/md/md.c

Lines changed: 55 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -85,8 +85,7 @@ static DEFINE_SPINLOCK(pers_lock);
8585

8686
static const struct kobj_type md_ktype;
8787

88-
const struct md_cluster_operations *md_cluster_ops;
89-
EXPORT_SYMBOL(md_cluster_ops);
88+
static const struct md_cluster_operations *md_cluster_ops;
9089
static struct module *md_cluster_mod;
9190

9291
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
@@ -2663,11 +2662,11 @@ void md_update_sb(struct mddev *mddev, int force_change)
26632662
force_change = 1;
26642663
if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
26652664
nospares = 1;
2666-
ret = md_cluster_ops->metadata_update_start(mddev);
2665+
ret = mddev->cluster_ops->metadata_update_start(mddev);
26672666
/* Has someone else has updated the sb */
26682667
if (!does_sb_need_changing(mddev)) {
26692668
if (ret == 0)
2670-
md_cluster_ops->metadata_update_cancel(mddev);
2669+
mddev->cluster_ops->metadata_update_cancel(mddev);
26712670
bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
26722671
BIT(MD_SB_CHANGE_DEVS) |
26732672
BIT(MD_SB_CHANGE_CLEAN));
@@ -2807,7 +2806,7 @@ void md_update_sb(struct mddev *mddev, int force_change)
28072806
/* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
28082807

28092808
if (mddev_is_clustered(mddev) && ret == 0)
2810-
md_cluster_ops->metadata_update_finish(mddev);
2809+
mddev->cluster_ops->metadata_update_finish(mddev);
28112810

28122811
if (mddev->in_sync != sync_req ||
28132812
!bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
@@ -2966,7 +2965,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
29662965
else {
29672966
err = 0;
29682967
if (mddev_is_clustered(mddev))
2969-
err = md_cluster_ops->remove_disk(mddev, rdev);
2968+
err = mddev->cluster_ops->remove_disk(mddev, rdev);
29702969

29712970
if (err == 0) {
29722971
md_kick_rdev_from_array(rdev);
@@ -3076,7 +3075,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
30763075
* by this node eventually
30773076
*/
30783077
if (!mddev_is_clustered(rdev->mddev) ||
3079-
(err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
3078+
(err = mddev->cluster_ops->gather_bitmaps(rdev)) == 0) {
30803079
clear_bit(Faulty, &rdev->flags);
30813080
err = add_bound_rdev(rdev);
30823081
}
@@ -6994,7 +6993,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
69946993
set_bit(Candidate, &rdev->flags);
69956994
else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
69966995
/* --add initiated by this node */
6997-
err = md_cluster_ops->add_new_disk(mddev, rdev);
6996+
err = mddev->cluster_ops->add_new_disk(mddev, rdev);
69986997
if (err) {
69996998
export_rdev(rdev, mddev);
70006999
return err;
@@ -7011,14 +7010,14 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
70117010
if (mddev_is_clustered(mddev)) {
70127011
if (info->state & (1 << MD_DISK_CANDIDATE)) {
70137012
if (!err) {
7014-
err = md_cluster_ops->new_disk_ack(mddev,
7015-
err == 0);
7013+
err = mddev->cluster_ops->new_disk_ack(
7014+
mddev, err == 0);
70167015
if (err)
70177016
md_kick_rdev_from_array(rdev);
70187017
}
70197018
} else {
70207019
if (err)
7021-
md_cluster_ops->add_new_disk_cancel(mddev);
7020+
mddev->cluster_ops->add_new_disk_cancel(mddev);
70227021
else
70237022
err = add_bound_rdev(rdev);
70247023
}
@@ -7098,10 +7097,9 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
70987097
goto busy;
70997098

71007099
kick_rdev:
7101-
if (mddev_is_clustered(mddev)) {
7102-
if (md_cluster_ops->remove_disk(mddev, rdev))
7103-
goto busy;
7104-
}
7100+
if (mddev_is_clustered(mddev) &&
7101+
mddev->cluster_ops->remove_disk(mddev, rdev))
7102+
goto busy;
71057103

71067104
md_kick_rdev_from_array(rdev);
71077105
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
@@ -7404,7 +7402,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
74047402
rv = mddev->pers->resize(mddev, num_sectors);
74057403
if (!rv) {
74067404
if (mddev_is_clustered(mddev))
7407-
md_cluster_ops->update_size(mddev, old_dev_sectors);
7405+
mddev->cluster_ops->update_size(mddev, old_dev_sectors);
74087406
else if (!mddev_is_dm(mddev))
74097407
set_capacity_and_notify(mddev->gendisk,
74107408
mddev->array_sectors);
@@ -7452,6 +7450,27 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
74527450
return rv;
74537451
}
74547452

7453+
static int get_cluster_ops(struct mddev *mddev)
7454+
{
7455+
spin_lock(&pers_lock);
7456+
mddev->cluster_ops = md_cluster_ops;
7457+
if (mddev->cluster_ops && !try_module_get(md_cluster_mod))
7458+
mddev->cluster_ops = NULL;
7459+
spin_unlock(&pers_lock);
7460+
7461+
return mddev->cluster_ops == NULL ? -ENOENT : 0;
7462+
}
7463+
7464+
static void put_cluster_ops(struct mddev *mddev)
7465+
{
7466+
if (!mddev->cluster_ops)
7467+
return;
7468+
7469+
mddev->cluster_ops->leave(mddev);
7470+
module_put(md_cluster_mod);
7471+
mddev->cluster_ops = NULL;
7472+
}
7473+
74557474
/*
74567475
* update_array_info is used to change the configuration of an
74577476
* on-line array.
@@ -7560,16 +7579,15 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
75607579

75617580
if (mddev->bitmap_info.nodes) {
75627581
/* hold PW on all the bitmap lock */
7563-
if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
7582+
if (mddev->cluster_ops->lock_all_bitmaps(mddev) <= 0) {
75647583
pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
75657584
rv = -EPERM;
7566-
md_cluster_ops->unlock_all_bitmaps(mddev);
7585+
mddev->cluster_ops->unlock_all_bitmaps(mddev);
75677586
goto err;
75687587
}
75697588

75707589
mddev->bitmap_info.nodes = 0;
7571-
md_cluster_ops->leave(mddev);
7572-
module_put(md_cluster_mod);
7590+
put_cluster_ops(mddev);
75737591
mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
75747592
}
75757593
mddev->bitmap_ops->destroy(mddev);
@@ -7853,7 +7871,7 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
78537871

78547872
case CLUSTERED_DISK_NACK:
78557873
if (mddev_is_clustered(mddev))
7856-
md_cluster_ops->new_disk_ack(mddev, false);
7874+
mddev->cluster_ops->new_disk_ack(mddev, false);
78577875
else
78587876
err = -EINVAL;
78597877
goto unlock;
@@ -8568,30 +8586,28 @@ EXPORT_SYMBOL(unregister_md_cluster_operations);
85688586

85698587
int md_setup_cluster(struct mddev *mddev, int nodes)
85708588
{
8571-
int ret;
8572-
if (!md_cluster_ops)
8589+
int ret = get_cluster_ops(mddev);
8590+
8591+
if (ret) {
85738592
request_module("md-cluster");
8574-
spin_lock(&pers_lock);
8593+
ret = get_cluster_ops(mddev);
8594+
}
8595+
85758596
/* ensure module won't be unloaded */
8576-
if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
8597+
if (ret) {
85778598
pr_warn("can't find md-cluster module or get its reference.\n");
8578-
spin_unlock(&pers_lock);
8579-
return -ENOENT;
8599+
return ret;
85808600
}
8581-
spin_unlock(&pers_lock);
85828601

8583-
ret = md_cluster_ops->join(mddev, nodes);
8602+
ret = mddev->cluster_ops->join(mddev, nodes);
85848603
if (!ret)
85858604
mddev->safemode_delay = 0;
85868605
return ret;
85878606
}
85888607

85898608
void md_cluster_stop(struct mddev *mddev)
85908609
{
8591-
if (!md_cluster_ops)
8592-
return;
8593-
md_cluster_ops->leave(mddev);
8594-
module_put(md_cluster_mod);
8610+
put_cluster_ops(mddev);
85958611
}
85968612

85978613
static int is_mddev_idle(struct mddev *mddev, int init)
@@ -8984,7 +9000,7 @@ void md_do_sync(struct md_thread *thread)
89849000
}
89859001

89869002
if (mddev_is_clustered(mddev)) {
8987-
ret = md_cluster_ops->resync_start(mddev);
9003+
ret = mddev->cluster_ops->resync_start(mddev);
89889004
if (ret)
89899005
goto skip;
89909006

@@ -9011,7 +9027,7 @@ void md_do_sync(struct md_thread *thread)
90119027
*
90129028
*/
90139029
if (mddev_is_clustered(mddev))
9014-
md_cluster_ops->resync_start_notify(mddev);
9030+
mddev->cluster_ops->resync_start_notify(mddev);
90159031
do {
90169032
int mddev2_minor = -1;
90179033
mddev->curr_resync = MD_RESYNC_DELAYED;
@@ -9795,21 +9811,21 @@ void md_reap_sync_thread(struct mddev *mddev)
97959811
* call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
97969812
* clustered raid */
97979813
if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
9798-
md_cluster_ops->resync_finish(mddev);
9814+
mddev->cluster_ops->resync_finish(mddev);
97999815
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
98009816
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
98019817
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
98029818
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
98039819
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
98049820
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
98059821
/*
9806-
* We call md_cluster_ops->update_size here because sync_size could
9822+
* We call mddev->cluster_ops->update_size here because sync_size could
98079823
* be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
98089824
* so it is time to update size across cluster.
98099825
*/
98109826
if (mddev_is_clustered(mddev) && is_reshaped
98119827
&& !test_bit(MD_CLOSING, &mddev->flags))
9812-
md_cluster_ops->update_size(mddev, old_dev_sectors);
9828+
mddev->cluster_ops->update_size(mddev, old_dev_sectors);
98139829
/* flag recovery needed just to double check */
98149830
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
98159831
sysfs_notify_dirent_safe(mddev->sysfs_completed);
@@ -10035,7 +10051,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
1003510051
if (rdev2->raid_disk == -1 && role != MD_DISK_ROLE_SPARE &&
1003610052
!(le32_to_cpu(sb->feature_map) &
1003710053
MD_FEATURE_RESHAPE_ACTIVE) &&
10038-
!md_cluster_ops->resync_status_get(mddev)) {
10054+
!mddev->cluster_ops->resync_status_get(mddev)) {
1003910055
/*
1004010056
* -1 to make raid1_add_disk() set conf->fullsync
1004110057
* to 1. This could avoid skipping sync when the

drivers/md/md.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -320,6 +320,7 @@ extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
320320
extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
321321
int is_new);
322322
struct md_cluster_info;
323+
struct md_cluster_operations;
323324

324325
/**
325326
* enum mddev_flags - md device flags.
@@ -602,6 +603,7 @@ struct mddev {
602603
mempool_t *serial_info_pool;
603604
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
604605
struct md_cluster_info *cluster_info;
606+
const struct md_cluster_operations *cluster_ops;
605607
unsigned int good_device_nr; /* good device num within cluster raid */
606608
unsigned int noio_flag; /* for memalloc scope API */
607609

@@ -947,7 +949,6 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
947949
}
948950
}
949951

950-
extern const struct md_cluster_operations *md_cluster_ops;
951952
static inline int mddev_is_clustered(struct mddev *mddev)
952953
{
953954
return mddev->cluster_info && mddev->bitmap_info.nodes > 1;

drivers/md/raid1-10.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -287,8 +287,8 @@ static inline bool raid1_should_read_first(struct mddev *mddev,
287287
return true;
288288

289289
if (mddev_is_clustered(mddev) &&
290-
md_cluster_ops->area_resyncing(mddev, READ, this_sector,
291-
this_sector + len))
290+
mddev->cluster_ops->area_resyncing(mddev, READ, this_sector,
291+
this_sector + len))
292292
return true;
293293

294294
return false;

drivers/md/raid1.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1469,7 +1469,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
14691469
bool is_discard = (bio_op(bio) == REQ_OP_DISCARD);
14701470

14711471
if (mddev_is_clustered(mddev) &&
1472-
md_cluster_ops->area_resyncing(mddev, WRITE,
1472+
mddev->cluster_ops->area_resyncing(mddev, WRITE,
14731473
bio->bi_iter.bi_sector, bio_end_sector(bio))) {
14741474

14751475
DEFINE_WAIT(w);
@@ -1480,7 +1480,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
14801480
for (;;) {
14811481
prepare_to_wait(&conf->wait_barrier,
14821482
&w, TASK_IDLE);
1483-
if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1483+
if (!mddev->cluster_ops->area_resyncing(mddev, WRITE,
14841484
bio->bi_iter.bi_sector,
14851485
bio_end_sector(bio)))
14861486
break;
@@ -3040,9 +3040,9 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
30403040
conf->cluster_sync_low = mddev->curr_resync_completed;
30413041
conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
30423042
/* Send resync message */
3043-
md_cluster_ops->resync_info_update(mddev,
3044-
conf->cluster_sync_low,
3045-
conf->cluster_sync_high);
3043+
mddev->cluster_ops->resync_info_update(mddev,
3044+
conf->cluster_sync_low,
3045+
conf->cluster_sync_high);
30463046
}
30473047

30483048
/* For a user-requested sync, we read all readable devices and do a

0 commit comments

Comments
 (0)