Commit 3371fa2

Author: Al Viro
struct mount: relocate MNT_WRITE_HOLD bit
... from ->mnt_flags to the LSB of ->mnt_pprev_for_sb.

This is safe: we always set and clear the bit within the same mount_lock
scope, so it won't interfere with list operations. Traversals are always
forward, so they never look at ->mnt_pprev_for_sb, and both insertions
and removals are in mount_lock scopes of their own, so that bit will be
clear in *all* mount instances during those.

Reviewed-by: Christian Brauner <[email protected]>
Signed-off-by: Al Viro <[email protected]>
Parent: 09a1b33
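The commit message's safety argument rests on a standard pointer-tagging trick: a pointer to a pointer-aligned object always has its low bit clear, so that bit can carry a flag as long as it is masked off before the value is dereferenced (the __aligned(1) annotation on the field tells the compiler not to assume the bit is clear). Below is a minimal user-space sketch of the same pattern; the node type, field names, and helpers are invented for illustration, not the kernel's:

/*
 * Illustrative user-space sketch (not kernel code): steal the LSB of an
 * aligned pointer for a flag, as ->mnt_pprev_for_sb does with WRITE_HOLD.
 * The names here (node, pprev, HOLD_BIT) are made up for the example.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HOLD_BIT 1UL

struct node {
	struct node *next;
	struct node **pprev;		/* LSB doubles as a flag bit */
};

static int test_hold(const struct node *n)
{
	return (uintptr_t)n->pprev & HOLD_BIT;
}

static void set_hold(struct node *n)
{
	n->pprev = (struct node **)((uintptr_t)n->pprev | HOLD_BIT);
}

static void clear_hold(struct node *n)
{
	n->pprev = (struct node **)((uintptr_t)n->pprev & ~HOLD_BIT);
}

int main(void)
{
	struct node head = { 0 };
	struct node n = { .next = NULL, .pprev = &head.next };

	assert(((uintptr_t)n.pprev & HOLD_BIT) == 0);	/* alignment frees the bit */
	set_hold(&n);
	printf("held: %d\n", test_hold(&n));		/* held: 1 */
	clear_hold(&n);
	printf("held: %d\n", test_hold(&n));		/* held: 0 */
	assert(n.pprev == &head.next);	/* back-pointer intact once cleared */
	return 0;
}

This also shows why the commit message's argument goes through: forward traversals only ever read the next pointer, so a flag bit parked in pprev is invisible to them.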

3 files changed: +42 lines, −20 lines

fs/mount.h

Lines changed: 24 additions & 1 deletion
@@ -66,7 +66,8 @@ struct mount {
 	struct list_head mnt_child;	/* and going through their mnt_child */
 	struct mount *mnt_next_for_sb;	/* the next two fields are hlist_node, */
 	struct mount * __aligned(1) *mnt_pprev_for_sb;
-					/* except that LSB of pprev will be stolen */
+					/* except that LSB of pprev is stolen */
+#define WRITE_HOLD 1			/* ... for use by mnt_hold_writers() */
 	const char *mnt_devname;	/* Name of device e.g. /dev/dsk/hda1 */
 	struct list_head mnt_list;
 	struct list_head mnt_expire;	/* link in fs-specific expiry list */
@@ -244,4 +245,26 @@ static inline struct mount *topmost_overmount(struct mount *m)
 	return m;
 }
 
+static inline bool __test_write_hold(struct mount * __aligned(1) *val)
+{
+	return (unsigned long)val & WRITE_HOLD;
+}
+
+static inline bool test_write_hold(const struct mount *m)
+{
+	return __test_write_hold(m->mnt_pprev_for_sb);
+}
+
+static inline void set_write_hold(struct mount *m)
+{
+	m->mnt_pprev_for_sb = (void *)((unsigned long)m->mnt_pprev_for_sb
+				       | WRITE_HOLD);
+}
+
+static inline void clear_write_hold(struct mount *m)
+{
+	m->mnt_pprev_for_sb = (void *)((unsigned long)m->mnt_pprev_for_sb
+				       & ~WRITE_HOLD);
+}
+
 struct mnt_namespace *mnt_ns_from_dentry(struct dentry *dentry);

fs/namespace.c

Lines changed: 17 additions & 17 deletions
@@ -509,20 +509,20 @@ int mnt_get_write_access(struct vfsmount *m)
 	mnt_inc_writers(mnt);
 	/*
 	 * The store to mnt_inc_writers must be visible before we pass
-	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
-	 * incremented count after it has set MNT_WRITE_HOLD.
+	 * WRITE_HOLD loop below, so that the slowpath can see our
+	 * incremented count after it has set WRITE_HOLD.
 	 */
 	smp_mb();
 	might_lock(&mount_lock.lock);
-	while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
+	while (__test_write_hold(READ_ONCE(mnt->mnt_pprev_for_sb))) {
 		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
 			cpu_relax();
 		} else {
 			/*
 			 * This prevents priority inversion, if the task
-			 * setting MNT_WRITE_HOLD got preempted on a remote
+			 * setting WRITE_HOLD got preempted on a remote
 			 * CPU, and it prevents life lock if the task setting
-			 * MNT_WRITE_HOLD has a lower priority and is bound to
+			 * WRITE_HOLD has a lower priority and is bound to
 			 * the same CPU as the task that is spinning here.
 			 */
 			preempt_enable();
@@ -533,7 +533,7 @@ int mnt_get_write_access(struct vfsmount *m)
 	}
 	/*
 	 * The barrier pairs with the barrier sb_start_ro_state_change() making
-	 * sure that if we see MNT_WRITE_HOLD cleared, we will also see
+	 * sure that if we see WRITE_HOLD cleared, we will also see
 	 * s_readonly_remount set (or even SB_RDONLY / MNT_READONLY flags) in
 	 * mnt_is_readonly() and bail in case we are racing with remount
 	 * read-only.
@@ -672,15 +672,15 @@ EXPORT_SYMBOL(mnt_drop_write_file);
  * @mnt.
  *
  * Context: This function expects lock_mount_hash() to be held serializing
- * setting MNT_WRITE_HOLD.
+ * setting WRITE_HOLD.
  * Return: On success 0 is returned.
  *	   On error, -EBUSY is returned.
  */
 static inline int mnt_hold_writers(struct mount *mnt)
 {
-	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
+	set_write_hold(mnt);
 	/*
-	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
+	 * After storing WRITE_HOLD, we'll read the counters. This store
 	 * should be visible before we do.
 	 */
 	smp_mb();
@@ -696,9 +696,9 @@ static inline int mnt_hold_writers(struct mount *mnt)
 	 * sum up each counter, if we read a counter before it is incremented,
 	 * but then read another CPU's count which it has been subsequently
 	 * decremented from -- we would see more decrements than we should.
-	 * MNT_WRITE_HOLD protects against this scenario, because
+	 * WRITE_HOLD protects against this scenario, because
 	 * mnt_want_write first increments count, then smp_mb, then spins on
-	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
+	 * WRITE_HOLD, so it can't be decremented by another CPU while
 	 * we're counting up here.
 	 */
 	if (mnt_get_writers(mnt) > 0)
@@ -720,14 +720,14 @@ static inline int mnt_hold_writers(struct mount *mnt)
  */
 static inline void mnt_unhold_writers(struct mount *mnt)
 {
-	if (!(mnt->mnt.mnt_flags & MNT_WRITE_HOLD))
+	if (!test_write_hold(mnt))
 		return;
 	/*
-	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
+	 * MNT_READONLY must become visible before ~WRITE_HOLD, so writers
 	 * that become unheld will see MNT_READONLY.
 	 */
 	smp_wmb();
-	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
+	clear_write_hold(mnt);
 }
 
 static inline void mnt_del_instance(struct mount *m)
@@ -766,7 +766,7 @@ int sb_prepare_remount_readonly(struct super_block *sb)
 {
 	int err = 0;
 
-	/* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */
+	/* Racy optimization. Recheck the counter under WRITE_HOLD */
 	if (atomic_long_read(&sb->s_remove_count))
 		return -EBUSY;
 
@@ -784,8 +784,8 @@ int sb_prepare_remount_readonly(struct super_block *sb)
 	if (!err)
 		sb_start_ro_state_change(sb);
 	for (struct mount *m = sb->s_mounts; m; m = m->mnt_next_for_sb) {
-		if (m->mnt.mnt_flags & MNT_WRITE_HOLD)
-			m->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
+		if (test_write_hold(m))
+			clear_write_hold(m);
 	}
 	unlock_mount_hash();

include/linux/mount.h

Lines changed: 1 addition & 2 deletions
@@ -33,7 +33,6 @@ enum mount_flags {
 	MNT_NOSYMFOLLOW = 0x80,
 
 	MNT_SHRINKABLE = 0x100,
-	MNT_WRITE_HOLD = 0x200,
 
 	MNT_INTERNAL = 0x4000,
 
@@ -52,7 +51,7 @@ enum mount_flags {
 			  | MNT_READONLY | MNT_NOSYMFOLLOW,
 	MNT_ATIME_MASK = MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME,
 
-	MNT_INTERNAL_FLAGS = MNT_WRITE_HOLD | MNT_INTERNAL | MNT_DOOMED |
+	MNT_INTERNAL_FLAGS = MNT_INTERNAL | MNT_DOOMED |
 			     MNT_SYNC_UMOUNT | MNT_LOCKED
 };
