@@ -509,20 +509,20 @@ int mnt_get_write_access(struct vfsmount *m)
 	mnt_inc_writers(mnt);
 	/*
 	 * The store to mnt_inc_writers must be visible before we pass
-	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
-	 * incremented count after it has set MNT_WRITE_HOLD.
+	 * the WRITE_HOLD loop below, so that the slowpath can see our
+	 * incremented count after it has set WRITE_HOLD.
 	 */
 	smp_mb();
 	might_lock(&mount_lock.lock);
-	while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
+	while (__test_write_hold(READ_ONCE(mnt->mnt_pprev_for_sb))) {
 		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
 			cpu_relax();
 		} else {
 			/*
 			 * This prevents priority inversion, if the task
-			 * setting MNT_WRITE_HOLD got preempted on a remote
+			 * setting WRITE_HOLD got preempted on a remote
 			 * CPU, and it prevents livelock if the task setting
-			 * MNT_WRITE_HOLD has a lower priority and is bound to
+			 * WRITE_HOLD has a lower priority and is bound to
 			 * the same CPU as the task that is spinning here.
 			 */
 			preempt_enable();
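
The hunk above replaces the test of MNT_WRITE_HOLD in mnt_flags with __test_write_hold() applied to the raw mnt_pprev_for_sb value. A minimal sketch of what the new helpers could look like, assuming WRITE_HOLD is kept as a tag in the low bit of the mnt_pprev_for_sb back-pointer (the helper names come from this diff; the bit encoding itself is an assumption):

/* Sketch only: assumes WRITE_HOLD lives in the low bit of the
 * mnt_pprev_for_sb back-pointer rather than in mnt->mnt.mnt_flags.
 * set/clear run under mount_lock, per the kerneldoc below. */
#define WRITE_HOLD	1UL

static inline bool __test_write_hold(struct mount **pprev)
{
	return (unsigned long)pprev & WRITE_HOLD;
}

static inline bool test_write_hold(const struct mount *mnt)
{
	return __test_write_hold(mnt->mnt_pprev_for_sb);
}

static inline void set_write_hold(struct mount *mnt)
{
	mnt->mnt_pprev_for_sb = (struct mount **)
		((unsigned long)mnt->mnt_pprev_for_sb | WRITE_HOLD);
}

static inline void clear_write_hold(struct mount *mnt)
{
	mnt->mnt_pprev_for_sb = (struct mount **)
		((unsigned long)mnt->mnt_pprev_for_sb & ~WRITE_HOLD);
}

Under that encoding, the fast path only has to READ_ONCE() a single word instead of consulting mnt_flags.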
@@ -533,7 +533,7 @@ int mnt_get_write_access(struct vfsmount *m)
 	}
 	/*
 	 * The barrier pairs with the barrier in sb_start_ro_state_change() making
-	 * sure that if we see MNT_WRITE_HOLD cleared, we will also see
+	 * sure that if we see WRITE_HOLD cleared, we will also see
 	 * s_readonly_remount set (or even SB_RDONLY / MNT_READONLY flags) in
 	 * mnt_is_readonly() and bail in case we are racing with remount
 	 * read-only.
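
The barrier referred to here is the smp_wmb() in sb_start_ro_state_change(); roughly, for context (a sketch of the fs/super.c side, not part of this diff):

/* Sketch of the pairing side in sb_start_ro_state_change(). */
static void sb_start_ro_state_change(struct super_block *sb)
{
	WRITE_ONCE(sb->s_readonly_remount, 1);
	/*
	 * Make the s_readonly_remount store visible before the remount
	 * path goes on to clear WRITE_HOLD, so a writer that sees
	 * WRITE_HOLD cleared also sees s_readonly_remount set and bails.
	 */
	smp_wmb();
}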
@@ -672,15 +672,15 @@ EXPORT_SYMBOL(mnt_drop_write_file);
  * @mnt.
  *
  * Context: This function expects lock_mount_hash() to be held serializing
- * setting MNT_WRITE_HOLD.
+ * setting WRITE_HOLD.
  * Return: On success 0 is returned.
  *         On error, -EBUSY is returned.
  */
 static inline int mnt_hold_writers(struct mount *mnt)
 {
-	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
+	set_write_hold(mnt);
 	/*
-	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
+	 * After storing WRITE_HOLD, we'll read the counters. This store
 	 * should be visible before we do.
 	 */
 	smp_mb();
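
This smp_mb() pairs with the one in the mnt_get_write_access() fast path above. An illustrative interleaving of why both sides need a full barrier:

/*
 * Illustrative interleaving (not code from this diff):
 *
 *   CPU0: mnt_get_write_access()    CPU1: mnt_hold_writers()
 *   ----------------------------    -------------------------
 *   mnt_inc_writers(mnt);           set_write_hold(mnt);
 *   smp_mb();                       smp_mb();
 *   spin while WRITE_HOLD is set;   if (mnt_get_writers(mnt) > 0)
 *                                           return -EBUSY;
 *
 * The paired full barriers rule out the outcome where CPU0 misses
 * WRITE_HOLD *and* CPU1 misses the increment: at least one side is
 * guaranteed to observe the other's store.
 */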
@@ -696,9 +696,9 @@ static inline int mnt_hold_writers(struct mount *mnt)
 	 * sum up each counter, if we read a counter before it is incremented,
 	 * but then read another CPU's count which it has been subsequently
 	 * decremented from -- we would see more decrements than we should.
-	 * MNT_WRITE_HOLD protects against this scenario, because
+	 * WRITE_HOLD protects against this scenario, because
 	 * mnt_want_write first increments count, then smp_mb, then spins on
-	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
+	 * WRITE_HOLD, so it can't be decremented by another CPU while
 	 * we're counting up here.
 	 */
 	if (mnt_get_writers(mnt) > 0)
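
mnt_get_writers() sums per-CPU counters, so it can transiently over- or undercount exactly as described. For context, a sketch of the CONFIG_SMP summation (consistent with mainline's shape, shown here as background rather than as part of this diff):

/* Sketch: sum the per-CPU writer counts. The transient skew this can
 * produce is what the WRITE_HOLD dance above compensates for. */
static int mnt_get_writers(struct mount *mnt)
{
	int count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;

	return count;
}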
@@ -720,14 +720,14 @@ static inline int mnt_hold_writers(struct mount *mnt)
  */
 static inline void mnt_unhold_writers(struct mount *mnt)
 {
-	if (!(mnt->mnt.mnt_flags & MNT_WRITE_HOLD))
+	if (!test_write_hold(mnt))
 		return;
 	/*
-	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
+	 * MNT_READONLY must become visible before ~WRITE_HOLD, so writers
 	 * that become unheld will see MNT_READONLY.
 	 */
 	smp_wmb();
-	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
+	clear_write_hold(mnt);
 }
 
 static inline void mnt_del_instance(struct mount *m)
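
For context, the hold/unhold pair is driven by the remount path roughly as follows, under lock_mount_hash() (a sketch modeled on mnt_make_readonly()):

/* Illustrative caller: freeze writers, flip the mount read-only,
 * then release the held writers. Runs under lock_mount_hash(). */
static int mnt_make_readonly(struct mount *mnt)
{
	int ret;

	ret = mnt_hold_writers(mnt);	/* sets WRITE_HOLD, fails if writers exist */
	if (!ret)
		mnt->mnt.mnt_flags |= MNT_READONLY;
	mnt_unhold_writers(mnt);	/* drops WRITE_HOLD either way */
	return ret;
}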
@@ -766,7 +766,7 @@ int sb_prepare_remount_readonly(struct super_block *sb)
 {
 	int err = 0;
 
-	/* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */
+	/* Racy optimization. Recheck the counter under WRITE_HOLD */
 	if (atomic_long_read(&sb->s_remove_count))
 		return -EBUSY;
 
@@ -784,8 +784,8 @@ int sb_prepare_remount_readonly(struct super_block *sb)
 	if (!err)
 		sb_start_ro_state_change(sb);
 	for (struct mount *m = sb->s_mounts; m; m = m->mnt_next_for_sb) {
-		if (m->mnt.mnt_flags & MNT_WRITE_HOLD)
-			m->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
+		if (test_write_hold(m))
+			clear_write_hold(m);
 	}
 	unlock_mount_hash();
 
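
The hunk elides the middle of sb_prepare_remount_readonly(). Roughly, it walks the superblock's mount list and holds writers on each mount before committing; a sketch consistent with the s_mounts / mnt_next_for_sb chaining visible above (an approximation, not the exact elided code):

	lock_mount_hash();
	for (struct mount *m = sb->s_mounts; m; m = m->mnt_next_for_sb) {
		if (!(m->mnt.mnt_flags & MNT_READONLY)) {
			err = mnt_hold_writers(m);	/* leaves WRITE_HOLD set */
			if (err)
				break;
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;
	/* on success, publish the state change, then drop the holds above */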