
Commit 03a001b

surenbaghdasaryan authored and Peter Zijlstra committed
mm: introduce mmap_lock_speculate_{try_begin|retry}
Add helper functions to speculatively perform operations without
read-locking mmap_lock, expecting that mmap_lock will not be
write-locked and mm is not modified from under us.

Suggested-by: Peter Zijlstra <[email protected]>
Signed-off-by: Suren Baghdasaryan <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Liam R. Howlett <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent eb449bd commit 03a001b

File tree

1 file changed: +31 −2 lines changed


include/linux/mmap_lock.h

Lines changed: 31 additions & 2 deletions
@@ -71,6 +71,7 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm)
 }
 
 #ifdef CONFIG_PER_VMA_LOCK
+
 static inline void mm_lock_seqcount_init(struct mm_struct *mm)
 {
 	seqcount_init(&mm->mm_lock_seq);
@@ -87,11 +88,39 @@ static inline void mm_lock_seqcount_end(struct mm_struct *mm)
 	do_raw_write_seqcount_end(&mm->mm_lock_seq);
 }
 
-#else
+static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
+{
+	/*
+	 * Since mmap_lock is a sleeping lock, and waiting for it to become
+	 * unlocked is more or less equivalent with taking it ourselves, don't
+	 * bother with the speculative path if mmap_lock is already write-locked
+	 * and take the slow path, which takes the lock.
+	 */
+	return raw_seqcount_try_begin(&mm->mm_lock_seq, *seq);
+}
+
+static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
+{
+	return read_seqcount_retry(&mm->mm_lock_seq, seq);
+}
+
+#else /* CONFIG_PER_VMA_LOCK */
+
 static inline void mm_lock_seqcount_init(struct mm_struct *mm) {}
 static inline void mm_lock_seqcount_begin(struct mm_struct *mm) {}
 static inline void mm_lock_seqcount_end(struct mm_struct *mm) {}
-#endif
+
+static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
+{
+	return false;
+}
+
+static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
+{
+	return true;
+}
+
+#endif /* CONFIG_PER_VMA_LOCK */
 
 static inline void mmap_init_lock(struct mm_struct *mm)
 {
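
For context, the new helpers are meant to be paired in the usual seqcount pattern: try to begin a speculative, lock-free read, validate it afterwards, and fall back to taking mmap_lock for read if a writer raced with us. Below is a minimal sketch of that pattern; get_stat_speculative() and the choice of mm->total_vm as the value being read are hypothetical illustrations, not part of this commit. Only the mmap_lock_speculate_*() helpers above and the existing mmap_read_lock()/mmap_read_unlock() are real API.

#include <linux/mm.h>

/*
 * Hypothetical caller sketching the intended use; only the
 * mmap_lock_speculate_*() helpers come from this commit. The
 * speculative read must be safe to perform without mmap_lock held
 * (e.g. it touches only RCU-protected or word-sized data).
 */
static unsigned long get_stat_speculative(struct mm_struct *mm)
{
	unsigned long val;
	unsigned int seq;

	/* Fails immediately if mmap_lock is already write-locked. */
	if (mmap_lock_speculate_try_begin(mm, &seq)) {
		val = READ_ONCE(mm->total_vm);	/* lock-free read */
		/* retry() returns false if no writer raced with us. */
		if (!mmap_lock_speculate_retry(mm, seq))
			return val;
	}

	/* Slow path: take mmap_lock for read and redo the read. */
	mmap_read_lock(mm);
	val = mm->total_vm;
	mmap_read_unlock(mm);
	return val;
}

As the in-diff comment explains, raw_seqcount_try_begin() refuses to start the speculative section when a writer already holds mmap_lock (odd sequence count): waiting for a sleeping lock to be released costs about as much as taking it, so callers should go straight to the locked slow path in that case.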
