@@ -71,6 +71,7 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm)
 }
 
 #ifdef CONFIG_PER_VMA_LOCK
+
 static inline void mm_lock_seqcount_init(struct mm_struct *mm)
 {
 	seqcount_init(&mm->mm_lock_seq);
@@ -87,11 +88,39 @@ static inline void mm_lock_seqcount_end(struct mm_struct *mm)
 	do_raw_write_seqcount_end(&mm->mm_lock_seq);
 }
 
-#else
+static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
+{
+	/*
+	 * Since mmap_lock is a sleeping lock, and waiting for it to become
+	 * unlocked is more or less equivalent with taking it ourselves, don't
+	 * bother with the speculative path if mmap_lock is already write-locked
+	 * and take the slow path, which takes the lock.
+	 */
+	return raw_seqcount_try_begin(&mm->mm_lock_seq, *seq);
+}
+
+static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
+{
+	return read_seqcount_retry(&mm->mm_lock_seq, seq);
+}
+
+#else /* CONFIG_PER_VMA_LOCK */
+
 static inline void mm_lock_seqcount_init(struct mm_struct *mm) {}
 static inline void mm_lock_seqcount_begin(struct mm_struct *mm) {}
 static inline void mm_lock_seqcount_end(struct mm_struct *mm) {}
-#endif
+
+static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
+{
+	return false;
+}
+
+static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
+{
+	return true;
+}
+
+#endif /* CONFIG_PER_VMA_LOCK */
 
 static inline void mmap_init_lock(struct mm_struct *mm)
 {
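
For context, a minimal caller sketch of how the two new helpers are meant to be paired (not part of this diff; the lookup helpers below are hypothetical): snapshot the sequence count with mmap_lock_speculate_try_begin(), do the lock-free read, then validate it with mmap_lock_speculate_retry() and fall back to taking mmap_lock if the speculation raced with a writer or a writer already held the lock.

/*
 * Sketch of a speculative reader. Only mmap_lock_speculate_try_begin(),
 * mmap_lock_speculate_retry() and mmap_read_lock()/mmap_read_unlock() are
 * real kernel APIs; lookup_without_lock() and lookup_locked() are
 * hypothetical placeholders for the caller's work.
 */
static int do_lookup(struct mm_struct *mm)
{
	unsigned int seq;
	int ret;

	if (mmap_lock_speculate_try_begin(mm, &seq)) {
		ret = lookup_without_lock(mm);		/* lock-free read */
		if (!mmap_lock_speculate_retry(mm, seq))
			return ret;			/* no concurrent writer, result is valid */
	}

	/* Speculation not possible or it raced with a writer: take the lock. */
	mmap_read_lock(mm);
	ret = lookup_locked(mm);
	mmap_read_unlock(mm);
	return ret;
}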