@@ -144,6 +144,16 @@ struct wait_exceptional_entry_queue {
144
144
struct exceptional_entry_key key ;
145
145
};
146
146
147
+ /**
148
+ * enum dax_wake_mode: waitqueue wakeup behaviour
149
+ * @WAKE_ALL: wake all waiters in the waitqueue
150
+ * @WAKE_NEXT: wake only the first waiter in the waitqueue
151
+ *
+ * Passed to dax_wake_entry() (and through put_unlocked_entry()) to choose
+ * how many waiters to wake: WAKE_NEXT performs an exclusive single-waiter
+ * wakeup (nr_exclusive == 1 in __wake_up()), while WAKE_ALL wakes every
+ * waiter on the entry's waitqueue — used e.g. after a PMD entry is torn
+ * down, when all waiters must re-evaluate the entry.
+ */
152
+ enum dax_wake_mode {
153
+ WAKE_ALL ,
154
+ WAKE_NEXT ,
155
+ };
156
+
147
157
static wait_queue_head_t * dax_entry_waitqueue (struct xa_state * xas ,
148
158
void * entry , struct exceptional_entry_key * key )
149
159
{
@@ -182,7 +192,8 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
182
192
* The important information it's conveying is whether the entry at
183
193
* this index used to be a PMD entry.
184
194
*/
185
- static void dax_wake_entry (struct xa_state * xas , void * entry , bool wake_all )
195
+ static void dax_wake_entry (struct xa_state * xas , void * entry ,
196
+ enum dax_wake_mode mode )
186
197
{
187
198
struct exceptional_entry_key key ;
188
199
wait_queue_head_t * wq ;
@@ -196,7 +207,7 @@ static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
196
207
* must be in the waitqueue and the following check will see them.
197
208
*/
198
209
if (waitqueue_active (wq ))
199
- __wake_up (wq , TASK_NORMAL , wake_all ? 0 : 1 , & key );
210
+ __wake_up (wq , TASK_NORMAL , mode == WAKE_ALL ? 0 : 1 , & key );
200
211
}
201
212
202
213
/*
@@ -264,11 +275,11 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
264
275
finish_wait (wq , & ewait .wait );
265
276
}
266
277
267
- static void put_unlocked_entry (struct xa_state * xas , void * entry )
278
+ static void put_unlocked_entry (struct xa_state * xas , void * entry ,
279
+ enum dax_wake_mode mode )
268
280
{
269
- /* If we were the only waiter woken, wake the next one */
270
+ /* Wake per @mode; NULL and conflict sentinels have nobody to wake. */
281
if (entry && !dax_is_conflict (entry ))
271
- dax_wake_entry (xas , entry , false );
282
+ dax_wake_entry (xas , entry , mode );
272
283
}
273
284
274
285
/*
@@ -286,7 +297,7 @@ static void dax_unlock_entry(struct xa_state *xas, void *entry)
286
297
old = xas_store (xas , entry );
287
298
xas_unlock_irq (xas );
288
299
BUG_ON (!dax_is_locked (old ));
289
- dax_wake_entry (xas , entry , false );
300
+ dax_wake_entry (xas , entry , WAKE_NEXT );
290
301
}
291
302
292
303
/*
@@ -524,7 +535,7 @@ static void *grab_mapping_entry(struct xa_state *xas,
524
535
525
536
dax_disassociate_entry (entry , mapping , false);
526
537
xas_store (xas , NULL ); /* undo the PMD join */
527
- dax_wake_entry (xas , entry , true );
538
+ dax_wake_entry (xas , entry , WAKE_ALL );
528
539
mapping -> nrpages -= PG_PMD_NR ;
529
540
entry = NULL ;
530
541
xas_set (xas , index );
@@ -622,7 +633,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
622
633
entry = get_unlocked_entry (& xas , 0 );
623
634
if (entry )
624
635
page = dax_busy_page (entry );
625
- put_unlocked_entry (& xas , entry );
636
+ put_unlocked_entry (& xas , entry , WAKE_NEXT );
626
637
if (page )
627
638
break ;
628
639
if (++ scanned % XA_CHECK_SCHED )
@@ -664,7 +675,7 @@ static int __dax_invalidate_entry(struct address_space *mapping,
664
675
mapping -> nrpages -= 1UL << dax_entry_order (entry );
665
676
ret = 1 ;
666
677
out :
667
- put_unlocked_entry (& xas , entry );
678
+ put_unlocked_entry (& xas , entry , WAKE_ALL );
668
679
xas_unlock_irq (& xas );
669
680
return ret ;
670
681
}
@@ -937,13 +948,13 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
937
948
xas_lock_irq (xas );
938
949
xas_store (xas , entry );
939
950
xas_clear_mark (xas , PAGECACHE_TAG_DIRTY );
940
- dax_wake_entry (xas , entry , false );
951
+ dax_wake_entry (xas , entry , WAKE_NEXT );
941
952
942
953
trace_dax_writeback_one (mapping -> host , index , count );
943
954
return ret ;
944
955
945
956
put_unlocked :
946
- put_unlocked_entry (xas , entry );
957
+ put_unlocked_entry (xas , entry , WAKE_NEXT );
947
958
return ret ;
948
959
}
949
960
@@ -1684,7 +1695,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
1684
1695
/* Did we race with someone splitting entry or so? */
1685
1696
if (!entry || dax_is_conflict (entry ) ||
1686
1697
(order == 0 && !dax_is_pte_entry (entry ))) {
1687
- put_unlocked_entry (& xas , entry );
1698
+ put_unlocked_entry (& xas , entry , WAKE_NEXT );
1688
1699
xas_unlock_irq (& xas );
1689
1700
trace_dax_insert_pfn_mkwrite_no_entry (mapping -> host , vmf ,
1690
1701
VM_FAULT_NOPAGE );
0 commit comments