@@ -35,6 +35,7 @@
 #include <linux/uio.h>
 #include <linux/hugetlb.h>
 #include <linux/page_idle.h>
+#include <linux/local_lock.h>
 
 #include "internal.h"
 
@@ -44,14 +45,32 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
-static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
-static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
-static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
-static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
-static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
+/* Protecting only lru_rotate.pvec which requires disabling interrupts */
+struct lru_rotate {
+	local_lock_t lock;
+	struct pagevec pvec;
+};
+static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
+	.lock = INIT_LOCAL_LOCK(lock),
+};
+
+/*
+ * The following struct pagevec are grouped together because they are protected
+ * by disabling preemption (and interrupts remain enabled).
+ */
+struct lru_pvecs {
+	local_lock_t lock;
+	struct pagevec lru_add;
+	struct pagevec lru_deactivate_file;
+	struct pagevec lru_deactivate;
+	struct pagevec lru_lazyfree;
 #ifdef CONFIG_SMP
-static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
+	struct pagevec activate_page;
 #endif
+};
+static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
+	.lock = INIT_LOCAL_LOCK(lock),
+};
 
 /*
  * This path almost never happens for VM activity - pages are normally
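The two hunks above replace the loose per-CPU pagevec declarations with two container structs, each carrying a local_lock_t next to the data it protects. Below is a minimal, self-contained sketch of that declaration-plus-locking pattern; it is not part of the patch, and the names demo_pcp and demo_count_event are hypothetical:

#include <linux/local_lock.h>
#include <linux/percpu.h>

/* Per-CPU data bundled with the lock that protects it. */
struct demo_pcp {
	local_lock_t lock;
	int count;
};

static DEFINE_PER_CPU(struct demo_pcp, demo_pcp) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void demo_count_event(void)
{
	struct demo_pcp *p;

	/*
	 * On !PREEMPT_RT this disables preemption, much like the old
	 * get_cpu_var(); on PREEMPT_RT it takes a per-CPU spinlock instead.
	 */
	local_lock(&demo_pcp.lock);
	p = this_cpu_ptr(&demo_pcp);
	p->count++;
	local_unlock(&demo_pcp.lock);
}

Compared with get_cpu_var()/put_cpu_var(), the lock names the data it protects, which is what the lru_pvecs and lru_rotate structs achieve in the hunks that follow.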
@@ -254,11 +273,11 @@ void rotate_reclaimable_page(struct page *page)
 		unsigned long flags;
 
 		get_page(page);
-		local_irq_save(flags);
-		pvec = this_cpu_ptr(&lru_rotate_pvecs);
+		local_lock_irqsave(&lru_rotate.lock, flags);
+		pvec = this_cpu_ptr(&lru_rotate.pvec);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(&lru_rotate.lock, flags);
 	}
 }
 
@@ -293,27 +312,29 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
 #ifdef CONFIG_SMP
 static void activate_page_drain(int cpu)
 {
-	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
+	struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);
 
 	if (pagevec_count(pvec))
 		pagevec_lru_move_fn(pvec, __activate_page, NULL);
 }
 
 static bool need_activate_page_drain(int cpu)
 {
-	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
+	return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
 }
 
 void activate_page(struct page *page)
 {
 	page = compound_head(page);
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+		struct pagevec *pvec;
 
+		local_lock(&lru_pvecs.lock);
+		pvec = this_cpu_ptr(&lru_pvecs.activate_page);
 		get_page(page);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, __activate_page, NULL);
-		put_cpu_var(activate_page_pvecs);
+		local_unlock(&lru_pvecs.lock);
 	}
 }
 
@@ -335,9 +356,12 @@ void activate_page(struct page *page)
 
 static void __lru_cache_activate_page(struct page *page)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+	struct pagevec *pvec;
 	int i;
 
+	local_lock(&lru_pvecs.lock);
+	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
+
 	/*
 	 * Search backwards on the optimistic assumption that the page being
 	 * activated has just been added to this pagevec. Note that only
@@ -357,7 +381,7 @@ static void __lru_cache_activate_page(struct page *page)
 		}
 	}
 
-	put_cpu_var(lru_add_pvec);
+	local_unlock(&lru_pvecs.lock);
 }
 
 /*
@@ -385,7 +409,7 @@ void mark_page_accessed(struct page *page)
 	} else if (!PageActive(page)) {
 		/*
 		 * If the page is on the LRU, queue it for activation via
-		 * activate_page_pvecs. Otherwise, assume the page is on a
+		 * lru_pvecs.activate_page. Otherwise, assume the page is on a
 		 * pagevec, mark it active and it'll be moved to the active
 		 * LRU on the next drain.
 		 */
@@ -404,12 +428,14 @@ EXPORT_SYMBOL(mark_page_accessed);
 
 static void __lru_cache_add(struct page *page)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+	struct pagevec *pvec;
 
+	local_lock(&lru_pvecs.lock);
+	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
 	get_page(page);
 	if (!pagevec_add(pvec, page) || PageCompound(page))
 		__pagevec_lru_add(pvec);
-	put_cpu_var(lru_add_pvec);
+	local_unlock(&lru_pvecs.lock);
 }
 
 /**
@@ -593,30 +619,30 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
  */
 void lru_add_drain_cpu(int cpu)
 {
-	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);
+	struct pagevec *pvec = &per_cpu(lru_pvecs.lru_add, cpu);
 
 	if (pagevec_count(pvec))
 		__pagevec_lru_add(pvec);
 
-	pvec = &per_cpu(lru_rotate_pvecs, cpu);
+	pvec = &per_cpu(lru_rotate.pvec, cpu);
 	if (pagevec_count(pvec)) {
 		unsigned long flags;
 
 		/* No harm done if a racing interrupt already did this */
-		local_irq_save(flags);
+		local_lock_irqsave(&lru_rotate.lock, flags);
 		pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(&lru_rotate.lock, flags);
 	}
 
-	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
+	pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
 	if (pagevec_count(pvec))
 		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
 
-	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
+	pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
 	if (pagevec_count(pvec))
 		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
 
-	pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
+	pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu);
 	if (pagevec_count(pvec))
 		pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
 
@@ -641,11 +667,14 @@ void deactivate_file_page(struct page *page)
 		return;
 
 	if (likely(get_page_unless_zero(page))) {
-		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
+		struct pagevec *pvec;
+
+		local_lock(&lru_pvecs.lock);
+		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
 
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
-		put_cpu_var(lru_deactivate_file_pvecs);
+		local_unlock(&lru_pvecs.lock);
 	}
 }
 
@@ -660,12 +689,14 @@ void deactivate_file_page(struct page *page)
 void deactivate_page(struct page *page)
 {
 	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+		struct pagevec *pvec;
 
+		local_lock(&lru_pvecs.lock);
+		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
 		get_page(page);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
-		put_cpu_var(lru_deactivate_pvecs);
+		local_unlock(&lru_pvecs.lock);
 	}
 }
 
@@ -680,19 +711,30 @@ void mark_page_lazyfree(struct page *page)
 {
 	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
 	    !PageSwapCache(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
+		struct pagevec *pvec;
 
+		local_lock(&lru_pvecs.lock);
+		pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
 		get_page(page);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
-		put_cpu_var(lru_lazyfree_pvecs);
+		local_unlock(&lru_pvecs.lock);
 	}
 }
 
 void lru_add_drain(void)
 {
-	lru_add_drain_cpu(get_cpu());
-	put_cpu();
+	local_lock(&lru_pvecs.lock);
+	lru_add_drain_cpu(smp_processor_id());
+	local_unlock(&lru_pvecs.lock);
+}
+
+void lru_add_drain_cpu_zone(struct zone *zone)
+{
+	local_lock(&lru_pvecs.lock);
+	lru_add_drain_cpu(smp_processor_id());
+	drain_local_pages(zone);
+	local_unlock(&lru_pvecs.lock);
 }
 
 #ifdef CONFIG_SMP
@@ -743,11 +785,11 @@ void lru_add_drain_all(void)
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 
-		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
-		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
-		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
-		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
-		    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
+		if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
+		    pagevec_count(&per_cpu(lru_rotate.pvec, cpu)) ||
+		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
+		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
+		    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
 		    need_activate_page_drain(cpu)) {
 			INIT_WORK(work, lru_add_drain_per_cpu);
 			queue_work_on(cpu, mm_percpu_wq, work);
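The lru_rotate pagevec keeps its interrupt-disabling semantics, but through local_lock_irqsave()/local_unlock_irqrestore(), so the protected object is named explicitly and PREEMPT_RT can substitute a per-CPU spinlock. Below is a minimal illustrative sketch of that interrupt-safe variant; it is not taken from the patch, and the names demo_irq_pcp and demo_irq_event are hypothetical:

#include <linux/local_lock.h>
#include <linux/percpu.h>

/* Per-CPU data that may also be touched from interrupt context. */
struct demo_irq_pcp {
	local_lock_t lock;
	unsigned long events;
};

static DEFINE_PER_CPU(struct demo_irq_pcp, demo_irq_pcp) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void demo_irq_event(void)
{
	struct demo_irq_pcp *p;
	unsigned long flags;

	/* On !PREEMPT_RT this is local_irq_save(flags), as in the old code. */
	local_lock_irqsave(&demo_irq_pcp.lock, flags);
	p = this_cpu_ptr(&demo_irq_pcp);
	p->events++;
	local_unlock_irqrestore(&demo_irq_pcp.lock, flags);
}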