@@ -3653,16 +3653,6 @@ static void kfree_rcu_monitor(struct work_struct *work)
 		schedule_delayed_monitor_work(krcp);
 }
 
-static enum hrtimer_restart
-schedule_page_work_fn(struct hrtimer *t)
-{
-	struct kfree_rcu_cpu *krcp =
-		container_of(t, struct kfree_rcu_cpu, hrtimer);
-
-	queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
-	return HRTIMER_NORESTART;
-}
-
 static void fill_page_cache_func(struct work_struct *work)
 {
 	struct kvfree_rcu_bulk_data *bnode;
@@ -3698,27 +3688,6 @@ static void fill_page_cache_func(struct work_struct *work)
 	atomic_set(&krcp->backoff_page_cache_fill, 0);
 }
 
-static void
-run_page_cache_worker(struct kfree_rcu_cpu *krcp)
-{
-	// If cache disabled, bail out.
-	if (!rcu_min_cached_objs)
-		return;
-
-	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
-			!atomic_xchg(&krcp->work_in_progress, 1)) {
-		if (atomic_read(&krcp->backoff_page_cache_fill)) {
-			queue_delayed_work(system_unbound_wq,
-				&krcp->page_cache_work,
-					msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
-		} else {
-			hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-			krcp->hrtimer.function = schedule_page_work_fn;
-			hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
-		}
-	}
-}
-
 // Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
 // state specified by flags. If can_alloc is true, the caller must
 // be schedulable and not be holding any locks or mutexes that might be
@@ -3779,6 +3748,51 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
 	return true;
 }
 
+#if !defined(CONFIG_TINY_RCU)
+
+static enum hrtimer_restart
+schedule_page_work_fn(struct hrtimer *t)
+{
+	struct kfree_rcu_cpu *krcp =
+		container_of(t, struct kfree_rcu_cpu, hrtimer);
+
+	queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
+	return HRTIMER_NORESTART;
+}
+
+static void
+run_page_cache_worker(struct kfree_rcu_cpu *krcp)
+{
+	// If cache disabled, bail out.
+	if (!rcu_min_cached_objs)
+		return;
+
+	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
+			!atomic_xchg(&krcp->work_in_progress, 1)) {
+		if (atomic_read(&krcp->backoff_page_cache_fill)) {
+			queue_delayed_work(system_unbound_wq,
+				&krcp->page_cache_work,
+					msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
+		} else {
+			hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+			krcp->hrtimer.function = schedule_page_work_fn;
+			hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
+		}
+	}
+}
+
+void __init kfree_rcu_scheduler_running(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
+
+		if (need_offload_krc(krcp))
+			schedule_delayed_monitor_work(krcp);
+	}
+}
+
 /*
  * Queue a request for lazy invocation of the appropriate free routine
  * after a grace period. Please note that three paths are maintained,
@@ -3944,6 +3958,8 @@ void kvfree_rcu_barrier(void)
 }
 EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
 
+#endif /* #if !defined(CONFIG_TINY_RCU) */
+
 static unsigned long
 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
@@ -3985,18 +4001,6 @@ kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	return freed == 0 ? SHRINK_STOP : freed;
 }
 
-void __init kfree_rcu_scheduler_running(void)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
-
-		if (need_offload_krc(krcp))
-			schedule_delayed_monitor_work(krcp);
-	}
-}
-
 /*
  * During early boot, any blocking grace-period wait automatically
  * implies a grace period.