
Commit d824ed7

urezki authored and tehcaster committed
rcu/kvfree: Move some functions under CONFIG_TINY_RCU
Currently, when Tiny RCU is enabled, the tree.c file is not compiled, so duplicate function names do not conflict with each other. Because the kvfree_rcu() functionality is being moved to SLAB, we have to reorder some functions and place them together under the CONFIG_TINY_RCU macro definition, so that those function names do not conflict when a kernel is compiled for the CONFIG_TINY_RCU flavor.

Signed-off-by: Uladzislau Rezki (Sony) <[email protected]>
Acked-by: Hyeonggon Yoo <[email protected]>
Tested-by: Hyeonggon Yoo <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
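To make the name clash concrete, here is a minimal sketch of the guard pattern this commit applies. The file split, the example_kvfree_deferred() name, and the elided batching body are illustrative assumptions, not the actual kernel sources; only the CONFIG_TINY_RCU guard and the tiny-flavor synchronize_rcu()/kvfree() sequence mirror the real code.

/*
 * Illustrative sketch only: once the kvfree_rcu() machinery lives in a
 * file that is built for every RCU flavor, the tree-only helpers must be
 * fenced off so a CONFIG_TINY_RCU build does not end up with two
 * definitions of the same symbol.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

#if !defined(CONFIG_TINY_RCU)

/* Tree flavor: defer the free and batch it (details elided). */
static void example_kvfree_deferred(void *ptr)
{
	/* ... queue ptr for a batched kvfree() after a grace period ... */
}

#else /* CONFIG_TINY_RCU */

/* Tiny flavor: nothing to batch; wait for a grace period, then free. */
static void example_kvfree_deferred(void *ptr)
{
	synchronize_rcu();
	kvfree(ptr);
}

#endif /* !defined(CONFIG_TINY_RCU) */

In the patch below, the guarded region in kernel/rcu/tree.c opens just before schedule_page_work_fn() and the matching #endif is placed after kvfree_rcu_barrier(), ahead of the shrinker callbacks.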
1 parent 0f52b4d · commit d824ed7

File tree: 1 file changed (+47, -43 lines)


kernel/rcu/tree.c

Lines changed: 47 additions & 43 deletions
@@ -3653,16 +3653,6 @@ static void kfree_rcu_monitor(struct work_struct *work)
 		schedule_delayed_monitor_work(krcp);
 }
 
-static enum hrtimer_restart
-schedule_page_work_fn(struct hrtimer *t)
-{
-	struct kfree_rcu_cpu *krcp =
-		container_of(t, struct kfree_rcu_cpu, hrtimer);
-
-	queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
-	return HRTIMER_NORESTART;
-}
-
 static void fill_page_cache_func(struct work_struct *work)
 {
 	struct kvfree_rcu_bulk_data *bnode;
@@ -3698,27 +3688,6 @@ static void fill_page_cache_func(struct work_struct *work)
 	atomic_set(&krcp->backoff_page_cache_fill, 0);
 }
 
-static void
-run_page_cache_worker(struct kfree_rcu_cpu *krcp)
-{
-	// If cache disabled, bail out.
-	if (!rcu_min_cached_objs)
-		return;
-
-	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
-			!atomic_xchg(&krcp->work_in_progress, 1)) {
-		if (atomic_read(&krcp->backoff_page_cache_fill)) {
-			queue_delayed_work(system_unbound_wq,
-				&krcp->page_cache_work,
-					msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
-		} else {
-			hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-			krcp->hrtimer.function = schedule_page_work_fn;
-			hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
-		}
-	}
-}
-
 // Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
 // state specified by flags. If can_alloc is true, the caller must
 // be schedulable and not be holding any locks or mutexes that might be
@@ -3779,6 +3748,51 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
 	return true;
 }
 
+#if !defined(CONFIG_TINY_RCU)
+
+static enum hrtimer_restart
+schedule_page_work_fn(struct hrtimer *t)
+{
+	struct kfree_rcu_cpu *krcp =
+		container_of(t, struct kfree_rcu_cpu, hrtimer);
+
+	queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
+	return HRTIMER_NORESTART;
+}
+
+static void
+run_page_cache_worker(struct kfree_rcu_cpu *krcp)
+{
+	// If cache disabled, bail out.
+	if (!rcu_min_cached_objs)
+		return;
+
+	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
+			!atomic_xchg(&krcp->work_in_progress, 1)) {
+		if (atomic_read(&krcp->backoff_page_cache_fill)) {
+			queue_delayed_work(system_unbound_wq,
+				&krcp->page_cache_work,
+					msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
+		} else {
+			hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+			krcp->hrtimer.function = schedule_page_work_fn;
+			hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
+		}
+	}
+}
+
+void __init kfree_rcu_scheduler_running(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
+
+		if (need_offload_krc(krcp))
+			schedule_delayed_monitor_work(krcp);
+	}
+}
+
 /*
  * Queue a request for lazy invocation of the appropriate free routine
  * after a grace period. Please note that three paths are maintained,
@@ -3944,6 +3958,8 @@ void kvfree_rcu_barrier(void)
 }
 EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
 
+#endif /* #if !defined(CONFIG_TINY_RCU) */
+
 static unsigned long
 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
@@ -3985,18 +4001,6 @@ kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	return freed == 0 ? SHRINK_STOP : freed;
 }
 
-void __init kfree_rcu_scheduler_running(void)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
-
-		if (need_offload_krc(krcp))
-			schedule_delayed_monitor_work(krcp);
-	}
-}
-
 /*
  * During early boot, any blocking grace-period wait automatically
  * implies a grace period.
