
Commit 83150f5

arch_topology: Avoid use-after-free for scale_freq_data
Currently topology_scale_freq_tick() (which gets called from scheduler_tick()) may end up using a pointer to "struct scale_freq_data" which was previously cleared by topology_clear_scale_freq_source(), as there is no protection in place here. The users of topology_clear_scale_freq_source(), though, need a guarantee that the previously cleared scale_freq_data isn't used anymore, so they can free the related resources.

Since topology_scale_freq_tick() is called from the scheduler tick, we don't want to add locking there. Use the RCU update mechanism instead (which is already used by the scheduler's utilization update path) to guarantee race-free updates here.

synchronize_rcu() makes sure that all RCU critical sections that started before it was called will finish before it returns. So the callers of topology_clear_scale_freq_source() don't need to worry about their callback getting called anymore.

Cc: Paul E. McKenney <[email protected]>
Fixes: 01e055c ("arch_topology: Allow multiple entities to provide sched_freq_tick() callback")
Tested-by: Vincent Guittot <[email protected]>
Reviewed-by: Ionela Voinescu <[email protected]>
Tested-by: Qian Cai <[email protected]>
Signed-off-by: Viresh Kumar <[email protected]>
1 parent eead184 commit 83150f5
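
The core of the change is the standard RCU unpublish-then-wait pattern: clear the published pointer with rcu_assign_pointer(), wait out all pre-existing readers with synchronize_rcu(), and only then free the object. As a rough, hedged illustration of that lifetime rule (not the kernel code), the same pattern can be exercised in userspace with liburcu; reader() and clear_and_free() below are made-up stand-ins for topology_scale_freq_tick() and for topology_clear_scale_freq_source() followed by the caller's free:

/* Minimal userspace sketch of the RCU pattern, using liburcu.
 * Build: gcc rcu_demo.c -lurcu  (assumes liburcu is installed;
 * all names here are illustrative, not taken from the patch).
 */
#include <urcu.h>		/* rcu_read_lock(), synchronize_rcu(), ... */
#include <stdio.h>
#include <stdlib.h>

struct scale_freq_data { int source; };

static struct scale_freq_data *sft_data;	/* stand-in for the per-CPU pointer */

static void reader(void)			/* stand-in for the tick */
{
	struct scale_freq_data *sfd;

	rcu_read_lock();			/* cheap, non-sleeping, tick-friendly */
	sfd = rcu_dereference(sft_data);
	if (sfd)
		printf("source=%d\n", sfd->source);
	rcu_read_unlock();
}

static void clear_and_free(void)		/* stand-in for clear + free */
{
	struct scale_freq_data *old = sft_data;

	rcu_assign_pointer(sft_data, NULL);	/* unpublish the pointer */
	synchronize_rcu();			/* wait for all pre-existing readers */
	free(old);				/* now unreachable by any reader */
}

int main(void)
{
	rcu_register_thread();			/* liburcu readers must register */
	sft_data = malloc(sizeof(*sft_data));
	sft_data->source = 1;
	reader();				/* prints "source=1" */
	clear_and_free();
	reader();				/* prints nothing: pointer cleared */
	rcu_unregister_thread();
	return 0;
}

Running it should print source=1 exactly once; the point is that free(old) cannot race with a reader() still holding the old pointer, which is precisely the guarantee the patch gives callers of topology_clear_scale_freq_source().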

File tree

1 file changed (+21, −6 lines)


drivers/base/arch_topology.c

Lines changed: 21 additions & 6 deletions
@@ -18,10 +18,11 @@
 #include <linux/cpumask.h>
 #include <linux/init.h>
 #include <linux/percpu.h>
+#include <linux/rcupdate.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
 
-static DEFINE_PER_CPU(struct scale_freq_data *, sft_data);
+static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
 static struct cpumask scale_freq_counters_mask;
 static bool scale_freq_invariant;
 
@@ -66,16 +67,20 @@ void topology_set_scale_freq_source(struct scale_freq_data *data,
 	if (cpumask_empty(&scale_freq_counters_mask))
 		scale_freq_invariant = topology_scale_freq_invariant();
 
+	rcu_read_lock();
+
 	for_each_cpu(cpu, cpus) {
-		sfd = per_cpu(sft_data, cpu);
+		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
 
 		/* Use ARCH provided counters whenever possible */
 		if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
-			per_cpu(sft_data, cpu) = data;
+			rcu_assign_pointer(per_cpu(sft_data, cpu), data);
 			cpumask_set_cpu(cpu, &scale_freq_counters_mask);
 		}
 	}
 
+	rcu_read_unlock();
+
 	update_scale_freq_invariant(true);
 }
 EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);
@@ -86,22 +91,32 @@ void topology_clear_scale_freq_source(enum scale_freq_source source,
 	struct scale_freq_data *sfd;
 	int cpu;
 
+	rcu_read_lock();
+
 	for_each_cpu(cpu, cpus) {
-		sfd = per_cpu(sft_data, cpu);
+		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
 
 		if (sfd && sfd->source == source) {
-			per_cpu(sft_data, cpu) = NULL;
+			rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
 			cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
 		}
 	}
 
+	rcu_read_unlock();
+
+	/*
+	 * Make sure all references to previous sft_data are dropped to avoid
+	 * use-after-free races.
+	 */
+	synchronize_rcu();
+
 	update_scale_freq_invariant(false);
 }
 EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);
 
 void topology_scale_freq_tick(void)
 {
-	struct scale_freq_data *sfd = *this_cpu_ptr(&sft_data);
+	struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));
 
 	if (sfd)
 		sfd->set_freq_scale();
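
Two details are worth calling out. On the read side, topology_scale_freq_tick() runs from the scheduler tick with preemption disabled, so rcu_dereference_sched() is the right primitive and no explicit rcu_read_lock() is needed there; since the RCU flavor consolidation in v4.20, synchronize_rcu() also waits for such preemption-disabled regions. On the caller side, the new guarantee permits a teardown like the hedged sketch below, where the driver, its my_sfd variable, and my_counters_exit() are hypothetical; only the topology_* API, SCALE_FREQ_SOURCE_CPUFREQ, and kfree() come from the kernel:

/* Hypothetical caller teardown relying on the new guarantee. */
static struct scale_freq_data *my_sfd;	/* registered earlier with
					 * topology_set_scale_freq_source() */

static void my_counters_exit(const struct cpumask *cpus)
{
	/*
	 * This unpublishes the per-CPU sft_data pointers and then runs
	 * synchronize_rcu(), so by the time it returns no CPU's tick can
	 * still be dereferencing my_sfd.
	 */
	topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPUFREQ, cpus);

	kfree(my_sfd);	/* safe: no topology_scale_freq_tick() user remains */
	my_sfd = NULL;
}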
