 */

static DEFINE_MUTEX(em_pd_mutex);

+static void em_cpufreq_update_efficiencies(struct device *dev,
+					   struct em_perf_state *table);
+
static bool _is_cpu_device(struct device *dev)
{
	return (dev->bus == &cpu_subsys);
@@ -103,6 +106,31 @@ static void em_debug_create_pd(struct device *dev) {}
static void em_debug_remove_pd(struct device *dev) {}
#endif

+static void em_destroy_table_rcu(struct rcu_head *rp)
+{
+	struct em_perf_table __rcu *table;
+
+	table = container_of(rp, struct em_perf_table, rcu);
+	kfree(table);
+}
+
+static void em_free_table(struct em_perf_table __rcu *table)
+{
+	call_rcu(&table->rcu, em_destroy_table_rcu);
+}
+
+static struct em_perf_table __rcu *
+em_allocate_table(struct em_perf_domain *pd)
+{
+	struct em_perf_table __rcu *table;
+	int table_size;
+
+	table_size = sizeof(struct em_perf_state) * pd->nr_perf_states;
+
+	table = kzalloc(sizeof(*table) + table_size, GFP_KERNEL);
+	return table;
+}
+
static int em_compute_costs(struct device *dev, struct em_perf_state *table,
			    struct em_data_callback *cb, int nr_states,
			    unsigned long flags)
@@ -153,6 +181,24 @@ static int em_allocate_perf_table(struct em_perf_domain *pd,
	return 0;
}

+static int em_create_runtime_table(struct em_perf_domain *pd)
+{
+	struct em_perf_table __rcu *table;
+	int table_size;
+
+	table = em_allocate_table(pd);
+	if (!table)
+		return -ENOMEM;
+
+	/* Initialize runtime table with existing data */
+	table_size = sizeof(struct em_perf_state) * pd->nr_perf_states;
+	memcpy(table->state, pd->table, table_size);
+
+	rcu_assign_pointer(pd->em_table, table);
+
+	return 0;
+}
+
static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd,
				struct em_perf_state *table,
				struct em_data_callback *cb,
@@ -245,6 +291,10 @@ static int em_create_pd(struct device *dev, int nr_states,
	if (ret)
		goto free_pd_table;

+	ret = em_create_runtime_table(pd);
+	if (ret)
+		goto free_pd_table;
+
	if (_is_cpu_device(dev))
		for_each_cpu(cpu, cpus) {
			cpu_dev = get_cpu_device(cpu);
@@ -461,6 +511,9 @@ void em_dev_unregister_perf_domain(struct device *dev)
	em_debug_remove_pd(dev);

	kfree(dev->em_pd->table);
+
+	em_free_table(dev->em_pd->em_table);
+
	kfree(dev->em_pd);
	dev->em_pd = NULL;
	mutex_unlock(&em_pd_mutex);
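
For context on the lifetime handling above: the runtime table is published with rcu_assign_pointer() and torn down through call_rcu() rather than an immediate kfree(), because readers are expected to dereference pd->em_table inside an RCU read-side critical section. The following is a minimal reader-side sketch only, not part of the patch; the helper name em_sketch_first_frequency is hypothetical, and it assumes struct em_perf_state exposes a frequency field.

/* Illustrative only: hypothetical reader of the RCU-managed runtime table. */
static unsigned long em_sketch_first_frequency(struct em_perf_domain *pd)
{
	struct em_perf_table *runtime_table;
	unsigned long freq;

	rcu_read_lock();
	/* Pairs with rcu_assign_pointer() in em_create_runtime_table(). */
	runtime_table = rcu_dereference(pd->em_table);
	freq = runtime_table->state[0].frequency;
	rcu_read_unlock();

	/*
	 * Once no reader can still observe the old pointer, em_free_table()
	 * lets em_destroy_table_rcu() perform the actual kfree() via call_rcu().
	 */
	return freq;
}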