
Commit de58ed5

Gavin Shan authored and Catalin Marinas committed
arm64: Introduce get_cpu_ops() helper function
This introduces get_cpu_ops() to return the CPU operations for the given CPU index. For now, it simply returns @cpu_ops[cpu] as before. A helper function, __cpu_try_die(), is also introduced and shared by cpu_die_early() and ipi_cpu_crash_stop(). The patch shouldn't introduce any functional changes.

Signed-off-by: Gavin Shan <[email protected]>
Signed-off-by: Catalin Marinas <[email protected]>
Acked-by: Mark Rutland <[email protected]>
1 parent 6885fb1 commit de58ed5
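To make the pattern described in the commit message concrete, here is a self-contained userspace sketch of the accessor idiom the patch adopts: a file-local table of per-CPU operation structs that is reached only through a get_cpu_ops()-style helper, with each optional hook checked before it is called. The names mirror the kernel code, but the demo_* helpers, the table contents, and main() are purely illustrative and not part of the patch.

/*
 * Illustrative model of the accessor pattern; compile with any C99 compiler.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct cpu_operations {
	bool (*cpu_can_disable)(unsigned int cpu);
};

static bool demo_cpu_can_disable(unsigned int cpu)
{
	return cpu != 0;	/* pretend only the boot CPU cannot be disabled */
}

static const struct cpu_operations demo_ops = {
	.cpu_can_disable = demo_cpu_can_disable,
};

/* As in the patched cpu_ops.c: the array itself is now file-local ... */
static const struct cpu_operations *cpu_ops[NR_CPUS] = {
	[0] = &demo_ops, [1] = &demo_ops,	/* CPUs 2-3 have no ops entry */
};

/* ... and every other user goes through the accessor. */
static const struct cpu_operations *get_cpu_ops(int cpu)
{
	return cpu_ops[cpu];
}

/* The call-site pattern repeated throughout the diffs below. */
static bool cpu_can_disable(unsigned int cpu)
{
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	if (ops && ops->cpu_can_disable)
		return ops->cpu_can_disable(cpu);

	return false;
}

int main(void)
{
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%u: can_disable=%d\n", cpu, cpu_can_disable(cpu));

	return 0;
}

Keeping the table static and funnelling every lookup through one small accessor puts the NULL check in a single recognisable shape at each call site, which is exactly what the per-file diffs below do.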

File tree

5 files changed: +62, -32 lines


arch/arm64/include/asm/cpu_ops.h

Lines changed: 1 addition & 1 deletion
@@ -55,8 +55,8 @@ struct cpu_operations {
 #endif
 };
 
-extern const struct cpu_operations *cpu_ops[NR_CPUS];
 int __init init_cpu_ops(int cpu);
+extern const struct cpu_operations *get_cpu_ops(int cpu);
 
 static inline void __init init_bootcpu_ops(void)
 {

arch/arm64/kernel/cpu_ops.c

Lines changed: 6 additions & 1 deletion
@@ -20,7 +20,7 @@ extern const struct cpu_operations acpi_parking_protocol_ops;
 #endif
 extern const struct cpu_operations cpu_psci_ops;
 
-const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
+static const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
 
 static const struct cpu_operations *const dt_supported_cpu_ops[] __initconst = {
 	&smp_spin_table_ops,
@@ -111,3 +111,8 @@ int __init init_cpu_ops(int cpu)
 
 	return 0;
 }
+
+const struct cpu_operations *get_cpu_ops(int cpu)
+{
+	return cpu_ops[cpu];
+}

arch/arm64/kernel/cpuidle.c

Lines changed: 5 additions & 4 deletions
@@ -18,11 +18,11 @@
 
 int arm_cpuidle_init(unsigned int cpu)
 {
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
 	int ret = -EOPNOTSUPP;
 
-	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_suspend &&
-	    cpu_ops[cpu]->cpu_init_idle)
-		ret = cpu_ops[cpu]->cpu_init_idle(cpu);
+	if (ops && ops->cpu_suspend && ops->cpu_init_idle)
+		ret = ops->cpu_init_idle(cpu);
 
 	return ret;
 }
@@ -37,8 +37,9 @@ int arm_cpuidle_init(unsigned int cpu)
 int arm_cpuidle_suspend(int index)
 {
 	int cpu = smp_processor_id();
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
 
-	return cpu_ops[cpu]->cpu_suspend(index);
+	return ops->cpu_suspend(index);
 }
 
 #ifdef CONFIG_ACPI

arch/arm64/kernel/setup.c

Lines changed: 4 additions & 2 deletions
@@ -371,8 +371,10 @@ void __init setup_arch(char **cmdline_p)
 static inline bool cpu_can_disable(unsigned int cpu)
 {
 #ifdef CONFIG_HOTPLUG_CPU
-	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_can_disable)
-		return cpu_ops[cpu]->cpu_can_disable(cpu);
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+	if (ops && ops->cpu_can_disable)
+		return ops->cpu_can_disable(cpu);
 #endif
 	return false;
 }

arch/arm64/kernel/smp.c

Lines changed: 46 additions & 24 deletions
@@ -93,8 +93,10 @@ static inline int op_cpu_kill(unsigned int cpu)
  */
 static int boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-	if (cpu_ops[cpu]->cpu_boot)
-		return cpu_ops[cpu]->cpu_boot(cpu);
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+	if (ops->cpu_boot)
+		return ops->cpu_boot(cpu);
 
 	return -EOPNOTSUPP;
 }
@@ -196,6 +198,7 @@ asmlinkage notrace void secondary_start_kernel(void)
 {
 	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
 	struct mm_struct *mm = &init_mm;
+	const struct cpu_operations *ops;
 	unsigned int cpu;
 
 	cpu = task_cpu(current);
@@ -227,8 +230,9 @@ asmlinkage notrace void secondary_start_kernel(void)
 	 */
 	check_local_cpu_capabilities();
 
-	if (cpu_ops[cpu]->cpu_postboot)
-		cpu_ops[cpu]->cpu_postboot();
+	ops = get_cpu_ops(cpu);
+	if (ops->cpu_postboot)
+		ops->cpu_postboot();
 
 	/*
 	 * Log the CPU info before it is marked online and might get read.
@@ -266,19 +270,21 @@ asmlinkage notrace void secondary_start_kernel(void)
 #ifdef CONFIG_HOTPLUG_CPU
 static int op_cpu_disable(unsigned int cpu)
 {
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
+
 	/*
 	 * If we don't have a cpu_die method, abort before we reach the point
 	 * of no return. CPU0 may not have an cpu_ops, so test for it.
 	 */
-	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
+	if (!ops || !ops->cpu_die)
 		return -EOPNOTSUPP;
 
 	/*
 	 * We may need to abort a hot unplug for some other mechanism-specific
 	 * reason.
 	 */
-	if (cpu_ops[cpu]->cpu_disable)
-		return cpu_ops[cpu]->cpu_disable(cpu);
+	if (ops->cpu_disable)
+		return ops->cpu_disable(cpu);
 
 	return 0;
 }
@@ -314,15 +320,17 @@ int __cpu_disable(void)
 
 static int op_cpu_kill(unsigned int cpu)
 {
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
+
 	/*
 	 * If we have no means of synchronising with the dying CPU, then assume
 	 * that it is really dead. We can only wait for an arbitrary length of
 	 * time and hope that it's dead, so let's skip the wait and just hope.
 	 */
-	if (!cpu_ops[cpu]->cpu_kill)
+	if (!ops->cpu_kill)
 		return 0;
 
-	return cpu_ops[cpu]->cpu_kill(cpu);
+	return ops->cpu_kill(cpu);
 }
 
 /*
@@ -357,6 +365,7 @@ void __cpu_die(unsigned int cpu)
 void cpu_die(void)
 {
 	unsigned int cpu = smp_processor_id();
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
 
 	idle_task_exit();
 
@@ -370,12 +379,22 @@ void cpu_die(void)
 	 * mechanism must perform all required cache maintenance to ensure that
 	 * no dirty lines are lost in the process of shutting down the CPU.
 	 */
-	cpu_ops[cpu]->cpu_die(cpu);
+	ops->cpu_die(cpu);
 
 	BUG();
 }
 #endif
 
+static void __cpu_try_die(int cpu)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+	if (ops && ops->cpu_die)
+		ops->cpu_die(cpu);
+#endif
+}
+
 /*
  * Kill the calling secondary CPU, early in bringup before it is turned
  * online.
@@ -389,12 +408,11 @@ void cpu_die_early(void)
 	/* Mark this CPU absent */
 	set_cpu_present(cpu, 0);
 
-#ifdef CONFIG_HOTPLUG_CPU
-	update_cpu_boot_status(CPU_KILL_ME);
-	/* Check if we can park ourselves */
-	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
-		cpu_ops[cpu]->cpu_die(cpu);
-#endif
+	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
+		update_cpu_boot_status(CPU_KILL_ME);
+		__cpu_try_die(cpu);
+	}
+
 	update_cpu_boot_status(CPU_STUCK_IN_KERNEL);
 
 	cpu_park_loop();
@@ -488,10 +506,13 @@ static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
  */
 static int __init smp_cpu_setup(int cpu)
 {
+	const struct cpu_operations *ops;
+
 	if (init_cpu_ops(cpu))
 		return -ENODEV;
 
-	if (cpu_ops[cpu]->cpu_init(cpu))
+	ops = get_cpu_ops(cpu);
+	if (ops->cpu_init(cpu))
 		return -ENODEV;
 
 	set_cpu_possible(cpu, true);
@@ -714,6 +735,7 @@ void __init smp_init_cpus(void)
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
+	const struct cpu_operations *ops;
 	int err;
 	unsigned int cpu;
 	unsigned int this_cpu;
@@ -744,10 +766,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		if (cpu == smp_processor_id())
 			continue;
 
-		if (!cpu_ops[cpu])
+		ops = get_cpu_ops(cpu);
+		if (!ops)
 			continue;
 
-		err = cpu_ops[cpu]->cpu_prepare(cpu);
+		err = ops->cpu_prepare(cpu);
 		if (err)
 			continue;
 
@@ -863,10 +886,8 @@ static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
 	local_irq_disable();
 	sdei_mask_local_cpu();
 
-#ifdef CONFIG_HOTPLUG_CPU
-	if (cpu_ops[cpu]->cpu_die)
-		cpu_ops[cpu]->cpu_die(cpu);
-#endif
+	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
+		__cpu_try_die(cpu);
 
 	/* just in case */
 	cpu_park_loop();
@@ -1044,8 +1065,9 @@ static bool have_cpu_die(void)
 {
 #ifdef CONFIG_HOTPLUG_CPU
 	int any_cpu = raw_smp_processor_id();
+	const struct cpu_operations *ops = get_cpu_ops(any_cpu);
 
-	if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
+	if (ops && ops->cpu_die)
 		return true;
 #endif
 	return false;
