Skip to content

Commit 0856c14

Browse files
paulburton authored and tsbogend committed
MIPS: CPS: Boot CPUs in secondary clusters
Probe for & boot CPUs (cores & VPs) in secondary clusters (ie. not the cluster that began booting Linux) when they are present in systems with CM 3.5 or higher.

Signed-off-by: Paul Burton <[email protected]>
Signed-off-by: Chao-ying Fu <[email protected]>
Signed-off-by: Dragan Mladjenovic <[email protected]>
Signed-off-by: Aleksandar Rikalo <[email protected]>
Tested-by: Serge Semin <[email protected]>
Tested-by: Gregory CLEMENT <[email protected]>
Signed-off-by: Thomas Bogendoerfer <[email protected]>
1 parent 75fa6a5 commit 0856c14

File tree

4 files changed

+207
-21
lines changed

4 files changed

+207
-21
lines changed

arch/mips/include/asm/mips-cm.h

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -255,6 +255,12 @@ GCR_ACCESSOR_RW(32, 0x130, l2_config)
255255
GCR_ACCESSOR_RO(32, 0x150, sys_config2)
256256
#define CM_GCR_SYS_CONFIG2_MAXVPW GENMASK(3, 0)
257257

258+
/* GCR_L2-RAM_CONFIG - Configuration & status of L2 cache RAMs */
259+
GCR_ACCESSOR_RW(64, 0x240, l2_ram_config)
260+
#define CM_GCR_L2_RAM_CONFIG_PRESENT BIT(31)
261+
#define CM_GCR_L2_RAM_CONFIG_HCI_DONE BIT(30)
262+
#define CM_GCR_L2_RAM_CONFIG_HCI_SUPPORTED BIT(29)
263+
258264
/* GCR_L2_PFT_CONTROL - Controls hardware L2 prefetching */
259265
GCR_ACCESSOR_RW(32, 0x300, l2_pft_control)
260266
#define CM_GCR_L2_PFT_CONTROL_PAGEMASK GENMASK(31, 12)
@@ -266,6 +272,18 @@ GCR_ACCESSOR_RW(32, 0x308, l2_pft_control_b)
266272
#define CM_GCR_L2_PFT_CONTROL_B_CEN BIT(8)
267273
#define CM_GCR_L2_PFT_CONTROL_B_PORTID GENMASK(7, 0)
268274

275+
/* GCR_L2_TAG_ADDR - Access addresses in L2 cache tags */
276+
GCR_ACCESSOR_RW(64, 0x600, l2_tag_addr)
277+
278+
/* GCR_L2_TAG_STATE - Access L2 cache tag state */
279+
GCR_ACCESSOR_RW(64, 0x608, l2_tag_state)
280+
281+
/* GCR_L2_DATA - Access data in L2 cache lines */
282+
GCR_ACCESSOR_RW(64, 0x610, l2_data)
283+
284+
/* GCR_L2_ECC - Access ECC information from L2 cache lines */
285+
GCR_ACCESSOR_RW(64, 0x618, l2_ecc)
286+
269287
/* GCR_L2SM_COP - L2 cache op state machine control */
270288
GCR_ACCESSOR_RW(32, 0x620, l2sm_cop)
271289
#define CM_GCR_L2SM_COP_PRESENT BIT(31)

arch/mips/include/asm/smp-cps.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@ struct core_boot_config {
2323
};
2424

2525
struct cluster_boot_config {
26+
unsigned long *core_power;
2627
struct core_boot_config *core_config;
2728
};
2829

arch/mips/kernel/mips-cm.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -308,7 +308,9 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core,
308308
FIELD_PREP(CM3_GCR_Cx_OTHER_VP, vp);
309309

310310
if (cm_rev >= CM_REV_CM3_5) {
311-
val |= CM_GCR_Cx_OTHER_CLUSTER_EN;
311+
if (cluster != cpu_cluster(&current_cpu_data))
312+
val |= CM_GCR_Cx_OTHER_CLUSTER_EN;
313+
val |= CM_GCR_Cx_OTHER_GIC_EN;
312314
val |= FIELD_PREP(CM_GCR_Cx_OTHER_CLUSTER, cluster);
313315
val |= FIELD_PREP(CM_GCR_Cx_OTHER_BLOCK, block);
314316
} else {

arch/mips/kernel/smp-cps.c

Lines changed: 185 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -36,12 +36,56 @@ enum label_id {
3636

3737
UASM_L_LA(_not_nmi)
3838

39-
static DECLARE_BITMAP(core_power, NR_CPUS);
4039
static u64 core_entry_reg;
4140
static phys_addr_t cps_vec_pa;
4241

4342
struct cluster_boot_config *mips_cps_cluster_bootcfg;
4443

44+
static void power_up_other_cluster(unsigned int cluster)
45+
{
46+
u32 stat, seq_state;
47+
unsigned int timeout;
48+
49+
mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
50+
CM_GCR_Cx_OTHER_BLOCK_LOCAL);
51+
stat = read_cpc_co_stat_conf();
52+
mips_cm_unlock_other();
53+
54+
seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
55+
seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
56+
if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5)
57+
return;
58+
59+
/* Set endianness & power up the CM */
60+
mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
61+
write_cpc_redir_sys_config(IS_ENABLED(CONFIG_CPU_BIG_ENDIAN));
62+
write_cpc_redir_pwrup_ctl(1);
63+
mips_cm_unlock_other();
64+
65+
/* Wait for the CM to start up */
66+
timeout = 1000;
67+
mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
68+
CM_GCR_Cx_OTHER_BLOCK_LOCAL);
69+
while (1) {
70+
stat = read_cpc_co_stat_conf();
71+
seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
72+
seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
73+
if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5)
74+
break;
75+
76+
if (timeout) {
77+
mdelay(1);
78+
timeout--;
79+
} else {
80+
pr_warn("Waiting for cluster %u CM to power up... STAT_CONF=0x%x\n",
81+
cluster, stat);
82+
mdelay(1000);
83+
}
84+
}
85+
86+
mips_cm_unlock_other();
87+
}
88+
4589
static unsigned __init core_vpe_count(unsigned int cluster, unsigned core)
4690
{
4791
return min(smp_max_threads, mips_cps_numvps(cluster, core));
@@ -178,6 +222,9 @@ static void __init cps_smp_setup(void)
178222
pr_cont(",");
179223
pr_cont("{");
180224

225+
if (mips_cm_revision() >= CM_REV_CM3_5)
226+
power_up_other_cluster(cl);
227+
181228
ncores = mips_cps_numcores(cl);
182229
for (c = 0; c < ncores; c++) {
183230
core_vpes = core_vpe_count(cl, c);
@@ -205,18 +252,15 @@ static void __init cps_smp_setup(void)
205252

206253
/* Indicate present CPUs (CPU being synonymous with VPE) */
207254
for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
208-
set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);
209-
set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);
255+
set_cpu_possible(v, true);
256+
set_cpu_present(v, true);
210257
__cpu_number_map[v] = v;
211258
__cpu_logical_map[v] = v;
212259
}
213260

214261
/* Set a coherent default CCA (CWB) */
215262
change_c0_config(CONF_CM_CMASK, 0x5);
216263

217-
/* Core 0 is powered up (we're running on it) */
218-
bitmap_set(core_power, 0, 1);
219-
220264
/* Initialise core 0 */
221265
mips_cps_core_init();
222266

@@ -298,6 +342,10 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
298342
goto err_out;
299343
mips_cps_cluster_bootcfg[cl].core_config = core_bootcfg;
300344

345+
mips_cps_cluster_bootcfg[cl].core_power =
346+
kcalloc(BITS_TO_LONGS(ncores), sizeof(unsigned long),
347+
GFP_KERNEL);
348+
301349
/* Allocate VPE boot configuration structs */
302350
for (c = 0; c < ncores; c++) {
303351
core_vpes = core_vpe_count(cl, c);
@@ -309,11 +357,12 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
309357
}
310358
}
311359

312-
/* Mark this CPU as booted */
360+
/* Mark this CPU as powered up & booted */
313361
cl = cpu_cluster(&current_cpu_data);
314362
c = cpu_core(&current_cpu_data);
315363
cluster_bootcfg = &mips_cps_cluster_bootcfg[cl];
316364
core_bootcfg = &cluster_bootcfg->core_config[c];
365+
bitmap_set(cluster_bootcfg->core_power, cpu_core(&current_cpu_data), 1);
317366
atomic_set(&core_bootcfg->vpe_mask, 1 << cpu_vpe_id(&current_cpu_data));
318367

319368
return;
@@ -341,13 +390,118 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
341390
}
342391
}
343392

344-
static void boot_core(unsigned int core, unsigned int vpe_id)
393+
static void init_cluster_l2(void)
345394
{
346-
u32 stat, seq_state;
347-
unsigned timeout;
395+
u32 l2_cfg, l2sm_cop, result;
396+
397+
while (1) {
398+
l2_cfg = read_gcr_redir_l2_ram_config();
399+
400+
/* If HCI is not supported, use the state machine below */
401+
if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_PRESENT))
402+
break;
403+
if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_SUPPORTED))
404+
break;
405+
406+
/* If the HCI_DONE bit is set, we're finished */
407+
if (l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_DONE)
408+
return;
409+
}
410+
411+
l2sm_cop = read_gcr_redir_l2sm_cop();
412+
if (WARN(!(l2sm_cop & CM_GCR_L2SM_COP_PRESENT),
413+
"L2 init not supported on this system yet"))
414+
return;
415+
416+
/* Clear L2 tag registers */
417+
write_gcr_redir_l2_tag_state(0);
418+
write_gcr_redir_l2_ecc(0);
419+
420+
/* Ensure the L2 tag writes complete before the state machine starts */
421+
mb();
422+
423+
/* Wait for the L2 state machine to be idle */
424+
do {
425+
l2sm_cop = read_gcr_redir_l2sm_cop();
426+
} while (l2sm_cop & CM_GCR_L2SM_COP_RUNNING);
427+
428+
/* Start a store tag operation */
429+
l2sm_cop = CM_GCR_L2SM_COP_TYPE_IDX_STORETAG;
430+
l2sm_cop <<= __ffs(CM_GCR_L2SM_COP_TYPE);
431+
l2sm_cop |= CM_GCR_L2SM_COP_CMD_START;
432+
write_gcr_redir_l2sm_cop(l2sm_cop);
433+
434+
/* Ensure the state machine starts before we poll for completion */
435+
mb();
436+
437+
/* Wait for the operation to be complete */
438+
do {
439+
l2sm_cop = read_gcr_redir_l2sm_cop();
440+
result = l2sm_cop & CM_GCR_L2SM_COP_RESULT;
441+
result >>= __ffs(CM_GCR_L2SM_COP_RESULT);
442+
} while (!result);
443+
444+
WARN(result != CM_GCR_L2SM_COP_RESULT_DONE_OK,
445+
"L2 state machine failed cache init with error %u\n", result);
446+
}
447+
448+
static void boot_core(unsigned int cluster, unsigned int core,
449+
unsigned int vpe_id)
450+
{
451+
struct cluster_boot_config *cluster_cfg;
452+
u32 access, stat, seq_state;
453+
unsigned int timeout, ncores;
454+
455+
cluster_cfg = &mips_cps_cluster_bootcfg[cluster];
456+
ncores = mips_cps_numcores(cluster);
457+
458+
if ((cluster != cpu_cluster(&current_cpu_data)) &&
459+
bitmap_empty(cluster_cfg->core_power, ncores)) {
460+
power_up_other_cluster(cluster);
461+
462+
mips_cm_lock_other(cluster, core, 0,
463+
CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
464+
465+
/* Ensure cluster GCRs are where we expect */
466+
write_gcr_redir_base(read_gcr_base());
467+
write_gcr_redir_cpc_base(read_gcr_cpc_base());
468+
write_gcr_redir_gic_base(read_gcr_gic_base());
469+
470+
init_cluster_l2();
471+
472+
/* Mirror L2 configuration */
473+
write_gcr_redir_l2_only_sync_base(read_gcr_l2_only_sync_base());
474+
write_gcr_redir_l2_pft_control(read_gcr_l2_pft_control());
475+
write_gcr_redir_l2_pft_control_b(read_gcr_l2_pft_control_b());
476+
477+
/* Mirror ECC/parity setup */
478+
write_gcr_redir_err_control(read_gcr_err_control());
479+
480+
/* Set BEV base */
481+
write_gcr_redir_bev_base(core_entry_reg);
482+
483+
mips_cm_unlock_other();
484+
}
485+
486+
if (cluster != cpu_cluster(&current_cpu_data)) {
487+
mips_cm_lock_other(cluster, core, 0,
488+
CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
489+
490+
/* Ensure the core can access the GCRs */
491+
access = read_gcr_redir_access();
492+
access |= BIT(core);
493+
write_gcr_redir_access(access);
494+
495+
mips_cm_unlock_other();
496+
} else {
497+
/* Ensure the core can access the GCRs */
498+
access = read_gcr_access();
499+
access |= BIT(core);
500+
write_gcr_access(access);
501+
}
348502

349503
/* Select the appropriate core */
350-
mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
504+
mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
351505

352506
/* Set its reset vector */
353507
if (mips_cm_is64)
@@ -416,7 +570,17 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
416570
mips_cm_unlock_other();
417571

418572
/* The core is now powered up */
419-
bitmap_set(core_power, core, 1);
573+
bitmap_set(cluster_cfg->core_power, core, 1);
574+
575+
/*
576+
* Restore CM_PWRUP=0 so that the CM can power down if all the cores in
577+
* the cluster do (eg. if they're all removed via hotplug).
578+
*/
579+
if (mips_cm_revision() >= CM_REV_CM3_5) {
580+
mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
581+
write_cpc_redir_pwrup_ctl(0);
582+
mips_cm_unlock_other();
583+
}
420584
}
421585

422586
static void remote_vpe_boot(void *dummy)
@@ -442,10 +606,6 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle)
442606
unsigned int remote;
443607
int err;
444608

445-
/* We don't yet support booting CPUs in other clusters */
446-
if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
447-
return -ENOSYS;
448-
449609
vpe_cfg->pc = (unsigned long)&smp_bootstrap;
450610
vpe_cfg->sp = __KSTK_TOS(idle);
451611
vpe_cfg->gp = (unsigned long)task_thread_info(idle);
@@ -454,14 +614,15 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle)
454614

455615
preempt_disable();
456616

457-
if (!test_bit(core, core_power)) {
617+
if (!test_bit(core, cluster_cfg->core_power)) {
458618
/* Boot a VPE on a powered down core */
459-
boot_core(core, vpe_id);
619+
boot_core(cluster, core, vpe_id);
460620
goto out;
461621
}
462622

463623
if (cpu_has_vp) {
464-
mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
624+
mips_cm_lock_other(cluster, core, vpe_id,
625+
CM_GCR_Cx_OTHER_BLOCK_LOCAL);
465626
if (mips_cm_is64)
466627
write_gcr_co_reset64_base(core_entry_reg);
467628
else
@@ -671,11 +832,15 @@ static void cps_cpu_die(unsigned int cpu) { }
671832

672833
static void cps_cleanup_dead_cpu(unsigned cpu)
673834
{
835+
unsigned int cluster = cpu_cluster(&cpu_data[cpu]);
674836
unsigned core = cpu_core(&cpu_data[cpu]);
675837
unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
676838
ktime_t fail_time;
677839
unsigned stat;
678840
int err;
841+
struct cluster_boot_config *cluster_cfg;
842+
843+
cluster_cfg = &mips_cps_cluster_bootcfg[cluster];
679844

680845
/*
681846
* Now wait for the CPU to actually offline. Without doing this that
@@ -727,7 +892,7 @@ static void cps_cleanup_dead_cpu(unsigned cpu)
727892
} while (1);
728893

729894
/* Indicate the core is powered off */
730-
bitmap_clear(core_power, core, 1);
895+
bitmap_clear(cluster_cfg->core_power, core, 1);
731896
} else if (cpu_has_mipsmt) {
732897
/*
733898
* Have a CPU with access to the offlined CPUs registers wait

0 commit comments

Comments
 (0)