@@ -36,12 +36,56 @@ enum label_id {
 
 UASM_L_LA(_not_nmi)
 
-static DECLARE_BITMAP(core_power, NR_CPUS);
 static u64 core_entry_reg;
 static phys_addr_t cps_vec_pa;
 
 struct cluster_boot_config *mips_cps_cluster_bootcfg;
 
+static void power_up_other_cluster(unsigned int cluster)
+{
+	u32 stat, seq_state;
+	unsigned int timeout;
+
+	mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
+			   CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+	stat = read_cpc_co_stat_conf();
+	mips_cm_unlock_other();
+
+	seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
+	seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
+	if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5)
+		return;
+
+	/* Set endianness & power up the CM */
+	mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+	write_cpc_redir_sys_config(IS_ENABLED(CONFIG_CPU_BIG_ENDIAN));
+	write_cpc_redir_pwrup_ctl(1);
+	mips_cm_unlock_other();
+
+	/* Wait for the CM to start up */
+	timeout = 1000;
+	mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
+			   CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+	while (1) {
+		stat = read_cpc_co_stat_conf();
+		seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
+		seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
+		if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5)
+			break;
+
+		if (timeout) {
+			mdelay(1);
+			timeout--;
+		} else {
+			pr_warn("Waiting for cluster %u CM to power up... STAT_CONF=0x%x\n",
+				cluster, stat);
+			mdelay(1000);
+		}
+	}
+
+	mips_cm_unlock_other();
+}
+
 static unsigned __init core_vpe_count(unsigned int cluster, unsigned core)
 {
 	return min(smp_max_threads, mips_cps_numvps(cluster, core));
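The mask-and-shift idiom used twice above — `stat & CPC_Cx_STAT_CONF_SEQSTATE` followed by a right shift by `__ffs()` of the same mask — right-justifies the sequencer-state field without hard-coding its bit position. A minimal userspace sketch of the idiom; the mask and register values are made up for illustration, only the technique matches the kernel code:

/*
 * Sketch of the mask-and-shift field extraction above. POSIX ffs() is
 * 1-based where the kernel's __ffs() is 0-based, hence the "- 1".
 * SEQSTATE_MASK is a hypothetical stand-in, not the real
 * CPC_Cx_STAT_CONF_SEQSTATE definition from asm/mips-cpc.h.
 */
#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

#define SEQSTATE_MASK	0x00780000u	/* pretend bits 22:19 hold the field */

static unsigned int extract_field(uint32_t reg, uint32_t mask)
{
	return (reg & mask) >> (ffs(mask) - 1);
}

int main(void)
{
	uint32_t stat = 0x00300000;	/* pretend STAT_CONF readback */

	/* prints "seq_state = 6" */
	printf("seq_state = %u\n", extract_field(stat, SEQSTATE_MASK));
	return 0;
}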
@@ -178,6 +222,9 @@ static void __init cps_smp_setup(void)
 			pr_cont(",");
 		pr_cont("{");
 
+		if (mips_cm_revision() >= CM_REV_CM3_5)
+			power_up_other_cluster(cl);
+
 		ncores = mips_cps_numcores(cl);
 		for (c = 0; c < ncores; c++) {
 			core_vpes = core_vpe_count(cl, c);
@@ -205,18 +252,15 @@ static void __init cps_smp_setup(void)
 
 	/* Indicate present CPUs (CPU being synonymous with VPE) */
 	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
-		set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);
-		set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);
+		set_cpu_possible(v, true);
+		set_cpu_present(v, true);
 		__cpu_number_map[v] = v;
 		__cpu_logical_map[v] = v;
 	}
 
 	/* Set a coherent default CCA (CWB) */
 	change_c0_config(CONF_CM_CMASK, 0x5);
 
-	/* Core 0 is powered up (we're running on it) */
-	bitmap_set(core_power, 0, 1);
-
 	/* Initialise core 0 */
 	mips_cps_core_init();
 
@@ -298,6 +342,10 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
 			goto err_out;
 		mips_cps_cluster_bootcfg[cl].core_config = core_bootcfg;
 
+		mips_cps_cluster_bootcfg[cl].core_power =
+			kcalloc(BITS_TO_LONGS(ncores), sizeof(unsigned long),
+				GFP_KERNEL);
+
 		/* Allocate VPE boot configuration structs */
 		for (c = 0; c < ncores; c++) {
 			core_vpes = core_vpe_count(cl, c);
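The `kcalloc()` just added sizes each cluster's `core_power` bitmap at one bit per core, rounded up to whole `unsigned long`s and zero-initialised. A standalone sketch of that sizing, assuming only that the kernel's BITS_TO_LONGS() performs the round-up reproduced here:

/*
 * Userspace sketch of the bitmap sizing done by the kcalloc() call
 * above. calloc() zeroes the map, mirroring kcalloc(..., GFP_KERNEL).
 */
#include <limits.h>	/* CHAR_BIT */
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int ncores = 6;	/* example core count */
	unsigned long *core_power;

	core_power = calloc(BITS_TO_LONGS(ncores), sizeof(*core_power));
	if (!core_power)
		return 1;

	/* on LP64: "6 cores -> 1 longs (8 bytes)" */
	printf("%u cores -> %zu longs (%zu bytes)\n", ncores,
	       (size_t)BITS_TO_LONGS(ncores),
	       BITS_TO_LONGS(ncores) * sizeof(*core_power));
	free(core_power);
	return 0;
}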
@@ -309,11 +357,12 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
 		}
 	}
 
-	/* Mark this CPU as booted */
+	/* Mark this CPU as powered up & booted */
 	cl = cpu_cluster(&current_cpu_data);
 	c = cpu_core(&current_cpu_data);
 	cluster_bootcfg = &mips_cps_cluster_bootcfg[cl];
 	core_bootcfg = &cluster_bootcfg->core_config[c];
+	bitmap_set(cluster_bootcfg->core_power, cpu_core(&current_cpu_data), 1);
 	atomic_set(&core_bootcfg->vpe_mask, 1 << cpu_vpe_id(&current_cpu_data));
 
 	return;
@@ -341,13 +390,118 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
 	}
 }
 
-static void boot_core(unsigned int core, unsigned int vpe_id)
+static void init_cluster_l2(void)
 {
-	u32 stat, seq_state;
-	unsigned timeout;
+	u32 l2_cfg, l2sm_cop, result;
+
+	while (1) {
+		l2_cfg = read_gcr_redir_l2_ram_config();
+
+		/* If HCI is not supported, use the state machine below */
+		if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_PRESENT))
+			break;
+		if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_SUPPORTED))
+			break;
+
+		/* If the HCI_DONE bit is set, we're finished */
+		if (l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_DONE)
+			return;
+	}
+
+	l2sm_cop = read_gcr_redir_l2sm_cop();
+	if (WARN(!(l2sm_cop & CM_GCR_L2SM_COP_PRESENT),
+		 "L2 init not supported on this system yet"))
+		return;
+
+	/* Clear L2 tag registers */
+	write_gcr_redir_l2_tag_state(0);
+	write_gcr_redir_l2_ecc(0);
+
+	/* Ensure the L2 tag writes complete before the state machine starts */
+	mb();
+
+	/* Wait for the L2 state machine to be idle */
+	do {
+		l2sm_cop = read_gcr_redir_l2sm_cop();
+	} while (l2sm_cop & CM_GCR_L2SM_COP_RUNNING);
+
+	/* Start a store tag operation */
+	l2sm_cop = CM_GCR_L2SM_COP_TYPE_IDX_STORETAG;
+	l2sm_cop <<= __ffs(CM_GCR_L2SM_COP_TYPE);
+	l2sm_cop |= CM_GCR_L2SM_COP_CMD_START;
+	write_gcr_redir_l2sm_cop(l2sm_cop);
+
+	/* Ensure the state machine starts before we poll for completion */
+	mb();
+
+	/* Wait for the operation to be complete */
+	do {
+		l2sm_cop = read_gcr_redir_l2sm_cop();
+		result = l2sm_cop & CM_GCR_L2SM_COP_RESULT;
+		result >>= __ffs(CM_GCR_L2SM_COP_RESULT);
+	} while (!result);
+
+	WARN(result != CM_GCR_L2SM_COP_RESULT_DONE_OK,
+	     "L2 state machine failed cache init with error %u\n", result);
+}
+
+static void boot_core(unsigned int cluster, unsigned int core,
+		      unsigned int vpe_id)
+{
+	struct cluster_boot_config *cluster_cfg;
+	u32 access, stat, seq_state;
+	unsigned int timeout, ncores;
+
+	cluster_cfg = &mips_cps_cluster_bootcfg[cluster];
+	ncores = mips_cps_numcores(cluster);
+
+	if ((cluster != cpu_cluster(&current_cpu_data)) &&
+	    bitmap_empty(cluster_cfg->core_power, ncores)) {
+		power_up_other_cluster(cluster);
+
+		mips_cm_lock_other(cluster, core, 0,
+				   CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+
+		/* Ensure cluster GCRs are where we expect */
+		write_gcr_redir_base(read_gcr_base());
+		write_gcr_redir_cpc_base(read_gcr_cpc_base());
+		write_gcr_redir_gic_base(read_gcr_gic_base());
+
+		init_cluster_l2();
+
+		/* Mirror L2 configuration */
+		write_gcr_redir_l2_only_sync_base(read_gcr_l2_only_sync_base());
+		write_gcr_redir_l2_pft_control(read_gcr_l2_pft_control());
+		write_gcr_redir_l2_pft_control_b(read_gcr_l2_pft_control_b());
+
+		/* Mirror ECC/parity setup */
+		write_gcr_redir_err_control(read_gcr_err_control());
+
+		/* Set BEV base */
+		write_gcr_redir_bev_base(core_entry_reg);
+
+		mips_cm_unlock_other();
+	}
+
+	if (cluster != cpu_cluster(&current_cpu_data)) {
+		mips_cm_lock_other(cluster, core, 0,
+				   CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+
+		/* Ensure the core can access the GCRs */
+		access = read_gcr_redir_access();
+		access |= BIT(core);
+		write_gcr_redir_access(access);
+
+		mips_cm_unlock_other();
+	} else {
+		/* Ensure the core can access the GCRs */
+		access = read_gcr_access();
+		access |= BIT(core);
+		write_gcr_access(access);
+	}
 
 	/* Select the appropriate core */
-	mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+	mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 
 	/* Set its reset vector */
 	if (mips_cm_is64)
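`init_cluster_l2()` above drives a kick-then-poll protocol: write the command word with the START bit, barrier, then spin until the RESULT field reads back non-zero. The sketch below mocks that protocol with a plain variable standing in for the GCR; every name and field position in it is a stand-in for illustration, not the real CM register layout:

/*
 * Mocked kick-then-poll sequence. The "hardware" here completes the
 * operation instantly inside write_l2sm_cop(); real hardware would set
 * the result field some time later, which is why the caller polls.
 */
#include <stdint.h>
#include <stdio.h>

#define COP_CMD_START	0x001u	/* hypothetical start bit */
#define COP_RESULT	0x700u	/* hypothetical result field, bits 10:8 */
#define COP_RESULT_OK	0x1u

static uint32_t mock_l2sm_cop;	/* stands in for the L2SM_COP GCR */

static void write_l2sm_cop(uint32_t val)
{
	/* pretend the state machine finishes at once with an OK result */
	mock_l2sm_cop = val | (COP_RESULT_OK << 8);
}

static uint32_t read_l2sm_cop(void)
{
	return mock_l2sm_cop;
}

int main(void)
{
	uint32_t cop, result;

	write_l2sm_cop(COP_CMD_START);	/* kick */

	do {				/* poll for a result */
		cop = read_l2sm_cop();
		result = (cop & COP_RESULT) >> 8;
	} while (!result);

	if (result == COP_RESULT_OK)
		printf("L2 init OK\n");
	else
		printf("L2 init failed: %u\n", result);
	return 0;
}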
@@ -416,7 +570,17 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
 	mips_cm_unlock_other();
 
 	/* The core is now powered up */
-	bitmap_set(core_power, core, 1);
+	bitmap_set(cluster_cfg->core_power, core, 1);
+
+	/*
+	 * Restore CM_PWRUP=0 so that the CM can power down if all the cores in
+	 * the cluster do (e.g. if they're all removed via hotplug).
+	 */
+	if (mips_cm_revision() >= CM_REV_CM3_5) {
+		mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+		write_cpc_redir_pwrup_ctl(0);
+		mips_cm_unlock_other();
+	}
 }
 
 static void remote_vpe_boot(void *dummy)
@@ -442,10 +606,6 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle)
 	unsigned int remote;
 	int err;
 
-	/* We don't yet support booting CPUs in other clusters */
-	if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
-		return -ENOSYS;
-
 	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
 	vpe_cfg->sp = __KSTK_TOS(idle);
 	vpe_cfg->gp = (unsigned long)task_thread_info(idle);
@@ -454,14 +614,15 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle)
 
 	preempt_disable();
 
-	if (!test_bit(core, core_power)) {
+	if (!test_bit(core, cluster_cfg->core_power)) {
 		/* Boot a VPE on a powered down core */
-		boot_core(core, vpe_id);
+		boot_core(cluster, core, vpe_id);
 		goto out;
 	}
 
 	if (cpu_has_vp) {
-		mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+		mips_cm_lock_other(cluster, core, vpe_id,
+				   CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 		if (mips_cm_is64)
 			write_gcr_co_reset64_base(core_entry_reg);
 		else
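With the cross-cluster `-ENOSYS` guard removed, the per-cluster `core_power` bitmap alone decides the boot path here: a clear bit routes the CPU through the full `boot_core()` power-up sequence, a set bit means the core is already live and only the VPE needs starting. A userspace sketch of that gate, with hand-rolled stand-ins for the kernel's test_bit()/bitmap_set():

/*
 * Sketch of the test_bit() gate above on a userspace bitmap. The helper
 * functions are simplified stand-ins for the kernel bitmap API.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))

static bool test_bit_ul(unsigned int nr, const unsigned long *map)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

static void set_bit_ul(unsigned int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

int main(void)
{
	unsigned long core_power[1] = { 0 };	/* one cluster's map */
	unsigned int core = 2;

	if (!test_bit_ul(core, core_power)) {
		printf("core %u powered down: full boot_core() path\n", core);
		set_bit_ul(core, core_power);	/* as boot_core() does */
	} else {
		printf("core %u already up: just start the VPE\n", core);
	}
	return 0;
}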
@@ -671,11 +832,15 @@ static void cps_cpu_die(unsigned int cpu) { }
 
 static void cps_cleanup_dead_cpu(unsigned cpu)
 {
+	unsigned int cluster = cpu_cluster(&cpu_data[cpu]);
 	unsigned core = cpu_core(&cpu_data[cpu]);
 	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
 	ktime_t fail_time;
 	unsigned stat;
 	int err;
+	struct cluster_boot_config *cluster_cfg;
+
+	cluster_cfg = &mips_cps_cluster_bootcfg[cluster];
 
 	/*
 	 * Now wait for the CPU to actually offline. Without doing this that
@@ -727,7 +892,7 @@ static void cps_cleanup_dead_cpu(unsigned cpu)
 		} while (1);
 
 		/* Indicate the core is powered off */
-		bitmap_clear(core_power, core, 1);
+		bitmap_clear(cluster_cfg->core_power, core, 1);
 	} else if (cpu_has_mipsmt) {
 		/*
 		 * Have a CPU with access to the offlined CPUs registers wait