@@ -389,7 +389,8 @@ static inline int amd_pstate_cppc_enable(struct cpufreq_policy *policy)
 static int msr_init_perf(struct amd_cpudata *cpudata)
 {
 	union perf_cached perf = READ_ONCE(cpudata->perf);
-	u64 cap1, numerator;
+	u64 cap1, numerator, cppc_req;
+	u8 min_perf;
 
 	int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
 				     &cap1);
@@ -400,6 +401,22 @@ static int msr_init_perf(struct amd_cpudata *cpudata)
 	if (ret)
 		return ret;
 
+	ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req);
+	if (ret)
+		return ret;
+
+	WRITE_ONCE(cpudata->cppc_req_cached, cppc_req);
+	min_perf = FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cppc_req);
+
+	/*
+	 * Clear out the min_perf part to check if the rest of the MSR is 0; if so,
+	 * the min_perf value is the one specified through the BIOS option.
+	 */
+	cppc_req &= ~(AMD_CPPC_MIN_PERF_MASK);
+
+	if (!cppc_req)
+		perf.bios_min_perf = min_perf;
+
 	perf.highest_perf = numerator;
 	perf.max_limit_perf = numerator;
 	perf.min_limit_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
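
The detection above hinges on the CPPC_REQ field layout: min_perf lives in bits 7:0, so a register whose only non-zero field is min_perf can only have been programmed by firmware, not by a previous kernel. A standalone sketch of that check (the 7:0 placement follows msr-index.h; the helper name and test values are ours):

```c
#include <stdint.h>
#include <stdio.h>

#define CPPC_MIN_PERF_MASK 0xffULL	/* bits 7:0 of MSR_AMD_CPPC_REQ */

/* Return the BIOS-provided min_perf, or 0 when any other field is set
 * (i.e. a previous kernel, not firmware, last wrote the register). */
static uint8_t bios_min_perf(uint64_t cppc_req)
{
	uint8_t min_perf = cppc_req & CPPC_MIN_PERF_MASK;

	return (cppc_req & ~CPPC_MIN_PERF_MASK) ? 0 : min_perf;
}

int main(void)
{
	printf("%u\n", bios_min_perf(0x2aULL));       /* 42: only min_perf set -> BIOS */
	printf("%u\n", bios_min_perf(0x00ff002aULL)); /*  0: max_perf also set -> kernel */
	return 0;
}
```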
@@ -554,6 +571,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
 	if (!policy)
 		return;
 
+	/* limit the max perf when core performance boost feature is disabled */
+	if (!cpudata->boost_supported)
+		max_perf = min_t(u8, perf.nominal_perf, max_perf);
+
 	des_perf = clamp_t(u8, des_perf, min_perf, max_perf);
 
 	policy->cur = perf_to_freq(perf, cpudata->nominal_freq, des_perf);
@@ -563,10 +584,6 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
 		des_perf = 0;
 	}
 
-	/* limit the max perf when core performance boost feature is disabled */
-	if (!cpudata->boost_supported)
-		max_perf = min_t(u8, perf.nominal_perf, max_perf);
-
 	if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
 		trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
 			cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
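
These two hunks move the boost-off clamp ahead of the des_perf clamp: previously max_perf was capped at nominal_perf only after des_perf had already been clamped against the uncapped max_perf, so a boost-disabled core could still request above-nominal perf. A minimal sketch of the corrected ordering (helper names are illustrative, not the driver's):

```c
#include <stdint.h>

static uint8_t clamp_u8(uint8_t v, uint8_t lo, uint8_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Cap max_perf for the boost-off case *before* clamping des_perf;
 * otherwise des_perf can still end up above nominal_perf. */
static uint8_t effective_des_perf(uint8_t des, uint8_t min, uint8_t max,
				  uint8_t nominal, int boost_supported)
{
	if (!boost_supported && max > nominal)
		max = nominal;
	return clamp_u8(des, min, max);
}
```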
@@ -580,20 +597,26 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
 {
 	/*
 	 * Initialize lower frequency limit (i.e. policy->min) with
-	 * lowest_nonlinear_frequency which is the most energy efficient
-	 * frequency. Override the initial value set by cpufreq core and
-	 * amd-pstate qos_requests.
+	 * lowest_nonlinear_frequency or, if specified, the min frequency from BIOS.
+	 * Override the initial value set by cpufreq core and amd-pstate qos_requests.
 	 */
 	if (policy_data->min == FREQ_QOS_MIN_DEFAULT_VALUE) {
 		struct cpufreq_policy *policy __free(put_cpufreq_policy) =
 					      cpufreq_cpu_get(policy_data->cpu);
 		struct amd_cpudata *cpudata;
+		union perf_cached perf;
 
 		if (!policy)
 			return -EINVAL;
 
 		cpudata = policy->driver_data;
-		policy_data->min = cpudata->lowest_nonlinear_freq;
+		perf = READ_ONCE(cpudata->perf);
+
+		if (perf.bios_min_perf)
+			policy_data->min = perf_to_freq(perf, cpudata->nominal_freq,
+							perf.bios_min_perf);
+		else
+			policy_data->min = cpudata->lowest_nonlinear_freq;
 	}
 
 	cpufreq_verify_within_cpu_limits(policy_data);
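
perf_to_freq() above (and freq_to_perf(), used by amd_pstate_resume() later) implement the driver's linear scale anchored at the nominal point: frequency is proportional to perf relative to the nominal_perf/nominal_freq pair. A sketch of that relationship; the round-up and clamping details here are assumptions, not the driver's exact code:

```c
#include <stdint.h>

/* freq = nominal_freq * perf / nominal_perf, rounded up (assumed). */
static uint32_t perf_to_freq_khz(uint8_t nominal_perf, uint32_t nominal_freq_khz,
				 uint8_t perf_val)
{
	uint64_t num = (uint64_t)nominal_freq_khz * perf_val;

	return (uint32_t)((num + nominal_perf - 1) / nominal_perf);
}

/* Inverse mapping, clamped to the valid perf range (assumed). */
static uint8_t freq_to_perf_val(uint8_t nominal_perf, uint32_t nominal_freq_khz,
				uint32_t freq_khz, uint8_t lowest, uint8_t highest)
{
	uint64_t p = ((uint64_t)freq_khz * nominal_perf + nominal_freq_khz - 1) /
		     nominal_freq_khz;

	if (p < lowest)
		p = lowest;
	if (p > highest)
		p = highest;
	return (uint8_t)p;
}
```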
@@ -1021,6 +1044,10 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 static void amd_pstate_cpu_exit(struct cpufreq_policy *policy)
 {
 	struct amd_cpudata *cpudata = policy->driver_data;
+	union perf_cached perf = READ_ONCE(cpudata->perf);
+
+	/* Reset CPPC_REQ MSR to the BIOS value */
+	amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
 
 	freq_qos_remove_request(&cpudata->req[1]);
 	freq_qos_remove_request(&cpudata->req[0]);
@@ -1302,6 +1329,12 @@ static ssize_t amd_pstate_show_status(char *buf)
 	return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
 }
 
+int amd_pstate_get_status(void)
+{
+	return cppc_state;
+}
+EXPORT_SYMBOL_GPL(amd_pstate_get_status);
+
 int amd_pstate_update_status(const char *buf, size_t size)
 {
 	int mode_idx;
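
The newly exported amd_pstate_get_status() simply returns the current cppc_state mode index. A hypothetical in-kernel consumer might look like the following; the header providing the prototype is an assumption, as is the consumer module itself:

```c
#include <linux/module.h>
#include <linux/printk.h>

/* Prototype assumed to live in a shared amd-pstate header. */
int amd_pstate_get_status(void);

static int __init amd_pstate_probe_mode(void)
{
	/* Index into amd_pstate_mode_string[] (disable/passive/active/guided). */
	pr_info("amd-pstate mode index: %d\n", amd_pstate_get_status());
	return 0;
}
module_init(amd_pstate_probe_mode);
MODULE_LICENSE("GPL");
```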
@@ -1416,7 +1449,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
 	struct amd_cpudata *cpudata;
 	union perf_cached perf;
 	struct device *dev;
-	u64 value;
 	int ret;
 
 	/*
@@ -1481,12 +1513,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
 		cpudata->epp_default = AMD_CPPC_EPP_BALANCE_PERFORMANCE;
 	}
 
-	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
-		ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
-		if (ret)
-			return ret;
-		WRITE_ONCE(cpudata->cppc_req_cached, value);
-	}
 	ret = amd_pstate_set_epp(policy, cpudata->epp_default);
 	if (ret)
 		return ret;
@@ -1506,6 +1532,11 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
 	struct amd_cpudata *cpudata = policy->driver_data;
 
 	if (cpudata) {
+		union perf_cached perf = READ_ONCE(cpudata->perf);
+
+		/* Reset CPPC_REQ MSR to the BIOS value */
+		amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+
 		kfree(cpudata);
 		policy->driver_data = NULL;
 	}
@@ -1556,21 +1587,38 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
+static int amd_pstate_cpu_online(struct cpufreq_policy *policy)
 {
-	pr_debug("AMD CPU Core %d going online\n", policy->cpu);
-
 	return amd_pstate_cppc_enable(policy);
 }
 
-static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
+static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
 {
-	return 0;
+	struct amd_cpudata *cpudata = policy->driver_data;
+	union perf_cached perf = READ_ONCE(cpudata->perf);
+
+	/*
+	 * Reset CPPC_REQ MSR to the BIOS value; this lets us retain the BIOS-specified
+	 * min_perf value across kexec reboots. If this CPU is onlined normally afterwards,
+	 * the limits, epp and desired perf will get reset to the cached values in cpudata.
+	 */
+	return amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
 }
 
-static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
+static int amd_pstate_suspend(struct cpufreq_policy *policy)
 {
 	struct amd_cpudata *cpudata = policy->driver_data;
+	union perf_cached perf = READ_ONCE(cpudata->perf);
+	int ret;
+
+	/*
+	 * Reset CPPC_REQ MSR to the BIOS value; this lets us retain the BIOS-specified
+	 * min_perf value across kexec reboots. If this CPU is resumed without a kexec,
+	 * the limits, epp and desired perf will get reset to the cached values in cpudata.
+	 */
+	ret = amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+	if (ret)
+		return ret;
 
 	/* invalidate to ensure it's rewritten during resume */
 	cpudata->cppc_req_cached = 0;
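
Why does writing (min = bios_min_perf, des = 0, max = 0, epp = 0) "reset to the BIOS value"? With the field layout assumed earlier, such a request leaves only bits 7:0 populated, which is exactly the pattern msr_init_perf() treats as BIOS-owned on the next boot. A sketch of the packing (illustrative helper, not the driver's code):

```c
#include <stdint.h>

/* Pack the four CPPC request fields per the layout assumed above:
 * min 7:0, max 15:8, des 23:16, epp 31:24. */
static uint64_t pack_cppc_req(uint8_t min, uint8_t des, uint8_t max, uint8_t epp)
{
	return (uint64_t)min |
	       ((uint64_t)max << 8) |
	       ((uint64_t)des << 16) |
	       ((uint64_t)epp << 24);
}

/* pack_cppc_req(bios_min_perf, 0, 0, 0) == bios_min_perf: only the
 * min_perf field survives, so the next msr_init_perf() re-detects it. */
```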
@@ -1581,6 +1629,17 @@ static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
 	return 0;
 }
 
+static int amd_pstate_resume(struct cpufreq_policy *policy)
+{
+	struct amd_cpudata *cpudata = policy->driver_data;
+	union perf_cached perf = READ_ONCE(cpudata->perf);
+	int cur_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->cur);
+
+	/* Set CPPC_REQ to last sane value until the governor updates it */
+	return amd_pstate_update_perf(policy, perf.min_limit_perf, cur_perf, perf.max_limit_perf,
+				      0U, false);
+}
+
 static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
 {
 	struct amd_cpudata *cpudata = policy->driver_data;
@@ -1606,6 +1665,10 @@ static struct cpufreq_driver amd_pstate_driver = {
 	.fast_switch	= amd_pstate_fast_switch,
 	.init		= amd_pstate_cpu_init,
 	.exit		= amd_pstate_cpu_exit,
+	.online		= amd_pstate_cpu_online,
+	.offline	= amd_pstate_cpu_offline,
+	.suspend	= amd_pstate_suspend,
+	.resume		= amd_pstate_resume,
 	.set_boost	= amd_pstate_set_boost,
 	.update_limits	= amd_pstate_update_limits,
 	.name		= "amd-pstate",
@@ -1618,9 +1681,9 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
 	.setpolicy	= amd_pstate_epp_set_policy,
 	.init		= amd_pstate_epp_cpu_init,
 	.exit		= amd_pstate_epp_cpu_exit,
-	.offline	= amd_pstate_epp_cpu_offline,
-	.online		= amd_pstate_epp_cpu_online,
-	.suspend	= amd_pstate_epp_suspend,
+	.offline	= amd_pstate_cpu_offline,
+	.online		= amd_pstate_cpu_online,
+	.suspend	= amd_pstate_suspend,
 	.resume		= amd_pstate_epp_resume,
 	.update_limits	= amd_pstate_update_limits,
 	.set_boost	= amd_pstate_set_boost,