 #include <asm/cpu_device_id.h>
 #include <asm/cpufeature.h>
 #include <asm/intel-family.h>
+#include "../drivers/thermal/intel/thermal_interrupt.h"
 
 #define INTEL_PSTATE_SAMPLING_INTERVAL	(10 * NSEC_PER_MSEC)
 
@@ -219,6 +220,7 @@ struct global_params {
  * @sched_flags:	Store scheduler flags for possible cross CPU update
  * @hwp_boost_min:	Last HWP boosted min performance
  * @suspended:		Whether or not the driver has been suspended.
+ * @hwp_notify_work:	workqueue for HWP notifications.
  *
  * This structure stores per CPU instance data for all CPUs.
  */
@@ -257,6 +259,7 @@ struct cpudata {
 	unsigned int sched_flags;
 	u32 hwp_boost_min;
 	bool suspended;
+	struct delayed_work hwp_notify_work;
 };
 
 static struct cpudata **all_cpu_data;
@@ -537,7 +540,8 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
 	 * scaling factor is too high, so recompute it to make the HWP_CAP
 	 * highest performance correspond to the maximum turbo frequency.
 	 */
-	if (turbo_freq < cpu->pstate.turbo_pstate * scaling) {
+	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
+	if (turbo_freq < cpu->pstate.turbo_freq) {
 		cpu->pstate.turbo_freq = turbo_freq;
 		scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate);
 		cpu->pstate.scaling = scaling;
@@ -985,11 +989,15 @@ static void intel_pstate_hwp_set(unsigned int cpu)
 	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
 }
 
+static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata);
+
 static void intel_pstate_hwp_offline(struct cpudata *cpu)
 {
 	u64 value = READ_ONCE(cpu->hwp_req_cached);
 	int min_perf;
 
+	intel_pstate_disable_hwp_interrupt(cpu);
+
 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
 		/*
 		 * In case the EPP has been set to "performance" by the
@@ -1053,6 +1061,9 @@ static int intel_pstate_suspend(struct cpufreq_policy *policy)
 
 	cpu->suspended = true;
 
+	/* disable HWP interrupt and cancel any pending work */
+	intel_pstate_disable_hwp_interrupt(cpu);
+
 	return 0;
 }
 
@@ -1546,15 +1557,105 @@ static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
 
 /************************** sysfs end ************************/
 
+static void intel_pstate_notify_work(struct work_struct *work)
+{
+	struct cpudata *cpudata =
+		container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
+
+	cpufreq_update_policy(cpudata->cpu);
+	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
+}
+
+static DEFINE_SPINLOCK(hwp_notify_lock);
+static cpumask_t hwp_intr_enable_mask;
+
+void notify_hwp_interrupt(void)
+{
+	unsigned int this_cpu = smp_processor_id();
+	struct cpudata *cpudata;
+	unsigned long flags;
+	u64 value;
+
+	if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+		return;
+
+	rdmsrl_safe(MSR_HWP_STATUS, &value);
+	if (!(value & 0x01))
+		return;
+
+	spin_lock_irqsave(&hwp_notify_lock, flags);
+
+	if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
+		goto ack_intr;
+
+	/*
+	 * Currently we never free all_cpu_data. And we can't reach here
+	 * without this allocated. But for safety for future changes, added
+	 * check.
+	 */
+	if (unlikely(!READ_ONCE(all_cpu_data)))
+		goto ack_intr;
+
+	/*
+	 * The free is done during cleanup, when cpufreq registry is failed.
+	 * We wouldn't be here if it fails on init or switch status. But for
+	 * future changes, added check.
+	 */
+	cpudata = READ_ONCE(all_cpu_data[this_cpu]);
+	if (unlikely(!cpudata))
+		goto ack_intr;
+
+	schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10));
+
+	spin_unlock_irqrestore(&hwp_notify_lock, flags);
+
+	return;
+
+ack_intr:
+	wrmsrl_safe(MSR_HWP_STATUS, 0);
+	spin_unlock_irqrestore(&hwp_notify_lock, flags);
+}
+
+static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
+{
+	unsigned long flags;
+
+	/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
+	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+
+	spin_lock_irqsave(&hwp_notify_lock, flags);
+	if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask))
+		cancel_delayed_work(&cpudata->hwp_notify_work);
+	spin_unlock_irqrestore(&hwp_notify_lock, flags);
+}
+
+static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
+{
+	/* Enable HWP notification interrupt for guaranteed performance change */
+	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&hwp_notify_lock, flags);
+		INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
+		cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
+		spin_unlock_irqrestore(&hwp_notify_lock, flags);
+
+		/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
+		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
+	}
+}
+
 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
-	/* First disable HWP notification interrupt as we don't process them */
+	/* First disable HWP notification interrupt till we activate again */
 	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
 		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
 	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 	if (cpudata->epp_default == -EINVAL)
 		cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
+
+	intel_pstate_enable_hwp_interrupt(cpudata);
 }
 
 static int atom_get_min_pstate(void)
@@ -2266,7 +2367,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 	if (!cpu)
 		return -ENOMEM;
 
-	all_cpu_data[cpunum] = cpu;
+	WRITE_ONCE(all_cpu_data[cpunum], cpu);
 
 	cpu->cpu = cpunum;
 
@@ -2929,8 +3030,10 @@ static void intel_pstate_driver_cleanup(void)
 			if (intel_pstate_driver == &intel_pstate)
 				intel_pstate_clear_update_util_hook(cpu);
 
+			spin_lock(&hwp_notify_lock);
 			kfree(all_cpu_data[cpu]);
-			all_cpu_data[cpu] = NULL;
+			WRITE_ONCE(all_cpu_data[cpu], NULL);
+			spin_unlock(&hwp_notify_lock);
 		}
 	}
 	cpus_read_unlock();
@@ -3199,6 +3302,7 @@ static bool intel_pstate_hwp_is_enabled(void)
 
 static int __init intel_pstate_init(void)
 {
+	static struct cpudata **_all_cpu_data;
 	const struct x86_cpu_id *id;
 	int rc;
 
@@ -3224,7 +3328,7 @@ static int __init intel_pstate_init(void)
 	 * deal with it.
 	 */
 	if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
-		hwp_active++;
+		WRITE_ONCE(hwp_active, 1);
 		hwp_mode_bdw = id->driver_data;
 		intel_pstate.attr = hwp_cpufreq_attrs;
 		intel_cpufreq.attr = hwp_cpufreq_attrs;
@@ -3275,10 +3379,12 @@ static int __init intel_pstate_init(void)
3275
3379
3276
3380
pr_info ("Intel P-state driver initializing\n" );
3277
3381
3278
- all_cpu_data = vzalloc (array_size (sizeof (void * ), num_possible_cpus ()));
3279
- if (!all_cpu_data )
3382
+ _all_cpu_data = vzalloc (array_size (sizeof (void * ), num_possible_cpus ()));
3383
+ if (!_all_cpu_data )
3280
3384
return - ENOMEM ;
3281
3385
3386
+ WRITE_ONCE (all_cpu_data , _all_cpu_data );
3387
+
3282
3388
intel_pstate_request_control_from_smm ();
3283
3389
3284
3390
intel_pstate_sysfs_expose_params ();
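
For context, a minimal sketch of how the new notify_hwp_interrupt() entry point is intended to be consumed: the added include of thermal_interrupt.h indicates the Intel thermal interrupt path forwards HWP status interrupts to intel_pstate. The caller below is illustrative only; its name and guard are assumptions and are not part of this diff, and only notify_hwp_interrupt() itself is defined here.

	#include <asm/cpufeature.h>
	#include "thermal_interrupt.h"	/* declares notify_hwp_interrupt() */

	/* Hypothetical forwarding point in the thermal interrupt handler. */
	static void example_thermal_irq_handler(void)
	{
		/*
		 * notify_hwp_interrupt() checks hwp_active, the HWP_NOTIFY
		 * feature and MSR_HWP_STATUS itself and returns quietly when
		 * there is nothing to do, so the caller only needs a cheap
		 * feature check before forwarding the interrupt.
		 */
		if (this_cpu_has(X86_FEATURE_HWP))
			notify_hwp_interrupt();
	}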