@@ -1845,7 +1845,7 @@ static void intel_pstate_notify_work(struct work_struct *work)
 		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
 }
 
-static DEFINE_SPINLOCK(hwp_notify_lock);
+static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
 static cpumask_t hwp_intr_enable_mask;
 
 #define HWP_GUARANTEED_PERF_CHANGE_STATUS	BIT(0)
@@ -1868,21 +1868,21 @@ void notify_hwp_interrupt(void)
 	if (!(value & status_mask))
 		return;
 
-	spin_lock_irqsave(&hwp_notify_lock, flags);
+	raw_spin_lock_irqsave(&hwp_notify_lock, flags);
 
 	if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
 		goto ack_intr;
 
 	schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
 			      msecs_to_jiffies(10));
 
-	spin_unlock_irqrestore(&hwp_notify_lock, flags);
+	raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
 
 	return;
 
 ack_intr:
 	wrmsrl_safe(MSR_HWP_STATUS, 0);
-	spin_unlock_irqrestore(&hwp_notify_lock, flags);
+	raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
 }
 
 static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
@@ -1895,9 +1895,9 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
 	/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
 	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
-	spin_lock_irq(&hwp_notify_lock);
+	raw_spin_lock_irq(&hwp_notify_lock);
 	cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
-	spin_unlock_irq(&hwp_notify_lock);
+	raw_spin_unlock_irq(&hwp_notify_lock);
 
 	if (cancel_work)
 		cancel_delayed_work_sync(&cpudata->hwp_notify_work);
@@ -1912,10 +1912,10 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
 	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
 		u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ;
 
-		spin_lock_irq(&hwp_notify_lock);
+		raw_spin_lock_irq(&hwp_notify_lock);
 		INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
 		cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
-		spin_unlock_irq(&hwp_notify_lock);
+		raw_spin_unlock_irq(&hwp_notify_lock);
 
 		if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
 			interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;
0 commit comments