Skip to content

Commit 8b4865c

Browse files
ukleinek authored and rafaeljw committed
cpufreq: intel_pstate: Make hwp_notify_lock a raw spinlock
notify_hwp_interrupt() is called via sysvec_thermal() -> smp_thermal_vector() -> intel_thermal_interrupt() in hard irq context. For this reason it must not use a simple spin_lock that sleeps with PREEMPT_RT enabled. So convert it to a raw spinlock.

Reported-by: xiao sheng wen <[email protected]>
Link: https://bugs.debian.org/1076483
Signed-off-by: Uwe Kleine-König <[email protected]>
Acked-by: Srinivas Pandruvada <[email protected]>
Acked-by: Sebastian Andrzej Siewior <[email protected]>
Tested-by: xiao sheng wen <[email protected]>
Link: https://patch.msgid.link/[email protected]
Cc: All applicable <[email protected]>
Signed-off-by: Rafael J. Wysocki <[email protected]>
1 parent 9852d85 commit 8b4865c

File tree

1 file changed

+8
-8
lines changed

1 file changed

+8
-8
lines changed

drivers/cpufreq/intel_pstate.c

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1845,7 +1845,7 @@ static void intel_pstate_notify_work(struct work_struct *work)
18451845
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
18461846
}
18471847

1848-
static DEFINE_SPINLOCK(hwp_notify_lock);
1848+
static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
18491849
static cpumask_t hwp_intr_enable_mask;
18501850

18511851
#define HWP_GUARANTEED_PERF_CHANGE_STATUS BIT(0)
@@ -1868,21 +1868,21 @@ void notify_hwp_interrupt(void)
18681868
if (!(value & status_mask))
18691869
return;
18701870

1871-
spin_lock_irqsave(&hwp_notify_lock, flags);
1871+
raw_spin_lock_irqsave(&hwp_notify_lock, flags);
18721872

18731873
if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
18741874
goto ack_intr;
18751875

18761876
schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
18771877
msecs_to_jiffies(10));
18781878

1879-
spin_unlock_irqrestore(&hwp_notify_lock, flags);
1879+
raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
18801880

18811881
return;
18821882

18831883
ack_intr:
18841884
wrmsrl_safe(MSR_HWP_STATUS, 0);
1885-
spin_unlock_irqrestore(&hwp_notify_lock, flags);
1885+
raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
18861886
}
18871887

18881888
static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
@@ -1895,9 +1895,9 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
18951895
/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
18961896
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
18971897

1898-
spin_lock_irq(&hwp_notify_lock);
1898+
raw_spin_lock_irq(&hwp_notify_lock);
18991899
cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
1900-
spin_unlock_irq(&hwp_notify_lock);
1900+
raw_spin_unlock_irq(&hwp_notify_lock);
19011901

19021902
if (cancel_work)
19031903
cancel_delayed_work_sync(&cpudata->hwp_notify_work);
@@ -1912,10 +1912,10 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
19121912
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
19131913
u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ;
19141914

1915-
spin_lock_irq(&hwp_notify_lock);
1915+
raw_spin_lock_irq(&hwp_notify_lock);
19161916
INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
19171917
cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
1918-
spin_unlock_irq(&hwp_notify_lock);
1918+
raw_spin_unlock_irq(&hwp_notify_lock);
19191919

19201920
if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
19211921
interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;

0 commit comments

Comments (0)