Skip to content

Commit 1bc27de

Browse files
committed
Merge tag 'pm-5.19-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management fixes from Rafael Wysocki: "These fix an intel_idle issue introduced during the 5.16 development cycle and two recent regressions in the system reboot/poweroff code. Specifics: - Fix CPUIDLE_FLAG_IRQ_ENABLE handling in intel_idle (Peter Zijlstra) - Allow all platforms to use the global poweroff handler and make non-syscall poweroff code paths work again (Dmitry Osipenko)" * tag 'pm-5.19-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: cpuidle,intel_idle: Fix CPUIDLE_FLAG_IRQ_ENABLE kernel/reboot: Fix powering off using a non-syscall code paths kernel/reboot: Use static handler for register_platform_power_off()
2 parents d56fd98 + 67e59f8 commit 1bc27de

File tree

2 files changed

+87
-32
lines changed

2 files changed

+87
-32
lines changed

drivers/idle/intel_idle.c

Lines changed: 25 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -115,6 +115,18 @@ static unsigned int mwait_substates __initdata;
115115
#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)
116116
#define MWAIT2flg(eax) ((eax & 0xFF) << 24)
117117

118+
static __always_inline int __intel_idle(struct cpuidle_device *dev,
119+
struct cpuidle_driver *drv, int index)
120+
{
121+
struct cpuidle_state *state = &drv->states[index];
122+
unsigned long eax = flg2MWAIT(state->flags);
123+
unsigned long ecx = 1; /* break on interrupt flag */
124+
125+
mwait_idle_with_hints(eax, ecx);
126+
127+
return index;
128+
}
129+
118130
/**
119131
* intel_idle - Ask the processor to enter the given idle state.
120132
* @dev: cpuidle device of the target CPU.
@@ -132,16 +144,19 @@ static unsigned int mwait_substates __initdata;
132144
static __cpuidle int intel_idle(struct cpuidle_device *dev,
133145
struct cpuidle_driver *drv, int index)
134146
{
135-
struct cpuidle_state *state = &drv->states[index];
136-
unsigned long eax = flg2MWAIT(state->flags);
137-
unsigned long ecx = 1; /* break on interrupt flag */
147+
return __intel_idle(dev, drv, index);
148+
}
138149

139-
if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE)
140-
local_irq_enable();
150+
static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
151+
struct cpuidle_driver *drv, int index)
152+
{
153+
int ret;
141154

142-
mwait_idle_with_hints(eax, ecx);
155+
raw_local_irq_enable();
156+
ret = __intel_idle(dev, drv, index);
157+
raw_local_irq_disable();
143158

144-
return index;
159+
return ret;
145160
}
146161

147162
/**
@@ -1801,6 +1816,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
18011816
/* Structure copy. */
18021817
drv->states[drv->state_count] = cpuidle_state_table[cstate];
18031818

1819+
if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE)
1820+
drv->states[drv->state_count].enter = intel_idle_irq;
1821+
18041822
if ((disabled_states_mask & BIT(drv->state_count)) ||
18051823
((icpu->use_acpi || force_use_acpi) &&
18061824
intel_idle_off_by_default(mwait_hint) &&

kernel/reboot.c

Lines changed: 62 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -315,6 +315,43 @@ static int sys_off_notify(struct notifier_block *nb,
315315
return handler->sys_off_cb(&data);
316316
}
317317

318+
static struct sys_off_handler platform_sys_off_handler;
319+
320+
static struct sys_off_handler *alloc_sys_off_handler(int priority)
321+
{
322+
struct sys_off_handler *handler;
323+
gfp_t flags;
324+
325+
/*
326+
	 * Platforms like m68k can't allocate the sys_off handler dynamically
327+
	 * at early boot time because the memory allocator isn't available yet.
328+
*/
329+
if (priority == SYS_OFF_PRIO_PLATFORM) {
330+
handler = &platform_sys_off_handler;
331+
if (handler->cb_data)
332+
return ERR_PTR(-EBUSY);
333+
} else {
334+
if (system_state > SYSTEM_RUNNING)
335+
flags = GFP_ATOMIC;
336+
else
337+
flags = GFP_KERNEL;
338+
339+
handler = kzalloc(sizeof(*handler), flags);
340+
if (!handler)
341+
return ERR_PTR(-ENOMEM);
342+
}
343+
344+
return handler;
345+
}
346+
347+
static void free_sys_off_handler(struct sys_off_handler *handler)
348+
{
349+
if (handler == &platform_sys_off_handler)
350+
memset(handler, 0, sizeof(*handler));
351+
else
352+
kfree(handler);
353+
}
354+
318355
/**
319356
* register_sys_off_handler - Register sys-off handler
320357
* @mode: Sys-off mode
@@ -345,9 +382,9 @@ register_sys_off_handler(enum sys_off_mode mode,
345382
struct sys_off_handler *handler;
346383
int err;
347384

348-
handler = kzalloc(sizeof(*handler), GFP_KERNEL);
349-
if (!handler)
350-
return ERR_PTR(-ENOMEM);
385+
handler = alloc_sys_off_handler(priority);
386+
if (IS_ERR(handler))
387+
return handler;
351388

352389
switch (mode) {
353390
case SYS_OFF_MODE_POWER_OFF_PREPARE:
@@ -364,7 +401,7 @@ register_sys_off_handler(enum sys_off_mode mode,
364401
break;
365402

366403
default:
367-
kfree(handler);
404+
free_sys_off_handler(handler);
368405
return ERR_PTR(-EINVAL);
369406
}
370407

@@ -391,7 +428,7 @@ register_sys_off_handler(enum sys_off_mode mode,
391428
}
392429

393430
if (err) {
394-
kfree(handler);
431+
free_sys_off_handler(handler);
395432
return ERR_PTR(err);
396433
}
397434

@@ -409,7 +446,7 @@ void unregister_sys_off_handler(struct sys_off_handler *handler)
409446
{
410447
int err;
411448

412-
if (!handler)
449+
if (IS_ERR_OR_NULL(handler))
413450
return;
414451

415452
if (handler->blocking)
@@ -422,7 +459,7 @@ void unregister_sys_off_handler(struct sys_off_handler *handler)
422459
/* sanity check, shall never happen */
423460
WARN_ON(err);
424461

425-
kfree(handler);
462+
free_sys_off_handler(handler);
426463
}
427464
EXPORT_SYMBOL_GPL(unregister_sys_off_handler);
428465

@@ -584,7 +621,23 @@ static void do_kernel_power_off_prepare(void)
584621
*/
585622
void do_kernel_power_off(void)
586623
{
624+
struct sys_off_handler *sys_off = NULL;
625+
626+
/*
627+
* Register sys-off handlers for legacy PM callback. This allows
628+
	 * legacy PM callbacks to temporarily co-exist with the new sys-off API.
629+
*
630+
	 * TODO: Remove legacy handlers once all legacy PM users have been
631+
* switched to the sys-off based APIs.
632+
*/
633+
if (pm_power_off)
634+
sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
635+
SYS_OFF_PRIO_DEFAULT,
636+
legacy_pm_power_off, NULL);
637+
587638
atomic_notifier_call_chain(&power_off_handler_list, 0, NULL);
639+
640+
unregister_sys_off_handler(sys_off);
588641
}
589642

590643
/**
@@ -595,7 +648,8 @@ void do_kernel_power_off(void)
595648
*/
596649
bool kernel_can_power_off(void)
597650
{
598-
return !atomic_notifier_call_chain_is_empty(&power_off_handler_list);
651+
return !atomic_notifier_call_chain_is_empty(&power_off_handler_list) ||
652+
pm_power_off;
599653
}
600654
EXPORT_SYMBOL_GPL(kernel_can_power_off);
601655

@@ -630,7 +684,6 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
630684
void __user *, arg)
631685
{
632686
struct pid_namespace *pid_ns = task_active_pid_ns(current);
633-
struct sys_off_handler *sys_off = NULL;
634687
char buffer[256];
635688
int ret = 0;
636689

@@ -655,21 +708,6 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
655708
if (ret)
656709
return ret;
657710

658-
/*
659-
* Register sys-off handlers for legacy PM callback. This allows
660-
* legacy PM callbacks temporary co-exist with the new sys-off API.
661-
*
662-
* TODO: Remove legacy handlers once all legacy PM users will be
663-
* switched to the sys-off based APIs.
664-
*/
665-
if (pm_power_off) {
666-
sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
667-
SYS_OFF_PRIO_DEFAULT,
668-
legacy_pm_power_off, NULL);
669-
if (IS_ERR(sys_off))
670-
return PTR_ERR(sys_off);
671-
}
672-
673711
/* Instead of trying to make the power_off code look like
674712
* halt when pm_power_off is not set do it the easy way.
675713
*/
@@ -727,7 +765,6 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
727765
break;
728766
}
729767
mutex_unlock(&system_transition_mutex);
730-
unregister_sys_off_handler(sys_off);
731768
return ret;
732769
}
733770

0 commit comments

Comments
 (0)