
Commit 892d7c1

Merge tag 'pm-6.5-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management fixes from Rafael Wysocki:
 "Revert three recent intel_idle commits that introduced a functional
  issue, included a coding mistake and have been questioned at the
  design level"

* tag 'pm-6.5-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  Revert "intel_idle: Add support for using intel_idle in a VM guest using just hlt"
  Revert "intel_idle: Add a "Long HLT" C1 state for the VM guest mode"
  Revert "intel_idle: Add __init annotation to matchup_vm_state_with_baremetal()"
2 parents 3c05547 + 5534f44 commit 892d7c1

File tree

1 file changed (+1, -171)


drivers/idle/intel_idle.c

Lines changed: 1 addition & 171 deletions
@@ -199,43 +199,6 @@ static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev,
 	return __intel_idle(dev, drv, index);
 }
 
-static __always_inline int __intel_idle_hlt(struct cpuidle_device *dev,
-					    struct cpuidle_driver *drv, int index)
-{
-	raw_safe_halt();
-	raw_local_irq_disable();
-	return index;
-}
-
-/**
- * intel_idle_hlt - Ask the processor to enter the given idle state using hlt.
- * @dev: cpuidle device of the target CPU.
- * @drv: cpuidle driver (assumed to point to intel_idle_driver).
- * @index: Target idle state index.
- *
- * Use the HLT instruction to notify the processor that the CPU represented by
- * @dev is idle and it can try to enter the idle state corresponding to @index.
- *
- * Must be called under local_irq_disable().
- */
-static __cpuidle int intel_idle_hlt(struct cpuidle_device *dev,
-				    struct cpuidle_driver *drv, int index)
-{
-	return __intel_idle_hlt(dev, drv, index);
-}
-
-static __cpuidle int intel_idle_hlt_irq_on(struct cpuidle_device *dev,
-					   struct cpuidle_driver *drv, int index)
-{
-	int ret;
-
-	raw_local_irq_enable();
-	ret = __intel_idle_hlt(dev, drv, index);
-	raw_local_irq_disable();
-
-	return ret;
-}
-
 /**
  * intel_idle_s2idle - Ask the processor to enter the given idle state.
  * @dev: cpuidle device of the target CPU.
@@ -1279,25 +1242,6 @@ static struct cpuidle_state snr_cstates[] __initdata = {
 		.enter = NULL }
 };
 
-static struct cpuidle_state vmguest_cstates[] __initdata = {
-	{
-		.name = "C1",
-		.desc = "HLT",
-		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
-		.exit_latency = 5,
-		.target_residency = 10,
-		.enter = &intel_idle_hlt, },
-	{
-		.name = "C1L",
-		.desc = "Long HLT",
-		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TLB_FLUSHED,
-		.exit_latency = 5,
-		.target_residency = 200,
-		.enter = &intel_idle_hlt, },
-	{
-		.enter = NULL }
-};
-
 static const struct idle_cpu idle_cpu_nehalem __initconst = {
 	.state_table = nehalem_cstates,
 	.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
@@ -1897,16 +1841,6 @@ static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
 
 static void state_update_enter_method(struct cpuidle_state *state, int cstate)
 {
-	if (state->enter == intel_idle_hlt) {
-		if (force_irq_on) {
-			pr_info("forced intel_idle_irq for state %d\n", cstate);
-			state->enter = intel_idle_hlt_irq_on;
-		}
-		return;
-	}
-	if (state->enter == intel_idle_hlt_irq_on)
-		return; /* no update scenarios */
-
 	if (state->flags & CPUIDLE_FLAG_INIT_XSTATE) {
 		/*
 		 * Combining with XSTATE with IBRS or IRQ_ENABLE flags
@@ -1940,21 +1874,6 @@ static void state_update_enter_method(struct cpuidle_state *state, int cstate)
 	}
 }
 
-/*
- * For mwait based states, we want to verify the cpuid data to see if the state
- * is actually supported by this specific CPU.
- * For non-mwait based states, this check should be skipped.
- */
-static bool should_verify_mwait(struct cpuidle_state *state)
-{
-	if (state->enter == intel_idle_hlt)
-		return false;
-	if (state->enter == intel_idle_hlt_irq_on)
-		return false;
-
-	return true;
-}
-
 static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
 {
 	int cstate;
@@ -2003,7 +1922,7 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
 		}
 
 		mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
-		if (should_verify_mwait(&cpuidle_state_table[cstate]) && !intel_idle_verify_cstate(mwait_hint))
+		if (!intel_idle_verify_cstate(mwait_hint))
 			continue;
 
 		/* Structure copy. */
@@ -2137,93 +2056,6 @@ static void __init intel_idle_cpuidle_devices_uninit(void)
 		cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i));
 }
 
-/*
- * Match up the latency and break even point of the bare metal (cpu based)
- * states with the deepest VM available state.
- *
- * We only want to do this for the deepest state, the ones that has
- * the TLB_FLUSHED flag set on the .
- *
- * All our short idle states are dominated by vmexit/vmenter latencies,
- * not the underlying hardware latencies so we keep our values for these.
- */
-static void __init matchup_vm_state_with_baremetal(void)
-{
-	int cstate;
-
-	for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
-		int matching_cstate;
-
-		if (intel_idle_max_cstate_reached(cstate))
-			break;
-
-		if (!cpuidle_state_table[cstate].enter)
-			break;
-
-		if (!(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_TLB_FLUSHED))
-			continue;
-
-		for (matching_cstate = 0; matching_cstate < CPUIDLE_STATE_MAX; ++matching_cstate) {
-			if (!icpu->state_table[matching_cstate].enter)
-				break;
-			if (icpu->state_table[matching_cstate].exit_latency > cpuidle_state_table[cstate].exit_latency) {
-				cpuidle_state_table[cstate].exit_latency = icpu->state_table[matching_cstate].exit_latency;
-				cpuidle_state_table[cstate].target_residency = icpu->state_table[matching_cstate].target_residency;
-			}
-		}
-
-	}
-}
-
-
-static int __init intel_idle_vminit(const struct x86_cpu_id *id)
-{
-	int retval;
-
-	cpuidle_state_table = vmguest_cstates;
-
-	icpu = (const struct idle_cpu *)id->driver_data;
-
-	pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
-		 boot_cpu_data.x86_model);
-
-	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
-	if (!intel_idle_cpuidle_devices)
-		return -ENOMEM;
-
-	/*
-	 * We don't know exactly what the host will do when we go idle, but as a worst estimate
-	 * we can assume that the exit latency of the deepest host state will be hit for our
-	 * deep (long duration) guest idle state.
-	 * The same logic applies to the break even point for the long duration guest idle state.
-	 * So lets copy these two properties from the table we found for the host CPU type.
-	 */
-	matchup_vm_state_with_baremetal();
-
-	intel_idle_cpuidle_driver_init(&intel_idle_driver);
-
-	retval = cpuidle_register_driver(&intel_idle_driver);
-	if (retval) {
-		struct cpuidle_driver *drv = cpuidle_get_driver();
-		printk(KERN_DEBUG pr_fmt("intel_idle yielding to %s\n"),
-		       drv ? drv->name : "none");
-		goto init_driver_fail;
-	}
-
-	retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online",
-				   intel_idle_cpu_online, NULL);
-	if (retval < 0)
-		goto hp_setup_fail;
-
-	return 0;
-hp_setup_fail:
-	intel_idle_cpuidle_devices_uninit();
-	cpuidle_unregister_driver(&intel_idle_driver);
-init_driver_fail:
-	free_percpu(intel_idle_cpuidle_devices);
-	return retval;
-}
-
 static int __init intel_idle_init(void)
 {
 	const struct x86_cpu_id *id;
@@ -2242,8 +2074,6 @@ static int __init intel_idle_init(void)
 	id = x86_match_cpu(intel_idle_ids);
 	if (id) {
 		if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
-			if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
-				return intel_idle_vminit(id);
 			pr_debug("Please enable MWAIT in BIOS SETUP\n");
 			return -ENODEV;
 		}
