@@ -1287,13 +1287,6 @@ static struct cpuidle_state vmguest_cstates[] __initdata = {
 		.exit_latency = 5,
 		.target_residency = 10,
 		.enter = &intel_idle_hlt, },
-	{
-		.name = "C1L",
-		.desc = "Long HLT",
-		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TLB_FLUSHED,
-		.exit_latency = 5,
-		.target_residency = 200,
-		.enter = &intel_idle_hlt, },
 	{
 		.enter = NULL }
 };
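
For reference, the hunk above leaves vmguest_cstates with only the shallow HLT state plus the terminator. The fields of the surviving entry outside this hunk's context (.name, .desc, .flags) are not visible in the diff, so the sketch below fills them in as assumptions rather than quoting the driver:

/* Sketch of vmguest_cstates after this change; .name, .desc and .flags of the
 * remaining entry are assumed, only the last three fields appear in the hunk. */
static struct cpuidle_state vmguest_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "HLT",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 5,
		.target_residency = 10,
		.enter = &intel_idle_hlt, },
	{
		.enter = NULL }
};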
@@ -2137,45 +2130,6 @@ static void __init intel_idle_cpuidle_devices_uninit(void)
 		cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i));
 }
 
-/*
- * Match up the latency and break even point of the bare metal (cpu based)
- * states with the deepest VM available state.
- *
- * We only want to do this for the deepest state, the ones that has
- * the TLB_FLUSHED flag set on them.
- *
- * All our short idle states are dominated by vmexit/vmenter latencies,
- * not the underlying hardware latencies so we keep our values for these.
- */
-static void matchup_vm_state_with_baremetal(void)
-{
-	int cstate;
-
-	for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
-		int matching_cstate;
-
-		if (intel_idle_max_cstate_reached(cstate))
-			break;
-
-		if (!cpuidle_state_table[cstate].enter)
-			break;
-
-		if (!(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_TLB_FLUSHED))
-			continue;
-
-		for (matching_cstate = 0; matching_cstate < CPUIDLE_STATE_MAX; ++matching_cstate) {
-			if (!icpu->state_table[matching_cstate].enter)
-				break;
-			if (icpu->state_table[matching_cstate].exit_latency > cpuidle_state_table[cstate].exit_latency) {
-				cpuidle_state_table[cstate].exit_latency = icpu->state_table[matching_cstate].exit_latency;
-				cpuidle_state_table[cstate].target_residency = icpu->state_table[matching_cstate].target_residency;
-			}
-		}
-
-	}
-}
-
-
 static int __init intel_idle_vminit(const struct x86_cpu_id *id)
 {
 	int retval;
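
The block removed above matched the deep, TLB-flushing guest state against the deepest bare-metal state known for the host CPU model, copying that state's exit latency and break-even point onto it. A standalone, simplified sketch of that matching idea is below; the structs, tables and latency numbers are illustrative stand-ins, not the kernel's types or data:

/* Simplified stand-alone model of the matching logic removed above.
 * Structs, tables and numbers are made-up stand-ins, not kernel code. */
#include <stdio.h>

struct toy_state {
	const char *name;
	unsigned int exit_latency;      /* us */
	unsigned int target_residency;  /* us */
	int tlb_flushed;                /* stands in for CPUIDLE_FLAG_TLB_FLUSHED */
};

static struct toy_state guest[] = {
	{ "C1",  5,  10, 0 },
	{ "C1L", 5, 200, 1 },           /* deep guest state, dominated by host behaviour */
};

static struct toy_state host[] = {
	{ "C1",    2,    2, 0 },
	{ "C6",   85,  200, 0 },
	{ "C10", 890, 5000, 0 },        /* deepest bare-metal state */
};

int main(void)
{
	/* Only deep (TLB-flushing) guest states inherit the slower bare-metal
	 * values; shallow states keep their vmexit/vmenter-dominated numbers. */
	for (unsigned int g = 0; g < sizeof(guest) / sizeof(guest[0]); g++) {
		if (!guest[g].tlb_flushed)
			continue;
		for (unsigned int h = 0; h < sizeof(host) / sizeof(host[0]); h++) {
			if (host[h].exit_latency > guest[g].exit_latency) {
				guest[g].exit_latency = host[h].exit_latency;
				guest[g].target_residency = host[h].target_residency;
			}
		}
		printf("%s: exit_latency=%u target_residency=%u\n",
		       guest[g].name, guest[g].exit_latency, guest[g].target_residency);
	}
	return 0;
}

Run against these toy tables it prints the deep state with the deepest host state's latency and break-even point, which is the adjustment the removed helper made to cpuidle_state_table.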
@@ -2191,15 +2145,6 @@ static int __init intel_idle_vminit(const struct x86_cpu_id *id)
 	if (!intel_idle_cpuidle_devices)
 		return -ENOMEM;
 
-	/*
-	 * We don't know exactly what the host will do when we go idle, but as a worst estimate
-	 * we can assume that the exit latency of the deepest host state will be hit for our
-	 * deep (long duration) guest idle state.
-	 * The same logic applies to the break even point for the long duration guest idle state.
-	 * So lets copy these two properties from the table we found for the host CPU type.
-	 */
-	matchup_vm_state_with_baremetal();
-
 	intel_idle_cpuidle_driver_init(&intel_idle_driver);
 
 	retval = cpuidle_register_driver(&intel_idle_driver);
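
The call removed above was the only user of the matchup helper; with the deep C1L state gone there is nothing left to adjust before the driver is registered. As background on why exit_latency and target_residency mattered, the toy governor below (not kernel code; the numbers only mirror the states in this patch) picks the deepest state whose break-even point fits the predicted idle time and whose exit latency fits the latency limit:

/* Toy illustration of how exit_latency / target_residency drive state choice.
 * Not kernel code; values are illustrative only. */
#include <stdio.h>

struct toy_state {
	const char *name;
	unsigned int exit_latency;      /* us */
	unsigned int target_residency;  /* us */
};

static int pick_state(const struct toy_state *s, int n,
		      unsigned int predicted_us, unsigned int latency_limit_us)
{
	int best = 0;

	for (int i = 1; i < n; i++) {
		if (s[i].target_residency <= predicted_us &&
		    s[i].exit_latency <= latency_limit_us)
			best = i;
	}
	return best;
}

int main(void)
{
	const struct toy_state states[] = {
		{ "C1",  5,  10 },
		{ "C1L", 5, 200 },      /* the state this patch removes */
	};

	/* Long predicted idle: the deep state wins; short idle: stay shallow. */
	printf("%s\n", states[pick_state(states, 2, 1000, 50)].name);  /* C1L */
	printf("%s\n", states[pick_state(states, 2,   50, 50)].name);  /* C1  */
	return 0;
}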