@@ -1132,13 +1132,15 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
1132
1132
* List of msr numbers which we expose to userspace through KVM_GET_MSRS
1133
1133
* and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
1134
1134
*
1135
- * This list is modified at module load time to reflect the
1135
+ * The three MSR lists (msrs_to_save, emulated_msrs, msr_based_features)
1136
+ * extract the supported MSRs from the related const lists.
1137
+ * msrs_to_save is selected from the msrs_to_save_all to reflect the
1136
1138
* capabilities of the host cpu. This capabilities test skips MSRs that are
1137
- * kvm-specific. Those are put in emulated_msrs ; filtering of emulated_msrs
1139
+ * kvm-specific. Those are put in emulated_msrs_all ; filtering of emulated_msrs
1138
1140
* may depend on host virtualization features rather than host cpu features.
1139
1141
*/
1140
1142
1141
- static u32 msrs_to_save [] = {
1143
+ static const u32 msrs_to_save_all [] = {
1142
1144
MSR_IA32_SYSENTER_CS , MSR_IA32_SYSENTER_ESP , MSR_IA32_SYSENTER_EIP ,
1143
1145
MSR_STAR ,
1144
1146
#ifdef CONFIG_X86_64
@@ -1179,9 +1181,10 @@ static u32 msrs_to_save[] = {
1179
1181
MSR_ARCH_PERFMON_EVENTSEL0 + 16 , MSR_ARCH_PERFMON_EVENTSEL0 + 17 ,
1180
1182
};
1181
1183
1184
+ static u32 msrs_to_save [ARRAY_SIZE (msrs_to_save_all )];
1182
1185
static unsigned num_msrs_to_save ;
1183
1186
1184
- static u32 emulated_msrs [] = {
1187
+ static const u32 emulated_msrs_all [] = {
1185
1188
MSR_KVM_SYSTEM_TIME , MSR_KVM_WALL_CLOCK ,
1186
1189
MSR_KVM_SYSTEM_TIME_NEW , MSR_KVM_WALL_CLOCK_NEW ,
1187
1190
HV_X64_MSR_GUEST_OS_ID , HV_X64_MSR_HYPERCALL ,
@@ -1220,7 +1223,7 @@ static u32 emulated_msrs[] = {
1220
1223
* by arch/x86/kvm/vmx/nested.c based on CPUID or other MSRs.
1221
1224
* We always support the "true" VMX control MSRs, even if the host
1222
1225
* processor does not, so I am putting these registers here rather
1223
- * than in msrs_to_save .
1226
+ * than in msrs_to_save_all .
1224
1227
*/
1225
1228
MSR_IA32_VMX_BASIC ,
1226
1229
MSR_IA32_VMX_TRUE_PINBASED_CTLS ,
@@ -1239,13 +1242,14 @@ static u32 emulated_msrs[] = {
1239
1242
MSR_KVM_POLL_CONTROL ,
1240
1243
};
1241
1244
1245
+ static u32 emulated_msrs [ARRAY_SIZE (emulated_msrs_all )];
1242
1246
static unsigned num_emulated_msrs ;
1243
1247
1244
1248
/*
1245
1249
* List of msr numbers which are used to expose MSR-based features that
1246
1250
* can be used by a hypervisor to validate requested CPU features.
1247
1251
*/
1248
- static u32 msr_based_features [] = {
1252
+ static const u32 msr_based_features_all [] = {
1249
1253
MSR_IA32_VMX_BASIC ,
1250
1254
MSR_IA32_VMX_TRUE_PINBASED_CTLS ,
1251
1255
MSR_IA32_VMX_PINBASED_CTLS ,
@@ -1270,6 +1274,7 @@ static u32 msr_based_features[] = {
1270
1274
MSR_IA32_ARCH_CAPABILITIES ,
1271
1275
};
1272
1276
1277
+ static u32 msr_based_features [ARRAY_SIZE (msr_based_features_all )];
1273
1278
static unsigned int num_msr_based_features ;
1274
1279
1275
1280
static u64 kvm_get_arch_capabilities (void )
@@ -5090,22 +5095,22 @@ static void kvm_init_msr_list(void)
5090
5095
{
5091
5096
struct x86_pmu_capability x86_pmu ;
5092
5097
u32 dummy [2 ];
5093
- unsigned i , j ;
5098
+ unsigned i ;
5094
5099
5095
5100
BUILD_BUG_ON_MSG (INTEL_PMC_MAX_FIXED != 4 ,
5096
- "Please update the fixed PMCs in msrs_to_save []" );
5101
+ "Please update the fixed PMCs in msrs_to_save_all []" );
5097
5102
5098
5103
perf_get_x86_pmu_capability (& x86_pmu );
5099
5104
5100
- for (i = j = 0 ; i < ARRAY_SIZE (msrs_to_save ); i ++ ) {
5101
- if (rdmsr_safe (msrs_to_save [i ], & dummy [0 ], & dummy [1 ]) < 0 )
5105
+ for (i = 0 ; i < ARRAY_SIZE (msrs_to_save_all ); i ++ ) {
5106
+ if (rdmsr_safe (msrs_to_save_all [i ], & dummy [0 ], & dummy [1 ]) < 0 )
5102
5107
continue ;
5103
5108
5104
5109
/*
5105
5110
* Even MSRs that are valid in the host may not be exposed
5106
5111
* to the guests in some cases.
5107
5112
*/
5108
- switch (msrs_to_save [i ]) {
5113
+ switch (msrs_to_save_all [i ]) {
5109
5114
case MSR_IA32_BNDCFGS :
5110
5115
if (!kvm_mpx_supported ())
5111
5116
continue ;
@@ -5133,52 +5138,43 @@ static void kvm_init_msr_list(void)
5133
5138
break ;
5134
5139
case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B : {
5135
5140
if (!kvm_x86_ops -> pt_supported () ||
5136
- msrs_to_save [i ] - MSR_IA32_RTIT_ADDR0_A >=
5141
+ msrs_to_save_all [i ] - MSR_IA32_RTIT_ADDR0_A >=
5137
5142
intel_pt_validate_hw_cap (PT_CAP_num_address_ranges ) * 2 )
5138
5143
continue ;
5139
5144
break ;
5140
5145
case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17 :
5141
- if (msrs_to_save [i ] - MSR_ARCH_PERFMON_PERFCTR0 >=
5146
+ if (msrs_to_save_all [i ] - MSR_ARCH_PERFMON_PERFCTR0 >=
5142
5147
min (INTEL_PMC_MAX_GENERIC , x86_pmu .num_counters_gp ))
5143
5148
continue ;
5144
5149
break ;
5145
5150
case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 17 :
5146
- if (msrs_to_save [i ] - MSR_ARCH_PERFMON_EVENTSEL0 >=
5151
+ if (msrs_to_save_all [i ] - MSR_ARCH_PERFMON_EVENTSEL0 >=
5147
5152
min (INTEL_PMC_MAX_GENERIC , x86_pmu .num_counters_gp ))
5148
5153
continue ;
5149
5154
}
5150
5155
default :
5151
5156
break ;
5152
5157
}
5153
5158
5154
- if (j < i )
5155
- msrs_to_save [j ] = msrs_to_save [i ];
5156
- j ++ ;
5159
+ msrs_to_save [num_msrs_to_save ++ ] = msrs_to_save_all [i ];
5157
5160
}
5158
- num_msrs_to_save = j ;
5159
5161
5160
- for (i = j = 0 ; i < ARRAY_SIZE (emulated_msrs ); i ++ ) {
5161
- if (!kvm_x86_ops -> has_emulated_msr (emulated_msrs [i ]))
5162
+ for (i = 0 ; i < ARRAY_SIZE (emulated_msrs_all ); i ++ ) {
5163
+ if (!kvm_x86_ops -> has_emulated_msr (emulated_msrs_all [i ]))
5162
5164
continue ;
5163
5165
5164
- if (j < i )
5165
- emulated_msrs [j ] = emulated_msrs [i ];
5166
- j ++ ;
5166
+ emulated_msrs [num_emulated_msrs ++ ] = emulated_msrs_all [i ];
5167
5167
}
5168
- num_emulated_msrs = j ;
5169
5168
5170
- for (i = j = 0 ; i < ARRAY_SIZE (msr_based_features ); i ++ ) {
5169
+ for (i = 0 ; i < ARRAY_SIZE (msr_based_features_all ); i ++ ) {
5171
5170
struct kvm_msr_entry msr ;
5172
5171
5173
- msr .index = msr_based_features [i ];
5172
+ msr .index = msr_based_features_all [i ];
5174
5173
if (kvm_get_msr_feature (& msr ))
5175
5174
continue ;
5176
5175
5177
- if (j < i )
5178
- msr_based_features [j ] = msr_based_features [i ];
5179
- j ++ ;
5176
+ msr_based_features [num_msr_based_features ++ ] = msr_based_features_all [i ];
5180
5177
}
5181
- num_msr_based_features = j ;
5182
5178
}
5183
5179
5184
5180
static int vcpu_mmio_write (struct kvm_vcpu * vcpu , gpa_t addr , int len ,
0 commit comments