@@ -17,9 +17,10 @@ struct kvm_msr {
1717 const u64 write_val ;
1818 const u64 rsvd_val ;
1919 const u32 index ;
20+ const bool is_kvm_defined ;
2021};
2122
22- #define ____MSR_TEST (msr , str , val , rsvd , reset , feat , f2 ) \
23+ #define ____MSR_TEST (msr , str , val , rsvd , reset , feat , f2 , is_kvm ) \
2324{ \
2425 .index = msr, \
2526 .name = str, \
@@ -28,10 +29,11 @@ struct kvm_msr {
2829 .reset_val = reset, \
2930 .feature = X86_FEATURE_ ##feat, \
3031 .feature2 = X86_FEATURE_ ##f2, \
32+ .is_kvm_defined = is_kvm, \
3133}
3234
3335#define __MSR_TEST (msr , str , val , rsvd , reset , feat ) \
34- ____MSR_TEST(msr, str, val, rsvd, reset, feat, feat)
36+ ____MSR_TEST(msr, str, val, rsvd, reset, feat, feat, false )
3537
3638#define MSR_TEST_NON_ZERO (msr , val , rsvd , reset , feat ) \
3739 __MSR_TEST(msr, #msr, val, rsvd, reset, feat)
@@ -40,7 +42,7 @@ struct kvm_msr {
4042 __MSR_TEST(msr, #msr, val, rsvd, 0, feat)
4143
4244#define MSR_TEST2 (msr , val , rsvd , feat , f2 ) \
43- ____MSR_TEST(msr, #msr, val, rsvd, 0, feat, f2)
45+ ____MSR_TEST(msr, #msr, val, rsvd, 0, feat, f2, false )
4446
4547/*
4648 * Note, use a page aligned value for the canonical value so that the value
@@ -58,6 +60,9 @@ static const u64 u64_val = 0xaaaa5555aaaa5555ull;
5860#define MSR_TEST_CANONICAL (msr , feat ) \
5961 __MSR_TEST(msr, #msr, canonical_val, NONCANONICAL, 0, feat)
6062
63+ #define MSR_TEST_KVM (msr , val , rsvd , feat ) \
64+ ____MSR_TEST(KVM_REG_ ##msr, #msr, val, rsvd, 0, feat, feat, true)
65+
6166/*
6267 * The main struct must be scoped to a function due to the use of structures to
6368 * define features. For the global structure, allocate enough space for the
@@ -203,6 +208,83 @@ static void guest_main(void)
203208static bool has_one_reg ;
204209static bool use_one_reg ;
205210
211+ #define KVM_X86_MAX_NR_REGS 1
212+
/*
 * Return true if @reg is enumerated by KVM_GET_REG_LIST for @vcpu, i.e. if
 * the register is supported via KVM_{G,S}ET_ONE_REG.
 */
213+ static bool vcpu_has_reg (struct kvm_vcpu * vcpu , u64 reg )
214+ {
215+ struct {
216+ struct kvm_reg_list list ;
217+ u64 regs [KVM_X86_MAX_NR_REGS ];
218+ } regs = {};
219+ int r , i ;
220+ 
221+ /*
222+ * If KVM_GET_REG_LIST succeeds with n=0, i.e. there are no supported
223+ * regs, then the vCPU obviously doesn't support the reg.
224+ */
225+ r = __vcpu_ioctl (vcpu , KVM_GET_REG_LIST , & regs .list );
226+ if (!r )
227+ return false;
228+ 
/* NOTE(review): per the KVM API, a too-small n fails with E2BIG and the
 * kernel writes the required count back into list.n — the assert below
 * relies on that write-back; confirm against Documentation/virt/kvm/api.rst.
 */
229+ TEST_ASSERT_EQ (errno , E2BIG );
230+ 
231+ /*
232+ * KVM x86 is expected to support enumerating a relative small number
233+ * of regs. The majority of registers supported by KVM_{G,S}ET_ONE_REG
234+ * are enumerated via other ioctls, e.g. KVM_GET_MSR_INDEX_LIST. For
235+ * simplicity, hardcode the maximum number of regs and manually update
236+ * the test as necessary.
237+ */
238+ TEST_ASSERT (regs .list .n <= KVM_X86_MAX_NR_REGS ,
239+ "KVM reports %llu regs, test expects at most %u regs, stale test?" ,
240+ regs .list .n , KVM_X86_MAX_NR_REGS );
241+ 
/* Second call with list.n now sized by the kernel; asserting variant, so
 * any failure here aborts the test.  Then scan for an exact ID match.
 */
242+ vcpu_ioctl (vcpu , KVM_GET_REG_LIST , & regs .list );
243+ for (i = 0 ; i < regs .list .n ; i ++ ) {
244+ if (regs .regs [i ] == reg )
245+ return true;
246+ }
247+ 
248+ return false;
249+ }
250+
/*
 * Host-side test of a KVM-defined register (msrs[idx], a global set by the
 * caller) via KVM_{G,S}ET_ONE_REG: verify enumeration matches CPUID, verify
 * reset/write/read-back values, and verify the reserved value is rejected.
 * No-op unless ONE_REG is being used for this test run (use_one_reg).
 */
251+ static void host_test_kvm_reg (struct kvm_vcpu * vcpu )
252+ {
253+ bool has_reg = vcpu_cpuid_has (vcpu , msrs [idx ].feature );
254+ u64 reset_val = msrs [idx ].reset_val ;
255+ u64 write_val = msrs [idx ].write_val ;
256+ u64 rsvd_val = msrs [idx ].rsvd_val ;
257+ u32 reg = msrs [idx ].index ;
258+ u64 val ;
259+ int r ;
260+ 
261+ if (!use_one_reg )
262+ return ;
263+ 
/* Enumeration in KVM_GET_REG_LIST must agree with guest CPUID support. */
264+ TEST_ASSERT_EQ (vcpu_has_reg (vcpu , KVM_X86_REG_KVM (reg )), has_reg );
265+ 
266+ if (!has_reg ) {
267+ r = __vcpu_get_reg (vcpu , KVM_X86_REG_KVM (reg ), & val );
268+ TEST_ASSERT (r && errno == EINVAL ,
269+ "Expected failure on get_reg(0x%x)" , reg );
/*
 * For an unsupported reg, *every* value must be rejected by set_reg,
 * including 0; repurpose rsvd_val so the shared tail below covers that.
 */
270+ rsvd_val = 0 ;
271+ goto out ;
272+ }
273+ 
/* Supported reg: reset value must be observable before any write... */
274+ val = vcpu_get_reg (vcpu , KVM_X86_REG_KVM (reg ));
275+ TEST_ASSERT (val == reset_val , "Wanted 0x%lx from get_reg(0x%x), got 0x%lx" ,
276+ reset_val , reg , val );
277+ 
/* ...and a legal write must read back exactly. */
278+ vcpu_set_reg (vcpu , KVM_X86_REG_KVM (reg ), write_val );
279+ val = vcpu_get_reg (vcpu , KVM_X86_REG_KVM (reg ));
280+ TEST_ASSERT (val == write_val , "Wanted 0x%lx from get_reg(0x%x), got 0x%lx" ,
281+ write_val , reg , val );
282+ 
/* Shared tail: setting the reserved (or any, if unsupported) value fails. */
283+ out :
284+ r = __vcpu_set_reg (vcpu , KVM_X86_REG_KVM (reg ), rsvd_val );
285+ TEST_ASSERT (r , "Expected failure on set_reg(0x%x, 0x%lx)" , reg , rsvd_val );
286+ }
287+
206288static void host_test_msr (struct kvm_vcpu * vcpu , u64 guest_val )
207289{
208290 u64 reset_val = msrs [idx ].reset_val ;
@@ -314,6 +396,8 @@ static void test_msrs(void)
314396 MSR_TEST (MSR_IA32_PL2_SSP , canonical_val , canonical_val | 1 , SHSTK ),
315397 MSR_TEST_CANONICAL (MSR_IA32_PL3_SSP , SHSTK ),
316398 MSR_TEST (MSR_IA32_PL3_SSP , canonical_val , canonical_val | 1 , SHSTK ),
399+
400+ MSR_TEST_KVM (GUEST_SSP , canonical_val , NONCANONICAL , SHSTK ),
317401 };
318402
319403 const struct kvm_x86_cpu_feature feat_none = X86_FEATURE_NONE ;
@@ -329,6 +413,7 @@ static void test_msrs(void)
329413 const int NR_VCPUS = 3 ;
330414 struct kvm_vcpu * vcpus [NR_VCPUS ];
331415 struct kvm_vm * vm ;
416+ int i ;
332417
333418 kvm_static_assert (sizeof (__msrs ) <= sizeof (msrs ));
334419 kvm_static_assert (ARRAY_SIZE (__msrs ) <= ARRAY_SIZE (msrs ));
@@ -359,6 +444,12 @@ static void test_msrs(void)
359444 }
360445
361446 for (idx = 0 ; idx < ARRAY_SIZE (__msrs ); idx ++ ) {
447+ if (msrs [idx ].is_kvm_defined ) {
448+ for (i = 0 ; i < NR_VCPUS ; i ++ )
449+ host_test_kvm_reg (vcpus [i ]);
450+ continue ;
451+ }
452+
362453 sync_global_to_guest (vm , idx );
363454
364455 vcpus_run (vcpus , NR_VCPUS );
0 commit comments