@@ -53,15 +53,28 @@ static inline void sync_with_host(uint64_t phase)
 		     : "+a" (phase));
 }
 
-void self_smi(void)
+static void self_smi(void)
 {
 	x2apic_write_reg(APIC_ICR,
 			 APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
 }
 
-void guest_code(void *arg)
+static void l2_guest_code(void)
 {
+	sync_with_host(8);
+
+	sync_with_host(10);
+
+	vmcall();
+}
+
+static void guest_code(void *arg)
+{
+	#define L2_GUEST_STACK_SIZE 64
+	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
 	uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
+	struct svm_test_data *svm = arg;
+	struct vmx_pages *vmx_pages = arg;
 
 	sync_with_host(1);
 
@@ -74,21 +87,50 @@ void guest_code(void *arg)
 	sync_with_host(4);
 
 	if (arg) {
-		if (cpu_has_svm())
-			generic_svm_setup(arg, NULL, NULL);
-		else
-			GUEST_ASSERT(prepare_for_vmx_operation(arg));
+		if (cpu_has_svm()) {
+			generic_svm_setup(svm, l2_guest_code,
+					  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+		} else {
+			GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+			GUEST_ASSERT(load_vmcs(vmx_pages));
+			prepare_vmcs(vmx_pages, l2_guest_code,
+				     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+		}
 
 		sync_with_host(5);
 
 		self_smi();
 
 		sync_with_host(7);
+
+		if (cpu_has_svm()) {
+			run_guest(svm->vmcb, svm->vmcb_gpa);
+			svm->vmcb->save.rip += 3;
+			run_guest(svm->vmcb, svm->vmcb_gpa);
+		} else {
+			vmlaunch();
+			vmresume();
+		}
+
+		/* Stages 8-11 are eaten by SMM (SMRAM_STAGE reported instead) */
+		sync_with_host(12);
 	}
 
 	sync_with_host(DONE);
 }
 
+void inject_smi(struct kvm_vm *vm)
+{
+	struct kvm_vcpu_events events;
+
+	vcpu_events_get(vm, VCPU_ID, &events);
+
+	events.smi.pending = 1;
+	events.flags |= KVM_VCPUEVENT_VALID_SMM;
+
+	vcpu_events_set(vm, VCPU_ID, &events);
+}
+
 int main(int argc, char *argv[])
 {
 	vm_vaddr_t nested_gva = 0;
@@ -147,6 +189,22 @@ int main(int argc, char *argv[])
 			    "Unexpected stage: #%x, got %x",
 			    stage, stage_reported);
 
+		/*
+		 * Enter SMM during L2 execution and check that we correctly
+		 * return from it. Do not perform save/restore while in SMM yet.
+		 */
+		if (stage == 8) {
+			inject_smi(vm);
+			continue;
+		}
+
+		/*
+		 * Perform save/restore while the guest is in SMM triggered
+		 * during L2 execution.
+		 */
+		if (stage == 10)
+			inject_smi(vm);
+
 		state = vcpu_save_state(vm, VCPU_ID);
 		kvm_vm_release(vm);
 		kvm_vm_restart(vm, O_RDWR);