@@ -109,4 +109,71 @@ static inline int __vmx_handle_ept_violation(struct kvm_vcpu *vcpu, gpa_t gpa,
109
109
return kvm_mmu_page_fault (vcpu , gpa , error_code , NULL , 0 );
110
110
}
111
111
112
/*
 * Notify a vCPU that it has a posted interrupt pending: send the posted-
 * interrupt notification IPI if the target vCPU is in non-root (guest) mode
 * on another pCPU, otherwise wake the vCPU so it picks up the interrupt on
 * the next VM-Enter.  The vector must already be set in the PIR by the
 * caller before invoking this.
 */
static inline void kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
						     int pi_vec)
{
#ifdef CONFIG_SMP
	if (vcpu->mode == IN_GUEST_MODE) {
		/*
		 * The vector of the virtual interrupt has already been set in
		 * the PIR.  Send a notification event to deliver the virtual
		 * interrupt unless the vCPU is the currently running vCPU,
		 * i.e. the event is being sent from a fastpath VM-Exit
		 * handler, in which case the PIR will be synced to the vIRR
		 * before re-entering the guest.
		 *
		 * When the target is not the running vCPU, the following
		 * possibilities emerge:
		 *
		 * Case 1: vCPU stays in non-root mode. Sending a notification
		 * event posts the interrupt to the vCPU.
		 *
		 * Case 2: vCPU exits to root mode and is still runnable. The
		 * PIR will be synced to the vIRR before re-entering the guest.
		 * Sending a notification event is ok as the host IRQ handler
		 * will ignore the spurious event.
		 *
		 * Case 3: vCPU exits to root mode and is blocked. vcpu_block()
		 * has already synced PIR to vIRR and never blocks the vCPU if
		 * the vIRR is not empty. Therefore, a blocked vCPU here does
		 * not wait for any requested interrupts in PIR, and sending a
		 * notification event also results in a benign, spurious event.
		 */

		if (vcpu != kvm_get_running_vcpu())
			__apic_send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
		return;
	}
#endif
	/*
	 * The vCPU isn't in the guest; wake the vCPU in case it is blocking,
	 * otherwise do nothing as KVM will grab the highest priority pending
	 * IRQ via ->sync_pir_to_irr() in vcpu_enter_guest().
	 */
	kvm_vcpu_wake_up(vcpu);
}
155
+
156
/*
 * Post an interrupt to a vCPU's PIR and trigger the vCPU to process the
 * interrupt if necessary.
 */
static inline void __vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu,
						  struct pi_desc *pi_desc,
						  int vector)
{
	/*
	 * If the vector was already pending in the PIR, a previous delivery
	 * is responsible for notifying the vCPU; nothing more to do.
	 */
	if (pi_test_and_set_pir(vector, pi_desc))
		return;

	/* If a previous notification has sent the IPI, nothing to do. */
	if (pi_test_and_set_on(pi_desc))
		return;

	/*
	 * The implied barrier in pi_test_and_set_on() pairs with the smp_mb_*()
	 * after setting vcpu->mode in vcpu_enter_guest(), thus the vCPU is
	 * guaranteed to see PID.ON=1 and sync the PIR to IRR if triggering a
	 * posted interrupt "fails" because vcpu->mode != IN_GUEST_MODE.
	 */
	kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
}
178
+
112
179
#endif /* __KVM_X86_VMX_COMMON_H */
0 commit comments