@@ -2705,11 +2705,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
	 * Hard-disable interrupts, and check resched flag and signals.
	 * If we need to reschedule or deliver a signal, clean up
	 * and return without going into the guest(s).
+	 * If the hpte_setup_done flag has been cleared, don't go into the
+	 * guest because that means a HPT resize operation is in progress.
	 */
	local_irq_disable();
	hard_irq_disable();
	if (lazy_irq_pending() || need_resched() ||
-	    recheck_signals(&core_info)) {
+	    recheck_signals(&core_info) ||
+	    (!kvm_is_radix(vc->kvm) && !vc->kvm->arch.hpte_setup_done)) {
		local_irq_enable();
		vc->vcore_state = VCORE_INACTIVE;
		/* Unlock all except the primary vcore */
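A note on the bail-out added above: it only works because of the ordering between vcpus_running and hpte_setup_done that the smp_mb() in the fourth hunk below enforces. The following user-space C11 sketch is an analogue of that handshake, not the kernel code: the field names mirror kvm->arch, but the function names and the use of C11 atomics in place of smp_mb() are invented for illustration.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int vcpus_running;	/* analogue of kvm->arch.vcpus_running */
static atomic_bool hpte_setup_done;	/* analogue of kvm->arch.hpte_setup_done */

/* vcpu entry side: advertise ourselves, then test the flag. */
static bool vcpu_may_enter_guest(void)
{
	atomic_fetch_add(&vcpus_running, 1);
	atomic_thread_fence(memory_order_seq_cst);	/* the "smp_mb()" */
	/* Flag clear means a resize may be in flight: don't enter the
	 * guest; (re)do the HPT setup first, as this patch arranges. */
	return atomic_load(&hpte_setup_done);
}

/* resize side: clear the flag, then test for runners. */
static bool resize_may_proceed(void)
{
	atomic_store(&hpte_setup_done, false);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with the fence above */
	if (atomic_load(&vcpus_running) != 0) {
		atomic_store(&hpte_setup_done, true);	/* lost the race */
		return false;		/* the caller would return -EBUSY */
	}
	return true;	/* no vcpu can enter until the flag is set again */
}

The fences guarantee that at least one of two racing sides observes the other's store, so a resize never proceeds while a vcpu is in (or entering) the guest.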
@@ -3078,7 +3081,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 
 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
-	int n_ceded, i;
+	int n_ceded, i, r;
	struct kvmppc_vcore *vc;
	struct kvm_vcpu *v;
 
@@ -3132,6 +3135,20 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       !signal_pending(current)) {
+		/* See if the HPT and VRMA are ready to go */
+		if (!kvm_is_radix(vcpu->kvm) &&
+		    !vcpu->kvm->arch.hpte_setup_done) {
+			spin_unlock(&vc->lock);
+			r = kvmppc_hv_setup_htab_rma(vcpu);
+			spin_lock(&vc->lock);
+			if (r) {
+				kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+				kvm_run->fail_entry.hardware_entry_failure_reason = 0;
+				vcpu->arch.ret = r;
+				break;
+			}
+		}
+
		if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
			kvmppc_vcore_end_preempt(vc);
 
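The setup retry above follows the usual drop-the-lock-around-a-sleeping-call shape: kvmppc_hv_setup_htab_rma() can sleep, so vc->lock is released around it and re-taken afterwards, and the flag is re-tested on the next loop pass. A minimal pthread sketch of that shape, with invented names (in the kernel the flag is set inside the setup call itself, not by the loop):

#include <pthread.h>

static pthread_mutex_t vc_lock = PTHREAD_MUTEX_INITIALIZER;
static int setup_done;			/* stand-in for hpte_setup_done */

static int blocking_setup(void)		/* may sleep: call without vc_lock */
{
	/* ... allocate and initialise the hash page table ... */
	return 0;
}

static int run_loop(void)
{
	int r = 0;

	pthread_mutex_lock(&vc_lock);
	while (!setup_done) {		/* re-test under the lock each pass */
		pthread_mutex_unlock(&vc_lock);
		r = blocking_setup();	/* lock dropped: state may change */
		pthread_mutex_lock(&vc_lock);
		if (r)
			break;		/* propagate the failure, as the patch
					 * does via vcpu->arch.ret */
		setup_done = 1;		/* in KVM the setup call sets this */
	}
	pthread_mutex_unlock(&vc_lock);
	return r;
}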
@@ -3249,13 +3266,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
	/* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */
	smp_mb();
 
-	/* On the first time here, set up HTAB and VRMA */
-	if (!kvm_is_radix(vcpu->kvm) && !vcpu->kvm->arch.hpte_setup_done) {
-		r = kvmppc_hv_setup_htab_rma(vcpu);
-		if (r)
-			goto out;
-	}
-
	flush_all_to_thread(current);
 
	/* Save userspace EBB and other register values */
@@ -3303,7 +3313,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
	}
	mtspr(SPRN_VRSAVE, user_vrsave);
 
- out:
	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
	atomic_dec(&vcpu->kvm->arch.vcpus_running);
	return r;
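Taken together, these hunks move the one-time HTAB/VRMA setup out of kvmppc_vcpu_run_hv() and into the kvmppc_run_vcpu() loop, so a later HPT resize that clears hpte_setup_done is noticed the next time the vcpu tries to run and the table is set up again before guest entry. With the early setup call gone, the out: label has no remaining users, and setup failure is now reported through kvm_run->exit_reason and vcpu->arch.ret instead of the goto path.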