Skip to content

Commit ba4e627

Browse files
committed
Merge tag 'kvm-ppc-next-5.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into HEAD
PPC KVM update for 5.8 - Updates and bug fixes for secure guest support - Other minor bug fixes and cleanups.
2 parents 3741679 + 11362b1 commit ba4e627

22 files changed

+276
-237
lines changed

arch/powerpc/include/asm/kvm_book3s.h

Lines changed: 7 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -155,12 +155,11 @@ extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
155155
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
156156
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
157157
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
158-
extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
159-
struct kvm_vcpu *vcpu, unsigned long addr,
160-
unsigned long status);
158+
extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
159+
unsigned long addr, unsigned long status);
161160
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
162161
unsigned long slb_v, unsigned long valid);
163-
extern int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
162+
extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
164163
unsigned long gpa, gva_t ea, int is_store);
165164

166165
extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
@@ -174,8 +173,7 @@ extern void kvmppc_mmu_hpte_sysexit(void);
174173
extern int kvmppc_mmu_hv_init(void);
175174
extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
176175

177-
extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
178-
struct kvm_vcpu *vcpu,
176+
extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
179177
unsigned long ea, unsigned long dsisr);
180178
extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
181179
gva_t eaddr, void *to, void *from,
@@ -234,7 +232,7 @@ extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
234232
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
235233
bool upper, u32 val);
236234
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
237-
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
235+
extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
238236
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
239237
bool writing, bool *writable);
240238
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
@@ -300,12 +298,12 @@ void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
300298
void kvmhv_release_all_nested(struct kvm *kvm);
301299
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
302300
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
303-
int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu,
301+
int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
304302
u64 time_limit, unsigned long lpcr);
305303
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
306304
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
307305
struct hv_guest_state *hr);
308-
long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu);
306+
long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
309307

310308
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
311309

arch/powerpc/include/asm/kvm_host.h

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -795,7 +795,6 @@ struct kvm_vcpu_arch {
795795
struct mmio_hpte_cache_entry *pgfault_cache;
796796

797797
struct task_struct *run_task;
798-
struct kvm_run *kvm_run;
799798

800799
spinlock_t vpa_update_lock;
801800
struct kvmppc_vpa vpa;

arch/powerpc/include/asm/kvm_ppc.h

Lines changed: 13 additions & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -58,28 +58,28 @@ enum xlate_readwrite {
5858
XLATE_WRITE /* check for write permissions */
5959
};
6060

61-
extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
62-
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
61+
extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
62+
extern int __kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
6363
extern void kvmppc_handler_highmem(void);
6464

6565
extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
66-
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
66+
extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
6767
unsigned int rt, unsigned int bytes,
6868
int is_default_endian);
69-
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
69+
extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
7070
unsigned int rt, unsigned int bytes,
7171
int is_default_endian);
72-
extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
72+
extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
7373
unsigned int rt, unsigned int bytes,
7474
int is_default_endian, int mmio_sign_extend);
75-
extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
75+
extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
7676
unsigned int rt, unsigned int bytes, int is_default_endian);
77-
extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
77+
extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
7878
unsigned int rs, unsigned int bytes, int is_default_endian);
79-
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
79+
extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
8080
u64 val, unsigned int bytes,
8181
int is_default_endian);
82-
extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
82+
extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
8383
int rs, unsigned int bytes,
8484
int is_default_endian);
8585

@@ -90,10 +90,9 @@ extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
9090
bool data);
9191
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
9292
bool data);
93-
extern int kvmppc_emulate_instruction(struct kvm_run *run,
94-
struct kvm_vcpu *vcpu);
93+
extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
9594
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
96-
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
95+
extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
9796
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
9897
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
9998
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
@@ -267,7 +266,7 @@ struct kvmppc_ops {
267266
void (*vcpu_put)(struct kvm_vcpu *vcpu);
268267
void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
269268
void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
270-
int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
269+
int (*vcpu_run)(struct kvm_vcpu *vcpu);
271270
int (*vcpu_create)(struct kvm_vcpu *vcpu);
272271
void (*vcpu_free)(struct kvm_vcpu *vcpu);
273272
int (*check_requests)(struct kvm_vcpu *vcpu);
@@ -291,7 +290,7 @@ struct kvmppc_ops {
291290
int (*init_vm)(struct kvm *kvm);
292291
void (*destroy_vm)(struct kvm *kvm);
293292
int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
294-
int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
293+
int (*emulate_op)(struct kvm_vcpu *vcpu,
295294
unsigned int inst, int *advance);
296295
int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
297296
int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);

arch/powerpc/kvm/book3s.c

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -755,9 +755,9 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
755755
}
756756
EXPORT_SYMBOL_GPL(kvmppc_set_msr);
757757

758-
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
758+
int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
759759
{
760-
return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
760+
return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu);
761761
}
762762

763763
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,

arch/powerpc/kvm/book3s.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -18,7 +18,7 @@ extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte);
1818

1919
extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu);
2020
extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
21-
extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
21+
extern int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu,
2222
unsigned int inst, int *advance);
2323
extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu,
2424
int sprn, ulong spr_val);

arch/powerpc/kvm/book3s_64_mmu_hv.c

Lines changed: 6 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -413,7 +413,7 @@ static int instruction_is_store(unsigned int instr)
413413
return (instr & mask) != 0;
414414
}
415415

416-
int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
416+
int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
417417
unsigned long gpa, gva_t ea, int is_store)
418418
{
419419
u32 last_inst;
@@ -473,10 +473,10 @@ int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
473473

474474
vcpu->arch.paddr_accessed = gpa;
475475
vcpu->arch.vaddr_accessed = ea;
476-
return kvmppc_emulate_mmio(run, vcpu);
476+
return kvmppc_emulate_mmio(vcpu);
477477
}
478478

479-
int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
479+
int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
480480
unsigned long ea, unsigned long dsisr)
481481
{
482482
struct kvm *kvm = vcpu->kvm;
@@ -499,7 +499,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
499499
pte_t pte, *ptep;
500500

501501
if (kvm_is_radix(kvm))
502-
return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr);
502+
return kvmppc_book3s_radix_page_fault(vcpu, ea, dsisr);
503503

504504
/*
505505
* Real-mode code has already searched the HPT and found the
@@ -519,7 +519,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
519519
gpa_base = r & HPTE_R_RPN & ~(psize - 1);
520520
gfn_base = gpa_base >> PAGE_SHIFT;
521521
gpa = gpa_base | (ea & (psize - 1));
522-
return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
522+
return kvmppc_hv_emulate_mmio(vcpu, gpa, ea,
523523
dsisr & DSISR_ISSTORE);
524524
}
525525
}
@@ -555,7 +555,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
555555

556556
/* No memslot means it's an emulated MMIO region */
557557
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
558-
return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
558+
return kvmppc_hv_emulate_mmio(vcpu, gpa, ea,
559559
dsisr & DSISR_ISSTORE);
560560

561561
/*

arch/powerpc/kvm/book3s_64_mmu_radix.c

Lines changed: 28 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -353,7 +353,13 @@ static struct kmem_cache *kvm_pmd_cache;
353353

354354
static pte_t *kvmppc_pte_alloc(void)
355355
{
356-
return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
356+
pte_t *pte;
357+
358+
pte = kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
359+
/* pmd_populate() will only reference _pa(pte). */
360+
kmemleak_ignore(pte);
361+
362+
return pte;
357363
}
358364

359365
static void kvmppc_pte_free(pte_t *ptep)
@@ -363,7 +369,13 @@ static void kvmppc_pte_free(pte_t *ptep)
363369

364370
static pmd_t *kvmppc_pmd_alloc(void)
365371
{
366-
return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
372+
pmd_t *pmd;
373+
374+
pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
375+
/* pud_populate() will only reference _pa(pmd). */
376+
kmemleak_ignore(pmd);
377+
378+
return pmd;
367379
}
368380

369381
static void kvmppc_pmd_free(pmd_t *pmdp)
@@ -417,9 +429,13 @@ void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
417429
* Callers are responsible for flushing the PWC.
418430
*
419431
* When page tables are being unmapped/freed as part of page fault path
420-
* (full == false), ptes are not expected. There is code to unmap them
421-
* and emit a warning if encountered, but there may already be data
422-
* corruption due to the unexpected mappings.
432+
* (full == false), valid ptes are generally not expected; however, there
433+
* is one situation where they arise, which is when dirty page logging is
434+
* turned off for a memslot while the VM is running. The new memslot
435+
* becomes visible to page faults before the memslot commit function
436+
* gets to flush the memslot, which can lead to a 2MB page mapping being
437+
* installed for a guest physical address where there are already 64kB
438+
* (or 4kB) mappings (of sub-pages of the same 2MB page).
423439
*/
424440
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
425441
unsigned int lpid)
@@ -433,7 +449,6 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
433449
for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
434450
if (pte_val(*p) == 0)
435451
continue;
436-
WARN_ON_ONCE(1);
437452
kvmppc_unmap_pte(kvm, p,
438453
pte_pfn(*p) << PAGE_SHIFT,
439454
PAGE_SHIFT, NULL, lpid);
@@ -887,7 +902,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
887902
return ret;
888903
}
889904

890-
int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
905+
int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
891906
unsigned long ea, unsigned long dsisr)
892907
{
893908
struct kvm *kvm = vcpu->kvm;
@@ -933,7 +948,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
933948
kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
934949
return RESUME_GUEST;
935950
}
936-
return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
951+
return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
937952
}
938953

939954
if (memslot->flags & KVM_MEM_READONLY) {
@@ -1115,6 +1130,11 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
11151130
kvm->arch.lpid);
11161131
gpa += PAGE_SIZE;
11171132
}
1133+
/*
1134+
* Increase the mmu notifier sequence number to prevent any page
1135+
* fault that read the memslot earlier from writing a PTE.
1136+
*/
1137+
kvm->mmu_notifier_seq++;
11181138
spin_unlock(&kvm->mmu_lock);
11191139
}
11201140

arch/powerpc/kvm/book3s_64_vio.c

Lines changed: 14 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -73,6 +73,7 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
7373
struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
7474
struct iommu_table_group *table_group = NULL;
7575

76+
rcu_read_lock();
7677
list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
7778

7879
table_group = iommu_group_get_iommudata(grp);
@@ -87,7 +88,9 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
8788
kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
8889
}
8990
}
91+
cond_resched_rcu();
9092
}
93+
rcu_read_unlock();
9194
}
9295

9396
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
@@ -105,12 +108,14 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
105108
if (!f.file)
106109
return -EBADF;
107110

111+
rcu_read_lock();
108112
list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
109113
if (stt == f.file->private_data) {
110114
found = true;
111115
break;
112116
}
113117
}
118+
rcu_read_unlock();
114119

115120
fdput(f);
116121

@@ -143,21 +148,25 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
143148
if (!tbl)
144149
return -EINVAL;
145150

151+
rcu_read_lock();
146152
list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
147153
if (tbl != stit->tbl)
148154
continue;
149155

150156
if (!kref_get_unless_zero(&stit->kref)) {
151157
/* stit is being destroyed */
152158
iommu_tce_table_put(tbl);
159+
rcu_read_unlock();
153160
return -ENOTTY;
154161
}
155162
/*
156163
* The table is already known to this KVM, we just increased
157164
* its KVM reference counter and can return.
158165
*/
166+
rcu_read_unlock();
159167
return 0;
160168
}
169+
rcu_read_unlock();
161170

162171
stit = kzalloc(sizeof(*stit), GFP_KERNEL);
163172
if (!stit) {
@@ -365,18 +374,19 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
365374
if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
366375
return H_TOO_HARD;
367376

377+
rcu_read_lock();
368378
list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
369379
unsigned long hpa = 0;
370380
struct mm_iommu_table_group_mem_t *mem;
371381
long shift = stit->tbl->it_page_shift;
372382

373383
mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
374-
if (!mem)
375-
return H_TOO_HARD;
376-
377-
if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa))
384+
if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) {
385+
rcu_read_unlock();
378386
return H_TOO_HARD;
387+
}
379388
}
389+
rcu_read_unlock();
380390

381391
return H_SUCCESS;
382392
}

arch/powerpc/kvm/book3s_emulate.c

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -235,7 +235,7 @@ void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
235235

236236
#endif
237237

238-
int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
238+
int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu,
239239
unsigned int inst, int *advance)
240240
{
241241
int emulated = EMULATE_DONE;
@@ -371,13 +371,13 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
371371
if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
372372
break;
373373

374-
run->papr_hcall.nr = cmd;
374+
vcpu->run->papr_hcall.nr = cmd;
375375
for (i = 0; i < 9; ++i) {
376376
ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
377-
run->papr_hcall.args[i] = gpr;
377+
vcpu->run->papr_hcall.args[i] = gpr;
378378
}
379379

380-
run->exit_reason = KVM_EXIT_PAPR_HCALL;
380+
vcpu->run->exit_reason = KVM_EXIT_PAPR_HCALL;
381381
vcpu->arch.hcall_needed = 1;
382382
emulated = EMULATE_EXIT_USER;
383383
break;
@@ -629,7 +629,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
629629
}
630630

631631
if (emulated == EMULATE_FAIL)
632-
emulated = kvmppc_emulate_paired_single(run, vcpu);
632+
emulated = kvmppc_emulate_paired_single(vcpu);
633633

634634
return emulated;
635635
}

0 commit comments

Comments (0)