Skip to content

Commit 7ec21d9

Browse files
uudiin authored and paulusmack committed
KVM: PPC: Clean up redundant kvm_run parameters in assembly
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu' structure. For historical reasons, many kvm-related function parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This patch does a unified cleanup of these remaining redundant parameters.

[paulus@ozlabs.org - Fixed places that were missed in book3s_interrupts.S]

Signed-off-by: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
1 parent 1508c22 commit 7ec21d9

File tree

6 files changed

+45
-50
lines changed

6 files changed

+45
-50
lines changed

arch/powerpc/include/asm/kvm_ppc.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ enum xlate_readwrite {
5959
};
6060

6161
extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
62-
extern int __kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
62+
extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
6363
extern void kvmppc_handler_highmem(void);
6464

6565
extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);

arch/powerpc/kvm/book3s_interrupts.S

Lines changed: 27 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -55,8 +55,7 @@
5555
****************************************************************************/
5656

5757
/* Registers:
58-
* r3: kvm_run pointer
59-
* r4: vcpu pointer
58+
* r3: vcpu pointer
6059
*/
6160
_GLOBAL(__kvmppc_vcpu_run)
6261

@@ -68,8 +67,8 @@ kvm_start_entry:
6867
/* Save host state to the stack */
6968
PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)
7069

71-
/* Save r3 (kvm_run) and r4 (vcpu) */
72-
SAVE_2GPRS(3, r1)
70+
/* Save r3 (vcpu) */
71+
SAVE_GPR(3, r1)
7372

7473
/* Save non-volatile registers (r14 - r31) */
7574
SAVE_NVGPRS(r1)
@@ -82,47 +81,46 @@ kvm_start_entry:
8281
PPC_STL r0, _LINK(r1)
8382

8483
/* Load non-volatile guest state from the vcpu */
85-
VCPU_LOAD_NVGPRS(r4)
84+
VCPU_LOAD_NVGPRS(r3)
8685

8786
kvm_start_lightweight:
8887
/* Copy registers into shadow vcpu so we can access them in real mode */
89-
mr r3, r4
9088
bl FUNC(kvmppc_copy_to_svcpu)
9189
nop
92-
REST_GPR(4, r1)
90+
REST_GPR(3, r1)
9391

9492
#ifdef CONFIG_PPC_BOOK3S_64
9593
/* Get the dcbz32 flag */
96-
PPC_LL r3, VCPU_HFLAGS(r4)
97-
rldicl r3, r3, 0, 63 /* r3 &= 1 */
98-
stb r3, HSTATE_RESTORE_HID5(r13)
94+
PPC_LL r0, VCPU_HFLAGS(r3)
95+
rldicl r0, r0, 0, 63 /* r0 &= 1 */
96+
stb r0, HSTATE_RESTORE_HID5(r13)
9997

10098
/* Load up guest SPRG3 value, since it's user readable */
101-
lwz r3, VCPU_SHAREDBE(r4)
102-
cmpwi r3, 0
103-
ld r5, VCPU_SHARED(r4)
99+
lbz r4, VCPU_SHAREDBE(r3)
100+
cmpwi r4, 0
101+
ld r5, VCPU_SHARED(r3)
104102
beq sprg3_little_endian
105103
sprg3_big_endian:
106104
#ifdef __BIG_ENDIAN__
107-
ld r3, VCPU_SHARED_SPRG3(r5)
105+
ld r4, VCPU_SHARED_SPRG3(r5)
108106
#else
109107
addi r5, r5, VCPU_SHARED_SPRG3
110-
ldbrx r3, 0, r5
108+
ldbrx r4, 0, r5
111109
#endif
112110
b after_sprg3_load
113111
sprg3_little_endian:
114112
#ifdef __LITTLE_ENDIAN__
115-
ld r3, VCPU_SHARED_SPRG3(r5)
113+
ld r4, VCPU_SHARED_SPRG3(r5)
116114
#else
117115
addi r5, r5, VCPU_SHARED_SPRG3
118-
ldbrx r3, 0, r5
116+
ldbrx r4, 0, r5
119117
#endif
120118

121119
after_sprg3_load:
122-
mtspr SPRN_SPRG3, r3
120+
mtspr SPRN_SPRG3, r4
123121
#endif /* CONFIG_PPC_BOOK3S_64 */
124122

125-
PPC_LL r4, VCPU_SHADOW_MSR(r4) /* get shadow_msr */
123+
PPC_LL r4, VCPU_SHADOW_MSR(r3) /* get shadow_msr */
126124

127125
/* Jump to segment patching handler and into our guest */
128126
bl FUNC(kvmppc_entry_trampoline)
@@ -146,7 +144,7 @@ after_sprg3_load:
146144
*
147145
*/
148146

149-
PPC_LL r3, GPR4(r1) /* vcpu pointer */
147+
PPC_LL r3, GPR3(r1) /* vcpu pointer */
150148

151149
/*
152150
* kvmppc_copy_from_svcpu can clobber volatile registers, save
@@ -169,7 +167,7 @@ after_sprg3_load:
169167
#endif /* CONFIG_PPC_BOOK3S_64 */
170168

171169
/* R7 = vcpu */
172-
PPC_LL r7, GPR4(r1)
170+
PPC_LL r7, GPR3(r1)
173171

174172
PPC_STL r14, VCPU_GPR(R14)(r7)
175173
PPC_STL r15, VCPU_GPR(R15)(r7)
@@ -190,11 +188,11 @@ after_sprg3_load:
190188
PPC_STL r30, VCPU_GPR(R30)(r7)
191189
PPC_STL r31, VCPU_GPR(R31)(r7)
192190

193-
/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
194-
lwz r5, VCPU_TRAP(r7)
191+
/* Pass the exit number as 2nd argument to kvmppc_handle_exit */
192+
lwz r4, VCPU_TRAP(r7)
195193

196-
/* Restore r3 (kvm_run) and r4 (vcpu) */
197-
REST_2GPRS(3, r1)
194+
/* Restore r3 (vcpu) */
195+
REST_GPR(3, r1)
198196
bl FUNC(kvmppc_handle_exit_pr)
199197

200198
/* If RESUME_GUEST, get back in the loop */
@@ -223,19 +221,19 @@ kvm_loop_heavyweight:
223221
PPC_LL r4, _LINK(r1)
224222
PPC_STL r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)
225223

226-
/* Load vcpu and cpu_run */
227-
REST_2GPRS(3, r1)
224+
/* Load vcpu */
225+
REST_GPR(3, r1)
228226

229227
/* Load non-volatile guest state from the vcpu */
230-
VCPU_LOAD_NVGPRS(r4)
228+
VCPU_LOAD_NVGPRS(r3)
231229

232230
/* Jump back into the beginning of this function */
233231
b kvm_start_lightweight
234232

235233
kvm_loop_lightweight:
236234

237235
/* We'll need the vcpu pointer */
238-
REST_GPR(4, r1)
236+
REST_GPR(3, r1)
239237

240238
/* Jump back into the beginning of this function */
241239
b kvm_start_lightweight

arch/powerpc/kvm/book3s_pr.c

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1151,9 +1151,9 @@ static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr)
11511151
return r;
11521152
}
11531153

1154-
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
1155-
unsigned int exit_nr)
1154+
int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
11561155
{
1156+
struct kvm_run *run = vcpu->run;
11571157
int r = RESUME_HOST;
11581158
int s;
11591159

@@ -1826,15 +1826,14 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
18261826

18271827
static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
18281828
{
1829-
struct kvm_run *run = vcpu->run;
18301829
int ret;
18311830
#ifdef CONFIG_ALTIVEC
18321831
unsigned long uninitialized_var(vrsave);
18331832
#endif
18341833

18351834
/* Check if we can run the vcpu at all */
18361835
if (!vcpu->arch.sane) {
1837-
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1836+
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
18381837
ret = -EINVAL;
18391838
goto out;
18401839
}
@@ -1861,7 +1860,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
18611860

18621861
kvmppc_fix_ee_before_entry();
18631862

1864-
ret = __kvmppc_vcpu_run(run, vcpu);
1863+
ret = __kvmppc_vcpu_run(vcpu);
18651864

18661865
kvmppc_clear_debug(vcpu);
18671866

arch/powerpc/kvm/booke.c

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -731,12 +731,11 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
731731

732732
int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
733733
{
734-
struct kvm_run *run = vcpu->run;
735734
int ret, s;
736735
struct debug_reg debug;
737736

738737
if (!vcpu->arch.sane) {
739-
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
738+
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
740739
return -EINVAL;
741740
}
742741

@@ -778,7 +777,7 @@ int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
778777
vcpu->arch.pgdir = vcpu->kvm->mm->pgd;
779778
kvmppc_fix_ee_before_entry();
780779

781-
ret = __kvmppc_vcpu_run(run, vcpu);
780+
ret = __kvmppc_vcpu_run(vcpu);
782781

783782
/* No need for guest_exit. It's done in handle_exit.
784783
We also get here with interrupts enabled. */
@@ -982,9 +981,9 @@ static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu,
982981
*
983982
* Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
984983
*/
985-
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
986-
unsigned int exit_nr)
984+
int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
987985
{
986+
struct kvm_run *run = vcpu->run;
988987
int r = RESUME_HOST;
989988
int s;
990989
int idx;

arch/powerpc/kvm/booke_interrupts.S

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -237,7 +237,7 @@ _GLOBAL(kvmppc_resume_host)
237237
/* Switch to kernel stack and jump to handler. */
238238
LOAD_REG_ADDR(r3, kvmppc_handle_exit)
239239
mtctr r3
240-
lwz r3, HOST_RUN(r1)
240+
mr r3, r4
241241
lwz r2, HOST_R2(r1)
242242
mr r14, r4 /* Save vcpu pointer. */
243243

@@ -337,15 +337,14 @@ heavyweight_exit:
337337

338338

339339
/* Registers:
340-
* r3: kvm_run pointer
341-
* r4: vcpu pointer
340+
* r3: vcpu pointer
342341
*/
343342
_GLOBAL(__kvmppc_vcpu_run)
344343
stwu r1, -HOST_STACK_SIZE(r1)
345-
stw r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */
344+
stw r1, VCPU_HOST_STACK(r3) /* Save stack pointer to vcpu. */
346345

347346
/* Save host state to stack. */
348-
stw r3, HOST_RUN(r1)
347+
mr r4, r3
349348
mflr r3
350349
stw r3, HOST_STACK_LR(r1)
351350
mfcr r5

arch/powerpc/kvm/bookehv_interrupts.S

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -434,9 +434,10 @@ _GLOBAL(kvmppc_resume_host)
434434
#endif
435435

436436
/* Switch to kernel stack and jump to handler. */
437-
PPC_LL r3, HOST_RUN(r1)
437+
mr r3, r4
438438
mr r5, r14 /* intno */
439439
mr r14, r4 /* Save vcpu pointer. */
440+
mr r4, r5
440441
bl kvmppc_handle_exit
441442

442443
/* Restore vcpu pointer and the nonvolatiles we used. */
@@ -525,15 +526,14 @@ heavyweight_exit:
525526
blr
526527

527528
/* Registers:
528-
* r3: kvm_run pointer
529-
* r4: vcpu pointer
529+
* r3: vcpu pointer
530530
*/
531531
_GLOBAL(__kvmppc_vcpu_run)
532532
stwu r1, -HOST_STACK_SIZE(r1)
533-
PPC_STL r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */
533+
PPC_STL r1, VCPU_HOST_STACK(r3) /* Save stack pointer to vcpu. */
534534

535535
/* Save host state to stack. */
536-
PPC_STL r3, HOST_RUN(r1)
536+
mr r4, r3
537537
mflr r3
538538
mfcr r5
539539
PPC_STL r3, HOST_STACK_LR(r1)

0 commit comments

Comments (0)