Skip to content

Commit 0e20f5e

Browse files
author
Marc Zyngier
committed
KVM: arm/arm64: Cleanup MMIO handling
Our MMIO handling is a bit odd, in the sense that it uses an intermediate per-vcpu structure to store the various decoded information that describe the access. But the same information is readily available in the HSR/ESR_EL2 field, and we actually use this field to populate the structure. Let's simplify the whole thing by getting rid of the superfluous structure and save a (tiny) bit of space in the vcpu structure. [32bit fix courtesy of Olof Johansson <[email protected]>] Signed-off-by: Marc Zyngier <[email protected]>
1 parent 31a9b0b commit 0e20f5e

File tree

9 files changed

+42
-117
lines changed

9 files changed

+42
-117
lines changed

arch/arm/include/asm/kvm_emulate.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,6 @@
99

1010
#include <linux/kvm_host.h>
1111
#include <asm/kvm_asm.h>
12-
#include <asm/kvm_mmio.h>
1312
#include <asm/kvm_arm.h>
1413
#include <asm/cputype.h>
1514

@@ -220,7 +219,7 @@ static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
220219
}
221220

222221
/* Get Access Size from a data abort */
223-
static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
222+
static inline unsigned int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
224223
{
225224
switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
226225
case 0:
@@ -231,7 +230,7 @@ static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
231230
return 4;
232231
default:
233232
kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
234-
return -EFAULT;
233+
return 4;
235234
}
236235
}
237236

arch/arm/include/asm/kvm_host.h

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,6 @@
1414
#include <asm/cputype.h>
1515
#include <asm/kvm.h>
1616
#include <asm/kvm_asm.h>
17-
#include <asm/kvm_mmio.h>
1817
#include <asm/fpstate.h>
1918
#include <kvm/arm_arch_timer.h>
2019

@@ -202,9 +201,6 @@ struct kvm_vcpu_arch {
202201
/* Don't run the guest (internal implementation need) */
203202
bool pause;
204203

205-
/* IO related fields */
206-
struct kvm_decode mmio_decode;
207-
208204
/* Cache some mmu pages needed inside spinlock regions */
209205
struct kvm_mmu_memory_cache mmu_page_cache;
210206

@@ -300,6 +296,14 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
300296
static inline void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
301297
int exception_index) {}
302298

299+
/* MMIO helpers */
300+
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
301+
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
302+
303+
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
304+
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
305+
phys_addr_t fault_ipa);
306+
303307
static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
304308
unsigned long hyp_stack_ptr,
305309
unsigned long vector_ptr)

arch/arm/include/asm/kvm_hyp.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
#include <linux/compiler.h>
1111
#include <linux/kvm_host.h>
1212
#include <asm/cp15.h>
13+
#include <asm/kvm_arm.h>
1314
#include <asm/vfp.h>
1415

1516
#define __hyp_text __section(.hyp.text) notrace

arch/arm/include/asm/kvm_mmio.h

Lines changed: 0 additions & 28 deletions
This file was deleted.

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,6 @@
1717
#include <asm/esr.h>
1818
#include <asm/kvm_arm.h>
1919
#include <asm/kvm_hyp.h>
20-
#include <asm/kvm_mmio.h>
2120
#include <asm/ptrace.h>
2221
#include <asm/cputype.h>
2322
#include <asm/virt.h>
@@ -341,7 +340,7 @@ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
341340
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
342341
}
343342

344-
static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
343+
static inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
345344
{
346345
return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
347346
}

arch/arm64/include/asm/kvm_host.h

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,6 @@
2424
#include <asm/fpsimd.h>
2525
#include <asm/kvm.h>
2626
#include <asm/kvm_asm.h>
27-
#include <asm/kvm_mmio.h>
2827
#include <asm/thread_info.h>
2928

3029
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -325,9 +324,6 @@ struct kvm_vcpu_arch {
325324
/* Don't run the guest (internal implementation need) */
326325
bool pause;
327326

328-
/* IO related fields */
329-
struct kvm_decode mmio_decode;
330-
331327
/* Cache some mmu pages needed inside spinlock regions */
332328
struct kvm_mmu_memory_cache mmu_page_cache;
333329

@@ -491,6 +487,14 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
491487
void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
492488
int exception_index);
493489

490+
/* MMIO helpers */
491+
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
492+
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
493+
494+
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
495+
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
496+
phys_addr_t fault_ipa);
497+
494498
int kvm_perf_init(void);
495499
int kvm_perf_teardown(void);
496500

arch/arm64/include/asm/kvm_mmio.h

Lines changed: 0 additions & 27 deletions
This file was deleted.

virt/kvm/arm/mmio.c

Lines changed: 22 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,6 @@
55
*/
66

77
#include <linux/kvm_host.h>
8-
#include <asm/kvm_mmio.h>
98
#include <asm/kvm_emulate.h>
109
#include <trace/events/kvm.h>
1110

@@ -92,26 +91,23 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
9291

9392
vcpu->mmio_needed = 0;
9493

95-
if (!run->mmio.is_write) {
96-
len = run->mmio.len;
97-
if (len > sizeof(unsigned long))
98-
return -EINVAL;
99-
94+
if (!kvm_vcpu_dabt_iswrite(vcpu)) {
95+
len = kvm_vcpu_dabt_get_as(vcpu);
10096
data = kvm_mmio_read_buf(run->mmio.data, len);
10197

102-
if (vcpu->arch.mmio_decode.sign_extend &&
98+
if (kvm_vcpu_dabt_issext(vcpu) &&
10399
len < sizeof(unsigned long)) {
104100
mask = 1U << ((len * 8) - 1);
105101
data = (data ^ mask) - mask;
106102
}
107103

108-
if (!vcpu->arch.mmio_decode.sixty_four)
104+
if (!kvm_vcpu_dabt_issf(vcpu))
109105
data = data & 0xffffffff;
110106

111107
trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
112108
&data);
113109
data = vcpu_data_host_to_guest(vcpu, data, len);
114-
vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
110+
vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
115111
}
116112

117113
/*
@@ -123,36 +119,6 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
123119
return 0;
124120
}
125121

126-
static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
127-
{
128-
unsigned long rt;
129-
int access_size;
130-
bool sign_extend;
131-
bool sixty_four;
132-
133-
if (kvm_vcpu_dabt_iss1tw(vcpu)) {
134-
/* page table accesses IO mem: tell guest to fix its TTBR */
135-
kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
136-
return 1;
137-
}
138-
139-
access_size = kvm_vcpu_dabt_get_as(vcpu);
140-
if (unlikely(access_size < 0))
141-
return access_size;
142-
143-
*is_write = kvm_vcpu_dabt_iswrite(vcpu);
144-
sign_extend = kvm_vcpu_dabt_issext(vcpu);
145-
sixty_four = kvm_vcpu_dabt_issf(vcpu);
146-
rt = kvm_vcpu_dabt_get_rd(vcpu);
147-
148-
*len = access_size;
149-
vcpu->arch.mmio_decode.sign_extend = sign_extend;
150-
vcpu->arch.mmio_decode.rt = rt;
151-
vcpu->arch.mmio_decode.sixty_four = sixty_four;
152-
153-
return 0;
154-
}
155-
156122
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
157123
phys_addr_t fault_ipa)
158124
{
@@ -164,15 +130,10 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
164130
u8 data_buf[8];
165131

166132
/*
167-
* Prepare MMIO operation. First decode the syndrome data we get
168-
* from the CPU. Then try if some in-kernel emulation feels
169-
* responsible, otherwise let user space do its magic.
133+
* No valid syndrome? Ask userspace for help if it has
134+
* volunteered to do so, and bail out otherwise.
170135
*/
171-
if (kvm_vcpu_dabt_isvalid(vcpu)) {
172-
ret = decode_hsr(vcpu, &is_write, &len);
173-
if (ret)
174-
return ret;
175-
} else {
136+
if (!kvm_vcpu_dabt_isvalid(vcpu)) {
176137
if (vcpu->kvm->arch.return_nisv_io_abort_to_user) {
177138
run->exit_reason = KVM_EXIT_ARM_NISV;
178139
run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);
@@ -184,7 +145,20 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
184145
return -ENOSYS;
185146
}
186147

187-
rt = vcpu->arch.mmio_decode.rt;
148+
/* Page table accesses IO mem: tell guest to fix its TTBR */
149+
if (kvm_vcpu_dabt_iss1tw(vcpu)) {
150+
kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
151+
return 1;
152+
}
153+
154+
/*
155+
* Prepare MMIO operation. First decode the syndrome data we get
156+
* from the CPU. Then try if some in-kernel emulation feels
157+
* responsible, otherwise let user space do its magic.
158+
*/
159+
is_write = kvm_vcpu_dabt_iswrite(vcpu);
160+
len = kvm_vcpu_dabt_get_as(vcpu);
161+
rt = kvm_vcpu_dabt_get_rd(vcpu);
188162

189163
if (is_write) {
190164
data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),

virt/kvm/arm/mmu.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,6 @@
1414
#include <asm/cacheflush.h>
1515
#include <asm/kvm_arm.h>
1616
#include <asm/kvm_mmu.h>
17-
#include <asm/kvm_mmio.h>
1817
#include <asm/kvm_ras.h>
1918
#include <asm/kvm_asm.h>
2019
#include <asm/kvm_emulate.h>

0 commit comments

Comments (0)