Skip to content

Commit 3a5154c

Browse files
oupton authored and Marc Zyngier committed
KVM: arm64: Take a pointer to walker data in kvm_dereference_pteref()
Rather than passing through the state of the KVM_PGTABLE_WALK_SHARED flag, just take a pointer to the whole walker structure instead. Move around struct kvm_pgtable and the RCU indirection such that the associated ifdeffery remains in one place while ensuring the walker + flags definitions precede their use. No functional change intended. Signed-off-by: Oliver Upton <[email protected]> Acked-by: Will Deacon <[email protected]> Signed-off-by: Marc Zyngier <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 1577cb5 commit 3a5154c

File tree

2 files changed

+76
-74
lines changed

2 files changed

+76
-74
lines changed

arch/arm64/include/asm/kvm_pgtable.h

Lines changed: 73 additions & 71 deletions
Original file line numberDiff line numberDiff line change
@@ -37,54 +37,6 @@ static inline u64 kvm_get_parange(u64 mmfr0)
3737

3838
typedef u64 kvm_pte_t;
3939

40-
/*
41-
* RCU cannot be used in a non-kernel context such as the hyp. As such, page
42-
* table walkers used in hyp do not call into RCU and instead use other
43-
* synchronization mechanisms (such as a spinlock).
44-
*/
45-
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
46-
47-
typedef kvm_pte_t *kvm_pteref_t;
48-
49-
static inline kvm_pte_t *kvm_dereference_pteref(kvm_pteref_t pteref, bool shared)
50-
{
51-
return pteref;
52-
}
53-
54-
static inline void kvm_pgtable_walk_begin(void) {}
55-
static inline void kvm_pgtable_walk_end(void) {}
56-
57-
static inline bool kvm_pgtable_walk_lock_held(void)
58-
{
59-
return true;
60-
}
61-
62-
#else
63-
64-
typedef kvm_pte_t __rcu *kvm_pteref_t;
65-
66-
static inline kvm_pte_t *kvm_dereference_pteref(kvm_pteref_t pteref, bool shared)
67-
{
68-
return rcu_dereference_check(pteref, !shared);
69-
}
70-
71-
static inline void kvm_pgtable_walk_begin(void)
72-
{
73-
rcu_read_lock();
74-
}
75-
76-
static inline void kvm_pgtable_walk_end(void)
77-
{
78-
rcu_read_unlock();
79-
}
80-
81-
static inline bool kvm_pgtable_walk_lock_held(void)
82-
{
83-
return rcu_read_lock_held();
84-
}
85-
86-
#endif
87-
8840
#define KVM_PTE_VALID BIT(0)
8941

9042
#define KVM_PTE_ADDR_MASK GENMASK(47, PAGE_SHIFT)
@@ -212,29 +164,6 @@ enum kvm_pgtable_prot {
212164
typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
213165
enum kvm_pgtable_prot prot);
214166

215-
/**
216-
* struct kvm_pgtable - KVM page-table.
217-
* @ia_bits: Maximum input address size, in bits.
218-
* @start_level: Level at which the page-table walk starts.
219-
* @pgd: Pointer to the first top-level entry of the page-table.
220-
* @mm_ops: Memory management callbacks.
221-
* @mmu: Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
222-
* @flags: Stage-2 page-table flags.
223-
* @force_pte_cb: Function that returns true if page level mappings must
224-
* be used instead of block mappings.
225-
*/
226-
struct kvm_pgtable {
227-
u32 ia_bits;
228-
u32 start_level;
229-
kvm_pteref_t pgd;
230-
struct kvm_pgtable_mm_ops *mm_ops;
231-
232-
/* Stage-2 only */
233-
struct kvm_s2_mmu *mmu;
234-
enum kvm_pgtable_stage2_flags flags;
235-
kvm_pgtable_force_pte_cb_t force_pte_cb;
236-
};
237-
238167
/**
239168
* enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
240169
* @KVM_PGTABLE_WALK_LEAF: Visit leaf entries, including invalid
@@ -285,6 +214,79 @@ struct kvm_pgtable_walker {
285214
const enum kvm_pgtable_walk_flags flags;
286215
};
287216

217+
/*
218+
* RCU cannot be used in a non-kernel context such as the hyp. As such, page
219+
* table walkers used in hyp do not call into RCU and instead use other
220+
* synchronization mechanisms (such as a spinlock).
221+
*/
222+
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
223+
224+
typedef kvm_pte_t *kvm_pteref_t;
225+
226+
static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
227+
kvm_pteref_t pteref)
228+
{
229+
return pteref;
230+
}
231+
232+
static inline void kvm_pgtable_walk_begin(void) {}
233+
static inline void kvm_pgtable_walk_end(void) {}
234+
235+
static inline bool kvm_pgtable_walk_lock_held(void)
236+
{
237+
return true;
238+
}
239+
240+
#else
241+
242+
typedef kvm_pte_t __rcu *kvm_pteref_t;
243+
244+
static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
245+
kvm_pteref_t pteref)
246+
{
247+
return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
248+
}
249+
250+
static inline void kvm_pgtable_walk_begin(void)
251+
{
252+
rcu_read_lock();
253+
}
254+
255+
static inline void kvm_pgtable_walk_end(void)
256+
{
257+
rcu_read_unlock();
258+
}
259+
260+
static inline bool kvm_pgtable_walk_lock_held(void)
261+
{
262+
return rcu_read_lock_held();
263+
}
264+
265+
#endif
266+
267+
/**
268+
* struct kvm_pgtable - KVM page-table.
269+
* @ia_bits: Maximum input address size, in bits.
270+
* @start_level: Level at which the page-table walk starts.
271+
* @pgd: Pointer to the first top-level entry of the page-table.
272+
* @mm_ops: Memory management callbacks.
273+
* @mmu: Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
274+
* @flags: Stage-2 page-table flags.
275+
* @force_pte_cb: Function that returns true if page level mappings must
276+
* be used instead of block mappings.
277+
*/
278+
struct kvm_pgtable {
279+
u32 ia_bits;
280+
u32 start_level;
281+
kvm_pteref_t pgd;
282+
struct kvm_pgtable_mm_ops *mm_ops;
283+
284+
/* Stage-2 only */
285+
struct kvm_s2_mmu *mmu;
286+
enum kvm_pgtable_stage2_flags flags;
287+
kvm_pgtable_force_pte_cb_t force_pte_cb;
288+
};
289+
288290
/**
289291
* kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
290292
* @pgt: Uninitialised page-table structure to initialise.

arch/arm64/kvm/hyp/pgtable.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -188,7 +188,7 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
188188
kvm_pteref_t pteref, u32 level)
189189
{
190190
enum kvm_pgtable_walk_flags flags = data->walker->flags;
191-
kvm_pte_t *ptep = kvm_dereference_pteref(pteref, flags & KVM_PGTABLE_WALK_SHARED);
191+
kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref);
192192
struct kvm_pgtable_visit_ctx ctx = {
193193
.ptep = ptep,
194194
.old = READ_ONCE(*ptep),
@@ -558,7 +558,7 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
558558
};
559559

560560
WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
561-
pgt->mm_ops->put_page(kvm_dereference_pteref(pgt->pgd, false));
561+
pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));
562562
pgt->pgd = NULL;
563563
}
564564

@@ -1241,7 +1241,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
12411241

12421242
WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
12431243
pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
1244-
pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(pgt->pgd, false), pgd_sz);
1244+
pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);
12451245
pgt->pgd = NULL;
12461246
}
12471247

0 commit comments

Comments
 (0)