
Commit 3864d17

Author: Marc Zyngier

Merge branch kvm-arm64/pkvm/restrict-hypercalls into kvmarm-master/next

* kvm-arm64/pkvm/restrict-hypercalls:
  : .
  : Restrict the use of some hypercalls as well as kexec once
  : the protected KVM mode has been initialised.
  : .
  KVM: arm64: Disable privileged hypercalls after pKVM finalisation
  KVM: arm64: Prevent re-finalisation of pKVM for a given CPU
  KVM: arm64: Propagate errors from __pkvm_prot_finalize hypercall
  KVM: arm64: Reject stub hypercalls after pKVM has been initialised
  arm64: Prevent kexec and hibernation if is_protected_kvm_enabled()
  KVM: arm64: Turn __KVM_HOST_SMCCC_FUNC_* into an enum (mostly)

Signed-off-by: Marc Zyngier <[email protected]>

2 parents 9e1ff30 + 057bed2, commit 3864d17

File tree: 6 files changed, +117 -60 lines

arch/arm64/include/asm/kvm_asm.h

Lines changed: 27 additions & 20 deletions
@@ -44,31 +44,38 @@
 #define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)
 
 #define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init 0
-#define __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run 1
-#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context 2
-#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa 3
-#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid 4
-#define __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context 5
-#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff 6
-#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs 7
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config 8
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr 9
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr 10
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs 11
-#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 12
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs 13
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs 14
-#define __KVM_HOST_SMCCC_FUNC___pkvm_init 15
-#define __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp 16
-#define __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping 17
-#define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector 18
-#define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize 19
-#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 20
 
 #ifndef __ASSEMBLY__
 
 #include <linux/mm.h>
 
+enum __kvm_host_smccc_func {
+	/* Hypercalls available only prior to pKVM finalisation */
+	/* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
+	__KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
+	__KVM_HOST_SMCCC_FUNC___pkvm_init,
+	__KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping,
+	__KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector,
+	__KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
+	__KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
+	__KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
+	__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
+
+	/* Hypercalls available after pKVM finalisation */
+	__KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
+	__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
+	__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
+	__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
+	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
+	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
+	__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
+	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
+	__KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr,
+	__KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr,
+	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
+	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs,
+};
+
 #define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
 #define DECLARE_KVM_NVHE_SYM(sym) extern char kvm_nvhe_sym(sym)[]
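Keeping __KVM_HOST_SMCCC_FUNC___kvm_hyp_init as a #define while everything else becomes an enumerator lets the assembly entry code keep seeing a preprocessor constant, and anchoring the enum at "+ 1" keeps the IDs contiguous from 0 with the pre-finalisation calls packed at the front. A minimal standalone sketch of that idiom, not kernel code (names shortened, values illustrative):

/* Standalone model: an enum anchored to a #define that assembly can also see. */
#include <stdio.h>

#define HOST_FUNC___hyp_init	0	/* shared with .S files via the preprocessor */

enum host_func {
	/* first C-only ID picks up right after the #define */
	HOST_FUNC___get_mdcr_el2 = HOST_FUNC___hyp_init + 1,
	HOST_FUNC___pkvm_init,
	HOST_FUNC___pkvm_prot_finalize,
	/* post-finalisation hypercalls follow */
	HOST_FUNC___vcpu_run,
};

int main(void)
{
	/* The enumerators keep the "IDs are contiguous from 0" property. */
	printf("__pkvm_prot_finalize = %d, __vcpu_run = %d\n",
	       HOST_FUNC___pkvm_prot_finalize, HOST_FUNC___vcpu_run);
	return 0;
}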

arch/arm64/kernel/smp.c

Lines changed: 2 additions & 1 deletion
@@ -1128,5 +1128,6 @@ bool cpus_are_stuck_in_kernel(void)
 {
 	bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
 
-	return !!cpus_stuck_in_kernel || smp_spin_tables;
+	return !!cpus_stuck_in_kernel || smp_spin_tables ||
+	       is_protected_kvm_enabled();
 }
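The functional change is one line: cpus_are_stuck_in_kernel() now also reports true when protected KVM is enabled, and the arm64 kexec and hibernation paths already refuse to proceed when CPUs are considered stuck, since a new kernel could never reclaim EL2 from pKVM. A standalone sketch of that gate, not kernel code; kexec_prepare() and the globals are illustrative stand-ins:

/* Standalone model of why kexec is refused once pKVM owns EL2. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int  cpus_stuck_in_kernel;		/* stand-in for the real counter */
static bool smp_spin_tables;			/* spin-table boot method in use? */
static bool protected_kvm = true;		/* ~ is_protected_kvm_enabled() */

static bool cpus_are_stuck_in_kernel(void)
{
	/* Mirrors the patched helper: pKVM alone is now enough. */
	return !!cpus_stuck_in_kernel || smp_spin_tables || protected_kvm;
}

static int kexec_prepare(void)
{
	/* Generic kexec/hibernation paths bail out on "stuck" CPUs. */
	return cpus_are_stuck_in_kernel() ? -EBUSY : 0;
}

int main(void)
{
	printf("kexec prepare -> %d\n", kexec_prepare());	/* -16 == -EBUSY */
	return 0;
}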

arch/arm64/kvm/arm.c

Lines changed: 42 additions & 19 deletions
@@ -1579,25 +1579,33 @@ static void cpu_set_hyp_vector(void)
 	kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot);
 }
 
-static void cpu_hyp_reinit(void)
+static void cpu_hyp_init_context(void)
 {
 	kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);
 
-	cpu_hyp_reset();
-
-	if (is_kernel_in_hyp_mode())
-		kvm_timer_init_vhe();
-	else
+	if (!is_kernel_in_hyp_mode())
 		cpu_init_hyp_mode();
+}
 
+static void cpu_hyp_init_features(void)
+{
 	cpu_set_hyp_vector();
-
 	kvm_arm_init_debug();
 
+	if (is_kernel_in_hyp_mode())
+		kvm_timer_init_vhe();
+
 	if (vgic_present)
 		kvm_vgic_init_cpu_hardware();
 }
 
+static void cpu_hyp_reinit(void)
+{
+	cpu_hyp_reset();
+	cpu_hyp_init_context();
+	cpu_hyp_init_features();
+}
+
 static void _kvm_arch_hardware_enable(void *discard)
 {
 	if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
@@ -1788,10 +1796,17 @@ static int do_pkvm_init(u32 hyp_va_bits)
 	int ret;
 
 	preempt_disable();
-	hyp_install_host_vector();
+	cpu_hyp_init_context();
 	ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size,
 				num_possible_cpus(), kern_hyp_va(per_cpu_base),
 				hyp_va_bits);
+	cpu_hyp_init_features();
+
+	/*
+	 * The stub hypercalls are now disabled, so set our local flag to
+	 * prevent a later re-init attempt in kvm_arch_hardware_enable().
+	 */
+	__this_cpu_write(kvm_arm_hardware_enabled, 1);
 	preempt_enable();
 
 	return ret;
@@ -1971,9 +1986,25 @@ static int init_hyp_mode(void)
 	return err;
 }
 
-static void _kvm_host_prot_finalize(void *discard)
+static void _kvm_host_prot_finalize(void *arg)
 {
-	WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize));
+	int *err = arg;
+
+	if (WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize)))
+		WRITE_ONCE(*err, -EINVAL);
+}
+
+static int pkvm_drop_host_privileges(void)
+{
+	int ret = 0;
+
+	/*
+	 * Flip the static key upfront as that may no longer be possible
+	 * once the host stage 2 is installed.
+	 */
+	static_branch_enable(&kvm_protected_mode_initialized);
+	on_each_cpu(_kvm_host_prot_finalize, &ret, 1);
+	return ret;
 }
 
 static int finalize_hyp_mode(void)
@@ -1987,15 +2018,7 @@ static int finalize_hyp_mode(void)
 	 * None of other sections should ever be introspected.
 	 */
 	kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
-
-	/*
-	 * Flip the static key upfront as that may no longer be possible
-	 * once the host stage 2 is installed.
-	 */
-	static_branch_enable(&kvm_protected_mode_initialized);
-	on_each_cpu(_kvm_host_prot_finalize, NULL, 1);
-
-	return 0;
+	return pkvm_drop_host_privileges();
 }
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
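Because on_each_cpu() callbacks return void, __pkvm_prot_finalize failures are now reported by handing every CPU a pointer to a shared error slot and having the callback store -EINVAL there. A standalone sketch of that pattern, not kernel code; pthreads stand in for per-CPU execution and __atomic_store_n() for WRITE_ONCE(), and all names here are illustrative:

/* Standalone model of the error propagation added in pkvm_drop_host_privileges(). */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

static int fake_prot_finalize(int cpu)
{
	/* Pretend finalisation fails on CPU 2. */
	return cpu == 2 ? -EPERM : 0;
}

struct finalize_args {
	int cpu;
	int *err;	/* shared error slot, as passed via on_each_cpu() */
};

static void *host_prot_finalize(void *arg)
{
	struct finalize_args *a = arg;

	if (fake_prot_finalize(a->cpu))
		__atomic_store_n(a->err, -EINVAL, __ATOMIC_RELAXED);	/* ~ WRITE_ONCE() */
	return NULL;
}

int main(void)
{
	pthread_t cpu[NR_CPUS];
	struct finalize_args args[NR_CPUS];
	int ret = 0;

	for (int i = 0; i < NR_CPUS; i++) {
		args[i] = (struct finalize_args){ .cpu = i, .err = &ret };
		pthread_create(&cpu[i], NULL, host_prot_finalize, &args[i]);
	}
	for (int i = 0; i < NR_CPUS; i++)
		pthread_join(cpu[i], NULL);

	printf("pkvm_drop_host_privileges() -> %d\n", ret);	/* -22 == -EINVAL */
	return 0;
}

Last failure wins, which is fine here: the caller only needs to know that at least one CPU could not be finalised.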

arch/arm64/kvm/hyp/nvhe/host.S

Lines changed: 17 additions & 9 deletions
@@ -110,17 +110,14 @@ SYM_FUNC_START(__hyp_do_panic)
 	b	__host_enter_for_panic
 SYM_FUNC_END(__hyp_do_panic)
 
-.macro host_el1_sync_vect
-	.align 7
-.L__vect_start\@:
-	stp	x0, x1, [sp, #-16]!
-	mrs	x0, esr_el2
-	lsr	x0, x0, #ESR_ELx_EC_SHIFT
-	cmp	x0, #ESR_ELx_EC_HVC64
-	b.ne	__host_exit
-
+SYM_FUNC_START(__host_hvc)
 	ldp	x0, x1, [sp]		// Don't fixup the stack yet
 
+	/* No stub for you, sonny Jim */
+alternative_if ARM64_KVM_PROTECTED_MODE
+	b	__host_exit
+alternative_else_nop_endif
+
 	/* Check for a stub HVC call */
 	cmp	x0, #HVC_STUB_HCALL_NR
 	b.hs	__host_exit
@@ -137,6 +134,17 @@ SYM_FUNC_END(__hyp_do_panic)
 	ldr	x5, =__kvm_handle_stub_hvc
 	hyp_pa	x5, x6
 	br	x5
+SYM_FUNC_END(__host_hvc)
+
+.macro host_el1_sync_vect
+	.align 7
+.L__vect_start\@:
+	stp	x0, x1, [sp, #-16]!
+	mrs	x0, esr_el2
+	lsr	x0, x0, #ESR_ELx_EC_SHIFT
+	cmp	x0, #ESR_ELx_EC_HVC64
+	b.eq	__host_hvc
+	b	__host_exit
.L__vect_end\@:
 .if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
 	.error "host_el1_sync_vect larger than vector entry"
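The vector now only branches to __host_hvc for HVC64 exceptions, and __host_hvc bails out to __host_exit before the stub-call check whenever ARM64_KVM_PROTECTED_MODE is patched in, so the EL2 stub interface becomes unreachable once pKVM is up. A standalone C model of that control flow, not the actual EL2 assembly; the HVC_STUB_HCALL_NR value here is illustrative:

/* Standalone model of the reworked host_el1_sync_vect / __host_hvc routing. */
#include <stdbool.h>
#include <stdio.h>

#define ESR_EC_HVC64		0x16	/* exception class: HVC from AArch64 */
#define HVC_STUB_HCALL_NR	2	/* stub calls occupy IDs below this (illustrative) */

enum route { HANDLE_STUB, HOST_EXIT };

static enum route host_el1_sync(unsigned int esr_ec, unsigned long x0,
				bool protected_mode)
{
	if (esr_ec != ESR_EC_HVC64)
		return HOST_EXIT;		/* not an HVC: generic exit path */

	/* __host_hvc: in protected mode, never honour stub hypercalls */
	if (protected_mode)
		return HOST_EXIT;

	if (x0 < HVC_STUB_HCALL_NR)
		return HANDLE_STUB;		/* jump to __kvm_handle_stub_hvc */

	return HOST_EXIT;
}

int main(void)
{
	printf("stub HVC, pKVM off: %d\n", host_el1_sync(ESR_EC_HVC64, 0, false));
	printf("stub HVC, pKVM on : %d\n", host_el1_sync(ESR_EC_HVC64, 0, true));
	return 0;
}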

arch/arm64/kvm/hyp/nvhe/hyp-main.c

Lines changed: 26 additions & 11 deletions
@@ -165,36 +165,51 @@ typedef void (*hcall_t)(struct kvm_cpu_context *);
 #define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
 
 static const hcall_t host_hcall[] = {
-	HANDLE_FUNC(__kvm_vcpu_run),
+	/* ___kvm_hyp_init */
+	HANDLE_FUNC(__kvm_get_mdcr_el2),
+	HANDLE_FUNC(__pkvm_init),
+	HANDLE_FUNC(__pkvm_create_private_mapping),
+	HANDLE_FUNC(__pkvm_cpu_set_vector),
+	HANDLE_FUNC(__kvm_enable_ssbs),
+	HANDLE_FUNC(__vgic_v3_init_lrs),
+	HANDLE_FUNC(__vgic_v3_get_gic_config),
+	HANDLE_FUNC(__pkvm_prot_finalize),
+
+	HANDLE_FUNC(__pkvm_host_share_hyp),
 	HANDLE_FUNC(__kvm_adjust_pc),
+	HANDLE_FUNC(__kvm_vcpu_run),
 	HANDLE_FUNC(__kvm_flush_vm_context),
 	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
 	HANDLE_FUNC(__kvm_tlb_flush_vmid),
 	HANDLE_FUNC(__kvm_flush_cpu_context),
 	HANDLE_FUNC(__kvm_timer_set_cntvoff),
-	HANDLE_FUNC(__kvm_enable_ssbs),
-	HANDLE_FUNC(__vgic_v3_get_gic_config),
 	HANDLE_FUNC(__vgic_v3_read_vmcr),
 	HANDLE_FUNC(__vgic_v3_write_vmcr),
-	HANDLE_FUNC(__vgic_v3_init_lrs),
-	HANDLE_FUNC(__kvm_get_mdcr_el2),
 	HANDLE_FUNC(__vgic_v3_save_aprs),
 	HANDLE_FUNC(__vgic_v3_restore_aprs),
-	HANDLE_FUNC(__pkvm_init),
-	HANDLE_FUNC(__pkvm_cpu_set_vector),
-	HANDLE_FUNC(__pkvm_host_share_hyp),
-	HANDLE_FUNC(__pkvm_create_private_mapping),
-	HANDLE_FUNC(__pkvm_prot_finalize),
 };
 
 static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(unsigned long, id, host_ctxt, 0);
+	unsigned long hcall_min = 0;
 	hcall_t hfn;
 
+	/*
+	 * If pKVM has been initialised then reject any calls to the
+	 * early "privileged" hypercalls. Note that we cannot reject
+	 * calls to __pkvm_prot_finalize for two reasons: (1) The static
+	 * key used to determine initialisation must be toggled prior to
+	 * finalisation and (2) finalisation is performed on a per-CPU
+	 * basis. This is all fine, however, since __pkvm_prot_finalize
+	 * returns -EPERM after the first call for a given CPU.
+	 */
+	if (static_branch_unlikely(&kvm_protected_mode_initialized))
+		hcall_min = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
+
 	id -= KVM_HOST_SMCCC_ID(0);
 
-	if (unlikely(id >= ARRAY_SIZE(host_hcall)))
+	if (unlikely(id < hcall_min || id >= ARRAY_SIZE(host_hcall)))
 		goto inval;
 
 	hfn = host_hcall[id];
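Dispatch stays a flat function-pointer table indexed by the enum through the HANDLE_FUNC() designated initialisers, so enforcing the new policy only needs a raised lower bound on the valid-ID range. A minimal standalone sketch of that pattern, not kernel code; the enum values, handlers and rejection message here are illustrative:

/* Standalone model of host_hcall[] dispatch plus the hcall_min cut-off. */
#include <stdbool.h>
#include <stdio.h>

enum hcall_id {
	/* pre-finalisation only */
	HC___pkvm_init,
	HC___pkvm_prot_finalize,
	/* available after finalisation */
	HC___kvm_vcpu_run,
	HC_NR,
};

typedef void (*hcall_t)(void);

static void handle___pkvm_init(void)		{ puts("__pkvm_init"); }
static void handle___pkvm_prot_finalize(void)	{ puts("__pkvm_prot_finalize"); }
static void handle___kvm_vcpu_run(void)		{ puts("__kvm_vcpu_run"); }

/* Designated initialisers keep table slots in sync with the enum,
 * mirroring what HANDLE_FUNC() does. */
static const hcall_t host_hcall[] = {
	[HC___pkvm_init]		= handle___pkvm_init,
	[HC___pkvm_prot_finalize]	= handle___pkvm_prot_finalize,
	[HC___kvm_vcpu_run]		= handle___kvm_vcpu_run,
};

static void handle_host_hcall(unsigned long id, bool pkvm_initialised)
{
	/* Everything below __pkvm_prot_finalize is rejected once initialised. */
	unsigned long hcall_min = pkvm_initialised ? HC___pkvm_prot_finalize : 0;

	if (id < hcall_min || id >= HC_NR || !host_hcall[id]) {
		puts("rejected (SMCCC_RET_NOT_SUPPORTED)");
		return;
	}
	host_hcall[id]();
}

int main(void)
{
	handle_host_hcall(HC___pkvm_init, false);	/* served */
	handle_host_hcall(HC___pkvm_init, true);	/* rejected */
	handle_host_hcall(HC___kvm_vcpu_run, true);	/* served */
	return 0;
}

Because the privileged IDs are packed at the start of the enum, a single comparison against hcall_min is enough; no per-hypercall flag is required.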

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 3 additions & 0 deletions
@@ -123,6 +123,9 @@ int __pkvm_prot_finalize(void)
 	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
 	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
 
+	if (params->hcr_el2 & HCR_VM)
+		return -EPERM;
+
 	params->vttbr = kvm_get_vttbr(mmu);
 	params->vtcr = host_kvm.arch.vtcr;
 	params->hcr_el2 |= HCR_VM;
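The guard relies on a successful finalisation setting HCR_VM in the per-CPU init parameters, so the same bit answers "has this CPU already been finalised?" and a second call fails with -EPERM, which is exactly what the host-side hcall_min logic depends on. A standalone sketch of that idempotence check, not kernel code; the struct and helper are trimmed stand-ins:

/* Standalone model of the per-CPU re-finalisation guard. */
#include <errno.h>
#include <stdio.h>

#define HCR_VM (1UL << 0)	/* stage-2 translation enable bit in HCR_EL2 */

struct nvhe_init_params {
	unsigned long hcr_el2;
};

static int pkvm_prot_finalize(struct nvhe_init_params *params)
{
	if (params->hcr_el2 & HCR_VM)
		return -EPERM;		/* second call on this CPU: refuse */

	/* ... install the host stage-2 page tables here ... */
	params->hcr_el2 |= HCR_VM;
	return 0;
}

int main(void)
{
	struct nvhe_init_params cpu0 = { .hcr_el2 = 0 };

	printf("first call : %d\n", pkvm_prot_finalize(&cpu0));	/* 0 */
	printf("second call: %d\n", pkvm_prot_finalize(&cpu0));	/* -1 == -EPERM */
	return 0;
}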
