
Commit 192ad3c

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:

"ARM:
 - Page ownership tracking between host EL1 and EL2
 - Rely on userspace page tables to create large stage-2 mappings
 - Fix incompatibility between pKVM and kmemleak
 - Fix the PMU reset state, and improve the performance of the virtual PMU
 - Move over to the generic KVM entry code
 - Address PSCI reset issues w.r.t. save/restore
 - Preliminary rework for the upcoming pKVM fixed feature
 - A bunch of MM cleanups
 - a vGIC fix for timer spurious interrupts
 - Various cleanups

s390:
 - enable interpretation of specification exceptions
 - fix a vcpu_idx vs vcpu_id mixup

x86:
 - fast (lockless) page fault support for the new MMU
 - new MMU now the default
 - increased maximum allowed VCPU count
 - allow inhibit IRQs on KVM_RUN while debugging guests
 - let Hyper-V-enabled guests run with virtualized LAPIC as long as they do not enable the Hyper-V "AutoEOI" feature
 - fixes and optimizations for the toggling of AMD AVIC (virtualized LAPIC)
 - tuning for the case when two-dimensional paging (EPT/NPT) is disabled
 - bugfixes and cleanups, especially with respect to vCPU reset and choosing a paging mode based on CR0/CR4/EFER
 - support for 5-level page table on AMD processors

Generic:
 - MMU notifier invalidation callbacks do not take mmu_lock unless necessary
 - improved caching of LRU kvm_memory_slot
 - support for histogram statistics
 - add statistics for halt polling and remote TLB flush requests"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (210 commits)
  KVM: Drop unused kvm_dirty_gfn_invalid()
  KVM: x86: Update vCPU's hv_clock before back to guest when tsc_offset is adjusted
  KVM: MMU: mark role_regs and role accessors as maybe unused
  KVM: MIPS: Remove a "set but not used" variable
  x86/kvm: Don't enable IRQ when IRQ enabled in kvm_wait
  KVM: stats: Add VM stat for remote tlb flush requests
  KVM: Remove unnecessary export of kvm_{inc,dec}_notifier_count()
  KVM: x86/mmu: Move lpage_disallowed_link further "down" in kvm_mmu_page
  KVM: x86/mmu: Relocate kvm_mmu_page.tdp_mmu_page for better cache locality
  Revert "KVM: x86: mmu: Add guest physical address check in translate_gpa()"
  KVM: x86/mmu: Remove unused field mmio_cached in struct kvm_mmu_page
  kvm: x86: Increase KVM_SOFT_MAX_VCPUS to 710
  kvm: x86: Increase MAX_VCPUS to 1024
  kvm: x86: Set KVM_MAX_VCPU_ID to 4*KVM_MAX_VCPUS
  KVM: VMX: avoid running vmx_handle_exit_irqoff in case of emulation
  KVM: x86/mmu: Don't freak out if pml5_root is NULL on 4-level host
  KVM: s390: index kvm->arch.idle_mask by vcpu_idx
  KVM: s390: Enable specification exception interpretation
  KVM: arm64: Trim guest debug exception handling
  KVM: SVM: Add 5-level page table support for SVM
  ...
2 parents a2b2823 + 109bbba commit 192ad3c


120 files changed: 2891 additions (+), 1605 deletions (-)

Documentation/virt/kvm/api.rst

Lines changed: 28 additions & 8 deletions
@@ -3357,6 +3357,7 @@ flags which can include the following:
 - KVM_GUESTDBG_INJECT_DB: inject DB type exception [x86]
 - KVM_GUESTDBG_INJECT_BP: inject BP type exception [x86]
 - KVM_GUESTDBG_EXIT_PENDING: trigger an immediate guest exit [s390]
+- KVM_GUESTDBG_BLOCKIRQ: avoid injecting interrupts/NMI/SMI [x86]
 
 For example KVM_GUESTDBG_USE_SW_BP indicates that software breakpoints
 are enabled in memory so we need to ensure breakpoint exceptions are
@@ -5208,25 +5209,30 @@ by a string of size ``name_size``.
 #define KVM_STATS_TYPE_CUMULATIVE (0x0 << KVM_STATS_TYPE_SHIFT)
 #define KVM_STATS_TYPE_INSTANT (0x1 << KVM_STATS_TYPE_SHIFT)
 #define KVM_STATS_TYPE_PEAK (0x2 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_LINEAR_HIST (0x3 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_LOG_HIST (0x4 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_MAX KVM_STATS_TYPE_LOG_HIST
 
 #define KVM_STATS_UNIT_SHIFT 4
 #define KVM_STATS_UNIT_MASK (0xF << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_NONE (0x0 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_BYTES (0x1 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_SECONDS (0x2 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_CYCLES (0x3 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_CYCLES
 
 #define KVM_STATS_BASE_SHIFT 8
 #define KVM_STATS_BASE_MASK (0xF << KVM_STATS_BASE_SHIFT)
 #define KVM_STATS_BASE_POW10 (0x0 << KVM_STATS_BASE_SHIFT)
 #define KVM_STATS_BASE_POW2 (0x1 << KVM_STATS_BASE_SHIFT)
+#define KVM_STATS_BASE_MAX KVM_STATS_BASE_POW2
 
 struct kvm_stats_desc {
 __u32 flags;
 __s16 exponent;
 __u16 size;
 __u32 offset;
-__u32 unused;
+__u32 bucket_size;
 char name[];
 };
 
@@ -5237,21 +5243,35 @@ The following flags are supported:
 Bits 0-3 of ``flags`` encode the type:
 
 * ``KVM_STATS_TYPE_CUMULATIVE``
-The statistics data is cumulative. The value of data can only be increased.
+The statistics reports a cumulative count. The value of data can only be increased.
 Most of the counters used in KVM are of this type.
 The corresponding ``size`` field for this type is always 1.
 All cumulative statistics data are read/write.
 * ``KVM_STATS_TYPE_INSTANT``
-The statistics data is instantaneous. Its value can be increased or
+The statistics reports an instantaneous value. Its value can be increased or
 decreased. This type is usually used as a measurement of some resources,
 like the number of dirty pages, the number of large pages, etc.
 All instant statistics are read only.
 The corresponding ``size`` field for this type is always 1.
 * ``KVM_STATS_TYPE_PEAK``
-The statistics data is peak. The value of data can only be increased, and
-represents a peak value for a measurement, for example the maximum number
+The statistics data reports a peak value, for example the maximum number
 of items in a hash table bucket, the longest time waited and so on.
+The value of data can only be increased.
 The corresponding ``size`` field for this type is always 1.
+* ``KVM_STATS_TYPE_LINEAR_HIST``
+The statistic is reported as a linear histogram. The number of
+buckets is specified by the ``size`` field. The size of buckets is specified
+by the ``hist_param`` field. The range of the Nth bucket (1 <= N < ``size``)
+is [``hist_param``*(N-1), ``hist_param``*N), while the range of the last
+bucket is [``hist_param``*(``size``-1), +INF). (+INF means positive infinity
+value.) The bucket value indicates how many samples fell in the bucket's range.
+* ``KVM_STATS_TYPE_LOG_HIST``
+The statistic is reported as a logarithmic histogram. The number of
+buckets is specified by the ``size`` field. The range of the first bucket is
+[0, 1), while the range of the last bucket is [pow(2, ``size``-2), +INF).
+Otherwise, The Nth bucket (1 < N < ``size``) covers
+[pow(2, N-2), pow(2, N-1)). The bucket value indicates how many samples fell
+in the bucket's range.
 
 Bits 4-7 of ``flags`` encode the unit:
 
@@ -5286,9 +5306,9 @@ unsigned 64bit data.
 The ``offset`` field is the offset from the start of Data Block to the start of
 the corresponding statistics data.
 
-The ``unused`` field is reserved for future support for other types of
-statistics data, like log/linear histogram. Its value is always 0 for the types
-defined above.
+The ``bucket_size`` field is used as a parameter for histogram statistics data.
+It is only used by linear histogram statistics data, specifying the size of a
+bucket.
 
 The ``name`` field is the name string of the statistics data. The name string
 starts at the end of ``struct kvm_stats_desc``. The maximum length including
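
The bucket ranges defined by the histogram text above can be reproduced mechanically from the descriptor's ``size`` and ``bucket_size`` fields (the hunk above still refers to the latter as ``hist_param``). The sketch below only illustrates that arithmetic and is not kernel or UAPI code: the helper name hist_bucket_range is invented for this example, and the final loop assumes a 32-bucket logarithmic histogram such as the new halt-polling statistics.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Illustrative helper (not part of KVM): compute the half-open range
     * [*lo, *hi) covered by 0-based bucket n of a histogram statistic,
     * following the api.rst text above.  UINT64_MAX stands in for "+INF".
     */
    static void hist_bucket_range(int log2_hist, uint32_t size,
                                  uint32_t bucket_size, uint32_t n,
                                  uint64_t *lo, uint64_t *hi)
    {
            if (!log2_hist) {               /* KVM_STATS_TYPE_LINEAR_HIST */
                    *lo = (uint64_t)bucket_size * n;
                    *hi = (uint64_t)bucket_size * (n + 1);
            } else {                        /* KVM_STATS_TYPE_LOG_HIST */
                    *lo = n ? 1ULL << (n - 1) : 0;
                    *hi = 1ULL << n;
            }
            if (n == size - 1)              /* last bucket is open-ended */
                    *hi = UINT64_MAX;
    }

    int main(void)
    {
            uint64_t lo, hi;

            /* assumed: a 32-bucket logarithmic histogram, e.g. halt-poll timing */
            for (uint32_t n = 0; n < 32; n++) {
                    hist_bucket_range(1, 32, 0, n, &lo, &hi);
                    printf("bucket %2u: [%llu, %llu)\n", n,
                           (unsigned long long)lo, (unsigned long long)hi);
            }
            return 0;
    }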

Documentation/virt/kvm/locking.rst

Lines changed: 6 additions & 0 deletions
@@ -21,6 +21,12 @@ The acquisition orders for mutexes are as follows:
 can be taken inside a kvm->srcu read-side critical section,
 while kvm->slots_lock cannot.
 
+- kvm->mn_active_invalidate_count ensures that pairs of
+invalidate_range_start() and invalidate_range_end() callbacks
+use the same memslots array. kvm->slots_lock and kvm->slots_arch_lock
+are taken on the waiting side in install_new_memslots, so MMU notifiers
+must not take either kvm->slots_lock or kvm->slots_arch_lock.
+
 On x86:
 
 - vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock
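
As a rough userspace analogue of the rule added above (an illustration only, not the kvm_main.c implementation): notifier threads bump an active-invalidate counter around their work, while the updater, holding the equivalent of kvm->slots_lock, waits for that counter to drain before publishing the new memslots array. Because the updater waits while holding that lock, the notifier side must never take kvm->slots_lock or kvm->slots_arch_lock itself.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t inv_lock = PTHREAD_MUTEX_INITIALIZER;    /* protects the counter */
    static pthread_cond_t  inv_drained = PTHREAD_COND_INITIALIZER;
    static int active_invalidate_count;                             /* ~ mn_active_invalidate_count */
    static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER;  /* ~ kvm->slots_lock */

    /* invalidate_range_start()/end() analogue: never touches slots_lock */
    static void *notifier(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&inv_lock);
            active_invalidate_count++;
            pthread_mutex_unlock(&inv_lock);

            /* ... work against the currently published memslots ... */

            pthread_mutex_lock(&inv_lock);
            if (--active_invalidate_count == 0)
                    pthread_cond_signal(&inv_drained);
            pthread_mutex_unlock(&inv_lock);
            return NULL;
    }

    /* install_new_memslots() analogue: waits for in-flight start/end pairs */
    static void install_new_memslots(void)
    {
            pthread_mutex_lock(&slots_lock);
            pthread_mutex_lock(&inv_lock);
            while (active_invalidate_count)
                    pthread_cond_wait(&inv_drained, &inv_lock);
            /* publish the new memslots array here */
            pthread_mutex_unlock(&inv_lock);
            pthread_mutex_unlock(&slots_lock);
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, notifier, NULL);
            install_new_memslots();
            pthread_join(t, NULL);
            puts("memslots updated with no invalidation in flight");
            return 0;
    }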

arch/arm64/include/asm/cpufeature.h

Lines changed: 9 additions & 9 deletions
@@ -602,14 +602,14 @@ static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
 {
 u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SHIFT);
 
-return val == ID_AA64PFR0_EL1_32BIT_64BIT;
+return val == ID_AA64PFR0_ELx_32BIT_64BIT;
 }
 
 static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
 {
 u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);
 
-return val == ID_AA64PFR0_EL0_32BIT_64BIT;
+return val == ID_AA64PFR0_ELx_32BIT_64BIT;
 }
 
 static inline bool id_aa64pfr0_sve(u64 pfr0)
@@ -784,13 +784,13 @@ extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
 {
 switch (parange) {
-case 0: return 32;
-case 1: return 36;
-case 2: return 40;
-case 3: return 42;
-case 4: return 44;
-case 5: return 48;
-case 6: return 52;
+case ID_AA64MMFR0_PARANGE_32: return 32;
+case ID_AA64MMFR0_PARANGE_36: return 36;
+case ID_AA64MMFR0_PARANGE_40: return 40;
+case ID_AA64MMFR0_PARANGE_42: return 42;
+case ID_AA64MMFR0_PARANGE_44: return 44;
+case ID_AA64MMFR0_PARANGE_48: return 48;
+case ID_AA64MMFR0_PARANGE_52: return 52;
 /*
 * A future PE could use a value unknown to the kernel.
 * However, by the "D10.1.4 Principles of the ID scheme

arch/arm64/include/asm/kvm_arm.h

Lines changed: 38 additions & 16 deletions
@@ -12,8 +12,13 @@
 #include <asm/types.h>
 
 /* Hyp Configuration Register (HCR) bits */
+
+#define HCR_TID5 (UL(1) << 58)
+#define HCR_DCT (UL(1) << 57)
 #define HCR_ATA_SHIFT 56
 #define HCR_ATA (UL(1) << HCR_ATA_SHIFT)
+#define HCR_AMVOFFEN (UL(1) << 51)
+#define HCR_FIEN (UL(1) << 47)
 #define HCR_FWB (UL(1) << 46)
 #define HCR_API (UL(1) << 41)
 #define HCR_APK (UL(1) << 40)
@@ -32,9 +37,9 @@
 #define HCR_TVM (UL(1) << 26)
 #define HCR_TTLB (UL(1) << 25)
 #define HCR_TPU (UL(1) << 24)
-#define HCR_TPC (UL(1) << 23)
+#define HCR_TPC (UL(1) << 23) /* HCR_TPCP if FEAT_DPB */
 #define HCR_TSW (UL(1) << 22)
-#define HCR_TAC (UL(1) << 21)
+#define HCR_TACR (UL(1) << 21)
 #define HCR_TIDCP (UL(1) << 20)
 #define HCR_TSC (UL(1) << 19)
 #define HCR_TID3 (UL(1) << 18)
@@ -56,12 +61,13 @@
 #define HCR_PTW (UL(1) << 2)
 #define HCR_SWIO (UL(1) << 1)
 #define HCR_VM (UL(1) << 0)
+#define HCR_RES0 ((UL(1) << 48) | (UL(1) << 39))
 
 /*
 * The bits we set in HCR:
 * TLOR: Trap LORegion register accesses
 * RW: 64bit by default, can be overridden for 32bit VMs
-* TAC: Trap ACTLR
+* TACR: Trap ACTLR
 * TSC: Trap SMC
 * TSW: Trap cache operations by set/way
 * TWE: Trap WFE
@@ -76,7 +82,7 @@
 * PTW: Take a stage2 fault if a stage1 walk steps in device memory
 */
 #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
-HCR_BSU_IS | HCR_FB | HCR_TAC | \
+HCR_BSU_IS | HCR_FB | HCR_TACR | \
 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
 HCR_FMO | HCR_IMO | HCR_PTW )
 #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
@@ -275,24 +281,40 @@
 #define CPTR_EL2_TTA (1 << 20)
 #define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
 #define CPTR_EL2_TZ (1 << 8)
-#define CPTR_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 */
-#define CPTR_EL2_DEFAULT CPTR_EL2_RES1
+#define CPTR_NVHE_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */
+#define CPTR_EL2_DEFAULT CPTR_NVHE_EL2_RES1
+#define CPTR_NVHE_EL2_RES0 (GENMASK(63, 32) | \
+GENMASK(29, 21) | \
+GENMASK(19, 14) | \
+BIT(11))
 
 /* Hyp Debug Configuration Register bits */
 #define MDCR_EL2_E2TB_MASK (UL(0x3))
 #define MDCR_EL2_E2TB_SHIFT (UL(24))
-#define MDCR_EL2_TTRF (1 << 19)
-#define MDCR_EL2_TPMS (1 << 14)
+#define MDCR_EL2_HPMFZS (UL(1) << 36)
+#define MDCR_EL2_HPMFZO (UL(1) << 29)
+#define MDCR_EL2_MTPME (UL(1) << 28)
+#define MDCR_EL2_TDCC (UL(1) << 27)
+#define MDCR_EL2_HCCD (UL(1) << 23)
+#define MDCR_EL2_TTRF (UL(1) << 19)
+#define MDCR_EL2_HPMD (UL(1) << 17)
+#define MDCR_EL2_TPMS (UL(1) << 14)
 #define MDCR_EL2_E2PB_MASK (UL(0x3))
 #define MDCR_EL2_E2PB_SHIFT (UL(12))
-#define MDCR_EL2_TDRA (1 << 11)
-#define MDCR_EL2_TDOSA (1 << 10)
-#define MDCR_EL2_TDA (1 << 9)
-#define MDCR_EL2_TDE (1 << 8)
-#define MDCR_EL2_HPME (1 << 7)
-#define MDCR_EL2_TPM (1 << 6)
-#define MDCR_EL2_TPMCR (1 << 5)
-#define MDCR_EL2_HPMN_MASK (0x1F)
+#define MDCR_EL2_TDRA (UL(1) << 11)
+#define MDCR_EL2_TDOSA (UL(1) << 10)
+#define MDCR_EL2_TDA (UL(1) << 9)
+#define MDCR_EL2_TDE (UL(1) << 8)
+#define MDCR_EL2_HPME (UL(1) << 7)
+#define MDCR_EL2_TPM (UL(1) << 6)
+#define MDCR_EL2_TPMCR (UL(1) << 5)
+#define MDCR_EL2_HPMN_MASK (UL(0x1F))
+#define MDCR_EL2_RES0 (GENMASK(63, 37) | \
+GENMASK(35, 30) | \
+GENMASK(25, 24) | \
+GENMASK(22, 20) | \
+BIT(18) | \
+GENMASK(16, 15))
 
 /* For compatibility with fault code shared with 32-bit */
 #define FSC_FAULT ESR_ELx_FSC_FAULT

arch/arm64/include/asm/kvm_asm.h

Lines changed: 3 additions & 4 deletions
@@ -59,12 +59,11 @@
 #define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs 13
 #define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs 14
 #define __KVM_HOST_SMCCC_FUNC___pkvm_init 15
-#define __KVM_HOST_SMCCC_FUNC___pkvm_create_mappings 16
+#define __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp 16
 #define __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping 17
 #define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector 18
 #define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize 19
-#define __KVM_HOST_SMCCC_FUNC___pkvm_mark_hyp 20
-#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 21
+#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 20
 
 #ifndef __ASSEMBLY__
 
@@ -210,7 +209,7 @@ extern u64 __vgic_v3_read_vmcr(void);
 extern void __vgic_v3_write_vmcr(u32 vmcr);
 extern void __vgic_v3_init_lrs(void);
 
-extern u32 __kvm_get_mdcr_el2(void);
+extern u64 __kvm_get_mdcr_el2(void);
 
 #define __KVM_EXTABLE(from, to) \
 " .pushsection __kvm_ex_table, \"a\"\n" \

arch/arm64/include/asm/kvm_host.h

Lines changed: 13 additions & 4 deletions
@@ -66,7 +66,7 @@ DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 extern unsigned int kvm_sve_max_vl;
 int kvm_arm_init_sve(void);
 
-int __attribute_const__ kvm_target_cpu(void);
+u32 __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
 
@@ -185,7 +185,6 @@ enum vcpu_sysreg {
 PMCNTENSET_EL0, /* Count Enable Set Register */
 PMINTENSET_EL1, /* Interrupt Enable Set Register */
 PMOVSSET_EL0, /* Overflow Flag Status Set Register */
-PMSWINC_EL0, /* Software Increment Register */
 PMUSERENR_EL0, /* User Enable Register */
 
 /* Pointer Authentication Registers in a strict increasing order. */
@@ -287,9 +286,13 @@ struct kvm_vcpu_arch {
 /* Stage 2 paging state used by the hardware on next switch */
 struct kvm_s2_mmu *hw_mmu;
 
-/* HYP configuration */
+/* Values of trap registers for the guest. */
 u64 hcr_el2;
-u32 mdcr_el2;
+u64 mdcr_el2;
+u64 cptr_el2;
+
+/* Values of trap registers for the host before guest entry. */
+u64 mdcr_el2_host;
 
 /* Exception Information */
 struct kvm_vcpu_fault_info fault;
@@ -576,6 +579,7 @@ struct kvm_vcpu_stat {
 u64 wfi_exit_stat;
 u64 mmio_exit_user;
 u64 mmio_exit_kernel;
+u64 signal_exits;
 u64 exits;
 };
 
@@ -771,6 +775,11 @@ void kvm_arch_free_vm(struct kvm *kvm);
 
 int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);
 
+static inline bool kvm_vm_is_protected(struct kvm *kvm)
+{
+return false;
+}
+
 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

arch/arm64/include/asm/kvm_hyp.h

Lines changed: 1 addition & 1 deletion
@@ -95,7 +95,7 @@ void __sve_restore_state(void *sve_pffr, u32 *fpsr);
 
 #ifndef __KVM_NVHE_HYPERVISOR__
 void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
-void deactivate_traps_vhe_put(void);
+void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);
 #endif
 
 u64 __guest_enter(struct kvm_vcpu *vcpu);

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 9 additions & 8 deletions
@@ -252,24 +252,30 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
 
 #define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
 
+/*
+* When this is (directly or indirectly) used on the TLB invalidation
+* path, we rely on a previously issued DSB so that page table updates
+* and VMID reads are correctly ordered.
+*/
 static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
 {
 struct kvm_vmid *vmid = &mmu->vmid;
 u64 vmid_field, baddr;
 u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;
 
 baddr = mmu->pgd_phys;
-vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
+vmid_field = (u64)READ_ONCE(vmid->vmid) << VTTBR_VMID_SHIFT;
 return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
 }
 
 /*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
-static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long vtcr)
+static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
+struct kvm_arch *arch)
 {
-write_sysreg(vtcr, vtcr_el2);
+write_sysreg(arch->vtcr, vtcr_el2);
 write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
 
 /*
@@ -280,11 +286,6 @@ static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long
 asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
-static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
-{
-__load_stage2(mmu, kern_hyp_va(mmu->arch)->vtcr);
-}
-
 static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
 {
 return container_of(mmu->arch, struct kvm, arch);

0 commit comments
