Skip to content

Commit 3bbcc8c

Browse files
author
Marc Zyngier
committed
Merge branch kvm-arm64/52bit-fixes into kvmarm-master/next
* kvm-arm64/52bit-fixes:
  : .
  : 52bit PA fixes, courtesy of Ryan Roberts. From the cover letter:
  :
  : "I've been adding support for FEAT_LPA2 to KVM and as part of that work have been
  : testing various (84) configurations of HW, host and guest kernels on FVP. This
  : has thrown up a couple of pre-existing bugs, for which the fixes are provided."
  : .
  KVM: arm64: Fix benign bug with incorrect use of VA_BITS
  KVM: arm64: Fix PAR_TO_HPFAR() to work independently of PA_BITS.
  KVM: arm64: Fix kvm init failure when mode!=vhe and VA_BITS=52.

Signed-off-by: Marc Zyngier <[email protected]>
2 parents b1d10ee + 219072c commit 3bbcc8c

File tree

3 files changed

+36
-20
lines changed

3 files changed

+36
-20
lines changed

arch/arm64/include/asm/kvm_arm.h

Lines changed: 5 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -340,9 +340,13 @@
340340
* We have
341341
* PAR [PA_Shift - 1 : 12] = PA [PA_Shift - 1 : 12]
342342
* HPFAR [PA_Shift - 9 : 4] = FIPA [PA_Shift - 1 : 12]
343+
*
344+
* Always assume 52 bit PA since at this point, we don't know how many PA bits
345+
* the page table has been set up for. This should be safe since unused address
346+
* bits in PAR are res0.
343347
*/
344348
#define PAR_TO_HPFAR(par) \
345-
(((par) & GENMASK_ULL(PHYS_MASK_SHIFT - 1, 12)) >> 8)
349+
(((par) & GENMASK_ULL(52 - 1, 12)) >> 8)
346350

347351
#define ECN(x) { ESR_ELx_EC_##x, #x }
348352

arch/arm64/kvm/arm.c

Lines changed: 3 additions & 17 deletions
Original file line number | Diff line number | Diff line change
@@ -1518,7 +1518,7 @@ static int kvm_init_vector_slots(void)
15181518
return 0;
15191519
}
15201520

1521-
static void cpu_prepare_hyp_mode(int cpu)
1521+
static void cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
15221522
{
15231523
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
15241524
unsigned long tcr;
@@ -1534,23 +1534,9 @@ static void cpu_prepare_hyp_mode(int cpu)
15341534

15351535
params->mair_el2 = read_sysreg(mair_el1);
15361536

1537-
/*
1538-
* The ID map may be configured to use an extended virtual address
1539-
* range. This is only the case if system RAM is out of range for the
1540-
* currently configured page size and VA_BITS, in which case we will
1541-
* also need the extended virtual range for the HYP ID map, or we won't
1542-
* be able to enable the EL2 MMU.
1543-
*
1544-
* However, at EL2, there is only one TTBR register, and we can't switch
1545-
* between translation tables *and* update TCR_EL2.T0SZ at the same
1546-
* time. Bottom line: we need to use the extended range with *both* our
1547-
* translation tables.
1548-
*
1549-
* So use the same T0SZ value we use for the ID map.
1550-
*/
15511537
tcr = (read_sysreg(tcr_el1) & TCR_EL2_MASK) | TCR_EL2_RES1;
15521538
tcr &= ~TCR_T0SZ_MASK;
1553-
tcr |= (idmap_t0sz & GENMASK(TCR_TxSZ_WIDTH - 1, 0)) << TCR_T0SZ_OFFSET;
1539+
tcr |= TCR_T0SZ(hyp_va_bits);
15541540
params->tcr_el2 = tcr;
15551541

15561542
params->pgd_pa = kvm_mmu_get_httbr();
@@ -2054,7 +2040,7 @@ static int init_hyp_mode(void)
20542040
}
20552041

20562042
/* Prepare the CPU initialization parameters */
2057-
cpu_prepare_hyp_mode(cpu);
2043+
cpu_prepare_hyp_mode(cpu, hyp_va_bits);
20582044
}
20592045

20602046
if (is_protected_kvm_enabled()) {

arch/arm64/kvm/mmu.c

Lines changed: 28 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -641,7 +641,7 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr)
641641
{
642642
struct kvm_pgtable pgt = {
643643
.pgd = (kvm_pte_t *)kvm->mm->pgd,
644-
.ia_bits = VA_BITS,
644+
.ia_bits = vabits_actual,
645645
.start_level = (KVM_PGTABLE_MAX_LEVELS -
646646
CONFIG_PGTABLE_LEVELS),
647647
.mm_ops = &kvm_user_mm_ops,
@@ -1618,6 +1618,8 @@ static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = {
16181618
int kvm_mmu_init(u32 *hyp_va_bits)
16191619
{
16201620
int err;
1621+
u32 idmap_bits;
1622+
u32 kernel_bits;
16211623

16221624
hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
16231625
hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
@@ -1631,7 +1633,31 @@ int kvm_mmu_init(u32 *hyp_va_bits)
16311633
*/
16321634
BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
16331635

1634-
*hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
1636+
/*
1637+
* The ID map may be configured to use an extended virtual address
1638+
* range. This is only the case if system RAM is out of range for the
1639+
* currently configured page size and VA_BITS_MIN, in which case we will
1640+
* also need the extended virtual range for the HYP ID map, or we won't
1641+
* be able to enable the EL2 MMU.
1642+
*
1643+
* However, in some cases the ID map may be configured for fewer than
1644+
* the number of VA bits used by the regular kernel stage 1. This
1645+
* happens when VA_BITS=52 and the kernel image is placed in PA space
1646+
* below 48 bits.
1647+
*
1648+
* At EL2, there is only one TTBR register, and we can't switch between
1649+
* translation tables *and* update TCR_EL2.T0SZ at the same time. Bottom
1650+
* line: we need to use the extended range with *both* our translation
1651+
* tables.
1652+
*
1653+
* So use the maximum of the idmap VA bits and the regular kernel stage
1654+
* 1 VA bits to assure that the hypervisor can both ID map its code page
1655+
* and map any kernel memory.
1656+
*/
1657+
idmap_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
1658+
kernel_bits = vabits_actual;
1659+
*hyp_va_bits = max(idmap_bits, kernel_bits);
1660+
16351661
kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
16361662
kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
16371663
kvm_debug("HYP VA range: %lx:%lx\n",

0 commit comments

Comments (0)