Skip to content

Commit c077711

Browse files
Suzuki K Poulose authored and Catalin Marinas committed
arm64: Detect if in a realm and set RIPAS RAM
Detect that the VM is a realm guest by the presence of the RSI interface. This is done after PSCI has been initialised so that we can check the SMCCC conduit before making any RSI calls. If in a realm then iterate over all memory ensuring that it is marked as RIPAS RAM. The loader is required to do this for us, however if some memory is missed this will cause the guest to receive a hard to debug external abort at some random point in the future. So for a belt-and-braces approach set all memory to RIPAS RAM. Any failure here implies that the RAM regions passed to Linux are incorrect so panic() promptly to make the situation clear. Reviewed-by: Gavin Shan <[email protected]> Reviewed-by: Catalin Marinas <[email protected]> Signed-off-by: Suzuki K Poulose <[email protected]> Co-developed-by: Steven Price <[email protected]> Signed-off-by: Steven Price <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Catalin Marinas <[email protected]>
1 parent b880a80 commit c077711

File tree

4 files changed

+147
-1
lines changed

4 files changed

+147
-1
lines changed

arch/arm64/include/asm/rsi.h

Lines changed: 66 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,66 @@
1+
/* SPDX-License-Identifier: GPL-2.0-only */
2+
/*
3+
* Copyright (C) 2024 ARM Ltd.
4+
*/
5+
6+
#ifndef __ASM_RSI_H_
7+
#define __ASM_RSI_H_
8+
9+
#include <linux/errno.h>
10+
#include <linux/jump_label.h>
11+
#include <asm/rsi_cmds.h>
12+
13+
DECLARE_STATIC_KEY_FALSE(rsi_present);
14+
15+
void __init arm64_rsi_init(void);
16+
17+
static inline bool is_realm_world(void)
18+
{
19+
return static_branch_unlikely(&rsi_present);
20+
}
21+
22+
/*
 * Transition the IPA range [start, end) to the requested RIPAS state.
 *
 * The RMM is allowed to make partial progress on each call, reporting
 * how far it got via the top output argument, so keep calling until the
 * whole range has been covered.  Any RSI failure, or a progress report
 * that falls outside the window still to be processed, indicates the
 * arguments were bad: fail with -EINVAL.
 */
static inline int rsi_set_memory_range(phys_addr_t start, phys_addr_t end,
				       enum ripas state, unsigned long flags)
{
	phys_addr_t progress;
	unsigned long err;

	for (; start != end; start = progress) {
		err = rsi_set_addr_range_state(start, end, state, flags,
					       &progress);
		if (err)
			return -EINVAL;
		if (progress < start || progress > end)
			return -EINVAL;
	}

	return 0;
}
37+
38+
/*
39+
* Convert the specified range to RAM. Do not use this if you rely on the
40+
* contents of a page that may already be in RAM state.
41+
*/
42+
static inline int rsi_set_memory_range_protected(phys_addr_t start,
43+
phys_addr_t end)
44+
{
45+
return rsi_set_memory_range(start, end, RSI_RIPAS_RAM,
46+
RSI_CHANGE_DESTROYED);
47+
}
48+
49+
/*
50+
* Convert the specified range to RAM. Do not convert any pages that may have
51+
* been DESTROYED, without our permission.
52+
*/
53+
static inline int rsi_set_memory_range_protected_safe(phys_addr_t start,
54+
phys_addr_t end)
55+
{
56+
return rsi_set_memory_range(start, end, RSI_RIPAS_RAM,
57+
RSI_NO_CHANGE_DESTROYED);
58+
}
59+
60+
/*
 * Convert the specified range to shared (RIPAS EMPTY), allowing pages
 * in the DESTROYED state to be converted too.
 */
static inline int rsi_set_memory_range_shared(phys_addr_t start,
					      phys_addr_t end)
{
	return rsi_set_memory_range(start, end, RSI_RIPAS_EMPTY,
				    RSI_CHANGE_DESTROYED);
}
66+
#endif /* __ASM_RSI_H_ */

arch/arm64/kernel/Makefile

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,8 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
3333
return_address.o cpuinfo.o cpu_errata.o \
3434
cpufeature.o alternative.o cacheinfo.o \
3535
smp.o smp_spin_table.o topology.o smccc-call.o \
36-
syscall.o proton-pack.o idle.o patching.o pi/
36+
syscall.o proton-pack.o idle.o patching.o pi/ \
37+
rsi.o
3738

3839
obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
3940
sys_compat.o

arch/arm64/kernel/rsi.c

Lines changed: 76 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,76 @@
1+
// SPDX-License-Identifier: GPL-2.0-only
2+
/*
3+
* Copyright (C) 2023 ARM Ltd.
4+
*/
5+
6+
#include <linux/jump_label.h>
7+
#include <linux/memblock.h>
8+
#include <linux/psci.h>
9+
#include <asm/rsi.h>
10+
11+
DEFINE_STATIC_KEY_FALSE_RO(rsi_present);
12+
EXPORT_SYMBOL(rsi_present);
13+
14+
static bool rsi_version_matches(void)
15+
{
16+
unsigned long ver_lower, ver_higher;
17+
unsigned long ret = rsi_request_version(RSI_ABI_VERSION,
18+
&ver_lower,
19+
&ver_higher);
20+
21+
if (ret == SMCCC_RET_NOT_SUPPORTED)
22+
return false;
23+
24+
if (ret != RSI_SUCCESS) {
25+
pr_err("RME: RMM doesn't support RSI version %lu.%lu. Supported range: %lu.%lu-%lu.%lu\n",
26+
RSI_ABI_VERSION_MAJOR, RSI_ABI_VERSION_MINOR,
27+
RSI_ABI_VERSION_GET_MAJOR(ver_lower),
28+
RSI_ABI_VERSION_GET_MINOR(ver_lower),
29+
RSI_ABI_VERSION_GET_MAJOR(ver_higher),
30+
RSI_ABI_VERSION_GET_MINOR(ver_higher));
31+
return false;
32+
}
33+
34+
pr_info("RME: Using RSI version %lu.%lu\n",
35+
RSI_ABI_VERSION_GET_MAJOR(ver_lower),
36+
RSI_ABI_VERSION_GET_MINOR(ver_lower));
37+
38+
return true;
39+
}
40+
41+
static void __init arm64_rsi_setup_memory(void)
{
	phys_addr_t start, end;
	u64 i;

	/*
	 * Iterate over the available memory ranges and convert the state to
	 * protected memory. We should take extra care to ensure that we DO NOT
	 * permit any "DESTROYED" pages to be converted to "RAM".
	 *
	 * panic() is used because if the attempt to switch the memory to
	 * protected has failed here, then future accesses to the memory are
	 * simply going to be reflected as a SEA (Synchronous External Abort)
	 * which we can't handle. Bailing out early prevents the guest limping
	 * on and dying later.
	 */
	for_each_mem_range(i, &start, &end) {
		if (!rsi_set_memory_range_protected_safe(start, end))
			continue;
		panic("Failed to set memory range to protected: %pa-%pa",
		      &start, &end);
	}
}
64+
65+
/*
 * Detect whether we are a realm guest and, if so, bring all RAM into
 * the RIPAS RAM state and flip the rsi_present static key.
 *
 * RSI calls are SMCs, so only probe once PSCI has established SMC as
 * the SMCCC conduit; an HVC conduit means we cannot be in a realm.
 */
void __init arm64_rsi_init(void)
{
	if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_SMC ||
	    !rsi_version_matches())
		return;

	arm64_rsi_setup_memory();

	static_branch_enable(&rsi_present);
}
76+

arch/arm64/kernel/setup.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,7 @@
4343
#include <asm/cpu_ops.h>
4444
#include <asm/kasan.h>
4545
#include <asm/numa.h>
46+
#include <asm/rsi.h>
4647
#include <asm/scs.h>
4748
#include <asm/sections.h>
4849
#include <asm/setup.h>
@@ -351,6 +352,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
351352
else
352353
psci_acpi_init();
353354

355+
arm64_rsi_init();
356+
354357
init_bootcpu_ops();
355358
smp_init_cpus();
356359
smp_build_mpidr_hash();

0 commit comments

Comments
 (0)