
Commit cc10554

marcan authored and jannau committed
arm64: Introduce scaffolding to add ACTLR_EL1 to thread state
Some CPUs expose IMPDEF features in ACTLR_EL1 that can be meaningfully controlled per-thread (like TSO control on Apple cores). Add the basic scaffolding to save/restore this register as part of context switching. This mechanism is disabled by default both by config symbol and via a runtime check, which ensures it is never triggered unless the system is known to need it for some feature (which also implies that the layout of ACTLR_EL1 is uniform between all CPU core types).

Signed-off-by: Hector Martin <[email protected]>
Reviewed-by: Neal Gompa <[email protected]>
1 parent 3f33085 commit cc10554
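The TSO mention above is the kind of per-thread control this scaffolding exists for. As a hedged illustration only: in the downstream series this commit sits in, a thread would request TSO through the memory-model prctl whose handler appears as diff context in process.c below. The constant names here are assumed to come from that series' patched uapi headers; they are not defined by this commit.

/*
 * Hypothetical userspace view (not part of this commit): assuming the
 * patched kernel's <linux/prctl.h> exports PR_SET_MEM_MODEL and
 * PR_SET_MEM_MODEL_TSO, a thread could opt into TSO like this. The
 * kernel side would then flip an IMPDEF ACTLR_EL1 bit, and the state
 * added by this commit carries that setting across context switches.
 */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	if (prctl(PR_SET_MEM_MODEL, PR_SET_MEM_MODEL_TSO, 0, 0, 0) != 0)
		perror("PR_SET_MEM_MODEL");
	return 0;
}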

5 files changed: +44 −0 lines changed

arch/arm64/Kconfig

Lines changed: 3 additions & 0 deletions
@@ -411,6 +411,9 @@ config KASAN_SHADOW_OFFSET
 config UNWIND_TABLES
 	bool
 
+config ARM64_ACTLR_STATE
+	bool
+
 source "arch/arm64/Kconfig.platforms"
 
 menu "Kernel Features"

arch/arm64/include/asm/cpufeature.h

Lines changed: 5 additions & 0 deletions
@@ -909,6 +909,11 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
 	return 8;
 }
 
+static __always_inline bool system_has_actlr_state(void)
+{
+	return false;
+}
+
 s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, s64 cur);
 struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);
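system_has_actlr_state() is deliberately a stub that always returns false, so the save/restore code added to process.c below is dead code the compiler can drop. A hedged sketch of how a later platform patch might turn it on — having the platform's Kconfig entry select ARM64_ACTLR_STATE, and the cpucap name ARM64_HAS_ACTLR_STATE, are assumptions, not something this commit defines:

/*
 * Hypothetical follow-up (not in this commit): replace the stub with a
 * check keyed off a cpucap detected at boot, so the context-switch path
 * only runs on platforms known to need it. ARM64_HAS_ACTLR_STATE is an
 * assumed capability name.
 */
static __always_inline bool system_has_actlr_state(void)
{
	return IS_ENABLED(CONFIG_ARM64_ACTLR_STATE) &&
	       cpus_have_final_cap(ARM64_HAS_ACTLR_STATE);
}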

arch/arm64/include/asm/processor.h

Lines changed: 3 additions & 0 deletions
@@ -184,6 +184,9 @@ struct thread_struct {
 	u64			sctlr_user;
 	u64			svcr;
 	u64			tpidr2_el0;
+#ifdef CONFIG_ARM64_ACTLR_STATE
+	u64			actlr;
+#endif
 };
 
 static inline unsigned int thread_get_vl(struct thread_struct *thread,

arch/arm64/kernel/process.c

Lines changed: 25 additions & 0 deletions
@@ -372,6 +372,11 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 		if (system_supports_tpidr2())
 			p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
 
+#ifdef CONFIG_ARM64_ACTLR_STATE
+		if (system_has_actlr_state())
+			p->thread.actlr = read_sysreg(actlr_el1);
+#endif
+
 		if (stack_start) {
 			if (is_compat_thread(task_thread_info(p)))
 				childregs->compat_sp = stack_start;
@@ -533,6 +538,25 @@ int arch_prctl_mem_model_set(struct task_struct *t, unsigned long val)
 }
 #endif
 
+#ifdef CONFIG_ARM64_ACTLR_STATE
+/*
+ * IMPDEF control register ACTLR_EL1 handling. Some CPUs use this to
+ * expose features that can be controlled by userspace.
+ */
+static void actlr_thread_switch(struct task_struct *next)
+{
+	if (!system_has_actlr_state())
+		return;
+
+	current->thread.actlr = read_sysreg(actlr_el1);
+	write_sysreg(next->thread.actlr, actlr_el1);
+}
+#else
+static inline void actlr_thread_switch(struct task_struct *next)
+{
+}
+#endif
+
 /*
  * Thread switching.
  */
@@ -550,6 +574,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	ssbs_thread_switch(next);
 	erratum_1418040_thread_switch(next);
 	ptrauth_thread_switch_user(next);
+	actlr_thread_switch(next);
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
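With actlr_thread_switch() in place, a feature only has to set its IMPDEF bit in ACTLR_EL1 for the current thread and the setting then follows that thread across switches. A hedged sketch of such a consumer; ACTLR_EL1_TSO_BIT, its position, and set_thread_tso() are illustrative names only, not part of this commit:

/*
 * Hypothetical consumer (not in this commit). The bit position is an
 * assumption for illustration only.
 */
#define ACTLR_EL1_TSO_BIT	BIT(1)

static void set_thread_tso(bool enable)
{
	u64 actlr = read_sysreg(actlr_el1);

	if (enable)
		actlr |= ACTLR_EL1_TSO_BIT;
	else
		actlr &= ~ACTLR_EL1_TSO_BIT;

	write_sysreg(actlr, actlr_el1);
	/* Keep the saved copy coherent; actlr_thread_switch() also
	 * refreshes it from the live register at the next switch. */
	current->thread.actlr = actlr;
}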

arch/arm64/kernel/setup.c

Lines changed: 8 additions & 0 deletions
@@ -366,6 +366,14 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
 	 */
 	init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
 #endif
+#ifdef CONFIG_ARM64_ACTLR_STATE
+	/* Store the boot CPU ACTLR_EL1 value as the default. This will only
+	 * be actually restored during context switching iff the platform is
+	 * known to use ACTLR_EL1 for exposable features and its layout is
+	 * known to be the same on all CPUs.
+	 */
+	init_task.thread.actlr = read_sysreg(actlr_el1);
+#endif
 
 	if (boot_args[1] || boot_args[2] || boot_args[3]) {
 		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
