Commit cf5a8ab

clementleger authored and palmer-dabbelt committed
riscv: misaligned: request misaligned exception from SBI
Now that the kernel can handle misaligned accesses in S-mode, request
misaligned access exception delegation from SBI. This uses the FWFT SBI
extension defined in SBI version 3.0.

Signed-off-by: Clément Léger <[email protected]>
Reviewed-by: Andrew Jones <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent c4a50db commit cf5a8ab
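
For orientation, here is a minimal sketch of how the FWFT helpers used by this patch fit together. The helper names and the feature constant (sbi_fwft_set(), sbi_fwft_set_online_cpus(), SBI_FWFT_MISALIGNED_EXC_DELEG) are taken from the diff below; the wrapper functions themselves are illustrative and not part of the patch.

#include <asm/sbi.h>

/*
 * Illustrative sketch only. At boot, delegation is requested for every
 * CPU that is currently online; a CPU hotplugged later must issue the
 * same request for itself so its behaviour stays homogeneous with the
 * rest of the system.
 */
static int __init example_request_misaligned_delegation(void)
{
        /* feature = misaligned exception delegation, value = 1 (enable), flags = 0 */
        return sbi_fwft_set_online_cpus(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0);
}

static int example_cpu_online(unsigned int cpu)
{
        /* called when a CPU comes online: repeat the request for that CPU */
        return sbi_fwft_set(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0);
}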

File tree

arch/riscv/include/asm/cpufeature.h
arch/riscv/kernel/traps_misaligned.c
arch/riscv/kernel/unaligned_access_speed.c

3 files changed: +77 -5 lines changed

arch/riscv/include/asm/cpufeature.h

Lines changed: 2 additions & 1 deletion

@@ -67,8 +67,9 @@ void __init riscv_user_isa_enable(void);
         _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate)
 
 bool __init check_unaligned_access_emulated_all_cpus(void);
+void unaligned_access_init(void);
+int cpu_online_unaligned_access_init(unsigned int cpu);
 #if defined(CONFIG_RISCV_SCALAR_MISALIGNED)
-void check_unaligned_access_emulated(struct work_struct *work __always_unused);
 void unaligned_emulation_finish(void);
 bool unaligned_ctl_available(void);
 DECLARE_PER_CPU(long, misaligned_access_speed);

arch/riscv/kernel/traps_misaligned.c

Lines changed: 68 additions & 3 deletions

@@ -16,6 +16,7 @@
 #include <asm/entry-common.h>
 #include <asm/hwprobe.h>
 #include <asm/cpufeature.h>
+#include <asm/sbi.h>
 #include <asm/vector.h>
 
 #define INSN_MATCH_LB 0x3
@@ -646,7 +647,7 @@ bool __init check_vector_unaligned_access_emulated_all_cpus(void)
 
 static bool unaligned_ctl __read_mostly;
 
-void check_unaligned_access_emulated(struct work_struct *work __always_unused)
+static void check_unaligned_access_emulated(struct work_struct *work __always_unused)
 {
         int cpu = smp_processor_id();
         long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
@@ -657,6 +658,13 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused)
         __asm__ __volatile__ (
                 " "REG_L" %[tmp], 1(%[ptr])\n"
                 : [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");
+}
+
+static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
+{
+        long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
+
+        check_unaligned_access_emulated(NULL);
 
         /*
          * If unaligned_ctl is already set, this means that we detected that all
@@ -665,9 +673,10 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused)
          */
         if (unlikely(unaligned_ctl && (*mas_ptr != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED))) {
                 pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
-                while (true)
-                        cpu_relax();
+                return -EINVAL;
         }
+
+        return 0;
 }
 
 bool __init check_unaligned_access_emulated_all_cpus(void)
@@ -699,4 +708,60 @@ bool __init check_unaligned_access_emulated_all_cpus(void)
 {
         return false;
 }
+static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
+{
+        return 0;
+}
+#endif
+
+#ifdef CONFIG_RISCV_SBI
+
+static bool misaligned_traps_delegated;
+
+static int cpu_online_sbi_unaligned_setup(unsigned int cpu)
+{
+        if (sbi_fwft_set(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0) &&
+            misaligned_traps_delegated) {
+                pr_crit("Misaligned trap delegation non homogeneous (expected delegated)");
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
+void __init unaligned_access_init(void)
+{
+        int ret;
+
+        ret = sbi_fwft_set_online_cpus(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0);
+        if (ret)
+                return;
+
+        misaligned_traps_delegated = true;
+        pr_info("SBI misaligned access exception delegation ok\n");
+        /*
+         * Note that we don't have to take any specific action here, if
+         * the delegation is successful, then
+         * check_unaligned_access_emulated() will verify that indeed the
+         * platform traps on misaligned accesses.
+         */
+}
+#else
+void __init unaligned_access_init(void) {}
+
+static int cpu_online_sbi_unaligned_setup(unsigned int cpu __always_unused)
+{
+        return 0;
+}
 #endif
+
+int cpu_online_unaligned_access_init(unsigned int cpu)
+{
+        int ret;
+
+        ret = cpu_online_sbi_unaligned_setup(cpu);
+        if (ret)
+                return ret;
+
+        return cpu_online_check_unaligned_access_emulated(cpu);
+}
arch/riscv/kernel/unaligned_access_speed.c

Lines changed: 7 additions & 1 deletion

@@ -236,6 +236,11 @@ arch_initcall_sync(lock_and_set_unaligned_access_static_branch);
 
 static int riscv_online_cpu(unsigned int cpu)
 {
+        int ret = cpu_online_unaligned_access_init(cpu);
+
+        if (ret)
+                return ret;
+
         /* We are already set since the last check */
         if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) {
                 goto exit;
@@ -248,7 +253,6 @@ static int riscv_online_cpu(unsigned int cpu)
 {
         static struct page *buf;
 
-        check_unaligned_access_emulated(NULL);
         buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
         if (!buf) {
                 pr_warn("Allocation failure, not measuring misaligned performance\n");
@@ -439,6 +443,8 @@ static int __init check_unaligned_access_all_cpus(void)
 {
         int cpu;
 
+        unaligned_access_init();
+
         if (unaligned_scalar_speed_param != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) {
                 pr_info("scalar unaligned access speed set to '%s' (%lu) by command line\n",
                         speed_str[unaligned_scalar_speed_param], unaligned_scalar_speed_param);
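
Not visible in these hunks: riscv_online_cpu() is registered elsewhere in unaligned_access_speed.c as a CPU hotplug startup callback, which is how the new error return from cpu_online_unaligned_access_init() ends up failing the onlining of a CPU whose behaviour is not homogeneous with the rest of the system. A rough sketch of such a registration follows, assuming the generic cpuhp API; the dynamic state and the callback name string are illustrative, not the exact ones the kernel uses.

#include <linux/cpuhotplug.h>

static int __init example_register_online_callback(void)
{
        /*
         * CPUHP_AP_ONLINE_DYN allocates a dynamic hotplug state; the startup
         * callback runs on each CPU as it comes online, and a negative return
         * value from it aborts the online operation for that CPU.
         */
        int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "riscv/unaligned:online",
                                    riscv_online_cpu, NULL);

        return ret < 0 ? ret : 0;
}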
