Skip to content

Commit 8d20a73

Browse files
Mr-Bossman authored and palmer-dabbelt committed
RISC-V: Check scalar unaligned access on all CPUs
Originally, the check_unaligned_access_emulated_all_cpus function only
checked the boot hart. This fixes the function to check all harts.

Fixes: 71c54b3 ("riscv: report misaligned accesses emulation to hwprobe")
Signed-off-by: Jesse Taube <[email protected]>
Reviewed-by: Charlie Jenkins <[email protected]>
Reviewed-by: Evan Green <[email protected]>
Cc: [email protected]
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent 9852d85 commit 8d20a73

File tree

2 files changed

+9
-7
lines changed

2 files changed

+9
-7
lines changed

arch/riscv/include/asm/cpufeature.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
 
 #include <linux/bitmap.h>
 #include <linux/jump_label.h>
+#include <linux/workqueue.h>
 #include <asm/hwcap.h>
 #include <asm/alternative-macros.h>
 #include <asm/errno.h>
@@ -60,6 +61,7 @@ void riscv_user_isa_enable(void);
 
 #if defined(CONFIG_RISCV_MISALIGNED)
 bool check_unaligned_access_emulated_all_cpus(void);
+void check_unaligned_access_emulated(struct work_struct *work __always_unused);
 void unaligned_emulation_finish(void);
 bool unaligned_ctl_available(void);
 DECLARE_PER_CPU(long, misaligned_access_speed);

arch/riscv/kernel/traps_misaligned.c

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -526,31 +526,28 @@ int handle_misaligned_store(struct pt_regs *regs)
 	return 0;
 }
 
-static bool check_unaligned_access_emulated(int cpu)
+void check_unaligned_access_emulated(struct work_struct *work __always_unused)
 {
+	int cpu = smp_processor_id();
 	long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
 	unsigned long tmp_var, tmp_val;
-	bool misaligned_emu_detected;
 
 	*mas_ptr = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
 
 	__asm__ __volatile__ (
 		"       "REG_L" %[tmp], 1(%[ptr])\n"
 		: [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");
 
-	misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED);
 	/*
 	 * If unaligned_ctl is already set, this means that we detected that all
 	 * CPUS uses emulated misaligned access at boot time. If that changed
 	 * when hotplugging the new cpu, this is something we don't handle.
 	 */
-	if (unlikely(unaligned_ctl && !misaligned_emu_detected)) {
+	if (unlikely(unaligned_ctl && (*mas_ptr != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED))) {
 		pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
 		while (true)
 			cpu_relax();
 	}
-
-	return misaligned_emu_detected;
 }
 
 bool check_unaligned_access_emulated_all_cpus(void)
@@ -562,8 +559,11 @@ bool check_unaligned_access_emulated_all_cpus(void)
 	 * accesses emulated since tasks requesting such control can run on any
 	 * CPU.
 	 */
+	schedule_on_each_cpu(check_unaligned_access_emulated);
+
 	for_each_online_cpu(cpu)
-		if (!check_unaligned_access_emulated(cpu))
+		if (per_cpu(misaligned_access_speed, cpu)
+		    != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
 			return false;
 
 	unaligned_ctl = true;

0 commit comments

Comments
 (0)