@@ -526,31 +526,28 @@ int handle_misaligned_store(struct pt_regs *regs)
 	return 0;
 }
 
-static bool check_unaligned_access_emulated(int cpu)
+void check_unaligned_access_emulated(struct work_struct *work __always_unused)
 {
+	int cpu = smp_processor_id();
 	long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
 	unsigned long tmp_var, tmp_val;
-	bool misaligned_emu_detected;
 
 	*mas_ptr = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
 
 	__asm__ __volatile__ (
 		"       " REG_L " %[tmp], 1(%[ptr])\n"
 		: [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");
 
-	misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED);
 	/*
 	 * If unaligned_ctl is already set, this means that we detected that all
 	 * CPUS uses emulated misaligned access at boot time. If that changed
 	 * when hotplugging the new cpu, this is something we don't handle.
 	 */
-	if (unlikely(unaligned_ctl && !misaligned_emu_detected)) {
+	if (unlikely(unaligned_ctl && (*mas_ptr != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED))) {
 		pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
 		while (true)
 			cpu_relax();
 	}
-
-	return misaligned_emu_detected;
 }
 
 bool check_unaligned_access_emulated_all_cpus(void)
@@ -562,8 +559,11 @@ bool check_unaligned_access_emulated_all_cpus(void)
 	 * accesses emulated since tasks requesting such control can run on any
 	 * CPU.
 	 */
+	schedule_on_each_cpu(check_unaligned_access_emulated);
+
 	for_each_online_cpu(cpu)
-		if (!check_unaligned_access_emulated(cpu))
+		if (per_cpu(misaligned_access_speed, cpu)
+		    != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
 			return false;
 
 	unaligned_ctl = true;