@@ -16,6 +16,7 @@
 #include <asm/entry-common.h>
 #include <asm/hwprobe.h>
 #include <asm/cpufeature.h>
+#include <asm/sbi.h>
 #include <asm/vector.h>
 
 #define INSN_MATCH_LB 0x3
@@ -646,7 +647,7 @@ bool __init check_vector_unaligned_access_emulated_all_cpus(void)
 
 static bool unaligned_ctl __read_mostly;
 
-void check_unaligned_access_emulated(struct work_struct *work __always_unused)
+static void check_unaligned_access_emulated(struct work_struct *work __always_unused)
 {
 	int cpu = smp_processor_id();
 	long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
@@ -657,6 +658,13 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused)
 	__asm__ __volatile__ (
 		"	" REG_L "	%[tmp], 1(%[ptr])\n"
 		: [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");
+}
+
+static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
+{
+	long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
+
+	check_unaligned_access_emulated(NULL);
 
 	/*
 	 * If unaligned_ctl is already set, this means that we detected that all
@@ -665,9 +673,10 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused)
 	 */
 	if (unlikely(unaligned_ctl && (*mas_ptr != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED))) {
 		pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
-		while (true)
-			cpu_relax();
+		return -EINVAL;
 	}
+
+	return 0;
 }
 
 bool __init check_unaligned_access_emulated_all_cpus(void)
@@ -699,4 +708,60 @@ bool __init check_unaligned_access_emulated_all_cpus(void)
 {
 	return false;
 }
+static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_RISCV_SBI
+
+static bool misaligned_traps_delegated;
+
+static int cpu_online_sbi_unaligned_setup(unsigned int cpu)
+{
+	if (sbi_fwft_set(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0) &&
+	    misaligned_traps_delegated) {
+		pr_crit("Misaligned trap delegation non homogeneous (expected delegated)");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void __init unaligned_access_init(void)
+{
+	int ret;
+
+	ret = sbi_fwft_set_online_cpus(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0);
+	if (ret)
+		return;
+
+	misaligned_traps_delegated = true;
+	pr_info("SBI misaligned access exception delegation ok\n");
+	/*
+	 * Note that we don't have to take any specific action here, if
+	 * the delegation is successful, then
+	 * check_unaligned_access_emulated() will verify that indeed the
+	 * platform traps on misaligned accesses.
+	 */
+}
+#else
+void __init unaligned_access_init(void) {}
+
+static int cpu_online_sbi_unaligned_setup(unsigned int cpu __always_unused)
+{
+	return 0;
+}
 #endif
+
+int cpu_online_unaligned_access_init(unsigned int cpu)
+{
+	int ret;
+
+	ret = cpu_online_sbi_unaligned_setup(cpu);
+	if (ret)
+		return ret;
+
+	return cpu_online_check_unaligned_access_emulated(cpu);
+}
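
The new per-CPU hooks split cleanly: cpu_online_sbi_unaligned_setup() re-requests SBI misaligned-exception delegation for a CPU coming online and fails if the result is no longer homogeneous across CPUs, while cpu_online_check_unaligned_access_emulated() re-runs the emulation probe; cpu_online_unaligned_access_init() chains the two. The diff itself does not show where that entry point gets called. A minimal sketch of one plausible wiring, assuming a dynamic CPU hotplug state; the unaligned_hotplug_wire() name and the state label are illustrative, not from this patch:

/*
 * Hypothetical wiring, not part of this diff: register
 * cpu_online_unaligned_access_init() as a dynamic CPU hotplug
 * "online" callback so newly plugged CPUs get the same SBI
 * delegation and emulation checks as the boot CPUs.
 */
#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int __init unaligned_hotplug_wire(void)
{
	int ret;

	/* Runs the callback on each CPU as it comes online. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				"riscv/unaligned:online",
				cpu_online_unaligned_access_init,
				NULL);
	return ret < 0 ? ret : 0;
}

Registering with CPUHP_AP_ONLINE_DYN also invokes the startup callback on CPUs that are already online, which keeps the boot-time and hotplug paths consistent.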
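The per-CPU misaligned_access_speed value that check_unaligned_access_emulated() fills in is the same state the riscv_hwprobe syscall reports to userspace. A hedged userspace sketch of reading it back, assuming a kernel recent enough to expose the RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF key:

/* Hypothetical userspace probe, not part of this diff. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>

int main(void)
{
	struct riscv_hwprobe pair = {
		.key = RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF,
	};

	/* cpusetsize = 0 and cpus = NULL query all online CPUs. */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
		return 1;

	if (pair.value == RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
		printf("scalar misaligned accesses are emulated\n");
	return 0;
}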