@@ -55,6 +55,34 @@ extern unsigned int VFP_arch_feroceon __alias(VFP_arch);
55
55
*/
56
56
/*
 * Per-CPU pointer to the task VFP state most recently held in that CPU's
 * VFP hardware registers — presumably consulted by vfp_state_in_hw() to
 * decide whether a thread's freshest state is already in hardware
 * (TODO confirm against the full file; only fragments are visible here).
 */
union vfp_state * vfp_current_hw_state [NR_CPUS ];
57
57
58
+ /*
59
+ * Claim ownership of the VFP unit.
60
+ *
61
+ * The caller may change VFP registers until vfp_state_release() is called.
62
+ *
63
+ * local_bh_disable() is used to disable preemption and to disable VFP
64
+ * processing in softirq context. On PREEMPT_RT kernels local_bh_disable() is
65
+ * not sufficient because it only serializes soft interrupt related sections
66
+ * via a local lock, but stays preemptible. Disabling preemption is the right
67
+ * choice here as bottom half processing is always in thread context on RT
68
+ * kernels so it implicitly prevents bottom half processing as well.
69
+ */
70
+ static void vfp_state_hold (void )
71
+ {
72
+ if (!IS_ENABLED (CONFIG_PREEMPT_RT ))
73
+ local_bh_disable ();
74
+ else
75
+ preempt_disable ();
76
+ }
77
+
78
+ static void vfp_state_release (void )
79
+ {
80
+ if (!IS_ENABLED (CONFIG_PREEMPT_RT ))
81
+ local_bh_enable ();
82
+ else
83
+ preempt_enable ();
84
+ }
85
+
58
86
/*
59
87
* Is 'thread's most up to date state stored in this CPUs hardware?
60
88
* Must be called from non-preemptible context.
@@ -240,16 +268,15 @@ static void vfp_panic(char *reason, u32 inst)
240
268
/*
241
269
* Process bitmask of exception conditions.
242
270
*/
243
- static void vfp_raise_exceptions (u32 exceptions , u32 inst , u32 fpscr , struct pt_regs * regs )
271
+ static int vfp_raise_exceptions (u32 exceptions , u32 inst , u32 fpscr )
244
272
{
245
273
int si_code = 0 ;
246
274
247
275
pr_debug ("VFP: raising exceptions %08x\n" , exceptions );
248
276
249
277
if (exceptions == VFP_EXCEPTION_ERROR ) {
250
278
vfp_panic ("unhandled bounce" , inst );
251
- vfp_raise_sigfpe (FPE_FLTINV , regs );
252
- return ;
279
+ return FPE_FLTINV ;
253
280
}
254
281
255
282
/*
@@ -277,8 +304,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
277
304
RAISE (FPSCR_OFC , FPSCR_OFE , FPE_FLTOVF );
278
305
RAISE (FPSCR_IOC , FPSCR_IOE , FPE_FLTINV );
279
306
280
- if (si_code )
281
- vfp_raise_sigfpe (si_code , regs );
307
+ return si_code ;
282
308
}
283
309
284
310
/*
@@ -324,6 +350,8 @@ static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
324
350
static void VFP_bounce (u32 trigger , u32 fpexc , struct pt_regs * regs )
325
351
{
326
352
u32 fpscr , orig_fpscr , fpsid , exceptions ;
353
+ int si_code2 = 0 ;
354
+ int si_code = 0 ;
327
355
328
356
pr_debug ("VFP: bounce: trigger %08x fpexc %08x\n" , trigger , fpexc );
329
357
@@ -369,8 +397,8 @@ static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
369
397
* unallocated VFP instruction but with FPSCR.IXE set and not
370
398
* on VFP subarch 1.
371
399
*/
372
- vfp_raise_exceptions (VFP_EXCEPTION_ERROR , trigger , fpscr , regs );
373
- return ;
400
+ si_code = vfp_raise_exceptions (VFP_EXCEPTION_ERROR , trigger , fpscr );
401
+ goto exit ;
374
402
}
375
403
376
404
/*
@@ -394,14 +422,14 @@ static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
394
422
*/
395
423
exceptions = vfp_emulate_instruction (trigger , fpscr , regs );
396
424
if (exceptions )
397
- vfp_raise_exceptions (exceptions , trigger , orig_fpscr , regs );
425
+ si_code2 = vfp_raise_exceptions (exceptions , trigger , orig_fpscr );
398
426
399
427
/*
400
428
* If there isn't a second FP instruction, exit now. Note that
401
429
* the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
402
430
*/
403
431
if ((fpexc & (FPEXC_EX | FPEXC_FP2V )) != (FPEXC_EX | FPEXC_FP2V ))
404
- return ;
432
+ goto exit ;
405
433
406
434
/*
407
435
* The barrier() here prevents fpinst2 being read
@@ -413,7 +441,13 @@ static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
413
441
emulate :
414
442
exceptions = vfp_emulate_instruction (trigger , orig_fpscr , regs );
415
443
if (exceptions )
416
- vfp_raise_exceptions (exceptions , trigger , orig_fpscr , regs );
444
+ si_code = vfp_raise_exceptions (exceptions , trigger , orig_fpscr );
445
+ exit :
446
+ vfp_state_release ();
447
+ if (si_code2 )
448
+ vfp_raise_sigfpe (si_code2 , regs );
449
+ if (si_code )
450
+ vfp_raise_sigfpe (si_code , regs );
417
451
}
418
452
419
453
static void vfp_enable (void * unused )
@@ -512,11 +546,9 @@ static inline void vfp_pm_init(void) { }
512
546
*/
513
547
void vfp_sync_hwstate (struct thread_info * thread )
514
548
{
515
- unsigned int cpu = get_cpu ();
549
+ vfp_state_hold ();
516
550
517
- local_bh_disable ();
518
-
519
- if (vfp_state_in_hw (cpu , thread )) {
551
+ if (vfp_state_in_hw (raw_smp_processor_id (), thread )) {
520
552
u32 fpexc = fmrx (FPEXC );
521
553
522
554
/*
@@ -527,8 +559,7 @@ void vfp_sync_hwstate(struct thread_info *thread)
527
559
fmxr (FPEXC , fpexc );
528
560
}
529
561
530
- local_bh_enable ();
531
- put_cpu ();
562
+ vfp_state_release ();
532
563
}
533
564
534
565
/* Ensure that the thread reloads the hardware VFP state on the next use. */
@@ -683,7 +714,7 @@ static int vfp_support_entry(struct pt_regs *regs, u32 trigger)
683
714
if (!user_mode (regs ))
684
715
return vfp_kmode_exception (regs , trigger );
685
716
686
- local_bh_disable ();
717
+ vfp_state_hold ();
687
718
fpexc = fmrx (FPEXC );
688
719
689
720
/*
@@ -748,6 +779,7 @@ static int vfp_support_entry(struct pt_regs *regs, u32 trigger)
748
779
* replay the instruction that trapped.
749
780
*/
750
781
fmxr (FPEXC , fpexc );
782
+ vfp_state_release ();
751
783
} else {
752
784
/* Check for synchronous or asynchronous exceptions */
753
785
if (!(fpexc & (FPEXC_EX | FPEXC_DEX ))) {
@@ -762,17 +794,17 @@ static int vfp_support_entry(struct pt_regs *regs, u32 trigger)
762
794
if (!(fpscr & FPSCR_IXE )) {
763
795
if (!(fpscr & FPSCR_LENGTH_MASK )) {
764
796
pr_debug ("not VFP\n" );
765
- local_bh_enable ();
797
+ vfp_state_release ();
766
798
return - ENOEXEC ;
767
799
}
768
800
fpexc |= FPEXC_DEX ;
769
801
}
770
802
}
771
803
bounce : regs -> ARM_pc += 4 ;
804
+ /* VFP_bounce() will invoke vfp_state_release() */
772
805
VFP_bounce (trigger , fpexc , regs );
773
806
}
774
807
775
- local_bh_enable ();
776
808
return 0 ;
777
809
}
778
810
@@ -837,7 +869,7 @@ void kernel_neon_begin(void)
837
869
unsigned int cpu ;
838
870
u32 fpexc ;
839
871
840
- local_bh_disable ();
872
+ vfp_state_hold ();
841
873
842
874
/*
843
875
* Kernel mode NEON is only allowed outside of hardirq context with
@@ -868,7 +900,7 @@ void kernel_neon_end(void)
868
900
{
869
901
/* Disable the NEON/VFP unit. */
870
902
fmxr (FPEXC , fmrx (FPEXC ) & ~FPEXC_EN );
871
- local_bh_enable ();
903
+ vfp_state_release ();
872
904
}
873
905
EXPORT_SYMBOL (kernel_neon_end );
874
906
0 commit comments