@@ -31,6 +31,16 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
 extern char xtensa_arch_except_epc[];
 extern char xtensa_arch_kernel_oops_epc[];
 
+extern void xtensa_lazy_hifi_save(uint8_t *regs);
+extern void xtensa_lazy_hifi_load(uint8_t *regs);
+
+#if defined(CONFIG_XTENSA_LAZY_HIFI_SHARING) && (CONFIG_MP_MAX_NUM_CPUS > 1)
+#define LAZY_COPROCESSOR_LOCK
+
+static struct k_spinlock coprocessor_lock;
+#endif
+
+
 bool xtensa_is_outside_stack_bounds(uintptr_t addr, size_t sz, uint32_t ps)
 {
 	uintptr_t start, end;
@@ -279,6 +289,88 @@ static inline void *return_to(void *interrupted)
 #endif /* CONFIG_MULTITHREADING */
 }
 
+#if defined(LAZY_COPROCESSOR_LOCK)
+/**
+ * Spin until the thread is no longer the HiFi owner on the specified CPU.
+ * Note: Interrupts are locked on entry. Unlock them before spinning so that
+ * an IPI can be caught and processed; restore them afterwards.
+ */
+static void spin_while_hifi_owner(struct _cpu *cpu, struct k_thread *thread)
+{
+	unsigned int key;
+	unsigned int original;
+	unsigned int unlocked;
+
+	__asm__ volatile ("rsr.ps %0" : "=r" (original));
+	unlocked = original & ~PS_INTLEVEL_MASK;
+	__asm__ volatile ("wsr.ps %0; rsync" :: "r" (unlocked) : "memory");
+
+	/* Spin until the thread is no longer the HiFi owner on the other CPU */
+
+	while ((struct k_thread *)
+	       atomic_ptr_get(&cpu->arch.hifi_owner) == thread) {
+		key = arch_irq_lock();
+		arch_spin_relax();
+		arch_irq_unlock(key);
+	}
+
+	__asm__ volatile ("wsr.ps %0; rsync" :: "r" (original) : "memory");
+}
+
+/**
+ * Determine whether the thread is the HiFi owner on another CPU. This is
+ * called with the coprocessor lock held.
+ */
+static struct _cpu *thread_hifi_owner_elsewhere(struct k_thread *thread)
+{
+	struct _cpu *this_cpu = arch_curr_cpu();
+	struct k_thread *owner;
+
+	for (unsigned int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
+		owner = (struct k_thread *)
+			atomic_ptr_get(&_kernel.cpus[i].arch.hifi_owner);
+		if ((this_cpu != &_kernel.cpus[i]) && (owner == thread)) {
+			return &_kernel.cpus[i];
+		}
+	}
+	return NULL;
+}
+#endif
+
+/**
+ * This routine is only needed on SMP systems with HiFi sharing. It handles
+ * the IPI sent to request that this CPU save the HiFi registers so the owner
+ * can load them onto another CPU.
+ */
+void arch_ipi_lazy_coprocessors_save(void)
+{
+#if defined(LAZY_COPROCESSOR_LOCK)
+	k_spinlock_key_t key = k_spin_lock(&coprocessor_lock);
+	struct _cpu *cpu = arch_curr_cpu();
+	struct k_thread *save_hifi = (struct k_thread *)
+		atomic_ptr_get(&cpu->arch.save_hifi);
+	struct k_thread *hifi_owner = (struct k_thread *)
+		atomic_ptr_get(&cpu->arch.hifi_owner);
+
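+	/* Save only if the thread that requested the save still owns the HiFi here */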
+	if ((save_hifi == hifi_owner) && (save_hifi != NULL)) {
+		unsigned int cp;
+
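+		/* Temporarily enable the HiFi coprocessor so its registers can be saved */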
+		__asm__ volatile ("rsr.cpenable %0" : "=r" (cp));
+		cp |= BIT(XCHAL_CP_ID_AUDIOENGINELX);
+		__asm__ volatile ("wsr.cpenable %0" :: "r" (cp));
+
+		xtensa_lazy_hifi_save(save_hifi->arch.hifi_regs);
+
+		cp &= ~BIT(XCHAL_CP_ID_AUDIOENGINELX);
+		__asm__ volatile ("wsr.cpenable %0" :: "r" (cp));
+
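+		/* This CPU no longer holds the saved thread's HiFi context */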
+		atomic_ptr_set(&cpu->arch.hifi_owner, NULL);
+	}
+	atomic_ptr_set(&cpu->arch.save_hifi, NULL);
+	k_spin_unlock(&coprocessor_lock, key);
+#endif
+}
+
 /* The wrapper code lives here instead of in the python script that
  * generates _xtensa_handle_one_int*(). Seems cleaner, still kind of
  * ugly.
@@ -484,6 +576,59 @@ void *xtensa_excint1_c(void *esf)
 		bsa->pc += 3;
 		break;
 #endif /* !CONFIG_USERSPACE */
+#ifdef CONFIG_XTENSA_LAZY_HIFI_SHARING
+	case EXCCAUSE_CP_DISABLED(XCHAL_CP_ID_AUDIOENGINELX): {
+		/* Identify the interrupted thread and the old HiFi owner */
+		struct k_thread *thread = _current;
+		struct k_thread *owner;
+		unsigned int cp;
+
+#if defined(LAZY_COPROCESSOR_LOCK)
+		/*
+		 * If the interrupted thread is a HiFi owner on another CPU,
+		 * then send an IPI to that CPU to have it save its HiFi
+		 * state and then return. This CPU will continue to raise the
+		 * current exception (and send IPIs) until the other CPU has
+		 * both saved the HiFi registers and cleared its HiFi owner.
+		 */
+
+		k_spinlock_key_t key = k_spin_lock(&coprocessor_lock);
+		struct _cpu *cpu = thread_hifi_owner_elsewhere(thread);
+
+		if (cpu != NULL) {
+			cpu->arch.save_hifi = thread;
+			arch_sched_directed_ipi(BIT(cpu->id));
+			k_spin_unlock(&coprocessor_lock, key);
+			spin_while_hifi_owner(cpu, thread);
+			key = k_spin_lock(&coprocessor_lock);
+		}
+#endif
+		owner = (struct k_thread *)
+			atomic_ptr_get(&arch_curr_cpu()->arch.hifi_owner);
+
+		/* Enable the HiFi coprocessor */
+		__asm__ volatile ("rsr.cpenable %0" : "=r" (cp));
+		cp |= BIT(XCHAL_CP_ID_AUDIOENGINELX);
+		__asm__ volatile ("wsr.cpenable %0" :: "r" (cp));
+
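+		/* Nothing more to do if the interrupted thread already owns the HiFi */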
+		if (owner == thread) {
+#if defined(LAZY_COPROCESSOR_LOCK)
+			k_spin_unlock(&coprocessor_lock, key);
+#endif
+			break;
+		}
+
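+		/* Save the previous owner's HiFi context before switching ownership */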
+		if (owner != NULL) {
+			xtensa_lazy_hifi_save(owner->arch.hifi_regs);
+		}
+
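+		/* Claim the HiFi for the interrupted thread and restore its context */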
+		atomic_ptr_set(&arch_curr_cpu()->arch.hifi_owner, thread);
+#if defined(LAZY_COPROCESSOR_LOCK)
+		k_spin_unlock(&coprocessor_lock, key);
+#endif
+		xtensa_lazy_hifi_load(thread->arch.hifi_regs);
+		break;
+	}
+#endif /* CONFIG_XTENSA_LAZY_HIFI_SHARING */
 	default:
 		reason = K_ERR_CPU_EXCEPTION;
 
@@ -549,6 +694,9 @@ void *xtensa_excint1_c(void *esf)
 #ifndef CONFIG_USERSPACE
 	case EXCCAUSE_SYSCALL:
 #endif /* !CONFIG_USERSPACE */
+#ifdef CONFIG_XTENSA_LAZY_HIFI_SHARING
+	case EXCCAUSE_CP_DISABLED(XCHAL_CP_ID_AUDIOENGINELX):
+#endif /* CONFIG_XTENSA_LAZY_HIFI_SHARING */
 		is_fatal_error = false;
 		break;
 	default: