229229 * assembly code so is implemented in portASM.s.
230230 */
231231extern void vPortRestoreTaskContext ( void );
232-
233232extern void vGIC_EnableIRQ ( uint32_t ulInterruptID );
234233extern void vGIC_SetPriority ( uint32_t ulInterruptID , uint32_t ulPriority );
235234extern void vGIC_PowerUpRedistributor ( void );
@@ -238,28 +237,38 @@ extern void vGIC_EnableCPUInterface( void );
238237/*-----------------------------------------------------------*/
239238
#if ( configNUMBER_OF_CORES == 1 )

    /* Tracks the critical section nesting depth for the (single) core. */
    PRIVILEGED_DATA volatile uint64_t ullCriticalNesting = 0ULL;

    /* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
     * then floating point context must be saved and restored for the task. */
    PRIVILEGED_DATA uint64_t ullPortTaskHasFPUContext = pdFALSE;

    /* Set to 1 to pend a context switch from an ISR. */
    PRIVILEGED_DATA uint64_t ullPortYieldRequired = pdFALSE;

    /* Counts the interrupt nesting depth. A context switch is only performed
     * if the nesting depth is 0. */
    PRIVILEGED_DATA uint64_t ullPortInterruptNesting = 0;

#else /* #if ( configNUMBER_OF_CORES == 1 ) */

    /* Per-core critical section nesting depths. */
    PRIVILEGED_DATA volatile uint64_t ullCriticalNestings[ configNUMBER_OF_CORES ] = { 0 };

    /* Flags to check if the secondary cores are ready. */
    PRIVILEGED_DATA volatile uint8_t ucSecondaryCoresReadyFlags[ configNUMBER_OF_CORES - 1 ] = { 0 };

    /* Flag to signal that the primary core has done all the shared initialisations. */
    PRIVILEGED_DATA volatile uint8_t ucPrimaryCoreInitDoneFlag = 0;

    /* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
     * then floating point context must be saved and restored for the task. */
    PRIVILEGED_DATA uint64_t ullPortTaskHasFPUContext[ configNUMBER_OF_CORES ] = { pdFALSE };

    /* Set to 1 to pend a context switch from an ISR. */
    PRIVILEGED_DATA uint64_t ullPortYieldRequired[ configNUMBER_OF_CORES ] = { pdFALSE };

    /* Counts the interrupt nesting depth. A context switch is only performed
     * if the nesting depth is 0. */
    PRIVILEGED_DATA uint64_t ullPortInterruptNestings[ configNUMBER_OF_CORES ] = { 0 };

#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
@@ -1157,12 +1166,12 @@ BaseType_t xPortStartScheduler( void )
11571166 volatile uint8_t ucMaxPriorityValue ;
11581167
11591168 /* Determine how many priority bits are implemented in the GIC.
1160- *
1161- * Save the interrupt priority value that is about to be clobbered. */
1169+ *
1170+ * Save the interrupt priority value that is about to be clobbered. */
11621171 ucOriginalPriority = * pucFirstUserPriorityRegister ;
11631172
11641173 /* Determine the number of priority bits available. First write to
1165- * all possible bits. */
1174+ * all possible bits. */
11661175 * pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE ;
11671176
11681177 /* Read the value back to see how many bits stuck. */
@@ -1175,12 +1184,12 @@ BaseType_t xPortStartScheduler( void )
11751184 }
11761185
11771186 /* Sanity check configUNIQUE_INTERRUPT_PRIORITIES matches the read
1178- * value. */
1187+ * value. */
11791188 configASSERT ( ucMaxPriorityValue >= portLOWEST_INTERRUPT_PRIORITY );
11801189
11811190
11821191 /* Restore the clobbered interrupt priority register to its original
1183- * value. */
1192+ * value. */
11841193 * pucFirstUserPriorityRegister = ucOriginalPriority ;
11851194 }
11861195 #endif /* configASSERT_DEFINED */
@@ -1523,9 +1532,9 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
#if ( configNUMBER_OF_CORES > 1 )

    /* Which core owns the lock? One bitmask entry per core recording which
     * lock bits that core currently holds. Keep in privileged, shareable RAM
     * so every core observes a coherent view. */
    PRIVILEGED_DATA volatile uint64_t ullOwnedByCore[ portMAX_CORE_COUNT ];

    /* Lock count a core owns - the recursive acquisition depth of each lock.
     * Only the owning core modifies its entry while holding the gate word. */
    PRIVILEGED_DATA volatile uint64_t ullRecursionCountByLock[ eLockCount ];

    /* Spinlock gate words. Index 0 is used for ISR lock and Index 1 is used
     * for task lock. 0 = free, 1 = held. */
    PRIVILEGED_DATA uint32_t ulGateWord[ eLockCount ];
@@ -1549,13 +1558,14 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
15491558
    /* Release the spinlock whose gate word is pointed to by ulLock.
     * Caller must currently hold the lock; no ownership check is made here. */
    static inline void prvSpinUnlock( uint32_t * ulLock )
    {
        /* Conservative unlock: preserve original barriers for broad HW/FVP.
         *
         * - "dmb sy"  orders all prior critical-section accesses before the
         *             releasing store (release semantics).
         * - str #0    marks the gate word free.
         * - "sev"     wakes any cores parked in WFE waiting for the lock.
         * - "dsb sy" / "isb sy" force completion and resynchronise the
         *             pipeline before execution continues.
         *
         * NOTE(review): SEV is issued before the DSB, so a waiting core may
         * wake before the store is globally visible and spin one extra
         * iteration - benign for a spin loop, but confirm against the WFE
         * wait path if tightening the barriers. */
        __asm volatile (
            "dmb sy \n"
            "mov w1, #0 \n"
            "str w1, [%x0] \n"
            "sev \n"
            "dsb sy \n"
            "isb sy \n"
            :
            : "r" ( ulLock )
            : "memory", "w1"
        );
    }
@@ -1566,22 +1576,30 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
15661576
    /* Attempt to acquire the spinlock whose gate word is pointed to by ulLock
     * without blocking.
     *
     * Returns 0 if the lock was acquired, 1 if it was already held. */
    static inline uint32_t prvSpinTrylock( uint32_t * ulLock )
    {
        /*
         * Conservative LDXR/STXR trylock:
         * - Return 1 immediately if busy, clearing exclusive state (CLREX).
         * - Retry STXR only on spurious failure when observed free.
         * - DMB on success to preserve expected acquire semantics.
         */
        register uint32_t ulRet;

        __asm volatile (
            "1: \n"
            "ldxr w1, [%x1] \n"     /* Load gate word, open exclusive monitor. */
            "cbnz w1, 2f \n"        /* Busy -> return 1 */
            "mov w2, #1 \n"
            "stxr w3, w2, [%x1] \n" /* w3 = status */
            "cbnz w3, 1b \n"        /* Retry on STXR failure */
            "dmb sy \n"             /* Acquire barrier on success */
            "mov %w0, #0 \n"        /* Success */
            "b 3f \n"
            "2: \n"
            "clrex \n"              /* Clear monitor when busy */
            "mov %w0, #1 \n"        /* Busy */
            "3: \n"
            : "=r" ( ulRet )
            : "r" ( ulLock )
            : "memory", "w1", "w2", "w3"
        );

        return ulRet;
    }
@@ -1629,10 +1647,10 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
16291647 if ( prvSpinTrylock ( & ulGateWord [ eLockNum ] ) != 0 )
16301648 {
16311649 /* Check if the core owns the spinlock. */
1632- if ( prvGet64 ( & ucOwnedByCore [ xCoreID ] ) & ulLockBit )
1650+ if ( prvGet64 ( & ullOwnedByCore [ xCoreID ] ) & ulLockBit )
16331651 {
1634- configASSERT ( prvGet64 ( & ucRecursionCountByLock [ eLockNum ] ) != 255u );
1635- prvSet64 ( & ucRecursionCountByLock [ eLockNum ], ( prvGet64 ( & ucRecursionCountByLock [ eLockNum ] ) + 1 ) );
1652+ configASSERT ( prvGet64 ( & ullRecursionCountByLock [ eLockNum ] ) != 255u );
1653+ prvSet64 ( & ullRecursionCountByLock [ eLockNum ], ( prvGet64 ( & ullRecursionCountByLock [ eLockNum ] ) + 1 ) );
16361654 return ;
16371655 }
16381656
@@ -1656,26 +1674,26 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
16561674 __asm__ __volatile__ ( "dmb sy" ::: "memory" );
16571675
16581676 /* Assert the lock count is 0 when the spinlock is free and is acquired. */
1659- configASSERT ( prvGet64 ( & ucRecursionCountByLock [ eLockNum ] ) == 0 );
1677+ configASSERT ( prvGet64 ( & ullRecursionCountByLock [ eLockNum ] ) == 0 );
16601678
16611679 /* Set lock count as 1. */
1662- prvSet64 ( & ucRecursionCountByLock [ eLockNum ], 1 );
1663- /* Set ucOwnedByCore . */
1664- prvSet64 ( & ucOwnedByCore [ xCoreID ], ( prvGet64 ( & ucOwnedByCore [ xCoreID ] ) | ulLockBit ) );
1680+ prvSet64 ( & ullRecursionCountByLock [ eLockNum ], 1 );
1681+ /* Set ullOwnedByCore . */
1682+ prvSet64 ( & ullOwnedByCore [ xCoreID ], ( prvGet64 ( & ullOwnedByCore [ xCoreID ] ) | ulLockBit ) );
16651683 }
16661684 /* Lock release. */
16671685 else
16681686 {
16691687 /* Assert the lock is not free already. */
1670- configASSERT ( ( prvGet64 ( & ucOwnedByCore [ xCoreID ] ) & ulLockBit ) != 0 );
1671- configASSERT ( prvGet64 ( & ucRecursionCountByLock [ eLockNum ] ) != 0 );
1688+ configASSERT ( ( prvGet64 ( & ullOwnedByCore [ xCoreID ] ) & ulLockBit ) != 0 );
1689+ configASSERT ( prvGet64 ( & ullRecursionCountByLock [ eLockNum ] ) != 0 );
16721690
1673- /* Reduce ucRecursionCountByLock by 1. */
1674- prvSet64 ( & ucRecursionCountByLock [ eLockNum ], ( prvGet64 ( & ucRecursionCountByLock [ eLockNum ] ) - 1 ) );
1691+ /* Reduce ullRecursionCountByLock by 1. */
1692+ prvSet64 ( & ullRecursionCountByLock [ eLockNum ], ( prvGet64 ( & ullRecursionCountByLock [ eLockNum ] ) - 1 ) );
16751693
1676- if ( !prvGet64 ( & ucRecursionCountByLock [ eLockNum ] ) )
1694+ if ( !prvGet64 ( & ullRecursionCountByLock [ eLockNum ] ) )
16771695 {
1678- prvSet64 ( & ucOwnedByCore [ xCoreID ], ( prvGet64 ( & ucOwnedByCore [ xCoreID ] ) & ~ulLockBit ) );
1696+ prvSet64 ( & ullOwnedByCore [ xCoreID ], ( prvGet64 ( & ullOwnedByCore [ xCoreID ] ) & ~ulLockBit ) );
16791697 prvSpinUnlock ( & ulGateWord [ eLockNum ] );
16801698 /* Add barrier to ensure lock status is reflected before we proceed. */
16811699 __asm__ __volatile__ ( "dmb sy" ::: "memory" );
0 commit comments