@@ -263,9 +263,7 @@ namespace coreinit
 	thread = (OSThread_t*)memory_getPointerFromVirtualOffset(coreinit_allocFromSysArea(sizeof(OSThread_t), 32));
 	memset(thread, 0x00, sizeof(OSThread_t));
 	// init signatures
-	thread->context.magic0 = OS_CONTEXT_MAGIC_0;
-	thread->context.magic1 = OS_CONTEXT_MAGIC_1;
-	thread->magic = 'tHrD';
+	thread->SetMagic();
 	thread->type = threadType;
 	thread->state = (entryPoint != MPTR_NULL) ? OSThread_t::THREAD_STATE::STATE_READY : OSThread_t::THREAD_STATE::STATE_NONE;
 	thread->entrypoint = _swapEndianU32(entryPoint);
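
Note on this hunk: the three explicit signature assignments are folded into a single `thread->SetMagic()` call, and the assert added further down relies on a matching `IsValidMagic()`. Neither helper's definition is part of this diff; the following is a minimal standalone sketch of what they could look like, reconstructed from the removed lines. The `OS_CONTEXT_MAGIC_*` values and the struct layout below are placeholders for illustration, not the real Cemu definitions.

```cpp
#include <cstdint>

// Placeholder values; the real constants live in Cemu's coreinit headers.
constexpr uint32_t OS_CONTEXT_MAGIC_0 = 0x4F53436F;
constexpr uint32_t OS_CONTEXT_MAGIC_1 = 0x6E746578;

struct OSContext_t
{
	uint32_t magic0;
	uint32_t magic1;
};

// Simplified stand-in for the real OSThread_t layout.
struct OSThread_t
{
	OSContext_t context;
	uint32_t magic;

	// consolidates the per-field assignments removed in this hunk
	void SetMagic()
	{
		context.magic0 = OS_CONTEXT_MAGIC_0;
		context.magic1 = OS_CONTEXT_MAGIC_1;
		magic = 'tHrD';
	}

	// counterpart check used by the new debug asserts
	bool IsValidMagic() const
	{
		return context.magic0 == OS_CONTEXT_MAGIC_0 &&
		       context.magic1 == OS_CONTEXT_MAGIC_1 &&
		       magic == 'tHrD';
	}
};
```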
@@ -563,7 +561,10 @@ namespace coreinit
 	// adds the thread to each core's run queue if in runable state
 	void __OSAddReadyThreadToRunQueue(OSThread_t* thread)
 	{
+		cemu_assert_debug(MMU_IsInPPCMemorySpace(thread));
+		cemu_assert_debug(thread->IsValidMagic());
 		cemu_assert_debug(__OSHasSchedulerLock());
+
 		if (thread->state != OSThread_t::THREAD_STATE::STATE_READY)
 			return;
 		if (thread->suspendCounter != 0)
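
The new `MMU_IsInPPCMemorySpace(thread)` assert rejects thread pointers that do not fall inside the emulated PowerPC address space before the thread is queued, complementing the `IsValidMagic()` signature check. Its implementation is not shown in this commit; as a rough illustration only, such a check could be a bounds test against the host mapping of guest memory. The names and globals below are assumptions for the sketch, not the Cemu code.

```cpp
#include <cstddef>
#include <cstdint>

// assumed: host mapping of the emulated PowerPC address space
static uint8_t* s_ppcMemoryBase = nullptr;
static size_t   s_ppcMemorySize = 0;

// returns true if ptr lies inside the emulated guest memory range
static bool IsInPPCMemorySpace_sketch(const void* ptr)
{
	const auto* p = reinterpret_cast<const uint8_t*>(ptr);
	return p >= s_ppcMemoryBase && p < s_ppcMemoryBase + s_ppcMemorySize;
}
```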
@@ -703,10 +704,18 @@ namespace coreinit
 	}
 	else if (prevAffinityMask != affinityMask)
 	{
-		__OSRemoveThreadFromRunQueues(thread);
-		thread->attr = (thread->attr & ~7) | (affinityMask & 7);
-		thread->context.setAffinity(affinityMask);
-		__OSAddReadyThreadToRunQueue(thread);
+		if (thread->state != OSThread_t::THREAD_STATE::STATE_NONE)
+		{
+			__OSRemoveThreadFromRunQueues(thread);
+			thread->attr = (thread->attr & ~7) | (affinityMask & 7);
+			thread->context.setAffinity(affinityMask);
+			__OSAddReadyThreadToRunQueue(thread);
+		}
+		else
+		{
+			thread->attr = (thread->attr & ~7) | (affinityMask & 7);
+			thread->context.setAffinity(affinityMask);
+		}
 	}
 	__OSUnlockScheduler();
 	return true;
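
This hunk changes the affinity-setting path for threads still in `STATE_NONE` (e.g. created without an entry point): previously they were unconditionally removed from and re-added to the run queues, which could enqueue a thread that is not actually runnable; now only live threads are re-queued, while dormant ones just get their affinity bits recorded. A compact standalone sketch of that guard pattern, using illustrative names rather than the Cemu functions:

```cpp
#include <cstdint>

enum class ThreadState { STATE_NONE, STATE_READY, STATE_RUNNING, STATE_WAITING };

struct Thread
{
	ThreadState state = ThreadState::STATE_NONE;
	uint32_t attr = 0; // low 3 bits = core affinity mask
};

static void RemoveFromRunQueues(Thread&) { /* detach from per-core queues */ }
static void AddReadyToRunQueue(Thread&)  { /* enqueue on every core in the mask */ }

static void SetAffinity(Thread& t, uint32_t affinityMask)
{
	if (t.state != ThreadState::STATE_NONE)
	{
		// live thread: leave the queues before the mask changes,
		// then re-queue under the new mask
		RemoveFromRunQueues(t);
		t.attr = (t.attr & ~7u) | (affinityMask & 7u);
		AddReadyToRunQueue(t);
	}
	else
	{
		// dormant thread: record the mask, never touch the run queues
		t.attr = (t.attr & ~7u) | (affinityMask & 7u);
	}
}
```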