@@ -351,7 +351,7 @@ _Py_ReachedRecursionLimitWithMargin(PyThreadState *tstate, int margin_count)
         return 0;
     }
     if (_tstate->c_stack_hard_limit == 0) {
-        _Py_InitializeRecursionLimits(tstate);
+        PyUnstable_ThreadState_ResetStack(tstate);
     }
     return here_addr <= _tstate->c_stack_soft_limit + margin_count * _PyOS_STACK_MARGIN_BYTES;
 }
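
For orientation, the check above treats the C stack as growing downward: the limit counts as reached once the stack pointer drops to within margin_count safety margins of the soft limit. A minimal standalone sketch of that comparison (illustrative only, not CPython code; STACK_MARGIN_BYTES is an assumed stand-in for _PyOS_STACK_MARGIN_BYTES):

#include <stdbool.h>
#include <stdint.h>

#define STACK_MARGIN_BYTES (8 * 1024)   /* assumed stand-in value */

static bool
reached_limit_with_margin(uintptr_t here_addr, uintptr_t soft_limit,
                          int margin_count)
{
    /* The stack grows downward, so each extra margin_count raises the
     * effective limit, letting callers reserve several margins of
     * headroom before reporting exhaustion. */
    return here_addr <= soft_limit + (uintptr_t)margin_count * STACK_MARGIN_BYTES;
}
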
@@ -440,34 +440,68 @@ int pthread_attr_destroy(pthread_attr_t *a)
 
 
 void
-_Py_InitializeRecursionLimits(PyThreadState *tstate)
+PyUnstable_ThreadState_SetStack(PyThreadState *tstate,
+                                void *stack_start_addr, size_t stack_size)
+{
+    assert(stack_size > 0);
+
+    _PyThreadStateImpl *ts = (_PyThreadStateImpl *)tstate;
+    ts->c_stack_hard_limit = (uintptr_t)stack_start_addr;
+    ts->c_stack_top = (uintptr_t)stack_start_addr + stack_size;
+
+    uintptr_t soft_limit = ts->c_stack_hard_limit;
+    if (stack_size >= _PyOS_STACK_MARGIN_BYTES) {
+#ifdef _Py_THREAD_SANITIZER
+        // Thread sanitizer crashes if we use a bit more than half the stack.
+        soft_limit += (stack_size / 2);
+#else
+        soft_limit += _PyOS_STACK_MARGIN_BYTES;
+#endif
+    }
+    ts->c_stack_soft_limit = soft_limit;
+
+    // Sanity checks
+    assert(ts->c_stack_hard_limit <= ts->c_stack_soft_limit);
+    assert(ts->c_stack_soft_limit < ts->c_stack_top);
+
+    // Test the stack pointer
+#ifndef NDEBUG
+    uintptr_t here_addr = _Py_get_machine_stack_pointer();
+#endif
+    assert(ts->c_stack_soft_limit < here_addr);
+    assert(here_addr < ts->c_stack_top);
+}
+
+
+void
+PyUnstable_ThreadState_ResetStack(PyThreadState *tstate)
 {
-    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
 #ifdef WIN32
     ULONG_PTR low, high;
     GetCurrentThreadStackLimits(&low, &high);
-    _tstate->c_stack_top = (uintptr_t)high;
+
     ULONG guarantee = 0;
     SetThreadStackGuarantee(&guarantee);
-    _tstate->c_stack_hard_limit = ((uintptr_t)low) + guarantee + _PyOS_STACK_MARGIN_BYTES;
-    _tstate->c_stack_soft_limit = _tstate->c_stack_hard_limit + _PyOS_STACK_MARGIN_BYTES;
+
+    uintptr_t start = (uintptr_t)low + guarantee + _PyOS_STACK_MARGIN_BYTES;
+    size_t size = (uintptr_t)high - start;
+    PyUnstable_ThreadState_SetStack(tstate, (void *)start, size);
+
 #elif defined(__APPLE__)
     pthread_t this_thread = pthread_self();
-    void *stack_addr = pthread_get_stackaddr_np(this_thread);   // top of the stack
-    size_t stack_size = pthread_get_stacksize_np(this_thread);
-    _tstate->c_stack_top = (uintptr_t)stack_addr;
-    _tstate->c_stack_hard_limit = _tstate->c_stack_top - stack_size;
-    _tstate->c_stack_soft_limit = _tstate->c_stack_hard_limit + _PyOS_STACK_MARGIN_BYTES;
+    void *top = pthread_get_stackaddr_np(this_thread);   // top of the stack
+    size_t size = pthread_get_stacksize_np(this_thread);
+    PyUnstable_ThreadState_SetStack(tstate, (char *)top - size, size);
+
 #else
-    uintptr_t here_addr = _Py_get_machine_stack_pointer();
-    /// XXX musl supports HAVE_PTHRED_GETATTR_NP, but the resulting stack size
-    /// (on alpine at least) is much smaller than expected and imposes undue limits
-    /// compared to the old stack size estimation. (We assume musl is not glibc.)
+    // XXX musl supports HAVE_PTHREAD_GETATTR_NP, but the resulting stack size
+    // (on Alpine at least) is much smaller than expected and imposes undue limits
+    // compared to the old stack size estimation. (We assume musl is not glibc.)
 #  if defined(HAVE_PTHREAD_GETATTR_NP) && !defined(_AIX) && \
        !defined(__NetBSD__) && (defined(__GLIBC__) || !defined(__linux__))
-    size_t stack_size, guard_size;
-    void *stack_addr;
     pthread_attr_t attr;
+    size_t guard_size, stack_size;
+    void *stack_addr;
     int err = pthread_getattr_np(pthread_self(), &attr);
     if (err == 0) {
         err = pthread_attr_getguardsize(&attr, &guard_size);
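
Taken together, the hunk above splits the old internal initializer into a small unstable API: PyUnstable_ThreadState_SetStack records an explicit stack region on the thread state and derives the soft limit from it, while PyUnstable_ThreadState_ResetStack re-derives the region from the OS. A hypothetical sketch of how an embedder that runs Python on its own stack (a fiber or coroutine library, say) might use the pair; switch_to_fiber, switch_back_to_thread_stack, and FIBER_STACK_SIZE are illustrative stand-ins, not CPython API:

#include <Python.h>
#include <stdlib.h>

#define FIBER_STACK_SIZE (1024 * 1024)   /* illustrative 1 MiB fiber stack */

/* Stand-ins for the embedder's own stack-switching machinery. */
extern void switch_to_fiber(void *stack, size_t size);
extern void switch_back_to_thread_stack(void);

static void
run_python_on_fiber(void)
{
    void *fiber_stack = malloc(FIBER_STACK_SIZE);

    /* SetStack must be called while already running on the new stack,
     * since it asserts that the current stack pointer lies inside the
     * region it is given. */
    switch_to_fiber(fiber_stack, FIBER_STACK_SIZE);

    PyThreadState *tstate = PyThreadState_Get();
    PyUnstable_ThreadState_SetStack(tstate, fiber_stack, FIBER_STACK_SIZE);

    /* ... run Python code; overflow checks now use the fiber's bounds ... */

    /* Back on the OS thread stack, re-derive the limits from the OS. */
    switch_back_to_thread_stack();
    PyUnstable_ThreadState_ResetStack(tstate);

    free(fiber_stack);
}
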
@@ -476,25 +510,23 @@ _Py_InitializeRecursionLimits(PyThreadState *tstate)
     }
     if (err == 0) {
         uintptr_t base = ((uintptr_t)stack_addr) + guard_size;
-        _tstate->c_stack_top = base + stack_size;
-#ifdef _Py_THREAD_SANITIZER
-        // Thread sanitizer crashes if we use a bit more than half the stack.
-        _tstate->c_stack_soft_limit = base + (stack_size / 2);
-#else
-        _tstate->c_stack_soft_limit = base + _PyOS_STACK_MARGIN_BYTES * 2;
-#endif
-        _tstate->c_stack_hard_limit = base + _PyOS_STACK_MARGIN_BYTES;
-        assert(_tstate->c_stack_soft_limit < here_addr);
-        assert(here_addr < _tstate->c_stack_top);
-        return;
+        uintptr_t start = base + _PyOS_STACK_MARGIN_BYTES;
+        size_t pystack_size = (base + stack_size) - start;
+        PyUnstable_ThreadState_SetStack(tstate, (void *)start, pystack_size);
     }
+    else
 #  endif
-    _tstate->c_stack_top = _Py_SIZE_ROUND_UP(here_addr, 4096);
-    _tstate->c_stack_soft_limit = _tstate->c_stack_top - Py_C_STACK_SIZE;
-    _tstate->c_stack_hard_limit = _tstate->c_stack_top - (Py_C_STACK_SIZE + _PyOS_STACK_MARGIN_BYTES);
+    {
+        uintptr_t here_addr = _Py_get_machine_stack_pointer();
+        uintptr_t top = _Py_SIZE_ROUND_UP(here_addr, 4096);
+        uintptr_t start = top - (Py_C_STACK_SIZE + _PyOS_STACK_MARGIN_BYTES);
+        size_t pystack_size = top - start;
+        PyUnstable_ThreadState_SetStack(tstate, (void *)start, pystack_size);
+    }
 #endif
 }
 
+
 /* The function _Py_EnterRecursiveCallTstate() only calls _Py_CheckRecursiveCall()
    if the recursion_depth reaches recursion_limit. */
 int
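
When neither the Windows, Apple, nor pthread_getattr_np paths apply, the fallback above estimates the stack region from the current stack pointer alone: round it up to a page boundary for the top, then reserve Py_C_STACK_SIZE usable bytes plus one margin below it. A standalone sketch of that layout arithmetic, with assumed constants standing in for Py_C_STACK_SIZE and _PyOS_STACK_MARGIN_BYTES (the real values are build-dependent):

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE     4096u
#define C_STACK_SIZE  (1u << 20)     /* assumed stand-in for Py_C_STACK_SIZE */
#define MARGIN_BYTES  (8u * 1024u)   /* assumed stand-in for _PyOS_STACK_MARGIN_BYTES */

/* Round v up to the next multiple of the power-of-two alignment a,
 * mirroring _Py_SIZE_ROUND_UP. */
static inline uintptr_t
size_round_up(uintptr_t v, uintptr_t a)
{
    return (v + a - 1) & ~(a - 1);
}

static void
estimate_stack(uintptr_t here_addr, uintptr_t *start, size_t *size)
{
    /* Treat the current stack pointer, rounded up to a page, as the top
     * of the stack; the region passed to SetStack then spans one hard
     * margin plus C_STACK_SIZE usable bytes below that top. */
    uintptr_t top = size_round_up(here_addr, PAGE_SIZE);
    *start = top - (C_STACK_SIZE + MARGIN_BYTES);   /* becomes the hard limit */
    *size = top - *start;                           /* margin + usable region */
}
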