@@ -91,8 +91,6 @@ typedef struct {
 
 #define INLINE_CACHE_ENTRIES_FOR_ITER CACHE_ENTRIES(_PyForIterCache)
 
-extern uint8_t _PyOpcode_Adaptive[256];
-
 // Borrowed references to common callables:
 struct callable_cache {
     PyObject *isinstance;
@@ -219,11 +217,14 @@ extern int _Py_Specialize_LoadAttr(PyObject *owner, _Py_CODEUNIT *instr,
                                    PyObject *name);
 extern int _Py_Specialize_StoreAttr(PyObject *owner, _Py_CODEUNIT *instr,
                                     PyObject *name);
-extern int _Py_Specialize_LoadGlobal(PyObject *globals, PyObject *builtins, _Py_CODEUNIT *instr, PyObject *name);
-extern int _Py_Specialize_BinarySubscr(PyObject *sub, PyObject *container, _Py_CODEUNIT *instr);
-extern int _Py_Specialize_StoreSubscr(PyObject *container, PyObject *sub, _Py_CODEUNIT *instr);
-extern int _Py_Specialize_Call(PyObject *callable, _Py_CODEUNIT *instr,
-                               int nargs, PyObject *kwnames);
+extern void _Py_Specialize_LoadGlobal(PyObject *globals, PyObject *builtins,
+                                      _Py_CODEUNIT *instr, PyObject *name);
+extern void _Py_Specialize_BinarySubscr(PyObject *sub, PyObject *container,
+                                        _Py_CODEUNIT *instr);
+extern void _Py_Specialize_StoreSubscr(PyObject *container, PyObject *sub,
+                                       _Py_CODEUNIT *instr);
+extern void _Py_Specialize_Call(PyObject *callable, _Py_CODEUNIT *instr,
+                                int nargs, PyObject *kwnames);
 extern void _Py_Specialize_BinaryOp(PyObject *lhs, PyObject *rhs, _Py_CODEUNIT *instr,
                                     int oparg, PyObject **locals);
 extern void _Py_Specialize_CompareOp(PyObject *lhs, PyObject *rhs,
@@ -377,8 +378,22 @@ write_location_entry_start(uint8_t *ptr, int code, int length)
 
 /* With a 16-bit counter, we have 12 bits for the counter value, and 4 bits for the backoff */
 #define ADAPTIVE_BACKOFF_BITS 4
-/* The initial counter value is 1 == 2**ADAPTIVE_BACKOFF_START - 1 */
-#define ADAPTIVE_BACKOFF_START 1
+
+// A value of 1 means that we attempt to specialize the *second* time each
+// instruction is executed. Executing twice is a much better indicator of
+// "hotness" than executing once, but additional warmup delays only prevent
+// specialization. Most types stabilize by the second execution, too:
+#define ADAPTIVE_WARMUP_VALUE 1
+#define ADAPTIVE_WARMUP_BACKOFF 1
+
+// A value of 52 means that we attempt to re-specialize after 53 misses (a prime
+// number, useful for avoiding artifacts if every nth value is a different type
+// or something). Setting the backoff to 0 means that the counter is reset to
+// the same state as a warming-up instruction (value == 1, backoff == 1) after
+// deoptimization. This isn't strictly necessary, but it is a bit easier to
+// reason about when thinking about the opcode transitions as a state machine:
+#define ADAPTIVE_COOLDOWN_VALUE 52
+#define ADAPTIVE_COOLDOWN_BACKOFF 0
 
 #define MAX_BACKOFF_VALUE (16 - ADAPTIVE_BACKOFF_BITS)
 
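The macros in this hunk split a single 16-bit inline cache counter into a 12-bit countdown value and a 4-bit backoff exponent. A minimal standalone sketch of that packing, assuming the same bit layout as `adaptive_counter_bits` (value in the upper bits); the `pack_counter` helper is illustrative, not CPython API:

```c
#include <stdint.h>
#include <stdio.h>

#define ADAPTIVE_BACKOFF_BITS 4
#define ADAPTIVE_WARMUP_VALUE 1
#define ADAPTIVE_WARMUP_BACKOFF 1
#define MAX_BACKOFF_VALUE (16 - ADAPTIVE_BACKOFF_BITS)

/* Illustrative packing: countdown value in the upper 12 bits, backoff
   exponent in the lower 4 bits of one 16-bit counter. */
static uint16_t
pack_counter(unsigned int value, unsigned int backoff)
{
    return (uint16_t)((value << ADAPTIVE_BACKOFF_BITS) | backoff);
}

int
main(void)
{
    uint16_t counter = pack_counter(ADAPTIVE_WARMUP_VALUE,
                                    ADAPTIVE_WARMUP_BACKOFF);
    unsigned int value = counter >> ADAPTIVE_BACKOFF_BITS;
    unsigned int backoff = counter & ((1 << ADAPTIVE_BACKOFF_BITS) - 1);
    /* Warmup state packs to 0x0011: value == 1, backoff == 1. */
    printf("counter=0x%04x value=%u backoff=%u\n", counter, value, backoff);
    /* The largest countdown a backoff exponent can request is
       (1 << MAX_BACKOFF_VALUE) - 1 == 4095, which fits in 12 bits. */
    printf("max countdown=%d\n", (1 << MAX_BACKOFF_VALUE) - 1);
    return 0;
}
```

Capping the backoff exponent at MAX_BACKOFF_VALUE is what guarantees the exponentially growing countdown always fits in its 12 value bits.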
@@ -390,9 +405,15 @@ adaptive_counter_bits(int value, int backoff) {
 }
 
 static inline uint16_t
-adaptive_counter_start(void) {
-    unsigned int value = (1 << ADAPTIVE_BACKOFF_START) - 1;
-    return adaptive_counter_bits(value, ADAPTIVE_BACKOFF_START);
+adaptive_counter_warmup(void) {
+    return adaptive_counter_bits(ADAPTIVE_WARMUP_VALUE,
+                                 ADAPTIVE_WARMUP_BACKOFF);
+}
+
+static inline uint16_t
+adaptive_counter_cooldown(void) {
+    return adaptive_counter_bits(ADAPTIVE_COOLDOWN_VALUE,
+                                 ADAPTIVE_COOLDOWN_BACKOFF);
 }
 
 static inline uint16_t
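A quick sanity check of the warmup/cooldown arithmetic described in the comments above. This is a standalone sketch, not CPython code; the decrement loops merely model "the counter reaching zero triggers a (re-)specialization attempt":

```c
#include <stdio.h>

#define ADAPTIVE_WARMUP_VALUE   1
#define ADAPTIVE_COOLDOWN_VALUE 52

int
main(void)
{
    /* Warmup: the counter starts at 1, so it reaches zero on the second
       execution, which is when specialization is first attempted. */
    int counter = ADAPTIVE_WARMUP_VALUE;
    int executions = 0;
    while (counter-- > 0) {
        executions++;
    }
    printf("specialize on execution %d\n", executions + 1);  /* prints 2 */

    /* Cooldown: after deoptimization the counter is reset to 52, so the
       next re-specialization attempt comes after 53 misses in total. */
    counter = ADAPTIVE_COOLDOWN_VALUE;
    int misses = 0;
    while (counter-- > 0) {
        misses++;
    }
    printf("re-specialize on miss %d\n", misses + 1);  /* prints 53 */
    return 0;
}
```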