@@ -2929,15 +2929,15 @@ dummy_func(
29292929 };
29302930
29312931 specializing op (_SPECIALIZE_FOR_ITER , (counter /1 , iter -- iter )) {
2932- #if ENABLE_SPECIALIZATION
2932+ #if ENABLE_SPECIALIZATION_FT
29332933 if (ADAPTIVE_COUNTER_TRIGGERS (counter )) {
29342934 next_instr = this_instr ;
29352935 _Py_Specialize_ForIter (iter , next_instr , oparg );
29362936 DISPATCH_SAME_OPARG ();
29372937 }
29382938 OPCODE_DEFERRED_INC (FOR_ITER );
29392939 ADVANCE_ADAPTIVE_COUNTER (this_instr [1 ].counter );
2940- #endif /* ENABLE_SPECIALIZATION */
2940+ #endif /* ENABLE_SPECIALIZATION_FT */
29412941 }
29422942
29432943 replaced op (_FOR_ITER , (iter -- iter , next )) {
@@ -3015,10 +3015,20 @@ dummy_func(
30153015
30163016
30173017 op (_ITER_CHECK_LIST , (iter -- iter )) {
3018- EXIT_IF (Py_TYPE (PyStackRef_AsPyObjectBorrow (iter )) != & PyListIter_Type );
3018+ PyObject * iter_o = PyStackRef_AsPyObjectBorrow (iter );
3019+ EXIT_IF (Py_TYPE (iter_o ) != & PyListIter_Type );
3020+ #ifdef Py_GIL_DISABLED
3021+ _PyListIterObject * it = (_PyListIterObject * )iter_o ;
3022+ EXIT_IF (it -> it_seq == NULL ||
3023+ (!_Py_IsOwnedByCurrentThread ((PyObject * )it -> it_seq ) &&
3024+ !_PyObject_GC_IS_SHARED (it -> it_seq )));
3025+ #endif
30193026 }
30203027
30213028 replaced op (_ITER_JUMP_LIST , (iter -- iter )) {
3029+ // For free-threaded Python, the loop exit can happen at any point during item
3030+ // retrieval, so separate ops don't make much sense.
3031+ #ifndef Py_GIL_DISABLED
30223032 PyObject * iter_o = PyStackRef_AsPyObjectBorrow (iter );
30233033 _PyListIterObject * it = (_PyListIterObject * )iter_o ;
30243034 assert (Py_TYPE (iter_o ) == & PyListIter_Type );
@@ -3036,10 +3046,12 @@ dummy_func(
30363046 JUMPBY (oparg + 1 );
30373047 DISPATCH ();
30383048 }
3049+ #endif
30393050 }
30403051
30413052 // Only used by Tier 2
30423053 op (_GUARD_NOT_EXHAUSTED_LIST , (iter -- iter )) {
3054+ #ifndef Py_GIL_DISABLED
30433055 PyObject * iter_o = PyStackRef_AsPyObjectBorrow (iter );
30443056 _PyListIterObject * it = (_PyListIterObject * )iter_o ;
30453057 assert (Py_TYPE (iter_o ) == & PyListIter_Type );
@@ -3049,6 +3061,7 @@ dummy_func(
30493061 it -> it_index = -1 ;
30503062 EXIT_IF (1 );
30513063 }
3064+ #endif
30523065 }
30533066
30543067 op (_ITER_NEXT_LIST , (iter -- iter , next )) {
@@ -3057,8 +3070,30 @@ dummy_func(
30573070 assert (Py_TYPE (iter_o ) == & PyListIter_Type );
30583071 PyListObject * seq = it -> it_seq ;
30593072 assert (seq );
3073+ #ifdef Py_GIL_DISABLED
3074+ assert (_Py_IsOwnedByCurrentThread ((PyObject * )seq ) ||
3075+ _PyObject_GC_IS_SHARED (seq ));
3076+ STAT_INC (FOR_ITER , hit );
3077+ Py_ssize_t idx = _Py_atomic_load_ssize_relaxed (& it -> it_index );
3078+ PyObject * item ;
3079+ int result = _PyList_GetItemRefNoLock (it -> it_seq , idx , & item );
3080+ // A negative result means we lost a race with another thread
3081+ // and we need to take the slow path.
3082+ EXIT_IF (result < 0 );
3083+ if (result == 0 ) {
3084+ _Py_atomic_store_ssize_relaxed (& it -> it_index , -1 );
3085+ PyStackRef_CLOSE (iter );
3086+ STACK_SHRINK (1 );
3087+ /* Jump forward oparg, then skip following END_FOR and POP_TOP instructions */
3088+ JUMPBY (oparg + 2 );
3089+ DISPATCH ();
3090+ }
3091+ _Py_atomic_store_ssize_relaxed (& it -> it_index , idx + 1 );
3092+ next = PyStackRef_FromPyObjectSteal (item );
3093+ #else
30603094 assert (it -> it_index < PyList_GET_SIZE (seq ));
30613095 next = PyStackRef_FromPyObjectNew (PyList_GET_ITEM (seq , it -> it_index ++ ));
3096+ #endif
30623097 }
30633098
30643099 macro (FOR_ITER_LIST ) =
@@ -3073,8 +3108,11 @@ dummy_func(
30733108
30743109 replaced op (_ITER_JUMP_TUPLE , (iter -- iter )) {
30753110 PyObject * iter_o = PyStackRef_AsPyObjectBorrow (iter );
3076- _PyTupleIterObject * it = (_PyTupleIterObject * )iter_o ;
30773111 assert (Py_TYPE (iter_o ) == & PyTupleIter_Type );
3112+ // For free-threaded Python, the loop exit can happen at any point during item
3113+ // retrieval, so separate ops don't make much sense.
3114+ #ifndef Py_GIL_DISABLED
3115+ _PyTupleIterObject * it = (_PyTupleIterObject * )iter_o ;
30783116 STAT_INC (FOR_ITER , hit );
30793117 PyTupleObject * seq = it -> it_seq ;
30803118 if (seq == NULL || it -> it_index >= PyTuple_GET_SIZE (seq )) {
@@ -3086,26 +3124,44 @@ dummy_func(
30863124 JUMPBY (oparg + 1 );
30873125 DISPATCH ();
30883126 }
3127+ #endif
30893128 }
30903129
30913130 // Only used by Tier 2
30923131 op (_GUARD_NOT_EXHAUSTED_TUPLE , (iter -- iter )) {
30933132 PyObject * iter_o = PyStackRef_AsPyObjectBorrow (iter );
30943133 _PyTupleIterObject * it = (_PyTupleIterObject * )iter_o ;
30953134 assert (Py_TYPE (iter_o ) == & PyTupleIter_Type );
3135+ #ifndef Py_GIL_DISABLED
30963136 PyTupleObject * seq = it -> it_seq ;
30973137 EXIT_IF (seq == NULL );
30983138 EXIT_IF (it -> it_index >= PyTuple_GET_SIZE (seq ));
3139+ #endif
30993140 }
31003141
31013142 op (_ITER_NEXT_TUPLE , (iter -- iter , next )) {
31023143 PyObject * iter_o = PyStackRef_AsPyObjectBorrow (iter );
31033144 _PyTupleIterObject * it = (_PyTupleIterObject * )iter_o ;
31043145 assert (Py_TYPE (iter_o ) == & PyTupleIter_Type );
31053146 PyTupleObject * seq = it -> it_seq ;
3147+ #ifdef Py_GIL_DISABLED
3148+ STAT_INC (FOR_ITER , hit );
3149+ Py_ssize_t idx = _Py_atomic_load_ssize_relaxed (& it -> it_index );
3150+ if (seq == NULL || (size_t )idx >= (size_t )PyTuple_GET_SIZE (seq )) {
3151+ _Py_atomic_store_ssize_relaxed (& it -> it_index , -1 );
3152+ PyStackRef_CLOSE (iter );
3153+ STACK_SHRINK (1 );
3154+ /* Jump forward oparg, then skip following END_FOR and POP_TOP instructions */
3155+ JUMPBY (oparg + 2 );
3156+ DISPATCH ();
3157+ }
3158+ _Py_atomic_store_ssize_relaxed (& it -> it_index , idx + 1 );
3159+ next = PyStackRef_FromPyObjectNew (PyTuple_GET_ITEM (seq , idx ));
3160+ #else
31063161 assert (seq );
31073162 assert (it -> it_index < PyTuple_GET_SIZE (seq ));
31083163 next = PyStackRef_FromPyObjectNew (PyTuple_GET_ITEM (seq , it -> it_index ++ ));
3164+ #endif
31093165 }
31103166
31113167 macro (FOR_ITER_TUPLE ) =
0 commit comments