@@ -600,6 +600,77 @@ static inline Py_ssize_t
 _Py_atomic_load_ssize_acquire(const Py_ssize_t *obj)
 { return __atomic_load_n(obj, __ATOMIC_ACQUIRE); }
 
+
+// --- _Py_atomic_memcpy / _Py_atomic_memmove --------------------------------
+
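+// Copy n bytes from src to dest one pointer-sized word at a time, using
+// relaxed atomic stores so that a concurrent reader never observes a torn
+// pointer value. Both buffers must be pointer-aligned, n must be a multiple
+// of sizeof(void*), and the buffers must not overlap.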
+static inline void *
+_Py_atomic_memcpy_ptr_store_relaxed(void *dest, void *src, size_t n)
+{
+    assert(_Py_IS_ALIGNED(dest, sizeof(void *)));
+    assert(_Py_IS_ALIGNED(src, sizeof(void *)));
+    assert(n % sizeof(void *) == 0);
+
+    if (dest != src) {
+        void **dest_ = (void **)dest;
+        void **src_ = (void **)src;
+        void **end = dest_ + n / sizeof(void *);
+
+        for (; dest_ != end; dest_++, src_++) {
+            __atomic_store_n(dest_, *src_, __ATOMIC_RELAXED);
+        }
+    }
+
+    return dest;
+}
+
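+// Like _Py_atomic_memcpy_ptr_store_relaxed, but the buffers may overlap:
+// the copy direction is chosen so that every source word is read before
+// the copy overwrites it, as memmove does.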
+static inline void *
+_Py_atomic_memmove_ptr_store_relaxed(void *dest, void *src, size_t n)
+{
+    assert(_Py_IS_ALIGNED(dest, sizeof(void *)));
+    assert(_Py_IS_ALIGNED(src, sizeof(void *)));
+    assert(n % sizeof(void *) == 0);
+
+    if (dest < src || dest >= (void *)((char *)src + n)) {
+        // No overlap, or dest is below src: copy forward.
+        void **dest_ = (void **)dest;
+        void **src_ = (void **)src;
+        void **end = dest_ + n / sizeof(void *);
+
+        for (; dest_ != end; dest_++, src_++) {
+            __atomic_store_n(dest_, *src_, __ATOMIC_RELAXED);
+        }
+    }
+    else if (dest > src) {
+        // The buffers overlap and dest is above src: copy backward, from
+        // the last word down to the first, so that each word is read
+        // before it is overwritten.
+        n = n / sizeof(void *) - 1;
+        void **dest_ = (void **)dest + n;
+        void **src_ = (void **)src + n;
+        void **end = (void **)dest - 1;
+
+        for (; dest_ != end; dest_--, src_--) {
+            __atomic_store_n(dest_, *src_, __ATOMIC_RELAXED);
+        }
+    }
+
+    return dest;
+}
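+
+// Usage sketch (hypothetical caller, not part of this patch): publishing a
+// resized PyObject* array that lock-free readers may scan concurrently:
+//
+//     _Py_atomic_memcpy_ptr_store_relaxed(new_items, old_items,
+//                                         count * sizeof(PyObject *));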
+
+
 // --- _Py_atomic_fence ------------------------------------------------------
 
 static inline void