@@ -545,6 +545,59 @@ static inline Py_ssize_t
545545_Py_atomic_load_ssize_acquire (const Py_ssize_t * obj );
546546
547547
548+ // --- _Py_atomic_memcpy / _Py_atomic_memmove ------------
549+
// Copy `n` bytes from `src` to `dest` as a sequence of pointer-sized
// relaxed atomic stores (memcpy semantics: the regions must not overlap,
// except for the trivial dest == src case, which is a no-op here).
//
// Preconditions (asserted): both pointers are pointer-aligned and `n` is
// a multiple of sizeof(void*).
//
// Returns `dest`, like memcpy().
static inline void *
_Py_atomic_memcpy_ptr_store_relaxed(void *dest, void *src, size_t n)
{
    assert(_Py_IS_ALIGNED(dest, sizeof(void *)));
    assert(_Py_IS_ALIGNED(src, sizeof(void *)));
    assert(n % sizeof(void *) == 0);

    if (dest != src) {
        void **to = (void **)dest;
        void **from = (void **)src;
        size_t words = n / sizeof(void *);

        for (size_t i = 0; i < words; i++) {
            _Py_atomic_store_ptr_relaxed(&to[i], from[i]);
        }
    }

    return dest;
}
569+
// Like memmove(): copy `n` bytes from `src` to `dest`, correctly handling
// overlapping regions, performing the copy as a sequence of pointer-sized
// relaxed atomic stores.
//
// Preconditions (asserted): both pointers are pointer-aligned and `n` is
// a multiple of sizeof(void*).
//
// Returns `dest`, like memmove().
static inline void *
_Py_atomic_memmove_ptr_store_relaxed(void *dest, void *src, size_t n)
{
    assert(_Py_IS_ALIGNED(dest, sizeof(void *)));
    assert(_Py_IS_ALIGNED(src, sizeof(void *)));
    assert(n % sizeof(void *) == 0);

    void **dest_ = (void **)dest;
    void **src_ = (void **)src;
    size_t words = n / sizeof(void *);

    if (dest < src || dest >= (void *)((char *)src + n)) {
        // dest precedes src, or the regions do not overlap: a forward
        // copy never overwrites a source word before it is read.
        for (size_t i = 0; i < words; i++) {
            _Py_atomic_store_ptr_relaxed(&dest_[i], src_[i]);
        }
    }
    else if (dest > src) {
        // dest overlaps the tail of src: copy backwards so each source
        // word is read before it is overwritten.  Counting an index down
        // keeps all pointer arithmetic inside [base, base + words]; the
        // previous form walked a pointer down to one element *before* the
        // start of the region as the loop sentinel, which is undefined
        // behavior in C (pointer arithmetic is only defined within the
        // array or one past its end).
        for (size_t i = words; i > 0; i--) {
            _Py_atomic_store_ptr_relaxed(&dest_[i - 1], src_[i - 1]);
        }
    }
    // dest == src: nothing to copy.

    return dest;
}
599+
600+
548601
549602
550603// --- _Py_atomic_fence ------------------------------------------------------
0 commit comments