@@ -599,6 +599,23 @@ ptrack_walkdir(const char *path, Oid tablespaceOid, Oid dbOid)
 	FreeDir(dir);				/* we ignore any error here */
 }
 
+static void
+ptrack_atomic_increase(XLogRecPtr new_lsn, pg_atomic_uint64 *var)
+{
+	/*
+	 * We use pg_atomic_uint64 here only for alignment purposes, because
+	 * pg_atomic_uint64 is forcedly aligned on 8 bytes during the MSVC build.
+	 */
+	pg_atomic_uint64 old_lsn;
+
+	old_lsn.value = pg_atomic_read_u64(var);
+#if USE_ASSERT_CHECKING
+	elog(DEBUG3, "ptrack_mark_block: " UINT64_FORMAT " <- " UINT64_FORMAT, old_lsn.value, new_lsn);
+#endif
+	while (old_lsn.value < new_lsn &&
+		   !pg_atomic_compare_exchange_u64(var, (uint64 *) &old_lsn.value, new_lsn));
+}
+
 /*
  * Mark modified block in ptrack_map.
  */
@@ -608,15 +625,9 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode,
 {
 	PtBlockId	bid;
 	uint64		hash;
-	size_t		slot1;
-	size_t		slot2;
+	size_t		slots[2];
 	XLogRecPtr	new_lsn;
-	/*
-	 * We use pg_atomic_uint64 here only for alignment purposes, because
-	 * pg_atomic_uint64 is forcedly aligned on 8 bytes during the MSVC build.
-	 */
-	pg_atomic_uint64 old_lsn;
-	pg_atomic_uint64 old_init_lsn;
+	int			i;
 
 	if (ptrack_map_size == 0
 		|| ptrack_map == NULL
@@ -629,39 +640,29 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode,
 	bid.blocknum = blocknum;
 
 	hash = BID_HASH_FUNC(bid);
-	slot1 = (size_t)(hash % PtrackContentNblocks);
-	slot2 = (size_t)(((hash << 32) | (hash >> 32)) % PtrackContentNblocks);
+	slots[0] = (size_t)(hash % PtrackContentNblocks);
+	slots[1] = (size_t)(((hash << 32) | (hash >> 32)) % PtrackContentNblocks);
 
 	if (RecoveryInProgress())
 		new_lsn = GetXLogReplayRecPtr(NULL);
 	else
 		new_lsn = GetXLogInsertRecPtr();
 
 	/* Atomically assign new init LSN value */
-	old_init_lsn.value = pg_atomic_read_u64(&ptrack_map->init_lsn);
-	if (old_init_lsn.value == InvalidXLogRecPtr)
+	if (pg_atomic_read_u64(&ptrack_map->init_lsn) == InvalidXLogRecPtr)
 	{
 #if USE_ASSERT_CHECKING
-		elog(DEBUG1, "ptrack_mark_block: init_lsn " UINT64_FORMAT " <- " UINT64_FORMAT, old_init_lsn.value, new_lsn);
+		elog(DEBUG3, "ptrack_mark_block: init_lsn");
 #endif
-
-		while (old_init_lsn.value < new_lsn &&
-			   !pg_atomic_compare_exchange_u64(&ptrack_map->init_lsn, (uint64 *) &old_init_lsn.value, new_lsn));
+		ptrack_atomic_increase(new_lsn, &ptrack_map->init_lsn);
 	}
 
-	/* Atomically assign new LSN value to the first slot */
-	old_lsn.value = pg_atomic_read_u64(&ptrack_map->entries[slot1]);
-#if USE_ASSERT_CHECKING
-	elog(DEBUG3, "ptrack_mark_block: map[%zu]=" UINT64_FORMAT " <- " UINT64_FORMAT, slot1, old_lsn.value, new_lsn);
-#endif
-	while (old_lsn.value < new_lsn &&
-		   !pg_atomic_compare_exchange_u64(&ptrack_map->entries[slot1], (uint64 *) &old_lsn.value, new_lsn));
-
-	/* And to the second */
-	old_lsn.value = pg_atomic_read_u64(&ptrack_map->entries[slot2]);
+	/* Atomically assign new LSN value to the slots */
+	for (i = 0; i < lengthof(slots); i++)
+	{
 #if USE_ASSERT_CHECKING
-	elog(DEBUG3, "ptrack_mark_block: map[%zu]=" UINT64_FORMAT " <- " UINT64_FORMAT, slot2, old_lsn.value, new_lsn);
+		elog(DEBUG3, "ptrack_mark_block: map[%zu]", slots[i]);
 #endif
-	while (old_lsn.value < new_lsn &&
-		   !pg_atomic_compare_exchange_u64(&ptrack_map->entries[slot2], (uint64 *) &old_lsn.value, new_lsn));
+		ptrack_atomic_increase(new_lsn, &ptrack_map->entries[slots[i]]);
+	}
 }
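
The helper factored out above is an instance of the lock-free "advance to maximum" pattern: read the current value, then retry the compare-and-swap until either the store succeeds or a concurrent writer has already installed an LSN at least as new, in which case there is nothing left to do. Below is a minimal standalone sketch of the same loop, written with C11 atomics so it compiles outside the PostgreSQL tree; atomic_store_max and the demo values are illustrative, not part of ptrack.

/*
 * Sketch of the monotonic-update loop used by ptrack_atomic_increase(),
 * expressed with C11 atomics. Assumption: atomic_store_max is a
 * hypothetical name, not a ptrack or PostgreSQL symbol.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static void
atomic_store_max(_Atomic uint64_t *var, uint64_t new_val)
{
	uint64_t	old_val = atomic_load(var);

	/*
	 * Retry until the slot already holds a value >= new_val (a concurrent
	 * writer won with a newer one) or our CAS succeeds. On failure,
	 * atomic_compare_exchange_strong reloads old_val with the current
	 * contents, mirroring pg_atomic_compare_exchange_u64's behavior.
	 */
	while (old_val < new_val &&
		   !atomic_compare_exchange_strong(var, &old_val, new_val))
		;
}

int
main(void)
{
	_Atomic uint64_t slot = 100;

	atomic_store_max(&slot, 200);	/* advances: 100 -> 200 */
	atomic_store_max(&slot, 150);	/* no-op: 150 < 200 */
	printf("slot = %llu\n", (unsigned long long) atomic_load(&slot));
	return 0;
}

Because the stored value only ever moves forward, the loop is wait-free for the common case where the slot already holds a newer LSN, and losing a CAS race is harmless: the winner wrote a value at least as new.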