@@ -165,6 +165,10 @@ dyld_tracker_start(void)
 void
 sentrycrashbic_iterateOverImages(sentrycrashbic_imageIteratorCallback callback, void *context)
 {
+    if (!atomic_load_explicit(&g_started, memory_order_acquire)) {
+        return;
+    }
+
     uint32_t count = atomic_load_explicit(&g_next_index, memory_order_acquire);
 
     if (count > MAX_DYLD_IMAGES)
@@ -212,18 +216,14 @@ sentrycrashbic_addDyldNode(void)
     }
 
     PublishedBinaryImage *entry = &g_images[idx];
-    if (!sentrycrashdl_getBinaryImageForHeader((const void *)header, "dyld", &entry->image, false)) {
+    if (!sentrycrashdl_getBinaryImageForHeader(
+            (const void *)header, "dyld", &entry->image, false)) {
         // Decrement because we couldn't add the image
         atomic_fetch_sub_explicit(&g_next_index, 1, memory_order_relaxed);
         return;
     }
 
-    sentrycrashbic_cacheChangeCallback callback
-        = atomic_load_explicit(&g_addedCallback, memory_order_acquire);
     atomic_store_explicit(&entry->ready, 1, memory_order_release);
-    if (callback != NULL) {
-        callback(&entry->image);
-    }
 }
 
 static void
@@ -236,6 +236,11 @@ sentrycrashbic_startCacheImpl(void)
         return;
     }
 
+    // Reset g_next_index here rather than in stopCache to avoid the race where
+    // a concurrent add_dyld_image increments g_next_index after stopCache resets it.
+    // The compare-exchange above guarantees we are the only thread in this function.
+    atomic_store_explicit(&g_next_index, 0, memory_order_release);
+
     if (sentrycrashbic_shouldAddDyld()) {
         sentrycrashdl_initialize();
         sentrycrashbic_addDyldNode();
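The reset-on-start pattern above can be illustrated in isolation. This is a minimal C11 sketch, not the file's actual layout: the names g_starting_demo, g_started_demo, g_next_index_demo, and start_cache_demo are hypothetical stand-ins, and the guard flag is kept separate from the reader-visible flag for clarity.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the cache globals. */
static atomic_int g_starting_demo = 0;    /* compare-exchange guard: owns start-up */
static atomic_int g_started_demo = 0;     /* flag readers gate on */
static atomic_uint g_next_index_demo = 7; /* stale count left over from a previous run */

static bool
start_cache_demo(void)
{
    int expected = 0;
    /* Only one thread wins the 0 -> 1 transition, so the reset below cannot
     * race another starter. */
    if (!atomic_compare_exchange_strong_explicit(&g_starting_demo, &expected, 1,
            memory_order_acq_rel, memory_order_acquire)) {
        return false;
    }

    /* Exclusive section: clear the stale index, then publish "started" with
     * release so readers that observe the flag also observe the reset. */
    atomic_store_explicit(&g_next_index_demo, 0, memory_order_release);
    atomic_store_explicit(&g_started_demo, 1, memory_order_release);
    return true;
}

int
main(void)
{
    printf("first start: %d\n", start_cache_demo());  /* 1: wins the compare-exchange */
    printf("second start: %d\n", start_cache_demo()); /* 0: start-up already owned */
    return 0;
}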
@@ -266,11 +271,13 @@ sentrycrashbic_startCache(void)
 void
 sentrycrashbic_stopCache(void)
 {
-    // The binary image cache does not allocate memory, the cache takes a fixed
-    // size, it's not malloced. So this is kind of pointless, but there are a bunch
-    // of tests using it. We should clean it up and make startCache use dispatchOnce
+    // Only flip the started flag. We intentionally do NOT reset g_next_index here
+    // because a concurrent add_dyld_image (that already passed the g_started check)
+    // could increment g_next_index after our reset, leaving the cache in an
+    // inconsistent state. Instead, g_next_index is reset in startCacheImpl where
+    // we have exclusive access via the compare-exchange. iterateOverImages checks
+    // g_started before reading g_next_index, so the cache appears empty immediately.
     atomic_store_explicit(&g_started, 0, memory_order_release);
-    atomic_store_explicit(&g_next_index, 0, memory_order_release);
 }
 
 void
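The stop/iterate handshake can also be shown on its own: stop only flips the flag, and readers gate on it before touching the count, so a stopped cache looks empty even though the count is left behind. This is a minimal C11 sketch under the same assumptions as above; g_started_demo, g_next_index_demo, stop_cache_demo, and visible_image_count_demo are hypothetical names, not the functions in this file.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the cache globals, pre-populated as
 * if the cache were running with three published images. */
static atomic_int g_started_demo = 1;
static atomic_uint g_next_index_demo = 3;

/* Stop only flips the flag; the index is left alone so a concurrent add that
 * already passed the started check cannot race a reset here. */
static void
stop_cache_demo(void)
{
    atomic_store_explicit(&g_started_demo, 0, memory_order_release);
}

/* Readers check the flag first, so a stopped cache reports zero images even
 * though g_next_index_demo still holds the old count. */
static uint32_t
visible_image_count_demo(void)
{
    if (!atomic_load_explicit(&g_started_demo, memory_order_acquire)) {
        return 0;
    }
    return atomic_load_explicit(&g_next_index_demo, memory_order_acquire);
}

int
main(void)
{
    printf("before stop: %u images visible\n", visible_image_count_demo()); /* 3 */
    stop_cache_demo();
    printf("after stop: %u images visible\n", visible_image_count_demo());  /* 0 */
    return 0;
}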