@@ -19,6 +19,9 @@ struct Memory_Profiler_Events {
1919 // Double-buffered event queues (contains events from all Capture instances).
2020 struct Memory_Profiler_Queue queues [2 ];
2121 struct Memory_Profiler_Queue * available , * processing ;
22+
23+ // Guard flag to prevent recursive processing (0 = not processing, 1 = processing)
24+ int processing_flag ;
2225
2326 // Postponed job handle for processing the queue.
2427 // Postponed job handles are an extremely limited resource, so we only register one global event queue.
@@ -59,6 +62,7 @@ static VALUE Memory_Profiler_Events_new(void) {
5962 // Start with queues[0] available for incoming events, queues[1] for processing (initially empty):
6063 events -> available = & events -> queues [0 ];
6164 events -> processing = & events -> queues [1 ];
65+ events -> processing_flag = 0 ;
6266
6367 // Pre-register the single postponed job for processing the queue:
6468 events -> postponed_job_handle = rb_postponed_job_preregister (0 ,
@@ -86,8 +90,6 @@ struct Memory_Profiler_Events* Memory_Profiler_Events_instance(void) {
8690 // Pin the global events object so it's never GC'd:
8791 rb_gc_register_mark_object (instance );
8892
89- if (DEBUG ) fprintf (stderr , "Global event queue system initialized and pinned\n" );
90-
9193 TypedData_Get_Struct (instance , struct Memory_Profiler_Events , & Memory_Profiler_Events_type , events );
9294 }
9395
@@ -195,14 +197,16 @@ int Memory_Profiler_Events_enqueue(
195197 RB_OBJ_WRITE (events -> self , & event -> klass , klass );
196198 RB_OBJ_WRITE (events -> self , & event -> object , object );
197199
198- if (DEBUG ) fprintf (stderr , "Queued %s to available queue, size: %zu\n" ,
199- Memory_Profiler_Event_Type_name (type ), events -> available -> count );
200+ if (DEBUG ) {
201+ fprintf (stderr , "[EVENTS] Enqueued %s: object=%p available_count=%zu processing_flag=%d\n" ,
202+ Memory_Profiler_Event_Type_name (type ), (void * )object , events -> available -> count , events -> processing_flag );
203+ }
200204
201205 rb_postponed_job_trigger (events -> postponed_job_handle );
202206 // Success:
203207 return 1 ;
204208 }
205-
209+
206210 // Queue full:
207211 return 0 ;
208212}
@@ -211,6 +215,12 @@ int Memory_Profiler_Events_enqueue(
211215// Public API function - called from Capture stop() to ensure all events are processed.
212216void Memory_Profiler_Events_process_all (void ) {
213217 struct Memory_Profiler_Events * events = Memory_Profiler_Events_instance ();
218+
219+ // Explicitly prevent re-entrancy here:
220+ if (events -> processing_flag ) {
221+ rb_raise (rb_eRuntimeError , "Recursive call detected!" );
222+ }
223+
214224 Memory_Profiler_Events_process_queue ((void * )events );
215225}
216226
@@ -228,17 +238,39 @@ static VALUE Memory_Profiler_Events_process_event_protected(VALUE arg) {
228238static void Memory_Profiler_Events_process_queue (void * arg ) {
229239 struct Memory_Profiler_Events * events = (struct Memory_Profiler_Events * )arg ;
230240
241+ // Check for recursive call - this would break double buffering!
242+ if (events -> processing_flag ) {
243+ // Tolerate the re-entrant trigger silently: the postponed job can legitimately fire while `process_all` is already draining the queue, so we return without re-processing instead of raising.
244+ return ;
245+ }
246+
247+ // Set processing flag to prevent recursion
248+ events -> processing_flag = 1 ;
249+
250+ if (DEBUG ) {
251+ fprintf (stderr , "[EVENTS] process_queue START: available_count=%zu processing_count=%zu\n" ,
252+ events -> available -> count , events -> processing -> count );
253+ }
254+
231255 // Swap the queues: available becomes processing, and the old processing queue (now empty) becomes available. This allows new events to continue enqueueing to the new available queue while we process.
232256 struct Memory_Profiler_Queue * queue_to_process = events -> available ;
233257 events -> available = events -> processing ;
234258 events -> processing = queue_to_process ;
235259
236- if (DEBUG ) fprintf (stderr , "Processing event queue: %zu events\n" , events -> processing -> count );
260+ if (DEBUG ) {
261+ fprintf (stderr , "[EVENTS] Queues swapped: processing_count=%zu (was available), available_count=%zu (was processing)\n" ,
262+ events -> processing -> count , events -> available -> count );
263+ }
237264
238265 // Process all events in order (maintains NEWOBJ before FREEOBJ for same object):
239266 for (size_t i = 0 ; i < events -> processing -> count ; i ++ ) {
240267 struct Memory_Profiler_Event * event = Memory_Profiler_Queue_at (events -> processing , i );
241268
269+ if (DEBUG ) {
270+ fprintf (stderr , "[EVENTS] Processing event[%zu]: type=%s object=%p capture=%p\n" ,
271+ i , Memory_Profiler_Event_Type_name (event -> type ), (void * )event -> object , (void * )event -> capture );
272+ }
273+
242274 // Process event with rb_protect to catch any exceptions:
243275 int state = 0 ;
244276 rb_protect (Memory_Profiler_Events_process_event_protected , (VALUE )event , & state );
@@ -249,13 +281,28 @@ static void Memory_Profiler_Events_process_queue(void *arg) {
249281 rb_set_errinfo (Qnil );
250282 }
251283
284+ if (DEBUG ) {
285+ fprintf (stderr , "[EVENTS] Processed event[%zu]: type=%s object=%p (exception=%d)\n" ,
286+ i , Memory_Profiler_Event_Type_name (event -> type ), (void * )event -> object , state );
287+ }
288+
252289 // Clear this event after processing to prevent marking stale data if GC runs:
253290 event -> type = MEMORY_PROFILER_EVENT_TYPE_NONE ;
254291 RB_OBJ_WRITE (events -> self , & event -> capture , Qnil );
255292 RB_OBJ_WRITE (events -> self , & event -> klass , Qnil );
256293 RB_OBJ_WRITE (events -> self , & event -> object , Qnil );
257294 }
258295
296+ // Save count before clearing for logging
297+ size_t processed_count = events -> processing -> count ;
298+
259299 // Clear the processing queue (which is now empty logically):
260300 Memory_Profiler_Queue_clear (events -> processing );
301+
302+ // Clear processing flag
303+ events -> processing_flag = 0 ;
304+
305+ if (DEBUG ) {
306+ fprintf (stderr , "[EVENTS] process_queue END: processed %zu events\n" , processed_count );
307+ }
261308}
0 commit comments