Commit da899bb

Fix handling of event queue.
1 parent f1e1f84 commit da899bb

File tree

4 files changed: +91 -35 lines changed

- ext/memory/profiler/capture.c
- ext/memory/profiler/events.c
- ext/memory/profiler/table.c
- releases.md

ext/memory/profiler/capture.c

Lines changed: 32 additions & 28 deletions
@@ -13,6 +13,9 @@
 
 enum {
 	DEBUG = 0,
+	
+	// This generates a lot of output:
+	DEBUG_EVENT = 0,
 };
 
 static VALUE Memory_Profiler_Capture = Qnil;
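The `DEBUG`/`DEBUG_EVENT` switches above are plain enum constants rather than preprocessor conditionals. A minimal sketch of the idea (illustrative only, not part of the commit): because the condition is a compile-time constant, the compiler discards the logging branch entirely when the constant is 0, while the logging code still gets parsed and type-checked.

```c
#include <stdio.h>

// Illustrative sketch only: constant-folded debug gating.
enum {
	DEBUG_EVENT = 0, // Flip to 1 locally to get per-event logging.
};

static void log_event(const void *object) {
	// Dead-code eliminated when DEBUG_EVENT is 0, but still compiled and checked:
	if (DEBUG_EVENT) fprintf(stderr, "[NEWOBJ] Enqueuing event for object: %p\n", object);
}
```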
@@ -224,10 +227,7 @@ static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE u
 	struct Memory_Profiler_Object_Table_Entry *entry = Memory_Profiler_Object_Table_lookup(capture->states, object);
 	
 	if (!entry) {
-		if (DEBUG) fprintf(stderr, "[FREEOBJ] Object not found in table: %p\n", (void*)object);
 		goto done;
-	} else {
-		if (DEBUG) fprintf(stderr, "[FREEOBJ] Object found in table: %p\n", (void*)object);
 	}
 	
 	VALUE klass = entry->klass;
@@ -246,7 +246,7 @@ static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE u
 	Memory_Profiler_Object_Table_delete_entry(capture->states, entry);
 	
 	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
-
+
 	// Increment global free count
 	capture->free_count++;
 	
@@ -323,13 +323,12 @@ static void Memory_Profiler_Capture_event_callback(VALUE self, void *ptr) {
 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 	
 	VALUE object = rb_tracearg_object(trace_arg);
-	
-	// We don't want to track internal non-Object allocations:
-	if (!Memory_Profiler_Capture_trackable_p(object)) return;
-	
 	rb_event_flag_t event_flag = rb_tracearg_event_flag(trace_arg);
 	
 	if (event_flag == RUBY_INTERNAL_EVENT_NEWOBJ) {
+		// We don't want to track internal non-Object allocations:
+		if (!Memory_Profiler_Capture_trackable_p(object)) return;
+		
 		// Skip NEWOBJ if disabled (during callback) to prevent infinite recursion
 		if (capture->paused) return;
 		
@@ -338,12 +337,10 @@ static void Memory_Profiler_Capture_event_callback(VALUE self, void *ptr) {
 		// Skip if klass is not a Class
 		if (rb_type(klass) != RUBY_T_CLASS) return;
 		
-		// Enqueue actual object (not object_id) - queue retains it until processed
-		// Ruby 3.5 compatible: no need for FL_SEEN_OBJ_ID or rb_obj_id
-		if (DEBUG) fprintf(stderr, "[NEWOBJ] Enqueuing event for object: %p\n", (void*)object);
+		if (DEBUG_EVENT) fprintf(stderr, "[NEWOBJ] Enqueuing event for object: %p\n", (void*)object);
 		Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_NEWOBJ, self, klass, object);
 	} else if (event_flag == RUBY_INTERNAL_EVENT_FREEOBJ) {
-		if (DEBUG) fprintf(stderr, "[FREEOBJ] Enqueuing event for object: %p\n", (void*)object);
+		if (DEBUG_EVENT) fprintf(stderr, "[FREEOBJ] Enqueuing event for object: %p\n", (void*)object);
 		Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_FREEOBJ, self, Qnil, object);
 	}
 }
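Moving the `trackable_p` check inside the `NEWOBJ` branch means `FREEOBJ` events are now always enqueued, which is what the release note below refers to. The predicate itself is outside this diff; a hypothetical sketch (only the name comes from the commit, the body here is an assumption) might reject immediates and class-less internal objects:

```c
#include <ruby.h>

// Hypothetical sketch - the real implementation is not part of this commit.
static int Memory_Profiler_Capture_trackable_p(VALUE object) {
	// Immediates (fixnums, symbols, true/false/nil) are never heap objects:
	if (RB_SPECIAL_CONST_P(object)) return 0;
	
	// Hidden/internal objects have no class and should not be reported:
	VALUE klass = RBASIC_CLASS(object);
	if (!klass) return 0;
	
	return 1;
}
```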
@@ -596,12 +593,19 @@ struct Memory_Profiler_Each_Object_Arguments {
 	
 	// The allocations wrapper to filter by (Qnil = no filter).
 	VALUE allocations;
+	
+	// Previous GC state (to restore in ensure handler)
+	int gc_was_enabled;
 };
 
-// Cleanup function to re-enable GC
+// Cleanup function to restore GC state
 static VALUE Memory_Profiler_Capture_each_object_ensure(VALUE arg) {
-	// Re-enable GC (rb_gc_enable returns previous state, but we don't need it)
-	rb_gc_enable();
+	struct Memory_Profiler_Each_Object_Arguments *arguments = (struct Memory_Profiler_Each_Object_Arguments *)arg;
+	
+	// Restore GC state only if it was enabled before
+	if (arguments->gc_was_enabled) {
+		rb_gc_enable();
+	}
 	
 	return Qnil;
 }
@@ -619,8 +623,8 @@ static VALUE Memory_Profiler_Capture_each_object_body(VALUE arg) {
 	for (size_t i = 0; i < capture->states->capacity; i++) {
 		struct Memory_Profiler_Object_Table_Entry *entry = &capture->states->entries[i];
 		
-		// Skip empty or deleted slots (0 = not set)
-		if (entry->object == 0) {
+		// Skip empty or deleted slots (0 = not set, Qnil = deleted)
+		if (entry->object == 0 || entry->object == Qnil) {
 			continue;
 		}
 		
@@ -661,31 +665,31 @@ static VALUE Memory_Profiler_Capture_each_object(int argc, VALUE *argv, VALUE se
 	
 	RETURN_ENUMERATOR(self, argc, argv);
 	
-	// Disable GC to prevent objects from being collected during iteration
-	rb_gc_disable();
-	
-	// Process all pending events to clean up stale entries
-	// At this point, all remaining objects in the table should be valid
-	Memory_Profiler_Events_process_all();
-	
 	// If class provided, look up its allocations wrapper
 	VALUE allocations = Qnil;
 	if (!NIL_P(klass)) {
 		st_data_t allocations_data;
 		if (st_lookup(capture->tracked, (st_data_t)klass, &allocations_data)) {
 			allocations = (VALUE)allocations_data;
 		} else {
-			// Class not tracked - nothing to iterate
-			// Re-enable GC before returning
-			rb_gc_enable();
+			// Class not tracked - nothing to iterate:
 			return self;
 		}
 	}
+	
+	// Disable GC to prevent objects from being collected during iteration
+	// rb_gc_disable returns the previous state (non-zero = was enabled, 0 = was disabled)
+	int gc_was_enabled = RTEST(rb_gc_disable());
+	
+	// Process all pending events to clean up stale entries:
+	Memory_Profiler_Events_process_all();
+	// At this point, all remaining objects in the table should be valid.
 	
 	// Setup arguments for iteration
 	struct Memory_Profiler_Each_Object_Arguments arguments = {
 		.self = self,
-		.allocations = allocations
+		.allocations = allocations,
+		.gc_was_enabled = gc_was_enabled
 	};
 	
 	// Use rb_ensure to guarantee cleanup even if exception is raised
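The `rb_ensure` call itself falls outside the lines shown above. A minimal sketch of how the body and ensure callbacks are typically combined (assumed wiring, not the commit's exact call site): the ensure function always runs, so the saved GC state is restored even if iteration raises.

```c
#include <ruby.h>

// Assumed wiring (sketch): combine the body and ensure callbacks defined above.
static VALUE each_object_protected(VALUE self, VALUE allocations, int gc_was_enabled) {
	struct Memory_Profiler_Each_Object_Arguments arguments = {
		.self = self,
		.allocations = allocations,
		.gc_was_enabled = gc_was_enabled,
	};
	
	// The body runs first; the ensure callback runs afterwards, raise or no raise:
	return rb_ensure(
		Memory_Profiler_Capture_each_object_body, (VALUE)&arguments,
		Memory_Profiler_Capture_each_object_ensure, (VALUE)&arguments
	);
}
```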

ext/memory/profiler/events.c

Lines changed: 53 additions & 6 deletions
@@ -19,6 +19,9 @@ struct Memory_Profiler_Events {
 	// Double-buffered event queues (contains events from all Capture instances).
 	struct Memory_Profiler_Queue queues[2];
 	struct Memory_Profiler_Queue *available, *processing;
+	
+	// Guard flag to prevent recursive processing (0 = not processing, 1 = processing)
+	int processing_flag;
 	
 	// Postponed job handle for processing the queue.
 	// Postponed job handles are an extremely limited resource, so we only register one global event queue.
@@ -59,6 +62,7 @@ static VALUE Memory_Profiler_Events_new(void) {
 	// Start with queues[0] available for incoming events, queues[1] for processing (initially empty):
 	events->available = &events->queues[0];
 	events->processing = &events->queues[1];
+	events->processing_flag = 0;
 	
 	// Pre-register the single postponed job for processing the queue:
 	events->postponed_job_handle = rb_postponed_job_preregister(0,
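For context, the preregister/trigger pattern used here (available since Ruby 3.3) separates the cheap, hook-safe part from the actual work. Internal events such as `NEWOBJ`/`FREEOBJ` fire at points where arbitrary Ruby code must not run, so the handler is deferred to a safe point. The sketch below is illustrative and uses made-up names, not the extension's API:

```c
#include <ruby.h>
#include <ruby/debug.h>

// Illustrative sketch (assumes Ruby 3.3+; names are hypothetical).
static rb_postponed_job_handle_t job_handle;

static void process_queue(void *data) {
	// Runs later, at a safe point, where it is legal to call back into Ruby.
}

static void setup(void) {
	// Register exactly once - postponed job handles are a very limited resource:
	job_handle = rb_postponed_job_preregister(0, process_queue, NULL);
}

static void on_internal_event(void) {
	// Cheap to call from an event hook; repeated triggers before the job runs
	// are coalesced into a single invocation.
	rb_postponed_job_trigger(job_handle);
}
```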
@@ -86,8 +90,6 @@ struct Memory_Profiler_Events* Memory_Profiler_Events_instance(void) {
 		// Pin the global events object so it's never GC'd:
 		rb_gc_register_mark_object(instance);
 		
-		if (DEBUG) fprintf(stderr, "Global event queue system initialized and pinned\n");
-		
 		TypedData_Get_Struct(instance, struct Memory_Profiler_Events, &Memory_Profiler_Events_type, events);
 	}
 	
@@ -195,14 +197,16 @@ int Memory_Profiler_Events_enqueue(
 		RB_OBJ_WRITE(events->self, &event->klass, klass);
 		RB_OBJ_WRITE(events->self, &event->object, object);
 		
-		if (DEBUG) fprintf(stderr, "Queued %s to available queue, size: %zu\n",
-			Memory_Profiler_Event_Type_name(type), events->available->count);
+		if (DEBUG) {
+			fprintf(stderr, "[EVENTS] Enqueued %s: object=%p available_count=%zu processing_flag=%d\n",
+				Memory_Profiler_Event_Type_name(type), (void*)object, events->available->count, events->processing_flag);
+		}
 		
 		rb_postponed_job_trigger(events->postponed_job_handle);
 		// Success:
 		return 1;
 	}
-
+
 	// Queue full:
 	return 0;
 }
@@ -211,6 +215,12 @@ int Memory_Profiler_Events_enqueue(
 // Public API function - called from Capture stop() to ensure all events are processed.
 void Memory_Profiler_Events_process_all(void) {
 	struct Memory_Profiler_Events *events = Memory_Profiler_Events_instance();
+	
+	// Explicitly prevent re-entrancy here:
+	if (events->processing_flag) {
+		rb_raise(rb_eRuntimeError, "Recursive call detected!");
+	}
+	
 	Memory_Profiler_Events_process_queue((void *)events);
 }
 
@@ -228,17 +238,39 @@ static VALUE Memory_Profiler_Events_process_event_protected(VALUE arg) {
 static void Memory_Profiler_Events_process_queue(void *arg) {
 	struct Memory_Profiler_Events *events = (struct Memory_Profiler_Events *)arg;
 	
+	// Check for recursive call - this would break double buffering!
+	if (events->processing_flag) {
+		// Explicitly allow re-entrancy here, as the postponed job could be triggered during `process_all`.
+		return;
+	}
+	
+	// Set processing flag to prevent recursion
+	events->processing_flag = 1;
+	
+	if (DEBUG) {
+		fprintf(stderr, "[EVENTS] process_queue START: available_count=%zu processing_count=%zu\n",
+			events->available->count, events->processing->count);
+	}
+	
 	// Swap the queues: available becomes processing, and the old processing queue (now empty) becomes available. This allows new events to continue enqueueing to the new available queue while we process.
 	struct Memory_Profiler_Queue *queue_to_process = events->available;
 	events->available = events->processing;
 	events->processing = queue_to_process;
 	
-	if (DEBUG) fprintf(stderr, "Processing event queue: %zu events\n", events->processing->count);
+	if (DEBUG) {
+		fprintf(stderr, "[EVENTS] Queues swapped: processing_count=%zu (was available), available_count=%zu (was processing)\n",
+			events->processing->count, events->available->count);
+	}
 	
 	// Process all events in order (maintains NEWOBJ before FREEOBJ for same object):
 	for (size_t i = 0; i < events->processing->count; i++) {
 		struct Memory_Profiler_Event *event = Memory_Profiler_Queue_at(events->processing, i);
 		
+		if (DEBUG) {
+			fprintf(stderr, "[EVENTS] Processing event[%zu]: type=%s object=%p capture=%p\n",
+				i, Memory_Profiler_Event_Type_name(event->type), (void*)event->object, (void*)event->capture);
+		}
+		
 		// Process event with rb_protect to catch any exceptions:
 		int state = 0;
 		rb_protect(Memory_Profiler_Events_process_event_protected, (VALUE)event, &state);
@@ -249,13 +281,28 @@
 			rb_set_errinfo(Qnil);
 		}
 		
+		if (DEBUG) {
+			fprintf(stderr, "[EVENTS] Processed event[%zu]: type=%s object=%p (exception=%d)\n",
+				i, Memory_Profiler_Event_Type_name(event->type), (void*)event->object, state);
+		}
+		
 		// Clear this event after processing to prevent marking stale data if GC runs:
 		event->type = MEMORY_PROFILER_EVENT_TYPE_NONE;
 		RB_OBJ_WRITE(events->self, &event->capture, Qnil);
 		RB_OBJ_WRITE(events->self, &event->klass, Qnil);
 		RB_OBJ_WRITE(events->self, &event->object, Qnil);
 	}
 	
+	// Save count before clearing for logging
+	size_t processed_count = events->processing->count;
+	
 	// Clear the processing queue (which is now empty logically):
 	Memory_Profiler_Queue_clear(events->processing);
+	
+	// Clear processing flag
+	events->processing_flag = 0;
+	
+	if (DEBUG) {
+		fprintf(stderr, "[EVENTS] process_queue END: processed %zu events\n", processed_count);
+	}
 }
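Taken together, the queue changes implement double buffering with a guard flag. The stripped-down sketch below (illustrative names and types, not the extension's API) shows the shape of the pattern: new events always land in `available`, processing swaps the buffers so enqueueing can continue while the drained buffer is walked, and a re-entrant trigger becomes a no-op instead of swapping the buffers a second time:

```c
#include <stddef.h>

// Illustrative sketch only - names and storage are simplified.
struct queue {
	size_t count;
	// ... storage for events ...
};

static struct queue queues[2];
static struct queue *available = &queues[0], *processing = &queues[1];
static int processing_flag = 0;

static void process_queue(void) {
	// A re-entrant call while draining would swap the buffers again; make it a no-op:
	if (processing_flag) return;
	processing_flag = 1;
	
	// Swap: drain the filled buffer while new events land in the other one.
	struct queue *to_process = available;
	available = processing;
	processing = to_process;
	
	for (size_t i = 0; i < processing->count; i++) {
		// ... handle the i-th event in the processing buffer ...
	}
	
	processing->count = 0;
	processing_flag = 0;
}
```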

ext/memory/profiler/table.c

Lines changed: 1 addition & 1 deletion
@@ -365,7 +365,7 @@ void Memory_Profiler_Object_Table_compact(struct Memory_Profiler_Object_Table *t
 	table->count = 0;
 	table->tombstones = 0; // Compaction clears tombstones
 	
-	// Reinsert all entries with new hash values
+	// Reinsert all entries with new hash values:
 	for (size_t i = 0; i < temp_count; i++) {
 		int found;
 		size_t index = find_entry(table->entries, table->capacity, temp_entries[i].object, &found, NULL, "compact");

releases.md

Lines changed: 5 additions & 0 deletions
@@ -1,5 +1,10 @@
 # Releases
 
+## Unreleased
+
+- Ensure all `FREEOBJ` events are enqueued.
+- Allow limited re-entrancy in queue processing.
+
 ## v1.6.1
 
 - Add `track_all` option to `Sampler`.
