Skip to content

Commit d50a0ba

Browse files
Single global queue for event handling.
1 parent 1bcea64 commit d50a0ba

File tree

6 files changed

+526
-158
lines changed

6 files changed

+526
-158
lines changed

ext/extconf.rb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
append_cflags(["-DRUBY_DEBUG", "-O0"])
1717
end
1818

19-
$srcs = ["memory/profiler/profiler.c", "memory/profiler/capture.c", "memory/profiler/allocations.c"]
19+
$srcs = ["memory/profiler/profiler.c", "memory/profiler/capture.c", "memory/profiler/allocations.c", "memory/profiler/events.c"]
2020
$VPATH << "$(srcdir)/memory/profiler"
2121

2222
# Check for required headers

ext/memory/profiler/capture.c

Lines changed: 56 additions & 154 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33

44
#include "capture.h"
55
#include "allocations.h"
6-
#include "queue.h"
6+
#include "events.h"
77

88
#include "ruby.h"
99
#include "ruby/debug.h"
@@ -13,8 +13,7 @@
1313

1414
enum {
1515
DEBUG = 0,
16-
DEBUG_EVENT_QUEUES = 1,
17-
DEBUG_STATE = 1,
16+
DEBUG_STATE = 0,
1817
};
1918

2019
static VALUE Memory_Profiler_Capture = Qnil;
@@ -23,23 +22,7 @@ static VALUE Memory_Profiler_Capture = Qnil;
2322
static VALUE sym_newobj;
2423
static VALUE sym_freeobj;
2524

26-
struct Memory_Profiler_Queue_Item {
27-
enum EventType {
28-
EVENT_TYPE_NEWOBJ,
29-
EVENT_TYPE_FREEOBJ,
30-
} type;
31-
32-
// The class of the object:
33-
VALUE klass;
34-
35-
// The Allocations wrapper:
36-
VALUE allocations;
37-
38-
// The object itself:
39-
VALUE object;
40-
};
41-
42-
// Main capture state
25+
// Main capture state (per-instance)
4326
struct Memory_Profiler_Capture {
4427
// class => VALUE (wrapped Memory_Profiler_Capture_Allocations).
4528
st_table *tracked_classes;
@@ -49,12 +32,6 @@ struct Memory_Profiler_Capture {
4932

5033
// Internal - should we queue callbacks? (temporarily disabled during queue processing)
5134
int enabled;
52-
53-
// Queue for new objects (processed via postponed job):
54-
struct Memory_Profiler_Queue event_queue;
55-
56-
// Handle for the postponed job (processes both queues)
57-
rb_postponed_job_handle_t postponed_job_handle;
5835
};
5936

6037
// GC mark callback for tracked_classes table
@@ -74,26 +51,9 @@ static int Memory_Profiler_Capture_tracked_classes_mark(st_data_t key, st_data_t
7451
static void Memory_Profiler_Capture_mark(void *ptr) {
7552
struct Memory_Profiler_Capture *capture = ptr;
7653

77-
if (!capture) {
78-
return;
79-
}
80-
8154
if (capture->tracked_classes) {
8255
st_foreach(capture->tracked_classes, Memory_Profiler_Capture_tracked_classes_mark, 0);
8356
}
84-
85-
// Mark objects in the event queue:
86-
for (size_t i = 0; i < capture->event_queue.count; i++) {
87-
struct Memory_Profiler_Queue_Item *event = Memory_Profiler_Queue_at(&capture->event_queue, i);
88-
rb_gc_mark_movable(event->klass);
89-
rb_gc_mark_movable(event->allocations);
90-
91-
// For NEWOBJ, mark the object (it's alive)
92-
// For FREEOBJ, DON'T mark (it's being freed - just used as key for lookup)
93-
if (event->type == EVENT_TYPE_NEWOBJ) {
94-
rb_gc_mark_movable(event->object);
95-
}
96-
}
9757
}
9858

9959
// GC free function
@@ -104,9 +64,6 @@ static void Memory_Profiler_Capture_free(void *ptr) {
10464
st_free_table(capture->tracked_classes);
10565
}
10666

107-
// Free both queues (elements are stored directly, just free the queues)
108-
Memory_Profiler_Queue_free(&capture->event_queue);
109-
11067
xfree(capture);
11168
}
11269

@@ -119,9 +76,6 @@ static size_t Memory_Profiler_Capture_memsize(const void *ptr) {
11976
size += capture->tracked_classes->num_entries * (sizeof(st_data_t) + sizeof(struct Memory_Profiler_Capture_Allocations));
12077
}
12178

122-
// Add size of both queues (elements stored directly)
123-
size += capture->event_queue.capacity * capture->event_queue.element_size;
124-
12579
return size;
12680
}
12781

@@ -150,31 +104,18 @@ static int Memory_Profiler_Capture_tracked_classes_update(st_data_t *key, st_dat
150104
return ST_CONTINUE;
151105
}
152106

153-
// GC compact function - update VALUEs when GC compaction moves objects
107+
// GC compact function
154108
static void Memory_Profiler_Capture_compact(void *ptr) {
155109
struct Memory_Profiler_Capture *capture = ptr;
156110

157-
// Update tracked_classes keys and callback values in-place
111+
// Update tracked_classes keys and allocations values in-place
158112
if (capture->tracked_classes && capture->tracked_classes->num_entries > 0) {
159113
if (st_foreach_with_replace(capture->tracked_classes, Memory_Profiler_Capture_tracked_classes_foreach, Memory_Profiler_Capture_tracked_classes_update, 0)) {
160114
rb_raise(rb_eRuntimeError, "tracked_classes modified during GC compaction");
161115
}
162116
}
163117

164-
// Update objects in the event queue
165-
for (size_t i = 0; i < capture->event_queue.count; i++) {
166-
struct Memory_Profiler_Queue_Item *event = Memory_Profiler_Queue_at(&capture->event_queue, i);
167-
168-
// Update all VALUEs if they moved during compaction
169-
event->klass = rb_gc_location(event->klass);
170-
event->allocations = rb_gc_location(event->allocations);
171-
172-
// For NEWOBJ, update the object pointer
173-
// For FREEOBJ, DON'T update (it's being freed, pointer is stale)
174-
if (event->type == EVENT_TYPE_NEWOBJ) {
175-
event->object = rb_gc_location(event->object);
176-
}
177-
}
118+
// Global event queue is automatically compacted via its pinned TypedData object
178119
}
179120

180121
static const rb_data_type_t Memory_Profiler_Capture_type = {
@@ -206,9 +147,12 @@ const char *event_flag_name(rb_event_flag_t event_flag) {
206147
}
207148
}
208149

209-
// Process a NEWOBJ event from the queue
150+
// Internal: Process a NEWOBJ event
210151
// Handles both callback and non-callback cases, stores appropriate state
211-
static void Memory_Profiler_Capture_process_newobj(VALUE self, struct Memory_Profiler_Capture *capture, VALUE klass, VALUE allocations, VALUE object) {
152+
static void Memory_Profiler_Capture_process_newobj(VALUE capture_value, VALUE klass, VALUE allocations, VALUE object) {
153+
struct Memory_Profiler_Capture *capture;
154+
TypedData_Get_Struct(capture_value, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
155+
212156
struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
213157

214158
// Ensure object_states table exists
@@ -218,14 +162,19 @@ static void Memory_Profiler_Capture_process_newobj(VALUE self, struct Memory_Pro
218162

219163
VALUE state;
220164

221-
if (!NIL_P(record->callback) && capture->enabled) {
222-
// Normal case: call callback and store returned state
223-
// (callback can allocate, triggering NEWOBJ with enabled=0 → sentinel)
165+
if (!NIL_P(record->callback)) {
166+
// Call callback and store returned state
167+
// Temporarily disable queueing to prevent infinite loop if callback allocates
168+
int was_enabled = capture->enabled;
169+
capture->enabled = 0;
170+
224171
state = rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_newobj, Qnil);
225172

173+
capture->enabled = was_enabled;
174+
226175
if (DEBUG_STATE) fprintf(stderr, "Storing callback state for object: %p\n", (void *)object);
227176
} else {
228-
// No callback OR enabled=0: store sentinel
177+
// No callback: store sentinel
229178
// Qnil = "tracked but no callback data"
230179
state = Qnil;
231180

@@ -236,9 +185,12 @@ static void Memory_Profiler_Capture_process_newobj(VALUE self, struct Memory_Pro
236185
st_insert(record->object_states, (st_data_t)object, (st_data_t)state);
237186
}
238187

239-
// Process a FREEOBJ event from the queue
188+
// Internal: Process a FREEOBJ event
240189
// Looks up and deletes state, optionally calls callback
241-
static void Memory_Profiler_Capture_process_freeobj(VALUE self, struct Memory_Profiler_Capture *capture, VALUE klass, VALUE allocations, VALUE object) {
190+
static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE klass, VALUE allocations, VALUE object) {
191+
struct Memory_Profiler_Capture *capture;
192+
TypedData_Get_Struct(capture_value, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
193+
242194
struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
243195

244196
// Try to look up and delete state
@@ -253,7 +205,13 @@ static void Memory_Profiler_Capture_process_freeobj(VALUE self, struct Memory_Pr
253205

254206
// Only call callback if we have both callback AND real state (not Qnil sentinel)
255207
if (!NIL_P(record->callback) && !NIL_P(state)) {
208+
// Temporarily disable queueing to prevent infinite loop if callback allocates
209+
int was_enabled = capture->enabled;
210+
capture->enabled = 0;
211+
256212
rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_freeobj, state);
213+
214+
capture->enabled = was_enabled;
257215
}
258216
} else {
259217
// State not found - object allocated before tracking started
@@ -262,47 +220,23 @@ static void Memory_Profiler_Capture_process_freeobj(VALUE self, struct Memory_Pr
262220
}
263221
}
264222

265-
// Postponed job callback - processes queued events in order
266-
// This runs when it's safe to call Ruby code (not during allocation or GC)
267-
static void Memory_Profiler_Capture_process_queues(void *arg) {
268-
VALUE self = (VALUE)arg;
269-
struct Memory_Profiler_Capture *capture;
270-
TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
271-
272-
if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Processing event queue: %zu events\n",
273-
capture->event_queue.count);
274-
275-
// Disable callback queueing during processing to prevent infinite loop
276-
// (rb_funcall can allocate, which would trigger more NEWOBJ events)
277-
// But sentinels are still stored to maintain NEWOBJ/FREEOBJ symmetry
278-
int was_enabled = capture->enabled;
279-
capture->enabled = 0;
280-
281-
// Process all events in order (maintains NEWOBJ before FREEOBJ for same object)
282-
for (size_t i = 0; i < capture->event_queue.count; i++) {
283-
struct Memory_Profiler_Queue_Item *event = Memory_Profiler_Queue_at(&capture->event_queue, i);
284-
285-
switch (event->type) {
286-
case EVENT_TYPE_NEWOBJ:
287-
Memory_Profiler_Capture_process_newobj(self, capture, event->klass, event->allocations, event->object);
288-
break;
289-
case EVENT_TYPE_FREEOBJ:
290-
Memory_Profiler_Capture_process_freeobj(self, capture, event->klass, event->allocations, event->object);
291-
break;
292-
}
223+
// Public API: Process a single event (NEWOBJ or FREEOBJ)
224+
// Called from events.c via rb_protect to catch exceptions
225+
void Memory_Profiler_Capture_process_event(struct Memory_Profiler_Event *event) {
226+
switch (event->type) {
227+
case MEMORY_PROFILER_EVENT_TYPE_NEWOBJ:
228+
Memory_Profiler_Capture_process_newobj(event->capture, event->klass, event->allocations, event->object);
229+
break;
230+
case MEMORY_PROFILER_EVENT_TYPE_FREEOBJ:
231+
Memory_Profiler_Capture_process_freeobj(event->capture, event->klass, event->allocations, event->object);
232+
break;
293233
}
294-
295-
// Clear the queue (elements are reused on next cycle)
296-
Memory_Profiler_Queue_clear(&capture->event_queue);
297-
298-
// Restore tracking state
299-
capture->enabled = was_enabled;
300234
}
301235

302236
#pragma mark - Event Handlers
303237

304238
// Handler for NEWOBJ event - increment count and enqueue
305-
// Simple: just count + enqueue, all logic is in queue processing
239+
// Automatically tracks ALL allocations (creates records on demand)
306240
static void Memory_Profiler_Capture_newobj_handler(VALUE self, struct Memory_Profiler_Capture *capture, VALUE klass, VALUE object) {
307241
st_data_t allocations_data;
308242
VALUE allocations;
@@ -313,7 +247,7 @@ static void Memory_Profiler_Capture_newobj_handler(VALUE self, struct Memory_Pro
313247
struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
314248
record->new_count++;
315249
} else {
316-
// First time seeing this class - create record
250+
// First time seeing this class - create record automatically
317251
struct Memory_Profiler_Capture_Allocations *record = ALLOC(struct Memory_Profiler_Capture_Allocations);
318252
record->callback = Qnil;
319253
record->new_count = 1;
@@ -326,17 +260,8 @@ static void Memory_Profiler_Capture_newobj_handler(VALUE self, struct Memory_Pro
326260
RB_OBJ_WRITTEN(self, Qnil, allocations);
327261
}
328262

329-
// Always enqueue - queue processing decides what to do
330-
struct Memory_Profiler_Queue_Item *event = Memory_Profiler_Queue_push(&capture->event_queue);
331-
if (event) {
332-
event->type = EVENT_TYPE_NEWOBJ;
333-
RB_OBJ_WRITE(self, &event->klass, klass);
334-
RB_OBJ_WRITE(self, &event->allocations, allocations);
335-
RB_OBJ_WRITE(self, &event->object, object);
336-
337-
if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Queued NEWOBJ, queue size: %zu\n", capture->event_queue.count);
338-
rb_postponed_job_trigger(capture->postponed_job_handle);
339-
}
263+
// Enqueue to global event queue
264+
Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_NEWOBJ, self, klass, allocations, object);
340265
}
341266

342267
// Handler for FREEOBJ event - increment count and enqueue
@@ -350,18 +275,8 @@ static void Memory_Profiler_Capture_freeobj_handler(VALUE self, struct Memory_Pr
350275
// Always track counts
351276
record->free_count++;
352277

353-
// Always enqueue - queue processing decides what to do
354-
struct Memory_Profiler_Queue_Item *event = Memory_Profiler_Queue_push(&capture->event_queue);
355-
if (event) {
356-
event->type = EVENT_TYPE_FREEOBJ;
357-
RB_OBJ_WRITE(self, &event->klass, klass);
358-
RB_OBJ_WRITE(self, &event->allocations, allocations);
359-
// For FREEOBJ, object is just a key for lookup - don't use write barrier
360-
event->object = object;
361-
362-
if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Queued FREEOBJ, queue size: %zu\n", capture->event_queue.count);
363-
rb_postponed_job_trigger(capture->postponed_job_handle);
364-
}
278+
// Enqueue to global event queue
279+
Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_FREEOBJ, self, klass, allocations, object);
365280
}
366281
}
367282

@@ -455,20 +370,7 @@ static VALUE Memory_Profiler_Capture_alloc(VALUE klass) {
455370
capture->running = 0;
456371
capture->enabled = 0;
457372

458-
// Initialize unified event queue
459-
Memory_Profiler_Queue_initialize(&capture->event_queue, sizeof(struct Memory_Profiler_Queue_Item));
460-
461-
// Pre-register the postponed job for processing both queues
462-
// The job will be triggered whenever we queue newobj or freeobj events
463-
capture->postponed_job_handle = rb_postponed_job_preregister(
464-
0, // flags
465-
Memory_Profiler_Capture_process_queues,
466-
(void *)obj
467-
);
468-
469-
if (capture->postponed_job_handle == POSTPONED_JOB_HANDLE_INVALID) {
470-
rb_raise(rb_eRuntimeError, "Failed to register postponed job!");
471-
}
373+
// Global event queue system will auto-initialize on first use (lazy initialization)
472374

473375
return obj;
474376
}
@@ -485,6 +387,10 @@ static VALUE Memory_Profiler_Capture_start(VALUE self) {
485387

486388
if (capture->running) return Qfalse;
487389

390+
// Ensure global event queue system is initialized:
391+
// Initialization can fail; performing it here ensures any error is raised at start time, not later during event handling.
392+
Memory_Profiler_Events_instance();
393+
488394
// Add event hook for NEWOBJ and FREEOBJ with RAW_ARG to get trace_arg
489395
rb_add_event_hook2(
490396
(rb_event_hook_func_t)Memory_Profiler_Capture_event_callback,
@@ -510,8 +416,9 @@ static VALUE Memory_Profiler_Capture_stop(VALUE self) {
510416
// Remove event hook using same data (self) we registered with. No more events will be queued after this point:
511417
rb_remove_event_hook_with_data((rb_event_hook_func_t)Memory_Profiler_Capture_event_callback, self);
512418

513-
// Flush any pending queued events before stopping. This ensures all callbacks are invoked and object_states is properly maintained.
514-
Memory_Profiler_Capture_process_queues((void *)self);
419+
// Flush any pending queued events in the global queue before stopping.
420+
// This ensures all callbacks are invoked and object_states is properly maintained.
421+
Memory_Profiler_Events_process_all();
515422

516423
// Clear both flags - we're no longer running and callbacks are disabled
517424
capture->running = 0;
@@ -584,7 +491,6 @@ static VALUE Memory_Profiler_Capture_tracking_p(VALUE self, VALUE klass) {
584491
return st_lookup(capture->tracked_classes, (st_data_t)klass, NULL) ? Qtrue : Qfalse;
585492
}
586493

587-
588494
// Get count of live objects for a specific class (O(1) lookup!)
589495
static VALUE Memory_Profiler_Capture_count_for(VALUE self, VALUE klass) {
590496
struct Memory_Profiler_Capture *capture;
@@ -693,11 +599,7 @@ static VALUE Memory_Profiler_Capture_statistics(VALUE self) {
693599
// Tracked classes count
694600
rb_hash_aset(statistics, ID2SYM(rb_intern("tracked_classes_count")), SIZET2NUM(capture->tracked_classes->num_entries));
695601

696-
// Event queue sizes
697-
rb_hash_aset(statistics, ID2SYM(rb_intern("event_queue_size")),
698-
SIZET2NUM(capture->event_queue.count));
699-
rb_hash_aset(statistics, ID2SYM(rb_intern("event_queue_capacity")),
700-
SIZET2NUM(capture->event_queue.capacity));
602+
// Note: Global event queue stats are internal to the events module
701603

702604
// Count object_states entries for each tracked class
703605
struct Memory_Profiler_Allocations_Statistics allocations_statistics = {

0 commit comments

Comments (0)