// Released under the MIT License.
// Copyright, 2025, by Samuel Williams.

#include "allocations.h"

#include "ruby.h"
#include "ruby/debug.h"
#include "ruby/st.h"
#include <stdio.h>

static VALUE Memory_Profiler_Allocations = Qnil;

// Helper to mark object_states table keys and values
static int Memory_Profiler_Allocations_object_states_mark(st_data_t key, st_data_t value, st_data_t arg) {
	VALUE object = (VALUE)key;
	rb_gc_mark_movable(object);
	
	VALUE state = (VALUE)value;
	if (!NIL_P(state)) {
		rb_gc_mark_movable(state);
	}
	return ST_CONTINUE;
}

// Foreach callback for st_foreach_with_replace (iteration logic)
static int Memory_Profiler_Allocations_object_states_foreach(st_data_t key, st_data_t value, st_data_t argp, int error) {
	// Return ST_REPLACE to trigger the replace callback for each entry
	return ST_REPLACE;
}

// Replace callback for st_foreach_with_replace to update object_states keys and values during compaction
static int Memory_Profiler_Allocations_object_states_compact(st_data_t *key, st_data_t *value, st_data_t data, int existing) {
	VALUE old_object = (VALUE)*key;
	VALUE old_state = (VALUE)*value;
	
	VALUE new_object = rb_gc_location(old_object);
	VALUE new_state = rb_gc_location(old_state);
	
	// Update key if it moved
	if (old_object != new_object) {
		*key = (st_data_t)new_object;
	}
	
	// Update value if it moved
	if (old_state != new_state) {
		*value = (st_data_t)new_state;
	}
	
	return ST_CONTINUE;
}

// GC mark function for Allocations
static void Memory_Profiler_Allocations_mark(void *ptr) {
	struct Memory_Profiler_Capture_Allocations *record = ptr;
	
	if (!record) {
		return;
	}
	
	if (!NIL_P(record->callback)) {
		rb_gc_mark_movable(record->callback);
	}
	
	// Mark object_states table if it exists
	if (record->object_states) {
		st_foreach(record->object_states, Memory_Profiler_Allocations_object_states_mark, 0);
	}
}

// GC free function for Allocations
static void Memory_Profiler_Allocations_free(void *ptr) {
	struct Memory_Profiler_Capture_Allocations *record = ptr;
	
	if (record->object_states) {
		st_free_table(record->object_states);
	}
	
	xfree(record);
}

// GC compact function for Allocations
static void Memory_Profiler_Allocations_compact(void *ptr) {
	struct Memory_Profiler_Capture_Allocations *record = ptr;
	
	// Update callback if it moved
	if (!NIL_P(record->callback)) {
		record->callback = rb_gc_location(record->callback);
	}
	
	// Update object_states table if it exists
	if (record->object_states && record->object_states->num_entries > 0) {
		if (st_foreach_with_replace(record->object_states, Memory_Profiler_Allocations_object_states_foreach, Memory_Profiler_Allocations_object_states_compact, 0)) {
			rb_raise(rb_eRuntimeError, "object_states modified during GC compaction");
		}
	}
}

static const rb_data_type_t Memory_Profiler_Allocations_type = {
	"Memory::Profiler::Allocations",
	{
		.dmark = Memory_Profiler_Allocations_mark,
		.dcompact = Memory_Profiler_Allocations_compact,
		.dfree = Memory_Profiler_Allocations_free,
	},
	0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
};

// Wrap an allocations record
VALUE Memory_Profiler_Allocations_wrap(struct Memory_Profiler_Capture_Allocations *record) {
	return TypedData_Wrap_Struct(Memory_Profiler_Allocations, &Memory_Profiler_Allocations_type, record);
}

// Get allocations record from wrapper
struct Memory_Profiler_Capture_Allocations* Memory_Profiler_Allocations_get(VALUE self) {
	struct Memory_Profiler_Capture_Allocations *record;
	TypedData_Get_Struct(self, struct Memory_Profiler_Capture_Allocations, &Memory_Profiler_Allocations_type, record);
	return record;
}
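
// Example (a sketch, not part of this file): an assumption about how the
// companion capture code, which is defined elsewhere, might allocate a record
// and wrap it for Ruby. Any additional fields declared in allocations.h would
// also need initializing here.
//
//	struct Memory_Profiler_Capture_Allocations *record = ALLOC(struct Memory_Profiler_Capture_Allocations);
//	record->callback = Qnil;
//	record->object_states = NULL;
//	record->new_count = 0;
//	record->free_count = 0;
//	VALUE allocations = Memory_Profiler_Allocations_wrap(record);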

// Allocations#new_count
static VALUE Memory_Profiler_Allocations_new_count(VALUE self) {
	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(self);
	return SIZET2NUM(record->new_count);
}

// Allocations#free_count
static VALUE Memory_Profiler_Allocations_free_count(VALUE self) {
	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(self);
	return SIZET2NUM(record->free_count);
}

// Allocations#retained_count
static VALUE Memory_Profiler_Allocations_retained_count(VALUE self) {
	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(self);
	// Handle underflow when free_count > new_count
	size_t retained = record->free_count > record->new_count ? 0 : record->new_count - record->free_count;
	return SIZET2NUM(retained);
}

// Allocations#track { |klass| ... }
static VALUE Memory_Profiler_Allocations_track(int argc, VALUE *argv, VALUE self) {
	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(self);
	
	VALUE callback;
	rb_scan_args(argc, argv, "&", &callback);
	
	// Use a write barrier: self (the Allocations wrapper) now references the callback, and the mark function keeps it alive.
	RB_OBJ_WRITE(self, &record->callback, callback);
	
	return self;
}

// Clear/reset allocation counts and state for a record
void Memory_Profiler_Allocations_clear(VALUE allocations) {
	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
	record->new_count = 0; // Reset allocation count
	record->free_count = 0; // Reset free count
	RB_OBJ_WRITE(allocations, &record->callback, Qnil); // Clear callback with write barrier
	
	// Clear object states
	if (record->object_states) {
		st_free_table(record->object_states);
		record->object_states = NULL;
	}
}

void Init_Memory_Profiler_Allocations(VALUE Memory_Profiler)
{
	// Allocations class - wraps allocation data for a specific class
	Memory_Profiler_Allocations = rb_define_class_under(Memory_Profiler, "Allocations", rb_cObject);
	
	// Allocations objects are only created internally via wrap, never from Ruby:
	rb_undef_alloc_func(Memory_Profiler_Allocations);
	
	rb_define_method(Memory_Profiler_Allocations, "new_count", Memory_Profiler_Allocations_new_count, 0);
	rb_define_method(Memory_Profiler_Allocations, "free_count", Memory_Profiler_Allocations_free_count, 0);
	rb_define_method(Memory_Profiler_Allocations, "retained_count", Memory_Profiler_Allocations_retained_count, 0);
	rb_define_method(Memory_Profiler_Allocations, "track", Memory_Profiler_Allocations_track, -1); // -1 to accept block
}
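
// Example usage from Ruby (a sketch, not part of this file; it assumes an
// Allocations instance is obtained from the profiler's capture API defined
// elsewhere in this gem, and that the callback receives the class, as the
// Allocations#track comment above suggests):
//
//	allocations.track do |klass|
//		# Called for allocations of the tracked class.
//	end
//
//	allocations.new_count      # allocations observed
//	allocations.free_count     # frees observed
//	allocations.retained_count # new_count - free_count, clamped at zero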