//! @file
//!
//! Copyright (c) Memfault, Inc.
//! See License.txt for details
//!
//! Simple heap allocation tracking utility. Intended to be shimmed into a
//! system's malloc/free implementation to record the most recent allocations
//! along with callsite information.
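
//! Example integration: a minimal sketch, assuming a GCC/Clang toolchain where
//! the system allocator can be routed through GNU ld's --wrap feature
//! (-Wl,--wrap=malloc,--wrap=free). The __wrap_/__real_ symbol names come from
//! that linker feature and __builtin_return_address() is a GCC/Clang builtin
//! used to capture the callsite:
//!
//!   extern void *__real_malloc(size_t size);
//!   extern void __real_free(void *ptr);
//!
//!   void *__wrap_malloc(size_t size) {
//!     void *ptr = __real_malloc(size);
//!     memfault_heap_stats_malloc(__builtin_return_address(0), ptr, size);
//!     return ptr;
//!   }
//!
//!   void __wrap_free(void *ptr) {
//!     memfault_heap_stats_free(ptr);
//!     __real_free(ptr);
//!   }
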
#include "memfault/core/heap_stats.h"
#include "memfault/core/heap_stats_impl.h"

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include "memfault/config.h"
#include "memfault/core/compiler.h"
#include "memfault/core/debug_log.h"
#include "memfault/core/math.h"
#include "memfault/core/platform/debug_log.h"
#include "memfault/core/platform/overrides.h"

#define MEMFAULT_HEAP_STATS_VERSION 1

sMfltHeapStats g_memfault_heap_stats = {
  .version = MEMFAULT_HEAP_STATS_VERSION,
};
sMfltHeapStatEntry g_memfault_heap_stats_pool[MEMFAULT_HEAP_STATS_MAX_COUNT];
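
// The pool above holds the most recently tracked allocations; its depth is
// controlled by MEMFAULT_HEAP_STATS_MAX_COUNT, pulled in via
// "memfault/config.h". A sketch of how a project could size it differently,
// assuming the usual pattern of overriding defaults from a project-local
// config header:
//
//   // e.g. in memfault_platform_config.h
//   #define MEMFAULT_HEAP_STATS_MAX_COUNT 32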

static void prv_heap_stats_lock(void) {
#if MEMFAULT_COREDUMP_HEAP_STATS_LOCK_ENABLE
  memfault_lock();
#endif
}

static void prv_heap_stats_unlock(void) {
#if MEMFAULT_COREDUMP_HEAP_STATS_LOCK_ENABLE
  memfault_unlock();
#endif
}
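
// memfault_lock() / memfault_unlock() are declared in
// "memfault/core/platform/overrides.h" and are only invoked here when
// MEMFAULT_COREDUMP_HEAP_STATS_LOCK_ENABLE is set, so the bookkeeping stays
// consistent when allocations happen from multiple tasks. A minimal sketch of
// what a port might supply, assuming a FreeRTOS-based system (the recursive
// mutex below is hypothetical and must be created by the port at boot):
//
//   #include "FreeRTOS.h"
//   #include "semphr.h"
//
//   static SemaphoreHandle_t s_memfault_lock; // e.g. xSemaphoreCreateRecursiveMutex()
//
//   void memfault_lock(void) {
//     xSemaphoreTakeRecursive(s_memfault_lock, portMAX_DELAY);
//   }
//
//   void memfault_unlock(void) {
//     xSemaphoreGiveRecursive(s_memfault_lock);
//   }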

void memfault_heap_stats_reset(void) {
  prv_heap_stats_lock();
  // clear all tracked state but keep reporting the layout version
  g_memfault_heap_stats = (sMfltHeapStats){
    .version = MEMFAULT_HEAP_STATS_VERSION,
  };
  memset(g_memfault_heap_stats_pool, 0, sizeof(g_memfault_heap_stats_pool));
  prv_heap_stats_unlock();
}

bool memfault_heap_stats_empty(void) {
  // entries are written starting from index 0, so a zero size in the first
  // slot means no allocation has ever been recorded
  return g_memfault_heap_stats_pool[0].info.size == 0;
}

//! Returns the next pool slot to write. The pool is used as a circular
//! buffer, so once it is full the oldest entry gets overwritten.
static sMfltHeapStatEntry *prv_get_next_entry(void) {
  sMfltHeapStatEntry *slot =
    &g_memfault_heap_stats_pool[g_memfault_heap_stats.stats_pool_head];
  g_memfault_heap_stats.stats_pool_head =
    (g_memfault_heap_stats.stats_pool_head + 1) % MEMFAULT_ARRAY_SIZE(g_memfault_heap_stats_pool);
  return slot;
}

void memfault_heap_stats_malloc(const void *lr, const void *ptr, size_t size) {
  prv_heap_stats_lock();

  if (ptr) {
    g_memfault_heap_stats.in_use_block_count++;
    // track the high-watermark of concurrently outstanding allocations
    if (g_memfault_heap_stats.in_use_block_count > g_memfault_heap_stats.max_in_use_block_count) {
      g_memfault_heap_stats.max_in_use_block_count = g_memfault_heap_stats.in_use_block_count;
    }
    sMfltHeapStatEntry *slot = prv_get_next_entry();
    *slot = (sMfltHeapStatEntry){
      .lr = lr,
      .ptr = ptr,
      .info =
        {
          // only the low 31 bits of the size are recorded; the mask keeps the
          // value within the size bitfield alongside the in_use flag
          .size = size & (~(1u << 31)),
          .in_use = 1u,
        },
    };
  }

  prv_heap_stats_unlock();
}

void memfault_heap_stats_free(const void *ptr) {
  prv_heap_stats_lock();
  if (ptr) {
    // the running count is updated even if the allocation is no longer in the
    // pool (its entry may have been overwritten by newer allocations)
    g_memfault_heap_stats.in_use_block_count--;

    // if the pointer exists in the tracked stats, mark it as freed
    for (size_t i = 0; i < MEMFAULT_ARRAY_SIZE(g_memfault_heap_stats_pool); i++) {
      if (g_memfault_heap_stats_pool[i].ptr == ptr) {
        g_memfault_heap_stats_pool[i].info.in_use = 0;
        break;
      }
    }
  }
  prv_heap_stats_unlock();
}
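
// Usage sketch (illustrative): with malloc/free shimmed as in the example at
// the top of this file, the public API behaves as follows:
//
//   memfault_heap_stats_reset();
//   void *buf = malloc(16);                     // recorded with its callsite
//   bool empty = memfault_heap_stats_empty();   // now false
//   free(buf);                                  // entry stays in the pool, marked not in_use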