Commit 09435f8

Merge pull request #21 from visitorckw/allocator-improvement
Improvements to memory allocator safety validation and optimizations
2 parents: d6232aa + 2134290

3 files changed: 89 additions, 31 deletions

include/private/error.h

Lines changed: 1 addition & 0 deletions
@@ -27,6 +27,7 @@ enum {
 
     /* Memory Protection Errors */
     ERR_STACK_CHECK,  /* Stack overflow or corruption detected */
+    ERR_HEAP_CORRUPT, /* Heap corruption or invalid free detected */
 
     /* IPC and Synchronization Errors */
     ERR_PIPE_ALLOC,   /* Pipe allocation failed */

kernel/error.c

Lines changed: 1 addition & 0 deletions
@@ -22,6 +22,7 @@ static const struct error_code error_desc[] = {
 
     /* stack guard */
     {ERR_STACK_CHECK, "stack corruption"},
+    {ERR_HEAP_CORRUPT, "heap corruption or invalid free"},
 
     /* IPC / sync */
     {ERR_PIPE_ALLOC, "pipe allocation"},

lib/malloc.c

Lines changed: 87 additions & 31 deletions
@@ -4,6 +4,7 @@
 #include <sys/task.h>
 #include <types.h>
 
+#include "private/error.h"
 #include "private/utils.h"
 
 /* Memory allocator using first-fit strategy with selective coalescing.
@@ -44,15 +45,21 @@ static uint32_t free_blocks_count; /* track fragmentation */
 /* Validate block integrity */
 static inline bool validate_block(memblock_t *block)
 {
-    if (!IS_VALID_BLOCK(block))
+    if (unlikely(!IS_VALID_BLOCK(block)))
         return false;
 
     size_t size = GET_SIZE(block);
-    if (!size || size > MALLOC_MAX_SIZE)
+    if (unlikely(!size || size > MALLOC_MAX_SIZE))
         return false;
 
     /* Check if block extends beyond heap */
-    if ((uint8_t *) block + sizeof(memblock_t) + size > (uint8_t *) heap_end)
+    if (unlikely((uint8_t *) block + sizeof(memblock_t) + size >
+                 (uint8_t *) heap_end))
+        return false;
+
+    if (unlikely(block->next &&
+                 (uint8_t *) block + sizeof(memblock_t) + GET_SIZE(block) !=
+                     (uint8_t *) block->next))
         return false;
 
     return true;
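
The fourth check added to validate_block() encodes the allocator's implicit-list invariant: a block's payload must end exactly where the next block's header begins, so a corrupted size field or next pointer is caught before the list is walked further. A minimal standalone sketch of that adjacency test, using a simplified header struct rather than the kernel's real memblock_t layout (field names and types here are illustrative assumptions):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-in for memblock_t; the real header also packs a used flag
 * into the size field (see IS_USED/GET_SIZE in lib/malloc.c). */
struct blk {
    struct blk *next;
    size_t size; /* payload size in bytes */
};

/* True when the payload of b ends exactly at the header of b->next. */
static bool adjacent_ok(const struct blk *b)
{
    if (!b->next)
        return true; /* last block: nothing to be adjacent to */
    return (const uint8_t *) b + sizeof(struct blk) + b->size ==
           (const uint8_t *) b->next;
}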
@@ -69,18 +76,17 @@ void free(void *ptr)
     memblock_t *p = ((memblock_t *) ptr) - 1;
 
     /* Validate the block being freed */
-    if (!validate_block(p) || !IS_USED(p)) {
+    if (unlikely(!validate_block(p) || !IS_USED(p))) {
         CRITICAL_LEAVE();
+        panic(ERR_HEAP_CORRUPT);
         return; /* Invalid or double-free */
     }
 
     MARK_FREE(p);
     free_blocks_count++;
 
     /* Forward merge if the next block is free and physically adjacent */
-    if (p->next && !IS_USED(p->next) &&
-        (uint8_t *) p + sizeof(memblock_t) + GET_SIZE(p) ==
-            (uint8_t *) p->next) {
+    if (p->next && !IS_USED(p->next)) {
         p->size = GET_SIZE(p) + sizeof(memblock_t) + GET_SIZE(p->next);
         p->next = p->next->next;
         free_blocks_count--;
@@ -94,9 +100,12 @@ void free(void *ptr)
         current = current->next;
     }
 
-    if (prev && !IS_USED(prev) &&
-        (uint8_t *) prev + sizeof(memblock_t) + GET_SIZE(prev) ==
-            (uint8_t *) p) {
+    if (prev && !IS_USED(prev)) {
+        if (unlikely(!validate_block(prev))) {
+            CRITICAL_LEAVE();
+            panic(ERR_HEAP_CORRUPT);
+            return;
+        }
         prev->size = GET_SIZE(prev) + sizeof(memblock_t) + GET_SIZE(p);
         prev->next = p->next;
         free_blocks_count--;
@@ -109,22 +118,45 @@ void free(void *ptr)
 static void selective_coalesce(void)
 {
     memblock_t *p = first_free;
-    uint32_t coalesced = 0;
 
     while (p && p->next) {
         /* Merge only when blocks are FREE *and* adjacent in memory */
-        uint8_t *pend = (uint8_t *) p + sizeof(memblock_t) + GET_SIZE(p);
-        if (!IS_USED(p) && !IS_USED(p->next) && pend == (uint8_t *) p->next) {
+        if (unlikely(!validate_block(p))) {
+            panic(ERR_HEAP_CORRUPT);
+            return;
+        }
+        if (!IS_USED(p) && !IS_USED(p->next)) {
             p->size = GET_SIZE(p) + sizeof(memblock_t) + GET_SIZE(p->next);
             p->next = p->next->next;
-            coalesced++;
             free_blocks_count--;
         } else {
             p = p->next;
         }
     }
 }
 
+static inline void split_block(memblock_t *block, size_t size)
+{
+    size_t remaining;
+    memblock_t *new_block;
+
+    if (unlikely(size >= GET_SIZE(block))) {
+        panic(ERR_HEAP_CORRUPT);
+        return;
+    }
+    remaining = GET_SIZE(block) - size;
+    /* Split only when remaining memory is large enough */
+    if (remaining < sizeof(memblock_t) + MALLOC_MIN_SIZE)
+        return;
+    new_block = (memblock_t *) ((size_t) block + sizeof(memblock_t) + size);
+    new_block->next = block->next;
+    new_block->size = remaining - sizeof(memblock_t);
+    MARK_FREE(new_block);
+    block->next = new_block;
+    block->size = size | IS_USED(block);
+    free_blocks_count++; /* New free block created */
+}
+
 /* O(n) first-fit allocation with selective coalescing */
 void *malloc(uint32_t size)
 {
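
split_block() centralizes the splitting logic that malloc() previously open-coded (see the next hunk) and that the new realloc() fast paths reuse. A self-contained sketch of the size arithmetic it performs, with an assumed 16-byte header and 8-byte minimum payload; the real thresholds come from sizeof(memblock_t) and MALLOC_MIN_SIZE:

#include <stdint.h>
#include <stdio.h>

#define HDR_SIZE 16u /* assumed sizeof(memblock_t) */
#define MIN_SIZE 8u  /* assumed MALLOC_MIN_SIZE */

int main(void)
{
    uint32_t have = 96, want = 32; /* free payload vs. requested size */
    uint32_t remaining = have - want;

    if (remaining >= HDR_SIZE + MIN_SIZE) /* remainder big enough to be useful */
        printf("split: keep %u bytes, new free block payload %u bytes\n",
               want, remaining - HDR_SIZE);
    else /* remainder too small: hand out the whole block */
        printf("no split: allocate all %u bytes\n", have);
    return 0;
}

With these example numbers the 96-byte block is trimmed to 32 bytes and a 48-byte free block is created behind it; split_block() does the same with real headers while preserving the caller's IS_USED flag and bumping free_blocks_count.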
@@ -146,29 +178,22 @@ void *malloc(uint32_t size)
 
     memblock_t *p = first_free;
     while (p) {
-        if (!validate_block(p)) {
+        if (unlikely(!validate_block(p))) {
             CRITICAL_LEAVE();
+            panic(ERR_HEAP_CORRUPT);
             return NULL; /* Heap corruption detected */
         }
 
         if (!IS_USED(p) && GET_SIZE(p) >= size) {
-            size_t remaining = GET_SIZE(p) - size;
-
             /* Split block only if remainder is large enough to be useful */
-            if (remaining >= sizeof(memblock_t) + MALLOC_MIN_SIZE) {
-                memblock_t *new_block =
-                    (memblock_t *) ((size_t) p + sizeof(memblock_t) + size);
-                new_block->next = p->next;
-                new_block->size = remaining - sizeof(memblock_t);
-                MARK_FREE(new_block);
-                p->next = new_block;
-                p->size = size;
-                free_blocks_count++; /* New free block created */
-            }
+            split_block(p, size);
 
             MARK_USED(p);
-            if (free_blocks_count > 0)
-                free_blocks_count--;
+            if (unlikely(free_blocks_count <= 0)) {
+                panic(ERR_HEAP_CORRUPT);
+                return NULL;
+            }
+            free_blocks_count--;
 
             CRITICAL_LEAVE();
             return (void *) (p + 1);
@@ -213,7 +238,7 @@ void *calloc(uint32_t nmemb, uint32_t size)
     if (unlikely(nmemb && size > MALLOC_MAX_SIZE / nmemb))
         return NULL;
 
-    uint32_t total_size = nmemb * size;
+    uint32_t total_size = ALIGN4(nmemb * size);
     void *buf = malloc(total_size);
 
     if (buf)
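
Rounding the product with ALIGN4() keeps calloc()'s request 4-byte aligned, matching the ALIGN4(size) now applied in realloc() below, while the pre-existing division guard above it still rejects products that would overflow uint32_t before the multiplication happens. A short illustration of that guard; the MALLOC_MAX_SIZE value here is an assumption for the example only:

#include <stdint.h>
#include <stdio.h>

#define MALLOC_MAX_SIZE (1u << 20) /* assumed limit; the real value lives in lib/malloc.c */

int main(void)
{
    uint32_t nmemb = 70000, size = 70000;

    /* 70000 * 70000 wraps around in uint32_t, but dividing instead of
     * multiplying catches it: size > MALLOC_MAX_SIZE / nmemb. */
    if (nmemb && size > MALLOC_MAX_SIZE / nmemb)
        printf("rejected: %u * %u exceeds the allocator limit\n", nmemb, size);
    return 0;
}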
@@ -236,11 +261,15 @@ void *realloc(void *ptr, uint32_t size)
         return NULL;
     }
 
+    size = ALIGN4(size);
+
     memblock_t *old_block = ((memblock_t *) ptr) - 1;
 
     /* Validate the existing block */
-    if (!validate_block(old_block) || !IS_USED(old_block))
+    if (unlikely(!validate_block(old_block) || !IS_USED(old_block))) {
+        panic(ERR_HEAP_CORRUPT);
         return NULL;
+    }
 
     size_t old_size = GET_SIZE(old_block);
 
@@ -249,6 +278,33 @@ void *realloc(void *ptr, uint32_t size)
         old_size - size < sizeof(memblock_t) + MALLOC_MIN_SIZE)
         return ptr;
 
+    /* fast path for shrinking */
+    if (size <= old_size) {
+        split_block(old_block, size);
+        /* Trigger coalescing only when fragmentation is high */
+        if (free_blocks_count > COALESCE_THRESHOLD)
+            selective_coalesce();
+        CRITICAL_LEAVE();
+        return (void *) (old_block + 1);
+    }
+
+    /* fast path for growing */
+    if (old_block->next && !IS_USED(old_block->next) &&
+        GET_SIZE(old_block) + sizeof(memblock_t) + GET_SIZE(old_block->next) >=
+            size) {
+        old_block->size = GET_SIZE(old_block) + sizeof(memblock_t) +
+                          GET_SIZE(old_block->next);
+        old_block->next = old_block->next->next;
+        free_blocks_count--;
+        split_block(old_block, size);
+        /* Trigger coalescing only when fragmentation is high */
+        if (free_blocks_count > COALESCE_THRESHOLD)
+            selective_coalesce();
+        CRITICAL_LEAVE();
+        return (void *) (old_block + 1);
+    }
+
+
     void *new_buf = malloc(size);
     if (new_buf) {
         memcpy(new_buf, ptr, min(old_size, size));
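
Taken together, the two new realloc() branches avoid the malloc-copy-free round trip whenever the request can be satisfied in place: shrinking trims the current block through split_block(), and growing first tries to absorb the physically adjacent free block before falling back to a fresh allocation. A hedged usage sketch against the allocator's public API (sizes are arbitrary and error handling is elided for brevity):

/* Usage sketch only; assumes the kernel environment where lib/malloc.c is linked. */
static void realloc_fast_path_demo(void)
{
    void *buf = malloc(256); /* first-fit allocation */
    if (!buf)
        return;
    buf = realloc(buf, 64);  /* shrink: split_block() trims in place, no copy */
    buf = realloc(buf, 200); /* grow: merge with the adjacent free block when
                                possible, otherwise malloc + memcpy + free */
    free(buf);               /* an invalid or double free now ends in
                                panic(ERR_HEAP_CORRUPT) instead of returning silently */
}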
