Skip to content

Commit e4ae0a1

Browse files
add allocator spinlock
1 parent bab3d63 commit e4ae0a1

File tree

2 files changed

+15
-2
lines changed

2 files changed

+15
-2
lines changed

src/ap.c

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,8 @@ void kmain_ap(struct limine_smp_info *info)
1313
* @todo Insert cpu-local scheduler loop here.
1414
* Each AP will run its own list of executing BASIC processes. Accessing
1515
* the list of other APs and the BSP will be strictly controlled via a
16-
* marshalled lookup system using a spinlock, e.g. if AP 1 wants to check if
17-
* PID X on AP 2 is still running.
16+
* marshalled lookup system using a spinlock, e.g. if AP 1 wants to start a new
17+
* process on AP 2, it signals via this system.
1818
*
1919
* This will be done as follows:
2020
*
@@ -24,5 +24,8 @@ void kmain_ap(struct limine_smp_info *info)
2424
* 4) Initially AP's will wait for a start command in their queue,
2525
* they won't run their scheduler until they receive this command. This allows them all to
2626
* gracefully wait until the first BASIC process is ready to be loaded (/programs/init).
27+
* 5) A shared process list will be used for enumerating processes and checking process
28+
* state, so we don't have to peek at another scheduler instance's command queue to
29+
* see if a process that one of ours is waiting on is still alive.
2730
*/
2831
}

src/kmalloc.c

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,8 @@ static uint64_t allocated = 0;
1313

1414
static uint32_t low_mem_cur = LOW_HEAP_START;
1515

16+
static spinlock_t allocator_lock = 0;
17+
1618
volatile struct limine_memmap_request memory_map_request = {
1719
.id = LIMINE_MEMMAP_REQUEST,
1820
.revision = 0,
@@ -29,6 +31,8 @@ void init_heap() {
2931
uint64_t best_len = 0;
3032
uint64_t best_addr = 0;
3133

34+
init_spinlock(&allocator_lock);
35+
3236
for (uint64_t i = 0; i < memory_map_request.response->entry_count; ++i) {
3337
struct limine_memmap_entry* entry = memory_map_request.response->entries[i];
3438
if (entry->type == LIMINE_MEMMAP_USABLE && entry->length > best_len) {
@@ -54,29 +58,35 @@ void print_heapinfo() {
5458
}
5559

5660
void* kmalloc(uint64_t size) {
61+
lock_spinlock(&allocator_lock);
5762
void* p = ta_alloc(size);
5863
allocated += ta_usable_size((void*)p);
64+
unlock_spinlock(&allocator_lock);
5965
return p;
6066
}
6167

6268
void kfree(const void* ptr) {
6369
if (!ptr) {
6470
return;
6571
}
72+
lock_spinlock(&allocator_lock);
6673
uintptr_t a = (uintptr_t)ptr;
6774
if (a >= LOW_HEAP_START && a < LOW_HEAP_MAX) {
6875
preboot_fail("kfree: tried to free low heap memory - use kfree_low instead!");
6976
}
7077
allocated -= ta_usable_size((void*)ptr);
7178
ta_free((void*)ptr);
79+
unlock_spinlock(&allocator_lock);
7280
}
7381

7482
uint32_t kmalloc_low(uint32_t size) {
83+
lock_spinlock(&allocator_lock);
7584
uint32_t ret = low_mem_cur;
7685
if (ret + size >= LOW_HEAP_MAX) {
7786
preboot_fail("kmalloc_low exhausted");
7887
}
7988
low_mem_cur += size;
89+
unlock_spinlock(&allocator_lock);
8090
return ret;
8191
}
8292

0 commit comments

Comments
 (0)