Skip to content
This repository was archived by the owner on Dec 25, 2025. It is now read-only.

Commit 1d56222

Browse files
committed
Add mprotect()
1 parent 230c5f2 commit 1d56222

File tree

6 files changed

+129
-13
lines changed

6 files changed

+129
-13
lines changed

kernel/include/memory/pagemap.hpp

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,8 @@ class PageMap {
3838

3939
void unmap(uintptr_t virt_addr, uint16_t owner_pcid = 0, bool free_phys = false);
4040
uintptr_t translate(uintptr_t virt_addr);
41+
size_t set_page_flags(uintptr_t virt_addr, uint8_t flags,
42+
CacheType cache = CacheType::WriteBack, uint16_t owner_pcid = 0);
4143

4244
void load(uint16_t pcid = 0, bool flush = true);
4345

kernel/include/task/process.hpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ struct Thread : public IntrusiveListNode<SchedulerTag>,
5656

5757
Thread() = default;
5858
Thread(Process* parent, void (*callback)(void*), void* args);
59-
~Thread() {}
59+
~Thread();
6060

6161
private:
6262
void arch_init(uintptr_t entry, uintptr_t arg);
@@ -88,6 +88,7 @@ struct Process : public IntrusiveListNode<ProcessTag> {
8888

8989
void* mmap(void* addr, size_t len, int prot, int flags);
9090
void munmap(void* ptr, size_t len);
91+
int mprotect(void* addr, size_t len, int prot);
9192

9293
static void init();
9394

kernel/src/arch/x86_64/memory/paging.cpp

Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
#include "memory/pagemap.hpp"
33
#include "memory/memory.hpp"
44
#include "memory/paging.hpp"
5+
#include <cstdint>
56
#include "memory/pcid_manager.hpp"
67
#include "memory/pmm.hpp"
78
#include "cpu/registers.hpp"
@@ -525,6 +526,67 @@ uintptr_t PageMap::translate(uintptr_t virt_addr) {
525526
return 0;
526527
}
527528

529+
size_t PageMap::set_page_flags(uintptr_t virt_addr, uint8_t flags, CacheType cache,
530+
uint16_t owner_pcid) {
531+
uintptr_t curr_table_phys = this->phys_root_addr;
532+
size_t new_flags = convert_generic_flags(flags, cache, PageSize::Size4K);
533+
534+
for (int level = max_levels; level >= 1; --level) {
535+
uintptr_t* table_virt = reinterpret_cast<uintptr_t*>(to_higher_half(curr_table_phys));
536+
537+
int shift = 12 + (level - 1) * 9;
538+
int index = static_cast<int>((virt_addr >> shift) & 0x1FF);
539+
540+
uint64_t entry = table_virt[index];
541+
542+
if (!(entry & FlagPresent)) {
543+
return false;
544+
}
545+
546+
bool is_huge = (level > 1) && (entry & FlagHuge);
547+
bool is_leaf = (level == 1);
548+
549+
if (is_huge || is_leaf) {
550+
uintptr_t phys_base = entry & page_mask;
551+
uint64_t new_entry = phys_base | new_flags;
552+
553+
if (is_huge) {
554+
new_entry |= FlagHuge;
555+
556+
// convert_generic_flags() doesn't handles Large PAT here.
557+
// So, we do it manually.
558+
switch (cache) {
559+
case CacheType::WriteProtected:
560+
new_entry |= FlagLPAT;
561+
break;
562+
case CacheType::WriteCombining:
563+
new_entry |= FlagLPAT;
564+
break;
565+
case CacheType::WriteBack:
566+
case CacheType::WriteThrough:
567+
case CacheType::Uncached:
568+
default:
569+
break;
570+
}
571+
}
572+
573+
table_virt[index] = new_entry;
574+
575+
if (this->is_active()) {
576+
TLB::flush(virt_addr);
577+
} else {
578+
TLB::flush_specific(virt_addr, owner_pcid);
579+
}
580+
581+
return 1ul << shift;
582+
}
583+
584+
curr_table_phys = entry & page_mask;
585+
}
586+
587+
return 0;
588+
}
589+
528590
void PageMap::create_new(PageMap* map) {
529591
static bool kernel_initialized = false;
530592
uintptr_t* root_phys = static_cast<uintptr_t*>(PhysicalManager::alloc());

kernel/src/libs/mutex.cpp

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ void Mutex::timeout_callback(void* data) {
1111

1212
ctx->timed_out.store(true, std::memory_order_release);
1313

14-
task::Scheduler& sched = cpu::CpuCoreManager::get().get_current_core()->sched;
14+
task::Scheduler& sched = task::Scheduler::get();
1515
sched.unblock(ctx->thread);
1616
}
1717

@@ -130,9 +130,8 @@ void Mutex::wakeup_next() {
130130
LockGuard guard(this->queue_lock);
131131

132132
if (this->wait_head) {
133-
task::Thread* t = this->wait_head->thread;
134-
cpu::PerCpuData* cpu = cpu::CpuCoreManager::get().get_current_core();
135-
cpu->sched.unblock(t);
133+
task::Thread* t = this->wait_head->thread;
134+
task::Scheduler::get().unblock(t);
136135

137136
WaitNode* old = this->wait_head;
138137
this->wait_head = this->wait_head->next;

kernel/src/task/process.cpp

Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,16 @@ Thread::Thread(Process* proc, void (*callback)(void*), void* args) {
3636
this->arch_init(reinterpret_cast<uintptr_t>(callback), reinterpret_cast<uintptr_t>(args));
3737
}
3838

39+
// Releases the thread's per-thread resources: its stack and FPU save area.
// NOTE(review): assumes `kernel_stack` for kernel threads was allocated with
// scalar `new` (a `delete[]`/allocator mismatch here would be UB) — confirm
// against the allocation site in Thread's constructor / arch_init.
Thread::~Thread() {
    if (this->is_user_thread) {
        // User threads get their stack from the owner process's mmap'd
        // address space; return it through munmap. Presumably `kernel_stack`
        // holds the user-stack base here despite the name — TODO confirm.
        this->owner->munmap(this->kernel_stack, USTACK_SIZE);
    } else {
        delete this->kernel_stack;
    }

    delete this->fpu_storage;
}
48+
3949
Process::Process(memory::PageMap* map) : map(map) {
4050
this->pid = next_pid.fetch_add(1, std::memory_order_relaxed);
4151
this->next_tid.store(1, std::memory_order_relaxed);
@@ -145,4 +155,43 @@ void Process::munmap(void* ptr, size_t) {
145155
void Process::init() {
146156
kernel_proc = new Process(memory::PageMap::get_kernel_map());
147157
}
158+
159+
int Process::mprotect(void* addr, size_t len, int prot) {
160+
uintptr_t virt_start = reinterpret_cast<uintptr_t>(addr);
161+
162+
if (virt_start & (memory::PAGE_SIZE_4K - 1)) {
163+
return -1;
164+
}
165+
166+
uint8_t flags = memory::Read | memory::User;
167+
memory::CacheType cache = memory::CacheType::WriteBack;
168+
169+
// Align length to 4KiB
170+
size_t aligned_len = align_up(len, memory::PAGE_SIZE_4K);
171+
172+
if (prot & PROT_WRITE) {
173+
flags |= memory::Write;
174+
}
175+
176+
if (prot & PROT_EXEC) {
177+
flags |= memory::Execute;
178+
}
179+
180+
uintptr_t curr_virt = virt_start;
181+
uintptr_t virt_end = virt_start + aligned_len;
182+
uint16_t active_pcid = memory::PcidManager::get().get_pcid(this);
183+
184+
// TODO: Implement shatter page in PageMap
185+
while (curr_virt < virt_end) {
186+
size_t chunk_size = this->map->set_page_flags(curr_virt, flags, cache, active_pcid);
187+
188+
if (chunk_size == 0) {
189+
return -1;
190+
}
191+
192+
curr_virt += chunk_size;
193+
}
194+
195+
return 0;
196+
}
148197
} // namespace kernel::task

kernel/src/task/scheduler.cpp

Lines changed: 11 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,9 @@ constexpr uint16_t TIME_SLICE_QUANTA[64] = {
1717
} // namespace
1818

1919
void Scheduler::add_thread(Thread* t) {
20+
cpu::CpuCoreManager& cpu_manager = cpu::CpuCoreManager::get();
21+
cpu::PerCpuData* cpu = cpu_manager.get_core_by_index(this->cpu_id);
22+
2023
if (t->priority >= MLFQ_LEVELS) {
2124
t->priority = MLFQ_LEVELS - 1;
2225
}
@@ -25,9 +28,8 @@ void Scheduler::add_thread(Thread* t) {
2528
t->quantum = TIME_SLICE_QUANTA[t->priority];
2629
}
2730

28-
t->state = Ready;
29-
cpu::PerCpuData* cpu = cpu::CpuCoreManager::get().get_core_by_index(this->cpu_id);
30-
t->cpu = cpu;
31+
t->state = Ready;
32+
t->cpu = cpu;
3133

3234
{
3335
LockGuard guard(this->lock);
@@ -45,8 +47,8 @@ void Scheduler::add_thread(Thread* t) {
4547
cpu->reschedule_needed = true;
4648

4749
// If this is running on a different core, send an IPI to wake it up
48-
if (cpu::CpuCoreManager::get().get_current_core()->core_idx != this->cpu_id) {
49-
cpu::CpuCoreManager::get().send_ipi(this->cpu_id, IPI_RESCHEDULE_VECTOR);
50+
if (cpu_manager.get_current_core()->core_idx != this->cpu_id) {
51+
cpu_manager.send_ipi(this->cpu_id, IPI_RESCHEDULE_VECTOR);
5052
}
5153
}
5254
}
@@ -77,8 +79,9 @@ Thread* Scheduler::get_next_thread() {
7779
}
7880

7981
Thread* Scheduler::try_steal() {
82+
cpu::CpuCoreManager& cpu_manager = cpu::CpuCoreManager::get();
8083
// Iterate over all other CPUs
81-
size_t max_cpus = cpu::CpuCoreManager::get().get_total_cores();
84+
size_t max_cpus = cpu_manager.get_total_cores();
8285

8386
for (size_t i = 0; i < max_cpus; ++i) {
8487
size_t victim_id = (this->cpu_id + i) % max_cpus;
@@ -89,7 +92,7 @@ Thread* Scheduler::try_steal() {
8992
}
9093

9194
cpu::PerCpuData* victim_cpu =
92-
cpu::CpuCoreManager::get().get_core_by_index(static_cast<uint32_t>(victim_id));
95+
cpu_manager.get_core_by_index(static_cast<uint32_t>(victim_id));
9396
Scheduler& victim_sched = victim_cpu->sched;
9497

9598
// Use `try_lock()` to avoid deadlock
@@ -119,7 +122,7 @@ Thread* Scheduler::try_steal() {
119122

120123
if (stolen) {
121124
// Migrate the thread to this cpu
122-
stolen->cpu = cpu::CpuCoreManager::get().get_core_by_index(this->cpu_id);
125+
stolen->cpu = cpu_manager.get_core_by_index(this->cpu_id);
123126
return stolen;
124127
}
125128
}

0 commit comments

Comments
 (0)