@@ -1,18 +1,14 @@
 /*
  * Copyright (c) 2018-2023, Andreas Kling <[email protected]>
  * Copyright (c) 2023, Idan Horowitz <[email protected]>
+ * Copyright (c) 2025, Kusekushi <[email protected]>
  *
  * SPDX-License-Identifier: BSD-2-Clause
  */

-#include <Kernel/Debug.h>
-#include <Kernel/Devices/TTY/TTY.h>
-#include <Kernel/FileSystem/Custody.h>
-#include <Kernel/Memory/Region.h>
-#include <Kernel/Tasks/PerformanceManager.h>
+#include <AK/Types.h>
+#include <Kernel/API/POSIX/unistd.h>
 #include <Kernel/Tasks/Process.h>
-#include <Kernel/Tasks/Scheduler.h>
-#include <Kernel/Tasks/ScopedProcessList.h>

 namespace Kernel {

@@ -21,152 +17,6 @@ ErrorOr<FlatPtr> Process::sys$fork(RegisterState& regs)
     VERIFY_NO_PROCESS_BIG_LOCK(this);
     TRY(require_promise(Pledge::proc));

-    auto credentials = this->credentials();
-    auto child_and_first_thread = TRY(Process::create_with_forked_name(credentials->uid(), credentials->gid(), pid(), m_is_kernel_process, vfs_root_context(), hostname_context(), current_directory(), executable(), tty(), this));
-    auto& child = child_and_first_thread.process;
-    auto& child_first_thread = child_and_first_thread.first_thread;
-
-    ArmedScopeGuard thread_finalizer_guard = [&child_first_thread]() {
-        SpinlockLocker lock(g_scheduler_lock);
-        child_first_thread->detach();
-        child_first_thread->set_state(Thread::State::Dying);
-    };
-
-    TRY(m_unveil_data.with([&](auto& parent_unveil_data) -> ErrorOr<void> {
-        return child->m_unveil_data.with([&](auto& child_unveil_data) -> ErrorOr<void> {
-            child_unveil_data.state = parent_unveil_data.state;
-            child_unveil_data.paths = TRY(parent_unveil_data.paths.deep_copy());
-            return {};
-        });
-    }));
-
-    TRY(m_exec_unveil_data.with([&](auto& parent_exec_unveil_data) -> ErrorOr<void> {
-        return child->m_exec_unveil_data.with([&](auto& child_exec_unveil_data) -> ErrorOr<void> {
-            child_exec_unveil_data.state = parent_exec_unveil_data.state;
-            child_exec_unveil_data.paths = TRY(parent_exec_unveil_data.paths.deep_copy());
-            return {};
-        });
-    }));
-
-    TRY(child->m_fds.with_exclusive([&](auto& child_fds) {
-        return m_fds.with_exclusive([&](auto& parent_fds) {
-            return child_fds.try_clone(parent_fds);
-        });
-    }));
-
-    with_protected_data([&](auto& my_protected_data) {
-        child->with_mutable_protected_data([&](auto& child_protected_data) {
-            child_protected_data.promises = my_protected_data.promises;
-            child_protected_data.execpromises = my_protected_data.execpromises;
-            child_protected_data.has_promises = my_protected_data.has_promises;
-            child_protected_data.has_execpromises = my_protected_data.has_execpromises;
-            child_protected_data.credentials = my_protected_data.credentials;
-            child_protected_data.umask = my_protected_data.umask;
-            child_protected_data.signal_trampoline = my_protected_data.signal_trampoline;
-            child_protected_data.dumpable = my_protected_data.dumpable;
-            child_protected_data.process_group = my_protected_data.process_group;
-            // NOTE: Propagate jailed_until_exit property to child processes.
-            // The jailed_until_exec property is also propagated, but will be
-            // set to false once the child process is calling the execve syscall.
-            if (my_protected_data.jailed_until_exit.was_set())
-                child_protected_data.jailed_until_exit.set();
-            child_protected_data.jailed_until_exec = my_protected_data.jailed_until_exec;
-        });
-    });
-
-    dbgln_if(FORK_DEBUG, "fork: child={}", child);
-
-    // A child created via fork(2) inherits a copy of its parent's signal mask
-    child_first_thread->update_signal_mask(Thread::current()->signal_mask());
-
-    // A child process created via fork(2) inherits a copy of its parent's alternate signal stack settings.
-    child_first_thread->m_alternative_signal_stack = Thread::current()->m_alternative_signal_stack;
-
-    auto& child_regs = child_first_thread->m_regs;
-#if ARCH(X86_64)
-    child_regs.rax = 0; // fork() returns 0 in the child :^)
-    child_regs.rbx = regs.rbx;
-    child_regs.rcx = regs.rcx;
-    child_regs.rdx = regs.rdx;
-    child_regs.rbp = regs.rbp;
-    child_regs.rsp = regs.userspace_rsp;
-    child_regs.rsi = regs.rsi;
-    child_regs.rdi = regs.rdi;
-    child_regs.r8 = regs.r8;
-    child_regs.r9 = regs.r9;
-    child_regs.r10 = regs.r10;
-    child_regs.r11 = regs.r11;
-    child_regs.r12 = regs.r12;
-    child_regs.r13 = regs.r13;
-    child_regs.r14 = regs.r14;
-    child_regs.r15 = regs.r15;
-    child_regs.rflags = regs.rflags;
-    child_regs.rip = regs.rip;
-    child_regs.cs = regs.cs;
-
-    dbgln_if(FORK_DEBUG, "fork: child will begin executing at {:#04x}:{:p} with stack {:p}, kstack {:p}",
-        child_regs.cs, child_regs.rip, child_regs.rsp, child_regs.rsp0);
-#elif ARCH(AARCH64)
-    child_regs.x[0] = 0; // fork() returns 0 in the child :^)
-    for (size_t i = 1; i < array_size(child_regs.x); ++i)
-        child_regs.x[i] = regs.x[i];
-    child_regs.spsr_el1 = regs.spsr_el1;
-    child_regs.elr_el1 = regs.elr_el1;
-    child_regs.sp_el0 = regs.sp_el0;
-    child_regs.tpidr_el0 = regs.tpidr_el0;
-#elif ARCH(RISCV64)
-    for (size_t i = 0; i < array_size(child_regs.x); ++i)
-        child_regs.x[i] = regs.x[i];
-    child_regs.x[9] = 0; // fork() returns 0 in the child :^)
-    child_regs.sstatus = regs.sstatus;
-    child_regs.pc = regs.sepc;
-    dbgln_if(FORK_DEBUG, "fork: child will begin executing at {:p} with stack {:p}, kstack {:p}",
-        child_regs.pc, child_regs.sp(), child_regs.kernel_sp);
-#else
-#    error Unknown architecture
-#endif
-
-    Processor::store_fpu_state(child_first_thread->fpu_state());
-
-    TRY(address_space().with([&](auto& parent_space) {
-        return child->address_space().with([&](auto& child_space) -> ErrorOr<void> {
-            if (parent_space->enforces_syscall_regions())
-                child_space->set_enforces_syscall_regions();
-            for (auto& region : parent_space->region_tree().regions()) {
-                dbgln_if(FORK_DEBUG, "fork: cloning Region '{}' @ {}", region.name(), region.vaddr());
-                auto region_clone = TRY(region.try_clone());
-                TRY(region_clone->map(child_space->page_directory(), Memory::ShouldFlushTLB::No));
-                TRY(child_space->region_tree().place_specifically(*region_clone, region.range()));
-                (void)region_clone.leak_ptr();
-            }
-            return {};
-        });
-    }));
-
-    thread_finalizer_guard.disarm();
-
-    m_scoped_process_list.with([&](auto& list_ptr) {
-        if (list_ptr) {
-            child->m_scoped_process_list.with([&](auto& child_list_ptr) {
-                child_list_ptr = list_ptr;
-            });
-            list_ptr->attach(*child);
-        }
-    });
-
-    Process::register_new(*child);
-
-    // NOTE: All user processes have a leaked ref on them. It's balanced by Thread::WaitBlockerSet::finalize().
-    child->ref();
-
-    PerformanceManager::add_process_created_event(*child);
-
-    SpinlockLocker lock(g_scheduler_lock);
-    child_first_thread->set_affinity(Thread::current()->affinity());
-    child_first_thread->set_state(Thread::State::Runnable);
-
-    auto child_pid = child->pid().value();
-
-    return child_pid;
+    return do_fork_common(regs, RFPROC | RFFDG);
 }
 }
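The rewritten syscall body delegates to a shared helper driven by rfork(2)-style flags. For context: in the Plan 9/BSD rfork tradition, RFPROC requests creation of a new process and RFFDG requests that the child receive its own copy of the file descriptor table rather than sharing it, so `RFPROC | RFFDG` reproduces classic fork() semantics. The snippet below is only a standalone sketch of that flag-driven dispatch under those assumed meanings; the helper's real signature, the flag definitions (presumably provided via the newly included `Kernel/API/POSIX/unistd.h`), and the actual behaviour live in the SerenityOS sources, not here.

```cpp
// Standalone illustration (not kernel code): modelling fork() as a thin wrapper
// over a flag-driven helper, in the spirit of the change above. The flag values
// and the helper are assumptions made for this example only.
#include <cstdio>

enum RForkFlags : unsigned {
    RFPROC = 1u << 0, // create a new process
    RFFDG = 1u << 1,  // give the child its own copy of the file descriptor table
};

// Hypothetical stand-in for the shared helper: the flags decide what the child
// shares with the parent and what it gets a private copy of.
static long do_fork_common(unsigned flags)
{
    if (!(flags & RFPROC))
        return -1; // this sketch only models the "create a new process" case
    bool copy_fd_table = (flags & RFFDG) != 0;
    std::printf("child created, fd table is %s\n", copy_fd_table ? "copied" : "shared");
    return 1234; // pretend child PID returned to the parent
}

// Classic fork() semantics: a new process with a private copy of the fd table.
static long sys_fork()
{
    return do_fork_common(RFPROC | RFFDG);
}

int main()
{
    std::printf("fork() -> %ld\n", sys_fork());
    return 0;
}
```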