#include "proc.h"
#include "defs.h"
#include "loader.h"
#include "trap.h"

// Statically allocated process table, plus one kernel stack page, one user
// stack page and one trapframe page per slot.
struct proc pool[NPROC];
char kstack[NPROC][PAGE_SIZE];
__attribute__((aligned(4096))) char ustack[NPROC][PAGE_SIZE];
__attribute__((aligned(4096))) char trapframe[NPROC][PAGE_SIZE];

extern char boot_stack_top[];
struct proc *current_proc;
// The idle process (pid 0) runs the scheduler loop on the boot stack.
struct proc idle;

int threadid()
{
	return curr_proc()->pid;
}

struct proc *curr_proc()
{
	return current_proc;
}

// Initialize the proc table at boot time.
void proc_init(void)
{
	struct proc *p;
	for (p = pool; p < &pool[NPROC]; p++) {
		p->state = UNUSED;
		p->kstack = (uint64)kstack[p - pool];
		p->ustack = (uint64)ustack[p - pool];
		p->trapframe = (struct trapframe *)trapframe[p - pool];
		/*
		 * LAB1: you may need to initialize your new fields of proc here
		 */
	}
	idle.kstack = (uint64)boot_stack_top;
	idle.pid = 0;
	current_proc = &idle;
}

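/*
 * Note that idle is not part of the pool: it reuses the boot stack, and its
 * context is filled in by the first swtch() inside scheduler(), so it never
 * needs an explicit entry point.
 */
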
// Allocate a new, never-reused process id.
int allocpid()
{
	static int PID = 1;
	return PID++;
}

// Look in the process table for an UNUSED proc.
// If found, initialize state required to run in the kernel.
// If there are no free procs, or a memory allocation fails, return 0.
struct proc *allocproc(void)
{
	struct proc *p;
	for (p = pool; p < &pool[NPROC]; p++) {
		if (p->state == UNUSED) {
			goto found;
		}
	}
	return 0;

found:
	p->pid = allocpid();
	p->state = USED;
	memset(&p->context, 0, sizeof(p->context));
	memset(p->trapframe, 0, PAGE_SIZE);
	memset((void *)p->kstack, 0, PAGE_SIZE);
	// The first swtch() to this process "returns" to usertrapret, with the
	// kernel stack pointer at the top of its kernel stack page.
	p->context.ra = (uint64)usertrapret;
	p->context.sp = p->kstack + PAGE_SIZE;
	return p;
}

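/*
 * A minimal usage sketch (hypothetical; the real code lives in the loader and
 * may differ, and it assumes an xv6-style trapframe with epc/sp fields): the
 * loader takes a free slot, points the trapframe at the app's entry point and
 * user stack, and marks the process RUNNABLE so the scheduler picks it up.
 *
 *	struct proc *p = allocproc();
 *	p->trapframe->epc = app_entry_addr;	// hypothetical entry address
 *	p->trapframe->sp = p->ustack + PAGE_SIZE;
 *	p->state = RUNNABLE;
 */
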
// Scheduler never returns. It loops, doing:
// - choose a process to run.
// - swtch to start running that process.
// - eventually that process transfers control
//   via swtch back to the scheduler.
void scheduler(void)
{
	struct proc *p;
	for (;;) {
		for (p = pool; p < &pool[NPROC]; p++) {
			if (p->state == RUNNABLE) {
				/*
				 * LAB1: you may need to init proc start time here
				 */
				p->state = RUNNING;
				current_proc = p;
				swtch(&idle.context, &p->context);
			}
		}
	}
}

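/*
 * Control returns to the loop above when a running process calls sched():
 * swtch() saves that process's registers into its context and restores the
 * ones previously saved in idle.context, resuming the scheduler right after
 * its own swtch() call. The scan then continues with the next slot, which
 * gives simple round-robin scheduling.
 */
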
// Switch back to the scheduler (the idle context). The caller must have
// already changed the process state away from RUNNING before calling this;
// sched() panics if the process is still marked RUNNING.
void sched(void)
{
	struct proc *p = curr_proc();
	if (p->state == RUNNING)
		panic("sched running");
	swtch(&p->context, &idle.context);
}

// Give up the CPU for one scheduling round.
void yield(void)
{
	current_proc->state = RUNNABLE;
	sched();
}

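/*
 * yield() is typically invoked from the trap handler when a timer interrupt
 * arrives, so that user programs are preempted. A sketch of such a handler
 * case (hypothetical names; the actual code in trap.c may differ):
 *
 *	case SupervisorTimer:
 *		set_next_timer();
 *		yield();
 *		break;
 */
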
// Exit the current process.
void exit(int code)
{
	struct proc *p = curr_proc();
	infof("proc %d exit with %d", p->pid, code);
	p->state = UNUSED;
	finished();
	sched();
}
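
/*
 * sched() does not return here: the slot is already UNUSED, so the scheduler
 * never selects this process again, and a later allocproc() may reuse the
 * entry.
 */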