#include "types.h"
#include "defs.h"
#include "param.h"
#include "mmu.h"
#include "x86.h"
#include "proc.h"
#include "spinlock.h"

struct {
  struct spinlock lock;
  struct proc proc[NPROC];
} ptable;

static struct proc *initproc;

int nextpid = 1;
extern void forkret(void);
extern void trapret(void);

static void wakeup1(void *chan);

void
pinit(void)
{
  initlock(&ptable.lock, "ptable");
}

// Look in the process table for an UNUSED proc.
// If found, change state to EMBRYO and initialize
// state required to run in the kernel.
// Otherwise return 0.
static struct proc*
allocproc(void)
{
  struct proc *p;
  char *sp;

  acquire(&ptable.lock);
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
    if(p->state == UNUSED)
      goto found;
  release(&ptable.lock);
  return 0;

found:
  p->state = EMBRYO;
  p->pid = nextpid++;
  release(&ptable.lock);

  // Allocate kernel stack if possible.
  if((p->kstack = kalloc()) == 0){
    p->state = UNUSED;
    return 0;
  }
  sp = p->kstack + KSTACKSIZE;

  // Leave room for trap frame.
  sp -= sizeof *p->tf;
  p->tf = (struct trapframe*)sp;

  // Set up new context to start executing at forkret,
  // which returns to trapret.
  sp -= 4;
  *(uint*)sp = (uint)trapret;

  sp -= sizeof *p->context;
  p->context = (struct context*)sp;
  memset(p->context, 0, sizeof *p->context);
  p->context->eip = (uint)forkret;

  return p;
}
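
// For reference, the kernel stack that allocproc builds looks roughly like
// this (highest address at the top); the sketch is descriptive only, the
// exact sizes come from struct trapframe (x86.h) and struct context (proc.h):
//
//   p->kstack + KSTACKSIZE  +----------------------+
//                           | trapframe (p->tf)    |  restored by trapret
//                           +----------------------+
//                           | address of trapret   |  "return address" for forkret
//                           +----------------------+
//                           | context (p->context) |  eip = forkret, popped by swtch
//                           +----------------------+
//   p->kstack               | ... unused ...       |
//
// When the scheduler first swtch()es to this context, swtch's ret pops
// p->context->eip and starts forkret, which returns into trapret, which
// restores the trapframe and drops into user mode.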

// Set up first user process.
void
userinit(void)
{
  struct proc *p;
  extern char _binary_initcode_start[], _binary_initcode_size[];

  p = allocproc();
  initproc = p;
  if((p->pgdir = setupkvm()) == 0)
    panic("userinit: out of memory?");
  inituvm(p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);
  p->sz = PGSIZE;
  memset(p->tf, 0, sizeof(*p->tf));
  p->tf->cs = (SEG_UCODE << 3) | DPL_USER;
  p->tf->ds = (SEG_UDATA << 3) | DPL_USER;
  p->tf->es = p->tf->ds;
  p->tf->ss = p->tf->ds;
  p->tf->eflags = FL_IF;
  p->tf->esp = PGSIZE;
  p->tf->eip = 0;  // beginning of initcode.S

  safestrcpy(p->name, "initcode", sizeof(p->name));
  p->cwd = namei("/");

  p->state = RUNNABLE;
}
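
// Note: the trapframe above is forged so that the first "return" to user
// space lands at virtual address 0 of a single page (p->sz == PGSIZE), which
// holds the embedded initcode binary. In stock xv6, initcode.S simply invokes
// the exec system call on "/init", which becomes the ancestor of every other
// user process.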

// Grow current process's memory by n bytes.
// Return 0 on success, -1 on failure.
int
growproc(int n)
{
  uint sz;

  sz = proc->sz;
  if(n > 0){
    if((sz = allocuvm(proc->pgdir, sz, sz + n)) == 0)
      return -1;
  } else if(n < 0){
    if((sz = deallocuvm(proc->pgdir, sz, sz + n)) == 0)
      return -1;
  }
  proc->sz = sz;
  switchuvm(proc);
  return 0;
}
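
// Illustrative caller (not part of this file): the sbrk system call is the
// usual client of growproc. In stock xv6 it looks roughly like the following,
// returning the old size so user code knows where the new memory begins:
//
//   int
//   sys_sbrk(void)
//   {
//     int addr, n;
//
//     if(argint(0, &n) < 0)
//       return -1;
//     addr = proc->sz;
//     if(growproc(n) < 0)
//       return -1;
//     return addr;
//   }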

// Create a new process copying p as the parent.
// Sets up stack to return as if from system call.
// Caller must set state of returned proc to RUNNABLE.
int
fork(void)
{
  int i, pid;
  struct proc *np;

  // Allocate process.
  if((np = allocproc()) == 0)
    return -1;

  // Copy process state from p.
  if((np->pgdir = copyuvm(proc->pgdir, proc->sz)) == 0){
    kfree(np->kstack);
    np->kstack = 0;
    np->state = UNUSED;
    return -1;
  }
  np->sz = proc->sz;
  np->parent = proc;
  *np->tf = *proc->tf;

  // Clear %eax so that fork returns 0 in the child.
  np->tf->eax = 0;

  for(i = 0; i < NOFILE; i++)
    if(proc->ofile[i])
      np->ofile[i] = filedup(proc->ofile[i]);
  np->cwd = idup(proc->cwd);

  pid = np->pid;
  np->state = RUNNABLE;
  safestrcpy(np->name, proc->name, sizeof(proc->name));
  return pid;
}
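
// Illustrative user-level view (not part of this file): because the child
// gets a copy of the parent's trapframe with %eax cleared, fork appears to
// return twice, once in each process:
//
//   int pid = fork();
//   if(pid == 0)
//     exit();      // child: runs with a copy of the parent's memory and files
//   else if(pid > 0)
//     wait();      // parent: blocks until the child exits, reaps its pid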

// Exit the current process.  Does not return.
// An exited process remains in the zombie state
// until its parent calls wait() to find out it exited.
void
exit(void)
{
  struct proc *p;
  int fd;

  if(proc == initproc)
    panic("init exiting");

  // Close all open files.
  for(fd = 0; fd < NOFILE; fd++){
    if(proc->ofile[fd]){
      fileclose(proc->ofile[fd]);
      proc->ofile[fd] = 0;
    }
  }

  iput(proc->cwd);
  proc->cwd = 0;

  acquire(&ptable.lock);

  // Parent might be sleeping in wait().
  wakeup1(proc->parent);

  // Pass abandoned children to init.
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->parent == proc){
      p->parent = initproc;
      if(p->state == ZOMBIE)
        wakeup1(initproc);
    }
  }

  // Jump into the scheduler, never to return.
  proc->state = ZOMBIE;
  sched();
  panic("zombie exit");
}

// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
int
wait(void)
{
  struct proc *p;
  int havekids, pid;

  acquire(&ptable.lock);
  for(;;){
    // Scan through table looking for zombie children.
    havekids = 0;
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->parent != proc)
        continue;
      havekids = 1;
      if(p->state == ZOMBIE){
        // Found one.
        pid = p->pid;
        kfree(p->kstack);
        p->kstack = 0;
        freevm(p->pgdir);
        p->state = UNUSED;
        p->pid = 0;
        p->parent = 0;
        p->name[0] = 0;
        p->killed = 0;
        release(&ptable.lock);
        return pid;
      }
    }

    // No point waiting if we don't have any children.
    if(!havekids || proc->killed){
      release(&ptable.lock);
      return -1;
    }

    // Wait for children to exit.  (See wakeup1 call in exit.)
    sleep(proc, &ptable.lock);  //DOC: wait-sleep
  }
}
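
// Note on the exit/wait handshake: exit() leaves the process in the ZOMBIE
// state and only wakes the parent; it cannot free its own kernel stack or
// page directory because it is still running on that stack. The parent's
// wait() performs the actual cleanup (kfree of the kstack, freevm of the
// page directory) and only then marks the slot UNUSED for reuse.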

// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns.  It loops, doing:
//  - choose a process to run
//  - swtch to start running that process
//  - eventually that process transfers control
//      via swtch back to the scheduler.
void
scheduler(void)
{
  struct proc *p;

  for(;;){
    // Enable interrupts on this processor.
    sti();

    // Loop over process table looking for process to run.
    acquire(&ptable.lock);
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->state != RUNNABLE)
        continue;

      // Switch to chosen process.  It is the process's job
      // to release ptable.lock and then reacquire it
      // before jumping back to us.
      proc = p;
      switchuvm(p);
      p->state = RUNNING;
      swtch(&cpu->scheduler, proc->context);
      switchkvm();

      // Process is done running for now.
      // It should have changed its p->state before coming back.
      proc = 0;
    }
    release(&ptable.lock);
  }
}
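
// For orientation, a full scheduling round trip looks like this (each arrow
// is a swtch() call; ptable.lock is held across every switch and released on
// the other side):
//
//   scheduler()                           process kernel thread
//   -----------                           ----------------------
//   pick a RUNNABLE p
//   swtch(&cpu->scheduler, p->context) --> resumes in sched() (or forkret()
//                                          for a brand-new process), runs,
//                                          eventually calls yield()/sleep()/
//                                          exit(), which call sched()
//   resumes after swtch() <-------------- swtch(&proc->context, cpu->scheduler)
//   switchkvm(), pick the next p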

// Enter scheduler.  Must hold only ptable.lock
// and have changed proc->state.
void
sched(void)
{
  int intena;

  if(!holding(&ptable.lock))
    panic("sched ptable.lock");
  if(cpu->ncli != 1)
    panic("sched locks");
  if(proc->state == RUNNING)
    panic("sched running");
  if(readeflags()&FL_IF)
    panic("sched interruptible");
  intena = cpu->intena;
  swtch(&proc->context, cpu->scheduler);
  cpu->intena = intena;
}
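
// Why intena is saved and restored around swtch: cpu->intena records whether
// interrupts were enabled when this kernel thread first pushed cli (see
// pushcli/popcli in spinlock.c). It is conceptually a property of the thread,
// but it lives in per-CPU storage, and after swtch this thread may be resumed
// by a different CPU's scheduler with a different cpu->intena. Saving it in a
// local and writing it back keeps the thread's interrupt state consistent.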

// Give up the CPU for one scheduling round.
void
yield(void)
{
  acquire(&ptable.lock);  //DOC: yieldlock
  proc->state = RUNNABLE;
  sched();
  release(&ptable.lock);
}

// A fork child's very first scheduling by scheduler()
// will swtch here.  "Return" to user space.
void
forkret(void)
{
  // Still holding ptable.lock from scheduler.
  release(&ptable.lock);

  // Return to "caller", actually trapret (see allocproc).
}

// Atomically release lock and sleep on chan.
// Reacquires lock when awakened.
void
sleep(void *chan, struct spinlock *lk)
{
  if(proc == 0)
    panic("sleep");

  if(lk == 0)
    panic("sleep without lk");

  // Must acquire ptable.lock in order to
  // change p->state and then call sched.
  // Once we hold ptable.lock, we can be
  // guaranteed that we won't miss any wakeup
  // (wakeup runs with ptable.lock locked),
  // so it's okay to release lk.
  if(lk != &ptable.lock){  //DOC: sleeplock0
    acquire(&ptable.lock);  //DOC: sleeplock1
    release(lk);
  }

  // Go to sleep.
  proc->chan = chan;
  proc->state = SLEEPING;
  sched();

  // Tidy up.
  proc->chan = 0;

  // Reacquire original lock.
  if(lk != &ptable.lock){  //DOC: sleeplock2
    release(&ptable.lock);
    acquire(lk);
  }
}
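
// Typical caller pattern (illustrative; lk and condition are placeholders,
// not names from this file): since wakeup() wakes every process sleeping on
// chan and another process may consume the condition first, callers re-check
// the condition in a loop while holding the lock that protects it:
//
//   acquire(&lk);
//   while(!condition)
//     sleep(chan, &lk);
//   // condition now holds and lk is held again
//   release(&lk);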

// Wake up all processes sleeping on chan.
// The ptable lock must be held.
static void
wakeup1(void *chan)
{
  struct proc *p;

  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
    if(p->state == SLEEPING && p->chan == chan)
      p->state = RUNNABLE;
}

// Wake up all processes sleeping on chan.
void
wakeup(void *chan)
{
  acquire(&ptable.lock);
  wakeup1(chan);
  release(&ptable.lock);
}

// Kill the process with the given pid.
// Process won't exit until it returns
// to user space (see trap in trap.c).
int
kill(int pid)
{
  struct proc *p;

  acquire(&ptable.lock);
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->pid == pid){
      p->killed = 1;
      // Wake process from sleep if necessary.
      if(p->state == SLEEPING)
        p->state = RUNNABLE;
      release(&ptable.lock);
      return 0;
    }
  }
  release(&ptable.lock);
  return -1;
}
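
// How the killed flag takes effect (illustrative; this check lives in trap.c
// in stock xv6, not in this file): kill() only sets p->killed and makes a
// sleeping victim RUNNABLE; the victim destroys itself the next time it
// crosses the user/kernel boundary, roughly:
//
//   if(proc && proc->killed && (tf->cs&3) == DPL_USER)
//     exit();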

// Print a process listing to console.  For debugging.
// Runs when user types ^P on console.
// No lock to avoid wedging a stuck machine further.
void
procdump(void)
{
  static char *states[] = {
  [UNUSED]    "unused",
  [EMBRYO]    "embryo",
  [SLEEPING]  "sleep ",
  [RUNNABLE]  "runble",
  [RUNNING]   "run   ",
  [ZOMBIE]    "zombie"
  };
  int i;
  struct proc *p;
  char *state;
  uint pc[10];

  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->state == UNUSED)
      continue;
    if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
      state = states[p->state];
    else
      state = "???";
    cprintf("%d %s %s", p->pid, state, p->name);
    if(p->state == SLEEPING){
      getcallerpcs((uint*)p->context->ebp+2, pc);
      for(i=0; i<10 && pc[i] != 0; i++)
        cprintf(" %p", pc[i]);
    }
    cprintf("\n");
  }
}