xv6

port of xv6 to x86-64
git clone http://frotz.net/git/xv6.git

proc.c (10154B)


#include "types.h"
#include "defs.h"
#include "param.h"
#include "memlayout.h"
#include "mmu.h"
#include "x86.h"
#include "proc.h"
#include "spinlock.h"

struct {
  struct spinlock lock;
  struct proc proc[NPROC];
} ptable;

static struct proc *initproc;

int nextpid = 1;
extern void forkret(void);
extern void trapret(void);

static void wakeup1(void *chan);

void
pinit(void)
{
  initlock(&ptable.lock, "ptable");
}

//PAGEBREAK: 32
// Look in the process table for an UNUSED proc.
// If found, change state to EMBRYO and initialize
// state required to run in the kernel.
// Otherwise return 0.
static struct proc*
allocproc(void)
{
  struct proc *p;
  char *sp;

  acquire(&ptable.lock);
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
    if(p->state == UNUSED)
      goto found;
  release(&ptable.lock);
  return 0;

found:
  p->state = EMBRYO;
  p->pid = nextpid++;
  release(&ptable.lock);

  // Allocate kernel stack.
  if((p->kstack = kalloc()) == 0){
    p->state = UNUSED;
    return 0;
  }
  sp = p->kstack + KSTACKSIZE;

  // Leave room for trap frame.
  sp -= sizeof *p->tf;
  p->tf = (struct trapframe*)sp;

  // Set up new context to start executing at forkret,
  // which returns to trapret.
  sp -= sizeof(uintp);
  *(uintp*)sp = (uintp)trapret;

  sp -= sizeof *p->context;
  p->context = (struct context*)sp;
  memset(p->context, 0, sizeof *p->context);
  p->context->eip = (uintp)forkret;
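  // The new kernel stack now holds, top down: the trap frame, a return
  // address pointing at trapret, and the context with eip = forkret.
  // The first swtch() into this process therefore "returns" to forkret,
  // which falls through to trapret and from there into user space.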

  return p;
}

//PAGEBREAK: 32
// Set up first user process.
void
userinit(void)
{
  struct proc *p;
  extern char _binary_out_initcode_start[], _binary_out_initcode_size[];
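  // _binary_out_initcode_start/_size are linker-generated symbols marking
  // the initcode binary embedded in the kernel image.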

  p = allocproc();
  initproc = p;
  if((p->pgdir = setupkvm()) == 0)
    panic("userinit: out of memory?");
  inituvm(p->pgdir, _binary_out_initcode_start, (uintp)_binary_out_initcode_size);
  p->sz = PGSIZE;
  memset(p->tf, 0, sizeof(*p->tf));
  p->tf->cs = (SEG_UCODE << 3) | DPL_USER;
  p->tf->ds = (SEG_UDATA << 3) | DPL_USER;
#ifndef X64
  p->tf->es = p->tf->ds;
  p->tf->ss = p->tf->ds;
#endif
  p->tf->eflags = FL_IF;
  p->tf->esp = PGSIZE;
  p->tf->eip = 0;  // beginning of initcode.S
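  // In stock xv6, initcode.S simply invokes exec("/init"); this port is
  // assumed to do the same.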

  safestrcpy(p->name, "initcode", sizeof(p->name));
  p->cwd = namei("/");

  p->state = RUNNABLE;
}

// Grow or shrink the current process's memory by n bytes.
// Return 0 on success, -1 on failure.
int
growproc(int n)
{
  uint sz;

  sz = proc->sz;
  if(n > 0){
    if((sz = allocuvm(proc->pgdir, sz, sz + n)) == 0)
      return -1;
  } else if(n < 0){
    if((sz = deallocuvm(proc->pgdir, sz, sz + n)) == 0)
      return -1;
  }
  proc->sz = sz;
  switchuvm(proc);
  return 0;
}
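
// For reference: growproc is normally driven by the sbrk system call.  A
// minimal sketch of that caller, mirroring sys_sbrk in stock xv6's sysproc.c
// (not part of this file):
//
//   int addr = proc->sz;     // old break is the return value
//   if(growproc(n) < 0)
//     return -1;
//   return addr;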

// Create a new process copying p as the parent.
// Sets up stack to return as if from system call.
// Caller must set state of returned proc to RUNNABLE.
int
fork(void)
{
  int i, pid;
  struct proc *np;

  // Allocate process.
  if((np = allocproc()) == 0)
    return -1;

  // Copy process state from p.
  if((np->pgdir = copyuvm(proc->pgdir, proc->sz)) == 0){
    kfree(np->kstack);
    np->kstack = 0;
    np->state = UNUSED;
    return -1;
  }
  np->sz = proc->sz;
  np->parent = proc;
  *np->tf = *proc->tf;

  // Clear %eax so that fork returns 0 in the child.
  np->tf->eax = 0;

  for(i = 0; i < NOFILE; i++)
    if(proc->ofile[i])
      np->ofile[i] = filedup(proc->ofile[i]);
  np->cwd = idup(proc->cwd);

  pid = np->pid;
  np->state = RUNNABLE;
  safestrcpy(np->name, proc->name, sizeof(proc->name));
  return pid;
}

// Exit the current process.  Does not return.
// An exited process remains in the zombie state
// until its parent calls wait() to find out it exited.
void
exit(void)
{
  struct proc *p;
  int fd;

  if(proc == initproc)
    panic("init exiting");

  // Close all open files.
  for(fd = 0; fd < NOFILE; fd++){
    if(proc->ofile[fd]){
      fileclose(proc->ofile[fd]);
      proc->ofile[fd] = 0;
    }
  }

  iput(proc->cwd);
  proc->cwd = 0;

  acquire(&ptable.lock);

  // Parent might be sleeping in wait().
  wakeup1(proc->parent);

  // Pass abandoned children to init.
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->parent == proc){
      p->parent = initproc;
      if(p->state == ZOMBIE)
        wakeup1(initproc);
    }
  }

  // Jump into the scheduler, never to return.
  proc->state = ZOMBIE;
  sched();
  panic("zombie exit");
}

// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
int
wait(void)
{
  struct proc *p;
  int havekids, pid;

  acquire(&ptable.lock);
  for(;;){
    // Scan through table looking for zombie children.
    havekids = 0;
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->parent != proc)
        continue;
      havekids = 1;
      if(p->state == ZOMBIE){
        // Found one.
        pid = p->pid;
        kfree(p->kstack);
        p->kstack = 0;
        freevm(p->pgdir);
        p->state = UNUSED;
        p->pid = 0;
        p->parent = 0;
        p->name[0] = 0;
        p->killed = 0;
        release(&ptable.lock);
        return pid;
      }
    }

    // No point waiting if we don't have any children.
    if(!havekids || proc->killed){
      release(&ptable.lock);
      return -1;
    }

    // Wait for children to exit.  (See wakeup1 call in exit.)
    sleep(proc, &ptable.lock);  //DOC: wait-sleep
  }
}

//PAGEBREAK: 42
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns.  It loops, doing:
//  - choose a process to run
//  - swtch to start running that process
//  - eventually that process transfers control
//      via swtch back to the scheduler.
void
scheduler(void)
{
  struct proc *p = 0;
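  // p deliberately persists across iterations of the outer loop: if the
  // previous scan ran off the end of the table without finding a RUNNABLE
  // process, p == &ptable.proc[NPROC] and the hlt() below idles until an
  // interrupt arrives.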

  for(;;){
    // Enable interrupts on this processor.
    sti();

    // no runnable processes? (did we hit the end of the table last time?)
    // if so, wait for irq before trying again.
    if (p == &ptable.proc[NPROC])
      hlt();

    // Loop over process table looking for process to run.
    acquire(&ptable.lock);
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->state != RUNNABLE)
        continue;

      // Switch to chosen process.  It is the process's job
      // to release ptable.lock and then reacquire it
      // before jumping back to us.
      proc = p;
      switchuvm(p);
      p->state = RUNNING;
      swtch(&cpu->scheduler, proc->context);
      switchkvm();

      // Process is done running for now.
      // It should have changed its p->state before coming back.
      proc = 0;
    }
    release(&ptable.lock);

  }
}

// Enter scheduler.  Must hold only ptable.lock
// and have changed proc->state.
void
sched(void)
{
  int intena;

  if(!holding(&ptable.lock))
    panic("sched ptable.lock");
  if(cpu->ncli != 1)
    panic("sched locks");
  if(proc->state == RUNNING)
    panic("sched running");
  if(readeflags()&FL_IF)
    panic("sched interruptible");
  intena = cpu->intena;
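  // Whether interrupts should be re-enabled is a property of this kernel
  // thread, not of the CPU, and other code running on this CPU will change
  // cpu->intena; save it on our stack and restore it after the switch.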
  swtch(&proc->context, cpu->scheduler);
  cpu->intena = intena;
}

// Give up the CPU for one scheduling round.
void
yield(void)
{
  acquire(&ptable.lock);  //DOC: yieldlock
  proc->state = RUNNABLE;
  sched();
  release(&ptable.lock);
}

// A fork child's very first scheduling by scheduler()
// will swtch here.  "Return" to user space.
void
forkret(void)
{
  static int first = 1;
  // Still holding ptable.lock from scheduler.
  release(&ptable.lock);

  if (first) {
    // Some initialization functions must be run in the context
    // of a regular process (e.g., they call sleep), and thus cannot
    // be run from main().
    first = 0;
    initlog();
  }

  // Return to "caller", actually trapret (see allocproc).
}

// Atomically release lock and sleep on chan.
// Reacquires lock when awakened.
void
sleep(void *chan, struct spinlock *lk)
{
  if(proc == 0)
    panic("sleep");

  if(lk == 0)
    panic("sleep without lk");

  // Must acquire ptable.lock in order to
  // change p->state and then call sched.
  // Once we hold ptable.lock, we can be
  // guaranteed that we won't miss any wakeup
  // (wakeup runs with ptable.lock locked),
  // so it's okay to release lk.
  if(lk != &ptable.lock){  //DOC: sleeplock0
    acquire(&ptable.lock);  //DOC: sleeplock1
    release(lk);
  }

  // Go to sleep.
  proc->chan = chan;
  proc->state = SLEEPING;
  sched();

  // Tidy up.
  proc->chan = 0;

  // Reacquire original lock.
  if(lk != &ptable.lock){  //DOC: sleeplock2
    release(&ptable.lock);
    acquire(lk);
  }
}
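
// Typical sleep/wakeup usage, sketched after sys_sleep in stock xv6's
// sysproc.c (not part of this file): hold the lock that protects the
// condition, re-test it in a loop, and sleep on a channel tied to it.
//
//   acquire(&tickslock);
//   while(ticks - ticks0 < n)
//     sleep(&ticks, &tickslock);   // timer interrupt does wakeup(&ticks)
//   release(&tickslock);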

//PAGEBREAK!
// Wake up all processes sleeping on chan.
// The ptable lock must be held.
static void
wakeup1(void *chan)
{
  struct proc *p;

  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
    if(p->state == SLEEPING && p->chan == chan)
      p->state = RUNNABLE;
}

// Wake up all processes sleeping on chan.
void
wakeup(void *chan)
{
  acquire(&ptable.lock);
  wakeup1(chan);
  release(&ptable.lock);
}

// Kill the process with the given pid.
// Process won't exit until it returns
// to user space (see trap in trap.c).
int
kill(int pid)
{
  struct proc *p;

  acquire(&ptable.lock);
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->pid == pid){
      p->killed = 1;
      // Wake process from sleep if necessary.
      if(p->state == SLEEPING)
        p->state = RUNNABLE;
      release(&ptable.lock);
      return 0;
    }
  }
  release(&ptable.lock);
  return -1;
}

//PAGEBREAK: 36
// Print a process listing to console.  For debugging.
// Runs when user types ^P on console.
// No lock to avoid wedging a stuck machine further.
void
procdump(void)
{
  static char *states[] = {
  [UNUSED]    "unused",
  [EMBRYO]    "embryo",
  [SLEEPING]  "sleep ",
  [RUNNABLE]  "runble",
  [RUNNING]   "run   ",
  [ZOMBIE]    "zombie"
  };
  int i;
  struct proc *p;
  char *state;
  uintp pc[10];

  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->state == UNUSED)
      continue;
    if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
      state = states[p->state];
    else
      state = "???";
    cprintf("%d %s %s", p->pid, state, p->name);
    if(p->state == SLEEPING){
      getstackpcs((uintp*)p->context->ebp, pc);
      for(i=0; i<10 && pc[i] != 0; i++)
        cprintf(" %p", pc[i]);
    }
    cprintf("\n");
  }
}