xv6

port of xv6 to x86-64
git clone http://frotz.net/git/xv6.git

spinlock.c (2859B)


// Mutual exclusion spin locks.

#include "types.h"
#include "defs.h"
#include "param.h"
#include "x86.h"
#include "memlayout.h"
#include "mmu.h"
#include "proc.h"
#include "spinlock.h"

void
initlock(struct spinlock *lk, char *name)
{
  lk->name = name;
  lk->locked = 0;
  lk->cpu = 0;
}

// Acquire the lock.
// Loops (spins) until the lock is acquired.
// Holding a lock for a long time may cause
// other CPUs to waste time spinning to acquire it.
void
acquire(struct spinlock *lk)
{
  pushcli(); // disable interrupts to avoid deadlock.
  if(holding(lk)) {
    int i;
    cprintf("lock '%s':\n", lk->name);
    for (i = 0; i < 10; i++)
      cprintf(" %p", lk->pcs[i]);
    cprintf("\n");
    panic("acquire");
  }

  // The xchg is atomic.
  // It also serializes, so that reads after acquire are not
  // reordered before it.
  while(xchg(&lk->locked, 1) != 0)
    ;

  // Record info about lock acquisition for debugging.
  lk->cpu = cpu;
  getcallerpcs(&lk, lk->pcs);
}
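// For reference, xchg is the inline-assembly helper from x86.h. This
// is a sketch of the stock xv6 version (this port's copy may differ
// slightly; treat it as illustrative, not a quote):
//
//   static inline uint
//   xchg(volatile uint *addr, uint newval)
//   {
//     uint result;
//
//     // The "lock" prefix makes the exchange atomic across CPUs;
//     // a locked xchg on memory is also a full barrier on x86.
//     asm volatile("lock; xchgl %0, %1" :
//                  "+m" (*addr), "=a" (result) :
//                  "1" (newval) :
//                  "cc");
//     return result;
//   }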

// Release the lock.
void
release(struct spinlock *lk)
{
  if(!holding(lk))
    panic("release");

  lk->pcs[0] = 0;
  lk->cpu = 0;

  // The xchg serializes, so that reads before release are
  // not reordered after it.  The 1996 PentiumPro manual (Volume 3,
  // 7.2) says reads can be carried out speculatively and in
  // any order, which implies we need to serialize here.
  // But the 2007 Intel 64 Architecture Memory Ordering White
  // Paper says that Intel 64 and IA-32 will not move a load
  // after a store. So lk->locked = 0 would work here.
  // The xchg being asm volatile ensures gcc emits it after
  // the above assignments (and after the critical section).
  xchg(&lk->locked, 0);

  popcli();
}
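// Illustrative usage (a sketch, not part of this file): xv6's clock
// code guards the ticks counter with a spinlock named "time".
//
//   struct spinlock tickslock;
//   uint ticks;
//
//   initlock(&tickslock, "time");
//   ...
//   acquire(&tickslock);
//   ticks++;               // critical section: at most one CPU here
//   release(&tickslock);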

// Record the current call stack in pcs[] by following the %ebp
// (%rbp on x86-64) chain.
void
getcallerpcs(void *v, uintp pcs[])
{
  uintp *ebp;
#if X64
  asm volatile("mov %%rbp, %0" : "=r" (ebp));
#else
  ebp = (uintp*)v - 2;
#endif
  getstackpcs(ebp, pcs);
}

void
getstackpcs(uintp *ebp, uintp pcs[])
{
  int i;

  for(i = 0; i < 10; i++){
    if(ebp == 0 || ebp < (uintp*)KERNBASE || ebp == (uintp*)0xffffffff)
      break;
    pcs[i] = ebp[1];      // saved %eip
    ebp = (uintp*)ebp[0]; // saved %ebp
  }
  for(; i < 10; i++)
    pcs[i] = 0;
}
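// Stack frame layout assumed by the walk above (a standard
// frame-pointer-preserving prologue; one uintp per slot):
//
//   ebp[1]   return address pushed by call (saved %eip/%rip)
//   ebp[0]   caller's frame pointer (saved %ebp/%rbp)
//
// Following ebp[0] steps from callee frame to caller frame until a
// null pointer, a non-kernel address, or the 0xffffffff sentinel
// ends the chain.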

// Check whether this cpu is holding the lock.
int
holding(struct spinlock *lock)
{
  return lock->locked && lock->cpu == cpu;
}


// Pushcli/popcli are like cli/sti except that they are matched:
// it takes two popcli to undo two pushcli.  Also, if interrupts
// are off, then pushcli, popcli leaves them off.

void
pushcli(void)
{
  int eflags;

  eflags = readeflags();
  cli();
  if(cpu->ncli++ == 0)
    cpu->intena = eflags & FL_IF;
}

void
popcli(void)
{
  if(readeflags()&FL_IF)
    panic("popcli - interruptible");
  if(--cpu->ncli < 0)
    panic("popcli");
  if(cpu->ncli == 0 && cpu->intena)
    sti();
}
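// Illustrative nesting (assuming interrupts start out enabled):
//
//   pushcli();   // cli; ncli 0 -> 1, intena = FL_IF (nonzero)
//   pushcli();   // ncli 1 -> 2
//   popcli();    // ncli 2 -> 1, interrupts stay off
//   popcli();    // ncli 1 -> 0 and intena set, so sti()
//
// Had interrupts been off at the first pushcli, intena would be 0
// and the final popcli would leave them off.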