/* include/asm-i386/locks.h */
/*
 * SMP locks primitives for building ix86 locks
 * (not yet used).
 *
 *		Alan Cox, alan@redhat.com, 1995
 */

/*
 * This would be much easier but far less clear and easy
 * to borrow for other processors if it was just assembler.
 */
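
/*
 * Illustrative sketch, not part of the original header: the fields these
 * primitives assume on struct spinlock. The real definition, like
 * lock_set_bit(), lock_clear_bit(), smp_invalidate_needed and NO_PROC_ID,
 * lives elsewhere; the names below are taken from the code in this file.
 *
 *	struct spinlock
 *	{
 *		unsigned long lock;	bit 0 is the lock bit
 *		int cpu;		owning CPU, NO_PROC_ID when free
 *		int users;		recursion count for the owner
 *		unsigned long spins;	contention statistics
 *		int priority;		lock ordering level
 *		int oldpri;		saved current->lock_order
 *		const char *name;	used in the panic() messages
 *	};
 */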

/* Returns 1 if the lock was newly taken, 0 if the caller already held it */
extern __inline__ int prim_spin_lock(struct spinlock *sp)
{
	int processor=smp_processor_id();
	/*
	 * Grab the lock bit
	 */
	while(lock_set_bit(0,&sp->lock))
	{
		/*
		 * Failed, but that's cos we own it!
		 */
		if(sp->cpu==processor)
		{
			sp->users++;
			return 0;
		}
		/*
		 * Spin in the cache S state if possible
		 */
		while(sp->lock)
		{
			/*
			 * Wait for any invalidates to go off
			 */
			if(smp_invalidate_needed&(1<<processor))
				while(lock_clear_bit(processor,&smp_invalidate_needed))
					local_flush_tlb();
			sp->spins++;
		}
		/*
		 * Someone wrote the line, we go 'I' and get
		 * the cache entry. Now try to regrab
		 */
	}
	sp->users++; sp->cpu=processor;
	return 1;
}

/*
 * Release a spin lock
 */
extern __inline__ int prim_spin_unlock(struct spinlock *sp)
{
	/* This is safe. The decrement is still guarded by the lock. A multilock
	   would not be safe this way */
	if(!--sp->users)
	{
		sp->cpu=NO_PROC_ID;
		lock_clear_bit(0,&sp->lock);
		return 1;
	}
	return 0;
}

/*
 * Non blocking lock grab
 */
extern __inline__ int prim_spin_lock_nb(struct spinlock *sp)
{
	if(lock_set_bit(0,&sp->lock))
		return 0;		/* Locked already */
	sp->users++;
	return 1;		/* We got the lock */
}
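
/*
 * Illustrative sketch, not part of the original header: the non-blocking
 * primitive behaves like a classic trylock, assuming a hypothetical lock
 * called some_lock:
 *
 *	if(prim_spin_lock_nb(&some_lock))
 *	{
 *		... touch the protected data ...
 *		prim_spin_unlock(&some_lock);
 *	}
 *	else
 *		... the lock was busy, back off and retry later ...
 */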

/*
 * These wrap the locking primitives up for usage
 */
extern __inline__ void spinlock(struct spinlock *sp)
{
	if(sp->priority<current->lock_order)
		panic("lock order violation: %s (%d)\n", sp->name, current->lock_order);
	if(prim_spin_lock(sp))
	{
		/*
		 * We got a new lock. Update the priority chain
		 */
		sp->oldpri=current->lock_order;
		current->lock_order=sp->priority;
	}
}

extern __inline__ void spinunlock(struct spinlock *sp)
{
	int pri;
	if(current->lock_order!=sp->priority)
		panic("lock release order violation %s (%d)\n", sp->name, current->lock_order);
	pri=sp->oldpri;
	if(prim_spin_unlock(sp))
	{
		/*
		 * Update the debugging lock priority chain. We dumped
		 * our last right to the lock.
		 */
		current->lock_order=pri;
	}
}

extern __inline__ void spintestlock(struct spinlock *sp)
{
	/*
	 * We do no sanity checks, it's legal to optimistically
	 * get a lower lock.
	 */
	prim_spin_lock_nb(sp);
}

extern __inline__ void spintestunlock(struct spinlock *sp)
{
	/*
	 * A testlock doesn't update the lock chain so we
	 * must not update it on free
	 */
	prim_spin_unlock(sp);
}
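
/*
 * Illustrative sketch, not part of the original header: how the wrappers
 * above would be used, assuming a hypothetical struct spinlock my_lock
 * whose priority and name fields have been initialised, and a task whose
 * current->lock_order tracks the priority of the last lock taken.
 *
 *	spinlock(&my_lock);		panics on a lock order violation,
 *					then updates current->lock_order
 *	... critical section ...
 *	spinunlock(&my_lock);		restores current->lock_order
 *
 *	spintestlock(&my_lock);		optimistic grab, no ordering checks
 *	... speculative work ...
 *	spintestunlock(&my_lock);	release without touching the chain
 */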