/*
 * SMP locks primitives for building ix86 locks
 *
 * Alan Cox, alan@redhat.com, 1995
 */

/*
 * This would be much easier but far less clear and easy
 * to borrow for other processors if it was just assembler.
 */
13 extern __inline__
void prim_spin_lock(struct spinlock
*sp
)
15 int processor
=smp_processor_id();
21 while(lock_set_bit(0,&sp
->lock
))
24 * Failed, but that's cos we own it!
27 if(sp
->cpu
==processor
)
33 * Spin in the cache S state if possible
38 * Wait for any invalidates to go off
41 if(smp_invalidate_needed
&(1<<processor
))
42 while(lock_clear_bit(processor
,&smp_invalidate_needed
))
47 * Someone wrote the line, we go 'I' and get
48 * the cache entry. Now try to regrab
51 sp
->users
++;sp
->cpu
=processor
;
59 extern __inline__
int prim_spin_unlock(struct spinlock
*sp
)
61 /* This is safe. The decrement is still guarded by the lock. A multilock would
62 not be safe this way */
65 sp
->cpu
= NO_PROC_ID
;lock_clear_bit(0,&sp
->lock
);
73 * Non blocking lock grab
76 extern __inline__
int prim_spin_lock_nb(struct spinlock
*sp
)
78 if(lock_set_bit(0,&sp
->lock
))
79 return 0; /* Locked already */
81 return 1; /* We got the lock */
86 * These wrap the locking primitives up for usage
89 extern __inline__
void spinlock(struct spinlock
*sp
)
91 if(sp
->priority
<current
->lock_order
)
92 panic("lock order violation: %s (%d)\n", sp
->name
, current
->lock_order
);
93 if(prim_spin_lock(sp
))
96 * We got a new lock. Update the priority chain
98 sp
->oldpri
=current
->lock_order
;
99 current
->lock_order
=sp
->priority
;
103 extern __inline__
void spinunlock(struct spinlock
*sp
)
106 if(current
->lock_order
!=sp
->priority
)
107 panic("lock release order violation %s (%d)\n", sp
->name
, current
->lock_order
);
109 if(prim_spin_unlock(sp
))
112 * Update the debugging lock priority chain. We dumped
113 * our last right to the lock.
115 current
->lock_order
=sp
->pri
;
/*
 * Optimistic, non-blocking lock attempt.
 *
 * We do no sanity checks, it's legal to optimistically
 * try for a lock regardless of the priority chain, which
 * is deliberately left untouched.
 */
extern __inline__ void spintestlock(struct spinlock *sp)
{
	/* Result intentionally discarded: failure means someone holds it */
	(void) prim_spin_lock_nb(sp);
}
/*
 * Free a lock taken with spintestlock().
 *
 * A testlock doesn't update the lock chain so we
 * must not update it on free.
 */
extern __inline__ void spintestunlock(struct spinlock *sp)
{
	/* Result intentionally discarded: no chain bookkeeping here */
	(void) prim_spin_unlock(sp);
}