#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * Type of int is used as a full 64b word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/iseries/hv_call.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#define __raw_spin_is_locked(x)		((x)->slock != 0)
#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	1
#endif
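/* Example (follows from the token comment above): CPU 3 holding a
 * lock leaves 0x80000003 in ->slock. */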
#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif
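/*
 * A minimal sketch of what io_sync buys us (assumed driver code, not
 * part of this header; example_dev, EX_CTRL and DMA_GO are
 * hypothetical): the MMIO accessors set paca->io_sync, so SYNC_IO in
 * the unlock path issues a full mb() before the lock word is cleared,
 * keeping the device write inside the critical section.
 */
#if 0	/* illustration only */
static void example_start_dma(struct example_dev *dev)
{
	spin_lock(&dev->lock);
	writel(DMA_GO, dev->regs + EX_CTRL);	/* flags paca->io_sync */
	spin_unlock(&dev->lock);		/* SYNC_IO orders the MMIO */
}
#endif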
/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}
static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __spin_trylock(lock) == 0;
}
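/*
 * Usage sketch (assumed caller; do_work() is hypothetical):
 * __raw_spin_trylock() returns nonzero on success, so a caller can
 * back off and do other work instead of spinning.
 */
#if 0	/* illustration only */
	if (__raw_spin_trylock(&lock)) {
		do_work();		/* ->slock now holds our 0x800000yy token */
		__raw_spin_unlock(&lock);
	}
#endif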
/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.  (A sketch of the
 * yield path follows the declarations below.)
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
extern void __spin_yield(raw_spinlock_t *lock);
extern void __rw_yield(raw_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif
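/*
 * A simplified sketch of the out-of-line __spin_yield() (the real
 * implementation lives in arch/powerpc/lib/locks.c; the details below
 * are approximate and from memory): decode the holder's CPU from the
 * lock token and, if that virtual processor is preempted, confer the
 * rest of our timeslice to it.
 */
#if 0	/* illustration only */
void __spin_yield(raw_spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;

	lock_value = lock->slock;
	if (lock_value == 0)
		return;
	holder_cpu = lock_value & 0xffff;	/* yy from 0x800000yy */
	yield_count = lppaca[holder_cpu].yield_count;
	if ((yield_count & 1) == 0)
		return;		/* holder's virtual cpu is running now */
	rmb();
	if (lock->slock != lock_value)
		return;		/* the lock changed hands meanwhile */
	plpar_hcall_norets(H_CONFER,
			   get_hard_smp_processor_id(holder_cpu), yield_count);
}
#endif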
static void __inline__ __raw_spin_lock(raw_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}
static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
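/*
 * Why the flags variant exists: spin_lock_irqsave() would otherwise
 * busy-wait with interrupts hard-disabled; the code above restores the
 * caller's pre-lock interrupt state while it spins.  Assumed caller
 * pattern (the generic kernel API, not defined in this file):
 */
#if 0	/* illustration only */
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);	/* may spin with irqs enabled */
	update_shared_state();			/* hypothetical critical section */
	spin_unlock_irqrestore(&my_lock, flags);
#endif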
static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# __raw_spin_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	lock->slock = 0;
}
#ifdef CONFIG_PPC64
extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
#else
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.  (See the sketch just below.)
 */
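/*
 * A minimal sketch of that "mix" (assumed driver code; all names are
 * hypothetical): the writer disables interrupts, so an in-irq reader
 * can never deadlock against a writer on its own CPU, while readers
 * stay cheap.
 */
#if 0	/* illustration only */
static irqreturn_t example_irq(int irq, void *data)
{
	read_lock(&example_rwlock);	/* non-irqsafe read is fine here */
	/* ... inspect shared state ... */
	read_unlock(&example_rwlock);
	return IRQ_HANDLED;
}

static void example_update(void)
{
	unsigned long flags;

	write_lock_irqsave(&example_rwlock, flags);	/* writer must be irq-safe */
	/* ... modify shared state ... */
	write_unlock_irqrestore(&example_rwlock, flags);
}
#endif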
#define __raw_read_can_lock(rw)		((rw)->lock >= 0)
#define __raw_write_can_lock(rw)	(!(rw)->lock)
#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif
/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static long __inline__ __read_trylock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static __inline__ long __write_trylock(raw_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}
static void __inline__ __raw_read_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(__read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}
static void __inline__ __raw_write_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(__write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}
static int __inline__ __raw_read_trylock(raw_rwlock_t *rw)
{
	return __read_trylock(rw) > 0;
}
static int __inline__ __raw_write_trylock(raw_rwlock_t *rw)
{
	return __write_trylock(rw) == 0;
}
static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	LWSYNC_ON_SMP
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}
static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	rw->lock = 0;
}
#define _raw_spin_relax(lock)	__spin_yield(lock)
#define _raw_read_relax(lock)	__rw_yield(lock)
#define _raw_write_relax(lock)	__rw_yield(lock)
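/*
 * How the relax hooks get used: the generic contended slow path in
 * kernel/spinlock.c (simplified here from memory) polls with
 * _raw_spin_relax(), so on a shared-processor LPAR every polling pass
 * can donate cycles to the lock holder via __spin_yield().
 */
#if 0	/* illustration only */
	for (;;) {
		preempt_disable();
		if (likely(_raw_spin_trylock(lock)))
			break;
		preempt_enable();
		while (!spin_can_lock(lock))
			_raw_spin_relax(lock);	/* may yield to holder */
	}
#endif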
#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */