[linux-2.6.22.y-op.git] include/asm-s390/spinlock.h
/*
 *  include/asm-s390/spinlock.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
/*
 * Atomically replace *lock with new if it still contains old and
 * return the previous contents of *lock.
 */
static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
		      unsigned int old, unsigned int new)
{
	asm volatile ("cs %0,%3,0(%4)"
		      : "=d" (old), "=m" (*lock)
		      : "0" (old), "d" (new), "a" (lock), "m" (*lock)
		      : "cc", "memory" );
	return old;
}
/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
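/*
 * Rough usage sketch, assuming the generic <linux/spinlock.h> wrappers
 * that eventually call into the __raw_* operations below (my_lock is
 * just an example name):
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	unsigned long flags;
 *
 *	spin_lock(&my_lock);			leaves local IRQs alone
 *	spin_unlock(&my_lock);
 *
 *	spin_lock_irqsave(&my_lock, flags);	also disables local IRQs
 *	spin_unlock_irqrestore(&my_lock, flags);
 */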
#define __raw_spin_is_locked(x) ((x)->lock != 0)
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
extern void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc);
extern int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc);
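/*
 * A held lock stores the caller's return address with the low bit set,
 * so the lock word of a held lock is never 0 and the holder's call site
 * can be read straight out of the lock.
 */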
static inline void __raw_spin_lock(raw_spinlock_t *lp)
{
	unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);

	if (unlikely(_raw_compare_and_swap(&lp->lock, 0, pc) != 0))
		_raw_spin_lock_wait(lp, pc);
}
static inline int __raw_spin_trylock(raw_spinlock_t *lp)
{
	unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);

	if (likely(_raw_compare_and_swap(&lp->lock, 0, pc) == 0))
		return 1;
	return _raw_spin_trylock_retry(lp, pc);
}
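/*
 * Unlocking is also done with compare-and-swap rather than a plain
 * store; presumably the serializing cs instruction doubles as the
 * memory barrier needed when releasing the lock.
 */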
static inline void __raw_spin_unlock(raw_spinlock_t *lp)
{
	_raw_compare_and_swap(&lp->lock, lp->lock, 0);
}
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
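/*
 * Rough sketch of that mixing, assuming the generic rwlock wrappers
 * (my_rwlock is just an example name): readers, including those running
 * in interrupt context, take the plain read lock, while every writer
 * takes the irq-safe write lock:
 *
 *	static DEFINE_RWLOCK(my_rwlock);
 *	unsigned long flags;
 *
 *	read_lock(&my_rwlock);			reader, any context
 *	read_unlock(&my_rwlock);
 *
 *	write_lock_irqsave(&my_rwlock, flags);	writer, process context
 *	write_unlock_irqrestore(&my_rwlock, flags);
 */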
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_read_can_lock(x) ((int)(x)->lock >= 0)
/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_write_can_lock(x) ((x)->lock == 0)
extern void _raw_read_lock_wait(raw_rwlock_t *lp);
extern int _raw_read_trylock_retry(raw_rwlock_t *lp);
extern void _raw_write_lock_wait(raw_rwlock_t *lp);
extern int _raw_write_trylock_retry(raw_rwlock_t *lp);
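/*
 * The rwlock word encodes the writer in the most significant bit and the
 * reader count in the low 31 bits: 0x80000000 means write-locked, while
 * any non-zero value below that is the number of active readers.
 */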
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait(rw);
}
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned int old, cmp;

	old = rw->lock;
	do {	/* retry until the reader count is decremented atomically */
		cmp = old;
		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
	} while (cmp != old);
}
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait(rw);
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}
static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
		return 1;
	return _raw_read_trylock_retry(rw);
}
static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
		return 1;
	return _raw_write_trylock_retry(rw);
}
#endif /* __ASM_SPINLOCK_H */