Merge branch 'sh/stable-updates' into sh/for-2.6.30
[linux-2.6/mini2440.git] / arch / s390 / lib / spinlock.c
blobe41f4008afc501927a5c3b08fda18891c1afc887
/*
 *  arch/s390/lib/spinlock.c
 *    Out of line spinlock code.
 *
 *    Copyright (C) IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <asm/io.h>
/* Number of lock-word probes before yielding; tunable via "spin_retry=". */
int spin_retry = 1000;
17 /**
18 * spin_retry= parameter
20 static int __init spin_retry_setup(char *str)
22 spin_retry = simple_strtoul(str, &str, 0);
23 return 1;
25 __setup("spin_retry=", spin_retry_setup);
27 static inline void _raw_yield(void)
29 if (MACHINE_HAS_DIAG44)
30 asm volatile("diag 0,0,0x44");
33 static inline void _raw_yield_cpu(int cpu)
35 if (MACHINE_HAS_DIAG9C)
36 asm volatile("diag %0,0,0x9c"
37 : : "d" (__cpu_logical_map[cpu]));
38 else
39 _raw_yield();
42 void _raw_spin_lock_wait(raw_spinlock_t *lp)
44 int count = spin_retry;
45 unsigned int cpu = ~smp_processor_id();
47 while (1) {
48 if (count-- <= 0) {
49 unsigned int owner = lp->owner_cpu;
50 if (owner != 0)
51 _raw_yield_cpu(~owner);
52 count = spin_retry;
54 if (__raw_spin_is_locked(lp))
55 continue;
56 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
57 return;
60 EXPORT_SYMBOL(_raw_spin_lock_wait);
62 void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
64 int count = spin_retry;
65 unsigned int cpu = ~smp_processor_id();
67 local_irq_restore(flags);
68 while (1) {
69 if (count-- <= 0) {
70 unsigned int owner = lp->owner_cpu;
71 if (owner != 0)
72 _raw_yield_cpu(~owner);
73 count = spin_retry;
75 if (__raw_spin_is_locked(lp))
76 continue;
77 local_irq_disable();
78 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
79 return;
80 local_irq_restore(flags);
83 EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
85 int _raw_spin_trylock_retry(raw_spinlock_t *lp)
87 unsigned int cpu = ~smp_processor_id();
88 int count;
90 for (count = spin_retry; count > 0; count--) {
91 if (__raw_spin_is_locked(lp))
92 continue;
93 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
94 return 1;
96 return 0;
98 EXPORT_SYMBOL(_raw_spin_trylock_retry);
100 void _raw_spin_relax(raw_spinlock_t *lock)
102 unsigned int cpu = lock->owner_cpu;
103 if (cpu != 0)
104 _raw_yield_cpu(~cpu);
106 EXPORT_SYMBOL(_raw_spin_relax);
108 void _raw_read_lock_wait(raw_rwlock_t *rw)
110 unsigned int old;
111 int count = spin_retry;
113 while (1) {
114 if (count-- <= 0) {
115 _raw_yield();
116 count = spin_retry;
118 if (!__raw_read_can_lock(rw))
119 continue;
120 old = rw->lock & 0x7fffffffU;
121 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
122 return;
125 EXPORT_SYMBOL(_raw_read_lock_wait);
127 int _raw_read_trylock_retry(raw_rwlock_t *rw)
129 unsigned int old;
130 int count = spin_retry;
132 while (count-- > 0) {
133 if (!__raw_read_can_lock(rw))
134 continue;
135 old = rw->lock & 0x7fffffffU;
136 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
137 return 1;
139 return 0;
141 EXPORT_SYMBOL(_raw_read_trylock_retry);
143 void _raw_write_lock_wait(raw_rwlock_t *rw)
145 int count = spin_retry;
147 while (1) {
148 if (count-- <= 0) {
149 _raw_yield();
150 count = spin_retry;
152 if (!__raw_write_can_lock(rw))
153 continue;
154 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
155 return;
158 EXPORT_SYMBOL(_raw_write_lock_wait);
160 int _raw_write_trylock_retry(raw_rwlock_t *rw)
162 int count = spin_retry;
164 while (count-- > 0) {
165 if (!__raw_write_can_lock(rw))
166 continue;
167 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
168 return 1;
170 return 0;
172 EXPORT_SYMBOL(_raw_write_trylock_retry);