/*
 * spinlock.c — spinlock-based locking for tdb (from Samba, source/tdb/).
 */
1 #if STANDALONE
2 #if HAVE_CONFIG_H
3 #include <config.h>
4 #endif
6 #include <stdlib.h>
7 #include <stdio.h>
8 #include <unistd.h>
9 #include <string.h>
10 #include <fcntl.h>
11 #include <errno.h>
12 #include <sys/stat.h>
13 #include <time.h>
14 #include "tdb.h"
15 #include "spinlock.h"
17 #define DEBUG
18 #else
19 #include "includes.h"
20 #endif
22 #ifdef USE_SPINLOCKS
/*
 * ARCH SPECIFIC
 */
28 #if defined(SPARC_SPINLOCKS)
30 static inline int __spin_trylock(spinlock_t *lock)
32 unsigned int result;
34 asm volatile("ldstub [%1], %0"
35 : "=r" (result)
36 : "r" (lock)
37 : "memory");
39 return (result == 0) ? 0 : EBUSY;
42 static inline void __spin_unlock(spinlock_t *lock)
44 *lock = 0;
47 static inline void __spin_lock_init(spinlock_t *lock)
49 *lock = 0;
52 static inline int __spin_is_locked(spinlock_t *lock)
54 return (*lock != 0);
57 #elif defined(POWERPC_SPINLOCKS)
59 static inline int __spin_trylock(spinlock_t *lock)
61 int result;
63 __asm__ __volatile__ (
64 " eieio;"
65 "0: lwarx %0,0,%1;"
66 " cmpwi 0,%0,0;"
67 " bne- 1f;"
68 " stwcx. %2,0,%1;"
69 " bne- 0b;"
70 " sync;"
71 "1:"
72 : "=&r"(result)
73 : "r"(lock), "r"(1)
74 : "cr0", "memory");
76 return (result == 0) ? 0 : EBUSY;
79 static inline void __spin_unlock(spinlock_t *lock)
81 asm volatile("sync");
82 *lock = 0;
85 static inline void __spin_lock_init(spinlock_t *lock)
87 *lock = 0;
90 static inline int __spin_is_locked(spinlock_t *lock)
92 return (*lock != 0);
95 #elif defined(INTEL_SPINLOCKS)
97 static inline int __spin_trylock(spinlock_t *lock)
99 int oldval;
101 asm volatile("xchgl %0,%1"
102 : "=r" (oldval), "=m" (*lock)
103 : "0" (0));
104 return oldval > 0 ? 0 : EBUSY;
107 static inline void __spin_unlock(spinlock_t *lock)
109 *lock = 1;
112 static inline void __spin_lock_init(spinlock_t *lock)
114 *lock = 1;
117 static inline int __spin_is_locked(spinlock_t *lock)
119 return (*lock != 1);
122 #elif defined(MIPS_SPINLOCKS)
/*
 * MIPS load-linked: read the word at addr and open a reservation that
 * the matching store_conditional() will test.
 */
static inline unsigned int load_linked(unsigned long addr)
{
	unsigned int val;

	__asm__ __volatile__("ll\t%0,(%1)"
			     : "=r" (val)
			     : "r" (addr));

	return val;
}
/*
 * MIPS store-conditional: write value to addr only if the reservation
 * from the preceding load_linked() still holds.  Returns nonzero on a
 * successful store, 0 if the reservation was lost.
 */
static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
{
	unsigned int ok;

	__asm__ __volatile__("sc\t%0,(%2)"
			     : "=r" (ok)
			     : "0" (value), "r" (addr));

	return ok;
}
145 static inline int __spin_trylock(spinlock_t *lock)
147 unsigned int mw;
149 do {
150 mw = load_linked(lock);
151 if (mw)
152 return EBUSY;
153 } while (!store_conditional(lock, 1));
155 return 0;
158 static inline void __spin_unlock(spinlock_t *lock)
160 *lock = 0;
163 static inline void __spin_lock_init(spinlock_t *lock)
165 *lock = 0;
168 static inline int __spin_is_locked(spinlock_t *lock)
170 return (*lock != 0);
173 #else
174 #error Need to implement spinlock code in spinlock.c
175 #endif
/*
 * OS SPECIFIC
 */
/*
 * Give up the CPU so whoever holds the lock can make progress.
 * Prefers sched_yield() when available; otherwise sleeps a hair over
 * 2ms, because shorter delays busy-loop on Linux real-time tasks.
 *
 * Fix: declare the timespec inside the #else branch — in the
 * USE_SCHED_YIELD build it was an unused variable.
 */
static void yield_cpu(void)
{
#ifdef USE_SCHED_YIELD
	sched_yield();
#else
	/* Linux will busy loop for delays < 2ms on real time tasks */
	struct timespec tm;

	tm.tv_sec = 0;
	tm.tv_nsec = 2000000L + 1;
	nanosleep(&tm, NULL);
#endif
}
/*
 * Report whether this machine has multiple CPUs.  Always 0 in this
 * build: spinning only pays off when another CPU can release the
 * lock, so callers yield immediately instead.
 */
static int this_is_smp(void)
{
	return 0;
}
/*
 * GENERIC
 */

/* Nonzero on SMP hardware; set from this_is_smp() in tdb_create_rwlocks(). */
static int smp_machine = 0;
206 static inline void __spin_lock(spinlock_t *lock)
208 int ntries = 0;
210 while(__spin_trylock(lock)) {
211 while(__spin_is_locked(lock)) {
212 if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
213 continue;
214 yield_cpu();
219 static void __read_lock(rwlock_t *rwlock)
221 int ntries = 0;
223 while(1) {
224 __spin_lock(&rwlock->lock);
226 if (!(rwlock->count & RWLOCK_BIAS)) {
227 rwlock->count++;
228 __spin_unlock(&rwlock->lock);
229 return;
232 __spin_unlock(&rwlock->lock);
234 while(rwlock->count & RWLOCK_BIAS) {
235 if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
236 continue;
237 yield_cpu();
242 static void __write_lock(rwlock_t *rwlock)
244 int ntries = 0;
246 while(1) {
247 __spin_lock(&rwlock->lock);
249 if (rwlock->count == 0) {
250 rwlock->count |= RWLOCK_BIAS;
251 __spin_unlock(&rwlock->lock);
252 return;
255 __spin_unlock(&rwlock->lock);
257 while(rwlock->count != 0) {
258 if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
259 continue;
260 yield_cpu();
265 static void __write_unlock(rwlock_t *rwlock)
267 __spin_lock(&rwlock->lock);
269 #ifdef DEBUG
270 if (!(rwlock->count & RWLOCK_BIAS))
271 fprintf(stderr, "bug: write_unlock\n");
272 #endif
274 rwlock->count &= ~RWLOCK_BIAS;
275 __spin_unlock(&rwlock->lock);
278 static void __read_unlock(rwlock_t *rwlock)
280 __spin_lock(&rwlock->lock);
282 #ifdef DEBUG
283 if (!rwlock->count)
284 fprintf(stderr, "bug: read_unlock\n");
286 if (rwlock->count & RWLOCK_BIAS)
287 fprintf(stderr, "bug: read_unlock\n");
288 #endif
290 rwlock->count--;
291 __spin_unlock(&rwlock->lock);
294 /* TDB SPECIFIC */
296 /* lock a list in the database. list -1 is the alloc list */
297 int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
299 rwlock_t *rwlocks;
301 if (!tdb->map_ptr) return -1;
302 rwlocks = (rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
304 switch(rw_type) {
305 case F_RDLCK:
306 __read_lock(&rwlocks[list+1]);
307 break;
309 case F_WRLCK:
310 __write_lock(&rwlocks[list+1]);
311 break;
313 default:
314 return TDB_ERRCODE(TDB_ERR_LOCK, -1);
316 return 0;
319 /* unlock the database. */
320 int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
322 rwlock_t *rwlocks;
324 if (!tdb->map_ptr) return -1;
325 rwlocks = (rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
327 switch(rw_type) {
328 case F_RDLCK:
329 __read_unlock(&rwlocks[list+1]);
330 break;
332 case F_WRLCK:
333 __write_unlock(&rwlocks[list+1]);
334 break;
336 default:
337 return TDB_ERRCODE(TDB_ERR_LOCK, -1);
340 return 0;
343 int tdb_create_rwlocks(int fd, unsigned int hash_size)
345 unsigned size, i;
346 rwlock_t *rwlocks;
348 size = (hash_size + 1) * sizeof(rwlock_t);
349 rwlocks = malloc(size);
350 if (!rwlocks)
351 return -1;
353 for(i = 0; i < hash_size+1; i++) {
354 __spin_lock_init(&rwlocks[i].lock);
355 rwlocks[i].count = 0;
358 /* Write it out (appending to end) */
359 if (write(fd, rwlocks, size) != size) {
360 free(rwlocks);
361 return -1;
363 smp_machine = this_is_smp();
364 free(rwlocks);
365 return 0;
368 int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
370 rwlock_t *rwlocks;
371 unsigned i;
373 if (tdb->header.rwlocks == 0) return 0;
374 if (!tdb->map_ptr) return -1;
376 /* We're mmapped here */
377 rwlocks = (rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
378 for(i = 0; i < tdb->header.hash_size+1; i++) {
379 __spin_lock_init(&rwlocks[i].lock);
380 rwlocks[i].count = 0;
382 return 0;
384 #else
/* Spinlocks disabled: creating the lock area is a successful no-op. */
int tdb_create_rwlocks(int fd, unsigned int hash_size)
{
	return 0;
}
386 int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
387 int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
389 /* Non-spinlock version: remove spinlock pointer */
390 int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
392 tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
393 - (char *)&tdb->header);
395 tdb->header.rwlocks = 0;
396 if (lseek(tdb->fd, off, SEEK_SET) != off
397 || write(tdb->fd, (void *)&tdb->header.rwlocks,
398 sizeof(tdb->header.rwlocks))
399 != sizeof(tdb->header.rwlocks))
400 return -1;
401 return 0;
403 #endif