source/tdb/spinlock.c
/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Anton Blanchard                   2001

     ** NOTE! The following LGPL license applies to the tdb
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#if HAVE_CONFIG_H
#include <config.h>
#endif

#if STANDALONE
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/stat.h>
#include <time.h>
#include <signal.h>
#include "tdb.h"
#include "spinlock.h"

#define DEBUG
#else
#include "includes.h"
#endif

#ifdef USE_SPINLOCKS

/*
 * ARCH SPECIFIC
 */

#if defined(SPARC_SPINLOCKS)

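/*
 * SPARC: "ldstub" atomically loads the addressed byte and stores 0xff
 * into it, so a result of 0 means the lock byte was clear and we now
 * own it.  The unlock path below is just a compiler barrier followed
 * by a plain store of 0.
 */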
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int result;

        asm volatile("ldstub [%1], %0"
                : "=r" (result)
                : "r" (lock)
                : "memory");

        return (result == 0) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#elif defined(POWERPC_SPINLOCKS)

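/*
 * PowerPC: lwarx/stwcx. form a load-reserved/store-conditional pair.
 * The loop retries until it either sees the lock word non-zero
 * (someone else holds it, result stays 0) or successfully stores 1
 * (result is 1 and we own the lock).  The isync keeps later accesses
 * from being issued before the lock is held; eieio in __spin_unlock
 * orders prior accesses before the releasing store.
 */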
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int result;

        __asm__ __volatile__(
"1:     lwarx           %0,0,%1\n\
        cmpwi           0,%0,0\n\
        li              %0,0\n\
        bne-            2f\n\
        li              %0,1\n\
        stwcx.          %0,0,%1\n\
        bne-            1b\n\
        isync\n\
2:"     : "=&r"(result)
        : "r"(lock)
        : "cr0", "memory");

        return (result == 1) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("eieio":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#elif defined(INTEL_SPINLOCKS)

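/*
 * x86: note the inverted convention in this port: 1 means unlocked and
 * 0 means locked.  xchgl atomically swaps 0 into the lock word and
 * returns the old value; an old value of 1 means the lock was free and
 * is now ours.
 */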
static inline int __spin_trylock(spinlock_t *lock)
{
        int oldval;

        asm volatile("xchgl %0,%1"
                : "=r" (oldval), "=m" (*lock)
                : "0" (0)
                : "memory");

        return oldval > 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 1;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 1;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 1);
}

#elif defined(MIPS_SPINLOCKS) && defined(sgi) && (_COMPILER_VERSION >= 730)

/* Implement spinlocks on IRIX using the MIPSPro atomic fetch operations. See
 * sync(3) for the details of the intrinsic operations.
 *
 * "sgi" and "_COMPILER_VERSION" are always defined by MIPSPro.
 */

#if defined(STANDALONE)

/* MIPSPro 7.3 has "__inline" as an extension, but not "inline". */
#define inline __inline

#endif /* STANDALONE */

/* Returns 0 if the lock is acquired, EBUSY otherwise. */
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int val;
        val = __lock_test_and_set(lock, 1);
        return val == 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        __lock_release(lock);
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        __lock_release(lock);
}

/* Returns 1 if the lock is held, 0 otherwise. */
static inline int __spin_is_locked(spinlock_t *lock)
{
        unsigned int val;
        val = __add_and_fetch(lock, 0);
        return val;
}

#elif defined(MIPS_SPINLOCKS)

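/*
 * MIPS: ll ("load linked") reads the lock word and sets a reservation;
 * sc ("store conditional") writes only if the reservation is still
 * intact, returning 0 on failure.  __spin_trylock re-reads until the
 * word is either seen non-zero (EBUSY) or the conditional store of 1
 * succeeds.
 */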
static inline unsigned int load_linked(unsigned long addr)
{
        unsigned int res;

        __asm__ __volatile__("ll\t%0,(%1)"
                : "=r" (res)
                : "r" (addr));

        return res;
}

static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
{
        unsigned int res;

        __asm__ __volatile__("sc\t%0,(%2)"
                : "=r" (res)
                : "0" (value), "r" (addr));
        return res;
}

static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int mw;

        do {
                mw = load_linked(lock);
                if (mw)
                        return EBUSY;
        } while (!store_conditional(lock, 1));

        asm volatile("":::"memory");

        return 0;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#else
#error Need to implement spinlock code in spinlock.c
#endif

/*
 * OS SPECIFIC
 */

static void yield_cpu(void)
{
        struct timespec tm;

#ifdef USE_SCHED_YIELD
        sched_yield();
#else
        /* Linux will busy loop for delays < 2ms on real time tasks */
        tm.tv_sec = 0;
        tm.tv_nsec = 2000000L + 1;
        nanosleep(&tm, NULL);
#endif
}

static int this_is_smp(void)
{
#if defined(HAVE_SYSCONF) && defined(SYSCONF_SC_NPROC_ONLN)
        return (sysconf(_SC_NPROC_ONLN) > 1) ? 1 : 0;
#else
        return 0;
#endif
}

/*
 * GENERIC
 */

static int smp_machine = 0;

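/*
 * Classic test-and-test-and-set: try the atomic operation, and while it
 * fails spin on the cheaper plain read in __spin_is_locked.  On a
 * uniprocessor, or after MAX_BUSY_LOOPS iterations on SMP, give the CPU
 * away instead of burning it.
 */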
static inline void __spin_lock(spinlock_t *lock)
{
        int ntries = 0;

        while(__spin_trylock(lock)) {
                while(__spin_is_locked(lock)) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

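/*
 * Each hash chain has a tdb_rwlock_t: a short-lived spinlock guarding
 * "count", which holds the number of active readers, plus RWLOCK_BIAS,
 * a marker a writer ORs in to claim exclusive access.  Readers and
 * writers that find the lock busy drop the spinlock and poll the count
 * (yielding as above) before retrying.
 */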
static void __read_lock(tdb_rwlock_t *rwlock)
{
        int ntries = 0;

        while(1) {
                __spin_lock(&rwlock->lock);

                if (!(rwlock->count & RWLOCK_BIAS)) {
                        rwlock->count++;
                        __spin_unlock(&rwlock->lock);
                        return;
                }

                __spin_unlock(&rwlock->lock);

                while(rwlock->count & RWLOCK_BIAS) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

static void __write_lock(tdb_rwlock_t *rwlock)
{
        int ntries = 0;

        while(1) {
                __spin_lock(&rwlock->lock);

                if (rwlock->count == 0) {
                        rwlock->count |= RWLOCK_BIAS;
                        __spin_unlock(&rwlock->lock);
                        return;
                }

                __spin_unlock(&rwlock->lock);

                while(rwlock->count != 0) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

static void __write_unlock(tdb_rwlock_t *rwlock)
{
        __spin_lock(&rwlock->lock);

#ifdef DEBUG
        if (!(rwlock->count & RWLOCK_BIAS))
                fprintf(stderr, "bug: write_unlock\n");
#endif

        rwlock->count &= ~RWLOCK_BIAS;
        __spin_unlock(&rwlock->lock);
}

static void __read_unlock(tdb_rwlock_t *rwlock)
{
        __spin_lock(&rwlock->lock);

#ifdef DEBUG
        if (!rwlock->count)
                fprintf(stderr, "bug: read_unlock\n");

        if (rwlock->count & RWLOCK_BIAS)
                fprintf(stderr, "bug: read_unlock\n");
#endif

        rwlock->count--;
        __spin_unlock(&rwlock->lock);
}

/* TDB SPECIFIC */

/* lock a list in the database. list -1 is the alloc list */
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
        tdb_rwlock_t *rwlocks;

        if (!tdb->map_ptr) return -1;
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

        switch(rw_type) {
        case F_RDLCK:
                __read_lock(&rwlocks[list+1]);
                break;

        case F_WRLCK:
                __write_lock(&rwlocks[list+1]);
                break;

        default:
                return TDB_ERRCODE(TDB_ERR_LOCK, -1);
        }
        return 0;
}

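/*
 * Usage sketch (not part of the original file): a caller that wants to
 * read one hash chain takes and releases the same list with matching
 * lock types.  "my_tdb" and "bucket" are placeholders.
 *
 *      if (tdb_spinlock(my_tdb, bucket, F_RDLCK) == 0) {
 *              ... walk chain "bucket" under the read lock ...
 *              tdb_spinunlock(my_tdb, bucket, F_RDLCK);
 *      }
 *
 * Passing list = -1 locks the alloc list, which maps to slot 0 of the
 * rwlock array.
 */
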
/* unlock the database. */
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
        tdb_rwlock_t *rwlocks;

        if (!tdb->map_ptr) return -1;
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

        switch(rw_type) {
        case F_RDLCK:
                __read_unlock(&rwlocks[list+1]);
                break;

        case F_WRLCK:
                __write_unlock(&rwlocks[list+1]);
                break;

        default:
                return TDB_ERRCODE(TDB_ERR_LOCK, -1);
        }

        return 0;
}

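/*
 * Create the spinlock array for a new database: one rwlock per hash
 * chain plus one for the alloc list (hence hash_size+1), all
 * initialised to the unlocked state and appended to the file so they
 * end up inside the mapped region.
 */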
int tdb_create_rwlocks(int fd, unsigned int hash_size)
{
        unsigned size, i;
        tdb_rwlock_t *rwlocks;

        size = TDB_SPINLOCK_SIZE(hash_size);
        rwlocks = malloc(size);
        if (!rwlocks)
                return -1;

        for(i = 0; i < hash_size+1; i++) {
                __spin_lock_init(&rwlocks[i].lock);
                rwlocks[i].count = 0;
        }

        /* Write it out (appending to end) */
        if (write(fd, rwlocks, size) != size) {
                free(rwlocks);
                return -1;
        }
        smp_machine = this_is_smp();
        free(rwlocks);
        return 0;
}

int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
        tdb_rwlock_t *rwlocks;
        unsigned i;

        if (tdb->header.rwlocks == 0) return 0;
        if (!tdb->map_ptr) return -1;

        /* We're mmapped here */
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
        for(i = 0; i < tdb->header.hash_size+1; i++) {
                __spin_lock_init(&rwlocks[i].lock);
                rwlocks[i].count = 0;
        }
        return 0;
}

#else
int tdb_create_rwlocks(int fd, unsigned int hash_size) { return 0; }
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }

/* Non-spinlock version: remove spinlock pointer */
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
        tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
                                - (char *)&tdb->header);

        tdb->header.rwlocks = 0;
        if (lseek(tdb->fd, off, SEEK_SET) != off
            || write(tdb->fd, (void *)&tdb->header.rwlocks,
                     sizeof(tdb->header.rwlocks))
            != sizeof(tdb->header.rwlocks))
                return -1;
        return 0;
}
#endif