source3/tdb/spinlock.c
/*
   Unix SMB/CIFS implementation.
   Samba database functions
   Copyright (C) Anton Blanchard 2001

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if HAVE_CONFIG_H
#include <config.h>
#endif

#if STANDALONE
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/stat.h>
#include <time.h>
#include <signal.h>
#include "tdb.h"
#include "spinlock.h"

#define DEBUG
#else
#include "includes.h"
#endif
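/* Descriptive note (added): everything from the #ifdef below down to the
 * matching #else is only compiled when USE_SPINLOCKS is defined; without
 * it, the stub implementations at the bottom of this file are used. */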
#ifdef USE_SPINLOCKS

/*
 * ARCH SPECIFIC
 */

#if defined(SPARC_SPINLOCKS)

static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int result;

	asm volatile("ldstub [%1], %0"
		     : "=r" (result)
		     : "r" (lock)
		     : "memory");

	return (result == 0) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}

#elif defined(POWERPC_SPINLOCKS)

static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int result;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	li		%0,0\n\
	bne-		2f\n\
	li		%0,1\n\
	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r"(result)
	: "r"(lock)
	: "cr0", "memory");

	return (result == 1) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("eieio":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}

#elif defined(INTEL_SPINLOCKS)
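/* Descriptive note (added): this port keeps the lock word in the opposite
 * sense of the other ports in this file (here 1 means unlocked and 0 means
 * locked), so the xchgl of 0 below both reads the old value and claims the
 * lock in a single atomic step. */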
static inline int __spin_trylock(spinlock_t *lock)
{
	int oldval;

	asm volatile("xchgl %0,%1"
		: "=r" (oldval), "=m" (*lock)
		: "0" (0)
		: "memory");

	return oldval > 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 1;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 1;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 1);
}

#elif defined(MIPS_SPINLOCKS) && defined(sgi) && (_COMPILER_VERSION >= 730)

/* Implement spinlocks on IRIX using the MIPSPro atomic fetch operations. See
 * sync(3) for the details of the intrinsic operations.
 *
 * "sgi" and "_COMPILER_VERSION" are always defined by MIPSPro.
 */

#if defined(STANDALONE)

/* MIPSPro 7.3 has "__inline" as an extension, but not "inline". */
#define inline __inline

#endif /* STANDALONE */

/* Returns 0 if the lock is acquired, EBUSY otherwise. */
static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int val;
	val = __lock_test_and_set(lock, 1);
	return val == 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	__lock_release(lock);
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	__lock_release(lock);
}

/* Returns 1 if the lock is held, 0 otherwise. */
static inline int __spin_is_locked(spinlock_t *lock)
{
	unsigned int val;
	val = __add_and_fetch(lock, 0);
	return val;
}

#elif defined(MIPS_SPINLOCKS)
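/* Descriptive note (added): this port builds the lock out of the MIPS
 * load-linked (ll) / store-conditional (sc) instruction pair: __spin_trylock
 * rereads the lock word and retries until the conditional store of 1
 * succeeds, bailing out with EBUSY as soon as the word is seen non-zero. */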
static inline unsigned int load_linked(unsigned long addr)
{
	unsigned int res;

	__asm__ __volatile__("ll\t%0,(%1)"
		: "=r" (res)
		: "r" (addr));

	return res;
}

static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
{
	unsigned int res;

	__asm__ __volatile__("sc\t%0,(%2)"
		: "=r" (res)
		: "0" (value), "r" (addr));
	return res;
}

static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int mw;

	do {
		mw = load_linked(lock);
		if (mw)
			return EBUSY;
	} while (!store_conditional(lock, 1));

	asm volatile("":::"memory");

	return 0;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}

#else
#error Need to implement spinlock code in spinlock.c
#endif

/*
 * OS SPECIFIC
 */

static void yield_cpu(void)
{
	struct timespec tm;

#ifdef USE_SCHED_YIELD
	sched_yield();
#else
	/* Linux will busy loop for delays < 2ms on real time tasks */
	tm.tv_sec = 0;
	tm.tv_nsec = 2000000L + 1;
	nanosleep(&tm, NULL);
#endif
}

static int this_is_smp(void)
{
#if defined(HAVE_SYSCONF) && defined(SYSCONF_SC_NPROC_ONLN)
	return (sysconf(_SC_NPROC_ONLN) > 1) ? 1 : 0;
#else
	return 0;
#endif
}

/*
 * GENERIC
 */

static int smp_machine = 0;

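/* Descriptive note (added): acquire the spinlock by retrying the atomic
 * trylock; while the lock stays held, busy-wait on SMP machines for up to
 * MAX_BUSY_LOOPS iterations, otherwise give up the CPU via yield_cpu(). */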
static inline void __spin_lock(spinlock_t *lock)
{
	int ntries = 0;

	while(__spin_trylock(lock)) {
		while(__spin_is_locked(lock)) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}

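/* Descriptive note (added): each tdb_rwlock_t pairs a spinlock with a count
 * word.  Readers increment "count" while no writer is present; a writer
 * claims the lock by setting the RWLOCK_BIAS bit once the count has dropped
 * to zero.  The spinlock itself only serialises updates to the count. */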
static void __read_lock(tdb_rwlock_t *rwlock)
{
	int ntries = 0;

	while(1) {
		__spin_lock(&rwlock->lock);

		if (!(rwlock->count & RWLOCK_BIAS)) {
			rwlock->count++;
			__spin_unlock(&rwlock->lock);
			return;
		}

		__spin_unlock(&rwlock->lock);

		while(rwlock->count & RWLOCK_BIAS) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}

static void __write_lock(tdb_rwlock_t *rwlock)
{
	int ntries = 0;

	while(1) {
		__spin_lock(&rwlock->lock);

		if (rwlock->count == 0) {
			rwlock->count |= RWLOCK_BIAS;
			__spin_unlock(&rwlock->lock);
			return;
		}

		__spin_unlock(&rwlock->lock);

		while(rwlock->count != 0) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}

static void __write_unlock(tdb_rwlock_t *rwlock)
{
	__spin_lock(&rwlock->lock);

#ifdef DEBUG
	if (!(rwlock->count & RWLOCK_BIAS))
		fprintf(stderr, "bug: write_unlock\n");
#endif

	rwlock->count &= ~RWLOCK_BIAS;
	__spin_unlock(&rwlock->lock);
}

static void __read_unlock(tdb_rwlock_t *rwlock)
{
	__spin_lock(&rwlock->lock);

#ifdef DEBUG
	if (!rwlock->count)
		fprintf(stderr, "bug: read_unlock\n");

	if (rwlock->count & RWLOCK_BIAS)
		fprintf(stderr, "bug: read_unlock\n");
#endif

	rwlock->count--;
	__spin_unlock(&rwlock->lock);
}

/* TDB SPECIFIC */

/* lock a list in the database. list -1 is the alloc list */
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
	tdb_rwlock_t *rwlocks;

	if (!tdb->map_ptr) return -1;
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

	switch(rw_type) {
	case F_RDLCK:
		__read_lock(&rwlocks[list+1]);
		break;

	case F_WRLCK:
		__write_lock(&rwlocks[list+1]);
		break;

	default:
		return TDB_ERRCODE(TDB_ERR_LOCK, -1);
	}
	return 0;
}

/* unlock the database. */
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
	tdb_rwlock_t *rwlocks;

	if (!tdb->map_ptr) return -1;
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

	switch(rw_type) {
	case F_RDLCK:
		__read_unlock(&rwlocks[list+1]);
		break;

	case F_WRLCK:
		__write_unlock(&rwlocks[list+1]);
		break;

	default:
		return TDB_ERRCODE(TDB_ERR_LOCK, -1);
	}
	return 0;
}

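/* Usage sketch (added, illustrative only): lock and unlock calls are paired
 * around access to one hash chain, using the same list number and lock type,
 * e.g.
 *
 *	if (tdb_spinlock(tdb, hash, F_RDLCK) != 0) return -1;
 *	... walk the records on that chain ...
 *	tdb_spinunlock(tdb, hash, F_RDLCK);
 *
 * List -1 selects the extra rwlock that guards the allocation list. */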
int tdb_create_rwlocks(int fd, unsigned int hash_size)
{
	unsigned size, i;
	tdb_rwlock_t *rwlocks;

	size = TDB_SPINLOCK_SIZE(hash_size);
	rwlocks = malloc(size);
	if (!rwlocks)
		return -1;

	for(i = 0; i < hash_size+1; i++) {
		__spin_lock_init(&rwlocks[i].lock);
		rwlocks[i].count = 0;
	}

	/* Write it out (appending to end) */
	if (write(fd, rwlocks, size) != size) {
		free(rwlocks);
		return -1;
	}
	smp_machine = this_is_smp();
	free(rwlocks);
	return 0;
}

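/* Descriptive note (added): reset every rwlock in the mapped spinlock area
 * (one per hash chain plus one for the allocation list) back to its initial,
 * unlocked state. */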
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
	tdb_rwlock_t *rwlocks;
	unsigned i;

	if (tdb->header.rwlocks == 0) return 0;
	if (!tdb->map_ptr) return -1;

	/* We're mmapped here */
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
	for(i = 0; i < tdb->header.hash_size+1; i++) {
		__spin_lock_init(&rwlocks[i].lock);
		rwlocks[i].count = 0;
	}
	return 0;
}

#else
int tdb_create_rwlocks(int fd, unsigned int hash_size) { return 0; }
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }

/* Non-spinlock version: remove spinlock pointer */
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
	tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
				- (char *)&tdb->header);

	tdb->header.rwlocks = 0;
	if (lseek(tdb->fd, off, SEEK_SET) != off
	    || write(tdb->fd, (void *)&tdb->header.rwlocks,
		     sizeof(tdb->header.rwlocks))
	    != sizeof(tdb->header.rwlocks))
		return -1;
	return 0;
}
#endif