/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Anton Blanchard                   2001

   ** NOTE! The following LGPL license applies to the tdb
   ** library. This does NOT imply that all of Samba is released
   ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
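/* Headers needed by the code below: EBUSY (errno.h), malloc (stdlib.h),
 * write/lseek/sysconf (unistd.h), fprintf (stdio.h), nanosleep (time.h).
 * In the Samba tree these normally arrive via "includes.h"; the explicit
 * list here is a minimal standalone approximation. */
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <time.h>
#include "tdb.h"
#include "spinlock.h"

#ifdef USE_SPINLOCKS

/*
 * ARCH SPECIFIC
 */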
#if defined(SPARC_SPINLOCKS)

static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int result;

	asm volatile("ldstub    [%1], %0"
		: "=r" (result)
		: "r" (lock)
		: "memory");

	return (result == 0) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}
#elif defined(POWERPC_SPINLOCKS)

static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int result;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	li		%0,0\n\
	bne-		2f\n\
	li		%0,1\n\
	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r"(result)
	: "r"(lock)
	: "cr0", "memory");

	return (result == 1) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("eieio":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}
#elif defined(INTEL_SPINLOCKS)

static inline int __spin_trylock(spinlock_t *lock)
{
	int oldval;

	asm volatile("xchgl %0,%1"
		: "=r" (oldval), "=m" (*lock)
		: "0" (0)
		: "memory");

	return oldval > 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 1;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 1;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 1);
}
#elif defined(MIPS_SPINLOCKS) && defined(sgi) && (_COMPILER_VERSION >= 730)

/* Implement spinlocks on IRIX using the MIPSPro atomic fetch operations. See
 * sync(3) for the details of the intrinsic operations.
 *
 * "sgi" and "_COMPILER_VERSION" are always defined by MIPSPro.
 */

#if defined(STANDALONE)

/* MIPSPro 7.3 has "__inline" as an extension, but not "inline". */
#define inline __inline

#endif /* STANDALONE */
/* Returns 0 if the lock is acquired, EBUSY otherwise. */
static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int val;

	val = __lock_test_and_set(lock, 1);
	return val == 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	__lock_release(lock);
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	__lock_release(lock);
}

/* Returns 1 if the lock is held, 0 otherwise. */
static inline int __spin_is_locked(spinlock_t *lock)
{
	unsigned int val;

	val = __add_and_fetch(lock, 0);
	return val;
}
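/* __lock_test_and_set, __lock_release and __add_and_fetch are MIPSPro
 * compiler intrinsics (see sync(3) on IRIX). __add_and_fetch(lock, 0) is
 * used here purely as an atomic read of the lock word. */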
#elif defined(MIPS_SPINLOCKS)

static inline unsigned int load_linked(unsigned long addr)
{
	unsigned int res;

	__asm__ __volatile__("ll\t%0,(%1)"
		: "=r" (res)
		: "r" (addr));

	return res;
}

static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
{
	unsigned int res;

	__asm__ __volatile__("sc\t%0,(%2)"
		: "=r" (res)
		: "0" (value), "r" (addr));

	return res;
}

static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int mw;

	do {
		/* the lock pointer is passed as an address value */
		mw = load_linked((unsigned long)lock);
		if (mw)
			return EBUSY;
	} while (!store_conditional((unsigned long)lock, 1));

	asm volatile("":::"memory");

	return 0;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}
#else
#error Need to implement spinlock code in spinlock.c
#endif

/*
 * OS SPECIFIC
 */
static void yield_cpu(void)
{
	struct timespec tm;

#ifdef USE_SCHED_YIELD
	sched_yield();
#else
	/* Linux will busy loop for delays < 2ms on real time tasks */
	tm.tv_sec = 0;
	tm.tv_nsec = 2000000L + 1;
	nanosleep(&tm, NULL);
#endif
}
static int this_is_smp(void)
{
#if defined(HAVE_SYSCONF) && defined(SYSCONF_SC_NPROC_ONLN)
	return (sysconf(_SC_NPROC_ONLN) > 1) ? 1 : 0;
#else
	return 0;
#endif
}
/*
 * GENERIC
 */

static int smp_machine = 0;

static inline void __spin_lock(spinlock_t *lock)
{
	int ntries = 0;

	while(__spin_trylock(lock)) {
		while(__spin_is_locked(lock)) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}
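/* This is the classic test-and-test-and-set pattern: the expensive atomic
 * __spin_trylock runs only when the cheap read-only __spin_is_locked spin
 * says the lock looks free, which keeps the lock's cache line from bouncing
 * between CPUs. On a uniprocessor (smp_machine == 0) spinning is pointless,
 * so the holder of the CPU yields immediately. */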
static void __read_lock(tdb_rwlock_t *rwlock)
{
	int ntries = 0;

	while(1) {
		__spin_lock(&rwlock->lock);

		if (!(rwlock->count & RWLOCK_BIAS)) {
			rwlock->count++;
			__spin_unlock(&rwlock->lock);
			return;
		}

		__spin_unlock(&rwlock->lock);

		while(rwlock->count & RWLOCK_BIAS) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}
static void __write_lock(tdb_rwlock_t *rwlock)
{
	int ntries = 0;

	while(1) {
		__spin_lock(&rwlock->lock);

		if (rwlock->count == 0) {
			rwlock->count |= RWLOCK_BIAS;
			__spin_unlock(&rwlock->lock);
			return;
		}

		__spin_unlock(&rwlock->lock);

		while(rwlock->count != 0) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}
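/* RWLOCK_BIAS is a bit set well above any plausible reader count: a writer
 * marks the lock by OR-ing the bias in, while readers simply increment
 * count. A reader may enter whenever the bias is clear; a writer must wait
 * for count to reach 0, i.e. no readers and no other writer. */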
static void __write_unlock(tdb_rwlock_t *rwlock)
{
	__spin_lock(&rwlock->lock);

#ifdef DEBUG
	if (!(rwlock->count & RWLOCK_BIAS))
		fprintf(stderr, "bug: write_unlock\n");
#endif

	rwlock->count &= ~RWLOCK_BIAS;
	__spin_unlock(&rwlock->lock);
}
static void __read_unlock(tdb_rwlock_t *rwlock)
{
	__spin_lock(&rwlock->lock);

#ifdef DEBUG
	if (!rwlock->count)
		fprintf(stderr, "bug: read_unlock\n");

	if (rwlock->count & RWLOCK_BIAS)
		fprintf(stderr, "bug: read_unlock\n");
#endif

	rwlock->count--;
	__spin_unlock(&rwlock->lock);
}
/* TDB SPECIFIC */

/* lock a list in the database. list -1 is the alloc list */
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
	tdb_rwlock_t *rwlocks;

	if (!tdb->map_ptr) return -1;
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

	switch(rw_type) {
	case F_RDLCK:
		__read_lock(&rwlocks[list+1]);
		break;

	case F_WRLCK:
		__write_lock(&rwlocks[list+1]);
		break;

	default:
		return TDB_ERRCODE(TDB_ERR_LOCK, -1);
	}
	return 0;
}
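/* A hypothetical caller, for illustration only (the bucket computation is
 * sketched, not the actual tdb hash):
 *
 *	int bucket = some_hash(&key) % tdb->header.hash_size;
 *	if (tdb_spinlock(tdb, bucket, F_RDLCK) == 0) {
 *		... walk the chain ...
 *		tdb_spinunlock(tdb, bucket, F_RDLCK);
 *	}
 *
 * list -1 (the alloc list) maps to rwlocks[0], hence the list+1 indexing. */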
/* unlock the database. */
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
	tdb_rwlock_t *rwlocks;

	if (!tdb->map_ptr) return -1;
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

	switch(rw_type) {
	case F_RDLCK:
		__read_unlock(&rwlocks[list+1]);
		break;

	case F_WRLCK:
		__write_unlock(&rwlocks[list+1]);
		break;

	default:
		return TDB_ERRCODE(TDB_ERR_LOCK, -1);
	}

	return 0;
}
int tdb_create_rwlocks(int fd, unsigned int hash_size)
{
	unsigned size, i;
	tdb_rwlock_t *rwlocks;

	size = TDB_SPINLOCK_SIZE(hash_size);
	rwlocks = malloc(size);
	if (!rwlocks)
		return -1;

	for(i = 0; i < hash_size+1; i++) {
		__spin_lock_init(&rwlocks[i].lock);
		rwlocks[i].count = 0;
	}

	/* Write it out (appending to end) */
	if (write(fd, rwlocks, size) != size) {
		free(rwlocks);
		return -1;
	}
	smp_machine = this_is_smp();

	free(rwlocks);
	return 0;
}
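/* The spinlock area lives inside the database file itself, appended after
 * the header at creation time: one tdb_rwlock_t per hash chain plus one
 * extra (index 0) for the alloc list, which is why the loops run
 * hash_size+1 times. Every process that maps the file shares these locks. */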
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
	tdb_rwlock_t *rwlocks;
	unsigned i;

	if (tdb->header.rwlocks == 0) return 0;
	if (!tdb->map_ptr) return -1;

	/* We're mmapped here */
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
	for(i = 0; i < tdb->header.hash_size+1; i++) {
		__spin_lock_init(&rwlocks[i].lock);
		rwlocks[i].count = 0;
	}
	return 0;
}

#else /* !USE_SPINLOCKS */
int tdb_create_rwlocks(int fd, unsigned int hash_size) { return 0; }
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
/* Non-spinlock version: remove spinlock pointer */
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
	tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
				- (char *)&tdb->header);

	tdb->header.rwlocks = 0;
	if (lseek(tdb->fd, off, SEEK_SET) != off
	    || write(tdb->fd, (void *)&tdb->header.rwlocks,
		     sizeof(tdb->header.rwlocks))
	    != sizeof(tdb->header.rwlocks))
		return -1;
	return 0;
}

#endif /* USE_SPINLOCKS */