2 Unix SMB/CIFS implementation.
3 Samba database functions
4 Copyright (C) Anton Blanchard 2001
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
48 #if defined(SPARC_SPINLOCKS)
50 static inline int __spin_trylock(spinlock_t
*lock
)
54 asm volatile("ldstub [%1], %0"
59 return (result
== 0) ? 0 : EBUSY
;
62 static inline void __spin_unlock(spinlock_t
*lock
)
64 asm volatile("":::"memory");
68 static inline void __spin_lock_init(spinlock_t
*lock
)
73 static inline int __spin_is_locked(spinlock_t
*lock
)
78 #elif defined(POWERPC_SPINLOCKS)
80 static inline int __spin_trylock(spinlock_t
*lock
)
97 return (result
== 1) ? 0 : EBUSY
;
100 static inline void __spin_unlock(spinlock_t
*lock
)
102 asm volatile("eieio":::"memory");
106 static inline void __spin_lock_init(spinlock_t
*lock
)
111 static inline int __spin_is_locked(spinlock_t
*lock
)
116 #elif defined(INTEL_SPINLOCKS)
118 static inline int __spin_trylock(spinlock_t
*lock
)
122 asm volatile("xchgl %0,%1"
123 : "=r" (oldval
), "=m" (*lock
)
127 return oldval
> 0 ? 0 : EBUSY
;
130 static inline void __spin_unlock(spinlock_t
*lock
)
132 asm volatile("":::"memory");
136 static inline void __spin_lock_init(spinlock_t
*lock
)
141 static inline int __spin_is_locked(spinlock_t
*lock
)
146 #elif defined(MIPS_SPINLOCKS) && defined(sgi) && (_COMPILER_VERSION >= 730)
148 /* Implement spinlocks on IRIX using the MIPSPro atomic fetch operations. See
149 * sync(3) for the details of the intrinsic operations.
151 * "sgi" and "_COMPILER_VERSION" are always defined by MIPSPro.
154 #if defined(STANDALONE)
156 /* MIPSPro 7.3 has "__inline" as an extension, but not "inline. */
157 #define inline __inline
159 #endif /* STANDALONE */
161 /* Returns 0 if the lock is acquired, EBUSY otherwise. */
162 static inline int __spin_trylock(spinlock_t
*lock
)
165 val
= __lock_test_and_set(lock
, 1);
166 return val
== 0 ? 0 : EBUSY
;
169 static inline void __spin_unlock(spinlock_t
*lock
)
171 __lock_release(lock
);
174 static inline void __spin_lock_init(spinlock_t
*lock
)
176 __lock_release(lock
);
179 /* Returns 1 if the lock is held, 0 otherwise. */
180 static inline int __spin_is_locked(spinlock_t
*lock
)
183 val
= __add_and_fetch(lock
, 0);
187 #elif defined(MIPS_SPINLOCKS)
189 static inline unsigned int load_linked(unsigned long addr
)
193 __asm__
__volatile__("ll\t%0,(%1)"
200 static inline unsigned int store_conditional(unsigned long addr
, unsigned int value
)
204 __asm__
__volatile__("sc\t%0,(%2)"
206 : "0" (value
), "r" (addr
));
210 static inline int __spin_trylock(spinlock_t
*lock
)
215 mw
= load_linked(lock
);
218 } while (!store_conditional(lock
, 1));
220 asm volatile("":::"memory");
225 static inline void __spin_unlock(spinlock_t
*lock
)
227 asm volatile("":::"memory");
231 static inline void __spin_lock_init(spinlock_t
*lock
)
236 static inline int __spin_is_locked(spinlock_t
*lock
)
242 #error Need to implement spinlock code in spinlock.c
/* Give the CPU away so that whoever holds the lock can make progress. */
static void yield_cpu(void)
{
	struct timespec tm;

#ifdef USE_SCHED_YIELD
	sched_yield();
#else
	/* Linux will busy loop for delays < 2ms on real time tasks */
	tm.tv_sec = 0;
	tm.tv_nsec = 2000000L + 1;
	nanosleep(&tm, NULL);
#endif
}
/* Returns 1 when the machine has more than one online processor,
 * 0 otherwise (or when sysconf support is not configured in). */
static int this_is_smp(void)
{
#if defined(HAVE_SYSCONF) && defined(SYSCONF_SC_NPROC_ONLN)
	return (sysconf(_SC_NPROC_ONLN) > 1) ? 1 : 0;
#else
	return 0;
#endif
}
/* Nonzero on SMP boxes (set from this_is_smp() by tdb_create_rwlocks);
 * enables a bounded busy-wait before yielding in the lock loops. */
static int smp_machine = 0;
278 static inline void __spin_lock(spinlock_t
*lock
)
282 while(__spin_trylock(lock
)) {
283 while(__spin_is_locked(lock
)) {
284 if (smp_machine
&& ntries
++ < MAX_BUSY_LOOPS
)
291 static void __read_lock(tdb_rwlock_t
*rwlock
)
296 __spin_lock(&rwlock
->lock
);
298 if (!(rwlock
->count
& RWLOCK_BIAS
)) {
300 __spin_unlock(&rwlock
->lock
);
304 __spin_unlock(&rwlock
->lock
);
306 while(rwlock
->count
& RWLOCK_BIAS
) {
307 if (smp_machine
&& ntries
++ < MAX_BUSY_LOOPS
)
314 static void __write_lock(tdb_rwlock_t
*rwlock
)
319 __spin_lock(&rwlock
->lock
);
321 if (rwlock
->count
== 0) {
322 rwlock
->count
|= RWLOCK_BIAS
;
323 __spin_unlock(&rwlock
->lock
);
327 __spin_unlock(&rwlock
->lock
);
329 while(rwlock
->count
!= 0) {
330 if (smp_machine
&& ntries
++ < MAX_BUSY_LOOPS
)
337 static void __write_unlock(tdb_rwlock_t
*rwlock
)
339 __spin_lock(&rwlock
->lock
);
342 if (!(rwlock
->count
& RWLOCK_BIAS
))
343 fprintf(stderr
, "bug: write_unlock\n");
346 rwlock
->count
&= ~RWLOCK_BIAS
;
347 __spin_unlock(&rwlock
->lock
);
350 static void __read_unlock(tdb_rwlock_t
*rwlock
)
352 __spin_lock(&rwlock
->lock
);
356 fprintf(stderr
, "bug: read_unlock\n");
358 if (rwlock
->count
& RWLOCK_BIAS
)
359 fprintf(stderr
, "bug: read_unlock\n");
363 __spin_unlock(&rwlock
->lock
);
368 /* lock a list in the database. list -1 is the alloc list */
369 int tdb_spinlock(TDB_CONTEXT
*tdb
, int list
, int rw_type
)
371 tdb_rwlock_t
*rwlocks
;
373 if (!tdb
->map_ptr
) return -1;
374 rwlocks
= (tdb_rwlock_t
*)((char *)tdb
->map_ptr
+ tdb
->header
.rwlocks
);
378 __read_lock(&rwlocks
[list
+1]);
382 __write_lock(&rwlocks
[list
+1]);
386 return TDB_ERRCODE(TDB_ERR_LOCK
, -1);
391 /* unlock the database. */
392 int tdb_spinunlock(TDB_CONTEXT
*tdb
, int list
, int rw_type
)
394 tdb_rwlock_t
*rwlocks
;
396 if (!tdb
->map_ptr
) return -1;
397 rwlocks
= (tdb_rwlock_t
*)((char *)tdb
->map_ptr
+ tdb
->header
.rwlocks
);
401 __read_unlock(&rwlocks
[list
+1]);
405 __write_unlock(&rwlocks
[list
+1]);
409 return TDB_ERRCODE(TDB_ERR_LOCK
, -1);
415 int tdb_create_rwlocks(int fd
, unsigned int hash_size
)
418 tdb_rwlock_t
*rwlocks
;
420 size
= TDB_SPINLOCK_SIZE(hash_size
);
421 rwlocks
= malloc(size
);
425 for(i
= 0; i
< hash_size
+1; i
++) {
426 __spin_lock_init(&rwlocks
[i
].lock
);
427 rwlocks
[i
].count
= 0;
430 /* Write it out (appending to end) */
431 if (write(fd
, rwlocks
, size
) != size
) {
435 smp_machine
= this_is_smp();
440 int tdb_clear_spinlocks(TDB_CONTEXT
*tdb
)
442 tdb_rwlock_t
*rwlocks
;
445 if (tdb
->header
.rwlocks
== 0) return 0;
446 if (!tdb
->map_ptr
) return -1;
448 /* We're mmapped here */
449 rwlocks
= (tdb_rwlock_t
*)((char *)tdb
->map_ptr
+ tdb
->header
.rwlocks
);
450 for(i
= 0; i
< tdb
->header
.hash_size
+1; i
++) {
451 __spin_lock_init(&rwlocks
[i
].lock
);
452 rwlocks
[i
].count
= 0;
/* Stubs for builds without spinlock support: creating the lock area is
 * a no-op success, and taking/releasing a spinlock always fails. */
int tdb_create_rwlocks(int fd, unsigned int hash_size) { return 0; }
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
461 /* Non-spinlock version: remove spinlock pointer */
462 int tdb_clear_spinlocks(TDB_CONTEXT
*tdb
)
464 tdb_off off
= (tdb_off
)((char *)&tdb
->header
.rwlocks
465 - (char *)&tdb
->header
);
467 tdb
->header
.rwlocks
= 0;
468 if (lseek(tdb
->fd
, off
, SEEK_SET
) != off
469 || write(tdb
->fd
, (void *)&tdb
->header
.rwlocks
,
470 sizeof(tdb
->header
.rwlocks
))
471 != sizeof(tdb
->header
.rwlocks
))