28 #if defined(SPARC_SPINLOCKS)
/*
 * SPARC: try to acquire *lock without blocking.
 * "ldstub" atomically loads the lock byte and stores 0xff into it
 * (a test-and-set primitive); the previous value lands in "result".
 * Returns 0 on success, EBUSY if the lock was already held.
 * NOTE(review): interior lines (declaration of result, asm constraint
 * lists) are missing from this extract -- do not edit from this view.
 */
30 static inline int __spin_trylock(spinlock_t
*lock
)
34 asm volatile("ldstub [%1], %0"
/* result == 0 means the byte was clear, so we now own the lock */
39 return (result
== 0) ? 0 : EBUSY
;
/* SPARC: release *lock (body not visible in this extract). */
42 static inline void __spin_unlock(spinlock_t
*lock
)
/* SPARC: put *lock into the unlocked state (body not visible). */
47 static inline void __spin_lock_init(spinlock_t
*lock
)
/* SPARC: non-zero when *lock is currently held (body not visible). */
52 static inline int __spin_is_locked(spinlock_t
*lock
)
57 #elif defined(POWERPC_SPINLOCKS)
/*
 * PowerPC: try to acquire *lock without blocking.
 * Returns 0 on success, EBUSY if already held.
 * NOTE(review): the asm body is missing here -- presumably a
 * lwarx/stwcx. (load-reserve / store-conditional) sequence leaving the
 * old lock value in "result"; confirm against the full file.
 */
59 static inline int __spin_trylock(spinlock_t
*lock
)
63 __asm__
__volatile__ (
/* result == 0 means we observed the lock free and claimed it */
76 return (result
== 0) ? 0 : EBUSY
;
/* PowerPC: release *lock (body not visible in this extract). */
79 static inline void __spin_unlock(spinlock_t
*lock
)
/* PowerPC: put *lock into the unlocked state (body not visible). */
85 static inline void __spin_lock_init(spinlock_t
*lock
)
/* PowerPC: non-zero when *lock is currently held (body not visible). */
90 static inline int __spin_is_locked(spinlock_t
*lock
)
95 #elif defined(INTEL_SPINLOCKS)
/*
 * x86: try to acquire *lock without blocking.
 * "xchgl" atomically swaps oldval with *lock; the input constraint
 * (missing from this extract) presumably seeds oldval with 0 so the
 * swap marks the lock held.  The return expression implies the
 * UNLOCKED state is a positive value: oldval > 0 means the lock was
 * free and is now ours (return 0), otherwise EBUSY.
 */
97 static inline int __spin_trylock(spinlock_t
*lock
)
101 asm volatile("xchgl %0,%1"
102 : "=r" (oldval
), "=m" (*lock
)
104 return oldval
> 0 ? 0 : EBUSY
;
/* x86: release *lock (body not visible in this extract). */
107 static inline void __spin_unlock(spinlock_t
*lock
)
/* x86: put *lock into the unlocked state (body not visible). */
112 static inline void __spin_lock_init(spinlock_t
*lock
)
/* x86: non-zero when *lock is currently held (body not visible). */
117 static inline int __spin_is_locked(spinlock_t
*lock
)
122 #elif defined(MIPS_SPINLOCKS)
/*
 * MIPS: load-linked.  "ll" reads the word at addr and opens an LL/SC
 * atomic sequence; the matching store_conditional() only succeeds if
 * nothing wrote that location in between.
 * NOTE(review): constraint lists are missing from this extract.
 */
124 static inline unsigned int load_linked(unsigned long addr
)
128 __asm__
__volatile__("ll\t%0,(%1)"
/*
 * MIPS: store-conditional.  "sc" writes value to addr only if the
 * reservation from the preceding load_linked() is still intact, and
 * returns the success flag in %0 (non-zero on success, 0 on failure).
 */
135 static inline unsigned int store_conditional(unsigned long addr
, unsigned int value
)
139 __asm__
__volatile__("sc\t%0,(%2)"
141 : "0" (value
), "r" (addr
));
/*
 * MIPS: try to acquire *lock without blocking using an LL/SC loop:
 * load-linked the lock word into mw, then store-conditional 1 back,
 * retrying until the SC succeeds.
 * NOTE(review): the lines testing mw (and the EBUSY return for an
 * already-held lock) are missing from this extract -- confirm against
 * the full file.
 */
145 static inline int __spin_trylock(spinlock_t
*lock
)
150 mw
= load_linked(lock
);
/* retry the LL/SC pair until the conditional store lands */
153 } while (!store_conditional(lock
, 1));
/* MIPS: release *lock (body not visible in this extract). */
158 static inline void __spin_unlock(spinlock_t
*lock
)
/* MIPS: put *lock into the unlocked state (body not visible). */
163 static inline void __spin_lock_init(spinlock_t
*lock
)
/* MIPS: non-zero when *lock is currently held (body not visible). */
168 static inline int __spin_is_locked(spinlock_t
*lock
)
174 #error Need to implement spinlock code in spinlock.c
/*
 * Give up the CPU while waiting for a contended lock.
 * With USE_SCHED_YIELD this presumably calls sched_yield() (that line
 * is missing from this extract); otherwise it nanosleep()s for just
 * over 2ms, because Linux busy-loops real-time delays shorter than 2ms
 * instead of actually sleeping.
 * NOTE(review): the initialisation of tm.tv_sec is not visible here.
 */
181 static void yield_cpu(void)
185 #ifdef USE_SCHED_YIELD
188 /* Linux will busy loop for delays < 2ms on real time tasks */
190 tm
.tv_nsec
= 2000000L + 1;
191 nanosleep(&tm
, NULL
);
/* Detect whether this machine has multiple CPUs (body not visible);
 * result is cached in smp_machine below. */
195 static int this_is_smp(void)
/* Cached SMP flag, set once in tdb_create_rwlocks() from this_is_smp().
 * When non-zero, lock waiters busy-spin up to MAX_BUSY_LOOPS before
 * yielding; on uniprocessors spinning is pointless, so they yield
 * immediately. */
204 static int smp_machine
= 0;
/*
 * Acquire *lock, blocking until it is ours.
 * Outer loop: retry __spin_trylock until it returns 0 (success).
 * Inner loop: while the lock stays held, busy-spin up to MAX_BUSY_LOOPS
 * iterations on SMP machines; the else branch (missing from this
 * extract) presumably calls yield_cpu() -- confirm against full file.
 * NOTE(review): the declaration/reset of ntries is not visible here.
 */
206 static inline void __spin_lock(spinlock_t
*lock
)
210 while(__spin_trylock(lock
)) {
211 while(__spin_is_locked(lock
)) {
212 if (smp_machine
&& ntries
++ < MAX_BUSY_LOOPS
)
/*
 * Take a read (shared) lock on *rwlock.
 * The rwlock is a spinlock-protected counter; the RWLOCK_BIAS bit in
 * ->count marks a writer.  Under the spinlock: if no writer bit is set,
 * (missing line presumably increments the reader count) and we return
 * with only the spinlock dropped.  Otherwise release the spinlock and
 * wait for the writer bit to clear -- spinning up to MAX_BUSY_LOOPS on
 * SMP (else branch missing, presumably yield_cpu()) -- then retry.
 * NOTE(review): several interior lines are absent from this extract.
 */
219 static void __read_lock(rwlock_t
*rwlock
)
224 __spin_lock(&rwlock
->lock
);
/* no writer holds or is waiting for the lock */
226 if (!(rwlock
->count
& RWLOCK_BIAS
)) {
228 __spin_unlock(&rwlock
->lock
);
/* writer present: back off and wait for RWLOCK_BIAS to clear */
232 __spin_unlock(&rwlock
->lock
);
234 while(rwlock
->count
& RWLOCK_BIAS
) {
235 if (smp_machine
&& ntries
++ < MAX_BUSY_LOOPS
)
/*
 * Take a write (exclusive) lock on *rwlock.
 * Under the spinlock: if ->count is 0 (no readers, no writer), set the
 * RWLOCK_BIAS writer bit and return with the spinlock dropped.
 * Otherwise release the spinlock and wait for the count to drain to
 * zero -- spinning up to MAX_BUSY_LOOPS on SMP (else branch missing,
 * presumably yield_cpu()) -- then retry.
 * NOTE(review): several interior lines are absent from this extract.
 */
242 static void __write_lock(rwlock_t
*rwlock
)
247 __spin_lock(&rwlock
->lock
);
/* completely free: claim exclusive ownership */
249 if (rwlock
->count
== 0) {
250 rwlock
->count
|= RWLOCK_BIAS
;
251 __spin_unlock(&rwlock
->lock
);
/* contended: back off and wait for all holders to finish */
255 __spin_unlock(&rwlock
->lock
);
257 while(rwlock
->count
!= 0) {
258 if (smp_machine
&& ntries
++ < MAX_BUSY_LOOPS
)
/*
 * Release a write lock: clear the RWLOCK_BIAS writer bit under the
 * spinlock.  If the bit was not set the caller never held the write
 * lock -- report the inconsistency on stderr rather than crashing.
 */
265 static void __write_unlock(rwlock_t
*rwlock
)
267 __spin_lock(&rwlock
->lock
);
/* sanity check: we must actually hold the writer bit */
270 if (!(rwlock
->count
& RWLOCK_BIAS
))
271 fprintf(stderr
, "bug: write_unlock\n");
274 rwlock
->count
&= ~RWLOCK_BIAS
;
275 __spin_unlock(&rwlock
->lock
);
/*
 * Release a read lock: drop the reader count under the spinlock.
 * Two sanity checks report inconsistencies on stderr: the first
 * (condition missing from this extract, presumably an underflowing
 * decrement of ->count) catches an unlock with no readers; the second
 * catches a reader unlock while the writer bit is set.
 */
278 static void __read_unlock(rwlock_t
*rwlock
)
280 __spin_lock(&rwlock
->lock
);
284 fprintf(stderr
, "bug: read_unlock\n");
/* a writer bit must never be set while readers hold the lock */
286 if (rwlock
->count
& RWLOCK_BIAS
)
287 fprintf(stderr
, "bug: read_unlock\n");
291 __spin_unlock(&rwlock
->lock
);
296 /* lock a list in the database. list -1 is the alloc list */
/*
 * Spin-lock one hash-chain list of the database.
 * Requires the database to be mmapped (fails with -1 otherwise); the
 * rwlock array lives inside the mapped file at offset
 * tdb->header.rwlocks.  Slot [list+1] is used because slot 0 belongs
 * to the allocation list (list == -1).
 * NOTE(review): the dispatch on rw_type (presumably an F_RDLCK /
 * F_WRLCK switch) is missing from this extract; unknown rw_type values
 * fall through to the TDB_ERR_LOCK return.
 */
297 int tdb_spinlock(TDB_CONTEXT
*tdb
, int list
, int rw_type
)
301 if (!tdb
->map_ptr
) return -1;
302 rwlocks
= (rwlock_t
*)((char *)tdb
->map_ptr
+ tdb
->header
.rwlocks
);
306 __read_lock(&rwlocks
[list
+1]);
310 __write_lock(&rwlocks
[list
+1]);
/* unrecognised rw_type */
314 return TDB_ERRCODE(TDB_ERR_LOCK
, -1);
319 /* unlock the database. */
/*
 * Release the spin lock taken by tdb_spinlock() on list [list+1].
 * Mirrors tdb_spinlock(): requires the mmapped rwlock array and
 * dispatches on rw_type (switch lines missing from this extract);
 * unknown rw_type values return TDB_ERR_LOCK.
 */
320 int tdb_spinunlock(TDB_CONTEXT
*tdb
, int list
, int rw_type
)
324 if (!tdb
->map_ptr
) return -1;
325 rwlocks
= (rwlock_t
*)((char *)tdb
->map_ptr
+ tdb
->header
.rwlocks
);
329 __read_unlock(&rwlocks
[list
+1]);
333 __write_unlock(&rwlocks
[list
+1]);
/* unrecognised rw_type */
337 return TDB_ERRCODE(TDB_ERR_LOCK
, -1);
/*
 * Create the spinlock area for a new database: allocate hash_size+1
 * rwlocks (one per hash chain plus one for the alloc list), initialise
 * each to the unlocked state with a zero reader count, and append the
 * array to the file at fd.  Also caches the SMP flag used by the lock
 * busy-wait loops.
 * NOTE(review): error-return and free() lines are missing from this
 * extract -- confirm cleanup behaviour against the full file.
 */
343 int tdb_create_rwlocks(int fd
, unsigned int hash_size
)
348 size
= (hash_size
+ 1) * sizeof(rwlock_t
);
349 rwlocks
= malloc(size
);
353 for(i
= 0; i
< hash_size
+1; i
++) {
354 __spin_lock_init(&rwlocks
[i
].lock
);
355 rwlocks
[i
].count
= 0;
358 /* Write it out (appending to end) */
359 if (write(fd
, rwlocks
, size
) != size
) {
/* remember whether busy-spinning is worthwhile on this machine */
363 smp_machine
= this_is_smp();
/*
 * Reset every spinlock in an existing database to the unlocked state
 * (e.g. after a crash left stale locks behind).  No-op (returns 0) if
 * the database has no spinlock area; fails with -1 if it is not
 * mmapped, since the locks are modified in place through the mapping.
 */
368 int tdb_clear_spinlocks(TDB_CONTEXT
*tdb
)
373 if (tdb
->header
.rwlocks
== 0) return 0;
374 if (!tdb
->map_ptr
) return -1;
376 /* We're mmapped here */
377 rwlocks
= (rwlock_t
*)((char *)tdb
->map_ptr
+ tdb
->header
.rwlocks
);
/* one lock per hash chain plus the alloc-list lock in slot 0 */
378 for(i
= 0; i
< tdb
->header
.hash_size
+1; i
++) {
379 __spin_lock_init(&rwlocks
[i
].lock
);
380 rwlocks
[i
].count
= 0;
/*
 * Spinlock support compiled out: there is no rwlock area to create,
 * so this succeeds without touching the file at fd.
 */
int tdb_create_rwlocks(int fd, unsigned int hash_size)
{
	return 0;
}
386 int tdb_spinlock(TDB_CONTEXT
*tdb
, int list
, int rw_type
) { return -1; }
387 int tdb_spinunlock(TDB_CONTEXT
*tdb
, int list
, int rw_type
) { return -1; }
389 /* Non-spinlock version: remove spinlock pointer */
/*
 * Build compiled without spinlocks: permanently disable the spinlock
 * area of a database that has one.  Computes the byte offset of the
 * rwlocks field within the on-disk header, zeroes the in-memory copy,
 * then seeks to that offset and writes the zeroed field back so the
 * file agrees.
 * NOTE(review): the trailing lines (error return on failed write and
 * the success return) run past this extract.
 */
390 int tdb_clear_spinlocks(TDB_CONTEXT
*tdb
)
/* offset of header.rwlocks from the start of the header/file */
392 tdb_off off
= (tdb_off
)((char *)&tdb
->header
.rwlocks
393 - (char *)&tdb
->header
);
395 tdb
->header
.rwlocks
= 0;
/* persist the cleared field at its on-disk position */
396 if (lseek(tdb
->fd
, off
, SEEK_SET
) != off
397 || write(tdb
->fd
, (void *)&tdb
->header
.rwlocks
,
398 sizeof(tdb
->header
.rwlocks
))
399 != sizeof(tdb
->header
.rwlocks
))