1 /* libc-internal interface for mutex locks. NPTL version.
2 Copyright (C) 1996-2001, 2002, 2003 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Library General Public License as
7 published by the Free Software Foundation; either version 2 of the
8 License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Library General Public License for more details.
15 You should have received a copy of the GNU Library General Public
16 License along with the GNU C Library; see the file COPYING.LIB. If not,
17 write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
18 Boston, MA 02111-1307, USA. */
20 #ifndef _BITS_LIBC_LOCK_H
21 #define _BITS_LIBC_LOCK_H 1
28 /* Fortunately Linux now has a mean to do locking which is realtime
29 safe without the aid of the thread library. We also need no fancy
30 options like error checking mutexes etc. We only need simple
31 locks, maybe recursive. This can be easily and cheaply implemented
32 using futexes. We will use them everywhere except in ld.so since
33 ld.so might be used on old kernels with a different libc.so. */
35 # include <lowlevellock.h>
37 # include <pthread-functions.h>
/* Lock types.  Inside libc/libpthread the plain lock is a bare futex
   word (int) and the recursive lock is an explicit owner/count record;
   elsewhere the pthread types (or opaque structs) are used.  The
   #else/#endif directives were lost in extraction and are restored here
   from the numbering gaps.  */
#if defined _LIBC || defined _IO_MTSAFE_IO
# if (defined NOT_IN_libc && !defined IS_IN_libpthread) || !defined _LIBC
typedef pthread_mutex_t __libc_lock_t;
typedef struct { pthread_mutex_t mutex; } __libc_lock_recursive_t;
# else
typedef int __libc_lock_t;
typedef struct { int lock; int cnt; void *owner; } __libc_lock_recursive_t;
# endif
typedef struct { pthread_mutex_t mutex; } __rtld_lock_recursive_t;
# ifdef _LIBC
typedef pthread_rwlock_t __libc_rwlock_t;
# else
typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
# endif
#else
typedef struct __libc_lock_opaque__ __libc_lock_t;
typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
#endif

/* Type for key to thread-specific data.  */
typedef pthread_key_t __libc_key_t;
/* Define a lock variable NAME with storage class CLASS.  The lock must be
   initialized with __libc_lock_init before it can be used (or define it
   with __libc_lock_define_initialized, below).  Use `extern' for CLASS to
   declare a lock defined in another module.  In public structure
   definitions you must use a pointer to the lock structure (i.e., NAME
   begins with a `*'), because its storage size will not be known outside
   of this module.  */
#define __libc_lock_define(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
#define __libc_rwlock_define(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME;
#define __libc_lock_define_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME;
#define __rtld_lock_define_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME;
/* Define an initialized lock variable NAME with storage
   class CLASS.

   For the C library we take a deeper look at the initializer.  For
   this implementation all fields are initialized to zero.  Therefore
   we don't initialize the variable which allows putting it into the
   BSS section.  (Except on PA-RISC and other odd architectures, where
   initialized locks must be set to one due to the lack of normal
   atomic operations.) */

#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if LLL_LOCK_INITIALIZER == 0
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
# else
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME = LLL_LOCK_INITIALIZER;
# endif
#else
# if __LT_SPINLOCK_INIT == 0
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
# else
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
# endif
#endif

#define __libc_rwlock_define_initialized(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;
/* Define an initialized recursive lock variable NAME with storage
   class CLASS.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if LLL_LOCK_INITIALIZER == 0
#  define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME;
# else
#  define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
# endif
/* Matches the { int lock; int cnt; void *owner; } layout above.  */
# define _LIBC_LOCK_RECURSIVE_INITIALIZER \
  { LLL_LOCK_INITIALIZER, 0, NULL }
#else
# define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
# define _LIBC_LOCK_RECURSIVE_INITIALIZER \
  {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
#endif

#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
#define _RTLD_LOCK_RECURSIVE_INITIALIZER \
  {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
/* If we check for a weakly referenced symbol and then perform a
   normal jump to it the code generated for some platforms in case of
   PIC is unnecessarily slow.  What would happen is that the function
   is first referenced as data and then it is called indirectly
   through the PLT.  We can make this a direct jump.  */
#ifdef __PIC__
# define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (__extension__ ({ __typeof (FUNC) *_fn = (FUNC); \
                    _fn != NULL ? (*_fn) ARGS : ELSE; }))
#else
# define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (FUNC != NULL ? FUNC ARGS : ELSE)
#endif

/* Call thread functions through the function pointer table.  */
#if defined SHARED && !defined NOT_IN_libc
# define PTF(NAME) __libc_pthread_functions.ptr_##NAME
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
   (PTF(FUNC) != NULL ? PTF(FUNC) ARGS : ELSE)
#else
# define PTF(NAME) NAME
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
   __libc_maybe_call (FUNC, ARGS, ELSE)
#endif
/* Initialize the named lock variable, leaving it in a consistent, unlocked
   state.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_init(NAME) ((NAME) = LLL_LOCK_INITIALIZER, 0)
#else
# define __libc_lock_init(NAME) \
  __libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0)
#endif
#define __libc_rwlock_init(NAME) \
  __libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0)
/* Same as last but this time we initialize a recursive mutex.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_init_recursive(NAME) \
  ((NAME) = (__libc_lock_recursive_t) _LIBC_LOCK_RECURSIVE_INITIALIZER, 0)
#else
# define __libc_lock_init_recursive(NAME) \
  do {                                                                        \
    if (__pthread_mutex_init != NULL)                                         \
      {                                                                       \
        pthread_mutexattr_t __attr;                                           \
        __pthread_mutexattr_init (&__attr);                                   \
        __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP);    \
        __pthread_mutex_init (&(NAME).mutex, &__attr);                        \
        __pthread_mutexattr_destroy (&__attr);                                \
      }                                                                       \
  } while (0)
#endif

#define __rtld_lock_init_recursive(NAME) \
  do {                                                                        \
    if (__pthread_mutex_init != NULL)                                         \
      {                                                                       \
        pthread_mutexattr_t __attr;                                           \
        __pthread_mutexattr_init (&__attr);                                   \
        __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP);    \
        __pthread_mutex_init (&(NAME).mutex, &__attr);                        \
        __pthread_mutexattr_destroy (&__attr);                                \
      }                                                                       \
  } while (0)
/* Finalize the named lock variable, which must be locked.  It cannot be
   used again until __libc_lock_init is called again on it.  This must be
   called on a lock variable before the containing storage is reused.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_fini(NAME) ((void) 0)
#else
# define __libc_lock_fini(NAME) \
  __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
#endif
#define __libc_rwlock_fini(NAME) \
  __libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0)

/* Finalize recursive named lock.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_fini_recursive(NAME) ((void) 0)
#else
# define __libc_lock_fini_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
#endif
/* Lock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_lock(NAME) \
  ({ lll_lock (NAME); 0; })
#else
# define __libc_lock_lock(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
#endif
#define __libc_rwlock_rdlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_rdlock, (&(NAME)), 0)
#define __libc_rwlock_wrlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_wrlock, (&(NAME)), 0)

/* Lock the recursive named lock variable.  Only the first acquisition
   by a thread takes the low-level lock; nested acquisitions just bump
   the count.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_lock_recursive(NAME) \
  do {                                                                        \
    void *self = THREAD_SELF;                                                 \
    if ((NAME).owner != self)                                                 \
      {                                                                       \
        lll_lock ((NAME).lock);                                               \
        (NAME).owner = self;                                                  \
      }                                                                       \
    ++(NAME).cnt;                                                             \
  } while (0)
#else
# define __libc_lock_lock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
#endif
/* Try to lock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_trylock(NAME) \
  lll_trylock (NAME)
#else
# define __libc_lock_trylock(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
#endif
#define __libc_rwlock_tryrdlock(NAME) \
  __libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0)
#define __libc_rwlock_trywrlock(NAME) \
  __libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0)

/* Try to lock the recursive named lock variable.  Returns 0 on
   success (first or nested acquisition), EBUSY if another thread
   holds the lock.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_trylock_recursive(NAME) \
  ({                                                                          \
    int result = 0;                                                           \
    void *self = THREAD_SELF;                                                 \
    if ((NAME).owner != self)                                                 \
      {                                                                       \
        if (lll_trylock ((NAME).lock) == 0)                                   \
          {                                                                   \
            (NAME).owner = self;                                              \
            (NAME).cnt = 1;                                                   \
          }                                                                   \
        else                                                                  \
          result = EBUSY;                                                     \
      }                                                                       \
    else                                                                      \
      ++(NAME).cnt;                                                           \
    result;                                                                   \
  })
#else
# define __libc_lock_trylock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
#endif

#define __rtld_lock_trylock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)
/* Unlock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_unlock(NAME) \
  lll_unlock (NAME)
#else
# define __libc_lock_unlock(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
#endif
#define __libc_rwlock_unlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_unlock, (&(NAME)), 0)

/* Unlock the recursive named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
/* We do no error checking here.  */
# define __libc_lock_unlock_recursive(NAME) \
  do {                                                                        \
    if (--(NAME).cnt == 0)                                                    \
      {                                                                       \
        (NAME).owner = NULL;                                                  \
        lll_unlock ((NAME).lock);                                             \
      }                                                                       \
  } while (0)
#else
# define __libc_lock_unlock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
#endif
/* ld.so recursive locks: in the shared libc the operations are routed
   through function pointers in _rtld_global so libpthread can replace
   them; the defaults just adjust the mutex's recursion count.  */
#if defined _LIBC && defined SHARED
# define __rtld_lock_default_lock_recursive(lock) \
  ++((pthread_mutex_t *)(lock))->__data.__count;

# define __rtld_lock_default_unlock_recursive(lock) \
  --((pthread_mutex_t *)(lock))->__data.__count;

# define __rtld_lock_lock_recursive(NAME) \
  GL(dl_rtld_lock_recursive) (&(NAME).mutex)

# define __rtld_lock_unlock_recursive(NAME) \
  GL(dl_rtld_unlock_recursive) (&(NAME).mutex)
#else
# define __rtld_lock_lock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)

# define __rtld_lock_unlock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME).mutex), 0)
#endif
/* Define once control variable.  */
#if PTHREAD_ONCE_INIT == 0
/* Special case for static variables where we can avoid the initialization
   if it is zero.  */
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME
#else
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME = PTHREAD_ONCE_INIT
#endif

/* Call handler iff the first call.  Without a thread library the
   control variable is flagged by hand so the handler runs only once.  */
#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
  do {                                                                        \
    if (PTF(__pthread_once) != NULL)                                          \
      PTF(__pthread_once) (&(ONCE_CONTROL), INIT_FUNCTION);                   \
    else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) {                           \
      INIT_FUNCTION ();                                                       \
      (ONCE_CONTROL) |= 2;                                                    \
    }                                                                         \
  } while (0)
/* Note that for I/O cleanup handling we are using the old-style
   cancel handling.  It does not have to be integrated with C++ since
   no C++ code is called in the middle.  The old-style handling is
   faster and the support is not going away.  */
extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
                                   void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
                                  int execute);
extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
                                         void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
                                          int execute);
/* Start critical region with cleanup.  Opens a brace that the matching
   __libc_cleanup_region_end closes; _buffer/_avail stay in scope between
   the two macros.  */
#define __libc_cleanup_region_start(DOIT, FCT, ARG) \
  { struct _pthread_cleanup_buffer _buffer;                                   \
    int _avail;                                                               \
    if (DOIT) {                                                               \
      _avail = PTF(_pthread_cleanup_push_defer) != NULL;                      \
      if (_avail) {                                                           \
        PTF(_pthread_cleanup_push_defer) (&_buffer, FCT, ARG);                \
      } else {                                                                \
        _buffer.__routine = (FCT);                                            \
        _buffer.__arg = (ARG);                                                \
      }                                                                       \
    } else {                                                                  \
      _avail = 0;                                                             \
    }

/* End critical region with cleanup.  */
#define __libc_cleanup_region_end(DOIT) \
    if (_avail) {                                                             \
      PTF(_pthread_cleanup_pop_restore) (&_buffer, DOIT);                     \
    } else if (DOIT)                                                          \
      _buffer.__routine (_buffer.__arg);                                      \
  }

/* Sometimes we have to exit the block in the middle.  */
#define __libc_cleanup_end(DOIT) \
    if (_avail) {                                                             \
      PTF(_pthread_cleanup_pop_restore) (&_buffer, DOIT);                     \
    } else if (DOIT)                                                          \
      _buffer.__routine (_buffer.__arg)
407 /* Normal cleanup handling, based on C cleanup attribute. */
409 __libc_cleanup_routine (struct __pthread_cleanup_frame
*f
)
412 f
->__cancel_routine (f
->__cancel_arg
);
415 #define __libc_cleanup_push(fct, arg) \
417 struct __pthread_cleanup_frame __clframe \
418 __attribute__ ((__cleanup__ (__libc_cleanup_routine))) \
419 = { .__cancel_routine = (fct), .__cancel_arg = (arg), \
422 #define __libc_cleanup_pop(execute) \
423 __clframe.__do_it = (execute); \
/* Create thread-specific key.  */
#define __libc_key_create(KEY, DESTRUCTOR) \
  __libc_ptf_call (__pthread_key_create, (KEY, DESTRUCTOR), 1)

/* Get thread-specific data.  */
#define __libc_getspecific(KEY) \
  __libc_ptf_call (__pthread_getspecific, (KEY), NULL)

/* Set thread-specific data.  */
#define __libc_setspecific(KEY, VALUE) \
  __libc_ptf_call (__pthread_setspecific, (KEY, VALUE), 0)
/* Register handlers to execute before and after `fork'.  Note that the
   last parameter is NULL.  The handlers registered by the libc are
   never removed so this is OK.  */
#define __libc_atfork(PREPARE, PARENT, CHILD) \
  __register_atfork (PREPARE, PARENT, CHILD, NULL)
extern int __register_atfork (void (*__prepare) (void),
                              void (*__parent) (void),
                              void (*__child) (void),
                              void *__dso_handle);
450 /* Functions that are used by this file and are internal to the GNU C
453 extern int __pthread_mutex_init (pthread_mutex_t
*__mutex
,
454 __const pthread_mutexattr_t
*__mutex_attr
);
456 extern int __pthread_mutex_destroy (pthread_mutex_t
*__mutex
);
458 extern int __pthread_mutex_trylock (pthread_mutex_t
*__mutex
);
460 extern int __pthread_mutex_lock (pthread_mutex_t
*__mutex
);
462 extern int __pthread_mutex_unlock (pthread_mutex_t
*__mutex
);
464 extern int __pthread_mutexattr_init (pthread_mutexattr_t
*__attr
);
466 extern int __pthread_mutexattr_destroy (pthread_mutexattr_t
*__attr
);
468 extern int __pthread_mutexattr_settype (pthread_mutexattr_t
*__attr
,
472 extern int __pthread_rwlock_init (pthread_rwlock_t
*__rwlock
,
473 __const pthread_rwlockattr_t
*__attr
);
475 extern int __pthread_rwlock_destroy (pthread_rwlock_t
*__rwlock
);
477 extern int __pthread_rwlock_rdlock (pthread_rwlock_t
*__rwlock
);
479 extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t
*__rwlock
);
481 extern int __pthread_rwlock_wrlock (pthread_rwlock_t
*__rwlock
);
483 extern int __pthread_rwlock_trywrlock (pthread_rwlock_t
*__rwlock
);
485 extern int __pthread_rwlock_unlock (pthread_rwlock_t
*__rwlock
);
488 extern int __pthread_key_create (pthread_key_t
*__key
,
489 void (*__destr_function
) (void *));
491 extern int __pthread_setspecific (pthread_key_t __key
,
492 __const
void *__pointer
);
494 extern void *__pthread_getspecific (pthread_key_t __key
);
496 extern int __pthread_once (pthread_once_t
*__once_control
,
497 void (*__init_routine
) (void));
499 extern int __pthread_atfork (void (*__prepare
) (void),
500 void (*__parent
) (void),
501 void (*__child
) (void));
505 /* Make the pthread functions weak so that we can elide them from
506 single-threaded processes. */
507 #ifndef __NO_WEAK_PTHREAD_ALIASES
512 # define BP_SYM (sym) sym
514 weak_extern (BP_SYM (__pthread_mutex_init
))
515 weak_extern (BP_SYM (__pthread_mutex_destroy
))
516 weak_extern (BP_SYM (__pthread_mutex_lock
))
517 weak_extern (BP_SYM (__pthread_mutex_trylock
))
518 weak_extern (BP_SYM (__pthread_mutex_unlock
))
519 weak_extern (BP_SYM (__pthread_mutexattr_init
))
520 weak_extern (BP_SYM (__pthread_mutexattr_destroy
))
521 weak_extern (BP_SYM (__pthread_mutexattr_settype
))
522 weak_extern (BP_SYM (__pthread_rwlock_init
))
523 weak_extern (BP_SYM (__pthread_rwlock_destroy
))
524 weak_extern (BP_SYM (__pthread_rwlock_rdlock
))
525 weak_extern (BP_SYM (__pthread_rwlock_tryrdlock
))
526 weak_extern (BP_SYM (__pthread_rwlock_wrlock
))
527 weak_extern (BP_SYM (__pthread_rwlock_trywrlock
))
528 weak_extern (BP_SYM (__pthread_rwlock_unlock
))
529 weak_extern (BP_SYM (__pthread_key_create
))
530 weak_extern (BP_SYM (__pthread_setspecific
))
531 weak_extern (BP_SYM (__pthread_getspecific
))
532 weak_extern (BP_SYM (__pthread_once
))
533 weak_extern (__pthread_initialize
)
534 weak_extern (__pthread_atfork
)
535 weak_extern (BP_SYM (_pthread_cleanup_push_defer
))
536 weak_extern (BP_SYM (_pthread_cleanup_pop_restore
))
537 weak_extern (BP_SYM (pthread_setcancelstate
))
539 # pragma weak __pthread_mutex_init
540 # pragma weak __pthread_mutex_destroy
541 # pragma weak __pthread_mutex_lock
542 # pragma weak __pthread_mutex_trylock
543 # pragma weak __pthread_mutex_unlock
544 # pragma weak __pthread_mutexattr_init
545 # pragma weak __pthread_mutexattr_destroy
546 # pragma weak __pthread_mutexattr_settype
547 # pragma weak __pthread_rwlock_destroy
548 # pragma weak __pthread_rwlock_rdlock
549 # pragma weak __pthread_rwlock_tryrdlock
550 # pragma weak __pthread_rwlock_wrlock
551 # pragma weak __pthread_rwlock_trywrlock
552 # pragma weak __pthread_rwlock_unlock
553 # pragma weak __pthread_key_create
554 # pragma weak __pthread_setspecific
555 # pragma weak __pthread_getspecific
556 # pragma weak __pthread_once
557 # pragma weak __pthread_initialize
558 # pragma weak __pthread_atfork
559 # pragma weak _pthread_cleanup_push_defer
560 # pragma weak _pthread_cleanup_pop_restore
561 # pragma weak pthread_setcancelstate
565 #endif /* bits/libc-lock.h */