/* libc-internal interface for mutex locks.  NPTL version.
   Copyright (C) 1996-2003, 2005, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#ifndef _BITS_LIBC_LOCK_H
#define _BITS_LIBC_LOCK_H 1

#include <pthread.h>
#define __need_NULL
#include <stddef.h>

/* Fortunately Linux now has a means to do locking which is realtime
   safe without the aid of the thread library.  We also need no fancy
   options like error checking mutexes etc.  We only need simple
   locks, maybe recursive.  This can be easily and cheaply implemented
   using futexes.  We will use them everywhere except in ld.so since
   ld.so might be used on old kernels with a different libc.so.  */
#ifdef _LIBC
# include <lowlevellock.h>
# include <tls.h>
# include <pthread-functions.h>
#endif

/* Mutex type.  */
#if defined _LIBC || defined _IO_MTSAFE_IO
# if (defined NOT_IN_libc && !defined IS_IN_libpthread) || !defined _LIBC
typedef pthread_mutex_t __libc_lock_t;
typedef struct { pthread_mutex_t mutex; } __libc_lock_recursive_t;
# else
typedef int __libc_lock_t;
typedef struct { int lock; int cnt; void *owner; } __libc_lock_recursive_t;
# endif
typedef struct { pthread_mutex_t mutex; } __rtld_lock_recursive_t;
# ifdef __USE_UNIX98
typedef pthread_rwlock_t __libc_rwlock_t;
# else
typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
# endif
#else
typedef struct __libc_lock_opaque__ __libc_lock_t;
typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
#endif

/* Type for key to thread-specific data.  */
typedef pthread_key_t __libc_key_t;

/* Define a lock variable NAME with storage class CLASS.  The lock must be
   initialized with __libc_lock_init before it can be used (or define it
   with __libc_lock_define_initialized, below).  Use `extern' for CLASS to
   declare a lock defined in another module.  In public structure
   definitions you must use a pointer to the lock structure (i.e., NAME
   begins with a `*'), because its storage size will not be known outside
   of libc.  */
#define __libc_lock_define(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
#define __libc_rwlock_define(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME;
#define __libc_lock_define_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME;
#define __rtld_lock_define_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME;
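
/* Illustrative sketch (not part of this header): a libc module would
   typically define a lock at file scope with one of the macros above and
   pair it with the lock/unlock macros defined further below.  The names
   `_foo_lock' and `do_foo' are hypothetical.

     __libc_lock_define_initialized (static, _foo_lock)

     void
     do_foo (void)
     {
       __libc_lock_lock (_foo_lock);
       ... critical section ...
       __libc_lock_unlock (_foo_lock);
     }
*/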

/* Define an initialized lock variable NAME with storage class CLASS.

   For the C library we take a deeper look at the initializer.  For
   this implementation all fields are initialized to zero.  Therefore
   we don't initialize the variable which allows putting it into the
   BSS section.  (Except on PA-RISC and other odd architectures, where
   initialized locks must be set to one due to the lack of normal
   atomic operations.) */

#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if LLL_LOCK_INITIALIZER == 0
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
# else
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME = LLL_LOCK_INITIALIZER;
# endif
#else
# if __LT_SPINLOCK_INIT == 0
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
# else
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
# endif
#endif

#define __libc_rwlock_define_initialized(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;

/* Define an initialized recursive lock variable NAME with storage
   class CLASS.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if LLL_LOCK_INITIALIZER == 0
#  define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME;
# else
#  define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
# endif
# define _LIBC_LOCK_RECURSIVE_INITIALIZER \
  { LLL_LOCK_INITIALIZER, 0, NULL }
#else
# define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
# define _LIBC_LOCK_RECURSIVE_INITIALIZER \
  {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
#endif

#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
#define _RTLD_LOCK_RECURSIVE_INITIALIZER \
  {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}

#define __rtld_lock_initialize(NAME) \
  (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER)

/* If we check for a weakly referenced symbol and then perform a
   normal jump to it, the code generated for some platforms in case of
   PIC is unnecessarily slow.  What would happen is that the function
   is first referenced as data and then it is called indirectly
   through the PLT.  We can make this a direct jump.  */
#ifdef __PIC__
# define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (__extension__ ({ __typeof (FUNC) *_fn = (FUNC); \
                    _fn != NULL ? (*_fn) ARGS : ELSE; }))
#else
# define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (FUNC != NULL ? FUNC ARGS : ELSE)
#endif
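
/* Illustrative sketch (not part of this header): __libc_maybe_call lets
   libc fall back to a cheap default when libpthread is not linked in.
   With a hypothetical lock `m', a use such as

     __libc_maybe_call (__pthread_mutex_lock, (&m), 0)

   takes the address of the weak symbol __pthread_mutex_lock once; if it
   is NULL (single-threaded program, libpthread absent) the expression
   simply yields 0, otherwise the function is called through the saved
   pointer, which avoids the extra PLT indirection described above.  */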

/* Call thread functions through the function pointer table.  */
#if defined SHARED && !defined NOT_IN_libc
# define PTFAVAIL(NAME) __libc_pthread_functions_init
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
  (__libc_pthread_functions_init ? PTHFCT_CALL (ptr_##FUNC, ARGS) : ELSE)
# define __libc_ptf_call_always(FUNC, ARGS) \
  PTHFCT_CALL (ptr_##FUNC, ARGS)
#else
# define PTFAVAIL(NAME) (NAME != NULL)
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
  __libc_maybe_call (FUNC, ARGS, ELSE)
# define __libc_ptf_call_always(FUNC, ARGS) \
  FUNC ARGS
#endif
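
/* Illustrative sketch (not part of this header): in the shared libc,
   __libc_ptf_call dispatches through the function pointer table that
   libpthread fills in at load time.  With a hypothetical `lock', the
   rwlock macros below expand roughly to

     __libc_pthread_functions_init
       ? PTHFCT_CALL (ptr___pthread_rwlock_rdlock, (&lock))
       : 0

   so the call costs only a flag test when libpthread is not loaded.  */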

/* Initialize the named lock variable, leaving it in a consistent, unlocked
   state.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_init(NAME) ((NAME) = LLL_LOCK_INITIALIZER, 0)
#else
# define __libc_lock_init(NAME) \
  __libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0)
#endif
#define __libc_rwlock_init(NAME) \
  __libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0)

/* Same as above, but this time we initialize a recursive mutex.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_init_recursive(NAME) \
  ((NAME) = (__libc_lock_recursive_t) _LIBC_LOCK_RECURSIVE_INITIALIZER, 0)
#else
# define __libc_lock_init_recursive(NAME) \
  do { \
    if (__pthread_mutex_init != NULL) \
      { \
        pthread_mutexattr_t __attr; \
        __pthread_mutexattr_init (&__attr); \
        __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP); \
        __pthread_mutex_init (&(NAME).mutex, &__attr); \
        __pthread_mutexattr_destroy (&__attr); \
      } \
  } while (0)
#endif

#define __rtld_lock_init_recursive(NAME) \
  do { \
    if (__pthread_mutex_init != NULL) \
      { \
        pthread_mutexattr_t __attr; \
        __pthread_mutexattr_init (&__attr); \
        __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP); \
        __pthread_mutex_init (&(NAME).mutex, &__attr); \
        __pthread_mutexattr_destroy (&__attr); \
      } \
  } while (0)

/* Finalize the named lock variable, which must be locked.  It cannot be
   used again until __libc_lock_init is called again on it.  This must be
   called on a lock variable before the containing storage is reused.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_fini(NAME) ((void) 0)
#else
# define __libc_lock_fini(NAME) \
  __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
#endif
#define __libc_rwlock_fini(NAME) \
  __libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0)

/* Finalize recursive named lock.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_fini_recursive(NAME) ((void) 0)
#else
# define __libc_lock_fini_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
#endif

/* Lock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_lock(NAME) \
  ({ lll_lock (NAME); 0; })
#else
# define __libc_lock_lock(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
#endif
#define __libc_rwlock_rdlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_rdlock, (&(NAME)), 0)
#define __libc_rwlock_wrlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_wrlock, (&(NAME)), 0)

/* Lock the recursive named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_lock_recursive(NAME) \
  do { \
    void *self = THREAD_SELF; \
    if ((NAME).owner != self) \
      { \
        lll_lock ((NAME).lock); \
        (NAME).owner = self; \
      } \
    ++(NAME).cnt; \
  } while (0)
#else
# define __libc_lock_lock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
#endif
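
/* Illustrative sketch (not part of this header): the libc-internal
   recursive lock records the owning thread and a nesting count, so the
   same thread may take the lock again without blocking.  With a
   hypothetical lock `r':

     __libc_lock_lock_recursive (r);     nesting count becomes 1
     __libc_lock_lock_recursive (r);     same owner, count becomes 2
     __libc_lock_unlock_recursive (r);   count drops to 1, still held
     __libc_lock_unlock_recursive (r);   count reaches 0, lock released
*/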

/* Try to lock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_trylock(NAME) \
  lll_trylock (NAME)
#else
# define __libc_lock_trylock(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
#endif
#define __libc_rwlock_tryrdlock(NAME) \
  __libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0)
#define __libc_rwlock_trywrlock(NAME) \
  __libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0)

/* Try to lock the recursive named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_trylock_recursive(NAME) \
  ({ \
    int result = 0; \
    void *self = THREAD_SELF; \
    if ((NAME).owner != self) \
      { \
        if (lll_trylock ((NAME).lock) == 0) \
          { \
            (NAME).owner = self; \
            (NAME).cnt = 1; \
          } \
        else \
          result = EBUSY; \
      } \
    else \
      ++(NAME).cnt; \
    result; \
  })
#else
# define __libc_lock_trylock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
#endif

#define __rtld_lock_trylock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)

/* Unlock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_unlock(NAME) \
  lll_unlock (NAME)
#else
# define __libc_lock_unlock(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
#endif
#define __libc_rwlock_unlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_unlock, (&(NAME)), 0)

/* Unlock the recursive named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
/* We do no error checking here.  */
# define __libc_lock_unlock_recursive(NAME) \
  do { \
    if (--(NAME).cnt == 0) \
      { \
        (NAME).owner = NULL; \
        lll_unlock ((NAME).lock); \
      } \
  } while (0)
#else
# define __libc_lock_unlock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
#endif

#if defined _LIBC && defined SHARED
# define __rtld_lock_default_lock_recursive(lock) \
  ++((pthread_mutex_t *)(lock))->__data.__count;

# define __rtld_lock_default_unlock_recursive(lock) \
  --((pthread_mutex_t *)(lock))->__data.__count;

# define __rtld_lock_lock_recursive(NAME) \
  GL(dl_rtld_lock_recursive) (&(NAME).mutex)

# define __rtld_lock_unlock_recursive(NAME) \
  GL(dl_rtld_unlock_recursive) (&(NAME).mutex)
#else
# define __rtld_lock_lock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)

# define __rtld_lock_unlock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME).mutex), 0)
#endif

/* Define once control variable.  */
#if PTHREAD_ONCE_INIT == 0
/* Special case for static variables where we can avoid the initialization
   if it is zero.  */
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME
#else
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME = PTHREAD_ONCE_INIT
#endif

/* Call handler iff the first call.  */
#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
  do { \
    if (PTFAVAIL (__pthread_once)) \
      __libc_ptf_call_always (__pthread_once, (&(ONCE_CONTROL), \
                                               INIT_FUNCTION)); \
    else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) { \
      INIT_FUNCTION (); \
      (ONCE_CONTROL) |= 2; \
    } \
  } while (0)
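
/* Illustrative sketch (not part of this header): one-time initialization
   inside libc pairs __libc_once_define with __libc_once.  The names
   `once', `init_tables' and `use_tables' are hypothetical.

     __libc_once_define (static, once);

     static void
     init_tables (void)
     {
       ... runs exactly once, even with concurrent callers ...
     }

     void
     use_tables (void)
     {
       __libc_once (once, init_tables);
       ...
     }
*/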

/* Note that for I/O cleanup handling we are using the old-style
   cancel handling.  It does not have to be integrated with C++ since
   no C++ code is called in the middle.  The old-style handling is
   faster and the support is not going away.  */
extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
                                   void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
                                  int execute);
extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
                                          void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
                                           int execute);

/* Start critical region with cleanup.  */
#define __libc_cleanup_region_start(DOIT, FCT, ARG) \
  { struct _pthread_cleanup_buffer _buffer; \
    int _avail; \
    if (DOIT) { \
      _avail = PTFAVAIL (_pthread_cleanup_push_defer); \
      if (_avail) { \
        __libc_ptf_call_always (_pthread_cleanup_push_defer, (&_buffer, FCT, \
                                                              ARG)); \
      } else { \
        _buffer.__routine = (FCT); \
        _buffer.__arg = (ARG); \
      } \
    } else { \
      _avail = 0; \
    }

/* End critical region with cleanup.  */
#define __libc_cleanup_region_end(DOIT) \
    if (_avail) { \
      __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
    } else if (DOIT) \
      _buffer.__routine (_buffer.__arg); \
  }

/* Sometimes we have to exit the block in the middle.  */
#define __libc_cleanup_end(DOIT) \
    if (_avail) { \
      __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
    } else if (DOIT) \
      _buffer.__routine (_buffer.__arg)

/* Normal cleanup handling, based on C cleanup attribute.  */
__extern_inline void
__libc_cleanup_routine (struct __pthread_cleanup_frame *f)
{
  if (f->__do_it)
    f->__cancel_routine (f->__cancel_arg);
}

#define __libc_cleanup_push(fct, arg) \
  do { \
    struct __pthread_cleanup_frame __clframe \
      __attribute__ ((__cleanup__ (__libc_cleanup_routine))) \
      = { .__cancel_routine = (fct), .__cancel_arg = (arg), \
          .__do_it = 1 };

#define __libc_cleanup_pop(execute) \
    __clframe.__do_it = (execute); \
  } while (0)
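
/* Illustrative sketch (not part of this header): __libc_cleanup_push and
   __libc_cleanup_pop must appear as a pair in the same block; together
   they form a single do/while statement whose handler is invoked through
   the __cleanup__ attribute when the frame goes out of scope, e.g. during
   cancellation unwinding.  The names `unlock_handler' and `arg' are
   hypothetical.

     __libc_cleanup_push (unlock_handler, arg);
     ... cancellable operation ...
     __libc_cleanup_pop (0);
*/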

/* Create thread-specific key.  */
#define __libc_key_create(KEY, DESTRUCTOR) \
  __libc_ptf_call (__pthread_key_create, (KEY, DESTRUCTOR), 1)

/* Get thread-specific data.  */
#define __libc_getspecific(KEY) \
  __libc_ptf_call (__pthread_getspecific, (KEY), NULL)

/* Set thread-specific data.  */
#define __libc_setspecific(KEY, VALUE) \
  __libc_ptf_call (__pthread_setspecific, (KEY, VALUE), 0)
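
/* Illustrative sketch (not part of this header): thread-specific data is
   used through these wrappers much like the public pthread API.  The
   names `key', `free_buffer' and `value' are hypothetical; note that
   __libc_key_create yields the failure value 1 when no thread library is
   available.

     static __libc_key_t key;

     if (__libc_key_create (&key, free_buffer) == 0)
       {
         __libc_setspecific (key, value);
         void *v = __libc_getspecific (key);
         ...
       }
*/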

/* Register handlers to execute before and after `fork'.  Note that the
   last parameter is NULL.  The handlers registered by the libc are
   never removed so this is OK.  */
#define __libc_atfork(PREPARE, PARENT, CHILD) \
  __register_atfork (PREPARE, PARENT, CHILD, NULL)
extern int __register_atfork (void (*__prepare) (void),
                              void (*__parent) (void),
                              void (*__child) (void),
                              void *__dso_handle);
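
/* Illustrative sketch (not part of this header): a libc subsystem that
   owns a lock can register fork handlers so the lock is in a sane state
   in the child.  The names `_foo_lock', `prepare', `parent', `child' and
   `init_foo' are hypothetical.

     static void prepare (void) { __libc_lock_lock (_foo_lock); }
     static void parent (void)  { __libc_lock_unlock (_foo_lock); }
     static void child (void)   { __libc_lock_init (_foo_lock); }

     void
     init_foo (void)
     {
       __libc_atfork (prepare, parent, child);
     }
*/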

/* Functions that are used by this file and are internal to the GNU C
   library.  */

extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
                                 __const pthread_mutexattr_t *__mutex_attr);

extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);

extern int __pthread_mutex_trylock (pthread_mutex_t *__mutex);

extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);

extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);

extern int __pthread_mutexattr_init (pthread_mutexattr_t *__attr);

extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr);

extern int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr,
                                        int __kind);

#ifdef __USE_UNIX98
extern int __pthread_rwlock_init (pthread_rwlock_t *__rwlock,
                                  __const pthread_rwlockattr_t *__attr);

extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);
#endif

extern int __pthread_key_create (pthread_key_t *__key,
                                 void (*__destr_function) (void *));

extern int __pthread_setspecific (pthread_key_t __key,
                                  __const void *__pointer);

extern void *__pthread_getspecific (pthread_key_t __key);

extern int __pthread_once (pthread_once_t *__once_control,
                           void (*__init_routine) (void));

extern int __pthread_atfork (void (*__prepare) (void),
                             void (*__parent) (void),
                             void (*__child) (void));

/* Make the pthread functions weak so that we can elide them from
   single-threaded processes.  */
#ifndef __NO_WEAK_PTHREAD_ALIASES
# ifdef weak_extern
#  if _LIBC
#   include <bp-sym.h>
#  else
#   define BP_SYM(sym) sym
#  endif
weak_extern (BP_SYM (__pthread_mutex_init))
weak_extern (BP_SYM (__pthread_mutex_destroy))
weak_extern (BP_SYM (__pthread_mutex_lock))
weak_extern (BP_SYM (__pthread_mutex_trylock))
weak_extern (BP_SYM (__pthread_mutex_unlock))
weak_extern (BP_SYM (__pthread_mutexattr_init))
weak_extern (BP_SYM (__pthread_mutexattr_destroy))
weak_extern (BP_SYM (__pthread_mutexattr_settype))
weak_extern (BP_SYM (__pthread_rwlock_init))
weak_extern (BP_SYM (__pthread_rwlock_destroy))
weak_extern (BP_SYM (__pthread_rwlock_rdlock))
weak_extern (BP_SYM (__pthread_rwlock_tryrdlock))
weak_extern (BP_SYM (__pthread_rwlock_wrlock))
weak_extern (BP_SYM (__pthread_rwlock_trywrlock))
weak_extern (BP_SYM (__pthread_rwlock_unlock))
weak_extern (BP_SYM (__pthread_key_create))
weak_extern (BP_SYM (__pthread_setspecific))
weak_extern (BP_SYM (__pthread_getspecific))
weak_extern (BP_SYM (__pthread_once))
weak_extern (__pthread_initialize)
weak_extern (__pthread_atfork)
weak_extern (BP_SYM (_pthread_cleanup_push_defer))
weak_extern (BP_SYM (_pthread_cleanup_pop_restore))
weak_extern (BP_SYM (pthread_setcancelstate))
# else
#  pragma weak __pthread_mutex_init
#  pragma weak __pthread_mutex_destroy
#  pragma weak __pthread_mutex_lock
#  pragma weak __pthread_mutex_trylock
#  pragma weak __pthread_mutex_unlock
#  pragma weak __pthread_mutexattr_init
#  pragma weak __pthread_mutexattr_destroy
#  pragma weak __pthread_mutexattr_settype
#  pragma weak __pthread_rwlock_destroy
#  pragma weak __pthread_rwlock_rdlock
#  pragma weak __pthread_rwlock_tryrdlock
#  pragma weak __pthread_rwlock_wrlock
#  pragma weak __pthread_rwlock_trywrlock
#  pragma weak __pthread_rwlock_unlock
#  pragma weak __pthread_key_create
#  pragma weak __pthread_setspecific
#  pragma weak __pthread_getspecific
#  pragma weak __pthread_once
#  pragma weak __pthread_initialize
#  pragma weak __pthread_atfork
#  pragma weak _pthread_cleanup_push_defer
#  pragma weak _pthread_cleanup_pop_restore
#  pragma weak pthread_setcancelstate
# endif
#endif

#endif  /* bits/libc-lock.h */