/*
 * Copyright (C) 2005 Daniel M. Eischen <deischen@freebsd.org>
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libthr/thread/thr_private.h 217706 2010-08-23 $
 */

/*
 * Private thread definitions for the uthread kernel.
 */
35 #ifndef _THR_PRIVATE_H
36 #define _THR_PRIVATE_H
41 #include <sys/types.h>
43 #include <sys/cdefs.h>
44 #include <sys/queue.h>
45 #include <sys/rtprio.h>
46 #include <machine/atomic.h>
47 #include <machine/cpumask.h>
51 #include <sys/sched.h>
55 #include <pthread_np.h>
57 #if defined(_PTHREADS_DEBUGGING) || defined(_PTHREADS_DEBUGGING2)
58 void _thr_log(const char *buf
, size_t bytes
);
61 #include "pthread_md.h"
63 #include "thread_db.h"
/* Signal to do cancellation */
/* NOTE(review): the SIGCANCEL #define itself was lost in extraction — restore from upstream. */

/*
 * Kernel fatal error handler macro: aborts the process, reporting the
 * source location via _thread_exitf().
 */
#define PANIC(args...)		_thread_exitf(__FILE__, __LINE__, ##args)

/* Output debug messages like this: */
#define stdout_debug(args...)	_thread_printf(STDOUT_FILENO, ##args)
#define stderr_debug(args...)	_thread_printf(STDERR_FILENO, ##args)
#ifdef _PTHREADS_INVARIANTS
/*
 * Assert that `cond` holds; on failure abort via PANIC() with `msg`.
 * Compiles to nothing unless _PTHREADS_INVARIANTS is defined.
 */
#define THR_ASSERT(cond, msg) do {		\
	if (__predict_false(!(cond)))		\
		PANIC(msg);			\
} while (0)
#else
#define THR_ASSERT(cond, msg)
#endif
/*
 * Force-link `name` into a static binary via a .globl reference;
 * a no-op when building position-independent (shared) code.
 */
#ifdef __PIC__
#define STATIC_LIB_REQUIRE(name)
#else
#define STATIC_LIB_REQUIRE(name)	__asm(".globl " #name)
#endif
92 TAILQ_HEAD(thread_head
, pthread
) thread_head
;
93 TAILQ_HEAD(atfork_head
, pthread_atfork
) atfork_head
;
/*
 * *dst = *src + *val, normalizing tv_nsec into [0, 1e9).
 * Assumes both inputs already have normalized tv_nsec.
 */
#define TIMESPEC_ADD(dst, src, val)				\
	do {							\
		(dst)->tv_sec = (src)->tv_sec + (val)->tv_sec;	\
		(dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \
		if ((dst)->tv_nsec >= 1000000000) {		\
			(dst)->tv_sec++;			\
			(dst)->tv_nsec -= 1000000000;		\
		}						\
	} while (0)

/*
 * *dst = *src - *val, borrowing one second when the nanosecond
 * difference goes negative.
 */
#define TIMESPEC_SUB(dst, src, val)				\
	do {							\
		(dst)->tv_sec = (src)->tv_sec - (val)->tv_sec;	\
		(dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \
		if ((dst)->tv_nsec < 0) {			\
			(dst)->tv_sec--;			\
			(dst)->tv_nsec += 1000000000;		\
		}						\
	} while (0)
/*
 * NOTE(review): this whole region (struct pthread_mutex, the static mutex
 * initializers, struct pthread_mutex_attr and struct pthread_cond) is
 * truncated by extraction: several members (e.g. m_protocol, m_flags,
 * m_prio, m_saved_prio referenced by the comments and initializers below),
 * the structs' closing braces, and the embedded line numbers' removal must
 * be restored from the upstream thr_private.h before this can compile.
 */
115 struct pthread_mutex
{
117 * Lock for accesses to this structure.
119 volatile umtx_t m_lock
;
120 #ifdef _PTHREADS_DEBUGGING2
123 enum pthread_mutextype m_type
;
125 TAILQ_HEAD(mutex_head
, pthread
) m_queue
;
126 struct pthread
*m_owner
;
132 * Used for priority inheritance and protection.
134 * m_prio - For priority inheritance, the highest active
135 * priority (threads locking the mutex inherit
136 * this priority). For priority protection, the
137 * ceiling priority of this mutex.
138 * m_saved_prio - mutex owners inherited priority before
139 * taking the mutex, restored when the owner
146 * Link for list of all mutexes a thread currently owns.
148 TAILQ_ENTRY(pthread_mutex
) m_qe
;
/* Empty TAILQ head: { first = NULL, last = NULL } for static initializers. */
151 #define TAILQ_INITIALIZER { NULL, NULL }
153 #define PTHREAD_MUTEX_STATIC_INITIALIZER \
155 .m_type = PTHREAD_MUTEX_DEFAULT, \
156 .m_protocol = PTHREAD_PRIO_NONE, \
157 .m_queue = TAILQ_INITIALIZER, \
158 .m_flags = MUTEX_FLAGS_PRIVATE \
163 #define MUTEX_FLAGS_PRIVATE 0x01
164 #define MUTEX_FLAGS_INITED 0x02
166 struct pthread_mutex_attr
{
167 enum pthread_mutextype m_type
;
173 #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
174 { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }
/* Opaque per-waiter record; defined in the condvar implementation. */
176 struct cond_cancel_info
;
178 struct pthread_cond
{
180 * Lock for accesses to this structure.
182 volatile umtx_t c_lock
;
183 volatile int c_unused01
;
186 TAILQ_HEAD(, cond_cancel_info
) c_waitlist
;
189 struct pthread_cond_attr
{
195 * Flags for condition variables.
197 #define COND_FLAGS_PRIVATE 0x01
198 #define COND_FLAGS_INITED 0x02
200 struct pthread_barrier
{
201 volatile umtx_t b_lock
;
202 volatile umtx_t b_cycle
;
203 volatile int b_count
;
204 volatile int b_waiters
;
/*
 * NOTE(review): struct body and closing brace lost in extraction —
 * restore the member list from upstream thr_private.h.
 */
207 struct pthread_barrierattr
{
211 struct pthread_spinlock
{
212 volatile umtx_t s_lock
;
/*
 * NOTE(review): truncated by extraction.  The cleanup struct is missing its
 * routine_arg member and closing brace (THR_CLEANUP_PUSH below assigns
 * __cup.routine_arg, so the member must exist), and the PUSH/POP macros are
 * missing lines — note they are deliberately brace-unbalanced: PUSH opens a
 * block that the matching POP closes.  Restore from upstream before use.
 */
216 * Cleanup definitions.
218 struct pthread_cleanup
{
219 struct pthread_cleanup
*next
;
220 void (*routine
)(void *);
225 #define THR_CLEANUP_PUSH(td, func, arg) { \
226 struct pthread_cleanup __cup; \
228 __cup.routine = func; \
229 __cup.routine_arg = arg; \
231 __cup.next = (td)->cleanup; \
232 (td)->cleanup = &__cup;
234 #define THR_CLEANUP_POP(td, exec) \
235 (td)->cleanup = __cup.next; \
237 __cup.routine(__cup.routine_arg); \
240 struct pthread_atfork
{
241 TAILQ_ENTRY(pthread_atfork
) qe
;
242 void (*prepare
)(void);
243 void (*parent
)(void);
/*
 * NOTE(review): truncated by extraction — scheduling fields, the flags
 * member the THR_* bits below apply to, the cpumask field implied by
 * THR_CPUMASK, and the closing brace are missing; restore from upstream.
 */
247 struct pthread_attr
{
252 #define THR_STACK_USER 0x100 /* 0xFF reserved for <pthread.h> */
253 #define THR_CPUMASK 0x200 /* cpumask is valid */
255 void *stackaddr_attr
;
256 size_t stacksize_attr
;
257 size_t guardsize_attr
;
/*
 * Thread creation state attributes.
 */
#define THR_CREATE_RUNNING		0
#define THR_CREATE_SUSPENDED		1

/*
 * Miscellaneous definitions.
 * Default stack scales with pointer size: 1MB on ILP32, 2MB on LP64.
 */
#define THR_STACK_DEFAULT		(sizeof(void *) / 4 * 1024 * 1024)

/*
 * Maximum size of initial thread's stack.  This perhaps deserves to be larger
 * than the stacks of other threads, since many applications are likely to run
 * almost entirely on this stack.
 */
#define THR_STACK_INITIAL		(THR_STACK_DEFAULT * 2)

/*
 * Define the different priority ranges.  All applications have thread
 * priorities constrained within 0-31.  The threads library raises the
 * priority when delivering signals in order to ensure that signal
 * delivery happens (from the POSIX spec) "as soon as possible".
 * In the future, the threads library will also be able to map specific
 * threads into real-time (cooperating) processes or kernel threads.
 * The RT and SIGNAL priorities will be used internally and added to
 * thread base priorities so that the scheduling queue can handle both
 * normal and RT priority threads with and without signal handling.
 *
 * The approach taken is that, within each class, signal delivery
 * always has priority over thread execution.
 */
#define THR_DEFAULT_PRIORITY		0
#define THR_MUTEX_CEIL_PRIORITY		31	/* dummy */

/*
 * Time slice period in microseconds.
 */
#define TIMESLICE_USEC			20000
/*
 * NOTE(review): this region (rwlock attr, rwlock, TSD element) is truncated
 * by extraction — struct bodies/members and closing braces are missing
 * (e.g. the rwlock's writer-bookkeeping fields between `state` and the next
 * struct); restore from upstream thr_private.h.
 */
301 struct pthread_rwlockattr
{
305 struct pthread_rwlock
{
306 pthread_mutex_t lock
; /* monitor lock */
307 pthread_cond_t read_signal
;
308 pthread_cond_t write_signal
;
309 int state
; /* 0 = idle >0 = # of readers -1 = writer */
/* One thread-specific-data slot: value + destructor + liveness flag. */
321 struct pthread_specific_elem
{
327 volatile int allocated
;
330 void (*destructor
)(void *);
/*
 * NOTE(review): interior of `struct pthread` — the `struct pthread {` opener
 * and most members are missing from this extraction; the orphaned member
 * comments below (terminated flag, tid, cycle, locklevel, critical_count,
 * sigblock) each belong to a lost field.  Restore from upstream.
 */
338 * Magic value to help recognize a valid thread structure
339 * from an invalid one:
341 #define THR_MAGIC ((u_int32_t) 0xd09ba115)
344 u_int64_t uniqueid
; /* for gdb */
347 * Lock for accesses to this thread structure.
351 /* Thread is terminated in kernel, written by kernel. */
354 /* Kernel thread id. */
357 /* Internal condition variable cycle number. */
360 /* How many low level locks the thread held. */
364 * Set to non-zero when this thread has entered a critical
365 * region. We allow for recursive entries into critical regions.
369 /* Signal blocked counter. */
372 /* Queue entry for list of all threads. */
373 TAILQ_ENTRY(pthread
) tle
; /* link for all threads in process */
375 /* Queue entry for GC lists. */
376 TAILQ_ENTRY(pthread
) gcle
;
378 /* Hash queue entry. */
379 LIST_ENTRY(pthread
) hle
;
381 /* Threads reference count. */
385 * Thread start routine, argument, stack pointer and thread
388 void *(*start_routine
)(void *);
390 struct pthread_attr attr
;
393 * Cancelability flags
#define	THR_CANCEL_DISABLE		0x0001	/* cancellation disabled by thread */
#define	THR_CANCEL_EXITING		0x0002	/* thread already exiting */
#define THR_CANCEL_AT_POINT		0x0004	/* currently at a cancellation point */
#define THR_CANCEL_NEEDED		0x0008	/* cancellation has been requested */

/* Deferred cancel fires when requested, not disabled, and not already exiting. */
#define	SHOULD_CANCEL(val)					\
	(((val) & (THR_CANCEL_DISABLE | THR_CANCEL_EXITING |	\
		 THR_CANCEL_NEEDED)) == THR_CANCEL_NEEDED)

/* Async cancel additionally requires the thread to be at a cancel point. */
#define	SHOULD_ASYNC_CANCEL(val)				\
	(((val) & (THR_CANCEL_DISABLE | THR_CANCEL_EXITING |	\
		 THR_CANCEL_NEEDED | THR_CANCEL_AT_POINT)) ==	\
		 (THR_CANCEL_NEEDED | THR_CANCEL_AT_POINT))
/*
 * NOTE(review): continuation of the truncated `struct pthread` interior —
 * many members named only by their orphaned comments (sigmask, error,
 * flags/sflags, base priority, rdlock count, rtld bitmap, tcb,
 * report_events) plus the struct's closing brace are missing; restore
 * from upstream thr_private.h.
 */
409 /* Thread temporary signal mask. */
416 * Error variable used instead of errno, used for internal.
421 * The joiner is the thread that is joining to this thread. The
422 * join status keeps track of a join operation to another thread.
424 struct pthread
*joiner
;
427 * The current thread can belong to a priority mutex queue.
428 * This is the synchronization queue link.
430 TAILQ_ENTRY(pthread
) sqe
;
432 /* Miscellaneous flags; only set with scheduling lock held. */
434 #define THR_FLAGS_PRIVATE 0x0001
435 #define THR_FLAGS_NEED_SUSPEND 0x0002 /* thread should be suspended */
436 #define THR_FLAGS_SUSPENDED 0x0004 /* thread is suspended */
438 /* Thread list flags; only set with thread list lock held. */
440 #define TLFLAGS_GC_SAFE 0x0001 /* thread safe for cleaning */
441 #define TLFLAGS_IN_TDLIST 0x0002 /* thread in all thread list */
442 #define TLFLAGS_IN_GCLIST 0x0004 /* thread in gc list */
443 #define TLFLAGS_DETACHED 0x0008 /* thread is detached */
446 * Base priority is the user setable and retrievable priority
447 * of the thread. It is only affected by explicit calls to
448 * set thread priority and upon thread creation via a thread
449 * attribute or default priority.
454 * Inherited priority is the priority a thread inherits by
455 * taking a priority inheritance or protection mutex. It
456 * is not affected by base priority changes. Inherited
457 * priority defaults to and remains 0 until a mutex is taken
458 * that is being waited on by any other thread whose priority
461 char inherited_priority
;
464 * Active priority is always the maximum of the threads base
465 * priority and inherited priority. When there is a change
466 * in either the base or inherited priority, the active
467 * priority must be recalculated.
469 char active_priority
;
471 /* Number of priority ceiling or protection mutexes owned. */
472 int priority_mutex_count
;
474 /* Queue of currently owned simple type mutexes. */
475 TAILQ_HEAD(, pthread_mutex
) mutexq
;
478 struct pthread_specific_elem
*specific
;
479 int specific_data_count
;
481 /* Number rwlocks rdlocks held. */
485 * Current locks bitmap for rtld. */
488 /* Thread control block */
491 /* Cleanup handlers Link List */
492 struct pthread_cleanup
*cleanup
;
494 /* Enable event reporting */
498 td_thr_events_t event_mask
;
501 td_event_msg_t event_buf
;
/* True while the thread holds low-level locks or sits in a critical region. */
#define THR_IN_CRITICAL(thrd)				\
	(((thrd)->locklevel > 0) ||			\
	((thrd)->critical_count > 0))
/* Raw umtx operations performed on behalf of `thrd` (tid used as owner id). */
#define THR_UMTX_TRYLOCK(thrd, lck)			\
	_thr_umtx_trylock((lck), (thrd)->tid)

#define THR_UMTX_LOCK(thrd, lck)			\
	_thr_umtx_lock((lck), (thrd)->tid)

#define THR_UMTX_TIMEDLOCK(thrd, lck, timo)		\
	_thr_umtx_timedlock((lck), (thrd)->tid, (timo))

#define THR_UMTX_UNLOCK(thrd, lck)			\
	_thr_umtx_unlock((lck), (thrd)->tid)
/* Acquire `lck`, bumping the thread's lock level for THR_IN_CRITICAL(). */
#define THR_LOCK_ACQUIRE(thrd, lck)			\
do {							\
	(thrd)->locklevel++;				\
	_thr_umtx_lock((lck), (thrd)->tid);		\
} while (0)

#ifdef _PTHREADS_INVARIANTS
/* Abort if a release is attempted with no lock-level accounting. */
#define THR_ASSERT_LOCKLEVEL(thrd)			\
do {							\
	if (__predict_false((thrd)->locklevel <= 0))	\
		_thr_assert_lock_level();		\
} while (0)
#else
#define THR_ASSERT_LOCKLEVEL(thrd)
#endif

/* Release `lck` and drop the lock-level taken in THR_LOCK_ACQUIRE(). */
#define THR_LOCK_RELEASE(thrd, lck)			\
do {							\
	THR_ASSERT_LOCKLEVEL(thrd);			\
	_thr_umtx_unlock((lck), (thrd)->tid);		\
	(thrd)->locklevel--;				\
} while (0)
/* Convenience wrappers over acquire/release for a thread's own lock. */
#define THR_LOCK(curthrd)		THR_LOCK_ACQUIRE(curthrd, &(curthrd)->lock)
#define THR_UNLOCK(curthrd)		THR_LOCK_RELEASE(curthrd, &(curthrd)->lock)
#define THR_THREAD_LOCK(curthrd, thr)	THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
#define THR_THREAD_UNLOCK(curthrd, thr)	THR_LOCK_RELEASE(curthrd, &(thr)->lock)

/* Global thread-list lock; guards _thread_list / _thread_gc_list. */
#define THREAD_LIST_LOCK(curthrd)			\
do {							\
	THR_LOCK_ACQUIRE((curthrd), &_thr_list_lock);	\
} while (0)

#define THREAD_LIST_UNLOCK(curthrd)			\
do {							\
	THR_LOCK_RELEASE((curthrd), &_thr_list_lock);	\
} while (0)
/*
 * Macros to insert/remove threads to the all thread list and
 * the gc list.  Callers must hold the thread list lock.
 */
#define THR_LIST_ADD(thrd) do {					\
	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) {	\
		TAILQ_INSERT_HEAD(&_thread_list, thrd, tle);	\
		_thr_hash_add(thrd);				\
		(thrd)->tlflags |= TLFLAGS_IN_TDLIST;		\
	}							\
} while (0)

#define THR_LIST_REMOVE(thrd) do {				\
	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) {	\
		TAILQ_REMOVE(&_thread_list, thrd, tle);		\
		_thr_hash_remove(thrd);				\
		(thrd)->tlflags &= ~TLFLAGS_IN_TDLIST;		\
	}							\
} while (0)

/*
 * NOTE(review): the _thr_gc_count maintenance below is restored — GC_NEEDED()
 * tests _thr_gc_count, so these macros must keep it in step with the list.
 */
#define THR_GCLIST_ADD(thrd) do {				\
	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) {	\
		TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\
		(thrd)->tlflags |= TLFLAGS_IN_GCLIST;		\
		_thr_gc_count++;				\
	}							\
} while (0)

#define THR_GCLIST_REMOVE(thrd) do {				\
	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) {	\
		TAILQ_REMOVE(&_thread_gc_list, thrd, gcle);	\
		(thrd)->tlflags &= ~TLFLAGS_IN_GCLIST;		\
		_thr_gc_count--;				\
	}							\
} while (0)
/* A GC pass is worthwhile once enough exited threads await cleanup. */
#define GC_NEEDED()	(_thr_gc_count >= 5)

/* NOTE(review): THR_FLAGS_IN_SYNCQ is not defined in this view — confirm upstream. */
#define THR_IN_SYNCQ(thrd)	(((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0)

/* Report event `e` to the debugger when enabled for this thread or globally. */
#define SHOULD_REPORT_EVENT(curthr, e)			\
	(curthr->report_events &&			\
	 (((curthr)->event_mask | _thread_event_mask) & e) != 0)
#ifndef __LIBC_ISTHREADED_DECLARED
#define __LIBC_ISTHREADED_DECLARED
/* libc's "process has threads" flag; flipped via _thr_setthreaded(). */
extern int __isthreaded;
#endif
606 * Global variables for the pthread library.
608 extern char *_usrstack
;
609 extern struct pthread
*_thr_initial
;
612 extern int _libthread_xu_debug
;
613 extern int _thread_event_mask
;
614 extern struct pthread
*_thread_last_event
;
616 /* List of all threads */
617 extern struct thread_head _thread_list
;
619 /* List of threads needing GC */
620 extern struct thread_head _thread_gc_list
;
622 extern int _thread_active_threads
;
624 extern struct atfork_head _thr_atfork_list
;
625 extern struct atfork_head _thr_atfork_kern_list
;
626 extern umtx_t _thr_atfork_lock
;
628 /* Default thread attributes */
629 extern struct pthread_attr _pthread_attr_default
;
631 /* Default mutex attributes */
632 extern struct pthread_mutex_attr _pthread_mutexattr_default
;
634 /* Default condition variable attributes */
635 extern struct pthread_cond_attr _pthread_condattr_default
;
637 extern pid_t _thr_pid
;
638 extern size_t _thr_guard_default
;
639 extern size_t _thr_stack_default
;
640 extern size_t _thr_stack_initial
;
641 extern int _thr_page_size
;
642 extern int _thr_gc_count
;
644 extern umtx_t _mutex_static_lock
;
645 extern umtx_t _cond_static_lock
;
646 extern umtx_t _rwlock_static_lock
;
647 extern umtx_t _keytable_lock
;
648 extern umtx_t _thr_list_lock
;
649 extern umtx_t _thr_event_lock
;
652 * Function prototype definitions.
655 int _thr_setthreaded(int);
656 int _mutex_cv_lock(pthread_mutex_t
*, int count
);
657 int _mutex_cv_unlock(pthread_mutex_t
*, int *count
);
658 void _mutex_notify_priochange(struct pthread
*, struct pthread
*, int);
659 void _mutex_fork(struct pthread
*curthread
);
660 void _mutex_unlock_private(struct pthread
*);
663 int _mutex_reinit(pthread_mutex_t
*);
664 void _cond_reinit(pthread_cond_t pcond
);
665 void _rwlock_reinit(pthread_rwlock_t prwlock
);
668 void _libpthread_init(struct pthread
*);
669 struct pthread
*_thr_alloc(struct pthread
*);
670 void _thread_exit(const char *, int, const char *) __dead2
;
671 void _thread_exitf(const char *, int, const char *, ...) __dead2
673 void _thr_exit_cleanup(void);
674 void _thr_atfork_kern(void (*prepare
)(void), void (*parent
)(void),
675 void (*child
)(void));
676 int _thr_ref_add(struct pthread
*, struct pthread
*, int);
677 void _thr_ref_delete(struct pthread
*, struct pthread
*);
678 void _thr_ref_delete_unlocked(struct pthread
*, struct pthread
*);
679 int _thr_find_thread(struct pthread
*, struct pthread
*, int);
680 void _thr_malloc_init(void);
681 void _rtld_setthreaded(int);
682 void _thr_rtld_init(void);
683 void _thr_rtld_fini(void);
684 int _thr_stack_alloc(struct pthread_attr
*);
685 void _thr_stack_free(struct pthread_attr
*);
686 void _thr_stack_cleanup(void);
687 void _thr_sem_init(void);
688 void _thr_free(struct pthread
*, struct pthread
*);
689 void _thr_gc(struct pthread
*);
690 void _thread_cleanupspecific(void);
691 void _thread_dump_info(void);
692 void _thread_printf(int, const char *, ...) __printflike(2, 3);
693 void _thread_vprintf(int, const char *, va_list);
694 void _thr_spinlock_init(void);
695 int _thr_cancel_enter(struct pthread
*);
696 void _thr_cancel_leave(struct pthread
*, int);
697 void _thr_signal_block(struct pthread
*);
698 void _thr_signal_unblock(struct pthread
*);
699 void _thr_signal_init(void);
700 void _thr_signal_deinit(void);
701 int _thr_send_sig(struct pthread
*, int sig
);
702 void _thr_list_init(void);
703 void _thr_hash_add(struct pthread
*);
704 void _thr_hash_remove(struct pthread
*);
705 struct pthread
*_thr_hash_find(struct pthread
*);
706 void _thr_link(struct pthread
*curthread
, struct pthread
*thread
);
707 void _thr_unlink(struct pthread
*curthread
, struct pthread
*thread
);
708 void _thr_suspend_check(struct pthread
*curthread
);
709 void _thr_assert_lock_level(void) __dead2
;
710 void _thr_ast(struct pthread
*);
711 int _thr_get_tid(void);
712 void _thr_report_creation(struct pthread
*curthread
,
713 struct pthread
*newthread
);
714 void _thr_report_death(struct pthread
*curthread
);
715 void _thread_bp_create(void);
716 void _thread_bp_death(void);
717 int _thr_getscheduler(lwpid_t
, int *, struct sched_param
*);
718 int _thr_setscheduler(lwpid_t
, int, const struct sched_param
*);
719 int _thr_set_sched_other_prio(struct pthread
*, int);
720 int _rtp_to_schedparam(const struct rtprio
*rtp
, int *policy
,
721 struct sched_param
*param
);
722 int _schedparam_to_rtp(int policy
, const struct sched_param
*param
,
724 int _umtx_sleep_err(volatile const int *, int, int);
725 int _umtx_wakeup_err(volatile const int *, int);
727 /* #include <fcntl.h> */
729 int __sys_fcntl(int, int, ...);
730 int __sys_open(const char *, int, ...);
731 int __sys_openat(int, const char *, int, ...);
734 /* #include <sys/ioctl.h> */
736 int __sys_ioctl(int, unsigned long, ...);
739 /* #include <sched.h> */
741 int __sys_sched_yield(void);
744 /* #include <signal.h> */
746 int __sys_kill(pid_t
, int);
747 int __sys_sigaction(int, const struct sigaction
*, struct sigaction
*);
748 int __sys_sigpending(sigset_t
*);
749 int __sys_sigprocmask(int, const sigset_t
*, sigset_t
*);
750 int __sys_sigsuspend(const sigset_t
*);
751 int __sys_sigreturn(ucontext_t
*);
752 int __sys_sigaltstack(const struct sigaltstack
*, struct sigaltstack
*);
755 /* #include <time.h> */
757 int __sys_nanosleep(const struct timespec
*, struct timespec
*);
760 /* #include <unistd.h> */
762 int __sys_close(int);
763 int __sys_execve(const char *, char * const *, char * const *);
764 pid_t
__sys_getpid(void);
765 ssize_t
__sys_read(int, void *, size_t);
766 ssize_t
__sys_write(int, const void *, size_t);
767 void __sys_exit(int);
768 int __sys_sigwait(const sigset_t
*, int *);
769 int __sys_sigtimedwait(const sigset_t
*, siginfo_t
*,
770 const struct timespec
*);
771 int __sys_sigwaitinfo(const sigset_t
*set
, siginfo_t
*info
);
775 _thr_isthreaded(void)
777 return (__isthreaded
!= 0);
783 return (_thr_initial
!= NULL
);
787 _thr_check_init(void)
789 if (_thr_initial
== NULL
)
790 _libpthread_init(NULL
);
794 void __pthread_cxa_finalize(struct dl_phdr_info
*phdr_info
);
798 #endif /* !_THR_PRIVATE_H */