/*
 * Copyright (C) 2005 Daniel M. Eischen <deischen@freebsd.org>
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libthr/thread/thr_private.h 217706 2010-08-23 $
 */
/*
 * Private thread definitions for the uthread kernel.
 */

#ifndef _THR_PRIVATE_H
#define _THR_PRIVATE_H

/*
 * Include files.
 */
#include <sys/types.h>
#include <sys/time.h>
#include <sys/cdefs.h>
#include <sys/queue.h>
#include <sys/rtprio.h>
#include <sys/mman.h>
#include <machine/atomic.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <sys/cpumask.h>
#include <sys/sched.h>
#include <stdarg.h>
#include <unistd.h>
#include <pthread.h>
#include <pthread_np.h>
#if defined(_PTHREADS_DEBUGGING) || defined(_PTHREADS_DEBUGGING2)
void    _thr_log(const char *buf, size_t bytes);
#endif

#include "pthread_md.h"
#include "thr_umtx.h"
#include "thread_db.h"
/* Signal to do cancellation */
#define SIGCANCEL               32

/*
 * Kernel fatal error handler macro.
 */
#define PANIC(args...)          _thread_exitf(__FILE__, __LINE__, ##args)

/* Output debug messages like this: */
#define stdout_debug(args...)   _thread_printf(STDOUT_FILENO, ##args)
#define stderr_debug(args...)   _thread_printf(STDERR_FILENO, ##args)

#ifdef _PTHREADS_INVARIANTS
#define THR_ASSERT(cond, msg) do {      \
        if (__predict_false(!(cond)))   \
                PANIC(msg);             \
} while (0)
#else
#define THR_ASSERT(cond, msg)
#endif

#ifdef PIC
#define STATIC_LIB_REQUIRE(name)
#else
#define STATIC_LIB_REQUIRE(name)        __asm(".globl " #name)
#endif
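/*
 * STATIC_LIB_REQUIRE() is a no-op in the shared (PIC) build; in the static
 * build it emits a global symbol reference, presumably so the object file
 * that defines "name" is always pulled into the link.
 */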
typedef TAILQ_HEAD(thread_head, __pthread_s)    thread_head;
typedef TAILQ_HEAD(atfork_head, pthread_atfork) atfork_head;

struct __pthread_mutex_s {
        /*
         * Lock for accesses to this structure.
         */
        volatile umtx_t                 m_lock;
#ifdef _PTHREADS_DEBUGGING2
        int                             m_lastop[32];
#endif
        enum pthread_mutextype          m_type;
        int                             m_protocol;
        TAILQ_HEAD(mutex_head, __pthread_s) m_queue;
        struct __pthread_s              *m_owner;
        long                            m_flags;
        int                             m_count;
        int                             m_refcount;

        /*
         * Used for priority inheritance and protection.
         *
         *   m_prio       - For priority inheritance, the highest active
         *                  priority (threads locking the mutex inherit
         *                  this priority).  For priority protection, the
         *                  ceiling priority of this mutex.
         *   m_saved_prio - the mutex owner's inherited priority before
         *                  taking the mutex, restored when the owner
         *                  unlocks the mutex.
         */
        int                             m_prio;
        int                             m_saved_prio;

        /*
         * Link for list of all mutexes a thread currently owns.
         */
        TAILQ_ENTRY(__pthread_mutex_s)  m_qe;
};

#define TAILQ_INITIALIZER       { NULL, NULL }

#define PTHREAD_MUTEX_STATIC_INITIALIZER        \
        { .m_lock = 0,                          \
          .m_type = PTHREAD_MUTEX_DEFAULT,      \
          .m_protocol = PTHREAD_PRIO_NONE,      \
          .m_queue = TAILQ_INITIALIZER,         \
          .m_flags = MUTEX_FLAGS_PRIVATE        \
        }

/*
 * Flags for mutexes.
 */
#define MUTEX_FLAGS_PRIVATE     0x01
#define MUTEX_FLAGS_INITED      0x02
struct __pthread_mutexattr_s {
        enum pthread_mutextype  m_type;
        int                     m_protocol;
        int                     m_ceiling;
        int                     m_flags;
};

#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER    \
        { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }
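/*
 * The positional initializer above maps, in declaration order, to the
 * m_type, m_protocol, m_ceiling and m_flags members of
 * struct __pthread_mutexattr_s.
 */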
struct cond_cancel_info;

struct __pthread_cond_s {
        /*
         * Lock for accesses to this structure.
         */
        volatile umtx_t         c_lock;
        volatile int            c_unused01;
        int                     c_pshared;
        int                     c_clockid;
        TAILQ_HEAD(, cond_cancel_info)  c_waitlist;
};

struct __pthread_condattr_s {
        int                     c_pshared;
        int                     c_clockid;
};

/*
 * Flags for condition variables.
 */
#define COND_FLAGS_PRIVATE      0x01
#define COND_FLAGS_INITED       0x02

struct __pthread_barrier_s {
        volatile umtx_t         b_lock;
        volatile umtx_t         b_cycle;
        volatile int            b_count;
        volatile int            b_waiters;
};

struct __pthread_barrierattr_s {
        int                     pshared;
};

struct __pthread_spinlock_s {
        volatile umtx_t         s_lock;
};
/*
 * Cleanup definitions.
 */
struct pthread_cleanup {
        struct pthread_cleanup  *next;
        void                    (*routine)(void *);
        void                    *routine_arg;
        int                     onstack;
};

#define THR_CLEANUP_PUSH(td, func, arg) {       \
        struct pthread_cleanup __cup;           \
                                                \
        __cup.routine = func;                   \
        __cup.routine_arg = arg;                \
        __cup.onstack = 1;                      \
        __cup.next = (td)->cleanup;             \
        (td)->cleanup = &__cup;

#define THR_CLEANUP_POP(td, exec)               \
        (td)->cleanup = __cup.next;             \
        if ((exec) != 0)                        \
                __cup.routine(__cup.routine_arg); \
}
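/*
 * THR_CLEANUP_PUSH opens a block, declares a stack-allocated
 * pthread_cleanup record and links it onto the thread's cleanup list;
 * THR_CLEANUP_POP unlinks it (optionally running the handler) and closes
 * that block, so the two must appear as a pair in the same function.
 * Illustrative sketch only (handler and argument names are hypothetical):
 *
 *      THR_CLEANUP_PUSH(curthread, unlock_handler, &some_lock);
 *      ... work that may exit early ...
 *      THR_CLEANUP_POP(curthread, 1);
 */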
struct pthread_atfork {
        TAILQ_ENTRY(pthread_atfork) qe;
        void (*prepare)(void);
        void (*parent)(void);
        void (*child)(void);
};

struct __pthread_attr_s {
        int     sched_policy;
        int     sched_inherit;
        int     prio;
        int     suspend;
#define THR_STACK_USER          0x100   /* 0xFF reserved for <pthread.h> */
#define THR_CPUMASK             0x200   /* cpumask is valid */
        int     flags;
        void    *stackaddr_attr;
        size_t  stacksize_attr;
        size_t  guardsize_attr;
        cpumask_t cpumask;
};
/*
 * Thread creation state attributes.
 */
#define THR_CREATE_RUNNING      0
#define THR_CREATE_SUSPENDED    1

/*
 * Miscellaneous definitions.
 */
#define THR_STACK_DEFAULT       (sizeof(void *) / 4 * 1024 * 1024)
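/*
 * With the pointer-size scaling above, THR_STACK_DEFAULT works out to
 * 1MB on 32-bit platforms (sizeof(void *) == 4) and 2MB on 64-bit
 * platforms (sizeof(void *) == 8); THR_STACK_INITIAL below doubles that.
 */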
/*
 * Maximum size of initial thread's stack.  This perhaps deserves to be larger
 * than the stacks of other threads, since many applications are likely to run
 * almost entirely on this stack.
 */
#define THR_STACK_INITIAL       (THR_STACK_DEFAULT * 2)

/*
 * Define the different priority ranges.  All applications have thread
 * priorities constrained within 0-31.  The threads library raises the
 * priority when delivering signals in order to ensure that signal
 * delivery happens (from the POSIX spec) "as soon as possible".
 * In the future, the threads library will also be able to map specific
 * threads into real-time (cooperating) processes or kernel threads.
 * The RT and SIGNAL priorities will be used internally and added to
 * thread base priorities so that the scheduling queue can handle both
 * normal and RT priority threads with and without signal handling.
 *
 * The approach taken is that, within each class, signal delivery
 * always has priority over thread execution.
 */
#define THR_DEFAULT_PRIORITY            0
#define THR_MUTEX_CEIL_PRIORITY         31      /* dummy */

/*
 * Time slice period in microseconds.
 */
#define TIMESLICE_USEC                  20000
struct __pthread_rwlockattr_s {
        int     pshared;
};

struct __pthread_rwlock_s {
        pthread_mutex_t lock;           /* monitor lock */
        pthread_cond_t  read_signal;
        pthread_cond_t  write_signal;
        int             state;          /* 0 = idle, >0 = # of readers, -1 = writer */
        int             blocked_writers;
};

/*
 * Thread states.
 */
enum pthread_state {
        PS_RUNNING,
        PS_DEAD
};

struct pthread_specific_elem {
        const void      *data;
        int             seqno;
};

struct pthread_key {
        volatile int    allocated;
        volatile int    count;
        int             seqno;
        void            (*destructor)(void *);
};
/*
 * Thread structure.
 */
struct __pthread_s {
        /*
         * Magic value to help recognize a valid thread structure
         * from an invalid one:
         */
#define THR_MAGIC               ((u_int32_t) 0xd09ba115)
        u_int32_t               magic;
        char                    *name;
        u_int64_t               uniqueid;       /* for gdb */

        /*
         * Lock for accesses to this thread structure.
         */
        umtx_t                  lock;

        /* Thread is terminated in kernel, written by kernel. */
        long                    terminated;

        /* Kernel thread id. */
        lwpid_t                 tid;

        /* Internal condition variable cycle number. */
        umtx_t                  cycle;

        /* How many low-level locks the thread holds. */
        int                     locklevel;

        /*
         * Set to non-zero when this thread has entered a critical
         * region.  We allow for recursive entries into critical regions.
         */
        int                     critical_count;

        /* Signal blocked counter. */
        int                     sigblock;

        /* Queue entry for list of all threads. */
        TAILQ_ENTRY(__pthread_s) tle;   /* link for all threads in process */

        /* Queue entry for GC lists. */
        TAILQ_ENTRY(__pthread_s) gcle;

        /* Hash queue entry. */
        LIST_ENTRY(__pthread_s) hle;

        /* Threads reference count. */
        int                     refcount;

        /*
         * Thread start routine, argument, stack pointer and thread
         * attributes.
         */
        void                    *(*start_routine)(void *);
        void                    *arg;
        struct __pthread_attr_s attr;
        /*
         * Cancelability flags
         */
#define THR_CANCEL_DISABLE      0x0001
#define THR_CANCEL_EXITING      0x0002
#define THR_CANCEL_AT_POINT     0x0004
#define THR_CANCEL_NEEDED       0x0008
#define SHOULD_CANCEL(val)                                      \
        (((val) & (THR_CANCEL_DISABLE | THR_CANCEL_EXITING |    \
                 THR_CANCEL_NEEDED)) == THR_CANCEL_NEEDED)

#define SHOULD_ASYNC_CANCEL(val)                                \
        (((val) & (THR_CANCEL_DISABLE | THR_CANCEL_EXITING |    \
                 THR_CANCEL_NEEDED | THR_CANCEL_AT_POINT)) ==   \
                 (THR_CANCEL_NEEDED | THR_CANCEL_AT_POINT))
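        /*
         * Reading the two macros above: a pending cancel (THR_CANCEL_NEEDED)
         * is honored only while neither THR_CANCEL_DISABLE nor
         * THR_CANCEL_EXITING is set; SHOULD_ASYNC_CANCEL() additionally
         * requires THR_CANCEL_AT_POINT to be set.
         */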
        int                     cancelflags;

        /* Thread temporary signal mask. */
        sigset_t                sigmask;

        /* Thread state: */
        umtx_t                  state;

        /*
         * Error variable used instead of errno; for internal use.
         */
        int                     error;

        /*
         * The joiner is the thread that is joining to this thread.  The
         * join status keeps track of a join operation to another thread.
         */
        struct __pthread_s      *joiner;

        /*
         * The current thread can belong to a priority mutex queue.
         * This is the synchronization queue link.
         */
        TAILQ_ENTRY(__pthread_s) sqe;

        /* Miscellaneous flags; only set with scheduling lock held. */
        int                     flags;
#define THR_FLAGS_PRIVATE       0x0001
#define THR_FLAGS_NEED_SUSPEND  0x0002  /* thread should be suspended */
#define THR_FLAGS_SUSPENDED     0x0004  /* thread is suspended */

        /* Thread list flags; only set with thread list lock held. */
        int                     tlflags;
#define TLFLAGS_GC_SAFE         0x0001  /* thread safe for cleaning */
#define TLFLAGS_IN_TDLIST       0x0002  /* thread in all thread list */
#define TLFLAGS_IN_GCLIST       0x0004  /* thread in gc list */
#define TLFLAGS_DETACHED        0x0008  /* thread is detached */
        /*
         * Base priority is the user-settable and retrievable priority
         * of the thread.  It is only affected by explicit calls to
         * set thread priority and upon thread creation via a thread
         * attribute or default priority.
         */
        char                    base_priority;

        /*
         * Inherited priority is the priority a thread inherits by
         * taking a priority inheritance or protection mutex.  It
         * is not affected by base priority changes.  Inherited
         * priority defaults to and remains 0 until a mutex is taken
         * that is being waited on by any other thread whose priority
         * is non-zero.
         */
        char                    inherited_priority;

        /*
         * Active priority is always the maximum of the thread's base
         * priority and inherited priority.  When there is a change
         * in either the base or inherited priority, the active
         * priority must be recalculated.
         */
        char                    active_priority;

        /* Number of priority ceiling or protection mutexes owned. */
        int                     priority_mutex_count;
        /* Queue of currently owned simple type mutexes. */
        TAILQ_HEAD(, __pthread_mutex_s) mutexq;

        void                    *ret;
        struct pthread_specific_elem *specific;
        int                     specific_data_count;

        /* Number of rwlock rdlocks held. */
        int                     rdlock_count;

        /* Current locks bitmap for rtld. */
        int                     rtld_bits;

        /* Thread control block */
        struct tls_tcb          *tcb;

        /* Cleanup handlers link list */
        struct pthread_cleanup  *cleanup;

        /* Enable event reporting */
        int                     report_events;

        /* Event mask */
        td_thr_events_t         event_mask;

        /* Event */
        td_event_msg_t          event_buf;
};
#define THR_IN_CRITICAL(thrd)                           \
        (((thrd)->locklevel > 0) ||                     \
         ((thrd)->critical_count > 0))
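/*
 * A thread counts as "in a critical section" whenever it holds a
 * low-level lock (locklevel > 0) or has explicitly entered a critical
 * region (critical_count > 0).
 */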
/*
 * Internal temporary locks without suspend check
 */
#define THR_UMTX_TRYLOCK(thrd, lck)                     \
        _thr_umtx_trylock((lck), (thrd)->tid, 1)

#define THR_UMTX_LOCK(thrd, lck)                        \
        _thr_umtx_lock((lck), (thrd)->tid, 1)

#define THR_UMTX_TIMEDLOCK(thrd, lck, timo)             \
        _thr_umtx_timedlock((lck), (thrd)->tid, (timo), 1)

#define THR_UMTX_UNLOCK(thrd, lck)                      \
        _thr_umtx_unlock((lck), (thrd)->tid, 1)

/*
 * Internal locks without suspend check, used when the lock
 * state needs to persist (i.e. to help implement things
 * like pthread_mutex_lock()).  Non-temporary.
 */
#define THR_UMTX_TRYLOCK_PERSIST(thrd, lck)             \
        _thr_umtx_trylock((lck), (thrd)->tid, 0)

#define THR_UMTX_LOCK_PERSIST(thrd, lck)                \
        _thr_umtx_lock((lck), (thrd)->tid, 0)

#define THR_UMTX_TIMEDLOCK_PERSIST(thrd, lck, timo)     \
        _thr_umtx_timedlock((lck), (thrd)->tid, (timo), 0)

#define THR_UMTX_UNLOCK_PERSIST(thrd, lck)              \
        _thr_umtx_unlock((lck), (thrd)->tid, 0)
/*
 * Internal temporary locks with suspend check
 */
#define THR_LOCK_ACQUIRE(thrd, lck)                     \
do {                                                    \
        (thrd)->locklevel++;                            \
        _thr_umtx_lock((lck), (thrd)->tid, 1);          \
} while (0)

#ifdef _PTHREADS_INVARIANTS
#define THR_ASSERT_LOCKLEVEL(thrd)                      \
do {                                                    \
        if (__predict_false((thrd)->locklevel <= 0))    \
                _thr_assert_lock_level();               \
} while (0)
#else
#define THR_ASSERT_LOCKLEVEL(thrd)
#endif
#define THR_LOCK_RELEASE(thrd, lck)                     \
do {                                                    \
        THR_ASSERT_LOCKLEVEL(thrd);                     \
        _thr_umtx_unlock((lck), (thrd)->tid, 1);        \
        (thrd)->locklevel--;                            \
        _thr_ast(thrd);                                 \
} while (0)
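/*
 * THR_LOCK_ACQUIRE above bumps locklevel before taking the umtx, and
 * THR_LOCK_RELEASE drops the umtx, decrements locklevel and then calls
 * _thr_ast(), presumably so deferred requests (e.g. a pending suspension,
 * cf. THR_FLAGS_NEED_SUSPEND) can be processed once the thread no longer
 * holds any low-level lock.
 */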
#define THR_LOCK(curthrd)               THR_LOCK_ACQUIRE(curthrd, &(curthrd)->lock)
#define THR_UNLOCK(curthrd)             THR_LOCK_RELEASE(curthrd, &(curthrd)->lock)
#define THR_THREAD_LOCK(curthrd, thr)   THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
#define THR_THREAD_UNLOCK(curthrd, thr) THR_LOCK_RELEASE(curthrd, &(thr)->lock)

#define THREAD_LIST_LOCK(curthrd)                       \
do {                                                    \
        THR_LOCK_ACQUIRE((curthrd), &_thr_list_lock);   \
} while (0)

#define THREAD_LIST_UNLOCK(curthrd)                     \
do {                                                    \
        THR_LOCK_RELEASE((curthrd), &_thr_list_lock);   \
} while (0)
/*
 * Macros to insert/remove threads into/from the all-threads list and
 * the gc list.
 */
#define THR_LIST_ADD(thrd) do {                                 \
        if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) {       \
                TAILQ_INSERT_HEAD(&_thread_list, thrd, tle);    \
                _thr_hash_add(thrd);                            \
                (thrd)->tlflags |= TLFLAGS_IN_TDLIST;           \
        }                                                       \
} while (0)
#define THR_LIST_REMOVE(thrd) do {                              \
        if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) {       \
                TAILQ_REMOVE(&_thread_list, thrd, tle);         \
                _thr_hash_remove(thrd);                         \
                (thrd)->tlflags &= ~TLFLAGS_IN_TDLIST;          \
        }                                                       \
} while (0)
#define THR_GCLIST_ADD(thrd) do {                               \
        if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) {       \
                TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\
                (thrd)->tlflags |= TLFLAGS_IN_GCLIST;           \
                _thr_gc_count++;                                \
        }                                                       \
} while (0)
#define THR_GCLIST_REMOVE(thrd) do {                            \
        if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) {       \
                TAILQ_REMOVE(&_thread_gc_list, thrd, gcle);     \
                (thrd)->tlflags &= ~TLFLAGS_IN_GCLIST;          \
                _thr_gc_count--;                                \
        }                                                       \
} while (0)
#define GC_NEEDED()     (_thr_gc_count >= 5)

#define THR_IN_SYNCQ(thrd)      (((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0)

#define SHOULD_REPORT_EVENT(curthr, e)                  \
        (curthr->report_events &&                       \
         (((curthr)->event_mask | _thread_event_mask) & e) != 0)
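/*
 * An event is reported to the debugger only when the thread has
 * report_events enabled and the event bit is set either in the thread's
 * own event_mask or in the global _thread_event_mask.
 */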
#ifndef __LIBC_ISTHREADED_DECLARED
#define __LIBC_ISTHREADED_DECLARED
extern int __isthreaded;
#endif
/*
 * Global variables for the pthread library.
 */
extern char             *_usrstack;
extern pthread_t        _thr_initial;

/* For debugger */
extern int              _libthread_xu_debug;
extern int              _thread_event_mask;
extern pthread_t        _thread_last_event;

/* List of all threads */
extern struct thread_head       _thread_list;

/* List of threads needing GC */
extern struct thread_head       _thread_gc_list;

extern int              _thread_active_threads;

extern struct atfork_head       _thr_atfork_list;
extern struct atfork_head       _thr_atfork_kern_list;
extern umtx_t           _thr_atfork_lock;

/* Default thread attributes */
extern struct __pthread_attr_s _pthread_attr_default;

/* Default mutex attributes */
extern struct __pthread_mutexattr_s _pthread_mutexattr_default;

/* Default condition variable attributes */
extern struct __pthread_condattr_s _pthread_condattr_default;

extern pid_t    _thr_pid;
extern size_t   _thr_guard_default;
extern size_t   _thr_stack_default;
extern size_t   _thr_stack_initial;
extern int      _thr_page_size;
extern int      _thr_gc_count;

extern umtx_t   _mutex_static_lock;
extern umtx_t   _cond_static_lock;
extern umtx_t   _rwlock_static_lock;
extern umtx_t   _keytable_lock;
extern umtx_t   _thr_list_lock;
extern umtx_t   _thr_event_lock;
/*
 * Function prototype definitions.
 */
__BEGIN_DECLS
int     _thr_setthreaded(int);
int     _mutex_cv_lock(pthread_mutex_t *, int count);
int     _mutex_cv_unlock(pthread_mutex_t *, int *count);
void    _mutex_notify_priochange(pthread_t, pthread_t, int);
void    _mutex_fork(pthread_t, lwpid_t tid);
void    _mutex_unlock_private(pthread_t);

#if 0
int     _mutex_reinit(pthread_mutex_t *);
void    _cond_reinit(pthread_cond_t pcond);
void    _rwlock_reinit(pthread_rwlock_t prwlock);
#endif

void    _libpthread_init(pthread_t);
pthread_t _thr_alloc(pthread_t);
void    _thread_exit(const char *, int, const char *) __dead2;
void    _thread_exitf(const char *, int, const char *, ...) __dead2
            __printflike(3, 4);
void    _thr_exit_cleanup(void);
void    _thr_atfork_kern(void (*prepare)(void), void (*parent)(void),
            void (*child)(void));
int     _thr_ref_add(pthread_t, pthread_t, int);
void    _thr_ref_delete(pthread_t, pthread_t);
void    _thr_ref_delete_unlocked(pthread_t, pthread_t);
int     _thr_find_thread(pthread_t, pthread_t, int);
void    _thr_malloc_init(void);
void    _rtld_setthreaded(int);
void    _thr_rtld_init(void);
void    _thr_rtld_fini(void);
int     _thr_stack_alloc(pthread_attr_t);
void    _thr_stack_free(pthread_attr_t);
void    _thr_stack_cleanup(void);
void    _thr_sem_init(void);
void    _thr_free(pthread_t, pthread_t);
void    _thr_gc(pthread_t);
void    _thread_cleanupspecific(void);
void    _thread_dump_info(void);
void    _thread_printf(int, const char *, ...) __printflike(2, 3);
void    _thread_vprintf(int, const char *, va_list);
void    _thr_spinlock_init(void);
int     _thr_cancel_enter(pthread_t);
void    _thr_cancel_leave(pthread_t, int);
void    _thr_signal_block(pthread_t);
void    _thr_signal_unblock(pthread_t);
void    _thr_signal_init(void);
void    _thr_signal_deinit(void);
int     _thr_send_sig(pthread_t, int sig);
void    _thr_list_init(void);
void    _thr_hash_add(pthread_t);
void    _thr_hash_remove(pthread_t);
pthread_t _thr_hash_find(pthread_t);
void    _thr_link(pthread_t, pthread_t);
void    _thr_unlink(pthread_t, pthread_t);
void    _thr_suspend_check(pthread_t);
void    _thr_assert_lock_level(void) __dead2;
void    _thr_ast(pthread_t);
int     _thr_get_tid(void);
void    _thr_report_creation(pthread_t, pthread_t);
void    _thr_report_death(pthread_t);
void    _thread_bp_create(void);
void    _thread_bp_death(void);
int     _thr_getscheduler(lwpid_t, int *, struct sched_param *);
int     _thr_setscheduler(lwpid_t, int, const struct sched_param *);
int     _thr_set_sched_other_prio(pthread_t, int);
int     _rtp_to_schedparam(const struct rtprio *rtp, int *policy,
            struct sched_param *param);
int     _schedparam_to_rtp(int policy, const struct sched_param *param,
            struct rtprio *rtp);
int     _umtx_sleep_err(volatile const int *, int, int);
int     _umtx_wakeup_err(volatile const int *, int);
/* #include <fcntl.h> */
#ifdef _SYS_FCNTL_H_
int     __sys_fcntl(int, int, ...);
int     __sys_open(const char *, int, ...);
int     __sys_openat(int, const char *, int, ...);
#endif

/* #include <sys/ioctl.h> */
#ifdef _SYS_IOCTL_H_
int     __sys_ioctl(int, unsigned long, ...);
#endif

/* #include <sched.h> */
#ifdef _SYS_SCHED_H_
int     __sys_sched_yield(void);
#endif

/* #include <signal.h> */
#ifdef _SIGNAL_H_
int     __sys_sigaction(int, const struct sigaction *, struct sigaction *);
int     __sys_sigprocmask(int, const sigset_t *, sigset_t *);
int     __sys_sigsuspend(const sigset_t *);
#endif

/* #include <time.h> */
#ifdef _TIME_H_
int     __sys_nanosleep(const struct timespec *, struct timespec *);
int     __sys_clock_nanosleep(clockid_t, int, const struct timespec *,
            struct timespec *);
#endif

/* #include <unistd.h> */
#ifdef _UNISTD_H_
int     __sys_close(int);
pid_t   __sys_getpid(void);
ssize_t __sys_read(int, void *, size_t);
ssize_t __sys_write(int, const void *, size_t);
int     __sys_sigtimedwait(const sigset_t *, siginfo_t *,
            const struct timespec *);
int     __sys_sigwaitinfo(const sigset_t *set, siginfo_t *info);
#endif
static inline int
_thr_isthreaded(void)
{
        return (__isthreaded != 0);
}

static inline int
_thr_is_inited(void)
{
        return (_thr_initial != NULL);
}
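/*
 * _thr_check_init() below performs lazy initialization: the first caller
 * that still sees _thr_initial == NULL bootstraps the library via
 * _libpthread_init(NULL).
 */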
static inline void
_thr_check_init(void)
{
        if (_thr_initial == NULL)
                _libpthread_init(NULL);
}
struct dl_phdr_info;
void    __pthread_cxa_finalize(struct dl_phdr_info *phdr_info);

/*
 * Used in low-level init to directly call libc's malloc implementation
 * instead of a potentially third-party malloc implementation.  Required
 * for bootstrapping pthreads.
 */
void    *__malloc(size_t bytes);
void    __free(void *ptr);

__END_DECLS

#endif  /* !_THR_PRIVATE_H */