lib/libc_r/uthread/pthread_private.h  [dragonfly.git]
/*
 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Private thread definitions for the uthread kernel.
 *
 * $FreeBSD: src/lib/libc_r/uthread/pthread_private.h,v 1.36.2.21 2002/10/22 14:44:02 fjoe Exp $
 */

#ifndef _PTHREAD_PRIVATE_H
#define _PTHREAD_PRIVATE_H

/*
 * Evaluate the storage class specifier.
 */
#ifdef GLOBAL_PTHREAD_PRIVATE
#define SCLASS
#else
#define SCLASS extern
#endif

/*
 * Include files.
 */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/cdefs.h>
#include <sys/sched.h>
#include <spinlock.h>
#include <pthread_np.h>

#include <machine/tls.h>

/*
 * Define machine dependent macros to get and set the stack pointer
 * from the supported contexts.  Also define a macro to set the return
 * address in a jmp_buf context.
 *
 * XXX - These need to be moved into architecture dependent support files.
 */
#if defined(__x86_64__)
#define	GET_STACK_JB(jb)	((unsigned long)((jb)[0]._jb[2]))
#define	GET_STACK_SJB(sjb)	((unsigned long)((sjb)[0]._sjb[2]))
#define	GET_STACK_UC(ucp)	((unsigned long)((ucp)->uc_mcontext.mc_rsp))
#define	SET_STACK_JB(jb, stk)	(jb)[0]._jb[2] = (long)(stk)
#define	SET_STACK_SJB(sjb, stk)	(sjb)[0]._sjb[2] = (long)(stk)
#define	SET_STACK_UC(ucp, stk)	(ucp)->uc_mcontext.mc_rsp = (long)(stk)
#define	FP_SAVE_UC(ucp)		do {				\
	char	*fdata;						\
	fdata = (char *) (ucp)->uc_mcontext.mc_fpstate;		\
	__asm__("fxsave %0": :"m"(*fdata));			\
} while (0)
#define	FP_RESTORE_UC(ucp)	do {				\
	char	*fdata;						\
	fdata = (char *) (ucp)->uc_mcontext.mc_fpstate;		\
	__asm__("fxrstor %0": :"m"(*fdata));			\
} while (0)
#define	SET_RETURN_ADDR_JB(jb, ra)	(jb)[0]._jb[0] = (long)(ra)
#else
#error "Don't recognize this architecture!"
#endif

/*
 * Kernel fatal error handler macro.
 */
#define PANIC(string)	_thread_exit(__FILE__, __LINE__, string)

/* Output debug messages like this: */
#define stdout_debug(args...)	do {			\
	char buf[128];					\
	snprintf(buf, sizeof(buf), ##args);		\
	__sys_extpwrite(1, buf, strlen(buf), O_FBLOCKING, -1);	\
} while (0)
#define stderr_debug(args...)	do {			\
	char buf[128];					\
	snprintf(buf, sizeof(buf), ##args);		\
	__sys_extpwrite(2, buf, strlen(buf), O_FBLOCKING, -1);	\
} while (0)
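
/*
 * Illustrative use of the debug macros above (a sketch only, kept under
 * "#if 0" and never compiled).  The macros take printf-style arguments,
 * format into a small on-stack buffer, and write with the raw
 * __sys_extpwrite() system call so the output bypasses the library's own
 * file descriptor locking.  "example_debug" is a hypothetical function,
 * not part of this header.
 */
#if 0
static void
example_debug(struct pthread *thread)
{
	/* Print a short diagnostic line about a thread to stderr. */
	stderr_debug("thread %p (%s) in state %d\n",
	    thread, thread->name, (int)thread->state);
}
#endif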

/*
 * Priority queue manipulation macros (using pqe link):
 */
#define PTHREAD_PRIOQ_INSERT_HEAD(thrd)	_pq_insert_head(&_readyq,thrd)
#define PTHREAD_PRIOQ_INSERT_TAIL(thrd)	_pq_insert_tail(&_readyq,thrd)
#define PTHREAD_PRIOQ_REMOVE(thrd)	_pq_remove(&_readyq,thrd)
#define PTHREAD_PRIOQ_FIRST()		_pq_first(&_readyq)

/*
 * Waiting queue manipulation macros (using pqe link):
 */
#define PTHREAD_WAITQ_REMOVE(thrd)	_waitq_remove(thrd)
#define PTHREAD_WAITQ_INSERT(thrd)	_waitq_insert(thrd)

#if defined(_PTHREADS_INVARIANTS)
#define PTHREAD_WAITQ_CLEARACTIVE()	_waitq_clearactive()
#define PTHREAD_WAITQ_SETACTIVE()	_waitq_setactive()
#else
#define PTHREAD_WAITQ_CLEARACTIVE()
#define PTHREAD_WAITQ_SETACTIVE()
#endif

/*
 * Work queue manipulation macros (using qe link):
 */
#define PTHREAD_WORKQ_INSERT(thrd) do {				\
	TAILQ_INSERT_TAIL(&_workq,thrd,qe);			\
	(thrd)->flags |= PTHREAD_FLAGS_IN_WORKQ;		\
} while (0)
#define PTHREAD_WORKQ_REMOVE(thrd) do {				\
	TAILQ_REMOVE(&_workq,thrd,qe);				\
	(thrd)->flags &= ~PTHREAD_FLAGS_IN_WORKQ;		\
} while (0)

/*
 * State change macro without scheduling queue change:
 */
#define PTHREAD_SET_STATE(thrd, newstate) do {			\
	(thrd)->state = newstate;				\
	(thrd)->fname = __FILE__;				\
	(thrd)->lineno = __LINE__;				\
} while (0)

/*
 * State change macro with scheduling queue change - This must be
 * called with preemption deferred (see thread_kern_sched_[un]defer).
 */
#if defined(_PTHREADS_INVARIANTS)
#include <assert.h>
#define PTHREAD_ASSERT(cond, msg) do {				\
	if (!(cond))						\
		PANIC(msg);					\
} while (0)
#define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd)			\
	PTHREAD_ASSERT((((thrd)->flags & PTHREAD_FLAGS_IN_SYNCQ) == 0), \
	    "Illegal call from signal handler");
#define PTHREAD_NEW_STATE(thrd, newstate) do {			\
	if (_thread_kern_new_state != 0)			\
		PANIC("Recursive PTHREAD_NEW_STATE");		\
	_thread_kern_new_state = 1;				\
	if ((thrd)->state != newstate) {			\
		if ((thrd)->state == PS_RUNNING) {		\
			PTHREAD_PRIOQ_REMOVE(thrd);		\
			PTHREAD_SET_STATE(thrd, newstate);	\
			PTHREAD_WAITQ_INSERT(thrd);		\
		} else if (newstate == PS_RUNNING) {		\
			PTHREAD_WAITQ_REMOVE(thrd);		\
			PTHREAD_SET_STATE(thrd, newstate);	\
			PTHREAD_PRIOQ_INSERT_TAIL(thrd);	\
		}						\
	}							\
	_thread_kern_new_state = 0;				\
} while (0)
#else
#define PTHREAD_ASSERT(cond, msg)
#define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd)
#define PTHREAD_NEW_STATE(thrd, newstate) do {			\
	if ((thrd)->state != newstate) {			\
		if ((thrd)->state == PS_RUNNING) {		\
			PTHREAD_PRIOQ_REMOVE(thrd);		\
			PTHREAD_WAITQ_INSERT(thrd);		\
		} else if (newstate == PS_RUNNING) {		\
			PTHREAD_WAITQ_REMOVE(thrd);		\
			PTHREAD_PRIOQ_INSERT_TAIL(thrd);	\
		}						\
	}							\
	PTHREAD_SET_STATE(thrd, newstate);			\
} while (0)
#endif
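
/*
 * Illustrative (hypothetical) call site for PTHREAD_NEW_STATE, kept under
 * "#if 0".  The macro moves a thread between the ready queue and the
 * waiting queue, so per the comment above it may only be used while
 * preemption is deferred; a typical caller brackets it with
 * _thread_kern_sig_defer()/_thread_kern_sig_undefer() (both declared later
 * in this header).  "example_block_on_mutex" is not part of the library.
 */
#if 0
static void
example_block_on_mutex(struct pthread *thread)
{
	_thread_kern_sig_defer();		/* defer scheduling signals */
	PTHREAD_NEW_STATE(thread, PS_MUTEX_WAIT); /* ready -> waiting queue */
	_thread_kern_sig_undefer();		/* may yield on undefer */
}
#endif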

/*
 * Define the signals to be used for scheduling.
 */
#if defined(_PTHREADS_COMPAT_SCHED)
#define _ITIMER_SCHED_TIMER	ITIMER_VIRTUAL
#define _SCHED_SIGNAL		SIGVTALRM
#else
#define _ITIMER_SCHED_TIMER	ITIMER_PROF
#define _SCHED_SIGNAL		SIGPROF
#endif

/*
 * Priority queues.
 *
 * XXX It'd be nice if these were contained in uthread_priority_queue.[ch].
 */
typedef struct pq_list {
	TAILQ_HEAD(, pthread)	pl_head;   /* list of threads at this priority */
	TAILQ_ENTRY(pq_list)	pl_link;   /* link for queue of priority lists */
	int			pl_prio;   /* the priority of this list */
	int			pl_queued; /* is this in the priority queue */
} pq_list_t;

typedef struct pq_queue {
	TAILQ_HEAD(, pq_list)	pq_queue;  /* queue of priority lists */
	pq_list_t		*pq_lists; /* array of all priority lists */
	int			pq_size;   /* number of priority lists */
} pq_queue_t;

/*
 * TailQ initialization values.
 */
#define TAILQ_INITIALIZER	{ NULL, NULL }

/*
 * Mutex definitions.
 */
union pthread_mutex_data {
	void	*m_ptr;
	int	m_count;
};

struct pthread_mutex {
	enum pthread_mutextype		m_type;
	int				m_protocol;
	TAILQ_HEAD(mutex_head, pthread)	m_queue;
	struct pthread			*m_owner;
	union pthread_mutex_data	m_data;
	long				m_flags;
	int				m_refcount;

	/*
	 * Used for priority inheritance and protection.
	 *
	 *   m_prio       - For priority inheritance, the highest active
	 *                  priority (threads locking the mutex inherit
	 *                  this priority).  For priority protection, the
	 *                  ceiling priority of this mutex.
	 *   m_saved_prio - The mutex owner's inherited priority before
	 *                  taking the mutex, restored when the owner
	 *                  unlocks the mutex.
	 */
	int				m_prio;
	int				m_saved_prio;

	/*
	 * Link for list of all mutexes a thread currently owns.
	 */
	TAILQ_ENTRY(pthread_mutex)	m_qe;

	/*
	 * Lock for accesses to this structure.
	 */
	spinlock_t			lock;
};

/*
 * Flags for mutexes.
 */
#define MUTEX_FLAGS_PRIVATE	0x01
#define MUTEX_FLAGS_INITED	0x02
#define MUTEX_FLAGS_BUSY	0x04

/*
 * Static mutex initialization values.
 */
#define PTHREAD_MUTEX_STATIC_INITIALIZER				\
	{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER,	\
	NULL, { NULL }, MUTEX_FLAGS_PRIVATE, 0, 0, 0, TAILQ_INITIALIZER, \
	_SPINLOCK_INITIALIZER }

struct pthread_mutex_attr {
	enum pthread_mutextype	m_type;
	int			m_protocol;
	int			m_ceiling;
	long			m_flags;
};

/*
 * Condition variable definitions.
 */
enum pthread_cond_type {
	COND_TYPE_FAST,
	COND_TYPE_MAX
};

struct pthread_cond {
	enum pthread_cond_type		c_type;
	TAILQ_HEAD(cond_head, pthread)	c_queue;
	pthread_mutex_t			c_mutex;
	void				*c_data;
	long				c_flags;
	int				c_seqno;

	/*
	 * Lock for accesses to this structure.
	 */
	spinlock_t			lock;
};

struct pthread_cond_attr {
	enum pthread_cond_type	c_type;
	long			c_flags;
};

/*
 * Flags for condition variables.
 */
#define COND_FLAGS_PRIVATE	0x01
#define COND_FLAGS_INITED	0x02
#define COND_FLAGS_BUSY		0x04

/*
 * Static cond initialization values.
 */
#define PTHREAD_COND_STATIC_INITIALIZER				\
	{ COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL,	\
	0, 0, _SPINLOCK_INITIALIZER }

/*
 * Semaphore definitions.
 */
struct sem {
#define	SEM_MAGIC	((u_int32_t) 0x09fa4012)
	u_int32_t	magic;
	pthread_mutex_t	lock;
	pthread_cond_t	gtzero;
	u_int32_t	count;
	u_int32_t	nwaiters;
};

/*
 * Cleanup definitions.
 */
struct pthread_cleanup {
	struct pthread_cleanup	*next;
	void			(*routine)(void *);
	void			*routine_arg;
};

struct pthread_atfork {
	TAILQ_ENTRY(pthread_atfork) qe;
	void	(*prepare)(void);
	void	(*parent)(void);
	void	(*child)(void);
};

struct pthread_attr {
	int	sched_policy;
	int	sched_inherit;
	int	sched_interval;
	int	prio;
	int	suspend;
	int	flags;
	void	*arg_attr;
	void	(*cleanup_attr)(void *);
	void	*stackaddr_attr;
	size_t	stacksize_attr;
};

/*
 * Thread creation state attributes.
 */
#define PTHREAD_CREATE_RUNNING		0
#define PTHREAD_CREATE_SUSPENDED	1

/*
 * Miscellaneous definitions.
 */
#define PTHREAD_STACK_DEFAULT		65536

/*
 * Size of red zone at the end of each stack.  In actuality, this "red zone" is
 * merely an unmapped region, except in the case of the initial stack.  Since
 * mmap() makes it possible to specify the maximum growth of a MAP_STACK region,
 * an unmapped gap between thread stacks achieves the same effect as explicitly
 * mapped red zones.
 */
#define PTHREAD_STACK_GUARD		PAGE_SIZE

/*
 * Maximum size of initial thread's stack.  This perhaps deserves to be larger
 * than the stacks of other threads, since many applications are likely to run
 * almost entirely on this stack.
 */
#define PTHREAD_STACK_INITIAL		0x100000

/* Size of the scheduler stack: */
#define SCHED_STACK_SIZE		PAGE_SIZE

/*
 * Define the different priority ranges.  All applications have thread
 * priorities constrained within 0-31.  The threads library raises the
 * priority when delivering signals in order to ensure that signal
 * delivery happens (from the POSIX spec) "as soon as possible".
 * In the future, the threads library will also be able to map specific
 * threads into real-time (cooperating) processes or kernel threads.
 * The RT and SIGNAL priorities will be used internally and added to
 * thread base priorities so that the scheduling queue can handle both
 * normal and RT priority threads with and without signal handling.
 *
 * The approach taken is that, within each class, signal delivery
 * always has priority over thread execution.
 */
#define PTHREAD_DEFAULT_PRIORITY	15
#define PTHREAD_MIN_PRIORITY		0
#define PTHREAD_MAX_PRIORITY		31	/* 0x1F */
#define PTHREAD_SIGNAL_PRIORITY		32	/* 0x20 */
#define PTHREAD_RT_PRIORITY		64	/* 0x40 */
#define PTHREAD_FIRST_PRIORITY		PTHREAD_MIN_PRIORITY
#define PTHREAD_LAST_PRIORITY	\
	(PTHREAD_MAX_PRIORITY + PTHREAD_SIGNAL_PRIORITY + PTHREAD_RT_PRIORITY)
#define PTHREAD_BASE_PRIORITY(prio)	((prio) & PTHREAD_MAX_PRIORITY)
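
/*
 * Worked example of the priority classes above (illustrative only, under
 * "#if 0"; "example_priority_decomposition" is not part of the library).
 * A thread at the default base priority of 15 that is having a signal
 * delivered is queued at 15 + PTHREAD_SIGNAL_PRIORITY = 47, which is still
 * within PTHREAD_LAST_PRIORITY (31 + 32 + 64 = 127), and
 * PTHREAD_BASE_PRIORITY(47) masks the boost back off to 15.
 */
#if 0
static void
example_priority_decomposition(void)
{
	int boosted = PTHREAD_DEFAULT_PRIORITY + PTHREAD_SIGNAL_PRIORITY;

	PTHREAD_ASSERT(boosted <= PTHREAD_LAST_PRIORITY,
	    "boosted priority out of range");
	PTHREAD_ASSERT(PTHREAD_BASE_PRIORITY(boosted) ==
	    PTHREAD_DEFAULT_PRIORITY, "base priority lost");
}
#endif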

/*
 * Clock resolution in microseconds.
 */
#define CLOCK_RES_USEC		10000
#define CLOCK_RES_USEC_MIN	1000

/*
 * Time slice period in microseconds.
 */
#define TIMESLICE_USEC		20000

/*
 * Define a thread-safe macro to get the current time of day
 * which is updated at regular intervals by the scheduling signal
 * handler.
 */
#define	GET_CURRENT_TOD(tv)				\
	do {						\
		tv.tv_sec = _sched_tod.tv_sec;		\
		tv.tv_usec = _sched_tod.tv_usec;	\
	} while (tv.tv_sec != _sched_tod.tv_sec)
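
/*
 * Illustrative use of GET_CURRENT_TOD (a sketch under "#if 0";
 * "example_current_time" is not part of the library).  The macro copies
 * _sched_tod field by field and retries if the seconds value changed
 * underneath it, so callers get a consistent snapshot without taking a
 * lock even though the scheduling signal handler may update _sched_tod
 * at any time.
 */
#if 0
static void
example_current_time(struct timespec *ts)
{
	struct timeval tv;

	GET_CURRENT_TOD(tv);
	TIMEVAL_TO_TIMESPEC(&tv, ts);	/* from <sys/time.h> */
}
#endif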

struct pthread_key {
	spinlock_t	lock;
	volatile int	allocated;
	volatile int	count;
	void		(*destructor)(void *);
};

struct pthread_rwlockattr {
	int		pshared;
};

struct pthread_rwlock {
	pthread_mutex_t	lock;	/* monitor lock */
	int		state;	/* 0 = idle, >0 = # of readers, -1 = writer */
	pthread_cond_t	read_signal;
	pthread_cond_t	write_signal;
	int		blocked_writers;
};

/*
 * Thread states.
 */
enum pthread_state {
	PS_RUNNING,
	PS_SIGTHREAD,
	PS_MUTEX_WAIT,
	PS_COND_WAIT,
	PS_FDLR_WAIT,
	PS_FDLW_WAIT,
	PS_FDR_WAIT,
	PS_FDW_WAIT,
	PS_POLL_WAIT,
	PS_SELECT_WAIT,
	PS_SLEEP_WAIT,
	PS_WAIT_WAIT,
	PS_SIGSUSPEND,
	PS_SIGWAIT,
	PS_SPINBLOCK,
	PS_JOIN,
	PS_SUSPENDED,
	PS_DEAD,
	PS_DEADLOCK,
	PS_STATE_MAX
};

/*
 * File descriptor locking definitions.
 */
#define FD_READ		0x1
#define FD_WRITE	0x2
#define FD_RDWR		(FD_READ | FD_WRITE)

/*
 * File descriptor table structure.
 */
struct fd_table_entry {
	/*
	 * Lock for accesses to this file descriptor table
	 * entry.  This is passed to _spinlock() to provide atomic
	 * access to this structure.  It does *not* represent the
	 * state of the lock on the file descriptor.
	 */
	spinlock_t		lock;
	TAILQ_HEAD(, pthread)	r_queue;	/* Read queue. */
	TAILQ_HEAD(, pthread)	w_queue;	/* Write queue. */
	struct pthread		*r_owner;	/* Ptr to thread owning read lock. */
	struct pthread		*w_owner;	/* Ptr to thread owning write lock. */
	char			*r_fname;	/* Ptr to read lock source file name */
	int			r_lineno;	/* Read lock source line number. */
	char			*w_fname;	/* Ptr to write lock source file name */
	int			w_lineno;	/* Write lock source line number. */
	int			r_lockcount;	/* Count for FILE read locks. */
	int			w_lockcount;	/* Count for FILE write locks. */
	int			flags;		/* Flags used in open. */
};

struct pthread_poll_data {
	int		nfds;
	struct pollfd	*fds;
};

union pthread_wait_data {
	pthread_mutex_t	mutex;
	pthread_cond_t	cond;
	const sigset_t	*sigwait;	/* Waiting on a signal in sigwait */
	struct {
		short	fd;		/* Used when thread waiting on fd */
		short	branch;		/* Line number, for debugging. */
		char	*fname;		/* Source file name for debugging. */
	} fd;
	FILE		*fp;
	struct pthread_poll_data *poll_data;
	spinlock_t	*spinlock;
	struct pthread	*thread;
};

/*
 * Define a continuation routine that can be used to perform a
 * transfer of control:
 */
typedef void	(*thread_continuation_t)(void *);

struct pthread_signal_frame;

struct pthread_state_data {
	struct pthread_signal_frame *psd_curframe;
	sigset_t		psd_sigmask;
	struct timespec		psd_wakeup_time;
	union pthread_wait_data	psd_wait_data;
	enum pthread_state	psd_state;
	int			psd_flags;
	int			psd_interrupted;
	int			psd_longjmp_val;
	int			psd_sigmask_seqno;
	int			psd_signo;
	int			psd_sig_defer_count;
	/* XXX - What about thread->timeout and/or thread->error? */
};

struct join_status {
	struct pthread	*thread;
	void		*ret;
	int		error;
};

/*
 * The frame that is added to the top of a thread's stack when setting up
 * the thread to run a signal handler.
 */
struct pthread_signal_frame {
	/*
	 * This stores the thread's state before the signal.
	 */
	struct pthread_state_data saved_state;

	/*
	 * Thread's return context; we use only jmp_buf's for now.
	 */
	union {
		jmp_buf		jb;
		ucontext_t	uc;
	} ctx;
	int		signo;		/* signal, arg 1 to sighandler */
	int		sig_has_args;	/* use signal args if true */
	ucontext_t	uc;
	siginfo_t	siginfo;
};

/*
 * Thread structure.
 */
struct pthread {
	/*
	 * Magic value to help recognize a valid thread structure
	 * from an invalid one:
	 */
#define	PTHREAD_MAGIC ((u_int32_t) 0xd09ba115)
	u_int32_t	magic;
	char		*name;
	u_int64_t	uniqueid;	/* for gdb */
	struct tls_tcb	*tcb;

	/*
	 * Lock for accesses to this thread structure.
	 */
	spinlock_t	lock;

	/* Queue entry for list of all threads: */
	TAILQ_ENTRY(pthread) tle;

	/* Queue entry for list of dead threads: */
	TAILQ_ENTRY(pthread) dle;

	/*
	 * Thread start routine, argument, stack pointer and thread
	 * attributes.
	 */
	void		*(*start_routine)(void *);
	void		*arg;
	void		*stack;
	struct pthread_attr attr;

	/*
	 * Thread's return context; we use only jmp_buf's for now.
	 */
	union {
		jmp_buf		jb;
		ucontext_t	uc;
	} ctx;

	/*
	 * Used for tracking delivery of signal handlers.
	 */
	struct pthread_signal_frame *curframe;

	/*
	 * Cancelability flags - the lower 2 bits are used by cancel
	 * definitions in pthread.h
	 */
#define PTHREAD_AT_CANCEL_POINT		0x0004
#define PTHREAD_CANCELLING		0x0008
#define PTHREAD_CANCEL_NEEDED		0x0010
	int		cancelflags;

	thread_continuation_t continuation;

	/*
	 * Current signal mask and pending signals.
	 */
	sigset_t	sigmask;
	sigset_t	sigpend;
	int		sigmask_seqno;
	int		check_pending;

	/* Thread state: */
	enum pthread_state state;

	/* Scheduling clock when this thread was last made active. */
	long		last_active;

	/* Scheduling clock when this thread was last made inactive. */
	long		last_inactive;

	/*
	 * Number of microseconds accumulated by this thread when
	 * time slicing is active.
	 */
	long		slice_usec;

	/*
	 * Time to wake up thread.  This is used for sleeping threads and
	 * for any operation which may time out (such as select).
	 */
	struct timespec	wakeup_time;

	/* TRUE if operation has timed out. */
	int		timeout;

	/*
	 * The joiner is the thread that is joining to this thread.  The
	 * join status keeps track of a join operation to another thread.
	 */
	struct pthread	*joiner;
	struct join_status join_status;

	/*
	 * The current thread can belong to only one scheduling queue at
	 * a time (ready or waiting queue).  It can also belong to:
	 *
	 *   o A queue of threads waiting for a mutex
	 *   o A queue of threads waiting for a condition variable
	 *   o A queue of threads waiting for a file descriptor lock
	 *   o A queue of threads needing work done by the kernel thread
	 *     (waiting for a spinlock or file I/O)
	 *
	 * A thread can also be joining a thread (the joiner field above).
	 *
	 * It must not be possible for a thread to belong to any of the
	 * above queues while it is handling a signal.  Signal handlers
	 * may longjmp back to previous stack frames circumventing normal
	 * control flow.  This could corrupt queue integrity if the thread
	 * retains membership in the queue.  Therefore, if a thread is a
	 * member of one of these queues when a signal handler is invoked,
	 * it must remove itself from the queue before calling the signal
	 * handler and reinsert itself after normal return of the handler.
	 *
	 * Use pqe for the scheduling queue link (both ready and waiting),
	 * sqe for synchronization (mutex and condition variable) queue
	 * links, and qe for all other links.
	 */
	TAILQ_ENTRY(pthread)	pqe;	/* priority queue link */
	TAILQ_ENTRY(pthread)	sqe;	/* synchronization queue link */
	TAILQ_ENTRY(pthread)	qe;	/* all other queues link */

	/* Wait data. */
	union pthread_wait_data data;

	/*
	 * Allocated for converting select into poll.
	 */
	struct pthread_poll_data poll_data;

	/*
	 * Set to TRUE if a blocking operation was
	 * interrupted by a signal:
	 */
	int		interrupted;

	/* Signal number when in state PS_SIGWAIT: */
	int		signo;

	/*
	 * Set to non-zero when this thread has deferred signals.
	 * We allow for recursive deferral.
	 */
	int		sig_defer_count;

	/*
	 * Set to TRUE if this thread should yield after undeferring
	 * signals.
	 */
	int		yield_on_sig_undefer;

	/* Miscellaneous flags; only set with signals deferred. */
	int		flags;
#define PTHREAD_FLAGS_PRIVATE	0x0001
#define PTHREAD_EXITING		0x0002
#define PTHREAD_FLAGS_IN_WAITQ	0x0004	/* in waiting queue using pqe link */
#define PTHREAD_FLAGS_IN_PRIOQ	0x0008	/* in priority queue using pqe link */
#define PTHREAD_FLAGS_IN_WORKQ	0x0010	/* in work queue using qe link */
#define PTHREAD_FLAGS_IN_FILEQ	0x0020	/* in file lock queue using qe link */
#define PTHREAD_FLAGS_IN_FDQ	0x0040	/* in fd lock queue using qe link */
#define PTHREAD_FLAGS_IN_CONDQ	0x0080	/* in condition queue using sqe link */
#define PTHREAD_FLAGS_IN_MUTEXQ	0x0100	/* in mutex queue using sqe link */
#define PTHREAD_FLAGS_SUSPENDED	0x0200	/* thread is suspended */
#define PTHREAD_FLAGS_TRACE	0x0400	/* for debugging purposes */
#define PTHREAD_FLAGS_IN_SYNCQ	\
	(PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ)

	/*
	 * Base priority is the user-settable and retrievable priority
	 * of the thread.  It is only affected by explicit calls to
	 * set thread priority and upon thread creation via a thread
	 * attribute or default priority.
	 */
	char		base_priority;

	/*
	 * Inherited priority is the priority a thread inherits by
	 * taking a priority inheritance or protection mutex.  It
	 * is not affected by base priority changes.  Inherited
	 * priority defaults to and remains 0 until a mutex is taken
	 * that is being waited on by any other thread whose priority
	 * is non-zero.
	 */
	char		inherited_priority;

	/*
	 * Active priority is always the maximum of the thread's base
	 * priority and inherited priority.  When there is a change
	 * in either the base or inherited priority, the active
	 * priority must be recalculated.
	 */
	char		active_priority;

	/* Number of priority ceiling or protection mutexes owned. */
	int		priority_mutex_count;

	/*
	 * Queue of currently owned mutexes.
	 */
	TAILQ_HEAD(, pthread_mutex) mutexq;

	void		*ret;
	const void	**specific_data;
	int		specific_data_count;

	/* Cleanup handlers Link List */
	struct pthread_cleanup *cleanup;
	char		*fname;		/* Ptr to source file name */
	int		lineno;		/* Source line number. */
};

/* Spare thread stack. */
struct stack {
	SLIST_ENTRY(stack)	qe;	/* Queue entry for this stack. */
};

/*
 * Global variables for the uthread kernel.
 */

SCLASS void *_usrstack
#ifdef GLOBAL_PTHREAD_PRIVATE
= (void *) USRSTACK;
#else
;
#endif

/* Kernel thread structure used when there are no running threads: */
SCLASS struct pthread	_thread_kern_thread;

/* Ptr to the thread structure for the running thread: */
SCLASS struct pthread	* volatile _thread_run
#ifdef GLOBAL_PTHREAD_PRIVATE
= &_thread_kern_thread;
#else
;
#endif

/* Ptr to the thread structure for the last user thread to run: */
SCLASS struct pthread	* volatile _last_user_thread
#ifdef GLOBAL_PTHREAD_PRIVATE
= &_thread_kern_thread;
#else
;
#endif

/* List of all threads: */
SCLASS TAILQ_HEAD(, pthread)	_thread_list
#ifdef GLOBAL_PTHREAD_PRIVATE
= TAILQ_HEAD_INITIALIZER(_thread_list);
#else
;
#endif

/*
 * Array of kernel pipe file descriptors that are used to ensure that
 * no signals are missed in calls to _select.
 */
SCLASS int	_thread_kern_pipe[2]
#ifdef GLOBAL_PTHREAD_PRIVATE
= {
	-1,
	-1
};
#else
;
#endif
SCLASS int volatile	_queue_signals
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0;
#else
;
#endif
SCLASS int	_thread_kern_in_sched
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0;
#else
;
#endif

SCLASS int	_sig_in_handler
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0;
#else
;
#endif

/* Time of day at last scheduling timer signal: */
SCLASS struct timeval volatile	_sched_tod
#ifdef GLOBAL_PTHREAD_PRIVATE
= { 0, 0 };
#else
;
#endif

/*
 * Current scheduling timer ticks; used as resource usage.
 */
SCLASS unsigned int volatile	_sched_ticks
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0;
#else
;
#endif

/* Dead threads: */
SCLASS TAILQ_HEAD(, pthread)	_dead_list
#ifdef GLOBAL_PTHREAD_PRIVATE
= TAILQ_HEAD_INITIALIZER(_dead_list);
#else
;
#endif

/* Initial thread: */
SCLASS struct pthread	*_thread_initial
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL;
#else
;
#endif

SCLASS TAILQ_HEAD(atfork_head, pthread_atfork)	_atfork_list;
SCLASS pthread_mutex_t		_atfork_mutex;

/* Default thread attributes: */
SCLASS struct pthread_attr	pthread_attr_default
#ifdef GLOBAL_PTHREAD_PRIVATE
= { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY, PTHREAD_CREATE_RUNNING,
	PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL, PTHREAD_STACK_DEFAULT };
#else
;
#endif

/* Default mutex attributes: */
SCLASS struct pthread_mutex_attr	pthread_mutexattr_default
#ifdef GLOBAL_PTHREAD_PRIVATE
= { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 };
#else
;
#endif

/* Default condition variable attributes: */
SCLASS struct pthread_cond_attr	pthread_condattr_default
#ifdef GLOBAL_PTHREAD_PRIVATE
= { COND_TYPE_FAST, 0 };
#else
;
#endif

/*
 * Standard I/O file descriptors need special flag treatment since
 * setting one to non-blocking sets them all on *BSD.  Sigh.  This array
 * is used to store the initial flag settings.
 */
SCLASS int	_pthread_stdio_flags[3];

/* File table information: */
SCLASS struct fd_table_entry	**_thread_fd_table
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL;
#else
;
#endif

/* Table for polling file descriptors: */
SCLASS struct pollfd	*_thread_pfd_table
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL;
#else
;
#endif

SCLASS const int	dtablecount
#ifdef GLOBAL_PTHREAD_PRIVATE
= 4096/sizeof(struct fd_table_entry);
#else
;
#endif
SCLASS int	_thread_dtablesize	/* Descriptor table size. */
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0;
#else
;
#endif

SCLASS int	_clock_res_usec		/* Clock resolution in usec. */
#ifdef GLOBAL_PTHREAD_PRIVATE
= CLOCK_RES_USEC;
#else
;
#endif

/* Garbage collector mutex and condition variable. */
SCLASS pthread_mutex_t	_gc_mutex
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL
#endif
;
SCLASS pthread_cond_t	_gc_cond
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL
#endif
;

/*
 * Array of signal actions for this process.
 */
SCLASS struct sigaction	_thread_sigact[NSIG];

/*
 * Array of counts of dummy handlers for SIG_DFL signals.  This is used to
 * assure that there is always a dummy signal handler installed while there is a
 * thread sigwait()ing on the corresponding signal.
 */
SCLASS int	_thread_dfl_count[NSIG];

/*
 * Pending signals and mask for this process:
 */
SCLASS sigset_t	_process_sigpending;
SCLASS sigset_t	_process_sigmask
#ifdef GLOBAL_PTHREAD_PRIVATE
= { {0, 0, 0, 0} }
#endif
;

/*
 * Scheduling queues:
 */
SCLASS pq_queue_t		_readyq;
SCLASS TAILQ_HEAD(, pthread)	_waitingq;

/*
 * Work queue:
 */
SCLASS TAILQ_HEAD(, pthread)	_workq;

/* Tracks the number of threads blocked while waiting for a spinlock. */
SCLASS volatile int	_spinblock_count
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0
#endif
;

/* Used to maintain pending and active signals: */
struct sigstatus {
	int		pending;	/* Is this a pending signal? */
	int		blocked;	/*
					 * A handler is currently active for
					 * this signal; ignore subsequent
					 * signals until the handler is done.
					 */
	int		signo;		/* arg 1 to signal handler */
	siginfo_t	siginfo;	/* arg 2 to signal handler */
	ucontext_t	uc;		/* arg 3 to signal handler */
};

SCLASS struct sigstatus	_thread_sigq[NSIG];

/* Indicates that the signal queue needs to be checked. */
SCLASS volatile int	_sigq_check_reqd
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0
#endif
;

/* Thread switch hook. */
SCLASS pthread_switch_routine_t	_sched_switch_hook
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL
#endif
;

/*
 * Spare stack queue.  Stacks of default size are cached in order to reduce
 * thread creation time.  Spare stacks are used in LIFO order to increase cache
 * locality.
 */
SCLASS SLIST_HEAD(, stack)	_stackq;

/*
 * Base address of next unallocated default-size {stack, red zone}.  Stacks are
 * allocated contiguously, starting below the bottom of the main stack.  When a
 * new stack is created, a red zone is created (actually, the red zone is simply
 * left unmapped) below the bottom of the stack, such that the stack will not be
 * able to grow all the way to the top of the next stack.  This isn't
 * fool-proof.  It is possible for a stack to grow by a large amount, such that
 * it grows into the next stack, and as long as the memory within the red zone
 * is never accessed, nothing will prevent one thread stack from trouncing all
 * over the next.
 */
SCLASS void	*_next_stack
#ifdef GLOBAL_PTHREAD_PRIVATE
/* main stack top - main stack size - stack size - (red zone + main stack red zone) */
= (void *) USRSTACK - PTHREAD_STACK_INITIAL - PTHREAD_STACK_DEFAULT - (2 * PTHREAD_STACK_GUARD)
#endif
;
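
/*
 * Worked example of the initializer above (illustrative only, under "#if 0";
 * "example_first_stack_base" is not part of the library).  Reading the
 * comment's formula with the values defined earlier in this header, the
 * first default-size stack base sits PTHREAD_STACK_INITIAL (0x100000) plus
 * PTHREAD_STACK_DEFAULT (0x10000) plus two guard pages below USRSTACK.
 */
#if 0
static void
example_first_stack_base(void)
{
	void *expected = (void *) USRSTACK - PTHREAD_STACK_INITIAL -
	    PTHREAD_STACK_DEFAULT - (2 * PTHREAD_STACK_GUARD);

	PTHREAD_ASSERT(expected == _next_stack, "unexpected initial _next_stack");
}
#endif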

/*
 * Declare the kernel scheduler jump buffer and stack:
 */
SCLASS jmp_buf	_thread_kern_sched_jb;

SCLASS void	*_thread_kern_sched_stack
#ifdef GLOBAL_PTHREAD_PRIVATE
= NULL
#endif
;

/* Used for _PTHREADS_INVARIANTS checking. */
SCLASS int	_thread_kern_new_state
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0
#endif
;

/* Undefine the storage class specifier: */
#undef SCLASS

#ifdef _LOCK_DEBUG
#define	_FD_LOCK(_fd,_type,_ts)		_thread_fd_lock_debug(_fd, _type, \
						_ts, __FILE__, __LINE__)
#define	_FD_UNLOCK(_fd,_type)		_thread_fd_unlock_debug(_fd, _type, \
						__FILE__, __LINE__)
#else
#define	_FD_LOCK(_fd,_type,_ts)		_thread_fd_lock(_fd, _type, _ts)
#define	_FD_UNLOCK(_fd,_type)		_thread_fd_unlock(_fd, _type)
#endif
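
/*
 * Illustrative (hypothetical) use of the fd locking macros, under "#if 0";
 * "example_locked_read" is not part of the library.  A blocking wrapper such
 * as the library's read() replacement is expected to take the per-descriptor
 * read lock, perform the raw system call, and release the lock; when
 * _LOCK_DEBUG is defined, the debug variants above also record
 * __FILE__/__LINE__ for the lock owner.
 */
#if 0
static ssize_t
example_locked_read(int fd, void *buf, size_t nbytes)
{
	ssize_t ret;

	if ((ret = _FD_LOCK(fd, FD_READ, NULL)) == 0) {
		ret = __sys_read(fd, buf, nbytes);	/* raw syscall */
		_FD_UNLOCK(fd, FD_READ);
	}
	return (ret);
}
#endif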

/*
 * Function prototype definitions.
 */
__BEGIN_DECLS
int	_close(int);
char	*__ttyname_basic(int);
void	_cond_wait_backout(pthread_t);
void	_fd_lock_backout(pthread_t);
int	_find_thread(pthread_t);
struct pthread *_get_curthread(void);
void	_set_curthread(struct pthread *);
void	_flockfile_backout(struct pthread *);
void	_funlock_owned(struct pthread *);
int	_thread_create(pthread_t *,const pthread_attr_t *,void *(*start_routine)(void *),void *,pthread_t);
int	_mutex_cv_lock(pthread_mutex_t *);
int	_mutex_cv_unlock(pthread_mutex_t *);
void	_mutex_lock_backout(pthread_t);
void	_mutex_notify_priochange(pthread_t);
int	_mutex_reinit(pthread_mutex_t *);
void	_mutex_unlock_private(pthread_t);
int	_cond_reinit(pthread_cond_t *);
int	_pq_alloc(struct pq_queue *, int, int);
int	_pq_init(struct pq_queue *);
void	_pq_remove(struct pq_queue *pq, struct pthread *);
void	_pq_insert_head(struct pq_queue *pq, struct pthread *);
void	_pq_insert_tail(struct pq_queue *pq, struct pthread *);
struct pthread *_pq_first(struct pq_queue *pq);
void	*_pthread_getspecific(pthread_key_t);
int	_pthread_key_create(pthread_key_t *, void (*) (void *));
int	_pthread_key_delete(pthread_key_t);
int	_pthread_mutex_destroy(pthread_mutex_t *);
int	_pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *);
int	_pthread_mutex_lock(pthread_mutex_t *);
int	_pthread_mutex_trylock(pthread_mutex_t *);
int	_pthread_mutex_unlock(pthread_mutex_t *);
int	_pthread_once(pthread_once_t *, void (*) (void));
int	_pthread_setspecific(pthread_key_t, const void *);
int	_pthread_cond_init(pthread_cond_t *, const pthread_condattr_t *);
int	_pthread_cond_destroy(pthread_cond_t *);
int	_pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *);
int	_pthread_cond_timedwait(pthread_cond_t *, pthread_mutex_t *,
	    const struct timespec *);
int	_pthread_cond_signal(pthread_cond_t *);
int	_pthread_cond_broadcast(pthread_cond_t *);
void	_waitq_insert(pthread_t pthread);
void	_waitq_remove(pthread_t pthread);
#if defined(_PTHREADS_INVARIANTS)
void	_waitq_setactive(void);
void	_waitq_clearactive(void);
#endif
void	_thread_exit(char *, int, char *);
void	_thread_exit_cleanup(void);
int	_thread_fd_getflags(int);
int	_thread_fd_lock(int, int, struct timespec *);
int	_thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno);
void	_thread_fd_setflags(int, int);
int	_thread_fd_table_init(int fd);
void	_thread_fd_unlock(int, int);
void	_thread_fd_unlock_debug(int, int, char *, int);
void	_thread_fd_unlock_owned(pthread_t);
void	*_thread_cleanup(pthread_t);
void	_thread_cleanupspecific(void);
void	_thread_dump_info(void);
void	_thread_init(void);
void	_thread_kern_sched(ucontext_t *);
void	_thread_kern_scheduler(void);
void	_thread_kern_sched_frame(struct pthread_signal_frame *psf);
void	_thread_kern_sched_sig(void);
void	_thread_kern_sched_state(enum pthread_state, char *fname, int lineno);
void	_thread_kern_sched_state_unlock(enum pthread_state state,
	    spinlock_t *lock, char *fname, int lineno);
void	_thread_kern_set_timeout(const struct timespec *);
void	_thread_kern_sig_defer(void);
void	_thread_kern_sig_undefer(void);
void	_thread_mksigpipe(void);
void	_thread_sig_handler(int, siginfo_t *, ucontext_t *);
void	_thread_sig_check_pending(struct pthread *pthread);
void	_thread_sig_handle_pending(void);
void	_thread_sig_send(struct pthread *pthread, int sig);
void	_thread_sig_wrapper(void);
void	_thread_sigframe_restore(struct pthread *thread,
	    struct pthread_signal_frame *psf);
void	_thread_start(void);
pthread_addr_t _thread_gc(pthread_addr_t);
void	_thread_enter_cancellation_point(void);
void	_thread_leave_cancellation_point(void);
void	_thread_cancellation_point(void);

/* #include <aio.h> */
#ifdef _SYS_AIO_H_
int	__sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *);
#endif

/* #include <sys/event.h> */
#ifdef _SYS_EVENT_H_
int	__sys_kevent(int, const struct kevent *, int, struct kevent *,
	    int, const struct timespec *);
int	__sys_kqueue(void);
#endif

/* #include <sys/ioctl.h> */
#ifdef _SYS_IOCTL_H_
int	__sys_ioctl(int, unsigned long, ...);
#endif

/* #include <sys/mman.h> */
#ifdef _SYS_MMAN_H_
int	__sys_msync(void *, size_t, int);
#endif

/* #include <sys/mount.h> */
#ifdef _SYS_MOUNT_H_
int	__sys_fstatfs(int, struct statfs *);
#endif

/* #include <sys/socket.h> */
#ifdef _SYS_SOCKET_H_
int	__sys_accept(int, struct sockaddr *, socklen_t *);
int	__sys_extaccept(int, int, struct sockaddr *, socklen_t *);
int	__sys_bind(int, const struct sockaddr *, socklen_t);
int	__sys_connect(int, const struct sockaddr *, socklen_t);
int	__sys_extconnect(int, int, const struct sockaddr *, socklen_t);
int	__sys_getpeername(int, struct sockaddr *, socklen_t *);
int	__sys_getsockname(int, struct sockaddr *, socklen_t *);
int	__sys_getsockopt(int, int, int, void *, socklen_t *);
int	__sys_listen(int, int);
ssize_t	__sys_recvfrom(int, void *, size_t, int, struct sockaddr *, socklen_t *);
ssize_t	__sys_recvmsg(int, struct msghdr *, int);
int	__sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *, off_t *, int);
ssize_t	__sys_sendmsg(int, const struct msghdr *, int);
ssize_t	__sys_sendto(int, const void *,size_t, int, const struct sockaddr *, socklen_t);
int	__sys_setsockopt(int, int, int, const void *, socklen_t);
int	__sys_shutdown(int, int);
int	__sys_socket(int, int, int);
int	__sys_socketpair(int, int, int, int *);
#endif

/* #include <sys/stat.h> */
#ifdef _SYS_STAT_H_
int	__sys_fchflags(int, u_long);
int	__sys_fchmod(int, mode_t);
int	__sys_fchmodat(int, const char *, mode_t, int);
int	__sys_fstat(int, struct stat *);
int	__sys_fstatat(int, const char *, struct stat *, int);
#endif

/* #include <sys/uio.h> */
#ifdef _SYS_UIO_H_
ssize_t	__sys_readv(int, const struct iovec *, int);
ssize_t	__sys_writev(int, const struct iovec *, int);
ssize_t	__sys_extpreadv(int, const struct iovec *, int, int, off_t);
ssize_t	__sys_extpwritev(int, const struct iovec *, int, int, off_t);
#endif

/* #include <sys/wait.h> */
#ifdef WNOHANG
pid_t	__sys_wait4(pid_t, int *, int, struct rusage *);
#endif

/* #include <dirent.h> */
#ifdef _DIRENT_H_
int	__sys_getdirentries(int, char *, int, long *);
#endif

/* #include <fcntl.h> */
#ifdef _SYS_FCNTL_H_
int	__sys_fcntl(int, int, ...);
int	__sys_flock(int, int);
int	__sys_open(const char *, int, ...);
int	__sys_openat(int, const char *, int, ...);
#endif

/* #include <poll.h> */
#ifdef _SYS_POLL_H_
int	__sys_poll(struct pollfd *, unsigned, int);
#endif

/* #include <signal.h> */
#ifdef _SIGNAL_H_
int	__sys_sigaction(int, const struct sigaction *, struct sigaction *);
int	__sys_sigprocmask(int, const sigset_t *, sigset_t *);
int	__sys_sigreturn(ucontext_t *);
#endif

/* #include <unistd.h> */
#ifdef _UNISTD_H_
void	__sys_exit(int);
int	__sys_close(int);
int	__sys_closefrom(int);
int	__sys_dup(int);
int	__sys_dup2(int, int);
int	__sys_execve(const char *, char * const *, char * const *);
int	__sys_faccessat(int, const char *, int, int);
int	__sys_fchown(int, uid_t, gid_t);
int	__sys_fchownat(int, const char *, uid_t, gid_t, int);
long	__sys_fpathconf(int, int);
int	__sys_fsync(int);
int	__sys_pipe(int *);
ssize_t	__sys_read(int, void *, size_t);
ssize_t	__sys_extpread(int, void *, size_t, int, off_t);
ssize_t	__sys_write(int, const void *, size_t);
ssize_t	__sys_extpwrite(int, const void *, size_t, int, off_t);
int	__sys_unlinkat(int, const char *, int);
#endif

/* #include <setjmp.h> */
#ifdef _SETJMP_H_
extern void	__siglongjmp(sigjmp_buf, int) __dead2;
extern void	__longjmp(jmp_buf, int) __dead2;
extern void	___longjmp(jmp_buf, int) __dead2;
#endif
__END_DECLS

#endif	/* !_PTHREAD_PRIVATE_H */