4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2016 by Delphix. All rights reserved.
25 * Copyright (c) 2017 by The MathWorks, Inc. All rights reserved.
28 * Copyright 2016 Joyent, Inc.
32 #include "thr_uberdata.h"
40 * These symbols should not be exported from libc, but
41 * /lib/libm.so.2 references _thr_main. libm needs to be fixed.
42 * Also, some older versions of the Studio compiler/debugger
43 * components reference them. These need to be fixed, too.
45 #pragma weak _thr_main = thr_main
46 #pragma weak _thr_create = thr_create
47 #pragma weak _thr_join = thr_join
48 #pragma weak _thr_self = thr_self
54 * __libc_threaded symbol indicates that "more than one thread exists".
56 int __libc_threaded
= 0; /* zero until first thr_create() */
59 * thr_concurrency and pthread_concurrency are not used by the library.
60 * They exist solely to hold and return the values set by calls to
61 * thr_setconcurrency() and pthread_setconcurrency().
62 * Because thr_concurrency is affected by the THR_NEW_LWP flag
63 * to thr_create(), thr_concurrency is protected by link_lock.
65 static int thr_concurrency
= 1;
66 static int pthread_concurrency
;
68 #define HASHTBLSZ 1024 /* must be a power of two */
69 #define TIDHASH(tid, udp) (tid & (udp)->hash_mask)
71 /* initial allocation, just enough for one lwp */
72 #pragma align 64(init_hash_table)
73 thr_hash_table_t init_hash_table
[1] = {
74 { DEFAULTMUTEX
, DEFAULTCV
, NULL
},
77 extern const Lc_interface rtld_funcs
[];
80 * The weak version is known to libc_db and mdb.
82 #pragma weak _uberdata = __uberdata
83 uberdata_t __uberdata
= {
84 { DEFAULTMUTEX
, 0, 0 }, /* link_lock */
85 { RECURSIVEMUTEX
, 0, 0 }, /* ld_lock */
86 { RECURSIVEMUTEX
, 0, 0 }, /* fork_lock */
87 { RECURSIVEMUTEX
, 0, 0 }, /* atfork_lock */
88 { RECURSIVEMUTEX
, 0, 0 }, /* callout_lock */
89 { DEFAULTMUTEX
, 0, 0 }, /* tdb_hash_lock */
90 { 0, }, /* tdb_hash_lock_stats */
91 { { 0 }, }, /* siguaction[NSIG] */
92 {{ DEFAULTMUTEX
, NULL
, 0 }, /* bucket[NBUCKETS] */
93 { DEFAULTMUTEX
, NULL
, 0 },
94 { DEFAULTMUTEX
, NULL
, 0 },
95 { DEFAULTMUTEX
, NULL
, 0 },
96 { DEFAULTMUTEX
, NULL
, 0 },
97 { DEFAULTMUTEX
, NULL
, 0 },
98 { DEFAULTMUTEX
, NULL
, 0 },
99 { DEFAULTMUTEX
, NULL
, 0 },
100 { DEFAULTMUTEX
, NULL
, 0 },
101 { DEFAULTMUTEX
, NULL
, 0 }},
102 { RECURSIVEMUTEX
, NULL
, NULL
}, /* atexit_root */
103 { RECURSIVEMUTEX
, NULL
}, /* quickexit_root */
104 { DEFAULTMUTEX
, 0, 0, NULL
}, /* tsd_metadata */
105 { DEFAULTMUTEX
, {0, 0}, {0, 0} }, /* tls_metadata */
110 { 0 }, /* uberflags */
111 NULL
, /* queue_head */
112 init_hash_table
, /* thr_hash_table */
113 1, /* hash_size: size of the hash table */
114 0, /* hash_mask: hash_size - 1 */
117 NULL
, /* all_zombies */
122 sigacthandler
, /* sigacthandler */
123 NULL
, /* lwp_stacks */
124 NULL
, /* lwp_laststack */
126 10, /* thread_stack_cache */
127 NULL
, /* ulwp_freelist */
128 NULL
, /* ulwp_lastfree */
129 NULL
, /* ulwp_replace_free */
130 NULL
, /* ulwp_replace_last */
131 NULL
, /* atforklist */
132 NULL
, /* robustlocks */
133 NULL
, /* robustlist */
135 NULL
, /* ub_comm_page */
136 NULL
, /* __tdb_bootstrap */
138 NULL
, /* tdb_sync_addr_hash */
139 0, /* tdb_register_count */
140 0, /* tdb_hash_alloc_failed */
141 NULL
, /* tdb_sync_addr_free */
142 NULL
, /* tdb_sync_addr_last */
143 0, /* tdb_sync_alloc */
144 { 0, 0 }, /* tdb_ev_global_mask */
145 tdb_events
, /* tdb_events array */
150 * The weak version is known to libc_db and mdb.
152 #pragma weak _tdb_bootstrap = __tdb_bootstrap
153 uberdata_t
**__tdb_bootstrap
= NULL
;
155 int thread_queue_fifo
= 4;
156 int thread_queue_dump
= 0;
157 int thread_cond_wait_defer
= 0;
158 int thread_error_detection
= 0;
159 int thread_async_safe
= 0;
160 int thread_stack_cache
= 10;
161 int thread_door_noreserve
= 0;
162 int thread_locks_misaligned
= 0;
164 static ulwp_t
*ulwp_alloc(void);
165 static void ulwp_free(ulwp_t
*);
168 * Insert the lwp into the hash table.
171 hash_in_unlocked(ulwp_t
*ulwp
, int ix
, uberdata_t
*udp
)
173 ulwp
->ul_hash
= udp
->thr_hash_table
[ix
].hash_bucket
;
174 udp
->thr_hash_table
[ix
].hash_bucket
= ulwp
;
179 hash_in(ulwp_t
*ulwp
, uberdata_t
*udp
)
181 int ix
= TIDHASH(ulwp
->ul_lwpid
, udp
);
182 mutex_t
*mp
= &udp
->thr_hash_table
[ix
].hash_lock
;
185 hash_in_unlocked(ulwp
, ix
, udp
);
190 * Delete the lwp from the hash table.
193 hash_out_unlocked(ulwp_t
*ulwp
, int ix
, uberdata_t
*udp
)
197 for (ulwpp
= &udp
->thr_hash_table
[ix
].hash_bucket
;
199 ulwpp
= &(*ulwpp
)->ul_hash
)
201 *ulwpp
= ulwp
->ul_hash
;
202 ulwp
->ul_hash
= NULL
;
207 hash_out(ulwp_t
*ulwp
, uberdata_t
*udp
)
211 if ((ix
= ulwp
->ul_ix
) >= 0) {
212 mutex_t
*mp
= &udp
->thr_hash_table
[ix
].hash_lock
;
215 hash_out_unlocked(ulwp
, ix
, udp
);
221 * Retain stack information for thread structures that are being recycled for
222 * new threads. All other members of the thread structure should be zeroed.
225 ulwp_clean(ulwp_t
*ulwp
)
227 caddr_t stk
= ulwp
->ul_stk
;
228 size_t mapsiz
= ulwp
->ul_mapsiz
;
229 size_t guardsize
= ulwp
->ul_guardsize
;
230 uintptr_t stktop
= ulwp
->ul_stktop
;
231 size_t stksiz
= ulwp
->ul_stksiz
;
233 (void) memset(ulwp
, 0, sizeof (*ulwp
));
236 ulwp
->ul_mapsiz
= mapsiz
;
237 ulwp
->ul_guardsize
= guardsize
;
238 ulwp
->ul_stktop
= stktop
;
239 ulwp
->ul_stksiz
= stksiz
;
242 static int stackprot
;
245 * Answer the question, "Is the lwp in question really dead?"
246 * We must inquire of the operating system to be really sure
247 * because the lwp may have called lwp_exit() but it has not
248 * yet completed the exit.
251 dead_and_buried(ulwp_t
*ulwp
)
253 if (ulwp
->ul_lwpid
== (lwpid_t
)(-1))
255 if (ulwp
->ul_dead
&& ulwp
->ul_detached
&&
256 _lwp_kill(ulwp
->ul_lwpid
, 0) == ESRCH
) {
257 ulwp
->ul_lwpid
= (lwpid_t
)(-1);
264 * Attempt to keep the stack cache within the specified cache limit.
267 trim_stack_cache(int cache_limit
)
269 ulwp_t
*self
= curthread
;
270 uberdata_t
*udp
= self
->ul_uberdata
;
272 ulwp_t
**ulwpp
= &udp
->lwp_stacks
;
275 ASSERT(udp
->nthreads
<= 1 || MUTEX_OWNED(&udp
->link_lock
, self
));
277 while (udp
->nfreestack
> cache_limit
&& (ulwp
= *ulwpp
) != NULL
) {
278 if (dead_and_buried(ulwp
)) {
279 *ulwpp
= ulwp
->ul_next
;
280 if (ulwp
== udp
->lwp_laststack
)
281 udp
->lwp_laststack
= prev
;
284 (void) munmap(ulwp
->ul_stk
, ulwp
->ul_mapsiz
);
286 * Now put the free ulwp on the ulwp freelist.
289 ulwp
->ul_next
= NULL
;
290 if (udp
->ulwp_freelist
== NULL
)
291 udp
->ulwp_freelist
= udp
->ulwp_lastfree
= ulwp
;
293 udp
->ulwp_lastfree
->ul_next
= ulwp
;
294 udp
->ulwp_lastfree
= ulwp
;
298 ulwpp
= &ulwp
->ul_next
;
304 * Find an unused stack of the requested size
305 * or create a new stack of the requested size.
306 * Return a pointer to the ulwp_t structure referring to the stack, or NULL.
307 * thr_exit() stores 1 in the ul_dead member.
308 * thr_join() stores -1 in the ul_lwpid member.
311 find_stack(size_t stksize
, size_t guardsize
)
313 static size_t pagesize
= 0;
315 uberdata_t
*udp
= curthread
->ul_uberdata
;
323 * The stack is allocated PROT_READ|PROT_WRITE|PROT_EXEC
324 * unless overridden by the system's configuration.
326 if (stackprot
== 0) { /* do this once */
327 long lprot
= _sysconf(_SC_STACK_PROT
);
329 lprot
= (PROT_READ
|PROT_WRITE
|PROT_EXEC
);
330 stackprot
= (int)lprot
;
332 if (pagesize
== 0) /* do this once */
333 pagesize
= _sysconf(_SC_PAGESIZE
);
336 * One megabyte stacks by default, but subtract off
337 * two pages for the system-created red zones.
338 * Round up a non-zero stack size to a pagesize multiple.
341 stksize
= DEFAULTSTACK
- 2 * pagesize
;
343 stksize
= ((stksize
+ pagesize
- 1) & -pagesize
);
346 * Round up the mapping size to a multiple of pagesize.
347 * Note: mmap() provides at least one page of red zone
348 * so we deduct that from the value of guardsize.
351 guardsize
= ((guardsize
+ pagesize
- 1) & -pagesize
) - pagesize
;
352 mapsize
= stksize
+ guardsize
;
354 lmutex_lock(&udp
->link_lock
);
355 for (prev
= NULL
, ulwpp
= &udp
->lwp_stacks
;
356 (ulwp
= *ulwpp
) != NULL
;
357 prev
= ulwp
, ulwpp
= &ulwp
->ul_next
) {
358 if (ulwp
->ul_mapsiz
== mapsize
&&
359 ulwp
->ul_guardsize
== guardsize
&&
360 dead_and_buried(ulwp
)) {
362 * The previous lwp is gone; reuse the stack.
363 * Remove the ulwp from the stack list.
365 *ulwpp
= ulwp
->ul_next
;
366 ulwp
->ul_next
= NULL
;
367 if (ulwp
== udp
->lwp_laststack
)
368 udp
->lwp_laststack
= prev
;
371 lmutex_unlock(&udp
->link_lock
);
378 * None of the cached stacks matched our mapping size.
379 * Reduce the stack cache to get rid of possibly
380 * very old stacks that will never be reused.
382 if (udp
->nfreestack
> udp
->thread_stack_cache
)
383 trim_stack_cache(udp
->thread_stack_cache
);
384 else if (udp
->nfreestack
> 0)
385 trim_stack_cache(udp
->nfreestack
- 1);
386 lmutex_unlock(&udp
->link_lock
);
389 * Create a new stack.
391 if ((stk
= mmap(NULL
, mapsize
, stackprot
,
392 MAP_PRIVATE
|MAP_NORESERVE
|MAP_ANON
, -1, (off_t
)0)) != MAP_FAILED
) {
394 * We have allocated our stack. Now allocate the ulwp.
398 (void) munmap(stk
, mapsize
);
401 ulwp
->ul_mapsiz
= mapsize
;
402 ulwp
->ul_guardsize
= guardsize
;
403 ulwp
->ul_stktop
= (uintptr_t)stk
+ mapsize
;
404 ulwp
->ul_stksiz
= stksize
;
405 if (guardsize
) /* protect the extra red zone */
406 (void) mprotect(stk
, guardsize
, PROT_NONE
);
413 * Get a ulwp_t structure from the free list or allocate a new one.
414 * Such ulwp_t's do not have a stack allocated by the library.
419 ulwp_t
*self
= curthread
;
420 uberdata_t
*udp
= self
->ul_uberdata
;
427 lmutex_lock(&udp
->link_lock
);
428 for (prev
= NULL
, ulwpp
= &udp
->ulwp_freelist
;
429 (ulwp
= *ulwpp
) != NULL
;
430 prev
= ulwp
, ulwpp
= &ulwp
->ul_next
) {
431 if (dead_and_buried(ulwp
)) {
432 *ulwpp
= ulwp
->ul_next
;
433 ulwp
->ul_next
= NULL
;
434 if (ulwp
== udp
->ulwp_lastfree
)
435 udp
->ulwp_lastfree
= prev
;
437 lmutex_unlock(&udp
->link_lock
);
442 lmutex_unlock(&udp
->link_lock
);
444 tls_size
= roundup64(udp
->tls_metadata
.static_tls
.tls_size
);
445 data
= lmalloc(sizeof (*ulwp
) + tls_size
);
447 /* LINTED pointer cast may result in improper alignment */
448 ulwp
= (ulwp_t
*)(data
+ tls_size
);
454 * Free a ulwp structure.
455 * If there is an associated stack, put it on the stack list and
456 * munmap() previously freed stacks up to the residual cache limit.
457 * Else put it on the ulwp free list and never call lfree() on it.
460 ulwp_free(ulwp_t
*ulwp
)
462 uberdata_t
*udp
= curthread
->ul_uberdata
;
464 ASSERT(udp
->nthreads
<= 1 || MUTEX_OWNED(&udp
->link_lock
, curthread
));
465 ulwp
->ul_next
= NULL
;
466 if (ulwp
== udp
->ulwp_one
) /* don't reuse the primoridal stack */
468 else if (ulwp
->ul_mapsiz
!= 0) {
469 if (udp
->lwp_stacks
== NULL
)
470 udp
->lwp_stacks
= udp
->lwp_laststack
= ulwp
;
472 udp
->lwp_laststack
->ul_next
= ulwp
;
473 udp
->lwp_laststack
= ulwp
;
475 if (++udp
->nfreestack
> udp
->thread_stack_cache
)
476 trim_stack_cache(udp
->thread_stack_cache
);
478 if (udp
->ulwp_freelist
== NULL
)
479 udp
->ulwp_freelist
= udp
->ulwp_lastfree
= ulwp
;
481 udp
->ulwp_lastfree
->ul_next
= ulwp
;
482 udp
->ulwp_lastfree
= ulwp
;
488 * Find a named lwp and return a pointer to its hash list location.
489 * On success, returns with the hash lock held.
492 find_lwpp(thread_t tid
)
494 uberdata_t
*udp
= curthread
->ul_uberdata
;
495 int ix
= TIDHASH(tid
, udp
);
496 mutex_t
*mp
= &udp
->thr_hash_table
[ix
].hash_lock
;
504 for (ulwpp
= &udp
->thr_hash_table
[ix
].hash_bucket
;
505 (ulwp
= *ulwpp
) != NULL
;
506 ulwpp
= &ulwp
->ul_hash
) {
507 if (ulwp
->ul_lwpid
== tid
)
515 * Wake up all lwps waiting on this lwp for some reason.
518 ulwp_broadcast(ulwp_t
*ulwp
)
520 ulwp_t
*self
= curthread
;
521 uberdata_t
*udp
= self
->ul_uberdata
;
523 ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp
, udp
), self
));
524 (void) cond_broadcast(ulwp_condvar(ulwp
, udp
));
528 * Find a named lwp and return a pointer to it.
529 * Returns with the hash lock held.
532 find_lwp(thread_t tid
)
534 ulwp_t
*self
= curthread
;
535 uberdata_t
*udp
= self
->ul_uberdata
;
539 if (self
->ul_lwpid
== tid
) {
541 ulwp_lock(ulwp
, udp
);
542 } else if ((ulwpp
= find_lwpp(tid
)) != NULL
) {
546 if (ulwp
&& ulwp
->ul_dead
) {
547 ulwp_unlock(ulwp
, udp
);
555 _thrp_create(void *stk
, size_t stksize
, void *(*func
)(void *), void *arg
,
556 long flags
, thread_t
*new_thread
, size_t guardsize
)
558 ulwp_t
*self
= curthread
;
559 uberdata_t
*udp
= self
->ul_uberdata
;
567 * Enforce the restriction of not creating any threads
568 * until the primary link map has been initialized.
569 * Also, disallow thread creation to a child of vfork().
571 if (!self
->ul_primarymap
|| self
->ul_vfork
)
574 if (udp
->hash_size
== 1)
577 if ((stk
|| stksize
) && stksize
< MINSTACK
)
581 if ((ulwp
= find_stack(stksize
, guardsize
)) == NULL
)
583 stksize
= ulwp
->ul_mapsiz
- ulwp
->ul_guardsize
;
585 /* initialize the private stack */
586 if ((ulwp
= ulwp_alloc()) == NULL
)
589 ulwp
->ul_stktop
= (uintptr_t)stk
+ stksize
;
590 ulwp
->ul_stksiz
= stksize
;
592 /* ulwp is not in the hash table; make sure hash_out() doesn't fail */
594 ulwp
->ul_errnop
= &ulwp
->ul_errno
;
596 lwp_flags
= LWP_SUSPENDED
;
597 if (flags
& (THR_DETACHED
|THR_DAEMON
)) {
598 flags
|= THR_DETACHED
;
599 lwp_flags
|= LWP_DETACHED
;
601 if (flags
& THR_DAEMON
)
602 lwp_flags
|= LWP_DAEMON
;
604 /* creating a thread: enforce mt-correctness in mutex_lock() */
605 self
->ul_async_safe
= 1;
607 /* per-thread copies of global variables, for speed */
608 ulwp
->ul_queue_fifo
= self
->ul_queue_fifo
;
609 ulwp
->ul_cond_wait_defer
= self
->ul_cond_wait_defer
;
610 ulwp
->ul_error_detection
= self
->ul_error_detection
;
611 ulwp
->ul_async_safe
= self
->ul_async_safe
;
612 ulwp
->ul_max_spinners
= self
->ul_max_spinners
;
613 ulwp
->ul_adaptive_spin
= self
->ul_adaptive_spin
;
614 ulwp
->ul_queue_spin
= self
->ul_queue_spin
;
615 ulwp
->ul_door_noreserve
= self
->ul_door_noreserve
;
616 ulwp
->ul_misaligned
= self
->ul_misaligned
;
618 /* new thread inherits creating thread's scheduling parameters */
619 ulwp
->ul_policy
= self
->ul_policy
;
620 ulwp
->ul_pri
= (self
->ul_epri
? self
->ul_epri
: self
->ul_pri
);
621 ulwp
->ul_cid
= self
->ul_cid
;
622 ulwp
->ul_rtclassid
= self
->ul_rtclassid
;
624 ulwp
->ul_primarymap
= self
->ul_primarymap
;
625 ulwp
->ul_self
= ulwp
;
626 ulwp
->ul_uberdata
= udp
;
628 /* debugger support */
629 ulwp
->ul_usropts
= flags
;
632 ulwp
->ul_startpc
= func
;
633 ulwp
->ul_startarg
= arg
;
636 * Defer signals on the new thread until its TLS constructors
637 * have been called. _thrp_setup() will call sigon() after
638 * it has called tls_setup().
640 ulwp
->ul_sigdefer
= 1;
642 error
= setup_context(&uc
, _thrp_setup
, ulwp
,
643 (caddr_t
)ulwp
->ul_stk
+ ulwp
->ul_guardsize
, stksize
);
644 if (error
!= 0 && stk
!= NULL
) /* inaccessible stack */
648 * Call enter_critical() to avoid being suspended until we
649 * have linked the new thread into the proper lists.
650 * This is necessary because forkall() and fork1() must
651 * suspend all threads and they must see a complete list.
653 enter_critical(self
);
654 uc
.uc_sigmask
= ulwp
->ul_sigmask
= self
->ul_sigmask
;
656 (error
= __lwp_create(&uc
, lwp_flags
, &tid
)) != 0) {
658 ulwp
->ul_lwpid
= (lwpid_t
)(-1);
660 ulwp
->ul_detached
= 1;
661 lmutex_lock(&udp
->link_lock
);
663 lmutex_unlock(&udp
->link_lock
);
666 self
->ul_nocancel
= 0; /* cancellation is now possible */
667 udp
->uberflags
.uf_mt
= 1;
670 if (flags
& THR_DETACHED
)
671 ulwp
->ul_detached
= 1;
672 ulwp
->ul_lwpid
= tid
;
673 ulwp
->ul_stop
= TSTP_REGULAR
;
674 if (flags
& THR_SUSPENDED
)
675 ulwp
->ul_created
= 1;
677 lmutex_lock(&udp
->link_lock
);
678 ulwp
->ul_forw
= udp
->all_lwps
;
679 ulwp
->ul_back
= udp
->all_lwps
->ul_back
;
680 ulwp
->ul_back
->ul_forw
= ulwp
;
681 ulwp
->ul_forw
->ul_back
= ulwp
;
684 if (flags
& THR_DAEMON
)
686 if (flags
& THR_NEW_LWP
)
688 __libc_threaded
= 1; /* inform stdio */
689 lmutex_unlock(&udp
->link_lock
);
691 if (__td_event_report(self
, TD_CREATE
, udp
)) {
692 self
->ul_td_evbuf
.eventnum
= TD_CREATE
;
693 self
->ul_td_evbuf
.eventdata
= (void *)(uintptr_t)tid
;
694 tdb_event(TD_CREATE
, udp
);
699 if (!(flags
& THR_SUSPENDED
))
700 (void) _thrp_continue(tid
, TSTP_REGULAR
);
706 thr_create(void *stk
, size_t stksize
, void *(*func
)(void *), void *arg
,
707 long flags
, thread_t
*new_thread
)
709 return (_thrp_create(stk
, stksize
, func
, arg
, flags
, new_thread
, 0));
713 * A special cancellation cleanup hook for DCE.
714 * cleanuphndlr, when it is not NULL, will contain a callback
715 * function to be called before a thread is terminated in
716 * thr_exit() as a result of being cancelled.
718 static void (*cleanuphndlr
)(void) = NULL
;
721 * _pthread_setcleanupinit: sets the cleanup hook.
724 _pthread_setcleanupinit(void (*func
)(void))
733 ulwp_t
*self
= curthread
;
734 uberdata_t
*udp
= self
->ul_uberdata
;
735 ulwp_t
*replace
= NULL
;
737 if (__td_event_report(self
, TD_DEATH
, udp
)) {
738 self
->ul_td_evbuf
.eventnum
= TD_DEATH
;
739 tdb_event(TD_DEATH
, udp
);
742 ASSERT(self
->ul_sigdefer
!= 0);
744 lmutex_lock(&udp
->link_lock
);
746 if (self
->ul_usropts
& THR_NEW_LWP
)
748 if (self
->ul_usropts
& THR_DAEMON
)
750 else if (udp
->nthreads
== udp
->ndaemons
) {
752 * We are the last non-daemon thread exiting.
753 * Exit the process. We retain our TSD and TLS so
754 * that atexit() application functions can use them.
756 lmutex_unlock(&udp
->link_lock
);
758 thr_panic("_thrp_exit(): exit(0) returned");
760 lmutex_unlock(&udp
->link_lock
);
763 * tsd_exit() may call its destructor free(), thus depending on
764 * tmem, therefore tmem_exit() needs to be called after tsd_exit()
767 tsd_exit(); /* deallocate thread-specific data */
768 tls_exit(); /* deallocate thread-local storage */
769 tmem_exit(); /* deallocate tmem allocations */
770 heldlock_exit(); /* deal with left-over held locks */
772 /* block all signals to finish exiting */
773 block_all_signals(self
);
774 /* also prevent ourself from being suspended */
775 enter_critical(self
);
777 lmutex_lock(&udp
->link_lock
);
779 (void) ulwp_lock(self
, udp
);
781 if (self
->ul_mapsiz
&& !self
->ul_detached
) {
783 * We want to free the stack for reuse but must keep
784 * the ulwp_t struct for the benefit of thr_join().
785 * For this purpose we allocate a replacement ulwp_t.
787 if ((replace
= udp
->ulwp_replace_free
) == NULL
)
788 replace
= lmalloc(REPLACEMENT_SIZE
);
789 else if ((udp
->ulwp_replace_free
= replace
->ul_next
) == NULL
)
790 udp
->ulwp_replace_last
= NULL
;
793 if (udp
->all_lwps
== self
)
794 udp
->all_lwps
= self
->ul_forw
;
795 if (udp
->all_lwps
== self
)
796 udp
->all_lwps
= NULL
;
798 self
->ul_forw
->ul_back
= self
->ul_back
;
799 self
->ul_back
->ul_forw
= self
->ul_forw
;
801 self
->ul_forw
= self
->ul_back
= NULL
;
802 #if defined(THREAD_DEBUG)
803 /* collect queue lock statistics before marking ourself dead */
804 record_spin_locks(self
);
807 self
->ul_pleasestop
= 0;
808 if (replace
!= NULL
) {
809 int ix
= self
->ul_ix
; /* the hash index */
810 (void) memcpy(replace
, self
, REPLACEMENT_SIZE
);
811 replace
->ul_self
= replace
;
812 replace
->ul_next
= NULL
; /* clone not on stack list */
813 replace
->ul_mapsiz
= 0; /* allows clone to be freed */
814 replace
->ul_replace
= 1; /* requires clone to be freed */
815 hash_out_unlocked(self
, ix
, udp
);
816 hash_in_unlocked(replace
, ix
, udp
);
817 ASSERT(!(self
->ul_detached
));
818 self
->ul_detached
= 1; /* this frees the stack */
819 self
->ul_schedctl
= NULL
;
820 self
->ul_schedctl_called
= &udp
->uberflags
;
821 set_curthread(self
= replace
);
823 * Having just changed the address of curthread, we
824 * must reset the ownership of the locks we hold so
825 * that assertions will not fire when we release them.
827 udp
->link_lock
.mutex_owner
= (uintptr_t)self
;
828 ulwp_mutex(self
, udp
)->mutex_owner
= (uintptr_t)self
;
831 * On i386, %gs still references the original, not the
832 * replacement, ulwp structure. Fetching the replacement
833 * curthread pointer via %gs:0 works correctly since the
834 * original ulwp structure will not be reallocated until
835 * this lwp has completed its lwp_exit() system call (see
836 * dead_and_buried()), but from here on out, we must make
837 * no references to %gs:<offset> other than %gs:0.
841 * Put non-detached terminated threads in the all_zombies list.
843 if (!self
->ul_detached
) {
845 if (udp
->all_zombies
== NULL
) {
846 ASSERT(udp
->nzombies
== 1);
847 udp
->all_zombies
= self
->ul_forw
= self
->ul_back
= self
;
849 self
->ul_forw
= udp
->all_zombies
;
850 self
->ul_back
= udp
->all_zombies
->ul_back
;
851 self
->ul_back
->ul_forw
= self
;
852 self
->ul_forw
->ul_back
= self
;
856 * Notify everyone waiting for this thread.
858 ulwp_broadcast(self
);
859 (void) ulwp_unlock(self
, udp
);
861 * Prevent any more references to the schedctl data.
862 * We are exiting and continue_fork() may not find us.
863 * Do this just before dropping link_lock, since fork
864 * serializes on link_lock.
866 self
->ul_schedctl
= NULL
;
867 self
->ul_schedctl_called
= &udp
->uberflags
;
868 lmutex_unlock(&udp
->link_lock
);
870 ASSERT(self
->ul_critical
== 1);
871 ASSERT(self
->ul_preempt
== 0);
872 _lwp_terminate(); /* never returns */
873 thr_panic("_thrp_exit(): _lwp_terminate() returned");
876 #if defined(THREAD_DEBUG)
878 collect_queue_statistics()
880 uberdata_t
*udp
= curthread
->ul_uberdata
;
883 if (thread_queue_dump
) {
884 lmutex_lock(&udp
->link_lock
);
885 if ((ulwp
= udp
->all_lwps
) != NULL
) {
887 record_spin_locks(ulwp
);
888 } while ((ulwp
= ulwp
->ul_forw
) != udp
->all_lwps
);
890 lmutex_unlock(&udp
->link_lock
);
895 static void __NORETURN
896 _thrp_exit_common(void *status
, int unwind
)
898 ulwp_t
*self
= curthread
;
899 int cancelled
= (self
->ul_cancel_pending
&& status
== PTHREAD_CANCELED
);
901 ASSERT(self
->ul_critical
== 0 && self
->ul_preempt
== 0);
904 * Disable cancellation and call the special DCE cancellation
905 * cleanup hook if it is enabled. Do nothing else before calling
906 * the DCE cancellation cleanup hook; it may call longjmp() and
909 self
->ul_cancel_disabled
= 1;
910 self
->ul_cancel_async
= 0;
911 self
->ul_save_async
= 0;
912 self
->ul_cancelable
= 0;
913 self
->ul_cancel_pending
= 0;
914 set_cancel_pending_flag(self
, 1);
915 if (cancelled
&& cleanuphndlr
!= NULL
)
919 * Block application signals while we are exiting.
920 * We call out to C++, TSD, and TLS destructors while exiting
921 * and these are application-defined, so we cannot be assured
922 * that they won't reset the signal mask. We use sigoff() to
923 * defer any signals that may be received as a result of this
924 * bad behavior. Such signals will be lost to the process
925 * when the thread finishes exiting.
927 (void) thr_sigsetmask(SIG_SETMASK
, &maskset
, NULL
);
930 self
->ul_rval
= status
;
933 * If thr_exit is being called from the places where
934 * C++ destructors are to be called such as cancellation
935 * points, then set this flag. It is checked in _t_cancel()
936 * to decide whether _ex_unwind() is to be called or not.
942 * _thrp_unwind() will eventually call _thrp_exit().
946 thr_panic("_thrp_exit_common(): _thrp_unwind() returned");
948 for (;;) /* to shut the compiler up about __NORETURN */
953 * Called when a thread returns from its start function.
954 * We are at the top of the stack; no unwinding is necessary.
957 _thrp_terminate(void *status
)
959 _thrp_exit_common(status
, 0);
962 #pragma weak pthread_exit = thr_exit
963 #pragma weak _thr_exit = thr_exit
965 thr_exit(void *status
)
967 _thrp_exit_common(status
, 1);
971 _thrp_join(thread_t tid
, thread_t
*departed
, void **status
, int do_cancel
)
973 uberdata_t
*udp
= curthread
->ul_uberdata
;
983 error
= lwp_wait(tid
, &found
);
985 while ((error
= __lwp_wait(tid
, &found
)) == EINTR
)
992 * We must hold link_lock to avoid a race condition with find_stack().
994 lmutex_lock(&udp
->link_lock
);
995 if ((ulwpp
= find_lwpp(found
)) == NULL
) {
997 * lwp_wait() found an lwp that the library doesn't know
998 * about. It must have been created with _lwp_create().
999 * Just return its lwpid; we can't know its status.
1001 lmutex_unlock(&udp
->link_lock
);
1005 * Remove ulwp from the hash table.
1008 *ulwpp
= ulwp
->ul_hash
;
1009 ulwp
->ul_hash
= NULL
;
1011 * Remove ulwp from all_zombies list.
1013 ASSERT(udp
->nzombies
>= 1);
1014 if (udp
->all_zombies
== ulwp
)
1015 udp
->all_zombies
= ulwp
->ul_forw
;
1016 if (udp
->all_zombies
== ulwp
)
1017 udp
->all_zombies
= NULL
;
1019 ulwp
->ul_forw
->ul_back
= ulwp
->ul_back
;
1020 ulwp
->ul_back
->ul_forw
= ulwp
->ul_forw
;
1022 ulwp
->ul_forw
= ulwp
->ul_back
= NULL
;
1024 ASSERT(ulwp
->ul_dead
&& !ulwp
->ul_detached
&&
1025 !(ulwp
->ul_usropts
& (THR_DETACHED
|THR_DAEMON
)));
1027 * We can't call ulwp_unlock(ulwp) after we set
1028 * ulwp->ul_ix = -1 so we have to get a pointer to the
1029 * ulwp's hash table mutex now in order to unlock it below.
1031 mp
= ulwp_mutex(ulwp
, udp
);
1032 ulwp
->ul_lwpid
= (lwpid_t
)(-1);
1034 rval
= ulwp
->ul_rval
;
1035 replace
= ulwp
->ul_replace
;
1038 ulwp
->ul_next
= NULL
;
1039 if (udp
->ulwp_replace_free
== NULL
)
1040 udp
->ulwp_replace_free
=
1041 udp
->ulwp_replace_last
= ulwp
;
1043 udp
->ulwp_replace_last
->ul_next
= ulwp
;
1044 udp
->ulwp_replace_last
= ulwp
;
1047 lmutex_unlock(&udp
->link_lock
);
1050 if (departed
!= NULL
)
1058 thr_join(thread_t tid
, thread_t
*departed
, void **status
)
1060 int error
= _thrp_join(tid
, departed
, status
, 1);
1061 return ((error
== EINVAL
)? ESRCH
: error
);
1065 * pthread_join() differs from Solaris thr_join():
1066 * It does not return the departed thread's id
1067 * and hence does not have a "departed" argument.
1068 * It returns EINVAL if tid refers to a detached thread.
1070 #pragma weak _pthread_join = pthread_join
1072 pthread_join(pthread_t tid
, void **status
)
1074 return ((tid
== 0)? ESRCH
: _thrp_join(tid
, NULL
, status
, 1));
1078 pthread_detach(pthread_t tid
)
1080 uberdata_t
*udp
= curthread
->ul_uberdata
;
1085 if ((ulwpp
= find_lwpp(tid
)) == NULL
)
1089 if (ulwp
->ul_dead
) {
1090 ulwp_unlock(ulwp
, udp
);
1091 error
= _thrp_join(tid
, NULL
, NULL
, 0);
1093 error
= __lwp_detach(tid
);
1094 ulwp
->ul_detached
= 1;
1095 ulwp
->ul_usropts
|= THR_DETACHED
;
1096 ulwp_unlock(ulwp
, udp
);
1102 ematch(const char *ev
, const char *match
)
1106 while ((c
= *match
++) != '\0') {
1116 envvar(const char *ev
, const char *match
, int limit
)
1121 if ((ename
= ematch(ev
, match
)) != NULL
) {
1123 for (val
= 0; (c
= *ename
) != '\0'; ename
++) {
1128 val
= val
* 10 + (c
- '0');
1139 etest(const char *ev
)
1143 if ((value
= envvar(ev
, "QUEUE_SPIN", 1000000)) >= 0)
1144 thread_queue_spin
= value
;
1145 if ((value
= envvar(ev
, "ADAPTIVE_SPIN", 1000000)) >= 0)
1146 thread_adaptive_spin
= value
;
1147 if ((value
= envvar(ev
, "MAX_SPINNERS", 255)) >= 0)
1148 thread_max_spinners
= value
;
1149 if ((value
= envvar(ev
, "QUEUE_FIFO", 8)) >= 0)
1150 thread_queue_fifo
= value
;
1151 #if defined(THREAD_DEBUG)
1152 if ((value
= envvar(ev
, "QUEUE_VERIFY", 1)) >= 0)
1153 thread_queue_verify
= value
;
1154 if ((value
= envvar(ev
, "QUEUE_DUMP", 1)) >= 0)
1155 thread_queue_dump
= value
;
1157 if ((value
= envvar(ev
, "STACK_CACHE", 10000)) >= 0)
1158 thread_stack_cache
= value
;
1159 if ((value
= envvar(ev
, "COND_WAIT_DEFER", 1)) >= 0)
1160 thread_cond_wait_defer
= value
;
1161 if ((value
= envvar(ev
, "ERROR_DETECTION", 2)) >= 0)
1162 thread_error_detection
= value
;
1163 if ((value
= envvar(ev
, "ASYNC_SAFE", 1)) >= 0)
1164 thread_async_safe
= value
;
1165 if ((value
= envvar(ev
, "DOOR_NORESERVE", 1)) >= 0)
1166 thread_door_noreserve
= value
;
1167 if ((value
= envvar(ev
, "LOCKS_MISALIGNED", 1)) >= 0)
1168 thread_locks_misaligned
= value
;
1172 * Look for and evaluate environment variables of the form "_THREAD_*".
1173 * For compatibility with the past, we also look for environment
1174 * names of the form "LIBTHREAD_*".
1179 extern const char **_environ
;
1184 if ((pev
= _environ
) == NULL
)
1186 while ((ev
= *pev
++) != NULL
) {
1188 if (c
== '_' && strncmp(ev
, "_THREAD_", 8) == 0)
1190 if (c
== 'L' && strncmp(ev
, "LIBTHREAD_", 10) == 0)
1195 /* PROBE_SUPPORT begin */
1196 #pragma weak __tnf_probe_notify
1197 extern void __tnf_probe_notify(void);
1198 /* PROBE_SUPPORT end */
1200 /* same as atexit() but private to the library */
1201 extern int _atexit(void (*)(void));
1203 /* same as _cleanup() but private to the library */
1204 extern void __cleanup(void);
1206 extern void atfork_init(void);
1209 extern void __proc64id(void);
1213 init_auxv_data(uberdata_t
*udp
)
1217 udp
->ub_comm_page
= NULL
;
1218 if (dlinfo(RTLD_SELF
, RTLD_DI_ARGSINFO
, &args
) < 0)
1221 while (args
.dla_auxv
->a_type
!= AT_NULL
) {
1222 if (args
.dla_auxv
->a_type
== AT_SUN_COMMPAGE
) {
1223 udp
->ub_comm_page
= args
.dla_auxv
->a_un
.a_ptr
;
1230 * libc_init() is called by ld.so.1 for library initialization.
1231 * We perform minimal initialization; enough to work with the main thread.
1236 uberdata_t
*udp
= &__uberdata
;
1237 ulwp_t
*oldself
= __curthread();
1246 * For the initial stage of initialization, we must be careful
1247 * not to call any function that could possibly call _cerror().
1248 * For this purpose, we call only the raw system call wrappers.
1253 * Gather information about cache layouts for optimized
1254 * AMD and Intel assembler strfoo() and memfoo() functions.
1260 * Every libc, regardless of which link map, must register __cleanup().
1262 (void) _atexit(__cleanup
);
1265 * Every libc, regardless of link map, needs to go through and check
1266 * its aux vectors. Doing so will indicate whether or not this has
1267 * been given a comm page (to optimize certain system actions).
1269 init_auxv_data(udp
);
1272 * We keep our uberdata on one of (a) the first alternate link map
1273 * or (b) the primary link map. We switch to the primary link map
1274 * and stay there once we see it. All intermediate link maps are
1275 * subject to being unloaded at any time.
1277 if (oldself
!= NULL
&& (oldself
->ul_primarymap
|| !primary_link_map
)) {
1278 __tdb_bootstrap
= oldself
->ul_uberdata
->tdb_bootstrap
;
1280 atfork_init(); /* every link map needs atfork() processing */
1286 * To establish the main stack information, we have to get our context.
1287 * This is also convenient to use for getting our signal mask.
1289 uc
.uc_flags
= UC_ALL
;
1290 (void) __getcontext(&uc
);
1291 ASSERT(uc
.uc_link
== NULL
);
1293 tls_size
= roundup64(udp
->tls_metadata
.static_tls
.tls_size
);
1294 ASSERT(primary_link_map
|| tls_size
== 0);
1295 data
= lmalloc(sizeof (ulwp_t
) + tls_size
);
1297 thr_panic("cannot allocate thread structure for main thread");
1298 /* LINTED pointer cast may result in improper alignment */
1299 self
= (ulwp_t
*)(data
+ tls_size
);
1300 init_hash_table
[0].hash_bucket
= self
;
1302 self
->ul_sigmask
= uc
.uc_sigmask
;
1303 delete_reserved_signals(&self
->ul_sigmask
);
1305 * Are the old and new sets different?
1306 * (This can happen if we are currently blocking SIGCANCEL.)
1307 * If so, we must explicitly set our signal mask, below.
1310 ((self
->ul_sigmask
.__sigbits
[0] ^ uc
.uc_sigmask
.__sigbits
[0]) |
1311 (self
->ul_sigmask
.__sigbits
[1] ^ uc
.uc_sigmask
.__sigbits
[1]) |
1312 (self
->ul_sigmask
.__sigbits
[2] ^ uc
.uc_sigmask
.__sigbits
[2]) |
1313 (self
->ul_sigmask
.__sigbits
[3] ^ uc
.uc_sigmask
.__sigbits
[3]));
1316 self
->ul_stktop
= (uintptr_t)uc
.uc_stack
.ss_sp
+ uc
.uc_stack
.ss_size
;
1317 (void) getrlimit(RLIMIT_STACK
, &rl
);
1318 self
->ul_stksiz
= rl
.rlim_cur
;
1319 self
->ul_stk
= (caddr_t
)(self
->ul_stktop
- self
->ul_stksiz
);
1321 self
->ul_forw
= self
->ul_back
= self
;
1322 self
->ul_hash
= NULL
;
1324 self
->ul_lwpid
= 1; /* _lwp_self() */
1326 self
->ul_self
= self
;
1327 self
->ul_policy
= -1; /* initialize only when needed */
1330 self
->ul_rtclassid
= -1;
1331 self
->ul_uberdata
= udp
;
1332 if (oldself
!= NULL
) {
1335 ASSERT(primary_link_map
);
1336 ASSERT(oldself
->ul_main
== 1);
1337 self
->ul_stsd
= oldself
->ul_stsd
;
1338 for (i
= 0; i
< TSD_NFAST
; i
++)
1339 self
->ul_ftsd
[i
] = oldself
->ul_ftsd
[i
];
1340 self
->ul_tls
= oldself
->ul_tls
;
1342 * Retrieve all pointers to uberdata allocated
1343 * while running on previous link maps.
1344 * We would like to do a structure assignment here, but
1345 * gcc turns structure assignments into calls to memcpy(),
1346 * a function exported from libc. We can't call any such
1347 * external functions until we establish curthread, below,
1348 * so we just call our private version of memcpy().
1350 (void) memcpy(udp
, oldself
->ul_uberdata
, sizeof (*udp
));
1352 * These items point to global data on the primary link map.
1354 udp
->thr_hash_table
= init_hash_table
;
1355 udp
->sigacthandler
= sigacthandler
;
1356 udp
->tdb
.tdb_events
= tdb_events
;
1357 ASSERT(udp
->nthreads
== 1 && !udp
->uberflags
.uf_mt
);
1358 ASSERT(udp
->lwp_stacks
== NULL
);
1359 ASSERT(udp
->ulwp_freelist
== NULL
);
1360 ASSERT(udp
->ulwp_replace_free
== NULL
);
1361 ASSERT(udp
->hash_size
== 1);
1363 udp
->all_lwps
= self
;
1364 udp
->ulwp_one
= self
;
1365 udp
->pid
= getpid();
1368 * In every link map, tdb_bootstrap points to the same piece of
1369 * allocated memory. When the primary link map is initialized,
1370 * the allocated memory is assigned a pointer to the one true
1371 * uberdata. This allows libc_db to initialize itself regardless
1372 * of which instance of libc it finds in the address space.
1374 if (udp
->tdb_bootstrap
== NULL
)
1375 udp
->tdb_bootstrap
= lmalloc(sizeof (uberdata_t
*));
1376 __tdb_bootstrap
= udp
->tdb_bootstrap
;
1377 if (primary_link_map
) {
1378 self
->ul_primarymap
= 1;
1379 udp
->primary_map
= 1;
1380 *udp
->tdb_bootstrap
= udp
;
1383 * Cancellation can't happen until:
1384 * pthread_cancel() is called
1386 * another thread is created
1387 * For now, as a single-threaded process, set the flag that tells
1388 * PROLOGUE/EPILOGUE (in scalls.c) that cancellation can't happen.
1390 self
->ul_nocancel
= 1;
1392 #if defined(__amd64)
1393 (void) ___lwp_private(_LWP_SETPRIVATE
, _LWP_FSBASE
, self
);
1394 #elif defined(__i386)
1395 (void) ___lwp_private(_LWP_SETPRIVATE
, _LWP_GSBASE
, self
);
1396 #endif /* __i386 || __amd64 */
1397 set_curthread(self
); /* redundant on i386 */
1399 * Now curthread is established and it is safe to call any
1400 * function in libc except one that uses thread-local storage.
1402 self
->ul_errnop
= &errno
;
1403 if (oldself
!= NULL
) {
1404 /* tls_size was zero when oldself was allocated */
1405 lfree(oldself
, sizeof (ulwp_t
));
1412 * If the stack is unlimited, we set the size to zero to disable
1414 * XXX: Work harder here. Get the stack size from /proc/self/rmap
1416 if (self
->ul_stksiz
== RLIM_INFINITY
) {
1417 self
->ul_ustack
.ss_sp
= (void *)self
->ul_stktop
;
1418 self
->ul_ustack
.ss_size
= 0;
1420 self
->ul_ustack
.ss_sp
= self
->ul_stk
;
1421 self
->ul_ustack
.ss_size
= self
->ul_stksiz
;
1423 self
->ul_ustack
.ss_flags
= 0;
1424 (void) setustack(&self
->ul_ustack
);
1427 * Get the variables that affect thread behavior from the environment.
1430 udp
->uberflags
.uf_thread_error_detection
= (char)thread_error_detection
;
1431 udp
->thread_stack_cache
= thread_stack_cache
;
1434 * Make per-thread copies of global variables, for speed.
1436 self
->ul_queue_fifo
= (char)thread_queue_fifo
;
1437 self
->ul_cond_wait_defer
= (char)thread_cond_wait_defer
;
1438 self
->ul_error_detection
= (char)thread_error_detection
;
1439 self
->ul_async_safe
= (char)thread_async_safe
;
1440 self
->ul_door_noreserve
= (char)thread_door_noreserve
;
1441 self
->ul_misaligned
= (char)thread_locks_misaligned
;
1442 self
->ul_max_spinners
= (uint8_t)thread_max_spinners
;
1443 self
->ul_adaptive_spin
= thread_adaptive_spin
;
1444 self
->ul_queue_spin
= thread_queue_spin
;
1446 #if defined(__sparc) && !defined(_LP64)
1447 if (self
->ul_misaligned
) {
1449 * Tell the kernel to fix up ldx/stx instructions that
1450 * refer to non-8-byte aligned data instead of giving
1451 * the process an alignment trap and generating SIGBUS.
1453 * Programs compiled for 32-bit sparc with the Studio SS12
1454 * compiler get this done for them automatically (in _init()).
1455 * We do it here for the benefit of programs compiled with
1456 * other compilers, like gcc.
1458 * This is necessary for the _THREAD_LOCKS_MISALIGNED=1
1459 * environment variable horrible hack to work.
1461 extern void _do_fix_align(void);
1467 * When we have initialized the primary link map, inform
1468 * the dynamic linker about our interface functions.
1469 * Set up our pointer to the program name.
1471 if (self
->ul_primarymap
)
1472 _ld_libc((void *)rtld_funcs
);
1476 * Defer signals until TLS constructors have been called.
1482 (void) restore_signals(self
);
1485 * Make private copies of __xpg4 and __xpg6 so libc can test
1486 * them after this point without invoking the dynamic linker.
1488 libc__xpg4
= __xpg4
;
1489 libc__xpg6
= __xpg6
;
1491 /* PROBE_SUPPORT begin */
1492 if (self
->ul_primarymap
&& __tnf_probe_notify
!= NULL
)
1493 __tnf_probe_notify();
1494 /* PROBE_SUPPORT end */
1496 init_sigev_thread();
1500 #pragma fini(libc_fini)
1505 * If we are doing fini processing for the instance of libc
1506 * on the first alternate link map (this happens only when
1507 * the dynamic linker rejects a bad audit library), then clear
1508 * __curthread(). We abandon whatever memory was allocated by
1509 * lmalloc() while running on this alternate link-map but we
1510 * don't care (and can't find the memory in any case); we just
1511 * want to protect the application from this bad audit library.
1512 * No fini processing is done by libc in the normal case.
1515 uberdata_t
*udp
= curthread
->ul_uberdata
;
1517 if (udp
->primary_map
== 0 && udp
== &__uberdata
)
1518 set_curthread(NULL
);
1522 * finish_init is called when we are about to become multi-threaded,
1523 * that is, on the first call to thr_create().
1528 ulwp_t
*self
= curthread
;
1529 uberdata_t
*udp
= self
->ul_uberdata
;
1530 thr_hash_table_t
*htp
;
1535 * No locks needed here; we are single-threaded on the first call.
1536 * We can be called only after the primary link map has been set up.
1538 ASSERT(self
->ul_primarymap
);
1539 ASSERT(self
== udp
->ulwp_one
);
1540 ASSERT(!udp
->uberflags
.uf_mt
);
1541 ASSERT(udp
->hash_size
== 1);
1544 * Initialize self->ul_policy, self->ul_cid, and self->ul_pri.
1549 * Allocate the queue_head array if not already allocated.
1551 if (udp
->queue_head
== NULL
)
1555 * Now allocate the thread hash table.
1557 if ((data
= mmap(NULL
, HASHTBLSZ
* sizeof (thr_hash_table_t
),
1558 PROT_READ
| PROT_WRITE
, MAP_PRIVATE
| MAP_ANON
, -1, (off_t
)0))
1560 thr_panic("cannot allocate thread hash table");
1562 udp
->thr_hash_table
= htp
= (thr_hash_table_t
*)data
;
1563 udp
->hash_size
= HASHTBLSZ
;
1564 udp
->hash_mask
= HASHTBLSZ
- 1;
1566 for (i
= 0; i
< HASHTBLSZ
; i
++, htp
++) {
1567 htp
->hash_lock
.mutex_flag
= LOCK_INITED
;
1568 htp
->hash_lock
.mutex_magic
= MUTEX_MAGIC
;
1569 htp
->hash_cond
.cond_magic
= COND_MAGIC
;
1571 hash_in_unlocked(self
, TIDHASH(self
->ul_lwpid
, udp
), udp
);
1574 * Set up the SIGCANCEL handler for threads cancellation.
1576 setup_cancelsig(SIGCANCEL
);
1579 * Arrange to do special things on exit --
1580 * - collect queue statistics from all remaining active threads.
1581 * - dump queue statistics to stderr if _THREAD_QUEUE_DUMP is set.
1582 * - grab assert_lock to ensure that assertion failures
1583 * and a core dump take precedence over _exit().
1584 * (Functions are called in the reverse order of their registration.)
1586 (void) _atexit(grab_assert_lock
);
1587 #if defined(THREAD_DEBUG)
1588 (void) _atexit(dump_queue_statistics
);
1589 (void) _atexit(collect_queue_statistics
);
1594 * Used only by postfork1_child(), below.
1597 mark_dead_and_buried(ulwp_t
*ulwp
)
1600 ulwp
->ul_lwpid
= (lwpid_t
)(-1);
1601 ulwp
->ul_hash
= NULL
;
1603 ulwp
->ul_schedctl
= NULL
;
1604 ulwp
->ul_schedctl_called
= NULL
;
1608 * This is called from fork1() in the child.
1609 * Reset our data structures to reflect one lwp.
1614 ulwp_t
*self
= curthread
;
1615 uberdata_t
*udp
= self
->ul_uberdata
;
1621 /* daemon threads shouldn't call fork1(), but oh well... */
1622 self
->ul_usropts
&= ~THR_DAEMON
;
1625 udp
->uberflags
.uf_mt
= 0;
1626 __libc_threaded
= 0;
1627 for (i
= 0; i
< udp
->hash_size
; i
++)
1628 udp
->thr_hash_table
[i
].hash_bucket
= NULL
;
1629 self
->ul_lwpid
= _lwp_self();
1630 hash_in_unlocked(self
, TIDHASH(self
->ul_lwpid
, udp
), udp
);
1633 * Some thread in the parent might have been suspended
1634 * while holding udp->callout_lock or udp->ld_lock.
1635 * Reinitialize the child's copies.
1637 (void) mutex_init(&udp
->callout_lock
,
1638 USYNC_THREAD
| LOCK_RECURSIVE
, NULL
);
1639 (void) mutex_init(&udp
->ld_lock
,
1640 USYNC_THREAD
| LOCK_RECURSIVE
, NULL
);
1642 /* no one in the child is on a sleep queue; reinitialize */
1643 if ((qp
= udp
->queue_head
) != NULL
) {
1644 (void) memset(qp
, 0, 2 * QHASHSIZE
* sizeof (queue_head_t
));
1645 for (i
= 0; i
< 2 * QHASHSIZE
; qp
++, i
++) {
1646 qp
->qh_type
= (i
< QHASHSIZE
)? MX
: CV
;
1647 qp
->qh_lock
.mutex_flag
= LOCK_INITED
;
1648 qp
->qh_lock
.mutex_magic
= MUTEX_MAGIC
;
1649 qp
->qh_hlist
= &qp
->qh_def_root
;
1650 #if defined(THREAD_DEBUG)
1658 * Do post-fork1 processing for subsystems that need it.
1659 * We need to do this before unmapping all of the abandoned
1660 * threads' stacks, below(), because the post-fork1 actions
1661 * might require access to those stacks.
1663 postfork1_child_sigev_aio();
1664 postfork1_child_sigev_mq();
1665 postfork1_child_sigev_timer();
1666 postfork1_child_aio();
1668 * The above subsystems use thread pools, so this action
1669 * must be performed after those actions.
1671 postfork1_child_tpool();
1674 * All lwps except ourself are gone. Mark them so.
1675 * First mark all of the lwps that have already been freed.
1676 * Then mark and free all of the active lwps except ourself.
1677 * Since we are single-threaded, no locks are required here.
1679 for (ulwp
= udp
->lwp_stacks
; ulwp
!= NULL
; ulwp
= ulwp
->ul_next
)
1680 mark_dead_and_buried(ulwp
);
1681 for (ulwp
= udp
->ulwp_freelist
; ulwp
!= NULL
; ulwp
= ulwp
->ul_next
)
1682 mark_dead_and_buried(ulwp
);
1683 for (ulwp
= self
->ul_forw
; ulwp
!= self
; ulwp
= next
) {
1684 next
= ulwp
->ul_forw
;
1685 ulwp
->ul_forw
= ulwp
->ul_back
= NULL
;
1686 mark_dead_and_buried(ulwp
);
1690 heldlock_free(ulwp
);
1693 self
->ul_forw
= self
->ul_back
= udp
->all_lwps
= self
;
1694 if (self
!= udp
->ulwp_one
)
1695 mark_dead_and_buried(udp
->ulwp_one
);
1696 if ((ulwp
= udp
->all_zombies
) != NULL
) {
1697 ASSERT(udp
->nzombies
!= 0);
1699 next
= ulwp
->ul_forw
;
1700 ulwp
->ul_forw
= ulwp
->ul_back
= NULL
;
1701 mark_dead_and_buried(ulwp
);
1703 if (ulwp
->ul_replace
) {
1704 ulwp
->ul_next
= NULL
;
1705 if (udp
->ulwp_replace_free
== NULL
) {
1706 udp
->ulwp_replace_free
=
1707 udp
->ulwp_replace_last
= ulwp
;
1709 udp
->ulwp_replace_last
->ul_next
= ulwp
;
1710 udp
->ulwp_replace_last
= ulwp
;
1713 } while ((ulwp
= next
) != udp
->all_zombies
);
1714 ASSERT(udp
->nzombies
== 0);
1715 udp
->all_zombies
= NULL
;
1718 trim_stack_cache(0);
1724 return (curthread
->ul_lwpid
);
1727 #pragma weak _ti_thr_self = thr_self
1728 #pragma weak pthread_self = thr_self
1732 return (curthread
->ul_lwpid
);
1738 ulwp_t
*self
= __curthread();
1740 return ((self
== NULL
)? -1 : self
->ul_main
);
1744 _thrp_cancelled(void)
1746 return (curthread
->ul_rval
== PTHREAD_CANCELED
);
1750 _thrp_stksegment(ulwp_t
*ulwp
, stack_t
*stk
)
1752 stk
->ss_sp
= (void *)ulwp
->ul_stktop
;
1753 stk
->ss_size
= ulwp
->ul_stksiz
;
1758 #pragma weak _thr_stksegment = thr_stksegment
1760 thr_stksegment(stack_t
*stk
)
1762 return (_thrp_stksegment(curthread
, stk
));
1766 force_continue(ulwp_t
*ulwp
)
1768 #if defined(THREAD_DEBUG)
1769 ulwp_t
*self
= curthread
;
1770 uberdata_t
*udp
= self
->ul_uberdata
;
1775 ASSERT(MUTEX_OWNED(&udp
->fork_lock
, self
));
1776 ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp
, udp
), self
));
1779 error
= _lwp_continue(ulwp
->ul_lwpid
);
1780 if (error
!= 0 && error
!= EINTR
)
1783 if (ulwp
->ul_stopping
) { /* it is stopping itself */
1784 ts
.tv_sec
= 0; /* give it a chance to run */
1785 ts
.tv_nsec
= 100000; /* 100 usecs or clock tick */
1786 (void) __nanosleep(&ts
, NULL
);
1788 if (!ulwp
->ul_stopping
) /* it is running now */
1789 break; /* so we are done */
1791 * It is marked as being in the process of stopping
1792 * itself. Loop around and continue it again.
1793 * It may not have been stopped the first time.
1799 * Suspend an lwp with lwp_suspend(), then move it to a safe point,
1800 * that is, to a point where ul_critical and ul_rtld are both zero.
1801 * On return, the ulwp_lock() is dropped as with ulwp_unlock().
1802 * If 'link_dropped' is non-NULL, then 'link_lock' is held on entry.
1803 * If we have to drop link_lock, we store 1 through link_dropped.
1804 * If the lwp exits before it can be suspended, we return ESRCH.
1807 safe_suspend(ulwp_t
*ulwp
, uchar_t whystopped
, int *link_dropped
)
1809 ulwp_t
*self
= curthread
;
1810 uberdata_t
*udp
= self
->ul_uberdata
;
1811 cond_t
*cvp
= ulwp_condvar(ulwp
, udp
);
1812 mutex_t
*mp
= ulwp_mutex(ulwp
, udp
);
1813 thread_t tid
= ulwp
->ul_lwpid
;
1814 int ix
= ulwp
->ul_ix
;
1817 ASSERT(whystopped
== TSTP_REGULAR
||
1818 whystopped
== TSTP_MUTATOR
||
1819 whystopped
== TSTP_FORK
);
1820 ASSERT(ulwp
!= self
);
1821 ASSERT(!ulwp
->ul_stop
);
1822 ASSERT(MUTEX_OWNED(&udp
->fork_lock
, self
));
1823 ASSERT(MUTEX_OWNED(mp
, self
));
1825 if (link_dropped
!= NULL
)
1829 * We must grab the target's spin lock before suspending it.
1830 * See the comments below and in _thrp_suspend() for why.
1832 spin_lock_set(&ulwp
->ul_spinlock
);
1833 (void) ___lwp_suspend(tid
);
1834 spin_lock_clear(&ulwp
->ul_spinlock
);
1837 if ((ulwp
->ul_critical
== 0 && ulwp
->ul_rtld
== 0) ||
1838 ulwp
->ul_stopping
) {
1839 /* thread is already safe */
1840 ulwp
->ul_stop
|= whystopped
;
1843 * Setting ul_pleasestop causes the target thread to stop
1844 * itself in _thrp_suspend(), below, after we drop its lock.
1845 * We must continue the critical thread before dropping
1846 * link_lock because the critical thread may be holding
1847 * the queue lock for link_lock. This is delicate.
1849 ulwp
->ul_pleasestop
|= whystopped
;
1850 force_continue(ulwp
);
1851 if (link_dropped
!= NULL
) {
1853 lmutex_unlock(&udp
->link_lock
);
1854 /* be sure to drop link_lock only once */
1855 link_dropped
= NULL
;
1859 * The thread may disappear by calling thr_exit() so we
1860 * cannot rely on the ulwp pointer after dropping the lock.
1861 * Instead, we search the hash table to find it again.
1862 * When we return, we may find that the thread has been
1863 * continued by some other thread. The suspend/continue
1864 * interfaces are prone to such race conditions by design.
1866 while (ulwp
&& !ulwp
->ul_dead
&& !ulwp
->ul_stop
&&
1867 (ulwp
->ul_pleasestop
& whystopped
)) {
1868 (void) __cond_wait(cvp
, mp
);
1869 for (ulwp
= udp
->thr_hash_table
[ix
].hash_bucket
;
1870 ulwp
!= NULL
; ulwp
= ulwp
->ul_hash
) {
1871 if (ulwp
->ul_lwpid
== tid
)
1876 if (ulwp
== NULL
|| ulwp
->ul_dead
)
1880 * Do another lwp_suspend() to make sure we don't
1881 * return until the target thread is fully stopped
1882 * in the kernel. Don't apply lwp_suspend() until
1883 * we know that the target is not holding any
1884 * queue locks, that is, that it has completed
1885 * ulwp_unlock(self) and has, or at least is
1886 * about to, call lwp_suspend() on itself. We do
1887 * this by grabbing the target's spin lock.
1889 ASSERT(ulwp
->ul_lwpid
== tid
);
1890 spin_lock_set(&ulwp
->ul_spinlock
);
1891 (void) ___lwp_suspend(tid
);
1892 spin_lock_clear(&ulwp
->ul_spinlock
);
1894 * If some other thread did a thr_continue()
1895 * on the target thread we have to start over.
1897 if (!ulwp
->ul_stopping
|| !(ulwp
->ul_stop
& whystopped
))
1902 (void) cond_broadcast(cvp
);
1908 _thrp_suspend(thread_t tid
, uchar_t whystopped
)
1910 ulwp_t
*self
= curthread
;
1911 uberdata_t
*udp
= self
->ul_uberdata
;
1915 ASSERT((whystopped
& (TSTP_REGULAR
|TSTP_MUTATOR
|TSTP_FORK
)) != 0);
1916 ASSERT((whystopped
& ~(TSTP_REGULAR
|TSTP_MUTATOR
|TSTP_FORK
)) == 0);
1919 * We can't suspend anyone except ourself while
1920 * some other thread is performing a fork.
1921 * This also allows only one suspension at a time.
1923 if (tid
!= self
->ul_lwpid
)
1926 if ((ulwp
= find_lwp(tid
)) == NULL
)
1928 else if (whystopped
== TSTP_MUTATOR
&& !ulwp
->ul_mutator
) {
1929 ulwp_unlock(ulwp
, udp
);
1931 } else if (ulwp
->ul_stop
) { /* already stopped */
1932 ulwp
->ul_stop
|= whystopped
;
1933 ulwp_broadcast(ulwp
);
1934 ulwp_unlock(ulwp
, udp
);
1935 } else if (ulwp
!= self
) {
1937 * After suspending the other thread, move it out of a
1938 * critical section and deal with the schedctl mappings.
1939 * safe_suspend() suspends the other thread, calls
1940 * ulwp_broadcast(ulwp) and drops the ulwp lock.
1942 error
= safe_suspend(ulwp
, whystopped
, NULL
);
1944 int schedctl_after_fork
= 0;
1947 * We are suspending ourself. We must not take a signal
1948 * until we return from lwp_suspend() and clear ul_stopping.
1949 * This is to guard against siglongjmp().
1951 enter_critical(self
);
1952 self
->ul_sp
= stkptr();
1953 _flush_windows(); /* sparc */
1954 self
->ul_pleasestop
= 0;
1955 self
->ul_stop
|= whystopped
;
1957 * Grab our spin lock before dropping ulwp_mutex(self).
1958 * This prevents the suspending thread from applying
1959 * lwp_suspend() to us before we emerge from
1960 * lmutex_unlock(mp) and have dropped mp's queue lock.
1962 spin_lock_set(&self
->ul_spinlock
);
1963 self
->ul_stopping
= 1;
1964 ulwp_broadcast(self
);
1965 ulwp_unlock(self
, udp
);
1967 * From this point until we return from lwp_suspend(),
1968 * we must not call any function that might invoke the
1969 * dynamic linker, that is, we can only call functions
1970 * private to the library.
1972 * Also, this is a nasty race condition for a process
1973 * that is undergoing a forkall() operation:
1974 * Once we clear our spinlock (below), we are vulnerable
1975 * to being suspended by the forkall() thread before
1976 * we manage to suspend ourself in ___lwp_suspend().
1977 * See safe_suspend() and force_continue().
1979 * To avoid a SIGSEGV due to the disappearance
1980 * of the schedctl mappings in the child process,
1981 * which can happen in spin_lock_clear() if we
1982 * are suspended while we are in the middle of
1983 * its call to preempt(), we preemptively clear
1984 * our own schedctl pointer before dropping our
1985 * spinlock. We reinstate it, in both the parent
1986 * and (if this really is a forkall()) the child.
1988 if (whystopped
& TSTP_FORK
) {
1989 schedctl_after_fork
= 1;
1990 self
->ul_schedctl
= NULL
;
1991 self
->ul_schedctl_called
= &udp
->uberflags
;
1993 spin_lock_clear(&self
->ul_spinlock
);
1994 (void) ___lwp_suspend(tid
);
1996 * Somebody else continued us.
1997 * We can't grab ulwp_lock(self)
1998 * until after clearing ul_stopping.
1999 * force_continue() relies on this.
2001 self
->ul_stopping
= 0;
2003 if (schedctl_after_fork
) {
2004 self
->ul_schedctl_called
= NULL
;
2005 self
->ul_schedctl
= NULL
;
2006 (void) setup_schedctl();
2008 ulwp_lock(self
, udp
);
2009 ulwp_broadcast(self
);
2010 ulwp_unlock(self
, udp
);
2011 exit_critical(self
);
2014 if (tid
!= self
->ul_lwpid
)
2021 * Suspend all lwps other than ourself in preparation for fork.
2026 ulwp_t
*self
= curthread
;
2027 uberdata_t
*udp
= self
->ul_uberdata
;
2031 ASSERT(MUTEX_OWNED(&udp
->fork_lock
, self
));
2033 lmutex_lock(&udp
->link_lock
);
2035 for (ulwp
= self
->ul_forw
; ulwp
!= self
; ulwp
= ulwp
->ul_forw
) {
2036 ulwp_lock(ulwp
, udp
);
2037 if (ulwp
->ul_stop
) { /* already stopped */
2038 ulwp
->ul_stop
|= TSTP_FORK
;
2039 ulwp_broadcast(ulwp
);
2040 ulwp_unlock(ulwp
, udp
);
2043 * Move the stopped lwp out of a critical section.
2045 if (safe_suspend(ulwp
, TSTP_FORK
, &link_dropped
) ||
2051 lmutex_unlock(&udp
->link_lock
);
2055 continue_fork(int child
)
2057 ulwp_t
*self
= curthread
;
2058 uberdata_t
*udp
= self
->ul_uberdata
;
2061 ASSERT(MUTEX_OWNED(&udp
->fork_lock
, self
));
2064 * Clear the schedctl pointers in the child of forkall().
2067 for (ulwp
= self
->ul_forw
; ulwp
!= self
; ulwp
= ulwp
->ul_forw
) {
2068 ulwp
->ul_schedctl_called
=
2069 ulwp
->ul_dead
? &udp
->uberflags
: NULL
;
2070 ulwp
->ul_schedctl
= NULL
;
2075 * Set all lwps that were stopped for fork() running again.
2077 lmutex_lock(&udp
->link_lock
);
2078 for (ulwp
= self
->ul_forw
; ulwp
!= self
; ulwp
= ulwp
->ul_forw
) {
2079 mutex_t
*mp
= ulwp_mutex(ulwp
, udp
);
2081 ASSERT(ulwp
->ul_stop
& TSTP_FORK
);
2082 ulwp
->ul_stop
&= ~TSTP_FORK
;
2083 ulwp_broadcast(ulwp
);
2085 force_continue(ulwp
);
2088 lmutex_unlock(&udp
->link_lock
);
2092 _thrp_continue(thread_t tid
, uchar_t whystopped
)
2094 uberdata_t
*udp
= curthread
->ul_uberdata
;
2099 ASSERT(whystopped
== TSTP_REGULAR
||
2100 whystopped
== TSTP_MUTATOR
);
2103 * We single-thread the entire thread suspend/continue mechanism.
2107 if ((ulwp
= find_lwp(tid
)) == NULL
) {
2112 mp
= ulwp_mutex(ulwp
, udp
);
2113 if ((whystopped
== TSTP_MUTATOR
&& !ulwp
->ul_mutator
)) {
2115 } else if (ulwp
->ul_stop
& whystopped
) {
2116 ulwp
->ul_stop
&= ~whystopped
;
2117 ulwp_broadcast(ulwp
);
2118 if (!ulwp
->ul_stop
) {
2119 if (whystopped
== TSTP_REGULAR
&& ulwp
->ul_created
) {
2121 ulwp
->ul_created
= 0;
2123 force_continue(ulwp
);
2133 thr_suspend(thread_t tid
)
2135 return (_thrp_suspend(tid
, TSTP_REGULAR
));
2139 thr_continue(thread_t tid
)
2141 return (_thrp_continue(tid
, TSTP_REGULAR
));
2150 #pragma weak pthread_kill = thr_kill
2151 #pragma weak _thr_kill = thr_kill
2153 thr_kill(thread_t tid
, int sig
)
2155 if (sig
== SIGCANCEL
)
2157 return (_lwp_kill(tid
, sig
));
2161 * Exit a critical section, take deferred actions if necessary.
2162 * Called from exit_critical() and from sigon().
2167 ulwp_t
*self
= curthread
;
2170 ASSERT(self
->ul_critical
== 0);
2173 * Don't suspend ourself or take a deferred signal while dying
2174 * or while executing inside the dynamic linker (ld.so.1).
2176 if (self
->ul_dead
|| self
->ul_rtld
)
2179 while (self
->ul_pleasestop
||
2180 (self
->ul_cursig
!= 0 && self
->ul_sigdefer
== 0)) {
2182 * Avoid a recursive call to exit_critical() in _thrp_suspend()
2183 * by keeping self->ul_critical == 1 here.
2185 self
->ul_critical
++;
2186 while (self
->ul_pleasestop
) {
2188 * Guard against suspending ourself while on a sleep
2189 * queue. See the comments in call_user_handler().
2192 set_parking_flag(self
, 0);
2193 (void) _thrp_suspend(self
->ul_lwpid
,
2194 self
->ul_pleasestop
);
2196 self
->ul_critical
--;
2198 if ((sig
= self
->ul_cursig
) != 0 && self
->ul_sigdefer
== 0) {
2200 * Clear ul_cursig before proceeding.
2201 * This protects us from the dynamic linker's
2202 * calls to bind_guard()/bind_clear() in the
2203 * event that it is invoked to resolve a symbol
2204 * like take_deferred_signal() below.
2206 self
->ul_cursig
= 0;
2207 take_deferred_signal(sig
);
2208 ASSERT(self
->ul_cursig
== 0);
2211 ASSERT(self
->ul_critical
== 0);

/*
 * _ti_bind_guard() and _ti_bind_clear() are called by the dynamic linker
 * (ld.so.1) when it has to do something, like resolve a symbol to be called
 * by the application or one of its libraries.  _ti_bind_guard() is called
 * on entry to ld.so.1, _ti_bind_clear() on exit from ld.so.1 back to the
 * application.  The dynamic linker gets special dispensation from libc to
 * run in a critical region (all signals deferred and no thread suspension
 * or forking allowed), and to be immune from cancellation for the duration.
 */
int
_ti_bind_guard(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int bindflag = (flags & THR_FLG_RTLD);

	if ((self->ul_bindflags & bindflag) == bindflag)
		return (0);
	self->ul_bindflags |= bindflag;
	if ((flags & (THR_FLG_NOLOCK | THR_FLG_REENTER)) == THR_FLG_NOLOCK) {
		sigoff(self);	/* see no signals while holding ld_lock */
		self->ul_rtld++;	/* don't suspend while in ld.so.1 */
		(void) mutex_lock(&udp->ld_lock);
	}
	enter_critical(self);
	self->ul_save_state = self->ul_cancel_disabled;
	self->ul_cancel_disabled = 1;
	set_cancel_pending_flag(self, 0);
	return (1);
}

int
_ti_bind_clear(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int bindflag = (flags & THR_FLG_RTLD);

	if ((self->ul_bindflags & bindflag) == 0)
		return (self->ul_bindflags);
	self->ul_bindflags &= ~bindflag;
	self->ul_cancel_disabled = self->ul_save_state;
	set_cancel_pending_flag(self, 0);
	exit_critical(self);
	if ((flags & (THR_FLG_NOLOCK | THR_FLG_REENTER)) == THR_FLG_NOLOCK) {
		if (MUTEX_OWNED(&udp->ld_lock, self)) {
			(void) mutex_unlock(&udp->ld_lock);
			self->ul_rtld--;
		}
		sigon(self);	/* reenable signals */
	}
	return (self->ul_bindflags);
}
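
/*
 * Editorial sketch (not part of the original source): the pairing the
 * dynamic linker is expected to follow around each excursion into
 * ld.so.1; the call site is hypothetical, only the protocol matters:
 *
 *	int entered = _ti_bind_guard(THR_FLG_RTLD);
 *	... resolve the symbol, apply relocations, etc ...
 *	if (entered)
 *		(void) _ti_bind_clear(THR_FLG_RTLD);
 *
 * _ti_bind_guard() returns 0 on a recursive entry (the bindflag is
 * already set), so only the outermost entry/exit pair pays for the
 * critical-region setup and teardown.
 */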

/*
 * Tell the dynamic linker (ld.so.1) whether or not it was entered from
 * a critical region in libc.  Return zero if not, else return non-zero.
 */
int
_ti_critical(void)
{
	ulwp_t *self = curthread;
	int level = self->ul_critical;

	if ((self->ul_bindflags & THR_FLG_RTLD) == 0 || level == 0)
		return (level);	/* ld.so.1 hasn't (yet) called enter() */
	return (level - 1);
}

/*
 * sigoff() and sigon() enable cond_wait() to behave (optionally) like
 * it does in the old libthread (see the comments in cond_wait_queue()).
 * Also, signals are deferred at thread startup until TLS constructors
 * have all been called, at which time _thrp_setup() calls sigon().
 *
 * _sigoff() and _sigon() are external consolidation-private interfaces to
 * sigoff() and sigon(), respectively, in libc.  These are used in libnsl.
 * Also, _sigoff() and _sigon() are called from dbx's run-time checking
 * (librtc.so) to defer signals during its critical sections (not to be
 * confused with libc critical sections [see exit_critical() above]).
 */
void
_sigoff(void)
{
	ulwp_t *self = curthread;

	sigoff(self);
}

void
_sigon(void)
{
	ulwp_t *self = curthread;

	ASSERT(self->ul_sigdefer > 0);
	sigon(self);
}
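
/*
 * Editorial sketch (not part of the original source): how a
 * consolidation-private consumer such as libnsl or librtc.so would
 * bracket a signal-sensitive region; the region body is hypothetical:
 *
 *	extern void _sigoff(void);
 *	extern void _sigon(void);
 *
 *	_sigoff();
 *	... touch state that a signal handler might also touch ...
 *	_sigon();
 *
 * Signals arriving between the two calls are deferred (ul_cursig) and
 * taken when sigon() drops ul_sigdefer back to zero.
 */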

int
thr_getconcurrency()
{
	return (thr_concurrency);
}

int
pthread_getconcurrency()
{
	return (pthread_concurrency);
}

int
thr_setconcurrency(int new_level)
{
	uberdata_t *udp = curthread->ul_uberdata;

	if (new_level < 0)
		return (EINVAL);
	if (new_level > 65536)		/* 65536 is totally arbitrary */
		new_level = 65536;
	lmutex_lock(&udp->link_lock);
	if (new_level > thr_concurrency)
		thr_concurrency = new_level;
	lmutex_unlock(&udp->link_lock);
	return (0);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return (EINVAL);
	if (new_level > 65536)		/* 65536 is totally arbitrary */
		new_level = 65536;
	pthread_concurrency = new_level;
	return (0);
}

int
__nthreads(void)
{
	return (curthread->ul_uberdata->nthreads);
}
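
/*
 * Editorial note (not part of the original source): since these are
 * pure hints, a round-trip behaves as follows:
 *
 *	(void) thr_setconcurrency(8);
 *	level = thr_getconcurrency();	now level >= 8
 *
 * thr_setconcurrency() only ever raises the stored value, while
 * pthread_setconcurrency() stores its (clamped) argument verbatim.
 */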

/*
 * The remainder of this file implements the private interfaces to java for
 * garbage collection.  It is no longer used, at least by java 1.2.
 * It can all go away once all old JVMs have disappeared.
 */

int	suspendingallmutators;	/* when non-zero, suspending all mutators. */
int	suspendedallmutators;	/* when non-zero, all mutators suspended. */
int	mutatorsbarrier;	/* when non-zero, mutators barrier imposed. */
mutex_t	mutatorslock = DEFAULTMUTEX;	/* used to enforce mutators barrier. */
cond_t	mutatorscv = DEFAULTCV;		/* where non-mutators sleep. */

/*
 * Get the available register state for the target thread.
 * Return non-volatile registers: TRS_NONVOLATILE
 */
#pragma weak _thr_getstate = thr_getstate
int
thr_getstate(thread_t tid, int *flag, lwpid_t *lwp, stack_t *ss, gregset_t rs)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	int error = 0;
	int trs_flag = TRS_LWPID;

	if (tid == 0 || self->ul_lwpid == tid) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwpp = find_lwpp(tid)) != NULL) {
		ulwp = *ulwpp;
	} else {
		if (flag)
			*flag = TRS_INVALID;
		return (ESRCH);
	}

	if (ulwp->ul_dead) {
		trs_flag = TRS_INVALID;
	} else if (!ulwp->ul_stop && !suspendedallmutators) {
		error = EINVAL;
		trs_flag = TRS_INVALID;
	} else if (ulwp->ul_stop) {
		trs_flag = TRS_NONVOLATILE;
		getgregs(ulwp, rs);
	}

	if (flag)
		*flag = trs_flag;
	if (lwp)
		*lwp = tid;
	if (ss != NULL)
		(void) _thrp_stksegment(ulwp, ss);

	ulwp_unlock(ulwp, udp);
	return (error);
}

/*
 * Set the appropriate register state for the target thread.
 * This is not used by java.  It exists solely for the MSTC test suite.
 */
#pragma weak _thr_setstate = thr_setstate
int
thr_setstate(thread_t tid, int flag, gregset_t rs)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (ESRCH);

	if (!ulwp->ul_stop && !suspendedallmutators)
		error = EINVAL;
	else if (rs != NULL) {
		switch (flag) {
		case TRS_NONVOLATILE:
			/* do /proc stuff here? */
			if (ulwp->ul_stop)
				setgregs(ulwp, rs);
			else
				error = EINVAL;
			break;
		case TRS_LWPID:		/* do /proc stuff here? */
		default:
			error = EINVAL;
			break;
		}
	}

	ulwp_unlock(ulwp, udp);
	return (error);
}

int
getlwpstatus(thread_t tid, struct lwpstatus *sp)
{
#ifdef _LP64
	extern ssize_t __pread(int, void *, size_t, off_t);
#else
	extern ssize_t __pread64(int, void *, size_t, off64_t);
#define	__pread	__pread64
#endif
	char buf[100];
	int fd;
2472 /* "/proc/self/lwp/%u/lwpstatus" w/o stdio */
2473 (void) strcpy(buf
, "/proc/self/lwp/");
2474 ultos((uint64_t)tid
, 10, buf
+ strlen(buf
));
2475 (void) strcat(buf
, "/lwpstatus");
2476 if ((fd
= __open(buf
, O_RDONLY
, 0)) >= 0) {
2477 while (__pread(fd
, sp
, sizeof (*sp
), 0) == sizeof (*sp
)) {
2478 if (sp
->pr_flags
& PR_STOPPED
) {
2482 yield(); /* give it a chance to stop */
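
/*
 * Editorial note (not part of the original source): the
 * strcpy()/ultos()/strcat() sequence above exists only because this
 * code must not call into stdio.  Ordinary application code reading
 * the same /proc file could simply do (sketch):
 *
 *	char buf[100];
 *	(void) snprintf(buf, sizeof (buf),
 *	    "/proc/self/lwp/%u/lwpstatus", (uint_t)tid);
 *	fd = open(buf, O_RDONLY);
 */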

int
putlwpregs(thread_t tid, prgregset_t prp)
{
	extern ssize_t __writev(int, const struct iovec *, int);
	char buf[100];
	int fd;
	long dstop_sreg[2];
	long run_null[2];
	iovec_t iov[3];

	/* "/proc/self/lwp/%u/lwpctl" w/o stdio */
	(void) strcpy(buf, "/proc/self/lwp/");
	ultos((uint64_t)tid, 10, buf + strlen(buf));
	(void) strcat(buf, "/lwpctl");
	if ((fd = __open(buf, O_WRONLY, 0)) >= 0) {
		dstop_sreg[0] = PCDSTOP;	/* direct it to stop */
		dstop_sreg[1] = PCSREG;		/* set the registers */
		iov[0].iov_base = (caddr_t)dstop_sreg;
		iov[0].iov_len = sizeof (dstop_sreg);
		iov[1].iov_base = (caddr_t)prp;	/* from the register set */
		iov[1].iov_len = sizeof (prgregset_t);
		run_null[0] = PCRUN;		/* make it runnable again */
		run_null[1] = 0;
		iov[2].iov_base = (caddr_t)run_null;
		iov[2].iov_len = sizeof (run_null);
		if (__writev(fd, iov, 3) >= 0) {
			(void) __close(fd);
			return (0);
		}
		(void) __close(fd);
	}
	return (-1);
}
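
/*
 * Editorial note (not part of the original source): the three control
 * messages (PCDSTOP, PCSREG plus the register set, PCRUN) travel as
 * three iovecs of a single __writev(), so /proc applies them in order
 * within one system call: direct the lwp to stop, install the
 * registers, then set it running again.
 */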

static ulong_t
gettsp_slow(thread_t tid)
{
	char buf[100];
	struct lwpstatus status;

	if (getlwpstatus(tid, &status) != 0) {
		/* "__gettsp(%u): can't read lwpstatus" w/o stdio */
		(void) strcpy(buf, "__gettsp(");
		ultos((uint64_t)tid, 10, buf + strlen(buf));
		(void) strcat(buf, "): can't read lwpstatus");
		thr_panic(buf);
	}
	return (status.pr_reg[R_SP]);
}

ulong_t
__gettsp(thread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	ulong_t result;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (0);

	if (ulwp->ul_stop && (result = ulwp->ul_sp) != 0) {
		ulwp_unlock(ulwp, udp);
		return (result);
	}

	result = gettsp_slow(tid);
	ulwp_unlock(ulwp, udp);
	return (result);
}

/*
 * This tells java stack walkers how to find the ucontext
 * structure passed to signal handlers.
 */
#pragma weak _thr_sighndlrinfo = thr_sighndlrinfo
void
thr_sighndlrinfo(void (**func)(), int *funcsize)
{
	*func = &__sighndlr;
	*funcsize = (char *)&__sighndlrend - (char *)&__sighndlr;
}
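
/*
 * Editorial sketch (not part of the original source): a stack walker
 * uses the reported range to recognize signal-handler frames; pc here
 * is a hypothetical program counter harvested from a stack frame:
 *
 *	void (*hndlr)();
 *	int hndlrsize;
 *
 *	thr_sighndlrinfo(&hndlr, &hndlrsize);
 *	if ((uintptr_t)pc - (uintptr_t)hndlr < (uintptr_t)hndlrsize)
 *		... the frame belongs to __sighndlr; the ucontext
 *		    passed to the handler can be located from it ...
 */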

/*
 * Mark a thread a mutator or reset a mutator to being a default,
 * non-mutator thread.
 */
#pragma weak _thr_setmutator = thr_setmutator
int
thr_setmutator(thread_t tid, int enabled)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error;
	int cancel_state;

	enabled = enabled ? 1 : 0;
top:
	if (tid == 0) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwp = find_lwp(tid)) == NULL) {
		return (ESRCH);
	}

	/*
	 * The target thread should be the caller itself or a suspended thread.
	 * This prevents the target from also changing its ul_mutator field.
	 */
	error = 0;
	if (ulwp != self && !ulwp->ul_stop && enabled)
		error = EINVAL;
	else if (ulwp->ul_mutator != enabled) {
		lmutex_lock(&mutatorslock);
		if (mutatorsbarrier) {
			ulwp_unlock(ulwp, udp);
			(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,
			    &cancel_state);
			while (mutatorsbarrier)
				(void) cond_wait(&mutatorscv, &mutatorslock);
			(void) pthread_setcancelstate(cancel_state, NULL);
			lmutex_unlock(&mutatorslock);
			goto top;
		}
		ulwp->ul_mutator = enabled;
		lmutex_unlock(&mutatorslock);
	}

	ulwp_unlock(ulwp, udp);
	return (error);
}
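
/*
 * Editorial sketch (not part of the original source): a JVM thread
 * would mark itself on the way into java code and unmark itself on
 * the way out; tid 0 means the calling thread:
 *
 *	(void) thr_setmutator(0, 1);
 *	... run code whose stacks the collector must scan ...
 *	(void) thr_setmutator(0, 0);
 */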

/*
 * Establish a barrier against new mutators.  Any non-mutator trying
 * to become a mutator is suspended until the barrier is removed.
 */
#pragma weak _thr_mutators_barrier = thr_mutators_barrier
void
thr_mutators_barrier(int enabled)
{
	int oldvalue;
	int cancel_state;

	lmutex_lock(&mutatorslock);

	/*
	 * Wait if trying to set the barrier while it is already set.
	 */
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
	while (mutatorsbarrier && enabled)
		(void) cond_wait(&mutatorscv, &mutatorslock);
	(void) pthread_setcancelstate(cancel_state, NULL);

	oldvalue = mutatorsbarrier;
	mutatorsbarrier = enabled;
	/*
	 * Wake up any blocked non-mutators when the barrier is removed.
	 */
	if (oldvalue && !enabled)
		(void) cond_broadcast(&mutatorscv);
	lmutex_unlock(&mutatorslock);
}
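
/*
 * Editorial sketch (not part of the original source): a collector
 * brackets the window during which no new mutators may appear:
 *
 *	thr_mutators_barrier(1);
 *	... threads calling thr_setmutator(tid, 1) now block ...
 *	thr_mutators_barrier(0);	wakes the blocked waiters
 */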

/*
 * Suspend the set of all mutators except for the caller.  The list
 * of actively running threads is searched and only the mutators
 * in this list are suspended.  Actively running non-mutators remain
 * running.  Any other thread is suspended.
 */
#pragma weak _thr_suspend_allmutators = thr_suspend_allmutators
int
thr_suspend_allmutators(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int link_dropped;

	/*
	 * We single-thread the entire thread suspend/continue mechanism.
	 */
	fork_lock_enter();

top:
	lmutex_lock(&udp->link_lock);

	if (suspendingallmutators || suspendedallmutators) {
		lmutex_unlock(&udp->link_lock);
		fork_lock_exit();
		return (EINVAL);
	}
	suspendingallmutators = 1;

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		ulwp_lock(ulwp, udp);
		if (!ulwp->ul_mutator) {
			ulwp_unlock(ulwp, udp);
		} else if (ulwp->ul_stop) {	/* already stopped */
			ulwp->ul_stop |= TSTP_MUTATOR;
			ulwp_broadcast(ulwp);
			ulwp_unlock(ulwp, udp);
		} else {
			/*
			 * Move the stopped lwp out of a critical section.
			 */
			if (safe_suspend(ulwp, TSTP_MUTATOR, &link_dropped) ||
			    link_dropped) {
				suspendingallmutators = 0;
				goto top;
			}
		}
	}

	suspendedallmutators = 1;
	suspendingallmutators = 0;
	lmutex_unlock(&udp->link_lock);
	fork_lock_exit();
	return (0);
}
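
/*
 * Editorial sketch (not part of the original source): the
 * stop-the-world cycle these interfaces were designed for, with error
 * handling omitted:
 *
 *	if (thr_suspend_allmutators() == 0) {
 *		... for each mutator tid: scan its stack starting
 *		    from __gettsp(tid) ...
 *		(void) thr_continue_allmutators();
 *	}
 */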

/*
 * Suspend the target mutator.  The caller is permitted to suspend
 * itself.  If a mutator barrier is enabled, the caller will suspend
 * itself as though it had been suspended by thr_suspend_allmutators().
 * When the barrier is removed, this thread will be resumed.  Any
 * suspended mutator, whether suspended by thr_suspend_mutator(), or by
 * thr_suspend_allmutators(), can be resumed by thr_continue_mutator().
 */
#pragma weak _thr_suspend_mutator = thr_suspend_mutator
int
thr_suspend_mutator(thread_t tid)
{
	if (tid == 0)
		tid = curthread->ul_lwpid;
	return (_thrp_suspend(tid, TSTP_MUTATOR));
}

/*
 * Resume the set of all suspended mutators.
 */
#pragma weak _thr_continue_allmutators = thr_continue_allmutators
int
thr_continue_allmutators()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;

	/*
	 * We single-thread the entire thread suspend/continue mechanism.
	 */
	fork_lock_enter();

	lmutex_lock(&udp->link_lock);
	if (!suspendedallmutators) {
		lmutex_unlock(&udp->link_lock);
		fork_lock_exit();
		return (EINVAL);
	}
	suspendedallmutators = 0;

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		mutex_t *mp = ulwp_mutex(ulwp, udp);
		lmutex_lock(mp);
		if (ulwp->ul_stop & TSTP_MUTATOR) {
			ulwp->ul_stop &= ~TSTP_MUTATOR;
			ulwp_broadcast(ulwp);
			if (!ulwp->ul_stop)
				force_continue(ulwp);
		}
		lmutex_unlock(mp);
	}

	lmutex_unlock(&udp->link_lock);
	fork_lock_exit();
	return (0);
}

/*
 * Resume a suspended mutator.
 */
#pragma weak _thr_continue_mutator = thr_continue_mutator
int
thr_continue_mutator(thread_t tid)
{
	return (_thrp_continue(tid, TSTP_MUTATOR));
}

#pragma weak _thr_wait_mutator = thr_wait_mutator
int
thr_wait_mutator(thread_t tid, int dontwait)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	int cancel_state;
	int error = 0;

	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
top:
	if ((ulwp = find_lwp(tid)) == NULL) {
		(void) pthread_setcancelstate(cancel_state, NULL);
		return (ESRCH);
	}

	if (!ulwp->ul_mutator)
		error = EINVAL;
	else if (dontwait) {
		if (!(ulwp->ul_stop & TSTP_MUTATOR))
			error = EWOULDBLOCK;
	} else if (!(ulwp->ul_stop & TSTP_MUTATOR)) {
		cond_t *cvp = ulwp_condvar(ulwp, udp);
		mutex_t *mp = ulwp_mutex(ulwp, udp);

		(void) cond_wait(cvp, mp);
		(void) lmutex_unlock(mp);
		goto top;
	}

	ulwp_unlock(ulwp, udp);
	(void) pthread_setcancelstate(cancel_state, NULL);
	return (error);
}
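
/*
 * Editorial sketch (not part of the original source): suspending one
 * mutator asynchronously and waiting until it has actually stopped:
 *
 *	(void) thr_suspend_mutator(tid);
 *	(void) thr_wait_mutator(tid, 0);	blocks until stopped
 *
 * A non-zero dontwait polls instead: EWOULDBLOCK is returned if the
 * target is not yet stopped.
 */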

/* PROBE_SUPPORT begin */

void
thr_probe_setup(void *data)
{
	curthread->ul_tpdp = data;
}

static void *
_thread_probe_getfunc()
{
	return (curthread->ul_tpdp);
}

void * (*thr_probe_getfunc_addr)(void) = _thread_probe_getfunc;

/* ARGSUSED */
void
_resume(ulwp_t *ulwp, caddr_t sp, int dontsave)
{
	/* never called */
}

/* ARGSUSED */
void
_resume_ret(ulwp_t *oldlwp)
{
	/* never called */
}

/* PROBE_SUPPORT end */