#include "pthread_impl.h"
#include "stdio_impl.h"
#include <sys/mman.h>
#include <string.h>
#include <stddef.h>
#include <errno.h>
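
/* Empty stub: the weak aliases below point each name at dummy_0 until
 * the translation unit providing the real definition (for example,
 * pthread_key_create.c for __pthread_tsd_run_dtors) is linked in. */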
static void dummy_0()
{
}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);
weak_alias(dummy_0, __do_orphaned_stdio_locks);
weak_alias(dummy_0, __dl_thread_cleanup);
static void *dummy_1(void *p)
{
	return 0;
}
weak_alias(dummy_1, __start_sched);
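/* The __start_sched stub stands in when support for explicit
 * scheduling attributes is not linked; the real version applies the
 * requested scheduling in the new thread before calling its start
 * function. */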

_Noreturn void __pthread_exit(void *result)
{
	pthread_t self = __pthread_self();
	sigset_t set;

	self->canceldisable = 1;
	self->cancelasync = 0;
	self->result = result;
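
	/* Run cancellation cleanup handlers registered with
	 * pthread_cleanup_push, newest first, before thread-specific
	 * data destructors. */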
	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	__pthread_tsd_run_dtors();

	/* Access to target the exiting thread with syscalls that use
	 * its kernel tid is controlled by killlock. For detached threads,
	 * any use past this point would have undefined behavior, but for
	 * joinable threads it's a valid usage that must be handled. */
	LOCK(self->killlock);

	/* Block all signals before decrementing the live thread count.
	 * This is important to ensure that dynamically allocated TLS
	 * is not under-allocated/over-committed, and possibly for other
	 * reasons as well. */
	__block_all_sigs(&set);

	/* It's impossible to determine whether this is "the last thread"
	 * until performing the atomic decrement, since multiple threads
	 * could exit at the same time. For the last thread, revert the
	 * decrement, restore the tid, and unblock signals to give the
	 * atexit handlers and stdio cleanup code a consistent state. */
	if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
		libc.threads_minus_1 = 0;
		UNLOCK(self->killlock);
		__restore_sigs(&set);
		exit(0);
	}

	/* Process robust list in userspace to handle non-pshared mutexes
	 * and the detached thread case where the robust list head will
	 * be invalid when the kernel would process it. */
	__vm_lock();
	volatile void *volatile *rp;
	while ((rp=self->robust_list.head) && rp != &self->robust_list.head) {
		pthread_mutex_t *m = (void *)((char *)rp
			- offsetof(pthread_mutex_t, _m_next));
		int waiters = m->_m_waiters;
		int priv = (m->_m_type & 128) ^ 128;
		self->robust_list.pending = rp;
		self->robust_list.head = *rp;
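		/* a_swap leaves FUTEX_OWNER_DIED (0x40000000) in the lock
		 * word so the next locker gets EOWNERDEAD; a negative old
		 * value means the waiters flag was set, so a wake is
		 * needed below. */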
		int cont = a_swap(&m->_m_lock, 0x40000000);
		self->robust_list.pending = 0;
		if (cont < 0 || waiters)
			__wake(&m->_m_lock, 1, priv);
	}
	__vm_unlock();

	__do_orphaned_stdio_locks();
	__dl_thread_cleanup();

	/* This atomic potentially competes with a concurrent pthread_detach
	 * call; the loser is responsible for freeing thread resources. */
	int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);

	if (state>=DT_DETACHED && self->map_base) {
		/* Detached threads must avoid the kernel clear_child_tid
		 * feature, since the virtual address will have been
		 * unmapped and possibly already reused by a new mapping
		 * at the time the kernel would perform the write. In
		 * the case of threads that started out detached, the
		 * initial clone flags are correct, but if the thread was
		 * detached later, we need to clear it here. */
		if (state == DT_DYNAMIC) __syscall(SYS_set_tid_address, 0);

		/* Robust list will no longer be valid, and was already
		 * processed above, so unregister it with the kernel. */
		if (self->robust_list.off)
			__syscall(SYS_set_robust_list, 0, 3*sizeof(long));

		/* Since __unmapself bypasses the normal munmap code path,
		 * explicitly wait for vmlock holders first. */
		__vm_wait();

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}

	/* After the kernel thread exits, its tid may be reused. Clear it
	 * to prevent inadvertent use and inform functions that would use
	 * it that it's no longer available. */
	self->tid = 0;
	UNLOCK(self->killlock);

	for (;;) __syscall(SYS_exit, 0);
}

void __do_cleanup_push(struct __ptcb *cb)
{
	struct pthread *self = __pthread_self();
	cb->__next = self->cancelbuf;
	self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
	__pthread_self()->cancelbuf = cb->__next;
}
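
/* __do_cleanup_push/__do_cleanup_pop back the pthread_cleanup_push/pop
 * macro pair. A minimal usage sketch (application code, not part of
 * this file):
 *
 *	pthread_cleanup_push(unlock_fn, &lock);
 *	... code that may be cancelled ...
 *	pthread_cleanup_pop(1);
 *
 * Handlers still registered when a thread exits are run by the loop
 * at the top of __pthread_exit. */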

static int start(void *p)
{
	pthread_t self = p;
	if (self->unblock_cancel)
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK,
			SIGPT_SET, 0, _NSIG/8);
	__pthread_exit(self->start(self->start_arg));
	return 0;
}
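
/* C11 threads (thrd_create) use a start function returning int; its
 * result is carried to thrd_join through the void * exit value. */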
static int start_c11(void *p)
{
	pthread_t self = p;
	int (*start)(void*) = (int(*)(void*)) self->start;
	__pthread_exit((void *)(uintptr_t)start(self->start_arg));
	return 0;
}

#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static volatile size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
static void *dummy_tsd[1] = { 0 };
weak_alias(dummy_tsd, __pthread_tsd_main);

volatile int __block_new_threads = 0;
extern size_t __default_stacksize;
extern size_t __default_guardsize;

static FILE *volatile dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);

static void init_file_lock(FILE *f)
{
	if (f && f->lock<0) f->lock = 0;
}
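
/* While the process is single-threaded, stdio FILE locks hold a
 * negative value so locking is skipped; they must be activated (reset
 * to 0) before the first new thread can run. */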

int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret, c11 = (attrp == __ATTRP_C11_THREAD);
	size_t size, guard;
	struct pthread *self, *new;
	unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
	unsigned flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
		| CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS
		| CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_DETACHED;
	int do_sched = 0;
	pthread_attr_t attr = { 0 };
	struct start_sched_args ssa;

	if (!libc.can_do_threads) return ENOSYS;
	self = __pthread_self();
	if (!libc.threaded) {
		for (FILE *f=*__ofl_lock(); f; f=f->next)
			init_file_lock(f);
		__ofl_unlock();
		init_file_lock(__stdin_used);
		init_file_lock(__stdout_used);
		init_file_lock(__stderr_used);
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG/8);
		self->tsd = (void **)__pthread_tsd_main;
		libc.threaded = 1;
	}
	if (attrp && !c11) attr = *attrp;

	__acquire_ptc();
	if (!attrp || c11) {
		attr._a_stacksize = __default_stacksize;
		attr._a_guardsize = __default_guardsize;
	}
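
	/* Creation of new threads pauses while __block_new_threads is
	 * set, e.g. during multi-threaded set*id() synchronization. */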
	if (__block_new_threads) __wait(&__block_new_threads, 0, 1, 1);

	if (attr._a_stackaddr) {
		size_t need = libc.tls_size + __pthread_tsd_size;
		size = attr._a_stacksize;
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
		/* Use application-provided stack for TLS only when
		 * it does not take more than ~12% or 2k of the
		 * application's stack space. */
		if (need < size/8 && need < 2048) {
			tsd = stack - __pthread_tsd_size;
			stack = tsd - libc.tls_size;
			memset(stack, 0, need);
		} else {
			size = ROUND(need);
		}
		guard = 0;
	} else {
		guard = ROUND(attr._a_guardsize);
		size = guard + ROUND(attr._a_stacksize
			+ libc.tls_size + __pthread_tsd_size);
	}

	if (!tsd) {
		if (guard) {
			map = __mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
			/* mprotect may fail with ENOSYS on no-MMU targets;
			 * in that case the thread simply runs without an
			 * enforced guard region. */
			if (__mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)
			    && errno != ENOSYS) {
				__munmap(map, size);
				goto fail;
			}
		} else {
			map = __mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
		}
		tsd = map + size - __pthread_tsd_size;
		if (!stack) {
			stack = tsd - libc.tls_size;
			stack_limit = map + guard;
		}
	}

	new = __copy_tls(tsd - libc.tls_size);
	new->map_base = map;
	new->map_size = size;
	new->stack = stack;
	new->stack_size = stack - stack_limit;
	new->guard_size = guard;
	new->start = entry;
	new->start_arg = arg;
	new->self = new;
	new->tsd = (void *)tsd;
	new->locale = &libc.global_locale;
	if (attr._a_detach) {
		new->detach_state = DT_DETACHED;
		flags -= CLONE_CHILD_CLEARTID;
	} else {
		new->detach_state = DT_JOINABLE;
	}
	if (attr._a_sched) {
		do_sched = 1;
		ssa.futex = -1;
		ssa.start_fn = new->start;
		ssa.start_arg = new->start_arg;
		ssa.attr = &attr;
		new->start = __start_sched;
		new->start_arg = &ssa;
		__block_app_sigs(&ssa.mask);
	}
	new->robust_list.head = &new->robust_list.head;
	new->unblock_cancel = self->cancel;
	new->CANARY = self->CANARY;
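
	/* __clone runs start (or start_c11) on the new stack. Per the
	 * flags above, the kernel stores the new tid at &new->tid and,
	 * for joinable threads, clears and futex-wakes &new->detach_state
	 * on exit, which is what pthread_join waits on. */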
	a_inc(&libc.threads_minus_1);
	ret = __clone((c11 ? start_c11 : start), stack, flags, new, &new->tid, TP_ADJ(new), &new->detach_state);

	__release_ptc();

	if (do_sched) {
		__restore_sigs(&ssa.mask);
	}

	if (ret < 0) {
		a_dec(&libc.threads_minus_1);
		if (map) __munmap(map, size);
		return EAGAIN;
	}

	if (do_sched) {
		__futexwait(&ssa.futex, -1, 1);
		ret = ssa.futex;
		if (ret) return ret;
	}

	*res = new;
	return 0;
fail:
	__release_ptc();
	return EAGAIN;
}

weak_alias(__pthread_exit, pthread_exit);
weak_alias(__pthread_create, pthread_create);