#define _GNU_SOURCE
#include "pthread_impl.h"
#include "stdio_impl.h"
#include "libc.h"
#include "lock.h"
#include <sys/mman.h>
#include <string.h>
#include <stddef.h>
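
/* Default no-op implementations; stronger definitions elsewhere in libc
 * (TSD destructor handling, stdio lock handling, dynamic linker cleanup,
 * membarrier setup) override these weak aliases when those components are
 * linked in. */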
static void dummy_0()
{
}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);
weak_alias(dummy_0, __do_orphaned_stdio_locks);
weak_alias(dummy_0, __dl_thread_cleanup);
weak_alias(dummy_0, __membarrier_init);

static int tl_lock_count;
static int tl_lock_waiters;
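
/* __thread_list_lock protects the circular doubly-linked list of live
 * threads. tl_lock_count lets the owner re-acquire the lock recursively,
 * and tl_lock_waiters records whether any futex waiters need waking. */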
void __tl_lock(void)
{
	int tid = __pthread_self()->tid;
	int val = __thread_list_lock;
	if (val == tid) {
		tl_lock_count++;
		return;
	}
	while ((val = a_cas(&__thread_list_lock, 0, tid)))
		__wait(&__thread_list_lock, &tl_lock_waiters, val, 0);
}

void __tl_unlock(void)
{
	if (tl_lock_count) {
		tl_lock_count--;
		return;
	}
	a_store(&__thread_list_lock, 0);
	if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
}
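
/* Wait for any in-progress holder of the thread list lock to release it,
 * passing a wake along if other waiters remain. */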
void __tl_sync(pthread_t td)
{
	a_barrier();
	int val = __thread_list_lock;
	if (!val) return;
	__wait(&__thread_list_lock, &tl_lock_waiters, val, 0);
	if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
}
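
/* Common thread-exit path: run cancellation cleanup handlers and TSD
 * destructors, process the robust mutex list, unlink the thread from the
 * thread list, and terminate via SYS_exit. Detached threads additionally
 * unmap their own stack before exiting. */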
_Noreturn void __pthread_exit(void *result)
{
	pthread_t self = __pthread_self();
	sigset_t set;

	self->canceldisable = 1;
	self->cancelasync = 0;
	self->result = result;

	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

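	/* Run destructors for any thread-specific data created with
	 * pthread_key_create. */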
	__pthread_tsd_run_dtors();

	__block_app_sigs(&set);

	/* This atomic potentially competes with a concurrent pthread_detach
	 * call; the loser is responsible for freeing thread resources. */
	int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);

	if (state==DT_DETACHED && self->map_base) {
		/* Since __unmapself bypasses the normal munmap code path,
		 * explicitly wait for vmlock holders first. This must be
		 * done before any locks are taken, to avoid lock ordering
		 * issues that could lead to deadlock. */
		__vm_wait();
	}

	/* Access to target the exiting thread with syscalls that use
	 * its kernel tid is controlled by killlock. For detached threads,
	 * any use past this point would have undefined behavior, but for
	 * joinable threads it's a valid usage that must be handled.
	 * Signals must be blocked since pthread_kill must be AS-safe. */
	LOCK(self->killlock);

	/* The thread list lock must be AS-safe, and thus depends on
	 * application signals being blocked above. */
	__tl_lock();

	/* If this is the only thread in the list, don't proceed with
	 * termination of the thread, but restore the previous lock and
	 * signal state to prepare for exit to call atexit handlers. */
	if (self->next == self) {
		__tl_unlock();
		UNLOCK(self->killlock);
		self->detach_state = state;
		__restore_sigs(&set);
		exit(0);
	}

	/* At this point we are committed to thread termination. */

	/* Process robust list in userspace to handle non-pshared mutexes
	 * and the detached thread case where the robust list head will
	 * be invalid when the kernel would process it. */
	__vm_lock();
	volatile void *volatile *rp;
	while ((rp=self->robust_list.head) && rp != &self->robust_list.head) {
		pthread_mutex_t *m = (void *)((char *)rp
			- offsetof(pthread_mutex_t, _m_next));
		int waiters = m->_m_waiters;
		int priv = (m->_m_type & 128) ^ 128;
		self->robust_list.pending = rp;
		self->robust_list.head = *rp;
		/* Mark the mutex as having a dead owner (EOWNERDEAD). */
		int cont = a_swap(&m->_m_lock, 0x40000000);
		self->robust_list.pending = 0;
		if (cont < 0 || waiters)
			__wake(&m->_m_lock, 1, priv);
	}
	__vm_unlock();

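	/* Release any stdio FILE locks held by this thread, and let the
	 * dynamic linker free its per-thread state (e.g. the dlerror
	 * buffer). */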
	__do_orphaned_stdio_locks();
	__dl_thread_cleanup();

	/* Last, unlink thread from the list. This change will not be visible
	 * until the lock is released, which only happens after SYS_exit
	 * has been called, via the exit futex address pointing at the lock.
	 * This needs to happen after any possible calls to LOCK() that might
	 * skip locking if process appears single-threaded. */
	if (!--libc.threads_minus_1) libc.need_locks = -1;
	self->next->prev = self->prev;
	self->prev->next = self->next;
	self->prev = self->next = self;

	if (state==DT_DETACHED && self->map_base) {
		/* Detached threads must block even implementation-internal
		 * signals, since they will not have a stack in their last
		 * moments of existence. */
		__block_all_sigs(&set);

		/* Robust list will no longer be valid, and was already
		 * processed above, so unregister it with the kernel. */
		if (self->robust_list.off)
			__syscall(SYS_set_robust_list, 0, 3*sizeof(long));

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}

	/* Wake any joiner. */
	a_store(&self->detach_state, DT_EXITED);
	__wake(&self->detach_state, 1, 1);

	/* After the kernel thread exits, its tid may be reused. Clear it
	 * to prevent inadvertent use and inform functions that would use
	 * it that it's no longer available. */
	self->tid = 0;
	UNLOCK(self->killlock);

	for (;;) __syscall(SYS_exit, 0);
}
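
/* Maintain the per-thread stack of cancellation cleanup handlers; these
 * functions are invoked by the pthread_cleanup_push and pthread_cleanup_pop
 * macros. */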
void __do_cleanup_push(struct __ptcb *cb)
{
	struct pthread *self = __pthread_self();
	cb->__next = self->cancelbuf;
	self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
	__pthread_self()->cancelbuf = cb->__next;
}
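
/* Argument block for a new thread, placed at the top of its stack by
 * __pthread_create. The control word coordinates startup with the creating
 * thread when explicit scheduling was requested, and sig_mask carries the
 * signal mask the new thread should install. */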
struct start_args {
	void *(*start_func)(void *);
	void *start_arg;
	volatile int control;
	unsigned long sig_mask[_NSIG/8/sizeof(long)];
};
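
/* Entry point for a new POSIX thread: wait for the creator to apply any
 * requested scheduling (exiting if that failed), install the prepared
 * signal mask, then run the start function and pass its return value to
 * __pthread_exit. */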
static int start(void *p)
{
	struct start_args *args = p;
	int state = args->control;
	if (state) {
		/* Tell the creator we are waiting, then wait for it to
		 * report the result of sched_setscheduler. */
		if (a_cas(&args->control, 1, 2)==1)
			__wait(&args->control, 0, 2, 1);
		if (args->control) {
			/* Scheduling failed; point the exit futex at the
			 * control word so the creator can observe that this
			 * thread is gone, and exit without running the
			 * start function. */
			__syscall(SYS_set_tid_address, &args->control);
			for (;;) __syscall(SYS_exit, 0);
		}
	}
	__syscall(SYS_rt_sigprocmask, SIG_SETMASK, &args->sig_mask, 0, _NSIG/8);
	__pthread_exit(args->start_func(args->start_arg));
	return 0;
}
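
/* Entry point for C11 threads (thrd_create): the start routine returns
 * int rather than void *, so its result is widened before being handed
 * to __pthread_exit. */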
static int start_c11(void *p)
{
	struct start_args *args = p;
	int (*start)(void*) = (int(*)(void*)) args->start_func;
	__pthread_exit((void *)(uintptr_t)start(args->start_arg));
	return 0;
}
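
/* Round up to a whole number of pages; PAGE_SIZE is a power of two. */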
#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static volatile size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
static void *dummy_tsd[1] = { 0 };
weak_alias(dummy_tsd, __pthread_tsd_main);

static FILE *volatile dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);

static void init_file_lock(FILE *f)
{
	if (f && f->lock<0) f->lock = 0;
}
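
/* Create a new thread: reserve and prepare its stack, TLS, and TSD areas,
 * initialize its pthread structure, and start it with __clone. The new
 * thread is linked into the thread list before the list lock is released. */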
int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret, c11 = (attrp == __ATTRP_C11_THREAD);
	size_t size, guard;
	struct pthread *self, *new;
	unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
	unsigned flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
		| CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS
		| CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_DETACHED;
	pthread_attr_t attr = { 0 };
	sigset_t set;

	if (!libc.can_do_threads) return ENOSYS;
	self = __pthread_self();
	if (!libc.threaded) {
		/* First thread creation: set up stdio locking and unblock
		 * the implementation-internal signals. */
		for (FILE *f=*__ofl_lock(); f; f=f->next)
			init_file_lock(f);
		__ofl_unlock();
		init_file_lock(__stdin_used);
		init_file_lock(__stdout_used);
		init_file_lock(__stderr_used);
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG/8);
		self->tsd = (void **)__pthread_tsd_main;
		__membarrier_init();
		libc.threaded = 1;
	}
	if (attrp && !c11) attr = *attrp;

	__acquire_ptc();
	if (!attr._a_stacksize) {
		attr._a_stacksize = __default_stacksize;
		attr._a_guardsize = __default_guardsize;
	}

	if (attr._a_stackaddr) {
		size_t need = libc.tls_size + __pthread_tsd_size;
		size = attr._a_stacksize;
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
		/* Use application-provided stack for TLS only when
		 * it does not take more than ~12% or 2k of the
		 * application's stack space. */
		if (need < size/8 && need < 2048) {
			tsd = stack - __pthread_tsd_size;
			stack = tsd - libc.tls_size;
			memset(stack, 0, need);
		} else {
			size = ROUND(need);
		}
		guard = 0;
	} else {
		guard = ROUND(attr._a_guardsize);
		size = guard + ROUND(attr._a_stacksize
			+ libc.tls_size + __pthread_tsd_size);
	}

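	/* If no usable application-provided region was set up above, map a
	 * new one: with a guard, the whole mapping starts PROT_NONE and the
	 * usable part is then made read/write. */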
	if (!tsd) {
		if (guard) {
			map = __mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
			if (__mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)
			    && errno != ENOSYS) {
				__munmap(map, size);
				goto fail;
			}
		} else {
			map = __mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
		}
		tsd = map + size - __pthread_tsd_size;
		if (!stack) {
			stack = tsd - libc.tls_size;
			stack_limit = map + guard;
		}
	}

	new = __copy_tls(tsd - libc.tls_size);
	new->map_base = map;
	new->map_size = size;
	new->stack = stack;
	new->stack_size = stack - stack_limit;
	new->guard_size = guard;
	new->self = new;
	new->tsd = (void *)tsd;
	new->locale = &libc.global_locale;
	if (attr._a_detach) {
		new->detach_state = DT_DETACHED;
	} else {
		new->detach_state = DT_JOINABLE;
	}
	new->robust_list.head = &new->robust_list.head;
	new->canary = self->canary;
	new->sysinfo = self->sysinfo;

	/* Setup argument structure for the new thread on its stack.
	 * It's safe to access from the caller only until the thread
	 * list is unlocked. */
	stack -= (uintptr_t)stack % sizeof(uintptr_t);
	stack -= sizeof(struct start_args);
	struct start_args *args = (void *)stack;
	args->start_func = entry;
	args->start_arg = arg;
	args->control = attr._a_sched ? 1 : 0;

	/* Application signals (but not the synccall signal) must be
	 * blocked before the thread list lock can be taken, to ensure
	 * that the lock is AS-safe. */
	__block_app_sigs(&set);

	/* Ensure SIGCANCEL is unblocked in new thread. This requires
	 * working with a copy of the set so we can restore the
	 * original mask in the calling thread. */
	memcpy(&args->sig_mask, &set, sizeof args->sig_mask);
	args->sig_mask[(SIGCANCEL-1)/8/sizeof(long)] &=
		~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));

	__tl_lock();
	if (!libc.threads_minus_1++) libc.need_locks = 1;
	ret = __clone((c11 ? start_c11 : start), stack, flags, args, &new->tid, TP_ADJ(new), &__thread_list_lock);

	/* All clone failures translate to EAGAIN. If explicit scheduling
	 * was requested, attempt it before unlocking the thread list so
	 * that the failed thread is never exposed and so that we can
	 * clean up all transient resource usage before returning. */
	if (ret < 0) {
		ret = -EAGAIN;
	} else if (attr._a_sched) {
		ret = __syscall(SYS_sched_setscheduler,
			new->tid, attr._a_policy, &attr._a_prio);
		if (a_swap(&args->control, ret ? 3 : 0)==2)
			__wake(&args->control, 1, 1);
		if (ret)
			__wait(&args->control, 0, 3, 0);
	}

	if (ret >= 0) {
		new->next = self->next;
		new->prev = self;
		new->next->prev = new;
		new->prev->next = new;
	} else {
		if (!--libc.threads_minus_1) libc.need_locks = 0;
	}
	__tl_unlock();
	__restore_sigs(&set);
	__release_ptc();

	if (ret < 0) {
		if (map) __munmap(map, size);
		return -ret;
	}

	*res = new;
	return 0;
fail:
	__release_ptc();
	return EAGAIN;
}

weak_alias(__pthread_exit, pthread_exit);
weak_alias(__pthread_create, pthread_create);