/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <pthreadP.h>
#include <atomic.h>
#include <ldsodefs.h>
#include <tls.h>
#include <list.h>
#include <fork.h>
#include <version.h>
#include <shlib-compat.h>
#include <smp.h>
#include <lowlevellock.h>
#include <futex-internal.h>
#include <kernel-features.h>
#include <libc-pointer-arith.h>
#include <pthread-pids.h>

#ifndef TLS_MULTIPLE_THREADS_IN_TCB
/* Pointer to the corresponding variable in libc.  */
int *__libc_multiple_threads_ptr attribute_hidden;
#endif
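
/* The pointer above is filled in from the return value of
   __libc_pthread_init at the end of
   __pthread_initialize_minimal_internal; the flag it points to is what
   disables libc's single-threaded fast paths once a second thread is
   created.  */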

/* Size and alignment of static TLS block.  */
size_t __static_tls_size;
size_t __static_tls_align_m1;

#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if we do not have the system call and we cannot use it.  */
int __set_robust_list_avail;
# define set_robust_list_not_avail() \
  __set_robust_list_avail = -1
#else
# define set_robust_list_not_avail() do { } while (0)
#endif

#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
/* Nonzero if we do not have FUTEX_CLOCK_REALTIME.  */
int __have_futex_clock_realtime;
# define __set_futex_clock_realtime() \
  __have_futex_clock_realtime = 1
#else
# define __set_futex_clock_realtime() do { } while (0)
#endif
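
/* Both flags above are determined by runtime probes in
   __pthread_initialize_minimal_internal below when the kernel's support
   cannot be assumed at build time.  */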

/* Version of the library, used in libthread_db to detect mismatches.  */
static const char nptl_version[] __attribute_used__ = VERSION;


#ifdef SHARED
static
#else
extern
#endif
void __nptl_set_robust (struct pthread *);

#ifdef SHARED
static void nptl_freeres (void);
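

/* Table of libpthread entry points handed to libc through
   __libc_pthread_init (see the end of
   __pthread_initialize_minimal_internal) so that libc can forward
   operations such as thread exit and fork cleanup into this library
   without linking against it directly.  */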
static const struct pthread_functions pthread_functions =
  {
    .ptr_pthread_attr_destroy = __pthread_attr_destroy,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
    .ptr___pthread_attr_init_2_0 = __pthread_attr_init_2_0,
# endif
    .ptr___pthread_attr_init_2_1 = __pthread_attr_init_2_1,
    .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
    .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
    .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
    .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
    .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
    .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_destroy = __pthread_cond_destroy,
    .ptr___pthread_cond_init = __pthread_cond_init,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_3_2)
    .ptr___pthread_cond_broadcast_2_0 = __pthread_cond_broadcast_2_0,
    .ptr___pthread_cond_destroy_2_0 = __pthread_cond_destroy_2_0,
    .ptr___pthread_cond_init_2_0 = __pthread_cond_init_2_0,
    .ptr___pthread_cond_signal_2_0 = __pthread_cond_signal_2_0,
    .ptr___pthread_cond_wait_2_0 = __pthread_cond_wait_2_0,
    .ptr___pthread_cond_timedwait_2_0 = __pthread_cond_timedwait_2_0,
# endif
    .ptr_pthread_equal = __pthread_equal,
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
    .ptr_pthread_mutex_init = __pthread_mutex_init,
    .ptr_pthread_mutex_lock = __pthread_mutex_lock,
    .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
    .ptr_pthread_self = __pthread_self,
    .ptr___pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr___pthread_once = __pthread_once,
    .ptr___pthread_rwlock_rdlock = __pthread_rwlock_rdlock,
    .ptr___pthread_rwlock_wrlock = __pthread_rwlock_wrlock,
    .ptr___pthread_rwlock_unlock = __pthread_rwlock_unlock,
    .ptr___pthread_key_create = __pthread_key_create,
    .ptr___pthread_getspecific = __pthread_getspecific,
    .ptr___pthread_setspecific = __pthread_setspecific,
    .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind,
    .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
# ifdef SIGSETXID
    .ptr__nptl_setxid = __nptl_setxid,
# endif
    /* For now only the stack cache needs to be freed.  */
    .ptr_freeres = nptl_freeres,
    .ptr_set_robust = __nptl_set_robust
  };
# define ptr_pthread_functions &pthread_functions
#else
# define ptr_pthread_functions NULL
#endif


#ifdef SHARED
/* This function is called indirectly from the freeres code in libc.  */
static void
__libc_freeres_fn_section
nptl_freeres (void)
{
  __unwind_freeres ();
  __free_stacks (0);
}


static
#endif
void
__nptl_set_robust (struct pthread *self)
{
#ifdef __NR_set_robust_list
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (set_robust_list, err, 2, &self->robust_head,
                    sizeof (struct robust_list_head));
#endif
}
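
/* Once the robust list head is registered, the kernel walks the list at
   thread exit and sets FUTEX_OWNER_DIED in the futex word of every robust
   mutex the dead thread still held, which is what lets a later locker
   observe EOWNERDEAD.  */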


#ifdef SIGCANCEL
/* For asynchronous cancellation we use a signal.  This is the handler.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGCANCEL
      || si->si_pid != __getpid ()
      || si->si_code != SI_TKILL)
    return;
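
  /* A legitimate request comes from pthread_cancel in this same process,
     delivered with tgkill, hence the SI_TKILL code and our own PID
     checked above.  */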

  struct pthread *self = THREAD_SELF;

  int oldval = THREAD_GETMEM (self, cancelhandling);
  while (1)
    {
      /* We are canceled now.  When canceled by another thread this flag
         is already set but if the signal is directly sent (internally or
         from another process) it has to be done here.  */
      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
        /* Already canceled or exiting.  */
        break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
                                              oldval);
      if (curval == oldval)
        {
          /* Set the return value.  */
          THREAD_SETMEM (self, result, PTHREAD_CANCELED);

          /* Make sure asynchronous cancellation is still enabled.  */
          if ((newval & CANCELTYPE_BITMASK) != 0)
            /* Run the registered destructors and terminate the thread.  */
            __do_cancel ();

          break;
        }

      /* The compare-and-exchange lost a race with a concurrent update;
         retry with the value we read back.  */
      oldval = curval;
    }
}
#endif


#ifdef SIGSETXID
struct xid_command *__xidcmd attribute_hidden;

/* We use the SIGSETXID signal in the setuid, setgid, etc. implementations
   to tell each thread to call the respective setxid syscall on itself.
   This is the handler.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
  int result;

  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGSETXID
      || si->si_pid != __getpid ()
      || si->si_code != SI_TKILL)
    return;
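
  /* On Linux, credentials are a per-thread property, while POSIX demands
     that set*id affect the whole process; __xidcmd carries the syscall
     number and arguments chosen by the initiating thread so every thread
     can repeat the call on itself.  */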

  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
                                 __xidcmd->id[1], __xidcmd->id[2]);
  int error = 0;
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
    error = INTERNAL_SYSCALL_ERRNO (result, err);
  __nptl_setxid_error (__xidcmd, error);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags, newval;
  do
    {
      flags = THREAD_GETMEM (self, cancelhandling);
      newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
                                          flags & ~SETXID_BITMASK, flags);
    }
  while (flags != newval);

  /* And release the futex.  */
  self->setxid_futex = 1;
  futex_wake (&self->setxid_futex, 1, FUTEX_PRIVATE);

  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    futex_wake ((unsigned int *) &__xidcmd->cntr, 1, FUTEX_PRIVATE);
}
#endif
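
/* The initiating thread waits on __xidcmd->cntr until every signalled
   thread has run the handler above; the last thread to finish wakes it
   through the futex.  */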


/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));


/* This can be set by the debugger before initialization is complete.  */
static bool __nptl_initial_report_events __attribute_used__;

void
__pthread_initialize_minimal_internal (void)
{
  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  __pthread_initialize_pids (pd);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Initialize the robust mutex data.  */
  {
#ifdef __PTHREAD_MUTEX_HAVE_PREV
    pd->robust_prev = &pd->robust_head;
#endif
    pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
    pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                    - offsetof (pthread_mutex_t,
                                                __data.__list.__next));
    INTERNAL_SYSCALL_DECL (err);
    int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                                sizeof (struct robust_list_head));
    if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
      set_robust_list_not_avail ();
  }
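
  /* futex_offset computed above tells the kernel where, relative to each
     list node (the __list member embedded in pthread_mutex_t), the mutex's
     futex word is found, so it can mark held mutexes when the thread
     dies.  */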

#ifdef __NR_futex
# ifndef __ASSUME_PRIVATE_FUTEX
  /* Private futexes are always used (at least internally) so that
     doing the test once this early is beneficial.  */
  {
    int word = 0;
    INTERNAL_SYSCALL_DECL (err);
    word = INTERNAL_SYSCALL (futex, err, 3, &word,
                             FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
    if (!INTERNAL_SYSCALL_ERROR_P (word, err))
      THREAD_SETMEM (pd, header.private_futex, FUTEX_PRIVATE_FLAG);
  }

  /* Private futexes have been introduced earlier than the
     FUTEX_CLOCK_REALTIME flag.  We don't have to run the test if we
     know the former are not supported.  This also means we know the
     kernel will return ENOSYS for unknown operations.  */
  if (THREAD_GETMEM (pd, header.private_futex) != 0)
# endif
# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
    {
      int word = 0;
      /* NB: the syscall actually takes six parameters.  The last is the
         bit mask.  But since we will not actually wait at all the value
         is irrelevant.  Given that passing six parameters is difficult
         on some architectures we just pass whatever random value the
         calling convention calls for to the kernel.  It causes no harm.  */
      INTERNAL_SYSCALL_DECL (err);
      word = INTERNAL_SYSCALL (futex, err, 5, &word,
                               FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
                               | FUTEX_PRIVATE_FLAG, 1, NULL, 0);
      assert (INTERNAL_SYSCALL_ERROR_P (word, err));
      if (INTERNAL_SYSCALL_ERRNO (word, err) != ENOSYS)
        __set_futex_clock_realtime ();
    }
# endif
#endif
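
  /* Both probes decide by issuing the operation: an unknown flag makes the
     kernel return ENOSYS, while a supported FUTEX_WAIT_BITSET fails here
     with EWOULDBLOCK because the futex word does not hold the expected
     value 1.  The FUTEX_WAKE probe simply succeeds when
     FUTEX_PRIVATE_FLAG is understood.  */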

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us and
     had to set __nptl_initial_report_events.  Propagate its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

#if defined SIGCANCEL || defined SIGSETXID
  struct sigaction sa;
  __sigemptyset (&sa.sa_mask);

# ifdef SIGCANCEL
  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);
# endif

# ifdef SIGSETXID
  /* Install the handler to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;
  (void) __libc_sigaction (SIGSETXID, &sa, NULL);
# endif

  /* The parent process might have left the signals blocked.  Just in
     case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
# ifdef SIGCANCEL
  __sigaddset (&sa.sa_mask, SIGCANCEL);
# endif
# ifdef SIGSETXID
  __sigaddset (&sa.sa_mask, SIGSETXID);
# endif
  {
    INTERNAL_SYSCALL_DECL (err);
    (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
                             NULL, _NSIG / 8);
  }
#endif

  /* Get the size of the static and alignment requirements for the TLS
     block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (__getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = GLRO(dl_pagesize);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = ALIGN_UP (limit.rlim_cur, pagesz);
  lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
  __default_pthread_attr.stacksize = limit.rlim_cur;
  __default_pthread_attr.guardsize = GLRO (dl_pagesize);
  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
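
  /* For illustration only (hypothetical numbers): with 4 KiB pages, 3 KiB
     of static TLS, and a MINIMAL_REST_STACK of 2 KiB, minstack is 9 KiB;
     an 8 KiB RLIMIT_STACK would be raised to 9 KiB and then rounded up
     to 12 KiB, i.e. three pages.  */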

#ifdef SHARED
  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
  GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    __pthread_mutex_lock (&GL(dl_load_lock).mutex);

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  GL(dl_wait_lookup_done) = &__wait_lookup_done;
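
  /* These two hooks let ld.so cooperate with running threads:
     dl_init_static_tls installs the static TLS image of a dlopened
     module in already-running threads, and dl_wait_lookup_done waits
     until no thread still uses an old scope list before it is freed.  */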

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
                         ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();
}
strong_alias (__pthread_initialize_minimal_internal,
              __pthread_initialize_minimal)


size_t
__pthread_get_minstack (const pthread_attr_t *attr)
{
  struct pthread_attr *iattr = (struct pthread_attr *) attr;

  return (GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN
          + iattr->guardsize);
}
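
/* Illustrative note: the sum roughly mirrors what allocate_stack carves
   out of a new thread's allocation (guard area, static TLS, and the
   descriptor), so callers can treat it as the smallest usable stack
   size for the given attributes.  */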