nptl/nptl-init.c (glibc)

/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <pthreadP.h>
#include <atomic.h>
#include <ldsodefs.h>
#include <tls.h>
#include <list.h>
#include <fork.h>
#include <version.h>
#include <shlib-compat.h>
#include <smp.h>
#include <lowlevellock.h>
#include <futex-internal.h>
#include <kernel-features.h>
#include <libc-pointer-arith.h>
#include <pthread-pids.h>
#include <pthread_mutex_conf.h>

#ifndef TLS_MULTIPLE_THREADS_IN_TCB
/* Pointer to the corresponding variable in libc.  */
int *__libc_multiple_threads_ptr attribute_hidden;
#endif

/* Size and alignment of static TLS block.  */
size_t __static_tls_size;
size_t __static_tls_align_m1;

#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if the set_robust_list system call turned out not to be
   available.  */
int __set_robust_list_avail;
# define set_robust_list_not_avail() \
  __set_robust_list_avail = -1
#else
# define set_robust_list_not_avail() do { } while (0)
#endif

/* Version of the library, used in libthread_db to detect mismatches.  */
static const char nptl_version[] __attribute_used__ = VERSION;


#ifdef SHARED
static
#else
extern
#endif
void __nptl_set_robust (struct pthread *);
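
/* Table of libpthread entry points that is handed to libc through
   __libc_pthread_init below; libc stores the pointers so that its own
   code can call into libpthread without direct symbol references.  */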
#ifdef SHARED
static const struct pthread_functions pthread_functions =
  {
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_3_2)
    .ptr___pthread_cond_broadcast_2_0 = __pthread_cond_broadcast_2_0,
    .ptr___pthread_cond_signal_2_0 = __pthread_cond_signal_2_0,
    .ptr___pthread_cond_wait_2_0 = __pthread_cond_wait_2_0,
    .ptr___pthread_cond_timedwait_2_0 = __pthread_cond_timedwait_2_0,
# endif
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
    .ptr_pthread_mutex_init = __pthread_mutex_init,
    .ptr_pthread_mutex_lock = __pthread_mutex_lock,
    .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
    .ptr___pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr___pthread_once = __pthread_once,
    .ptr___pthread_rwlock_rdlock = __pthread_rwlock_rdlock,
    .ptr___pthread_rwlock_wrlock = __pthread_rwlock_wrlock,
    .ptr___pthread_rwlock_unlock = __pthread_rwlock_unlock,
    .ptr___pthread_key_create = __pthread_key_create,
    .ptr___pthread_getspecific = __pthread_getspecific,
    .ptr___pthread_setspecific = __pthread_setspecific,
    .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind,
    .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
    .ptr__nptl_setxid = __nptl_setxid,
    .ptr_set_robust = __nptl_set_robust
  };
# define ptr_pthread_functions &pthread_functions
#else
# define ptr_pthread_functions NULL
#endif
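
/* Register SELF's robust mutex list head with the kernel.  libc reaches
   this through the ptr_set_robust hook above; in particular fork uses it
   to re-register the list in the child process.  */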
#ifdef SHARED
static
#endif
void
__nptl_set_robust (struct pthread *self)
{
  INTERNAL_SYSCALL_CALL (set_robust_list, &self->robust_head,
                         sizeof (struct robust_list_head));
}
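

/* SIGCANCEL is a real-time signal glibc reserves for itself (on Linux,
   __SIGRTMIN).  pthread_cancel delivers it with tgkill, which is why the
   handler checks for si_code == SI_TKILL below.  */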
/* For asynchronous cancellation we use a signal.  This is the handler.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGCANCEL
      || si->si_pid != __getpid ()
      || si->si_code != SI_TKILL)
    return;

  struct pthread *self = THREAD_SELF;

  int oldval = THREAD_GETMEM (self, cancelhandling);
  while (1)
    {
      /* We are canceled now.  When canceled by another thread this flag
         is already set, but if the signal is sent directly (internally or
         from another process) it has to be done here.  */
      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
        /* Already canceled or exiting.  */
        break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
                                              oldval);
      if (curval == oldval)
        {
          /* Set the return value.  */
          THREAD_SETMEM (self, result, PTHREAD_CANCELED);

          /* Make sure asynchronous cancellation is still enabled.  */
          if ((newval & CANCELTYPE_BITMASK) != 0)
            /* Run the registered destructors and terminate the thread.  */
            __do_cancel ();

          break;
        }

      oldval = curval;
    }
}
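

/* Describes the pending set*id operation: the syscall number, its three
   arguments, a count of threads that still have to perform it, and a slot
   for any error.  Filled in by __nptl_setxid before SIGSETXID is sent to
   each thread.  */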
struct xid_command *__xidcmd attribute_hidden;

/* We use the SIGSETXID signal in the setuid, setgid, etc. implementations
   to tell each thread to call the respective setxid syscall on itself.
   This is the handler.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
  int result;

  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGSETXID
      || si->si_pid != __getpid ()
      || si->si_code != SI_TKILL)
    return;

  result = INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, 3, __xidcmd->id[0],
                                 __xidcmd->id[1], __xidcmd->id[2]);
  int error = 0;
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result)))
    error = INTERNAL_SYSCALL_ERRNO (result);
  __nptl_setxid_error (__xidcmd, error);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags, newval;
  do
    {
      flags = THREAD_GETMEM (self, cancelhandling);
      newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
                                          flags & ~SETXID_BITMASK, flags);
    }
  while (flags != newval);

  /* And release the futex.  */
  self->setxid_futex = 1;
  futex_wake (&self->setxid_futex, 1, FUTEX_PRIVATE);

  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    futex_wake ((unsigned int *) &__xidcmd->cntr, 1, FUTEX_PRIVATE);
}


/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));


/* This can be set by the debugger before initialization is complete.  */
static bool __nptl_initial_report_events __attribute_used__;

void
__pthread_initialize_minimal_internal (void)
{
  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  __pthread_initialize_pids (pd);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);

  /* Initialize the robust mutex data.  */
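  /* The kernel uses these data via the set_robust_list syscall: at thread
     exit it walks robust_head.list and adds futex_offset to each entry to
     find the mutex's lock word, marking owned robust mutexes with
     FUTEX_OWNER_DIED.  */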
#if __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                  - offsetof (pthread_mutex_t,
                                              __data.__list.__next));
  int res = INTERNAL_SYSCALL_CALL (set_robust_list, &pd->robust_head,
                                   sizeof (struct robust_list_head));
  if (INTERNAL_SYSCALL_ERROR_P (res))
    set_robust_list_not_avail ();

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us and
     had to set __nptl_initial_report_events.  Propagate its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

  struct sigaction sa;
  __sigemptyset (&sa.sa_mask);

  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);

  /* Install the handler to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;
  (void) __libc_sigaction (SIGSETXID, &sa, NULL);

  /* The parent process might have left the signals blocked.  Just in
     case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
  __sigaddset (&sa.sa_mask, SIGCANCEL);
  __sigaddset (&sa.sa_mask, SIGSETXID);
  INTERNAL_SYSCALL_CALL (rt_sigprocmask, SIG_UNBLOCK, &sa.sa_mask,
                         NULL, _NSIG / 8);

  /* Get the size and alignment requirements of the static TLS block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (__getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = GLRO(dl_pagesize);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = ALIGN_UP (limit.rlim_cur, pagesz);
  lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
  __default_pthread_attr.internal.stacksize = limit.rlim_cur;
  __default_pthread_attr.internal.guardsize = GLRO (dl_pagesize);
  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
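
  /* From this point on, pthread_create with no explicit attribute will
     use limit.rlim_cur as the stack size and a single page as the guard
     size.  */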

#ifdef SHARED
  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
  GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
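  /* ld.so may already hold dl_load_lock recursively at this point (for
     instance when libpthread is brought in by dlopen); replaying the
     acquisitions puts the pthread mutex's owner and count bookkeeping in
     sync with what the loader believes.  */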
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    __pthread_mutex_lock (&GL(dl_load_lock).mutex);

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  GL(dl_wait_lookup_done) = &__wait_lookup_done;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
                         ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();

#if HAVE_TUNABLES
  __pthread_tunables_init ();
#endif
}
strong_alias (__pthread_initialize_minimal_internal,
              __pthread_initialize_minimal)


/* This function is internal (it has a GLIBC_PRIVATE version), but it
   is widely used (either via weak symbol, or dlsym) to obtain the
   __static_tls_size value.  This value is then used to adjust the
   value of the stack size attribute, so that applications receive the
   full requested stack size, not diminished by the TCB and static TLS
   allocation on the stack.  Once the TCB is separately allocated,
   this function should be removed or renamed (if it is still
   necessary at that point).  */
size_t
__pthread_get_minstack (const pthread_attr_t *attr)
{
  return GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN;
}