/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <pthreadP.h>
#include <atomic.h>
#include <ldsodefs.h>
#include <tls.h>
#include <list.h>
#include <fork.h>
#include <version.h>
#include <shlib-compat.h>
#include <smp.h>
#include <lowlevellock.h>
#include <futex-internal.h>
#include <kernel-features.h>
#include <libc-pointer-arith.h>
#include <pthread-pids.h>
#include <pthread_mutex_conf.h>
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
/* Pointer to the corresponding variable in libc.  */
int *__libc_multiple_threads_ptr attribute_hidden;
#endif

/* Size and alignment of static TLS block.  */
size_t __static_tls_size;
size_t __static_tls_align_m1;
#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if the set_robust_list system call is not available.  */
int __set_robust_list_avail;
# define set_robust_list_not_avail() \
  __set_robust_list_avail = -1
#else
# define set_robust_list_not_avail() do { } while (0)
#endif
/* Version of the library, used in libthread_db to detect mismatches.  */
static const char nptl_version[] __attribute_used__ = VERSION;


#ifdef SHARED
static
#else
extern
#endif
void __nptl_set_robust (struct pthread *);
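
/* In the shared build libc forwards pthread calls through the following
   table of function pointers, which is handed to libc via
   __libc_pthread_init below.  A statically linked libpthread resolves
   the symbols directly instead.  */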
#ifdef SHARED
static const struct pthread_functions pthread_functions =
  {
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_destroy = __pthread_cond_destroy,
    .ptr___pthread_cond_init = __pthread_cond_init,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_3_2)
    .ptr___pthread_cond_broadcast_2_0 = __pthread_cond_broadcast_2_0,
    .ptr___pthread_cond_destroy_2_0 = __pthread_cond_destroy_2_0,
    .ptr___pthread_cond_init_2_0 = __pthread_cond_init_2_0,
    .ptr___pthread_cond_signal_2_0 = __pthread_cond_signal_2_0,
    .ptr___pthread_cond_wait_2_0 = __pthread_cond_wait_2_0,
    .ptr___pthread_cond_timedwait_2_0 = __pthread_cond_timedwait_2_0,
# endif
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
    .ptr_pthread_mutex_init = __pthread_mutex_init,
    .ptr_pthread_mutex_lock = __pthread_mutex_lock,
    .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
    .ptr___pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr___pthread_once = __pthread_once,
    .ptr___pthread_rwlock_rdlock = __pthread_rwlock_rdlock,
    .ptr___pthread_rwlock_wrlock = __pthread_rwlock_wrlock,
    .ptr___pthread_rwlock_unlock = __pthread_rwlock_unlock,
    .ptr___pthread_key_create = __pthread_key_create,
    .ptr___pthread_getspecific = __pthread_getspecific,
    .ptr___pthread_setspecific = __pthread_setspecific,
    .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind,
    .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
    .ptr__nptl_setxid = __nptl_setxid,
    .ptr_set_robust = __nptl_set_robust
  };
# define ptr_pthread_functions &pthread_functions
#else
# define ptr_pthread_functions NULL
#endif
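
/* Tell the kernel where SELF's robust mutex list head lives so it can
   wake waiters on robust mutexes SELF holds if the thread dies.  libc
   invokes this through ptr_set_robust, e.g. to re-register the list in
   the child after fork.  */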
#ifdef SHARED
static
#endif
void
__nptl_set_robust (struct pthread *self)
{
#ifdef __NR_set_robust_list
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (set_robust_list, err, 2, &self->robust_head,
                    sizeof (struct robust_list_head));
#endif
}
/* For asynchronous cancellation we use a signal.  This is the handler.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGCANCEL
      || si->si_pid != __getpid()
      || si->si_code != SI_TKILL)
    return;

  struct pthread *self = THREAD_SELF;

  int oldval = THREAD_GETMEM (self, cancelhandling);
  while (1)
    {
      /* We are canceled now.  When canceled by another thread this flag
         is already set, but if the signal is sent directly (internally or
         from another process) it has to be done here.  */
      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
        /* Already canceled or exiting.  */
        break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
                                              oldval);
      if (curval == oldval)
        {
          /* Set the return value.  */
          THREAD_SETMEM (self, result, PTHREAD_CANCELED);

          /* Make sure asynchronous cancellation is still enabled.  */
          if ((newval & CANCELTYPE_BITMASK) != 0)
            /* Run the registered destructors and terminate the thread.  */
            __do_cancel ();

          break;
        }

      oldval = curval;
    }
}
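
/* Note that __do_cancel does not return: it unwinds the thread from the
   signal frame, running cleanup handlers installed with
   pthread_cleanup_push, and then terminates the thread.  */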
struct xid_command *__xidcmd attribute_hidden;

/* We use the SIGSETXID signal in the setuid, setgid, etc. implementations to
   tell each thread to call the respective setxid syscall on itself.  This is
   the handler.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
  int result;

  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGSETXID
      || si->si_pid != __getpid ()
      || si->si_code != SI_TKILL)
    return;

  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
                                 __xidcmd->id[1], __xidcmd->id[2]);
  int error = 0;
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
    error = INTERNAL_SYSCALL_ERRNO (result, err);
  __nptl_setxid_error (__xidcmd, error);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags, newval;
  do
    {
      flags = THREAD_GETMEM (self, cancelhandling);
      newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
                                          flags & ~SETXID_BITMASK, flags);
    }
  while (flags != newval);

  /* And release the futex.  */
  self->setxid_futex = 1;
  futex_wake (&self->setxid_futex, 1, FUTEX_PRIVATE);

  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    futex_wake ((unsigned int *) &__xidcmd->cntr, 1, FUTEX_PRIVATE);
}
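
/* Each thread receiving SIGSETXID thus applies the same credential
   change to itself; the last thread to finish wakes the coordinating
   thread via the cntr futex in *__xidcmd.  */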

/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));


/* This can be set by the debugger before initialization is complete.  */
static bool __nptl_initial_report_events __attribute_used__;
void
__pthread_initialize_minimal_internal (void)
{
  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  __pthread_initialize_pids (pd);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);

  /* Initialize the robust mutex data.  */
  {
#if __PTHREAD_MUTEX_HAVE_PREV
    pd->robust_prev = &pd->robust_head;
#endif
    pd->robust_head.list = &pd->robust_head;
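
    /* futex_offset below is the signed distance from a mutex's robust
       list entry (__data.__list.__next) to its futex word (__data.__lock);
       the kernel adds it to each list entry to locate the lock word it
       must release if this thread dies while holding robust mutexes.  */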
#ifdef __NR_set_robust_list
    pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                    - offsetof (pthread_mutex_t,
                                                __data.__list.__next));
    INTERNAL_SYSCALL_DECL (err);
    int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                                sizeof (struct robust_list_head));
    if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
      set_robust_list_not_avail ();
  }
  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us and
     had to set __nptl_initial_report_events.  Propagate its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);
  struct sigaction sa;
  __sigemptyset (&sa.sa_mask);

  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);

  /* Install the handler to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;
  (void) __libc_sigaction (SIGSETXID, &sa, NULL);

  /* The parent process might have left the signals blocked.  Just in
     case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
  __sigaddset (&sa.sa_mask, SIGCANCEL);
  __sigaddset (&sa.sa_mask, SIGSETXID);
  INTERNAL_SYSCALL_DECL (err);
  (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
                           NULL, _NSIG / 8);
  /* Get the size and alignment requirements for the static TLS
     block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);
  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (__getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = GLRO(dl_pagesize);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = ALIGN_UP (limit.rlim_cur, pagesz);
  lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
  __default_pthread_attr.stacksize = limit.rlim_cur;
  __default_pthread_attr.guardsize = GLRO (dl_pagesize);
  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
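
  /* For example: with the common 8 MiB RLIMIT_STACK and 4 KiB pages,
     this yields a default stacksize of 8 MiB (already page-aligned and
     well above minstack) and a guardsize of one page.  */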
#ifdef SHARED
  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
  GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    __pthread_mutex_lock (&GL(dl_load_lock).mutex);
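
  /* The loop above re-acquires dl_load_lock once per recursion level
     ld.so had taken, so the recursion count carries over to the pthread
     mutex implementation.  */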
  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  GL(dl_wait_lookup_done) = &__wait_lookup_done;
  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
                         ptr_pthread_functions);
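
  /* Besides the fork generation counter, this hands libc the
     stack-reclaiming callback run in the child after fork and, in the
     shared build, the pthread_functions table defined above.  */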
  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();

#if HAVE_TUNABLES
  __pthread_tunables_init ();
#endif
}
strong_alias (__pthread_initialize_minimal_internal,
              __pthread_initialize_minimal)

/* This function is internal (it has a GLIBC_PRIVATE version), but it
   is widely used (either via weak symbol, or dlsym) to obtain the
   __static_tls_size value.  This value is then used to adjust the
   value of the stack size attribute, so that applications receive the
   full requested stack size, not diminished by the TCB and static TLS
   allocation on the stack.  Once the TCB is separately allocated,
   this function should be removed or renamed (if it is still
   necessary at that point).  */
size_t
__pthread_get_minstack (const pthread_attr_t *attr)
{
  return GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN;
}
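
/* Illustrative only: a consumer that wants USABLE bytes of usable stack
   could request something like
       USABLE + __pthread_get_minstack (&attr) - PTHREAD_STACK_MIN
   to compensate for the TCB and static TLS carved out of the allocation;
   USABLE and the exact adjustment are hypothetical, not part of this
   file.  */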