/* Source: glibc git, file nptl/nptl-init.c
   (blob 851bab2bf1998d4ef772768d39efddf76d13cad2).  */
1 /* Copyright (C) 2002-2007, 2008, 2009 Free Software Foundation, Inc.
2 This file is part of the GNU C Library.
3 Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18 02111-1307 USA. */
20 #include <assert.h>
21 #include <errno.h>
22 #include <limits.h>
23 #include <signal.h>
24 #include <stdlib.h>
25 #include <unistd.h>
26 #include <sys/param.h>
27 #include <sys/resource.h>
28 #include <pthreadP.h>
29 #include <atomic.h>
30 #include <ldsodefs.h>
31 #include <tls.h>
32 #include <fork.h>
33 #include <version.h>
34 #include <shlib-compat.h>
35 #include <smp.h>
36 #include <lowlevellock.h>
37 #include <kernel-features.h>
/* Size and alignment of the static TLS block, as reported by
   _dl_get_tls_static_info and rounded up during initialization.  */
size_t __static_tls_size;
/* Alignment of the static TLS block minus one, for use as a mask.  */
size_t __static_tls_align_m1;
#ifndef __ASSUME_SET_ROBUST_LIST
/* Set to a negative value if the set_robust_list system call turns
   out not to be available; zero while it is still assumed to work.  */
int __set_robust_list_avail;
/* Record that the set_robust_list system call is unavailable.  */
# define set_robust_list_not_avail() \
  __set_robust_list_avail = -1
#else
# define set_robust_list_not_avail() do { } while (0)
#endif
#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
/* Nonzero if the kernel does support FUTEX_CLOCK_REALTIME.  (The
   original comment said "do not have", which contradicted the setter
   below: __set_futex_clock_realtime stores 1 when the flag works.)  */
int __have_futex_clock_realtime;
/* Record that FUTEX_CLOCK_REALTIME is supported by the kernel.  */
# define __set_futex_clock_realtime() \
  __have_futex_clock_realtime = 1
#else
# define __set_futex_clock_realtime() do { } while (0)
#endif
62 /* Version of the library, used in libthread_db to detect mismatches. */
63 static const char nptl_version[] __attribute_used__ = VERSION;
#ifndef SHARED
/* In the static case the TLS setup is not done by the dynamic linker,
   so libc provides this hook (called below) to do it manually.  */
extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);
#endif
#ifdef SHARED
static void nptl_freeres (void);


/* Table of libpthread entry points handed to libc via
   __libc_pthread_init so libc can forward calls to the real
   implementations.  The extraction had dropped the braces of the
   initializer list; they are restored here.  */
static const struct pthread_functions pthread_functions =
  {
    .ptr_pthread_attr_destroy = __pthread_attr_destroy,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
    .ptr___pthread_attr_init_2_0 = __pthread_attr_init_2_0,
# endif
    .ptr___pthread_attr_init_2_1 = __pthread_attr_init_2_1,
    .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
    .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
    .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
    .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
    .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
    .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_destroy = __pthread_cond_destroy,
    .ptr___pthread_cond_init = __pthread_cond_init,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_3_2)
    .ptr___pthread_cond_broadcast_2_0 = __pthread_cond_broadcast_2_0,
    .ptr___pthread_cond_destroy_2_0 = __pthread_cond_destroy_2_0,
    .ptr___pthread_cond_init_2_0 = __pthread_cond_init_2_0,
    .ptr___pthread_cond_signal_2_0 = __pthread_cond_signal_2_0,
    .ptr___pthread_cond_wait_2_0 = __pthread_cond_wait_2_0,
    .ptr___pthread_cond_timedwait_2_0 = __pthread_cond_timedwait_2_0,
# endif
    .ptr_pthread_equal = __pthread_equal,
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = INTUSE(__pthread_mutex_destroy),
    .ptr_pthread_mutex_init = INTUSE(__pthread_mutex_init),
    .ptr_pthread_mutex_lock = INTUSE(__pthread_mutex_lock),
    .ptr_pthread_mutex_unlock = INTUSE(__pthread_mutex_unlock),
    .ptr_pthread_self = __pthread_self,
    .ptr_pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr___pthread_once = __pthread_once_internal,
    .ptr___pthread_rwlock_rdlock = __pthread_rwlock_rdlock_internal,
    .ptr___pthread_rwlock_wrlock = __pthread_rwlock_wrlock_internal,
    .ptr___pthread_rwlock_unlock = __pthread_rwlock_unlock_internal,
    .ptr___pthread_key_create = __pthread_key_create_internal,
    .ptr___pthread_getspecific = __pthread_getspecific_internal,
    .ptr___pthread_setspecific = __pthread_setspecific_internal,
    .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind,
    .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
    .ptr__nptl_setxid = __nptl_setxid,
    /* For now only the stack cache needs to be freed.  */
    .ptr_freeres = nptl_freeres
  };
# define ptr_pthread_functions &pthread_functions
#else
# define ptr_pthread_functions NULL
#endif
#ifdef SHARED
/* This function is called indirectly from the freeres code in libc.
   It releases resources libpthread holds: unwinder state and the
   cached thread stacks (the 0 argument frees the whole cache).  */
static void
__libc_freeres_fn_section
nptl_freeres (void)
{
  __unwind_freeres ();
  __free_stacks (0);
}
#endif
153 /* For asynchronous cancellation we use a signal. This is the handler. */
154 static void
155 sigcancel_handler (int sig, siginfo_t *si, void *ctx)
157 #ifdef __ASSUME_CORRECT_SI_PID
158 /* Determine the process ID. It might be negative if the thread is
159 in the middle of a fork() call. */
160 pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
161 if (__builtin_expect (pid < 0, 0))
162 pid = -pid;
163 #endif
165 /* Safety check. It would be possible to call this function for
166 other signals and send a signal from another process. This is not
167 correct and might even be a security problem. Try to catch as
168 many incorrect invocations as possible. */
169 if (sig != SIGCANCEL
170 #ifdef __ASSUME_CORRECT_SI_PID
171 /* Kernels before 2.5.75 stored the thread ID and not the process
172 ID in si_pid so we skip this test. */
173 || si->si_pid != pid
174 #endif
175 || si->si_code != SI_TKILL)
176 return;
178 struct pthread *self = THREAD_SELF;
180 int oldval = THREAD_GETMEM (self, cancelhandling);
181 while (1)
183 /* We are canceled now. When canceled by another thread this flag
184 is already set but if the signal is directly send (internally or
185 from another process) is has to be done here. */
186 int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;
188 if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
189 /* Already canceled or exiting. */
190 break;
192 int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
193 oldval);
194 if (curval == oldval)
196 /* Set the return value. */
197 THREAD_SETMEM (self, result, PTHREAD_CANCELED);
199 /* Make sure asynchronous cancellation is still enabled. */
200 if ((newval & CANCELTYPE_BITMASK) != 0)
201 /* Run the registered destructors and terminate the thread. */
202 __do_cancel ();
204 break;
207 oldval = curval;
212 struct xid_command *__xidcmd attribute_hidden;
214 /* For asynchronous cancellation we use a signal. This is the handler. */
215 static void
216 sighandler_setxid (int sig, siginfo_t *si, void *ctx)
218 #ifdef __ASSUME_CORRECT_SI_PID
219 /* Determine the process ID. It might be negative if the thread is
220 in the middle of a fork() call. */
221 pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
222 if (__builtin_expect (pid < 0, 0))
223 pid = -pid;
224 #endif
226 /* Safety check. It would be possible to call this function for
227 other signals and send a signal from another process. This is not
228 correct and might even be a security problem. Try to catch as
229 many incorrect invocations as possible. */
230 if (sig != SIGSETXID
231 #ifdef __ASSUME_CORRECT_SI_PID
232 /* Kernels before 2.5.75 stored the thread ID and not the process
233 ID in si_pid so we skip this test. */
234 || si->si_pid != pid
235 #endif
236 || si->si_code != SI_TKILL)
237 return;
239 INTERNAL_SYSCALL_DECL (err);
240 INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
241 __xidcmd->id[1], __xidcmd->id[2]);
243 /* Reset the SETXID flag. */
244 struct pthread *self = THREAD_SELF;
245 int flags, newval;
248 flags = THREAD_GETMEM (self, cancelhandling);
249 newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
250 flags & ~SETXID_BITMASK, flags);
252 while (flags != newval);
254 /* And release the futex. */
255 self->setxid_futex = 1;
256 lll_futex_wake (&self->setxid_futex, 1, LLL_PRIVATE);
258 if (atomic_decrement_val (&__xidcmd->cntr) == 0)
259 lll_futex_wake (&__xidcmd->cntr, 1, LLL_PRIVATE);
/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));
268 /* This can be set by the debugger before initialization is complete. */
269 static bool __nptl_initial_report_events __attribute_used__;
271 void
272 __pthread_initialize_minimal_internal (void)
274 #ifndef SHARED
275 /* Unlike in the dynamically linked case the dynamic linker has not
276 taken care of initializing the TLS data structures. */
277 __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);
279 /* We must prevent gcc from being clever and move any of the
280 following code ahead of the __libc_setup_tls call. This function
281 will initialize the thread register which is subsequently
282 used. */
283 __asm __volatile ("");
284 #endif
286 /* Minimal initialization of the thread descriptor. */
287 struct pthread *pd = THREAD_SELF;
288 INTERNAL_SYSCALL_DECL (err);
289 pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
290 THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
291 THREAD_SETMEM (pd, user_stack, true);
292 if (LLL_LOCK_INITIALIZER != 0)
293 THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
294 #if HP_TIMING_AVAIL
295 THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
296 #endif
298 /* Initialize the robust mutex data. */
299 #ifdef __PTHREAD_MUTEX_HAVE_PREV
300 pd->robust_prev = &pd->robust_head;
301 #endif
302 pd->robust_head.list = &pd->robust_head;
303 #ifdef __NR_set_robust_list
304 pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
305 - offsetof (pthread_mutex_t,
306 __data.__list.__next));
307 int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
308 sizeof (struct robust_list_head));
309 if (INTERNAL_SYSCALL_ERROR_P (res, err))
310 #endif
311 set_robust_list_not_avail ();
313 #ifndef __ASSUME_PRIVATE_FUTEX
314 /* Private futexes are always used (at least internally) so that
315 doing the test once this early is beneficial. */
317 int word = 0;
318 word = INTERNAL_SYSCALL (futex, err, 3, &word,
319 FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
320 if (!INTERNAL_SYSCALL_ERROR_P (word, err))
321 THREAD_SETMEM (pd, header.private_futex, FUTEX_PRIVATE_FLAG);
324 /* Private futexes have been introduced earlier than the
325 FUTEX_CLOCK_REALTIME flag. We don't have to run the test if we
326 know the former are not supported. This also means we know the
327 kernel will return ENOSYS for unknown operations. */
328 if (THREAD_GETMEM (pd, header.private_futex) != 0)
329 #endif
330 #ifndef __ASSUME_FUTEX_CLOCK_REALTIME
332 int word = 0;
333 /* NB: the syscall actually takes six parameters. The last is the
334 bit mask. But since we will not actually wait at all the value
335 is irrelevant. Given that passing six parameters is difficult
336 on some architectures we just pass whatever random value the
337 calling convention calls for to the kernel. It causes no harm. */
338 word = INTERNAL_SYSCALL (futex, err, 5, &word,
339 FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
340 | FUTEX_PRIVATE_FLAG, 1, NULL, 0);
341 assert (INTERNAL_SYSCALL_ERROR_P (word, err));
342 if (INTERNAL_SYSCALL_ERRNO (word, err) != ENOSYS)
343 __set_futex_clock_realtime ();
345 #endif
347 /* Set initial thread's stack block from 0 up to __libc_stack_end.
348 It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
349 purposes this is good enough. */
350 THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);
352 /* Initialize the list of all running threads with the main thread. */
353 INIT_LIST_HEAD (&__stack_user);
354 list_add (&pd->list, &__stack_user);
356 /* Before initializing __stack_user, the debugger could not find us and
357 had to set __nptl_initial_report_events. Propagate its setting. */
358 THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);
360 /* Install the cancellation signal handler. If for some reason we
361 cannot install the handler we do not abort. Maybe we should, but
362 it is only asynchronous cancellation which is affected. */
363 struct sigaction sa;
364 sa.sa_sigaction = sigcancel_handler;
365 sa.sa_flags = SA_SIGINFO;
366 __sigemptyset (&sa.sa_mask);
368 (void) __libc_sigaction (SIGCANCEL, &sa, NULL);
370 /* Install the handle to change the threads' uid/gid. */
371 sa.sa_sigaction = sighandler_setxid;
372 sa.sa_flags = SA_SIGINFO | SA_RESTART;
374 (void) __libc_sigaction (SIGSETXID, &sa, NULL);
376 /* The parent process might have left the signals blocked. Just in
377 case, unblock it. We reuse the signal mask in the sigaction
378 structure. It is already cleared. */
379 __sigaddset (&sa.sa_mask, SIGCANCEL);
380 __sigaddset (&sa.sa_mask, SIGSETXID);
381 (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
382 NULL, _NSIG / 8);
384 /* Get the size of the static and alignment requirements for the TLS
385 block. */
386 size_t static_tls_align;
387 _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);
389 /* Make sure the size takes all the alignments into account. */
390 if (STACK_ALIGN > static_tls_align)
391 static_tls_align = STACK_ALIGN;
392 __static_tls_align_m1 = static_tls_align - 1;
394 __static_tls_size = roundup (__static_tls_size, static_tls_align);
396 /* Determine the default allowed stack size. This is the size used
397 in case the user does not specify one. */
398 struct rlimit limit;
399 if (getrlimit (RLIMIT_STACK, &limit) != 0
400 || limit.rlim_cur == RLIM_INFINITY)
401 /* The system limit is not usable. Use an architecture-specific
402 default. */
403 limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
404 else if (limit.rlim_cur < PTHREAD_STACK_MIN)
405 /* The system limit is unusably small.
406 Use the minimal size acceptable. */
407 limit.rlim_cur = PTHREAD_STACK_MIN;
409 /* Make sure it meets the minimum size that allocate_stack
410 (allocatestack.c) will demand, which depends on the page size. */
411 const uintptr_t pagesz = __sysconf (_SC_PAGESIZE);
412 const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
413 if (limit.rlim_cur < minstack)
414 limit.rlim_cur = minstack;
416 /* Round the resource limit up to page size. */
417 limit.rlim_cur = (limit.rlim_cur + pagesz - 1) & -pagesz;
418 __default_stacksize = limit.rlim_cur;
420 #ifdef SHARED
421 /* Transfer the old value from the dynamic linker's internal location. */
422 *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
423 GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;
425 /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
426 keep the lock count from the ld.so implementation. */
427 GL(dl_rtld_lock_recursive) = (void *) INTUSE (__pthread_mutex_lock);
428 GL(dl_rtld_unlock_recursive) = (void *) INTUSE (__pthread_mutex_unlock);
429 unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
430 GL(dl_load_lock).mutex.__data.__count = 0;
431 while (rtld_lock_count-- > 0)
432 INTUSE (__pthread_mutex_lock) (&GL(dl_load_lock).mutex);
434 GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
435 #endif
437 GL(dl_init_static_tls) = &__pthread_init_static_tls;
439 GL(dl_wait_lookup_done) = &__wait_lookup_done;
441 /* Register the fork generation counter with the libc. */
442 #ifndef TLS_MULTIPLE_THREADS_IN_TCB
443 __libc_multiple_threads_ptr =
444 #endif
445 __libc_pthread_init (&__fork_generation, __reclaim_stacks,
446 ptr_pthread_functions);
448 /* Determine whether the machine is SMP or not. */
449 __is_smp = is_smp_system ();
451 strong_alias (__pthread_initialize_minimal_internal,
452 __pthread_initialize_minimal)