nptl/init.c (glibc 2.4.90-20)
/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <pthreadP.h>
#include <atomic.h>
#include <ldsodefs.h>
#include <tls.h>
#include <fork.h>
#include <version.h>
#include <shlib-compat.h>
#include <smp.h>
#include <lowlevellock.h>
#ifndef __NR_set_tid_address
/* XXX For the time being...  Once we can rely on the kernel headers
   having the definition, remove these lines.  */
#if defined __s390__
# define __NR_set_tid_address  252
#elif defined __ia64__
# define __NR_set_tid_address  1233
#elif defined __i386__
# define __NR_set_tid_address  258
#elif defined __x86_64__
# define __NR_set_tid_address  218
#elif defined __powerpc__
# define __NR_set_tid_address  232
#elif defined __sparc__
# define __NR_set_tid_address  166
#else
# error "define __NR_set_tid_address"
#endif
#endif
/* Size and alignment of static TLS block.  */
size_t __static_tls_size;
size_t __static_tls_align_m1;
#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if we do not have the system call and cannot use it.  */
int __set_robust_list_avail;
# define set_robust_list_not_avail() \
  __set_robust_list_avail = -1
#else
# define set_robust_list_not_avail() do { } while (0)
#endif
/* Version of the library, used in libthread_db to detect mismatches.  */
static const char nptl_version[] __attribute_used__ = VERSION;


#if defined USE_TLS && !defined SHARED
extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);
#endif
#ifdef SHARED
static const struct pthread_functions pthread_functions =
  {
    .ptr_pthread_attr_destroy = __pthread_attr_destroy,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
    .ptr___pthread_attr_init_2_0 = __pthread_attr_init_2_0,
# endif
    .ptr___pthread_attr_init_2_1 = __pthread_attr_init_2_1,
    .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
    .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
    .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
    .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
    .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
    .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_destroy = __pthread_cond_destroy,
    .ptr___pthread_cond_init = __pthread_cond_init,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_3_2)
    .ptr___pthread_cond_broadcast_2_0 = __pthread_cond_broadcast_2_0,
    .ptr___pthread_cond_destroy_2_0 = __pthread_cond_destroy_2_0,
    .ptr___pthread_cond_init_2_0 = __pthread_cond_init_2_0,
    .ptr___pthread_cond_signal_2_0 = __pthread_cond_signal_2_0,
    .ptr___pthread_cond_wait_2_0 = __pthread_cond_wait_2_0,
    .ptr___pthread_cond_timedwait_2_0 = __pthread_cond_timedwait_2_0,
# endif
    .ptr_pthread_equal = __pthread_equal,
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = INTUSE(__pthread_mutex_destroy),
    .ptr_pthread_mutex_init = INTUSE(__pthread_mutex_init),
    .ptr_pthread_mutex_lock = INTUSE(__pthread_mutex_lock),
    .ptr_pthread_mutex_unlock = INTUSE(__pthread_mutex_unlock),
    .ptr_pthread_self = __pthread_self,
    .ptr_pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr___pthread_once = __pthread_once_internal,
    .ptr___pthread_rwlock_rdlock = __pthread_rwlock_rdlock_internal,
    .ptr___pthread_rwlock_wrlock = __pthread_rwlock_wrlock_internal,
    .ptr___pthread_rwlock_unlock = __pthread_rwlock_unlock_internal,
    .ptr___pthread_key_create = __pthread_key_create_internal,
    .ptr___pthread_getspecific = __pthread_getspecific_internal,
    .ptr___pthread_setspecific = __pthread_setspecific_internal,
    .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind,
    .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
    .ptr__nptl_setxid = __nptl_setxid
  };
# define ptr_pthread_functions &pthread_functions
#else
# define ptr_pthread_functions NULL
#endif
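
/* This table is handed to libc via __libc_pthread_init below so that
   libc stubs can forward calls into libpthread once it is loaded,
   without a hard link-time dependency.  Roughly (an illustrative
   sketch, not code from this file; the libc-side table name may
   differ by version):

     if (__libc_pthread_functions.ptr_pthread_mutex_lock != NULL)
       return __libc_pthread_functions.ptr_pthread_mutex_lock (mutex);

   so the filled-in pointer table is what flips libc from its
   single-threaded fallbacks to the real NPTL implementations.  */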
/* For asynchronous cancellation we use a signal.  This is the handler.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
#ifdef __ASSUME_CORRECT_SI_PID
  /* Determine the process ID.  It might be negative if the thread is
     in the middle of a fork() call.  */
  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
  if (__builtin_expect (pid < 0, 0))
    pid = -pid;
#endif

  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGCANCEL
#ifdef __ASSUME_CORRECT_SI_PID
      /* Kernels before 2.5.75 stored the thread ID and not the process
         ID in si_pid so we skip this test.  */
      || si->si_pid != pid
#endif
      || si->si_code != SI_TKILL)
    return;

  struct pthread *self = THREAD_SELF;

  int oldval = THREAD_GETMEM (self, cancelhandling);
  while (1)
    {
      /* We are canceled now.  When canceled by another thread this flag
         is already set, but if the signal is sent directly (internally
         or from another process) it has to be done here.  */
      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
        /* Already canceled or exiting.  */
        break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
                                              oldval);
      if (curval == oldval)
        {
          /* Set the return value.  */
          THREAD_SETMEM (self, result, PTHREAD_CANCELED);

          /* Make sure asynchronous cancellation is still enabled.  */
          if ((newval & CANCELTYPE_BITMASK) != 0)
            /* Run the registered destructors and terminate the thread.  */
            __do_cancel ();

          break;
        }

      oldval = curval;
    }
}
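
/* The loop above is the classic compare-and-swap retry idiom: derive
   the desired flag word from the last observed value and retry the CAS
   until it succeeds or the state has already moved past us.  A minimal
   self-contained sketch of the same pattern, using the GCC __sync
   builtin in place of NPTL's THREAD_ATOMIC_CMPXCHG_VAL (the names here
   are illustrative only):

     int oldval = flags;
     while (1)
       {
         int newval = oldval | SOME_BITMASK;
         if (oldval == newval)
           break;                 // bits already set; nothing to do
         int curval = __sync_val_compare_and_swap (&flags, oldval,
                                                   newval);
         if (curval == oldval)
           break;                 // our update won the race
         oldval = curval;         // lost the race; retry with new value
       }
*/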
struct xid_command *__xidcmd attribute_hidden;

/* We use the SIGSETXID signal to implement the setuid/setgid family of
   calls process-wide.  This is its handler.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
#ifdef __ASSUME_CORRECT_SI_PID
  /* Determine the process ID.  It might be negative if the thread is
     in the middle of a fork() call.  */
  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
  if (__builtin_expect (pid < 0, 0))
    pid = -pid;
#endif

  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGSETXID
#ifdef __ASSUME_CORRECT_SI_PID
      /* Kernels before 2.5.75 stored the thread ID and not the process
         ID in si_pid so we skip this test.  */
      || si->si_pid != pid
#endif
      || si->si_code != SI_TKILL)
    return;

  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
                        __xidcmd->id[1], __xidcmd->id[2]);

  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    lll_futex_wake (&__xidcmd->cntr, 1);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags = THREAD_GETMEM (self, cancelhandling);
  THREAD_SETMEM (self, cancelhandling, flags & ~SETXID_BITMASK);

  /* And release the futex.  */
  self->setxid_futex = 1;
  lll_futex_wake (&self->setxid_futex, 1);
}
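
/* The sender side of this rendezvous lives in __nptl_setxid (not in
   this file): it sets __xidcmd->cntr to the number of signalled
   threads and blocks until the handlers above have drained the count,
   along the lines of (illustrative sketch):

     while (cmd->cntr != 0)
       lll_futex_wait (&cmd->cntr, cmd->cntr);

   which is why the last handler to decrement the counter issues the
   lll_futex_wake.  */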

/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));

void
__pthread_initialize_minimal_internal (void)
{
#ifndef SHARED
  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);

  /* We must prevent gcc from being clever and moving any of the
     following code ahead of the __libc_setup_tls call.  That function
     initializes the thread register which is subsequently used.  */
  __asm __volatile ("");
#endif

  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  INTERNAL_SYSCALL_DECL (err);
  pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
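  /* set_tid_address registers &pd->tid as this thread's
     clear_child_tid address: on thread exit the kernel stores 0 there
     and performs a futex wake on it, which is the event pthread_join
     waits for.  The syscall returns the caller's TID, so the initial
     thread's pid and tid are filled in with a single call.  */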
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Initialize the robust mutex data.  */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                  - offsetof (pthread_mutex_t,
                                              __data.__list.__next));
  int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                              sizeof (struct robust_list_head));
  if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
    set_robust_list_not_avail ();
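  /* futex_offset tells the kernel how to get from a robust-list entry
     (the __list.__next field embedded in each owned mutex) to that
     mutex's futex word.  Since __data.__lock normally precedes
     __data.__list in pthread_mutex_t, the offset is typically
     negative; e.g. with __lock at offset 0 and __next at offset 20 the
     kernel would compute  futex = entry + (0 - 20).  The exact numbers
     are layout-dependent, which is what the offsetof expressions above
     account for.  */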
  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);


  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  struct sigaction sa;
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  __sigemptyset (&sa.sa_mask);

  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);

  /* Install the handler to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;

  (void) __libc_sigaction (SIGSETXID, &sa, NULL);

  /* The parent process might have left the signals blocked.  Just in
     case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
  __sigaddset (&sa.sa_mask, SIGCANCEL);
  __sigaddset (&sa.sa_mask, SIGSETXID);
  (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
                           NULL, _NSIG / 8);

  /* Get the size and alignment requirements of the static TLS
     block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);
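  /* Storing alignment - 1 lets later code align addresses with plain
     mask arithmetic, e.g. (illustrative):

       mem = (void *) (((uintptr_t) mem + __static_tls_align_m1)
                       & ~__static_tls_align_m1);

     while roundup above pads the TLS block size to a multiple of that
     same alignment.  */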
  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = __sysconf (_SC_PAGESIZE);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = (limit.rlim_cur + pagesz - 1) & -pagesz;
  __default_stacksize = limit.rlim_cur;
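  /* The expression above is the usual power-of-two round-up,
     (x + pagesz - 1) & -pagesz.  For a 4096-byte page, a soft limit of
     10000 bytes becomes (10000 + 4095) & ~4095 = 12288, while an
     already aligned 8 MiB (8388608) limit is left unchanged.  */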

#ifdef SHARED
  /* Transfer the old value from the dynamic linker's internal location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;

  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) INTUSE (__pthread_mutex_lock);
  GL(dl_rtld_unlock_recursive) = (void *) INTUSE (__pthread_mutex_unlock);
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    INTUSE (__pthread_mutex_lock) (&GL(dl_load_lock).mutex);
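  /* The loop above replays ld.so's recorded lock depth: if the dynamic
     linker was holding dl_load_lock N levels deep under its startup
     lock implementation, the count is zeroed and the mutex is
     re-acquired N times through pthread_mutex_lock, so the
     recursive-mutex bookkeeping matches the lock functions installed
     just above.  */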

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
                         ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();
}
strong_alias (__pthread_initialize_minimal_internal,
              __pthread_initialize_minimal)