[glibc.git] / nptl / init.c
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <pthreadP.h>
#include <atomic.h>
#include <ldsodefs.h>
#include <tls.h>
#include <fork.h>
#include <version.h>
#include <shlib-compat.h>
#ifndef __NR_set_tid_address
/* XXX For the time being...  Once we can rely on the kernel headers
   having the definition remove these lines.  */
#if defined __s390__
# define __NR_set_tid_address  252
#elif defined __ia64__
# define __NR_set_tid_address  1233
#elif defined __i386__
# define __NR_set_tid_address  258
#elif defined __x86_64__
# define __NR_set_tid_address  218
#elif defined __powerpc__
# define __NR_set_tid_address  232
#else
# error "define __NR_set_tid_address"
#endif
#endif
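
/* set_tid_address is used below to register pd->tid with the kernel as
   the "clear child TID" address: roughly, the kernel remembers the
   pointer and, when the thread exits, writes 0 there and performs a
   futex wake on it, which is what lets pthread_join wait without
   polling.  The syscall also returns the caller's kernel thread ID,
   and for the initial thread the TID equals the PID, so the single
   call below can initialize both pd->pid and pd->tid.  A minimal
   sketch of a direct use, assuming only <unistd.h> and
   <sys/syscall.h>:

     pid_t tid = syscall (__NR_set_tid_address, &some_int);

   where some_int stands for a hypothetical int the kernel will clear
   when the calling thread exits.  */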

/* Default stack size.  */
size_t __default_stacksize attribute_hidden;

/* Size and alignment of static TLS block.  */
size_t __static_tls_size;
size_t __static_tls_align_m1;

/* Version of the library, used in libthread_db to detect mismatches.  */
static const char nptl_version[] = VERSION;

#if defined USE_TLS && !defined SHARED
extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);
#endif

#ifdef SHARED
static struct pthread_functions pthread_functions =
  {
    .ptr_pthread_attr_destroy = __pthread_attr_destroy,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
    .ptr___pthread_attr_init_2_0 = __pthread_attr_init_2_0,
# endif
    .ptr___pthread_attr_init_2_1 = __pthread_attr_init_2_1,
    .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
    .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
    .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
    .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
    .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
    .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_destroy = __pthread_cond_destroy,
    .ptr___pthread_cond_init = __pthread_cond_init,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_3_2)
    .ptr___pthread_cond_broadcast_2_0 = __pthread_cond_broadcast_2_0,
    .ptr___pthread_cond_destroy_2_0 = __pthread_cond_destroy_2_0,
    .ptr___pthread_cond_init_2_0 = __pthread_cond_init_2_0,
    .ptr___pthread_cond_signal_2_0 = __pthread_cond_signal_2_0,
    .ptr___pthread_cond_wait_2_0 = __pthread_cond_wait_2_0,
# endif
    .ptr_pthread_equal = __pthread_equal,
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = INTUSE(__pthread_mutex_destroy),
    .ptr_pthread_mutex_init = INTUSE(__pthread_mutex_init),
    .ptr_pthread_mutex_lock = INTUSE(__pthread_mutex_lock),
    .ptr_pthread_mutex_unlock = INTUSE(__pthread_mutex_unlock),
    .ptr_pthread_self = __pthread_self,
    .ptr_pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr___pthread_once = __pthread_once_internal,
    .ptr___pthread_rwlock_rdlock = __pthread_rwlock_rdlock_internal,
    .ptr___pthread_rwlock_wrlock = __pthread_rwlock_wrlock_internal,
    .ptr___pthread_rwlock_unlock = __pthread_rwlock_unlock_internal,
    .ptr___pthread_key_create = __pthread_key_create_internal,
    .ptr___pthread_getspecific = __pthread_getspecific_internal,
    .ptr___pthread_setspecific = __pthread_setspecific_internal,
    .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind
  };
# define ptr_pthread_functions &pthread_functions
#else
# define ptr_pthread_functions NULL
#endif
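
/* A note on the table above: libc.so carries weak stub versions of a
   number of pthread entry points so single-threaded programs work
   without libpthread.  The ptr_pthread_functions pointer is handed to
   __libc_pthread_init at the end of this file, and libc presumably
   dispatches through this table once libpthread is loaded so the real
   implementations take over.  The _2_0 entries keep the old GLIBC_2.0
   ABI variants reachable for binaries built against them.  */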

/* For asynchronous cancellation we use a signal.  This is the handler.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another thread.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGCANCEL
#ifdef __ASSUME_CORRECT_SI_PID
      /* Kernels before 2.5.75 stored the thread ID, not the process
         ID, in si_pid, so on such kernels this test is skipped.  */
      || si->si_pid != THREAD_GETMEM (THREAD_SELF, pid)
#endif
      || si->si_code != SI_TKILL)
    return;

  struct pthread *self = THREAD_SELF;

  int oldval = THREAD_GETMEM (self, cancelhandling);
  while (1)
    {
      /* We are canceled now.  When canceled by another thread this flag
         is already set, but if the signal is sent directly (internally
         or from another process) it has to be set here.  */
      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
        /* Already canceled or exiting.  */
        break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
                                              oldval);
      if (curval == oldval)
        {
          /* Set the return value.  */
          THREAD_SETMEM (self, result, PTHREAD_CANCELED);

          /* Make sure asynchronous cancellation is still enabled.  */
          if ((newval & CANCELTYPE_BITMASK) != 0)
            /* Run the registered destructors and terminate the thread.  */
            __do_cancel ();

          break;
        }

      /* The compare-and-swap lost a race against a concurrent update;
         retry with the freshly read value.  */
      oldval = curval;
    }
}
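
/* How this handler normally fires, roughly: pthread_cancel sets the
   cancellation bits in the target's cancelhandling word and, when the
   target has asynchronous cancellation enabled, delivers SIGCANCEL to
   that specific thread with a thread-directed kill (the tkill/tgkill
   syscalls), which is why the SI_TKILL check above is a usable filter.
   A minimal sketch of such a delivery, with pd standing for the
   target's struct pthread:

     INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
                       pd->tid, SIGCANCEL);

   The compare-and-swap loop above then folds the state change in even
   if the signal races with a concurrent pthread_cancel or thread
   exit.  */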

/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));

void
__pthread_initialize_minimal_internal (void)
{
#ifndef SHARED
  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);

  /* We must prevent gcc from being clever and move any of the
     following code ahead of the __libc_setup_tls call.  This function
     will initialize the thread register which is subsequently
     used.  */
  __asm __volatile ("");
#endif

  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  INTERNAL_SYSCALL_DECL (err);
  pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);
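
  /* The main thread's descriptor goes on __stack_user, the list for
     stacks the library did not allocate itself (threads created with a
     user-supplied stack end up there too); stacks that allocatestack.c
     obtains by mmap are kept on a separate list.  Code that has to
     visit every thread, e.g. fork handling and stack reclamation,
     presumably walks both lists.  */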

  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  struct sigaction sa;
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  sigemptyset (&sa.sa_mask);

  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);

  /* The parent process might have left the signal blocked.  Just in
     case, unblock it.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
  __sigaddset (&sa.sa_mask, SIGCANCEL);
  (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
                           NULL, _NSIG / 8);
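
  /* The last argument of rt_sigprocmask is the size of the kernel's
     signal set in bytes; _NSIG / 8 is the conventional way to compute
     it for the raw syscall interface.  __libc_sigaction is the
     internal entry point; the public sigaction wrapper refuses to
     touch the signals the implementation reserves, so internal code
     like this presumably has to go through it.  */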

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;

#ifdef NEED_SEPARATE_REGISTER_STACK
  __default_stacksize = MAX (limit.rlim_cur / 2, PTHREAD_STACK_MIN);
#else
  __default_stacksize = MAX (limit.rlim_cur, PTHREAD_STACK_MIN);
#endif
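
  /* On targets that define NEED_SEPARATE_REGISTER_STACK (ia64 is the
     usual example), each thread allocation has to hold both the normal
     stack and the register backing store, so only half of the resource
     limit is treated as the default stack size here.  */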
  /* The default stack size had better be a multiple of the page
     size.  */
  assert (__default_stacksize % __sysconf (_SC_PAGESIZE) == 0);

  /* Get the size and alignment requirements of the static TLS
     block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

#ifdef SHARED
  /* Transfer the old value from the dynamic linker's internal location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;
#endif
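
  /* From here on the dynamic linker fetches the per-thread state used
     by its error catching and dlerror machinery through
     __libc_dl_error_tsd, a libc function returning the address of a
     __thread variable, instead of through its own single-threaded
     fallback; copying the old value first presumably preserves any
     error already pending during startup.  */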

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
                         ptr_pthread_functions);
}
strong_alias (__pthread_initialize_minimal_internal,
              __pthread_initialize_minimal)
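
/* strong_alias makes __pthread_initialize_minimal a second name for
   the internal function above; it is run during process startup,
   before main, so everything in this file has to work while the
   process is still single-threaded.  */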