/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <pthread.h>
#include <pthreadP.h>
#include <atomic.h>
#include <ldsodefs.h>
#include <tls.h>
#include <fork.h>
#include <version.h>
#include <shlib-compat.h>
#include <smp.h>
#include <lowlevellock.h>
#include <kernel-features.h>
#include <libc-internal.h>
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
/* Pointer to the corresponding variable in libc.  */
int *__libc_multiple_threads_ptr attribute_hidden;
#endif

/* Size and alignment of static TLS block.  */
size_t __static_tls_size;
size_t __static_tls_align_m1;
#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if the set_robust_list system call is not available.  */
int __set_robust_list_avail;
# define set_robust_list_not_avail() \
  __set_robust_list_avail = -1
#else
# define set_robust_list_not_avail() do { } while (0)
#endif
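
/* Sketch of a consumer (an assumption; the real checks live elsewhere,
   e.g. in pthread_mutex_init.c): code that needs kernel-assisted robust
   mutexes tests the flag and bails out when the syscall is missing:

     if (__set_robust_list_avail < 0)
       return ENOTSUP;
*/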
#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
/* Nonzero if we have the FUTEX_CLOCK_REALTIME flag.  */
int __have_futex_clock_realtime;
# define __set_futex_clock_realtime() \
  __have_futex_clock_realtime = 1
#else
#define __set_futex_clock_realtime() do { } while (0)
#endif
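
/* Sketch of how the flag is consumed (an assumption; the users are the
   timed-wait paths in other files): with FUTEX_CLOCK_REALTIME available,
   an absolute-timeout wait can go straight to the kernel,

     if (__have_futex_clock_realtime)
       op = FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME | FUTEX_PRIVATE_FLAG;
     else
       op = FUTEX_WAIT;   // needs a relative timeout computed in userland
*/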
/* Version of the library, used in libthread_db to detect mismatches.  */
static const char nptl_version[] __attribute_used__ = VERSION;

extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);
void __nptl_set_robust (struct pthread *);

static void nptl_freeres (void);
#ifdef SHARED
static const struct pthread_functions pthread_functions =
  {
    .ptr_pthread_attr_destroy = __pthread_attr_destroy,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
    .ptr___pthread_attr_init_2_0 = __pthread_attr_init_2_0,
# endif
    .ptr___pthread_attr_init_2_1 = __pthread_attr_init_2_1,
    .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
    .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
    .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
    .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
    .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
    .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_destroy = __pthread_cond_destroy,
    .ptr___pthread_cond_init = __pthread_cond_init,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_3_2)
    .ptr___pthread_cond_broadcast_2_0 = __pthread_cond_broadcast_2_0,
    .ptr___pthread_cond_destroy_2_0 = __pthread_cond_destroy_2_0,
    .ptr___pthread_cond_init_2_0 = __pthread_cond_init_2_0,
    .ptr___pthread_cond_signal_2_0 = __pthread_cond_signal_2_0,
    .ptr___pthread_cond_wait_2_0 = __pthread_cond_wait_2_0,
    .ptr___pthread_cond_timedwait_2_0 = __pthread_cond_timedwait_2_0,
# endif
    .ptr_pthread_equal = __pthread_equal,
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
    .ptr_pthread_mutex_init = __pthread_mutex_init,
    .ptr_pthread_mutex_lock = __pthread_mutex_lock,
    .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
    .ptr_pthread_self = __pthread_self,
    .ptr_pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr___pthread_once = __pthread_once,
    .ptr___pthread_rwlock_rdlock = __pthread_rwlock_rdlock,
    .ptr___pthread_rwlock_wrlock = __pthread_rwlock_wrlock,
    .ptr___pthread_rwlock_unlock = __pthread_rwlock_unlock,
    .ptr___pthread_key_create = __pthread_key_create,
    .ptr___pthread_getspecific = __pthread_getspecific,
    .ptr___pthread_setspecific = __pthread_setspecific,
    .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind,
    .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
    .ptr__nptl_setxid = __nptl_setxid,
    /* For now only the stack cache needs to be freed.  */
    .ptr_freeres = nptl_freeres,
    .ptr_set_robust = __nptl_set_robust
  };
# define ptr_pthread_functions &pthread_functions
#else
# define ptr_pthread_functions NULL
#endif
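
/* Background: __libc_pthread_init (called below) hands the address of this
   table to libc, which keeps its own copy as __libc_pthread_functions.
   That lets single-threaded programs use libc's pthread stubs and lets
   libc forward into libpthread once it is loaded.  A libc-side forwarder
   looks roughly like this (a sketch; the real ones are generated in
   libc's forward.c):

     int
     pthread_attr_destroy (pthread_attr_t *attr)
     {
       return __libc_pthread_functions.ptr_pthread_attr_destroy (attr);
     }
*/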
/* This function is called indirectly from the freeres code in libc.  */
static void
__libc_freeres_fn_section
nptl_freeres (void)
{
  __unwind_freeres ();
  __free_stacks (0);
}
void
__nptl_set_robust (struct pthread *self)
{
#ifdef __NR_set_robust_list
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (set_robust_list, err, 2, &self->robust_head,
                    sizeof (struct robust_list_head));
#endif
}
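
/* Background: set_robust_list registers this thread's robust-mutex list
   head with the kernel.  When the thread dies, the kernel walks the list,
   sets FUTEX_OWNER_DIED in each listed futex word and wakes a waiter,
   which then gets EOWNERDEAD from pthread_mutex_lock.  */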
/* For asynchronous cancellation we use a signal.  This is the handler.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
  /* Determine the process ID.  It might be negative if the thread is
     in the middle of a fork() call.  */
  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
  if (__glibc_unlikely (pid < 0))
    pid = -pid;

  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGCANCEL
      || si->si_pid != pid
      || si->si_code != SI_TKILL)
    return;

  struct pthread *self = THREAD_SELF;

  int oldval = THREAD_GETMEM (self, cancelhandling);
  while (1)
    {
      /* We are canceled now.  When canceled by another thread this flag
         is already set but if the signal is directly sent (internally or
         from another process) it has to be done here.  */
      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
        /* Already canceled or exiting.  */
        break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
                                              oldval);
      if (curval == oldval)
        {
          /* Set the return value.  */
          THREAD_SETMEM (self, result, PTHREAD_CANCELED);

          /* Make sure asynchronous cancellation is still enabled.  */
          if ((newval & CANCELTYPE_BITMASK) != 0)
            /* Run the registered destructors and terminate the thread.  */
            __do_cancel ();

          break;
        }

      oldval = curval;
    }
}
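
/* Background: the SI_TKILL check works because pthread_cancel delivers
   SIGCANCEL with the tgkill syscall, roughly (a sketch of the sender,
   which lives in pthread_cancel.c):

     INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
                       pd->tid, SIGCANCEL);
*/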
struct xid_command *__xidcmd attribute_hidden;
/* We use the SIGSETXID signal in the setuid, setgid, etc. implementations to
   tell each thread to call the respective setxid syscall on itself.  This is
   the handler.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
  /* Determine the process ID.  It might be negative if the thread is
     in the middle of a fork() call.  */
  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
  if (__glibc_unlikely (pid < 0))
    pid = -pid;

  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGSETXID
      || si->si_pid != pid
      || si->si_code != SI_TKILL)
    return;

  int result;
  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
                                 __xidcmd->id[1], __xidcmd->id[2]);
  int error = 0;
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
    error = INTERNAL_SYSCALL_ERRNO (result, err);
  __nptl_setxid_error (__xidcmd, error);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags, newval;
  do
    {
      flags = THREAD_GETMEM (self, cancelhandling);
      newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
                                          flags & ~SETXID_BITMASK, flags);
    }
  while (flags != newval);

  /* And release the futex.  */
  self->setxid_futex = 1;
  lll_futex_wake (&self->setxid_futex, 1, LLL_PRIVATE);

  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    lll_futex_wake (&__xidcmd->cntr, 1, LLL_PRIVATE);
}
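
/* Background: Linux credentials are per thread, but POSIX requires
   setuid() and friends to change the whole process.  libc's setxid
   wrappers therefore store the syscall number and arguments in __xidcmd,
   signal every thread with SIGSETXID, and wait on __xidcmd->cntr until
   each thread has executed the syscall on itself in the handler above.  */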
/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));

/* This can be set by the debugger before initialization is complete.  */
static bool __nptl_initial_report_events __attribute_used__;
void
__pthread_initialize_minimal_internal (void)
{
#ifndef SHARED
  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);

  /* We must prevent gcc from being clever and move any of the
     following code ahead of the __libc_setup_tls call.  This function
     will initialize the thread register which is subsequently
     used.  */
  __asm __volatile ("");
#endif
  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  INTERNAL_SYSCALL_DECL (err);
#ifdef __NR_set_tid_address
  pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
#endif
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif
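
  /* Background: set_tid_address returns the caller's TID (hence pd->pid =
     pd->tid) and registers &pd->tid as the clear-child-tid address, so the
     kernel zeroes it and performs a futex wake there when the thread
     exits; the join/exit code depends on that wakeup.  */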
  /* Initialize the robust mutex data.  */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                  - offsetof (pthread_mutex_t,
                                              __data.__list.__next));
  int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                              sizeof (struct robust_list_head));
  if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
    set_robust_list_not_avail ();
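
  /* Illustration: futex_offset tells the kernel how to find the futex
     word from a robust-list entry embedded in a pthread_mutex_t, roughly

       futex_word = (int *) ((char *) list_entry + futex_offset);

     which is exactly the difference of the two offsetof values above.  */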
# ifndef __ASSUME_PRIVATE_FUTEX
  /* Private futexes are always used (at least internally) so that
     doing the test once this early is beneficial.  */
  {
    int word = 0;
    word = INTERNAL_SYSCALL (futex, err, 3, &word,
                             FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
    if (!INTERNAL_SYSCALL_ERROR_P (word, err))
      THREAD_SETMEM (pd, header.private_futex, FUTEX_PRIVATE_FLAG);
  }

  /* Private futexes have been introduced earlier than the
     FUTEX_CLOCK_REALTIME flag.  We don't have to run the test if we
     know the former are not supported.  This also means we know the
     kernel will return ENOSYS for unknown operations.  */
  if (THREAD_GETMEM (pd, header.private_futex) != 0)
# endif
# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
    {
      int word = 0;
      /* NB: the syscall actually takes six parameters.  The last is the
         bit mask.  But since we will not actually wait at all the value
         is irrelevant.  Given that passing six parameters is difficult
         on some architectures we just pass whatever random value the
         calling convention calls for to the kernel.  It causes no harm.  */
      word = INTERNAL_SYSCALL (futex, err, 5, &word,
                               FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
                               | FUTEX_PRIVATE_FLAG, 1, NULL, 0);
      assert (INTERNAL_SYSCALL_ERROR_P (word, err));
      if (INTERNAL_SYSCALL_ERRNO (word, err) != ENOSYS)
        __set_futex_clock_realtime ();
    }
# endif
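
  /* How the probes work: the FUTEX_WAKE call simply succeeds or fails
     with ENOSYS.  The FUTEX_WAIT_BITSET probe is designed to always
     fail: word is 0 but the expected value is 1, so a supporting kernel
     returns EWOULDBLOCK immediately while an old one returns ENOSYS;
     either way the assert holds and no actual wait happens.  */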
  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us and
     had to set __nptl_initial_report_events.  Propagate its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);
#if defined SIGCANCEL || defined SIGSETXID
  struct sigaction sa;
  __sigemptyset (&sa.sa_mask);

# ifdef SIGCANCEL
  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);
# endif

# ifdef SIGSETXID
  /* Install the handler to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;
  (void) __libc_sigaction (SIGSETXID, &sa, NULL);
# endif

  /* The parent process might have left the signals blocked.  Just in
     case, unblock it.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
# ifdef SIGCANCEL
  __sigaddset (&sa.sa_mask, SIGCANCEL);
# endif
# ifdef SIGSETXID
  __sigaddset (&sa.sa_mask, SIGSETXID);
# endif
  (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
                           NULL, _NSIG / 8);
#endif
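
  /* Note: SIGCANCEL and SIGSETXID are reserved real-time signals (the
     first two above the kernel's SIGRTMIN); libc hides them by biasing
     the SIGRTMIN it reports to applications, so these handlers cannot
     collide with user signal handling.  */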
  /* Get the size of the static and alignment requirements for the TLS
     block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);
  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (__getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = GLRO(dl_pagesize);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = ALIGN_UP (limit.rlim_cur, pagesz);
  lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
  __default_pthread_attr.stacksize = limit.rlim_cur;
  __default_pthread_attr.guardsize = GLRO (dl_pagesize);
  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
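
  /* Worked example: with the common "ulimit -s" of 8192 KiB and 4 KiB
     pages, rlim_cur passes all the checks unchanged, so threads created
     without an explicit stack size get an 8 MiB stack and a one-page
     guard.  */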
#ifdef SHARED
  /* Transfer the old value from the dynamic linker's internal location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;

  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
  GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    __pthread_mutex_lock (&GL(dl_load_lock).mutex);

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  GL(dl_wait_lookup_done) = &__wait_lookup_done;
  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
                         ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();
}
strong_alias (__pthread_initialize_minimal_internal,
              __pthread_initialize_minimal)
size_t
__pthread_get_minstack (const pthread_attr_t *attr)
{
  struct pthread_attr *iattr = (struct pthread_attr *) attr;

  return (GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN
          + iattr->guardsize);
}
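
/* Background (an assumption about call sites, which are in other files):
   glibc uses __pthread_get_minstack when creating internal helper
   threads, so their stacks are only PTHREAD_STACK_MIN plus what the
   static TLS block and the guard actually require.  */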