/* Completion of TCB initialization after TLS_INIT_TP.  NPTL version.
   Copyright (C) 2020-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <kernel-features.h>
#include <ldsodefs.h>
#include <list.h>
#include <nptl/pthreadP.h>
#include <tls.h>
#include <rseq-internal.h>
#include <thread_pointer.h>

#define TUNABLE_NAMESPACE pthread
#include <dl-tunables.h>
30 #ifndef __ASSUME_SET_ROBUST_LIST
31 bool __nptl_set_robust_list_avail
;
32 rtld_hidden_data_def (__nptl_set_robust_list_avail
)
35 bool __nptl_initial_report_events
;
36 rtld_hidden_def (__nptl_initial_report_events
)
/* Dummy implementation.  See __rtld_mutex_init.  Installed as the
   rtld lock/unlock callback before NPTL is fully initialized; it
   takes the lock argument and does nothing, always reporting
   success.  */
static int
rtld_mutex_dummy (pthread_mutex_t *lock)
{
  return 0;
}
47 const unsigned int __rseq_flags
;
48 const unsigned int __rseq_size attribute_relro
;
49 const ptrdiff_t __rseq_offset attribute_relro
;
52 __tls_pre_init_tp (void)
54 /* The list data structures are not consistent until
56 INIT_LIST_HEAD (&GL (dl_stack_used
));
57 INIT_LIST_HEAD (&GL (dl_stack_user
));
58 INIT_LIST_HEAD (&GL (dl_stack_cache
));
61 ___rtld_mutex_lock
= rtld_mutex_dummy
;
62 ___rtld_mutex_unlock
= rtld_mutex_dummy
;
69 struct pthread
*pd
= THREAD_SELF
;
71 /* Set up thread stack list management. */
72 list_add (&pd
->list
, &GL (dl_stack_user
));
74 /* Early initialization of the TCB. */
75 pd
->tid
= INTERNAL_SYSCALL_CALL (set_tid_address
, &pd
->tid
);
76 THREAD_SETMEM (pd
, specific
[0], &pd
->specific_1stblock
[0]);
77 THREAD_SETMEM (pd
, user_stack
, true);
79 /* Before initializing GL (dl_stack_user), the debugger could not
80 find us and had to set __nptl_initial_report_events. Propagate
82 THREAD_SETMEM (pd
, report_events
, __nptl_initial_report_events
);
84 /* Initialize the robust mutex data. */
86 #if __PTHREAD_MUTEX_HAVE_PREV
87 pd
->robust_prev
= &pd
->robust_head
;
89 pd
->robust_head
.list
= &pd
->robust_head
;
90 pd
->robust_head
.futex_offset
= (offsetof (pthread_mutex_t
, __data
.__lock
)
91 - offsetof (pthread_mutex_t
,
92 __data
.__list
.__next
));
93 int res
= INTERNAL_SYSCALL_CALL (set_robust_list
, &pd
->robust_head
,
94 sizeof (struct robust_list_head
));
95 if (!INTERNAL_SYSCALL_ERROR_P (res
))
97 #ifndef __ASSUME_SET_ROBUST_LIST
98 __nptl_set_robust_list_avail
= true;
105 do_rseq
= TUNABLE_GET (rseq
, int, NULL
);
106 if (rseq_register_current_thread (pd
, do_rseq
))
108 /* We need a writable view of the variables. They are in
109 .data.relro and are not yet write-protected. */
110 extern unsigned int size
__asm__ ("__rseq_size");
111 size
= sizeof (pd
->rseq_area
);
115 /* This should be a compile-time constant, but the current
116 infrastructure makes it difficult to determine its value. Not
117 all targets support __thread_pointer, so set __rseq_offset only
118 if the rseq registration may have happened because RSEQ_SIG is
120 extern ptrdiff_t offset
__asm__ ("__rseq_offset");
121 offset
= (char *) &pd
->rseq_area
- (char *) __thread_pointer ();
125 /* Set initial thread's stack block from 0 up to __libc_stack_end.
126 It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
127 purposes this is good enough. */
128 THREAD_SETMEM (pd
, stackblock_size
, (size_t) __libc_stack_end
);