/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>		/* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <atomic.h>
#include <libc-internal.h>
#include <array_length.h>
#include <libc-early-init.h>
#include <gnu/lib-names.h>

#include <dl-dst.h>
#include <dl-prop.h>
/* We must be careful not to leave ourselves in an inconsistent state.
   Thus we catch any error and re-raise it after cleaning up.  */

struct dl_open_args
{
  const char *file;
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;

  /* Original value of _ns_global_scope_pending_adds.  Set by
     dl_open_worker.  Only valid if nsid is a real namespace
     (non-negative).  */
  unsigned int original_global_scope_pending_adds;

  /* Set to true by dl_open_worker if libc.so was already loaded into
     the namespace at the time dl_open_worker was called.  This is
     used to determine whether libc.so early initialization has
     already been done before, and whether to roll back the cached
     libc_map value in the namespace in case of a dlopen failure.  */
  bool libc_already_loaded;

  /* Set to true if the end of dl_open_worker_begin was reached.  */
  bool worker_continue;

  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};
/* Called in case the global scope cannot be extended.  */
static void __attribute__ ((noreturn))
add_to_global_resize_failure (struct link_map *new)
{
  _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
		    N_ ("cannot extend global scope"));
}
/* Grow the global scope array for the namespace, so that all the new
   global objects can be added later in add_to_global_update, without
   risk of memory allocation failure.  add_to_global_resize raises
   exceptions for memory allocation errors.  */
static void
add_to_global_resize (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Count the objects we have to put in the global scope.  */
  unsigned int to_add = 0;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;

  /* The symbols of the new objects and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before the libc
     is loaded) the memory it uses is allocated by the malloc()-stub
     in the ld.so.  When we come here these functions are not used
     anymore.  Instead the malloc() implementation of the libc is
     used.  But this means the block from the main map cannot be used
     in a realloc() call.  Therefore we allocate a completely new
     array the first time we have to add something to the global scope.  */

  if (__builtin_add_overflow (ns->_ns_global_scope_pending_adds, to_add,
			      &ns->_ns_global_scope_pending_adds))
    add_to_global_resize_failure (new);

  unsigned int new_size = 0;	/* 0 means no new allocation.  */
  void *old_global = NULL;	/* Old allocation if free-able.  */

  /* Minimum required element count for resizing.  Adjusted below for
     an exponential resizing policy.  */
  size_t required_new_size;
  if (__builtin_add_overflow (ns->_ns_main_searchlist->r_nlist,
			      ns->_ns_global_scope_pending_adds,
			      &required_new_size))
    add_to_global_resize_failure (new);

  if (ns->_ns_global_scope_alloc == 0)
    {
      if (__builtin_add_overflow (required_new_size, 8, &new_size))
	add_to_global_resize_failure (new);
    }
  else if (required_new_size > ns->_ns_global_scope_alloc)
    {
      if (__builtin_mul_overflow (required_new_size, 2, &new_size))
	add_to_global_resize_failure (new);

      /* The old array was allocated with our malloc, not the minimal
	 malloc.  */
      old_global = ns->_ns_main_searchlist->r_list;
    }

  if (new_size > 0)
    {
      size_t allocation_size;
      if (__builtin_mul_overflow (new_size, sizeof (struct link_map *),
				  &allocation_size))
	add_to_global_resize_failure (new);
      struct link_map **new_global = malloc (allocation_size);
      if (new_global == NULL)
	add_to_global_resize_failure (new);

      /* Copy over the old entries.  */
      memcpy (new_global, ns->_ns_main_searchlist->r_list,
	      ns->_ns_main_searchlist->r_nlist * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_size;
      ns->_ns_main_searchlist->r_list = new_global;

      if (!RTLD_SINGLE_THREAD_P)
	THREAD_GSCOPE_WAIT ();

      free (old_global);
    }
}
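
/* A worked example of the policy above (illustrative numbers, not
   from a real run): with r_nlist == 3, to_add == 2 and no array yet
   (_ns_global_scope_alloc == 0), required_new_size is 5 and the
   first allocation holds 5 + 8 == 13 entries.  If required_new_size
   later grows to 14 > 13, the array is reallocated at 14 * 2 == 28
   entries.  The +8 slack and the doubling keep reallocations rare;
   every step is overflow-checked via __builtin_add_overflow /
   __builtin_mul_overflow.  */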
/* Actually add the new global objects to the global scope.  Must be
   called after add_to_global_resize.  This function cannot fail.  */
static void
add_to_global_update (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
	{
	  map->l_global = 1;

	  /* The array has been resized by add_to_global_resize.  */
	  assert (new_nlist < ns->_ns_global_scope_alloc);

	  ns->_ns_main_searchlist->r_list[new_nlist++] = map;

	  /* We modify the global scope.  Report this.  */
	  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
	    _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
			      map->l_name, map->l_ns);
	}
    }

  /* Some of the pending adds have been performed by the loop above.
     Adjust the counter accordingly.  */
  unsigned int added = new_nlist - ns->_ns_main_searchlist->r_nlist;
  assert (added <= ns->_ns_global_scope_pending_adds);
  ns->_ns_global_scope_pending_adds -= added;

  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;
}
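
/* For illustration: a consumer of the global scope conceptually
   iterates up to r_nlist, so the store ordering above matters (a
   simplified sketch, not the actual lookup code; consider () is a
   hypothetical callback):

     struct r_scope_elem *scope = ns->_ns_main_searchlist;
     unsigned int n = scope->r_nlist;	   // read the count first
     for (unsigned int i = 0; i < n; ++i)
       consider (scope->r_list[i]);

   add_to_global_update fills in the r_list entries before the write
   barrier and only then publishes the larger r_nlist, so a reader
   that observes the new count also observes initialized entries.  */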
/* Search link maps in all namespaces for the DSO that contains the object at
   address ADDR.  Returns the pointer to the link map of the matching DSO, or
   NULL if a match is not found.  */
struct link_map *
_dl_find_dso_for_object (const ElfW(Addr) addr)
{
  struct link_map *l;

  /* Find the highest-addressed object that ADDR is not below.  */
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
      if (addr >= l->l_map_start && addr < l->l_map_end
	  && (l->l_contiguous
	      || _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
	{
	  assert (ns == l->l_ns);
	  return l;
	}
  return NULL;
}
rtld_hidden_def (_dl_find_dso_for_object);
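
/* For illustration, a hypothetical helper (not part of this file)
   showing the intended use: map a code address, such as a dlopen
   caller's return address, back to the object containing it, the
   way dl_open_worker_begin does with caller_dlopen below.

     static const char *
     dso_name_of (const void *code_addr)
     {
       struct link_map *l
	 = _dl_find_dso_for_object ((ElfW(Addr)) code_addr);
       return l != NULL ? l->l_name : NULL;   // "" means the main program
     }
*/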
/* Return true if NEW is found in the scope for MAP.  */
static bool
scope_has_map (struct link_map *map, struct link_map *new)
{
  size_t cnt;
  for (cnt = 0; map->l_scope[cnt] != NULL; ++cnt)
    if (map->l_scope[cnt] == &new->l_searchlist)
      return true;
  return false;
}

/* Return the length of the scope for MAP.  */
static size_t
scope_size (struct link_map *map)
{
  size_t cnt;
  for (cnt = 0; map->l_scope[cnt] != NULL; )
    ++cnt;
  return cnt;
}
/* Resize the scopes of depended-upon objects, so that the new object
   can be added later without further allocation of memory.  This
   function can raise an exception due to malloc failure.  */
static void
resize_scopes (struct link_map *new)
{
  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* If the initializer has been called already, the object has
	 not been loaded here and now.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
	{
	  if (scope_has_map (imap, new))
	    /* Avoid duplicates.  */
	    continue;

	  size_t cnt = scope_size (imap);
	  if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max))
	    {
	      /* The l_scope array is too small.  Allocate a new one
		 dynamically.  */
	      size_t new_size;
	      struct r_scope_elem **newp;

	      if (imap->l_scope != imap->l_scope_mem
		  && imap->l_scope_max < array_length (imap->l_scope_mem))
		{
		  /* If the current l_scope memory is not pointing to
		     the static memory in the structure, but the
		     static memory in the structure is large enough to
		     use for cnt + 1 scope entries, then switch to
		     using the static memory.  */
		  new_size = array_length (imap->l_scope_mem);
		  newp = imap->l_scope_mem;
		}
	      else
		{
		  new_size = imap->l_scope_max * 2;
		  newp = (struct r_scope_elem **)
		    malloc (new_size * sizeof (struct r_scope_elem *));
		  if (newp == NULL)
		    _dl_signal_error (ENOMEM, "dlopen", NULL,
				      N_("cannot create scope list"));
		}

	      /* Copy the array and the terminating NULL.  */
	      memcpy (newp, imap->l_scope,
		      (cnt + 1) * sizeof (imap->l_scope[0]));
	      struct r_scope_elem **old = imap->l_scope;

	      imap->l_scope = newp;

	      if (old != imap->l_scope_mem)
		_dl_scope_free (old);

	      imap->l_scope_max = new_size;
	    }
	}
    }
}
/* Second stage of resize_scopes: Add NEW to the scopes.  Also print
   debugging information about scopes if requested.

   This function cannot raise an exception because all required memory
   has been allocated by a previous call to resize_scopes.  */
static void
update_scopes (struct link_map *new)
{
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];
      int from_scope = 0;

      if (imap->l_init_called && imap->l_type == lt_loaded)
	{
	  if (scope_has_map (imap, new))
	    /* Avoid duplicates.  */
	    continue;

	  size_t cnt = scope_size (imap);
	  /* Assert that resize_scopes has sufficiently enlarged the
	     array.  */
	  assert (cnt + 1 < imap->l_scope_max);

	  /* First terminate the extended list.  Otherwise a thread
	     might use the new last element and then use the garbage
	     at offset IDX+1.  */
	  imap->l_scope[cnt + 1] = NULL;
	  atomic_write_barrier ();
	  imap->l_scope[cnt] = &new->l_searchlist;

	  from_scope = cnt;
	}

      /* Print scope information.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
	_dl_show_scope (imap, from_scope);
    }
}
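
/* The resize_* / update_* pairs in this file follow a two-phase
   pattern: the resize_* half performs every fallible memory
   allocation up front, and the update_* half publishes the results
   and must not fail.  Condensed from dl_open_worker_begin below (a
   sketch of the shape, not the complete sequence):

     resize_scopes (new);		    // may signal ENOMEM
     bool any_tls = resize_tls_slotinfo (new);
     if (mode & RTLD_GLOBAL)
       add_to_global_resize (new);	    // may signal ENOMEM

     // Demarcation point: no recoverable errors after this.
     activate_nodelete (new);
     update_scopes (new);
     if (any_tls)
       update_tls_slotinfo (new);
*/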
/* Call _dl_add_to_slotinfo with DO_ADD set to false, to allocate
   space in GL (dl_tls_dtv_slotinfo_list).  This can raise an
   exception.  The return value is true if any of the new objects use
   TLS.  */
static bool
resize_tls_slotinfo (struct link_map *new)
{
  bool any_tls = false;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* Only add TLS memory if this object is loaded now and
	 therefore is not yet initialized.  */
      if (! imap->l_init_called && imap->l_tls_blocksize > 0)
	{
	  _dl_add_to_slotinfo (imap, false);
	  any_tls = true;
	}
    }
  return any_tls;
}
/* Second stage of TLS update, after resize_tls_slotinfo.  This
   function does not raise any exception.  It should only be called if
   resize_tls_slotinfo returned true.  */
static void
update_tls_slotinfo (struct link_map *new)
{
  unsigned int first_static_tls = new->l_searchlist.r_nlist;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* Only add TLS memory if this object is loaded now and
	 therefore is not yet initialized.  */
      if (! imap->l_init_called && imap->l_tls_blocksize > 0)
	{
	  _dl_add_to_slotinfo (imap, true);

	  if (imap->l_need_tls_init
	      && first_static_tls == new->l_searchlist.r_nlist)
	    first_static_tls = i;
	}
    }

  size_t newgen = GL(dl_tls_generation) + 1;
  if (__glibc_unlikely (newgen == 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));
  /* Can be read concurrently.  */
  atomic_store_relaxed (&GL(dl_tls_generation), newgen);

  /* We need a second pass for static tls data, because
     _dl_update_slotinfo must not be run while calls to
     _dl_add_to_slotinfo are still pending.  */
  for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      if (imap->l_need_tls_init
	  && ! imap->l_init_called
	  && imap->l_tls_blocksize > 0)
	{
	  /* For static TLS we have to allocate the memory here and
	     now, but we can delay updating the DTV.  */
	  imap->l_need_tls_init = 0;
#ifdef SHARED
	  /* Update the slot information data for at least the
	     generation of the DSO we are allocating data for.  */

	  /* FIXME: This can terminate the process on memory
	     allocation failure.  It is not possible to raise
	     exceptions from this context; to fix this bug,
	     _dl_update_slotinfo would have to be split into two
	     operations, similar to resize_scopes and update_scopes
	     above.  This is related to bug 16134.  */
	  _dl_update_slotinfo (imap->l_tls_modid);
#endif

	  dl_init_static_tls (imap);
	  assert (imap->l_need_tls_init == 0);
	}
    }
}
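
/* For illustration: the generation bump above is what makes other
   threads refresh their DTV on a later TLS access.  A simplified
   sketch of the consumer side (the real check lives in the TLS
   access paths, e.g. __tls_get_addr, not in this file):

     if (dtv[0].counter != atomic_load_relaxed (&GL(dl_tls_generation)))
       _dl_update_slotinfo (...);   // bring this thread up to date

   This is also why wrapping to zero is fatal above: a wrapped
   counter could make stale DTVs appear current.  */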
/* Mark the objects as NODELETE if required.  This is delayed until
   after dlopen failure is not possible, so that _dl_close can clean
   up objects if necessary.  */
static void
activate_nodelete (struct link_map *new)
{
  /* It is necessary to traverse the entire namespace.  References to
     objects in the global scope and unique symbol bindings can force
     NODELETE status for objects outside the local scope.  */
  for (struct link_map *l = GL (dl_ns)[new->l_ns]._ns_loaded; l != NULL;
       l = l->l_next)
    if (l->l_nodelete_pending)
      {
	if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES))
	  _dl_debug_printf ("activating NODELETE for %s [%lu]\n",
			    l->l_name, l->l_ns);

	/* The flag can already be true at this point, e.g. a signal
	   handler may have triggered lazy binding and set NODELETE
	   status immediately.  */
	l->l_nodelete_active = true;

	/* This is just a debugging aid, to indicate that
	   activate_nodelete has run for this map.  */
	l->l_nodelete_pending = false;
      }
}
/* struct dl_init_args and call_dl_init are used to call _dl_init with
   exception handling disabled.  */
struct dl_init_args
{
  struct link_map *new;
  int argc;
  char **argv;
  char **env;
};

static void
call_dl_init (void *closure)
{
  struct dl_init_args *args = closure;
  _dl_init (args->new, args->argc, args->argv, args->env);
}
static void
dl_open_worker_begin (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;

  /* Determine the caller's map if necessary.  This is needed in case
     we have a DST, when we don't know the namespace ID we have to put
     the new object in, or when the file name has no path in which
     case we need to look along the RUNPATH/RPATH of the caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

      /* We have to find out from which object the caller is calling.
	 By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

      struct link_map *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);

      if (l)
	call_map = l;

      if (args->nsid == __LM_ID_CALLER)
	args->nsid = call_map->l_ns;
    }
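
  /* For illustration: a DST (dynamic string token) is something like
     $ORIGIN in the file name, e.g.

       dlopen ("$ORIGIN/../plugins/libfoo.so", RTLD_NOW);

     (hypothetical path).  $ORIGIN expands relative to the directory
     of the calling object, which is one reason the caller's link map
     has to be determined above.  */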
  /* The namespace ID is now known.  Keep track of whether libc.so was
     already loaded, to determine whether it is necessary to call the
     early initialization routine (or clear libc_map on error).  */
  args->libc_already_loaded = GL(dl_ns)[args->nsid].libc_map != NULL;

  /* Retain the old value, so that it can be restored.  */
  args->original_global_scope_pending_adds
    = GL (dl_ns)[args->nsid]._ns_global_scope_pending_adds;

  /* One might be tempted to assert that we are RT_CONSISTENT at this
     point, but that may not be true if this is a recursive call to
     dlopen.  */
  _dl_debug_initialize (0, args->nsid);

  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
				    mode | __RTLD_CALLMAP, args->nsid);

  /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
     set and the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }
  if (__glibc_unlikely (mode & __RTLD_SPROF))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;

  /* It was already open.  */
  if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
    {
      /* Let the user know about the opencount.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
	_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
			  new->l_name, new->l_ns, new->l_direct_opencount);

      /* If the user requested the object to be in the global
	 namespace but it is not so far, prepare to add it now.  This
	 can raise an exception due to a malloc failure.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
	add_to_global_resize (new);

      /* Mark the object as not deletable if the RTLD_NODELETE flag
	 was passed.  */
      if (__glibc_unlikely (mode & RTLD_NODELETE))
	{
	  if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)
	      && !new->l_nodelete_active)
	    _dl_debug_printf ("marking %s [%lu] as NODELETE\n",
			      new->l_name, new->l_ns);
	  new->l_nodelete_active = true;
	}

      /* Finalize the addition to the global scope.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
	add_to_global_update (new);

      assert (_dl_debug_update (args->nsid)->r_state == RT_CONSISTENT);

      return;
    }
  /* Schedule NODELETE marking for the directly loaded object if
     requested.  */
  if (__glibc_unlikely (mode & RTLD_NODELETE))
    new->l_nodelete_pending = true;

  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
		       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));

  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      {
	struct link_map *map = new->l_searchlist.r_list[i]->l_real;
	_dl_check_map_versions (map, 0, 0);
#ifndef SHARED
	/* During static dlopen, check if ld.so has been loaded.
	   Perform partial initialization in this case.  This must
	   come after the symbol versioning initialization in
	   _dl_check_map_versions.  */
	if (map->l_info[DT_SONAME] != NULL
	    && strcmp (((const char *) D_PTR (map, l_info[DT_STRTAB])
			+ map->l_info[DT_SONAME]->d_un.d_val), LD_SO) == 0)
	  __rtld_static_init (map);
#endif
      }
#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  if (__glibc_unlikely (GLRO(dl_naudit) > 0))
    {
      struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
	{
	  struct audit_ifaces *afct = GLRO(dl_audit);
	  for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
	    {
	      if (afct->activity != NULL)
		{
		  struct auditstate *state = link_map_audit_state (head, cnt);
		  afct->activity (&state->cookie, LA_ACT_CONSISTENT);
		}

	      afct = afct->next;
	    }
	}
    }
#endif
  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_update (args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (map_complete, 3, args->nsid, r, new);

  _dl_open_check (new);

  /* Print scope information.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
    _dl_show_scope (new, 0);

  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;
  /* Objects must be sorted by dependency for the relocation process.
     This allows IFUNC relocations to work, and it also means copy
     relocations of dependencies are overwritten if necessary.
     _dl_map_object_deps has already sorted l_initfini for us.  */
  unsigned int first = UINT_MAX;
  unsigned int last = 0;
  unsigned int j = 0;
  struct link_map *l = new->l_initfini[0];
  do
    {
      if (! l->l_real->l_relocated)
	{
	  if (first == UINT_MAX)
	    first = j;
	  last = j + 1;
	}
      l = new->l_initfini[++j];
    }
  while (l != NULL);
  int relocation_in_progress = 0;

  /* Perform relocation.  This can trigger lazy binding in IFUNC
     resolvers.  For NODELETE mappings, these dependencies are not
     recorded because the flag has not been applied to the newly
     loaded objects.  This means that upon dlopen failure, these
     NODELETE objects can be unloaded despite existing references to
     them.  However, such relocation dependencies in IFUNC resolvers
     are undefined anyway, so this is not a problem.  */

  for (unsigned int i = last; i-- > first; )
    {
      l = new->l_initfini[i];

      if (l->l_real->l_relocated)
	continue;

      if (! relocation_in_progress)
	{
	  /* Notify the debugger that relocations are about to happen.  */
	  LIBC_PROBE (reloc_start, 2, args->nsid, r);
	  relocation_in_progress = 1;
	}

#ifdef SHARED
      if (__glibc_unlikely (GLRO(dl_profile) != NULL))
	{
	  /* If this is the shared object which we want to profile,
	     make sure the profile is started.  We can find out whether
	     this is necessary or not by observing the `_dl_profile_map'
	     variable.  If it was NULL but is not NULL afterwards we must
	     start the profiling.  */
	  struct link_map *old_profile_map = GL(dl_profile_map);

	  _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

	  if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
	    {
	      /* We must prepare the profiling.  */
	      _dl_start_profile ();

	      /* Prevent unloading the object.  */
	      GL(dl_profile_map)->l_nodelete_active = true;
	    }
	}
      else
#endif
	_dl_relocate_object (l, l->l_scope, reloc_mode, 0);
    }
  /* This only performs the memory allocations.  The actual update of
     the scopes happens below, after failure is impossible.  */
  resize_scopes (new);

  /* Increase the size of the GL (dl_tls_dtv_slotinfo_list) data
     structure.  */
  bool any_tls = resize_tls_slotinfo (new);

  /* Perform the necessary allocations for adding new global objects
     to the global scope below.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_resize (new);

  /* Demarcation point: After this, no recoverable errors are allowed.
     All memory allocations for new objects must have happened
     before.  */

  /* Finalize the NODELETE status first.  This comes before
     update_scopes, so that lazy binding will not see pending NODELETE
     state for newly loaded objects.  There is a compiler barrier in
     update_scopes which ensures that the changes from
     activate_nodelete are visible before new objects show up in the
     local scope.  */
  activate_nodelete (new);

  /* Second stage after resize_scopes: Actually perform the scope
     update.  After this, dlsym and lazy binding can bind to new
     objects.  */
  update_scopes (new);

  /* FIXME: It is unclear whether the order here is correct.
     Shouldn't new objects be made available for binding (and thus
     execution) only after their TLS data has been set up fully?
     Fixing bug 16134 will likely make this distinction less
     important.  */

  /* Second stage after resize_tls_slotinfo: Update the slotinfo data
     structures.  */
  if (any_tls)
    /* FIXME: This calls _dl_update_slotinfo, which aborts the process
       on memory allocation failure.  See bug 16134.  */
    update_tls_slotinfo (new);
  /* Notify the debugger all new objects have been relocated.  */
  if (relocation_in_progress)
    LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);

  /* If libc.so was not there before, attempt to call its early
     initialization routine.  Indicate to the initialization routine
     whether the libc being initialized is the one in the base
     namespace.  */
  if (!args->libc_already_loaded)
    {
      /* dlopen cannot be used to load an initial libc by design.  */
      struct link_map *libc_map = GL(dl_ns)[args->nsid].libc_map;
      _dl_call_libc_early_init (libc_map, false);
    }

  args->worker_continue = true;
}
static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;

  args->worker_continue = false;

  {
    /* Protects global and module specific TLS state.  */
    __rtld_lock_lock_recursive (GL(dl_load_tls_lock));

    struct dl_exception ex;
    int err = _dl_catch_exception (&ex, dl_open_worker_begin, args);

    __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));

    if (__glibc_unlikely (ex.errstring != NULL))
      /* Reraise the error.  */
      _dl_signal_exception (err, &ex, NULL);
  }

  if (!args->worker_continue)
    return;

  int mode = args->mode;
  struct link_map *new = args->map;

  /* Run the initializer functions of new objects.  Temporarily
     disable the exception handler, so that lazy binding failures are
     fatal.  */
  {
    struct dl_init_args init_args =
      {
	.new = new,
	.argc = args->argc,
	.argv = args->argv,
	.env = args->env
      };
    _dl_catch_exception (NULL, call_dl_init, &init_args);
  }

  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_update (new);

  /* Let the user know about the opencount.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
		      new->l_name, new->l_ns, new->l_direct_opencount);
}
void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
	  int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  if (__glibc_unlikely (nsid == LM_ID_NEWLM))
    {
      /* Find a new namespace.  */
      for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
	if (GL(dl_ns)[nsid]._ns_loaded == NULL)
	  break;

      if (__glibc_unlikely (nsid == DL_NNS))
	{
	  /* No more namespaces available.  */
	  __rtld_lock_unlock_recursive (GL(dl_load_lock));

	  _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
	}
      else if (nsid == GL(dl_nns))
	{
	  __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
	  ++GL(dl_nns);
	}

      _dl_debug_update (nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO in a namespace which is empty.  Such
     direct placement only causes problems.  Also don't allow loading
     into a namespace used for auditing.  */
  else if (__glibc_unlikely (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER)
	   && (__glibc_unlikely (nsid < 0 || nsid >= GL(dl_nns))
	       /* This prevents the [NSID] index expressions from being
		  evaluated, so the compiler won't think that we are
		  accessing an invalid index here in the !SHARED case where
		  DL_NNS is 1 and so any NSID != 0 is invalid.  */
	       || DL_NNS == 1
	       || GL(dl_ns)[nsid]._ns_nloaded == 0
	       || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
		      N_("invalid target namespace in dlmopen()"));
  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.map = NULL;
  args.nsid = nsid;
  /* args.libc_already_loaded is always assigned by dl_open_worker
     (before any explicit/non-local returns).  */
  args.argc = argc;
  args.argv = argv;
  args.env = env;

  struct dl_exception exception;
  int errcode = _dl_catch_exception (&exception, dl_open_worker, &args);

#if defined USE_LDCONFIG && !defined MAP_COPY
  /* We must unmap the cache file.  */
  _dl_unload_cache ();
#endif

  /* Do this for both the error and success cases.  The old value has
     only been determined if the namespace ID was assigned (i.e., it
     is not __LM_ID_CALLER).  In the success case, we actually may
     have consumed more pending adds than planned (because the local
     scopes overlap in case of a recursive dlopen, the inner dlopen
     doing some of the globalization work of the outer dlopen), so the
     old pending adds value is larger than absolutely necessary.
     Since it is just a conservative upper bound, this is harmless.
     The top-level dlopen call will restore the field to zero.  */
  if (args.nsid >= 0)
    GL (dl_ns)[args.nsid]._ns_global_scope_pending_adds
      = args.original_global_scope_pending_adds;

  /* See if an error occurred during loading.  */
  if (__glibc_unlikely (exception.errstring != NULL))
    {
      /* Avoid keeping around a dangling reference to the libc.so link
	 map in case it has been cached in libc_map.  */
      if (!args.libc_already_loaded)
	GL(dl_ns)[args.nsid].libc_map = NULL;

      /* Remove the object from memory.  It may be in an inconsistent
	 state if relocation failed, for example.  */
      if (args.map)
	{
	  _dl_close_worker (args.map, true);

	  /* All l_nodelete_pending objects should have been deleted
	     at this point, which is why it is not necessary to reset
	     the flag here.  */
	}

      assert (_dl_debug_update (args.nsid)->r_state == RT_CONSISTENT);

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Reraise the error.  */
      _dl_signal_exception (errcode, &exception, NULL);
    }

  assert (_dl_debug_update (args.nsid)->r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

  return args.map;
}
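
/* For illustration: the dlfcn wrappers reach _dl_open roughly like
   this (a simplified sketch; the real callers also pass internal
   flags such as __RTLD_DLOPEN):

     // dlopen (file, RTLD_NOW | RTLD_GLOBAL)
     _dl_open (file, RTLD_NOW | RTLD_GLOBAL, RETURN_ADDRESS (0),
	       __LM_ID_CALLER, __libc_argc, __libc_argv, __environ);

     // dlmopen (LM_ID_NEWLM, file, RTLD_LAZY): request a fresh namespace
     _dl_open (file, RTLD_LAZY, RETURN_ADDRESS (0),
	       LM_ID_NEWLM, __libc_argc, __libc_argv, __environ);

   A mode with neither RTLD_LAZY nor RTLD_NOW fails the
   RTLD_BINDING_MASK check above with "invalid mode for dlopen()".  */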
void
_dl_show_scope (struct link_map *l, int from)
{
  _dl_debug_printf ("object=%s [%lu]\n",
		    DSO_FILENAME (l->l_name), l->l_ns);
  if (l->l_scope != NULL)
    for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
      {
	_dl_debug_printf (" scope %u:", scope_cnt);

	for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
	  if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
	    _dl_debug_printf_c (" %s",
				l->l_scope[scope_cnt]->r_list[cnt]->l_name);
	  else
	    _dl_debug_printf_c (" %s", RTLD_PROGNAME);

	_dl_debug_printf_c ("\n");
      }
  else
    _dl_debug_printf (" no scope\n");
  _dl_debug_printf ("\n");
}