/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>		/* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <atomic.h>
#include <libc-internal.h>
#include <array_length.h>
#include <libc-early-init.h>
#include <gnu/lib-names.h>

#include <dl-dst.h>
#include <dl-prop.h>
/* We must be careful not to leave ourselves in an inconsistent state.
   Thus we catch any error and re-raise it after cleaning up.  */

struct dl_open_args
{
  const char *file;
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;

  /* Original value of _ns_global_scope_pending_adds.  Set by
     dl_open_worker.  Only valid if nsid is a real namespace
     (non-negative).  */
  unsigned int original_global_scope_pending_adds;

  /* Set to true by dl_open_worker if libc.so was already loaded into
     the namespace at the time dl_open_worker was called.  This is
     used to determine whether libc.so early initialization has
     already been done before, and whether to roll back the cached
     libc_map value in the namespace in case of a dlopen failure.  */
  bool libc_already_loaded;

  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};
/* Called in case the global scope cannot be extended.  */
static void __attribute__ ((noreturn))
add_to_global_resize_failure (struct link_map *new)
{
  _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
		    N_ ("cannot extend global scope"));
}
/* Grow the global scope array for the namespace, so that all the new
   global objects can be added later in add_to_global_update, without
   risk of memory allocation failure.  add_to_global_resize raises
   exceptions for memory allocation errors.  */
static void
add_to_global_resize (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Count the objects we have to put in the global scope.  */
  unsigned int to_add = 0;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;

  /* The symbols of the new object and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before libc was
     loaded) the memory it uses is allocated by the malloc() stub in
     ld.so.  By the time we get here those functions are no longer
     used; instead the malloc() implementation of libc is used.  This
     means the block from the main map cannot be passed to realloc().
     Therefore we allocate a completely new array the first time we
     have to add something to the global scope.  */

  if (__builtin_add_overflow (ns->_ns_global_scope_pending_adds, to_add,
			      &ns->_ns_global_scope_pending_adds))
    add_to_global_resize_failure (new);

  unsigned int new_size = 0; /* 0 means no new allocation.  */
  void *old_global = NULL; /* Old allocation if free-able.  */

  /* Minimum required element count for resizing.  Adjusted below for
     an exponential resizing policy.  */
  size_t required_new_size;
  if (__builtin_add_overflow (ns->_ns_main_searchlist->r_nlist,
			      ns->_ns_global_scope_pending_adds,
			      &required_new_size))
    add_to_global_resize_failure (new);

  if (ns->_ns_global_scope_alloc == 0)
    {
      if (__builtin_add_overflow (required_new_size, 8, &new_size))
	add_to_global_resize_failure (new);
    }
  else if (required_new_size > ns->_ns_global_scope_alloc)
    {
      if (__builtin_mul_overflow (required_new_size, 2, &new_size))
	add_to_global_resize_failure (new);

      /* The old array was allocated with our malloc, not the minimal
	 malloc.  */
      old_global = ns->_ns_main_searchlist->r_list;
    }

  if (new_size > 0)
    {
      size_t allocation_size;
      if (__builtin_mul_overflow (new_size, sizeof (struct link_map *),
				  &allocation_size))
	add_to_global_resize_failure (new);
      struct link_map **new_global = malloc (allocation_size);
      if (new_global == NULL)
	add_to_global_resize_failure (new);

      /* Copy over the old entries.  */
      memcpy (new_global, ns->_ns_main_searchlist->r_list,
	      ns->_ns_main_searchlist->r_nlist * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_size;
      ns->_ns_main_searchlist->r_list = new_global;

      if (!RTLD_SINGLE_THREAD_P)
	THREAD_GSCOPE_WAIT ();

      free (old_global);
    }
}
/* Actually add the new global objects to the global scope.  Must be
   called after add_to_global_resize.  This function cannot fail.  */
static void
add_to_global_update (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
	{
	  map->l_global = 1;

	  /* The array has been resized by add_to_global_resize.  */
	  assert (new_nlist < ns->_ns_global_scope_alloc);

	  ns->_ns_main_searchlist->r_list[new_nlist++] = map;

	  /* We modify the global scope.  Report this.  */
	  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
	    _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
			      map->l_name, map->l_ns);
	}
    }

  /* Some of the pending adds have been performed by the loop above.
     Adjust the counter accordingly.  */
  unsigned int added = new_nlist - ns->_ns_main_searchlist->r_nlist;
  assert (added <= ns->_ns_global_scope_pending_adds);
  ns->_ns_global_scope_pending_adds -= added;

  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;
}
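
/* Illustrative sketch (hypothetical caller): the two functions above
   form a prepare/commit pair.  Memory is reserved while exceptions
   may still be raised, and the commit step cannot fail:

     add_to_global_resize (new);   // may raise an ENOMEM exception
     ... other fallible preparation steps ...
     add_to_global_update (new);   // cannot fail

   dl_open_worker below follows this sequence for RTLD_GLOBAL
   requests.  */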
/* Search link maps in all namespaces for the DSO that contains the object at
   address ADDR.  Returns the pointer to the link map of the matching DSO, or
   NULL if a match is not found.  */
struct link_map *
_dl_find_dso_for_object (const ElfW(Addr) addr)
{
  struct link_map *l;

  /* Find the highest-addressed object that ADDR is not below.  */
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
      if (addr >= l->l_map_start && addr < l->l_map_end
	  && (l->l_contiguous
	      || _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
	{
	  assert (ns == l->l_ns);
	  return l;
	}
  return NULL;
}
rtld_hidden_def (_dl_find_dso_for_object);
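
/* Usage sketch (hypothetical caller): mapping a code address back to
   its DSO, much as dl_open_worker below does for the dlopen caller:

     struct link_map *l
       = _dl_find_dso_for_object ((ElfW(Addr)) __builtin_return_address (0));
     if (l != NULL)
       // l->l_ns and the caller's RUNPATH/RPATH can now be consulted.

   A NULL result means the address does not fall inside any loaded
   object.  */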
/* Return true if NEW is found in the scope for MAP.  */
static bool
scope_has_map (struct link_map *map, struct link_map *new)
{
  size_t cnt;
  for (cnt = 0; map->l_scope[cnt] != NULL; ++cnt)
    if (map->l_scope[cnt] == &new->l_searchlist)
      return true;
  return false;
}
/* Return the length of the scope for MAP.  */
static size_t
scope_size (struct link_map *map)
{
  size_t cnt;
  for (cnt = 0; map->l_scope[cnt] != NULL; )
    ++cnt;
  return cnt;
}
/* Resize the scopes of depended-upon objects, so that the new object
   can be added later without further allocation of memory.  This
   function can raise an exception due to a malloc failure.  */
static void
resize_scopes (struct link_map *new)
{
  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* If the initializer has been called already, the object has
	 not been loaded here and now.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
	{
	  if (scope_has_map (imap, new))
	    /* Avoid duplicates.  */
	    continue;

	  size_t cnt = scope_size (imap);
	  if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max))
	    {
	      /* The l_scope array is too small.  Allocate a new one
		 dynamically.  */
	      size_t new_size;
	      struct r_scope_elem **newp;

	      if (imap->l_scope != imap->l_scope_mem
		  && imap->l_scope_max < array_length (imap->l_scope_mem))
		{
		  /* If the current l_scope memory is not pointing to
		     the static memory in the structure, but the
		     static memory in the structure is large enough to
		     use for cnt + 1 scope entries, then switch to
		     using the static memory.  */
		  new_size = array_length (imap->l_scope_mem);
		  newp = imap->l_scope_mem;
		}
	      else
		{
		  new_size = imap->l_scope_max * 2;
		  newp = (struct r_scope_elem **)
		    malloc (new_size * sizeof (struct r_scope_elem *));
		  if (newp == NULL)
		    _dl_signal_error (ENOMEM, "dlopen", NULL,
				      N_("cannot create scope list"));
		}

	      /* Copy the array and the terminating NULL.  */
	      memcpy (newp, imap->l_scope,
		      (cnt + 1) * sizeof (imap->l_scope[0]));
	      struct r_scope_elem **old = imap->l_scope;

	      imap->l_scope = newp;

	      if (old != imap->l_scope_mem)
		_dl_scope_free (old);

	      imap->l_scope_max = new_size;
	    }
	}
    }
}
/* Second stage of resize_scopes: Add NEW to the scopes.  Also print
   debugging information about scopes if requested.

   This function cannot raise an exception because all required memory
   has been allocated by a previous call to resize_scopes.  */
static void
update_scopes (struct link_map *new)
{
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];
      int from_scope = 0;

      if (imap->l_init_called && imap->l_type == lt_loaded)
	{
	  if (scope_has_map (imap, new))
	    /* Avoid duplicates.  */
	    continue;

	  size_t cnt = scope_size (imap);
	  /* Assert that resize_scopes has sufficiently enlarged the
	     array.  */
	  assert (cnt + 1 < imap->l_scope_max);

	  /* First terminate the extended list.  Otherwise a thread
	     might use the new last element and then use the garbage
	     at offset IDX+1.  */
	  imap->l_scope[cnt + 1] = NULL;
	  atomic_write_barrier ();
	  imap->l_scope[cnt] = &new->l_searchlist;

	  from_scope = cnt;
	}

      /* Print scope information.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
	_dl_show_scope (imap, from_scope);
    }
}
/* Call _dl_add_to_slotinfo with DO_ADD set to false, to allocate
   space in GL (dl_tls_dtv_slotinfo_list).  This can raise an
   exception.  The return value is true if any of the new objects use
   TLS.  */
static bool
resize_tls_slotinfo (struct link_map *new)
{
  bool any_tls = false;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* Only add TLS memory if this object is loaded now and
	 therefore is not yet initialized.  */
      if (! imap->l_init_called && imap->l_tls_blocksize > 0)
	{
	  _dl_add_to_slotinfo (imap, false);
	  any_tls = true;
	}
    }
  return any_tls;
}
/* Second stage of TLS update, after resize_tls_slotinfo.  This
   function does not raise any exception.  It should only be called if
   resize_tls_slotinfo returned true.  */
static void
update_tls_slotinfo (struct link_map *new)
{
  unsigned int first_static_tls = new->l_searchlist.r_nlist;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* Only add TLS memory if this object is loaded now and
	 therefore is not yet initialized.  */
      if (! imap->l_init_called && imap->l_tls_blocksize > 0)
	{
	  _dl_add_to_slotinfo (imap, true);

	  if (imap->l_need_tls_init
	      && first_static_tls == new->l_searchlist.r_nlist)
	    first_static_tls = i;
	}
    }

  size_t newgen = GL(dl_tls_generation) + 1;
  if (__glibc_unlikely (newgen == 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));
  /* Can be read concurrently.  */
  atomic_store_relaxed (&GL(dl_tls_generation), newgen);

  /* We need a second pass for static tls data, because
     _dl_update_slotinfo must not be run while calls to
     _dl_add_to_slotinfo are still pending.  */
  for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      if (imap->l_need_tls_init
	  && ! imap->l_init_called
	  && imap->l_tls_blocksize > 0)
	{
	  /* For static TLS we have to allocate the memory here and
	     now, but we can delay updating the DTV.  */
	  imap->l_need_tls_init = 0;
#ifdef SHARED
	  /* Update the slot information data for at least the
	     generation of the DSO we are allocating data for.  */

	  /* FIXME: This can terminate the process on memory
	     allocation failure.  It is not possible to raise
	     exceptions from this context; to fix this bug,
	     _dl_update_slotinfo would have to be split into two
	     operations, similar to resize_scopes and update_scopes
	     above.  This is related to bug 16134.  */
	  _dl_update_slotinfo (imap->l_tls_modid);
#endif

	  dl_init_static_tls (imap);
	  assert (imap->l_need_tls_init == 0);
	}
    }
}
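
/* Sketch of the overall prepare/commit order in dl_open_worker below
   (assumed summary; see the demarcation comment there):

     resize_scopes (new);                       // may raise
     bool any_tls = resize_tls_slotinfo (new);  // may raise
     if (mode & RTLD_GLOBAL)
       add_to_global_resize (new);              // may raise
     // --- no recoverable errors allowed past this point ---
     activate_nodelete (new);
     update_scopes (new);
     if (any_tls)
       update_tls_slotinfo (new);
*/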
/* Mark the objects as NODELETE if required.  This is delayed until
   after dlopen failure is not possible, so that _dl_close can clean
   up objects if necessary.  */
static void
activate_nodelete (struct link_map *new)
{
  /* It is necessary to traverse the entire namespace.  References to
     objects in the global scope and unique symbol bindings can force
     NODELETE status for objects outside the local scope.  */
  for (struct link_map *l = GL (dl_ns)[new->l_ns]._ns_loaded; l != NULL;
       l = l->l_next)
    if (l->l_nodelete_pending)
      {
	if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES))
	  _dl_debug_printf ("activating NODELETE for %s [%lu]\n",
			    l->l_name, l->l_ns);

	/* The flag can already be true at this point, e.g. a signal
	   handler may have triggered lazy binding and set NODELETE
	   status immediately.  */
	l->l_nodelete_active = true;

	/* This is just a debugging aid, to indicate that
	   activate_nodelete has run for this map.  */
	l->l_nodelete_pending = false;
      }
}
/* struct dl_init_args and call_dl_init are used to call _dl_init with
   exception handling disabled.  */
struct dl_init_args
{
  struct link_map *new;
  int argc;
  char **argv;
  char **env;
};

static void
call_dl_init (void *closure)
{
  struct dl_init_args *args = closure;
  _dl_init (args->new, args->argc, args->argv, args->env);
}
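
/* Sketch: passing a NULL exception slot to _dl_catch_exception
   disables unwinding, so an error raised while ELF constructors run
   (e.g. a lazy-binding failure) is fatal rather than recoverable:

     _dl_catch_exception (NULL, call_dl_init, &init_args);

   dl_open_worker below invokes call_dl_init exactly this way.  */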
static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;

  /* Determine the caller's map if necessary.  This is needed in case
     we have a DST, when we don't know the namespace ID we have to put
     the new object in, or when the file name has no path in which
     case we need to look along the RUNPATH/RPATH of the caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

      /* We have to find out from which object the caller is calling.
	 By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

      struct link_map *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);

      if (l)
	call_map = l;

      if (args->nsid == __LM_ID_CALLER)
	args->nsid = call_map->l_ns;
    }

  /* The namespace ID is now known.  Keep track of whether libc.so was
     already loaded, to determine whether it is necessary to call the
     early initialization routine (or clear libc_map on error).  */
  args->libc_already_loaded = GL(dl_ns)[args->nsid].libc_map != NULL;

  /* Retain the old value, so that it can be restored.  */
  args->original_global_scope_pending_adds
    = GL (dl_ns)[args->nsid]._ns_global_scope_pending_adds;

  /* One might be tempted to assert that we are RT_CONSISTENT at this
     point, but that may not be true if this is a recursive call to
     dlopen.  */
  _dl_debug_initialize (0, args->nsid);

  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
				    mode | __RTLD_CALLMAP, args->nsid);

  /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
     set and the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }

  if (__glibc_unlikely (mode & __RTLD_SPROF))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;

  /* It was already open.  */
  if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
    {
      /* Let the user know about the opencount.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
	_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
			  new->l_name, new->l_ns, new->l_direct_opencount);

      /* If the user requested the object to be in the global
	 namespace but it is not so far, prepare to add it now.  This
	 can raise an exception due to a malloc failure.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
	add_to_global_resize (new);

      /* Mark the object as not deletable if the RTLD_NODELETE flag
	 was passed.  */
      if (__glibc_unlikely (mode & RTLD_NODELETE))
	{
	  if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)
	      && !new->l_nodelete_active)
	    _dl_debug_printf ("marking %s [%lu] as NODELETE\n",
			      new->l_name, new->l_ns);
	  new->l_nodelete_active = true;
	}

      /* Finalize the addition to the global scope.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
	add_to_global_update (new);

      assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);

      return;
    }
  /* Schedule NODELETE marking for the directly loaded object if
     requested.  */
  if (__glibc_unlikely (mode & RTLD_NODELETE))
    new->l_nodelete_pending = true;

  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
		       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));

  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      {
	struct link_map *map = new->l_searchlist.r_list[i]->l_real;
	_dl_check_map_versions (map, 0, 0);
#ifndef SHARED
	/* During static dlopen, check if ld.so has been loaded.
	   Perform partial initialization in this case.  This must
	   come after the symbol versioning initialization in
	   _dl_check_map_versions.  */
	if (map->l_info[DT_SONAME] != NULL
	    && strcmp (((const char *) D_PTR (map, l_info[DT_STRTAB])
			+ map->l_info[DT_SONAME]->d_un.d_val), LD_SO) == 0)
	  __rtld_static_init (map);
#endif
      }

#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  if (__glibc_unlikely (GLRO(dl_naudit) > 0))
    {
      struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
	{
	  struct audit_ifaces *afct = GLRO(dl_audit);
	  for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
	    {
	      if (afct->activity != NULL)
		{
		  struct auditstate *state = link_map_audit_state (head, cnt);
		  afct->activity (&state->cookie, LA_ACT_CONSISTENT);
		}

	      afct = afct->next;
	    }
	}
    }
#endif
  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_initialize (0, args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (map_complete, 3, args->nsid, r, new);

  _dl_open_check (new);

  /* Print scope information.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
    _dl_show_scope (new, 0);

  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;

  /* Objects must be sorted by dependency for the relocation process.
     This allows IFUNC relocations to work and it also means copy
     relocations of dependencies are overwritten if necessary.
     __dl_map_object_deps has already sorted l_initfini for us.  */
  unsigned int first = UINT_MAX;
  unsigned int last = 0;
  unsigned int j = 0;
  struct link_map *l = new->l_initfini[0];
  do
    {
      if (! l->l_real->l_relocated)
	{
	  if (first == UINT_MAX)
	    first = j;
	  last = j + 1;
	}
      l = new->l_initfini[++j];
    }
  while (l != NULL);
  int relocation_in_progress = 0;

  /* Perform relocation.  This can trigger lazy binding in IFUNC
     resolvers.  For NODELETE mappings, these dependencies are not
     recorded because the flag has not been applied to the newly
     loaded objects.  This means that upon dlopen failure, these
     NODELETE objects can be unloaded despite existing references to
     them.  However, such relocation dependencies in IFUNC resolvers
     are undefined anyway, so this is not a problem.  */

  for (unsigned int i = last; i-- > first; )
    {
      l = new->l_initfini[i];

      if (l->l_real->l_relocated)
	continue;

      if (! relocation_in_progress)
	{
	  /* Notify the debugger that relocations are about to happen.  */
	  LIBC_PROBE (reloc_start, 2, args->nsid, r);
	  relocation_in_progress = 1;
	}

#ifdef SHARED
      if (__glibc_unlikely (GLRO(dl_profile) != NULL))
	{
	  /* If this here is the shared object which we want to profile
	     make sure the profile is started.  We can find out whether
	     this is necessary or not by observing the `_dl_profile_map'
	     variable.  If it was NULL but is not NULL afterwards we must
	     start the profiling.  */
	  struct link_map *old_profile_map = GL(dl_profile_map);

	  _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

	  if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
	    {
	      /* We must prepare the profiling.  */
	      _dl_start_profile ();

	      /* Prevent unloading the object.  */
	      GL(dl_profile_map)->l_nodelete_active = true;
	    }
	}
      else
#endif
	_dl_relocate_object (l, l->l_scope, reloc_mode, 0);
    }
  /* This only performs the memory allocations.  The actual update of
     the scopes happens below, after failure is impossible.  */
  resize_scopes (new);

  /* Increase the size of the GL (dl_tls_dtv_slotinfo_list) data
     structure.  */
  bool any_tls = resize_tls_slotinfo (new);

  /* Perform the necessary allocations for adding new global objects
     to the global scope below.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_resize (new);

  /* Demarcation point: After this, no recoverable errors are allowed.
     All memory allocations for new objects must have happened
     before.  */

  /* Finalize the NODELETE status first.  This comes before
     update_scopes, so that lazy binding will not see pending NODELETE
     state for newly loaded objects.  There is a compiler barrier in
     update_scopes which ensures that the changes from
     activate_nodelete are visible before new objects show up in the
     local scope.  */
  activate_nodelete (new);

  /* Second stage after resize_scopes: Actually perform the scope
     update.  After this, dlsym and lazy binding can bind to new
     objects.  */
  update_scopes (new);

  /* FIXME: It is unclear whether the order here is correct.
     Shouldn't new objects be made available for binding (and thus
     execution) only after their TLS data has been set up fully?
     Fixing bug 16134 will likely make this distinction less
     important.  */

  /* Second stage after resize_tls_slotinfo: Update the slotinfo data
     structures.  */
  if (any_tls)
    /* FIXME: This calls _dl_update_slotinfo, which aborts the process
       on memory allocation failure.  See bug 16134.  */
    update_tls_slotinfo (new);
  /* Notify the debugger all new objects have been relocated.  */
  if (relocation_in_progress)
    LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);

  /* If libc.so was not there before, attempt to call its early
     initialization routine.  Indicate to the initialization routine
     whether the libc being initialized is the one in the base
     namespace.  */
  if (!args->libc_already_loaded)
    {
      struct link_map *libc_map = GL(dl_ns)[args->nsid].libc_map;
#ifdef SHARED
      bool initial = libc_map->l_ns == LM_ID_BASE;
#else
      /* In the static case, there is only one namespace, but it
	 contains a secondary libc (the primary libc is statically
	 linked).  */
      bool initial = false;
#endif
      _dl_call_libc_early_init (libc_map, initial);
    }

  /* Run the initializer functions of new objects.  Temporarily
     disable the exception handler, so that lazy binding failures are
     fatal.  */
  {
    struct dl_init_args init_args =
      {
	.new = new,
	.argc = args->argc,
	.argv = args->argv,
	.env = args->env
      };
    _dl_catch_exception (NULL, call_dl_init, &init_args);
  }

  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_update (new);

  /* Let the user know about the opencount.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
		      new->l_name, new->l_ns, new->l_direct_opencount);
}
void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
	  int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  if (__glibc_unlikely (nsid == LM_ID_NEWLM))
    {
      /* Find a new namespace.  */
      for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
	if (GL(dl_ns)[nsid]._ns_loaded == NULL)
	  break;

      if (__glibc_unlikely (nsid == DL_NNS))
	{
	  /* No more namespaces available.  */
	  __rtld_lock_unlock_recursive (GL(dl_load_lock));

	  _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
	}
      else if (nsid == GL(dl_nns))
	{
	  __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
	  ++GL(dl_nns);
	}

      _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO in a namespace which is empty.  Such
     direct placement only causes problems.  Also don't allow loading
     into a namespace used for auditing.  */
  else if (__glibc_unlikely (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER)
	   && (__glibc_unlikely (nsid < 0 || nsid >= GL(dl_nns))
	       /* This prevents the [NSID] index expressions from being
		  evaluated, so the compiler won't think that we are
		  accessing an invalid index here in the !SHARED case where
		  DL_NNS is 1 and so any NSID != 0 is invalid.  */
	       || DL_NNS == 1
	       || GL(dl_ns)[nsid]._ns_nloaded == 0
	       || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
		      N_("invalid target namespace in dlmopen()"));
  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.map = NULL;
  args.nsid = nsid;
  /* args.libc_already_loaded is always assigned by dl_open_worker
     (before any explicit/non-local returns).  */
  args.argc = argc;
  args.argv = argv;
  args.env = env;

  struct dl_exception exception;
  int errcode = _dl_catch_exception (&exception, dl_open_worker, &args);

#if defined USE_LDCONFIG && !defined MAP_COPY
  /* We must unmap the cache file.  */
  _dl_unload_cache ();
#endif

  /* Do this for both the error and success cases.  The old value has
     only been determined if the namespace ID was assigned (i.e., it
     is not __LM_ID_CALLER).  In the success case, we actually may
     have consumed more pending adds than planned (because the local
     scopes overlap in case of a recursive dlopen, the inner dlopen
     doing some of the globalization work of the outer dlopen), so the
     old pending adds value is larger than absolutely necessary.
     Since it is just a conservative upper bound, this is harmless.
     The top-level dlopen call will restore the field to zero.  */
  if (args.nsid >= 0)
    GL (dl_ns)[args.nsid]._ns_global_scope_pending_adds
      = args.original_global_scope_pending_adds;
  /* See if an error occurred during loading.  */
  if (__glibc_unlikely (exception.errstring != NULL))
    {
      /* Avoid keeping around a dangling reference to the libc.so link
	 map in case it has been cached in libc_map.  */
      if (!args.libc_already_loaded)
	GL(dl_ns)[nsid].libc_map = NULL;

      /* Remove the object from memory.  It may be in an inconsistent
	 state if relocation failed, for example.  */
      if (args.map)
	{
	  /* Maybe some of the modules which were loaded use TLS.
	     Since it will be removed in the following _dl_close call
	     we have to mark the dtv array as having gaps to fill the
	     holes.  This is a pessimistic assumption which won't hurt
	     if not true.  There is no need to do this when we are
	     loading the auditing DSOs since TLS has not yet been set
	     up.  */
	  if ((mode & __RTLD_AUDIT) == 0)
	    GL(dl_tls_dtv_gaps) = true;

	  _dl_close_worker (args.map, true);

	  /* All l_nodelete_pending objects should have been deleted
	     at this point, which is why it is not necessary to reset
	     the flag here.  */
	}

      assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Reraise the error.  */
      _dl_signal_exception (errcode, &exception, NULL);
    }

  assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

  return args.map;
}
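
/* Usage sketch (application side, hypothetical): the public dlopen
   and dlmopen entry points funnel into _dl_open above, e.g.:

     void *handle = dlmopen (LM_ID_NEWLM, "libfoo.so", RTLD_NOW);

   RTLD_NOW/RTLD_LAZY satisfy the RTLD_BINDING_MASK check at the top
   of _dl_open, and LM_ID_NEWLM triggers the namespace search loop.  */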
void
_dl_show_scope (struct link_map *l, int from)
{
  _dl_debug_printf ("object=%s [%lu]\n",
		    DSO_FILENAME (l->l_name), l->l_ns);
  if (l->l_scope != NULL)
    for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
      {
	_dl_debug_printf (" scope %u:", scope_cnt);

	for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
	  if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
	    _dl_debug_printf_c (" %s",
				l->l_scope[scope_cnt]->r_list[cnt]->l_name);
	  else
	    _dl_debug_printf_c (" %s", RTLD_PROGNAME);

	_dl_debug_printf_c ("\n");
      }
  else
    _dl_debug_printf (" no scope\n");
  _dl_debug_printf ("\n");
}