/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>		/* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <atomic.h>
#include <libc-internal.h>
#include <array_length.h>
#include <libc-early-init.h>

/* We must be careful not to leave us in an inconsistent state.  Thus we
   catch any error and re-raise it after cleaning up.  */

struct dl_open_args
{
  const char *file;
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;

  /* Original value of _ns_global_scope_pending_adds.  Set by
     dl_open_worker.  Only valid if nsid is a real (non-negative)
     namespace ID.  */
  unsigned int original_global_scope_pending_adds;

  /* Set to true by dl_open_worker if libc.so was already loaded into
     the namespace at the time dl_open_worker was called.  This is
     used to determine whether libc.so early initialization has
     already been done before, and whether to roll back the cached
     libc_map value in the namespace in case of a dlopen failure.  */
  bool libc_already_loaded;

  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};

/* Called in case the global scope cannot be extended.  */
static void __attribute__ ((noreturn))
add_to_global_resize_failure (struct link_map *new)
{
  _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
                    N_ ("cannot extend global scope"));
}

/* Grow the global scope array for the namespace, so that all the new
   global objects can be added later in add_to_global_update, without
   risk of memory allocation failure.  add_to_global_resize raises
   exceptions for memory allocation errors.  */
static void
add_to_global_resize (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Count the objects we have to put in the global scope.  */
  unsigned int to_add = 0;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;

  /* The symbols of the new object and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before libc was
     loaded) the memory it uses is allocated by the malloc() stub in
     ld.so.  By the time we get here those functions are no longer
     used; instead the malloc() implementation of libc is used.  But
     this means the block from the main map cannot be passed to
     realloc().  Therefore we allocate a completely new array the
     first time we have to add something to the global scope.  */

  if (__builtin_add_overflow (ns->_ns_global_scope_pending_adds, to_add,
                              &ns->_ns_global_scope_pending_adds))
    add_to_global_resize_failure (new);

  unsigned int new_size = 0; /* 0 means no new allocation.  */
  void *old_global = NULL; /* Old allocation if free-able.  */

  /* Minimum required element count for resizing.  Adjusted below for
     an exponential resizing policy.  */
  size_t required_new_size;
  if (__builtin_add_overflow (ns->_ns_main_searchlist->r_nlist,
                              ns->_ns_global_scope_pending_adds,
                              &required_new_size))
    add_to_global_resize_failure (new);

  if (ns->_ns_global_scope_alloc == 0)
    {
      if (__builtin_add_overflow (required_new_size, 8, &new_size))
        add_to_global_resize_failure (new);
    }
  else if (required_new_size > ns->_ns_global_scope_alloc)
    {
      if (__builtin_mul_overflow (required_new_size, 2, &new_size))
        add_to_global_resize_failure (new);

      /* The old array was allocated with our malloc, not the minimal
         malloc.  */
      old_global = ns->_ns_main_searchlist->r_list;
    }

  if (new_size > 0)
    {
      size_t allocation_size;
      if (__builtin_mul_overflow (new_size, sizeof (struct link_map *),
                                  &allocation_size))
        add_to_global_resize_failure (new);
      struct link_map **new_global = malloc (allocation_size);
      if (new_global == NULL)
        add_to_global_resize_failure (new);

      /* Copy over the old entries.  */
      memcpy (new_global, ns->_ns_main_searchlist->r_list,
              ns->_ns_main_searchlist->r_nlist * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_size;
      ns->_ns_main_searchlist->r_list = new_global;

      if (!RTLD_SINGLE_THREAD_P)
        THREAD_GSCOPE_WAIT ();

      free (old_global);
    }
}
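
/* Editorial sketch (not part of the original source): the growth
   policy above, isolated as a plain helper.  For example, with
   r_nlist = 5 global entries, 3 pending adds, and no prior
   allocation, the first allocation holds 5 + 3 + 8 = 16 entries; a
   later requirement of 17 entries doubles to 34.

     static size_t
     global_scope_new_size (size_t current_alloc, size_t required)
     {
       if (current_alloc == 0)
         return required + 8;   // initial allocation, with slack
       if (required > current_alloc)
         return required * 2;   // exponential growth on overflow
       return 0;                // existing array is large enough
     }
*/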

/* Actually add the new global objects to the global scope.  Must be
   called after add_to_global_resize.  This function cannot fail.  */
static void
add_to_global_update (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
        {
          map->l_global = 1;

          /* The array has been resized by add_to_global_resize.  */
          assert (new_nlist < ns->_ns_global_scope_alloc);

          ns->_ns_main_searchlist->r_list[new_nlist++] = map;

          /* We modify the global scope.  Report this.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
            _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
                              map->l_name, map->l_ns);
        }
    }

  /* Some of the pending adds have been performed by the loop above.
     Adjust the counter accordingly.  */
  unsigned int added = new_nlist - ns->_ns_main_searchlist->r_nlist;
  assert (added <= ns->_ns_global_scope_pending_adds);
  ns->_ns_global_scope_pending_adds -= added;

  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;
}
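
/* Editorial note: the atomic_write_barrier above pairs with readers of
   the global scope: the new r_list entries are stored first, then the
   barrier, then the enlarged r_nlist.  A reader that observes the new
   count is therefore guaranteed to see initialized entries.  */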

/* Search link maps in all namespaces for the DSO that contains the object at
   address ADDR.  Returns the pointer to the link map of the matching DSO, or
   NULL if a match is not found.  */
struct link_map *
_dl_find_dso_for_object (const ElfW(Addr) addr)
{
  struct link_map *l;

  /* Find the highest-addressed object that ADDR is not below.  */
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
      if (addr >= l->l_map_start && addr < l->l_map_end
          && (l->l_contiguous
              || _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
        {
          assert (ns == l->l_ns);
          return l;
        }
  return NULL;
}
rtld_hidden_def (_dl_find_dso_for_object);
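
/* Editorial sketch (illustrative only): dl_open_worker below uses this
   function to map the dlopen caller's code address back to its link
   map, conceptually

     struct link_map *l
       = _dl_find_dso_for_object ((ElfW(Addr)) __builtin_return_address (0));

   so that DSTs and RUNPATH/RPATH searches are resolved relative to the
   calling object rather than the main program.  */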

/* Return true if NEW is found in the scope for MAP.  */
static bool
scope_has_map (struct link_map *map, struct link_map *new)
{
  size_t cnt;
  for (cnt = 0; map->l_scope[cnt] != NULL; ++cnt)
    if (map->l_scope[cnt] == &new->l_searchlist)
      return true;
  return false;
}

/* Return the length of the scope for MAP.  */
static size_t
scope_size (struct link_map *map)
{
  size_t cnt;
  for (cnt = 0; map->l_scope[cnt] != NULL; )
    ++cnt;
  return cnt;
}

/* Resize the scopes of depended-upon objects, so that the new object
   can be added later without further allocation of memory.  This
   function can raise exceptions due to malloc failure.  */
static void
resize_scopes (struct link_map *new)
{
  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* If the initializer has been called already, the object has
         not been loaded here and now.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
        {
          if (scope_has_map (imap, new))
            /* Avoid duplicates.  */
            continue;

          size_t cnt = scope_size (imap);
          if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max))
            {
              /* The l_scope array is too small.  Allocate a new one
                 dynamically.  */
              size_t new_size;
              struct r_scope_elem **newp;

              if (imap->l_scope != imap->l_scope_mem
                  && imap->l_scope_max < array_length (imap->l_scope_mem))
                {
                  /* If the current l_scope memory is not pointing to
                     the static memory in the structure, but the
                     static memory in the structure is large enough to
                     use for cnt + 1 scope entries, then switch to
                     using the static memory.  */
                  new_size = array_length (imap->l_scope_mem);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max * 2;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlopen", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy the array and the terminating NULL.  */
              memcpy (newp, imap->l_scope,
                      (cnt + 1) * sizeof (imap->l_scope[0]));
              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              if (old != imap->l_scope_mem)
                _dl_scope_free (old);

              imap->l_scope_max = new_size;
            }
        }
    }
}
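
/* Editorial note: l_scope is a NULL-terminated array of scope
   pointers.  Small scopes live in the inline l_scope_mem buffer; once
   that overflows, the scope moves to a heap array that doubles on each
   overflow, and the old heap array goes through _dl_scope_free, which
   defers the actual free until concurrent readers have drained.  */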

/* Second stage of resize_scopes: Add NEW to the scopes.  Also print
   debugging information about scopes if requested.

   This function cannot raise an exception because all required memory
   has been allocated by a previous call to resize_scopes.  */
static void
update_scopes (struct link_map *new)
{
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];
      int from_scope = 0;

      if (imap->l_init_called && imap->l_type == lt_loaded)
        {
          if (scope_has_map (imap, new))
            /* Avoid duplicates.  */
            continue;

          size_t cnt = scope_size (imap);
          /* Assert that resize_scopes has sufficiently enlarged the
             array.  */
          assert (cnt + 1 < imap->l_scope_max);

          /* First terminate the extended list.  Otherwise a thread
             might use the new last element and then use the garbage
             following after it.  */
          imap->l_scope[cnt + 1] = NULL;
          atomic_write_barrier ();
          imap->l_scope[cnt] = &new->l_searchlist;

          from_scope = cnt;
        }

      /* Print scope information.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
        _dl_show_scope (imap, from_scope);
    }
}
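
/* Editorial note: the store sequence above is the lock-free
   publication protocol for scopes: first the new NULL terminator is
   written one slot past the new entry, then the write barrier, then
   the entry itself.  A concurrent reader walking l_scope either stops
   at the old terminator or sees a fully initialized entry; it never
   reads garbage past the end.  */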

/* Call _dl_add_to_slotinfo with DO_ADD set to false, to allocate
   space in GL (dl_tls_dtv_slotinfo_list).  This can raise an
   exception.  The return value is true if any of the new objects use
   TLS.  */
static bool
resize_tls_slotinfo (struct link_map *new)
{
  bool any_tls = false;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* Only add TLS memory if this object is loaded now and
         therefore is not yet initialized.  */
      if (! imap->l_init_called && imap->l_tls_blocksize > 0)
        {
          _dl_add_to_slotinfo (imap, false);
          any_tls = true;
        }
    }
  return any_tls;
}

/* Second stage of TLS update, after resize_tls_slotinfo.  This
   function does not raise any exception.  It should only be called if
   resize_tls_slotinfo returned true.  */
static void
update_tls_slotinfo (struct link_map *new)
{
  unsigned int first_static_tls = new->l_searchlist.r_nlist;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* Only add TLS memory if this object is loaded now and
         therefore is not yet initialized.  */
      if (! imap->l_init_called && imap->l_tls_blocksize > 0)
        _dl_add_to_slotinfo (imap, true);

      if (imap->l_need_tls_init
          && first_static_tls == new->l_searchlist.r_nlist)
        first_static_tls = i;
    }

  if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));

  /* We need a second pass for static tls data, because
     _dl_update_slotinfo must not be run while calls to
     _dl_add_to_slotinfo are still pending.  */
  for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      if (imap->l_need_tls_init
          && ! imap->l_init_called
          && imap->l_tls_blocksize > 0)
        {
          /* For static TLS we have to allocate the memory here and
             now, but we can delay updating the DTV.  */
          imap->l_need_tls_init = 0;
#ifdef SHARED
          /* Update the slot information data for at least the
             generation of the DSO we are allocating data for.  */

          /* FIXME: This can terminate the process on memory
             allocation failure.  It is not possible to raise
             exceptions from this context; to fix this bug,
             _dl_update_slotinfo would have to be split into two
             operations, similar to resize_scopes and update_scopes
             above.  This is related to bug 16134.  */
          _dl_update_slotinfo (imap->l_tls_modid);
#endif

          GL(dl_init_static_tls) (imap);
          assert (imap->l_need_tls_init == 0);
        }
    }
}
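
/* Editorial note: incrementing GL(dl_tls_generation) is what makes
   other threads notice the new modules: a thread's DTV records the
   generation it was built for, and the TLS access paths compare it
   against the global counter and update the DTV lazily on the next
   access.  */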

/* Mark the objects as NODELETE if required.  This is delayed until
   after dlopen failure is not possible, so that _dl_close can clean
   up objects if necessary.  */
static void
activate_nodelete (struct link_map *new)
{
  /* It is necessary to traverse the entire namespace.  References to
     objects in the global scope and unique symbol bindings can force
     NODELETE status for objects outside the local scope.  */
  for (struct link_map *l = GL (dl_ns)[new->l_ns]._ns_loaded; l != NULL;
       l = l->l_next)
    if (l->l_nodelete_pending)
      {
        if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES))
          _dl_debug_printf ("activating NODELETE for %s [%lu]\n",
                            l->l_name, l->l_ns);

        /* The flag can already be true at this point, e.g. a signal
           handler may have triggered lazy binding and set NODELETE
           status immediately.  */
        l->l_nodelete_active = true;

        /* This is just a debugging aid, to indicate that
           activate_nodelete has run for this map.  */
        l->l_nodelete_pending = false;
      }
}

/* struct dl_init_args and call_dl_init are used to call _dl_init with
   exception handling disabled.  */
struct dl_init_args
{
  struct link_map *new;
  int argc;
  char **argv;
  char **env;
};

static void
call_dl_init (void *closure)
{
  struct dl_init_args *args = closure;
  _dl_init (args->new, args->argc, args->argv, args->env);
}
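
/* Editorial note: the closure indirection exists because
   _dl_catch_exception takes a void (*) (void *) callback; bundling the
   _dl_init arguments into a struct is the conventional way to pass
   several values through that interface.  */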

static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;

  /* Determine the caller's map if necessary.  This is needed in case
     we have a DST, when we don't know the namespace ID we have to put
     the new object in, or when the file name has no path in which
     case we need to look along the RUNPATH/RPATH of the caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

      /* We have to find out from which object the caller is calling.
         By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

      struct link_map *l
        = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);
      if (l)
        call_map = l;

      if (args->nsid == __LM_ID_CALLER)
        args->nsid = call_map->l_ns;
    }

  /* The namespace ID is now known.  Keep track of whether libc.so was
     already loaded, to determine whether it is necessary to call the
     early initialization routine (or clear libc_map on error).  */
  args->libc_already_loaded = GL(dl_ns)[args->nsid].libc_map != NULL;

  /* Retain the old value, so that it can be restored.  */
  args->original_global_scope_pending_adds
    = GL (dl_ns)[args->nsid]._ns_global_scope_pending_adds;

  /* One might be tempted to assert that we are RT_CONSISTENT at this
     point, but that may not be true if this is a recursive call to
     dlopen.  */
  _dl_debug_initialize (0, args->nsid);

  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
                                    mode | __RTLD_CALLMAP, args->nsid);

  /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
     set and the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }

  if (__glibc_unlikely (mode & __RTLD_SPROF))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;

  /* It was already open.  */
  if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
    {
      /* Let the user know about the opencount.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                          new->l_name, new->l_ns, new->l_direct_opencount);

      /* If the user requested the object to be in the global
         namespace but it is not so far, prepare to add it now.  This
         can raise an exception due to a malloc failure.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
        add_to_global_resize (new);

      /* Mark the object as not deletable if the RTLD_NODELETE flag
         was passed.  */
      if (__glibc_unlikely (mode & RTLD_NODELETE))
        {
          if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)
              && !new->l_nodelete_active)
            _dl_debug_printf ("marking %s [%lu] as NODELETE\n",
                              new->l_name, new->l_ns);
          new->l_nodelete_active = true;
        }

      /* Finalize the addition to the global scope.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
        add_to_global_update (new);

      assert (_dl_debug_initialize (0, args->nsid)->r_state
              == RT_CONSISTENT);

      return;
    }

  /* Schedule NODELETE marking for the directly loaded object if
     requested.  */
  if (__glibc_unlikely (mode & RTLD_NODELETE))
    new->l_nodelete_pending = true;

  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
                       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));

  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real,
                                     0, 0);

#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  if (__glibc_unlikely (GLRO(dl_naudit) > 0))
    {
      struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                {
                  struct auditstate *state = link_map_audit_state (head, cnt);
                  afct->activity (&state->cookie, LA_ACT_CONSISTENT);
                }

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_initialize (0, args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (map_complete, 3, args->nsid, r, new);

  _dl_open_check (new);

  /* Print scope information.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
    _dl_show_scope (new, 0);

  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;

  /* Objects must be sorted by dependency for the relocation process.
     This allows IFUNC relocations to work and it also means copy
     relocations of dependencies are overwritten if necessary.
     _dl_map_object_deps has already sorted l_initfini for us.  */
  unsigned int first = UINT_MAX;
  unsigned int last = 0;
  unsigned int j = 0;
  struct link_map *l = new->l_initfini[0];
  do
    {
      if (! l->l_real->l_relocated)
        {
          if (first == UINT_MAX)
            first = j;
          last = j + 1;
        }
      l = new->l_initfini[++j];
    }
  while (l != NULL);

  int relocation_in_progress = 0;
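
/* Editorial note: the relocation loop below walks l_initfini backwards
   (from last towards first) because the list is dependency-sorted with
   dependencies at the end; walking it in reverse relocates every
   dependency before the objects that use it.  */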

  /* Perform relocation.  This can trigger lazy binding in IFUNC
     resolvers.  For NODELETE mappings, these dependencies are not
     recorded because the flag has not been applied to the newly
     loaded objects.  This means that upon dlopen failure, these
     NODELETE objects can be unloaded despite existing references to
     them.  However, such relocation dependencies in IFUNC resolvers
     are undefined anyway, so this is not a problem.  */

  for (unsigned int i = last; i-- > first; )
    {
      l = new->l_initfini[i];

      if (l->l_real->l_relocated)
        continue;

      if (! relocation_in_progress)
        {
          /* Notify the debugger that relocations are about to happen.  */
          LIBC_PROBE (reloc_start, 2, args->nsid, r);
          relocation_in_progress = 1;
        }

#ifdef SHARED
      if (__glibc_unlikely (GLRO(dl_profile) != NULL))
        {
          /* If this here is the shared object which we want to profile
             make sure the profile is started.  We can find out whether
             this is necessary or not by observing the `_dl_profile_map'
             variable.  If it was NULL but is not NULL afterwards we must
             start the profiling.  */
          struct link_map *old_profile_map = GL(dl_profile_map);

          _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

          if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
            {
              /* We must prepare the profiling.  */
              _dl_start_profile ();

              /* Prevent unloading the object.  */
              GL(dl_profile_map)->l_nodelete_active = true;
            }
        }
      else
#endif
        _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
    }

  /* This only performs the memory allocations.  The actual update of
     the scopes happens below, after failure is impossible.  */
  resize_scopes (new);

  /* Increase the size of the GL (dl_tls_dtv_slotinfo_list) data
     structure.  */
  bool any_tls = resize_tls_slotinfo (new);

  /* Perform the necessary allocations for adding new global objects
     to the global scope below.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_resize (new);

  /* Demarcation point: After this, no recoverable errors are allowed.
     All memory allocations for new objects must have happened
     before.  */

  /* Finalize the NODELETE status first.  This comes before
     update_scopes, so that lazy binding will not see pending NODELETE
     state for newly loaded objects.  There is a compiler barrier in
     update_scopes which ensures that the changes from
     activate_nodelete are visible before new objects show up in the
     global scope.  */
  activate_nodelete (new);

  /* Second stage after resize_scopes: Actually perform the scope
     update.  After this, dlsym and lazy binding can bind to new
     objects.  */
  update_scopes (new);

  /* FIXME: It is unclear whether the order here is correct.
     Shouldn't new objects be made available for binding (and thus
     execution) only after their TLS data has been set up fully?
     Fixing bug 16134 will likely make this distinction less
     important.  */

  /* Second stage after resize_tls_slotinfo: Update the slotinfo data
     structures.  */
  if (any_tls)
    /* FIXME: This calls _dl_update_slotinfo, which aborts the process
       on memory allocation failure.  See bug 16134.  */
    update_tls_slotinfo (new);

  /* Notify the debugger all new objects have been relocated.  */
  if (relocation_in_progress)
    LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);
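
/* Editorial note: the resize/update pairing above is a two-phase
   commit: every fallible allocation (scopes, TLS slotinfo, global
   scope) happens before the demarcation point, and only infallible
   update functions run after it, so any failure can still be rolled
   back cleanly by _dl_close.  */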

  /* If libc.so was not there before, attempt to call its early
     initialization routine.  Indicate to the initialization routine
     whether the libc being initialized is the one in the base
     namespace.  */
  if (!args->libc_already_loaded)
    {
      struct link_map *libc_map = GL(dl_ns)[args->nsid].libc_map;
#ifdef SHARED
      bool initial = libc_map->l_ns == LM_ID_BASE;
#else
      /* In the static case, there is only one namespace, but it
         contains a secondary libc (the primary libc is statically
         linked).  */
      bool initial = false;
#endif
      _dl_call_libc_early_init (libc_map, initial);
    }

#ifndef SHARED
  DL_STATIC_INIT (new);
#endif

  /* Run the initializer functions of new objects.  Temporarily
     disable the exception handler, so that lazy binding failures are
     fatal.  */
  {
    struct dl_init_args init_args =
      {
        .new = new,
        .argc = args->argc,
        .argv = args->argv,
        .env = args->env,
      };
    _dl_catch_exception (NULL, call_dl_init, &init_args);
  }

  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_update (new);

#ifndef SHARED
  /* We must be the static _dl_open in libc.a.  A static program that
     has loaded a dynamic object now has competition.  */
  __libc_multiple_libcs = 1;
#endif

  /* Let the user know about the opencount.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                      new->l_name, new->l_ns, new->l_direct_opencount);
}
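
/* Editorial summary (not in the original source): dl_open_worker runs
   in this order: resolve the caller and namespace; map the object and
   its dependencies; check symbol versions; relocate; pre-allocate
   (resize_scopes, resize_tls_slotinfo, add_to_global_resize); commit
   (activate_nodelete, update_scopes, update_tls_slotinfo); run libc
   early initialization and ELF constructors; finally publish
   RTLD_GLOBAL objects via add_to_global_update.  */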

void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
          int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  if (__glibc_unlikely (nsid == LM_ID_NEWLM))
    {
      /* Find a new namespace.  */
      for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
        if (GL(dl_ns)[nsid]._ns_loaded == NULL)
          break;

      if (__glibc_unlikely (nsid == DL_NNS))
        {
          /* No more namespaces available.  */
          __rtld_lock_unlock_recursive (GL(dl_load_lock));

          _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
        }
      else if (nsid == GL(dl_nns))
        {
          __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
          ++GL(dl_nns);
        }

      _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO in a namespace which is empty.  Such
     direct placement only causes problems.  Also don't allow loading
     into a namespace used for auditing.  */
  else if (__glibc_unlikely (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER)
           && (__glibc_unlikely (nsid < 0 || nsid >= GL(dl_nns))
               /* This prevents the [NSID] index expressions from being
                  evaluated, so the compiler won't think that we are
                  accessing an invalid index here in the !SHARED case where
                  DL_NNS is 1 and so any NSID != 0 is invalid.  */
               || DL_NNS == 1
               || GL(dl_ns)[nsid]._ns_nloaded == 0
               || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
                      N_("invalid target namespace in dlmopen()"));

  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.map = NULL;
  args.nsid = nsid;
  /* args.libc_already_loaded is always assigned by dl_open_worker
     (before any explicit/non-local returns).  */
  args.argc = argc;
  args.argv = argv;
  args.env = env;

  struct dl_exception exception;
  int errcode = _dl_catch_exception (&exception, dl_open_worker, &args);

#if defined USE_LDCONFIG && !defined MAP_COPY
  /* We must unmap the cache file.  */
  _dl_unload_cache ();
#endif

  /* Do this for both the error and success cases.  The old value has
     only been determined if the namespace ID was assigned (i.e., it
     is not __LM_ID_CALLER).  In the success case, we actually may
     have consumed more pending adds than planned (because the local
     scopes overlap in case of a recursive dlopen, the inner dlopen
     doing some of the globalization work of the outer dlopen), so the
     old pending adds value is larger than absolutely necessary.
     Since it is just a conservative upper bound, this is harmless.
     The top-level dlopen call will restore the field to zero.  */
  if (args.nsid >= 0)
    GL (dl_ns)[args.nsid]._ns_global_scope_pending_adds
      = args.original_global_scope_pending_adds;

  /* See if an error occurred during loading.  */
  if (__glibc_unlikely (exception.errstring != NULL))
    {
      /* Avoid keeping around a dangling reference to the libc.so link
         map in case it has been cached in libc_map.  */
      if (!args.libc_already_loaded)
        GL(dl_ns)[nsid].libc_map = NULL;

      /* Remove the object from memory.  It may be in an inconsistent
         state if relocation failed, for example.  */
      if (args.map)
        {
          /* Maybe some of the modules which were loaded use TLS.
             Since it will be removed in the following _dl_close call
             we have to mark the dtv array as having gaps to fill the
             holes.  This is a pessimistic assumption which won't hurt
             if not true.  There is no need to do this when we are
             loading the auditing DSOs since TLS has not yet been set
             up.  */
          if ((mode & __RTLD_AUDIT) == 0)
            GL(dl_tls_dtv_gaps) = true;

          _dl_close_worker (args.map, true);

          /* All l_nodelete_pending objects should have been deleted
             at this point, which is why it is not necessary to reset
             the flag here.  */
        }

      assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Reraise the error.  */
      _dl_signal_exception (errcode, &exception, NULL);
    }

  assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

  return args.map;
}
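
/* Editorial sketch (simplified; the exact call lives in
   dlfcn/dlopen.c): the public dlopen entry points reach _dl_open
   roughly as

     void *handle = _dl_open (file, mode | __RTLD_DLOPEN,
                              RETURN_ADDRESS (0), __LM_ID_CALLER,
                              __libc_argc, __libc_argv, __environ);

   passing the caller's return address so the namespace and search
   paths can be derived from the calling object.  */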

void
_dl_show_scope (struct link_map *l, int from)
{
  _dl_debug_printf ("object=%s [%lu]\n",
                    DSO_FILENAME (l->l_name), l->l_ns);
  if (l->l_scope != NULL)
    for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
      {
        _dl_debug_printf (" scope %u:", scope_cnt);

        for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
          if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
            _dl_debug_printf_c (" %s",
                                l->l_scope[scope_cnt]->r_list[cnt]->l_name);
          else
            _dl_debug_printf_c (" %s", RTLD_PROGNAME);

        _dl_debug_printf_c ("\n");
      }
  else
    _dl_debug_printf ("  no scope\n");
  _dl_debug_printf ("\n");
}