/* Source: glibc, elf/dl-open.c (gitweb blob f6c8ef1043b9a6cf90419db0f536445389764b7d).
   Page was captured at commit "Fix libnldbl_nonshared.a references to internal
   libm symbols (bug 23735)".  */
/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>		/* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <atomic.h>
#include <libc-internal.h>

#include <dl-dst.h>
#include <dl-prop.h>
/* We must be careful not to leave us in an inconsistent state.  Thus we
   catch any error and re-raise it after cleaning up.  */
44 struct dl_open_args
46 const char *file;
47 int mode;
48 /* This is the caller of the dlopen() function. */
49 const void *caller_dlopen;
50 struct link_map *map;
51 /* Namespace ID. */
52 Lmid_t nsid;
53 /* Original parameters to the program and the current environment. */
54 int argc;
55 char **argv;
56 char **env;
60 static int
61 add_to_global (struct link_map *new)
63 struct link_map **new_global;
64 unsigned int to_add = 0;
65 unsigned int cnt;
67 /* Count the objects we have to put in the global scope. */
68 for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
69 if (new->l_searchlist.r_list[cnt]->l_global == 0)
70 ++to_add;
72 /* The symbols of the new objects and its dependencies are to be
73 introduced into the global scope that will be used to resolve
74 references from other dynamically-loaded objects.
76 The global scope is the searchlist in the main link map. We
77 extend this list if necessary. There is one problem though:
78 since this structure was allocated very early (before the libc
79 is loaded) the memory it uses is allocated by the malloc()-stub
80 in the ld.so. When we come here these functions are not used
81 anymore. Instead the malloc() implementation of the libc is
82 used. But this means the block from the main map cannot be used
83 in an realloc() call. Therefore we allocate a completely new
84 array the first time we have to add something to the locale scope. */
86 struct link_namespaces *ns = &GL(dl_ns)[new->l_ns];
87 if (ns->_ns_global_scope_alloc == 0)
89 /* This is the first dynamic object given global scope. */
90 ns->_ns_global_scope_alloc
91 = ns->_ns_main_searchlist->r_nlist + to_add + 8;
92 new_global = (struct link_map **)
93 malloc (ns->_ns_global_scope_alloc * sizeof (struct link_map *));
94 if (new_global == NULL)
96 ns->_ns_global_scope_alloc = 0;
97 nomem:
98 _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
99 N_("cannot extend global scope"));
100 return 1;
103 /* Copy over the old entries. */
104 ns->_ns_main_searchlist->r_list
105 = memcpy (new_global, ns->_ns_main_searchlist->r_list,
106 (ns->_ns_main_searchlist->r_nlist
107 * sizeof (struct link_map *)));
109 else if (ns->_ns_main_searchlist->r_nlist + to_add
110 > ns->_ns_global_scope_alloc)
112 /* We have to extend the existing array of link maps in the
113 main map. */
114 struct link_map **old_global
115 = GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list;
116 size_t new_nalloc = ((ns->_ns_global_scope_alloc + to_add) * 2);
118 new_global = (struct link_map **)
119 malloc (new_nalloc * sizeof (struct link_map *));
120 if (new_global == NULL)
121 goto nomem;
123 memcpy (new_global, old_global,
124 ns->_ns_global_scope_alloc * sizeof (struct link_map *));
126 ns->_ns_global_scope_alloc = new_nalloc;
127 ns->_ns_main_searchlist->r_list = new_global;
129 if (!RTLD_SINGLE_THREAD_P)
130 THREAD_GSCOPE_WAIT ();
132 free (old_global);
135 /* Now add the new entries. */
136 unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
137 for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
139 struct link_map *map = new->l_searchlist.r_list[cnt];
141 if (map->l_global == 0)
143 map->l_global = 1;
144 ns->_ns_main_searchlist->r_list[new_nlist++] = map;
146 /* We modify the global scope. Report this. */
147 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
148 _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
149 map->l_name, map->l_ns);
152 atomic_write_barrier ();
153 ns->_ns_main_searchlist->r_nlist = new_nlist;
155 return 0;
158 /* Search link maps in all namespaces for the DSO that contains the object at
159 address ADDR. Returns the pointer to the link map of the matching DSO, or
160 NULL if a match is not found. */
161 struct link_map *
162 _dl_find_dso_for_object (const ElfW(Addr) addr)
164 struct link_map *l;
166 /* Find the highest-addressed object that ADDR is not below. */
167 for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
168 for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
169 if (addr >= l->l_map_start && addr < l->l_map_end
170 && (l->l_contiguous
171 || _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
173 assert (ns == l->l_ns);
174 return l;
176 return NULL;
178 rtld_hidden_def (_dl_find_dso_for_object);
180 static void
181 dl_open_worker (void *a)
183 struct dl_open_args *args = a;
184 const char *file = args->file;
185 int mode = args->mode;
186 struct link_map *call_map = NULL;
188 /* Determine the caller's map if necessary. This is needed in case
189 we have a DST, when we don't know the namespace ID we have to put
190 the new object in, or when the file name has no path in which
191 case we need to look along the RUNPATH/RPATH of the caller. */
192 const char *dst = strchr (file, '$');
193 if (dst != NULL || args->nsid == __LM_ID_CALLER
194 || strchr (file, '/') == NULL)
196 const void *caller_dlopen = args->caller_dlopen;
198 /* We have to find out from which object the caller is calling.
199 By default we assume this is the main application. */
200 call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
202 struct link_map *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);
204 if (l)
205 call_map = l;
207 if (args->nsid == __LM_ID_CALLER)
208 args->nsid = call_map->l_ns;
211 /* One might be tempted to assert that we are RT_CONSISTENT at this point, but that
212 may not be true if this is a recursive call to dlopen. */
213 _dl_debug_initialize (0, args->nsid);
215 /* Load the named object. */
216 struct link_map *new;
217 args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
218 mode | __RTLD_CALLMAP, args->nsid);
220 /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
221 set and the object is not already loaded. */
222 if (new == NULL)
224 assert (mode & RTLD_NOLOAD);
225 return;
228 /* Mark the object as not deletable if the RTLD_NODELETE flags was passed.
229 Do this early so that we don't skip marking the object if it was
230 already loaded. */
231 if (__glibc_unlikely (mode & RTLD_NODELETE))
232 new->l_flags_1 |= DF_1_NODELETE;
234 if (__glibc_unlikely (mode & __RTLD_SPROF))
235 /* This happens only if we load a DSO for 'sprof'. */
236 return;
238 /* This object is directly loaded. */
239 ++new->l_direct_opencount;
241 /* It was already open. */
242 if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
244 /* Let the user know about the opencount. */
245 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
246 _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
247 new->l_name, new->l_ns, new->l_direct_opencount);
249 /* If the user requested the object to be in the global namespace
250 but it is not so far, add it now. */
251 if ((mode & RTLD_GLOBAL) && new->l_global == 0)
252 (void) add_to_global (new);
254 assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);
256 return;
259 /* Load that object's dependencies. */
260 _dl_map_object_deps (new, NULL, 0, 0,
261 mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));
263 /* So far, so good. Now check the versions. */
264 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
265 if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
266 (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real,
267 0, 0);
269 #ifdef SHARED
270 /* Auditing checkpoint: we have added all objects. */
271 if (__glibc_unlikely (GLRO(dl_naudit) > 0))
273 struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
274 /* Do not call the functions for any auditing object. */
275 if (head->l_auditing == 0)
277 struct audit_ifaces *afct = GLRO(dl_audit);
278 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
280 if (afct->activity != NULL)
281 afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);
283 afct = afct->next;
287 #endif
289 /* Notify the debugger all new objects are now ready to go. */
290 struct r_debug *r = _dl_debug_initialize (0, args->nsid);
291 r->r_state = RT_CONSISTENT;
292 _dl_debug_state ();
293 LIBC_PROBE (map_complete, 3, args->nsid, r, new);
295 _dl_open_check (new);
297 /* Print scope information. */
298 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
299 _dl_show_scope (new, 0);
301 /* Only do lazy relocation if `LD_BIND_NOW' is not set. */
302 int reloc_mode = mode & __RTLD_AUDIT;
303 if (GLRO(dl_lazy))
304 reloc_mode |= mode & RTLD_LAZY;
306 /* Sort the objects by dependency for the relocation process. This
307 allows IFUNC relocations to work and it also means copy
308 relocation of dependencies are if necessary overwritten. */
309 unsigned int nmaps = 0;
310 struct link_map *l = new;
313 if (! l->l_real->l_relocated)
314 ++nmaps;
315 l = l->l_next;
317 while (l != NULL);
318 struct link_map *maps[nmaps];
319 nmaps = 0;
320 l = new;
323 if (! l->l_real->l_relocated)
324 maps[nmaps++] = l;
325 l = l->l_next;
327 while (l != NULL);
328 _dl_sort_maps (maps, nmaps, NULL, false);
330 int relocation_in_progress = 0;
332 for (unsigned int i = nmaps; i-- > 0; )
334 l = maps[i];
336 if (! relocation_in_progress)
338 /* Notify the debugger that relocations are about to happen. */
339 LIBC_PROBE (reloc_start, 2, args->nsid, r);
340 relocation_in_progress = 1;
343 #ifdef SHARED
344 if (__glibc_unlikely (GLRO(dl_profile) != NULL))
346 /* If this here is the shared object which we want to profile
347 make sure the profile is started. We can find out whether
348 this is necessary or not by observing the `_dl_profile_map'
349 variable. If it was NULL but is not NULL afterwards we must
350 start the profiling. */
351 struct link_map *old_profile_map = GL(dl_profile_map);
353 _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);
355 if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
357 /* We must prepare the profiling. */
358 _dl_start_profile ();
360 /* Prevent unloading the object. */
361 GL(dl_profile_map)->l_flags_1 |= DF_1_NODELETE;
364 else
365 #endif
366 _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
369 /* If the file is not loaded now as a dependency, add the search
370 list of the newly loaded object to the scope. */
371 bool any_tls = false;
372 unsigned int first_static_tls = new->l_searchlist.r_nlist;
373 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
375 struct link_map *imap = new->l_searchlist.r_list[i];
376 int from_scope = 0;
378 /* If the initializer has been called already, the object has
379 not been loaded here and now. */
380 if (imap->l_init_called && imap->l_type == lt_loaded)
382 struct r_scope_elem **runp = imap->l_scope;
383 size_t cnt = 0;
385 while (*runp != NULL)
387 if (*runp == &new->l_searchlist)
388 break;
389 ++cnt;
390 ++runp;
393 if (*runp != NULL)
394 /* Avoid duplicates. */
395 continue;
397 if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max))
399 /* The 'r_scope' array is too small. Allocate a new one
400 dynamically. */
401 size_t new_size;
402 struct r_scope_elem **newp;
404 #define SCOPE_ELEMS(imap) \
405 (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))
407 if (imap->l_scope != imap->l_scope_mem
408 && imap->l_scope_max < SCOPE_ELEMS (imap))
410 new_size = SCOPE_ELEMS (imap);
411 newp = imap->l_scope_mem;
413 else
415 new_size = imap->l_scope_max * 2;
416 newp = (struct r_scope_elem **)
417 malloc (new_size * sizeof (struct r_scope_elem *));
418 if (newp == NULL)
419 _dl_signal_error (ENOMEM, "dlopen", NULL,
420 N_("cannot create scope list"));
423 memcpy (newp, imap->l_scope, cnt * sizeof (imap->l_scope[0]));
424 struct r_scope_elem **old = imap->l_scope;
426 imap->l_scope = newp;
428 if (old != imap->l_scope_mem)
429 _dl_scope_free (old);
431 imap->l_scope_max = new_size;
434 /* First terminate the extended list. Otherwise a thread
435 might use the new last element and then use the garbage
436 at offset IDX+1. */
437 imap->l_scope[cnt + 1] = NULL;
438 atomic_write_barrier ();
439 imap->l_scope[cnt] = &new->l_searchlist;
441 /* Print only new scope information. */
442 from_scope = cnt;
444 /* Only add TLS memory if this object is loaded now and
445 therefore is not yet initialized. */
446 else if (! imap->l_init_called
447 /* Only if the module defines thread local data. */
448 && __builtin_expect (imap->l_tls_blocksize > 0, 0))
450 /* Now that we know the object is loaded successfully add
451 modules containing TLS data to the slot info table. We
452 might have to increase its size. */
453 _dl_add_to_slotinfo (imap);
455 if (imap->l_need_tls_init
456 && first_static_tls == new->l_searchlist.r_nlist)
457 first_static_tls = i;
459 /* We have to bump the generation counter. */
460 any_tls = true;
463 /* Print scope information. */
464 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
465 _dl_show_scope (imap, from_scope);
468 /* Bump the generation number if necessary. */
469 if (any_tls && __builtin_expect (++GL(dl_tls_generation) == 0, 0))
470 _dl_fatal_printf (N_("\
471 TLS generation counter wrapped! Please report this."));
473 /* We need a second pass for static tls data, because _dl_update_slotinfo
474 must not be run while calls to _dl_add_to_slotinfo are still pending. */
475 for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
477 struct link_map *imap = new->l_searchlist.r_list[i];
479 if (imap->l_need_tls_init
480 && ! imap->l_init_called
481 && imap->l_tls_blocksize > 0)
483 /* For static TLS we have to allocate the memory here and
484 now, but we can delay updating the DTV. */
485 imap->l_need_tls_init = 0;
486 #ifdef SHARED
487 /* Update the slot information data for at least the
488 generation of the DSO we are allocating data for. */
489 _dl_update_slotinfo (imap->l_tls_modid);
490 #endif
492 GL(dl_init_static_tls) (imap);
493 assert (imap->l_need_tls_init == 0);
497 /* Notify the debugger all new objects have been relocated. */
498 if (relocation_in_progress)
499 LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);
501 #ifndef SHARED
502 DL_STATIC_INIT (new);
503 #endif
505 /* Run the initializer functions of new objects. */
506 _dl_init (new, args->argc, args->argv, args->env);
508 /* Now we can make the new map available in the global scope. */
509 if (mode & RTLD_GLOBAL)
510 /* Move the object in the global namespace. */
511 if (add_to_global (new) != 0)
512 /* It failed. */
513 return;
515 #ifndef SHARED
516 /* We must be the static _dl_open in libc.a. A static program that
517 has loaded a dynamic object now has competition. */
518 __libc_multiple_libcs = 1;
519 #endif
521 /* Let the user know about the opencount. */
522 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
523 _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
524 new->l_name, new->l_ns, new->l_direct_opencount);
528 void *
529 _dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
530 int argc, char *argv[], char *env[])
532 if ((mode & RTLD_BINDING_MASK) == 0)
533 /* One of the flags must be set. */
534 _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));
536 /* Make sure we are alone. */
537 __rtld_lock_lock_recursive (GL(dl_load_lock));
539 if (__glibc_unlikely (nsid == LM_ID_NEWLM))
541 /* Find a new namespace. */
542 for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
543 if (GL(dl_ns)[nsid]._ns_loaded == NULL)
544 break;
546 if (__glibc_unlikely (nsid == DL_NNS))
548 /* No more namespace available. */
549 __rtld_lock_unlock_recursive (GL(dl_load_lock));
551 _dl_signal_error (EINVAL, file, NULL, N_("\
552 no more namespaces available for dlmopen()"));
554 else if (nsid == GL(dl_nns))
556 __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
557 ++GL(dl_nns);
560 _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
562 /* Never allow loading a DSO in a namespace which is empty. Such
563 direct placements is only causing problems. Also don't allow
564 loading into a namespace used for auditing. */
565 else if (__glibc_unlikely (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER)
566 && (__glibc_unlikely (nsid < 0 || nsid >= GL(dl_nns))
567 /* This prevents the [NSID] index expressions from being
568 evaluated, so the compiler won't think that we are
569 accessing an invalid index here in the !SHARED case where
570 DL_NNS is 1 and so any NSID != 0 is invalid. */
571 || DL_NNS == 1
572 || GL(dl_ns)[nsid]._ns_nloaded == 0
573 || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
574 _dl_signal_error (EINVAL, file, NULL,
575 N_("invalid target namespace in dlmopen()"));
577 struct dl_open_args args;
578 args.file = file;
579 args.mode = mode;
580 args.caller_dlopen = caller_dlopen;
581 args.map = NULL;
582 args.nsid = nsid;
583 args.argc = argc;
584 args.argv = argv;
585 args.env = env;
587 struct dl_exception exception;
588 int errcode = _dl_catch_exception (&exception, dl_open_worker, &args);
590 #if defined USE_LDCONFIG && !defined MAP_COPY
591 /* We must unmap the cache file. */
592 _dl_unload_cache ();
593 #endif
595 /* See if an error occurred during loading. */
596 if (__glibc_unlikely (exception.errstring != NULL))
598 /* Remove the object from memory. It may be in an inconsistent
599 state if relocation failed, for example. */
600 if (args.map)
602 /* Maybe some of the modules which were loaded use TLS.
603 Since it will be removed in the following _dl_close call
604 we have to mark the dtv array as having gaps to fill the
605 holes. This is a pessimistic assumption which won't hurt
606 if not true. There is no need to do this when we are
607 loading the auditing DSOs since TLS has not yet been set
608 up. */
609 if ((mode & __RTLD_AUDIT) == 0)
610 GL(dl_tls_dtv_gaps) = true;
612 _dl_close_worker (args.map, true);
615 assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);
617 /* Release the lock. */
618 __rtld_lock_unlock_recursive (GL(dl_load_lock));
620 /* Reraise the error. */
621 _dl_signal_exception (errcode, &exception, NULL);
624 assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);
626 /* Release the lock. */
627 __rtld_lock_unlock_recursive (GL(dl_load_lock));
629 return args.map;
633 void
634 _dl_show_scope (struct link_map *l, int from)
636 _dl_debug_printf ("object=%s [%lu]\n",
637 DSO_FILENAME (l->l_name), l->l_ns);
638 if (l->l_scope != NULL)
639 for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
641 _dl_debug_printf (" scope %u:", scope_cnt);
643 for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
644 if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
645 _dl_debug_printf_c (" %s",
646 l->l_scope[scope_cnt]->r_list[cnt]->l_name);
647 else
648 _dl_debug_printf_c (" %s", RTLD_PROGNAME);
650 _dl_debug_printf_c ("\n");
652 else
653 _dl_debug_printf (" no scope\n");
654 _dl_debug_printf ("\n");