elf/dl-open.c

/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>		/* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <atomic.h>
#include <libc-internal.h>

#include <dl-dst.h>

/* We must be careful not to leave ourselves in an inconsistent state.
   Thus we catch any error and re-raise it after cleaning up.  */

struct dl_open_args
{
  const char *file;
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;
  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};
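
/* Illustrative only (not part of this file): the public dlopen in dlfcn
   ends up in _dl_open (below), which fills in a struct like this, roughly
   as

     _dl_open (file ?: "", mode | __RTLD_DLOPEN, RETURN_ADDRESS (0),
               file == NULL ? LM_ID_BASE : __LM_ID_CALLER,
               argc, argv, environ);

   The exact call site and argument names here are assumptions; see
   dlfcn/dlopen.c for the authoritative version.  */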

static int
add_to_global (struct link_map *new)
{
  struct link_map **new_global;
  unsigned int to_add = 0;
  unsigned int cnt;

  /* Count the objects we have to put in the global scope.  */
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;

  /* The symbols of the new object and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before libc was
     loaded) the memory it uses is allocated by the malloc() stub in
     ld.so.  When we get here those functions are not used anymore;
     instead the malloc() implementation of libc is used.  This means
     the block from the main map cannot be passed to realloc().
     Therefore we allocate a completely new array the first time we
     have to add something to the global scope.  */

  struct link_namespaces *ns = &GL(dl_ns)[new->l_ns];
  if (ns->_ns_global_scope_alloc == 0)
    {
      /* This is the first dynamic object given global scope.  */
      ns->_ns_global_scope_alloc
        = ns->_ns_main_searchlist->r_nlist + to_add + 8;
      new_global = (struct link_map **)
        malloc (ns->_ns_global_scope_alloc * sizeof (struct link_map *));
      if (new_global == NULL)
        {
          ns->_ns_global_scope_alloc = 0;
        nomem:
          _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
                            N_("cannot extend global scope"));
          return 1;
        }

      /* Copy over the old entries.  */
      ns->_ns_main_searchlist->r_list
        = memcpy (new_global, ns->_ns_main_searchlist->r_list,
                  (ns->_ns_main_searchlist->r_nlist
                   * sizeof (struct link_map *)));
    }
  else if (ns->_ns_main_searchlist->r_nlist + to_add
           > ns->_ns_global_scope_alloc)
    {
      /* We have to extend the existing array of link maps in the
         main map.  */
      struct link_map **old_global
        = GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list;
      size_t new_nalloc = ((ns->_ns_global_scope_alloc + to_add) * 2);

      new_global = (struct link_map **)
        malloc (new_nalloc * sizeof (struct link_map *));
      if (new_global == NULL)
        goto nomem;

      memcpy (new_global, old_global,
              ns->_ns_global_scope_alloc * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_nalloc;
      ns->_ns_main_searchlist->r_list = new_global;

      /* Wait for any lock-free readers of the old array to drain
         before freeing it.  */
      if (!RTLD_SINGLE_THREAD_P)
        THREAD_GSCOPE_WAIT ();

      free (old_global);
    }

  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
        {
          map->l_global = 1;
          ns->_ns_main_searchlist->r_list[new_nlist++] = map;

          /* We modify the global scope.  Report this.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
            _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
                              map->l_name, map->l_ns);
        }
    }

  /* Publish the new entries before publishing the new list length, so
     concurrent readers never see uninitialized slots.  */
  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;

  return 0;
}
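
/* Worked example of the growth policy above (illustrative numbers): with
   _ns_global_scope_alloc == 16 and to_add == 3, the list is reallocated
   to (16 + 3) * 2 == 38 entries, so repeated dlopen (..., RTLD_GLOBAL)
   calls grow the global scope geometrically rather than one entry at a
   time.  */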

/* Search link maps in all namespaces for the DSO that contains the object at
   address ADDR.  Returns the pointer to the link map of the matching DSO, or
   NULL if a match is not found.  */
struct link_map *
_dl_find_dso_for_object (const ElfW(Addr) addr)
{
  struct link_map *l;

  /* Find the highest-addressed object that ADDR is not below.  */
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
      if (addr >= l->l_map_start && addr < l->l_map_end
          && (l->l_contiguous
              || _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
        {
          assert (ns == l->l_ns);
          return l;
        }
  return NULL;
}
rtld_hidden_def (_dl_find_dso_for_object);
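
/* Typical use, mirroring dl_open_worker below (variable names are
   illustrative only):

     struct link_map *l
       = _dl_find_dso_for_object ((ElfW(Addr)) __builtin_return_address (0));
     Lmid_t caller_ns = l != NULL ? l->l_ns : LM_ID_BASE;  */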

static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;

  /* Determine the caller's map if necessary.  This is needed in case
     we have a DST, when we don't know the namespace ID we have to put
     the new object in, or when the file name has no path in which
     case we need to look along the RUNPATH/RPATH of the caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

      /* We have to find out from which object the caller is calling.
         By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

      struct link_map *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);

      if (l)
        call_map = l;

      if (args->nsid == __LM_ID_CALLER)
        args->nsid = call_map->l_ns;
    }

  /* One might be tempted to assert that we are RT_CONSISTENT at this
     point, but that may not be true if this is a recursive call to
     dlopen.  */
  _dl_debug_initialize (0, args->nsid);

  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
                                    mode | __RTLD_CALLMAP, args->nsid);

  /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
     set and the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }

  /* Mark the object as not deletable if the RTLD_NODELETE flag was
     passed.  Do this early so that we don't skip marking the object if
     it was already loaded.  */
  if (__glibc_unlikely (mode & RTLD_NODELETE))
    new->l_flags_1 |= DF_1_NODELETE;

  if (__glibc_unlikely (mode & __RTLD_SPROF))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;

  /* It was already open.  */
  if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
    {
      /* Let the user know about the opencount.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                          new->l_name, new->l_ns, new->l_direct_opencount);

      /* If the user requested the object to be in the global namespace
         but it is not so far, add it now.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
        (void) add_to_global (new);

      assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);

      return;
    }

  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
                       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));

  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real,
                                     0, 0);

#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  if (__glibc_unlikely (GLRO(dl_naudit) > 0))
    {
      struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif
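
  /* For reference: LA_ACT_CONSISTENT belongs to the LD_AUDIT interface.
     An auditor interested in this event implements, per <link.h>,

       void la_activity (uintptr_t *cookie, unsigned int flag);

     and is told here that the set of link maps is consistent again.  */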

  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_initialize (0, args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (map_complete, 3, args->nsid, r, new);

  /* Print scope information.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
    _dl_show_scope (new, 0);

  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;

  /* Sort the objects by dependency for the relocation process.  This
     allows IFUNC relocations to work and it also means copy
     relocations of dependencies are overwritten if necessary.  */

  /* First pass: count the objects still needing relocation.  */
  unsigned int nmaps = 0;
  struct link_map *l = new;
  do
    {
      if (! l->l_real->l_relocated)
        ++nmaps;
      l = l->l_next;
    }
  while (l != NULL);

  /* Second pass: collect them, then sort by dependency.  */
  struct link_map *maps[nmaps];
  nmaps = 0;
  l = new;
  do
    {
      if (! l->l_real->l_relocated)
        maps[nmaps++] = l;
      l = l->l_next;
    }
  while (l != NULL);
  _dl_sort_maps (maps, nmaps, NULL, false);
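
  /* Example of the resulting order: if the newly loaded libfoo.so depends
     on libbar.so, _dl_sort_maps leaves libfoo.so before libbar.so in MAPS,
     and the reverse walk below relocates libbar.so first, so an IFUNC
     resolver in libfoo.so can already call into a fully relocated
     libbar.so.  (Library names are illustrative.)  */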

  int relocation_in_progress = 0;

  for (unsigned int i = nmaps; i-- > 0; )
    {
      l = maps[i];

      if (! relocation_in_progress)
        {
          /* Notify the debugger that relocations are about to happen.  */
          LIBC_PROBE (reloc_start, 2, args->nsid, r);
          relocation_in_progress = 1;
        }

#ifdef SHARED
      if (__glibc_unlikely (GLRO(dl_profile) != NULL))
        {
          /* If this is the shared object we want to profile, make sure
             profiling is started.  We can find out whether this is
             necessary or not by observing the `_dl_profile_map'
             variable.  If it was NULL but is not NULL afterwards we must
             start the profiling.  */
          struct link_map *old_profile_map = GL(dl_profile_map);

          _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

          if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
            {
              /* We must prepare the profiling.  */
              _dl_start_profile ();

              /* Prevent unloading the object.  */
              GL(dl_profile_map)->l_flags_1 |= DF_1_NODELETE;
            }
        }
      else
#endif
        _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
    }

  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
  bool any_tls = false;
  unsigned int first_static_tls = new->l_searchlist.r_nlist;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];
      int from_scope = 0;

      /* If the initializer has been called already, the object has
         not been loaded here and now.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
        {
          struct r_scope_elem **runp = imap->l_scope;
          size_t cnt = 0;

          while (*runp != NULL)
            {
              if (*runp == &new->l_searchlist)
                break;
              ++cnt;
              ++runp;
            }

          if (*runp != NULL)
            /* Avoid duplicates.  */
            continue;

          if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max))
            {
              /* The scope array is too small.  Allocate a new one
                 dynamically.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && imap->l_scope_max < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max * 2;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlopen", NULL,
                                      N_("cannot create scope list"));
                }

              memcpy (newp, imap->l_scope, cnt * sizeof (imap->l_scope[0]));
              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              if (old != imap->l_scope_mem)
                _dl_scope_free (old);

              imap->l_scope_max = new_size;
            }

          /* First terminate the extended list.  Otherwise a thread
             might use the new last element and then use the garbage
             at offset IDX+1.  */
          imap->l_scope[cnt + 1] = NULL;
          atomic_write_barrier ();
          imap->l_scope[cnt] = &new->l_searchlist;
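
          /* A concurrent scope walker that runs without the load lock thus
             always sees a NULL-terminated array: the terminator at CNT + 1
             becomes visible before the new element at CNT is published.  */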

          /* Print only new scope information.  */
          from_scope = cnt;
        }
      /* Only add TLS memory if this object is loaded now and
         therefore is not yet initialized.  */
      else if (! imap->l_init_called
               /* Only if the module defines thread local data.  */
               && __builtin_expect (imap->l_tls_blocksize > 0, 0))
        {
          /* Now that we know the object is loaded successfully add
             modules containing TLS data to the slot info table.  We
             might have to increase its size.  */
          _dl_add_to_slotinfo (imap);

          if (imap->l_need_tls_init
              && first_static_tls == new->l_searchlist.r_nlist)
            first_static_tls = i;

          /* We have to bump the generation counter.  */
          any_tls = true;
        }

      /* Print scope information.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
        _dl_show_scope (imap, from_scope);
    }

  /* Bump the generation number if necessary.  */
  if (any_tls && __builtin_expect (++GL(dl_tls_generation) == 0, 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));
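
  /* Context for the check above: the generation counter is bumped each
     time the set of TLS-using modules changes, and __tls_get_addr compares
     a thread's DTV generation against it.  If the counter wrapped to zero,
     stale DTVs would look up to date, so we abort instead.  */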

  /* We need a second pass for static tls data, because _dl_update_slotinfo
     must not be run while calls to _dl_add_to_slotinfo are still pending.  */
  for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      if (imap->l_need_tls_init
          && ! imap->l_init_called
          && imap->l_tls_blocksize > 0)
        {
          /* For static TLS we have to allocate the memory here and
             now, but we can delay updating the DTV.  */
          imap->l_need_tls_init = 0;
#ifdef SHARED
          /* Update the slot information data for at least the
             generation of the DSO we are allocating data for.  */
          _dl_update_slotinfo (imap->l_tls_modid);
#endif

          GL(dl_init_static_tls) (imap);
          assert (imap->l_need_tls_init == 0);
        }
    }

  /* Notify the debugger all new objects have been relocated.  */
  if (relocation_in_progress)
    LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);

#ifndef SHARED
  DL_STATIC_INIT (new);
#endif

  /* Run the initializer functions of new objects.  */
  _dl_init (new, args->argc, args->argv, args->env);

  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    /* Move the object into the global namespace.  */
    if (add_to_global (new) != 0)
      /* It failed.  */
      return;

#ifndef SHARED
  /* We must be the static _dl_open in libc.a.  A static program that
     has loaded a dynamic object now has competition.  */
  __libc_multiple_libcs = 1;
#endif

  /* Let the user know about the opencount.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                      new->l_name, new->l_ns, new->l_direct_opencount);
}

void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
          int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));
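
  /* Note: RTLD_BINDING_MASK covers RTLD_LAZY and RTLD_NOW, so a call such
     as dlopen (file, 0), which requests neither binding mode, is rejected
     here with EINVAL.  */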

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  if (__glibc_unlikely (nsid == LM_ID_NEWLM))
    {
      /* Find a new namespace.  */
      for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
        if (GL(dl_ns)[nsid]._ns_loaded == NULL)
          break;

      if (__glibc_unlikely (nsid == DL_NNS))
        {
          /* No more namespace available.  */
          __rtld_lock_unlock_recursive (GL(dl_load_lock));

          _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
        }
      else if (nsid == GL(dl_nns))
        {
          __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
          ++GL(dl_nns);
        }

      _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO in a namespace which is empty.  Such
     direct placement only causes problems.  Also don't allow loading
     into a namespace used for auditing.  */
  else if (__glibc_unlikely (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER)
           && (__glibc_unlikely (nsid < 0 || nsid >= GL(dl_nns))
               /* This prevents the [NSID] index expressions from being
                  evaluated, so the compiler won't think that we are
                  accessing an invalid index here in the !SHARED case where
                  DL_NNS is 1 and so any NSID != 0 is invalid.  */
               || DL_NNS == 1
               || GL(dl_ns)[nsid]._ns_nloaded == 0
               || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
                      N_("invalid target namespace in dlmopen()"));

  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.map = NULL;
  args.nsid = nsid;
  args.argc = argc;
  args.argv = argv;
  args.env = env;

  struct dl_exception exception;
  int errcode = _dl_catch_exception (&exception, dl_open_worker, &args);

#if defined USE_LDCONFIG && !defined MAP_COPY
  /* We must unmap the cache file.  */
  _dl_unload_cache ();
#endif

  /* See if an error occurred during loading.  */
  if (__glibc_unlikely (exception.errstring != NULL))
    {
      /* Remove the object from memory.  It may be in an inconsistent
         state if relocation failed, for example.  */
      if (args.map)
        {
          /* Maybe some of the modules which were loaded use TLS.
             Since it will be removed in the following _dl_close call
             we have to mark the dtv array as having gaps to fill the
             holes.  This is a pessimistic assumption which won't hurt
             if not true.  There is no need to do this when we are
             loading the auditing DSOs since TLS has not yet been set
             up.  */
          if ((mode & __RTLD_AUDIT) == 0)
            GL(dl_tls_dtv_gaps) = true;

          _dl_close_worker (args.map, true);
        }

      assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Reraise the error.  */
      _dl_signal_exception (errcode, &exception, NULL);
    }

  assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

  return args.map;
}
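
/* Illustrative only: how this entry point is reached through the public
   API (error handling elided):

     #include <dlfcn.h>

     void *h = dlopen ("libm.so.6", RTLD_NOW | RTLD_GLOBAL);
     double (*cosine) (double) = (double (*) (double)) dlsym (h, "cos");
     dlclose (h);

   dlopen passes its caller's return address down as CALLER_DLOPEN, which
   dl_open_worker uses to choose the namespace and the RUNPATH/RPATH
   search list.  */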

void
_dl_show_scope (struct link_map *l, int from)
{
  _dl_debug_printf ("object=%s [%lu]\n",
                    DSO_FILENAME (l->l_name), l->l_ns);
  if (l->l_scope != NULL)
    for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
      {
        _dl_debug_printf (" scope %u:", scope_cnt);

        for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
          if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
            _dl_debug_printf_c (" %s",
                                l->l_scope[scope_cnt]->r_list[cnt]->l_name);
          else
            _dl_debug_printf_c (" %s", RTLD_PROGNAME);

        _dl_debug_printf_c ("\n");
      }
  else
    _dl_debug_printf (" no scope\n");
  _dl_debug_printf ("\n");
}