glibc.git: elf/dl-open.c

/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2007, 2009, 2010 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>		/* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <bp-sym.h>
#include <caller.h>
#include <sysdep-cancel.h>
#include <tls.h>

#include <dl-dst.h>

extern ElfW(Addr) _dl_sysdep_start (void **start_argptr,
				    void (*dl_main) (const ElfW(Phdr) *phdr,
						     ElfW(Word) phnum,
						     ElfW(Addr) *user_entry,
						     ElfW(auxv_t) *auxv));
weak_extern (BP_SYM (_dl_sysdep_start))

extern int __libc_multiple_libcs;	/* Defined in init-first.c.  */

/* Undefine the following for debugging.  */
/* #define SCOPE_DEBUG 1 */
#ifdef SCOPE_DEBUG
static void show_scope (struct link_map *new);
#endif

/* We must be careful not to leave us in an inconsistent state.  Thus we
   catch any error and re-raise it after cleaning up.  */

struct dl_open_args
{
  const char *file;
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  /* This is the caller of _dl_open().  */
  const void *caller_dl_open;
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;
  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};
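
/* The structure above is filled in by _dl_open at the bottom of this file
   and handed to dl_open_worker through _dl_catch_error, so that any error
   raised while loading can be caught, the partially loaded object removed
   again, and the error re-raised to the caller.  */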

static int
add_to_global (struct link_map *new)
{
  struct link_map **new_global;
  unsigned int to_add = 0;
  unsigned int cnt;

  /* Count the objects we have to put in the global scope.  */
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;

  /* The symbols of the new object and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before the libc
     is loaded) the memory it uses is allocated by the malloc()-stub
     in the ld.so.  When we come here these functions are not used
     anymore.  Instead the malloc() implementation of the libc is
     used.  But this means the block from the main map cannot be used
     in a realloc() call.  Therefore we allocate a completely new
     array the first time we have to add something to the global scope.  */

  struct link_namespaces *ns = &GL(dl_ns)[new->l_ns];
  if (ns->_ns_global_scope_alloc == 0)
    {
      /* This is the first dynamic object given global scope.  */
      ns->_ns_global_scope_alloc
	= ns->_ns_main_searchlist->r_nlist + to_add + 8;
      new_global = (struct link_map **)
	malloc (ns->_ns_global_scope_alloc * sizeof (struct link_map *));
      if (new_global == NULL)
	{
	  ns->_ns_global_scope_alloc = 0;
	nomem:
	  _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
			    N_("cannot extend global scope"));
	  return 1;
	}

      /* Copy over the old entries.  */
      ns->_ns_main_searchlist->r_list
	= memcpy (new_global, ns->_ns_main_searchlist->r_list,
		  (ns->_ns_main_searchlist->r_nlist
		   * sizeof (struct link_map *)));
    }
  else if (ns->_ns_main_searchlist->r_nlist + to_add
	   > ns->_ns_global_scope_alloc)
    {
      /* We have to extend the existing array of link maps in the
	 main map.  */
      struct link_map **old_global
	= GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list;
      size_t new_nalloc = ((ns->_ns_global_scope_alloc + to_add) * 2);

      new_global = (struct link_map **)
	malloc (new_nalloc * sizeof (struct link_map *));
      if (new_global == NULL)
	goto nomem;

      memcpy (new_global, old_global,
	      ns->_ns_global_scope_alloc * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_nalloc;
      ns->_ns_main_searchlist->r_list = new_global;

      if (!RTLD_SINGLE_THREAD_P)
	THREAD_GSCOPE_WAIT ();

      free (old_global);
    }

  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
	{
	  map->l_global = 1;
	  ns->_ns_main_searchlist->r_list[new_nlist++] = map;
	}
    }
  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;

  return 0;
}
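
/* Illustrative sketch (not part of the original file): the user-visible
   effect of add_to_global.  After a dlopen call with RTLD_GLOBAL the new
   object's symbols participate in RTLD_DEFAULT lookups; the library and
   symbol names below are made up.

     #define _GNU_SOURCE
     #include <dlfcn.h>

     void *h = dlopen ("libexample.so", RTLD_NOW | RTLD_GLOBAL);
     if (h != NULL)
       {
         typedef void (*fn_t) (void);
         fn_t fn = (fn_t) dlsym (RTLD_DEFAULT, "example_fn");
         if (fn != NULL)
           fn ();
       }
*/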

static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;

  /* Check whether _dl_open() has been called from a valid DSO.  */
  if (__check_caller (args->caller_dl_open,
		      allow_libc|allow_libdl|allow_ldso) != 0)
    _dl_signal_error (0, "dlopen", NULL, N_("invalid caller"));

  /* Determine the caller's map if necessary.  This is needed in case
     we have a DST, when we don't know the namespace ID we have to put
     the new object in, or when the file name has no path in which
     case we need to look along the RUNPATH/RPATH of the caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

      /* We have to find out from which object the caller is calling.
	 By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

      struct link_map *l;
      for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
	for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
	  if (caller_dlopen >= (const void *) l->l_map_start
	      && caller_dlopen < (const void *) l->l_map_end
	      && (l->l_contiguous
		  || _dl_addr_inside_object (l, (ElfW(Addr)) caller_dlopen)))
	    {
	      assert (ns == l->l_ns);
	      call_map = l;
	      goto found_caller;
	    }

    found_caller:
      if (args->nsid == __LM_ID_CALLER)
	{
#ifndef SHARED
	  /* In statically linked apps there might be no loaded object.  */
	  if (call_map == NULL)
	    args->nsid = LM_ID_BASE;
	  else
#endif
	    args->nsid = call_map->l_ns;
	}
    }

  assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);

  /* Maybe we have to expand a DST.  */
  if (__builtin_expect (dst != NULL, 0))
    {
      size_t len = strlen (file);

      /* Determine how much space we need.  We have to allocate the
	 memory locally.  */
      size_t required = DL_DST_REQUIRED (call_map, file, len,
					 _dl_dst_count (dst, 0));

      /* Get space for the new file name.  */
      char *new_file = (char *) alloca (required + 1);

      /* Generate the new file name.  */
      _dl_dst_substitute (call_map, file, new_file, 0);

      /* If the substitution failed don't try to load.  */
      if (*new_file == '\0')
	_dl_signal_error (0, "dlopen", NULL,
			  N_("empty dynamic string token substitution"));

      /* Now we have a new file name.  */
      file = new_file;

      /* It does not matter whether call_map is set even if we
	 computed it only because of the DST.  Since the path contains
	 a slash the value is not used.  See dl-load.c.  */
    }
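
  /* Illustrative note: the dynamic string tokens handled here are $ORIGIN
     (the directory of the object making the request), $LIB and $PLATFORM.
     A hypothetical "$ORIGIN/../lib/libdep.so" passed in by a plugin is
     therefore resolved relative to the plugin's own directory, which is
     why the caller's link map had to be determined above.  */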

  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, 0, lt_loaded, 0,
				    mode | __RTLD_CALLMAP, args->nsid);

  /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
     set and the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }

  if (__builtin_expect (mode & __RTLD_SPROF, 0))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;

  /* It was already open.  */
  if (__builtin_expect (new->l_searchlist.r_list != NULL, 0))
    {
      /* Let the user know about the opencount.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
	_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
			  new->l_name, new->l_ns, new->l_direct_opencount);

      /* If the user requested the object to be in the global namespace
	 but it is not so far, add it now.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
	(void) add_to_global (new);

      assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);

      return;
    }

  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
		       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));

  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real,
				     0, 0);

#ifdef SCOPE_DEBUG
  show_scope (new);
#endif

#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  if (__builtin_expect (GLRO(dl_naudit) > 0, 0))
    {
      struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
	{
	  struct audit_ifaces *afct = GLRO(dl_audit);
	  for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
	    {
	      if (afct->activity != NULL)
		afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

	      afct = afct->next;
	    }
	}
    }
#endif

  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_initialize (0, args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();

  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;

  /* Relocate the objects loaded.  We do this in reverse order so that copy
     relocs of earlier objects overwrite the data written by later objects.  */

  struct link_map *l = new;
  while (l->l_next)
    l = l->l_next;
  while (1)
    {
      if (! l->l_real->l_relocated)
	{
#ifdef SHARED
	  if (__builtin_expect (GLRO(dl_profile) != NULL, 0))
	    {
	      /* If this here is the shared object which we want to profile
		 make sure the profile is started.  We can find out whether
		 this is necessary or not by observing the `_dl_profile_map'
		 variable.  If it was NULL but is not NULL afterwards we must
		 start the profiling.  */
	      struct link_map *old_profile_map = GL(dl_profile_map);

	      _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

	      if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
		{
		  /* We must prepare the profiling.  */
		  _dl_start_profile ();

		  /* Prevent unloading the object.  */
		  GL(dl_profile_map)->l_flags_1 |= DF_1_NODELETE;
		}
	    }
	  else
#endif
	    _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
	}

      if (l == new)
	break;
      l = l->l_prev;
    }

  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
  bool any_tls = false;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* If the initializer has been called already, the object has
	 not been loaded here and now.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
	{
	  struct r_scope_elem **runp = imap->l_scope;
	  size_t cnt = 0;

	  while (*runp != NULL)
	    {
	      if (*runp == &new->l_searchlist)
		break;
	      ++cnt;
	      ++runp;
	    }

	  if (*runp != NULL)
	    /* Avoid duplicates.  */
	    continue;

	  if (__builtin_expect (cnt + 1 >= imap->l_scope_max, 0))
	    {
	      /* The 'r_scope' array is too small.  Allocate a new one
		 dynamically.  */
	      size_t new_size;
	      struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

	      if (imap->l_scope != imap->l_scope_mem
		  && imap->l_scope_max < SCOPE_ELEMS (imap))
		{
		  new_size = SCOPE_ELEMS (imap);
		  newp = imap->l_scope_mem;
		}
	      else
		{
		  new_size = imap->l_scope_max * 2;
		  newp = (struct r_scope_elem **)
		    malloc (new_size * sizeof (struct r_scope_elem *));
		  if (newp == NULL)
		    _dl_signal_error (ENOMEM, "dlopen", NULL,
				      N_("cannot create scope list"));
		}

	      memcpy (newp, imap->l_scope, cnt * sizeof (imap->l_scope[0]));
	      struct r_scope_elem **old = imap->l_scope;

	      imap->l_scope = newp;

	      if (old != imap->l_scope_mem)
		_dl_scope_free (old);

	      imap->l_scope_max = new_size;
	    }

	  /* First terminate the extended list.  Otherwise a thread
	     might use the new last element and then use the garbage
	     at offset IDX+1.  */
	  imap->l_scope[cnt + 1] = NULL;
	  atomic_write_barrier ();
	  imap->l_scope[cnt] = &new->l_searchlist;
	}
      /* Only add TLS memory if this object is loaded now and
	 therefore is not yet initialized.  */
      else if (! imap->l_init_called
	       /* Only if the module defines thread local data.  */
	       && __builtin_expect (imap->l_tls_blocksize > 0, 0))
	{
	  /* Now that we know the object is loaded successfully add
	     modules containing TLS data to the slot info table.  We
	     might have to increase its size.  */
	  _dl_add_to_slotinfo (imap);

	  if (imap->l_need_tls_init)
	    {
	      /* For static TLS we have to allocate the memory here
		 and now.  This includes allocating memory in the DTV.
		 But we cannot change any DTV other than our own.  So,
		 if we cannot guarantee that there is room in the DTV
		 we don't even try it and fail the load.

		 XXX We could track the minimum DTV slots allocated in
		 all threads.  */
	      if (! RTLD_SINGLE_THREAD_P && imap->l_tls_modid > DTV_SURPLUS)
		_dl_signal_error (0, "dlopen", NULL, N_("\
cannot load any more object with static TLS"));

	      imap->l_need_tls_init = 0;
#ifdef SHARED
	      /* Update the slot information data for at least the
		 generation of the DSO we are allocating data for.  */
	      _dl_update_slotinfo (imap->l_tls_modid);
#endif

	      GL(dl_init_static_tls) (imap);
	      assert (imap->l_need_tls_init == 0);
	    }

	  /* We have to bump the generation counter.  */
	  any_tls = true;
	}
    }

  /* Bump the generation number if necessary.  */
  if (any_tls && __builtin_expect (++GL(dl_tls_generation) == 0, 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));
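
  /* Other threads pick up the new generation lazily (e.g. from
     __tls_get_addr), which tells them to bring their DTV up to date with
     the slotinfo entries added above before touching TLS of the newly
     loaded modules.  */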

  /* Run the initializer functions of new objects.  */
  _dl_init (new, args->argc, args->argv, args->env);

  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    /* Move the object into the global namespace.  */
    if (add_to_global (new) != 0)
      /* It failed.  */
      return;

  /* Mark the object as not deletable if the RTLD_NODELETE flag was
     passed.  */
  if (__builtin_expect (mode & RTLD_NODELETE, 0))
    new->l_flags_1 |= DF_1_NODELETE;

#ifndef SHARED
  /* We must be the static _dl_open in libc.a.  A static program that
     has loaded a dynamic object now has competition.  */
  __libc_multiple_libcs = 1;
#endif

  /* Let the user know about the opencount.  */
  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
		      new->l_name, new->l_ns, new->l_direct_opencount);
}
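
/* _dl_open below is the backend for the public dlopen and dlmopen
   interfaces in libdl as well as for libc-internal loading
   (__libc_dlopen); the wrappers pass in their caller's return address
   so the checks in dl_open_worker can attribute the request to the
   right link map.  */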

void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
	  int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  if (__builtin_expect (nsid == LM_ID_NEWLM, 0))
    {
      /* Find a new namespace.  */
      for (nsid = 1; nsid < GL(dl_nns); ++nsid)
	if (GL(dl_ns)[nsid]._ns_loaded == NULL)
	  break;

      if (__builtin_expect (nsid == DL_NNS, 0))
	{
	  /* No more namespaces available.  */
	  __rtld_lock_unlock_recursive (GL(dl_load_lock));

	  _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
	}

      if (nsid == GL(dl_nns))
	{
	  __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
	  ++GL(dl_nns);
	}

      _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO in a namespace which is empty.  Such
     direct placement only causes problems.  Also don't allow loading
     into a namespace used for auditing.  */
  else if (__builtin_expect (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER, 0)
	   && (GL(dl_ns)[nsid]._ns_nloaded == 0
	       || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
		      N_("invalid target namespace in dlmopen()"));
#ifndef SHARED
  else if ((nsid == LM_ID_BASE || nsid == __LM_ID_CALLER)
	   && GL(dl_ns)[LM_ID_BASE]._ns_loaded == NULL
	   && GL(dl_nns) == 0)
    GL(dl_nns) = 1;
#endif

  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.caller_dl_open = RETURN_ADDRESS (0);
  args.map = NULL;
  args.nsid = nsid;
  args.argc = argc;
  args.argv = argv;
  args.env = env;

  const char *objname;
  const char *errstring;
  bool malloced;
  int errcode = _dl_catch_error (&objname, &errstring, &malloced,
				 dl_open_worker, &args);

#ifndef MAP_COPY
  /* We must munmap() the cache file.  */
  _dl_unload_cache ();
#endif

  /* See if an error occurred during loading.  */
  if (__builtin_expect (errstring != NULL, 0))
    {
      /* Remove the object from memory.  It may be in an inconsistent
	 state if relocation failed, for example.  */
      if (args.map)
	{
	  /* Maybe some of the modules which were loaded use TLS.
	     Since it will be removed in the following _dl_close call
	     we have to mark the dtv array as having gaps to fill the
	     holes.  This is a pessimistic assumption which won't hurt
	     if not true.  There is no need to do this when we are
	     loading the auditing DSOs since TLS has not yet been set
	     up.  */
	  if ((mode & __RTLD_AUDIT) == 0)
	    GL(dl_tls_dtv_gaps) = true;

	  _dl_close_worker (args.map);
	}

      assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Make a local copy of the error string so that we can release the
	 memory allocated for it.  */
      size_t len_errstring = strlen (errstring) + 1;
      char *local_errstring;
      if (objname == errstring + len_errstring)
	{
	  size_t total_len = len_errstring + strlen (objname) + 1;
	  local_errstring = alloca (total_len);
	  memcpy (local_errstring, errstring, total_len);
	  objname = local_errstring + len_errstring;
	}
      else
	{
	  local_errstring = alloca (len_errstring);
	  memcpy (local_errstring, errstring, len_errstring);
	}

      if (malloced)
	free ((char *) errstring);

      /* Reraise the error.  */
      _dl_signal_error (errcode, objname, NULL, local_errstring);
    }

  assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

#ifndef SHARED
  DL_STATIC_INIT (args.map);
#endif

  return args.map;
}
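
/* Illustrative sketch (not part of the original file): dlmopen with
   LM_ID_NEWLM exercises the namespace allocation above.  The library
   name is made up, error handling is abbreviated, and the program must
   be linked with -ldl.

     #define _GNU_SOURCE
     #include <dlfcn.h>
     #include <stdio.h>

     int
     main (void)
     {
       void *h = dlmopen (LM_ID_NEWLM, "libexample.so", RTLD_NOW);
       if (h == NULL)
         fprintf (stderr, "dlmopen: %s\n", dlerror ());
       else
         {
           Lmid_t lmid;
           if (dlinfo (h, RTLD_DI_LMID, &lmid) == 0)
             printf ("loaded into namespace %ld\n", (long) lmid);
           dlclose (h);
         }
       return 0;
     }
*/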

#ifdef SCOPE_DEBUG
#include <unistd.h>

static void
show_scope (struct link_map *new)
{
  int scope_cnt;

  for (scope_cnt = 0; new->l_scope[scope_cnt] != NULL; ++scope_cnt)
    {
      char numbuf[2];
      unsigned int cnt;

      numbuf[0] = '0' + scope_cnt;
      numbuf[1] = '\0';
      _dl_printf ("scope %s:", numbuf);

      for (cnt = 0; cnt < new->l_scope[scope_cnt]->r_nlist; ++cnt)
	if (*new->l_scope[scope_cnt]->r_list[cnt]->l_name)
	  _dl_printf (" %s", new->l_scope[scope_cnt]->r_list[cnt]->l_name);
	else
	  _dl_printf (" <main>");

      _dl_printf ("\n");
    }
}
#endif

#ifdef IS_IN_rtld
/* Return non-zero if ADDR lies within one of L's segments.  */
int
internal_function
_dl_addr_inside_object (struct link_map *l, const ElfW(Addr) addr)
{
  int n = l->l_phnum;
  const ElfW(Addr) reladdr = addr - l->l_addr;

  while (--n >= 0)
    if (l->l_phdr[n].p_type == PT_LOAD
	&& reladdr - l->l_phdr[n].p_vaddr >= 0
	&& reladdr - l->l_phdr[n].p_vaddr < l->l_phdr[n].p_memsz)
      return 1;
  return 0;
}
#endif
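
/* In other words, ADDR lies inside L if for some PT_LOAD segment
   l_addr + p_vaddr <= ADDR < l_addr + p_vaddr + p_memsz; subtracting
   l_addr simply moves the comparison into link-time coordinates.  Since
   reladdr and p_vaddr are unsigned, the ">= 0" test above is always true
   and the lower bound is really enforced by the unsigned wraparound in
   the "<" comparison.  */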