/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2007, 2009, 2010 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <errno.h>
#include <libintl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>		/* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sysdep-cancel.h>
extern ElfW(Addr) _dl_sysdep_start (void **start_argptr,
				    void (*dl_main) (const ElfW(Phdr) *phdr,
						     ElfW(Word) phnum,
						     ElfW(Addr) *user_entry,
						     ElfW(auxv_t) *auxv));
weak_extern (BP_SYM (_dl_sysdep_start))
extern int __libc_multiple_libcs;	/* Defined in init-first.c.  */
/* Undefine the following for debugging.  */
/* #define SCOPE_DEBUG 1 */
#ifdef SCOPE_DEBUG
static void show_scope (struct link_map *new);
#endif
/* We must be careful not to leave ourselves in an inconsistent state.  Thus
   we catch any error and re-raise it after cleaning up.  */
struct dl_open_args
{
  const char *file;
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  /* This is the caller of _dl_open().  */
  const void *caller_dl_open;
  struct link_map *map;
  Lmid_t nsid;
  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};
static int
add_to_global (struct link_map *new)
{
  struct link_map **new_global;
  unsigned int to_add = 0;
  unsigned int cnt;

  /* Count the objects we have to put in the global scope.  */
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;
  /* The symbols of the new object and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before libc was
     loaded) the memory it uses is allocated by the malloc() stub in
     ld.so.  When we come here those functions are not used anymore.
     Instead the malloc() implementation of libc is used.  But this
     means the block from the main map cannot be used in a realloc()
     call.  Therefore we allocate a completely new array the first
     time we have to add something to the global scope.  */
  struct link_namespaces *ns = &GL(dl_ns)[new->l_ns];
  if (ns->_ns_global_scope_alloc == 0)
    {
      /* This is the first dynamic object given global scope.  */
      ns->_ns_global_scope_alloc
	= ns->_ns_main_searchlist->r_nlist + to_add + 8;
      new_global = (struct link_map **)
	malloc (ns->_ns_global_scope_alloc * sizeof (struct link_map *));
      if (new_global == NULL)
	{
	  ns->_ns_global_scope_alloc = 0;
	nomem:
	  _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
			    N_("cannot extend global scope"));
	  return 1;
	}
      /* Copy over the old entries.  */
      ns->_ns_main_searchlist->r_list
	= memcpy (new_global, ns->_ns_main_searchlist->r_list,
		  (ns->_ns_main_searchlist->r_nlist
		   * sizeof (struct link_map *)));
    }
  else if (ns->_ns_main_searchlist->r_nlist + to_add
	   > ns->_ns_global_scope_alloc)
    {
      /* We have to extend the existing array of link maps in the
	 main map.  */
      struct link_map **old_global
	= GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list;
      size_t new_nalloc = ((ns->_ns_global_scope_alloc + to_add) * 2);

      new_global = (struct link_map **)
	malloc (new_nalloc * sizeof (struct link_map *));
      if (new_global == NULL)
	goto nomem;

      memcpy (new_global, old_global,
	      ns->_ns_global_scope_alloc * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_nalloc;
      ns->_ns_main_searchlist->r_list = new_global;
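
      /* Other threads may still be iterating over the old array under
	 the GSCOPE protection used by the symbol-lookup code, so wait
	 for them before the old array is retired.  */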
      if (!RTLD_SINGLE_THREAD_P)
	THREAD_GSCOPE_WAIT ();

      free (old_global);
    }
  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
	{
	  map->l_global = 1;
	  ns->_ns_main_searchlist->r_list[new_nlist++] = map;
	}
    }
  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;

  return 0;
}
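
/* The body of dlopen.  dl_open_worker runs under _dl_catch_error (invoked
   from _dl_open below), so an error signalled here is caught and re-raised
   only after _dl_open has cleaned up and released the load lock.  */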
static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;
  /* Check whether _dl_open() has been called from a valid DSO.  */
  if (__check_caller (args->caller_dl_open,
		      allow_libc|allow_libdl|allow_ldso) != 0)
    _dl_signal_error (0, "dlopen", NULL, N_("invalid caller"));
  /* Determine the caller's map if necessary.  This is needed in case
     we have a DST, when we don't know the namespace ID we have to put
     the new object in, or when the file name has no path in which
     case we need to look along the RUNPATH/RPATH of the caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

      /* We have to find out from which object the caller is calling.
	 By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

      struct link_map *l;
      for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
	for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
	  if (caller_dlopen >= (const void *) l->l_map_start
	      && caller_dlopen < (const void *) l->l_map_end
	      && (l->l_contiguous
		  || _dl_addr_inside_object (l, (ElfW(Addr)) caller_dlopen)))
	    {
	      assert (ns == l->l_ns);
	      call_map = l;
	      goto found_caller;
	    }

    found_caller:
      if (args->nsid == __LM_ID_CALLER)
	{
	  /* In statically linked apps there might be no loaded object.  */
	  if (call_map == NULL)
	    args->nsid = LM_ID_BASE;
	  else
	    args->nsid = call_map->l_ns;
	}
    }
  assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);
  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
				    mode | __RTLD_CALLMAP, args->nsid);
  /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
     set and the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }
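
  /* In that case args->map stays NULL, so dlopen reports the RTLD_NOLOAD
     probe by returning a null handle without signalling an error.  */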
  if (__builtin_expect (mode & __RTLD_SPROF, 0))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;
  /* It was already open.  */
  if (__builtin_expect (new->l_searchlist.r_list != NULL, 0))
    {
      /* Let the user know about the opencount.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
	_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
			  new->l_name, new->l_ns, new->l_direct_opencount);

      /* If the user requested the object to be in the global namespace
	 but it is not so far, add it now.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
	(void) add_to_global (new);

      assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);

      return;
    }
  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
		       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));
  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real,
				     0, 0);
#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  if (__builtin_expect (GLRO(dl_naudit) > 0, 0))
    {
      struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
	{
	  struct audit_ifaces *afct = GLRO(dl_audit);
	  for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
	    {
	      if (afct->activity != NULL)
		afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

	      afct = afct->next;
	    }
	}
    }
#endif
  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_initialize (0, args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
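  /* _dl_debug_state is an empty function on which debuggers set a
     breakpoint; calling it here tells them that the list of loaded
     objects has changed and is consistent again.  */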
  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;
  /* Relocate the objects loaded.  We do this in reverse order so that copy
     relocs of earlier objects overwrite the data written by later objects.  */

  struct link_map *l = new;
  while (l->l_next)
    l = l->l_next;
  while (1)
    {
      if (! l->l_real->l_relocated)
	{
#ifdef SHARED
	  if (__builtin_expect (GLRO(dl_profile) != NULL, 0))
	    {
	      /* If this is the shared object which we want to profile, make
		 sure the profile is started.  We can find out whether this
		 is necessary or not by observing the `_dl_profile_map'
		 variable.  If it was NULL but is not NULL afterwards we
		 must start the profiling.  */
	      struct link_map *old_profile_map = GL(dl_profile_map);

	      _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

	      if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
		{
		  /* We must prepare the profiling.  */
		  _dl_start_profile ();

		  /* Prevent unloading the object.  */
		  GL(dl_profile_map)->l_flags_1 |= DF_1_NODELETE;
		}
	    }
	  else
#endif
	    _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
	}

      if (l == new)
	break;
      l = l->l_prev;
    }
  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
  bool any_tls = false;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* If the initializer has been called already, the object has
	 not been loaded here and now.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
	{
	  struct r_scope_elem **runp = imap->l_scope;
	  size_t cnt = 0;

	  while (*runp != NULL)
	    {
	      if (*runp == &new->l_searchlist)
		break;
	      ++cnt;
	      ++runp;
	    }

	  if (*runp != NULL)
	    /* Avoid duplicates.  */
	    continue;

	  if (__builtin_expect (cnt + 1 >= imap->l_scope_max, 0))
	    {
	      /* The 'r_scope' array is too small.  Allocate a new one
		 dynamically.  */
	      size_t new_size;
	      struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

	      if (imap->l_scope != imap->l_scope_mem
		  && imap->l_scope_max < SCOPE_ELEMS (imap))
		{
		  new_size = SCOPE_ELEMS (imap);
		  newp = imap->l_scope_mem;
		}
	      else
		{
		  new_size = imap->l_scope_max * 2;
		  newp = (struct r_scope_elem **)
		    malloc (new_size * sizeof (struct r_scope_elem *));
		  if (newp == NULL)
		    _dl_signal_error (ENOMEM, "dlopen", NULL,
				      N_("cannot create scope list"));
		}

	      memcpy (newp, imap->l_scope, cnt * sizeof (imap->l_scope[0]));
	      struct r_scope_elem **old = imap->l_scope;

	      imap->l_scope = newp;
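
	      /* Old scope arrays are released through _dl_scope_free,
		 which defers the actual free until no other thread can
		 still be walking the old list.  */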
	      if (old != imap->l_scope_mem)
		_dl_scope_free (old);
	      imap->l_scope_max = new_size;
	    }
	  /* First terminate the extended list.  Otherwise a thread
	     might use the new last element and then use the garbage
	     at offset IDX+1.  */
	  imap->l_scope[cnt + 1] = NULL;
	  atomic_write_barrier ();
	  imap->l_scope[cnt] = &new->l_searchlist;
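
	  /* The write barrier keeps the new NULL terminator visible to
	     other threads before the new entry itself shows up, so a
	     concurrent scope walk never runs past the end of the list.  */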
	}
      /* Only add TLS memory if this object is loaded now and
	 therefore is not yet initialized.  */
      else if (! imap->l_init_called
	       /* Only if the module defines thread local data.  */
	       && __builtin_expect (imap->l_tls_blocksize > 0, 0))
	{
	  /* Now that we know the object is loaded successfully add
	     modules containing TLS data to the slot info table.  We
	     might have to increase its size.  */
	  _dl_add_to_slotinfo (imap);

	  if (imap->l_need_tls_init)
	    {
	      /* For static TLS we have to allocate the memory here
		 and now.  This includes allocating memory in the DTV.
		 But we cannot change any DTV other than our own.  So,
		 if we cannot guarantee that there is room in the DTV
		 we don't even try it and fail the load.

		 XXX We could track the minimum DTV slots allocated in
		 all threads.  */
	      if (! RTLD_SINGLE_THREAD_P && imap->l_tls_modid > DTV_SURPLUS)
		_dl_signal_error (0, "dlopen", NULL, N_("\
cannot load any more object with static TLS"));

	      imap->l_need_tls_init = 0;
#ifdef SHARED
	      /* Update the slot information data for at least the
		 generation of the DSO we are allocating data for.  */
	      _dl_update_slotinfo (imap->l_tls_modid);
#endif

	      GL(dl_init_static_tls) (imap);
	      assert (imap->l_need_tls_init == 0);
	    }

	  /* We have to bump the generation counter.  */
	  any_tls = true;
	}
    }
  /* Bump the generation number if necessary.  */
  if (any_tls && __builtin_expect (++GL(dl_tls_generation) == 0, 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));
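
  /* The generation counter is how __tls_get_addr notices new TLS modules:
     a thread whose DTV belongs to an older generation updates it before
     the next TLS access.  */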
  /* Run the initializer functions of new objects.  */
  _dl_init (new, args->argc, args->argv, args->env);
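
  /* _dl_init runs the ELF constructors (DT_INIT and DT_INIT_ARRAY) of all
     newly loaded objects that have not been initialized yet, dependencies
     before the objects depending on them.  */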
  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    /* Move the object in the global namespace.  */
    if (add_to_global (new) != 0)
      /* It failed.  */
      return;
  /* Mark the object as not deletable if the RTLD_NODELETE flag was
     passed.  */
  if (__builtin_expect (mode & RTLD_NODELETE, 0))
    new->l_flags_1 |= DF_1_NODELETE;
#ifndef SHARED
  /* We must be the static _dl_open in libc.a.  A static program that
     has loaded a dynamic object now has competition.  */
  __libc_multiple_libcs = 1;
#endif
  /* Let the user know about the opencount.  */
  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
		      new->l_name, new->l_ns, new->l_direct_opencount);
}
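
/* Entry point behind dlopen and dlmopen.  CALLER_DLOPEN is the return
   address of the user's call, NSID the requested namespace (LM_ID_NEWLM
   asks for a fresh one), and the returned value is the link map of the
   newly opened object.  */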
void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
	  int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));
  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));
  if (__builtin_expect (nsid == LM_ID_NEWLM, 0))
    {
      /* Find a new namespace.  */
      for (nsid = 1; nsid < GL(dl_nns); ++nsid)
	if (GL(dl_ns)[nsid]._ns_loaded == NULL)
	  break;

      if (__builtin_expect (nsid == DL_NNS, 0))
	{
	  /* No more namespaces available.  */
	  __rtld_lock_unlock_recursive (GL(dl_load_lock));

	  _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
	}
      if (nsid == GL(dl_nns))
	{
	  __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
	  ++GL(dl_nns);
	}

      _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO in a namespace which is empty.  Such
     direct placement only causes problems.  Also don't allow loading
     into a namespace used for auditing.  */
  else if (__builtin_expect (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER, 0)
	   && (GL(dl_ns)[nsid]._ns_nloaded == 0
	       || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
		      N_("invalid target namespace in dlmopen()"));
#ifndef SHARED
  else if ((nsid == LM_ID_BASE || nsid == __LM_ID_CALLER)
	   && GL(dl_ns)[LM_ID_BASE]._ns_loaded == NULL
	   && GL(dl_rtld_map).l_ns == 0)
    GL(dl_nns) = 1;
#endif
  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.caller_dl_open = RETURN_ADDRESS (0);
  args.map = NULL;
  args.nsid = nsid;
  args.argc = argc;
  args.argv = argv;
  args.env = env;

  const char *objname;
  const char *errstring;
  bool malloced;
  int errcode = _dl_catch_error (&objname, &errstring, &malloced,
				 dl_open_worker, &args);
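
  /* If dl_open_worker signalled an error, _dl_catch_error has returned the
     error code and stored the message in ERRSTRING (MALLOCED tells whether
     that string was allocated with malloc); otherwise ERRSTRING is NULL.  */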
#ifndef MAP_COPY
  /* We must munmap() the cache file.  */
  _dl_unload_cache ();
#endif
  /* See if an error occurred during loading.  */
  if (__builtin_expect (errstring != NULL, 0))
    {
      /* Remove the object from memory.  It may be in an inconsistent
	 state if relocation failed, for example.  */
      if (args.map)
	{
	  /* Maybe some of the modules which were loaded use TLS.
	     Since it will be removed in the following _dl_close call
	     we have to mark the dtv array as having gaps to fill the
	     holes.  This is a pessimistic assumption which won't hurt
	     if not true.  There is no need to do this when we are
	     loading the auditing DSOs since TLS has not yet been set
	     up.  */
	  if ((mode & __RTLD_AUDIT) == 0)
	    GL(dl_tls_dtv_gaps) = true;

	  _dl_close_worker (args.map);
	}
      assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);
      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      /* Make a local copy of the error string so that we can release the
	 memory allocated for it.  */
      size_t len_errstring = strlen (errstring) + 1;
      char *local_errstring;
      if (objname == errstring + len_errstring)
	{
	  size_t total_len = len_errstring + strlen (objname) + 1;
	  local_errstring = alloca (total_len);
	  memcpy (local_errstring, errstring, total_len);
	  objname = local_errstring + len_errstring;
	}
      else
	{
	  local_errstring = alloca (len_errstring);
	  memcpy (local_errstring, errstring, len_errstring);
	}

      if (malloced)
	free ((char *) errstring);
      /* Reraise the error.  */
      _dl_signal_error (errcode, objname, NULL, local_errstring);
    }
  assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);
  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));
#ifndef SHARED
  DL_STATIC_INIT (args.map);
#endif

  return args.map;
}
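
/* Debugging helper: print every scope list of the given map.  It is only
   compiled in when SCOPE_DEBUG is defined at the top of this file.  */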
#ifdef SCOPE_DEBUG
static void
show_scope (struct link_map *new)
{
  int scope_cnt;

  for (scope_cnt = 0; new->l_scope[scope_cnt] != NULL; ++scope_cnt)
    {
      char numbuf[2];
      unsigned int cnt;

      numbuf[0] = '0' + scope_cnt;
      numbuf[1] = '\0';
      _dl_printf ("scope %s:", numbuf);

      for (cnt = 0; cnt < new->l_scope[scope_cnt]->r_nlist; ++cnt)
	if (*new->l_scope[scope_cnt]->r_list[cnt]->l_name)
	  _dl_printf (" %s", new->l_scope[scope_cnt]->r_list[cnt]->l_name);
	else
	  _dl_printf (" <main>");

      _dl_printf ("\n");
    }
}
#endif
/* Return non-zero if ADDR lies within one of L's segments.  */
int
internal_function
_dl_addr_inside_object (struct link_map *l, const ElfW(Addr) addr)
{
  int n = l->l_phnum;
  const ElfW(Addr) reladdr = addr - l->l_addr;

  while (--n >= 0)
    if (l->l_phdr[n].p_type == PT_LOAD
	&& reladdr - l->l_phdr[n].p_vaddr >= 0
	&& reladdr - l->l_phdr[n].p_vaddr < l->l_phdr[n].p_memsz)
      return 1;

  return 0;
}