elf/dl-reloc.c
/* Relocate a shared object and resolve its references to other loaded objects.
   Copyright (C) 1995-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <atomic.h>
#include <errno.h>
#include <libintl.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <ldsodefs.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/types.h>
#include <_itoa.h>
#include "dynamic-link.h"
/* Statistics function.  */
#ifdef SHARED
# define bump_num_cache_relocations() ++GL(dl_num_cache_relocations)
#else
# define bump_num_cache_relocations() ((void) 0)
#endif
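
/* (Note: GL(dl_num_cache_relocations) feeds the relocation statistics
   reported for LD_DEBUG=statistics; each hit in the l_lookup_cache fast
   path of RESOLVE_MAP below bumps it.)  */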
/* We are trying to perform a static TLS relocation in MAP, but it was
   dynamically loaded.  This can only work if there is enough surplus in
   the static TLS area already allocated for each running thread.  If this
   object's TLS segment is too big to fit, we fail and return -1.  If it
   fits, we set MAP->l_tls_offset and return 0.
   Errors are reported by the caller, _dl_allocate_static_tls; static TLS
   for dynamically loaded objects should be rare, so the code handling it
   is deliberately kept out of line.  */
int
internal_function
_dl_try_allocate_static_tls (struct link_map *map)
{
  /* If we've already used the variable with dynamic access, or if the
     alignment requirements are too high, fail.  */
  if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
      || map->l_tls_align > GL(dl_tls_static_align))
    {
    fail:
      return -1;
    }
#if TLS_TCB_AT_TP
  size_t freebytes = GL(dl_tls_static_size) - GL(dl_tls_static_used);
  if (freebytes < TLS_TCB_SIZE)
    goto fail;
  freebytes -= TLS_TCB_SIZE;

  size_t blsize = map->l_tls_blocksize + map->l_tls_firstbyte_offset;
  if (freebytes < blsize)
    goto fail;

  size_t n = (freebytes - blsize) / map->l_tls_align;

  size_t offset = GL(dl_tls_static_used) + (freebytes - n * map->l_tls_align
                                            - map->l_tls_firstbyte_offset);
#elif TLS_DTV_AT_TP
  /* dl_tls_static_used includes the TCB at the beginning.  */
  size_t offset = (((GL(dl_tls_static_used)
                     - map->l_tls_firstbyte_offset
                     + map->l_tls_align - 1) & -map->l_tls_align)
                   + map->l_tls_firstbyte_offset);
  size_t used = offset + map->l_tls_blocksize;

  if (used > GL(dl_tls_static_size))
    goto fail;

#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
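
  /* Worked example for the TLS_DTV_AT_TP rounding above (illustrative
     numbers, not taken from any particular target): with
     dl_tls_static_used = 0x91, l_tls_firstbyte_offset = 4 and
     l_tls_align = 16, the expression rounds 0x91 - 4 = 0x8d up to 0x90
     and adds the first-byte offset back, giving offset = 0x94; the new
     high-water mark is then used = 0x94 + l_tls_blocksize.  */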
  /* We've computed the new value we want, now try to install it.  */
  ptrdiff_t val;
  if ((val = map->l_tls_offset) == NO_TLS_OFFSET)
    {
      /* l_tls_offset starts out at NO_TLS_OFFSET, and all attempts to
         change it go from NO_TLS_OFFSET to some other value.  We use
         compare_and_exchange to ensure only one attempt succeeds.  We
         don't actually need any memory ordering here, but _acq is the
         weakest available.  */
      (void) atomic_compare_and_exchange_bool_acq (&map->l_tls_offset,
                                                   offset,
                                                   NO_TLS_OFFSET);
      val = map->l_tls_offset;
      assert (val != NO_TLS_OFFSET);
    }
  if (val != offset)
    {
      /* We'd like to set a static offset for this section, but another
         thread has already used a dynamic TLS block for it.  Since we can
         only use static offsets if everyone does (and it's not practical
         to move that thread's dynamic block), we have to fail.  */
      goto fail;
    }
  /* We installed the value; now update the globals.  */
#if TLS_TCB_AT_TP
  GL(dl_tls_static_used) = offset;
#elif TLS_DTV_AT_TP
  map->l_tls_firstbyte_offset = GL(dl_tls_static_used);
  GL(dl_tls_static_used) = used;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
  /* If the object is not yet relocated we cannot initialize the
     static TLS region.  Delay it.  */
  if (map->l_real->l_relocated)
    {
#ifdef SHARED
      if (__builtin_expect (THREAD_DTV()[0].counter != GL(dl_tls_generation),
                            0))
        /* Update the slot information data for at least the generation of
           the DSO we are allocating data for.  */
        (void) _dl_update_slotinfo (map->l_tls_modid);
#endif

      GL(dl_init_static_tls) (map);
    }
  else
    map->l_need_tls_init = 1;

  return 0;
}
void
internal_function __attribute_noinline__
_dl_allocate_static_tls (struct link_map *map)
{
  /* We wrap this in a signal mask because it has to iterate all threads
     (including this one) and update this map's TLS entry.  A signal handler
     accessing TLS would try to do the same update and break.  */
  sigset_t old;
  _dl_mask_all_signals (&old);
  int err = -1;
  if (map->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
    err = _dl_try_allocate_static_tls (map);

  _dl_unmask_signals (&old);
  if (err != 0)
    {
      _dl_signal_error (0, map->l_name, NULL, N_("\
cannot allocate memory in static TLS block"));
    }
}
/* Initialize static TLS area and DTV for current (only) thread.
   libpthread implementations should provide their own hook
   to handle all threads.  */
void
_dl_nothread_init_static_tls (struct link_map *map)
{
#if TLS_TCB_AT_TP
  void *dest = (char *) THREAD_SELF - map->l_tls_offset;
#elif TLS_DTV_AT_TP
  void *dest = (char *) THREAD_SELF + map->l_tls_offset + TLS_PRE_TCB_SIZE;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
  /* Fill in the DTV slot so that a later LD/GD access will find it.  */
  dtv_t *dtv = THREAD_DTV ();
  assert (map->l_tls_modid <= dtv[-1].counter);
  dtv[map->l_tls_modid].pointer.val = dest;
  dtv[map->l_tls_modid].pointer.is_static = true;

  /* Initialize the memory.  */
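  /* (Illustrative note: __mempcpy returns dest + l_tls_initimage_size,
     so the memset below zeroes exactly the trailing l_tls_blocksize -
     l_tls_initimage_size bytes, i.e. the .tbss part of the block.)  */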
  memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
          '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
}
void
_dl_relocate_object (struct link_map *l, struct r_scope_elem *scope[],
                     int reloc_mode, int consider_profiling)
{
  struct textrels
  {
    caddr_t start;
    size_t len;
    int prot;
    struct textrels *next;
  } *textrels = NULL;
  /* Initialize it to make the compiler happy.  */
  const char *errstring = NULL;
  int lazy = reloc_mode & RTLD_LAZY;
  int skip_ifunc = reloc_mode & __RTLD_NOIFUNC;
#ifdef SHARED
  /* If we are auditing, install the same handlers we need for profiling.  */
  if ((reloc_mode & __RTLD_AUDIT) == 0)
    consider_profiling |= GLRO(dl_audit) != NULL;
#elif defined PROF
  /* Never use dynamic linker profiling for gprof profiling code.  */
# define consider_profiling 0
#endif
  if (l->l_relocated)
    return;

  /* If DT_BIND_NOW is set, relocate all references in this object.  We
     do not do this if we are profiling, of course.  */
  // XXX Correct for auditing?
  if (!consider_profiling
      && __builtin_expect (l->l_info[DT_BIND_NOW] != NULL, 0))
    lazy = 0;

  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_RELOC, 0))
    _dl_debug_printf ("\nrelocation processing: %s%s\n",
                      DSO_FILENAME (l->l_name), lazy ? " (lazy)" : "");
  /* DT_TEXTREL is now in level 2 and might be phased out at some point.
     But since we rewrite the DT_FLAGS entry into a DT_TEXTREL entry to
     make testing easier, it will stay available at all times.  */
  if (__builtin_expect (l->l_info[DT_TEXTREL] != NULL, 0))
    {
      /* Bletch.  We must make read-only segments writable
         long enough to relocate them.  */
      const ElfW(Phdr) *ph;
      for (ph = l->l_phdr; ph < &l->l_phdr[l->l_phnum]; ++ph)
        if (ph->p_type == PT_LOAD && (ph->p_flags & PF_W) == 0)
          {
            struct textrels *newp;

            newp = (struct textrels *) alloca (sizeof (*newp));
            newp->len = (((ph->p_vaddr + ph->p_memsz + GLRO(dl_pagesize) - 1)
                          & ~(GLRO(dl_pagesize) - 1))
                         - (ph->p_vaddr & ~(GLRO(dl_pagesize) - 1)));
            newp->start = ((ph->p_vaddr & ~(GLRO(dl_pagesize) - 1))
                           + (caddr_t) l->l_addr);
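            /* Illustrative numbers, assuming 4 KiB pages: for a segment
               with p_vaddr = 0x1234 and p_memsz = 0x2000, the end 0x3234
               rounds up to 0x4000 and the start down to 0x1000, so
               start = l_addr + 0x1000 and len = 0x3000, i.e. every page
               the segment touches.  */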
            if (__mprotect (newp->start, newp->len, PROT_READ|PROT_WRITE) < 0)
              {
                errstring = N_("cannot make segment writable for relocation");
              call_error:
                _dl_signal_error (errno, l->l_name, NULL, errstring);
              }
#if (PF_R | PF_W | PF_X) == 7 && (PROT_READ | PROT_WRITE | PROT_EXEC) == 7
            newp->prot = (PF_TO_PROT
                          >> ((ph->p_flags & (PF_R | PF_W | PF_X)) * 4)) & 0xf;
#else
            newp->prot = 0;
            if (ph->p_flags & PF_R)
              newp->prot |= PROT_READ;
            if (ph->p_flags & PF_W)
              newp->prot |= PROT_WRITE;
            if (ph->p_flags & PF_X)
              newp->prot |= PROT_EXEC;
#endif
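            /* Illustrative: PF_TO_PROT (defined elsewhere in the
               dynamic linker) packs a PROT_* value into each 4-bit
               nibble, indexed by the PF_* bit pattern.  For a typical
               text segment with p_flags = PF_R | PF_X = 5, the shift is
               5 * 4 = 20 and the extracted nibble is
               PROT_READ | PROT_EXEC.  */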
            newp->next = textrels;
            textrels = newp;
          }
    }
  {
    /* Do the actual relocation of the object's GOT and other data.  */

    /* String table object symbols.  */
    const char *strtab = (const void *) D_PTR (l, l_info[DT_STRTAB]);
    /* This macro is used as a callback from the ELF_DYNAMIC_RELOCATE code.  */
#define RESOLVE_MAP(ref, version, r_type) \
    (ELFW(ST_BIND) ((*ref)->st_info) != STB_LOCAL \
     ? ((__builtin_expect ((*ref) == l->l_lookup_cache.sym, 0) \
         && elf_machine_type_class (r_type) == l->l_lookup_cache.type_class) \
        ? (bump_num_cache_relocations (), \
           (*ref) = l->l_lookup_cache.ret, \
           l->l_lookup_cache.value) \
        : ({ lookup_t _lr; \
             int _tc = elf_machine_type_class (r_type); \
             l->l_lookup_cache.type_class = _tc; \
             l->l_lookup_cache.sym = (*ref); \
             const struct r_found_version *v = NULL; \
             if ((version) != NULL && (version)->hash != 0) \
               v = (version); \
             _lr = _dl_lookup_symbol_x (strtab + (*ref)->st_name, l, (ref), \
                                        scope, v, _tc, \
                                        DL_LOOKUP_ADD_DEPENDENCY, NULL); \
             l->l_lookup_cache.ret = (*ref); \
             l->l_lookup_cache.value = _lr; })) \
     : l)
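
    /* Rough sketch of how the architecture's elf_machine_rela (pulled
       in via dynamic-link.h) consumes RESOLVE_MAP; illustrative and
       simplified, not the literal macro expansion:

           const ElfW(Sym) *sym = &symtab[ELFW(R_SYM) (reloc->r_info)];
           struct link_map *sym_map
             = RESOLVE_MAP (&sym, version, ELFW(R_TYPE) (reloc->r_info));
           ElfW(Addr) value = sym == NULL ? 0
                              : sym_map->l_addr + sym->st_value;

       Local symbols thus bind to L itself, and a run of relocations
       against one symbol hits l_lookup_cache instead of repeating the
       _dl_lookup_symbol_x walk over SCOPE.  */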
#include "dynamic-link.h"

    ELF_DYNAMIC_RELOCATE (l, lazy, consider_profiling, skip_ifunc);
#ifndef PROF
    if (__builtin_expect (consider_profiling, 0))
      {
        /* Allocate the array which will contain the already found
           relocations.  If the shared object lacks a PLT (for example
           if it only contains leaf functions) the l_info[DT_PLTRELSZ]
           will be NULL.  */
        if (l->l_info[DT_PLTRELSZ] == NULL)
          {
            errstring = N_("%s: no PLTREL found in object %s\n");
          fatal:
            _dl_fatal_printf (errstring,
                              RTLD_PROGNAME,
                              l->l_name);
          }

        l->l_reloc_result = calloc (sizeof (l->l_reloc_result[0]),
                                    l->l_info[DT_PLTRELSZ]->d_un.d_val);
        if (l->l_reloc_result == NULL)
          {
            errstring = N_("\
%s: out of memory to store relocation results for %s\n");
            goto fatal;
          }
      }
#endif
  }
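
  /* (Note: l_reloc_result is consumed by _dl_profile_fixup in
     dl-runtime.c, which caches each resolved PLT entry there so that
     profiling and audit hooks see a stable resolution.)  */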
  /* Mark the object so we know this work has been done.  */
  l->l_relocated = 1;
  /* Undo the segment protection changes.  */
  while (__builtin_expect (textrels != NULL, 0))
    {
      if (__mprotect (textrels->start, textrels->len, textrels->prot) < 0)
        {
          errstring = N_("cannot restore segment prot after reloc");
          goto call_error;
        }

#ifdef CLEAR_CACHE
      CLEAR_CACHE (textrels->start, textrels->start + textrels->len);
#endif

      textrels = textrels->next;
    }

  /* In case we can protect the data now that the relocations are
     done, do it.  */
  if (l->l_relro_size != 0)
    _dl_protect_relro (l);
}
void internal_function
_dl_protect_relro (struct link_map *l)
{
  ElfW(Addr) start = ((l->l_addr + l->l_relro_addr)
                      & ~(GLRO(dl_pagesize) - 1));
  ElfW(Addr) end = ((l->l_addr + l->l_relro_addr + l->l_relro_size)
                    & ~(GLRO(dl_pagesize) - 1));
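
  /* Illustrative, assuming 4 KiB pages and a page-aligned l_addr: both
     boundaries round down, so a relro region spanning [l_addr + 0x2000,
     l_addr + 0x2c00) yields start == end == l_addr + 0x2000 and nothing
     gets protected; this is why the link editor pads PT_GNU_RELRO to
     end exactly on a page boundary.  */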
  if (start != end
      && __mprotect ((void *) start, end - start, PROT_READ) < 0)
    {
      static const char errstring[] = N_("\
cannot apply additional memory protection after relocation");
      _dl_signal_error (errno, l->l_name, NULL, errstring);
    }
}
void
internal_function __attribute_noinline__
_dl_reloc_bad_type (struct link_map *map, unsigned int type, int plt)
{
#define DIGIT(b)  _itoa_lower_digits[(b) & 0xf];

  /* XXX We cannot translate these messages.  */
  static const char msg[2][32
#if __ELF_NATIVE_CLASS == 64
                           + 6
#endif
  ] = { "unexpected reloc type 0x",
        "unexpected PLT reloc type 0x" };
  char msgbuf[sizeof (msg[0])];
  char *cp;
  cp = __stpcpy (msgbuf, msg[plt]);
#if __ELF_NATIVE_CLASS == 64
  if (__builtin_expect (type > 0xff, 0))
    {
      *cp++ = DIGIT (type >> 28);
      *cp++ = DIGIT (type >> 24);
      *cp++ = DIGIT (type >> 20);
      *cp++ = DIGIT (type >> 16);
      *cp++ = DIGIT (type >> 12);
      *cp++ = DIGIT (type >> 8);
    }
#endif
  *cp++ = DIGIT (type >> 4);
  *cp++ = DIGIT (type);
  *cp = '\0';
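
  /* Illustrative: type = 0x25 on a 32-bit target produces
     "unexpected reloc type 0x25"; on a 64-bit target type = 0x12345
     first emits the high nibbles "000123" and then "45", giving
     "...0x00012345".  */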
  _dl_signal_error (0, map->l_name, NULL, msgbuf);
}