/* Machine-dependent ELF dynamic relocation inline functions.  x86-64 version.
   Copyright (C) 2001-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Andreas Jaeger <aj@suse.de>.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "x86_64"

#include <sys/param.h>
#include <sysdep.h>
#include <tls.h>
#include <dl-tlsdesc.h>
/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int __attribute__ ((unused))
elf_machine_matches_host (const ElfW(Ehdr) *ehdr)
{
  return ehdr->e_machine == EM_X86_64;
}
/* Return the link-time address of _DYNAMIC.  Conveniently, this is the
   first element of the GOT.  This must be inlined in a function which
   uses global data.  */
static inline ElfW(Addr) __attribute__ ((unused))
elf_machine_dynamic (void)
{
  /* This produces an IP-relative reloc which is resolved at link time.  */
  extern const ElfW(Addr) _GLOBAL_OFFSET_TABLE_[] attribute_hidden;
  return _GLOBAL_OFFSET_TABLE_[0];
}
/* Return the run-time load address of the shared object.  */
static inline ElfW(Addr) __attribute__ ((unused))
elf_machine_load_address (void)
{
  /* Compute the difference between the runtime address of _DYNAMIC as seen
     by an IP-relative reference, and the link-time address found in the
     special unrelocated first GOT entry.  */
  extern ElfW(Dyn) _DYNAMIC[] attribute_hidden;
  return (ElfW(Addr)) &_DYNAMIC - elf_machine_dynamic ();
}
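/* Illustrative example (addresses made up, not taken from the original
   source): if _DYNAMIC was given the link-time address 0x200ef8 and the
   object is mapped at 0x7f0000000000, the IP-relative reference &_DYNAMIC
   evaluates to 0x7f0000200ef8 while the unrelocated GOT[0] still holds
   0x200ef8; the difference, 0x7f0000000000, is the load bias that must be
   added to every link-time address.  */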
/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */

static inline int __attribute__ ((unused, always_inline))
elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
{
  Elf64_Addr *got;
  extern void _dl_runtime_resolve (ElfW(Word)) attribute_hidden;
  extern void _dl_runtime_profile (ElfW(Word)) attribute_hidden;

  if (l->l_info[DT_JMPREL] && lazy)
    {
      /* The GOT entries for functions in the PLT have not yet been filled
         in.  Their initial contents point back into the PLT, so the first
         call pushes an offset into the .rel.plt section, pushes
         _GLOBAL_OFFSET_TABLE_[1], and then jumps to
         _GLOBAL_OFFSET_TABLE_[2].  */
      got = (Elf64_Addr *) D_PTR (l, l_info[DT_PLTGOT]);
      /* If a library is prelinked but we have to relocate anyway,
         we have to be able to undo the prelinking of .got.plt.
         The prelinker saved the address of .plt + 0x16 here.  */
      if (got[1])
        {
          l->l_mach.plt = got[1] + l->l_addr;
          l->l_mach.gotplt = (ElfW(Addr)) &got[3];
        }
      /* Identify this shared object.  */
      *(ElfW(Addr) *) (got + 1) = (ElfW(Addr)) l;

      /* The got[2] entry contains the address of a function which gets
         called to look up the address of a so far unresolved function and
         jump to it.  The profiling extension of the dynamic linker makes it
         possible to intercept such calls to collect information.  In that
         case we don't store the resolved address in the GOT, so that all
         future calls also end up in this function.  */
      if (__builtin_expect (profile, 0))
        {
          *(ElfW(Addr) *) (got + 2) = (ElfW(Addr)) &_dl_runtime_profile;

          if (GLRO(dl_profile) != NULL
              && _dl_name_match_p (GLRO(dl_profile), l))
            /* This is the object we are looking for.  Record that we
               really want profiling and that the timers are started.  */
            GL(dl_profile_map) = l;
        }
      else
        /* This function will get called to fix up the GOT entry indicated by
           the offset on the stack, and then jump to the resolved address.  */
        *(ElfW(Addr) *) (got + 2) = (ElfW(Addr)) &_dl_runtime_resolve;
    }

  if (l->l_info[ADDRIDX (DT_TLSDESC_GOT)] && lazy)
    *(ElfW(Addr)*)(D_PTR (l, l_info[ADDRIDX (DT_TLSDESC_GOT)]) + l->l_addr)
      = (ElfW(Addr)) &_dl_tlsdesc_resolve_rela;

  return lazy;
}
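/* For reference: the .got.plt layout relied on above follows the usual
   x86-64 linker convention.  got[0] holds the link-time address of
   _DYNAMIC, got[1] is set here to this object's struct link_map, got[2]
   to the resolver trampoline (_dl_runtime_resolve or _dl_runtime_profile),
   and got[3] onward hold one entry per PLT slot, each initially pointing
   back at its PLT entry so that the first call reaches the trampoline.  */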
/* Initial entry point code for the dynamic linker.
   The C function `_dl_start' is the real entry point;
   its return value is the user program's entry point.  */
#define RTLD_START asm ("\n\
.text\n\
        .align 16\n\
.globl _start\n\
.globl _dl_start_user\n\
_start:\n\
        movq %rsp, %rdi\n\
        call _dl_start\n\
_dl_start_user:\n\
        # Save the user entry point address in %r12.\n\
        movq %rax, %r12\n\
        # See if we were run as a command with the executable file\n\
        # name as an extra leading argument.\n\
        movl _dl_skip_args(%rip), %eax\n\
        # Pop the original argument count.\n\
        popq %rdx\n\
        # Adjust the stack pointer to skip _dl_skip_args words.\n\
        leaq (%rsp,%rax,8), %rsp\n\
        # Subtract _dl_skip_args from argc.\n\
        subl %eax, %edx\n\
        # Push argc back on the stack.\n\
        pushq %rdx\n\
        # Call _dl_init (struct link_map *main_map, int argc, char **argv, char **env)\n\
        # argc -> rsi\n\
        movq %rdx, %rsi\n\
        # Save %rsp value in %r13.\n\
        movq %rsp, %r13\n\
        # And align stack for the _dl_init_internal call.\n\
        andq $-16, %rsp\n\
        # _dl_loaded -> rdi\n\
        movq _rtld_local(%rip), %rdi\n\
        # env -> rcx\n\
        leaq 16(%r13,%rdx,8), %rcx\n\
        # argv -> rdx\n\
        leaq 8(%r13), %rdx\n\
        # Clear %rbp to mark outermost frame obviously even for constructors.\n\
        xorl %ebp, %ebp\n\
        # Call the function to run the initializers.\n\
        call _dl_init_internal@PLT\n\
        # Pass our finalizer function to the user in %rdx, as per ELF ABI.\n\
        leaq _dl_fini(%rip), %rdx\n\
        # And make sure %rsp points to argc stored on the stack.\n\
        movq %r13, %rsp\n\
        # Jump to the user's entry point.\n\
        jmp *%r12\n\
.previous\n\
");
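/* For reference: this code runs on the initial process stack laid out as
   the x86-64 System V ABI specifies: argc on top, then the argv pointers,
   a terminating NULL, the envp pointers, another NULL, and the auxiliary
   vector.  The _dl_skip_args adjustment above drops leading argv entries
   (an explicit ld.so invocation and its options) from that block before
   argc is pushed back.  */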
/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry or
   TLS variable, so undefined references should not be allowed to
   define the value.
   ELF_RTYPE_CLASS_COPY iff TYPE should not be allowed to resolve to one
   of the main executable's symbols, as for a COPY reloc.  */
#define elf_machine_type_class(type)                                         \
  ((((type) == R_X86_64_JUMP_SLOT                                            \
     || (type) == R_X86_64_DTPMOD64                                          \
     || (type) == R_X86_64_DTPOFF64                                          \
     || (type) == R_X86_64_TPOFF64                                           \
     || (type) == R_X86_64_TLSDESC)                                          \
    * ELF_RTYPE_CLASS_PLT)                                                   \
   | (((type) == R_X86_64_COPY) * ELF_RTYPE_CLASS_COPY))
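/* For example, elf_machine_type_class (R_X86_64_JUMP_SLOT) and
   elf_machine_type_class (R_X86_64_TPOFF64) both evaluate to
   ELF_RTYPE_CLASS_PLT, elf_machine_type_class (R_X86_64_COPY) evaluates
   to ELF_RTYPE_CLASS_COPY, and an ordinary relocation such as
   R_X86_64_64 evaluates to 0.  */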
/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT    R_X86_64_JUMP_SLOT

/* The relative ifunc relocation.  */
// XXX This is a work-around for a broken linker.  Remove!
#define ELF_MACHINE_IRELATIVE   R_X86_64_IRELATIVE

/* The x86-64 never uses Elf64_Rel/Elf32_Rel relocations.  */
#define ELF_MACHINE_NO_REL 1
/* We define an initialization function.  This is called very early in
   _dl_sysdep_start.  */
#define DL_PLATFORM_INIT dl_platform_init ()

static inline void __attribute__ ((unused))
dl_platform_init (void)
{
  if (GLRO(dl_platform) != NULL && *GLRO(dl_platform) == '\0')
    /* Avoid an empty string which would disturb us.  */
    GLRO(dl_platform) = NULL;
}
static inline ElfW(Addr)
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
                       const ElfW(Rela) *reloc,
                       ElfW(Addr) *reloc_addr, ElfW(Addr) value)
{
  return *reloc_addr = value;
}

/* Return the final value of a PLT relocation.  On x86-64 the
   JUMP_SLOT relocation ignores the addend.  */
static inline ElfW(Addr)
elf_machine_plt_value (struct link_map *map, const ElfW(Rela) *reloc,
                       ElfW(Addr) value)
{
  return value;
}
/* Names of the architecture-specific auditing callback functions.  */
#define ARCH_LA_PLTENTER x86_64_gnu_pltenter
#define ARCH_LA_PLTEXIT x86_64_gnu_pltexit

#endif /* !dl_machine_h */
#ifdef RESOLVE_MAP

/* Perform the relocation specified by RELOC and SYM (which is fully resolved).
   MAP is the object containing the reloc.  */
auto inline void
__attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, const ElfW(Rela) *reloc,
                  const ElfW(Sym) *sym, const struct r_found_version *version,
                  void *const reloc_addr_arg, int skip_ifunc)
{
  ElfW(Addr) *const reloc_addr = reloc_addr_arg;
  const unsigned long int r_type = ELFW(R_TYPE) (reloc->r_info);

# if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC
  if (__builtin_expect (r_type == R_X86_64_RELATIVE, 0))
    {
#  if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
      /* This is defined in rtld.c, but nowhere in the static libc.a;
         make the reference weak so static programs can still link.
         This declaration cannot be done when compiling rtld.c
         (i.e. #ifdef RTLD_BOOTSTRAP) because rtld.c contains the
         common defn for _dl_rtld_map, which is incompatible with a
         weak decl in the same file.  */
#   ifndef SHARED
      weak_extern (GL(dl_rtld_map));
#   endif
      if (map != &GL(dl_rtld_map)) /* Already done in rtld itself.  */
#  endif
        *reloc_addr = map->l_addr + reloc->r_addend;
    }
  else
# endif
# if !defined RTLD_BOOTSTRAP
  /* l_addr + r_addend may be > 0xffffffff and R_X86_64_RELATIVE64
     relocation updates the whole 64-bit entry.  */
  if (__builtin_expect (r_type == R_X86_64_RELATIVE64, 0))
    *(Elf64_Addr *) reloc_addr = (Elf64_Addr) map->l_addr + reloc->r_addend;
  else
# endif
  if (__builtin_expect (r_type == R_X86_64_NONE, 0))
    return;
  else
    {
# ifndef RTLD_BOOTSTRAP
      const ElfW(Sym) *const refsym = sym;
# endif
      struct link_map *sym_map = RESOLVE_MAP (&sym, version, r_type);
      ElfW(Addr) value = (sym == NULL ? 0
                          : (ElfW(Addr)) sym_map->l_addr + sym->st_value);

      if (sym != NULL
          && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC,
                               0)
          && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1)
          && __builtin_expect (!skip_ifunc, 1))
        value = ((ElfW(Addr) (*) (void)) value) ();

      switch (r_type)
        {
# ifndef RTLD_BOOTSTRAP
#  ifdef __ILP32__
        case R_X86_64_SIZE64:
          /* Set to symbol size plus addend.  */
          *(Elf64_Addr *) (uintptr_t) reloc_addr
            = (Elf64_Addr) sym->st_size + reloc->r_addend;
          break;

        case R_X86_64_SIZE32:
#  else
        case R_X86_64_SIZE64:
#  endif
          /* Set to symbol size plus addend.  */
          value = sym->st_size;
# endif
        case R_X86_64_GLOB_DAT:
        case R_X86_64_JUMP_SLOT:
          *reloc_addr = value + reloc->r_addend;
          break;

# ifndef RESOLVE_CONFLICT_FIND_MAP
        case R_X86_64_DTPMOD64:
#  ifdef RTLD_BOOTSTRAP
          /* During startup the dynamic linker is always the module
             with index 1.
             XXX If this relocation is necessary move before RESOLVE
             call.  */
          *reloc_addr = 1;
#  else
          /* Get the information from the link map returned by the
             resolve function.  */
          if (sym_map != NULL)
            *reloc_addr = sym_map->l_tls_modid;
#  endif
          break;
        case R_X86_64_DTPOFF64:
#  ifndef RTLD_BOOTSTRAP
          /* During relocation all TLS symbols are defined and used.
             Therefore the offset is already correct.  */
          if (sym != NULL)
            {
              value = sym->st_value + reloc->r_addend;
#   ifdef __ILP32__
              /* This relocation type computes a signed offset that is
                 usually negative.  The symbol and addend values are 32
                 bits but the GOT entry is 64 bits wide and the whole
                 64-bit entry is used as a signed quantity, so we need
                 to sign-extend the computed value to 64 bits.  */
              *(Elf64_Sxword *) reloc_addr = (Elf64_Sxword) (Elf32_Sword) value;
#   else
              *reloc_addr = value;
#   endif
            }
#  endif
          break;
        case R_X86_64_TLSDESC:
          {
            struct tlsdesc volatile *td =
              (struct tlsdesc volatile *)reloc_addr;

#  ifndef RTLD_BOOTSTRAP
            if (! sym)
              {
                td->arg = (void*)reloc->r_addend;
                td->entry = _dl_tlsdesc_undefweak;
              }
            else
#  endif
              {
#  ifndef RTLD_BOOTSTRAP
#   ifndef SHARED
                CHECK_STATIC_TLS (map, sym_map);
#   else
                if (!TRY_STATIC_TLS (map, sym_map))
                  {
                    td->arg = _dl_make_tlsdesc_dynamic
                      (sym_map, sym->st_value + reloc->r_addend);
                    td->entry = _dl_tlsdesc_dynamic;
                  }
                else
#   endif
#  endif
                  {
                    td->arg = (void*)(sym->st_value - sym_map->l_tls_offset
                                      + reloc->r_addend);
                    td->entry = _dl_tlsdesc_return;
                  }
              }
            break;
          }
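        /* In short, a TLS descriptor is resolved to one of three entry
           points: _dl_tlsdesc_undefweak for an undefined weak symbol (the
           addend alone becomes the argument), _dl_tlsdesc_dynamic when the
           module's TLS block may have to be allocated lazily, and
           _dl_tlsdesc_return when the symbol already has a known static
           TLS offset.  */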
        case R_X86_64_TPOFF64:
          /* The offset is negative, forward from the thread pointer.  */
#  ifndef RTLD_BOOTSTRAP
          if (sym != NULL)
#  endif
            {
#  ifndef RTLD_BOOTSTRAP
              CHECK_STATIC_TLS (map, sym_map);
#  endif
              /* We know the offset of the object the symbol is contained in.
                 It is a negative value which will be added to the
                 thread pointer.  */
              value = (sym->st_value + reloc->r_addend
                       - sym_map->l_tls_offset);
#  ifdef __ILP32__
              /* The symbol and addend values are 32 bits but the GOT
                 entry is 64 bits wide and the whole 64-bit entry is used
                 as a signed quantity, so we need to sign-extend the
                 computed value to 64 bits.  */
              *(Elf64_Sxword *) reloc_addr = (Elf64_Sxword) (Elf32_Sword) value;
#  else
              *reloc_addr = value;
#  endif
            }
          break;
# endif

# ifndef RTLD_BOOTSTRAP
        case R_X86_64_64:
          /* value + r_addend may be > 0xffffffff and R_X86_64_64
             relocation updates the whole 64-bit entry.  */
          *(Elf64_Addr *) reloc_addr = (Elf64_Addr) value + reloc->r_addend;
          break;
#  ifndef __ILP32__
        case R_X86_64_SIZE32:
          /* Set to symbol size plus addend.  */
          value = sym->st_size;
#  endif
        case R_X86_64_32:
          value += reloc->r_addend;
          *(unsigned int *) reloc_addr = value;

          const char *fmt;
          if (__builtin_expect (value > UINT_MAX, 0))
            {
              const char *strtab;

              fmt = "\
%s: Symbol `%s' causes overflow in R_X86_64_32 relocation\n";
#  ifndef RESOLVE_CONFLICT_FIND_MAP
            print_err:
#  endif
              strtab = (const char *) D_PTR (map, l_info[DT_STRTAB]);

              _dl_error_printf (fmt, RTLD_PROGNAME, strtab + refsym->st_name);
            }
          break;
#  ifndef RESOLVE_CONFLICT_FIND_MAP
          /* Not needed for dl-conflict.c.  */
        case R_X86_64_PC32:
          value += reloc->r_addend - (ElfW(Addr)) reloc_addr;
          *(unsigned int *) reloc_addr = value;
          if (__builtin_expect (value != (int) value, 0))
            {
              fmt = "\
%s: Symbol `%s' causes overflow in R_X86_64_PC32 relocation\n";
              goto print_err;
            }
          break;
        case R_X86_64_COPY:
          if (sym == NULL)
            /* This can happen in trace mode if an object could not be
               found.  */
            break;
          memcpy (reloc_addr_arg, (void *) value,
                  MIN (sym->st_size, refsym->st_size));
          if (__builtin_expect (sym->st_size > refsym->st_size, 0)
              || (__builtin_expect (sym->st_size < refsym->st_size, 0)
                  && GLRO(dl_verbose)))
            {
              fmt = "\
%s: Symbol `%s' has different size in shared object, consider re-linking\n";
              goto print_err;
            }
          break;
#  endif
        case R_X86_64_IRELATIVE:
          value = map->l_addr + reloc->r_addend;
          value = ((ElfW(Addr) (*) (void)) value) ();
          *reloc_addr = value;
          break;
        default:
          _dl_reloc_bad_type (map, r_type, 0);
          break;
# endif
        }
    }
}
auto inline void
__attribute ((always_inline))
elf_machine_rela_relative (ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
                           void *const reloc_addr_arg)
{
  ElfW(Addr) *const reloc_addr = reloc_addr_arg;
#if !defined RTLD_BOOTSTRAP
  /* l_addr + r_addend may be > 0xffffffff and R_X86_64_RELATIVE64
     relocation updates the whole 64-bit entry.  */
  if (__builtin_expect (ELFW(R_TYPE) (reloc->r_info) == R_X86_64_RELATIVE64, 0))
    *(Elf64_Addr *) reloc_addr = (Elf64_Addr) l_addr + reloc->r_addend;
  else
#endif
    {
      assert (ELFW(R_TYPE) (reloc->r_info) == R_X86_64_RELATIVE);
      *reloc_addr = l_addr + reloc->r_addend;
    }
}
auto inline void
__attribute ((always_inline))
elf_machine_lazy_rel (struct link_map *map,
                      ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
                      int skip_ifunc)
{
  ElfW(Addr) *const reloc_addr = (void *) (l_addr + reloc->r_offset);
  const unsigned long int r_type = ELFW(R_TYPE) (reloc->r_info);

  /* Check for unexpected PLT reloc type.  */
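  /* A note on the arithmetic below: each .got.plt slot is 8 bytes while
     each PLT entry is 16 bytes, so scaling a slot's offset from
     l_mach.gotplt (&got[3]) by two and adding l_mach.plt (the saved,
     relocated address of .plt + 0x16) yields the address of the push
     instruction in the corresponding PLT entry; that undoes a prelinked
     .got.plt entry.  Without prelink information the entry only needs the
     load bias added.  */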
  if (__builtin_expect (r_type == R_X86_64_JUMP_SLOT, 1))
    {
      if (__builtin_expect (map->l_mach.plt, 0) == 0)
        *reloc_addr += l_addr;
      else
        *reloc_addr =
          map->l_mach.plt
          + (((ElfW(Addr)) reloc_addr) - map->l_mach.gotplt) * 2;
    }
  else if (__builtin_expect (r_type == R_X86_64_TLSDESC, 1))
    {
      struct tlsdesc volatile * __attribute__((__unused__)) td =
        (struct tlsdesc volatile *)reloc_addr;

      td->arg = (void*)reloc;
      td->entry = (void*)(D_PTR (map, l_info[ADDRIDX (DT_TLSDESC_PLT)])
                          + map->l_addr);
    }
  else if (__builtin_expect (r_type == R_X86_64_IRELATIVE, 0))
    {
      ElfW(Addr) value = map->l_addr + reloc->r_addend;
      if (__builtin_expect (!skip_ifunc, 1))
        value = ((ElfW(Addr) (*) (void)) value) ();
      *reloc_addr = value;
    }
  else
    _dl_reloc_bad_type (map, r_type, 1);
}
#endif /* RESOLVE_MAP */