/* Machine-dependent ELF dynamic relocation inline functions.  x86-64 version.
   Copyright (C) 2001-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "x86_64"

#include <assert.h>
#include <sys/param.h>
#include <sysdep.h>
#include <tls.h>
#include <dl-tlsdesc.h>
#include <dl-static-tls.h>
#include <dl-machine-rel.h>
#include <isa-level.h>

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int __attribute__ ((unused))
elf_machine_matches_host (const ElfW(Ehdr) *ehdr)
{
  return ehdr->e_machine == EM_X86_64;
}

/* Return the run-time load address of the shared object.  */
static inline ElfW(Addr) __attribute__ ((unused))
elf_machine_load_address (void)
{
  extern const ElfW(Ehdr) __ehdr_start attribute_hidden;
  return (ElfW(Addr)) &__ehdr_start;
}

/* Return the link-time address of _DYNAMIC.  */
static inline ElfW(Addr) __attribute__ ((unused))
elf_machine_dynamic (void)
{
  extern ElfW(Dyn) _DYNAMIC[] attribute_hidden;
  return (ElfW(Addr)) _DYNAMIC - elf_machine_load_address ();
}
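
/* Worked example with made-up addresses: if _DYNAMIC resolves to
   0x7f0000003e00 at run time and the load address (__ehdr_start) is
   0x7f0000000000, the function returns 0x3e00, the address recorded
   for _DYNAMIC in the ELF file itself.  */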

/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */

static inline int __attribute__ ((unused, always_inline))
elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
                           int lazy, int profile)
{
  Elf64_Addr *got;
  extern void _dl_runtime_resolve_fxsave (ElfW(Word)) attribute_hidden;
  extern void _dl_runtime_resolve_xsave (ElfW(Word)) attribute_hidden;
  extern void _dl_runtime_resolve_xsavec (ElfW(Word)) attribute_hidden;
  extern void _dl_runtime_profile_sse (ElfW(Word)) attribute_hidden;
  extern void _dl_runtime_profile_avx (ElfW(Word)) attribute_hidden;
  extern void _dl_runtime_profile_avx512 (ElfW(Word)) attribute_hidden;

  if (l->l_info[DT_JMPREL] && lazy)
    {
      /* The GOT entries for functions in the PLT have not yet been
         filled in.  Their initial contents arrange, when called, to
         push an offset into the .rel.plt section, push
         _GLOBAL_OFFSET_TABLE_[1], and then jump to
         _GLOBAL_OFFSET_TABLE_[2].  */
      got = (Elf64_Addr *) D_PTR (l, l_info[DT_PLTGOT]);
      /* If a library is prelinked but we have to relocate anyway,
         we have to be able to undo the prelinking of .got.plt.
         The prelinker saved the address of .plt + 0x16 here.  */
      if (got[1])
        {
          l->l_mach.plt = got[1] + l->l_addr;
          l->l_mach.gotplt = (ElfW(Addr)) &got[3];
        }
      /* Identify this shared object.  */
      *(ElfW(Addr) *) (got + 1) = (ElfW(Addr)) l;
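
      /* The reserved .got.plt slots are now laid out as follows:
           got[0]  link-time address of this object's _DYNAMIC (filled
                   in by the static linker);
           got[1]  the struct link_map * identifying this object
                   (stored just above);
           got[2]  entry point of the lazy resolver (stored below).
         Slots from got[3] onward belong to individual PLT entries.  */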

      const struct cpu_features *cpu_features = __get_cpu_features ();

      /* The got[2] entry contains the address of a function which gets
         called to get the address of a so far unresolved function and
         jump to it.  The profiling extension of the dynamic linker
         allows intercepting such calls to collect information.  In this
         case we don't store the address in the GOT so that all future
         calls also end in this function.  */
      if (__glibc_unlikely (profile))
        {
          if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512F))
            *(ElfW(Addr) *) (got + 2) = (ElfW(Addr)) &_dl_runtime_profile_avx512;
          else if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX))
            *(ElfW(Addr) *) (got + 2) = (ElfW(Addr)) &_dl_runtime_profile_avx;
          else
            *(ElfW(Addr) *) (got + 2) = (ElfW(Addr)) &_dl_runtime_profile_sse;

          if (GLRO(dl_profile) != NULL
              && _dl_name_match_p (GLRO(dl_profile), l))
            /* This is the object we are looking for.  Say that we really
               want profiling and the timers are started.  */
            GL(dl_profile_map) = l;
        }
      else
        {
          /* This function will get called to fix up the GOT entry
             indicated by the offset on the stack, and then jump to
             the resolved address.  */
          if (MINIMUM_X86_ISA_LEVEL >= AVX_X86_ISA_LEVEL
              || GLRO(dl_x86_cpu_features).xsave_state_size != 0)
            *(ElfW(Addr) *) (got + 2)
              = (CPU_FEATURE_USABLE_P (cpu_features, XSAVEC)
                 ? (ElfW(Addr)) &_dl_runtime_resolve_xsavec
                 : (ElfW(Addr)) &_dl_runtime_resolve_xsave);
          else
            *(ElfW(Addr) *) (got + 2)
              = (ElfW(Addr)) &_dl_runtime_resolve_fxsave;
        }
    }

  return lazy;
}
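
/* The _dl_runtime_resolve_* variants differ only in how they preserve
   the caller's register state around the fixup call: with FXSAVE,
   XSAVE, or the more compact XSAVEC, so that argument registers
   (including vector registers) survive lazy resolution on any CPU.  */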

/* Initial entry point code for the dynamic linker.
   The C function `_dl_start' is the real entry point;
   its return value is the user program's entry point.  */
#define RTLD_START asm ("\n\
.text\n\
        .align 16\n\
.globl _start\n\
.globl _dl_start_user\n\
_start:\n\
        movq %rsp, %rdi\n\
        call _dl_start\n\
_dl_start_user:\n\
        # Save the user entry point address in %r12.\n\
        movq %rax, %r12\n\
        # Read the original argument count.\n\
        movq (%rsp), %rdx\n\
        # Call _dl_init (struct link_map *main_map, int argc, char **argv, char **env)\n\
        # argc -> rsi\n\
        movq %rdx, %rsi\n\
        # Save %rsp value in %r13.\n\
        movq %rsp, %r13\n\
        # And align stack for the _dl_init call.\n\
        andq $-16, %rsp\n\
        # _dl_loaded -> rdi\n\
        movq _rtld_local(%rip), %rdi\n\
        # env -> rcx\n\
        leaq 16(%r13,%rdx,8), %rcx\n\
        # argv -> rdx\n\
        leaq 8(%r13), %rdx\n\
        # Clear %rbp to mark outermost frame obviously even for constructors.\n\
        xorl %ebp, %ebp\n\
        # Call the function to run the initializers.\n\
        call _dl_init\n\
        # Pass our finalizer function to the user in %rdx, as per ELF ABI.\n\
        leaq _dl_fini(%rip), %rdx\n\
        # And make sure %rsp points to argc stored on the stack.\n\
        movq %r13, %rsp\n\
        # Jump to the user's entry point.\n\
        jmp *%r12\n\
.previous\n\
");
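
/* The address arithmetic above follows the System V x86-64 process
   entry layout: (%r13) holds argc, argv starts at 8(%r13) and is
   terminated by a NULL pointer, so the environment begins at
   %r13 + 8 + argc * 8 + 8, i.e. 16(%r13,%rdx,8).  */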

/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry or
   TLS variable, so undefined references should not be allowed to
   define the value.
   ELF_RTYPE_CLASS_COPY iff TYPE should not be allowed to resolve to one
   of the main executable's symbols, as for a COPY reloc.  */
#define elf_machine_type_class(type) \
  ((((type) == R_X86_64_JUMP_SLOT \
     || (type) == R_X86_64_DTPMOD64 \
     || (type) == R_X86_64_DTPOFF64 \
     || (type) == R_X86_64_TPOFF64 \
     || (type) == R_X86_64_TLSDESC) \
    * ELF_RTYPE_CLASS_PLT) \
   | (((type) == R_X86_64_COPY) * ELF_RTYPE_CLASS_COPY))
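
/* The multiply-by-comparison form computes the class mask without
   branches: each parenthesized condition evaluates to 0 or 1, so for
   example elf_machine_type_class (R_X86_64_COPY) yields
   ELF_RTYPE_CLASS_COPY while a type outside both groups yields 0.  */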

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT R_X86_64_JUMP_SLOT

/* The relative ifunc relocation.  */
// XXX This is a work-around for a broken linker.  Remove!
#define ELF_MACHINE_IRELATIVE R_X86_64_IRELATIVE

/* We define an initialization function.  This is called very early in
   _dl_sysdep_start.  */
#define DL_PLATFORM_INIT dl_platform_init ()

static inline void __attribute__ ((unused))
dl_platform_init (void)
{
#if IS_IN (rtld)
  /* _dl_x86_init_cpu_features is a wrapper for init_cpu_features,
     which has already been called early from __libc_start_main in a
     static executable.  */
  _dl_x86_init_cpu_features ();
#else
  if (GLRO(dl_platform) != NULL && *GLRO(dl_platform) == '\0')
    /* Avoid an empty string which would disturb us.  */
    GLRO(dl_platform) = NULL;
#endif
}

static inline ElfW(Addr)
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
                       const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
                       const ElfW(Rela) *reloc,
                       ElfW(Addr) *reloc_addr, ElfW(Addr) value)
{
  return *reloc_addr = value;
}

/* Return the final value of a PLT relocation.  On x86-64 the
   JUMP_SLOT relocation ignores the addend.  */
static inline ElfW(Addr)
elf_machine_plt_value (struct link_map *map, const ElfW(Rela) *reloc,
                       ElfW(Addr) value)
{
  return value;
}

/* Names of the architecture-specific auditing callback functions.  */
#define ARCH_LA_PLTENTER x86_64_gnu_pltenter
#define ARCH_LA_PLTEXIT x86_64_gnu_pltexit

#endif /* !dl_machine_h */

#ifdef RESOLVE_MAP

/* Perform the relocation specified by RELOC and SYM (which is fully
   resolved).  MAP is the object containing the reloc.  */

static inline void __attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
                  const ElfW(Rela) *reloc, const ElfW(Sym) *sym,
                  const struct r_found_version *version,
                  void *const reloc_addr_arg, int skip_ifunc)
{
  ElfW(Addr) *const reloc_addr = reloc_addr_arg;
  const unsigned long int r_type = ELFW(R_TYPE) (reloc->r_info);

# if !defined RTLD_BOOTSTRAP
  if (__glibc_unlikely (r_type == R_X86_64_RELATIVE))
    *reloc_addr = map->l_addr + reloc->r_addend;
  else
# endif
# if !defined RTLD_BOOTSTRAP
  /* l_addr + r_addend may be > 0xffffffff and the R_X86_64_RELATIVE64
     relocation updates the whole 64-bit entry.  */
  if (__glibc_unlikely (r_type == R_X86_64_RELATIVE64))
    *(Elf64_Addr *) reloc_addr = (Elf64_Addr) map->l_addr + reloc->r_addend;
  else
# endif
  if (__glibc_unlikely (r_type == R_X86_64_NONE))
    return;
  else
    {
# ifndef RTLD_BOOTSTRAP
      const ElfW(Sym) *const refsym = sym;
# endif
      struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
                                              r_type);
      ElfW(Addr) value = SYMBOL_ADDRESS (sym_map, sym, true);

      if (sym != NULL
          && __glibc_unlikely (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC)
          && __glibc_likely (sym->st_shndx != SHN_UNDEF)
          && __glibc_likely (!skip_ifunc))
        {
# ifndef RTLD_BOOTSTRAP
          if (sym_map != map
              && !sym_map->l_relocated)
            {
              const char *strtab
                = (const char *) D_PTR (map, l_info[DT_STRTAB]);
              if (sym_map->l_type == lt_executable)
                _dl_fatal_printf ("\
%s: IFUNC symbol '%s' referenced in '%s' is defined in the executable \
and creates an unsatisfiable circular dependency.\n",
                                  RTLD_PROGNAME, strtab + refsym->st_name,
                                  map->l_name);
              else
                _dl_error_printf ("\
%s: Relink `%s' with `%s' for IFUNC symbol `%s'\n",
                                  RTLD_PROGNAME, map->l_name,
                                  sym_map->l_name,
                                  strtab + refsym->st_name);
            }
# endif
          value = ((ElfW(Addr) (*) (void)) value) ();
        }
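
      /* For STT_GNU_IFUNC symbols, VALUE held the address of the
         resolver function up to this point; the call above replaced it
         with the implementation address the resolver selected, which is
         what the relocation cases below store.  */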

      switch (r_type)
        {
        case R_X86_64_GLOB_DAT:
        case R_X86_64_JUMP_SLOT:
          *reloc_addr = value;
          break;

# ifndef RTLD_BOOTSTRAP
#  ifdef __ILP32__
        case R_X86_64_SIZE64:
          /* Set to symbol size plus addend.  */
          *(Elf64_Addr *) (uintptr_t) reloc_addr
            = (Elf64_Addr) sym->st_size + reloc->r_addend;
          break;

        case R_X86_64_SIZE32:
#  else
        case R_X86_64_SIZE64:
#  endif
          /* Set to symbol size plus addend.  */
          value = sym->st_size;
          *reloc_addr = value + reloc->r_addend;
          break;

        case R_X86_64_DTPMOD64:
          /* Get the information from the link map returned by the
             resolve function.  */
          if (sym_map != NULL)
            *reloc_addr = sym_map->l_tls_modid;
          break;
        case R_X86_64_DTPOFF64:
          /* During relocation all TLS symbols are defined and used.
             Therefore the offset is already correct.  */
          if (sym != NULL)
            {
              value = sym->st_value + reloc->r_addend;
# ifdef __ILP32__
              /* This relocation type computes a signed offset that is
                 usually negative.  The symbol and addend values are 32
                 bits but the GOT entry is 64 bits wide and the whole
                 64-bit entry is used as a signed quantity, so we need
                 to sign-extend the computed value to 64 bits.  */
              *(Elf64_Sxword *) reloc_addr = (Elf64_Sxword) (Elf32_Sword) value;
# else
              *reloc_addr = value;
# endif
            }
          break;
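
        /* Together, DTPMOD64 and DTPOFF64 fill in a (module id,
           intra-module offset) GOT pair, the argument __tls_get_addr
           expects in the general dynamic TLS model.  */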
        case R_X86_64_TLSDESC:
          {
            struct tlsdesc volatile *td =
              (struct tlsdesc volatile *) reloc_addr;

            if (! sym)
              {
                td->arg = (void *) reloc->r_addend;
                td->entry = _dl_tlsdesc_undefweak;
              }
            else
              {
# ifndef SHARED
                CHECK_STATIC_TLS (map, sym_map);
# else
                if (!TRY_STATIC_TLS (map, sym_map))
                  {
                    td->arg = _dl_make_tlsdesc_dynamic
                      (sym_map, sym->st_value + reloc->r_addend);
                    td->entry = _dl_tlsdesc_dynamic;
                  }
                else
# endif
                  {
                    td->arg = (void *) (sym->st_value - sym_map->l_tls_offset
                                        + reloc->r_addend);
                    td->entry = _dl_tlsdesc_return;
                  }
              }
          }
          break;
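
        /* A TLS descriptor is the (entry, arg) pair stored above: the
           call site invokes *entry with the descriptor address in %rax,
           and the selected entry routine returns the variable's offset
           from the thread pointer, using ARG either directly (static
           TLS) or as a key for a dynamic lookup.  */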
        case R_X86_64_TPOFF64:
          /* The offset is negative, forward from the thread pointer.  */
          if (sym != NULL)
            {
              CHECK_STATIC_TLS (map, sym_map);
              /* We know the offset of the object the symbol is contained
                 in.  It is a negative value which will be added to the
                 thread pointer.  */
              value = (sym->st_value + reloc->r_addend
                       - sym_map->l_tls_offset);
# ifdef __ILP32__
              /* The symbol and addend values are 32 bits but the GOT
                 entry is 64 bits wide and the whole 64-bit entry is used
                 as a signed quantity, so we need to sign-extend the
                 computed value to 64 bits.  */
              *(Elf64_Sxword *) reloc_addr = (Elf64_Sxword) (Elf32_Sword) value;
# else
              *reloc_addr = value;
# endif
            }
          break;
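
        /* x86-64 uses TLS variant 2: static TLS blocks live below the
           thread pointer, which is why l_tls_offset is subtracted above
           and the stored offset is normally negative.  */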
        case R_X86_64_64:
          /* value + r_addend may be > 0xffffffff and the R_X86_64_64
             relocation updates the whole 64-bit entry.  */
          *(Elf64_Addr *) reloc_addr = (Elf64_Addr) value + reloc->r_addend;
          break;
# ifndef __ILP32__
        case R_X86_64_SIZE32:
          /* Set to symbol size plus addend.  */
          value = sym->st_size;
# endif
          /* Fall through.  */
        case R_X86_64_32:
          value += reloc->r_addend;
          *(unsigned int *) reloc_addr = value;

          const char *fmt;
          if (__glibc_unlikely (value > UINT_MAX))
            {
              const char *strtab;

              fmt = "\
%s: Symbol `%s' causes overflow in R_X86_64_32 relocation\n";
            print_err:
              strtab = (const char *) D_PTR (map, l_info[DT_STRTAB]);

              _dl_error_printf (fmt, RTLD_PROGNAME, strtab + refsym->st_name);
            }
          break;
          /* Not needed for dl-conflict.c.  */
        case R_X86_64_PC32:
          value += reloc->r_addend - (ElfW(Addr)) reloc_addr;
          *(unsigned int *) reloc_addr = value;
          if (__glibc_unlikely (value != (int) value))
            {
              fmt = "\
%s: Symbol `%s' causes overflow in R_X86_64_PC32 relocation\n";
              goto print_err;
            }
          break;
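
        /* Unlike the R_X86_64_32 check above, which treats the stored
           word as zero-extended, PC32 is a signed displacement, hence
           the sign-extension test against (int) value.  */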
        case R_X86_64_COPY:
          if (sym == NULL)
            /* This can happen in trace mode if an object could not be
               found.  */
            break;
          memcpy (reloc_addr_arg, (void *) value,
                  MIN (sym->st_size, refsym->st_size));
          if (__glibc_unlikely (sym->st_size > refsym->st_size)
              || (__glibc_unlikely (sym->st_size < refsym->st_size)
                  && GLRO(dl_verbose)))
            {
              fmt = "\
%s: Symbol `%s' has different size in shared object, consider re-linking\n";
              goto print_err;
            }
          break;
        case R_X86_64_IRELATIVE:
          value = map->l_addr + reloc->r_addend;
          if (__glibc_likely (!skip_ifunc))
            value = ((ElfW(Addr) (*) (void)) value) ();
          *reloc_addr = value;
          break;
        default:
          _dl_reloc_bad_type (map, r_type, 0);
          break;
# endif /* !RTLD_BOOTSTRAP */
        }
    }
}
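
/* In psABI notation, with symbol value S, addend A, load base B and
   place P, the cases above compute: GLOB_DAT/JUMP_SLOT store S,
   R_X86_64_64 stores S + A, R_X86_64_32 stores the zero-extended
   S + A, R_X86_64_PC32 stores S + A - P, and IRELATIVE calls the
   function at B + A and stores its return value.  */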

static inline void
__attribute ((always_inline))
elf_machine_rela_relative (ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
                           void *const reloc_addr_arg)
{
  ElfW(Addr) *const reloc_addr = reloc_addr_arg;
#if !defined RTLD_BOOTSTRAP
  /* l_addr + r_addend may be > 0xffffffff and the R_X86_64_RELATIVE64
     relocation updates the whole 64-bit entry.  */
  if (__glibc_unlikely (ELFW(R_TYPE) (reloc->r_info) == R_X86_64_RELATIVE64))
    *(Elf64_Addr *) reloc_addr = (Elf64_Addr) l_addr + reloc->r_addend;
  else
#endif
    {
      assert (ELFW(R_TYPE) (reloc->r_info) == R_X86_64_RELATIVE);
      *reloc_addr = l_addr + reloc->r_addend;
    }
}

static inline void
__attribute ((always_inline))
elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
                      ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
                      int skip_ifunc)
{
  ElfW(Addr) *const reloc_addr = (void *) (l_addr + reloc->r_offset);
  const unsigned long int r_type = ELFW(R_TYPE) (reloc->r_info);

  /* Check for unexpected PLT reloc type.  */
  if (__glibc_likely (r_type == R_X86_64_JUMP_SLOT))
    {
      /* Prelink has been deprecated.  */
      if (__glibc_likely (map->l_mach.plt == 0))
        *reloc_addr += l_addr;
      else
        *reloc_addr =
          map->l_mach.plt
          + (((ElfW(Addr)) reloc_addr) - map->l_mach.gotplt) * 2;
    }
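  /* The prelink branch maps a .got.plt slot back to its PLT entry:
     .got.plt slots are 8 bytes and PLT entries 16 bytes, so doubling
     the byte distance from l_mach.gotplt converts it into the byte
     distance into .plt.  */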
  else if (__glibc_likely (r_type == R_X86_64_TLSDESC))
    {
      const Elf_Symndx symndx = ELFW (R_SYM) (reloc->r_info);
      const ElfW (Sym) *symtab = (const void *) D_PTR (map, l_info[DT_SYMTAB]);
      const ElfW (Sym) *sym = &symtab[symndx];
      const struct r_found_version *version = NULL;

      if (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL)
        {
          const ElfW (Half) *vernum =
            (const void *) D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
          version = &map->l_versions[vernum[symndx] & 0x7fff];
        }

      /* Always initialize TLS descriptors completely at load time, in
         case static TLS is allocated for it that requires locking.  */
      elf_machine_rela (map, scope, reloc, sym, version, reloc_addr,
                        skip_ifunc);
    }
  else if (__glibc_unlikely (r_type == R_X86_64_IRELATIVE))
    {
      ElfW(Addr) value = map->l_addr + reloc->r_addend;
      if (__glibc_likely (!skip_ifunc))
        value = ((ElfW(Addr) (*) (void)) value) ();
      *reloc_addr = value;
    }
  else
    _dl_reloc_bad_type (map, r_type, 1);
}

#endif /* RESOLVE_MAP */