elf: Add ELF_DYNAMIC_AFTER_RELOC to rewrite PLT
[glibc.git] / sysdeps / x86_64 / dl-machine.h
/* Machine-dependent ELF dynamic relocation inline functions.  x86-64 version.
   Copyright (C) 2001-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "x86_64"

#include <assert.h>
#include <stdint.h>
#include <sys/param.h>
#include <sysdep.h>
#include <tls.h>
#include <dl-tlsdesc.h>
#include <dl-static-tls.h>
#include <dl-machine-rel.h>
#include <isa-level.h>
#ifdef __CET__
# include <dl-cet.h>
#else
# define RTLD_START_ENABLE_X86_FEATURES
#endif

/* Translate a processor-specific dynamic tag to the index in the l_info
   array.  */
#define DT_X86_64(x) (DT_X86_64_##x - DT_LOPROC + DT_NUM)
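
/* For example, DT_X86_64 (PLT) maps the processor-specific dynamic tag
   DT_X86_64_PLT, which lies in the tag range starting at DT_LOPROC, to
   a slot just past the DT_NUM standard entries of the l_info array.  */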

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int __attribute__ ((unused))
elf_machine_matches_host (const ElfW(Ehdr) *ehdr)
{
  return ehdr->e_machine == EM_X86_64;
}

/* Return the run-time load address of the shared object.  */
static inline ElfW(Addr) __attribute__ ((unused))
elf_machine_load_address (void)
{
  extern const ElfW(Ehdr) __ehdr_start attribute_hidden;
  return (ElfW(Addr)) &__ehdr_start;
}

/* Return the link-time address of _DYNAMIC.  */
static inline ElfW(Addr) __attribute__ ((unused))
elf_machine_dynamic (void)
{
  extern ElfW(Dyn) _DYNAMIC[] attribute_hidden;
  return (ElfW(Addr)) _DYNAMIC - elf_machine_load_address ();
}

/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */

static inline int __attribute__ ((unused, always_inline))
elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
                           int lazy, int profile)
{
  Elf64_Addr *got;
  extern void _dl_runtime_resolve_fxsave (ElfW(Word)) attribute_hidden;
  extern void _dl_runtime_resolve_xsave (ElfW(Word)) attribute_hidden;
  extern void _dl_runtime_resolve_xsavec (ElfW(Word)) attribute_hidden;
  extern void _dl_runtime_profile_sse (ElfW(Word)) attribute_hidden;
  extern void _dl_runtime_profile_avx (ElfW(Word)) attribute_hidden;
  extern void _dl_runtime_profile_avx512 (ElfW(Word)) attribute_hidden;

  if (l->l_info[DT_JMPREL] && lazy)
    {
      /* The GOT entries for functions in the PLT have not yet been
         filled in.  Their initial contents arrange, when called, to
         push an offset into the .rela.plt section, push the value of
         _GLOBAL_OFFSET_TABLE_[1], and then jump to
         _GLOBAL_OFFSET_TABLE_[2].  */
      got = (Elf64_Addr *) D_PTR (l, l_info[DT_PLTGOT]);
      /* If a library is prelinked but we have to relocate anyway,
         we have to be able to undo the prelinking of .got.plt.
         The prelinker saved the address of .plt + 0x16 here.  */
      if (got[1])
        {
          l->l_mach.plt = got[1] + l->l_addr;
          l->l_mach.gotplt = (ElfW(Addr)) &got[3];
        }
      /* Identify this shared object.  */
      *(ElfW(Addr) *) (got + 1) = (ElfW(Addr)) l;
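      /* At this point got[0] holds the link-time address of _DYNAMIC,
         got[1] identifies this link map, and got[2], set below, is the
         entry point of the lazy resolver trampoline.  */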

      const struct cpu_features *cpu_features = __get_cpu_features ();

#ifdef SHARED
      /* The got[2] entry contains the address of a function which gets
         called to get the address of a so far unresolved function and
         jump to it.  The profiling extension of the dynamic linker
         allows intercepting the calls to collect information.  In this
         case we don't store the address in the GOT so that all future
         calls also end in this function.  */
      if (__glibc_unlikely (profile))
        {
          if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512F))
            *(ElfW(Addr) *) (got + 2) = (ElfW(Addr)) &_dl_runtime_profile_avx512;
          else if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX))
            *(ElfW(Addr) *) (got + 2) = (ElfW(Addr)) &_dl_runtime_profile_avx;
          else
            *(ElfW(Addr) *) (got + 2) = (ElfW(Addr)) &_dl_runtime_profile_sse;

          if (GLRO(dl_profile) != NULL
              && _dl_name_match_p (GLRO(dl_profile), l))
            /* This is the object we are looking for.  Say that we really
               want profiling and the timers are started.  */
            GL(dl_profile_map) = l;
        }
      else
#endif
        {
          /* This function will get called to fix up the GOT entry
             indicated by the offset on the stack, and then jump to
             the resolved address.  */
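          /* The resolver must preserve every register that can carry a
             function argument, including the vector registers.  When
             the CPUID-reported XSAVE state size is available (or AVX is
             part of the baseline ISA level), the XSAVE-based variants
             cover all enabled extended states, preferring XSAVEC for
             its compacted format; otherwise the 512-byte legacy FXSAVE
             area suffices.  */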
          if (MINIMUM_X86_ISA_LEVEL >= AVX_X86_ISA_LEVEL
              || GLRO(dl_x86_cpu_features).xsave_state_size != 0)
            *(ElfW(Addr) *) (got + 2)
              = (CPU_FEATURE_USABLE_P (cpu_features, XSAVEC)
                 ? (ElfW(Addr)) &_dl_runtime_resolve_xsavec
                 : (ElfW(Addr)) &_dl_runtime_resolve_xsave);
          else
            *(ElfW(Addr) *) (got + 2)
              = (ElfW(Addr)) &_dl_runtime_resolve_fxsave;
        }
    }

  return lazy;
}

/* Initial entry point code for the dynamic linker.
   The C function `_dl_start' is the real entry point;
   its return value is the user program's entry point.  */
#define RTLD_START asm ("\n\
.text\n\
        .align 16\n\
.globl _start\n\
.globl _dl_start_user\n\
_start:\n\
        movq %rsp, %rdi\n\
        call _dl_start\n\
_dl_start_user:\n\
        # Save the user entry point address in %r12.\n\
        movq %rax, %r12\n\
        # Save %rsp value in %r13.\n\
        movq %rsp, %r13\n\
"\
        RTLD_START_ENABLE_X86_FEATURES \
"\
        # Read the original argument count.\n\
        movq (%rsp), %rdx\n\
        # Call _dl_init (struct link_map *main_map, int argc, char **argv, char **env)\n\
        # argc -> rsi\n\
        movq %rdx, %rsi\n\
        # And align stack for the _dl_init call.\n\
        andq $-16, %rsp\n\
        # _dl_loaded -> rdi\n\
        movq _rtld_local(%rip), %rdi\n\
        # env -> rcx\n\
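        # The stack holds argc at 0(%r13) and argv at 8(%r13); argv has\n\
        # argc entries plus a terminating NULL, so envp starts at\n\
        # %r13 + 16 + 8*argc, i.e. 16(%r13,%rdx,8).\n\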
        leaq 16(%r13,%rdx,8), %rcx\n\
        # argv -> rdx\n\
        leaq 8(%r13), %rdx\n\
        # Clear %rbp to mark outermost frame obviously even for constructors.\n\
        xorl %ebp, %ebp\n\
        # Call the function to run the initializers.\n\
        call _dl_init\n\
        # Pass our finalizer function to the user in %rdx, as per ELF ABI.\n\
        leaq _dl_fini(%rip), %rdx\n\
        # And make sure %rsp points to argc stored on the stack.\n\
        movq %r13, %rsp\n\
        # Jump to the user's entry point.\n\
        jmp *%r12\n\
.previous\n\
");

/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry or
   TLS variable, so undefined references should not be allowed to
   define the value.
   ELF_RTYPE_CLASS_COPY iff TYPE should not be allowed to resolve to one
   of the main executable's symbols, as for a COPY reloc.  */
#define elf_machine_type_class(type)                                        \
  ((((type) == R_X86_64_JUMP_SLOT                                           \
     || (type) == R_X86_64_DTPMOD64                                         \
     || (type) == R_X86_64_DTPOFF64                                         \
     || (type) == R_X86_64_TPOFF64                                          \
     || (type) == R_X86_64_TLSDESC)                                         \
    * ELF_RTYPE_CLASS_PLT)                                                  \
   | (((type) == R_X86_64_COPY) * ELF_RTYPE_CLASS_COPY))
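
/* The multiplications by the boolean comparison results avoid branches:
   for R_X86_64_JUMP_SLOT, say, the first group is 1 * ELF_RTYPE_CLASS_PLT
   and the second 0 * ELF_RTYPE_CLASS_COPY, so the macro evaluates to
   ELF_RTYPE_CLASS_PLT.  */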

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT    R_X86_64_JUMP_SLOT

/* The relative ifunc relocation.  */
// XXX This is a work-around for a broken linker.  Remove!
#define ELF_MACHINE_IRELATIVE   R_X86_64_IRELATIVE

/* We define an initialization function.  This is called very early in
   _dl_sysdep_start.  */
#define DL_PLATFORM_INIT dl_platform_init ()

static inline void __attribute__ ((unused))
dl_platform_init (void)
{
#if IS_IN (rtld)
  /* _dl_x86_init_cpu_features is a wrapper for init_cpu_features,
     which has been called early from __libc_start_main in static
     executables.  */
  _dl_x86_init_cpu_features ();
#else
  if (GLRO(dl_platform) != NULL && *GLRO(dl_platform) == '\0')
    /* Avoid an empty string which would disturb us.  */
    GLRO(dl_platform) = NULL;
#endif
}

static inline ElfW(Addr)
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
                       const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
                       const ElfW(Rela) *reloc,
                       ElfW(Addr) *reloc_addr, ElfW(Addr) value)
{
  return *reloc_addr = value;
}

/* Return the final value of a PLT relocation.  On x86-64 the
   JUMP_SLOT relocation ignores the addend.  */
static inline ElfW(Addr)
elf_machine_plt_value (struct link_map *map, const ElfW(Rela) *reloc,
                       ElfW(Addr) value)
{
  return value;
}

/* Names of the architecture-specific auditing callback functions.  */
#define ARCH_LA_PLTENTER x86_64_gnu_pltenter
#define ARCH_LA_PLTEXIT x86_64_gnu_pltexit

#endif /* !dl_machine_h */

#ifdef RESOLVE_MAP

/* Perform the relocation specified by RELOC and SYM (which is fully
   resolved).  MAP is the object containing the reloc.  */

static inline void __attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
                  const ElfW(Rela) *reloc, const ElfW(Sym) *sym,
                  const struct r_found_version *version,
                  void *const reloc_addr_arg, int skip_ifunc)
{
  ElfW(Addr) *const reloc_addr = reloc_addr_arg;
  const unsigned long int r_type = ELFW(R_TYPE) (reloc->r_info);

# if !defined RTLD_BOOTSTRAP
  if (__glibc_unlikely (r_type == R_X86_64_RELATIVE))
    *reloc_addr = map->l_addr + reloc->r_addend;
  else
# endif
# if !defined RTLD_BOOTSTRAP
  /* l_addr + r_addend may be > 0xffffffff and R_X86_64_RELATIVE64
     relocation updates the whole 64-bit entry.  */
  if (__glibc_unlikely (r_type == R_X86_64_RELATIVE64))
    *(Elf64_Addr *) reloc_addr = (Elf64_Addr) map->l_addr + reloc->r_addend;
  else
# endif
  if (__glibc_unlikely (r_type == R_X86_64_NONE))
    return;
  else
    {
# ifndef RTLD_BOOTSTRAP
      const ElfW(Sym) *const refsym = sym;
# endif
      struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
                                              r_type);
      ElfW(Addr) value = SYMBOL_ADDRESS (sym_map, sym, true);

      if (sym != NULL
          && __glibc_unlikely (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC)
          && __glibc_likely (sym->st_shndx != SHN_UNDEF)
          && __glibc_likely (!skip_ifunc))
        {
# ifndef RTLD_BOOTSTRAP
          if (sym_map != map
              && !sym_map->l_relocated)
            {
              const char *strtab
                = (const char *) D_PTR (map, l_info[DT_STRTAB]);
              if (sym_map->l_type == lt_executable)
                _dl_fatal_printf ("\
%s: IFUNC symbol '%s' referenced in '%s' is defined in the executable \
and creates an unsatisfiable circular dependency.\n",
                                  RTLD_PROGNAME, strtab + refsym->st_name,
                                  map->l_name);
              else
                _dl_error_printf ("\
%s: Relink `%s' with `%s' for IFUNC symbol `%s'\n",
                                  RTLD_PROGNAME, map->l_name,
                                  sym_map->l_name,
                                  strtab + refsym->st_name);
            }
# endif
          value = ((ElfW(Addr) (*) (void)) value) ();
        }

      switch (r_type)
        {
        case R_X86_64_JUMP_SLOT:
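          /* Record that the object has JUMP_SLOT relocations; the PLT
             rewrite in x86_64_dynamic_after_reloc below is attempted
             only when this flag is set.  */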
          map->l_has_jump_slot_reloc = true;
          /* Fall through.  */
        case R_X86_64_GLOB_DAT:
          *reloc_addr = value;
          break;

# ifndef RTLD_BOOTSTRAP
#  ifdef __ILP32__
        case R_X86_64_SIZE64:
          /* Set to symbol size plus addend.  */
          *(Elf64_Addr *) (uintptr_t) reloc_addr
            = (Elf64_Addr) sym->st_size + reloc->r_addend;
          break;

        case R_X86_64_SIZE32:
#  else
        case R_X86_64_SIZE64:
#  endif
          /* Set to symbol size plus addend.  */
          value = sym->st_size;
          *reloc_addr = value + reloc->r_addend;
          break;

        case R_X86_64_DTPMOD64:
          /* Get the information from the link map returned by the
             resolve function.  */
          if (sym_map != NULL)
            *reloc_addr = sym_map->l_tls_modid;
          break;
        case R_X86_64_DTPOFF64:
          /* During relocation all TLS symbols are defined and used.
             Therefore the offset is already correct.  */
          if (sym != NULL)
            {
              value = sym->st_value + reloc->r_addend;
#  ifdef __ILP32__
              /* This relocation type computes a signed offset that is
                 usually negative.  The symbol and addend values are 32
                 bits but the GOT entry is 64 bits wide and the whole
                 64-bit entry is used as a signed quantity, so we need
                 to sign-extend the computed value to 64 bits.  */
              *(Elf64_Sxword *) reloc_addr = (Elf64_Sxword) (Elf32_Sword) value;
#  else
              *reloc_addr = value;
#  endif
            }
          break;
        case R_X86_64_TLSDESC:
          {
            struct tlsdesc volatile *td =
              (struct tlsdesc volatile *) reloc_addr;

            if (! sym)
              {
                td->arg = (void *) reloc->r_addend;
                td->entry = _dl_tlsdesc_undefweak;
              }
            else
              {
#  ifndef SHARED
                CHECK_STATIC_TLS (map, sym_map);
#  else
                if (!TRY_STATIC_TLS (map, sym_map))
                  {
                    td->arg = _dl_make_tlsdesc_dynamic
                      (sym_map, sym->st_value + reloc->r_addend);
                    td->entry = _dl_tlsdesc_dynamic;
                  }
                else
#  endif
                  {
                    td->arg = (void *) (sym->st_value - sym_map->l_tls_offset
                                        + reloc->r_addend);
                    td->entry = _dl_tlsdesc_return;
                  }
              }
          }
          break;

        case R_X86_64_TPOFF64:
          /* The offset is negative, counting back from the thread
             pointer.  */
          if (sym != NULL)
            {
              CHECK_STATIC_TLS (map, sym_map);
              /* We know the offset of the object the symbol is
                 contained in.  It is a negative value which will be
                 added to the thread pointer.  */
              value = (sym->st_value + reloc->r_addend
                       - sym_map->l_tls_offset);
#  ifdef __ILP32__
              /* The symbol and addend values are 32 bits but the GOT
                 entry is 64 bits wide and the whole 64-bit entry is used
                 as a signed quantity, so we need to sign-extend the
                 computed value to 64 bits.  */
              *(Elf64_Sxword *) reloc_addr = (Elf64_Sxword) (Elf32_Sword) value;
#  else
              *reloc_addr = value;
#  endif
            }
          break;

        case R_X86_64_64:
          /* value + r_addend may be > 0xffffffff and R_X86_64_64
             relocation updates the whole 64-bit entry.  */
          *(Elf64_Addr *) reloc_addr = (Elf64_Addr) value + reloc->r_addend;
          break;
#  ifndef __ILP32__
        case R_X86_64_SIZE32:
          /* Set to symbol size plus addend.  */
          value = sym->st_size;
#  endif
          /* Fall through.  */
        case R_X86_64_32:
          value += reloc->r_addend;
          *(unsigned int *) reloc_addr = value;

          const char *fmt;
          if (__glibc_unlikely (value > UINT_MAX))
            {
              const char *strtab;

              fmt = "\
%s: Symbol `%s' causes overflow in R_X86_64_32 relocation\n";
            print_err:
              strtab = (const char *) D_PTR (map, l_info[DT_STRTAB]);

              _dl_error_printf (fmt, RTLD_PROGNAME, strtab + refsym->st_name);
            }
          break;
          /* Not needed for dl-conflict.c.  */
        case R_X86_64_PC32:
          value += reloc->r_addend - (ElfW(Addr)) reloc_addr;
          *(unsigned int *) reloc_addr = value;
          if (__glibc_unlikely (value != (int) value))
            {
              fmt = "\
%s: Symbol `%s' causes overflow in R_X86_64_PC32 relocation\n";
              goto print_err;
            }
          break;
        case R_X86_64_COPY:
          if (sym == NULL)
            /* This can happen in trace mode if an object could not be
               found.  */
            break;
          memcpy (reloc_addr_arg, (void *) value,
                  MIN (sym->st_size, refsym->st_size));
          if (__glibc_unlikely (sym->st_size > refsym->st_size)
              || (__glibc_unlikely (sym->st_size < refsym->st_size)
                  && GLRO(dl_verbose)))
            {
              fmt = "\
%s: Symbol `%s' has different size in shared object, consider re-linking\n";
              goto print_err;
            }
          break;
        case R_X86_64_IRELATIVE:
          value = map->l_addr + reloc->r_addend;
          if (__glibc_likely (!skip_ifunc))
            value = ((ElfW(Addr) (*) (void)) value) ();
          *reloc_addr = value;
          break;
        default:
          _dl_reloc_bad_type (map, r_type, 0);
          break;
# endif /* !RTLD_BOOTSTRAP */
        }
    }
}

static inline void
__attribute ((always_inline))
elf_machine_rela_relative (ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
                           void *const reloc_addr_arg)
{
  ElfW(Addr) *const reloc_addr = reloc_addr_arg;
#if !defined RTLD_BOOTSTRAP
  /* l_addr + r_addend may be > 0xffffffff and R_X86_64_RELATIVE64
     relocation updates the whole 64-bit entry.  */
  if (__glibc_unlikely (ELFW(R_TYPE) (reloc->r_info) == R_X86_64_RELATIVE64))
    *(Elf64_Addr *) reloc_addr = (Elf64_Addr) l_addr + reloc->r_addend;
  else
#endif
    {
      assert (ELFW(R_TYPE) (reloc->r_info) == R_X86_64_RELATIVE);
      *reloc_addr = l_addr + reloc->r_addend;
    }
}

static inline void
__attribute ((always_inline))
elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
                      ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
                      int skip_ifunc)
{
  ElfW(Addr) *const reloc_addr = (void *) (l_addr + reloc->r_offset);
  const unsigned long int r_type = ELFW(R_TYPE) (reloc->r_info);

  /* Check for unexpected PLT reloc type.  */
  if (__glibc_likely (r_type == R_X86_64_JUMP_SLOT))
    {
      /* Prelink has been deprecated.  */
      if (__glibc_likely (map->l_mach.plt == 0))
        *reloc_addr += l_addr;
      else
        *reloc_addr =
          map->l_mach.plt
          + (((ElfW(Addr)) reloc_addr) - map->l_mach.gotplt) * 2;
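      /* Each .got.plt slot is 8 bytes while each PLT entry is 16 bytes,
         so the byte offset into .got.plt is doubled to find the matching
         PLT entry; l_mach.plt (.plt + 0x16) already skips PLT0 and the
         6-byte indirect jump at the start of the first entry.  */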
    }
  else if (__glibc_likely (r_type == R_X86_64_TLSDESC))
    {
      const Elf_Symndx symndx = ELFW (R_SYM) (reloc->r_info);
      const ElfW (Sym) *symtab = (const void *) D_PTR (map, l_info[DT_SYMTAB]);
      const ElfW (Sym) *sym = &symtab[symndx];
      const struct r_found_version *version = NULL;

      if (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL)
        {
          const ElfW (Half) *vernum =
            (const void *) D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
          version = &map->l_versions[vernum[symndx] & 0x7fff];
        }

      /* Always initialize TLS descriptors completely at load time, in
         case static TLS is allocated for them, which requires locking.  */
      elf_machine_rela (map, scope, reloc, sym, version, reloc_addr,
                        skip_ifunc);
    }
  else if (__glibc_unlikely (r_type == R_X86_64_IRELATIVE))
    {
      ElfW(Addr) value = map->l_addr + reloc->r_addend;
      if (__glibc_likely (!skip_ifunc))
        value = ((ElfW(Addr) (*) (void)) value) ();
      *reloc_addr = value;
    }
  else
    _dl_reloc_bad_type (map, r_type, 1);
}

#endif /* RESOLVE_MAP */

#if !defined ELF_DYNAMIC_AFTER_RELOC && !defined RTLD_BOOTSTRAP \
    && defined SHARED
# define ELF_DYNAMIC_AFTER_RELOC(map, lazy) \
  x86_64_dynamic_after_reloc (map, (lazy))

# define JMP32_INSN_OPCODE  0xe9
# define JMP32_INSN_SIZE    5
# define JMPABS_INSN_OPCODE 0xa100d5
# define JMPABS_INSN_SIZE   11
# define INT3_INSN_OPCODE   0xcc
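
/* NB: JMPABS_INSN_OPCODE is stored little-endian, so the 4-byte store
   in x86_64_rewrite_plt emits the bytes 0xd5 0x00 0xa1 (the REX2
   prefix, its payload byte and the JMPABS opcode), and the 8-byte store
   that follows supplies the 64-bit absolute target, 11 bytes in all.  */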

static const char *
x86_64_reloc_symbol_name (struct link_map *map, const ElfW(Rela) *reloc)
{
  const ElfW(Sym) *const symtab
    = (const void *) map->l_info[DT_SYMTAB]->d_un.d_ptr;
  const ElfW(Sym) *const refsym = &symtab[ELFW (R_SYM) (reloc->r_info)];
  const char *strtab = (const char *) map->l_info[DT_STRTAB]->d_un.d_ptr;
  return strtab + refsym->st_name;
}

static void
x86_64_rewrite_plt (struct link_map *map, ElfW(Addr) plt_rewrite)
{
  ElfW(Addr) l_addr = map->l_addr;
  ElfW(Addr) pltent = map->l_info[DT_X86_64 (PLTENT)]->d_un.d_val;
  ElfW(Addr) start = map->l_info[DT_JMPREL]->d_un.d_ptr;
  ElfW(Addr) size = map->l_info[DT_PLTRELSZ]->d_un.d_val;
  const ElfW(Rela) *reloc = (const void *) start;
  const ElfW(Rela) *reloc_end = (const void *) (start + size);

  unsigned int feature_1 = THREAD_GETMEM (THREAD_SELF,
                                          header.feature_1);
  bool ibt_enabled_p
    = (feature_1 & GNU_PROPERTY_X86_FEATURE_1_IBT) != 0;

  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
    _dl_debug_printf ("\nchanging PLT in '%s' to direct branch\n",
                      DSO_FILENAME (map->l_name));

  for (; reloc < reloc_end; reloc++)
    if (ELFW(R_TYPE) (reloc->r_info) == R_X86_64_JUMP_SLOT)
      {
        /* Get the value from the GOT entry.  */
        ElfW(Addr) value = *(ElfW(Addr) *) (l_addr + reloc->r_offset);

        /* Get the corresponding PLT entry from r_addend.  */
        ElfW(Addr) branch_start = l_addr + reloc->r_addend;
        /* Skip ENDBR64 if IBT isn't enabled.  */
        if (!ibt_enabled_p)
          branch_start = ALIGN_DOWN (branch_start, pltent);
        /* Get the displacement from the branch target.  */
        ElfW(Addr) disp = value - branch_start - JMP32_INSN_SIZE;
        ElfW(Addr) plt_end;
        ElfW(Addr) pad;

        plt_end = (branch_start | (pltent - 1)) + 1;

        /* Update the PLT entry.  */
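        /* The unsigned comparison below tests whether DISP fits in a
           signed 32-bit displacement: adding 0x80000000 maps the valid
           interval [INT32_MIN, INT32_MAX] onto [0, UINT32_MAX].  */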
        if (((uint64_t) disp + (uint64_t) ((uint32_t) INT32_MIN))
            <= (uint64_t) UINT32_MAX)
          {
            pad = branch_start + JMP32_INSN_SIZE;

            if (__glibc_unlikely (pad > plt_end))
              continue;

            /* The branch target can be reached with a direct branch;
               rewrite the PLT entry with one.  */
            if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_BINDINGS))
              {
                const char *sym_name = x86_64_reloc_symbol_name (map,
                                                                 reloc);
                _dl_debug_printf ("changing '%s' PLT entry in '%s' to "
                                  "direct branch\n", sym_name,
                                  DSO_FILENAME (map->l_name));
              }

            /* Write out direct branch.  */
            *(uint8_t *) branch_start = JMP32_INSN_OPCODE;
            *(uint32_t *) (branch_start + 1) = disp;
          }
        else
          {
            if (GL(dl_x86_feature_control).plt_rewrite
                != plt_rewrite_jmpabs)
              {
                if (__glibc_unlikely (GLRO(dl_debug_mask)
                                      & DL_DEBUG_BINDINGS))
                  {
                    const char *sym_name
                      = x86_64_reloc_symbol_name (map, reloc);
                    _dl_debug_printf ("skipping '%s' PLT entry in '%s'\n",
                                      sym_name,
                                      DSO_FILENAME (map->l_name));
                  }
                continue;
              }

            pad = branch_start + JMPABS_INSN_SIZE;

            if (__glibc_unlikely (pad > plt_end))
              continue;

            /* Rewrite the PLT entry with JMPABS.  */
            if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_BINDINGS))
              {
                const char *sym_name = x86_64_reloc_symbol_name (map,
                                                                 reloc);
                _dl_debug_printf ("changing '%s' PLT entry in '%s' to "
                                  "JMPABS\n", sym_name,
                                  DSO_FILENAME (map->l_name));
              }

            /* "jmpabs $target" for a 64-bit target.  NB: JMPABS has a
               3-byte opcode + 64-bit address.  There is a 1-byte overlap
               between the 4-byte write and the 8-byte write.  */
            *(uint32_t *) (branch_start) = JMPABS_INSN_OPCODE;
            *(uint64_t *) (branch_start + 3) = value;
          }

        /* Fill the unused part of the PLT entry with INT3.  */
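        /* Any stray execution of the padding then traps instead of
           running leftover bytes of the old entry.  */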
        for (; pad < plt_end; pad++)
          *(uint8_t *) pad = INT3_INSN_OPCODE;
      }
}

static inline void
x86_64_rewrite_plt_in_place (struct link_map *map)
{
  /* Adjust the DT_X86_64_PLT address and DT_X86_64_PLTSZ value.  */
  ElfW(Addr) plt = (map->l_info[DT_X86_64 (PLT)]->d_un.d_ptr
                    + map->l_addr);
  size_t pagesize = GLRO(dl_pagesize);
  ElfW(Addr) plt_aligned = ALIGN_DOWN (plt, pagesize);
  size_t pltsz = (map->l_info[DT_X86_64 (PLTSZ)]->d_un.d_val
                  + plt - plt_aligned);
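  /* __mprotect operates on whole pages, so the PLT start is rounded
     down to a page boundary and the size is grown by the same amount
     to keep covering the entire PLT.  */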

  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
    _dl_debug_printf ("\nchanging PLT in '%s' to writable\n",
                      DSO_FILENAME (map->l_name));

  if (__glibc_unlikely (__mprotect ((void *) plt_aligned, pltsz,
                                    PROT_WRITE | PROT_READ) < 0))
    {
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nfailed to change PLT in '%s' to writable\n",
                          DSO_FILENAME (map->l_name));
      return;
    }

  x86_64_rewrite_plt (map, plt_aligned);

  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
    _dl_debug_printf ("\nchanging PLT in '%s' back to read-only\n",
                      DSO_FILENAME (map->l_name));

  if (__glibc_unlikely (__mprotect ((void *) plt_aligned, pltsz,
                                    PROT_EXEC | PROT_READ) < 0))
    _dl_signal_error (0, DSO_FILENAME (map->l_name), NULL,
                      "failed to change PLT back to read-only");
}

/* Rewrite PLT entries to direct branches if possible.  */

static inline void
x86_64_dynamic_after_reloc (struct link_map *map, int lazy)
{
  /* Ignore DT_X86_64_PLT if lazy binding is enabled.  */
  if (lazy != 0)
    return;

  /* Ignore DT_X86_64_PLT if the PLT rewrite isn't enabled.  */
  if (__glibc_likely (GL(dl_x86_feature_control).plt_rewrite
                      == plt_rewrite_none))
    return;

  if (__glibc_likely (map->l_info[DT_X86_64 (PLT)] == NULL))
    return;

  /* Ignore DT_X86_64_PLT if there is no R_X86_64_JUMP_SLOT.  */
  if (map->l_has_jump_slot_reloc == 0)
    return;

  /* Ignore DT_X86_64_PLT if
     1. DT_JMPREL isn't available or its value is 0.
     2. DT_PLTRELSZ is 0.
     3. DT_X86_64_PLTENT isn't available or its value is smaller than
        16 bytes.
     4. DT_X86_64_PLTSZ isn't available or its value is smaller than
        DT_X86_64_PLTENT's value or isn't a multiple of DT_X86_64_PLTENT's
        value.  */
  if (map->l_info[DT_JMPREL] == NULL
      || map->l_info[DT_JMPREL]->d_un.d_ptr == 0
      || map->l_info[DT_PLTRELSZ]->d_un.d_val == 0
      || map->l_info[DT_X86_64 (PLTSZ)] == NULL
      || map->l_info[DT_X86_64 (PLTENT)] == NULL
      || map->l_info[DT_X86_64 (PLTENT)]->d_un.d_val < 16
      || (map->l_info[DT_X86_64 (PLTSZ)]->d_un.d_val
          < map->l_info[DT_X86_64 (PLTENT)]->d_un.d_val)
      || (map->l_info[DT_X86_64 (PLTSZ)]->d_un.d_val
          % map->l_info[DT_X86_64 (PLTENT)]->d_un.d_val) != 0)
    return;

  x86_64_rewrite_plt_in_place (map);
}
#endif