/* Machine-dependent ELF dynamic relocation inline functions.
   PowerPC64 version.
   Copyright (C) 1995-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */

#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "powerpc64"

#include <assert.h>
#include <sys/param.h>
#include <dl-tls.h>
#include <sysdep.h>
#include <hwcapinfo.h>
#include <dl-static-tls.h>
#include <dl-funcdesc.h>
#include <dl-machine-rel.h>

/* Translate a processor specific dynamic tag to the index
   in l_info array.  */
#define DT_PPC64(x) (DT_PPC64_##x - DT_LOPROC + DT_NUM)
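
/* For example, DT_PPC64 (GLINK) yields the l_info index for the
   DT_PPC64_GLINK dynamic entry used by elf_machine_runtime_setup below,
   and DT_PPC64 (OPT) the index for DT_PPC64_OPT.  */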

#define ELF_MULT_MACHINES_SUPPORTED

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int
elf_machine_matches_host (const Elf64_Ehdr *ehdr)
{
  /* Verify that the binary matches our ABI version.  */
  if ((ehdr->e_flags & EF_PPC64_ABI) != 0)
    {
#if _CALL_ELF != 2
      if ((ehdr->e_flags & EF_PPC64_ABI) != 1)
        return 0;
#else
      if ((ehdr->e_flags & EF_PPC64_ABI) != 2)
        return 0;
#endif
    }

  return ehdr->e_machine == EM_PPC64;
}

/* Return nonzero iff ELF header is compatible with the running host,
   but not this loader.  */
static inline int
elf_host_tolerates_machine (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_machine == EM_PPC;
}

/* Return nonzero iff ELF header is compatible with the running host,
   but not this loader.  */
static inline int
elf_host_tolerates_class (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_ident[EI_CLASS] == ELFCLASS32;
}

/* Return the run-time load address of the shared object, assuming it
   was originally linked at zero.  */
static inline Elf64_Addr
elf_machine_load_address (void) __attribute__ ((const));

static inline Elf64_Addr
elf_machine_load_address (void)
{
  Elf64_Addr ret;

  /* The first entry in .got (and thus the first entry in .toc) is the
     link-time TOC_base, ie. r2.  So the difference between that and
     the current r2 set by the kernel is how far the shared lib has
     moved.  */
  asm ("  ld %0,-32768(2)\n"
       "  subf %0,%0,2\n"
       : "=r" (ret));
  return ret;
}
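
/* Note: in the PowerPC64 ABI the TOC pointer (r2) is biased 0x8000 bytes
   past the start of the .got/.toc section, so the first GOT entry (the
   link-time TOC base stored by the linker) is found at offset -32768
   from r2, which is what the ld above reads.  */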

/* Return the link-time address of _DYNAMIC.  */
static inline Elf64_Addr
elf_machine_dynamic (void)
{
  Elf64_Addr runtime_dynamic;
  /* It's easier to get the run-time address.  */
  asm ("  addis %0,2,_DYNAMIC@toc@ha\n"
       "  addi %0,%0,_DYNAMIC@toc@l\n"
       : "=b" (runtime_dynamic));
  /* Then subtract off the load address offset.  */
  return runtime_dynamic - elf_machine_load_address ();
}

/* The PLT uses Elf64_Rela relocs.  */
#define elf_machine_relplt elf_machine_rela

#ifdef HAVE_INLINED_SYSCALLS
/* We do not need _dl_starting_up.  */
# define DL_STARTING_UP_DEF
#else
# define DL_STARTING_UP_DEF \
".LC__dl_starting_up:\n" \
"  .tc __GI__dl_starting_up[TC],__GI__dl_starting_up\n"
#endif

/* Initial entry point code for the dynamic linker.  The C function
   `_dl_start' is the real entry point; its return value is the user
   program's entry point.  */
#define RTLD_START \
  asm (".pushsection \".text\"\n" \
"  .align 2\n" \
"  " ENTRY_2(_start) "\n" \
BODY_PREFIX "_start:\n" \
"  " LOCALENTRY(_start) "\n" \
/* We start with the following on the stack, from top: \
   argc (4 bytes); \
   arguments for program (terminated by NULL); \
   environment variables (terminated by NULL); \
   arguments for the program loader.  */ \
"  mr 3,1\n" \
"  li 4,0\n" \
"  stdu 4,-128(1)\n" \
/* Call _dl_start with one parameter pointing at argc.  */ \
"  bl " DOT_PREFIX "_dl_start\n" \
"  nop\n" \
/* Transfer control to _dl_start_user!  */ \
"  b " DOT_PREFIX "_dl_start_user\n" \
".LT__start:\n" \
"  .long 0\n" \
"  .byte 0x00,0x0c,0x24,0x40,0x00,0x00,0x00,0x00\n" \
"  .long .LT__start-" BODY_PREFIX "_start\n" \
"  .short .LT__start_name_end-.LT__start_name_start\n" \
".LT__start_name_start:\n" \
"  .ascii \"_start\"\n" \
".LT__start_name_end:\n" \
"  .align 2\n" \
"  " END_2(_start) "\n" \
"  .pushsection \".toc\",\"aw\"\n" \
DL_STARTING_UP_DEF \
".LC__rtld_local:\n" \
"  .tc _rtld_local[TC],_rtld_local\n" \
".LC__dl_argc:\n" \
"  .tc _dl_argc[TC],_dl_argc\n" \
".LC__dl_argv:\n" \
"  .tc __GI__dl_argv[TC],__GI__dl_argv\n" \
".LC__dl_fini:\n" \
"  .tc _dl_fini[TC],_dl_fini\n" \
"  .popsection\n" \
"  " ENTRY_2(_dl_start_user) "\n" \
/* Now, we do our main work of calling initialisation procedures. \
   The ELF ABI doesn't say anything about parameters for these, \
   so we just pass argc, argv, and the environment. \
   Changing these is strongly discouraged (not least because argc is \
   passed by value!).  */ \
BODY_PREFIX "_dl_start_user:\n" \
"  " LOCALENTRY(_dl_start_user) "\n" \
/* the address of _start in r30.  */ \
"  mr 30,3\n" \
/* &_dl_argc in 29, &_dl_argv in 27, and _dl_loaded in 28.  */ \
"  addis 28,2,.LC__rtld_local@toc@ha\n" \
"  ld 28,.LC__rtld_local@toc@l(28)\n" \
"  addis 29,2,.LC__dl_argc@toc@ha\n" \
"  ld 29,.LC__dl_argc@toc@l(29)\n" \
"  addis 27,2,.LC__dl_argv@toc@ha\n" \
"  ld 27,.LC__dl_argv@toc@l(27)\n" \
/* _dl_init (_dl_loaded, _dl_argc, _dl_argv, _dl_argv+_dl_argc+1).  */ \
"  ld 3,0(28)\n" \
"  lwa 4,0(29)\n" \
"  ld 5,0(27)\n" \
"  sldi 6,4,3\n" \
"  add 6,5,6\n" \
"  addi 6,6,8\n" \
"  bl " DOT_PREFIX "_dl_init\n" \
"  nop\n" \
/* Now, to conform to the ELF ABI, we have to: \
   Pass argc (actually _dl_argc) in r3;  */ \
"  lwa 3,0(29)\n" \
/* Pass argv (actually _dl_argv) in r4;  */ \
"  ld 4,0(27)\n" \
/* Pass argv+argc+1 in r5;  */ \
"  sldi 5,3,3\n" \
"  add 6,4,5\n" \
"  addi 5,6,8\n" \
/* Pass the auxiliary vector in r6.  This is passed to us just after \
   _envp.  */ \
"2:  ldu 0,8(6)\n" \
"  cmpdi 0,0\n" \
"  bne 2b\n" \
"  addi 6,6,8\n" \
/* Pass a termination function pointer (in this case _dl_fini) in \
   r7.  */ \
"  addis 7,2,.LC__dl_fini@toc@ha\n" \
"  ld 7,.LC__dl_fini@toc@l(7)\n" \
/* Pass the stack pointer in r1 (so far so good), pointing to a NULL \
   value.  This lets our startup code distinguish between a program \
   linked statically, which linux will call with argc on top of the \
   stack which will hopefully never be zero, and a dynamically linked \
   program which will always have a NULL on the top of the stack. \
   Take the opportunity to clear LR, so anyone who accidentally \
   returns from _start gets SEGV.  Also clear the next few words of \
   the stack.  */ \
"  li 31,0\n" \
"  std 31,0(1)\n" \
"  mtlr 31\n" \
"  std 31,8(1)\n" \
"  std 31,16(1)\n" \
"  std 31,24(1)\n" \
/* Now, call the start function descriptor at r30...  */ \
"  .globl ._dl_main_dispatch\n" \
"._dl_main_dispatch:\n" \
"  " PPC64_LOAD_FUNCPTR(30) "\n" \
"  bctr\n" \
".LT__dl_start_user:\n" \
"  .long 0\n" \
"  .byte 0x00,0x0c,0x24,0x40,0x00,0x00,0x00,0x00\n" \
"  .long .LT__dl_start_user-" BODY_PREFIX "_dl_start_user\n" \
"  .short .LT__dl_start_user_name_end-.LT__dl_start_user_name_start\n" \
".LT__dl_start_user_name_start:\n" \
"  .ascii \"_dl_start_user\"\n" \
".LT__dl_start_user_name_end:\n" \
"  .align 2\n" \
"  " END_2(_dl_start_user) "\n" \
"  .popsection");

/* ELF_RTYPE_CLASS_COPY iff TYPE should not be allowed to resolve to
   one of the main executable's symbols, as for a COPY reloc.

   To make function pointer comparisons work on most targets, the
   relevant ABI states that the address of a non-local function in a
   dynamically linked executable is the address of the PLT entry for
   that function.  This is quite reasonable since using the real
   function address in a non-PIC executable would typically require
   dynamic relocations in .text, something to be avoided.  For such
   functions, the linker emits a SHN_UNDEF symbol in the executable
   with value equal to the PLT entry address.  Normally, SHN_UNDEF
   symbols have a value of zero, so this is a clue to ld.so that it
   should treat these symbols specially.  For relocations not in
   ELF_RTYPE_CLASS_PLT (eg. those on function pointers), ld.so should
   use the value of the executable SHN_UNDEF symbol, ie. the PLT entry
   address.  For relocations in ELF_RTYPE_CLASS_PLT (eg. the relocs in
   the PLT itself), ld.so should use the value of the corresponding
   defined symbol in the object that defines the function, ie. the
   real function address.  This complicates ld.so in that there are
   now two possible values for a given symbol, and it gets even worse
   because protected symbols need yet another set of rules.

   On PowerPC64 we don't need any of this.  The linker won't emit
   SHN_UNDEF symbols with non-zero values.  ld.so can make all
   relocations behave "normally", ie. always use the real address
   like PLT relocations.  So always set ELF_RTYPE_CLASS_PLT.  */

#if _CALL_ELF != 2
#define elf_machine_type_class(type) \
  (ELF_RTYPE_CLASS_PLT | (((type) == R_PPC64_COPY) * ELF_RTYPE_CLASS_COPY))
#else
/* And now that you have read that large comment, you can disregard it
   all for ELFv2.  ELFv2 does need the special SHN_UNDEF treatment.  */
#define IS_PPC64_TLS_RELOC(R) \
  (((R) >= R_PPC64_TLS && (R) <= R_PPC64_DTPREL16_HIGHESTA) \
   || ((R) >= R_PPC64_TPREL16_HIGH && (R) <= R_PPC64_DTPREL16_HIGHA))

#define elf_machine_type_class(type) \
  ((((type) == R_PPC64_JMP_SLOT \
     || (type) == R_PPC64_ADDR24 \
     || IS_PPC64_TLS_RELOC (type)) * ELF_RTYPE_CLASS_PLT) \
   | (((type) == R_PPC64_COPY) * ELF_RTYPE_CLASS_COPY))
#endif

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT R_PPC64_JMP_SLOT

/* We define an initialization function to initialize HWCAP/HWCAP2 and
   platform data so it can be copied into the TCB later.  This is called
   very early in _dl_sysdep_start for dynamically linked binaries.  */
#if defined(SHARED) && IS_IN (rtld)
# define DL_PLATFORM_INIT dl_platform_init ()

static inline void __attribute__ ((unused))
dl_platform_init (void)
{
  __tcb_parse_hwcap_and_convert_at_platform ();
}
#endif

/* Stuff for the PLT.  */
#if _CALL_ELF != 2
#define PLT_INITIAL_ENTRY_WORDS 3
#define PLT_ENTRY_WORDS 3
#define GLINK_INITIAL_ENTRY_WORDS 8
/* The first 32k entries of glink can set an index and branch using two
   instructions; past that point, glink uses three instructions.  */
#define GLINK_ENTRY_WORDS(I) (((I) < 0x8000)? 2 : 3)
#else
#define PLT_INITIAL_ENTRY_WORDS 2
#define PLT_ENTRY_WORDS 1
#define GLINK_INITIAL_ENTRY_WORDS 8
#define GLINK_ENTRY_WORDS(I) 1
#endif
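
/* The reserved initial PLT words correspond to the setup done in
   elf_machine_runtime_setup below: for ELFv1 the first three doublewords
   hold the function descriptor of _dl_runtime_resolve plus the link map
   pointer, while for ELFv2 the first two doublewords hold the resolver
   address and the link map pointer.  */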

#define PPC_DCBST(where) asm volatile ("dcbst 0,%0" : : "r"(where) : "memory")
#define PPC_DCBT(where) asm volatile ("dcbt 0,%0" : : "r"(where) : "memory")
#define PPC_DCBF(where) asm volatile ("dcbf 0,%0" : : "r"(where) : "memory")
#define PPC_SYNC asm volatile ("sync" : : : "memory")
#define PPC_ISYNC asm volatile ("sync; isync" : : : "memory")
#define PPC_ICBI(where) asm volatile ("icbi 0,%0" : : "r"(where) : "memory")
#define PPC_DIE asm volatile ("tweq 0,0")
/* Use this when you've modified some code, but it won't be in the
   instruction fetch queue (or when it doesn't matter if it is).  */
#define MODIFIED_CODE_NOQUEUE(where) \
  do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); } while (0)
/* Use this when it might be in the instruction queue.  */
#define MODIFIED_CODE(where) \
  do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); PPC_ISYNC; } while (0)
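
/* MODIFIED_CODE_NOQUEUE is used at the end of elf_machine_rela after
   patching instruction fields in place; elf_machine_runtime_setup instead
   flushes the whole PLT with an explicit dcbst/sync loop.  */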

/* Set up the loaded object described by MAP so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */
static inline int __attribute__ ((always_inline))
elf_machine_runtime_setup (struct link_map *map, struct r_scope_elem *scope[],
                           int lazy, int profile)
{
  if (map->l_info[DT_JMPREL])
    {
      Elf64_Word i;
      Elf64_Word *glink = NULL;
      Elf64_Xword *plt = (Elf64_Xword *) D_PTR (map, l_info[DT_PLTGOT]);
      Elf64_Word num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
                                    / sizeof (Elf64_Rela));
      Elf64_Addr l_addr = map->l_addr;
      Elf64_Dyn **info = map->l_info;
      char *p;

      extern void _dl_runtime_resolve (void);
      extern void _dl_profile_resolve (void);

      /* Relocate the DT_PPC64_GLINK entry in the _DYNAMIC section.
         elf_get_dynamic_info takes care of the standard entries but
         doesn't know exactly what to do with processor specific
         entries.  */
      if (info[DT_PPC64(GLINK)] != NULL)
        info[DT_PPC64(GLINK)]->d_un.d_ptr += l_addr;

      if (lazy)
        {
          Elf64_Word glink_offset;
          Elf64_Word offset;
          Elf64_Addr dlrr;

#ifdef SHARED
          if (__glibc_unlikely (profile))
            {
              dlrr = (Elf64_Addr) _dl_profile_resolve;
              if (profile && GLRO(dl_profile) != NULL
                  && _dl_name_match_p (GLRO(dl_profile), map))
                /* This is the object we are looking for.  Say that we really
                   want profiling and the timers are started.  */
                GL(dl_profile_map) = map;
            }
          else
#endif
            dlrr = (Elf64_Addr) _dl_runtime_resolve;

#if _CALL_ELF != 2
          /* We need to stuff the address/TOC of _dl_runtime_resolve
             into doublewords 0 and 1 of plt_reserve.  Then we need to
             stuff the map address into doubleword 2 of plt_reserve.
             This allows the GLINK0 code to transfer control to the
             correct trampoline which will transfer control to fixup
             in dl-machine.c.  */
          {
            /* The plt_reserve area is the 1st 3 doublewords of the PLT.  */
            Elf64_FuncDesc *plt_reserve = (Elf64_FuncDesc *) plt;
            Elf64_FuncDesc *resolve_fd = (Elf64_FuncDesc *) dlrr;
            plt_reserve->fd_func = resolve_fd->fd_func;
            plt_reserve->fd_toc = resolve_fd->fd_toc;
            plt_reserve->fd_aux = (Elf64_Addr) map;
#ifdef RTLD_BOOTSTRAP
            /* When we're bootstrapping, the opd entry will not have
               been relocated yet.  */
            plt_reserve->fd_func += l_addr;
            plt_reserve->fd_toc += l_addr;
#endif
          }
#else
          /* When we don't have function descriptors, the first doubleword
             of the PLT holds the address of _dl_runtime_resolve, and the
             second doubleword holds the map address.  */
          plt[0] = dlrr;
          plt[1] = (Elf64_Addr) map;
#endif

          /* Set up the lazy PLT entries.  */
          glink = (Elf64_Word *) D_PTR (map, l_info[DT_PPC64(GLINK)]);
          offset = PLT_INITIAL_ENTRY_WORDS;
          glink_offset = GLINK_INITIAL_ENTRY_WORDS;
          for (i = 0; i < num_plt_entries; i++)
            {
              plt[offset] = (Elf64_Xword) &glink[glink_offset];
              offset += PLT_ENTRY_WORDS;
              glink_offset += GLINK_ENTRY_WORDS (i);
            }

          /* Now, we've modified data.  We need to write the changes from
             the data cache to a second-level unified cache, then make
             sure that stale data in the instruction cache is removed.
             (In a multiprocessor system, the effect is more complex.)
             Most of the PLT shouldn't be in the instruction cache, but
             there may be a little overlap at the start and the end.

             Assumes that dcbst and icbi apply to lines of 16 bytes or
             more.  Current known line sizes are 16, 32, and 128 bytes.  */

          for (p = (char *) plt; p < (char *) &plt[offset]; p += 16)
            PPC_DCBST (p);
          PPC_SYNC;
        }
    }
  return lazy;
}

#if _CALL_ELF == 2
extern void attribute_hidden _dl_error_localentry (struct link_map *map,
                                                   const Elf64_Sym *refsym);

/* If the PLT entry resolves to a function in the same object, return
   the target function's local entry point offset if usable.  */
static inline Elf64_Addr __attribute__ ((always_inline))
ppc64_local_entry_offset (struct link_map *map, lookup_t sym_map,
                          const ElfW(Sym) *refsym, const ElfW(Sym) *sym)
{
  /* If the target function is in a different object, we cannot
     use the local entry point.  */
  if (sym_map != map)
    {
      /* Check that optimized plt call stubs for localentry:0 functions
         are not being satisfied by a non-zero localentry symbol.  */
      if (map->l_info[DT_PPC64(OPT)]
          && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_LOCALENTRY) != 0
          && refsym->st_info == ELFW(ST_INFO) (STB_GLOBAL, STT_FUNC)
          && (STO_PPC64_LOCAL_MASK & refsym->st_other) == 0
          && (STO_PPC64_LOCAL_MASK & sym->st_other) != 0)
        _dl_error_localentry (map, refsym);

      return 0;
    }

  /* If the linker inserted multiple TOCs, we cannot use the
     local entry point.  */
  if (map->l_info[DT_PPC64(OPT)]
      && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_MULTI_TOC))
    return 0;

  /* If the target function is an ifunc then the local entry offset is
     for the resolver, not the final destination.  */
  if (__builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0))
    return 0;

  /* Otherwise, we can use the local entry point.  Retrieve its offset
     from the symbol's ELF st_other field.  */
  return PPC64_LOCAL_ENTRY_OFFSET (sym->st_other);
}
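
/* Background note: an ELFv2 function has a global entry point, which
   computes r2 from r12, and a local entry point a few instructions later
   (typically 8 bytes in) that assumes r2 is already valid; the distance
   between the two is what PPC64_LOCAL_ENTRY_OFFSET decodes from the
   st_other bits.  */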
#endif

/* Change the PLT entry whose reloc is 'reloc' to call the actual
   routine.  */
static inline Elf64_Addr __attribute__ ((always_inline))
elf_machine_fixup_plt (struct link_map *map, lookup_t sym_map,
                       const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
                       const Elf64_Rela *reloc,
                       Elf64_Addr *reloc_addr, Elf64_Addr finaladdr)
{
#if _CALL_ELF != 2
  Elf64_FuncDesc *plt = (Elf64_FuncDesc *) reloc_addr;
  Elf64_FuncDesc *rel = (Elf64_FuncDesc *) finaladdr;
  Elf64_Addr offset = 0;
  Elf64_FuncDesc zero_fd = {0, 0, 0};

  PPC_DCBT (&plt->fd_aux);
  PPC_DCBT (&plt->fd_func);

  /* If sym_map is NULL, it's a weak undefined sym;  Set the plt to
     zero.  finaladdr should be zero already in this case, but guard
     against invalid plt relocations with non-zero addends.  */
  if (sym_map == NULL)
    finaladdr = 0;

  /* Don't die here if finaladdr is zero, die if this plt entry is
     actually called.  Makes a difference when LD_BIND_NOW=1.
     finaladdr may be zero for a weak undefined symbol, or when an
     ifunc resolver returns zero.  */
  if (finaladdr == 0)
    rel = &zero_fd;
  else
    {
      PPC_DCBT (&rel->fd_aux);
      PPC_DCBT (&rel->fd_func);
    }

  /* If the opd entry is not yet relocated (because it's from a shared
     object that hasn't been processed yet), then manually reloc it.  */
  if (finaladdr != 0 && map != sym_map && !sym_map->l_relocated
#if !defined RTLD_BOOTSTRAP && defined SHARED
      /* Bootstrap map doesn't have l_relocated set for it.  */
      && sym_map != &GL(dl_rtld_map)
#endif
      )
    offset = sym_map->l_addr;

  /* For PPC64, fixup_plt copies the function descriptor from opd
     over the corresponding PLT entry.
     Initially, PLT Entry[i] is set up for lazy linking, or is zero.
     For lazy linking, the fd_toc and fd_aux entries are irrelevant,
     so for thread safety we write them before changing fd_func.  */

  plt->fd_aux = rel->fd_aux + offset;
  plt->fd_toc = rel->fd_toc + offset;
  PPC_DCBF (&plt->fd_toc);
  PPC_ISYNC;

  plt->fd_func = rel->fd_func + offset;
  PPC_DCBST (&plt->fd_func);
  PPC_ISYNC;
#else
  finaladdr += ppc64_local_entry_offset (map, sym_map, refsym, sym);
  *reloc_addr = finaladdr;
#endif

  return finaladdr;
}

/* Return the final value of a plt relocation.  */
static inline Elf64_Addr
elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
                       Elf64_Addr value)
{
  return value + reloc->r_addend;
}

/* Names of the architecture-specific auditing callback functions.  */
#if _CALL_ELF != 2
#define ARCH_LA_PLTENTER ppc64_gnu_pltenter
#define ARCH_LA_PLTEXIT ppc64_gnu_pltexit
#else
#define ARCH_LA_PLTENTER ppc64v2_gnu_pltenter
#define ARCH_LA_PLTEXIT ppc64v2_gnu_pltexit
#endif

#if ENABLE_STATIC_PIE && !defined SHARED && !IS_IN (rtld)
#include <libc-diag.h>
#include <tcb-offsets.h>

/* Set up r13 for _dl_relocate_static_pie so that libgcc ifuncs that
   normally access the tcb copy of hwcap will see __tcb.hwcap.  */

static inline void __attribute__ ((always_inline))
ppc_init_fake_thread_pointer (void)
{
  DIAG_PUSH_NEEDS_COMMENT;
  /* We are playing pointer tricks.  Silence gcc warning.  */
  DIAG_IGNORE_NEEDS_COMMENT (4.9, "-Warray-bounds");
  __thread_register = (char *) &__tcb.hwcap - TCB_HWCAP;
  DIAG_POP_NEEDS_COMMENT;
}

#define ELF_MACHINE_BEFORE_RTLD_RELOC(map, dynamic_info) \
  ppc_init_fake_thread_pointer ();
#endif /* ENABLE_STATIC_PIE && !defined SHARED && !IS_IN (rtld)  */

#endif /* dl_machine_h  */

#ifdef RESOLVE_MAP

#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI ((v) + 0x8000)
#define PPC_HIGHER(v) (((v) >> 32) & 0xffff)
#define PPC_HIGHERA(v) PPC_HIGHER ((v) + 0x8000)
#define PPC_HIGHEST(v) (((v) >> 48) & 0xffff)
#define PPC_HIGHESTA(v) PPC_HIGHEST ((v) + 0x8000)
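
/* The "A" (adjusted) variants compensate for sign extension of the
   corresponding low part.  Worked example with v = 0x1234abcd:
   PPC_LO (v) = 0xabcd, PPC_HI (v) = 0x1234, PPC_HA (v) = 0x1235, so that
   (PPC_HA (v) << 16) + (short) PPC_LO (v) reconstructs 0x1234abcd.  */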

#define BIT_INSERT(var, val, mask) \
  ((var) = ((var) & ~(Elf64_Addr) (mask)) | ((val) & (mask)))

#define dont_expect(X) __builtin_expect ((X), 0)

extern void attribute_hidden _dl_reloc_overflow (struct link_map *map,
                                                 const char *name,
                                                 Elf64_Addr *const reloc_addr,
                                                 const Elf64_Sym *refsym);

static inline void __attribute__ ((always_inline))
elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
                           void *const reloc_addr_arg)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  *reloc_addr = l_addr + reloc->r_addend;
}

/* This computes the value used by TPREL* relocs.  */
static inline Elf64_Addr __attribute__ ((always_inline, const))
elf_machine_tprel (struct link_map *map,
                   struct link_map *sym_map,
                   const Elf64_Sym *sym,
                   const Elf64_Rela *reloc)
{
#ifndef RTLD_BOOTSTRAP
  if (sym_map)
    {
      CHECK_STATIC_TLS (map, sym_map);
#endif
      return TLS_TPREL_VALUE (sym_map, sym, reloc);
#ifndef RTLD_BOOTSTRAP
    }
#endif
  return 0;
}
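
/* Note: the powerpc TLS ABI biases the thread pointer and DTV pointers
   by TLS_TP_OFFSET and TLS_DTV_OFFSET (0x7000 and 0x8000 in glibc's
   powerpc port), which is why those constants appear in the DTPMOD64
   and DTPREL64 handling in elf_machine_rela below.  */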

/* Call function at address VALUE (an OPD entry) to resolve ifunc relocs.  */
static inline Elf64_Addr __attribute__ ((always_inline))
resolve_ifunc (Elf64_Addr value,
               const struct link_map *map, const struct link_map *sym_map)
{
#if _CALL_ELF != 2
  /* The function we are calling may not yet have its opd entry relocated.  */
  Elf64_FuncDesc opd;
  if (map != sym_map
# if !defined RTLD_BOOTSTRAP && defined SHARED
      /* Bootstrap map doesn't have l_relocated set for it.  */
      && sym_map != &GL(dl_rtld_map)
# endif
      && !sym_map->l_relocated)
    {
      Elf64_FuncDesc *func = (Elf64_FuncDesc *) value;
      opd.fd_func = func->fd_func + sym_map->l_addr;
      opd.fd_toc = func->fd_toc + sym_map->l_addr;
      opd.fd_aux = func->fd_aux;
      /* GCC 4.9+ eliminates the branch as dead code, force the opd set
         dependency.  */
      asm ("" : "=r" (value) : "0" (&opd), "X" (opd));
    }
#endif
  return ((Elf64_Addr (*) (unsigned long int)) value) (GLRO(dl_hwcap));
}

/* Perform the relocation specified by RELOC and SYM (which is fully
   resolved).  MAP is the object containing the reloc.  */
static inline void __attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
                  const Elf64_Rela *reloc,
                  const Elf64_Sym *sym,
                  const struct r_found_version *version,
                  void *const reloc_addr_arg,
                  int skip_ifunc)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  const int r_type = ELF64_R_TYPE (reloc->r_info);
  const Elf64_Sym *const refsym = sym;
  union unaligned
  {
    uint16_t u2;
    uint32_t u4;
    uint64_t u8;
  } __attribute__ ((__packed__));

  if (r_type == R_PPC64_RELATIVE)
    {
      *reloc_addr = map->l_addr + reloc->r_addend;
      return;
    }

  if (__glibc_unlikely (r_type == R_PPC64_NONE))
    return;

  /* We need SYM_MAP even in the absence of TLS, for elf_machine_fixup_plt
     and STT_GNU_IFUNC.  */
  struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version, r_type);
  Elf64_Addr value = SYMBOL_ADDRESS (sym_map, sym, true) + reloc->r_addend;

  if (sym != NULL
      && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0)
      && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1)
      && __builtin_expect (!skip_ifunc, 1))
    value = resolve_ifunc (value, map, sym_map);

  /* For relocs that don't edit code, return.
     For relocs that might edit instructions, break from the switch.  */
  switch (r_type)
    {
    case R_PPC64_ADDR64:
    case R_PPC64_GLOB_DAT:
      *reloc_addr = value;
      return;

    case R_PPC64_IRELATIVE:
      if (__glibc_likely (!skip_ifunc))
        value = resolve_ifunc (value, map, sym_map);
      *reloc_addr = value;
      return;

    case R_PPC64_JMP_IREL:
      if (__glibc_likely (!skip_ifunc))
        value = resolve_ifunc (value, map, sym_map);
      /* Fall thru */
    case R_PPC64_JMP_SLOT:
      elf_machine_fixup_plt (map, sym_map, refsym, sym,
                             reloc, reloc_addr, value);
      return;

    case R_PPC64_DTPMOD64:
      if (map->l_info[DT_PPC64(OPT)]
          && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_TLS))
        {
#ifdef RTLD_BOOTSTRAP
          reloc_addr[0] = 0;
          reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
                           + TLS_DTV_OFFSET);
          return;
#else
          if (sym_map != NULL)
            {
# ifndef SHARED
              CHECK_STATIC_TLS (map, sym_map);
# else
              if (TRY_STATIC_TLS (map, sym_map))
# endif
                {
                  reloc_addr[0] = 0;
                  /* Set up for local dynamic.  */
                  reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
                                   + TLS_DTV_OFFSET);
                  return;
                }
            }
#endif
        }
#ifdef RTLD_BOOTSTRAP
      /* During startup the dynamic linker is always index 1.  */
      *reloc_addr = 1;
#else
      /* Get the information from the link map returned by the
         resolve function.  */
      if (sym_map != NULL)
        *reloc_addr = sym_map->l_tls_modid;
#endif
      return;

    case R_PPC64_DTPREL64:
      if (map->l_info[DT_PPC64(OPT)]
          && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_TLS))
        {
#ifdef RTLD_BOOTSTRAP
          *reloc_addr = TLS_TPREL_VALUE (sym_map, sym, reloc);
          return;
#else
          if (sym_map != NULL)
            {
              /* This reloc is always preceded by R_PPC64_DTPMOD64.  */
# ifndef SHARED
              assert (HAVE_STATIC_TLS (map, sym_map));
# else
              if (HAVE_STATIC_TLS (map, sym_map))
# endif
                {
                  *reloc_addr = TLS_TPREL_VALUE (sym_map, sym, reloc);
                  return;
                }
            }
#endif
        }
      /* During relocation all TLS symbols are defined and used.
         Therefore the offset is already correct.  */
#ifndef RTLD_BOOTSTRAP
      if (sym_map != NULL)
        *reloc_addr = TLS_DTPREL_VALUE (sym, reloc);
#endif
      return;

    case R_PPC64_TPREL64:
      *reloc_addr = elf_machine_tprel (map, sym_map, sym, reloc);
      return;

    case R_PPC64_TPREL16_LO_DS:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_LO_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_TPREL16_DS:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_TPREL16:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value + 0x8000) >= 0x10000))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_TPREL16_LO:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_TPREL16_HI:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect (value + 0x80000000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_HI", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_TPREL16_HIGH:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_TPREL16_HA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect (value + 0x80008000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_HA", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_TPREL16_HIGHA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_TPREL16_HIGHER:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHER (value);
      break;

    case R_PPC64_TPREL16_HIGHEST:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHEST (value);
      break;

    case R_PPC64_TPREL16_HIGHERA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHERA (value);
      break;

    case R_PPC64_TPREL16_HIGHESTA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHESTA (value);
      break;

#ifndef RTLD_BOOTSTRAP /* None of the following appear in ld.so  */
    case R_PPC64_ADDR16_LO_DS:
      if (dont_expect ((value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_LO_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_ADDR16_LO:
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_ADDR16_HI:
      if (dont_expect (value + 0x80000000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_HI", reloc_addr, refsym);
      /* Fall through.  */
    case R_PPC64_ADDR16_HIGH:
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_ADDR16_HA:
      if (dont_expect (value + 0x80008000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_HA", reloc_addr, refsym);
      /* Fall through.  */
    case R_PPC64_ADDR16_HIGHA:
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_ADDR30:
      {
        Elf64_Addr delta = value - (Elf64_Xword) reloc_addr;
        if (dont_expect ((delta + 0x80000000) >= 0x100000000LL
                         || (delta & 3) != 0))
          _dl_reloc_overflow (map, "R_PPC64_ADDR30", reloc_addr, refsym);
        BIT_INSERT (*(Elf64_Word *) reloc_addr, delta, 0xfffffffc);
      }
      break;

    case R_PPC64_COPY:
      if (dont_expect (sym == NULL))
        /* This can happen in trace mode when an object could not be found.  */
        return;
      if (dont_expect (sym->st_size > refsym->st_size
                       || (GLRO(dl_verbose)
                           && sym->st_size < refsym->st_size)))
        {
          const char *strtab;

          strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
          _dl_error_printf ("%s: Symbol `%s' has different size"
                            " in shared object,"
                            " consider re-linking\n",
                            RTLD_PROGNAME, strtab + refsym->st_name);
        }
      memcpy (reloc_addr_arg, (char *) value,
              MIN (sym->st_size, refsym->st_size));
      return;

    case R_PPC64_UADDR64:
      ((union unaligned *) reloc_addr)->u8 = value;
      return;

    case R_PPC64_UADDR32:
      ((union unaligned *) reloc_addr)->u4 = value;
      return;

    case R_PPC64_ADDR32:
      if (dont_expect ((value + 0x80000000) >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_ADDR32", reloc_addr, refsym);
      *(Elf64_Word *) reloc_addr = value;
      return;

    case R_PPC64_ADDR24:
      if (dont_expect ((value + 0x2000000) >= 0x4000000 || (value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_ADDR24", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Word *) reloc_addr, value, 0x3fffffc);
      break;

    case R_PPC64_ADDR16:
      if (dont_expect ((value + 0x8000) >= 0x10000))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = value;
      break;

    case R_PPC64_UADDR16:
      if (dont_expect ((value + 0x8000) >= 0x10000))
        _dl_reloc_overflow (map, "R_PPC64_UADDR16", reloc_addr, refsym);
      ((union unaligned *) reloc_addr)->u2 = value;
      return;

    case R_PPC64_ADDR16_DS:
      if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_ADDR16_HIGHER:
      *(Elf64_Half *) reloc_addr = PPC_HIGHER (value);
      break;

    case R_PPC64_ADDR16_HIGHEST:
      *(Elf64_Half *) reloc_addr = PPC_HIGHEST (value);
      break;

    case R_PPC64_ADDR16_HIGHERA:
      *(Elf64_Half *) reloc_addr = PPC_HIGHERA (value);
      break;

    case R_PPC64_ADDR16_HIGHESTA:
      *(Elf64_Half *) reloc_addr = PPC_HIGHESTA (value);
      break;

    case R_PPC64_ADDR14:
    case R_PPC64_ADDR14_BRTAKEN:
    case R_PPC64_ADDR14_BRNTAKEN:
      {
        if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
          _dl_reloc_overflow (map, "R_PPC64_ADDR14", reloc_addr, refsym);
        Elf64_Word insn = *(Elf64_Word *) reloc_addr;
        BIT_INSERT (insn, value, 0xfffc);
        if (r_type != R_PPC64_ADDR14)
          {
            insn &= ~(1 << 21);
            if (r_type == R_PPC64_ADDR14_BRTAKEN)
              insn |= 1 << 21;
            if ((insn & (0x14 << 21)) == (0x04 << 21))
              insn |= 0x02 << 21;
            else if ((insn & (0x14 << 21)) == (0x10 << 21))
              insn |= 0x08 << 21;
          }
        *(Elf64_Word *) reloc_addr = insn;
      }
      break;

    case R_PPC64_REL32:
      *(Elf64_Word *) reloc_addr = value - (Elf64_Addr) reloc_addr;
      return;

    case R_PPC64_REL64:
      *reloc_addr = value - (Elf64_Addr) reloc_addr;
      return;
#endif /* !RTLD_BOOTSTRAP */

    default:
      _dl_reloc_bad_type (map, r_type, 0);
      return;
    }
  MODIFIED_CODE_NOQUEUE (reloc_addr);
}

static inline void __attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
                      Elf64_Addr l_addr, const Elf64_Rela *reloc,
                      int skip_ifunc)
{
  /* elf_machine_runtime_setup handles this.  */
}

#endif /* RESOLVE_MAP */