PowerPC: Influence cpu/arch hwcap features via GLIBC_TUNABLES
glibc.git: sysdeps/powerpc/powerpc64/dl-machine.h

/* Machine-dependent ELF dynamic relocation inline functions.
   PowerPC64 version.
   Copyright 1995-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */

#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "powerpc64"

#include <assert.h>
#include <sys/param.h>
#include <dl-tls.h>
#include <sysdep.h>
#include <hwcapinfo.h>
#include <dl-static-tls.h>
#include <dl-funcdesc.h>
#include <dl-machine-rel.h>

/* Translate a processor specific dynamic tag to the index
   in l_info array.  */
#define DT_PPC64(x) (DT_PPC64_##x - DT_LOPROC + DT_NUM)
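
/* Worked example (informational, values taken from elf.h): DT_PPC64_GLINK
   is defined as DT_LOPROC + 0, so DT_PPC64 (GLINK) expands to DT_NUM + 0,
   the first of the l_info slots reserved for processor specific dynamic
   tags; elf_machine_runtime_setup below accesses it as
   map->l_info[DT_PPC64(GLINK)].  */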

#define ELF_MULT_MACHINES_SUPPORTED

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int
elf_machine_matches_host (const Elf64_Ehdr *ehdr)
{
  /* Verify that the binary matches our ABI version.  */
  if ((ehdr->e_flags & EF_PPC64_ABI) != 0)
    {
#if _CALL_ELF != 2
      if ((ehdr->e_flags & EF_PPC64_ABI) != 1)
        return 0;
#else
      if ((ehdr->e_flags & EF_PPC64_ABI) != 2)
        return 0;
#endif
    }

  return ehdr->e_machine == EM_PPC64;
}

/* Return nonzero iff ELF header is compatible with the running host,
   but not this loader.  */
static inline int
elf_host_tolerates_machine (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_machine == EM_PPC;
}

/* Return nonzero iff ELF header is compatible with the running host,
   but not this loader.  */
static inline int
elf_host_tolerates_class (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_ident[EI_CLASS] == ELFCLASS32;
}

/* Return the run-time load address of the shared object, assuming it
   was originally linked at zero.  */
static inline Elf64_Addr
elf_machine_load_address (void) __attribute__ ((const));

static inline Elf64_Addr
elf_machine_load_address (void)
{
  Elf64_Addr ret;

  /* The first entry in .got (and thus the first entry in .toc) is the
     link-time TOC_base, ie. r2.  So the difference between that and
     the current r2 set by the kernel is how far the shared lib has
     moved.  */
  asm ("	ld	%0,-32768(2)\n"
       "	subf	%0,%0,2\n"
       : "=r" (ret));
  return ret;
}

/* Return the link-time address of _DYNAMIC.  */
static inline Elf64_Addr
elf_machine_dynamic (void)
{
  Elf64_Addr runtime_dynamic;
  /* It's easier to get the run-time address.  */
  asm ("	addis	%0,2,_DYNAMIC@toc@ha\n"
       "	addi	%0,%0,_DYNAMIC@toc@l\n"
       : "=b" (runtime_dynamic));
  /* Then subtract off the load address offset.  */
  return runtime_dynamic - elf_machine_load_address ();
}
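
/* Illustration of the two helpers above: if this object was linked with
   TOC base X and has been loaded with offset L, the startup code sets
   r2 = X + L while the first .got/.toc entry still holds the unrelocated
   value X, so elf_machine_load_address returns (X + L) - X = L.
   elf_machine_dynamic then recovers the link-time address of _DYNAMIC by
   subtracting that same L from its run-time address.  */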

/* The PLT uses Elf64_Rela relocs.  */
#define elf_machine_relplt elf_machine_rela


#ifdef HAVE_INLINED_SYSCALLS
/* We do not need _dl_starting_up.  */
# define DL_STARTING_UP_DEF
#else
# define DL_STARTING_UP_DEF \
".LC__dl_starting_up:\n" \
"	.tc __GI__dl_starting_up[TC],__GI__dl_starting_up\n"
#endif

/* Initial entry point code for the dynamic linker.  The C function
   `_dl_start' is the real entry point; its return value is the user
   program's entry point.  */
#define RTLD_START \
  asm (".pushsection \".text\"\n" \
"	.align 2\n" \
"	" ENTRY_2(_start) "\n" \
BODY_PREFIX "_start:\n" \
"	" LOCALENTRY(_start) "\n" \
/* We start with the following on the stack, from top: \
   argc (4 bytes); \
   arguments for program (terminated by NULL); \
   environment variables (terminated by NULL); \
   arguments for the program loader.  */ \
"	mr	3,1\n" \
"	li	4,0\n" \
"	stdu	4,-128(1)\n" \
/* Call _dl_start with one parameter pointing at argc.  */ \
"	bl	" DOT_PREFIX "_dl_start\n" \
"	nop\n" \
/* Transfer control to _dl_start_user!  */ \
"	b	" DOT_PREFIX "_dl_start_user\n" \
".LT__start:\n" \
"	.long 0\n" \
"	.byte 0x00,0x0c,0x24,0x40,0x00,0x00,0x00,0x00\n" \
"	.long .LT__start-" BODY_PREFIX "_start\n" \
"	.short .LT__start_name_end-.LT__start_name_start\n" \
".LT__start_name_start:\n" \
"	.ascii \"_start\"\n" \
".LT__start_name_end:\n" \
"	.align 2\n" \
"	" END_2(_start) "\n" \
"	.pushsection \".toc\",\"aw\"\n" \
DL_STARTING_UP_DEF \
".LC__rtld_local:\n" \
"	.tc _rtld_local[TC],_rtld_local\n" \
".LC__dl_argc:\n" \
"	.tc _dl_argc[TC],_dl_argc\n" \
".LC__dl_argv:\n" \
"	.tc __GI__dl_argv[TC],__GI__dl_argv\n" \
".LC__dl_fini:\n" \
"	.tc _dl_fini[TC],_dl_fini\n" \
"	.popsection\n" \
"	" ENTRY_2(_dl_start_user) "\n" \
/* Now, we do our main work of calling initialisation procedures. \
   The ELF ABI doesn't say anything about parameters for these, \
   so we just pass argc, argv, and the environment. \
   Changing these is strongly discouraged (not least because argc is \
   passed by value!).  */ \
BODY_PREFIX "_dl_start_user:\n" \
"	" LOCALENTRY(_dl_start_user) "\n" \
/* the address of _start in r30.  */ \
"	mr	30,3\n" \
/* &_dl_argc in 29, &_dl_argv in 27, and _dl_loaded in 28.  */ \
"	addis	28,2,.LC__rtld_local@toc@ha\n" \
"	ld	28,.LC__rtld_local@toc@l(28)\n" \
"	addis	29,2,.LC__dl_argc@toc@ha\n" \
"	ld	29,.LC__dl_argc@toc@l(29)\n" \
"	addis	27,2,.LC__dl_argv@toc@ha\n" \
"	ld	27,.LC__dl_argv@toc@l(27)\n" \
/* _dl_init (_dl_loaded, _dl_argc, _dl_argv, _dl_argv+_dl_argc+1).  */ \
"	ld	3,0(28)\n" \
"	lwa	4,0(29)\n" \
"	ld	5,0(27)\n" \
"	sldi	6,4,3\n" \
"	add	6,5,6\n" \
"	addi	6,6,8\n" \
"	bl	" DOT_PREFIX "_dl_init\n" \
"	nop\n" \
/* Now, to conform to the ELF ABI, we have to: \
   Pass argc (actually _dl_argc) in r3;  */ \
"	lwa	3,0(29)\n" \
/* Pass argv (actually _dl_argv) in r4;  */ \
"	ld	4,0(27)\n" \
/* Pass argv+argc+1 in r5;  */ \
"	sldi	5,3,3\n" \
"	add	6,4,5\n" \
"	addi	5,6,8\n" \
/* Pass the auxiliary vector in r6.  This is passed to us just after \
   _envp.  */ \
"2:	ldu	0,8(6)\n" \
"	cmpdi	0,0\n" \
"	bne	2b\n" \
"	addi	6,6,8\n" \
/* Pass a termination function pointer (in this case _dl_fini) in \
   r7.  */ \
"	addis	7,2,.LC__dl_fini@toc@ha\n" \
"	ld	7,.LC__dl_fini@toc@l(7)\n" \
/* Pass the stack pointer in r1 (so far so good), pointing to a NULL \
   value.  This lets our startup code distinguish between a program \
   linked statically, which linux will call with argc on top of the \
   stack which will hopefully never be zero, and a dynamically linked \
   program which will always have a NULL on the top of the stack. \
   Take the opportunity to clear LR, so anyone who accidentally \
   returns from _start gets SEGV.  Also clear the next few words of \
   the stack.  */ \
"	li	31,0\n" \
"	std	31,0(1)\n" \
"	mtlr	31\n" \
"	std	31,8(1)\n" \
"	std	31,16(1)\n" \
"	std	31,24(1)\n" \
/* Now, call the start function descriptor at r30...  */ \
"	.globl	._dl_main_dispatch\n" \
"._dl_main_dispatch:\n" \
"	" PPC64_LOAD_FUNCPTR(30) "\n" \
"	bctr\n" \
".LT__dl_start_user:\n" \
"	.long 0\n" \
"	.byte 0x00,0x0c,0x24,0x40,0x00,0x00,0x00,0x00\n" \
"	.long .LT__dl_start_user-" BODY_PREFIX "_dl_start_user\n" \
"	.short .LT__dl_start_user_name_end-.LT__dl_start_user_name_start\n" \
".LT__dl_start_user_name_start:\n" \
"	.ascii \"_dl_start_user\"\n" \
".LT__dl_start_user_name_end:\n" \
"	.align 2\n" \
"	" END_2(_dl_start_user) "\n" \
"	.popsection");

/* ELF_RTYPE_CLASS_COPY iff TYPE should not be allowed to resolve to
   one of the main executable's symbols, as for a COPY reloc.

   To make function pointer comparisons work on most targets, the
   relevant ABI states that the address of a non-local function in a
   dynamically linked executable is the address of the PLT entry for
   that function.  This is quite reasonable since using the real
   function address in a non-PIC executable would typically require
   dynamic relocations in .text, something to be avoided.  For such
   functions, the linker emits a SHN_UNDEF symbol in the executable
   with value equal to the PLT entry address.  Normally, SHN_UNDEF
   symbols have a value of zero, so this is a clue to ld.so that it
   should treat these symbols specially.  For relocations not in
   ELF_RTYPE_CLASS_PLT (eg. those on function pointers), ld.so should
   use the value of the executable SHN_UNDEF symbol, ie. the PLT entry
   address.  For relocations in ELF_RTYPE_CLASS_PLT (eg. the relocs in
   the PLT itself), ld.so should use the value of the corresponding
   defined symbol in the object that defines the function, ie. the
   real function address.  This complicates ld.so in that there are
   now two possible values for a given symbol, and it gets even worse
   because protected symbols need yet another set of rules.

   On PowerPC64 we don't need any of this.  The linker won't emit
   SHN_UNDEF symbols with non-zero values.  ld.so can make all
   relocations behave "normally", ie. always use the real address
   like PLT relocations.  So always set ELF_RTYPE_CLASS_PLT.  */

#if _CALL_ELF != 2
#define elf_machine_type_class(type) \
  (ELF_RTYPE_CLASS_PLT | (((type) == R_PPC64_COPY) * ELF_RTYPE_CLASS_COPY))
#else
/* And now that you have read that large comment, you can disregard it
   all for ELFv2.  ELFv2 does need the special SHN_UNDEF treatment.  */
#define IS_PPC64_TLS_RELOC(R) \
  (((R) >= R_PPC64_TLS && (R) <= R_PPC64_DTPREL16_HIGHESTA) \
   || ((R) >= R_PPC64_TPREL16_HIGH && (R) <= R_PPC64_DTPREL16_HIGHA))

#define elf_machine_type_class(type) \
  ((((type) == R_PPC64_JMP_SLOT \
     || (type) == R_PPC64_ADDR24 \
     || IS_PPC64_TLS_RELOC (type)) * ELF_RTYPE_CLASS_PLT) \
   | (((type) == R_PPC64_COPY) * ELF_RTYPE_CLASS_COPY))
#endif

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT	R_PPC64_JMP_SLOT

/* We define an initialization function to initialize HWCAP/HWCAP2 and
   platform data so it can be copied into the TCB later.  This is called
   very early in _dl_sysdep_start for dynamically linked binaries.  */
#if defined(SHARED) && IS_IN (rtld)
# define DL_PLATFORM_INIT dl_platform_init ()

static inline void __attribute__ ((unused))
dl_platform_init (void)
{
  __tcb_parse_hwcap_and_convert_at_platform ();
}
#endif

/* Stuff for the PLT.  */
#if _CALL_ELF != 2
#define PLT_INITIAL_ENTRY_WORDS 3
#define PLT_ENTRY_WORDS 3
#define GLINK_INITIAL_ENTRY_WORDS 8
/* The first 32k entries of glink can set an index and branch using two
   instructions; past that point, glink uses three instructions.  */
#define GLINK_ENTRY_WORDS(I) (((I) < 0x8000)? 2 : 3)
#else
#define PLT_INITIAL_ENTRY_WORDS 2
#define PLT_ENTRY_WORDS 1
#define GLINK_INITIAL_ENTRY_WORDS 8
#define GLINK_ENTRY_WORDS(I) 1
#endif
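
/* Layout sketch implied by the values above (informational only):
   elf_machine_runtime_setup below stores the lazy entry for PLT slot I at
   plt[PLT_INITIAL_ENTRY_WORDS + I * PLT_ENTRY_WORDS] and points it at
   glink[GLINK_INITIAL_ENTRY_WORDS + sum of GLINK_ENTRY_WORDS for slots
   0..I-1]; with the ELFv1 numbers that is glink word 8 + 2 * I for the
   first 0x8000 slots, while for ELFv2 each PLT slot is a single
   doubleword and each glink stub a single word.  */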

#define PPC_DCBST(where) asm volatile ("dcbst 0,%0" : : "r"(where) : "memory")
#define PPC_DCBT(where) asm volatile ("dcbt 0,%0" : : "r"(where) : "memory")
#define PPC_DCBF(where) asm volatile ("dcbf 0,%0" : : "r"(where) : "memory")
#define PPC_SYNC asm volatile ("sync" : : : "memory")
#define PPC_ISYNC asm volatile ("sync; isync" : : : "memory")
#define PPC_ICBI(where) asm volatile ("icbi 0,%0" : : "r"(where) : "memory")
#define PPC_DIE asm volatile ("tweq 0,0")
/* Use this when you've modified some code, but it won't be in the
   instruction fetch queue (or when it doesn't matter if it is).  */
#define MODIFIED_CODE_NOQUEUE(where) \
  do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); } while (0)
/* Use this when it might be in the instruction queue.  */
#define MODIFIED_CODE(where) \
  do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); PPC_ISYNC; } while (0)
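
/* Usage note (informational): after elf_machine_rela below patches an
   instruction it runs MODIFIED_CODE_NOQUEUE (reloc_addr), i.e. flush the
   modified data cache line (dcbst), order the update (sync), then
   invalidate the stale instruction cache line (icbi); MODIFIED_CODE
   appends PPC_ISYNC for code that may already have been fetched into the
   pipeline.  */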

/* Set up the loaded object described by MAP so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */
static inline int __attribute__ ((always_inline))
elf_machine_runtime_setup (struct link_map *map, struct r_scope_elem *scope[],
                           int lazy, int profile)
{
  if (map->l_info[DT_JMPREL])
    {
      Elf64_Word i;
      Elf64_Word *glink = NULL;
      Elf64_Xword *plt = (Elf64_Xword *) D_PTR (map, l_info[DT_PLTGOT]);
      Elf64_Word num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
                                    / sizeof (Elf64_Rela));
      Elf64_Addr l_addr = map->l_addr;
      Elf64_Dyn **info = map->l_info;
      char *p;

      extern void _dl_runtime_resolve (void);
      extern void _dl_profile_resolve (void);

      /* Relocate the DT_PPC64_GLINK entry in the _DYNAMIC section.
         elf_get_dynamic_info takes care of the standard entries but
         doesn't know exactly what to do with processor specific
         entries.  */
      if (info[DT_PPC64(GLINK)] != NULL)
        info[DT_PPC64(GLINK)]->d_un.d_ptr += l_addr;

      if (lazy)
        {
          Elf64_Word glink_offset;
          Elf64_Word offset;
          Elf64_Addr dlrr;

          dlrr = (Elf64_Addr) (profile ? _dl_profile_resolve
                               : _dl_runtime_resolve);
          if (profile && GLRO(dl_profile) != NULL
              && _dl_name_match_p (GLRO(dl_profile), map))
            /* This is the object we are looking for.  Say that we really
               want profiling and the timers are started.  */
            GL(dl_profile_map) = map;

#if _CALL_ELF != 2
          /* We need to stuff the address/TOC of _dl_runtime_resolve
             into doublewords 0 and 1 of plt_reserve.  Then we need to
             stuff the map address into doubleword 2 of plt_reserve.
             This allows the GLINK0 code to transfer control to the
             correct trampoline which will transfer control to fixup
             in dl-machine.c.  */
          {
            /* The plt_reserve area is the 1st 3 doublewords of the PLT.  */
            Elf64_FuncDesc *plt_reserve = (Elf64_FuncDesc *) plt;
            Elf64_FuncDesc *resolve_fd = (Elf64_FuncDesc *) dlrr;
            plt_reserve->fd_func = resolve_fd->fd_func;
            plt_reserve->fd_toc = resolve_fd->fd_toc;
            plt_reserve->fd_aux = (Elf64_Addr) map;
#ifdef RTLD_BOOTSTRAP
            /* When we're bootstrapping, the opd entry will not have
               been relocated yet.  */
            plt_reserve->fd_func += l_addr;
            plt_reserve->fd_toc += l_addr;
#endif
          }
#else
          /* When we don't have function descriptors, the first doubleword
             of the PLT holds the address of _dl_runtime_resolve, and the
             second doubleword holds the map address.  */
          plt[0] = dlrr;
          plt[1] = (Elf64_Addr) map;
#endif

          /* Set up the lazy PLT entries.  */
          glink = (Elf64_Word *) D_PTR (map, l_info[DT_PPC64(GLINK)]);
          offset = PLT_INITIAL_ENTRY_WORDS;
          glink_offset = GLINK_INITIAL_ENTRY_WORDS;
          for (i = 0; i < num_plt_entries; i++)
            {
              plt[offset] = (Elf64_Xword) &glink[glink_offset];
              offset += PLT_ENTRY_WORDS;
              glink_offset += GLINK_ENTRY_WORDS (i);
            }

          /* Now, we've modified data.  We need to write the changes from
             the data cache to a second-level unified cache, then make
             sure that stale data in the instruction cache is removed.
             (In a multiprocessor system, the effect is more complex.)
             Most of the PLT shouldn't be in the instruction cache, but
             there may be a little overlap at the start and the end.

             Assumes that dcbst and icbi apply to lines of 16 bytes or
             more.  Current known line sizes are 16, 32, and 128 bytes.  */

          for (p = (char *) plt; p < (char *) &plt[offset]; p += 16)
            PPC_DCBST (p);
          PPC_SYNC;
        }
    }

  return lazy;
}
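
/* Control flow sketch for a lazy call after the setup above
   (informational): the first call through PLT slot I lands in its glink
   stub, which loads the slot index and branches to the GLINK0 code; that
   code uses the reserved PLT words initialised above to reach
   _dl_runtime_resolve (or _dl_profile_resolve), which invokes the fixup
   code in dl-runtime.c and ultimately elf_machine_fixup_plt below to
   rewrite the PLT entry with the real target.  */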

#if _CALL_ELF == 2
extern void attribute_hidden _dl_error_localentry (struct link_map *map,
                                                   const Elf64_Sym *refsym);

/* If the PLT entry resolves to a function in the same object, return
   the target function's local entry point offset if usable.  */
static inline Elf64_Addr __attribute__ ((always_inline))
ppc64_local_entry_offset (struct link_map *map, lookup_t sym_map,
                          const ElfW(Sym) *refsym, const ElfW(Sym) *sym)
{
  /* If the target function is in a different object, we cannot
     use the local entry point.  */
  if (sym_map != map)
    {
      /* Check that optimized plt call stubs for localentry:0 functions
         are not being satisfied by a non-zero localentry symbol.  */
      if (map->l_info[DT_PPC64(OPT)]
          && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_LOCALENTRY) != 0
          && refsym->st_info == ELFW(ST_INFO) (STB_GLOBAL, STT_FUNC)
          && (STO_PPC64_LOCAL_MASK & refsym->st_other) == 0
          && (STO_PPC64_LOCAL_MASK & sym->st_other) != 0)
        _dl_error_localentry (map, refsym);

      return 0;
    }

  /* If the linker inserted multiple TOCs, we cannot use the
     local entry point.  */
  if (map->l_info[DT_PPC64(OPT)]
      && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_MULTI_TOC))
    return 0;

  /* If the target function is an ifunc then the local entry offset is
     for the resolver, not the final destination.  */
  if (__builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0))
    return 0;

  /* Otherwise, we can use the local entry point.  Retrieve its offset
     from the symbol's ELF st_other field.  */
  return PPC64_LOCAL_ENTRY_OFFSET (sym->st_other);
}
#endif
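
/* Worked example (informational, using the PPC64_LOCAL_ENTRY_OFFSET
   encoding from elf.h): for the common ELFv2 prologue where the global
   entry point is the two instructions that compute r2 (addis/addi), the
   assembler sets the three STO_PPC64_LOCAL_MASK bits of st_other to 3,
   and PPC64_LOCAL_ENTRY_OFFSET (sym->st_other) evaluates to 8, so
   elf_machine_fixup_plt below advances finaladdr past those two
   instructions straight to the local entry point.  */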

/* Change the PLT entry whose reloc is 'reloc' to call the actual
   routine.  */
static inline Elf64_Addr __attribute__ ((always_inline))
elf_machine_fixup_plt (struct link_map *map, lookup_t sym_map,
                       const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
                       const Elf64_Rela *reloc,
                       Elf64_Addr *reloc_addr, Elf64_Addr finaladdr)
{
#if _CALL_ELF != 2
  Elf64_FuncDesc *plt = (Elf64_FuncDesc *) reloc_addr;
  Elf64_FuncDesc *rel = (Elf64_FuncDesc *) finaladdr;
  Elf64_Addr offset = 0;
  Elf64_FuncDesc zero_fd = {0, 0, 0};

  PPC_DCBT (&plt->fd_aux);
  PPC_DCBT (&plt->fd_func);

  /* If sym_map is NULL, it's a weak undefined sym; set the plt to
     zero.  finaladdr should be zero already in this case, but guard
     against invalid plt relocations with non-zero addends.  */
  if (sym_map == NULL)
    finaladdr = 0;

  /* Don't die here if finaladdr is zero, die if this plt entry is
     actually called.  Makes a difference when LD_BIND_NOW=1.
     finaladdr may be zero for a weak undefined symbol, or when an
     ifunc resolver returns zero.  */
  if (finaladdr == 0)
    rel = &zero_fd;
  else
    {
      PPC_DCBT (&rel->fd_aux);
      PPC_DCBT (&rel->fd_func);
    }

  /* If the opd entry is not yet relocated (because it's from a shared
     object that hasn't been processed yet), then manually reloc it.  */
  if (finaladdr != 0 && map != sym_map && !sym_map->l_relocated
#if !defined RTLD_BOOTSTRAP && defined SHARED
      /* Bootstrap map doesn't have l_relocated set for it.  */
      && sym_map != &GL(dl_rtld_map)
#endif
      )
    offset = sym_map->l_addr;

  /* For PPC64, fixup_plt copies the function descriptor from opd
     over the corresponding PLT entry.
     Initially, PLT Entry[i] is set up for lazy linking, or is zero.
     For lazy linking, the fd_toc and fd_aux entries are irrelevant,
     so for thread safety we write them before changing fd_func.  */

  plt->fd_aux = rel->fd_aux + offset;
  plt->fd_toc = rel->fd_toc + offset;
  PPC_DCBF (&plt->fd_toc);
  PPC_ISYNC;

  plt->fd_func = rel->fd_func + offset;
  PPC_DCBST (&plt->fd_func);
  PPC_ISYNC;
#else
  finaladdr += ppc64_local_entry_offset (map, sym_map, refsym, sym);
  *reloc_addr = finaladdr;
#endif

  return finaladdr;
}

/* Return the final value of a plt relocation.  */
static inline Elf64_Addr
elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
                       Elf64_Addr value)
{
  return value + reloc->r_addend;
}


/* Names of the architecture-specific auditing callback functions.  */
#if _CALL_ELF != 2
#define ARCH_LA_PLTENTER ppc64_gnu_pltenter
#define ARCH_LA_PLTEXIT ppc64_gnu_pltexit
#else
#define ARCH_LA_PLTENTER ppc64v2_gnu_pltenter
#define ARCH_LA_PLTEXIT ppc64v2_gnu_pltexit
#endif

#if ENABLE_STATIC_PIE && !defined SHARED && !IS_IN (rtld)
#include <libc-diag.h>
#include <tcb-offsets.h>

/* Set up r13 for _dl_relocate_static_pie so that libgcc ifuncs that
   normally access the tcb copy of hwcap will see __tcb.hwcap.  */

static inline void __attribute__ ((always_inline))
ppc_init_fake_thread_pointer (void)
{
  DIAG_PUSH_NEEDS_COMMENT;
  /* We are playing pointer tricks.  Silence gcc warning.  */
  DIAG_IGNORE_NEEDS_COMMENT (4.9, "-Warray-bounds");
  __thread_register = (char *) &__tcb.hwcap - TCB_HWCAP;
  DIAG_POP_NEEDS_COMMENT;
}

#define ELF_MACHINE_BEFORE_RTLD_RELOC(map, dynamic_info) \
  ppc_init_fake_thread_pointer ();
#endif /* ENABLE_STATIC_PIE && !defined SHARED && !IS_IN (rtld) */

#endif /* dl_machine_h */

#ifdef RESOLVE_MAP

#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI ((v) + 0x8000)
#define PPC_HIGHER(v) (((v) >> 32) & 0xffff)
#define PPC_HIGHERA(v) PPC_HIGHER ((v) + 0x8000)
#define PPC_HIGHEST(v) (((v) >> 48) & 0xffff)
#define PPC_HIGHESTA(v) PPC_HIGHEST ((v) + 0x8000)
#define BIT_INSERT(var, val, mask) \
  ((var) = ((var) & ~(Elf64_Addr) (mask)) | ((val) & (mask)))
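
/* Worked example of the @ha adjustment (illustration only): for
   v = 0x123456789abcf123, PPC_LO (v) = 0xf123, PPC_HI (v) = 0x9abc and
   PPC_HA (v) = 0x9abd, because the low half is negative when an
   instruction such as addi sign-extends it; the consumer then computes
   (PPC_HA (v) << 16) + (short) PPC_LO (v) = 0x9abd0000 - 0x0edd
   = 0x9abcf123, recovering the low 32 bits of v.  */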

#define dont_expect(X) __builtin_expect ((X), 0)

extern void attribute_hidden _dl_reloc_overflow (struct link_map *map,
                                                 const char *name,
                                                 Elf64_Addr *const reloc_addr,
                                                 const Elf64_Sym *refsym);

static inline void __attribute__ ((always_inline))
elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
                           void *const reloc_addr_arg)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  *reloc_addr = l_addr + reloc->r_addend;
}

/* This computes the value used by TPREL* relocs.  */
static inline Elf64_Addr __attribute__ ((always_inline, const))
elf_machine_tprel (struct link_map *map,
                   struct link_map *sym_map,
                   const Elf64_Sym *sym,
                   const Elf64_Rela *reloc)
{
#ifndef RTLD_BOOTSTRAP
  if (sym_map)
    {
      CHECK_STATIC_TLS (map, sym_map);
#endif
      return TLS_TPREL_VALUE (sym_map, sym, reloc);
#ifndef RTLD_BOOTSTRAP
    }
#endif
  return 0;
}

/* Call function at address VALUE (an OPD entry) to resolve ifunc relocs.  */
static inline Elf64_Addr __attribute__ ((always_inline))
resolve_ifunc (Elf64_Addr value,
               const struct link_map *map, const struct link_map *sym_map)
{
#if _CALL_ELF != 2
  /* The function we are calling may not yet have its opd entry relocated.  */
  Elf64_FuncDesc opd;
  if (map != sym_map
# if !defined RTLD_BOOTSTRAP && defined SHARED
      /* Bootstrap map doesn't have l_relocated set for it.  */
      && sym_map != &GL(dl_rtld_map)
# endif
      && !sym_map->l_relocated)
    {
      Elf64_FuncDesc *func = (Elf64_FuncDesc *) value;
      opd.fd_func = func->fd_func + sym_map->l_addr;
      opd.fd_toc = func->fd_toc + sym_map->l_addr;
      opd.fd_aux = func->fd_aux;
      /* GCC 4.9+ eliminates the branch as dead code, force the opd set
         dependency.  */
      asm ("" : "=r" (value) : "0" (&opd), "X" (opd));
    }
#endif
  return ((Elf64_Addr (*) (unsigned long int)) value) (GLRO(dl_hwcap));
}
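
/* Illustration (hypothetical resolver, not part of this file): a
   powerpc64 STT_GNU_IFUNC resolver receives the hwcap word as its
   argument, for example

     static void *
     my_memcpy_resolver (unsigned long int hwcap)
     {
       return (hwcap & PPC_FEATURE_HAS_VSX) ? my_memcpy_vsx
                                            : my_memcpy_generic;
     }

   and the call above supplies GLRO(dl_hwcap) as that argument before
   using the returned address (an OPD entry for ELFv1, a code address
   for ELFv2) to satisfy the relocation.  */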

/* Perform the relocation specified by RELOC and SYM (which is fully
   resolved).  MAP is the object containing the reloc.  */
static inline void __attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
                  const Elf64_Rela *reloc,
                  const Elf64_Sym *sym,
                  const struct r_found_version *version,
                  void *const reloc_addr_arg,
                  int skip_ifunc)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  const int r_type = ELF64_R_TYPE (reloc->r_info);
  const Elf64_Sym *const refsym = sym;
  union unaligned
    {
      uint16_t u2;
      uint32_t u4;
      uint64_t u8;
    } __attribute__ ((__packed__));

  if (r_type == R_PPC64_RELATIVE)
    {
      *reloc_addr = map->l_addr + reloc->r_addend;
      return;
    }

  if (__glibc_unlikely (r_type == R_PPC64_NONE))
    return;

  /* We need SYM_MAP even in the absence of TLS, for elf_machine_fixup_plt
     and STT_GNU_IFUNC.  */
  struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version, r_type);
  Elf64_Addr value = SYMBOL_ADDRESS (sym_map, sym, true) + reloc->r_addend;

  if (sym != NULL
      && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0)
      && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1)
      && __builtin_expect (!skip_ifunc, 1))
    value = resolve_ifunc (value, map, sym_map);

  /* For relocs that don't edit code, return.
     For relocs that might edit instructions, break from the switch.  */
  switch (r_type)
    {
    case R_PPC64_ADDR64:
    case R_PPC64_GLOB_DAT:
      *reloc_addr = value;
      return;

    case R_PPC64_IRELATIVE:
      if (__glibc_likely (!skip_ifunc))
        value = resolve_ifunc (value, map, sym_map);
      *reloc_addr = value;
      return;

    case R_PPC64_JMP_IREL:
      if (__glibc_likely (!skip_ifunc))
        value = resolve_ifunc (value, map, sym_map);
      /* Fall thru */
    case R_PPC64_JMP_SLOT:
      elf_machine_fixup_plt (map, sym_map, refsym, sym,
                             reloc, reloc_addr, value);
      return;

    case R_PPC64_DTPMOD64:
      if (map->l_info[DT_PPC64(OPT)]
          && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_TLS))
        {
#ifdef RTLD_BOOTSTRAP
          reloc_addr[0] = 0;
          reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
                           + TLS_DTV_OFFSET);
          return;
#else
          if (sym_map != NULL)
            {
# ifndef SHARED
              CHECK_STATIC_TLS (map, sym_map);
# else
              if (TRY_STATIC_TLS (map, sym_map))
# endif
                {
                  reloc_addr[0] = 0;
                  /* Set up for local dynamic.  */
                  reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
                                   + TLS_DTV_OFFSET);
                  return;
                }
            }
#endif
        }
#ifdef RTLD_BOOTSTRAP
      /* During startup the dynamic linker is always index 1.  */
      *reloc_addr = 1;
#else
      /* Get the information from the link map returned by the
         resolve function.  */
      if (sym_map != NULL)
        *reloc_addr = sym_map->l_tls_modid;
#endif
      return;

    case R_PPC64_DTPREL64:
      if (map->l_info[DT_PPC64(OPT)]
          && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_TLS))
        {
#ifdef RTLD_BOOTSTRAP
          *reloc_addr = TLS_TPREL_VALUE (sym_map, sym, reloc);
          return;
#else
          if (sym_map != NULL)
            {
              /* This reloc is always preceded by R_PPC64_DTPMOD64.  */
# ifndef SHARED
              assert (HAVE_STATIC_TLS (map, sym_map));
# else
              if (HAVE_STATIC_TLS (map, sym_map))
# endif
                {
                  *reloc_addr = TLS_TPREL_VALUE (sym_map, sym, reloc);
                  return;
                }
            }
#endif
        }
      /* During relocation all TLS symbols are defined and used.
         Therefore the offset is already correct.  */
#ifndef RTLD_BOOTSTRAP
      if (sym_map != NULL)
        *reloc_addr = TLS_DTPREL_VALUE (sym, reloc);
#endif
      return;

    case R_PPC64_TPREL64:
      *reloc_addr = elf_machine_tprel (map, sym_map, sym, reloc);
      return;

    case R_PPC64_TPREL16_LO_DS:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_LO_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_TPREL16_DS:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_TPREL16:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value + 0x8000) >= 0x10000))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_TPREL16_LO:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_TPREL16_HI:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect (value + 0x80000000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_HI", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_TPREL16_HIGH:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_TPREL16_HA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect (value + 0x80008000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_HA", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_TPREL16_HIGHA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_TPREL16_HIGHER:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHER (value);
      break;

    case R_PPC64_TPREL16_HIGHEST:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHEST (value);
      break;

    case R_PPC64_TPREL16_HIGHERA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHERA (value);
      break;

    case R_PPC64_TPREL16_HIGHESTA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHESTA (value);
      break;

#ifndef RTLD_BOOTSTRAP /* None of the following appear in ld.so */
    case R_PPC64_ADDR16_LO_DS:
      if (dont_expect ((value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_LO_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_ADDR16_LO:
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_ADDR16_HI:
      if (dont_expect (value + 0x80000000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_HI", reloc_addr, refsym);
      /* Fall through.  */
    case R_PPC64_ADDR16_HIGH:
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_ADDR16_HA:
      if (dont_expect (value + 0x80008000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_HA", reloc_addr, refsym);
      /* Fall through.  */
    case R_PPC64_ADDR16_HIGHA:
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_ADDR30:
      {
        Elf64_Addr delta = value - (Elf64_Xword) reloc_addr;
        if (dont_expect ((delta + 0x80000000) >= 0x100000000LL
                         || (delta & 3) != 0))
          _dl_reloc_overflow (map, "R_PPC64_ADDR30", reloc_addr, refsym);
        BIT_INSERT (*(Elf64_Word *) reloc_addr, delta, 0xfffffffc);
      }
      break;

    case R_PPC64_COPY:
      if (dont_expect (sym == NULL))
        /* This can happen in trace mode when an object could not be found.  */
        return;
      if (dont_expect (sym->st_size > refsym->st_size
                       || (GLRO(dl_verbose)
                           && sym->st_size < refsym->st_size)))
        {
          const char *strtab;

          strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
          _dl_error_printf ("%s: Symbol `%s' has different size"
                            " in shared object,"
                            " consider re-linking\n",
                            RTLD_PROGNAME, strtab + refsym->st_name);
        }
      memcpy (reloc_addr_arg, (char *) value,
              MIN (sym->st_size, refsym->st_size));
      return;

    case R_PPC64_UADDR64:
      ((union unaligned *) reloc_addr)->u8 = value;
      return;

    case R_PPC64_UADDR32:
      ((union unaligned *) reloc_addr)->u4 = value;
      return;

    case R_PPC64_ADDR32:
      if (dont_expect ((value + 0x80000000) >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_ADDR32", reloc_addr, refsym);
      *(Elf64_Word *) reloc_addr = value;
      return;

    case R_PPC64_ADDR24:
      if (dont_expect ((value + 0x2000000) >= 0x4000000 || (value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_ADDR24", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Word *) reloc_addr, value, 0x3fffffc);
      break;

    case R_PPC64_ADDR16:
      if (dont_expect ((value + 0x8000) >= 0x10000))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = value;
      break;

    case R_PPC64_UADDR16:
      if (dont_expect ((value + 0x8000) >= 0x10000))
        _dl_reloc_overflow (map, "R_PPC64_UADDR16", reloc_addr, refsym);
      ((union unaligned *) reloc_addr)->u2 = value;
      return;

    case R_PPC64_ADDR16_DS:
      if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_ADDR16_HIGHER:
      *(Elf64_Half *) reloc_addr = PPC_HIGHER (value);
      break;

    case R_PPC64_ADDR16_HIGHEST:
      *(Elf64_Half *) reloc_addr = PPC_HIGHEST (value);
      break;

    case R_PPC64_ADDR16_HIGHERA:
      *(Elf64_Half *) reloc_addr = PPC_HIGHERA (value);
      break;

    case R_PPC64_ADDR16_HIGHESTA:
      *(Elf64_Half *) reloc_addr = PPC_HIGHESTA (value);
      break;

    case R_PPC64_ADDR14:
    case R_PPC64_ADDR14_BRTAKEN:
    case R_PPC64_ADDR14_BRNTAKEN:
      {
        if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
          _dl_reloc_overflow (map, "R_PPC64_ADDR14", reloc_addr, refsym);
        Elf64_Word insn = *(Elf64_Word *) reloc_addr;
        BIT_INSERT (insn, value, 0xfffc);
        if (r_type != R_PPC64_ADDR14)
          {
            insn &= ~(1 << 21);
            if (r_type == R_PPC64_ADDR14_BRTAKEN)
              insn |= 1 << 21;
            if ((insn & (0x14 << 21)) == (0x04 << 21))
              insn |= 0x02 << 21;
            else if ((insn & (0x14 << 21)) == (0x10 << 21))
              insn |= 0x08 << 21;
          }
        *(Elf64_Word *) reloc_addr = insn;
      }
      break;

    case R_PPC64_REL32:
      *(Elf64_Word *) reloc_addr = value - (Elf64_Addr) reloc_addr;
      return;

    case R_PPC64_REL64:
      *reloc_addr = value - (Elf64_Addr) reloc_addr;
      return;
#endif /* !RTLD_BOOTSTRAP */

    default:
      _dl_reloc_bad_type (map, r_type, 0);
      return;
    }
  MODIFIED_CODE_NOQUEUE (reloc_addr);
}

static inline void __attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
                      Elf64_Addr l_addr, const Elf64_Rela *reloc,
                      int skip_ifunc)
{
  /* elf_machine_runtime_setup handles this.  */
}

#endif /* RESOLVE_MAP */