/* Machine-dependent ELF dynamic relocation inline functions.
   PowerPC64 version.
   Copyright 1995-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */

#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "powerpc64"
#include <assert.h>
#include <sys/param.h>
#include <dl-tls.h>
#include <sysdep.h>
#include <hwcapinfo.h>
#include <dl-static-tls.h>
#include <dl-funcdesc.h>
#include <dl-machine-rel.h>

/* Translate a processor specific dynamic tag to the index
   in l_info array.  */
#define DT_PPC64(x) (DT_PPC64_##x - DT_LOPROC + DT_NUM)

#define ELF_MULT_MACHINES_SUPPORTED
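/* Editor's assumption, for orientation: defining ELF_MULT_MACHINES_SUPPORTED
   signals that this port also provides the elf_host_tolerates_machine and
   elf_host_tolerates_class hooks below, so the loader can give a more useful
   diagnostic when handed an object built for a related machine or ELF class
   (here, 32-bit PowerPC).  */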
/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int
elf_machine_matches_host (const Elf64_Ehdr *ehdr)
{
  /* Verify that the binary matches our ABI version.  */
  if ((ehdr->e_flags & EF_PPC64_ABI) != 0)
    {
#if _CALL_ELF != 2
      if ((ehdr->e_flags & EF_PPC64_ABI) != 1)
        return 0;
#else
      if ((ehdr->e_flags & EF_PPC64_ABI) != 2)
        return 0;
#endif
    }

  return ehdr->e_machine == EM_PPC64;
}

/* Return nonzero iff ELF header is compatible with the running host,
   but not this loader.  */
static inline int
elf_host_tolerates_machine (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_machine == EM_PPC;
}

/* Return nonzero iff ELF header is compatible with the running host,
   but not this loader.  */
static inline int
elf_host_tolerates_class (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_ident[EI_CLASS] == ELFCLASS32;
}
/* Return the run-time load address of the shared object, assuming it
   was originally linked at zero.  */
static inline Elf64_Addr
elf_machine_load_address (void) __attribute__ ((const));

#ifndef __PCREL__
static inline Elf64_Addr
elf_machine_load_address (void)
{
  Elf64_Addr ret;

  /* The first entry in .got (and thus the first entry in .toc) is the
     link-time TOC_base, ie. r2.  So the difference between that and
     the current r2 set by the kernel is how far the shared lib has
     moved.  */
  asm (" ld %0,-32768(2)\n"
       " subf %0,%0,2\n"
       : "=r" (ret));
  return ret;
}
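/* For example, if the object was linked at address zero but mapped at
   0x10000000, the doubleword at r2-32768 (GOT[0]) still holds the
   link-time TOC base, so subtracting it from the live r2 yields
   0x10000000, the relocation offset.  */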
/* Return the link-time address of _DYNAMIC.  */
static inline Elf64_Addr
elf_machine_dynamic (void)
{
  Elf64_Addr runtime_dynamic;
  /* It's easier to get the run-time address.  */
  asm (" addis %0,2,_DYNAMIC@toc@ha\n"
       " addi %0,%0,_DYNAMIC@toc@l\n"
       : "=b" (runtime_dynamic));
  /* Then subtract off the load address offset.  */
  return runtime_dynamic - elf_machine_load_address ();
}
#else /* __PCREL__ */
/* In PCREL mode, r2 may have been clobbered.  Rely on relative
   relocations instead.  */

static inline ElfW(Addr)
elf_machine_load_address (void)
{
  extern const ElfW(Ehdr) __ehdr_start attribute_hidden;
  return (ElfW(Addr)) &__ehdr_start;
}

static inline ElfW(Addr)
elf_machine_dynamic (void)
{
  extern ElfW(Dyn) _DYNAMIC[] attribute_hidden;
  return (ElfW(Addr)) _DYNAMIC - elf_machine_load_address ();
}
#endif /* __PCREL__ */
/* The PLT uses Elf64_Rela relocs.  */
#define elf_machine_relplt elf_machine_rela

#ifdef HAVE_INLINED_SYSCALLS
/* We do not need _dl_starting_up.  */
# define DL_STARTING_UP_DEF
#else
# define DL_STARTING_UP_DEF \
".LC__dl_starting_up:\n" \
" .tc __GI__dl_starting_up[TC],__GI__dl_starting_up\n"
#endif
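/* The .tc pseudo-op above emits a TOC entry for __GI__dl_starting_up; with
   inlined syscalls the variable is not needed at all, so nothing is
   emitted.  */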
/* Initial entry point code for the dynamic linker.  The C function
   `_dl_start' is the real entry point; its return value is the user
   program's entry point.  */
#define RTLD_START \
  asm (".pushsection \".text\"\n" \
" .align 2\n" \
" " ENTRY_2(_start) "\n" \
BODY_PREFIX "_start:\n" \
" " LOCALENTRY(_start) "\n" \
/* We start with the following on the stack, from top: \
   argc (4 bytes); \
   arguments for program (terminated by NULL); \
   environment variables (terminated by NULL); \
   arguments for the program loader.  */ \
" mr 3,1\n" \
" li 4,0\n" \
" stdu 4,-128(1)\n" \
/* Call _dl_start with one parameter pointing at argc.  */ \
" bl " DOT_PREFIX "_dl_start\n" \
" nop\n" \
/* Transfer control to _dl_start_user!  */ \
" b " DOT_PREFIX "_dl_start_user\n" \
".LT__start:\n" \
" .long 0\n" \
" .byte 0x00,0x0c,0x24,0x40,0x00,0x00,0x00,0x00\n" \
" .long .LT__start-" BODY_PREFIX "_start\n" \
" .short .LT__start_name_end-.LT__start_name_start\n" \
".LT__start_name_start:\n" \
" .ascii \"_start\"\n" \
".LT__start_name_end:\n" \
" .align 2\n" \
" " END_2(_start) "\n" \
" .pushsection \".toc\",\"aw\"\n" \
DL_STARTING_UP_DEF \
".LC__rtld_local:\n" \
" .tc _rtld_local[TC],_rtld_local\n" \
".LC__dl_argc:\n" \
" .tc _dl_argc[TC],_dl_argc\n" \
".LC__dl_argv:\n" \
" .tc __GI__dl_argv[TC],__GI__dl_argv\n" \
".LC__dl_fini:\n" \
" .tc _dl_fini[TC],_dl_fini\n" \
" .popsection\n" \
" " ENTRY_2(_dl_start_user) "\n" \
/* Now, we do our main work of calling initialisation procedures. \
   The ELF ABI doesn't say anything about parameters for these, \
   so we just pass argc, argv, and the environment. \
   Changing these is strongly discouraged (not least because argc is \
   passed by value!).  */ \
BODY_PREFIX "_dl_start_user:\n" \
" " LOCALENTRY(_dl_start_user) "\n" \
/* the address of _start in r30.  */ \
" mr 30,3\n" \
/* &_dl_argc in 29, &_dl_argv in 27, and _dl_loaded in 28.  */ \
" addis 28,2,.LC__rtld_local@toc@ha\n" \
" ld 28,.LC__rtld_local@toc@l(28)\n" \
" addis 29,2,.LC__dl_argc@toc@ha\n" \
" ld 29,.LC__dl_argc@toc@l(29)\n" \
" addis 27,2,.LC__dl_argv@toc@ha\n" \
" ld 27,.LC__dl_argv@toc@l(27)\n" \
/* _dl_init (_dl_loaded, _dl_argc, _dl_argv, _dl_argv+_dl_argc+1).  */ \
" ld 3,0(28)\n" \
" lwa 4,0(29)\n" \
" ld 5,0(27)\n" \
" sldi 6,4,3\n" \
" add 6,5,6\n" \
" addi 6,6,8\n" \
" bl " DOT_PREFIX "_dl_init\n" \
" nop\n" \
/* Now, to conform to the ELF ABI, we have to: \
   Pass argc (actually _dl_argc) in r3;  */ \
" lwa 3,0(29)\n" \
/* Pass argv (actually _dl_argv) in r4;  */ \
" ld 4,0(27)\n" \
/* Pass argv+argc+1 in r5;  */ \
" sldi 5,3,3\n" \
" add 6,4,5\n" \
" addi 5,6,8\n" \
/* Pass the auxiliary vector in r6.  This is passed to us just after \
   _envp.  */ \
"2: ldu 0,8(6)\n" \
" cmpdi 0,0\n" \
" bne 2b\n" \
" addi 6,6,8\n" \
/* Pass a termination function pointer (in this case _dl_fini) in \
   r7.  */ \
" addis 7,2,.LC__dl_fini@toc@ha\n" \
" ld 7,.LC__dl_fini@toc@l(7)\n" \
/* Pass the stack pointer in r1 (so far so good), pointing to a NULL \
   value.  This lets our startup code distinguish between a program \
   linked statically, which linux will call with argc on top of the \
   stack which will hopefully never be zero, and a dynamically linked \
   program which will always have a NULL on the top of the stack. \
   Take the opportunity to clear LR, so anyone who accidentally \
   returns from _start gets SEGV.  Also clear the next few words of \
   the stack.  */ \
" li 31,0\n" \
" std 31,0(1)\n" \
" mtlr 31\n" \
" std 31,8(1)\n" \
" std 31,16(1)\n" \
" std 31,24(1)\n" \
/* Now, call the start function descriptor at r30...  */ \
" .globl ._dl_main_dispatch\n" \
"._dl_main_dispatch:\n" \
" " PPC64_LOAD_FUNCPTR(30) "\n" \
" bctr\n" \
".LT__dl_start_user:\n" \
" .long 0\n" \
" .byte 0x00,0x0c,0x24,0x40,0x00,0x00,0x00,0x00\n" \
" .long .LT__dl_start_user-" BODY_PREFIX "_dl_start_user\n" \
" .short .LT__dl_start_user_name_end-.LT__dl_start_user_name_start\n" \
".LT__dl_start_user_name_start:\n" \
" .ascii \"_dl_start_user\"\n" \
".LT__dl_start_user_name_end:\n" \
" .align 2\n" \
" " END_2(_dl_start_user) "\n" \
" .popsection");
/* ELF_RTYPE_CLASS_COPY iff TYPE should not be allowed to resolve to
   one of the main executable's symbols, as for a COPY reloc.

   To make function pointer comparisons work on most targets, the
   relevant ABI states that the address of a non-local function in a
   dynamically linked executable is the address of the PLT entry for
   that function.  This is quite reasonable since using the real
   function address in a non-PIC executable would typically require
   dynamic relocations in .text, something to be avoided.  For such
   functions, the linker emits a SHN_UNDEF symbol in the executable
   with value equal to the PLT entry address.  Normally, SHN_UNDEF
   symbols have a value of zero, so this is a clue to ld.so that it
   should treat these symbols specially.  For relocations not in
   ELF_RTYPE_CLASS_PLT (eg. those on function pointers), ld.so should
   use the value of the executable SHN_UNDEF symbol, ie. the PLT entry
   address.  For relocations in ELF_RTYPE_CLASS_PLT (eg. the relocs in
   the PLT itself), ld.so should use the value of the corresponding
   defined symbol in the object that defines the function, ie. the
   real function address.  This complicates ld.so in that there are
   now two possible values for a given symbol, and it gets even worse
   because protected symbols need yet another set of rules.

   On PowerPC64 we don't need any of this.  The linker won't emit
   SHN_UNDEF symbols with non-zero values.  ld.so can make all
   relocations behave "normally", ie. always use the real address
   like PLT relocations.  So always set ELF_RTYPE_CLASS_PLT.  */
#if _CALL_ELF != 2
#define elf_machine_type_class(type) \
  (ELF_RTYPE_CLASS_PLT | (((type) == R_PPC64_COPY) * ELF_RTYPE_CLASS_COPY))
#else
/* And now that you have read that large comment, you can disregard it
   all for ELFv2.  ELFv2 does need the special SHN_UNDEF treatment.  */
#define IS_PPC64_TLS_RELOC(R) \
  (((R) >= R_PPC64_TLS && (R) <= R_PPC64_DTPREL16_HIGHESTA) \
   || ((R) >= R_PPC64_TPREL16_HIGH && (R) <= R_PPC64_DTPREL16_HIGHA))

#define elf_machine_type_class(type) \
  ((((type) == R_PPC64_JMP_SLOT \
     || (type) == R_PPC64_ADDR24 \
     || IS_PPC64_TLS_RELOC (type)) * ELF_RTYPE_CLASS_PLT) \
   | (((type) == R_PPC64_COPY) * ELF_RTYPE_CLASS_COPY))
#endif

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT R_PPC64_JMP_SLOT
/* We define an initialization function to initialize HWCAP/HWCAP2 and
   platform data so it can be copied into the TCB later.  This is called
   very early in _dl_sysdep_start for dynamically linked binaries.  */
#if defined(SHARED) && IS_IN (rtld)
# define DL_PLATFORM_INIT dl_platform_init ()

static inline void __attribute__ ((unused))
dl_platform_init (void)
{
  __tcb_parse_hwcap_and_convert_at_platform ();
}
#endif
/* Stuff for the PLT.  */
#if _CALL_ELF != 2
#define PLT_INITIAL_ENTRY_WORDS 3
#define PLT_ENTRY_WORDS 3
#define GLINK_INITIAL_ENTRY_WORDS 8
/* The first 32k entries of glink can set an index and branch using two
   instructions; past that point, glink uses three instructions.  */
#define GLINK_ENTRY_WORDS(I) (((I) < 0x8000)? 2 : 3)
#else
#define PLT_INITIAL_ENTRY_WORDS 2
#define PLT_ENTRY_WORDS 1
#define GLINK_INITIAL_ENTRY_WORDS 8
#define GLINK_ENTRY_WORDS(I) 1
#endif
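/* In other words (editorial summary of the values above): in ELFv1 each PLT
   entry is a three-doubleword function descriptor (code address, TOC
   pointer, static chain), and the first three doublewords of the PLT are
   reserved for the resolver descriptor and the link map.  In ELFv2 each PLT
   entry is a single doubleword holding the target address, with two
   reserved doublewords at the start (resolver address and link map), as set
   up in elf_machine_runtime_setup below.  */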
#define PPC_DCBST(where) asm volatile ("dcbst 0,%0" : : "r"(where) : "memory")
#define PPC_DCBT(where) asm volatile ("dcbt 0,%0" : : "r"(where) : "memory")
#define PPC_DCBF(where) asm volatile ("dcbf 0,%0" : : "r"(where) : "memory")
#define PPC_SYNC asm volatile ("sync" : : : "memory")
#define PPC_ISYNC asm volatile ("sync; isync" : : : "memory")
#define PPC_ICBI(where) asm volatile ("icbi 0,%0" : : "r"(where) : "memory")
#define PPC_DIE asm volatile ("tweq 0,0")
/* Use this when you've modified some code, but it won't be in the
   instruction fetch queue (or when it doesn't matter if it is).  */
#define MODIFIED_CODE_NOQUEUE(where) \
  do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); } while (0)
/* Use this when it might be in the instruction queue.  */
#define MODIFIED_CODE(where) \
  do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); PPC_ISYNC; } while (0)
/* Set up the loaded object described by MAP so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */
static inline int __attribute__ ((always_inline))
elf_machine_runtime_setup (struct link_map *map, struct r_scope_elem *scope[],
                           int lazy, int profile)
{
  if (map->l_info[DT_JMPREL])
    {
      Elf64_Word i;
      Elf64_Word *glink = NULL;
      Elf64_Xword *plt = (Elf64_Xword *) D_PTR (map, l_info[DT_PLTGOT]);
      Elf64_Word num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
                                    / sizeof (Elf64_Rela));
      Elf64_Addr l_addr = map->l_addr;
      Elf64_Dyn **info = map->l_info;
      char *p;

      extern void _dl_runtime_resolve (void);
      extern void _dl_profile_resolve (void);

      /* Relocate the DT_PPC64_GLINK entry in the _DYNAMIC section.
         elf_get_dynamic_info takes care of the standard entries but
         doesn't know exactly what to do with processor specific
         entries.  */
      if (info[DT_PPC64(GLINK)] != NULL)
        info[DT_PPC64(GLINK)]->d_un.d_ptr += l_addr;

      if (lazy)
        {
          Elf64_Word glink_offset;
          Elf64_Word offset;
          Elf64_Addr dlrr;
#ifdef SHARED
          if (__glibc_unlikely (profile))
            {
              dlrr = (Elf64_Addr) _dl_profile_resolve;
              if (profile && GLRO(dl_profile) != NULL
                  && _dl_name_match_p (GLRO(dl_profile), map))
                /* This is the object we are looking for.  Say that we really
                   want profiling and the timers are started.  */
                GL(dl_profile_map) = map;
            }
          else
#endif
            dlrr = (Elf64_Addr) _dl_runtime_resolve;

#if _CALL_ELF != 2
          /* We need to stuff the address/TOC of _dl_runtime_resolve
             into doublewords 0 and 1 of plt_reserve.  Then we need to
             stuff the map address into doubleword 2 of plt_reserve.
             This allows the GLINK0 code to transfer control to the
             correct trampoline which will transfer control to fixup
             in dl-machine.c.  */
          {
            /* The plt_reserve area is the 1st 3 doublewords of the PLT.  */
            Elf64_FuncDesc *plt_reserve = (Elf64_FuncDesc *) plt;
            Elf64_FuncDesc *resolve_fd = (Elf64_FuncDesc *) dlrr;
            plt_reserve->fd_func = resolve_fd->fd_func;
            plt_reserve->fd_toc = resolve_fd->fd_toc;
            plt_reserve->fd_aux = (Elf64_Addr) map;
#ifdef RTLD_BOOTSTRAP
            /* When we're bootstrapping, the opd entry will not have
               been relocated yet.  */
            plt_reserve->fd_func += l_addr;
            plt_reserve->fd_toc += l_addr;
#endif
          }
#else
          /* When we don't have function descriptors, the first doubleword
             of the PLT holds the address of _dl_runtime_resolve, and the
             second doubleword holds the map address.  */
          plt[0] = dlrr;
          plt[1] = (Elf64_Addr) map;
#endif
          /* Set up the lazy PLT entries.  */
          glink = (Elf64_Word *) D_PTR (map, l_info[DT_PPC64(GLINK)]);
          offset = PLT_INITIAL_ENTRY_WORDS;
          glink_offset = GLINK_INITIAL_ENTRY_WORDS;
          for (i = 0; i < num_plt_entries; i++)
            {
              plt[offset] = (Elf64_Xword) &glink[glink_offset];
              offset += PLT_ENTRY_WORDS;
              glink_offset += GLINK_ENTRY_WORDS (i);
            }

          /* Now, we've modified data.  We need to write the changes from
             the data cache to a second-level unified cache, then make
             sure that stale data in the instruction cache is removed.
             (In a multiprocessor system, the effect is more complex.)
             Most of the PLT shouldn't be in the instruction cache, but
             there may be a little overlap at the start and the end.

             Assumes that dcbst and icbi apply to lines of 16 bytes or
             more.  Current known line sizes are 16, 32, and 128 bytes.  */

          for (p = (char *) plt; p < (char *) &plt[offset]; p += 16)
            PPC_DCBST (p);
          PPC_SYNC;
        }
    }
  return lazy;
}
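/* Summary note: with lazy binding, each unresolved PLT entry is pointed at
   its glink stub; the stub loads the entry's index and branches to common
   code that reaches _dl_runtime_resolve (and this object's link map) through
   the reserved doublewords written above.  */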
#if _CALL_ELF == 2
extern void attribute_hidden _dl_error_localentry (struct link_map *map,
                                                   const Elf64_Sym *refsym);

/* If the PLT entry resolves to a function in the same object, return
   the target function's local entry point offset if usable.  */
static inline Elf64_Addr __attribute__ ((always_inline))
ppc64_local_entry_offset (struct link_map *map, lookup_t sym_map,
                          const ElfW(Sym) *refsym, const ElfW(Sym) *sym)
{
  /* If the target function is in a different object, we cannot
     use the local entry point.  */
  if (sym_map != map)
    {
      /* Check that optimized plt call stubs for localentry:0 functions
         are not being satisfied by a non-zero localentry symbol.  */
      if (map->l_info[DT_PPC64(OPT)]
          && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_LOCALENTRY) != 0
          && refsym->st_info == ELFW(ST_INFO) (STB_GLOBAL, STT_FUNC)
          && (STO_PPC64_LOCAL_MASK & refsym->st_other) == 0
          && (STO_PPC64_LOCAL_MASK & sym->st_other) != 0)
        _dl_error_localentry (map, refsym);

      return 0;
    }

  /* If the linker inserted multiple TOCs, we cannot use the
     local entry point.  */
  if (map->l_info[DT_PPC64(OPT)]
      && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_MULTI_TOC))
    return 0;

  /* If the target function is an ifunc then the local entry offset is
     for the resolver, not the final destination.  */
  if (__builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0))
    return 0;

  /* Otherwise, we can use the local entry point.  Retrieve its offset
     from the symbol's ELF st_other field.  */
  return PPC64_LOCAL_ENTRY_OFFSET (sym->st_other);
}
#endif
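/* For reference: in ELFv2 a function whose global entry point begins with
   the usual "addis r2,r12,...; addi r2,r2,..." TOC setup has its local
   entry point 8 bytes past the global one, and that is the offset encoded
   in st_other and returned by PPC64_LOCAL_ENTRY_OFFSET.  */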
/* Change the PLT entry whose reloc is 'reloc' to call the actual
   routine.  */
static inline Elf64_Addr __attribute__ ((always_inline))
elf_machine_fixup_plt (struct link_map *map, lookup_t sym_map,
                       const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
                       const Elf64_Rela *reloc,
                       Elf64_Addr *reloc_addr, Elf64_Addr finaladdr)
{
#if _CALL_ELF != 2
  Elf64_FuncDesc *plt = (Elf64_FuncDesc *) reloc_addr;
  Elf64_FuncDesc *rel = (Elf64_FuncDesc *) finaladdr;
  Elf64_Addr offset = 0;
  Elf64_FuncDesc zero_fd = {0, 0, 0};

  PPC_DCBT (&plt->fd_aux);
  PPC_DCBT (&plt->fd_func);

  /* If sym_map is NULL, it's a weak undefined sym; set the plt to
     zero.  finaladdr should be zero already in this case, but guard
     against invalid plt relocations with non-zero addends.  */
  if (sym_map == NULL)
    finaladdr = 0;

  /* Don't die here if finaladdr is zero, die if this plt entry is
     actually called.  Makes a difference when LD_BIND_NOW=1.
     finaladdr may be zero for a weak undefined symbol, or when an
     ifunc resolver returns zero.  */
  if (finaladdr == 0)
    rel = &zero_fd;
  else
    {
      PPC_DCBT (&rel->fd_aux);
      PPC_DCBT (&rel->fd_func);
    }
  /* If the opd entry is not yet relocated (because it's from a shared
     object that hasn't been processed yet), then manually reloc it.  */
  if (finaladdr != 0 && map != sym_map && !sym_map->l_relocated
#if !defined RTLD_BOOTSTRAP && defined SHARED
      /* Bootstrap map doesn't have l_relocated set for it.  */
      && sym_map != &GL(dl_rtld_map)
#endif
      )
    offset = sym_map->l_addr;

  /* For PPC64, fixup_plt copies the function descriptor from opd
     over the corresponding PLT entry.
     Initially, PLT Entry[i] is set up for lazy linking, or is zero.
     For lazy linking, the fd_toc and fd_aux entries are irrelevant,
     so for thread safety we write them before changing fd_func.  */

  plt->fd_aux = rel->fd_aux + offset;
  plt->fd_toc = rel->fd_toc + offset;
  PPC_DCBF (&plt->fd_toc);
  PPC_ISYNC;

  plt->fd_func = rel->fd_func + offset;
  PPC_DCBST (&plt->fd_func);
  PPC_ISYNC;
#else
  finaladdr += ppc64_local_entry_offset (map, sym_map, refsym, sym);
  *reloc_addr = finaladdr;
#endif

  return finaladdr;
}
/* Return the final value of a plt relocation.  */
static inline Elf64_Addr
elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
                       Elf64_Addr value)
{
  return value + reloc->r_addend;
}

/* Names of the architecture-specific auditing callback functions.  */
#if _CALL_ELF != 2
#define ARCH_LA_PLTENTER ppc64_gnu_pltenter
#define ARCH_LA_PLTEXIT ppc64_gnu_pltexit
#else
#define ARCH_LA_PLTENTER ppc64v2_gnu_pltenter
#define ARCH_LA_PLTEXIT ppc64v2_gnu_pltexit
#endif
#if ENABLE_STATIC_PIE && !defined SHARED && !IS_IN (rtld)
#include <libc-diag.h>
#include <tcb-offsets.h>

/* Set up r13 for _dl_relocate_static_pie so that libgcc ifuncs that
   normally access the tcb copy of hwcap will see __tcb.hwcap.  */

static inline void __attribute__ ((always_inline))
ppc_init_fake_thread_pointer (void)
{
  DIAG_PUSH_NEEDS_COMMENT;
  /* We are playing pointer tricks.  Silence gcc warning.  */
  DIAG_IGNORE_NEEDS_COMMENT (4.9, "-Warray-bounds");
  __thread_register = (char *) &__tcb.hwcap - TCB_HWCAP;
  DIAG_POP_NEEDS_COMMENT;
}

#define ELF_MACHINE_BEFORE_RTLD_RELOC(map, dynamic_info) \
  ppc_init_fake_thread_pointer ();
#endif /* ENABLE_STATIC_PIE && !defined SHARED && !IS_IN (rtld)  */

#endif /* dl_machine_h */

#ifdef RESOLVE_MAP
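/* Halfword extraction helpers for the 16-bit relocations below.  PPC_LO,
   PPC_HI, PPC_HIGHER and PPC_HIGHEST pick out successive 16-bit fields of a
   64-bit value; the *A variants add 0x8000 first so the result compensates
   for the sign extension of the field below it when the pieces are combined
   (the @ha semantics).  BIT_INSERT merges VAL into VAR under MASK, leaving
   the other bits of the patched instruction untouched.  */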
#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI ((v) + 0x8000)
#define PPC_HIGHER(v) (((v) >> 32) & 0xffff)
#define PPC_HIGHERA(v) PPC_HIGHER ((v) + 0x8000)
#define PPC_HIGHEST(v) (((v) >> 48) & 0xffff)
#define PPC_HIGHESTA(v) PPC_HIGHEST ((v) + 0x8000)
#define BIT_INSERT(var, val, mask) \
  ((var) = ((var) & ~(Elf64_Addr) (mask)) | ((val) & (mask)))

#define dont_expect(X) __builtin_expect ((X), 0)

extern void attribute_hidden _dl_reloc_overflow (struct link_map *map,
                                                 const char *name,
                                                 Elf64_Addr *const reloc_addr,
                                                 const Elf64_Sym *refsym);
static inline void __attribute__ ((always_inline))
elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
                           void *const reloc_addr_arg)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  *reloc_addr = l_addr + reloc->r_addend;
}

/* This computes the value used by TPREL* relocs.  */
static inline Elf64_Addr __attribute__ ((always_inline, const))
elf_machine_tprel (struct link_map *map,
                   struct link_map *sym_map,
                   const Elf64_Sym *sym,
                   const Elf64_Rela *reloc)
{
#ifndef RTLD_BOOTSTRAP
  if (sym_map)
    {
      CHECK_STATIC_TLS (map, sym_map);
#endif
      return TLS_TPREL_VALUE (sym_map, sym, reloc);
#ifndef RTLD_BOOTSTRAP
    }
#endif
  return 0;
}
/* Call function at address VALUE (an OPD entry) to resolve ifunc relocs.  */
static inline Elf64_Addr __attribute__ ((always_inline))
resolve_ifunc (Elf64_Addr value,
               const struct link_map *map, const struct link_map *sym_map)
{
#if _CALL_ELF != 2
  /* The function we are calling may not yet have its opd entry relocated.  */
  Elf64_FuncDesc opd;
  if (map != sym_map
# if !defined RTLD_BOOTSTRAP && defined SHARED
      /* Bootstrap map doesn't have l_relocated set for it.  */
      && sym_map != &GL(dl_rtld_map)
# endif
      && !sym_map->l_relocated)
    {
      Elf64_FuncDesc *func = (Elf64_FuncDesc *) value;
      opd.fd_func = func->fd_func + sym_map->l_addr;
      opd.fd_toc = func->fd_toc + sym_map->l_addr;
      opd.fd_aux = func->fd_aux;
      /* GCC 4.9+ eliminates the branch as dead code; force the opd set
         dependency.  */
      asm ("" : "=r" (value) : "0" (&opd), "X" (opd));
    }
#endif
  return ((Elf64_Addr (*) (unsigned long int)) value) (GLRO(dl_hwcap));
}
/* Perform the relocation specified by RELOC and SYM (which is fully
   resolved).  MAP is the object containing the reloc.  */
static inline void __attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
                  const Elf64_Rela *reloc,
                  const Elf64_Sym *sym,
                  const struct r_found_version *version,
                  void *const reloc_addr_arg,
                  int skip_ifunc)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  const int r_type = ELF64_R_TYPE (reloc->r_info);
  const Elf64_Sym *const refsym = sym;
  union unaligned
    {
      uint16_t u2;
      uint32_t u4;
      uint64_t u8;
    } __attribute__ ((__packed__));

  if (r_type == R_PPC64_RELATIVE)
    {
      *reloc_addr = map->l_addr + reloc->r_addend;
      return;
    }

  if (__glibc_unlikely (r_type == R_PPC64_NONE))
    return;

  /* We need SYM_MAP even in the absence of TLS, for elf_machine_fixup_plt
     and STT_GNU_IFUNC.  */
  struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version, r_type);
  Elf64_Addr value = SYMBOL_ADDRESS (sym_map, sym, true) + reloc->r_addend;

  if (sym != NULL
      && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0)
      && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1)
      && __builtin_expect (!skip_ifunc, 1))
    value = resolve_ifunc (value, map, sym_map);
  /* For relocs that don't edit code, return.
     For relocs that might edit instructions, break from the switch.  */
  switch (r_type)
    {
    case R_PPC64_ADDR64:
    case R_PPC64_GLOB_DAT:
      *reloc_addr = value;
      return;

    case R_PPC64_IRELATIVE:
      if (__glibc_likely (!skip_ifunc))
        value = resolve_ifunc (value, map, sym_map);
      *reloc_addr = value;
      return;

    case R_PPC64_JMP_IREL:
      if (__glibc_likely (!skip_ifunc))
        value = resolve_ifunc (value, map, sym_map);
      /* Fall thru */
    case R_PPC64_JMP_SLOT:
      elf_machine_fixup_plt (map, sym_map, refsym, sym,
                             reloc, reloc_addr, value);
      return;
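    /* A note on the PPC64_OPT_TLS paths below (editorial summary): that flag
       in DT_PPC64_OPT indicates the object was linked against the optimized
       __tls_get_addr stub.  When the variable ends up in static TLS, the GOT
       module/offset pair can be pre-filled (module 0 plus the tp-relative
       offset) so the stub can skip the full __tls_get_addr call.  */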
    case R_PPC64_DTPMOD64:
      if (map->l_info[DT_PPC64(OPT)]
          && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_TLS))
        {
#ifdef RTLD_BOOTSTRAP
          reloc_addr[0] = 0;
          reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
                           + TLS_DTV_OFFSET);
          return;
#else
          if (sym_map != NULL)
            {
# ifndef SHARED
              CHECK_STATIC_TLS (map, sym_map);
# else
              if (TRY_STATIC_TLS (map, sym_map))
# endif
                {
                  reloc_addr[0] = 0;
                  /* Set up for local dynamic.  */
                  reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
                                   + TLS_DTV_OFFSET);
                  return;
                }
            }
#endif
        }
#ifdef RTLD_BOOTSTRAP
      /* During startup the dynamic linker is always index 1.  */
      *reloc_addr = 1;
#else
      /* Get the information from the link map returned by the
         resolve function.  */
      if (sym_map != NULL)
        *reloc_addr = sym_map->l_tls_modid;
#endif
      return;
    case R_PPC64_DTPREL64:
      if (map->l_info[DT_PPC64(OPT)]
          && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_TLS))
        {
#ifdef RTLD_BOOTSTRAP
          *reloc_addr = TLS_TPREL_VALUE (sym_map, sym, reloc);
          return;
#else
          if (sym_map != NULL)
            {
              /* This reloc is always preceded by R_PPC64_DTPMOD64.  */
# ifndef SHARED
              assert (HAVE_STATIC_TLS (map, sym_map));
# else
              if (HAVE_STATIC_TLS (map, sym_map))
# endif
                {
                  *reloc_addr = TLS_TPREL_VALUE (sym_map, sym, reloc);
                  return;
                }
            }
#endif
        }
      /* During relocation all TLS symbols are defined and used.
         Therefore the offset is already correct.  */
#ifndef RTLD_BOOTSTRAP
      if (sym_map != NULL)
        *reloc_addr = TLS_DTPREL_VALUE (sym, reloc);
#endif
      return;

    case R_PPC64_TPREL64:
      *reloc_addr = elf_machine_tprel (map, sym_map, sym, reloc);
      return;
    case R_PPC64_TPREL16_LO_DS:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_LO_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_TPREL16_DS:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_TPREL16:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value + 0x8000) >= 0x10000))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_TPREL16_LO:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_TPREL16_HI:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect (value + 0x80000000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_HI", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_TPREL16_HIGH:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_TPREL16_HA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect (value + 0x80008000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_HA", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_TPREL16_HIGHA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_TPREL16_HIGHER:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHER (value);
      break;

    case R_PPC64_TPREL16_HIGHEST:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHEST (value);
      break;

    case R_PPC64_TPREL16_HIGHERA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHERA (value);
      break;

    case R_PPC64_TPREL16_HIGHESTA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHESTA (value);
      break;
#ifndef RTLD_BOOTSTRAP /* None of the following appear in ld.so */
    case R_PPC64_ADDR16_LO_DS:
      if (dont_expect ((value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_LO_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_ADDR16_LO:
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_ADDR16_HI:
      if (dont_expect (value + 0x80000000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_HI", reloc_addr, refsym);
      /* Fall through.  */
    case R_PPC64_ADDR16_HIGH:
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_ADDR16_HA:
      if (dont_expect (value + 0x80008000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_HA", reloc_addr, refsym);
      /* Fall through.  */
    case R_PPC64_ADDR16_HIGHA:
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_ADDR30:
      {
        Elf64_Addr delta = value - (Elf64_Xword) reloc_addr;
        if (dont_expect ((delta + 0x80000000) >= 0x100000000LL
                         || (delta & 3) != 0))
          _dl_reloc_overflow (map, "R_PPC64_ADDR30", reloc_addr, refsym);
        BIT_INSERT (*(Elf64_Word *) reloc_addr, delta, 0xfffffffc);
      }
      break;
    case R_PPC64_COPY:
      if (dont_expect (sym == NULL))
        /* This can happen in trace mode when an object could not be found.  */
        return;
      if (dont_expect (sym->st_size > refsym->st_size
                       || (GLRO(dl_verbose)
                           && sym->st_size < refsym->st_size)))
        {
          const char *strtab;

          strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
          _dl_error_printf ("%s: Symbol `%s' has different size"
                            " in shared object,"
                            " consider re-linking\n",
                            RTLD_PROGNAME, strtab + refsym->st_name);
        }
      memcpy (reloc_addr_arg, (char *) value,
              MIN (sym->st_size, refsym->st_size));
      return;
    case R_PPC64_UADDR64:
      ((union unaligned *) reloc_addr)->u8 = value;
      return;

    case R_PPC64_UADDR32:
      ((union unaligned *) reloc_addr)->u4 = value;
      return;

    case R_PPC64_ADDR32:
      if (dont_expect ((value + 0x80000000) >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_ADDR32", reloc_addr, refsym);
      *(Elf64_Word *) reloc_addr = value;
      return;

    case R_PPC64_ADDR24:
      if (dont_expect ((value + 0x2000000) >= 0x4000000 || (value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_ADDR24", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Word *) reloc_addr, value, 0x3fffffc);
      break;

    case R_PPC64_ADDR16:
      if (dont_expect ((value + 0x8000) >= 0x10000))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = value;
      break;

    case R_PPC64_UADDR16:
      if (dont_expect ((value + 0x8000) >= 0x10000))
        _dl_reloc_overflow (map, "R_PPC64_UADDR16", reloc_addr, refsym);
      ((union unaligned *) reloc_addr)->u2 = value;
      return;

    case R_PPC64_ADDR16_DS:
      if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_ADDR16_HIGHER:
      *(Elf64_Half *) reloc_addr = PPC_HIGHER (value);
      break;

    case R_PPC64_ADDR16_HIGHEST:
      *(Elf64_Half *) reloc_addr = PPC_HIGHEST (value);
      break;

    case R_PPC64_ADDR16_HIGHERA:
      *(Elf64_Half *) reloc_addr = PPC_HIGHERA (value);
      break;

    case R_PPC64_ADDR16_HIGHESTA:
      *(Elf64_Half *) reloc_addr = PPC_HIGHESTA (value);
      break;
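    /* Editorial note on the cases below: besides inserting the 14-bit
       displacement, the _BRTAKEN and _BRNTAKEN variants also encode a static
       branch-prediction hint into the BO field of the patched conditional
       branch; the extra mask tests adjust the hint encoding for the
       different BO forms.  */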
    case R_PPC64_ADDR14:
    case R_PPC64_ADDR14_BRTAKEN:
    case R_PPC64_ADDR14_BRNTAKEN:
      {
        if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
          _dl_reloc_overflow (map, "R_PPC64_ADDR14", reloc_addr, refsym);
        Elf64_Word insn = *(Elf64_Word *) reloc_addr;
        BIT_INSERT (insn, value, 0xfffc);
        if (r_type != R_PPC64_ADDR14)
          {
            insn &= ~(1 << 21);
            if (r_type == R_PPC64_ADDR14_BRTAKEN)
              insn |= 1 << 21;
            if ((insn & (0x14 << 21)) == (0x04 << 21))
              insn |= 0x02 << 21;
            else if ((insn & (0x14 << 21)) == (0x10 << 21))
              insn |= 0x08 << 21;
          }
        *(Elf64_Word *) reloc_addr = insn;
      }
      break;
    case R_PPC64_REL32:
      *(Elf64_Word *) reloc_addr = value - (Elf64_Addr) reloc_addr;
      return;

    case R_PPC64_REL64:
      *reloc_addr = value - (Elf64_Addr) reloc_addr;
      return;
#endif /* !RTLD_BOOTSTRAP */

    default:
      _dl_reloc_bad_type (map, r_type, 0);
      return;
    }
  MODIFIED_CODE_NOQUEUE (reloc_addr);
}
static inline void __attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
                      Elf64_Addr l_addr, const Elf64_Rela *reloc,
                      int skip_ifunc)
{
  /* elf_machine_runtime_setup handles this.  */
}

#endif /* RESOLVE */