/* Machine-dependent ELF dynamic relocation inline functions.  SPARC version.
   Copyright (C) 1996-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "sparc"

#include <string.h>
#include <sys/param.h>
#include <ldsodefs.h>
#include <sysdep.h>
#include <tls.h>
#include <dl-plt.h>
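/* dl-plt.h supplies sparc_fixup_plt and the OPCODE_* instruction
   encodings used by the PLT code below.  */
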
/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int
elf_machine_matches_host (const Elf32_Ehdr *ehdr)
{
  if (ehdr->e_machine == EM_SPARC)
    return 1;
  else if (ehdr->e_machine == EM_SPARC32PLUS)
    {
      /* XXX The following is wrong!  Dave Miller refused to implement it
         correctly.  If this causes problems shoot *him*!  */
#ifdef SHARED
      return GLRO(dl_hwcap) & GLRO(dl_hwcap_mask) & HWCAP_SPARC_V9;
#else
      return GLRO(dl_hwcap) & HWCAP_SPARC_V9;
#endif
    }
  else
    return 0;
}

/* We have to do this because elf_machine_{dynamic,load_address} can be
   invoked from functions that have no GOT references, and thus the compiler
   has no obligation to load the PIC register.  */
#define LOAD_PIC_REG(PIC_REG) \
do {  register Elf32_Addr pc __asm("o7"); \
      __asm("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t" \
            "call 1f\n\t" \
            "add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n" \
            "1:\tadd %1, %0, %1" \
            : "=r" (pc), "=r" (PIC_REG)); \
} while (0)
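
/* A rough sketch of why the sequence above works: the %hi/%lo references
   to _GLOBAL_OFFSET_TABLE_ in a PIC sequence are resolved PC-relative, so
   with the -4/+4 addends both halves evaluate to the GOT address minus the
   address of the "call" instruction, which is exactly the value the call
   leaves in %o7; the final add therefore leaves the run-time GOT address
   in PIC_REG.  */
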
/* Return the link-time address of _DYNAMIC.  Conveniently, this is the
   first element of the GOT.  This must be inlined in a function which
   uses global data.  */
static inline Elf32_Addr
elf_machine_dynamic (void)
{
  register Elf32_Addr *got asm ("%l7");

  LOAD_PIC_REG (got);

  return *got;
}

/* Return the run-time load address of the shared object.  */
static inline Elf32_Addr
elf_machine_load_address (void)
{
  register Elf32_Addr *pc __asm ("%o7"), *got __asm ("%l7");

  __asm ("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t"
         "call 1f\n\t"
         " add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n\t"
         "call _DYNAMIC\n\t"
         "call _GLOBAL_OFFSET_TABLE_\n"
         "1:\tadd %1, %0, %1\n\t" : "=r" (pc), "=r" (got));

  /* got is now l_addr + _GLOBAL_OFFSET_TABLE_
     *got is _DYNAMIC
     pc[2]*4 is l_addr + _DYNAMIC - (long)pc - 8
     pc[3]*4 is l_addr + _GLOBAL_OFFSET_TABLE_ - (long)pc - 12  */
  return (Elf32_Addr) got - *got + (pc[2] - pc[3]) * 4 - 4;
}

/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */

static inline int
elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
{
  Elf32_Addr *plt;
  extern void _dl_runtime_resolve (Elf32_Word);
  extern void _dl_runtime_profile (Elf32_Word);

  if (l->l_info[DT_JMPREL] && lazy)
    {
      Elf32_Addr rfunc;

      /* The entries for functions in the PLT have not yet been filled in.
         Their initial contents will, when called, arrange to set the high
         22 bits of %g1 to an offset into the .rela.plt section and jump to
         the beginning of the PLT.  */
      plt = (Elf32_Addr *) D_PTR (l, l_info[DT_PLTGOT]);
      if (__builtin_expect(profile, 0))
        {
          rfunc = (Elf32_Addr) &_dl_runtime_profile;

          if (GLRO(dl_profile) != NULL
              && _dl_name_match_p (GLRO(dl_profile), l))
            GL(dl_profile_map) = l;
        }
      else
        {
          rfunc = (Elf32_Addr) &_dl_runtime_resolve;
        }

      /* The beginning of the PLT does:

                 sethi %hi(_dl_runtime_{resolve,profile}), %g2
         pltpc:  jmpl  %g2 + %lo(_dl_runtime_{resolve,profile}), %g2
                  nop
                 .word MAP

         The PC value (pltpc) saved in %g2 by the jmpl points near the
         location where we store the link_map pointer for this object.  */

      plt[0] = 0x05000000 | ((rfunc >> 10) & 0x003fffff);
      plt[1] = 0x85c0a000 | (rfunc & 0x3ff);
      plt[2] = OPCODE_NOP;      /* Fill call delay slot.  */
      plt[3] = (Elf32_Addr) l;
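      /* A note on the constants just stored: 0x05000000 decodes to
         "sethi %hi(0), %g2" and 0x85c0a000 to "jmpl %g2 + 0, %g2", with
         the high and low bits of RFUNC OR'd into their immediate fields,
         matching the pseudo-assembly in the comment above.  */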
      if (__builtin_expect (l->l_info[VALIDX(DT_GNU_PRELINKED)] != NULL, 0)
          || __builtin_expect (l->l_info [VALIDX (DT_GNU_LIBLISTSZ)] != NULL, 0))
        {
          /* Need to reinitialize .plt to undo prelinking.  */
          Elf32_Rela *rela = (Elf32_Rela *) D_PTR (l, l_info[DT_JMPREL]);
          Elf32_Rela *relaend
            = (Elf32_Rela *) ((char *) rela
                              + l->l_info[DT_PLTRELSZ]->d_un.d_val);
#if !defined RTLD_BOOTSTRAP && !defined __sparc_v9__
          /* Note that we don't mask the hwcap here, as the flush is
             essential to functionality on those CPUs that implement it.
             For sparcv9 we can assume flush is present.  */
          const int do_flush = GLRO(dl_hwcap) & HWCAP_SPARC_FLUSH;
#else
          const int do_flush = 1;
#endif

          /* prelink must ensure there are no R_SPARC_NONE relocs left
             in .rela.plt.  */
          while (rela < relaend)
            {
              *(unsigned int *) (rela->r_offset + l->l_addr)
                = OPCODE_SETHI_G1 | (rela->r_offset + l->l_addr
                                     - (Elf32_Addr) plt);
              *(unsigned int *) (rela->r_offset + l->l_addr + 4)
                = OPCODE_BA | ((((Elf32_Addr) plt
                                 - rela->r_offset - l->l_addr - 4) >> 2)
                               & 0x3fffff);
              if (do_flush)
                {
                  __asm __volatile ("flush %0" : : "r" (rela->r_offset
                                                        + l->l_addr));
                  __asm __volatile ("flush %0+4" : : "r" (rela->r_offset
                                                          + l->l_addr));
                }

              ++rela;
            }
        }
    }

  return lazy;
}

/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry, so
   PLT entries should not be allowed to define the value.
   ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one
   of the main executable's symbols, as for a COPY reloc.  */
#define elf_machine_type_class(type) \
  ((((type) == R_SPARC_JMP_SLOT \
     || ((type) >= R_SPARC_TLS_GD_HI22 && (type) <= R_SPARC_TLS_TPOFF64)) \
    * ELF_RTYPE_CLASS_PLT) \
   | (((type) == R_SPARC_COPY) * ELF_RTYPE_CLASS_COPY))

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT R_SPARC_JMP_SLOT

/* The SPARC never uses Elf32_Rel relocations.  */
#define ELF_MACHINE_NO_REL 1
#define ELF_MACHINE_NO_RELA 0

/* Undo the sub %sp, 6*4, %sp; add %sp, 22*4, %o0 below to get at the
   value we want in __libc_stack_end.  */
#define DL_STACK_END(cookie) \
  ((void *) (((long) (cookie)) - (22 - 6) * 4))
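/* In other words (given the layout set up by RTLD_START below): the cookie
   passed to _dl_start is %sp + 22*4 as seen after the "sub %sp, 6*4, %sp",
   so stripping (22 - 6) * 4 bytes recovers the stack pointer the process
   started with.  */
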
/* Initial entry point code for the dynamic linker.
   The C function `_dl_start' is the real entry point;
   its return value is the user program's entry point.  */

#define RTLD_GOT_ADDRESS(pic_reg, reg, symbol) \
  "sethi %gdop_hix22(" #symbol "), " #reg "\n\t" \
  "xor " #reg ", %gdop_lox10(" #symbol "), " #reg "\n\t" \
  "ld [" #pic_reg " + " #reg "], " #reg ", %gdop(" #symbol ")"
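
/* The %gdop_hix22/%gdop_lox10/%gdop operators emit the SPARC GOT-data
   (R_SPARC_GOTDATA_OP_*) relocations: the sethi/xor pair forms the GOT
   offset of SYMBOL and the annotated ld fetches its address from the GOT,
   which lets the startup code below reference these symbols before any
   other relocations have been applied.  (The linker may also relax the
   sequence when the symbol turns out to be local.)  */
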
#define RTLD_START __asm__ ("\
        .text\n\
        .globl  _start\n\
        .type   _start, @function\n\
        .align  32\n\
_start:\n\
  /* Allocate space for functions to drop their arguments.  */\n\
        sub     %sp, 6*4, %sp\n\
  /* Pass pointer to argument block to _dl_start.  */\n\
        call    _dl_start\n\
         add    %sp, 22*4, %o0\n\
        /* FALTHRU */\n\
        .globl  _dl_start_user\n\
        .type   _dl_start_user, @function\n\
_dl_start_user:\n\
  /* Load the PIC register.  */\n\
1:      call    2f\n\
         sethi  %hi(_GLOBAL_OFFSET_TABLE_-(1b-.)), %l7\n\
2:      or      %l7, %lo(_GLOBAL_OFFSET_TABLE_-(1b-.)), %l7\n\
        add     %l7, %o7, %l7\n\
  /* Save the user entry point address in %l0.  */\n\
        mov     %o0, %l0\n\
  /* See if we were run as a command with the executable file name as an\n\
     extra leading argument.  If so, adjust the contents of the stack.  */\n\
        " RTLD_GOT_ADDRESS(%l7, %g2, _dl_skip_args) "\n\
        ld      [%g2], %i0\n\
        tst     %i0\n\
        beq     3f\n\
         ld     [%sp+22*4], %i5         /* load argc */\n\
  /* Find out how far to shift.  */\n\
        " RTLD_GOT_ADDRESS(%l7, %l3, _dl_argv) "\n\
        sub     %i5, %i0, %i5\n\
        ld      [%l3], %l4\n\
        sll     %i0, 2, %i2\n\
        st      %i5, [%sp+22*4]\n\
        sub     %l4, %i2, %l4\n\
        add     %sp, 23*4, %i1\n\
        add     %i1, %i2, %i2\n\
        st      %l4, [%l3]\n\
  /* Copy down argv.  */\n\
21:     ld      [%i2], %i3\n\
        add     %i2, 4, %i2\n\
        tst     %i3\n\
        st      %i3, [%i1]\n\
        bne     21b\n\
         add    %i1, 4, %i1\n\
  /* Copy down env.  */\n\
22:     ld      [%i2], %i3\n\
        add     %i2, 4, %i2\n\
        tst     %i3\n\
        st      %i3, [%i1]\n\
        bne     22b\n\
         add    %i1, 4, %i1\n\
  /* Copy down auxiliary table.  */\n\
23:     ld      [%i2], %i3\n\
        ld      [%i2+4], %i4\n\
        add     %i2, 8, %i2\n\
        tst     %i3\n\
        st      %i3, [%i1]\n\
        st      %i4, [%i1+4]\n\
        bne     23b\n\
         add    %i1, 8, %i1\n\
  /* %o0 = _dl_loaded, %o1 = argc, %o2 = argv, %o3 = envp.  */\n\
3:      " RTLD_GOT_ADDRESS(%l7, %o0, _rtld_local) "\n\
        add     %sp, 23*4, %o2\n\
        sll     %i5, 2, %o3\n\
        add     %o3, 4, %o3\n\
        mov     %i5, %o1\n\
        add     %o2, %o3, %o3\n\
        call    _dl_init\n\
         ld     [%o0], %o0\n\
  /* Pass our finalizer function to the user in %g1.  */\n\
        " RTLD_GOT_ADDRESS(%l7, %g1, _dl_fini) "\n\
  /* Jump to the user's entry point and deallocate the extra stack we got.  */\n\
        jmp     %l0\n\
         add    %sp, 6*4, %sp\n\
        .size   _dl_start_user, . - _dl_start_user\n\
        .previous");

static inline Elf32_Addr
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
                       const Elf32_Rela *reloc,
                       Elf32_Addr *reloc_addr, Elf32_Addr value)
{
#ifdef __sparc_v9__
  /* Sparc v9 can assume flush is always present.  */
  const int do_flush = 1;
#else
  /* Note that we don't mask the hwcap here, as the flush is essential to
     functionality on those CPUs that implement it.  */
  const int do_flush = GLRO(dl_hwcap) & HWCAP_SPARC_FLUSH;
#endif
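  /* As the comment in the R_SPARC_JMP_SLOT case below suggests, the fourth
     argument tells sparc_fixup_plt (dl-plt.h) whether the entry's first
     instruction must be preserved for thread safety: we pass 1 here, while
     elf_machine_rela passes 0 because no other thread can be running
     through the PLT at that point.  */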
  return sparc_fixup_plt (reloc, reloc_addr, value, 1, do_flush);
}

/* Return the final value of a plt relocation.  */
static inline Elf32_Addr
elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
                       Elf32_Addr value)
{
  return value + reloc->r_addend;
}

#endif /* dl_machine_h */

#define ARCH_LA_PLTENTER sparc32_gnu_pltenter
#define ARCH_LA_PLTEXIT sparc32_gnu_pltexit

#ifdef RESOLVE_MAP

/* Perform the relocation specified by RELOC and SYM (which is fully resolved).
   MAP is the object containing the reloc.  */

auto inline void
__attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, const Elf32_Rela *reloc,
                  const Elf32_Sym *sym, const struct r_found_version *version,
                  void *const reloc_addr_arg, int skip_ifunc)
{
  Elf32_Addr *const reloc_addr = reloc_addr_arg;
#if !defined RTLD_BOOTSTRAP && !defined RESOLVE_CONFLICT_FIND_MAP
  const Elf32_Sym *const refsym = sym;
#endif
  Elf32_Addr value;
  const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
#if !defined RESOLVE_CONFLICT_FIND_MAP
  struct link_map *sym_map = NULL;
#endif

#if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
  /* This is defined in rtld.c, but nowhere in the static libc.a; make the
     reference weak so static programs can still link.  This declaration
     cannot be done when compiling rtld.c (i.e. #ifdef RTLD_BOOTSTRAP)
     because rtld.c contains the common defn for _dl_rtld_map, which is
     incompatible with a weak decl in the same file.  */
  weak_extern (_dl_rtld_map);
#endif

  if (__glibc_unlikely (r_type == R_SPARC_NONE))
    return;

  if (__glibc_unlikely (r_type == R_SPARC_SIZE32))
    {
      *reloc_addr = sym->st_size + reloc->r_addend;
      return;
    }

#if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC
  if (__glibc_unlikely (r_type == R_SPARC_RELATIVE))
    {
# if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
      if (map != &_dl_rtld_map) /* Already done in rtld itself.  */
# endif
        *reloc_addr += map->l_addr + reloc->r_addend;
      return;
    }
#endif

#ifndef RESOLVE_CONFLICT_FIND_MAP
  if (__builtin_expect (ELF32_ST_BIND (sym->st_info) == STB_LOCAL, 0)
      && sym->st_shndx != SHN_UNDEF)
    {
      value = map->l_addr;
    }
  else
    {
      sym_map = RESOLVE_MAP (&sym, version, r_type);
      value = sym_map == NULL ? 0 : sym_map->l_addr + sym->st_value;
    }
#else
  value = 0;
#endif

  value += reloc->r_addend;     /* Assume copy relocs have zero addend.  */

  if (sym != NULL
      && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0)
      && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1)
      && __builtin_expect (!skip_ifunc, 1))
    {
      value = ((Elf32_Addr (*) (int)) value) (GLRO(dl_hwcap));
    }

  switch (r_type)
    {
#if !defined RTLD_BOOTSTRAP && !defined RESOLVE_CONFLICT_FIND_MAP
    case R_SPARC_COPY:
      if (sym == NULL)
        /* This can happen in trace mode if an object could not be
           found.  */
        break;
      if (sym->st_size > refsym->st_size
          || (GLRO(dl_verbose) && sym->st_size < refsym->st_size))
        {
          const char *strtab;

          strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
          _dl_error_printf ("\
%s: Symbol `%s' has different size in shared object, consider re-linking\n",
                            RTLD_PROGNAME, strtab + refsym->st_name);
        }
      memcpy (reloc_addr_arg, (void *) value,
              MIN (sym->st_size, refsym->st_size));
      break;
#endif
    case R_SPARC_GLOB_DAT:
    case R_SPARC_32:
      *reloc_addr = value;
      break;
    case R_SPARC_IRELATIVE:
      value = ((Elf32_Addr (*) (int)) value) (GLRO(dl_hwcap));
      *reloc_addr = value;
      break;
    case R_SPARC_JMP_IREL:
      value = ((Elf32_Addr (*) (int)) value) (GLRO(dl_hwcap));
      /* Fall thru */
    case R_SPARC_JMP_SLOT:
      {
#if !defined RTLD_BOOTSTRAP && !defined __sparc_v9__
        /* Note that we don't mask the hwcap here, as the flush is
           essential to functionality on those CPUs that implement
           it.  For sparcv9 we can assume flush is present.  */
        const int do_flush = GLRO(dl_hwcap) & HWCAP_SPARC_FLUSH;
#else
        /* Unfortunately, this is necessary, so that we can ensure
           ld.so will not execute corrupt PLT entry instructions.  */
        const int do_flush = 1;
#endif
        /* At this point we don't need to bother with thread safety,
           so we can optimize the first instruction of .plt out.  */
        sparc_fixup_plt (reloc, reloc_addr, value, 0, do_flush);
      }
      break;
#ifndef RESOLVE_CONFLICT_FIND_MAP
    case R_SPARC_TLS_DTPMOD32:
      /* Get the information from the link map returned by the
         resolve function.  */
      if (sym_map != NULL)
        *reloc_addr = sym_map->l_tls_modid;
      break;
    case R_SPARC_TLS_DTPOFF32:
      /* During relocation all TLS symbols are defined and used.
         Therefore the offset is already correct.  */
      *reloc_addr = (sym == NULL ? 0 : sym->st_value) + reloc->r_addend;
      break;
    case R_SPARC_TLS_TPOFF32:
      /* The offset is negative, forward from the thread pointer.
         We know the offset of the object the symbol is contained in.
         It is a negative value which will be added to the thread
         pointer.  */
      if (sym != NULL)
        {
          CHECK_STATIC_TLS (map, sym_map);
          *reloc_addr = sym->st_value - sym_map->l_tls_offset
                        + reloc->r_addend;
        }
      break;
# ifndef RTLD_BOOTSTRAP
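    /* The LE_HIX22/LOX10 pair patches an immediate TP-relative access:
       HIX22 stores the complemented high bits (a sethi of ~value >> 10)
       and LOX10 stores (value & 0x3ff) | 0x1c00, a sign-extending xor
       immediate, so executing the patched sethi/xor reconstructs the
       (negative) thread-pointer offset.  */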
    case R_SPARC_TLS_LE_HIX22:
    case R_SPARC_TLS_LE_LOX10:
      if (sym != NULL)
        {
          CHECK_STATIC_TLS (map, sym_map);
          value = sym->st_value - sym_map->l_tls_offset
                  + reloc->r_addend;
          if (r_type == R_SPARC_TLS_LE_HIX22)
            *reloc_addr = (*reloc_addr & 0xffc00000) | ((~value) >> 10);
          else
            *reloc_addr = (*reloc_addr & 0xffffe000) | (value & 0x3ff)
                          | 0x1c00;
        }
      break;
# endif
#endif
#ifndef RTLD_BOOTSTRAP
    case R_SPARC_8:
      *(char *) reloc_addr = value;
      break;
    case R_SPARC_16:
      *(short *) reloc_addr = value;
      break;
    case R_SPARC_DISP8:
      *(char *) reloc_addr = (value - (Elf32_Addr) reloc_addr);
      break;
    case R_SPARC_DISP16:
      *(short *) reloc_addr = (value - (Elf32_Addr) reloc_addr);
      break;
    case R_SPARC_DISP32:
      *reloc_addr = (value - (Elf32_Addr) reloc_addr);
      break;
    case R_SPARC_LO10:
      *reloc_addr = (*reloc_addr & ~0x3ff) | (value & 0x3ff);
      break;
    case R_SPARC_WDISP30:
      *reloc_addr = ((*reloc_addr & 0xc0000000)
                     | ((value - (unsigned int) reloc_addr) >> 2));
      break;
    case R_SPARC_HI22:
      *reloc_addr = (*reloc_addr & 0xffc00000) | (value >> 10);
      break;
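    /* The UA16/UA32 targets may be unaligned, so the value is stored
       byte by byte in big-endian order rather than with a single
       half-word or word store.  */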
    case R_SPARC_UA16:
      ((unsigned char *) reloc_addr_arg) [0] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [1] = value;
      break;
    case R_SPARC_UA32:
      ((unsigned char *) reloc_addr_arg) [0] = value >> 24;
      ((unsigned char *) reloc_addr_arg) [1] = value >> 16;
      ((unsigned char *) reloc_addr_arg) [2] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [3] = value;
      break;
#endif
#if !defined RTLD_BOOTSTRAP || defined _NDEBUG
    default:
      _dl_reloc_bad_type (map, r_type, 0);
      break;
#endif
    }
}

auto inline void
__attribute__ ((always_inline))
elf_machine_rela_relative (Elf32_Addr l_addr, const Elf32_Rela *reloc,
                           void *const reloc_addr_arg)
{
  Elf32_Addr *const reloc_addr = reloc_addr_arg;
  *reloc_addr += l_addr + reloc->r_addend;
}

auto inline void
__attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map,
                      Elf32_Addr l_addr, const Elf32_Rela *reloc,
                      int skip_ifunc)
{
  Elf32_Addr *const reloc_addr = (void *) (l_addr + reloc->r_offset);
  const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);

  if (__glibc_likely (r_type == R_SPARC_JMP_SLOT))
    ;
  else if (r_type == R_SPARC_JMP_IREL)
    {
      Elf32_Addr value = map->l_addr + reloc->r_addend;
      if (__glibc_likely (!skip_ifunc))
        value = ((Elf32_Addr (*) (int)) value) (GLRO(dl_hwcap));
      sparc_fixup_plt (reloc, reloc_addr, value, 1, 1);
    }
  else if (r_type == R_SPARC_NONE)
    ;
  else
    _dl_reloc_bad_type (map, r_type, 1);
}

#endif /* RESOLVE_MAP */