[glibc.git] sysdeps/sparc/sparc32/dl-machine.h
/* Machine-dependent ELF dynamic relocation inline functions.  SPARC version.
   Copyright (C) 1996-2003, 2004, 2005, 2006, 2007, 2010, 2011
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "sparc"

#include <string.h>
#include <sys/param.h>
#include <ldsodefs.h>
#include <sysdep.h>
#include <tls.h>
#include <dl-plt.h>

#ifndef VALIDX
# define VALIDX(tag) (DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM \
                      + DT_EXTRANUM + DT_VALTAGIDX (tag))
#endif
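
/* VALIDX maps a dynamic tag from the DT_VALRNGLO..DT_VALRNGHI range (such as
   DT_GNU_PRELINKED and DT_GNU_LIBLISTSZ, tested in elf_machine_runtime_setup
   below) to its slot in the link map's l_info[] array, which keeps those
   entries after the DT_NUM standard, processor-specific, version and extra
   entries.  */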

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int
elf_machine_matches_host (const Elf32_Ehdr *ehdr)
{
  if (ehdr->e_machine == EM_SPARC)
    return 1;
  else if (ehdr->e_machine == EM_SPARC32PLUS)
    {
      /* XXX The following is wrong!  Dave Miller refused to implement it
         correctly.  If this causes problems shoot *him*!  */
#ifdef SHARED
      return GLRO(dl_hwcap) & GLRO(dl_hwcap_mask) & HWCAP_SPARC_V9;
#else
      return GLRO(dl_hwcap) & HWCAP_SPARC_V9;
#endif
    }
  else
    return 0;
}

/* We have to do this because elf_machine_{dynamic,load_address} can be
   invoked from functions that have no GOT references, and thus the compiler
   has no obligation to load the PIC register.  */
#define LOAD_PIC_REG(PIC_REG) \
do {  register Elf32_Addr pc __asm("o7"); \
      __asm("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t" \
            "call 1f\n\t" \
            "add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n" \
            "1:\tadd %1, %0, %1" \
            : "=r" (pc), "=r" (PIC_REG)); \
} while (0)
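
/* How the sequence above works: the `call 1f' leaves its own address in %o7
   (the `pc' temporary), while the sethi/add pair compute the GOT's offset
   relative to that call; the -4 and +4 adjustments account for the sethi
   sitting one instruction before the call and the add sitting in its delay
   slot one instruction after it.  Adding %o7 then yields the run-time
   address of the GOT in PIC_REG.  */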

/* Return the link-time address of _DYNAMIC.  Conveniently, this is the
   first element of the GOT.  This must be inlined in a function which
   uses global data.  */
static inline Elf32_Addr
elf_machine_dynamic (void)
{
  register Elf32_Addr *got asm ("%l7");

  LOAD_PIC_REG (got);

  return *got;
}

/* Return the run-time load address of the shared object.  */
static inline Elf32_Addr
elf_machine_load_address (void)
{
  register Elf32_Addr *pc __asm ("%o7"), *got __asm ("%l7");

  __asm ("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t"
         "call 1f\n\t"
         " add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n\t"
         "call _DYNAMIC\n\t"
         "call _GLOBAL_OFFSET_TABLE_\n"
         "1:\tadd %1, %0, %1\n\t" : "=r" (pc), "=r" (got));

  /* got is now l_addr + _GLOBAL_OFFSET_TABLE_
     *got is _DYNAMIC
     pc[2]*4 is l_addr + _DYNAMIC - (long)pc - 8
     pc[3]*4 is l_addr + _GLOBAL_OFFSET_TABLE_ - (long)pc - 12  */
  return (Elf32_Addr) got - *got + (pc[2] - pc[3]) * 4 - 4;
}
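
/* Working through the arithmetic of the return expression above:
     got - *got          = (l_addr + _GLOBAL_OFFSET_TABLE_) - _DYNAMIC
     (pc[2] - pc[3]) * 4 = (l_addr + _DYNAMIC - pc - 8)
                           - (l_addr + _GLOBAL_OFFSET_TABLE_ - pc - 12)
                         = _DYNAMIC - _GLOBAL_OFFSET_TABLE_ + 4
   so the sum, minus the final 4, collapses to l_addr, the run-time load
   offset of the object.  */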

/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */

static inline int
elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
{
  Elf32_Addr *plt;
  extern void _dl_runtime_resolve (Elf32_Word);
  extern void _dl_runtime_profile (Elf32_Word);

  if (l->l_info[DT_JMPREL] && lazy)
    {
      Elf32_Addr rfunc;

      /* The entries for functions in the PLT have not yet been filled in.
         When called, their initial contents arrange to set the high 22
         bits of %g1 to an offset into the .rela.plt section and jump to
         the beginning of the PLT.  */
      plt = (Elf32_Addr *) D_PTR (l, l_info[DT_PLTGOT]);
      if (__builtin_expect(profile, 0))
        {
          rfunc = (Elf32_Addr) &_dl_runtime_profile;

          if (GLRO(dl_profile) != NULL
              && _dl_name_match_p (GLRO(dl_profile), l))
            GL(dl_profile_map) = l;
        }
      else
        {
          rfunc = (Elf32_Addr) &_dl_runtime_resolve;
        }

      /* The beginning of the PLT does:

                sethi %hi(_dl_runtime_{resolve,profile}), %g2
         pltpc: jmpl %g2 + %lo(_dl_runtime_{resolve,profile}), %g2
                 nop
                .word MAP

         The PC value (pltpc) saved in %g2 by the jmpl points near the
         location where we store the link_map pointer for this object.  */

      plt[0] = 0x05000000 | ((rfunc >> 10) & 0x003fffff);
      plt[1] = 0x85c0a000 | (rfunc & 0x3ff);
      plt[2] = OPCODE_NOP;      /* Fill call delay slot.  */
      plt[3] = (Elf32_Addr) l;
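
      /* The two words just stored are the instruction encodings for the
         sequence in the comment above: 0x05000000 is `sethi 0, %g2' (ORed
         with the high 22 bits of rfunc) and 0x85c0a000 is
         `jmpl %g2 + 0, %g2' (ORed with the low 10 bits).  Because the jmpl
         writes its own PC into %g2 before jumping, the resolver can recover
         plt[3], the link_map pointer stored above, from %g2.  */
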
      if (__builtin_expect (l->l_info[VALIDX(DT_GNU_PRELINKED)] != NULL, 0)
          || __builtin_expect (l->l_info [VALIDX (DT_GNU_LIBLISTSZ)] != NULL, 0))
        {
          /* Need to reinitialize .plt to undo prelinking.  */
          Elf32_Rela *rela = (Elf32_Rela *) D_PTR (l, l_info[DT_JMPREL]);
          Elf32_Rela *relaend
            = (Elf32_Rela *) ((char *) rela
                              + l->l_info[DT_PLTRELSZ]->d_un.d_val);
#if !defined RTLD_BOOTSTRAP && !defined __sparc_v9__
          /* Note that we don't mask the hwcap here, as the flush is
             essential to functionality on those CPUs that implement it.
             For sparcv9 we can assume flush is present.  */
          const int do_flush = GLRO(dl_hwcap) & HWCAP_SPARC_FLUSH;
#else
          const int do_flush = 1;
#endif

          /* prelink must ensure there are no R_SPARC_NONE relocs left
             in .rela.plt.  */
          while (rela < relaend)
            {
              *(unsigned int *) (rela->r_offset + l->l_addr)
                = OPCODE_SETHI_G1 | (rela->r_offset + l->l_addr
                                     - (Elf32_Addr) plt);
              *(unsigned int *) (rela->r_offset + l->l_addr + 4)
                = OPCODE_BA | ((((Elf32_Addr) plt
                                 - rela->r_offset - l->l_addr - 4) >> 2)
                               & 0x3fffff);
              if (do_flush)
                {
                  __asm __volatile ("flush %0" : : "r" (rela->r_offset
                                                        + l->l_addr));
                  __asm __volatile ("flush %0+4" : : "r" (rela->r_offset
                                                          + l->l_addr));
                }
              ++rela;
            }
        }
    }

  return lazy;
}
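
/* When undoing prelinking above, each PLT entry is restored to its original
   two-instruction form: a sethi that leaves the entry's offset from the
   start of the PLT in the high 22 bits of %g1, and a branch (OPCODE_BA)
   back to the beginning of the PLT, so that the first call through the
   entry again goes via _dl_runtime_resolve or _dl_runtime_profile.  */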

/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry, so
   PLT entries should not be allowed to define the value.
   ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one
   of the main executable's symbols, as for a COPY reloc.  */
#define elf_machine_type_class(type) \
  ((((type) == R_SPARC_JMP_SLOT                                               \
     || ((type) >= R_SPARC_TLS_GD_HI22 && (type) <= R_SPARC_TLS_TPOFF64))    \
    * ELF_RTYPE_CLASS_PLT)                                                    \
   | (((type) == R_SPARC_COPY) * ELF_RTYPE_CLASS_COPY))

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT  R_SPARC_JMP_SLOT

/* The SPARC never uses Elf32_Rel relocations.  */
#define ELF_MACHINE_NO_REL 1

/* The SPARC overlaps DT_RELA and DT_PLTREL.  */
#define ELF_MACHINE_PLTREL_OVERLAP 1

/* Undo the sub %sp, 6*4, %sp; add %sp, 22*4, %o0 below to get at the
   value we want in __libc_stack_end.  */
#define DL_STACK_END(cookie) \
  ((void *) (((long) (cookie)) - (22 - 6) * 4))
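
/* The cookie handed to _dl_start is %sp + 22*4, computed after the
   `sub %sp, 6*4, %sp' in RTLD_START below, so subtracting (22 - 6) * 4
   recovers the stack pointer as it was at process entry.  */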

/* Initial entry point code for the dynamic linker.
   The C function `_dl_start' is the real entry point;
   its return value is the user program's entry point.  */

#define RTLD_START __asm__ ("\
        .text\n\
        .globl  _start\n\
        .type   _start, @function\n\
        .align  32\n\
_start:\n\
  /* Allocate space for functions to drop their arguments.  */\n\
        sub     %sp, 6*4, %sp\n\
  /* Pass pointer to argument block to _dl_start.  */\n\
        call    _dl_start\n\
         add    %sp, 22*4, %o0\n\
        /* FALTHRU */\n\
        .globl  _dl_start_user\n\
        .type   _dl_start_user, @function\n\
_dl_start_user:\n\
  /* Load the PIC register.  */\n\
1:      call    2f\n\
         sethi  %hi(_GLOBAL_OFFSET_TABLE_-(1b-.)), %l7\n\
2:      or      %l7, %lo(_GLOBAL_OFFSET_TABLE_-(1b-.)), %l7\n\
        add     %l7, %o7, %l7\n\
  /* Save the user entry point address in %l0 */\n\
        mov     %o0, %l0\n\
  /* See if we were run as a command with the executable file name as an\n\
     extra leading argument.  If so, adjust the contents of the stack.  */\n\
        sethi   %hi(_dl_skip_args), %g2\n\
        or      %g2, %lo(_dl_skip_args), %g2\n\
        ld      [%l7+%g2], %i0\n\
        ld      [%i0], %i0\n\
        tst     %i0\n\
        beq     3f\n\
         ld     [%sp+22*4], %i5         /* load argc */\n\
        /* Find out how far to shift.  */\n\
        sethi   %hi(_dl_argv), %l3\n\
        or      %l3, %lo(_dl_argv), %l3\n\
        ld      [%l7+%l3], %l3\n\
        sub     %i5, %i0, %i5\n\
        ld      [%l3], %l4\n\
        sll     %i0, 2, %i2\n\
        st      %i5, [%sp+22*4]\n\
        sub     %l4, %i2, %l4\n\
        add     %sp, 23*4, %i1\n\
        add     %i1, %i2, %i2\n\
        st      %l4, [%l3]\n\
        /* Copy down argv */\n\
21:     ld      [%i2], %i3\n\
        add     %i2, 4, %i2\n\
        tst     %i3\n\
        st      %i3, [%i1]\n\
        bne     21b\n\
         add    %i1, 4, %i1\n\
        /* Copy down env */\n\
22:     ld      [%i2], %i3\n\
        add     %i2, 4, %i2\n\
        tst     %i3\n\
        st      %i3, [%i1]\n\
        bne     22b\n\
         add    %i1, 4, %i1\n\
        /* Copy down auxiliary table.  */\n\
23:     ld      [%i2], %i3\n\
        ld      [%i2+4], %i4\n\
        add     %i2, 8, %i2\n\
        tst     %i3\n\
        st      %i3, [%i1]\n\
        st      %i4, [%i1+4]\n\
        bne     23b\n\
         add    %i1, 8, %i1\n\
  /* %o0 = _dl_loaded, %o1 = argc, %o2 = argv, %o3 = envp.  */\n\
3:      sethi   %hi(_rtld_local), %o0\n\
        add     %sp, 23*4, %o2\n\
        orcc    %o0, %lo(_rtld_local), %o0\n\
        sll     %i5, 2, %o3\n\
        ld      [%l7+%o0], %o0\n\
        add     %o3, 4, %o3\n\
        mov     %i5, %o1\n\
        add     %o2, %o3, %o3\n\
        call    _dl_init_internal\n\
         ld     [%o0], %o0\n\
        /* Pass our finalizer function to the user in %g1.  */\n\
        sethi   %hi(_dl_fini), %g1\n\
        or      %g1, %lo(_dl_fini), %g1\n\
        ld      [%l7+%g1], %g1\n\
        /* Jump to the user's entry point and deallocate the extra stack we got.  */\n\
        jmp     %l0\n\
         add    %sp, 6*4, %sp\n\
        .size   _dl_start_user, . - _dl_start_user\n\
        .previous");
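
/* At label 3 above, the arguments for _dl_init_internal are assembled as
   follows: %o0 is loaded through the GOT with the address of _rtld_local and
   then dereferenced to get the head of the link-map list (the `_dl_loaded'
   of the comment), %o1 is argc, %o2 is argv at %sp + 23*4, and %o3 is
   argv + 4 * (argc + 1), i.e. the environment array just past argv's
   terminating NULL.  */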

static inline Elf32_Addr
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
                       const Elf32_Rela *reloc,
                       Elf32_Addr *reloc_addr, Elf32_Addr value)
{
#ifdef __sparc_v9__
  /* Sparc v9 can assume flush is always present.  */
  const int do_flush = 1;
#else
  /* Note that we don't mask the hwcap here, as the flush is essential to
     functionality on those CPUs that implement it.  */
  const int do_flush = GLRO(dl_hwcap) & HWCAP_SPARC_FLUSH;
#endif
  return sparc_fixup_plt (reloc, reloc_addr, value, 1, do_flush);
}
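
/* sparc_fixup_plt comes from <dl-plt.h>; it rewrites the PLT entry at
   RELOC_ADDR to transfer control directly to VALUE, flushing the modified
   instruction words from the instruction cache when the final argument is
   set.  The `1' passed here requests the update order that is safe when
   other threads may already be executing the entry; during the initial
   relocation, elf_machine_rela below passes 0 instead and, as its comment
   notes, can optimize the first instruction of the entry out.  */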

/* Return the final value of a plt relocation.  */
static inline Elf32_Addr
elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
                       Elf32_Addr value)
{
  return value + reloc->r_addend;
}

#endif /* dl_machine_h */

#define ARCH_LA_PLTENTER        sparc32_gnu_pltenter
#define ARCH_LA_PLTEXIT         sparc32_gnu_pltexit

#ifdef RESOLVE_MAP

/* Perform the relocation specified by RELOC and SYM (which is fully resolved).
   MAP is the object containing the reloc.  */

auto inline void
__attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, const Elf32_Rela *reloc,
                  const Elf32_Sym *sym, const struct r_found_version *version,
                  void *const reloc_addr_arg, int skip_ifunc)
{
  Elf32_Addr *const reloc_addr = reloc_addr_arg;
  const Elf32_Sym *const refsym = sym;
  Elf32_Addr value;
  const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
  struct link_map *sym_map = NULL;

#if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
  /* This is defined in rtld.c, but nowhere in the static libc.a; make the
     reference weak so static programs can still link.  This declaration
     cannot be done when compiling rtld.c (i.e. #ifdef RTLD_BOOTSTRAP)
     because rtld.c contains the common defn for _dl_rtld_map, which is
     incompatible with a weak decl in the same file.  */
  weak_extern (_dl_rtld_map);
#endif

  if (__builtin_expect (r_type == R_SPARC_NONE, 0))
    return;

#if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC
  if (__builtin_expect (r_type == R_SPARC_RELATIVE, 0))
    {
# if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
      if (map != &_dl_rtld_map) /* Already done in rtld itself.  */
# endif
        *reloc_addr += map->l_addr + reloc->r_addend;
      return;
    }
#endif
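
/* R_SPARC_RELATIVE needs no case during the bootstrap relocation when the
   linker supports -z combreloc (HAVE_Z_COMBRELOC): ld.so's own relative
   relocations are then grouped together and handled in bulk by
   elf_machine_rela_relative.  Outside the bootstrap, the extra check above
   skips ld.so itself, whose relative relocations were already applied.  */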

#ifndef RESOLVE_CONFLICT_FIND_MAP
  if (__builtin_expect (ELF32_ST_BIND (sym->st_info) == STB_LOCAL, 0)
      && sym->st_shndx != SHN_UNDEF)
    {
      value = map->l_addr;
    }
  else
    {
      sym_map = RESOLVE_MAP (&sym, version, r_type);
      value = sym_map == NULL ? 0 : sym_map->l_addr + sym->st_value;
    }
#else
  value = 0;
#endif

  value += reloc->r_addend;     /* Assume copy relocs have zero addend.  */

  if (sym != NULL
      && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0)
      && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1)
      && __builtin_expect (!skip_ifunc, 1))
    {
      value = ((Elf32_Addr (*) (int)) value) (GLRO(dl_hwcap));
    }

  switch (r_type)
    {
#if !defined RTLD_BOOTSTRAP && !defined RESOLVE_CONFLICT_FIND_MAP
    case R_SPARC_COPY:
      if (sym == NULL)
        /* This can happen in trace mode if an object could not be
           found.  */
        break;
      if (sym->st_size > refsym->st_size
          || (GLRO(dl_verbose) && sym->st_size < refsym->st_size))
        {
          const char *strtab;

          strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
          _dl_error_printf ("\
%s: Symbol `%s' has different size in shared object, consider re-linking\n",
                            rtld_progname ?: "<program name unknown>",
                            strtab + refsym->st_name);
        }
      memcpy (reloc_addr_arg, (void *) value,
              MIN (sym->st_size, refsym->st_size));
      break;
#endif
    case R_SPARC_GLOB_DAT:
    case R_SPARC_32:
      *reloc_addr = value;
      break;
    case R_SPARC_IRELATIVE:
      value = ((Elf32_Addr (*) (int)) value) (GLRO(dl_hwcap));
      *reloc_addr = value;
      break;
    case R_SPARC_JMP_IREL:
      value = ((Elf32_Addr (*) (int)) value) (GLRO(dl_hwcap));
      /* Fall thru */
    case R_SPARC_JMP_SLOT:
      {
#if !defined RTLD_BOOTSTRAP && !defined __sparc_v9__
        /* Note that we don't mask the hwcap here, as the flush is
           essential to functionality on those CPUs that implement
           it.  For sparcv9 we can assume flush is present.  */
        const int do_flush = GLRO(dl_hwcap) & HWCAP_SPARC_FLUSH;
#else
        /* Unfortunately, this is necessary, so that we can ensure
           ld.so will not execute corrupt PLT entry instructions.  */
        const int do_flush = 1;
#endif
        /* At this point we don't need to bother with thread safety,
           so we can optimize the first instruction of .plt out.  */
        sparc_fixup_plt (reloc, reloc_addr, value, 0, do_flush);
      }
      break;
#ifndef RESOLVE_CONFLICT_FIND_MAP
    case R_SPARC_TLS_DTPMOD32:
      /* Get the information from the link map returned by the
         resolver function.  */
      if (sym_map != NULL)
        *reloc_addr = sym_map->l_tls_modid;
      break;
    case R_SPARC_TLS_DTPOFF32:
      /* During relocation all TLS symbols are defined and used.
         Therefore the offset is already correct.  */
      *reloc_addr = (sym == NULL ? 0 : sym->st_value) + reloc->r_addend;
      break;
    case R_SPARC_TLS_TPOFF32:
      /* The offset is negative, forward from the thread pointer.
         We know the offset of the object the symbol is contained in;
         it is a negative value which will be added to the thread
         pointer.  */
      if (sym != NULL)
        {
          CHECK_STATIC_TLS (map, sym_map);
          *reloc_addr = sym->st_value - sym_map->l_tls_offset
            + reloc->r_addend;
        }
      break;
# ifndef RTLD_BOOTSTRAP
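    /* For the local-executable TLS model the linker emits
         sethi %tle_hix22(sym), %reg
         xor   %reg, %tle_lox10(sym), %reg
       HIX22 stores the ones-complement of the high 22 bits of the negative
       thread-pointer offset in the sethi immediate, and LOX10 stores the low
       10 bits with bits 12..10 of the simm13 forced on (the `| 0x1c00'), so
       the sign-extended xor operand supplies the inverted high part and the
       xor of the two reconstructs the full offset.  */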
    case R_SPARC_TLS_LE_HIX22:
    case R_SPARC_TLS_LE_LOX10:
      if (sym != NULL)
        {
          CHECK_STATIC_TLS (map, sym_map);
          value = sym->st_value - sym_map->l_tls_offset
            + reloc->r_addend;
          if (r_type == R_SPARC_TLS_LE_HIX22)
            *reloc_addr = (*reloc_addr & 0xffc00000) | ((~value) >> 10);
          else
            *reloc_addr = (*reloc_addr & 0xffffe000) | (value & 0x3ff)
              | 0x1c00;
        }
      break;
# endif
#endif
#ifndef RTLD_BOOTSTRAP
    case R_SPARC_8:
      *(char *) reloc_addr = value;
      break;
    case R_SPARC_16:
      *(short *) reloc_addr = value;
      break;
    case R_SPARC_DISP8:
      *(char *) reloc_addr = (value - (Elf32_Addr) reloc_addr);
      break;
    case R_SPARC_DISP16:
      *(short *) reloc_addr = (value - (Elf32_Addr) reloc_addr);
      break;
    case R_SPARC_DISP32:
      *reloc_addr = (value - (Elf32_Addr) reloc_addr);
      break;
    case R_SPARC_LO10:
      *reloc_addr = (*reloc_addr & ~0x3ff) | (value & 0x3ff);
      break;
    case R_SPARC_WDISP30:
      *reloc_addr = ((*reloc_addr & 0xc0000000)
                     | ((value - (unsigned int) reloc_addr) >> 2));
      break;
    case R_SPARC_HI22:
      *reloc_addr = (*reloc_addr & 0xffc00000) | (value >> 10);
      break;
    case R_SPARC_UA16:
      ((unsigned char *) reloc_addr_arg) [0] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [1] = value;
      break;
    case R_SPARC_UA32:
      ((unsigned char *) reloc_addr_arg) [0] = value >> 24;
      ((unsigned char *) reloc_addr_arg) [1] = value >> 16;
      ((unsigned char *) reloc_addr_arg) [2] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [3] = value;
      break;
#endif
#if !defined RTLD_BOOTSTRAP || defined _NDEBUG
    default:
      _dl_reloc_bad_type (map, r_type, 0);
      break;
#endif
    }
}

auto inline void
__attribute__ ((always_inline))
elf_machine_rela_relative (Elf32_Addr l_addr, const Elf32_Rela *reloc,
                           void *const reloc_addr_arg)
{
  Elf32_Addr *const reloc_addr = reloc_addr_arg;
  *reloc_addr += l_addr + reloc->r_addend;
}

auto inline void
__attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map,
                      Elf32_Addr l_addr, const Elf32_Rela *reloc,
                      int skip_ifunc)
{
  Elf32_Addr *const reloc_addr = (void *) (l_addr + reloc->r_offset);
  const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);

  if (__builtin_expect (r_type == R_SPARC_JMP_SLOT, 1))
    ;
  else if (r_type == R_SPARC_JMP_IREL)
    {
      Elf32_Addr value = map->l_addr + reloc->r_addend;
      if (__builtin_expect (!skip_ifunc, 1))
        value = ((Elf32_Addr (*) (int)) value) (GLRO(dl_hwcap));
      sparc_fixup_plt (reloc, reloc_addr, value, 1, 1);
    }
  else if (r_type == R_SPARC_NONE)
    ;
  else
    _dl_reloc_bad_type (map, r_type, 1);
}
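
/* Nothing is done above for R_SPARC_JMP_SLOT: the unresolved PLT entry
   already funnels its first call through the PLT header installed by
   elf_machine_runtime_setup, and elf_machine_fixup_plt patches it when the
   resolver is eventually invoked.  R_SPARC_JMP_IREL (ifunc) entries, by
   contrast, must run their resolver function right away, since only it
   knows the final target.  */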

#endif  /* RESOLVE_MAP */