2.5-18.1
[glibc.git] / sysdeps / sparc / sparc32 / dl-machine.h
blob02dabaabb44d9896fff85388d1c02e1535a11d24
1 /* Machine-dependent ELF dynamic relocation inline functions. SPARC version.
2 Copyright (C) 1996-2003, 2004, 2005, 2006 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18 02111-1307 USA. */
20 #ifndef dl_machine_h
21 #define dl_machine_h
23 #define ELF_MACHINE_NAME "sparc"
25 #include <string.h>
26 #include <sys/param.h>
27 #include <ldsodefs.h>
28 #include <tls.h>
#ifndef VALIDX
/* Compute the l_info[] slot for a DT_VALRNGLO..DT_VALRNGHI dynamic tag:
   the standard tags come first, followed by the per-processor, version
   and extra tag ranges, then the value-range tags.  */
# define VALIDX(tag)	(DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM \
			 + DT_EXTRANUM + DT_VALTAGIDX (tag))
#endif

/* Some SPARC opcodes we need to use for self-modifying code.
   Each is the fixed bit pattern of the instruction; the variable field
   (immediate or displacement) is OR'ed in by the code that writes it.  */
#define OPCODE_NOP	0x01000000 /* nop */
#define OPCODE_CALL	0x40000000 /* call ?; add PC-rel word address */
#define OPCODE_SETHI_G1	0x03000000 /* sethi ?, %g1; add value>>10 */
#define OPCODE_JMP_G1	0x81c06000 /* jmp %g1+?; add lo 10 bits of value */
#define OPCODE_SAVE_SP	0x9de3bfa8 /* save %sp, -(16+6)*4, %sp */
#define OPCODE_BA	0x30800000 /* b,a ?; add PC-rel word address */
/* Return nonzero iff ELF header is compatible with the running host.
   Plain EM_SPARC objects always run; EM_SPARC32PLUS (v8plus) objects
   are accepted only when the kernel reports V9-capable hardware.  */
static inline int
elf_machine_matches_host (const Elf32_Ehdr *ehdr)
{
  if (ehdr->e_machine == EM_SPARC)
    return 1;
  else if (ehdr->e_machine == EM_SPARC32PLUS)
    {
      /* XXX The following is wrong!  Dave Miller rejected to implement it
	 correctly.  If this causes problems shoot *him*!  */
#ifdef SHARED
      /* In the shared case also honor LD_HWCAP_MASK filtering.  */
      return GLRO(dl_hwcap) & GLRO(dl_hwcap_mask) & HWCAP_SPARC_V9;
#else
      return GLRO(dl_hwcap) & HWCAP_SPARC_V9;
#endif
    }
  else
    return 0;
}
/* We have to do this because elf_machine_{dynamic,load_address} can be
   invoked from functions that have no GOT references, and thus the compiler
   has no obligation to load the PIC register.
   The sethi/call/add sequence computes the runtime GOT address:
   the "call 1f" deposits its own PC into %o7 (the sethi in its delay
   slot has already started the GOT offset), and the final add combines
   PC + PC-relative GOT offset into PIC_REG.  */
#define LOAD_PIC_REG(PIC_REG)	\
do {	register Elf32_Addr pc __asm("o7"); \
	__asm("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t" \
	      "call 1f\n\t" \
	      "add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n" \
	      "1:\tadd %1, %0, %1" \
	      : "=r" (pc), "=r" (PIC_REG)); \
} while (0)
/* Return the link-time address of _DYNAMIC.  Conveniently, this is the
   first element of the GOT.  This must be inlined in a function which
   uses global data.  */
static inline Elf32_Addr
elf_machine_dynamic (void)
{
  /* %l7 is the conventional PIC register on sparc32.  */
  register Elf32_Addr *got asm ("%l7");

  LOAD_PIC_REG (got);

  return *got;
}
/* Return the run-time load address of the shared object.
   The two extra "call" instructions are never executed; they only
   serve as in-line data holding PC-relative word displacements to
   _DYNAMIC and _GLOBAL_OFFSET_TABLE_, which we read back via pc[].  */
static inline Elf32_Addr
elf_machine_load_address (void)
{
  register Elf32_Addr *pc __asm ("%o7"), *got __asm ("%l7");

  __asm ("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t"
	 "call 1f\n\t"
	 " add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n\t"
	 "call _DYNAMIC\n\t"
	 "call _GLOBAL_OFFSET_TABLE_\n"
	 "1:\tadd %1, %0, %1\n\t" : "=r" (pc), "=r" (got));

  /* got is now l_addr + _GLOBAL_OFFSET_TABLE_
     *got is _DYNAMIC
     pc[2]*4 is l_addr + _DYNAMIC - (long)pc - 8
     pc[3]*4 is l_addr + _GLOBAL_OFFSET_TABLE_ - (long)pc - 12  */
  return (Elf32_Addr) got - *got + (pc[2] - pc[3]) * 4 - 4;
}
/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.
   Returns LAZY so the caller knows whether lazy binding is in effect.  */

static inline int
elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
{
  Elf32_Addr *plt;
  extern void _dl_runtime_resolve (Elf32_Word);
  extern void _dl_runtime_profile (Elf32_Word);

  if (l->l_info[DT_JMPREL] && lazy)
    {
      Elf32_Addr rfunc;

      /* The entries for functions in the PLT have not yet been filled in.
	 Their initial contents will arrange when called to set the high 22
	 bits of %g1 with an offset into the .rela.plt section and jump to
	 the beginning of the PLT.  */
      plt = (Elf32_Addr *) D_PTR (l, l_info[DT_PLTGOT]);
      if (__builtin_expect(profile, 0))
	{
	  rfunc = (Elf32_Addr) &_dl_runtime_profile;

	  /* Remember this map if it is the one being profiled.  */
	  if (GLRO(dl_profile) != NULL
	      && _dl_name_match_p (GLRO(dl_profile), l))
	    GL(dl_profile_map) = l;
	}
      else
	rfunc = (Elf32_Addr) &_dl_runtime_resolve;

      /* The beginning of the PLT does:

		sethi %hi(_dl_runtime_{resolve,profile}), %g2
	 pltpc:	jmpl %g2 + %lo(_dl_runtime_{resolve,profile}), %g2
		 nop
		.word MAP

	 The PC value (pltpc) saved in %g2 by the jmpl points near the
	 location where we store the link_map pointer for this object.  */

      plt[0] = 0x05000000 | ((rfunc >> 10) & 0x003fffff);  /* sethi %hi(rfunc), %g2 */
      plt[1] = 0x85c0a000 | (rfunc & 0x3ff);		   /* jmpl %g2+%lo(rfunc), %g2 */
      plt[2] = OPCODE_NOP;	/* Fill call delay slot.  */
      plt[3] = (Elf32_Addr) l;	/* Stashed link_map, read back by _dl_runtime_*.  */
      if (__builtin_expect (l->l_info[VALIDX(DT_GNU_PRELINKED)] != NULL, 0)
	  || __builtin_expect (l->l_info [VALIDX (DT_GNU_LIBLISTSZ)] != NULL, 0))
	{
	  /* Need to reinitialize .plt to undo prelinking.  Each PLT slot
	     is rewritten back to its "sethi reloc-offset; b,a plt0" shape
	     so the first call goes through the lazy resolver again.  */
	  int do_flush;
	  Elf32_Rela *rela = (Elf32_Rela *) D_PTR (l, l_info[DT_JMPREL]);
	  Elf32_Rela *relaend
	    = (Elf32_Rela *) ((char *) rela
			      + l->l_info[DT_PLTRELSZ]->d_un.d_val);
	  do_flush = GLRO(dl_hwcap) & HWCAP_SPARC_FLUSH;

	  /* prelink must ensure there are no R_SPARC_NONE relocs left
	     in .rela.plt.  */
	  while (rela < relaend)
	    {
	      *(unsigned int *) rela->r_offset
		= OPCODE_SETHI_G1 | (rela->r_offset - (Elf32_Addr) plt);
	      *(unsigned int *) (rela->r_offset + 4)
		= OPCODE_BA | ((((Elf32_Addr) plt
				 - rela->r_offset - 4) >> 2) & 0x3fffff);
	      if (do_flush)
		{
		  /* Flush the I-cache lines covering the two rewritten
		     instructions on CPUs that require it.  */
		  __asm __volatile ("flush %0" : : "r"(rela->r_offset));
		  __asm __volatile ("flush %0+4" : : "r"(rela->r_offset));
		}
	      ++rela;
	    }
	}
    }

  return lazy;
}
/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry, so
   PLT entries should not be allowed to define the value.
   ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one
   of the main executable's symbols, as for a COPY reloc.
   When TLS is enabled, all TLS relocation types are also treated as
   PLT class so they are never satisfied by a PLT entry.  */
#if defined USE_TLS && (!defined RTLD_BOOTSTRAP || USE___THREAD)
# define elf_machine_type_class(type) \
  ((((type) == R_SPARC_JMP_SLOT						      \
     || ((type) >= R_SPARC_TLS_GD_HI22 && (type) <= R_SPARC_TLS_TPOFF64))     \
    * ELF_RTYPE_CLASS_PLT)						      \
   | (((type) == R_SPARC_COPY) * ELF_RTYPE_CLASS_COPY))
#else
# define elf_machine_type_class(type) \
  ((((type) == R_SPARC_JMP_SLOT) * ELF_RTYPE_CLASS_PLT)			      \
   | (((type) == R_SPARC_COPY) * ELF_RTYPE_CLASS_COPY))
#endif
/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT	R_SPARC_JMP_SLOT

/* The SPARC never uses Elf32_Rel relocations.  */
#define ELF_MACHINE_NO_REL 1

/* The SPARC overlaps DT_RELA and DT_PLTREL.  */
#define ELF_MACHINE_PLTREL_OVERLAP 1

/* Undo the sub %sp, 6*4, %sp; add %sp, 22*4, %o0 below to get at the
   value we want in __libc_stack_end.  COOKIE is the %o0 value passed
   to _dl_start, i.e. sp + 22*4 after the 6*4 adjustment.  */
#define DL_STACK_END(cookie) \
  ((void *) (((long) (cookie)) - (22 - 6) * 4))
/* Initial entry point code for the dynamic linker.
   The C function `_dl_start' is the real entry point;
   its return value is the user program's entry point.
   _dl_start_user then: loads the PIC register, optionally shifts
   argc/argv/envp/auxv down by _dl_skip_args slots (when ld.so was run
   as a command with the target as an argument), calls _dl_init_internal
   on the loaded map, stores _dl_fini in %g1 for the user's atexit
   registration, and jumps to the user entry point.  */

#define RTLD_START __asm__ ("\
	.text\n\
	.globl	_start\n\
	.type	_start, @function\n\
	.align	32\n\
_start:\n\
  /* Allocate space for functions to drop their arguments.  */\n\
	sub	%sp, 6*4, %sp\n\
  /* Pass pointer to argument block to _dl_start.  */\n\
	call	_dl_start\n\
	 add	%sp, 22*4, %o0\n\
	/* FALTHRU */\n\
	.globl	_dl_start_user\n\
	.type	_dl_start_user, @function\n\
_dl_start_user:\n\
  /* Load the PIC register.  */\n\
1:	call	2f\n\
	 sethi	%hi(_GLOBAL_OFFSET_TABLE_-(1b-.)), %l7\n\
2:	or	%l7, %lo(_GLOBAL_OFFSET_TABLE_-(1b-.)), %l7\n\
	add	%l7, %o7, %l7\n\
  /* Save the user entry point address in %l0  */\n\
	mov	%o0, %l0\n\
  /* See if we were run as a command with the executable file name as an\n\
     extra leading argument.  If so, adjust the contents of the stack.  */\n\
	sethi	%hi(_dl_skip_args), %g2\n\
	or	%g2, %lo(_dl_skip_args), %g2\n\
	ld	[%l7+%g2], %i0\n\
	ld	[%i0], %i0\n\
	tst	%i0\n\
	beq	3f\n\
	 ld	[%sp+22*4], %i5		/* load argc */\n\
	/* Find out how far to shift.  */\n\
	sethi	%hi(_dl_argv), %l3\n\
	or	%l3, %lo(_dl_argv), %l3\n\
	ld	[%l7+%l3], %l3\n\
	sub	%i5, %i0, %i5\n\
	ld	[%l3], %l4\n\
	sll	%i0, 2, %i2\n\
	st	%i5, [%sp+22*4]\n\
	sub	%l4, %i2, %l4\n\
	add	%sp, 23*4, %i1\n\
	add	%i1, %i2, %i2\n\
	st	%l4, [%l3]\n\
	/* Copy down argv */\n\
21:	ld	[%i2], %i3\n\
	add	%i2, 4, %i2\n\
	tst	%i3\n\
	st	%i3, [%i1]\n\
	bne	21b\n\
	 add	%i1, 4, %i1\n\
	/* Copy down env */\n\
22:	ld	[%i2], %i3\n\
	add	%i2, 4, %i2\n\
	tst	%i3\n\
	st	%i3, [%i1]\n\
	bne	22b\n\
	 add	%i1, 4, %i1\n\
	/* Copy down auxiliary table.  */\n\
23:	ld	[%i2], %i3\n\
	ld	[%i2+4], %i4\n\
	add	%i2, 8, %i2\n\
	tst	%i3\n\
	st	%i3, [%i1]\n\
	st	%i4, [%i1+4]\n\
	bne	23b\n\
	 add	%i1, 8, %i1\n\
  /* %o0 = _dl_loaded, %o1 = argc, %o2 = argv, %o3 = envp.  */\n\
3:	sethi	%hi(_rtld_local), %o0\n\
	add	%sp, 23*4, %o2\n\
	orcc	%o0, %lo(_rtld_local), %o0\n\
	sll	%i5, 2, %o3\n\
	ld	[%l7+%o0], %o0\n\
	add	%o3, 4, %o3\n\
	mov	%i5, %o1\n\
	add	%o2, %o3, %o3\n\
	call	_dl_init_internal\n\
	 ld	[%o0], %o0\n\
  /* Pass our finalizer function to the user in %g1.  */\n\
	sethi	%hi(_dl_fini), %g1\n\
	or	%g1, %lo(_dl_fini), %g1\n\
	ld	[%l7+%g1], %g1\n\
  /* Jump to the user's entry point and deallocate the extra stack we got.  */\n\
	jmp	%l0\n\
	 add	%sp, 6*4, %sp\n\
	.size	_dl_start_user, . - _dl_start_user\n\
	.previous");
/* Rewrite one PLT entry at RELOC_ADDR so that it transfers control
   directly to VALUE.  T is the number of leading instructions to leave
   untouched (1 when other threads may be executing the entry, 0 when
   done at load time).  Returns VALUE.  */
static inline Elf32_Addr
sparc_fixup_plt (const Elf32_Rela *reloc, Elf32_Addr *reloc_addr,
		 Elf32_Addr value, int t)
{
  Elf32_Sword disp = value - (Elf32_Addr) reloc_addr;
#ifndef RTLD_BOOTSTRAP
  /* Note that we don't mask the hwcap here, as the flush is essential to
     functionality on those cpu's that implement it.  */
  int do_flush = GLRO(dl_hwcap) & HWCAP_SPARC_FLUSH;
#else
  /* Unfortunately, this is necessary, so that we can ensure
     ld.so will not execute corrupt PLT entry instructions. */
  const int do_flush = 1;
#endif

  /* NOTE(review): the "0 &&" deliberately disables the single-instruction
     "b,a" short-displacement fast path, so the general sethi/jmp sequence
     is always used.  Do not "fix" this without re-auditing the interaction
     with lazy binding and the T offset above.  */
  if (0 && disp >= -0x800000 && disp < 0x800000)
    {
      /* Don't need to worry about thread safety. We're writing just one
	 instruction.  */

      reloc_addr[0] = OPCODE_BA | ((disp >> 2) & 0x3fffff);
      if (do_flush)
	__asm __volatile ("flush %0" : : "r"(reloc_addr));
    }
  else
    {
      /* For thread safety, write the instructions from the bottom and
	 flush before we overwrite the critical "b,a".  This of course
	 need not be done during bootstrapping, since there are no threads.
	 But we also can't tell if we _can_ use flush, so don't. */

      reloc_addr += t;
      reloc_addr[1] = OPCODE_JMP_G1 | (value & 0x3ff);
      if (do_flush)
	__asm __volatile ("flush %0+4" : : "r"(reloc_addr));

      reloc_addr[0] = OPCODE_SETHI_G1 | (value >> 10);
      if (do_flush)
	__asm __volatile ("flush %0" : : "r"(reloc_addr));
    }

  return value;
}
352 static inline Elf32_Addr
353 elf_machine_fixup_plt (struct link_map *map, lookup_t t,
354 const Elf32_Rela *reloc,
355 Elf32_Addr *reloc_addr, Elf32_Addr value)
357 return sparc_fixup_plt (reloc, reloc_addr, value, 1);
/* Return the final value of a plt relocation: the resolved symbol
   address VALUE biased by the relocation's addend.  MAP is unused
   on sparc32.  */
static inline Elf32_Addr
elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
		       Elf32_Addr value)
{
  Elf32_Addr result = value;

  result += reloc->r_addend;
  return result;
}
368 #endif /* dl_machine_h */
370 #define ARCH_LA_PLTENTER sparc32_gnu_pltenter
371 #define ARCH_LA_PLTEXIT sparc32_gnu_pltexit
373 #ifdef RESOLVE_MAP
/* Perform the relocation specified by RELOC and SYM (which is fully resolved).
   MAP is the object containing the reloc.  RELOC_ADDR_ARG is the
   already-relocated target address being patched.  Defined as a GCC
   nested function ("auto inline") because this header is included
   inside the relocation loop functions.  */

auto inline void
__attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, const Elf32_Rela *reloc,
		  const Elf32_Sym *sym, const struct r_found_version *version,
		  void *const reloc_addr_arg)
{
  Elf32_Addr *const reloc_addr = reloc_addr_arg;
  const Elf32_Sym *const refsym = sym;	/* Referencing symbol, for COPY checks.  */
  Elf32_Addr value;
  const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
  struct link_map *sym_map = NULL;

#if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
  /* This is defined in rtld.c, but nowhere in the static libc.a; make the
     reference weak so static programs can still link.  This declaration
     cannot be done when compiling rtld.c (i.e. #ifdef RTLD_BOOTSTRAP)
     because rtld.c contains the common defn for _dl_rtld_map, which is
     incompatible with a weak decl in the same file.  */
  weak_extern (_dl_rtld_map);
#endif

  if (__builtin_expect (r_type == R_SPARC_NONE, 0))
    return;

#if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC
  /* RELATIVE relocs need only the load bias, no symbol lookup.  */
  if (__builtin_expect (r_type == R_SPARC_RELATIVE, 0))
    {
# if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
      if (map != &_dl_rtld_map) /* Already done in rtld itself.  */
# endif
	*reloc_addr += map->l_addr + reloc->r_addend;
      return;
    }
#endif

#ifndef RESOLVE_CONFLICT_FIND_MAP
  if (__builtin_expect (ELF32_ST_BIND (sym->st_info) == STB_LOCAL, 0)
      && sym->st_shndx != SHN_UNDEF)
    /* Defined local symbol: only the load bias is needed; st_value is
       folded in via the addend path below.  */
    value = map->l_addr;
  else
    {
      sym_map = RESOLVE_MAP (&sym, version, r_type);
      value = sym_map == NULL ? 0 : sym_map->l_addr + sym->st_value;
    }
#else
  value = 0;
#endif

  value += reloc->r_addend;	/* Assume copy relocs have zero addend.  */

  switch (r_type)
    {
#if !defined RTLD_BOOTSTRAP && !defined RESOLVE_CONFLICT_FIND_MAP
    case R_SPARC_COPY:
      if (sym == NULL)
	/* This can happen in trace mode if an object could not be
	   found.  */
	break;
      if (sym->st_size > refsym->st_size
	  || (GLRO(dl_verbose) && sym->st_size < refsym->st_size))
	{
	  const char *strtab;

	  strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
	  _dl_error_printf ("\
%s: Symbol `%s' has different size in shared object, consider re-linking\n",
			    rtld_progname ?: "<program name unknown>",
			    strtab + refsym->st_name);
	}
      /* Copy at most the smaller of the two sizes to avoid overrunning
	 the destination object.  */
      memcpy (reloc_addr_arg, (void *) value,
	      MIN (sym->st_size, refsym->st_size));
      break;
#endif
    case R_SPARC_GLOB_DAT:
    case R_SPARC_32:
      *reloc_addr = value;
      break;
    case R_SPARC_JMP_SLOT:
      /* At this point we don't need to bother with thread safety,
	 so we can optimize the first instruction of .plt out.  */
      sparc_fixup_plt (reloc, reloc_addr, value, 0);
      break;
#if defined USE_TLS && (!defined RTLD_BOOTSTRAP || USE___THREAD) \
    && !defined RESOLVE_CONFLICT_FIND_MAP
    case R_SPARC_TLS_DTPMOD32:
      /* Get the information from the link map returned by the
	 resolv function.  */
      if (sym_map != NULL)
	*reloc_addr = sym_map->l_tls_modid;
      break;
    case R_SPARC_TLS_DTPOFF32:
      /* During relocation all TLS symbols are defined and used.
	 Therefore the offset is already correct.  */
      *reloc_addr = (sym == NULL ? 0 : sym->st_value) + reloc->r_addend;
      break;
    case R_SPARC_TLS_TPOFF32:
      /* The offset is negative, forward from the thread pointer.  */
      /* We know the offset of object the symbol is contained in.
	 It is a negative value which will be added to the
	 thread pointer.  */
      if (sym != NULL)
	{
	  CHECK_STATIC_TLS (map, sym_map);
	  *reloc_addr = sym->st_value - sym_map->l_tls_offset
	    + reloc->r_addend;
	}
      break;
# ifndef RTLD_BOOTSTRAP
    case R_SPARC_TLS_LE_HIX22:
    case R_SPARC_TLS_LE_LOX10:
      if (sym != NULL)
	{
	  CHECK_STATIC_TLS (map, sym_map);
	  value = sym->st_value - sym_map->l_tls_offset
	    + reloc->r_addend;
	  if (r_type == R_SPARC_TLS_LE_HIX22)
	    /* Patch the 22-bit sethi immediate with ~value >> 10.  */
	    *reloc_addr = (*reloc_addr & 0xffc00000) | ((~value) >> 10);
	  else
	    /* Patch the low 10 bits and force the xor-style 0x1c00 bits.  */
	    *reloc_addr = (*reloc_addr & 0xffffe000) | (value & 0x3ff)
	      | 0x1c00;
	}
      break;
# endif
#endif
#ifndef RTLD_BOOTSTRAP
    case R_SPARC_8:
      *(char *) reloc_addr = value;
      break;
    case R_SPARC_16:
      *(short *) reloc_addr = value;
      break;
    case R_SPARC_DISP8:
      *(char *) reloc_addr = (value - (Elf32_Addr) reloc_addr);
      break;
    case R_SPARC_DISP16:
      *(short *) reloc_addr = (value - (Elf32_Addr) reloc_addr);
      break;
    case R_SPARC_DISP32:
      *reloc_addr = (value - (Elf32_Addr) reloc_addr);
      break;
    case R_SPARC_LO10:
      *reloc_addr = (*reloc_addr & ~0x3ff) | (value & 0x3ff);
      break;
    case R_SPARC_WDISP30:
      /* 30-bit word displacement in a call instruction.  */
      *reloc_addr = ((*reloc_addr & 0xc0000000)
		     | ((value - (unsigned int) reloc_addr) >> 2));
      break;
    case R_SPARC_HI22:
      *reloc_addr = (*reloc_addr & 0xffc00000) | (value >> 10);
      break;
    case R_SPARC_UA16:
      /* Unaligned stores done bytewise, big-endian.  */
      ((unsigned char *) reloc_addr_arg) [0] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [1] = value;
      break;
    case R_SPARC_UA32:
      ((unsigned char *) reloc_addr_arg) [0] = value >> 24;
      ((unsigned char *) reloc_addr_arg) [1] = value >> 16;
      ((unsigned char *) reloc_addr_arg) [2] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [3] = value;
      break;
#endif
#if !defined RTLD_BOOTSTRAP || defined _NDEBUG
    default:
      _dl_reloc_bad_type (map, r_type, 0);
      break;
#endif
    }
}
/* Apply an R_SPARC_RELATIVE relocation: add the load bias L_ADDR and
   the addend to the word at RELOC_ADDR_ARG.  No symbol lookup needed.  */
auto inline void
__attribute__ ((always_inline))
elf_machine_rela_relative (Elf32_Addr l_addr, const Elf32_Rela *reloc,
			   void *const reloc_addr_arg)
{
  Elf32_Addr *const reloc_addr = reloc_addr_arg;
  *reloc_addr += l_addr + reloc->r_addend;
}
/* Process a lazy (PLT) relocation during startup.  JMP_SLOT relocs are
   left alone here — elf_machine_runtime_setup already pointed the PLT
   at the lazy resolver; anything else in DT_JMPREL is an error.  */
auto inline void
__attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map,
		      Elf32_Addr l_addr, const Elf32_Rela *reloc)
{
  switch (ELF32_R_TYPE (reloc->r_info))
    {
    case R_SPARC_NONE:
      break;
    case R_SPARC_JMP_SLOT:
      break;
    default:
      /* NOTE(review): uses ELFW(R_TYPE) here vs ELF32_R_TYPE above;
	 equivalent on sparc32 but inconsistent.  */
      _dl_reloc_bad_type (map, ELFW(R_TYPE) (reloc->r_info), 1);
      break;
    }
}
575 #endif /* RESOLVE_MAP */