[glibc.git] sysdeps/sparc/sparc32/dl-machine.h

/* Machine-dependent ELF dynamic relocation inline functions.  SPARC version.
   Copyright (C) 1996-2003, 2004, 2005, 2006, 2007, 2010
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "sparc"

#include <string.h>
#include <sys/param.h>
#include <ldsodefs.h>
#include <tls.h>
#include <dl-plt.h>
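
/* VALIDX maps a dynamic tag in the DT_VALRNGLO..DT_VALRNGHI range to its
   slot in l_info[], past the regular DT_* entries; it is used below to
   look up DT_GNU_PRELINKED and DT_GNU_LIBLISTSZ.  */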
#ifndef VALIDX
# define VALIDX(tag) (DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM \
                      + DT_EXTRANUM + DT_VALTAGIDX (tag))
#endif

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int
elf_machine_matches_host (const Elf32_Ehdr *ehdr)
{
  if (ehdr->e_machine == EM_SPARC)
    return 1;
  else if (ehdr->e_machine == EM_SPARC32PLUS)
    {
      /* XXX The following is wrong!  Dave Miller refused to implement it
         correctly.  If this causes problems shoot *him*!  */
#ifdef SHARED
      return GLRO(dl_hwcap) & GLRO(dl_hwcap_mask) & HWCAP_SPARC_V9;
#else
      return GLRO(dl_hwcap) & HWCAP_SPARC_V9;
#endif
    }
  else
    return 0;
}

/* We have to do this because elf_machine_{dynamic,load_address} can be
   invoked from functions that have no GOT references, and thus the compiler
   has no obligation to load the PIC register.  */
#define LOAD_PIC_REG(PIC_REG) \
do {  register Elf32_Addr pc __asm("o7"); \
      __asm("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t" \
            "call 1f\n\t" \
            "add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n" \
            "1:\tadd %1, %0, %1" \
            : "=r" (pc), "=r" (PIC_REG)); \
} while (0)

/* Return the link-time address of _DYNAMIC.  Conveniently, this is the
   first element of the GOT.  This must be inlined in a function which
   uses global data.  */
static inline Elf32_Addr
elf_machine_dynamic (void)
{
  register Elf32_Addr *got asm ("%l7");

  LOAD_PIC_REG (got);

  return *got;
}

/* Return the run-time load address of the shared object.  */
static inline Elf32_Addr
elf_machine_load_address (void)
{
  register Elf32_Addr *pc __asm ("%o7"), *got __asm ("%l7");

  __asm ("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t"
         "call 1f\n\t"
         " add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n\t"
         "call _DYNAMIC\n\t"
         "call _GLOBAL_OFFSET_TABLE_\n"
         "1:\tadd %1, %0, %1\n\t" : "=r" (pc), "=r" (got));

  /* got is now l_addr + _GLOBAL_OFFSET_TABLE_
     *got is _DYNAMIC
     pc[2]*4 is l_addr + _DYNAMIC - (long)pc - 8
     pc[3]*4 is l_addr + _GLOBAL_OFFSET_TABLE_ - (long)pc - 12  */
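  /* Derivation: the `call _DYNAMIC' and `call _GLOBAL_OFFSET_TABLE_'
     instructions above are never executed; their instruction words are read
     back as data through pc[2] and pc[3].  A call encodes
     (target - call_address) / 4 in its low 30 bits, and the two opcode bits
     fall out of the 32-bit word when multiplied by 4, giving the two
     equations above.  So

        (got - *got) + (pc[2] - pc[3]) * 4 - 4
          = (l_addr + _GLOBAL_OFFSET_TABLE_ - _DYNAMIC)
            + (_DYNAMIC - _GLOBAL_OFFSET_TABLE_ + 4) - 4
          = l_addr.  */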
  return (Elf32_Addr) got - *got + (pc[2] - pc[3]) * 4 - 4;
}

/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */

static inline int
elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
{
  Elf32_Addr *plt;
  extern void _dl_runtime_resolve (Elf32_Word);
  extern void _dl_runtime_profile (Elf32_Word);

  if (l->l_info[DT_JMPREL] && lazy)
    {
      Elf32_Addr rfunc;

      /* The entries for functions in the PLT have not yet been filled in.
         Their initial contents are arranged so that, when called, they set
         the high 22 bits of %g1 to an offset into the .rela.plt section and
         jump to the beginning of the PLT.  */
      plt = (Elf32_Addr *) D_PTR (l, l_info[DT_PLTGOT]);
      if (__builtin_expect(profile, 0))
        {
          rfunc = (Elf32_Addr) &_dl_runtime_profile;

          if (GLRO(dl_profile) != NULL
              && _dl_name_match_p (GLRO(dl_profile), l))
            GL(dl_profile_map) = l;
        }
      else
        {
          rfunc = (Elf32_Addr) &_dl_runtime_resolve;
        }

      /* The beginning of the PLT does:

                sethi %hi(_dl_runtime_{resolve,profile}), %g2
         pltpc: jmpl %g2 + %lo(_dl_runtime_{resolve,profile}), %g2
                nop
                .word MAP

         The PC value (pltpc) saved in %g2 by the jmpl points near the
         location where we store the link_map pointer for this object.  */

      plt[0] = 0x05000000 | ((rfunc >> 10) & 0x003fffff);
      plt[1] = 0x85c0a000 | (rfunc & 0x3ff);
      plt[2] = OPCODE_NOP;      /* Fill call delay slot.  */
      plt[3] = (Elf32_Addr) l;
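      /* The words just stored are the instruction encodings for the
         sequence in the comment above: 0x05000000 is `sethi imm22, %g2'
         and 0x85c0a000 is `jmpl %g2 + simm13, %g2', with the high and low
         bits of rfunc filled into their immediate fields.  */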
      if (__builtin_expect (l->l_info[VALIDX(DT_GNU_PRELINKED)] != NULL, 0)
          || __builtin_expect (l->l_info [VALIDX (DT_GNU_LIBLISTSZ)] != NULL, 0))
        {
          /* Need to reinitialize .plt to undo prelinking.  */
          Elf32_Rela *rela = (Elf32_Rela *) D_PTR (l, l_info[DT_JMPREL]);
          Elf32_Rela *relaend
            = (Elf32_Rela *) ((char *) rela
                              + l->l_info[DT_PLTRELSZ]->d_un.d_val);
#if !defined RTLD_BOOTSTRAP && !defined __sparc_v9__
          /* Note that we don't mask the hwcap here, as the flush is
             essential to functionality on those cpu's that implement it.
             For sparcv9 we can assume flush is present.  */
          const int do_flush = GLRO(dl_hwcap) & HWCAP_SPARC_FLUSH;
#else
          const int do_flush = 1;
#endif

          /* prelink must ensure there are no R_SPARC_NONE relocs left
             in .rela.plt.  */
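          /* The loop below rewrites each PLT entry back to its unresolved
             form described earlier: a sethi that loads the entry's byte
             offset from the start of the PLT into the high bits of %g1,
             followed by a branch back to the first PLT entry, which enters
             _dl_runtime_resolve or _dl_runtime_profile.  */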
          while (rela < relaend)
            {
              *(unsigned int *) (rela->r_offset + l->l_addr)
                = OPCODE_SETHI_G1 | (rela->r_offset + l->l_addr
                                     - (Elf32_Addr) plt);
              *(unsigned int *) (rela->r_offset + l->l_addr + 4)
                = OPCODE_BA | ((((Elf32_Addr) plt
                                 - rela->r_offset - l->l_addr - 4) >> 2)
                               & 0x3fffff);
              if (do_flush)
                {
                  __asm __volatile ("flush %0" : : "r" (rela->r_offset
                                                        + l->l_addr));
                  __asm __volatile ("flush %0+4" : : "r" (rela->r_offset
                                                          + l->l_addr));
                }
              ++rela;
            }
        }
    }

  return lazy;
}

/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry, so
   PLT entries should not be allowed to define the value.
   ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one
   of the main executable's symbols, as for a COPY reloc.  */
#if !defined RTLD_BOOTSTRAP || USE___THREAD
# define elf_machine_type_class(type) \
  ((((type) == R_SPARC_JMP_SLOT                                               \
     || ((type) >= R_SPARC_TLS_GD_HI22 && (type) <= R_SPARC_TLS_TPOFF64))    \
    * ELF_RTYPE_CLASS_PLT)                                                    \
   | (((type) == R_SPARC_COPY) * ELF_RTYPE_CLASS_COPY))
#else
# define elf_machine_type_class(type) \
  ((((type) == R_SPARC_JMP_SLOT) * ELF_RTYPE_CLASS_PLT)                       \
   | (((type) == R_SPARC_COPY) * ELF_RTYPE_CLASS_COPY))
#endif

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT R_SPARC_JMP_SLOT

/* The SPARC never uses Elf32_Rel relocations.  */
#define ELF_MACHINE_NO_REL 1

/* The SPARC overlaps DT_RELA and DT_PLTREL.  */
#define ELF_MACHINE_PLTREL_OVERLAP 1

/* Undo the sub %sp, 6*4, %sp; add %sp, 22*4, %o0 below to get at the
   value we want in __libc_stack_end.  */
#define DL_STACK_END(cookie) \
  ((void *) (((long) (cookie)) - (22 - 6) * 4))
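
/* In RTLD_START below, _dl_start is handed %sp + 22*4 as its argument after
   %sp has already been dropped by 6*4, so subtracting (22 - 6) * 4 from that
   cookie recovers the stack pointer as it was at process entry.  */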

/* Initial entry point code for the dynamic linker.
   The C function `_dl_start' is the real entry point;
   its return value is the user program's entry point.  */

#define RTLD_START __asm__ ("\
	.text\n\
	.globl	_start\n\
	.type	_start, @function\n\
	.align	32\n\
_start:\n\
  /* Allocate space for functions to drop their arguments.  */\n\
	sub	%sp, 6*4, %sp\n\
  /* Pass pointer to argument block to _dl_start.  */\n\
	call	_dl_start\n\
	add	%sp, 22*4, %o0\n\
	/* FALTHRU */\n\
	.globl	_dl_start_user\n\
	.type	_dl_start_user, @function\n\
_dl_start_user:\n\
  /* Load the PIC register.  */\n\
1:	call	2f\n\
	sethi	%hi(_GLOBAL_OFFSET_TABLE_-(1b-.)), %l7\n\
2:	or	%l7, %lo(_GLOBAL_OFFSET_TABLE_-(1b-.)), %l7\n\
	add	%l7, %o7, %l7\n\
  /* Save the user entry point address in %l0.  */\n\
	mov	%o0, %l0\n\
  /* See if we were run as a command with the executable file name as an\n\
     extra leading argument.  If so, adjust the contents of the stack.  */\n\
	sethi	%hi(_dl_skip_args), %g2\n\
	or	%g2, %lo(_dl_skip_args), %g2\n\
	ld	[%l7+%g2], %i0\n\
	ld	[%i0], %i0\n\
	tst	%i0\n\
	beq	3f\n\
	ld	[%sp+22*4], %i5		/* load argc */\n\
  /* Find out how far to shift.  */\n\
	sethi	%hi(_dl_argv), %l3\n\
	or	%l3, %lo(_dl_argv), %l3\n\
	ld	[%l7+%l3], %l3\n\
	sub	%i5, %i0, %i5\n\
	ld	[%l3], %l4\n\
	sll	%i0, 2, %i2\n\
	st	%i5, [%sp+22*4]\n\
	sub	%l4, %i2, %l4\n\
	add	%sp, 23*4, %i1\n\
	add	%i1, %i2, %i2\n\
	st	%l4, [%l3]\n\
  /* Copy down argv.  */\n\
21:	ld	[%i2], %i3\n\
	add	%i2, 4, %i2\n\
	tst	%i3\n\
	st	%i3, [%i1]\n\
	bne	21b\n\
	add	%i1, 4, %i1\n\
  /* Copy down env.  */\n\
22:	ld	[%i2], %i3\n\
	add	%i2, 4, %i2\n\
	tst	%i3\n\
	st	%i3, [%i1]\n\
	bne	22b\n\
	add	%i1, 4, %i1\n\
  /* Copy down auxiliary table.  */\n\
23:	ld	[%i2], %i3\n\
	ld	[%i2+4], %i4\n\
	add	%i2, 8, %i2\n\
	tst	%i3\n\
	st	%i3, [%i1]\n\
	st	%i4, [%i1+4]\n\
	bne	23b\n\
	add	%i1, 8, %i1\n\
  /* %o0 = _dl_loaded, %o1 = argc, %o2 = argv, %o3 = envp.  */\n\
3:	sethi	%hi(_rtld_local), %o0\n\
	add	%sp, 23*4, %o2\n\
	orcc	%o0, %lo(_rtld_local), %o0\n\
	sll	%i5, 2, %o3\n\
	ld	[%l7+%o0], %o0\n\
	add	%o3, 4, %o3\n\
	mov	%i5, %o1\n\
	add	%o2, %o3, %o3\n\
	call	_dl_init_internal\n\
	ld	[%o0], %o0\n\
  /* Pass our finalizer function to the user in %g1.  */\n\
	sethi	%hi(_dl_fini), %g1\n\
	or	%g1, %lo(_dl_fini), %g1\n\
	ld	[%l7+%g1], %g1\n\
  /* Jump to the user's entry point and deallocate the extra stack we got.  */\n\
	jmp	%l0\n\
	add	%sp, 6*4, %sp\n\
	.size	_dl_start_user, . - _dl_start_user\n\
	.previous");

static inline Elf32_Addr
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
                       const Elf32_Rela *reloc,
                       Elf32_Addr *reloc_addr, Elf32_Addr value)
{
#ifdef __sparc_v9__
  /* Sparc v9 can assume flush is always present.  */
  const int do_flush = 1;
#else
  /* Note that we don't mask the hwcap here, as the flush is essential to
     functionality on those cpu's that implement it.  */
  const int do_flush = GLRO(dl_hwcap) & HWCAP_SPARC_FLUSH;
#endif
  return sparc_fixup_plt (reloc, reloc_addr, value, 1, do_flush);
}
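
/* sparc_fixup_plt is passed 1 as its fourth argument here, in contrast to
   the R_SPARC_JMP_SLOT case in elf_machine_rela below, which passes 0 and
   notes that thread safety is not a concern at that point so the first
   instruction of the .plt entry can be optimized out; here, during lazy
   resolution, that shortcut is presumably not safe.  */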

/* Return the final value of a plt relocation.  */
static inline Elf32_Addr
elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
                       Elf32_Addr value)
{
  return value + reloc->r_addend;
}

#endif /* dl_machine_h */

#define ARCH_LA_PLTENTER sparc32_gnu_pltenter
#define ARCH_LA_PLTEXIT  sparc32_gnu_pltexit

#ifdef RESOLVE_MAP

/* Perform the relocation specified by RELOC and SYM (which is fully resolved).
   MAP is the object containing the reloc.  */

auto inline void
__attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, const Elf32_Rela *reloc,
                  const Elf32_Sym *sym, const struct r_found_version *version,
                  void *const reloc_addr_arg)
{
  Elf32_Addr *const reloc_addr = reloc_addr_arg;
  const Elf32_Sym *const refsym = sym;
  Elf32_Addr value;
  const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
  struct link_map *sym_map = NULL;

#if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
  /* This is defined in rtld.c, but nowhere in the static libc.a; make the
     reference weak so static programs can still link.  This declaration
     cannot be done when compiling rtld.c (i.e. #ifdef RTLD_BOOTSTRAP)
     because rtld.c contains the common defn for _dl_rtld_map, which is
     incompatible with a weak decl in the same file.  */
  weak_extern (_dl_rtld_map);
#endif

  if (__builtin_expect (r_type == R_SPARC_NONE, 0))
    return;

#if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC
  if (__builtin_expect (r_type == R_SPARC_RELATIVE, 0))
    {
# if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
      if (map != &_dl_rtld_map) /* Already done in rtld itself.  */
# endif
        *reloc_addr += map->l_addr + reloc->r_addend;
      return;
    }
#endif

#ifndef RESOLVE_CONFLICT_FIND_MAP
  if (__builtin_expect (ELF32_ST_BIND (sym->st_info) == STB_LOCAL, 0)
      && sym->st_shndx != SHN_UNDEF)
    {
      value = map->l_addr;
    }
  else
    {
      sym_map = RESOLVE_MAP (&sym, version, r_type);
      value = sym_map == NULL ? 0 : sym_map->l_addr + sym->st_value;
    }
#else
  value = 0;
#endif

  value += reloc->r_addend;	/* Assume copy relocs have zero addend.  */
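
  /* If the resolved symbol is an STT_GNU_IFUNC, `value' at this point is
     the address of its resolver function; calling it yields the address
     the relocation should actually use.  */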
  if (sym != NULL
      && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0)
      && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1))
    {
      value = ((Elf32_Addr (*) (void)) value) ();
    }

  switch (r_type)
    {
#if !defined RTLD_BOOTSTRAP && !defined RESOLVE_CONFLICT_FIND_MAP
    case R_SPARC_COPY:
      if (sym == NULL)
        /* This can happen in trace mode if an object could not be
           found.  */
        break;
      if (sym->st_size > refsym->st_size
          || (GLRO(dl_verbose) && sym->st_size < refsym->st_size))
        {
          const char *strtab;

          strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
          _dl_error_printf ("\
%s: Symbol `%s' has different size in shared object, consider re-linking\n",
                            rtld_progname ?: "<program name unknown>",
                            strtab + refsym->st_name);
        }
      memcpy (reloc_addr_arg, (void *) value,
              MIN (sym->st_size, refsym->st_size));
      break;
#endif
    case R_SPARC_GLOB_DAT:
    case R_SPARC_32:
      *reloc_addr = value;
      break;
    case R_SPARC_IRELATIVE:
      value = ((Elf32_Addr (*) (void)) value) ();
      *reloc_addr = value;
      break;
    case R_SPARC_JMP_IREL:
      value = ((Elf32_Addr (*) (void)) value) ();
      /* Fall thru */
    case R_SPARC_JMP_SLOT:
      {
#if !defined RTLD_BOOTSTRAP && !defined __sparc_v9__
        /* Note that we don't mask the hwcap here, as the flush is
           essential to functionality on those cpu's that implement
           it.  For sparcv9 we can assume flush is present.  */
        const int do_flush = GLRO(dl_hwcap) & HWCAP_SPARC_FLUSH;
#else
        /* Unfortunately, this is necessary, so that we can ensure
           ld.so will not execute corrupt PLT entry instructions.  */
        const int do_flush = 1;
#endif
        /* At this point we don't need to bother with thread safety,
           so we can optimize the first instruction of .plt out.  */
        sparc_fixup_plt (reloc, reloc_addr, value, 0, do_flush);
      }
      break;
#if (!defined RTLD_BOOTSTRAP || USE___THREAD) \
    && !defined RESOLVE_CONFLICT_FIND_MAP
    case R_SPARC_TLS_DTPMOD32:
      /* Get the information from the link map returned by the
         resolver function.  */
      if (sym_map != NULL)
        *reloc_addr = sym_map->l_tls_modid;
      break;
    case R_SPARC_TLS_DTPOFF32:
      /* During relocation all TLS symbols are defined and used.
         Therefore the offset is already correct.  */
      *reloc_addr = (sym == NULL ? 0 : sym->st_value) + reloc->r_addend;
      break;
    case R_SPARC_TLS_TPOFF32:
      /* The offset is negative, forward from the thread pointer.
         We know the offset of the object the symbol is contained in.
         It is a negative value which will be added to the
         thread pointer.  */
      if (sym != NULL)
        {
          CHECK_STATIC_TLS (map, sym_map);
          *reloc_addr = sym->st_value - sym_map->l_tls_offset
                        + reloc->r_addend;
        }
      break;
# ifndef RTLD_BOOTSTRAP
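    /* The next two relocations patch the usual local-exec TLS code
       sequence (`sethi %tle_hix22(sym), %reg; xor %reg, %tle_lox10(sym),
       %reg'): HIX22 stores the complemented high 22 bits of the negative
       TP offset and LOX10 stores the low 10 bits together with 0x1c00, so
       that the xor reconstructs the sign-extended offset.  */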
    case R_SPARC_TLS_LE_HIX22:
    case R_SPARC_TLS_LE_LOX10:
      if (sym != NULL)
        {
          CHECK_STATIC_TLS (map, sym_map);
          value = sym->st_value - sym_map->l_tls_offset
                  + reloc->r_addend;
          if (r_type == R_SPARC_TLS_LE_HIX22)
            *reloc_addr = (*reloc_addr & 0xffc00000) | ((~value) >> 10);
          else
            *reloc_addr = (*reloc_addr & 0xffffe000) | (value & 0x3ff)
                          | 0x1c00;
        }
      break;
# endif
#endif
#ifndef RTLD_BOOTSTRAP
    case R_SPARC_8:
      *(char *) reloc_addr = value;
      break;
    case R_SPARC_16:
      *(short *) reloc_addr = value;
      break;
    case R_SPARC_DISP8:
      *(char *) reloc_addr = (value - (Elf32_Addr) reloc_addr);
      break;
    case R_SPARC_DISP16:
      *(short *) reloc_addr = (value - (Elf32_Addr) reloc_addr);
      break;
    case R_SPARC_DISP32:
      *reloc_addr = (value - (Elf32_Addr) reloc_addr);
      break;
    case R_SPARC_LO10:
      *reloc_addr = (*reloc_addr & ~0x3ff) | (value & 0x3ff);
      break;
    case R_SPARC_WDISP30:
      *reloc_addr = ((*reloc_addr & 0xc0000000)
                     | ((value - (unsigned int) reloc_addr) >> 2));
      break;
    case R_SPARC_HI22:
      *reloc_addr = (*reloc_addr & 0xffc00000) | (value >> 10);
      break;
    case R_SPARC_UA16:
      ((unsigned char *) reloc_addr_arg) [0] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [1] = value;
      break;
    case R_SPARC_UA32:
      ((unsigned char *) reloc_addr_arg) [0] = value >> 24;
      ((unsigned char *) reloc_addr_arg) [1] = value >> 16;
      ((unsigned char *) reloc_addr_arg) [2] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [3] = value;
      break;
#endif
#if !defined RTLD_BOOTSTRAP || defined _NDEBUG
    default:
      _dl_reloc_bad_type (map, r_type, 0);
      break;
#endif
    }
}

auto inline void
__attribute__ ((always_inline))
elf_machine_rela_relative (Elf32_Addr l_addr, const Elf32_Rela *reloc,
                           void *const reloc_addr_arg)
{
  Elf32_Addr *const reloc_addr = reloc_addr_arg;
  *reloc_addr += l_addr + reloc->r_addend;
}

auto inline void
__attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map,
                      Elf32_Addr l_addr, const Elf32_Rela *reloc)
{
  Elf32_Addr *const reloc_addr = (void *) (l_addr + reloc->r_offset);
  const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
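
  /* R_SPARC_JMP_SLOT needs no work here: the PLT header written in
     elf_machine_runtime_setup already routes unresolved entries through
     the lazy resolver.  Only the ifunc variant must be resolved eagerly.  */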
  if (__builtin_expect (r_type == R_SPARC_JMP_SLOT, 1))
    ;
  else if (r_type == R_SPARC_JMP_IREL)
    {
      Elf32_Addr value = map->l_addr + reloc->r_addend;
      value = ((Elf32_Addr (*) (void)) value) ();
      sparc_fixup_plt (reloc, reloc_addr, value, 1, 1);
    }
  else if (r_type == R_SPARC_NONE)
    ;
  else
    _dl_reloc_bad_type (map, r_type, 1);
}

#endif	/* RESOLVE_MAP */