Use libc_hidden_* for fputs (bug 15105).
[glibc.git] / sysdeps / aarch64 / dl-machine.h
blob7ce3c8eb8bd0a3e4944d266ee12ead6c81deeb00
1 /* Copyright (C) 1995-2018 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public License as
7 published by the Free Software Foundation; either version 2.1 of the
8 License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
19 #ifndef dl_machine_h
20 #define dl_machine_h
22 #define ELF_MACHINE_NAME "aarch64"
24 #include <sysdep.h>
25 #include <tls.h>
26 #include <dl-tlsdesc.h>
27 #include <dl-irel.h>
28 #include <cpu-features.c>
30 /* Return nonzero iff ELF header is compatible with the running host. */
31 static inline int __attribute__ ((unused))
32 elf_machine_matches_host (const ElfW(Ehdr) *ehdr)
34 return ehdr->e_machine == EM_AARCH64;
37 /* Return the link-time address of _DYNAMIC. Conveniently, this is the
38 first element of the GOT. */
39 static inline ElfW(Addr) __attribute__ ((unused))
40 elf_machine_dynamic (void)
42 extern const ElfW(Addr) _GLOBAL_OFFSET_TABLE_[] attribute_hidden;
43 return _GLOBAL_OFFSET_TABLE_[0];
46 /* Return the run-time load address of the shared object. */
48 static inline ElfW(Addr) __attribute__ ((unused))
49 elf_machine_load_address (void)
51 /* To figure out the load address we use the definition that for any symbol:
52 dynamic_addr(symbol) = static_addr(symbol) + load_addr
54 _DYNAMIC sysmbol is used here as its link-time address stored in
55 the special unrelocated first GOT entry. */
57 extern ElfW(Dyn) _DYNAMIC[] attribute_hidden;
58 return (ElfW(Addr)) &_DYNAMIC - elf_machine_dynamic ();
61 /* Set up the loaded object described by L so its unrelocated PLT
62 entries will jump to the on-demand fixup code in dl-runtime.c. */
64 static inline int __attribute__ ((unused))
65 elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
67 if (l->l_info[DT_JMPREL] && lazy)
69 ElfW(Addr) *got;
70 extern void _dl_runtime_resolve (ElfW(Word));
71 extern void _dl_runtime_profile (ElfW(Word));
73 got = (ElfW(Addr) *) D_PTR (l, l_info[DT_PLTGOT]);
74 if (got[1])
76 l->l_mach.plt = got[1] + l->l_addr;
78 got[1] = (ElfW(Addr)) l;
80 /* The got[2] entry contains the address of a function which gets
81 called to get the address of a so far unresolved function and
82 jump to it. The profiling extension of the dynamic linker allows
83 to intercept the calls to collect information. In this case we
84 don't store the address in the GOT so that all future calls also
85 end in this function. */
86 if ( profile)
88 got[2] = (ElfW(Addr)) &_dl_runtime_profile;
90 if (GLRO(dl_profile) != NULL
91 && _dl_name_match_p (GLRO(dl_profile), l))
92 /* Say that we really want profiling and the timers are
93 started. */
94 GL(dl_profile_map) = l;
96 else
98 /* This function will get called to fix up the GOT entry
99 indicated by the offset on the stack, and then jump to
100 the resolved address. */
101 got[2] = (ElfW(Addr)) &_dl_runtime_resolve;
105 return lazy;
108 /* Initial entry point for the dynamic linker. The C function
109 _dl_start is the real entry point, its return value is the user
110 program's entry point */
/* Select the pointer register prefix, the log2 of the pointer size and
   the stack-pointer register name for the ILP32 vs LP64 ABI variants.
   NOTE(review): the asm template below is reproduced byte-for-byte from
   the original; only the closing ""); terminator (lost in extraction)
   has been restored.  */
#ifdef __LP64__
# define RTLD_START RTLD_START_1 ("x", "3", "sp")
#else
# define RTLD_START RTLD_START_1 ("w", "2", "wsp")
#endif


#define RTLD_START_1(PTR, PTR_SIZE_LOG, PTR_SP) asm ("\
.text								\n\
.globl _start							\n\
.type _start, %function						\n\
.globl _dl_start_user						\n\
.type _dl_start_user, %function					\n\
_start:								\n\
	mov	" PTR "0, " PTR_SP "				\n\
	bl	_dl_start					\n\
	// returns user entry point in x0			\n\
	mov	x21, x0						\n\
_dl_start_user:							\n\
	// get the original arg count				\n\
	ldr	" PTR "1, [sp]					\n\
	// get the argv address					\n\
	add	" PTR "2, " PTR_SP ", #(1<<" PTR_SIZE_LOG ")	\n\
	// get _dl_skip_args to see if we were			\n\
	// invoked as an executable				\n\
	adrp	x4, _dl_skip_args				\n\
	ldr	w4, [x4, #:lo12:_dl_skip_args]			\n\
	// do we need to adjust argc/argv			\n\
	cmp	w4, 0						\n\
	beq	.L_done_stack_adjust				\n\
	// subtract _dl_skip_args from original arg count	\n\
	sub	" PTR "1, " PTR "1, " PTR "4			\n\
	// store adjusted argc back to stack			\n\
	str	" PTR "1, [sp]					\n\
	// find the first unskipped argument			\n\
	mov	" PTR "3, " PTR "2				\n\
	add	" PTR "4, " PTR "2, " PTR "4, lsl #" PTR_SIZE_LOG " \n\
	// shuffle argv down					\n\
1:	ldr	" PTR "5, [x4], #(1<<" PTR_SIZE_LOG ")		\n\
	str	" PTR "5, [x3], #(1<<" PTR_SIZE_LOG ")		\n\
	cmp	" PTR "5, #0					\n\
	bne	1b						\n\
	// shuffle envp down					\n\
1:	ldr	" PTR "5, [x4], #(1<<" PTR_SIZE_LOG ")		\n\
	str	" PTR "5, [x3], #(1<<" PTR_SIZE_LOG ")		\n\
	cmp	" PTR "5, #0					\n\
	bne	1b						\n\
	// shuffle auxv down					\n\
1:	ldp	" PTR "0, " PTR "5, [x4, #(2<<" PTR_SIZE_LOG ")]! \n\
	stp	" PTR "0, " PTR "5, [x3], #(2<<" PTR_SIZE_LOG ")	\n\
	cmp	" PTR "0, #0					\n\
	bne	1b						\n\
	// Update _dl_argv					\n\
	adrp	x3, __GI__dl_argv				\n\
	str	" PTR "2, [x3, #:lo12:__GI__dl_argv]		\n\
.L_done_stack_adjust:						\n\
	// compute envp						\n\
	add	" PTR "3, " PTR "2, " PTR "1, lsl #" PTR_SIZE_LOG " \n\
	add	" PTR "3, " PTR "3, #(1<<" PTR_SIZE_LOG ")	\n\
	adrp	x16, _rtld_local				\n\
	add	" PTR "16, " PTR "16, #:lo12:_rtld_local	\n\
	ldr	" PTR "0, [x16]					\n\
	bl	_dl_init					\n\
	// load the finalizer function				\n\
	adrp	x0, _dl_fini					\n\
	add	" PTR "0, " PTR "0, #:lo12:_dl_fini		\n\
	// jump to the user_s entry point			\n\
	br	x21						\n\
");
/* Classify relocation TYPE for the runtime lookup code: PLT-class types
   (jump slots and all TLS relocations) may be deferred / skipped in
   LD_BIND_NOW handling, COPY relocations get the copy-reloc treatment,
   and GLOB_DAT participates in extern-protected-data handling.  */
#define elf_machine_type_class(type) \
  ((((type) == AARCH64_R(JUMP_SLOT)				\
     || (type) == AARCH64_R(TLS_DTPMOD)				\
     || (type) == AARCH64_R(TLS_DTPREL)				\
     || (type) == AARCH64_R(TLS_TPREL)				\
     || (type) == AARCH64_R(TLSDESC)) * ELF_RTYPE_CLASS_PLT)	\
   | (((type) == AARCH64_R(COPY)) * ELF_RTYPE_CLASS_COPY)	\
   | (((type) == AARCH64_R(GLOB_DAT)) * ELF_RTYPE_CLASS_EXTERN_PROTECTED_DATA))

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT	AARCH64_R(JUMP_SLOT)

/* AArch64 uses RELA not REL.  */
#define ELF_MACHINE_NO_REL 1
#define ELF_MACHINE_NO_RELA 0
196 #define DL_PLATFORM_INIT dl_platform_init ()
198 static inline void __attribute__ ((unused))
199 dl_platform_init (void)
201 if (GLRO(dl_platform) != NULL && *GLRO(dl_platform) == '\0')
202 /* Avoid an empty string which would disturb us. */
203 GLRO(dl_platform) = NULL;
205 #ifdef SHARED
206 /* init_cpu_features has been called early from __libc_start_main in
207 static executable. */
208 init_cpu_features (&GLRO(dl_aarch64_cpu_features));
209 #endif
213 static inline ElfW(Addr)
214 elf_machine_fixup_plt (struct link_map *map, lookup_t t,
215 const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
216 const ElfW(Rela) *reloc,
217 ElfW(Addr) *reloc_addr,
218 ElfW(Addr) value)
220 return *reloc_addr = value;
223 /* Return the final value of a plt relocation. */
224 static inline ElfW(Addr)
225 elf_machine_plt_value (struct link_map *map,
226 const ElfW(Rela) *reloc,
227 ElfW(Addr) value)
229 return value;
232 #endif
234 /* Names of the architecture-specific auditing callback functions. */
235 #define ARCH_LA_PLTENTER aarch64_gnu_pltenter
236 #define ARCH_LA_PLTEXIT aarch64_gnu_pltexit
238 #ifdef RESOLVE_MAP
240 auto inline void
241 __attribute__ ((always_inline))
242 elf_machine_rela (struct link_map *map, const ElfW(Rela) *reloc,
243 const ElfW(Sym) *sym, const struct r_found_version *version,
244 void *const reloc_addr_arg, int skip_ifunc)
246 ElfW(Addr) *const reloc_addr = reloc_addr_arg;
247 const unsigned int r_type = ELFW (R_TYPE) (reloc->r_info);
249 if (__builtin_expect (r_type == AARCH64_R(RELATIVE), 0))
250 *reloc_addr = map->l_addr + reloc->r_addend;
251 else if (__builtin_expect (r_type == R_AARCH64_NONE, 0))
252 return;
253 else
255 const ElfW(Sym) *const refsym = sym;
256 struct link_map *sym_map = RESOLVE_MAP (&sym, version, r_type);
257 ElfW(Addr) value = sym_map == NULL ? 0 : sym_map->l_addr + sym->st_value;
259 if (sym != NULL
260 && __glibc_unlikely (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC)
261 && __glibc_likely (sym->st_shndx != SHN_UNDEF)
262 && __glibc_likely (!skip_ifunc))
263 value = elf_ifunc_invoke (value);
265 switch (r_type)
267 case AARCH64_R(COPY):
268 if (sym == NULL)
269 break;
271 if (sym->st_size > refsym->st_size
272 || (GLRO(dl_verbose) && sym->st_size < refsym->st_size))
274 const char *strtab;
276 strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
277 _dl_error_printf ("\
278 %s: Symbol `%s' has different size in shared object, consider re-linking\n",
279 RTLD_PROGNAME, strtab + refsym->st_name);
281 memcpy (reloc_addr_arg, (void *) value,
282 sym->st_size < refsym->st_size
283 ? sym->st_size : refsym->st_size);
284 break;
286 case AARCH64_R(RELATIVE):
287 case AARCH64_R(GLOB_DAT):
288 case AARCH64_R(JUMP_SLOT):
289 case AARCH64_R(ABS32):
290 #ifdef __LP64__
291 case AARCH64_R(ABS64):
292 #endif
293 *reloc_addr = value + reloc->r_addend;
294 break;
296 case AARCH64_R(TLSDESC):
298 struct tlsdesc volatile *td =
299 (struct tlsdesc volatile *)reloc_addr;
300 #ifndef RTLD_BOOTSTRAP
301 if (! sym)
303 td->arg = (void*)reloc->r_addend;
304 td->entry = _dl_tlsdesc_undefweak;
306 else
307 #endif
309 #ifndef RTLD_BOOTSTRAP
310 # ifndef SHARED
311 CHECK_STATIC_TLS (map, sym_map);
312 # else
313 if (!TRY_STATIC_TLS (map, sym_map))
315 td->arg = _dl_make_tlsdesc_dynamic
316 (sym_map, sym->st_value + reloc->r_addend);
317 td->entry = _dl_tlsdesc_dynamic;
319 else
320 # endif
321 #endif
323 td->arg = (void*)(sym->st_value + sym_map->l_tls_offset
324 + reloc->r_addend);
325 td->entry = _dl_tlsdesc_return;
328 break;
331 case AARCH64_R(TLS_DTPMOD):
332 #ifdef RTLD_BOOTSTRAP
333 *reloc_addr = 1;
334 #else
335 if (sym_map != NULL)
337 *reloc_addr = sym_map->l_tls_modid;
339 #endif
340 break;
342 case AARCH64_R(TLS_DTPREL):
343 if (sym)
344 *reloc_addr = sym->st_value + reloc->r_addend;
345 break;
347 case AARCH64_R(TLS_TPREL):
348 if (sym)
350 CHECK_STATIC_TLS (map, sym_map);
351 *reloc_addr =
352 sym->st_value + reloc->r_addend + sym_map->l_tls_offset;
354 break;
356 case AARCH64_R(IRELATIVE):
357 value = map->l_addr + reloc->r_addend;
358 value = elf_ifunc_invoke (value);
359 *reloc_addr = value;
360 break;
362 default:
363 _dl_reloc_bad_type (map, r_type, 0);
364 break;
369 inline void
370 __attribute__ ((always_inline))
371 elf_machine_rela_relative (ElfW(Addr) l_addr,
372 const ElfW(Rela) *reloc,
373 void *const reloc_addr_arg)
375 ElfW(Addr) *const reloc_addr = reloc_addr_arg;
376 *reloc_addr = l_addr + reloc->r_addend;
379 inline void
380 __attribute__ ((always_inline))
381 elf_machine_lazy_rel (struct link_map *map,
382 ElfW(Addr) l_addr,
383 const ElfW(Rela) *reloc,
384 int skip_ifunc)
386 ElfW(Addr) *const reloc_addr = (void *) (l_addr + reloc->r_offset);
387 const unsigned int r_type = ELFW (R_TYPE) (reloc->r_info);
388 /* Check for unexpected PLT reloc type. */
389 if (__builtin_expect (r_type == AARCH64_R(JUMP_SLOT), 1))
391 if (__builtin_expect (map->l_mach.plt, 0) == 0)
392 *reloc_addr += l_addr;
393 else
394 *reloc_addr = map->l_mach.plt;
396 else if (__builtin_expect (r_type == AARCH64_R(TLSDESC), 1))
398 const Elf_Symndx symndx = ELFW (R_SYM) (reloc->r_info);
399 const ElfW (Sym) *symtab = (const void *)D_PTR (map, l_info[DT_SYMTAB]);
400 const ElfW (Sym) *sym = &symtab[symndx];
401 const struct r_found_version *version = NULL;
403 if (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL)
405 const ElfW (Half) *vernum =
406 (const void *)D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
407 version = &map->l_versions[vernum[symndx] & 0x7fff];
410 /* Always initialize TLS descriptors completely, because lazy
411 initialization requires synchronization at every TLS access. */
412 elf_machine_rela (map, reloc, sym, version, reloc_addr, skip_ifunc);
414 else if (__glibc_unlikely (r_type == AARCH64_R(IRELATIVE)))
416 ElfW(Addr) value = map->l_addr + reloc->r_addend;
417 if (__glibc_likely (!skip_ifunc))
418 value = elf_ifunc_invoke (value);
419 *reloc_addr = value;
421 else
422 _dl_reloc_bad_type (map, r_type, 1);
425 #endif