Don't use INTDEF/INTUSE with _dl_init (bug 14132).
[glibc.git] / sysdeps / aarch64 / dl-machine.h
/* Copyright (C) 1995-2014 Free Software Foundation, Inc.

   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "aarch64"

#include <tls.h>
#include <dl-tlsdesc.h>
#include <dl-irel.h>

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int __attribute__ ((unused))
elf_machine_matches_host (const ElfW(Ehdr) *ehdr)
{
  return ehdr->e_machine == EM_AARCH64;
}

/* Return the link-time address of _DYNAMIC.  Conveniently, this is the
   first element of the GOT.  */
static inline ElfW(Addr) __attribute__ ((unused))
elf_machine_dynamic (void)
{
  extern const ElfW(Addr) _GLOBAL_OFFSET_TABLE_[] attribute_hidden;
  return _GLOBAL_OFFSET_TABLE_[0];
}

/* Return the run-time load address of the shared object.  */

static inline ElfW(Addr) __attribute__ ((unused))
elf_machine_load_address (void)
{
  /* To figure out the load address we use the definition that for any symbol:

       dynamic_addr (symbol) = static_addr (symbol) + load_addr

     The choice of symbol is arbitrary.  The static (link-time) address
     is obtained from a non-GOT reference to the symbol; the dynamic
     (run-time) address is computed with an adrp/add pair, which yields
     the symbol's address relative to the PC.  */

  ElfW(Addr) static_addr;
  ElfW(Addr) dynamic_addr;

  asm ("				\n\
	adrp	%1, _dl_start;		\n\
	add	%1, %1, #:lo12:_dl_start \n\
	ldr	%w0, 1f			\n\
	b	2f			\n\
1:	.word	_dl_start		\n\
2:					\n\
       " : "=r" (static_addr), "=r" (dynamic_addr));
  return dynamic_addr - static_addr;
}

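/* A worked example of the computation above, with made-up addresses for
   illustration only (they are not taken from a real link): if _dl_start
   has link-time address 0x1040 and the PC-relative adrp/add sequence
   observes it at 0x5500001040 once the dynamic linker has been mapped,
   then

     load_addr = dynamic_addr - static_addr
               = 0x5500001040 - 0x1040
               = 0x5500000000

   which is the offset by which every address in the object has been
   relocated.  */
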
/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */

static inline int __attribute__ ((unused))
elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
{
  if (l->l_info[DT_JMPREL] && lazy)
    {
      ElfW(Addr) *got;
      extern void _dl_runtime_resolve (ElfW(Word));
      extern void _dl_runtime_profile (ElfW(Word));

      got = (ElfW(Addr) *) D_PTR (l, l_info[DT_PLTGOT]);
      if (got[1])
        {
          l->l_mach.plt = got[1] + l->l_addr;
        }
      got[1] = (ElfW(Addr)) l;

      /* The got[2] entry contains the address of a function which gets
         called to get the address of a so far unresolved function and
         jump to it.  The profiling extension of the dynamic linker
         allows it to intercept these calls to collect information.
         In that case we don't store the resolved address in the GOT, so
         that all future calls also end up in this function.  */
      if (profile)
        {
          got[2] = (ElfW(Addr)) &_dl_runtime_profile;

          if (GLRO(dl_profile) != NULL
              && _dl_name_match_p (GLRO(dl_profile), l))
            /* Say that we really want profiling and the timers are
               started.  */
            GL(dl_profile_map) = l;
        }
      else
        {
          /* This function will get called to fix up the GOT entry
             indicated by the offset on the stack, and then jump to
             the resolved address.  */
          got[2] = (ElfW(Addr)) &_dl_runtime_resolve;
        }
    }

  if (l->l_info[ADDRIDX (DT_TLSDESC_GOT)] && lazy)
    *(Elf64_Addr *) (D_PTR (l, l_info[ADDRIDX (DT_TLSDESC_GOT)]) + l->l_addr)
      = (Elf64_Addr) &_dl_tlsdesc_resolve_rela;

  return lazy;
}

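/* A rough sketch of how the reserved GOT entries set up above are used
   (the PLT header that consumes them is emitted by the static linker,
   not by this file, so the exact instruction sequence may vary):

     got[0]  link-time address of _DYNAMIC (see elf_machine_dynamic)
     got[1]  struct link_map * of this object   (stored above)
     got[2]  &_dl_runtime_resolve or &_dl_runtime_profile  (stored above)

   On the first call through a still-unresolved PLT entry, the PLT header
   loads got[2] and branches to it, passing enough state for the resolver
   (sysdeps/aarch64/dl-trampoline.S together with the fixup code in
   dl-runtime.c) to recover the link map from got[1], resolve the symbol,
   patch the GOT slot, and tail-call the real function.  */
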
/* Initial entry point for the dynamic linker.  The C function
   _dl_start is the real entry point; its return value is the user
   program's entry point.  */

#define RTLD_START asm ("\
.text								\n\
.globl _start							\n\
.type _start, %function						\n\
.globl _dl_start_user						\n\
.type _dl_start_user, %function					\n\
_start:								\n\
	mov	x0, sp						\n\
	bl	_dl_start					\n\
	// returns user entry point in x0			\n\
	mov	x21, x0						\n\
_dl_start_user:							\n\
	// get the original arg count				\n\
	ldr	x1, [sp]					\n\
	// get the argv address					\n\
	add	x2, sp, #8					\n\
	// get _dl_skip_args to see if we were			\n\
	// invoked as an executable				\n\
	adrp	x4, _dl_skip_args				\n\
	ldr	w4, [x4, #:lo12:_dl_skip_args]			\n\
	// do we need to adjust argc/argv			\n\
	cmp	w4, 0						\n\
	beq	.L_done_stack_adjust				\n\
	// subtract _dl_skip_args from original arg count	\n\
	sub	x1, x1, x4					\n\
	// store adjusted argc back to stack			\n\
	str	x1, [sp]					\n\
	// find the first unskipped argument			\n\
	mov	x3, x2						\n\
	add	x4, x2, x4, lsl #3				\n\
	// shuffle argv down					\n\
1:	ldr	x5, [x4], #8					\n\
	str	x5, [x3], #8					\n\
	cmp	x5, #0						\n\
	bne	1b						\n\
	// shuffle envp down					\n\
1:	ldr	x5, [x4], #8					\n\
	str	x5, [x3], #8					\n\
	cmp	x5, #0						\n\
	bne	1b						\n\
	// shuffle auxv down					\n\
1:	ldp	x0, x5, [x4, #16]!				\n\
	stp	x0, x5, [x3], #16				\n\
	cmp	x0, #0						\n\
	bne	1b						\n\
	// Update _dl_argv					\n\
	adrp	x3, _dl_argv					\n\
	str	x2, [x3, #:lo12:_dl_argv]			\n\
.L_done_stack_adjust:						\n\
	// compute envp						\n\
	add	x3, x2, x1, lsl #3				\n\
	add	x3, x3, #8					\n\
	adrp	x16, _rtld_local				\n\
	add	x16, x16, #:lo12:_rtld_local			\n\
	ldr	x0, [x16]					\n\
	bl	_dl_init					\n\
	// load the finalizer function				\n\
	adrp	x0, _dl_fini					\n\
	add	x0, x0, #:lo12:_dl_fini				\n\
	// jump to the user entry point				\n\
	br	x21						\n\
");

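/* For reference, the initial stack as the kernel hands it to _start,
   which the code above reads and (when _dl_skip_args is nonzero) shifts
   down in place:

     sp + 0                argc
     sp + 8                argv[0] ... argv[argc - 1], NULL
     after argv's NULL     envp[0] ..., NULL
     after envp's NULL     auxv entries ({type, value} pairs), terminated
                           by an AT_NULL entry

   This layout is why argc is reloaded from [sp], argv starts at sp + 8,
   and envp is computed as argv + 8 * argc + 8 before the _dl_init call
   (x0 = main link map, x1 = argc, x2 = argv, x3 = envp).  */
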
#define elf_machine_type_class(type)					\
  ((((type) == R_AARCH64_JUMP_SLOT ||					\
     (type) == R_AARCH64_TLS_DTPMOD64 ||				\
     (type) == R_AARCH64_TLS_DTPREL64 ||				\
     (type) == R_AARCH64_TLS_TPREL64 ||					\
     (type) == R_AARCH64_TLSDESC) * ELF_RTYPE_CLASS_PLT)		\
   | (((type) == R_AARCH64_COPY) * ELF_RTYPE_CLASS_COPY))
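/* Worked examples of the classification above:

     elf_machine_type_class (R_AARCH64_JUMP_SLOT) == ELF_RTYPE_CLASS_PLT
     elf_machine_type_class (R_AARCH64_COPY)      == ELF_RTYPE_CLASS_COPY
     elf_machine_type_class (R_AARCH64_ABS64)     == 0

   Roughly, the PLT/TLS relocation types and the COPY type are flagged so
   that the symbol-lookup code in elf/dl-lookup.c can apply the special
   resolution rules attached to those class bits.  */
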
#define ELF_MACHINE_JMP_SLOT	R_AARCH64_JUMP_SLOT

/* AArch64 uses RELA not REL.  */
#define ELF_MACHINE_NO_REL 1
#define ELF_MACHINE_NO_RELA 0

static inline ElfW(Addr)
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
		       const ElfW(Rela) *reloc,
		       ElfW(Addr) *reloc_addr,
		       ElfW(Addr) value)
{
  return *reloc_addr = value;
}

/* Return the final value of a plt relocation.  */
static inline ElfW(Addr)
elf_machine_plt_value (struct link_map *map,
		       const ElfW(Rela) *reloc,
		       ElfW(Addr) value)
{
  return value;
}

#endif

/* Names of the architecture-specific auditing callback functions.  */
#define ARCH_LA_PLTENTER aarch64_gnu_pltenter
#define ARCH_LA_PLTEXIT  aarch64_gnu_pltexit

#ifdef RESOLVE_MAP

auto inline void
__attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, const ElfW(Rela) *reloc,
		  const ElfW(Sym) *sym, const struct r_found_version *version,
		  void *const reloc_addr_arg, int skip_ifunc)
{
  ElfW(Addr) *const reloc_addr = reloc_addr_arg;
  const unsigned int r_type = ELF64_R_TYPE (reloc->r_info);

  if (__builtin_expect (r_type == R_AARCH64_RELATIVE, 0))
    *reloc_addr = map->l_addr + reloc->r_addend;
  else if (__builtin_expect (r_type == R_AARCH64_NONE, 0))
    return;
  else
    {
      const ElfW(Sym) *const refsym = sym;
      struct link_map *sym_map = RESOLVE_MAP (&sym, version, r_type);
      ElfW(Addr) value = sym_map == NULL ? 0 : sym_map->l_addr + sym->st_value;

      if (sym != NULL
	  && __glibc_unlikely (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC)
	  && __glibc_likely (sym->st_shndx != SHN_UNDEF)
	  && __glibc_likely (!skip_ifunc))
	value = elf_ifunc_invoke (value);

      switch (r_type)
	{
	case R_AARCH64_COPY:
	  if (sym == NULL)
	    break;

	  if (sym->st_size > refsym->st_size
	      || (GLRO(dl_verbose) && sym->st_size < refsym->st_size))
	    {
	      const char *strtab;

	      strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
	      _dl_error_printf ("\
%s: Symbol `%s' has different size in shared object, consider re-linking\n",
				RTLD_PROGNAME, strtab + refsym->st_name);
	    }
	  memcpy (reloc_addr_arg, (void *) value,
		  MIN (sym->st_size, refsym->st_size));
	  break;

	case R_AARCH64_RELATIVE:
	case R_AARCH64_GLOB_DAT:
	case R_AARCH64_JUMP_SLOT:
	case R_AARCH64_ABS32:
	case R_AARCH64_ABS64:
	  *reloc_addr = value + reloc->r_addend;
	  break;

	case R_AARCH64_TLSDESC:
	  {
	    struct tlsdesc volatile *td =
	      (struct tlsdesc volatile *) reloc_addr;
#ifndef RTLD_BOOTSTRAP
	    if (! sym)
	      {
		td->arg = (void *) reloc->r_addend;
		td->entry = _dl_tlsdesc_undefweak;
	      }
	    else
#endif
	      {
#ifndef RTLD_BOOTSTRAP
# ifndef SHARED
		CHECK_STATIC_TLS (map, sym_map);
# else
		if (!TRY_STATIC_TLS (map, sym_map))
		  {
		    td->arg = _dl_make_tlsdesc_dynamic
		      (sym_map, sym->st_value + reloc->r_addend);
		    td->entry = _dl_tlsdesc_dynamic;
		  }
		else
# endif
#endif
		  {
		    td->arg = (void *) (sym->st_value + sym_map->l_tls_offset
					+ reloc->r_addend);
		    td->entry = _dl_tlsdesc_return;
		  }
	      }
	    break;
	  }

	case R_AARCH64_TLS_DTPMOD64:
#ifdef RTLD_BOOTSTRAP
	  *reloc_addr = 1;
#else
	  if (sym_map != NULL)
	    {
	      *reloc_addr = sym_map->l_tls_modid;
	    }
#endif
	  break;

	case R_AARCH64_TLS_DTPREL64:
	  if (sym)
	    *reloc_addr = sym->st_value + reloc->r_addend;
	  break;

	case R_AARCH64_TLS_TPREL64:
	  if (sym)
	    {
	      CHECK_STATIC_TLS (map, sym_map);
	      *reloc_addr =
		sym->st_value + reloc->r_addend + sym_map->l_tls_offset;
	    }
	  break;

	case R_AARCH64_IRELATIVE:
	  value = map->l_addr + reloc->r_addend;
	  value = elf_ifunc_invoke (value);
	  *reloc_addr = value;
	  break;

	default:
	  _dl_reloc_bad_type (map, r_type, 0);
	  break;
	}
    }
}

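/* Note on the TLSDESC handling above: a TLS descriptor (struct tlsdesc,
   declared in dl-tlsdesc.h, included at the top of this file) is a pair
   of GOT words, an entry-point pointer and an argument.  The relocation
   is resolved by choosing an entry point (_dl_tlsdesc_return for static
   TLS, _dl_tlsdesc_dynamic for dynamic TLS, _dl_tlsdesc_undefweak for an
   undefined weak symbol) and storing the matching argument; the TLSDESC
   call sequence in user code then calls td->entry with a pointer to the
   descriptor and adds the returned offset to the thread pointer.  */
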
inline void
__attribute__ ((always_inline))
elf_machine_rela_relative (ElfW(Addr) l_addr,
			   const ElfW(Rela) *reloc,
			   void *const reloc_addr_arg)
{
  ElfW(Addr) *const reloc_addr = reloc_addr_arg;
  *reloc_addr = l_addr + reloc->r_addend;
}

inline void
__attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map,
		      ElfW(Addr) l_addr,
		      const ElfW(Rela) *reloc,
		      int skip_ifunc)
{
  ElfW(Addr) *const reloc_addr = (void *) (l_addr + reloc->r_offset);
  const unsigned int r_type = ELF64_R_TYPE (reloc->r_info);
  /* Check for unexpected PLT reloc type.  */
  if (__builtin_expect (r_type == R_AARCH64_JUMP_SLOT, 1))
    {
      if (__builtin_expect (map->l_mach.plt, 0) == 0)
	*reloc_addr += l_addr;
      else
	*reloc_addr = map->l_mach.plt;
    }
  else if (__builtin_expect (r_type == R_AARCH64_TLSDESC, 1))
    {
      struct tlsdesc volatile *td =
	(struct tlsdesc volatile *) reloc_addr;

      td->arg = (void *) reloc;
      td->entry = (void *) (D_PTR (map, l_info[ADDRIDX (DT_TLSDESC_PLT)])
			    + map->l_addr);
    }
  else if (__glibc_unlikely (r_type == R_AARCH64_IRELATIVE))
    {
      ElfW(Addr) value = map->l_addr + reloc->r_addend;
      if (__glibc_likely (!skip_ifunc))
	value = elf_ifunc_invoke (value);
      *reloc_addr = value;
    }
  else
    _dl_reloc_bad_type (map, r_type, 1);
}

#endif