sysdeps/aarch64/dl-machine.h
/* Copyright (C) 1995-2015 Free Software Foundation, Inc.

   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "aarch64"

#include <tls.h>
#include <dl-tlsdesc.h>
#include <dl-irel.h>
/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int __attribute__ ((unused))
elf_machine_matches_host (const ElfW(Ehdr) *ehdr)
{
  return ehdr->e_machine == EM_AARCH64;
}
/* Return the link-time address of _DYNAMIC.  Conveniently, this is the
   first element of the GOT.  */
static inline ElfW(Addr) __attribute__ ((unused))
elf_machine_dynamic (void)
{
  extern const ElfW(Addr) _GLOBAL_OFFSET_TABLE_[] attribute_hidden;
  return _GLOBAL_OFFSET_TABLE_[0];
}
/* Return the run-time load address of the shared object.  */

static inline ElfW(Addr) __attribute__ ((unused))
elf_machine_load_address (void)
{
  /* To figure out the load address we use the definition that for any symbol:

       dynamic_addr (symbol) = static_addr (symbol) + load_addr

     The choice of symbol is arbitrary.  The static address we obtain
     by constructing a non-GOT reference to the symbol, the dynamic
     address of the symbol we compute using adrp/add to compute the
     symbol's address relative to the PC.
     This depends on 32-bit relocations being resolved at link time
     and that the static address fits in 32 bits.  */

  ElfW(Addr) static_addr;
  ElfW(Addr) dynamic_addr;

  asm ("					\n"
       "	adrp	%1, _dl_start;		\n"
       "	add	%1, %1, #:lo12:_dl_start \n"
       "	ldr	%w0, 1f			\n"
       "	b	2f			\n"
       "1:					\n"
       "	.word	_dl_start		\n"
       "2:					\n"
       : "=r" (static_addr), "=r" (dynamic_addr));
  return dynamic_addr - static_addr;
}
/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */

static inline int __attribute__ ((unused))
elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
{
  if (l->l_info[DT_JMPREL] && lazy)
    {
      ElfW(Addr) *got;
      extern void _dl_runtime_resolve (ElfW(Word));
      extern void _dl_runtime_profile (ElfW(Word));

      got = (ElfW(Addr) *) D_PTR (l, l_info[DT_PLTGOT]);
      if (got[1])
	{
	  l->l_mach.plt = got[1] + l->l_addr;
	}
      got[1] = (ElfW(Addr)) l;

      /* The got[2] entry contains the address of a function which gets
	 called to get the address of a so far unresolved function and
	 jump to it.  The profiling extension of the dynamic linker allows
	 these calls to be intercepted to collect information.  In this
	 case we don't store the address in the GOT so that all future
	 calls also end in this function.  */
      if (profile)
	{
	  got[2] = (ElfW(Addr)) &_dl_runtime_profile;

	  if (GLRO(dl_profile) != NULL
	      && _dl_name_match_p (GLRO(dl_profile), l))
	    /* Say that we really want profiling and the timers are
	       started.  */
	    GL(dl_profile_map) = l;
	}
      else
	{
	  /* This function will get called to fix up the GOT entry
	     indicated by the offset on the stack, and then jump to
	     the resolved address.  */
	  got[2] = (ElfW(Addr)) &_dl_runtime_resolve;
	}
    }
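
  /* If the object uses TLS descriptors and lazy binding is enabled,
     point the GOT slot named by DT_TLSDESC_GOT at the lazy resolver,
     so that descriptors are only resolved the first time they are
     used.  */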
  if (l->l_info[ADDRIDX (DT_TLSDESC_GOT)] && lazy)
    *(ElfW(Addr)*)(D_PTR (l, l_info[ADDRIDX (DT_TLSDESC_GOT)]) + l->l_addr)
      = (ElfW(Addr)) &_dl_tlsdesc_resolve_rela;

  return lazy;
}
/* Initial entry point for the dynamic linker.  The C function
   _dl_start is the real entry point, its return value is the user
   program's entry point */

#define RTLD_START asm ("\
.text							\n\
	.globl _start					\n\
	.type _start, %function				\n\
	.globl _dl_start_user				\n\
	.type _dl_start_user, %function			\n\
_start:							\n\
	mov	x0, sp					\n\
	bl	_dl_start				\n\
	// returns user entry point in x0		\n\
	mov	x21, x0					\n\
_dl_start_user:						\n\
	// get the original arg count			\n\
	ldr	x1, [sp]				\n\
	// get the argv address				\n\
	add	x2, sp, #8				\n\
	// get _dl_skip_args to see if we were		\n\
	// invoked as an executable			\n\
	adrp	x4, _dl_skip_args			\n\
	ldr	w4, [x4, #:lo12:_dl_skip_args]		\n\
	// do we need to adjust argc/argv		\n\
	cmp	w4, 0					\n\
	beq	.L_done_stack_adjust			\n\
	// subtract _dl_skip_args from original arg count \n\
	sub	x1, x1, x4				\n\
	// store adjusted argc back to stack		\n\
	str	x1, [sp]				\n\
	// find the first unskipped argument		\n\
	mov	x3, x2					\n\
	add	x4, x2, x4, lsl #3			\n\
	// shuffle argv down				\n\
1:	ldr	x5, [x4], #8				\n\
	str	x5, [x3], #8				\n\
	cmp	x5, #0					\n\
	bne	1b					\n\
	// shuffle envp down				\n\
1:	ldr	x5, [x4], #8				\n\
	str	x5, [x3], #8				\n\
	cmp	x5, #0					\n\
	bne	1b					\n\
	// shuffle auxv down				\n\
1:	ldp	x0, x5, [x4, #16]!			\n\
	stp	x0, x5, [x3], #16			\n\
	cmp	x0, #0					\n\
	bne	1b					\n\
	// Update _dl_argv				\n\
	adrp	x3, _dl_argv				\n\
	str	x2, [x3, #:lo12:_dl_argv]		\n\
.L_done_stack_adjust:					\n\
	// compute envp					\n\
	add	x3, x2, x1, lsl #3			\n\
	add	x3, x3, #8				\n\
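	// x3 now holds envp = argv + 8 * (argc + 1)	\n\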
	adrp	x16, _rtld_local			\n\
	add	x16, x16, #:lo12:_rtld_local		\n\
	ldr	x0, [x16]				\n\
	bl	_dl_init				\n\
	// load the finalizer function			\n\
	adrp	x0, _dl_fini				\n\
	add	x0, x0, #:lo12:_dl_fini			\n\
	// jump to the user_s entry point		\n\
	br	x21					\n\
");
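
/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry or a
   TLS variable, so undefined references should not be allowed to define
   the value.
   ELF_RTYPE_CLASS_COPY iff TYPE should not be allowed to resolve to one
   of the main executable's symbols, as for a COPY reloc.  */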
#define elf_machine_type_class(type) \
  ((((type) == R_AARCH64_JUMP_SLOT || \
     (type) == R_AARCH64_TLS_DTPMOD || \
     (type) == R_AARCH64_TLS_DTPREL || \
     (type) == R_AARCH64_TLS_TPREL || \
     (type) == R_AARCH64_TLSDESC) * ELF_RTYPE_CLASS_PLT) \
   | (((type) == R_AARCH64_COPY) * ELF_RTYPE_CLASS_COPY))

#define ELF_MACHINE_JMP_SLOT	R_AARCH64_JUMP_SLOT

/* AArch64 uses RELA not REL.  */
#define ELF_MACHINE_NO_REL 1
#define ELF_MACHINE_NO_RELA 0
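
/* Fixing up a JUMP_SLOT reloc after lazy resolution simply stores the
   resolved address into the GOT entry that the corresponding PLT stub
   loads from.  */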
static inline ElfW(Addr)
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
		       const ElfW(Rela) *reloc,
		       ElfW(Addr) *reloc_addr,
		       ElfW(Addr) value)
{
  return *reloc_addr = value;
}

/* Return the final value of a plt relocation.  */
static inline ElfW(Addr)
elf_machine_plt_value (struct link_map *map,
		       const ElfW(Rela) *reloc,
		       ElfW(Addr) value)
{
  return value;
}

#endif
/* Names of the architecture-specific auditing callback functions.  */
#define ARCH_LA_PLTENTER aarch64_gnu_pltenter
#define ARCH_LA_PLTEXIT  aarch64_gnu_pltexit
#ifdef RESOLVE_MAP

auto inline void
__attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, const ElfW(Rela) *reloc,
		  const ElfW(Sym) *sym, const struct r_found_version *version,
		  void *const reloc_addr_arg, int skip_ifunc)
{
  ElfW(Addr) *const reloc_addr = reloc_addr_arg;
  const unsigned int r_type = ELF64_R_TYPE (reloc->r_info);

  if (__builtin_expect (r_type == R_AARCH64_RELATIVE, 0))
    *reloc_addr = map->l_addr + reloc->r_addend;
  else if (__builtin_expect (r_type == R_AARCH64_NONE, 0))
    return;
  else
    {
      const ElfW(Sym) *const refsym = sym;
      struct link_map *sym_map = RESOLVE_MAP (&sym, version, r_type);
      ElfW(Addr) value = sym_map == NULL ? 0 : sym_map->l_addr + sym->st_value;

      if (sym != NULL
	  && __glibc_unlikely (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC)
	  && __glibc_likely (sym->st_shndx != SHN_UNDEF)
	  && __glibc_likely (!skip_ifunc))
	value = elf_ifunc_invoke (value);

      switch (r_type)
	{
	case R_AARCH64_COPY:
	  if (sym == NULL)
	    break;

	  if (sym->st_size > refsym->st_size
	      || (GLRO(dl_verbose) && sym->st_size < refsym->st_size))
	    {
	      const char *strtab;

	      strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
	      _dl_error_printf ("\
%s: Symbol `%s' has different size in shared object, consider re-linking\n",
				RTLD_PROGNAME, strtab + refsym->st_name);
	    }
	  memcpy (reloc_addr_arg, (void *) value,
		  MIN (sym->st_size, refsym->st_size));
	  break;

	case R_AARCH64_RELATIVE:
	case R_AARCH64_GLOB_DAT:
	case R_AARCH64_JUMP_SLOT:
	case R_AARCH64_ABS32:
	case R_AARCH64_ABS64:
	  *reloc_addr = value + reloc->r_addend;
	  break;
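
	/* A TLS descriptor is an (entry, argument) pair.  Depending on
	   the symbol, one of three entry routines is installed below:
	   the undefined-weak stub, the dynamic-TLS resolver, or the
	   static-TLS fast path that returns a fixed offset from the
	   thread pointer.  */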
	case R_AARCH64_TLSDESC:
	  {
	    struct tlsdesc volatile *td =
	      (struct tlsdesc volatile *)reloc_addr;
#ifndef RTLD_BOOTSTRAP
	    if (! sym)
	      {
		td->arg = (void*)reloc->r_addend;
		td->entry = _dl_tlsdesc_undefweak;
	      }
	    else
#endif
	      {
#ifndef RTLD_BOOTSTRAP
# ifndef SHARED
		CHECK_STATIC_TLS (map, sym_map);
# else
		if (!TRY_STATIC_TLS (map, sym_map))
		  {
		    td->arg = _dl_make_tlsdesc_dynamic
		      (sym_map, sym->st_value + reloc->r_addend);
		    td->entry = _dl_tlsdesc_dynamic;
		  }
		else
# endif
#endif
		  {
		    td->arg = (void*)(sym->st_value + sym_map->l_tls_offset
				      + reloc->r_addend);
		    td->entry = _dl_tlsdesc_return;
		  }
	      }
	    break;
	  }

	case R_AARCH64_TLS_DTPMOD:
#ifdef RTLD_BOOTSTRAP
	  *reloc_addr = 1;
#else
	  if (sym_map != NULL)
	    {
	      *reloc_addr = sym_map->l_tls_modid;
	    }
#endif
	  break;

	case R_AARCH64_TLS_DTPREL:
	  if (sym)
	    *reloc_addr = sym->st_value + reloc->r_addend;
	  break;

	case R_AARCH64_TLS_TPREL:
	  if (sym)
	    {
	      CHECK_STATIC_TLS (map, sym_map);
	      *reloc_addr =
		sym->st_value + reloc->r_addend + sym_map->l_tls_offset;
	    }
	  break;
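
	/* For an IRELATIVE reloc the addend gives the link-time address
	   of an IFUNC resolver; invoke it and store the function address
	   it returns.  */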
	case R_AARCH64_IRELATIVE:
	  value = map->l_addr + reloc->r_addend;
	  value = elf_ifunc_invoke (value);
	  *reloc_addr = value;
	  break;

	default:
	  _dl_reloc_bad_type (map, r_type, 0);
	  break;
	}
    }
}
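
/* Handle an R_AARCH64_RELATIVE reloc: no symbol lookup is needed, the
   value is simply the load bias plus the addend.  */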
inline void
__attribute__ ((always_inline))
elf_machine_rela_relative (ElfW(Addr) l_addr,
			   const ElfW(Rela) *reloc,
			   void *const reloc_addr_arg)
{
  ElfW(Addr) *const reloc_addr = reloc_addr_arg;
  *reloc_addr = l_addr + reloc->r_addend;
}

inline void
__attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map,
		      ElfW(Addr) l_addr,
		      const ElfW(Rela) *reloc,
		      int skip_ifunc)
{
  ElfW(Addr) *const reloc_addr = (void *) (l_addr + reloc->r_offset);
  const unsigned int r_type = ELF64_R_TYPE (reloc->r_info);
  /* Check for unexpected PLT reloc type.  */
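  /* A lazily bound PLT slot must end up pointing back into the PLT so
     that the first call reaches the resolver trampoline: either relocate
     the link-time value the linker stored in the slot, or, when
     elf_machine_runtime_setup recorded a PLT address from got[1] in
     l_mach.plt, point the slot at that address instead.  */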
  if (__builtin_expect (r_type == R_AARCH64_JUMP_SLOT, 1))
    {
      if (__builtin_expect (map->l_mach.plt, 0) == 0)
	*reloc_addr += l_addr;
      else
	*reloc_addr = map->l_mach.plt;
    }
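  /* Lazy TLSDESC relocs get their entry pointed at the trampoline named
     by DT_TLSDESC_PLT and the reloc itself stashed in the argument slot;
     the descriptor is then resolved on its first use.  */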
  else if (__builtin_expect (r_type == R_AARCH64_TLSDESC, 1))
    {
      struct tlsdesc volatile *td =
	(struct tlsdesc volatile *)reloc_addr;

      td->arg = (void*)reloc;
      td->entry = (void*)(D_PTR (map, l_info[ADDRIDX (DT_TLSDESC_PLT)])
			  + map->l_addr);
    }
  else if (__glibc_unlikely (r_type == R_AARCH64_IRELATIVE))
    {
      ElfW(Addr) value = map->l_addr + reloc->r_addend;
      if (__glibc_likely (!skip_ifunc))
	value = elf_ifunc_invoke (value);
      *reloc_addr = value;
    }
  else
    _dl_reloc_bad_type (map, r_type, 1);
}

#endif