/* Machine-dependent ELF dynamic relocation inline functions.  x86-64 version.
   Copyright (C) 2001, 2002 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Andreas Jaeger <aj@suse.de>.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "x86_64"

#include <sys/param.h>

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int __attribute__ ((unused))
elf_machine_matches_host (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_machine == EM_X86_64;
}

/* Return the link-time address of _DYNAMIC.  Conveniently, this is the
   first element of the GOT.  This must be inlined in a function which
   uses global data.  */
static inline Elf64_Addr __attribute__ ((unused))
elf_machine_dynamic (void)
{
  register Elf64_Addr addr;

  asm ("leaq _DYNAMIC, %0\n" : "=r" (addr));

  return addr;
}

/* Return the run-time load address of the shared object.  */
static inline Elf64_Addr __attribute__ ((unused))
elf_machine_load_address (void)
{
  register Elf64_Addr addr, tmp;

  asm ("leaq _dl_start, %0\n"
       "leaq _dl_start(%%rip), %1\n"
       "subq %0, %1\n"
       : "=r" (tmp), "=r" (addr) : : "cc");

  return addr;
}
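
/* Both functions above rely on the fact that the dynamic linker has not
   yet processed its own relocations when they run: the absolute leaq still
   carries the link-time address taken from the object file, while the
   %rip-relative leaq in elf_machine_load_address yields the run-time
   address, so subtracting the former from the latter gives the load
   offset.  */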

/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */

static inline int __attribute__ ((unused))
elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
{
  Elf64_Addr *got;
  extern void _dl_runtime_resolve (Elf64_Word);
  extern void _dl_runtime_profile (Elf64_Word);

  if (l->l_info[DT_JMPREL] && lazy)
    {
      /* The GOT entries for functions in the PLT have not yet been filled
         in.  Their initial contents arrange, when called, to push an
         offset into the .rela.plt section, push _GLOBAL_OFFSET_TABLE_[1],
         and then jump to _GLOBAL_OFFSET_TABLE_[2].  */
      got = (Elf64_Addr *) D_PTR (l, l_info[DT_PLTGOT]);
      /* If a library is prelinked but we have to relocate anyway,
         we have to be able to undo the prelinking of .got.plt.
         The prelinker saved us here the address of .plt + 0x16.  */
      if (got[1])
        {
          l->l_mach.plt = got[1] + l->l_addr;
          l->l_mach.gotplt = (Elf64_Addr) &got[3];
        }
      got[1] = (Elf64_Addr) l;  /* Identify this shared object.  */

      /* The got[2] entry contains the address of a function which gets
         called to get the address of a so far unresolved function and
         jump to it.  The profiling extension of the dynamic linker allows
         it to intercept the calls to collect information.  In this case we
         don't store the address in the GOT so that all future calls also
         end in this function.  */
      if (__builtin_expect (profile, 0))
        {
          got[2] = (Elf64_Addr) &_dl_runtime_profile;

          if (_dl_name_match_p (GL(dl_profile), l))
            /* This is the object we are looking for.  Say that we really
               want profiling and the timers are started.  */
            GL(dl_profile_map) = l;
        }
      else
        /* This function will get called to fix up the GOT entry indicated by
           the offset on the stack, and then jump to the resolved address.  */
        got[2] = (Elf64_Addr) &_dl_runtime_resolve;
    }

  return lazy;
}

/* This code is used in dl-runtime.c to call the `fixup' function
   and then redirect to the address it returns.  */
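
/* On entry to either trampoline the stack holds, from the top: the
   link_map pointer pushed from _GLOBAL_OFFSET_TABLE_[1] by the first PLT
   entry, the relocation index pushed by the PLT slot that was called, and
   the caller's return address.  The code saves the argument-passing
   registers, scales the index by 24 (sizeof (Elf64_Rela)) to get a byte
   offset into .rela.plt, and calls fixup (or profile_fixup) with the
   link_map in %rdi and that offset in %rsi.  */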
#ifndef PROF
# define ELF_MACHINE_RUNTIME_TRAMPOLINE asm ("\n\
        .text\n\
        .globl _dl_runtime_resolve\n\
        .type _dl_runtime_resolve, @function\n\
        .align 16\n\
_dl_runtime_resolve:\n\
        pushq %rax              # Preserve registers otherwise clobbered.\n\
        pushq %rcx\n\
        pushq %rdx\n\
        pushq %rsi\n\
        pushq %rdi\n\
        pushq %r8\n\
        pushq %r9\n\
        movq 64(%rsp), %rsi     # Copy args pushed by PLT in register.\n\
        movq %rsi, %r11         # Multiply by 24.\n\
        addq %r11, %rsi\n\
        addq %r11, %rsi\n\
        shlq $3, %rsi\n\
        movq 56(%rsp), %rdi     # %rdi: link_map, %rsi: reloc_offset\n\
        call fixup              # Call resolver.\n\
        movq %rax, %r11         # Save return value.\n\
        popq %r9                # Get register content back.\n\
        popq %r8\n\
        popq %rdi\n\
        popq %rsi\n\
        popq %rdx\n\
        popq %rcx\n\
        popq %rax\n\
        addq $16, %rsp          # Adjust stack.\n\
        jmp *%r11               # Jump to function address.\n\
        .size _dl_runtime_resolve, .-_dl_runtime_resolve\n\
\n\
        .globl _dl_runtime_profile\n\
        .type _dl_runtime_profile, @function\n\
        .align 16\n\
_dl_runtime_profile:\n\
        pushq %rax              # Preserve registers otherwise clobbered.\n\
        pushq %rcx\n\
        pushq %rdx\n\
        pushq %rsi\n\
        pushq %rdi\n\
        pushq %r8\n\
        pushq %r9\n\
        movq 72(%rsp), %rdx     # Load return address if needed.\n\
        movq 64(%rsp), %rsi     # Copy args pushed by PLT in register.\n\
        movq %rsi, %r11         # Multiply by 24.\n\
        addq %r11, %rsi\n\
        addq %r11, %rsi\n\
        shlq $3, %rsi\n\
        movq 56(%rsp), %rdi     # %rdi: link_map, %rsi: reloc_offset\n\
        call profile_fixup      # Call resolver.\n\
        movq %rax, %r11         # Save return value.\n\
        popq %r9                # Get register content back.\n\
        popq %r8\n\
        popq %rdi\n\
        popq %rsi\n\
        popq %rdx\n\
        popq %rcx\n\
        popq %rax\n\
        addq $16, %rsp          # Adjust stack.\n\
        jmp *%r11               # Jump to function address.\n\
        .size _dl_runtime_profile, .-_dl_runtime_profile\n\
        .previous\n\
");
#else
# define ELF_MACHINE_RUNTIME_TRAMPOLINE asm ("\n\
        .text\n\
        .globl _dl_runtime_resolve\n\
        .globl _dl_runtime_profile\n\
        .type _dl_runtime_resolve, @function\n\
        .type _dl_runtime_profile, @function\n\
        .align 16\n\
_dl_runtime_resolve:\n\
_dl_runtime_profile:\n\
        pushq %rax              # Preserve registers otherwise clobbered.\n\
        pushq %rcx\n\
        pushq %rdx\n\
        pushq %rsi\n\
        pushq %rdi\n\
        pushq %r8\n\
        pushq %r9\n\
        movq 64(%rsp), %rsi     # Copy args pushed by PLT in register.\n\
        movq %rsi, %r11         # Multiply by 24.\n\
        addq %r11, %rsi\n\
        addq %r11, %rsi\n\
        shlq $3, %rsi\n\
        movq 56(%rsp), %rdi     # %rdi: link_map, %rsi: reloc_offset\n\
        call fixup              # Call resolver.\n\
        movq %rax, %r11         # Save return value.\n\
        popq %r9                # Get register content back.\n\
        popq %r8\n\
        popq %rdi\n\
        popq %rsi\n\
        popq %rdx\n\
        popq %rcx\n\
        popq %rax\n\
        addq $16, %rsp          # Adjust stack.\n\
        jmp *%r11               # Jump to function address.\n\
        .size _dl_runtime_resolve, .-_dl_runtime_resolve\n\
        .size _dl_runtime_profile, .-_dl_runtime_profile\n\
        .previous\n\
");
#endif

/* Initial entry point code for the dynamic linker.
   The C function `_dl_start' is the real entry point;
   its return value is the user program's entry point.  */
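/* On entry from the kernel %rsp points at argc, followed by the argv
   pointers, a terminating NULL, the environment pointers and another NULL
   (the auxiliary vector follows).  _dl_start receives that stack pointer
   as its only argument, and the argv/env address computations below
   assume this layout.  */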
#define RTLD_START asm ("\n\
        .text\n\
        .align 16\n\
.globl _start\n\
.globl _dl_start_user\n\
_start:\n\
        movq %rsp, %rdi\n\
        call _dl_start\n\
_dl_start_user:\n\
        # Save the user entry point address in %r12.\n\
        movq %rax, %r12\n\
        # Store the highest stack address.\n\
        movq __libc_stack_end@GOTPCREL(%rip), %rax\n\
        movq %rsp, (%rax)\n\
        # See if we were run as a command with the executable file\n\
        # name as an extra leading argument.\n\
        movq _dl_skip_args@GOTPCREL(%rip), %rax\n\
        movl (%rax), %eax\n\
        # Pop the original argument count.\n\
        popq %rdx\n\
        # Adjust the stack pointer to skip _dl_skip_args words.\n\
        leaq (%rsp,%rax,8), %rsp\n\
        # Subtract _dl_skip_args from argc.\n\
        subl %eax, %edx\n\
        # Push argc back on the stack.\n\
        pushq %rdx\n\
        # Call _dl_init (struct link_map *main_map, int argc, char **argv, char **env).\n\
        # argc -> rsi\n\
        movq %rdx, %rsi\n\
        # _dl_loaded -> rdi\n\
        movq _rtld_global@GOTPCREL(%rip), %rdi\n\
        movq (%rdi), %rdi\n\
        # env -> rcx\n\
        leaq 16(%rsp,%rdx,8), %rcx\n\
        # argv -> rdx\n\
        leaq 8(%rsp), %rdx\n\
        # Call the function to run the initializers.\n\
        call _dl_init@PLT\n\
        # Pass our finalizer function to the user in %rdx, as per ELF ABI.\n\
        movq _dl_fini@GOTPCREL(%rip), %rdx\n\
        # Jump to the user's entry point.\n\
        jmp *%r12\n\
        .previous\n\
");

/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry, so
   PLT entries should not be allowed to define the value.
   ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one
   of the main executable's symbols, as for a COPY reloc.  */
#define elf_machine_type_class(type) \
  ((((type) == R_X86_64_JUMP_SLOT) * ELF_RTYPE_CLASS_PLT) \
   | (((type) == R_X86_64_COPY) * ELF_RTYPE_CLASS_COPY))
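/* The equality tests evaluate to 0 or 1, so the multiplications select
   ELF_RTYPE_CLASS_PLT for JUMP_SLOT relocations and ELF_RTYPE_CLASS_COPY
   for COPY relocations without any branches; every other type maps to
   class 0.  */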

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT R_X86_64_JUMP_SLOT

/* The x86-64 never uses Elf64_Rel relocations.  */
#define ELF_MACHINE_NO_REL 1

/* We define an initialization function.  This is called very early in
   _dl_sysdep_start.  */
#define DL_PLATFORM_INIT dl_platform_init ()

static inline void __attribute__ ((unused))
dl_platform_init (void)
{
  if (GL(dl_platform) != NULL && *GL(dl_platform) == '\0')
    /* Avoid an empty string which would disturb us.  */
    GL(dl_platform) = NULL;
}

static inline Elf64_Addr
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
                       const Elf64_Rela *reloc,
                       Elf64_Addr *reloc_addr, Elf64_Addr value)
{
  return *reloc_addr = value;
}

/* Return the final value of a PLT relocation.  On x86-64 the
   JUMP_SLOT relocation ignores the addend.  */
static inline Elf64_Addr
elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
                       Elf64_Addr value)
{
  return value;
}

#endif /* !dl_machine_h */

#ifdef RESOLVE

/* Perform the relocation specified by RELOC and SYM (which is fully resolved).
   MAP is the object containing the reloc.  */

static inline void
elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc,
                  const Elf64_Sym *sym, const struct r_found_version *version,
                  Elf64_Addr *const reloc_addr)
{
  const unsigned long int r_type = ELF64_R_TYPE (reloc->r_info);

#if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC
  if (__builtin_expect (r_type == R_X86_64_RELATIVE, 0))
    {
# if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
      /* This is defined in rtld.c, but nowhere in the static libc.a;
         make the reference weak so static programs can still link.
         This declaration cannot be done when compiling rtld.c
         (i.e. #ifdef RTLD_BOOTSTRAP) because rtld.c contains the
         common defn for _dl_rtld_map, which is incompatible with a
         weak decl in the same file.  */
#  ifndef SHARED
      weak_extern (GL(dl_rtld_map));
#  endif
      if (map != &GL(dl_rtld_map)) /* Already done in rtld itself.  */
# endif
        *reloc_addr = map->l_addr + reloc->r_addend;
    }
  else
#endif
  if (__builtin_expect (r_type == R_X86_64_NONE, 0))
    return;
  else
    {
#ifndef RTLD_BOOTSTRAP
      const Elf64_Sym *const refsym = sym;
#endif
      Elf64_Addr value = RESOLVE (&sym, version, r_type);
      if (sym)
        value += sym->st_value;

#ifdef RTLD_BOOTSTRAP
      assert (r_type == R_X86_64_GLOB_DAT || r_type == R_X86_64_JUMP_SLOT);
      *reloc_addr = value + reloc->r_addend;
#else
      switch (r_type)
        {
        case R_X86_64_GLOB_DAT:
        case R_X86_64_JUMP_SLOT:
          *reloc_addr = value + reloc->r_addend;
          break;
        case R_X86_64_64:
          *reloc_addr = value + reloc->r_addend;
          break;
        case R_X86_64_32:
          *(unsigned int *) reloc_addr = value + reloc->r_addend;
          break;
        case R_X86_64_PC32:
          *(unsigned int *) reloc_addr = value + reloc->r_addend
            - (Elf64_Addr) reloc_addr;
          break;
        case R_X86_64_COPY:
          if (sym == NULL)
            /* This can happen in trace mode if an object could not be
               found.  */
            break;
          if (__builtin_expect (sym->st_size > refsym->st_size, 0)
              || (__builtin_expect (sym->st_size < refsym->st_size, 0)
                  && GL(dl_verbose)))
            {
              const char *strtab;

              strtab = (const char *) D_PTR (map, l_info[DT_STRTAB]);
              _dl_error_printf ("\
%s: Symbol `%s' has different size in shared object, consider re-linking\n",
                                _dl_argv[0] ?: "<program name unknown>",
                                strtab + refsym->st_name);
            }
          memcpy (reloc_addr, (void *) value, MIN (sym->st_size,
                                                   refsym->st_size));
          break;
        default:
          _dl_reloc_bad_type (map, r_type, 0);
          break;
        }
#endif
    }
}

static inline void
elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
                           Elf64_Addr *const reloc_addr)
{
  assert (ELF64_R_TYPE (reloc->r_info) == R_X86_64_RELATIVE);
  *reloc_addr = l_addr + reloc->r_addend;
}

static inline void
elf_machine_lazy_rel (struct link_map *map,
                      Elf64_Addr l_addr, const Elf64_Rela *reloc)
{
  Elf64_Addr *const reloc_addr = (void *) (l_addr + reloc->r_offset);
  const unsigned long int r_type = ELF64_R_TYPE (reloc->r_info);

  /* Check for unexpected PLT reloc type.  */
  if (__builtin_expect (r_type == R_X86_64_JUMP_SLOT, 1))
    {
      if (__builtin_expect (map->l_mach.plt, 0) == 0)
        *reloc_addr += l_addr;
      else
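        /* Undo the prelinking: got[1] held the address of .plt + 0x16
           (see elf_machine_runtime_setup above), and each 16-byte PLT
           slot corresponds to one 8-byte .got.plt slot, so doubling this
           entry's offset from .got.plt points back at the push/jmp pair
           of its own PLT entry.  */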
        *reloc_addr =
          map->l_mach.plt
          + (((Elf64_Addr) reloc_addr) - map->l_mach.gotplt) * 2;
    }
  else
    _dl_reloc_bad_type (map, r_type, 1);
}

#endif /* RESOLVE */