Xen, mapcache: Fix the computation of the bucket size.
[qemu/wangdongxu.git] / linux-user / elfload.c
blobf3b1552e9e945e334c5710a470e50de4136d76ed
1 /* This is the Linux kernel elf-loading code, ported into user space */
2 #include <sys/time.h>
3 #include <sys/param.h>
5 #include <stdio.h>
6 #include <sys/types.h>
7 #include <fcntl.h>
8 #include <errno.h>
9 #include <unistd.h>
10 #include <sys/mman.h>
11 #include <sys/resource.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <time.h>
16 #include "qemu.h"
17 #include "disas.h"
19 #ifdef _ARCH_PPC64
20 #undef ARCH_DLINFO
21 #undef ELF_PLATFORM
22 #undef ELF_HWCAP
23 #undef ELF_CLASS
24 #undef ELF_DATA
25 #undef ELF_ARCH
26 #endif
28 #define ELF_OSABI ELFOSABI_SYSV
/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
    ADDR_NO_RANDOMIZE  = 0x0040000,  /* disable randomization of VA space */
    FDPIC_FUNCPTRS     = 0x0080000,  /* userspace function ptrs point to
                                        descriptors (signal handling) */
    MMAP_PAGE_ZERO     = 0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC  = 0x0400000,
    ADDR_LIMIT_32BIT   = 0x0800000,
    SHORT_INODE        = 0x1000000,
    WHOLE_SECONDS      = 0x2000000,
    STICKY_TIMEOUTS    = 0x4000000,
    ADDR_LIMIT_3GB     = 0x8000000,
};
52 * Personality types.
54 * These go in the low byte. Avoid using the top bit, it will
55 * conflict with error returns.
57 enum {
58 PER_LINUX = 0x0000,
59 PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
60 PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
61 PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
62 PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
63 PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
64 PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
65 PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
66 PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
67 PER_BSD = 0x0006,
68 PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
69 PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
70 PER_LINUX32 = 0x0008,
71 PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
72 PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
73 PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
74 PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
75 PER_RISCOS = 0x000c,
76 PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
77 PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
78 PER_OSF4 = 0x000f, /* OSF/1 v4 */
79 PER_HPUX = 0x0010,
80 PER_MASK = 0x00ff,
84 * Return the base personality without flags.
86 #define personality(pers) (pers & PER_MASK)
88 /* this flag is uneffective under linux too, should be deleted */
89 #ifndef MAP_DENYWRITE
90 #define MAP_DENYWRITE 0
91 #endif
93 /* should probably go in elf.h */
94 #ifndef ELIBBAD
95 #define ELIBBAD 80
96 #endif
98 #ifdef TARGET_WORDS_BIGENDIAN
99 #define ELF_DATA ELFDATA2MSB
100 #else
101 #define ELF_DATA ELFDATA2LSB
102 #endif
104 typedef target_ulong target_elf_greg_t;
105 #ifdef USE_UID16
106 typedef target_ushort target_uid_t;
107 typedef target_ushort target_gid_t;
108 #else
109 typedef target_uint target_uid_t;
110 typedef target_uint target_gid_t;
111 #endif
112 typedef target_int target_pid_t;
114 #ifdef TARGET_I386
116 #define ELF_PLATFORM get_elf_platform()
118 static const char *get_elf_platform(void)
120 static char elf_platform[] = "i386";
121 int family = (thread_env->cpuid_version >> 8) & 0xff;
122 if (family > 6)
123 family = 6;
124 if (family >= 3)
125 elf_platform[1] = '0' + family;
126 return elf_platform;
129 #define ELF_HWCAP get_elf_hwcap()
131 static uint32_t get_elf_hwcap(void)
133 return thread_env->cpuid_features;
136 #ifdef TARGET_X86_64
137 #define ELF_START_MMAP 0x2aaaaab000ULL
138 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
140 #define ELF_CLASS ELFCLASS64
141 #define ELF_ARCH EM_X86_64
143 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
145 regs->rax = 0;
146 regs->rsp = infop->start_stack;
147 regs->rip = infop->entry;
150 #define ELF_NREG 27
151 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
154 * Note that ELF_NREG should be 29 as there should be place for
155 * TRAPNO and ERR "registers" as well but linux doesn't dump
156 * those.
158 * See linux kernel: arch/x86/include/asm/elf.h
/* Fill *regs in the slot order of the x86-64 core-dump gregset
   (r15..r8, classic GPRs, orig_rax, rip, segment state) — the order
   must not change, it is fixed by the kernel's dump layout. */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
{
    (*regs)[0] = env->regs[15];
    (*regs)[1] = env->regs[14];
    (*regs)[2] = env->regs[13];
    (*regs)[3] = env->regs[12];
    (*regs)[4] = env->regs[R_EBP];
    (*regs)[5] = env->regs[R_EBX];
    (*regs)[6] = env->regs[11];
    (*regs)[7] = env->regs[10];
    (*regs)[8] = env->regs[9];
    (*regs)[9] = env->regs[8];
    (*regs)[10] = env->regs[R_EAX];
    (*regs)[11] = env->regs[R_ECX];
    (*regs)[12] = env->regs[R_EDX];
    (*regs)[13] = env->regs[R_ESI];
    (*regs)[14] = env->regs[R_EDI];
    (*regs)[15] = env->regs[R_EAX]; /* XXX: stands in for orig_rax, which is not tracked */
    (*regs)[16] = env->eip;
    (*regs)[17] = env->segs[R_CS].selector & 0xffff;
    (*regs)[18] = env->eflags;
    (*regs)[19] = env->regs[R_ESP];
    (*regs)[20] = env->segs[R_SS].selector & 0xffff;
    /* NOTE(review): slots 21/22 carry the FS/GS selectors here; in the
       kernel layout they are fs_base/gs_base — confirm intended. */
    (*regs)[21] = env->segs[R_FS].selector & 0xffff;
    (*regs)[22] = env->segs[R_GS].selector & 0xffff;
    (*regs)[23] = env->segs[R_DS].selector & 0xffff;
    (*regs)[24] = env->segs[R_ES].selector & 0xffff;
    (*regs)[25] = env->segs[R_FS].selector & 0xffff;
    (*regs)[26] = env->segs[R_GS].selector & 0xffff;
}
191 #else
193 #define ELF_START_MMAP 0x80000000
196 * This is used to ensure we don't load something for the wrong architecture.
198 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
201 * These are used to set parameters in the core dumps.
203 #define ELF_CLASS ELFCLASS32
204 #define ELF_ARCH EM_386
206 static inline void init_thread(struct target_pt_regs *regs,
207 struct image_info *infop)
209 regs->esp = infop->start_stack;
210 regs->eip = infop->entry;
212 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
213 starts %edx contains a pointer to a function which might be
214 registered using `atexit'. This provides a mean for the
215 dynamic linker to call DT_FINI functions for shared libraries
216 that have been loaded before the code runs.
218 A value of 0 tells we have no such handler. */
219 regs->edx = 0;
222 #define ELF_NREG 17
223 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
226 * Note that ELF_NREG should be 19 as there should be place for
227 * TRAPNO and ERR "registers" as well but linux doesn't dump
228 * those.
230 * See linux kernel: arch/x86/include/asm/elf.h
/* Fill *regs in the slot order of the i386 core-dump gregset
   (ebx..eax, segments, orig_eax, eip, cs, eflags, esp, ss) — the
   order is fixed by the kernel's dump layout and must not change. */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
{
    (*regs)[0] = env->regs[R_EBX];
    (*regs)[1] = env->regs[R_ECX];
    (*regs)[2] = env->regs[R_EDX];
    (*regs)[3] = env->regs[R_ESI];
    (*regs)[4] = env->regs[R_EDI];
    (*regs)[5] = env->regs[R_EBP];
    (*regs)[6] = env->regs[R_EAX];
    (*regs)[7] = env->segs[R_DS].selector & 0xffff;
    (*regs)[8] = env->segs[R_ES].selector & 0xffff;
    (*regs)[9] = env->segs[R_FS].selector & 0xffff;
    (*regs)[10] = env->segs[R_GS].selector & 0xffff;
    (*regs)[11] = env->regs[R_EAX]; /* XXX: stands in for orig_eax, which is not tracked */
    (*regs)[12] = env->eip;
    (*regs)[13] = env->segs[R_CS].selector & 0xffff;
    (*regs)[14] = env->eflags;
    (*regs)[15] = env->regs[R_ESP];
    (*regs)[16] = env->segs[R_SS].selector & 0xffff;
}
252 #endif
254 #define USE_ELF_CORE_DUMP
255 #define ELF_EXEC_PAGESIZE 4096
257 #endif
259 #ifdef TARGET_ARM
261 #define ELF_START_MMAP 0x80000000
263 #define elf_check_arch(x) ( (x) == EM_ARM )
265 #define ELF_CLASS ELFCLASS32
266 #define ELF_ARCH EM_ARM
static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));
    regs->ARM_cpsr = 0x10;
    if (infop->entry & 1)
        regs->ARM_cpsr |= CPSR_T;   /* odd entry address => Thumb entry point */
    regs->ARM_pc = infop->entry & 0xfffffffe;
    regs->ARM_sp = infop->start_stack;
    /* FIXME - what to do on failure of get_user()? */
    get_user_ual(regs->ARM_r2, stack + 8); /* envp */
    get_user_ual(regs->ARM_r1, stack + 4); /* argv — NOTE(review): was labelled "envp"; r1 is argv in the ABI, verify */
    /* XXX: it seems that r0 is zeroed after ! */
    regs->ARM_r0 = 0;
    /* For uClinux PIC binaries.  */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->ARM_r10 = infop->start_data;
}
288 #define ELF_NREG 18
289 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
291 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUARMState *env)
293 (*regs)[0] = tswapl(env->regs[0]);
294 (*regs)[1] = tswapl(env->regs[1]);
295 (*regs)[2] = tswapl(env->regs[2]);
296 (*regs)[3] = tswapl(env->regs[3]);
297 (*regs)[4] = tswapl(env->regs[4]);
298 (*regs)[5] = tswapl(env->regs[5]);
299 (*regs)[6] = tswapl(env->regs[6]);
300 (*regs)[7] = tswapl(env->regs[7]);
301 (*regs)[8] = tswapl(env->regs[8]);
302 (*regs)[9] = tswapl(env->regs[9]);
303 (*regs)[10] = tswapl(env->regs[10]);
304 (*regs)[11] = tswapl(env->regs[11]);
305 (*regs)[12] = tswapl(env->regs[12]);
306 (*regs)[13] = tswapl(env->regs[13]);
307 (*regs)[14] = tswapl(env->regs[14]);
308 (*regs)[15] = tswapl(env->regs[15]);
310 (*regs)[16] = tswapl(cpsr_read((CPUARMState *)env));
311 (*regs)[17] = tswapl(env->regs[0]); /* XXX */
314 #define USE_ELF_CORE_DUMP
315 #define ELF_EXEC_PAGESIZE 4096
/* AT_HWCAP bits reported to ARM guests. */
enum {
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
    ARM_HWCAP_ARM_JAVA      = 1 << 8,
    ARM_HWCAP_ARM_IWMMXT    = 1 << 9,
    ARM_HWCAP_ARM_THUMBEE   = 1 << 10,
    ARM_HWCAP_ARM_NEON      = 1 << 11,
    ARM_HWCAP_ARM_VFPv3     = 1 << 12,
    ARM_HWCAP_ARM_VFPv3D16  = 1 << 13,
};
335 #define TARGET_HAS_GUEST_VALIDATE_BASE
336 /* We want the opportunity to check the suggested base */
337 bool guest_validate_base(unsigned long guest_base)
339 unsigned long real_start, test_page_addr;
341 /* We need to check that we can force a fault on access to the
342 * commpage at 0xffff0fxx
344 test_page_addr = guest_base + (0xffff0f00 & qemu_host_page_mask);
345 /* Note it needs to be writeable to let us initialise it */
346 real_start = (unsigned long)
347 mmap((void *)test_page_addr, qemu_host_page_size,
348 PROT_READ | PROT_WRITE,
349 MAP_ANONYMOUS | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
351 /* If we can't map it then try another address */
352 if (real_start == -1ul) {
353 return 0;
356 if (real_start != test_page_addr) {
357 /* OS didn't put the page where we asked - unmap and reject */
358 munmap((void *)real_start, qemu_host_page_size);
359 return 0;
362 /* Leave the page mapped
363 * Populate it (mmap should have left it all 0'd)
366 /* Kernel helper versions */
367 __put_user(5, (uint32_t *)g2h(0xffff0ffcul));
369 /* Now it's populated make it RO */
370 if (mprotect((void *)test_page_addr, qemu_host_page_size, PROT_READ)) {
371 perror("Protecting guest commpage");
372 exit(-1);
375 return 1; /* All good */
379 #define ELF_HWCAP get_elf_hwcap()
381 static uint32_t get_elf_hwcap(void)
383 CPUARMState *e = thread_env;
384 uint32_t hwcaps = 0;
386 hwcaps |= ARM_HWCAP_ARM_SWP;
387 hwcaps |= ARM_HWCAP_ARM_HALF;
388 hwcaps |= ARM_HWCAP_ARM_THUMB;
389 hwcaps |= ARM_HWCAP_ARM_FAST_MULT;
390 hwcaps |= ARM_HWCAP_ARM_FPA;
392 /* probe for the extra features */
393 #define GET_FEATURE(feat, hwcap) \
394 do {if (arm_feature(e, feat)) { hwcaps |= hwcap; } } while (0)
395 GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP);
396 GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT);
397 GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE);
398 GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON);
399 GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3);
400 GET_FEATURE(ARM_FEATURE_VFP_FP16, ARM_HWCAP_ARM_VFPv3D16);
401 #undef GET_FEATURE
403 return hwcaps;
406 #endif
408 #ifdef TARGET_UNICORE32
410 #define ELF_START_MMAP 0x80000000
412 #define elf_check_arch(x) ((x) == EM_UNICORE32)
414 #define ELF_CLASS ELFCLASS32
415 #define ELF_DATA ELFDATA2LSB
416 #define ELF_ARCH EM_UNICORE32
/* Mirrors the ARM init_thread above for UniCore32. */
static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));
    regs->UC32_REG_asr = 0x10;
    regs->UC32_REG_pc = infop->entry & 0xfffffffe;
    regs->UC32_REG_sp = infop->start_stack;
    /* FIXME - what to do on failure of get_user()? */
    get_user_ual(regs->UC32_REG_02, stack + 8); /* envp */
    get_user_ual(regs->UC32_REG_01, stack + 4); /* argv — NOTE(review): was labelled "envp"; verify */
    /* XXX: it seems that r0 is zeroed after ! */
    regs->UC32_REG_00 = 0;
}
433 #define ELF_NREG 34
434 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
436 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUUniCore32State *env)
438 (*regs)[0] = env->regs[0];
439 (*regs)[1] = env->regs[1];
440 (*regs)[2] = env->regs[2];
441 (*regs)[3] = env->regs[3];
442 (*regs)[4] = env->regs[4];
443 (*regs)[5] = env->regs[5];
444 (*regs)[6] = env->regs[6];
445 (*regs)[7] = env->regs[7];
446 (*regs)[8] = env->regs[8];
447 (*regs)[9] = env->regs[9];
448 (*regs)[10] = env->regs[10];
449 (*regs)[11] = env->regs[11];
450 (*regs)[12] = env->regs[12];
451 (*regs)[13] = env->regs[13];
452 (*regs)[14] = env->regs[14];
453 (*regs)[15] = env->regs[15];
454 (*regs)[16] = env->regs[16];
455 (*regs)[17] = env->regs[17];
456 (*regs)[18] = env->regs[18];
457 (*regs)[19] = env->regs[19];
458 (*regs)[20] = env->regs[20];
459 (*regs)[21] = env->regs[21];
460 (*regs)[22] = env->regs[22];
461 (*regs)[23] = env->regs[23];
462 (*regs)[24] = env->regs[24];
463 (*regs)[25] = env->regs[25];
464 (*regs)[26] = env->regs[26];
465 (*regs)[27] = env->regs[27];
466 (*regs)[28] = env->regs[28];
467 (*regs)[29] = env->regs[29];
468 (*regs)[30] = env->regs[30];
469 (*regs)[31] = env->regs[31];
471 (*regs)[32] = cpu_asr_read((CPUUniCore32State *)env);
472 (*regs)[33] = env->regs[0]; /* XXX */
475 #define USE_ELF_CORE_DUMP
476 #define ELF_EXEC_PAGESIZE 4096
478 #define ELF_HWCAP (UC32_HWCAP_CMOV | UC32_HWCAP_UCF64)
480 #endif
482 #ifdef TARGET_SPARC
483 #ifdef TARGET_SPARC64
485 #define ELF_START_MMAP 0x80000000
486 #define ELF_HWCAP (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
487 | HWCAP_SPARC_MULDIV | HWCAP_SPARC_V9)
488 #ifndef TARGET_ABI32
489 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
490 #else
491 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
492 #endif
494 #define ELF_CLASS ELFCLASS64
495 #define ELF_ARCH EM_SPARCV9
497 #define STACK_BIAS 2047
499 static inline void init_thread(struct target_pt_regs *regs,
500 struct image_info *infop)
502 #ifndef TARGET_ABI32
503 regs->tstate = 0;
504 #endif
505 regs->pc = infop->entry;
506 regs->npc = regs->pc + 4;
507 regs->y = 0;
508 #ifdef TARGET_ABI32
509 regs->u_regs[14] = infop->start_stack - 16 * 4;
510 #else
511 if (personality(infop->personality) == PER_LINUX32)
512 regs->u_regs[14] = infop->start_stack - 16 * 4;
513 else
514 regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
515 #endif
518 #else
519 #define ELF_START_MMAP 0x80000000
520 #define ELF_HWCAP (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
521 | HWCAP_SPARC_MULDIV)
522 #define elf_check_arch(x) ( (x) == EM_SPARC )
524 #define ELF_CLASS ELFCLASS32
525 #define ELF_ARCH EM_SPARC
527 static inline void init_thread(struct target_pt_regs *regs,
528 struct image_info *infop)
530 regs->psr = 0;
531 regs->pc = infop->entry;
532 regs->npc = regs->pc + 4;
533 regs->y = 0;
534 regs->u_regs[14] = infop->start_stack - 16 * 4;
537 #endif
538 #endif
540 #ifdef TARGET_PPC
542 #define ELF_START_MMAP 0x80000000
544 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
546 #define elf_check_arch(x) ( (x) == EM_PPC64 )
548 #define ELF_CLASS ELFCLASS64
550 #else
552 #define elf_check_arch(x) ( (x) == EM_PPC )
554 #define ELF_CLASS ELFCLASS32
556 #endif
558 #define ELF_ARCH EM_PPC
560 /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
561 See arch/powerpc/include/asm/cputable.h. */
562 enum {
563 QEMU_PPC_FEATURE_32 = 0x80000000,
564 QEMU_PPC_FEATURE_64 = 0x40000000,
565 QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
566 QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
567 QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
568 QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
569 QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
570 QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
571 QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
572 QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
573 QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
574 QEMU_PPC_FEATURE_NO_TB = 0x00100000,
575 QEMU_PPC_FEATURE_POWER4 = 0x00080000,
576 QEMU_PPC_FEATURE_POWER5 = 0x00040000,
577 QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
578 QEMU_PPC_FEATURE_CELL = 0x00010000,
579 QEMU_PPC_FEATURE_BOOKE = 0x00008000,
580 QEMU_PPC_FEATURE_SMT = 0x00004000,
581 QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
582 QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
583 QEMU_PPC_FEATURE_PA6T = 0x00000800,
584 QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
585 QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
586 QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
587 QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
588 QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
590 QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
591 QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
594 #define ELF_HWCAP get_elf_hwcap()
596 static uint32_t get_elf_hwcap(void)
598 CPUPPCState *e = thread_env;
599 uint32_t features = 0;
601 /* We don't have to be terribly complete here; the high points are
602 Altivec/FP/SPE support. Anything else is just a bonus. */
603 #define GET_FEATURE(flag, feature) \
604 do {if (e->insns_flags & flag) features |= feature; } while(0)
605 GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
606 GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
607 GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
608 GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
609 GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
610 GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
611 GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
612 GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
613 #undef GET_FEATURE
615 return features;
619 * The requirements here are:
620 * - keep the final alignment of sp (sp & 0xf)
621 * - make sure the 32-bit value at the first 16 byte aligned position of
622 * AUXV is greater than 16 for glibc compatibility.
623 * AT_IGNOREPPC is used for that.
624 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
625 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
627 #define DLINFO_ARCH_ITEMS 5
628 #define ARCH_DLINFO \
629 do { \
630 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
631 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
632 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
633 /* \
634 * Now handle glibc compatibility. \
635 */ \
636 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
637 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
638 } while (0)
640 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
642 _regs->gpr[1] = infop->start_stack;
643 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
644 _regs->gpr[2] = ldq_raw(infop->entry + 8) + infop->load_bias;
645 infop->entry = ldq_raw(infop->entry) + infop->load_bias;
646 #endif
647 _regs->nip = infop->entry;
650 /* See linux kernel: arch/powerpc/include/asm/elf.h. */
651 #define ELF_NREG 48
652 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *env)
{
    int i;
    target_ulong ccr = 0;

    /* gpr0-gpr31 fill slots 0-31. */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        (*regs)[i] = tswapl(env->gpr[i]);
    }

    (*regs)[32] = tswapl(env->nip);
    (*regs)[33] = tswapl(env->msr);
    /* NOTE: slot 34 (orig_gpr3 in the kernel layout) is deliberately
       left untouched here. */
    (*regs)[35] = tswapl(env->ctr);
    (*regs)[36] = tswapl(env->lr);
    (*regs)[37] = tswapl(env->xer);

    /* Reassemble the 32-bit CR image from the eight 4-bit CR fields. */
    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        ccr |= env->crf[i] << (32 - ((i + 1) * 4));
    }
    (*regs)[38] = tswapl(ccr);
}
675 #define USE_ELF_CORE_DUMP
676 #define ELF_EXEC_PAGESIZE 4096
678 #endif
680 #ifdef TARGET_MIPS
682 #define ELF_START_MMAP 0x80000000
684 #define elf_check_arch(x) ( (x) == EM_MIPS )
686 #ifdef TARGET_MIPS64
687 #define ELF_CLASS ELFCLASS64
688 #else
689 #define ELF_CLASS ELFCLASS32
690 #endif
691 #define ELF_ARCH EM_MIPS
693 static inline void init_thread(struct target_pt_regs *regs,
694 struct image_info *infop)
696 regs->cp0_status = 2 << CP0St_KSU;
697 regs->cp0_epc = infop->entry;
698 regs->regs[29] = infop->start_stack;
701 /* See linux kernel: arch/mips/include/asm/elf.h. */
702 #define ELF_NREG 45
703 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
705 /* See linux kernel: arch/mips/include/asm/reg.h. */
706 enum {
707 #ifdef TARGET_MIPS64
708 TARGET_EF_R0 = 0,
709 #else
710 TARGET_EF_R0 = 6,
711 #endif
712 TARGET_EF_R26 = TARGET_EF_R0 + 26,
713 TARGET_EF_R27 = TARGET_EF_R0 + 27,
714 TARGET_EF_LO = TARGET_EF_R0 + 32,
715 TARGET_EF_HI = TARGET_EF_R0 + 33,
716 TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
717 TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
718 TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
719 TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMIPSState *env)
{
    int i;

    /* Zero the pad words before the GPR block (6 of them in the
       non-64-bit layout, per TARGET_EF_R0 above). */
    for (i = 0; i < TARGET_EF_R0; i++) {
        (*regs)[i] = 0;
    }
    (*regs)[TARGET_EF_R0] = 0;          /* $zero is hard-wired to 0 */

    for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
        (*regs)[TARGET_EF_R0 + i] = tswapl(env->active_tc.gpr[i]);
    }

    /* k0/k1 are kernel scratch registers; dump them as 0. */
    (*regs)[TARGET_EF_R26] = 0;
    (*regs)[TARGET_EF_R27] = 0;
    (*regs)[TARGET_EF_LO] = tswapl(env->active_tc.LO[0]);
    (*regs)[TARGET_EF_HI] = tswapl(env->active_tc.HI[0]);
    (*regs)[TARGET_EF_CP0_EPC] = tswapl(env->active_tc.PC);
    (*regs)[TARGET_EF_CP0_BADVADDR] = tswapl(env->CP0_BadVAddr);
    (*regs)[TARGET_EF_CP0_STATUS] = tswapl(env->CP0_Status);
    (*regs)[TARGET_EF_CP0_CAUSE] = tswapl(env->CP0_Cause);
}
746 #define USE_ELF_CORE_DUMP
747 #define ELF_EXEC_PAGESIZE 4096
749 #endif /* TARGET_MIPS */
751 #ifdef TARGET_MICROBLAZE
753 #define ELF_START_MMAP 0x80000000
755 #define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)
757 #define ELF_CLASS ELFCLASS32
758 #define ELF_ARCH EM_MICROBLAZE
760 static inline void init_thread(struct target_pt_regs *regs,
761 struct image_info *infop)
763 regs->pc = infop->entry;
764 regs->r1 = infop->start_stack;
768 #define ELF_EXEC_PAGESIZE 4096
770 #define USE_ELF_CORE_DUMP
771 #define ELF_NREG 38
772 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
774 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */
775 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env)
777 int i, pos = 0;
779 for (i = 0; i < 32; i++) {
780 (*regs)[pos++] = tswapl(env->regs[i]);
783 for (i = 0; i < 6; i++) {
784 (*regs)[pos++] = tswapl(env->sregs[i]);
788 #endif /* TARGET_MICROBLAZE */
790 #ifdef TARGET_SH4
792 #define ELF_START_MMAP 0x80000000
794 #define elf_check_arch(x) ( (x) == EM_SH )
796 #define ELF_CLASS ELFCLASS32
797 #define ELF_ARCH EM_SH
799 static inline void init_thread(struct target_pt_regs *regs,
800 struct image_info *infop)
802 /* Check other registers XXXXX */
803 regs->pc = infop->entry;
804 regs->regs[15] = infop->start_stack;
807 /* See linux kernel: arch/sh/include/asm/elf.h. */
808 #define ELF_NREG 23
809 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
811 /* See linux kernel: arch/sh/include/asm/ptrace.h. */
812 enum {
813 TARGET_REG_PC = 16,
814 TARGET_REG_PR = 17,
815 TARGET_REG_SR = 18,
816 TARGET_REG_GBR = 19,
817 TARGET_REG_MACH = 20,
818 TARGET_REG_MACL = 21,
819 TARGET_REG_SYSCALL = 22
822 static inline void elf_core_copy_regs(target_elf_gregset_t *regs,
823 const CPUSH4State *env)
825 int i;
827 for (i = 0; i < 16; i++) {
828 (*regs[i]) = tswapl(env->gregs[i]);
831 (*regs)[TARGET_REG_PC] = tswapl(env->pc);
832 (*regs)[TARGET_REG_PR] = tswapl(env->pr);
833 (*regs)[TARGET_REG_SR] = tswapl(env->sr);
834 (*regs)[TARGET_REG_GBR] = tswapl(env->gbr);
835 (*regs)[TARGET_REG_MACH] = tswapl(env->mach);
836 (*regs)[TARGET_REG_MACL] = tswapl(env->macl);
837 (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
840 #define USE_ELF_CORE_DUMP
841 #define ELF_EXEC_PAGESIZE 4096
843 #endif
845 #ifdef TARGET_CRIS
847 #define ELF_START_MMAP 0x80000000
849 #define elf_check_arch(x) ( (x) == EM_CRIS )
851 #define ELF_CLASS ELFCLASS32
852 #define ELF_ARCH EM_CRIS
854 static inline void init_thread(struct target_pt_regs *regs,
855 struct image_info *infop)
857 regs->erp = infop->entry;
860 #define ELF_EXEC_PAGESIZE 8192
862 #endif
864 #ifdef TARGET_M68K
866 #define ELF_START_MMAP 0x80000000
868 #define elf_check_arch(x) ( (x) == EM_68K )
870 #define ELF_CLASS ELFCLASS32
871 #define ELF_ARCH EM_68K
873 /* ??? Does this need to do anything?
874 #define ELF_PLAT_INIT(_r) */
876 static inline void init_thread(struct target_pt_regs *regs,
877 struct image_info *infop)
879 regs->usp = infop->start_stack;
880 regs->sr = 0;
881 regs->pc = infop->entry;
884 /* See linux kernel: arch/m68k/include/asm/elf.h. */
885 #define ELF_NREG 20
886 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
/* Fill *regs in the m68k dump order: d1-d7, a0-a6, d0, usp(a7),
   orig_d0, sr, pc, format/vector word. */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *env)
{
    (*regs)[0] = tswapl(env->dregs[1]);
    (*regs)[1] = tswapl(env->dregs[2]);
    (*regs)[2] = tswapl(env->dregs[3]);
    (*regs)[3] = tswapl(env->dregs[4]);
    (*regs)[4] = tswapl(env->dregs[5]);
    (*regs)[5] = tswapl(env->dregs[6]);
    (*regs)[6] = tswapl(env->dregs[7]);
    (*regs)[7] = tswapl(env->aregs[0]);
    (*regs)[8] = tswapl(env->aregs[1]);
    (*regs)[9] = tswapl(env->aregs[2]);
    (*regs)[10] = tswapl(env->aregs[3]);
    (*regs)[11] = tswapl(env->aregs[4]);
    (*regs)[12] = tswapl(env->aregs[5]);
    (*regs)[13] = tswapl(env->aregs[6]);
    (*regs)[14] = tswapl(env->dregs[0]);
    (*regs)[15] = tswapl(env->aregs[7]);
    (*regs)[16] = tswapl(env->dregs[0]); /* FIXME: orig_d0 */
    (*regs)[17] = tswapl(env->sr);
    (*regs)[18] = tswapl(env->pc);
    (*regs)[19] = 0; /* FIXME: regs->format | regs->vector */
}
912 #define USE_ELF_CORE_DUMP
913 #define ELF_EXEC_PAGESIZE 8192
915 #endif
917 #ifdef TARGET_ALPHA
919 #define ELF_START_MMAP (0x30000000000ULL)
921 #define elf_check_arch(x) ( (x) == ELF_ARCH )
923 #define ELF_CLASS ELFCLASS64
924 #define ELF_ARCH EM_ALPHA
926 static inline void init_thread(struct target_pt_regs *regs,
927 struct image_info *infop)
929 regs->pc = infop->entry;
930 regs->ps = 8;
931 regs->usp = infop->start_stack;
934 #define ELF_EXEC_PAGESIZE 8192
936 #endif /* TARGET_ALPHA */
938 #ifdef TARGET_S390X
940 #define ELF_START_MMAP (0x20000000000ULL)
942 #define elf_check_arch(x) ( (x) == ELF_ARCH )
944 #define ELF_CLASS ELFCLASS64
945 #define ELF_DATA ELFDATA2MSB
946 #define ELF_ARCH EM_S390
948 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
950 regs->psw.addr = infop->entry;
951 regs->psw.mask = PSW_MASK_64 | PSW_MASK_32;
952 regs->gprs[15] = infop->start_stack;
955 #endif /* TARGET_S390X */
957 #ifndef ELF_PLATFORM
958 #define ELF_PLATFORM (NULL)
959 #endif
961 #ifndef ELF_HWCAP
962 #define ELF_HWCAP 0
963 #endif
965 #ifdef TARGET_ABI32
966 #undef ELF_CLASS
967 #define ELF_CLASS ELFCLASS32
968 #undef bswaptls
969 #define bswaptls(ptr) bswap32s(ptr)
970 #endif
972 #include "elf.h"
974 struct exec
976 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
977 unsigned int a_text; /* length of text, in bytes */
978 unsigned int a_data; /* length of data, in bytes */
979 unsigned int a_bss; /* length of uninitialized data area, in bytes */
980 unsigned int a_syms; /* length of symbol table data in file, in bytes */
981 unsigned int a_entry; /* start address */
982 unsigned int a_trsize; /* length of relocation info for text, in bytes */
983 unsigned int a_drsize; /* length of relocation info for data, in bytes */
987 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
988 #define OMAGIC 0407
989 #define NMAGIC 0410
990 #define ZMAGIC 0413
991 #define QMAGIC 0314
993 /* Necessary parameters */
994 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
995 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
996 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
998 #define DLINFO_ITEMS 13
/* Historical kernel shim: in user-mode emulation "user" memory is
   directly addressable, so this is a plain memcpy. */
static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
}
1005 #ifdef BSWAP_NEEDED
1006 static void bswap_ehdr(struct elfhdr *ehdr)
1008 bswap16s(&ehdr->e_type); /* Object file type */
1009 bswap16s(&ehdr->e_machine); /* Architecture */
1010 bswap32s(&ehdr->e_version); /* Object file version */
1011 bswaptls(&ehdr->e_entry); /* Entry point virtual address */
1012 bswaptls(&ehdr->e_phoff); /* Program header table file offset */
1013 bswaptls(&ehdr->e_shoff); /* Section header table file offset */
1014 bswap32s(&ehdr->e_flags); /* Processor-specific flags */
1015 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
1016 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
1017 bswap16s(&ehdr->e_phnum); /* Program header table entry count */
1018 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
1019 bswap16s(&ehdr->e_shnum); /* Section header table entry count */
1020 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
1023 static void bswap_phdr(struct elf_phdr *phdr, int phnum)
1025 int i;
1026 for (i = 0; i < phnum; ++i, ++phdr) {
1027 bswap32s(&phdr->p_type); /* Segment type */
1028 bswap32s(&phdr->p_flags); /* Segment flags */
1029 bswaptls(&phdr->p_offset); /* Segment file offset */
1030 bswaptls(&phdr->p_vaddr); /* Segment virtual address */
1031 bswaptls(&phdr->p_paddr); /* Segment physical address */
1032 bswaptls(&phdr->p_filesz); /* Segment size in file */
1033 bswaptls(&phdr->p_memsz); /* Segment size in memory */
1034 bswaptls(&phdr->p_align); /* Segment alignment */
1038 static void bswap_shdr(struct elf_shdr *shdr, int shnum)
1040 int i;
1041 for (i = 0; i < shnum; ++i, ++shdr) {
1042 bswap32s(&shdr->sh_name);
1043 bswap32s(&shdr->sh_type);
1044 bswaptls(&shdr->sh_flags);
1045 bswaptls(&shdr->sh_addr);
1046 bswaptls(&shdr->sh_offset);
1047 bswaptls(&shdr->sh_size);
1048 bswap32s(&shdr->sh_link);
1049 bswap32s(&shdr->sh_info);
1050 bswaptls(&shdr->sh_addralign);
1051 bswaptls(&shdr->sh_entsize);
1055 static void bswap_sym(struct elf_sym *sym)
1057 bswap32s(&sym->st_name);
1058 bswaptls(&sym->st_value);
1059 bswaptls(&sym->st_size);
1060 bswap16s(&sym->st_shndx);
1062 #else
1063 static inline void bswap_ehdr(struct elfhdr *ehdr) { }
1064 static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
1065 static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
1066 static inline void bswap_sym(struct elf_sym *sym) { }
1067 #endif
1069 #ifdef USE_ELF_CORE_DUMP
1070 static int elf_core_dump(int, const CPUArchState *);
1071 #endif /* USE_ELF_CORE_DUMP */
1072 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias);
1074 /* Verify the portions of EHDR within E_IDENT for the target.
1075 This can be performed before bswapping the entire header. */
1076 static bool elf_check_ident(struct elfhdr *ehdr)
1078 return (ehdr->e_ident[EI_MAG0] == ELFMAG0
1079 && ehdr->e_ident[EI_MAG1] == ELFMAG1
1080 && ehdr->e_ident[EI_MAG2] == ELFMAG2
1081 && ehdr->e_ident[EI_MAG3] == ELFMAG3
1082 && ehdr->e_ident[EI_CLASS] == ELF_CLASS
1083 && ehdr->e_ident[EI_DATA] == ELF_DATA
1084 && ehdr->e_ident[EI_VERSION] == EV_CURRENT);
1087 /* Verify the portions of EHDR outside of E_IDENT for the target.
1088 This has to wait until after bswapping the header. */
1089 static bool elf_check_ehdr(struct elfhdr *ehdr)
1091 return (elf_check_arch(ehdr->e_machine)
1092 && ehdr->e_ehsize == sizeof(struct elfhdr)
1093 && ehdr->e_phentsize == sizeof(struct elf_phdr)
1094 && ehdr->e_shentsize == sizeof(struct elf_shdr)
1095 && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN));
/*
 * 'copy_elf_strings()' copies argument/envelope strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 *
 * p is the current (downward-growing) top-of-strings offset; returns
 * the new value of p, or 0 on failure.
 */
static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
                                  abi_ulong p)
{
    char *tmp, *tmp1, *pag = NULL;
    int len, offset = 0;

    if (!p) {
        return 0; /* bullet-proofing */
    }
    /* Copy strings last-to-first so they land in argv order growing
       down from p. */
    while (argc-- > 0) {
        tmp = argv[argc];
        if (!tmp) {
            fprintf(stderr, "VFS: argc is wrong");
            exit(-1);
        }
        tmp1 = tmp;
        while (*tmp++);   /* advance past the terminating NUL */
        len = tmp - tmp1; /* string length including the NUL */
        if (p < len) { /* this shouldn't happen - 128kB */
            return 0;
        }
        while (len) {
            --p; --tmp; --len;
            if (--offset < 0) {
                /* Crossed into a new page: locate or allocate the page
                   that now contains offset p. */
                offset = p % TARGET_PAGE_SIZE;
                pag = (char *)page[p/TARGET_PAGE_SIZE];
                if (!pag) {
                    pag = g_try_malloc0(TARGET_PAGE_SIZE);
                    page[p/TARGET_PAGE_SIZE] = pag;
                    if (!pag)
                        return 0;
                }
            }
            if (len == 0 || offset == 0) {
                /* Single byte at a page/string boundary. */
                *(pag + offset) = *tmp;
            }
            else {
                /* Bulk-copy as much of the string as fits in this page. */
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
            }
        }
    }
    return p;
}
/* Allocate the guest stack (at least MAX_ARG_PAGES plus a guard page),
   protect the guard page at the low end, copy the argument/environment
   pages collected by copy_elf_strings() into the top of the stack, and
   return the string offset P rebased to a guest stack address. */
static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
                                 struct image_info *info)
{
    abi_ulong stack_base, size, error, guard;
    int i;

    /* Create enough stack to hold everything.  If we don't use
       it for args, we'll use it for something else.  */
    size = guest_stack_size;
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE) {
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    }
    /* Guard must cover at least one host page to be enforceable. */
    guard = TARGET_PAGE_SIZE;
    if (guard < qemu_real_host_page_size) {
        guard = qemu_real_host_page_size;
    }

    error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (error == -1) {
        perror("mmap stack");
        exit(-1);
    }

    /* We reserve one extra page at the top of the stack as guard.  */
    target_mprotect(error, guard, PROT_NONE);

    info->stack_limit = error + guard;
    /* The argument pages occupy the top MAX_ARG_PAGES of the region. */
    stack_base = info->stack_limit + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    p += stack_base;

    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
        if (bprm->page[i]) {
            info->rss++;
            /* FIXME - check return value of memcpy_to_target() for failure */
            memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
            g_free(bprm->page[i]);
        }
        stack_base += TARGET_PAGE_SIZE;
    }
    return p;
}
/* Map and zero the bss.  We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  */
static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
{
    uintptr_t host_start, host_map_start, host_end;

    last_bss = TARGET_PAGE_ALIGN(last_bss);

    /* ??? There is confusion between qemu_real_host_page_size and
       qemu_host_page_size here and elsewhere in target_mmap, which
       may lead to the end of the data section mapping from the file
       not being mapped.  At least there was an explicit test and
       comment for that here, suggesting that "the file size must
       be known".  The comment probably pre-dates the introduction
       of the fstat system call in target_mmap which does in fact
       find out the size.  What isn't clear is if the workaround
       here is still actually needed.  For now, continue with it,
       but merge it with the "normal" mmap that would allocate the bss.  */

    host_start = (uintptr_t) g2h(elf_bss);
    host_end = (uintptr_t) g2h(last_bss);
    /* Round the start of the anonymous mapping up to a host page;
       the sub-page remainder below it is zeroed by hand at the end. */
    host_map_start = (host_start + qemu_real_host_page_size - 1);
    host_map_start &= -qemu_real_host_page_size;

    if (host_map_start < host_end) {
        void *p = mmap((void *)host_map_start, host_end - host_map_start,
                       prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            perror("cannot mmap brk");
            exit(-1);
        }

        /* Since we didn't use target_mmap, make sure to record
           the validity of the pages with qemu.  */
        page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot|PAGE_VALID);
    }

    /* Zero the partial page between the end of file data and the first
       host page of the anonymous bss mapping. */
    if (host_start < host_map_start) {
        memset((void *)host_start, 0, host_map_start - host_start);
    }
}
#ifdef CONFIG_USE_FDPIC
/* Push an elf32_fdpic_loadmap (version word + nsegs segment records,
   highest segment first) onto the guest stack at SP.  Records the map
   address in INFO and switches the personality to FDPIC.  Returns the
   new, lower stack pointer. */
static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp)
{
    uint16_t n;
    struct elf32_fdpic_loadseg *loadsegs = info->loadsegs;

    /* elf32_fdpic_loadseg: three 32-bit words per segment. */
    n = info->nsegs;
    while (n--) {
        sp -= 12;
        put_user_u32(loadsegs[n].addr, sp+0);
        put_user_u32(loadsegs[n].p_vaddr, sp+4);
        put_user_u32(loadsegs[n].p_memsz, sp+8);
    }

    /* elf32_fdpic_loadmap header */
    sp -= 4;
    put_user_u16(0, sp+0); /* version */
    put_user_u16(info->nsegs, sp+2); /* nsegs */

    info->personality = PER_LINUX_FDPIC;
    info->loadmap_addr = sp;

    return sp;
}
#endif
/* Build the initial stack image below P: optional FDPIC load maps,
   the platform string, an AT_RANDOM seed, the ELF auxiliary vector,
   and finally the argv/envp pointer arrays (via loader_build_argptr).
   Returns the final 16-byte-aligned guest stack pointer. */
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr *exec,
                                   struct image_info *info,
                                   struct image_info *interp_info)
{
    abi_ulong sp;
    abi_ulong sp_auxv;
    int size;
    int i;
    abi_ulong u_rand_bytes;
    uint8_t k_rand_bytes[16];
    abi_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(elf_addr_t);

    sp = p;

#ifdef CONFIG_USE_FDPIC
    /* Needs to be before we load the env/argc/... */
    if (elf_is_fdpic(exec)) {
        /* Need 4 byte alignment for these structs */
        sp &= ~3;
        sp = loader_build_fdpic_loadmap(info, sp);
        info->other_info = interp_info;
        if (interp_info) {
            interp_info->other_info = info;
            sp = loader_build_fdpic_loadmap(interp_info, sp);
        }
    }
#endif

    u_platform = 0;
    k_platform = ELF_PLATFORM;
    if (k_platform) {
        size_t len = strlen(k_platform) + 1;
        /* Keep the stack n-byte aligned after the copy. */
        sp -= (len + n - 1) & ~(n - 1);
        u_platform = sp;
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(sp, k_platform, len);
    }

    /*
     * Generate 16 random bytes for userspace PRNG seeding (not
     * cryptographically secure but it's not the aim of QEMU).
     */
    srand((unsigned int) time(NULL));
    for (i = 0; i < 16; i++) {
        k_rand_bytes[i] = rand();
    }
    sp -= 16;
    u_rand_bytes = sp;
    /* FIXME - check return value of memcpy_to_target() for failure */
    memcpy_to_target(sp, k_rand_bytes, 16);

    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp &~ (abi_ulong)15;
    /* Pre-compute the number of words the vector will occupy so the
       final SP (after all entries are pushed) lands on 16 bytes. */
    size = (DLINFO_ITEMS + 1) * 2;
    if (k_platform)
        size += 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
#endif
    size += envc + argc + 2;
    size += 1;  /* argc itself */
    size *= n;
    if (size & 15)
        sp -= 16 - (size & 15);

    /* This is correct because Linux defines
     * elf_addr_t as Elf32_Off / Elf64_Off
     */
#define NEW_AUX_ENT(id, val) do {               \
        sp -= n; put_user_ual(val, sp);         \
        sp -= n; put_user_ual(id, sp);          \
    } while(0)

    sp_auxv = sp;
    /* Entries are pushed downward, so AT_NULL goes first and ends up
       at the highest address, terminating the vector. */
    NEW_AUX_ENT (AT_NULL, 0);

    /* There must be exactly DLINFO_ITEMS entries here.  */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, info->entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
    NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes);

    if (k_platform)
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

    info->saved_auxv = sp;
    info->auxv_len = sp_auxv - sp;

    sp = loader_build_argptr(envc, argc, sp, p, 0);
    return sp;
}
#ifndef TARGET_HAS_GUEST_VALIDATE_BASE
/* Default policy for targets without their own validation hook:
   accept any candidate guest base address. */
bool guest_validate_base(unsigned long guest_base)
{
    return true;
}
#endif
static void probe_guest_base(const char *image_name,
                             abi_ulong loaddr, abi_ulong hiaddr)
{
    /* Probe for a suitable guest base address, if the user has not set
     * it explicitly, and set guest_base appropriately.
     * In case of error we will print a suitable message and exit.
     */
#if defined(CONFIG_USE_GUEST_BASE)
    const char *errmsg;
    if (!have_guest_base && !reserved_va) {
        unsigned long host_start, real_start, host_size;

        /* Round addresses to page boundaries.  */
        loaddr &= qemu_host_page_mask;
        hiaddr = HOST_PAGE_ALIGN(hiaddr);

        if (loaddr < mmap_min_addr) {
            host_start = HOST_PAGE_ALIGN(mmap_min_addr);
        } else {
            host_start = loaddr;
            /* The assignment narrows abi_ulong to unsigned long; the
               comparison detects truncation on hosts whose long is
               narrower than the guest address (e.g. 64-bit guest on a
               32-bit host). */
            if (host_start != loaddr) {
                errmsg = "Address overflow loading ELF binary";
                goto exit_errmsg;
            }
        }
        host_size = hiaddr - loaddr;
        while (1) {
            /* Do not use mmap_find_vma here because that is limited to the
               guest address space.  We are going to make the
               guest address space fit whatever we're given.  */
            real_start = (unsigned long)
                mmap((void *)host_start, host_size, PROT_NONE,
                     MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
            if (real_start == (unsigned long)-1) {
                goto exit_perror;
            }
            guest_base = real_start - loaddr;
            /* Accept only if the kernel honoured our hint and the target
               has no objection to the resulting base. */
            if ((real_start == host_start) &&
                guest_validate_base(guest_base)) {
                break;
            }
            /* That address didn't work.  Unmap and try a different one.
               The address the host picked because is typically right at
               the top of the host address space and leaves the guest with
               no usable address space.  Resort to a linear search.  We
               already compensated for mmap_min_addr, so this should not
               happen often.  Probably means we got unlucky and host
               address space randomization put a shared library somewhere
               inconvenient.  */
            munmap((void *)real_start, host_size);
            host_start += qemu_host_page_size;
            if (host_start == loaddr) {
                /* Theoretically possible if host doesn't have any suitably
                   aligned areas.  Normally the first mmap will fail.  */
                errmsg = "Unable to find space for application";
                goto exit_errmsg;
            }
        }
        qemu_log("Relocating guest address space from 0x"
                 TARGET_ABI_FMT_lx " to 0x%lx\n",
                 loaddr, real_start);
    }
    return;

exit_perror:
    errmsg = strerror(errno);
exit_errmsg:
    fprintf(stderr, "%s: %s\n", image_name, errmsg);
    exit(-1);
#endif
}
/* Load an ELF image into the address space.

   IMAGE_NAME is the filename of the image, to use in error messages.
   IMAGE_FD is the open file descriptor for the image.

   BPRM_BUF is a copy of the beginning of the file; this of course
   contains the elf file header at offset 0.  It is assumed that this
   buffer is sufficiently aligned to present no problems to the host
   in accessing data at aligned offsets within the buffer.

   On return: INFO values will be filled in, as necessary or available.
   On success the descriptor IMAGE_FD is closed; any failure prints a
   message and exits the process.  */

static void load_elf_image(const char *image_name, int image_fd,
                           struct image_info *info, char **pinterp_name,
                           char bprm_buf[BPRM_BUF_SIZE])
{
    struct elfhdr *ehdr = (struct elfhdr *)bprm_buf;
    struct elf_phdr *phdr;
    abi_ulong load_addr, load_bias, loaddr, hiaddr, error;
    int i, retval;
    const char *errmsg;

    /* First of all, some simple consistency checks */
    errmsg = "Invalid ELF image for this architecture";
    if (!elf_check_ident(ehdr)) {
        goto exit_errmsg;
    }
    bswap_ehdr(ehdr);
    if (!elf_check_ehdr(ehdr)) {
        goto exit_errmsg;
    }

    /* Get the program headers: from BPRM_BUF when they fit, otherwise
       read them from the file onto the stack. */
    i = ehdr->e_phnum * sizeof(struct elf_phdr);
    if (ehdr->e_phoff + i <= BPRM_BUF_SIZE) {
        phdr = (struct elf_phdr *)(bprm_buf + ehdr->e_phoff);
    } else {
        phdr = (struct elf_phdr *) alloca(i);
        retval = pread(image_fd, phdr, i, ehdr->e_phoff);
        if (retval != i) {
            goto exit_read;
        }
    }
    bswap_phdr(phdr, ehdr->e_phnum);

#ifdef CONFIG_USE_FDPIC
    info->nsegs = 0;
    info->pt_dynamic_addr = 0;
#endif

    /* Find the maximum size of the image and allocate an appropriate
       amount of memory to handle that.  */
    loaddr = -1, hiaddr = 0;
    for (i = 0; i < ehdr->e_phnum; ++i) {
        if (phdr[i].p_type == PT_LOAD) {
            abi_ulong a = phdr[i].p_vaddr;
            if (a < loaddr) {
                loaddr = a;
            }
            a += phdr[i].p_memsz;
            if (a > hiaddr) {
                hiaddr = a;
            }
#ifdef CONFIG_USE_FDPIC
            ++info->nsegs;
#endif
        }
    }

    load_addr = loaddr;
    if (ehdr->e_type == ET_DYN) {
        /* The image indicates that it can be loaded anywhere.  Find a
           location that can hold the memory space required.  If the
           image is pre-linked, LOADDR will be non-zero.  Since we do
           not supply MAP_FIXED here we'll use that address if and
           only if it remains available.  */
        load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
                                MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                                -1, 0);
        if (load_addr == -1) {
            goto exit_perror;
        }
    } else if (pinterp_name != NULL) {
        /* This is the main executable.  Make sure that the low
           address does not conflict with MMAP_MIN_ADDR or the
           QEMU application itself.  */
        probe_guest_base(image_name, loaddr, hiaddr);
    }
    load_bias = load_addr - loaddr;

#ifdef CONFIG_USE_FDPIC
    {
        struct elf32_fdpic_loadseg *loadsegs = info->loadsegs =
            g_malloc(sizeof(*loadsegs) * info->nsegs);

        for (i = 0; i < ehdr->e_phnum; ++i) {
            switch (phdr[i].p_type) {
            case PT_DYNAMIC:
                info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias;
                break;
            case PT_LOAD:
                loadsegs->addr = phdr[i].p_vaddr + load_bias;
                loadsegs->p_vaddr = phdr[i].p_vaddr;
                loadsegs->p_memsz = phdr[i].p_memsz;
                ++loadsegs;
                break;
            }
        }
    }
#endif

    info->load_bias = load_bias;
    info->load_addr = load_addr;
    info->entry = ehdr->e_entry + load_bias;
    info->start_code = -1;
    info->end_code = 0;
    info->start_data = -1;
    info->end_data = 0;
    info->brk = 0;
    info->elf_flags = ehdr->e_flags;

    /* Map each PT_LOAD segment and pick up PT_INTERP on the way. */
    for (i = 0; i < ehdr->e_phnum; i++) {
        struct elf_phdr *eppnt = phdr + i;
        if (eppnt->p_type == PT_LOAD) {
            abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em;
            int elf_prot = 0;

            if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;

            vaddr = load_bias + eppnt->p_vaddr;
            vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr);
            vaddr_ps = TARGET_ELF_PAGESTART(vaddr);

            /* Map from the page-aligned start; the file offset is
               adjusted by the same sub-page amount. */
            error = target_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po,
                                elf_prot, MAP_PRIVATE | MAP_FIXED,
                                image_fd, eppnt->p_offset - vaddr_po);
            if (error == -1) {
                goto exit_perror;
            }

            vaddr_ef = vaddr + eppnt->p_filesz;
            vaddr_em = vaddr + eppnt->p_memsz;

            /* If the load segment requests extra zeros (e.g. bss), map it. */
            if (vaddr_ef < vaddr_em) {
                zero_bss(vaddr_ef, vaddr_em, elf_prot);
            }

            /* Find the full program boundaries.  */
            if (elf_prot & PROT_EXEC) {
                if (vaddr < info->start_code) {
                    info->start_code = vaddr;
                }
                if (vaddr_ef > info->end_code) {
                    info->end_code = vaddr_ef;
                }
            }
            if (elf_prot & PROT_WRITE) {
                if (vaddr < info->start_data) {
                    info->start_data = vaddr;
                }
                if (vaddr_ef > info->end_data) {
                    info->end_data = vaddr_ef;
                }
                if (vaddr_em > info->brk) {
                    info->brk = vaddr_em;
                }
            }
        } else if (eppnt->p_type == PT_INTERP && pinterp_name) {
            char *interp_name;

            if (*pinterp_name) {
                errmsg = "Multiple PT_INTERP entries";
                goto exit_errmsg;
            }
            interp_name = malloc(eppnt->p_filesz);
            if (!interp_name) {
                goto exit_perror;
            }

            if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) {
                memcpy(interp_name, bprm_buf + eppnt->p_offset,
                       eppnt->p_filesz);
            } else {
                retval = pread(image_fd, interp_name, eppnt->p_filesz,
                               eppnt->p_offset);
                if (retval != eppnt->p_filesz) {
                    goto exit_perror;
                }
            }
            if (interp_name[eppnt->p_filesz - 1] != 0) {
                errmsg = "Invalid PT_INTERP entry";
                goto exit_errmsg;
            }
            /* Ownership of interp_name passes to the caller. */
            *pinterp_name = interp_name;
        }
    }

    /* An image with no writable segments: fold the data boundaries
       onto the end of the code. */
    if (info->end_data == 0) {
        info->start_data = info->end_code;
        info->end_data = info->end_code;
        info->brk = info->end_code;
    }

    if (qemu_log_enabled()) {
        load_symbols(ehdr, image_fd, load_bias);
    }

    close(image_fd);
    return;

 exit_read:
    if (retval >= 0) {
        errmsg = "Incomplete read of file header";
        goto exit_errmsg;
    }
 exit_perror:
    errmsg = strerror(errno);
 exit_errmsg:
    fprintf(stderr, "%s: %s\n", image_name, errmsg);
    exit(-1);
}
/* Open and load the ELF interpreter FILENAME (resolved through path())
   into INFO, re-using BPRM_BUF for the file header.  The descriptor is
   closed by load_elf_image() on success; any failure exits. */
static void load_elf_interp(const char *filename, struct image_info *info,
                            char bprm_buf[BPRM_BUF_SIZE])
{
    int fd, retval;

    fd = open(path(filename), O_RDONLY);
    if (fd < 0) {
        goto exit_perror;
    }

    retval = read(fd, bprm_buf, BPRM_BUF_SIZE);
    if (retval < 0) {
        goto exit_perror;
    }
    /* Zero-pad a short read so stale main-binary data is not parsed. */
    if (retval < BPRM_BUF_SIZE) {
        memset(bprm_buf + retval, 0, BPRM_BUF_SIZE - retval);
    }

    load_elf_image(filename, fd, info, NULL, bprm_buf);
    return;

 exit_perror:
    fprintf(stderr, "%s: %s\n", filename, strerror(errno));
    exit(-1);
}
1711 static int symfind(const void *s0, const void *s1)
1713 target_ulong addr = *(target_ulong *)s0;
1714 struct elf_sym *sym = (struct elf_sym *)s1;
1715 int result = 0;
1716 if (addr < sym->st_value) {
1717 result = -1;
1718 } else if (addr >= sym->st_value + sym->st_size) {
1719 result = 1;
1721 return result;
/* Map a guest address to the name of the function symbol that covers
   it, via binary search over the sorted table built by load_symbols().
   Returns "" when no symbol matches. */
static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
{
#if ELF_CLASS == ELFCLASS32
    struct elf_sym *syms = s->disas_symtab.elf32;
#else
    struct elf_sym *syms = s->disas_symtab.elf64;
#endif

    // binary search
    struct elf_sym *sym;

    sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
    if (sym != NULL) {
        /* st_name is an offset into the string table. */
        return s->disas_strtab + sym->st_name;
    }

    return "";
}
1743 /* FIXME: This should use elf_ops.h */
1744 static int symcmp(const void *s0, const void *s1)
1746 struct elf_sym *sym0 = (struct elf_sym *)s0;
1747 struct elf_sym *sym1 = (struct elf_sym *)s1;
1748 return (sym0->st_value < sym1->st_value)
1749 ? -1
1750 : ((sym0->st_value > sym1->st_value) ? 1 : 0);
/* Best attempt to load symbols from this ELF object.
   Reads the section headers, finds the first SHT_SYMTAB and its linked
   string table, keeps only defined STT_FUNC symbols (rebased by
   LOAD_BIAS), sorts them by address, and registers the result on the
   global syminfos list for the disassembler.  All failures are silent:
   symbols are optional. */
static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias)
{
    int i, shnum, nsyms, sym_idx = 0, str_idx = 0;
    struct elf_shdr *shdr;
    char *strings = NULL;
    struct syminfo *s = NULL;
    struct elf_sym *new_syms, *syms = NULL;

    shnum = hdr->e_shnum;
    i = shnum * sizeof(struct elf_shdr);
    shdr = (struct elf_shdr *)alloca(i);
    if (pread(fd, shdr, i, hdr->e_shoff) != i) {
        return;
    }

    bswap_shdr(shdr, shnum);
    for (i = 0; i < shnum; ++i) {
        if (shdr[i].sh_type == SHT_SYMTAB) {
            sym_idx = i;
            /* sh_link of a symtab section names its string table. */
            str_idx = shdr[i].sh_link;
            goto found;
        }
    }

    /* There will be no symbol table if the file was stripped.  */
    return;

 found:
    /* Now know where the strtab and symtab are.  Snarf them.  */
    s = malloc(sizeof(*s));
    if (!s) {
        goto give_up;
    }

    i = shdr[str_idx].sh_size;
    s->disas_strtab = strings = malloc(i);
    if (!strings || pread(fd, strings, i, shdr[str_idx].sh_offset) != i) {
        goto give_up;
    }

    i = shdr[sym_idx].sh_size;
    syms = malloc(i);
    if (!syms || pread(fd, syms, i, shdr[sym_idx].sh_offset) != i) {
        goto give_up;
    }

    nsyms = i / sizeof(struct elf_sym);
    /* Compact the array in place: unwanted entries are overwritten by
       the (unswapped) last element, so I only advances on a keeper. */
    for (i = 0; i < nsyms; ) {
        bswap_sym(syms + i);
        /* Throw away entries which we do not need.  */
        if (syms[i].st_shndx == SHN_UNDEF
            || syms[i].st_shndx >= SHN_LORESERVE
            || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
            if (i < --nsyms) {
                syms[i] = syms[nsyms];
            }
        } else {
#if defined(TARGET_ARM) || defined (TARGET_MIPS)
            /* The bottom address bit marks a Thumb or MIPS16 symbol.  */
            syms[i].st_value &= ~(target_ulong)1;
#endif
            syms[i].st_value += load_bias;
            i++;
        }
    }

    /* No "useful" symbol.  */
    if (nsyms == 0) {
        goto give_up;
    }

    /* Attempt to free the storage associated with the local symbols
       that we threw away.  Whether or not this has any effect on the
       memory allocation depends on the malloc implementation and how
       many symbols we managed to discard.  */
    new_syms = realloc(syms, nsyms * sizeof(*syms));
    if (new_syms == NULL) {
        goto give_up;
    }
    syms = new_syms;

    qsort(syms, nsyms, sizeof(*syms), symcmp);

    s->disas_num_syms = nsyms;
#if ELF_CLASS == ELFCLASS32
    s->disas_symtab.elf32 = syms;
#else
    s->disas_symtab.elf64 = syms;
#endif
    s->lookup_symbol = lookup_symbolxx;
    s->next = syminfos;
    syminfos = s;

    return;

 give_up:
    free(s);
    free(strings);
    free(syms);
}
/* Top-level ELF loader: load the main image (and its PT_INTERP
   interpreter, if any), copy argument/environment strings onto the new
   guest stack, and build the initial stack frame.  Returns 0; any
   loader failure exits the process.  REGS is unused here — entry-point
   state is set up by the caller from INFO. */
int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
                    struct image_info * info)
{
    struct image_info interp_info;
    struct elfhdr elf_ex;
    char *elf_interpreter = NULL;

    info->start_mmap = (abi_ulong)ELF_START_MMAP;
    info->mmap = 0;
    info->rss = 0;

    load_elf_image(bprm->filename, bprm->fd, info,
                   &elf_interpreter, bprm->buf);

    /* ??? We need a copy of the elf header for passing to create_elf_tables.
       If we do nothing, we'll have overwritten this when we re-use bprm->buf
       when we load the interpreter.  */
    elf_ex = *(struct elfhdr *)bprm->buf;

    /* Strings are pushed top-down: filename, then envp, then argv. */
    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
    bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);
    if (!bprm->p) {
        fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG));
        exit(-1);
    }

    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */
    bprm->p = setup_arg_pages(bprm->p, bprm, info);

    if (elf_interpreter) {
        load_elf_interp(elf_interpreter, &interp_info, bprm->buf);

        /* If the program interpreter is one of these two, then assume
           an iBCS2 image.  Otherwise assume a native linux image.  */

        if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0
            || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
            info->personality = PER_SVR4;

            /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
               and some applications "depend" upon this behavior.  Since
               we do not have the power to recompile these, we emulate
               the SVr4 behavior.  Sigh.  */
            target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                        MAP_FIXED | MAP_PRIVATE, -1, 0);
        }
    }

    bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex,
                                info, (elf_interpreter ? &interp_info : NULL));
    info->start_stack = bprm->p;

    /* If we have an interpreter, set that as the program's entry point.
       Copy the load_bias as well, to help PPC64 interpret the entry
       point as a function descriptor.  Do this after creating elf tables
       so that we copy the original program entry point into the AUXV.  */
    if (elf_interpreter) {
        info->load_bias = interp_info.load_bias;
        info->entry = interp_info.entry;
        free(elf_interpreter);
    }

#ifdef USE_ELF_CORE_DUMP
    bprm->core_dump = &elf_core_dump;
#endif

    return 0;
}
1926 #ifdef USE_ELF_CORE_DUMP
1928 * Definitions to generate Intel SVR4-like core files.
1929 * These mostly have the same names as the SVR4 types with "target_elf_"
1930 * tacked on the front to prevent clashes with linux definitions,
1931 * and the typedef forms have been avoided. This is mostly like
1932 * the SVR4 structure, but more Linuxy, with things that Linux does
1933 * not support and which gdb doesn't really use excluded.
1935 * Fields we don't dump (their contents is zero) in linux-user qemu
1936 * are marked with XXX.
1938 * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
1940 * Porting ELF coredump for target is (quite) simple process. First you
1941 * define USE_ELF_CORE_DUMP in target ELF code (where init_thread() for
1942 * the target resides):
1944 * #define USE_ELF_CORE_DUMP
1946 * Next you define type of register set used for dumping. ELF specification
1947 * says that it needs to be array of elf_greg_t that has size of ELF_NREG.
1949 * typedef <target_regtype> target_elf_greg_t;
1950 * #define ELF_NREG <number of registers>
 * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1953 * Last step is to implement target specific function that copies registers
1954 * from given cpu into just specified register set. Prototype is:
 * static void elf_core_copy_regs(target_elf_gregset_t *regs,
1957 * const CPUArchState *env);
1959 * Parameters:
1960 * regs - copy register values into here (allocated and zeroed by caller)
1961 * env - copy registers from here
1963 * Example for ARM target is provided in this file.
/* An ELF note in memory — staged form of one core-file note record.
   The *_rounded fields presumably hold sizes padded to note alignment;
   confirm against fill_note()/note_size(). */
struct memelfnote {
    const char *name;         /* note name, e.g. "CORE" */
    size_t     namesz;        /* size of name incl. terminator */
    size_t     namesz_rounded;
    int        type;          /* note type: NT_PRSTATUS, NT_PRPSINFO, ... */
    size_t     datasz;        /* payload size */
    size_t     datasz_rounded;
    void       *data;         /* payload; not owned by this struct */
    size_t     notesz;        /* total serialized size of the note */
};
/* Signal information embedded in an NT_PRSTATUS note, in target layout. */
struct target_elf_siginfo {
    target_int  si_signo; /* signal number */
    target_int  si_code;  /* extra code */
    target_int  si_errno; /* errno */
};
/* Per-thread process status (NT_PRSTATUS note body), SVR4-style layout
   in target byte order.  Fields marked XXX are left zero by qemu. */
struct target_elf_prstatus {
    struct target_elf_siginfo pr_info;      /* Info associated with signal */
    target_short       pr_cursig;    /* Current signal */
    target_ulong       pr_sigpend;   /* XXX */
    target_ulong       pr_sighold;   /* XXX */
    target_pid_t       pr_pid;
    target_pid_t       pr_ppid;
    target_pid_t       pr_pgrp;
    target_pid_t       pr_sid;
    struct target_timeval pr_utime;  /* XXX User time */
    struct target_timeval pr_stime;  /* XXX System time */
    struct target_timeval pr_cutime; /* XXX Cumulative user time */
    struct target_timeval pr_cstime; /* XXX Cumulative system time */
    target_elf_gregset_t pr_reg;     /* GP registers */
    target_int         pr_fpvalid;   /* XXX */
};
#define ELF_PRARGSZ     (80) /* Number of chars for args */

/* Process-wide info (NT_PRPSINFO note body) in target layout. */
struct target_elf_prpsinfo {
    char         pr_state;       /* numeric process state */
    char         pr_sname;       /* char for pr_state */
    char         pr_zomb;        /* zombie */
    char         pr_nice;        /* nice val */
    target_ulong pr_flag;        /* flags */
    target_uid_t pr_uid;
    target_gid_t pr_gid;
    target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
    /* Lots missing */
    char    pr_fname[16];           /* filename of executable */
    char    pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status {
    QTAILQ_ENTRY(elf_thread_status)  ets_link;
    struct target_elf_prstatus prstatus;   /* NT_PRSTATUS */
#if 0
    elf_fpregset_t fpu;             /* NT_PRFPREG */
    struct task_struct *thread;
    elf_fpxregset_t xfpu;           /* ELF_CORE_XFPREG_TYPE */
#endif
    struct memelfnote notes[1];     /* currently only the PRSTATUS note */
    int num_notes;
};
/* Aggregated state for one core dump: all notes plus per-thread status. */
struct elf_note_info {
    struct memelfnote   *notes;
    struct target_elf_prstatus *prstatus;  /* NT_PRSTATUS */
    struct target_elf_prpsinfo *psinfo;    /* NT_PRPSINFO */

    QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list;
#if 0
    /*
     * Current version of ELF coredump doesn't support
     * dumping fp regs etc.
     */
    elf_fpregset_t *fpu;
    elf_fpxregset_t *xfpu;
    int thread_status_size;
#endif
    int notes_size;   /* total serialized size of all notes */
    int numnote;      /* number of entries in notes[] */
};
/* One guest memory mapping recorded for core dumping. */
struct vm_area_struct {
    abi_ulong   vma_start;  /* start vaddr of memory region */
    abi_ulong   vma_end;    /* end vaddr of memory region */
    abi_ulong   vma_flags;  /* protection etc. flags for the region */
    QTAILQ_ENTRY(vm_area_struct) vma_link;
};
/* List of all guest mappings, filled in by vma_walker(). */
struct mm_struct {
    QTAILQ_HEAD(, vm_area_struct) mm_mmap;
    int mm_count;           /* number of mappings */
};
2061 static struct mm_struct *vma_init(void);
2062 static void vma_delete(struct mm_struct *);
2063 static int vma_add_mapping(struct mm_struct *, abi_ulong,
2064 abi_ulong, abi_ulong);
2065 static int vma_get_mapping_count(const struct mm_struct *);
2066 static struct vm_area_struct *vma_first(const struct mm_struct *);
2067 static struct vm_area_struct *vma_next(struct vm_area_struct *);
2068 static abi_ulong vma_dump_size(const struct vm_area_struct *);
2069 static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
2070 unsigned long flags);
2072 static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
2073 static void fill_note(struct memelfnote *, const char *, int,
2074 unsigned int, void *);
2075 static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
2076 static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
2077 static void fill_auxv_note(struct memelfnote *, const TaskState *);
2078 static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
2079 static size_t note_size(const struct memelfnote *);
2080 static void free_note_info(struct elf_note_info *);
2081 static int fill_note_info(struct elf_note_info *, long, const CPUArchState *);
2082 static void fill_thread_info(struct elf_note_info *, const CPUArchState *);
2083 static int core_dump_filename(const TaskState *, char *, size_t);
2085 static int dump_write(int, const void *, size_t);
2086 static int write_note(struct memelfnote *, int);
2087 static int write_note_info(struct elf_note_info *, int);
2089 #ifdef BSWAP_NEEDED
2090 static void bswap_prstatus(struct target_elf_prstatus *prstatus)
2092 prstatus->pr_info.si_signo = tswapl(prstatus->pr_info.si_signo);
2093 prstatus->pr_info.si_code = tswapl(prstatus->pr_info.si_code);
2094 prstatus->pr_info.si_errno = tswapl(prstatus->pr_info.si_errno);
2095 prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
2096 prstatus->pr_sigpend = tswapl(prstatus->pr_sigpend);
2097 prstatus->pr_sighold = tswapl(prstatus->pr_sighold);
2098 prstatus->pr_pid = tswap32(prstatus->pr_pid);
2099 prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
2100 prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
2101 prstatus->pr_sid = tswap32(prstatus->pr_sid);
2102 /* cpu times are not filled, so we skip them */
2103 /* regs should be in correct format already */
2104 prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
/* Byte-swap an NT_PRPSINFO note body into target byte order.  The char
   fields and strings need no swapping.
   NOTE(review): tswap16 assumes target_uid_t/target_gid_t are 16-bit —
   confirm this holds for every target that enables core dumps. */
static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
{
    psinfo->pr_flag = tswapl(psinfo->pr_flag);
    psinfo->pr_uid = tswap16(psinfo->pr_uid);
    psinfo->pr_gid = tswap16(psinfo->pr_gid);
    psinfo->pr_pid = tswap32(psinfo->pr_pid);
    psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
    psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
    psinfo->pr_sid = tswap32(psinfo->pr_sid);
}
/* Byte-swap the fixed header of an ELF note (name/desc sizes and type). */
static void bswap_note(struct elf_note *en)
{
    bswap32s(&en->n_namesz);
    bswap32s(&en->n_descsz);
    bswap32s(&en->n_type);
}
#else
/* Host and target share byte order: core-dump swap helpers are no-ops. */
static inline void bswap_prstatus(struct target_elf_prstatus *p) { }
static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {}
static inline void bswap_note(struct elf_note *en) { }
#endif /* BSWAP_NEEDED */
/*
 * Minimal support for Linux memory regions.  These are needed to
 * determine exactly which memory belongs to the emulated process.
 * No locks are needed here, as long as the thread that received the
 * signal is stopped.
 */
2137 static struct mm_struct *vma_init(void)
2139 struct mm_struct *mm;
2141 if ((mm = g_malloc(sizeof (*mm))) == NULL)
2142 return (NULL);
2144 mm->mm_count = 0;
2145 QTAILQ_INIT(&mm->mm_mmap);
2147 return (mm);
2150 static void vma_delete(struct mm_struct *mm)
2152 struct vm_area_struct *vma;
2154 while ((vma = vma_first(mm)) != NULL) {
2155 QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
2156 g_free(vma);
2158 g_free(mm);
2161 static int vma_add_mapping(struct mm_struct *mm, abi_ulong start,
2162 abi_ulong end, abi_ulong flags)
2164 struct vm_area_struct *vma;
2166 if ((vma = g_malloc0(sizeof (*vma))) == NULL)
2167 return (-1);
2169 vma->vma_start = start;
2170 vma->vma_end = end;
2171 vma->vma_flags = flags;
2173 QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
2174 mm->mm_count++;
2176 return (0);
2179 static struct vm_area_struct *vma_first(const struct mm_struct *mm)
2181 return (QTAILQ_FIRST(&mm->mm_mmap));
2184 static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
2186 return (QTAILQ_NEXT(vma, vma_link));
2189 static int vma_get_mapping_count(const struct mm_struct *mm)
2191 return (mm->mm_count);
/*
 * Calculate file (dump) size of given memory region.
 */
2197 static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
2199 /* if we cannot even read the first page, skip it */
2200 if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
2201 return (0);
2204 * Usually we don't dump executable pages as they contain
2205 * non-writable code that debugger can read directly from
2206 * target library etc. However, thread stacks are marked
2207 * also executable so we read in first page of given region
2208 * and check whether it contains elf header. If there is
2209 * no elf header, we dump it.
2211 if (vma->vma_flags & PROT_EXEC) {
2212 char page[TARGET_PAGE_SIZE];
2214 copy_from_user(page, vma->vma_start, sizeof (page));
2215 if ((page[EI_MAG0] == ELFMAG0) &&
2216 (page[EI_MAG1] == ELFMAG1) &&
2217 (page[EI_MAG2] == ELFMAG2) &&
2218 (page[EI_MAG3] == ELFMAG3)) {
2220 * Mappings are possibly from ELF binary. Don't dump
2221 * them.
2223 return (0);
2227 return (vma->vma_end - vma->vma_start);
2230 static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
2231 unsigned long flags)
2233 struct mm_struct *mm = (struct mm_struct *)priv;
2235 vma_add_mapping(mm, start, end, flags);
2236 return (0);
2239 static void fill_note(struct memelfnote *note, const char *name, int type,
2240 unsigned int sz, void *data)
2242 unsigned int namesz;
2244 namesz = strlen(name) + 1;
2245 note->name = name;
2246 note->namesz = namesz;
2247 note->namesz_rounded = roundup(namesz, sizeof (int32_t));
2248 note->type = type;
2249 note->datasz = sz;
2250 note->datasz_rounded = roundup(sz, sizeof (int32_t));
2252 note->data = data;
2255 * We calculate rounded up note size here as specified by
2256 * ELF document.
2258 note->notesz = sizeof (struct elf_note) +
2259 note->namesz_rounded + note->datasz_rounded;
2262 static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
2263 uint32_t flags)
2265 (void) memset(elf, 0, sizeof(*elf));
2267 (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
2268 elf->e_ident[EI_CLASS] = ELF_CLASS;
2269 elf->e_ident[EI_DATA] = ELF_DATA;
2270 elf->e_ident[EI_VERSION] = EV_CURRENT;
2271 elf->e_ident[EI_OSABI] = ELF_OSABI;
2273 elf->e_type = ET_CORE;
2274 elf->e_machine = machine;
2275 elf->e_version = EV_CURRENT;
2276 elf->e_phoff = sizeof(struct elfhdr);
2277 elf->e_flags = flags;
2278 elf->e_ehsize = sizeof(struct elfhdr);
2279 elf->e_phentsize = sizeof(struct elf_phdr);
2280 elf->e_phnum = segs;
2282 bswap_ehdr(elf);
2285 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
2287 phdr->p_type = PT_NOTE;
2288 phdr->p_offset = offset;
2289 phdr->p_vaddr = 0;
2290 phdr->p_paddr = 0;
2291 phdr->p_filesz = sz;
2292 phdr->p_memsz = 0;
2293 phdr->p_flags = 0;
2294 phdr->p_align = 0;
2296 bswap_phdr(phdr, 1);
2299 static size_t note_size(const struct memelfnote *note)
2301 return (note->notesz);
2304 static void fill_prstatus(struct target_elf_prstatus *prstatus,
2305 const TaskState *ts, int signr)
2307 (void) memset(prstatus, 0, sizeof (*prstatus));
2308 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
2309 prstatus->pr_pid = ts->ts_tid;
2310 prstatus->pr_ppid = getppid();
2311 prstatus->pr_pgrp = getpgrp();
2312 prstatus->pr_sid = getsid(0);
2314 bswap_prstatus(prstatus);
2317 static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
2319 char *filename, *base_filename;
2320 unsigned int i, len;
2322 (void) memset(psinfo, 0, sizeof (*psinfo));
2324 len = ts->info->arg_end - ts->info->arg_start;
2325 if (len >= ELF_PRARGSZ)
2326 len = ELF_PRARGSZ - 1;
2327 if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
2328 return -EFAULT;
2329 for (i = 0; i < len; i++)
2330 if (psinfo->pr_psargs[i] == 0)
2331 psinfo->pr_psargs[i] = ' ';
2332 psinfo->pr_psargs[len] = 0;
2334 psinfo->pr_pid = getpid();
2335 psinfo->pr_ppid = getppid();
2336 psinfo->pr_pgrp = getpgrp();
2337 psinfo->pr_sid = getsid(0);
2338 psinfo->pr_uid = getuid();
2339 psinfo->pr_gid = getgid();
2341 filename = strdup(ts->bprm->filename);
2342 base_filename = strdup(basename(filename));
2343 (void) strncpy(psinfo->pr_fname, base_filename,
2344 sizeof(psinfo->pr_fname));
2345 free(base_filename);
2346 free(filename);
2348 bswap_psinfo(psinfo);
2349 return (0);
2352 static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
2354 elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
2355 elf_addr_t orig_auxv = auxv;
2356 void *ptr;
2357 int len = ts->info->auxv_len;
2360 * Auxiliary vector is stored in target process stack. It contains
2361 * {type, value} pairs that we need to dump into note. This is not
2362 * strictly necessary but we do it here for sake of completeness.
2365 /* read in whole auxv vector and copy it to memelfnote */
2366 ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
2367 if (ptr != NULL) {
2368 fill_note(note, "CORE", NT_AUXV, len, ptr);
2369 unlock_user(ptr, auxv, len);
/*
 * Constructs the name of the coredump file.  We have the following
 * convention for the name:
 *   qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
 *
 * Returns 0 in case of success, -1 otherwise (errno is set).
 */
2380 static int core_dump_filename(const TaskState *ts, char *buf,
2381 size_t bufsize)
2383 char timestamp[64];
2384 char *filename = NULL;
2385 char *base_filename = NULL;
2386 struct timeval tv;
2387 struct tm tm;
2389 assert(bufsize >= PATH_MAX);
2391 if (gettimeofday(&tv, NULL) < 0) {
2392 (void) fprintf(stderr, "unable to get current timestamp: %s",
2393 strerror(errno));
2394 return (-1);
2397 filename = strdup(ts->bprm->filename);
2398 base_filename = strdup(basename(filename));
2399 (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S",
2400 localtime_r(&tv.tv_sec, &tm));
2401 (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core",
2402 base_filename, timestamp, (int)getpid());
2403 free(base_filename);
2404 free(filename);
2406 return (0);
2409 static int dump_write(int fd, const void *ptr, size_t size)
2411 const char *bufp = (const char *)ptr;
2412 ssize_t bytes_written, bytes_left;
2413 struct rlimit dumpsize;
2414 off_t pos;
2416 bytes_written = 0;
2417 getrlimit(RLIMIT_CORE, &dumpsize);
2418 if ((pos = lseek(fd, 0, SEEK_CUR))==-1) {
2419 if (errno == ESPIPE) { /* not a seekable stream */
2420 bytes_left = size;
2421 } else {
2422 return pos;
2424 } else {
2425 if (dumpsize.rlim_cur <= pos) {
2426 return -1;
2427 } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
2428 bytes_left = size;
2429 } else {
2430 size_t limit_left=dumpsize.rlim_cur - pos;
2431 bytes_left = limit_left >= size ? size : limit_left ;
2436 * In normal conditions, single write(2) should do but
2437 * in case of socket etc. this mechanism is more portable.
2439 do {
2440 bytes_written = write(fd, bufp, bytes_left);
2441 if (bytes_written < 0) {
2442 if (errno == EINTR)
2443 continue;
2444 return (-1);
2445 } else if (bytes_written == 0) { /* eof */
2446 return (-1);
2448 bufp += bytes_written;
2449 bytes_left -= bytes_written;
2450 } while (bytes_left > 0);
2452 return (0);
2455 static int write_note(struct memelfnote *men, int fd)
2457 struct elf_note en;
2459 en.n_namesz = men->namesz;
2460 en.n_type = men->type;
2461 en.n_descsz = men->datasz;
2463 bswap_note(&en);
2465 if (dump_write(fd, &en, sizeof(en)) != 0)
2466 return (-1);
2467 if (dump_write(fd, men->name, men->namesz_rounded) != 0)
2468 return (-1);
2469 if (dump_write(fd, men->data, men->datasz_rounded) != 0)
2470 return (-1);
2472 return (0);
2475 static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env)
2477 TaskState *ts = (TaskState *)env->opaque;
2478 struct elf_thread_status *ets;
2480 ets = g_malloc0(sizeof (*ets));
2481 ets->num_notes = 1; /* only prstatus is dumped */
2482 fill_prstatus(&ets->prstatus, ts, 0);
2483 elf_core_copy_regs(&ets->prstatus.pr_reg, env);
2484 fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
2485 &ets->prstatus);
2487 QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
2489 info->notes_size += note_size(&ets->notes[0]);
2492 static int fill_note_info(struct elf_note_info *info,
2493 long signr, const CPUArchState *env)
2495 #define NUMNOTES 3
2496 CPUArchState *cpu = NULL;
2497 TaskState *ts = (TaskState *)env->opaque;
2498 int i;
2500 (void) memset(info, 0, sizeof (*info));
2502 QTAILQ_INIT(&info->thread_list);
2504 info->notes = g_malloc0(NUMNOTES * sizeof (struct memelfnote));
2505 if (info->notes == NULL)
2506 return (-ENOMEM);
2507 info->prstatus = g_malloc0(sizeof (*info->prstatus));
2508 if (info->prstatus == NULL)
2509 return (-ENOMEM);
2510 info->psinfo = g_malloc0(sizeof (*info->psinfo));
2511 if (info->prstatus == NULL)
2512 return (-ENOMEM);
2515 * First fill in status (and registers) of current thread
2516 * including process info & aux vector.
2518 fill_prstatus(info->prstatus, ts, signr);
2519 elf_core_copy_regs(&info->prstatus->pr_reg, env);
2520 fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
2521 sizeof (*info->prstatus), info->prstatus);
2522 fill_psinfo(info->psinfo, ts);
2523 fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
2524 sizeof (*info->psinfo), info->psinfo);
2525 fill_auxv_note(&info->notes[2], ts);
2526 info->numnote = 3;
2528 info->notes_size = 0;
2529 for (i = 0; i < info->numnote; i++)
2530 info->notes_size += note_size(&info->notes[i]);
2532 /* read and fill status of all threads */
2533 cpu_list_lock();
2534 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2535 if (cpu == thread_env)
2536 continue;
2537 fill_thread_info(info, cpu);
2539 cpu_list_unlock();
2541 return (0);
2544 static void free_note_info(struct elf_note_info *info)
2546 struct elf_thread_status *ets;
2548 while (!QTAILQ_EMPTY(&info->thread_list)) {
2549 ets = QTAILQ_FIRST(&info->thread_list);
2550 QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
2551 g_free(ets);
2554 g_free(info->prstatus);
2555 g_free(info->psinfo);
2556 g_free(info->notes);
2559 static int write_note_info(struct elf_note_info *info, int fd)
2561 struct elf_thread_status *ets;
2562 int i, error = 0;
2564 /* write prstatus, psinfo and auxv for current thread */
2565 for (i = 0; i < info->numnote; i++)
2566 if ((error = write_note(&info->notes[i], fd)) != 0)
2567 return (error);
2569 /* write prstatus for each thread */
2570 for (ets = info->thread_list.tqh_first; ets != NULL;
2571 ets = ets->ets_link.tqe_next) {
2572 if ((error = write_note(&ets->notes[0], fd)) != 0)
2573 return (error);
2576 return (0);
/*
 * Write out ELF coredump.
 *
 * See documentation of ELF object file format in:
 * http://www.caldera.com/developers/devspecs/gabi41.pdf
 *
 * Coredump format in linux is following:
 *
 * 0   +----------------------+         \
 *     | ELF header           | ET_CORE  |
 *     +----------------------+          |
 *     | ELF program headers  |          |--- headers
 *     | - NOTE section       |          |
 *     | - PT_LOAD sections   |          |
 *     +----------------------+         /
 *     | NOTEs:               |
 *     | - NT_PRSTATUS        |
 *     | - NT_PRPSINFO        |
 *     | - NT_AUXV            |
 *     +----------------------+ <-- aligned to target page
 *     | Process memory dump  |
 *     :                      :
 *     .                      .
 *     :                      :
 *     |                      |
 *     +----------------------+
 *
 * NT_PRSTATUS -> struct elf_prstatus (per thread)
 * NT_PRPSINFO -> struct elf_prpsinfo
 * NT_AUXV is an array of { type, value } pairs (see fill_auxv_note()).
 *
 * Format follows System V format as closely as possible.  Current
 * version limitations are as follows:
 * - no floating point registers are dumped
 *
 * Function returns 0 in case of success, negative errno otherwise.
 *
 * TODO: make this work also during runtime: it should be
 * possible to force coredump from running process and then
 * continue processing.  For example qemu could set up a SIGUSR2
 * handler (provided that the target process hasn't registered a
 * handler for that) that does the dump when the signal is received.
 */
2622 static int elf_core_dump(int signr, const CPUArchState *env)
2624 const TaskState *ts = (const TaskState *)env->opaque;
2625 struct vm_area_struct *vma = NULL;
2626 char corefile[PATH_MAX];
2627 struct elf_note_info info;
2628 struct elfhdr elf;
2629 struct elf_phdr phdr;
2630 struct rlimit dumpsize;
2631 struct mm_struct *mm = NULL;
2632 off_t offset = 0, data_offset = 0;
2633 int segs = 0;
2634 int fd = -1;
2636 errno = 0;
2637 getrlimit(RLIMIT_CORE, &dumpsize);
2638 if (dumpsize.rlim_cur == 0)
2639 return 0;
2641 if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
2642 return (-errno);
2644 if ((fd = open(corefile, O_WRONLY | O_CREAT,
2645 S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
2646 return (-errno);
2649 * Walk through target process memory mappings and
2650 * set up structure containing this information. After
2651 * this point vma_xxx functions can be used.
2653 if ((mm = vma_init()) == NULL)
2654 goto out;
2656 walk_memory_regions(mm, vma_walker);
2657 segs = vma_get_mapping_count(mm);
2660 * Construct valid coredump ELF header. We also
2661 * add one more segment for notes.
2663 fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
2664 if (dump_write(fd, &elf, sizeof (elf)) != 0)
2665 goto out;
2667 /* fill in in-memory version of notes */
2668 if (fill_note_info(&info, signr, env) < 0)
2669 goto out;
2671 offset += sizeof (elf); /* elf header */
2672 offset += (segs + 1) * sizeof (struct elf_phdr); /* program headers */
2674 /* write out notes program header */
2675 fill_elf_note_phdr(&phdr, info.notes_size, offset);
2677 offset += info.notes_size;
2678 if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
2679 goto out;
2682 * ELF specification wants data to start at page boundary so
2683 * we align it here.
2685 data_offset = offset = roundup(offset, ELF_EXEC_PAGESIZE);
2688 * Write program headers for memory regions mapped in
2689 * the target process.
2691 for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2692 (void) memset(&phdr, 0, sizeof (phdr));
2694 phdr.p_type = PT_LOAD;
2695 phdr.p_offset = offset;
2696 phdr.p_vaddr = vma->vma_start;
2697 phdr.p_paddr = 0;
2698 phdr.p_filesz = vma_dump_size(vma);
2699 offset += phdr.p_filesz;
2700 phdr.p_memsz = vma->vma_end - vma->vma_start;
2701 phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
2702 if (vma->vma_flags & PROT_WRITE)
2703 phdr.p_flags |= PF_W;
2704 if (vma->vma_flags & PROT_EXEC)
2705 phdr.p_flags |= PF_X;
2706 phdr.p_align = ELF_EXEC_PAGESIZE;
2708 bswap_phdr(&phdr, 1);
2709 dump_write(fd, &phdr, sizeof (phdr));
2713 * Next we write notes just after program headers. No
2714 * alignment needed here.
2716 if (write_note_info(&info, fd) < 0)
2717 goto out;
2719 /* align data to page boundary */
2720 if (lseek(fd, data_offset, SEEK_SET) != data_offset)
2721 goto out;
2724 * Finally we can dump process memory into corefile as well.
2726 for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2727 abi_ulong addr;
2728 abi_ulong end;
2730 end = vma->vma_start + vma_dump_size(vma);
2732 for (addr = vma->vma_start; addr < end;
2733 addr += TARGET_PAGE_SIZE) {
2734 char page[TARGET_PAGE_SIZE];
2735 int error;
2738 * Read in page from target process memory and
2739 * write it to coredump file.
2741 error = copy_from_user(page, addr, sizeof (page));
2742 if (error != 0) {
2743 (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
2744 addr);
2745 errno = -error;
2746 goto out;
2748 if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
2749 goto out;
2753 out:
2754 free_note_info(&info);
2755 if (mm != NULL)
2756 vma_delete(mm);
2757 (void) close(fd);
2759 if (errno != 0)
2760 return (-errno);
2761 return (0);
2763 #endif /* USE_ELF_CORE_DUMP */
2765 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
2767 init_thread(regs, infop);