[qemu.git] / linux-user / elfload.c
1 /* This is the Linux kernel elf-loading code, ported into user space */
2 #include <sys/time.h>
3 #include <sys/param.h>
5 #include <stdio.h>
6 #include <sys/types.h>
7 #include <fcntl.h>
8 #include <errno.h>
9 #include <unistd.h>
10 #include <sys/mman.h>
11 #include <sys/resource.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <time.h>
16 #include "qemu.h"
17 #include "disas.h"
19 #ifdef _ARCH_PPC64
20 #undef ARCH_DLINFO
21 #undef ELF_PLATFORM
22 #undef ELF_HWCAP
23 #undef ELF_CLASS
24 #undef ELF_DATA
25 #undef ELF_ARCH
26 #endif
28 #define ELF_OSABI ELFOSABI_SYSV
30 /* from personality.h */
33 * Flags for bug emulation.
35 * These occupy the top three bytes.
37 enum {
38 ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */
39 FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors
40 * (signal handling)
42 MMAP_PAGE_ZERO = 0x0100000,
43 ADDR_COMPAT_LAYOUT = 0x0200000,
44 READ_IMPLIES_EXEC = 0x0400000,
45 ADDR_LIMIT_32BIT = 0x0800000,
46 SHORT_INODE = 0x1000000,
47 WHOLE_SECONDS = 0x2000000,
48 STICKY_TIMEOUTS = 0x4000000,
49 ADDR_LIMIT_3GB = 0x8000000,
53 * Personality types.
55 * These go in the low byte. Avoid using the top bit, it will
56 * conflict with error returns.
58 enum {
59 PER_LINUX = 0x0000,
60 PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
61 PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
62 PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
63 PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
64 PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS |
65 WHOLE_SECONDS | SHORT_INODE,
66 PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
67 PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
68 PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
69 PER_BSD = 0x0006,
70 PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
71 PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
72 PER_LINUX32 = 0x0008,
73 PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
74 PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
75 PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
76 PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
77 PER_RISCOS = 0x000c,
78 PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
79 PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
80 PER_OSF4 = 0x000f, /* OSF/1 v4 */
81 PER_HPUX = 0x0010,
82 PER_MASK = 0x00ff,
86 * Return the base personality without flags.
88 #define personality(pers) (pers & PER_MASK)
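/* Editor's illustrative sketch (not part of the original file): how the
 * personality() macro above separates the base personality from the
 * bug-emulation flags.  Wrapped in #if 0 so it is never compiled;
 * assert() would need <assert.h>. */
#if 0
static void personality_example(void)
{
    unsigned int pers = PER_LINUX32_3GB;       /* 0x0008 | ADDR_LIMIT_3GB */

    assert(personality(pers) == PER_LINUX32);  /* flags are masked away   */
    assert(pers & ADDR_LIMIT_3GB);             /* ...but still queryable  */
}
#endif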
90 /* this flag has no effect under Linux either; it should be deleted */
91 #ifndef MAP_DENYWRITE
92 #define MAP_DENYWRITE 0
93 #endif
95 /* should probably go in elf.h */
96 #ifndef ELIBBAD
97 #define ELIBBAD 80
98 #endif
100 typedef target_ulong target_elf_greg_t;
101 #ifdef USE_UID16
102 typedef uint16_t target_uid_t;
103 typedef uint16_t target_gid_t;
104 #else
105 typedef uint32_t target_uid_t;
106 typedef uint32_t target_gid_t;
107 #endif
108 typedef int32_t target_pid_t;
110 #ifdef TARGET_I386
112 #define ELF_PLATFORM get_elf_platform()
114 static const char *get_elf_platform(void)
116 static char elf_platform[] = "i386";
117 int family = (thread_env->cpuid_version >> 8) & 0xff;
118 if (family > 6)
119 family = 6;
120 if (family >= 3)
121 elf_platform[1] = '0' + family;
122 return elf_platform;
125 #define ELF_HWCAP get_elf_hwcap()
127 static uint32_t get_elf_hwcap(void)
129 return thread_env->cpuid_features;
132 #ifdef TARGET_X86_64
133 #define ELF_START_MMAP 0x2aaaaab000ULL
134 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
136 #define ELF_CLASS ELFCLASS64
137 #define ELF_DATA ELFDATA2LSB
138 #define ELF_ARCH EM_X86_64
140 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
142 regs->rax = 0;
143 regs->rsp = infop->start_stack;
144 regs->rip = infop->entry;
147 #define ELF_NREG 27
148 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
151 * Note that ELF_NREG should be 29 as there should be room for
152 * TRAPNO and ERR "registers" as well but linux doesn't dump
153 * those.
155 * See linux kernel: arch/x86/include/asm/elf.h
157 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
159 (*regs)[0] = env->regs[15];
160 (*regs)[1] = env->regs[14];
161 (*regs)[2] = env->regs[13];
162 (*regs)[3] = env->regs[12];
163 (*regs)[4] = env->regs[R_EBP];
164 (*regs)[5] = env->regs[R_EBX];
165 (*regs)[6] = env->regs[11];
166 (*regs)[7] = env->regs[10];
167 (*regs)[8] = env->regs[9];
168 (*regs)[9] = env->regs[8];
169 (*regs)[10] = env->regs[R_EAX];
170 (*regs)[11] = env->regs[R_ECX];
171 (*regs)[12] = env->regs[R_EDX];
172 (*regs)[13] = env->regs[R_ESI];
173 (*regs)[14] = env->regs[R_EDI];
174 (*regs)[15] = env->regs[R_EAX]; /* XXX */
175 (*regs)[16] = env->eip;
176 (*regs)[17] = env->segs[R_CS].selector & 0xffff;
177 (*regs)[18] = env->eflags;
178 (*regs)[19] = env->regs[R_ESP];
179 (*regs)[20] = env->segs[R_SS].selector & 0xffff;
180 (*regs)[21] = env->segs[R_FS].selector & 0xffff;
181 (*regs)[22] = env->segs[R_GS].selector & 0xffff;
182 (*regs)[23] = env->segs[R_DS].selector & 0xffff;
183 (*regs)[24] = env->segs[R_ES].selector & 0xffff;
184 (*regs)[25] = env->segs[R_FS].selector & 0xffff;
185 (*regs)[26] = env->segs[R_GS].selector & 0xffff;
188 #else
190 #define ELF_START_MMAP 0x80000000
193 * This is used to ensure we don't load something for the wrong architecture.
195 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
198 * These are used to set parameters in the core dumps.
200 #define ELF_CLASS ELFCLASS32
201 #define ELF_DATA ELFDATA2LSB
202 #define ELF_ARCH EM_386
204 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
206 regs->esp = infop->start_stack;
207 regs->eip = infop->entry;
209 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
210 starts %edx contains a pointer to a function which might be
211 registered using `atexit'. This provides a means for the
212 dynamic linker to call DT_FINI functions for shared libraries
213 that have been loaded before the code runs.
215 A value of 0 tells us that there is no such handler. */
216 regs->edx = 0;
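/* Editor's illustrative sketch (not part of the original file): what the
 * guest-side C start-up code conceptually does with the value placed in
 * %edx above.  guest_start() is a hypothetical name; a real guest libc
 * receives the pointer as the "rtld_fini" argument of its start-up
 * routine.  <stdlib.h> (already included above) provides atexit(). */
#if 0
static void guest_start(void (*rtld_fini)(void))
{
    if (rtld_fini) {        /* non-zero %edx: a DT_FINI handler was passed */
        atexit(rtld_fini);  /* run it at exit, as the SVR4 ABI requires    */
    }
    /* ... then call main() and exit with its return value ... */
}
#endif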
219 #define ELF_NREG 17
220 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
223 * Note that ELF_NREG should be 19 as there should be room for
224 * TRAPNO and ERR "registers" as well but linux doesn't dump
225 * those.
227 * See linux kernel: arch/x86/include/asm/elf.h
229 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
231 (*regs)[0] = env->regs[R_EBX];
232 (*regs)[1] = env->regs[R_ECX];
233 (*regs)[2] = env->regs[R_EDX];
234 (*regs)[3] = env->regs[R_ESI];
235 (*regs)[4] = env->regs[R_EDI];
236 (*regs)[5] = env->regs[R_EBP];
237 (*regs)[6] = env->regs[R_EAX];
238 (*regs)[7] = env->segs[R_DS].selector & 0xffff;
239 (*regs)[8] = env->segs[R_ES].selector & 0xffff;
240 (*regs)[9] = env->segs[R_FS].selector & 0xffff;
241 (*regs)[10] = env->segs[R_GS].selector & 0xffff;
242 (*regs)[11] = env->regs[R_EAX]; /* XXX */
243 (*regs)[12] = env->eip;
244 (*regs)[13] = env->segs[R_CS].selector & 0xffff;
245 (*regs)[14] = env->eflags;
246 (*regs)[15] = env->regs[R_ESP];
247 (*regs)[16] = env->segs[R_SS].selector & 0xffff;
249 #endif
251 #define USE_ELF_CORE_DUMP
252 #define ELF_EXEC_PAGESIZE 4096
254 #endif
256 #ifdef TARGET_ARM
258 #define ELF_START_MMAP 0x80000000
260 #define elf_check_arch(x) ( (x) == EM_ARM )
262 #define ELF_CLASS ELFCLASS32
263 #ifdef TARGET_WORDS_BIGENDIAN
264 #define ELF_DATA ELFDATA2MSB
265 #else
266 #define ELF_DATA ELFDATA2LSB
267 #endif
268 #define ELF_ARCH EM_ARM
270 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
272 abi_long stack = infop->start_stack;
273 memset(regs, 0, sizeof(*regs));
274 regs->ARM_cpsr = 0x10;
275 if (infop->entry & 1)
276 regs->ARM_cpsr |= CPSR_T;
277 regs->ARM_pc = infop->entry & 0xfffffffe;
278 regs->ARM_sp = infop->start_stack;
279 /* FIXME - what to do on failure of get_user()? */
280 get_user_ual(regs->ARM_r2, stack + 8); /* envp */
281 get_user_ual(regs->ARM_r1, stack + 4); /* envp */
282 /* XXX: it seems that r0 is zeroed afterwards anyway! */
283 regs->ARM_r0 = 0;
284 /* For uClinux PIC binaries. */
285 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
286 regs->ARM_r10 = infop->start_data;
289 #define ELF_NREG 18
290 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
292 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
294 (*regs)[0] = tswapl(env->regs[0]);
295 (*regs)[1] = tswapl(env->regs[1]);
296 (*regs)[2] = tswapl(env->regs[2]);
297 (*regs)[3] = tswapl(env->regs[3]);
298 (*regs)[4] = tswapl(env->regs[4]);
299 (*regs)[5] = tswapl(env->regs[5]);
300 (*regs)[6] = tswapl(env->regs[6]);
301 (*regs)[7] = tswapl(env->regs[7]);
302 (*regs)[8] = tswapl(env->regs[8]);
303 (*regs)[9] = tswapl(env->regs[9]);
304 (*regs)[10] = tswapl(env->regs[10]);
305 (*regs)[11] = tswapl(env->regs[11]);
306 (*regs)[12] = tswapl(env->regs[12]);
307 (*regs)[13] = tswapl(env->regs[13]);
308 (*regs)[14] = tswapl(env->regs[14]);
309 (*regs)[15] = tswapl(env->regs[15]);
311 (*regs)[16] = tswapl(cpsr_read((CPUState *)env));
312 (*regs)[17] = tswapl(env->regs[0]); /* XXX */
315 #define USE_ELF_CORE_DUMP
316 #define ELF_EXEC_PAGESIZE 4096
318 enum
320 ARM_HWCAP_ARM_SWP = 1 << 0,
321 ARM_HWCAP_ARM_HALF = 1 << 1,
322 ARM_HWCAP_ARM_THUMB = 1 << 2,
323 ARM_HWCAP_ARM_26BIT = 1 << 3,
324 ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
325 ARM_HWCAP_ARM_FPA = 1 << 5,
326 ARM_HWCAP_ARM_VFP = 1 << 6,
327 ARM_HWCAP_ARM_EDSP = 1 << 7,
328 ARM_HWCAP_ARM_JAVA = 1 << 8,
329 ARM_HWCAP_ARM_IWMMXT = 1 << 9,
330 ARM_HWCAP_ARM_THUMBEE = 1 << 10,
331 ARM_HWCAP_ARM_NEON = 1 << 11,
332 ARM_HWCAP_ARM_VFPv3 = 1 << 12,
333 ARM_HWCAP_ARM_VFPv3D16 = 1 << 13,
336 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
337 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
338 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP \
339 | ARM_HWCAP_ARM_NEON | ARM_HWCAP_ARM_VFPv3 )
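/* Editor's illustrative sketch (not part of the original file): the mask
 * above is handed to the guest as the AT_HWCAP auxiliary-vector entry
 * (see NEW_AUX_ENT(AT_HWCAP, ...) in create_elf_tables() below).  A guest
 * program could read it back like this, assuming a guest glibc new enough
 * to provide getauxval(); real guest code would use HWCAP_NEON from
 * <asm/hwcap.h>, which has the same value as ARM_HWCAP_ARM_NEON here. */
#if 0
#include <sys/auxv.h>
#include <stdio.h>

int main(void)
{
    unsigned long hwcap = getauxval(AT_HWCAP);

    if (hwcap & ARM_HWCAP_ARM_NEON) {
        printf("AT_HWCAP advertises NEON\n");
    }
    return 0;
}
#endif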
341 #endif
343 #ifdef TARGET_SPARC
344 #ifdef TARGET_SPARC64
346 #define ELF_START_MMAP 0x80000000
348 #ifndef TARGET_ABI32
349 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
350 #else
351 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
352 #endif
354 #define ELF_CLASS ELFCLASS64
355 #define ELF_DATA ELFDATA2MSB
356 #define ELF_ARCH EM_SPARCV9
358 #define STACK_BIAS 2047
360 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
362 #ifndef TARGET_ABI32
363 regs->tstate = 0;
364 #endif
365 regs->pc = infop->entry;
366 regs->npc = regs->pc + 4;
367 regs->y = 0;
368 #ifdef TARGET_ABI32
369 regs->u_regs[14] = infop->start_stack - 16 * 4;
370 #else
371 if (personality(infop->personality) == PER_LINUX32)
372 regs->u_regs[14] = infop->start_stack - 16 * 4;
373 else
374 regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
375 #endif
378 #else
379 #define ELF_START_MMAP 0x80000000
381 #define elf_check_arch(x) ( (x) == EM_SPARC )
383 #define ELF_CLASS ELFCLASS32
384 #define ELF_DATA ELFDATA2MSB
385 #define ELF_ARCH EM_SPARC
387 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
389 regs->psr = 0;
390 regs->pc = infop->entry;
391 regs->npc = regs->pc + 4;
392 regs->y = 0;
393 regs->u_regs[14] = infop->start_stack - 16 * 4;
396 #endif
397 #endif
399 #ifdef TARGET_PPC
401 #define ELF_START_MMAP 0x80000000
403 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
405 #define elf_check_arch(x) ( (x) == EM_PPC64 )
407 #define ELF_CLASS ELFCLASS64
409 #else
411 #define elf_check_arch(x) ( (x) == EM_PPC )
413 #define ELF_CLASS ELFCLASS32
415 #endif
417 #ifdef TARGET_WORDS_BIGENDIAN
418 #define ELF_DATA ELFDATA2MSB
419 #else
420 #define ELF_DATA ELFDATA2LSB
421 #endif
422 #define ELF_ARCH EM_PPC
424 /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
425 See arch/powerpc/include/asm/cputable.h. */
426 enum {
427 QEMU_PPC_FEATURE_32 = 0x80000000,
428 QEMU_PPC_FEATURE_64 = 0x40000000,
429 QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
430 QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
431 QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
432 QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
433 QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
434 QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
435 QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
436 QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
437 QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
438 QEMU_PPC_FEATURE_NO_TB = 0x00100000,
439 QEMU_PPC_FEATURE_POWER4 = 0x00080000,
440 QEMU_PPC_FEATURE_POWER5 = 0x00040000,
441 QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
442 QEMU_PPC_FEATURE_CELL = 0x00010000,
443 QEMU_PPC_FEATURE_BOOKE = 0x00008000,
444 QEMU_PPC_FEATURE_SMT = 0x00004000,
445 QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
446 QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
447 QEMU_PPC_FEATURE_PA6T = 0x00000800,
448 QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
449 QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
450 QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
451 QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
452 QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
454 QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
455 QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
458 #define ELF_HWCAP get_elf_hwcap()
460 static uint32_t get_elf_hwcap(void)
462 CPUState *e = thread_env;
463 uint32_t features = 0;
465 /* We don't have to be terribly complete here; the high points are
466 Altivec/FP/SPE support. Anything else is just a bonus. */
467 #define GET_FEATURE(flag, feature) \
468 do {if (e->insns_flags & flag) features |= feature; } while(0)
469 GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
470 GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
471 GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
472 GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
473 GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
474 GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
475 GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
476 GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
477 #undef GET_FEATURE
479 return features;
483 * We need to put in some extra aux table entries to tell glibc what
484 * the cache block size is, so it can use the dcbz instruction safely.
486 #define AT_DCACHEBSIZE 19
487 #define AT_ICACHEBSIZE 20
488 #define AT_UCACHEBSIZE 21
489 /* A special ignored type value for PPC, for glibc compatibility. */
490 #define AT_IGNOREPPC 22
492 * The requirements here are:
493 * - keep the final alignment of sp (sp & 0xf)
494 * - make sure the 32-bit value at the first 16 byte aligned position of
495 * AUXV is greater than 16 for glibc compatibility.
496 * AT_IGNOREPPC is used for that.
497 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
498 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
500 #define DLINFO_ARCH_ITEMS 5
501 #define ARCH_DLINFO \
502 do { \
503 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
504 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
505 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
506 /* \
507 * Now handle glibc compatibility. \
508 */ \
509 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
510 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
511 } while (0)
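/* Editor's illustrative sketch (not part of the original file): the extra
 * entries emitted by ARCH_DLINFO above end up in the guest's auxiliary
 * vector, where glibc picks up the cache block size it needs for dcbz.
 * The function below is a hypothetical guest-side helper that walks the
 * auxiliary vector (which follows the environment block) and returns the
 * advertised d-cache block size; Elf32_auxv_t comes from glibc's <elf.h>
 * and a 32-bit guest is assumed. */
#if 0
#include <elf.h>

#ifndef AT_DCACHEBSIZE
#define AT_DCACHEBSIZE 19              /* same value as defined above */
#endif

static unsigned long find_dcachebsize(char **envp)
{
    Elf32_auxv_t *av;

    while (*envp) {                    /* skip the environment pointers */
        envp++;
    }
    for (av = (Elf32_auxv_t *)(envp + 1); av->a_type != AT_NULL; av++) {
        if (av->a_type == AT_DCACHEBSIZE) {
            return av->a_un.a_val;     /* 0x20, as advertised above */
        }
    }
    return 0;
}
#endif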
513 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
515 _regs->gpr[1] = infop->start_stack;
516 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
517 _regs->gpr[2] = ldq_raw(infop->entry + 8) + infop->load_addr;
518 infop->entry = ldq_raw(infop->entry) + infop->load_addr;
519 #endif
520 _regs->nip = infop->entry;
523 /* See linux kernel: arch/powerpc/include/asm/elf.h. */
524 #define ELF_NREG 48
525 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
527 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
529 int i;
530 target_ulong ccr = 0;
532 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
533 (*regs)[i] = tswapl(env->gpr[i]);
536 (*regs)[32] = tswapl(env->nip);
537 (*regs)[33] = tswapl(env->msr);
538 (*regs)[35] = tswapl(env->ctr);
539 (*regs)[36] = tswapl(env->lr);
540 (*regs)[37] = tswapl(env->xer);
542 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
543 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
545 (*regs)[38] = tswapl(ccr);
548 #define USE_ELF_CORE_DUMP
549 #define ELF_EXEC_PAGESIZE 4096
551 #endif
553 #ifdef TARGET_MIPS
555 #define ELF_START_MMAP 0x80000000
557 #define elf_check_arch(x) ( (x) == EM_MIPS )
559 #ifdef TARGET_MIPS64
560 #define ELF_CLASS ELFCLASS64
561 #else
562 #define ELF_CLASS ELFCLASS32
563 #endif
564 #ifdef TARGET_WORDS_BIGENDIAN
565 #define ELF_DATA ELFDATA2MSB
566 #else
567 #define ELF_DATA ELFDATA2LSB
568 #endif
569 #define ELF_ARCH EM_MIPS
571 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
573 regs->cp0_status = 2 << CP0St_KSU;
574 regs->cp0_epc = infop->entry;
575 regs->regs[29] = infop->start_stack;
578 /* See linux kernel: arch/mips/include/asm/elf.h. */
579 #define ELF_NREG 45
580 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
582 /* See linux kernel: arch/mips/include/asm/reg.h. */
583 enum {
584 #ifdef TARGET_MIPS64
585 TARGET_EF_R0 = 0,
586 #else
587 TARGET_EF_R0 = 6,
588 #endif
589 TARGET_EF_R26 = TARGET_EF_R0 + 26,
590 TARGET_EF_R27 = TARGET_EF_R0 + 27,
591 TARGET_EF_LO = TARGET_EF_R0 + 32,
592 TARGET_EF_HI = TARGET_EF_R0 + 33,
593 TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
594 TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
595 TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
596 TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
599 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */
600 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
602 int i;
604 for (i = 0; i < TARGET_EF_R0; i++) {
605 (*regs)[i] = 0;
607 (*regs)[TARGET_EF_R0] = 0;
609 for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
610 (*regs)[TARGET_EF_R0 + i] = tswapl(env->active_tc.gpr[i]);
613 (*regs)[TARGET_EF_R26] = 0;
614 (*regs)[TARGET_EF_R27] = 0;
615 (*regs)[TARGET_EF_LO] = tswapl(env->active_tc.LO[0]);
616 (*regs)[TARGET_EF_HI] = tswapl(env->active_tc.HI[0]);
617 (*regs)[TARGET_EF_CP0_EPC] = tswapl(env->active_tc.PC);
618 (*regs)[TARGET_EF_CP0_BADVADDR] = tswapl(env->CP0_BadVAddr);
619 (*regs)[TARGET_EF_CP0_STATUS] = tswapl(env->CP0_Status);
620 (*regs)[TARGET_EF_CP0_CAUSE] = tswapl(env->CP0_Cause);
623 #define USE_ELF_CORE_DUMP
624 #define ELF_EXEC_PAGESIZE 4096
626 #endif /* TARGET_MIPS */
628 #ifdef TARGET_MICROBLAZE
630 #define ELF_START_MMAP 0x80000000
632 #define elf_check_arch(x) ( (x) == EM_XILINX_MICROBLAZE )
634 #define ELF_CLASS ELFCLASS32
635 #define ELF_DATA ELFDATA2MSB
636 #define ELF_ARCH EM_XILINX_MICROBLAZE
638 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
640 regs->pc = infop->entry;
641 regs->r1 = infop->start_stack;
645 #define ELF_EXEC_PAGESIZE 4096
647 #endif /* TARGET_MICROBLAZE */
649 #ifdef TARGET_SH4
651 #define ELF_START_MMAP 0x80000000
653 #define elf_check_arch(x) ( (x) == EM_SH )
655 #define ELF_CLASS ELFCLASS32
656 #define ELF_DATA ELFDATA2LSB
657 #define ELF_ARCH EM_SH
659 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
661 /* Check other registers XXXXX */
662 regs->pc = infop->entry;
663 regs->regs[15] = infop->start_stack;
666 /* See linux kernel: arch/sh/include/asm/elf.h. */
667 #define ELF_NREG 23
668 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
670 /* See linux kernel: arch/sh/include/asm/ptrace.h. */
671 enum {
672 TARGET_REG_PC = 16,
673 TARGET_REG_PR = 17,
674 TARGET_REG_SR = 18,
675 TARGET_REG_GBR = 19,
676 TARGET_REG_MACH = 20,
677 TARGET_REG_MACL = 21,
678 TARGET_REG_SYSCALL = 22
681 static inline void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
683 int i;
685 for (i = 0; i < 16; i++) {
686 (*regs)[i] = tswapl(env->gregs[i]);
689 (*regs)[TARGET_REG_PC] = tswapl(env->pc);
690 (*regs)[TARGET_REG_PR] = tswapl(env->pr);
691 (*regs)[TARGET_REG_SR] = tswapl(env->sr);
692 (*regs)[TARGET_REG_GBR] = tswapl(env->gbr);
693 (*regs)[TARGET_REG_MACH] = tswapl(env->mach);
694 (*regs)[TARGET_REG_MACL] = tswapl(env->macl);
695 (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
698 #define USE_ELF_CORE_DUMP
699 #define ELF_EXEC_PAGESIZE 4096
701 #endif
703 #ifdef TARGET_CRIS
705 #define ELF_START_MMAP 0x80000000
707 #define elf_check_arch(x) ( (x) == EM_CRIS )
709 #define ELF_CLASS ELFCLASS32
710 #define ELF_DATA ELFDATA2LSB
711 #define ELF_ARCH EM_CRIS
713 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
715 regs->erp = infop->entry;
718 #define ELF_EXEC_PAGESIZE 8192
720 #endif
722 #ifdef TARGET_M68K
724 #define ELF_START_MMAP 0x80000000
726 #define elf_check_arch(x) ( (x) == EM_68K )
728 #define ELF_CLASS ELFCLASS32
729 #define ELF_DATA ELFDATA2MSB
730 #define ELF_ARCH EM_68K
732 /* ??? Does this need to do anything?
733 #define ELF_PLAT_INIT(_r) */
735 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
737 regs->usp = infop->start_stack;
738 regs->sr = 0;
739 regs->pc = infop->entry;
742 /* See linux kernel: arch/m68k/include/asm/elf.h. */
743 #define ELF_NREG 20
744 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
746 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
748 (*regs)[0] = tswapl(env->dregs[1]);
749 (*regs)[1] = tswapl(env->dregs[2]);
750 (*regs)[2] = tswapl(env->dregs[3]);
751 (*regs)[3] = tswapl(env->dregs[4]);
752 (*regs)[4] = tswapl(env->dregs[5]);
753 (*regs)[5] = tswapl(env->dregs[6]);
754 (*regs)[6] = tswapl(env->dregs[7]);
755 (*regs)[7] = tswapl(env->aregs[0]);
756 (*regs)[8] = tswapl(env->aregs[1]);
757 (*regs)[9] = tswapl(env->aregs[2]);
758 (*regs)[10] = tswapl(env->aregs[3]);
759 (*regs)[11] = tswapl(env->aregs[4]);
760 (*regs)[12] = tswapl(env->aregs[5]);
761 (*regs)[13] = tswapl(env->aregs[6]);
762 (*regs)[14] = tswapl(env->dregs[0]);
763 (*regs)[15] = tswapl(env->aregs[7]);
764 (*regs)[16] = tswapl(env->dregs[0]); /* FIXME: orig_d0 */
765 (*regs)[17] = tswapl(env->sr);
766 (*regs)[18] = tswapl(env->pc);
767 (*regs)[19] = 0; /* FIXME: regs->format | regs->vector */
770 #define USE_ELF_CORE_DUMP
771 #define ELF_EXEC_PAGESIZE 8192
773 #endif
775 #ifdef TARGET_ALPHA
777 #define ELF_START_MMAP (0x30000000000ULL)
779 #define elf_check_arch(x) ( (x) == ELF_ARCH )
781 #define ELF_CLASS ELFCLASS64
782 #define ELF_DATA ELFDATA2LSB /* Alpha is little-endian */
783 #define ELF_ARCH EM_ALPHA
785 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
787 regs->pc = infop->entry;
788 regs->ps = 8;
789 regs->usp = infop->start_stack;
792 #define ELF_EXEC_PAGESIZE 8192
794 #endif /* TARGET_ALPHA */
796 #ifndef ELF_PLATFORM
797 #define ELF_PLATFORM (NULL)
798 #endif
800 #ifndef ELF_HWCAP
801 #define ELF_HWCAP 0
802 #endif
804 #ifdef TARGET_ABI32
805 #undef ELF_CLASS
806 #define ELF_CLASS ELFCLASS32
807 #undef bswaptls
808 #define bswaptls(ptr) bswap32s(ptr)
809 #endif
811 #include "elf.h"
813 struct exec
815 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
816 unsigned int a_text; /* length of text, in bytes */
817 unsigned int a_data; /* length of data, in bytes */
818 unsigned int a_bss; /* length of uninitialized data area, in bytes */
819 unsigned int a_syms; /* length of symbol table data in file, in bytes */
820 unsigned int a_entry; /* start address */
821 unsigned int a_trsize; /* length of relocation info for text, in bytes */
822 unsigned int a_drsize; /* length of relocation info for data, in bytes */
826 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
827 #define OMAGIC 0407
828 #define NMAGIC 0410
829 #define ZMAGIC 0413
830 #define QMAGIC 0314
832 /* max code+data+bss space allocated to elf interpreter */
833 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
835 /* max code+data+bss+brk space allocated to ET_DYN executables */
836 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
838 /* Necessary parameters */
839 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
840 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
841 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
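/* Editor's illustrative sketch (not part of the original file): what the
 * two macros above evaluate to for a 4 KiB target page size.  Wrapped in
 * #if 0; assert() would need <assert.h>. */
#if 0
static void elf_page_macro_example(void)
{
    abi_ulong v = 0x08049234;

    assert(TARGET_ELF_PAGESTART(v)  == 0x08049000); /* round down to a page   */
    assert(TARGET_ELF_PAGEOFFSET(v) == 0x234);      /* offset within the page */
}
#endif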
843 #define INTERPRETER_NONE 0
844 #define INTERPRETER_AOUT 1
845 #define INTERPRETER_ELF 2
847 #define DLINFO_ITEMS 12
849 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
851 memcpy(to, from, n);
854 static int load_aout_interp(void * exptr, int interp_fd);
856 #ifdef BSWAP_NEEDED
857 static void bswap_ehdr(struct elfhdr *ehdr)
859 bswap16s(&ehdr->e_type); /* Object file type */
860 bswap16s(&ehdr->e_machine); /* Architecture */
861 bswap32s(&ehdr->e_version); /* Object file version */
862 bswaptls(&ehdr->e_entry); /* Entry point virtual address */
863 bswaptls(&ehdr->e_phoff); /* Program header table file offset */
864 bswaptls(&ehdr->e_shoff); /* Section header table file offset */
865 bswap32s(&ehdr->e_flags); /* Processor-specific flags */
866 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
867 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
868 bswap16s(&ehdr->e_phnum); /* Program header table entry count */
869 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
870 bswap16s(&ehdr->e_shnum); /* Section header table entry count */
871 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
874 static void bswap_phdr(struct elf_phdr *phdr)
876 bswap32s(&phdr->p_type); /* Segment type */
877 bswaptls(&phdr->p_offset); /* Segment file offset */
878 bswaptls(&phdr->p_vaddr); /* Segment virtual address */
879 bswaptls(&phdr->p_paddr); /* Segment physical address */
880 bswaptls(&phdr->p_filesz); /* Segment size in file */
881 bswaptls(&phdr->p_memsz); /* Segment size in memory */
882 bswap32s(&phdr->p_flags); /* Segment flags */
883 bswaptls(&phdr->p_align); /* Segment alignment */
886 static void bswap_shdr(struct elf_shdr *shdr)
888 bswap32s(&shdr->sh_name);
889 bswap32s(&shdr->sh_type);
890 bswaptls(&shdr->sh_flags);
891 bswaptls(&shdr->sh_addr);
892 bswaptls(&shdr->sh_offset);
893 bswaptls(&shdr->sh_size);
894 bswap32s(&shdr->sh_link);
895 bswap32s(&shdr->sh_info);
896 bswaptls(&shdr->sh_addralign);
897 bswaptls(&shdr->sh_entsize);
900 static void bswap_sym(struct elf_sym *sym)
902 bswap32s(&sym->st_name);
903 bswaptls(&sym->st_value);
904 bswaptls(&sym->st_size);
905 bswap16s(&sym->st_shndx);
907 #endif
909 #ifdef USE_ELF_CORE_DUMP
910 static int elf_core_dump(int, const CPUState *);
912 #ifdef BSWAP_NEEDED
913 static void bswap_note(struct elf_note *en)
915 bswap32s(&en->n_namesz);
916 bswap32s(&en->n_descsz);
917 bswap32s(&en->n_type);
919 #endif /* BSWAP_NEEDED */
921 #endif /* USE_ELF_CORE_DUMP */
924 * 'copy_elf_strings()' copies argument/environment strings from user
925 * memory to free pages in kernel mem. These are in a format ready
926 * to be put directly into the top of new user memory.
929 static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
930 abi_ulong p)
932 char *tmp, *tmp1, *pag = NULL;
933 int len, offset = 0;
935 if (!p) {
936 return 0; /* bullet-proofing */
938 while (argc-- > 0) {
939 tmp = argv[argc];
940 if (!tmp) {
941 fprintf(stderr, "VFS: argc is wrong");
942 exit(-1);
944 tmp1 = tmp;
945 while (*tmp++);
946 len = tmp - tmp1;
947 if (p < len) { /* this shouldn't happen - 128kB */
948 return 0;
950 while (len) {
951 --p; --tmp; --len;
952 if (--offset < 0) {
953 offset = p % TARGET_PAGE_SIZE;
954 pag = (char *)page[p/TARGET_PAGE_SIZE];
955 if (!pag) {
956 pag = (char *)malloc(TARGET_PAGE_SIZE);
957 if (!pag)
958 return 0;
959 memset(pag, 0, TARGET_PAGE_SIZE);
960 page[p/TARGET_PAGE_SIZE] = pag;
963 if (len == 0 || offset == 0) {
964 *(pag + offset) = *tmp;
966 else {
967 int bytes_to_copy = (len > offset) ? offset : len;
968 tmp -= bytes_to_copy;
969 p -= bytes_to_copy;
970 offset -= bytes_to_copy;
971 len -= bytes_to_copy;
972 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
976 return p;
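/* Editor's illustrative sketch (not part of the original file): how the
 * function above is meant to be used.  It mirrors the calls made in
 * load_elf_binary() further below: bprm->p starts at the top of the
 * argument area and moves downwards with every string copied, and a
 * return value of 0 signals that the strings did not fit. */
#if 0
static void copy_elf_strings_example(struct linux_binprm *bprm)
{
    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);
    if (!bprm->p) {
        /* out of argument space */
    }
}
#endif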
979 static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
980 struct image_info *info)
982 abi_ulong stack_base, size, error;
983 int i;
985 /* Create enough stack to hold everything. If we don't use
986 * it for args, we'll use it for something else...
988 size = guest_stack_size;
989 if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
990 size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
991 error = target_mmap(0,
992 size + qemu_host_page_size,
993 PROT_READ | PROT_WRITE,
994 MAP_PRIVATE | MAP_ANONYMOUS,
995 -1, 0);
996 if (error == -1) {
997 perror("stk mmap");
998 exit(-1);
1000 /* we reserve one extra page at the top of the stack as guard */
1001 target_mprotect(error + size, qemu_host_page_size, PROT_NONE);
1003 stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
1004 p += stack_base;
1006 for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
1007 if (bprm->page[i]) {
1008 info->rss++;
1009 /* FIXME - check return value of memcpy_to_target() for failure */
1010 memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
1011 free(bprm->page[i]);
1013 stack_base += TARGET_PAGE_SIZE;
1015 return p;
1018 static void set_brk(abi_ulong start, abi_ulong end)
1020 /* page-align the start and end addresses... */
1021 start = HOST_PAGE_ALIGN(start);
1022 end = HOST_PAGE_ALIGN(end);
1023 if (end <= start)
1024 return;
1025 if(target_mmap(start, end - start,
1026 PROT_READ | PROT_WRITE | PROT_EXEC,
1027 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
1028 perror("cannot mmap brk");
1029 exit(-1);
1034 /* We need to explicitly zero any fractional pages after the data
1035 section (i.e. bss). This would contain the junk from the file that
1036 should not be in memory. */
1037 static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
1039 abi_ulong nbyte;
1041 if (elf_bss >= last_bss)
1042 return;
1044 /* XXX: this is really a hack : if the real host page size is
1045 smaller than the target page size, some pages after the end
1046 of the file may not be mapped. A better fix would be to
1047 patch target_mmap(), but it is more complicated as the file
1048 size must be known */
1049 if (qemu_real_host_page_size < qemu_host_page_size) {
1050 abi_ulong end_addr, end_addr1;
1051 end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
1052 ~(qemu_real_host_page_size - 1);
1053 end_addr = HOST_PAGE_ALIGN(elf_bss);
1054 if (end_addr1 < end_addr) {
1055 mmap((void *)g2h(end_addr1), end_addr - end_addr1,
1056 PROT_READ|PROT_WRITE|PROT_EXEC,
1057 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
1061 nbyte = elf_bss & (qemu_host_page_size-1);
1062 if (nbyte) {
1063 nbyte = qemu_host_page_size - nbyte;
1064 do {
1065 /* FIXME - what to do if put_user() fails? */
1066 put_user_u8(0, elf_bss);
1067 elf_bss++;
1068 } while (--nbyte);
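/* Editor's worked example for the function above (not part of the
 * original file), assuming qemu_host_page_size == 4096 and
 * qemu_real_host_page_size == qemu_host_page_size: with
 * elf_bss == 0x0804a123, nbyte becomes 4096 - 0x123 == 0xedd, so the
 * bytes at 0x0804a123 .. 0x0804afff are zeroed and no stale file
 * contents show through in the tail of the last data page. */
#if 0
static void padzero_example(void)
{
    padzero(0x0804a123, 0x0804c000);
}
#endif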
1073 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
1074 struct elfhdr * exec,
1075 abi_ulong load_addr,
1076 abi_ulong load_bias,
1077 abi_ulong interp_load_addr, int ibcs,
1078 struct image_info *info)
1080 abi_ulong sp;
1081 int size;
1082 abi_ulong u_platform;
1083 const char *k_platform;
1084 const int n = sizeof(elf_addr_t);
1086 sp = p;
1087 u_platform = 0;
1088 k_platform = ELF_PLATFORM;
1089 if (k_platform) {
1090 size_t len = strlen(k_platform) + 1;
1091 sp -= (len + n - 1) & ~(n - 1);
1092 u_platform = sp;
1093 /* FIXME - check return value of memcpy_to_target() for failure */
1094 memcpy_to_target(sp, k_platform, len);
1097 * Force 16 byte _final_ alignment here for generality.
1099 sp = sp &~ (abi_ulong)15;
1100 size = (DLINFO_ITEMS + 1) * 2;
1101 if (k_platform)
1102 size += 2;
1103 #ifdef DLINFO_ARCH_ITEMS
1104 size += DLINFO_ARCH_ITEMS * 2;
1105 #endif
1106 size += envc + argc + 2;
1107 size += (!ibcs ? 3 : 1); /* argc itself */
1108 size *= n;
1109 if (size & 15)
1110 sp -= 16 - (size & 15);
1112 /* This is correct because Linux defines
1113 * elf_addr_t as Elf32_Off / Elf64_Off
1115 #define NEW_AUX_ENT(id, val) do { \
1116 sp -= n; put_user_ual(val, sp); \
1117 sp -= n; put_user_ual(id, sp); \
1118 } while(0)
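/* Editor's note (not part of the original file): because the macro above
 * decrements sp before each store, the entry that is emitted first ends
 * up highest in guest memory.  That is why AT_NULL is pushed first right
 * below -- when the auxiliary vector is later read upwards from its start
 * it is the terminating entry, and the DLINFO_ITEMS entries pushed after
 * it come before it. */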
1120 NEW_AUX_ENT (AT_NULL, 0);
1122 /* There must be exactly DLINFO_ITEMS entries here. */
1123 NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
1124 NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
1125 NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
1126 NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
1127 NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
1128 NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
1129 NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
1130 NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
1131 NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
1132 NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
1133 NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
1134 NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
1135 NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
1136 if (k_platform)
1137 NEW_AUX_ENT(AT_PLATFORM, u_platform);
1138 #ifdef ARCH_DLINFO
1140 * ARCH_DLINFO must come last so platform specific code can enforce
1141 * special alignment requirements on the AUXV if necessary (eg. PPC).
1143 ARCH_DLINFO;
1144 #endif
1145 #undef NEW_AUX_ENT
1147 info->saved_auxv = sp;
1149 sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
1150 return sp;
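/* Editor's illustrative sketch (not part of the original file): the sp
 * returned above becomes the guest stack pointer at process entry.  The
 * hypothetical guest-side walker below shows the resulting layout --
 * argc, the argv pointers plus a NULL, the envp pointers plus a NULL,
 * and finally the id/value pairs written by NEW_AUX_ENT(), terminated by
 * AT_NULL.  A guest whose "long" matches the ABI word size is assumed. */
#if 0
static void walk_initial_stack(long *sp)
{
    long argc   = *sp;
    char **argv = (char **)(sp + 1);      /* argc entries, then a NULL  */
    char **envp = argv + argc + 1;        /* environment, then a NULL   */
    long *auxv  = (long *)envp;

    while (*auxv) {                       /* skip over the environment  */
        auxv++;
    }
    auxv++;                               /* step over the NULL         */
    /* auxv now points at {a_type, a_val} pairs ending with AT_NULL. */
}
#endif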
1154 static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
1155 int interpreter_fd,
1156 abi_ulong *interp_load_addr)
1158 struct elf_phdr *elf_phdata = NULL;
1159 struct elf_phdr *eppnt;
1160 abi_ulong load_addr = 0;
1161 int load_addr_set = 0;
1162 int retval;
1163 abi_ulong last_bss, elf_bss;
1164 abi_ulong error;
1165 int i;
1167 elf_bss = 0;
1168 last_bss = 0;
1169 error = 0;
1171 #ifdef BSWAP_NEEDED
1172 bswap_ehdr(interp_elf_ex);
1173 #endif
1174 /* First of all, some simple consistency checks */
1175 if ((interp_elf_ex->e_type != ET_EXEC &&
1176 interp_elf_ex->e_type != ET_DYN) ||
1177 !elf_check_arch(interp_elf_ex->e_machine)) {
1178 return ~((abi_ulong)0UL);
1182 /* Now read in all of the header information */
1184 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
1185 return ~(abi_ulong)0UL;
1187 elf_phdata = (struct elf_phdr *)
1188 malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
1190 if (!elf_phdata)
1191 return ~((abi_ulong)0UL);
1194 * If the size of this structure has changed, then punt, since
1195 * we will be doing the wrong thing.
1197 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
1198 free(elf_phdata);
1199 return ~((abi_ulong)0UL);
1202 retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
1203 if(retval >= 0) {
1204 retval = read(interpreter_fd,
1205 (char *) elf_phdata,
1206 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
1208 if (retval < 0) {
1209 perror("load_elf_interp");
1210 exit(-1);
1211 free (elf_phdata);
1212 return retval;
1214 #ifdef BSWAP_NEEDED
1215 eppnt = elf_phdata;
1216 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
1217 bswap_phdr(eppnt);
1219 #endif
1221 if (interp_elf_ex->e_type == ET_DYN) {
1222 /* in order to avoid hardcoding the interpreter load
1223 address in qemu, we allocate a big enough memory zone */
1224 error = target_mmap(0, INTERP_MAP_SIZE,
1225 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1226 -1, 0);
1227 if (error == -1) {
1228 perror("mmap");
1229 exit(-1);
1231 load_addr = error;
1232 load_addr_set = 1;
1235 eppnt = elf_phdata;
1236 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
1237 if (eppnt->p_type == PT_LOAD) {
1238 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
1239 int elf_prot = 0;
1240 abi_ulong vaddr = 0;
1241 abi_ulong k;
1243 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
1244 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1245 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1246 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
1247 elf_type |= MAP_FIXED;
1248 vaddr = eppnt->p_vaddr;
1250 error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
1251 eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
1252 elf_prot,
1253 elf_type,
1254 interpreter_fd,
1255 eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
1257 if (error == -1) {
1258 /* Real error */
1259 close(interpreter_fd);
1260 free(elf_phdata);
1261 return ~((abi_ulong)0UL);
1264 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
1265 load_addr = error;
1266 load_addr_set = 1;
1270 * Find the end of the file mapping for this phdr, and keep
1271 * track of the largest address we see for this.
1273 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
1274 if (k > elf_bss) elf_bss = k;
1277 * Do the same thing for the memory mapping - between
1278 * elf_bss and last_bss is the bss section.
1280 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
1281 if (k > last_bss) last_bss = k;
1284 /* Now use mmap to map the library into memory. */
1286 close(interpreter_fd);
1289 * Now fill out the bss section. First pad the last page up
1290 * to the page boundary, and then perform a mmap to make sure
1291 * that there are zeromapped pages up to and including the last
1292 * bss page.
1294 padzero(elf_bss, last_bss);
1295 elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
1297 /* Map the last of the bss segment */
1298 if (last_bss > elf_bss) {
1299 target_mmap(elf_bss, last_bss-elf_bss,
1300 PROT_READ|PROT_WRITE|PROT_EXEC,
1301 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
1303 free(elf_phdata);
1305 *interp_load_addr = load_addr;
1306 return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
1309 static int symfind(const void *s0, const void *s1)
1311 struct elf_sym *key = (struct elf_sym *)s0;
1312 struct elf_sym *sym = (struct elf_sym *)s1;
1313 int result = 0;
1314 if (key->st_value < sym->st_value) {
1315 result = -1;
1316 } else if (key->st_value >= sym->st_value + sym->st_size) {
1317 result = 1;
1319 return result;
1322 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
1324 #if ELF_CLASS == ELFCLASS32
1325 struct elf_sym *syms = s->disas_symtab.elf32;
1326 #else
1327 struct elf_sym *syms = s->disas_symtab.elf64;
1328 #endif
1330 // binary search
1331 struct elf_sym key;
1332 struct elf_sym *sym;
1334 key.st_value = orig_addr;
1336 sym = bsearch(&key, syms, s->disas_num_syms, sizeof(*syms), symfind);
1337 if (sym != NULL) {
1338 return s->disas_strtab + sym->st_name;
1341 return "";
1344 /* FIXME: This should use elf_ops.h */
1345 static int symcmp(const void *s0, const void *s1)
1347 struct elf_sym *sym0 = (struct elf_sym *)s0;
1348 struct elf_sym *sym1 = (struct elf_sym *)s1;
1349 return (sym0->st_value < sym1->st_value)
1350 ? -1
1351 : ((sym0->st_value > sym1->st_value) ? 1 : 0);
1354 /* Best attempt to load symbols from this ELF object. */
1355 static void load_symbols(struct elfhdr *hdr, int fd)
1357 unsigned int i, nsyms;
1358 struct elf_shdr sechdr, symtab, strtab;
1359 char *strings;
1360 struct syminfo *s;
1361 struct elf_sym *syms;
1363 lseek(fd, hdr->e_shoff, SEEK_SET);
1364 for (i = 0; i < hdr->e_shnum; i++) {
1365 if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
1366 return;
1367 #ifdef BSWAP_NEEDED
1368 bswap_shdr(&sechdr);
1369 #endif
1370 if (sechdr.sh_type == SHT_SYMTAB) {
1371 symtab = sechdr;
1372 lseek(fd, hdr->e_shoff
1373 + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
1374 if (read(fd, &strtab, sizeof(strtab))
1375 != sizeof(strtab))
1376 return;
1377 #ifdef BSWAP_NEEDED
1378 bswap_shdr(&strtab);
1379 #endif
1380 goto found;
1383 return; /* Shouldn't happen... */
1385 found:
1386 /* We now know where the strtab and symtab are. Snarf them. */
1387 s = malloc(sizeof(*s));
1388 syms = malloc(symtab.sh_size);
1389 if (!syms)
1390 return;
1391 s->disas_strtab = strings = malloc(strtab.sh_size);
1392 if (!s->disas_strtab)
1393 return;
1395 lseek(fd, symtab.sh_offset, SEEK_SET);
1396 if (read(fd, syms, symtab.sh_size) != symtab.sh_size)
1397 return;
1399 nsyms = symtab.sh_size / sizeof(struct elf_sym);
1401 i = 0;
1402 while (i < nsyms) {
1403 #ifdef BSWAP_NEEDED
1404 bswap_sym(syms + i);
1405 #endif
1406 // Throw away entries which we do not need.
1407 if (syms[i].st_shndx == SHN_UNDEF ||
1408 syms[i].st_shndx >= SHN_LORESERVE ||
1409 ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
1410 nsyms--;
1411 if (i < nsyms) {
1412 syms[i] = syms[nsyms];
1414 continue;
1416 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
1417 /* The bottom address bit marks a Thumb or MIPS16 symbol. */
1418 syms[i].st_value &= ~(target_ulong)1;
1419 #endif
1420 i++;
1422 syms = realloc(syms, nsyms * sizeof(*syms));
1424 qsort(syms, nsyms, sizeof(*syms), symcmp);
1426 lseek(fd, strtab.sh_offset, SEEK_SET);
1427 if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
1428 return;
1429 s->disas_num_syms = nsyms;
1430 #if ELF_CLASS == ELFCLASS32
1431 s->disas_symtab.elf32 = syms;
1432 s->lookup_symbol = lookup_symbolxx;
1433 #else
1434 s->disas_symtab.elf64 = syms;
1435 s->lookup_symbol = lookup_symbolxx;
1436 #endif
1437 s->next = syminfos;
1438 syminfos = s;
1441 int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
1442 struct image_info * info)
1444 struct elfhdr elf_ex;
1445 struct elfhdr interp_elf_ex;
1446 struct exec interp_ex;
1447 int interpreter_fd = -1; /* avoid warning */
1448 abi_ulong load_addr, load_bias;
1449 int load_addr_set = 0;
1450 unsigned int interpreter_type = INTERPRETER_NONE;
1451 unsigned char ibcs2_interpreter;
1452 int i;
1453 abi_ulong mapped_addr;
1454 struct elf_phdr * elf_ppnt;
1455 struct elf_phdr *elf_phdata;
1456 abi_ulong elf_bss, k, elf_brk;
1457 int retval;
1458 char * elf_interpreter;
1459 abi_ulong elf_entry, interp_load_addr = 0;
1460 int status;
1461 abi_ulong start_code, end_code, start_data, end_data;
1462 abi_ulong reloc_func_desc = 0;
1463 abi_ulong elf_stack;
1464 char passed_fileno[6];
1466 ibcs2_interpreter = 0;
1467 status = 0;
1468 load_addr = 0;
1469 load_bias = 0;
1470 elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
1471 #ifdef BSWAP_NEEDED
1472 bswap_ehdr(&elf_ex);
1473 #endif
1475 /* First of all, some simple consistency checks */
1476 if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
1477 (! elf_check_arch(elf_ex.e_machine))) {
1478 return -ENOEXEC;
1481 bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
1482 bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
1483 bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);
1484 if (!bprm->p) {
1485 retval = -E2BIG;
1488 /* Now read in all of the header information */
1489 elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
1490 if (elf_phdata == NULL) {
1491 return -ENOMEM;
1494 retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
1495 if(retval > 0) {
1496 retval = read(bprm->fd, (char *) elf_phdata,
1497 elf_ex.e_phentsize * elf_ex.e_phnum);
1500 if (retval < 0) {
1501 perror("load_elf_binary");
1502 exit(-1);
1503 free (elf_phdata);
1504 return -errno;
1507 #ifdef BSWAP_NEEDED
1508 elf_ppnt = elf_phdata;
1509 for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
1510 bswap_phdr(elf_ppnt);
1512 #endif
1513 elf_ppnt = elf_phdata;
1515 elf_bss = 0;
1516 elf_brk = 0;
1519 elf_stack = ~((abi_ulong)0UL);
1520 elf_interpreter = NULL;
1521 start_code = ~((abi_ulong)0UL);
1522 end_code = 0;
1523 start_data = 0;
1524 end_data = 0;
1525 interp_ex.a_info = 0;
1527 for(i=0;i < elf_ex.e_phnum; i++) {
1528 if (elf_ppnt->p_type == PT_INTERP) {
1529 if ( elf_interpreter != NULL )
1531 free (elf_phdata);
1532 free(elf_interpreter);
1533 close(bprm->fd);
1534 return -EINVAL;
1537 /* This is the program interpreter used for
1538 * shared libraries - for now assume that this
1539 * is an a.out format binary
1542 elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
1544 if (elf_interpreter == NULL) {
1545 free (elf_phdata);
1546 close(bprm->fd);
1547 return -ENOMEM;
1550 retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
1551 if(retval >= 0) {
1552 retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
1554 if(retval < 0) {
1555 perror("load_elf_binary2");
1556 exit(-1);
1559 /* If the program interpreter is one of these two,
1560 then assume an iBCS2 image. Otherwise assume
1561 a native linux image. */
1563 /* JRP - Need to add X86 lib dir stuff here... */
1565 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
1566 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
1567 ibcs2_interpreter = 1;
1570 #if 0
1571 printf("Using ELF interpreter %s\n", path(elf_interpreter));
1572 #endif
1573 if (retval >= 0) {
1574 retval = open(path(elf_interpreter), O_RDONLY);
1575 if(retval >= 0) {
1576 interpreter_fd = retval;
1578 else {
1579 perror(elf_interpreter);
1580 exit(-1);
1581 /* retval = -errno; */
1585 if (retval >= 0) {
1586 retval = lseek(interpreter_fd, 0, SEEK_SET);
1587 if(retval >= 0) {
1588 retval = read(interpreter_fd,bprm->buf,128);
1591 if (retval >= 0) {
1592 interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
1593 interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
1595 if (retval < 0) {
1596 perror("load_elf_binary3");
1597 exit(-1);
1598 free (elf_phdata);
1599 free(elf_interpreter);
1600 close(bprm->fd);
1601 return retval;
1604 elf_ppnt++;
1607 /* Some simple consistency checks for the interpreter */
1608 if (elf_interpreter){
1609 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
1611 /* Now figure out which format our binary is */
1612 if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
1613 (N_MAGIC(interp_ex) != QMAGIC)) {
1614 interpreter_type = INTERPRETER_ELF;
1617 if (interp_elf_ex.e_ident[0] != 0x7f ||
1618 strncmp((char *)&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
1619 interpreter_type &= ~INTERPRETER_ELF;
1622 if (!interpreter_type) {
1623 free(elf_interpreter);
1624 free(elf_phdata);
1625 close(bprm->fd);
1626 return -ELIBBAD;
1630 /* OK, we are done with that, now set up the arg stuff,
1631 and then start this sucker up */
1634 char * passed_p;
1636 if (interpreter_type == INTERPRETER_AOUT) {
1637 snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
1638 passed_p = passed_fileno;
1640 if (elf_interpreter) {
1641 bprm->p = copy_elf_strings(1,&passed_p,bprm->page,bprm->p);
1642 bprm->argc++;
1645 if (!bprm->p) {
1646 if (elf_interpreter) {
1647 free(elf_interpreter);
1649 free (elf_phdata);
1650 close(bprm->fd);
1651 return -E2BIG;
1655 /* OK, This is the point of no return */
1656 info->end_data = 0;
1657 info->end_code = 0;
1658 info->start_mmap = (abi_ulong)ELF_START_MMAP;
1659 info->mmap = 0;
1660 elf_entry = (abi_ulong) elf_ex.e_entry;
1662 #if defined(CONFIG_USE_GUEST_BASE)
1664 * If the user has not explicitly set the guest_base, we probe here
1665 * whether we should set it automatically.
1667 if (!have_guest_base) {
1669 * Go through ELF program header table and find the address
1670 * range used by loadable segments. Check that this is available on
1671 * the host, and if not find a suitable value for guest_base. */
1672 abi_ulong app_start = ~0;
1673 abi_ulong app_end = 0;
1674 abi_ulong addr;
1675 unsigned long host_start;
1676 unsigned long real_start;
1677 unsigned long host_size;
1678 for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
1679 i++, elf_ppnt++) {
1680 if (elf_ppnt->p_type != PT_LOAD)
1681 continue;
1682 addr = elf_ppnt->p_vaddr;
1683 if (addr < app_start) {
1684 app_start = addr;
1686 addr += elf_ppnt->p_memsz;
1687 if (addr > app_end) {
1688 app_end = addr;
1692 /* If we don't have any loadable segments then something
1693 is very wrong. */
1694 assert(app_start < app_end);
1696 /* Round addresses to page boundaries. */
1697 app_start = app_start & qemu_host_page_mask;
1698 app_end = HOST_PAGE_ALIGN(app_end);
1699 if (app_start < mmap_min_addr) {
1700 host_start = HOST_PAGE_ALIGN(mmap_min_addr);
1701 } else {
1702 host_start = app_start;
1703 if (host_start != app_start) {
1704 fprintf(stderr, "qemu: Address overflow loading ELF binary\n");
1705 abort();
1708 host_size = app_end - app_start;
1709 while (1) {
1710 /* Do not use mmap_find_vma here because that is limited to the
1711 guest address space. We are going to make the
1712 guest address space fit whatever we're given. */
1713 real_start = (unsigned long)mmap((void *)host_start, host_size,
1714 PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
1715 if (real_start == (unsigned long)-1) {
1716 fprintf(stderr, "qemu: Virtual memory exhausted\n");
1717 abort();
1719 if (real_start == host_start) {
1720 break;
1722 /* That address didn't work. Unmap and try a different one.
1723 The address the host picked is typically
1724 right at the top of the host address space and leaves the
1725 guest with no usable address space. Resort to a linear search.
1726 We already compensated for mmap_min_addr, so this should not
1727 happen often. Probably means we got unlucky and host address
1728 space randomization put a shared library somewhere
1729 inconvenient. */
1730 munmap((void *)real_start, host_size);
1731 host_start += qemu_host_page_size;
1732 if (host_start == app_start) {
1733 /* Theoretically possible if host doesn't have any
1734 suitably aligned areas. Normally the first mmap will
1735 fail. */
1736 fprintf(stderr, "qemu: Unable to find space for application\n");
1737 abort();
1740 qemu_log("Relocating guest address space from 0x" TARGET_ABI_FMT_lx
1741 " to 0x%lx\n", app_start, real_start);
1742 guest_base = real_start - app_start;
1744 #endif /* CONFIG_USE_GUEST_BASE */
1746 /* Do this so that we can load the interpreter, if need be. We will
1747 change some of these later */
1748 info->rss = 0;
1749 bprm->p = setup_arg_pages(bprm->p, bprm, info);
1750 info->start_stack = bprm->p;
1752 /* Now we do a little grungy work by mmaping the ELF image into
1753 * the correct location in memory. At this point, we assume that
1754 * the image should be loaded at fixed address, not at a variable
1755 * address.
1758 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
1759 int elf_prot = 0;
1760 int elf_flags = 0;
1761 abi_ulong error;
1763 if (elf_ppnt->p_type != PT_LOAD)
1764 continue;
1766 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
1767 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1768 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1769 elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
1770 if (elf_ex.e_type == ET_EXEC || load_addr_set) {
1771 elf_flags |= MAP_FIXED;
1772 } else if (elf_ex.e_type == ET_DYN) {
1773 /* Try and get dynamic programs out of the way of the default mmap
1774 base, as well as whatever program they might try to exec. This
1775 is because the brk will follow the loader, and is not movable. */
1776 /* NOTE: for qemu, we do a big mmap to get enough space
1777 without hardcoding any address */
1778 error = target_mmap(0, ET_DYN_MAP_SIZE,
1779 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1780 -1, 0);
1781 if (error == -1) {
1782 perror("mmap");
1783 exit(-1);
1785 load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
1788 error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
1789 (elf_ppnt->p_filesz +
1790 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
1791 elf_prot,
1792 (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
1793 bprm->fd,
1794 (elf_ppnt->p_offset -
1795 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
1796 if (error == -1) {
1797 perror("mmap");
1798 exit(-1);
1801 #ifdef LOW_ELF_STACK
1802 if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
1803 elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
1804 #endif
1806 if (!load_addr_set) {
1807 load_addr_set = 1;
1808 load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
1809 if (elf_ex.e_type == ET_DYN) {
1810 load_bias += error -
1811 TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
1812 load_addr += load_bias;
1813 reloc_func_desc = load_bias;
1816 k = elf_ppnt->p_vaddr;
1817 if (k < start_code)
1818 start_code = k;
1819 if (start_data < k)
1820 start_data = k;
1821 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1822 if (k > elf_bss)
1823 elf_bss = k;
1824 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1825 end_code = k;
1826 if (end_data < k)
1827 end_data = k;
1828 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1829 if (k > elf_brk) elf_brk = k;
1832 elf_entry += load_bias;
1833 elf_bss += load_bias;
1834 elf_brk += load_bias;
1835 start_code += load_bias;
1836 end_code += load_bias;
1837 start_data += load_bias;
1838 end_data += load_bias;
1840 if (elf_interpreter) {
1841 if (interpreter_type & 1) {
1842 elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
1844 else if (interpreter_type & 2) {
1845 elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
1846 &interp_load_addr);
1848 reloc_func_desc = interp_load_addr;
1850 close(interpreter_fd);
1851 free(elf_interpreter);
1853 if (elf_entry == ~((abi_ulong)0UL)) {
1854 printf("Unable to load interpreter\n");
1855 free(elf_phdata);
1856 exit(-1);
1857 return 0;
1861 free(elf_phdata);
1863 if (qemu_log_enabled())
1864 load_symbols(&elf_ex, bprm->fd);
1866 if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
1867 info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
1869 #ifdef LOW_ELF_STACK
1870 info->start_stack = bprm->p = elf_stack - 4;
1871 #endif
1872 bprm->p = create_elf_tables(bprm->p,
1873 bprm->argc,
1874 bprm->envc,
1875 &elf_ex,
1876 load_addr, load_bias,
1877 interp_load_addr,
1878 (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
1879 info);
1880 info->load_addr = reloc_func_desc;
1881 info->start_brk = info->brk = elf_brk;
1882 info->end_code = end_code;
1883 info->start_code = start_code;
1884 info->start_data = start_data;
1885 info->end_data = end_data;
1886 info->start_stack = bprm->p;
1888 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1889 sections */
1890 set_brk(elf_bss, elf_brk);
1892 padzero(elf_bss, elf_brk);
1894 #if 0
1895 printf("(start_brk) %x\n" , info->start_brk);
1896 printf("(end_code) %x\n" , info->end_code);
1897 printf("(start_code) %x\n" , info->start_code);
1898 printf("(end_data) %x\n" , info->end_data);
1899 printf("(start_stack) %x\n" , info->start_stack);
1900 printf("(brk) %x\n" , info->brk);
1901 #endif
1903 if ( info->personality == PER_SVR4 )
1905 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1906 and some applications "depend" upon this behavior.
1907 Since we do not have the power to recompile these, we
1908 emulate the SVr4 behavior. Sigh. */
1909 mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
1910 MAP_FIXED | MAP_PRIVATE, -1, 0);
1913 info->entry = elf_entry;
1915 #ifdef USE_ELF_CORE_DUMP
1916 bprm->core_dump = &elf_core_dump;
1917 #endif
1919 return 0;
1922 #ifdef USE_ELF_CORE_DUMP
1925 * Definitions to generate Intel SVR4-like core files.
1926 * These mostly have the same names as the SVR4 types with "target_elf_"
1927 * tacked on the front to prevent clashes with linux definitions,
1928 * and the typedef forms have been avoided. This is mostly like
1929 * the SVR4 structure, but more Linuxy, with things that Linux does
1930 * not support and which gdb doesn't really use excluded.
1932 * Fields we don't dump (their contents are zero) in linux-user qemu
1933 * are marked with XXX.
1935 * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
1937 * Porting the ELF coredump support to a target is a (quite) simple process. First you
1938 * define USE_ELF_CORE_DUMP in target ELF code (where init_thread() for
1939 * the target resides):
1941 * #define USE_ELF_CORE_DUMP
1943 * Next you define the type of the register set used for dumping. The ELF
1944 * specification says that it needs to be an array of elf_greg_t with ELF_NREG elements.
1946 * typedef <target_regtype> target_elf_greg_t;
1947 * #define ELF_NREG <number of registers>
1948 * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1950 * The last step is to implement a target-specific function that copies the registers
1951 * from the given CPU into the register set just specified. The prototype is:
1953 * static void elf_core_copy_regs(target_elf_gregset_t *regs,
1954 * const CPUState *env);
1956 * Parameters:
1957 * regs - copy register values into here (allocated and zeroed by caller)
1958 * env - copy registers from here
1960 * An example for the ARM target is provided in this file.
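/* Editor's illustrative sketch (not part of the original file): the three
 * steps described above, written out for a hypothetical target with four
 * general-purpose registers and a pc.  The register names are made up;
 * the real, working instance for ARM is the elf_core_copy_regs() defined
 * earlier in this file. */
#if 0
#define USE_ELF_CORE_DUMP

typedef target_ulong target_elf_greg_t;
#define ELF_NREG 5
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUState *env)
{
    int i;

    for (i = 0; i < 4; i++) {
        (*regs)[i] = tswapl(env->regs[i]);   /* hypothetical GPRs */
    }
    (*regs)[4] = tswapl(env->pc);            /* hypothetical pc   */
}
#endif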
1963 /* An ELF note in memory */
1964 struct memelfnote {
1965 const char *name;
1966 size_t namesz;
1967 size_t namesz_rounded;
1968 int type;
1969 size_t datasz;
1970 void *data;
1971 size_t notesz;
1974 struct target_elf_siginfo {
1975 int si_signo; /* signal number */
1976 int si_code; /* extra code */
1977 int si_errno; /* errno */
1980 struct target_elf_prstatus {
1981 struct target_elf_siginfo pr_info; /* Info associated with signal */
1982 short pr_cursig; /* Current signal */
1983 target_ulong pr_sigpend; /* XXX */
1984 target_ulong pr_sighold; /* XXX */
1985 target_pid_t pr_pid;
1986 target_pid_t pr_ppid;
1987 target_pid_t pr_pgrp;
1988 target_pid_t pr_sid;
1989 struct target_timeval pr_utime; /* XXX User time */
1990 struct target_timeval pr_stime; /* XXX System time */
1991 struct target_timeval pr_cutime; /* XXX Cumulative user time */
1992 struct target_timeval pr_cstime; /* XXX Cumulative system time */
1993 target_elf_gregset_t pr_reg; /* GP registers */
1994 int pr_fpvalid; /* XXX */
1997 #define ELF_PRARGSZ (80) /* Number of chars for args */
1999 struct target_elf_prpsinfo {
2000 char pr_state; /* numeric process state */
2001 char pr_sname; /* char for pr_state */
2002 char pr_zomb; /* zombie */
2003 char pr_nice; /* nice val */
2004 target_ulong pr_flag; /* flags */
2005 target_uid_t pr_uid;
2006 target_gid_t pr_gid;
2007 target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
2008 /* Lots missing */
2009 char pr_fname[16]; /* filename of executable */
2010 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
2013 /* Here is the structure in which status of each thread is captured. */
2014 struct elf_thread_status {
2015 QTAILQ_ENTRY(elf_thread_status) ets_link;
2016 struct target_elf_prstatus prstatus; /* NT_PRSTATUS */
2017 #if 0
2018 elf_fpregset_t fpu; /* NT_PRFPREG */
2019 struct task_struct *thread;
2020 elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */
2021 #endif
2022 struct memelfnote notes[1];
2023 int num_notes;
2026 struct elf_note_info {
2027 struct memelfnote *notes;
2028 struct target_elf_prstatus *prstatus; /* NT_PRSTATUS */
2029 struct target_elf_prpsinfo *psinfo; /* NT_PRPSINFO */
2031 QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list;
2032 #if 0
2034 * Current version of ELF coredump doesn't support
2035 * dumping fp regs etc.
2037 elf_fpregset_t *fpu;
2038 elf_fpxregset_t *xfpu;
2039 int thread_status_size;
2040 #endif
2041 int notes_size;
2042 int numnote;
2045 struct vm_area_struct {
2046 abi_ulong vma_start; /* start vaddr of memory region */
2047 abi_ulong vma_end; /* end vaddr of memory region */
2048 abi_ulong vma_flags; /* protection etc. flags for the region */
2049 QTAILQ_ENTRY(vm_area_struct) vma_link;
2052 struct mm_struct {
2053 QTAILQ_HEAD(, vm_area_struct) mm_mmap;
2054 int mm_count; /* number of mappings */
2057 static struct mm_struct *vma_init(void);
2058 static void vma_delete(struct mm_struct *);
2059 static int vma_add_mapping(struct mm_struct *, abi_ulong,
2060 abi_ulong, abi_ulong);
2061 static int vma_get_mapping_count(const struct mm_struct *);
2062 static struct vm_area_struct *vma_first(const struct mm_struct *);
2063 static struct vm_area_struct *vma_next(struct vm_area_struct *);
2064 static abi_ulong vma_dump_size(const struct vm_area_struct *);
2065 static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
2066 unsigned long flags);
2068 static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
2069 static void fill_note(struct memelfnote *, const char *, int,
2070 unsigned int, void *);
2071 static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
2072 static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
2073 static void fill_auxv_note(struct memelfnote *, const TaskState *);
2074 static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
2075 static size_t note_size(const struct memelfnote *);
2076 static void free_note_info(struct elf_note_info *);
2077 static int fill_note_info(struct elf_note_info *, long, const CPUState *);
2078 static void fill_thread_info(struct elf_note_info *, const CPUState *);
2079 static int core_dump_filename(const TaskState *, char *, size_t);
2081 static int dump_write(int, const void *, size_t);
2082 static int write_note(struct memelfnote *, int);
2083 static int write_note_info(struct elf_note_info *, int);
2085 #ifdef BSWAP_NEEDED
2086 static void bswap_prstatus(struct target_elf_prstatus *);
2087 static void bswap_psinfo(struct target_elf_prpsinfo *);
2089 static void bswap_prstatus(struct target_elf_prstatus *prstatus)
2091 prstatus->pr_info.si_signo = tswapl(prstatus->pr_info.si_signo);
2092 prstatus->pr_info.si_code = tswapl(prstatus->pr_info.si_code);
2093 prstatus->pr_info.si_errno = tswapl(prstatus->pr_info.si_errno);
2094 prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
2095 prstatus->pr_sigpend = tswapl(prstatus->pr_sigpend);
2096 prstatus->pr_sighold = tswapl(prstatus->pr_sighold);
2097 prstatus->pr_pid = tswap32(prstatus->pr_pid);
2098 prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
2099 prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
2100 prstatus->pr_sid = tswap32(prstatus->pr_sid);
2101 /* cpu times are not filled, so we skip them */
2102 /* regs should be in correct format already */
2103 prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
2106 static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
2108 psinfo->pr_flag = tswapl(psinfo->pr_flag);
2109 psinfo->pr_uid = tswap16(psinfo->pr_uid);
2110 psinfo->pr_gid = tswap16(psinfo->pr_gid);
2111 psinfo->pr_pid = tswap32(psinfo->pr_pid);
2112 psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
2113 psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
2114 psinfo->pr_sid = tswap32(psinfo->pr_sid);
2116 #endif /* BSWAP_NEEDED */
2119 * Minimal support for Linux memory regions. These are needed
2120 * when we are finding out what memory exactly belongs to the
2121 * emulated process. No locks are needed here, as long as the
2122 * thread that received the signal is stopped.
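 *
 * A typical usage pattern (this is how elf_core_dump() below drives
 * these helpers; sketch only, error handling omitted):
 *
 *     struct mm_struct *mm = vma_init();
 *     walk_memory_regions(mm, vma_walker);
 *     segs = vma_get_mapping_count(mm);
 *     for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma))
 *         ... write one PT_LOAD header and vma_dump_size(vma) bytes ...
 *     vma_delete(mm);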
2125 static struct mm_struct *vma_init(void)
2127 struct mm_struct *mm;
2129 if ((mm = qemu_malloc(sizeof (*mm))) == NULL)
2130 return (NULL);
2132 mm->mm_count = 0;
2133 QTAILQ_INIT(&mm->mm_mmap);
2135 return (mm);
2138 static void vma_delete(struct mm_struct *mm)
2140 struct vm_area_struct *vma;
2142 while ((vma = vma_first(mm)) != NULL) {
2143 QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
2144 qemu_free(vma);
2146 qemu_free(mm);
2149 static int vma_add_mapping(struct mm_struct *mm, abi_ulong start,
2150 abi_ulong end, abi_ulong flags)
2152 struct vm_area_struct *vma;
2154 if ((vma = qemu_mallocz(sizeof (*vma))) == NULL)
2155 return (-1);
2157 vma->vma_start = start;
2158 vma->vma_end = end;
2159 vma->vma_flags = flags;
2161 QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
2162 mm->mm_count++;
2164 return (0);
2167 static struct vm_area_struct *vma_first(const struct mm_struct *mm)
2169 return (QTAILQ_FIRST(&mm->mm_mmap));
2172 static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
2174 return (QTAILQ_NEXT(vma, vma_link));
2177 static int vma_get_mapping_count(const struct mm_struct *mm)
2179 return (mm->mm_count);
2183 * Calculate the file (dump) size of a given memory region.
2185 static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
2187 /* if we cannot even read the first page, skip it */
2188 if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
2189 return (0);
2192 * Usually we don't dump executable pages as they contain
2193 * non-writable code that the debugger can read directly from
2194 * the target library etc. However, thread stacks are also
2195 * marked executable, so we read in the first page of the given
2196 * region and check whether it contains an ELF header. If there
2197 * is no ELF header, we dump the region.
2199 if (vma->vma_flags & PROT_EXEC) {
2200 char page[TARGET_PAGE_SIZE];
2202 copy_from_user(page, vma->vma_start, sizeof (page));
2203 if ((page[EI_MAG0] == ELFMAG0) &&
2204 (page[EI_MAG1] == ELFMAG1) &&
2205 (page[EI_MAG2] == ELFMAG2) &&
2206 (page[EI_MAG3] == ELFMAG3)) {
2208 * The mapping probably comes from an ELF binary. Don't
2209 * dump it.
2211 return (0);
2215 return (vma->vma_end - vma->vma_start);
2218 static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
2219 unsigned long flags)
2221 struct mm_struct *mm = (struct mm_struct *)priv;
2223 vma_add_mapping(mm, start, end, flags);
2224 return (0);
2227 static void fill_note(struct memelfnote *note, const char *name, int type,
2228 unsigned int sz, void *data)
2230 unsigned int namesz;
2232 namesz = strlen(name) + 1;
2233 note->name = name;
2234 note->namesz = namesz;
2235 note->namesz_rounded = roundup(namesz, sizeof (int32_t));
2236 note->type = type;
2237 note->datasz = roundup(sz, sizeof (int32_t));
2238 note->data = data;
2241 * We calculate the rounded-up note size here, as specified
2242 * by the ELF document.
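 *
 * Worked example (numbers are purely illustrative): for the name "CORE",
 * namesz is 5 (including the trailing NUL) and rounds up to 8; a 116-byte
 * descriptor is already a multiple of 4 and stays 116, giving
 * notesz = sizeof(struct elf_note) + 8 + 116.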
2244 note->notesz = sizeof (struct elf_note) +
2245 note->namesz_rounded + note->datasz;
2248 static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
2249 uint32_t flags)
2251 (void) memset(elf, 0, sizeof(*elf));
2253 (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
2254 elf->e_ident[EI_CLASS] = ELF_CLASS;
2255 elf->e_ident[EI_DATA] = ELF_DATA;
2256 elf->e_ident[EI_VERSION] = EV_CURRENT;
2257 elf->e_ident[EI_OSABI] = ELF_OSABI;
2259 elf->e_type = ET_CORE;
2260 elf->e_machine = machine;
2261 elf->e_version = EV_CURRENT;
2262 elf->e_phoff = sizeof(struct elfhdr);
2263 elf->e_flags = flags;
2264 elf->e_ehsize = sizeof(struct elfhdr);
2265 elf->e_phentsize = sizeof(struct elf_phdr);
2266 elf->e_phnum = segs;
2268 #ifdef BSWAP_NEEDED
2269 bswap_ehdr(elf);
2270 #endif
2273 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
2275 phdr->p_type = PT_NOTE;
2276 phdr->p_offset = offset;
2277 phdr->p_vaddr = 0;
2278 phdr->p_paddr = 0;
2279 phdr->p_filesz = sz;
2280 phdr->p_memsz = 0;
2281 phdr->p_flags = 0;
2282 phdr->p_align = 0;
2284 #ifdef BSWAP_NEEDED
2285 bswap_phdr(phdr);
2286 #endif
2289 static size_t note_size(const struct memelfnote *note)
2291 return (note->notesz);
2294 static void fill_prstatus(struct target_elf_prstatus *prstatus,
2295 const TaskState *ts, int signr)
2297 (void) memset(prstatus, 0, sizeof (*prstatus));
2298 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
2299 prstatus->pr_pid = ts->ts_tid;
2300 prstatus->pr_ppid = getppid();
2301 prstatus->pr_pgrp = getpgrp();
2302 prstatus->pr_sid = getsid(0);
2304 #ifdef BSWAP_NEEDED
2305 bswap_prstatus(prstatus);
2306 #endif
2309 static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
2311 char *filename, *base_filename;
2312 unsigned int i, len;
2314 (void) memset(psinfo, 0, sizeof (*psinfo));
2316 len = ts->info->arg_end - ts->info->arg_start;
2317 if (len >= ELF_PRARGSZ)
2318 len = ELF_PRARGSZ - 1;
2319 if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
2320 return -EFAULT;
2321 for (i = 0; i < len; i++)
2322 if (psinfo->pr_psargs[i] == 0)
2323 psinfo->pr_psargs[i] = ' ';
2324 psinfo->pr_psargs[len] = 0;
2326 psinfo->pr_pid = getpid();
2327 psinfo->pr_ppid = getppid();
2328 psinfo->pr_pgrp = getpgrp();
2329 psinfo->pr_sid = getsid(0);
2330 psinfo->pr_uid = getuid();
2331 psinfo->pr_gid = getgid();
2333 filename = strdup(ts->bprm->filename);
2334 base_filename = strdup(basename(filename));
2335 (void) strncpy(psinfo->pr_fname, base_filename,
2336 sizeof(psinfo->pr_fname));
2337 free(base_filename);
2338 free(filename);
2340 #ifdef BSWAP_NEEDED
2341 bswap_psinfo(psinfo);
2342 #endif
2343 return (0);
2346 static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
2348 elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
2349 elf_addr_t orig_auxv = auxv;
2350 abi_ulong val;
2351 void *ptr;
2352 int i, len;
2355 * The auxiliary vector is stored on the target process stack. It contains
2356 * {type, value} pairs that we need to dump into the note. This is not
2357 * strictly necessary, but we do it here for the sake of completeness.
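 *
 * Illustrative layout of what is scanned below (the values are made up):
 *
 *     { AT_PHDR,   0x00008034 }
 *     { AT_PAGESZ, 0x00001000 }
 *     ...
 *     { AT_NULL,   0 }
 *
 * With N pairs including the terminating { AT_NULL, 0 }, len ends up
 * as 2 * N * sizeof(elf_addr_t).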
2360 /* find out the length of the vector; AT_NULL is the terminator */
2361 i = len = 0;
2362 do {
2363 get_user_ual(val, auxv);
2364 i += 2;
2365 auxv += 2 * sizeof (elf_addr_t);
2366 } while (val != AT_NULL);
2367 len = i * sizeof (elf_addr_t);
2369 /* read in whole auxv vector and copy it to memelfnote */
2370 ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
2371 if (ptr != NULL) {
2372 fill_note(note, "CORE", NT_AUXV, len, ptr);
2373 unlock_user(ptr, auxv, len);
2378 * Constructs the name of the coredump file. We use the following
2379 * naming convention:
2380 * qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
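 *
 * For example, a core dumped for /bin/ls by pid 1234 would be named
 * something like (timestamp is illustrative):
 *
 *     qemu_ls_20091231-235959_1234.core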
2382 * Returns 0 in case of success, -1 otherwise (errno is set).
2384 static int core_dump_filename(const TaskState *ts, char *buf,
2385 size_t bufsize)
2387 char timestamp[64];
2388 char *filename = NULL;
2389 char *base_filename = NULL;
2390 struct timeval tv;
2391 struct tm tm;
2393 assert(bufsize >= PATH_MAX);
2395 if (gettimeofday(&tv, NULL) < 0) {
2396 (void) fprintf(stderr, "unable to get current timestamp: %s",
2397 strerror(errno));
2398 return (-1);
2401 filename = strdup(ts->bprm->filename);
2402 base_filename = strdup(basename(filename));
2403 (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S",
2404 localtime_r(&tv.tv_sec, &tm));
2405 (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core",
2406 base_filename, timestamp, (int)getpid());
2407 free(base_filename);
2408 free(filename);
2410 return (0);
2413 static int dump_write(int fd, const void *ptr, size_t size)
2415 const char *bufp = (const char *)ptr;
2416 ssize_t bytes_written, bytes_left;
2417 struct rlimit dumpsize;
2418 off_t pos;
2420 bytes_written = 0;
2421 getrlimit(RLIMIT_CORE, &dumpsize);
2422 if ((pos = lseek(fd, 0, SEEK_CUR))==-1) {
2423 if (errno == ESPIPE) { /* not a seekable stream */
2424 bytes_left = size;
2425 } else {
2426 return pos;
2428 } else {
2429 if (dumpsize.rlim_cur <= pos) {
2430 return -1;
2431 } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
2432 bytes_left = size;
2433 } else {
2434 size_t limit_left=dumpsize.rlim_cur - pos;
2435 bytes_left = limit_left >= size ? size : limit_left ;
2440 * Under normal conditions a single write(2) should do, but
2441 * for sockets etc. this mechanism is more portable.
2443 do {
2444 bytes_written = write(fd, bufp, bytes_left);
2445 if (bytes_written < 0) {
2446 if (errno == EINTR)
2447 continue;
2448 return (-1);
2449 } else if (bytes_written == 0) { /* eof */
2450 return (-1);
2452 bufp += bytes_written;
2453 bytes_left -= bytes_written;
2454 } while (bytes_left > 0);
2456 return (0);
2459 static int write_note(struct memelfnote *men, int fd)
2461 struct elf_note en;
2463 en.n_namesz = men->namesz;
2464 en.n_type = men->type;
2465 en.n_descsz = men->datasz;
2467 #ifdef BSWAP_NEEDED
2468 bswap_note(&en);
2469 #endif
2471 if (dump_write(fd, &en, sizeof(en)) != 0)
2472 return (-1);
2473 if (dump_write(fd, men->name, men->namesz_rounded) != 0)
2474 return (-1);
2475 if (dump_write(fd, men->data, men->datasz) != 0)
2476 return (-1);
2478 return (0);
2481 static void fill_thread_info(struct elf_note_info *info, const CPUState *env)
2483 TaskState *ts = (TaskState *)env->opaque;
2484 struct elf_thread_status *ets;
2486 ets = qemu_mallocz(sizeof (*ets));
2487 ets->num_notes = 1; /* only prstatus is dumped */
2488 fill_prstatus(&ets->prstatus, ts, 0);
2489 elf_core_copy_regs(&ets->prstatus.pr_reg, env);
2490 fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
2491 &ets->prstatus);
2493 QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
2495 info->notes_size += note_size(&ets->notes[0]);
2498 static int fill_note_info(struct elf_note_info *info,
2499 long signr, const CPUState *env)
2501 #define NUMNOTES 3
2502 CPUState *cpu = NULL;
2503 TaskState *ts = (TaskState *)env->opaque;
2504 int i;
2506 (void) memset(info, 0, sizeof (*info));
2508 QTAILQ_INIT(&info->thread_list);
2510 info->notes = qemu_mallocz(NUMNOTES * sizeof (struct memelfnote));
2511 if (info->notes == NULL)
2512 return (-ENOMEM);
2513 info->prstatus = qemu_mallocz(sizeof (*info->prstatus));
2514 if (info->prstatus == NULL)
2515 return (-ENOMEM);
2516 info->psinfo = qemu_mallocz(sizeof (*info->psinfo));
2517 if (info->psinfo == NULL)
2518 return (-ENOMEM);
2521 * First fill in status (and registers) of current thread
2522 * including process info & aux vector.
2524 fill_prstatus(info->prstatus, ts, signr);
2525 elf_core_copy_regs(&info->prstatus->pr_reg, env);
2526 fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
2527 sizeof (*info->prstatus), info->prstatus);
2528 fill_psinfo(info->psinfo, ts);
2529 fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
2530 sizeof (*info->psinfo), info->psinfo);
2531 fill_auxv_note(&info->notes[2], ts);
2532 info->numnote = NUMNOTES;
2534 info->notes_size = 0;
2535 for (i = 0; i < info->numnote; i++)
2536 info->notes_size += note_size(&info->notes[i]);
2538 /* read and fill status of all threads */
2539 cpu_list_lock();
2540 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2541 if (cpu == thread_env)
2542 continue;
2543 fill_thread_info(info, cpu);
2545 cpu_list_unlock();
2547 return (0);
2550 static void free_note_info(struct elf_note_info *info)
2552 struct elf_thread_status *ets;
2554 while (!QTAILQ_EMPTY(&info->thread_list)) {
2555 ets = QTAILQ_FIRST(&info->thread_list);
2556 QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
2557 qemu_free(ets);
2560 qemu_free(info->prstatus);
2561 qemu_free(info->psinfo);
2562 qemu_free(info->notes);
2565 static int write_note_info(struct elf_note_info *info, int fd)
2567 struct elf_thread_status *ets;
2568 int i, error = 0;
2570 /* write prstatus, psinfo and auxv for current thread */
2571 for (i = 0; i < info->numnote; i++)
2572 if ((error = write_note(&info->notes[i], fd)) != 0)
2573 return (error);
2575 /* write prstatus for each thread */
2576 for (ets = info->thread_list.tqh_first; ets != NULL;
2577 ets = ets->ets_link.tqe_next) {
2578 if ((error = write_note(&ets->notes[0], fd)) != 0)
2579 return (error);
2582 return (0);
2586 * Write out ELF coredump.
2588 * See documentation of ELF object file format in:
2589 * http://www.caldera.com/developers/devspecs/gabi41.pdf
2591 * The coredump format in Linux is the following:
2593 * 0 +----------------------+ \
2594 * | ELF header | ET_CORE |
2595 * +----------------------+ |
2596 * | ELF program headers | |--- headers
2597 * | - NOTE section | |
2598 * | - PT_LOAD sections | |
2599 * +----------------------+ /
2600 * | NOTEs: |
2601 * | - NT_PRSTATUS |
2602 * | - NT_PRPSINFO |
2603 * | - NT_AUXV |
2604 * +----------------------+ <-- aligned to target page
2605 * | Process memory dump |
2606 * : :
2607 * . .
2608 * : :
2609 * | |
2610 * +----------------------+
2612 * NT_PRSTATUS -> struct elf_prstatus (per thread)
2613 * NT_PRPSINFO -> struct elf_prpsinfo
2614 * NT_AUXV is an array of { type, value } pairs (see fill_auxv_note()).
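 *
 * File offsets, as computed by elf_core_dump() below (sketch): with
 * `segs' memory regions there are segs + 1 program headers (one extra
 * for the notes), so the notes begin at
 *
 *     sizeof(struct elfhdr) + (segs + 1) * sizeof(struct elf_phdr)
 *
 * and the memory dump begins at the next ELF_EXEC_PAGESIZE boundary
 * after the notes.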
2616 * The format follows the System V format as closely as possible. The
2617 * current version's limitations are as follows:
2618 * - no floating point registers are dumped
2620 * Function returns 0 in case of success, negative errno otherwise.
2622 * TODO: make this also work at runtime: it should be
2623 * possible to force a coredump from a running process and then
2624 * continue processing. For example, qemu could set up a SIGUSR2
2625 * handler (provided that the target process hasn't registered a
2626 * handler for that signal) that does the dump when the signal is received.
2628 static int elf_core_dump(int signr, const CPUState *env)
2630 const TaskState *ts = (const TaskState *)env->opaque;
2631 struct vm_area_struct *vma = NULL;
2632 char corefile[PATH_MAX];
2633 struct elf_note_info info;
2634 struct elfhdr elf;
2635 struct elf_phdr phdr;
2636 struct rlimit dumpsize;
2637 struct mm_struct *mm = NULL;
2638 off_t offset = 0, data_offset = 0;
2639 int segs = 0;
2640 int fd = -1;
2642 errno = 0;
2643 getrlimit(RLIMIT_CORE, &dumpsize);
2644 if (dumpsize.rlim_cur == 0)
2645 return 0;
2647 if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
2648 return (-errno);
2650 if ((fd = open(corefile, O_WRONLY | O_CREAT,
2651 S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
2652 return (-errno);
2655 * Walk through the target process memory mappings and
2656 * set up a structure containing this information. After
2657 * this point the vma_xxx functions can be used.
2659 if ((mm = vma_init()) == NULL)
2660 goto out;
2662 walk_memory_regions(mm, vma_walker);
2663 segs = vma_get_mapping_count(mm);
2666 * Construct a valid coredump ELF header. We also
2667 * add one more segment for the notes.
2669 fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
2670 if (dump_write(fd, &elf, sizeof (elf)) != 0)
2671 goto out;
2673 /* fill in in-memory version of notes */
2674 if (fill_note_info(&info, signr, env) < 0)
2675 goto out;
2677 offset += sizeof (elf); /* elf header */
2678 offset += (segs + 1) * sizeof (struct elf_phdr); /* program headers */
2680 /* write out notes program header */
2681 fill_elf_note_phdr(&phdr, info.notes_size, offset);
2683 offset += info.notes_size;
2684 if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
2685 goto out;
2688 * The ELF specification wants data to start at a page boundary, so
2689 * we align it here.
2691 offset = roundup(offset, ELF_EXEC_PAGESIZE);
2694 * Write program headers for memory regions mapped in
2695 * the target process.
2697 for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2698 (void) memset(&phdr, 0, sizeof (phdr));
2700 phdr.p_type = PT_LOAD;
2701 phdr.p_offset = offset;
2702 phdr.p_vaddr = vma->vma_start;
2703 phdr.p_paddr = 0;
2704 phdr.p_filesz = vma_dump_size(vma);
2705 offset += phdr.p_filesz;
2706 phdr.p_memsz = vma->vma_end - vma->vma_start;
2707 phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
2708 if (vma->vma_flags & PROT_WRITE)
2709 phdr.p_flags |= PF_W;
2710 if (vma->vma_flags & PROT_EXEC)
2711 phdr.p_flags |= PF_X;
2712 phdr.p_align = ELF_EXEC_PAGESIZE;
2714 dump_write(fd, &phdr, sizeof (phdr));
2718 * Next we write the notes just after the program headers. No
2719 * alignment is needed here.
2721 if (write_note_info(&info, fd) < 0)
2722 goto out;
2724 /* align data to page boundary */
2725 data_offset = lseek(fd, 0, SEEK_CUR);
2726 data_offset = TARGET_PAGE_ALIGN(data_offset);
2727 if (lseek(fd, data_offset, SEEK_SET) != data_offset)
2728 goto out;
2731 * Finally we can dump the process memory into the corefile as well.
2733 for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2734 abi_ulong addr;
2735 abi_ulong end;
2737 end = vma->vma_start + vma_dump_size(vma);
2739 for (addr = vma->vma_start; addr < end;
2740 addr += TARGET_PAGE_SIZE) {
2741 char page[TARGET_PAGE_SIZE];
2742 int error;
2745 * Read in a page from target process memory and
2746 * write it to the coredump file.
2748 error = copy_from_user(page, addr, sizeof (page));
2749 if (error != 0) {
2750 (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
2751 addr);
2752 errno = -error;
2753 goto out;
2755 if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
2756 goto out;
2760 out:
2761 free_note_info(&info);
2762 if (mm != NULL)
2763 vma_delete(mm);
2764 (void) close(fd);
2766 if (errno != 0)
2767 return (-errno);
2768 return (0);
2771 #endif /* USE_ELF_CORE_DUMP */
2773 static int load_aout_interp(void * exptr, int interp_fd)
2775 printf("a.out interpreter not yet supported\n");
2776 return(0);
2779 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
2781 init_thread(regs, infop);