linux-user: Extract load_elf_image from load_elf_interp.
[qemu/stefanha.git] linux-user/elfload.c
1 /* This is the Linux kernel elf-loading code, ported into user space */
2 #include <sys/time.h>
3 #include <sys/param.h>
5 #include <stdio.h>
6 #include <sys/types.h>
7 #include <fcntl.h>
8 #include <errno.h>
9 #include <unistd.h>
10 #include <sys/mman.h>
11 #include <sys/resource.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <time.h>
16 #include "qemu.h"
17 #include "disas.h"
19 #ifdef _ARCH_PPC64
20 #undef ARCH_DLINFO
21 #undef ELF_PLATFORM
22 #undef ELF_HWCAP
23 #undef ELF_CLASS
24 #undef ELF_DATA
25 #undef ELF_ARCH
26 #endif
28 #define ELF_OSABI ELFOSABI_SYSV
30 /* from personality.h */
33 * Flags for bug emulation.
35 * These occupy the top three bytes.
37 enum {
38 ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */
39 FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to
40 descriptors (signal handling) */
41 MMAP_PAGE_ZERO = 0x0100000,
42 ADDR_COMPAT_LAYOUT = 0x0200000,
43 READ_IMPLIES_EXEC = 0x0400000,
44 ADDR_LIMIT_32BIT = 0x0800000,
45 SHORT_INODE = 0x1000000,
46 WHOLE_SECONDS = 0x2000000,
47 STICKY_TIMEOUTS = 0x4000000,
48 ADDR_LIMIT_3GB = 0x8000000,
52 * Personality types.
54 * These go in the low byte. Avoid using the top bit, it will
55 * conflict with error returns.
57 enum {
58 PER_LINUX = 0x0000,
59 PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
60 PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
61 PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
62 PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
63 PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
64 PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
65 PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
66 PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
67 PER_BSD = 0x0006,
68 PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
69 PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
70 PER_LINUX32 = 0x0008,
71 PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
72 PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
73 PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
74 PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
75 PER_RISCOS = 0x000c,
76 PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
77 PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
78 PER_OSF4 = 0x000f, /* OSF/1 v4 */
79 PER_HPUX = 0x0010,
80 PER_MASK = 0x00ff,
84 * Return the base personality without flags.
86 #define personality(pers) (pers & PER_MASK)
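/* For illustration: the bug-emulation flags above all live outside PER_MASK,
 * so personality() strips them and leaves only the base personality, e.g.
 *
 *   personality(PER_LINUX32_3GB) == PER_LINUX32
 *   personality(PER_SVR4)        == 0x0001
 */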
 88 /* this flag is ineffective under linux too, should be deleted */
89 #ifndef MAP_DENYWRITE
90 #define MAP_DENYWRITE 0
91 #endif
93 /* should probably go in elf.h */
94 #ifndef ELIBBAD
95 #define ELIBBAD 80
96 #endif
98 #ifdef TARGET_WORDS_BIGENDIAN
99 #define ELF_DATA ELFDATA2MSB
100 #else
101 #define ELF_DATA ELFDATA2LSB
102 #endif
104 typedef target_ulong target_elf_greg_t;
105 #ifdef USE_UID16
106 typedef uint16_t target_uid_t;
107 typedef uint16_t target_gid_t;
108 #else
109 typedef uint32_t target_uid_t;
110 typedef uint32_t target_gid_t;
111 #endif
112 typedef int32_t target_pid_t;
114 #ifdef TARGET_I386
116 #define ELF_PLATFORM get_elf_platform()
118 static const char *get_elf_platform(void)
120 static char elf_platform[] = "i386";
121 int family = (thread_env->cpuid_version >> 8) & 0xff;
122 if (family > 6)
123 family = 6;
124 if (family >= 3)
125 elf_platform[1] = '0' + family;
126 return elf_platform;
129 #define ELF_HWCAP get_elf_hwcap()
131 static uint32_t get_elf_hwcap(void)
133 return thread_env->cpuid_features;
136 #ifdef TARGET_X86_64
137 #define ELF_START_MMAP 0x2aaaaab000ULL
138 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
140 #define ELF_CLASS ELFCLASS64
141 #define ELF_ARCH EM_X86_64
143 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
145 regs->rax = 0;
146 regs->rsp = infop->start_stack;
147 regs->rip = infop->entry;
150 #define ELF_NREG 27
151 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
 154  * Note that ELF_NREG should be 29 as there should be room for
155 * TRAPNO and ERR "registers" as well but linux doesn't dump
156 * those.
158 * See linux kernel: arch/x86/include/asm/elf.h
160 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
162 (*regs)[0] = env->regs[15];
163 (*regs)[1] = env->regs[14];
164 (*regs)[2] = env->regs[13];
165 (*regs)[3] = env->regs[12];
166 (*regs)[4] = env->regs[R_EBP];
167 (*regs)[5] = env->regs[R_EBX];
168 (*regs)[6] = env->regs[11];
169 (*regs)[7] = env->regs[10];
170 (*regs)[8] = env->regs[9];
171 (*regs)[9] = env->regs[8];
172 (*regs)[10] = env->regs[R_EAX];
173 (*regs)[11] = env->regs[R_ECX];
174 (*regs)[12] = env->regs[R_EDX];
175 (*regs)[13] = env->regs[R_ESI];
176 (*regs)[14] = env->regs[R_EDI];
177 (*regs)[15] = env->regs[R_EAX]; /* XXX */
178 (*regs)[16] = env->eip;
179 (*regs)[17] = env->segs[R_CS].selector & 0xffff;
180 (*regs)[18] = env->eflags;
181 (*regs)[19] = env->regs[R_ESP];
182 (*regs)[20] = env->segs[R_SS].selector & 0xffff;
183 (*regs)[21] = env->segs[R_FS].selector & 0xffff;
184 (*regs)[22] = env->segs[R_GS].selector & 0xffff;
185 (*regs)[23] = env->segs[R_DS].selector & 0xffff;
186 (*regs)[24] = env->segs[R_ES].selector & 0xffff;
187 (*regs)[25] = env->segs[R_FS].selector & 0xffff;
188 (*regs)[26] = env->segs[R_GS].selector & 0xffff;
191 #else
193 #define ELF_START_MMAP 0x80000000
196 * This is used to ensure we don't load something for the wrong architecture.
198 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
201 * These are used to set parameters in the core dumps.
203 #define ELF_CLASS ELFCLASS32
204 #define ELF_ARCH EM_386
206 static inline void init_thread(struct target_pt_regs *regs,
207 struct image_info *infop)
209 regs->esp = infop->start_stack;
210 regs->eip = infop->entry;
212 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
213 starts %edx contains a pointer to a function which might be
 214    registered using `atexit'.  This provides a means for the
215 dynamic linker to call DT_FINI functions for shared libraries
216 that have been loaded before the code runs.
218 A value of 0 tells we have no such handler. */
219 regs->edx = 0;
222 #define ELF_NREG 17
223 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
 226  * Note that ELF_NREG should be 19 as there should be room for
227 * TRAPNO and ERR "registers" as well but linux doesn't dump
228 * those.
230 * See linux kernel: arch/x86/include/asm/elf.h
232 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
234 (*regs)[0] = env->regs[R_EBX];
235 (*regs)[1] = env->regs[R_ECX];
236 (*regs)[2] = env->regs[R_EDX];
237 (*regs)[3] = env->regs[R_ESI];
238 (*regs)[4] = env->regs[R_EDI];
239 (*regs)[5] = env->regs[R_EBP];
240 (*regs)[6] = env->regs[R_EAX];
241 (*regs)[7] = env->segs[R_DS].selector & 0xffff;
242 (*regs)[8] = env->segs[R_ES].selector & 0xffff;
243 (*regs)[9] = env->segs[R_FS].selector & 0xffff;
244 (*regs)[10] = env->segs[R_GS].selector & 0xffff;
245 (*regs)[11] = env->regs[R_EAX]; /* XXX */
246 (*regs)[12] = env->eip;
247 (*regs)[13] = env->segs[R_CS].selector & 0xffff;
248 (*regs)[14] = env->eflags;
249 (*regs)[15] = env->regs[R_ESP];
250 (*regs)[16] = env->segs[R_SS].selector & 0xffff;
252 #endif
254 #define USE_ELF_CORE_DUMP
255 #define ELF_EXEC_PAGESIZE 4096
257 #endif
259 #ifdef TARGET_ARM
261 #define ELF_START_MMAP 0x80000000
263 #define elf_check_arch(x) ( (x) == EM_ARM )
265 #define ELF_CLASS ELFCLASS32
266 #define ELF_ARCH EM_ARM
268 static inline void init_thread(struct target_pt_regs *regs,
269 struct image_info *infop)
271 abi_long stack = infop->start_stack;
272 memset(regs, 0, sizeof(*regs));
273 regs->ARM_cpsr = 0x10;
274 if (infop->entry & 1)
275 regs->ARM_cpsr |= CPSR_T;
276 regs->ARM_pc = infop->entry & 0xfffffffe;
277 regs->ARM_sp = infop->start_stack;
 278     /* FIXME - what to do for failure of get_user()? */
279 get_user_ual(regs->ARM_r2, stack + 8); /* envp */
 280     get_user_ual(regs->ARM_r1, stack + 4); /* argv */
281 /* XXX: it seems that r0 is zeroed after ! */
282 regs->ARM_r0 = 0;
283 /* For uClinux PIC binaries. */
284 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
285 regs->ARM_r10 = infop->start_data;
288 #define ELF_NREG 18
289 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
291 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
293 (*regs)[0] = tswapl(env->regs[0]);
294 (*regs)[1] = tswapl(env->regs[1]);
295 (*regs)[2] = tswapl(env->regs[2]);
296 (*regs)[3] = tswapl(env->regs[3]);
297 (*regs)[4] = tswapl(env->regs[4]);
298 (*regs)[5] = tswapl(env->regs[5]);
299 (*regs)[6] = tswapl(env->regs[6]);
300 (*regs)[7] = tswapl(env->regs[7]);
301 (*regs)[8] = tswapl(env->regs[8]);
302 (*regs)[9] = tswapl(env->regs[9]);
303 (*regs)[10] = tswapl(env->regs[10]);
304 (*regs)[11] = tswapl(env->regs[11]);
305 (*regs)[12] = tswapl(env->regs[12]);
306 (*regs)[13] = tswapl(env->regs[13]);
307 (*regs)[14] = tswapl(env->regs[14]);
308 (*regs)[15] = tswapl(env->regs[15]);
310 (*regs)[16] = tswapl(cpsr_read((CPUState *)env));
311 (*regs)[17] = tswapl(env->regs[0]); /* XXX */
314 #define USE_ELF_CORE_DUMP
315 #define ELF_EXEC_PAGESIZE 4096
317 enum
319 ARM_HWCAP_ARM_SWP = 1 << 0,
320 ARM_HWCAP_ARM_HALF = 1 << 1,
321 ARM_HWCAP_ARM_THUMB = 1 << 2,
322 ARM_HWCAP_ARM_26BIT = 1 << 3,
323 ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
324 ARM_HWCAP_ARM_FPA = 1 << 5,
325 ARM_HWCAP_ARM_VFP = 1 << 6,
326 ARM_HWCAP_ARM_EDSP = 1 << 7,
327 ARM_HWCAP_ARM_JAVA = 1 << 8,
328 ARM_HWCAP_ARM_IWMMXT = 1 << 9,
329 ARM_HWCAP_ARM_THUMBEE = 1 << 10,
330 ARM_HWCAP_ARM_NEON = 1 << 11,
331 ARM_HWCAP_ARM_VFPv3 = 1 << 12,
332 ARM_HWCAP_ARM_VFPv3D16 = 1 << 13,
335 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
336 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
337 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP \
338 | ARM_HWCAP_ARM_NEON | ARM_HWCAP_ARM_VFPv3 )
340 #endif
342 #ifdef TARGET_SPARC
343 #ifdef TARGET_SPARC64
345 #define ELF_START_MMAP 0x80000000
347 #ifndef TARGET_ABI32
348 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
349 #else
350 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
351 #endif
353 #define ELF_CLASS ELFCLASS64
354 #define ELF_ARCH EM_SPARCV9
356 #define STACK_BIAS 2047
358 static inline void init_thread(struct target_pt_regs *regs,
359 struct image_info *infop)
361 #ifndef TARGET_ABI32
362 regs->tstate = 0;
363 #endif
364 regs->pc = infop->entry;
365 regs->npc = regs->pc + 4;
366 regs->y = 0;
367 #ifdef TARGET_ABI32
368 regs->u_regs[14] = infop->start_stack - 16 * 4;
369 #else
370 if (personality(infop->personality) == PER_LINUX32)
371 regs->u_regs[14] = infop->start_stack - 16 * 4;
372 else
373 regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
374 #endif
377 #else
378 #define ELF_START_MMAP 0x80000000
380 #define elf_check_arch(x) ( (x) == EM_SPARC )
382 #define ELF_CLASS ELFCLASS32
383 #define ELF_ARCH EM_SPARC
385 static inline void init_thread(struct target_pt_regs *regs,
386 struct image_info *infop)
388 regs->psr = 0;
389 regs->pc = infop->entry;
390 regs->npc = regs->pc + 4;
391 regs->y = 0;
392 regs->u_regs[14] = infop->start_stack - 16 * 4;
395 #endif
396 #endif
398 #ifdef TARGET_PPC
400 #define ELF_START_MMAP 0x80000000
402 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
404 #define elf_check_arch(x) ( (x) == EM_PPC64 )
406 #define ELF_CLASS ELFCLASS64
408 #else
410 #define elf_check_arch(x) ( (x) == EM_PPC )
412 #define ELF_CLASS ELFCLASS32
414 #endif
416 #define ELF_ARCH EM_PPC
418 /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
419 See arch/powerpc/include/asm/cputable.h. */
420 enum {
421 QEMU_PPC_FEATURE_32 = 0x80000000,
422 QEMU_PPC_FEATURE_64 = 0x40000000,
423 QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
424 QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
425 QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
426 QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
427 QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
428 QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
429 QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
430 QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
431 QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
432 QEMU_PPC_FEATURE_NO_TB = 0x00100000,
433 QEMU_PPC_FEATURE_POWER4 = 0x00080000,
434 QEMU_PPC_FEATURE_POWER5 = 0x00040000,
435 QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
436 QEMU_PPC_FEATURE_CELL = 0x00010000,
437 QEMU_PPC_FEATURE_BOOKE = 0x00008000,
438 QEMU_PPC_FEATURE_SMT = 0x00004000,
439 QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
440 QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
441 QEMU_PPC_FEATURE_PA6T = 0x00000800,
442 QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
443 QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
444 QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
445 QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
446 QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
448 QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
449 QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
452 #define ELF_HWCAP get_elf_hwcap()
454 static uint32_t get_elf_hwcap(void)
456 CPUState *e = thread_env;
457 uint32_t features = 0;
459 /* We don't have to be terribly complete here; the high points are
460 Altivec/FP/SPE support. Anything else is just a bonus. */
461 #define GET_FEATURE(flag, feature) \
462 do {if (e->insns_flags & flag) features |= feature; } while(0)
463 GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
464 GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
465 GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
466 GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
467 GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
468 GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
469 GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
470 GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
471 #undef GET_FEATURE
473 return features;
477 * The requirements here are:
478 * - keep the final alignment of sp (sp & 0xf)
479 * - make sure the 32-bit value at the first 16 byte aligned position of
480 * AUXV is greater than 16 for glibc compatibility.
481 * AT_IGNOREPPC is used for that.
482 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
483 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
485 #define DLINFO_ARCH_ITEMS 5
486 #define ARCH_DLINFO \
487 do { \
488 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
489 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
490 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
491 /* \
492 * Now handle glibc compatibility. \
493 */ \
494 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
495 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
496 } while (0)
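/* Note, for illustration: DLINFO_ARCH_ITEMS is 5 because ARCH_DLINFO above
 * emits exactly five NEW_AUX_ENT pairs (three cache-block sizes plus two
 * AT_IGNOREPPC entries); create_elf_tables adds that count into its size
 * pre-computation so the final sp alignment works out as described in the
 * comment above.
 */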
498 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
500 _regs->gpr[1] = infop->start_stack;
501 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
502 _regs->gpr[2] = ldq_raw(infop->entry + 8) + infop->load_addr;
503 infop->entry = ldq_raw(infop->entry) + infop->load_addr;
504 #endif
505 _regs->nip = infop->entry;
508 /* See linux kernel: arch/powerpc/include/asm/elf.h. */
509 #define ELF_NREG 48
510 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
512 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
514 int i;
515 target_ulong ccr = 0;
517 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
518 (*regs)[i] = tswapl(env->gpr[i]);
521 (*regs)[32] = tswapl(env->nip);
522 (*regs)[33] = tswapl(env->msr);
523 (*regs)[35] = tswapl(env->ctr);
524 (*regs)[36] = tswapl(env->lr);
525 (*regs)[37] = tswapl(env->xer);
527 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
528 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
530 (*regs)[38] = tswapl(ccr);
533 #define USE_ELF_CORE_DUMP
534 #define ELF_EXEC_PAGESIZE 4096
536 #endif
538 #ifdef TARGET_MIPS
540 #define ELF_START_MMAP 0x80000000
542 #define elf_check_arch(x) ( (x) == EM_MIPS )
544 #ifdef TARGET_MIPS64
545 #define ELF_CLASS ELFCLASS64
546 #else
547 #define ELF_CLASS ELFCLASS32
548 #endif
549 #define ELF_ARCH EM_MIPS
551 static inline void init_thread(struct target_pt_regs *regs,
552 struct image_info *infop)
554 regs->cp0_status = 2 << CP0St_KSU;
555 regs->cp0_epc = infop->entry;
556 regs->regs[29] = infop->start_stack;
559 /* See linux kernel: arch/mips/include/asm/elf.h. */
560 #define ELF_NREG 45
561 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
563 /* See linux kernel: arch/mips/include/asm/reg.h. */
564 enum {
565 #ifdef TARGET_MIPS64
566 TARGET_EF_R0 = 0,
567 #else
568 TARGET_EF_R0 = 6,
569 #endif
570 TARGET_EF_R26 = TARGET_EF_R0 + 26,
571 TARGET_EF_R27 = TARGET_EF_R0 + 27,
572 TARGET_EF_LO = TARGET_EF_R0 + 32,
573 TARGET_EF_HI = TARGET_EF_R0 + 33,
574 TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
575 TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
576 TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
577 TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
580 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */
581 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
583 int i;
585 for (i = 0; i < TARGET_EF_R0; i++) {
586 (*regs)[i] = 0;
588 (*regs)[TARGET_EF_R0] = 0;
590 for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
591 (*regs)[TARGET_EF_R0 + i] = tswapl(env->active_tc.gpr[i]);
594 (*regs)[TARGET_EF_R26] = 0;
595 (*regs)[TARGET_EF_R27] = 0;
596 (*regs)[TARGET_EF_LO] = tswapl(env->active_tc.LO[0]);
597 (*regs)[TARGET_EF_HI] = tswapl(env->active_tc.HI[0]);
598 (*regs)[TARGET_EF_CP0_EPC] = tswapl(env->active_tc.PC);
599 (*regs)[TARGET_EF_CP0_BADVADDR] = tswapl(env->CP0_BadVAddr);
600 (*regs)[TARGET_EF_CP0_STATUS] = tswapl(env->CP0_Status);
601 (*regs)[TARGET_EF_CP0_CAUSE] = tswapl(env->CP0_Cause);
604 #define USE_ELF_CORE_DUMP
605 #define ELF_EXEC_PAGESIZE 4096
607 #endif /* TARGET_MIPS */
609 #ifdef TARGET_MICROBLAZE
611 #define ELF_START_MMAP 0x80000000
613 #define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)
615 #define ELF_CLASS ELFCLASS32
616 #define ELF_ARCH EM_MICROBLAZE
618 static inline void init_thread(struct target_pt_regs *regs,
619 struct image_info *infop)
621 regs->pc = infop->entry;
622 regs->r1 = infop->start_stack;
626 #define ELF_EXEC_PAGESIZE 4096
628 #define USE_ELF_CORE_DUMP
629 #define ELF_NREG 38
630 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
632 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */
633 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
635 int i, pos = 0;
637 for (i = 0; i < 32; i++) {
638 (*regs)[pos++] = tswapl(env->regs[i]);
641 for (i = 0; i < 6; i++) {
642 (*regs)[pos++] = tswapl(env->sregs[i]);
646 #endif /* TARGET_MICROBLAZE */
648 #ifdef TARGET_SH4
650 #define ELF_START_MMAP 0x80000000
652 #define elf_check_arch(x) ( (x) == EM_SH )
654 #define ELF_CLASS ELFCLASS32
655 #define ELF_ARCH EM_SH
657 static inline void init_thread(struct target_pt_regs *regs,
658 struct image_info *infop)
660 /* Check other registers XXXXX */
661 regs->pc = infop->entry;
662 regs->regs[15] = infop->start_stack;
665 /* See linux kernel: arch/sh/include/asm/elf.h. */
666 #define ELF_NREG 23
667 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
669 /* See linux kernel: arch/sh/include/asm/ptrace.h. */
670 enum {
671 TARGET_REG_PC = 16,
672 TARGET_REG_PR = 17,
673 TARGET_REG_SR = 18,
674 TARGET_REG_GBR = 19,
675 TARGET_REG_MACH = 20,
676 TARGET_REG_MACL = 21,
677 TARGET_REG_SYSCALL = 22
680 static inline void elf_core_copy_regs(target_elf_gregset_t *regs,
681 const CPUState *env)
683 int i;
685 for (i = 0; i < 16; i++) {
 686         (*regs)[i] = tswapl(env->gregs[i]);
689 (*regs)[TARGET_REG_PC] = tswapl(env->pc);
690 (*regs)[TARGET_REG_PR] = tswapl(env->pr);
691 (*regs)[TARGET_REG_SR] = tswapl(env->sr);
692 (*regs)[TARGET_REG_GBR] = tswapl(env->gbr);
693 (*regs)[TARGET_REG_MACH] = tswapl(env->mach);
694 (*regs)[TARGET_REG_MACL] = tswapl(env->macl);
695 (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
698 #define USE_ELF_CORE_DUMP
699 #define ELF_EXEC_PAGESIZE 4096
701 #endif
703 #ifdef TARGET_CRIS
705 #define ELF_START_MMAP 0x80000000
707 #define elf_check_arch(x) ( (x) == EM_CRIS )
709 #define ELF_CLASS ELFCLASS32
710 #define ELF_ARCH EM_CRIS
712 static inline void init_thread(struct target_pt_regs *regs,
713 struct image_info *infop)
715 regs->erp = infop->entry;
718 #define ELF_EXEC_PAGESIZE 8192
720 #endif
722 #ifdef TARGET_M68K
724 #define ELF_START_MMAP 0x80000000
726 #define elf_check_arch(x) ( (x) == EM_68K )
728 #define ELF_CLASS ELFCLASS32
729 #define ELF_ARCH EM_68K
731 /* ??? Does this need to do anything?
732 #define ELF_PLAT_INIT(_r) */
734 static inline void init_thread(struct target_pt_regs *regs,
735 struct image_info *infop)
737 regs->usp = infop->start_stack;
738 regs->sr = 0;
739 regs->pc = infop->entry;
742 /* See linux kernel: arch/m68k/include/asm/elf.h. */
743 #define ELF_NREG 20
744 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
746 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
748 (*regs)[0] = tswapl(env->dregs[1]);
749 (*regs)[1] = tswapl(env->dregs[2]);
750 (*regs)[2] = tswapl(env->dregs[3]);
751 (*regs)[3] = tswapl(env->dregs[4]);
752 (*regs)[4] = tswapl(env->dregs[5]);
753 (*regs)[5] = tswapl(env->dregs[6]);
754 (*regs)[6] = tswapl(env->dregs[7]);
755 (*regs)[7] = tswapl(env->aregs[0]);
756 (*regs)[8] = tswapl(env->aregs[1]);
757 (*regs)[9] = tswapl(env->aregs[2]);
758 (*regs)[10] = tswapl(env->aregs[3]);
759 (*regs)[11] = tswapl(env->aregs[4]);
760 (*regs)[12] = tswapl(env->aregs[5]);
761 (*regs)[13] = tswapl(env->aregs[6]);
762 (*regs)[14] = tswapl(env->dregs[0]);
763 (*regs)[15] = tswapl(env->aregs[7]);
764 (*regs)[16] = tswapl(env->dregs[0]); /* FIXME: orig_d0 */
765 (*regs)[17] = tswapl(env->sr);
766 (*regs)[18] = tswapl(env->pc);
767 (*regs)[19] = 0; /* FIXME: regs->format | regs->vector */
770 #define USE_ELF_CORE_DUMP
771 #define ELF_EXEC_PAGESIZE 8192
773 #endif
775 #ifdef TARGET_ALPHA
777 #define ELF_START_MMAP (0x30000000000ULL)
779 #define elf_check_arch(x) ( (x) == ELF_ARCH )
781 #define ELF_CLASS ELFCLASS64
782 #define ELF_ARCH EM_ALPHA
784 static inline void init_thread(struct target_pt_regs *regs,
785 struct image_info *infop)
787 regs->pc = infop->entry;
788 regs->ps = 8;
789 regs->usp = infop->start_stack;
792 #define ELF_EXEC_PAGESIZE 8192
794 #endif /* TARGET_ALPHA */
796 #ifndef ELF_PLATFORM
797 #define ELF_PLATFORM (NULL)
798 #endif
800 #ifndef ELF_HWCAP
801 #define ELF_HWCAP 0
802 #endif
804 #ifdef TARGET_ABI32
805 #undef ELF_CLASS
806 #define ELF_CLASS ELFCLASS32
807 #undef bswaptls
808 #define bswaptls(ptr) bswap32s(ptr)
809 #endif
811 #include "elf.h"
813 struct exec
815 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
816 unsigned int a_text; /* length of text, in bytes */
817 unsigned int a_data; /* length of data, in bytes */
818 unsigned int a_bss; /* length of uninitialized data area, in bytes */
819 unsigned int a_syms; /* length of symbol table data in file, in bytes */
820 unsigned int a_entry; /* start address */
821 unsigned int a_trsize; /* length of relocation info for text, in bytes */
822 unsigned int a_drsize; /* length of relocation info for data, in bytes */
826 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
827 #define OMAGIC 0407
828 #define NMAGIC 0410
829 #define ZMAGIC 0413
830 #define QMAGIC 0314
832 /* max code+data+bss+brk space allocated to ET_DYN executables */
833 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
835 /* Necessary parameters */
836 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
837 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
838 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
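/* For illustration, assuming the common 4 KiB TARGET_PAGE_SIZE: the two
 * macros above split a guest virtual address into an ELF-page-aligned base
 * and an intra-page offset, e.g.
 *
 *   TARGET_ELF_PAGESTART(0x12345)  == 0x12000
 *   TARGET_ELF_PAGEOFFSET(0x12345) == 0x345
 *
 * PT_LOAD segments are then mapped at the page start using a file offset
 * reduced by the same intra-page offset, which keeps the file offset and the
 * virtual address congruent modulo the page size, as a file-backed mmap
 * requires.
 */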
840 #define DLINFO_ITEMS 12
842 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
844 memcpy(to, from, n);
847 #ifdef BSWAP_NEEDED
848 static void bswap_ehdr(struct elfhdr *ehdr)
850 bswap16s(&ehdr->e_type); /* Object file type */
851 bswap16s(&ehdr->e_machine); /* Architecture */
852 bswap32s(&ehdr->e_version); /* Object file version */
853 bswaptls(&ehdr->e_entry); /* Entry point virtual address */
854 bswaptls(&ehdr->e_phoff); /* Program header table file offset */
855 bswaptls(&ehdr->e_shoff); /* Section header table file offset */
856 bswap32s(&ehdr->e_flags); /* Processor-specific flags */
857 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
858 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
859 bswap16s(&ehdr->e_phnum); /* Program header table entry count */
860 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
861 bswap16s(&ehdr->e_shnum); /* Section header table entry count */
862 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
865 static void bswap_phdr(struct elf_phdr *phdr, int phnum)
867 int i;
868 for (i = 0; i < phnum; ++i, ++phdr) {
869 bswap32s(&phdr->p_type); /* Segment type */
870 bswap32s(&phdr->p_flags); /* Segment flags */
871 bswaptls(&phdr->p_offset); /* Segment file offset */
872 bswaptls(&phdr->p_vaddr); /* Segment virtual address */
873 bswaptls(&phdr->p_paddr); /* Segment physical address */
874 bswaptls(&phdr->p_filesz); /* Segment size in file */
875 bswaptls(&phdr->p_memsz); /* Segment size in memory */
876 bswaptls(&phdr->p_align); /* Segment alignment */
880 static void bswap_shdr(struct elf_shdr *shdr, int shnum)
882 int i;
883 for (i = 0; i < shnum; ++i, ++shdr) {
884 bswap32s(&shdr->sh_name);
885 bswap32s(&shdr->sh_type);
886 bswaptls(&shdr->sh_flags);
887 bswaptls(&shdr->sh_addr);
888 bswaptls(&shdr->sh_offset);
889 bswaptls(&shdr->sh_size);
890 bswap32s(&shdr->sh_link);
891 bswap32s(&shdr->sh_info);
892 bswaptls(&shdr->sh_addralign);
893 bswaptls(&shdr->sh_entsize);
897 static void bswap_sym(struct elf_sym *sym)
899 bswap32s(&sym->st_name);
900 bswaptls(&sym->st_value);
901 bswaptls(&sym->st_size);
902 bswap16s(&sym->st_shndx);
904 #else
905 static inline void bswap_ehdr(struct elfhdr *ehdr) { }
906 static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
907 static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
908 static inline void bswap_sym(struct elf_sym *sym) { }
909 #endif
911 #ifdef USE_ELF_CORE_DUMP
912 static int elf_core_dump(int, const CPUState *);
913 #endif /* USE_ELF_CORE_DUMP */
914 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias);
916 /* Verify the portions of EHDR within E_IDENT for the target.
917 This can be performed before bswapping the entire header. */
918 static bool elf_check_ident(struct elfhdr *ehdr)
920 return (ehdr->e_ident[EI_MAG0] == ELFMAG0
921 && ehdr->e_ident[EI_MAG1] == ELFMAG1
922 && ehdr->e_ident[EI_MAG2] == ELFMAG2
923 && ehdr->e_ident[EI_MAG3] == ELFMAG3
924 && ehdr->e_ident[EI_CLASS] == ELF_CLASS
925 && ehdr->e_ident[EI_DATA] == ELF_DATA
926 && ehdr->e_ident[EI_VERSION] == EV_CURRENT);
929 /* Verify the portions of EHDR outside of E_IDENT for the target.
930 This has to wait until after bswapping the header. */
931 static bool elf_check_ehdr(struct elfhdr *ehdr)
933 return (elf_check_arch(ehdr->e_machine)
934 && ehdr->e_ehsize == sizeof(struct elfhdr)
935 && ehdr->e_phentsize == sizeof(struct elf_phdr)
936 && ehdr->e_shentsize == sizeof(struct elf_shdr)
937 && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN));
 941  * 'copy_elf_strings()' copies argument/environment strings from user
942 * memory to free pages in kernel mem. These are in a format ready
943 * to be put directly into the top of new user memory.
946 static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
947 abi_ulong p)
949 char *tmp, *tmp1, *pag = NULL;
950 int len, offset = 0;
952 if (!p) {
953 return 0; /* bullet-proofing */
955 while (argc-- > 0) {
956 tmp = argv[argc];
957 if (!tmp) {
958 fprintf(stderr, "VFS: argc is wrong");
959 exit(-1);
961 tmp1 = tmp;
962 while (*tmp++);
963 len = tmp - tmp1;
964 if (p < len) { /* this shouldn't happen - 128kB */
965 return 0;
967 while (len) {
968 --p; --tmp; --len;
969 if (--offset < 0) {
970 offset = p % TARGET_PAGE_SIZE;
971 pag = (char *)page[p/TARGET_PAGE_SIZE];
972 if (!pag) {
 973                 pag = (char *)malloc(TARGET_PAGE_SIZE);
 974                 if (!pag)
 975                     return 0;
 976                 memset(pag, 0, TARGET_PAGE_SIZE);
 977                 page[p/TARGET_PAGE_SIZE] = pag;
980 if (len == 0 || offset == 0) {
981 *(pag + offset) = *tmp;
983 else {
984 int bytes_to_copy = (len > offset) ? offset : len;
985 tmp -= bytes_to_copy;
986 p -= bytes_to_copy;
987 offset -= bytes_to_copy;
988 len -= bytes_to_copy;
989 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
993 return p;
996 static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
997 struct image_info *info)
999 abi_ulong stack_base, size, error, guard;
1000 int i;
1002 /* Create enough stack to hold everything. If we don't use
1003 it for args, we'll use it for something else. */
1004 size = guest_stack_size;
1005 if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE) {
1006 size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
1008 guard = TARGET_PAGE_SIZE;
1009 if (guard < qemu_real_host_page_size) {
1010 guard = qemu_real_host_page_size;
1013 error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE,
1014 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1015 if (error == -1) {
1016 perror("mmap stack");
1017 exit(-1);
1020 /* We reserve one extra page at the top of the stack as guard. */
1021 target_mprotect(error, guard, PROT_NONE);
1023 info->stack_limit = error + guard;
1024 stack_base = info->stack_limit + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
1025 p += stack_base;
1027 for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
1028 if (bprm->page[i]) {
1029 info->rss++;
1030 /* FIXME - check return value of memcpy_to_target() for failure */
1031 memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
1032 free(bprm->page[i]);
1034 stack_base += TARGET_PAGE_SIZE;
1036 return p;
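/* For illustration: from low to high guest addresses, setup_arg_pages builds
 *
 *   [ guard (PROT_NONE) ][ stack space ... ][ MAX_ARG_PAGES argument pages ]
 *
 * info->stack_limit points just above the guard; the guard sits at the lowest
 * address so the downward-growing stack faults instead of silently
 * overflowing, and the strings gathered by copy_elf_strings are copied into
 * the topmost pages, with the returned p pointing at them in guest memory.
 */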
1039 /* Map and zero the bss. We need to explicitly zero any fractional pages
1040 after the data section (i.e. bss). */
1041 static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
1043 uintptr_t host_start, host_map_start, host_end;
1045 last_bss = TARGET_PAGE_ALIGN(last_bss);
1047 /* ??? There is confusion between qemu_real_host_page_size and
1048 qemu_host_page_size here and elsewhere in target_mmap, which
1049 may lead to the end of the data section mapping from the file
1050 not being mapped. At least there was an explicit test and
1051 comment for that here, suggesting that "the file size must
1052 be known". The comment probably pre-dates the introduction
1053 of the fstat system call in target_mmap which does in fact
1054 find out the size. What isn't clear is if the workaround
1055 here is still actually needed. For now, continue with it,
1056 but merge it with the "normal" mmap that would allocate the bss. */
1058 host_start = (uintptr_t) g2h(elf_bss);
1059 host_end = (uintptr_t) g2h(last_bss);
1060 host_map_start = (host_start + qemu_real_host_page_size - 1);
1061 host_map_start &= -qemu_real_host_page_size;
1063 if (host_map_start < host_end) {
1064 void *p = mmap((void *)host_map_start, host_end - host_map_start,
1065 prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1066 if (p == MAP_FAILED) {
1067 perror("cannot mmap brk");
1068 exit(-1);
1071 /* Since we didn't use target_mmap, make sure to record
1072 the validity of the pages with qemu. */
1073 page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot|PAGE_VALID);
1076 if (host_start < host_map_start) {
1077 memset((void *)host_start, 0, host_map_start - host_start);
1081 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
1082 struct elfhdr *exec,
1083 struct image_info *info,
1084 struct image_info *interp_info)
1086 abi_ulong sp;
1087 int size;
1088 abi_ulong u_platform;
1089 const char *k_platform;
1090 const int n = sizeof(elf_addr_t);
1092 sp = p;
1093 u_platform = 0;
1094 k_platform = ELF_PLATFORM;
1095 if (k_platform) {
1096 size_t len = strlen(k_platform) + 1;
1097 sp -= (len + n - 1) & ~(n - 1);
1098 u_platform = sp;
1099 /* FIXME - check return value of memcpy_to_target() for failure */
1100 memcpy_to_target(sp, k_platform, len);
1103 * Force 16 byte _final_ alignment here for generality.
1105 sp = sp &~ (abi_ulong)15;
1106 size = (DLINFO_ITEMS + 1) * 2;
1107 if (k_platform)
1108 size += 2;
1109 #ifdef DLINFO_ARCH_ITEMS
1110 size += DLINFO_ARCH_ITEMS * 2;
1111 #endif
1112 size += envc + argc + 2;
1113 size += 1; /* argc itself */
1114 size *= n;
1115 if (size & 15)
1116 sp -= 16 - (size & 15);
1118 /* This is correct because Linux defines
1119 * elf_addr_t as Elf32_Off / Elf64_Off
1121 #define NEW_AUX_ENT(id, val) do { \
1122 sp -= n; put_user_ual(val, sp); \
1123 sp -= n; put_user_ual(id, sp); \
1124 } while(0)
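/* Note, for illustration: sp grows downward and each NEW_AUX_ENT stores val
 * first and then id one word below it, so every entry occupies two target
 * words with a_type at the lower address and the value just above it.
 * Entries end up in memory in the reverse of the push order below; AT_NULL is
 * pushed first, lands at the highest address, and terminates the vector that
 * the guest's startup code walks upward.
 */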
1126 NEW_AUX_ENT (AT_NULL, 0);
1128 /* There must be exactly DLINFO_ITEMS entries here. */
1129 NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
1130 NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
1131 NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
1132 NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
1133 NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
1134 NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
1135 NEW_AUX_ENT(AT_ENTRY, info->entry);
1136 NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
1137 NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
1138 NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
1139 NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
1140 NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
1141 NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
1142 if (k_platform)
1143 NEW_AUX_ENT(AT_PLATFORM, u_platform);
1144 #ifdef ARCH_DLINFO
1146 * ARCH_DLINFO must come last so platform specific code can enforce
1147 * special alignment requirements on the AUXV if necessary (eg. PPC).
1149 ARCH_DLINFO;
1150 #endif
1151 #undef NEW_AUX_ENT
1153 info->saved_auxv = sp;
1155 sp = loader_build_argptr(envc, argc, sp, p, 0);
1156 return sp;
1159 /* Load an ELF image into the address space.
1161 IMAGE_NAME is the filename of the image, to use in error messages.
1162 IMAGE_FD is the open file descriptor for the image.
1164 BPRM_BUF is a copy of the beginning of the file; this of course
1165 contains the elf file header at offset 0. It is assumed that this
1166 buffer is sufficiently aligned to present no problems to the host
1167 in accessing data at aligned offsets within the buffer.
1169 On return: INFO values will be filled in, as necessary or available. */
1171 static void load_elf_image(const char *image_name, int image_fd,
1172 struct image_info *info,
1173 char bprm_buf[BPRM_BUF_SIZE])
1175 struct elfhdr *ehdr = (struct elfhdr *)bprm_buf;
1176 struct elf_phdr *phdr;
1177 abi_ulong load_addr, load_bias, loaddr, hiaddr, error;
1178 int i, retval;
1179 const char *errmsg;
1181 /* First of all, some simple consistency checks */
1182 errmsg = "Invalid ELF image for this architecture";
1183 if (!elf_check_ident(ehdr)) {
1184 goto exit_errmsg;
1186 bswap_ehdr(ehdr);
1187 if (!elf_check_ehdr(ehdr)) {
1188 goto exit_errmsg;
1191 i = ehdr->e_phnum * sizeof(struct elf_phdr);
1192 if (ehdr->e_phoff + i <= BPRM_BUF_SIZE) {
1193 phdr = (struct elf_phdr *)(bprm_buf + ehdr->e_phoff);
1194 } else {
1195 phdr = (struct elf_phdr *) alloca(i);
1196 retval = pread(image_fd, phdr, i, ehdr->e_phoff);
1197 if (retval != i) {
1198 goto exit_read;
1201 bswap_phdr(phdr, ehdr->e_phnum);
1203 /* Find the maximum size of the image and allocate an appropriate
1204 amount of memory to handle that. */
1205 loaddr = -1, hiaddr = 0;
1206 for (i = 0; i < ehdr->e_phnum; ++i) {
1207 if (phdr[i].p_type == PT_LOAD) {
1208 abi_ulong a = phdr[i].p_vaddr;
1209 if (a < loaddr) {
1210 loaddr = a;
1212 a += phdr[i].p_memsz;
1213 if (a > hiaddr) {
1214 hiaddr = a;
1219 load_addr = loaddr;
1220 if (ehdr->e_type == ET_DYN) {
1221 /* The image indicates that it can be loaded anywhere. Find a
1222 location that can hold the memory space required. If the
1223 image is pre-linked, LOADDR will be non-zero. Since we do
1224 not supply MAP_FIXED here we'll use that address if and
1225 only if it remains available. */
1226 load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
1227 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
1228 -1, 0);
1229 if (load_addr == -1) {
1230 goto exit_perror;
1233 load_bias = load_addr - loaddr;
1235 info->load_bias = load_bias;
1236 info->load_addr = load_addr;
1237 info->entry = ehdr->e_entry + load_bias;
1238 info->start_code = -1;
1239 info->end_code = 0;
1240 info->start_data = -1;
1241 info->end_data = 0;
1242 info->brk = 0;
1244 for (i = 0; i < ehdr->e_phnum; i++) {
1245 struct elf_phdr *eppnt = phdr + i;
1246 if (eppnt->p_type == PT_LOAD) {
1247 abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em;
1248 int elf_prot = 0;
1250 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
1251 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1252 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1254 vaddr = load_bias + eppnt->p_vaddr;
1255 vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr);
1256 vaddr_ps = TARGET_ELF_PAGESTART(vaddr);
1258 error = target_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po,
1259 elf_prot, MAP_PRIVATE | MAP_FIXED,
1260 image_fd, eppnt->p_offset - vaddr_po);
1261 if (error == -1) {
1262 goto exit_perror;
1265 vaddr_ef = vaddr + eppnt->p_filesz;
1266 vaddr_em = vaddr + eppnt->p_memsz;
1268 /* If the load segment requests extra zeros (e.g. bss), map it. */
1269 if (vaddr_ef < vaddr_em) {
1270 zero_bss(vaddr_ef, vaddr_em, elf_prot);
1273 /* Find the full program boundaries. */
1274 if (elf_prot & PROT_EXEC) {
1275 if (vaddr < info->start_code) {
1276 info->start_code = vaddr;
1278 if (vaddr_ef > info->end_code) {
1279 info->end_code = vaddr_ef;
1282 if (elf_prot & PROT_WRITE) {
1283 if (vaddr < info->start_data) {
1284 info->start_data = vaddr;
1286 if (vaddr_ef > info->end_data) {
1287 info->end_data = vaddr_ef;
1289 if (vaddr_em > info->brk) {
1290 info->brk = vaddr_em;
1296 if (info->end_data == 0) {
1297 info->start_data = info->end_code;
1298 info->end_data = info->end_code;
1299 info->brk = info->end_code;
1302 if (qemu_log_enabled()) {
1303 load_symbols(ehdr, image_fd, load_bias);
1306 close(image_fd);
1307 return;
1309 exit_read:
1310 if (retval >= 0) {
1311 errmsg = "Incomplete read of file header";
1312 goto exit_errmsg;
1314 exit_perror:
1315 errmsg = strerror(errno);
1316 exit_errmsg:
1317 fprintf(stderr, "%s: %s\n", image_name, errmsg);
1318 exit(-1);
1321 static void load_elf_interp(const char *filename, struct image_info *info,
1322 char bprm_buf[BPRM_BUF_SIZE])
1324 int fd, retval;
1326 fd = open(path(filename), O_RDONLY);
1327 if (fd < 0) {
1328 goto exit_perror;
1331 retval = read(fd, bprm_buf, BPRM_BUF_SIZE);
1332 if (retval < 0) {
1333 goto exit_perror;
1335 if (retval < BPRM_BUF_SIZE) {
1336 memset(bprm_buf + retval, 0, BPRM_BUF_SIZE - retval);
1339 load_elf_image(filename, fd, info, bprm_buf);
1340 return;
1342 exit_perror:
1343 fprintf(stderr, "%s: %s\n", filename, strerror(errno));
1344 exit(-1);
1347 static int symfind(const void *s0, const void *s1)
1349 struct elf_sym *key = (struct elf_sym *)s0;
1350 struct elf_sym *sym = (struct elf_sym *)s1;
1351 int result = 0;
1352 if (key->st_value < sym->st_value) {
1353 result = -1;
1354 } else if (key->st_value >= sym->st_value + sym->st_size) {
1355 result = 1;
1357 return result;
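/* For illustration: symfind makes bsearch treat a symbol as a match when the
 * lookup address falls inside [st_value, st_value + st_size), so an address
 * of 0x1008 matches a function symbol with st_value 0x1000 and st_size 0x20,
 * and lookup_symbolxx below then returns that symbol's name from the string
 * table.
 */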
1360 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
1362 #if ELF_CLASS == ELFCLASS32
1363 struct elf_sym *syms = s->disas_symtab.elf32;
1364 #else
1365 struct elf_sym *syms = s->disas_symtab.elf64;
1366 #endif
1368 // binary search
1369 struct elf_sym key;
1370 struct elf_sym *sym;
1372 key.st_value = orig_addr;
1374 sym = bsearch(&key, syms, s->disas_num_syms, sizeof(*syms), symfind);
1375 if (sym != NULL) {
1376 return s->disas_strtab + sym->st_name;
1379 return "";
1382 /* FIXME: This should use elf_ops.h */
1383 static int symcmp(const void *s0, const void *s1)
1385 struct elf_sym *sym0 = (struct elf_sym *)s0;
1386 struct elf_sym *sym1 = (struct elf_sym *)s1;
1387 return (sym0->st_value < sym1->st_value)
1388 ? -1
1389 : ((sym0->st_value > sym1->st_value) ? 1 : 0);
1392 /* Best attempt to load symbols from this ELF object. */
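/* In outline: load_symbols keeps only defined STT_FUNC symbols, rebases their
 * st_value by load_bias, sorts them with symcmp, and publishes the table via
 * syminfos so that lookup_symbolxx can bsearch it when the disassembler needs
 * a name for an address.
 */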
1393 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias)
1395 int i, shnum, nsyms, sym_idx = 0, str_idx = 0;
1396 struct elf_shdr *shdr;
1397 char *strings;
1398 struct syminfo *s;
1399 struct elf_sym *syms;
1401 shnum = hdr->e_shnum;
1402 i = shnum * sizeof(struct elf_shdr);
1403 shdr = (struct elf_shdr *)alloca(i);
1404 if (pread(fd, shdr, i, hdr->e_shoff) != i) {
1405 return;
1408 bswap_shdr(shdr, shnum);
1409 for (i = 0; i < shnum; ++i) {
1410 if (shdr[i].sh_type == SHT_SYMTAB) {
1411 sym_idx = i;
1412 str_idx = shdr[i].sh_link;
1413 goto found;
1417 /* There will be no symbol table if the file was stripped. */
1418 return;
1420 found:
1421     /* Now we know where the strtab and symtab are.  Snarf them. */
1422 s = malloc(sizeof(*s));
1423 if (!s) {
1424 return;
1427 i = shdr[str_idx].sh_size;
1428 s->disas_strtab = strings = malloc(i);
1429 if (!strings || pread(fd, strings, i, shdr[str_idx].sh_offset) != i) {
1430 free(s);
1431 free(strings);
1432 return;
1435 i = shdr[sym_idx].sh_size;
1436 syms = malloc(i);
1437 if (!syms || pread(fd, syms, i, shdr[sym_idx].sh_offset) != i) {
1438 free(s);
1439 free(strings);
1440 free(syms);
1441 return;
1444 nsyms = i / sizeof(struct elf_sym);
1445 for (i = 0; i < nsyms; ) {
1446 bswap_sym(syms + i);
1447 /* Throw away entries which we do not need. */
1448 if (syms[i].st_shndx == SHN_UNDEF
1449 || syms[i].st_shndx >= SHN_LORESERVE
1450 || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
1451 if (i < --nsyms) {
1452 syms[i] = syms[nsyms];
1454 } else {
1455 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
1456 /* The bottom address bit marks a Thumb or MIPS16 symbol. */
1457 syms[i].st_value &= ~(target_ulong)1;
1458 #endif
1459 syms[i].st_value += load_bias;
1460 i++;
1464 syms = realloc(syms, nsyms * sizeof(*syms));
1465 qsort(syms, nsyms, sizeof(*syms), symcmp);
1467 s->disas_num_syms = nsyms;
1468 #if ELF_CLASS == ELFCLASS32
1469 s->disas_symtab.elf32 = syms;
1470 #else
1471 s->disas_symtab.elf64 = syms;
1472 #endif
1473 s->lookup_symbol = lookup_symbolxx;
1474 s->next = syminfos;
1475 syminfos = s;
1478 int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
1479 struct image_info * info)
1481 struct image_info interp_info;
1482 struct elfhdr elf_ex;
1483 abi_ulong load_addr, load_bias;
1484 int load_addr_set = 0;
1485 int i;
1486 struct elf_phdr * elf_ppnt;
1487 struct elf_phdr *elf_phdata;
1488 abi_ulong k, elf_brk;
1489 int retval;
1490 char *elf_interpreter = NULL;
1491 abi_ulong elf_entry;
1492 int status;
1493 abi_ulong start_code, end_code, start_data, end_data;
1494 abi_ulong elf_stack;
1496 status = 0;
1497 load_addr = 0;
1498 load_bias = 0;
1499 elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
1501 /* First of all, some simple consistency checks */
1502 if (!elf_check_ident(&elf_ex)) {
1503 return -ENOEXEC;
1505 bswap_ehdr(&elf_ex);
1506 if (!elf_check_ehdr(&elf_ex)) {
1507 return -ENOEXEC;
1510 bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
1511 bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
1512 bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);
1513 if (!bprm->p) {
1514 retval = -E2BIG;
1517 /* Now read in all of the header information */
1518 elf_phdata = (struct elf_phdr *)
1519 malloc(elf_ex.e_phnum * sizeof(struct elf_phdr));
1520 if (elf_phdata == NULL) {
1521 return -ENOMEM;
1524 i = elf_ex.e_phnum * sizeof(struct elf_phdr);
1525 if (elf_ex.e_phoff + i <= BPRM_BUF_SIZE) {
1526 memcpy(elf_phdata, bprm->buf + elf_ex.e_phoff, i);
1527 } else {
1528 retval = pread(bprm->fd, (char *) elf_phdata, i, elf_ex.e_phoff);
1529 if (retval != i) {
1530 perror("load_elf_binary");
1531 exit(-1);
1534 bswap_phdr(elf_phdata, elf_ex.e_phnum);
1536 elf_brk = 0;
1537 elf_stack = ~((abi_ulong)0UL);
1538 start_code = ~((abi_ulong)0UL);
1539 end_code = 0;
1540 start_data = 0;
1541 end_data = 0;
1543 elf_ppnt = elf_phdata;
1544 for(i=0;i < elf_ex.e_phnum; i++) {
1545 if (elf_ppnt->p_type == PT_INTERP) {
1546 if (elf_ppnt->p_offset + elf_ppnt->p_filesz <= BPRM_BUF_SIZE) {
1547 elf_interpreter = bprm->buf + elf_ppnt->p_offset;
1548 } else {
1549 elf_interpreter = alloca(elf_ppnt->p_filesz);
1550 retval = pread(bprm->fd, elf_interpreter, elf_ppnt->p_filesz,
1551 elf_ppnt->p_offset);
1552 if (retval != elf_ppnt->p_filesz) {
1553 perror("load_elf_binary");
1554 exit(-1);
1558 elf_ppnt++;
1561 /* OK, This is the point of no return */
1562 info->end_data = 0;
1563 info->end_code = 0;
1564 info->start_mmap = (abi_ulong)ELF_START_MMAP;
1565 info->mmap = 0;
1566 elf_entry = (abi_ulong) elf_ex.e_entry;
1568 #if defined(CONFIG_USE_GUEST_BASE)
1570      * In the case where the user has not explicitly set guest_base, we
1571      * probe here whether we should set it automatically.
1573 if (!(have_guest_base || reserved_va)) {
1575 * Go through ELF program header table and find the address
1576 * range used by loadable segments. Check that this is available on
1577 * the host, and if not find a suitable value for guest_base. */
1578 abi_ulong app_start = ~0;
1579 abi_ulong app_end = 0;
1580 abi_ulong addr;
1581 unsigned long host_start;
1582 unsigned long real_start;
1583 unsigned long host_size;
1584 for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
1585 i++, elf_ppnt++) {
1586 if (elf_ppnt->p_type != PT_LOAD)
1587 continue;
1588 addr = elf_ppnt->p_vaddr;
1589 if (addr < app_start) {
1590 app_start = addr;
1592 addr += elf_ppnt->p_memsz;
1593 if (addr > app_end) {
1594 app_end = addr;
1598 /* If we don't have any loadable segments then something
1599 is very wrong. */
1600 assert(app_start < app_end);
1602 /* Round addresses to page boundaries. */
1603 app_start = app_start & qemu_host_page_mask;
1604 app_end = HOST_PAGE_ALIGN(app_end);
1605 if (app_start < mmap_min_addr) {
1606 host_start = HOST_PAGE_ALIGN(mmap_min_addr);
1607 } else {
1608 host_start = app_start;
1609 if (host_start != app_start) {
1610 fprintf(stderr, "qemu: Address overflow loading ELF binary\n");
1611 abort();
1614 host_size = app_end - app_start;
1615 while (1) {
1616 /* Do not use mmap_find_vma here because that is limited to the
1617 guest address space. We are going to make the
1618 guest address space fit whatever we're given. */
1619 real_start = (unsigned long)mmap((void *)host_start, host_size,
1620 PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
1621 if (real_start == (unsigned long)-1) {
1622 fprintf(stderr, "qemu: Virtual memory exausted\n");
1623 abort();
1625 if (real_start == host_start) {
1626 break;
1628 /* That address didn't work. Unmap and try a different one.
1629                The address the host picked is typically
1630 right at the top of the host address space and leaves the
1631 guest with no usable address space. Resort to a linear search.
1632 We already compensated for mmap_min_addr, so this should not
1633 happen often. Probably means we got unlucky and host address
1634 space randomization put a shared library somewhere
1635 inconvenient. */
1636 munmap((void *)real_start, host_size);
1637 host_start += qemu_host_page_size;
1638 if (host_start == app_start) {
1639 /* Theoretically possible if host doesn't have any
1640 suitably aligned areas. Normally the first mmap will
1641 fail. */
1642 fprintf(stderr, "qemu: Unable to find space for application\n");
1643 abort();
1646 qemu_log("Relocating guest address space from 0x" TARGET_ABI_FMT_lx
1647 " to 0x%lx\n", app_start, real_start);
1648 guest_base = real_start - app_start;
1650 #endif /* CONFIG_USE_GUEST_BASE */
1652 /* Do this so that we can load the interpreter, if need be. We will
1653 change some of these later */
1654 info->rss = 0;
1655 bprm->p = setup_arg_pages(bprm->p, bprm, info);
1656 info->start_stack = bprm->p;
1658     /* Now we do a little grungy work by mmapping the ELF image into
1659      * the correct location in memory.  At this point, we assume that
1660      * the image should be loaded at a fixed address, not at a variable
1661 * address.
1664 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
1665 int elf_prot = 0;
1666 int elf_flags = 0;
1667 abi_ulong error;
1669 if (elf_ppnt->p_type != PT_LOAD)
1670 continue;
1672 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
1673 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1674 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1675 elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
1676 if (elf_ex.e_type == ET_EXEC || load_addr_set) {
1677 elf_flags |= MAP_FIXED;
1678 } else if (elf_ex.e_type == ET_DYN) {
1679 /* Try and get dynamic programs out of the way of the default mmap
1680 base, as well as whatever program they might try to exec. This
1681 is because the brk will follow the loader, and is not movable. */
1682 /* NOTE: for qemu, we do a big mmap to get enough space
1683 without hardcoding any address */
1684 error = target_mmap(0, ET_DYN_MAP_SIZE,
1685 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1686 -1, 0);
1687 if (error == -1) {
1688 perror("mmap");
1689 exit(-1);
1691 load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
1694 error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
1695 (elf_ppnt->p_filesz +
1696 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
1697 elf_prot,
1698 (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
1699 bprm->fd,
1700 (elf_ppnt->p_offset -
1701 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
1702 if (error == -1) {
1703 perror("mmap");
1704 exit(-1);
1707 #ifdef LOW_ELF_STACK
1708 if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
1709 elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
1710 #endif
1712 if (!load_addr_set) {
1713 load_addr_set = 1;
1714 load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
1715 if (elf_ex.e_type == ET_DYN) {
1716 load_bias += error -
1717 TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
1718 load_addr += load_bias;
1721 k = elf_ppnt->p_vaddr;
1722 if (k < start_code)
1723 start_code = k;
1724 if (start_data < k)
1725 start_data = k;
1726 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1727 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1728 end_code = k;
1729 if (end_data < k)
1730 end_data = k;
1731 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1732 if (k > elf_brk) {
1733 elf_brk = TARGET_PAGE_ALIGN(k);
1736 /* If the load segment requests extra zeros (e.g. bss), map it. */
1737 if (elf_ppnt->p_filesz < elf_ppnt->p_memsz) {
1738 abi_ulong base = load_bias + elf_ppnt->p_vaddr;
1739 zero_bss(base + elf_ppnt->p_filesz,
1740 base + elf_ppnt->p_memsz, elf_prot);
1744 elf_entry += load_bias;
1745 elf_brk += load_bias;
1746 start_code += load_bias;
1747 end_code += load_bias;
1748 start_data += load_bias;
1749 end_data += load_bias;
1751 info->load_bias = load_bias;
1752 info->load_addr = load_addr;
1753 info->entry = elf_entry;
1754 info->start_brk = info->brk = elf_brk;
1755 info->end_code = end_code;
1756 info->start_code = start_code;
1757 info->start_data = start_data;
1758 info->end_data = end_data;
1759 info->personality = PER_LINUX;
1761 free(elf_phdata);
1763 if (qemu_log_enabled()) {
1764 load_symbols(&elf_ex, bprm->fd, load_bias);
1767 close(bprm->fd);
1769 if (elf_interpreter) {
1770 load_elf_interp(elf_interpreter, &interp_info, bprm->buf);
1772 /* If the program interpreter is one of these two, then assume
1773 an iBCS2 image. Otherwise assume a native linux image. */
1775 if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0
1776 || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
1777 info->personality = PER_SVR4;
1779 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1780 and some applications "depend" upon this behavior. Since
1781 we do not have the power to recompile these, we emulate
1782 the SVr4 behavior. Sigh. */
1783 target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
1784 MAP_FIXED | MAP_PRIVATE, -1, 0);
1788 bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex,
1789 info, (elf_interpreter ? &interp_info : NULL));
1790 info->start_stack = bprm->p;
1792 /* If we have an interpreter, set that as the program's entry point.
1793 Copy the load_addr as well, to help PPC64 interpret the entry
1794 point as a function descriptor. Do this after creating elf tables
1795 so that we copy the original program entry point into the AUXV. */
1796 if (elf_interpreter) {
1797 info->load_addr = interp_info.load_addr;
1798 info->entry = interp_info.entry;
1801 #ifdef USE_ELF_CORE_DUMP
1802 bprm->core_dump = &elf_core_dump;
1803 #endif
1805 return 0;
1808 #ifdef USE_ELF_CORE_DUMP
1810 * Definitions to generate Intel SVR4-like core files.
1811 * These mostly have the same names as the SVR4 types with "target_elf_"
1812 * tacked on the front to prevent clashes with linux definitions,
1813 * and the typedef forms have been avoided. This is mostly like
1814 * the SVR4 structure, but more Linuxy, with things that Linux does
1815 * not support and which gdb doesn't really use excluded.
1817  * Fields we don't dump (their contents are zero) in linux-user qemu
1818 * are marked with XXX.
1820 * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
1822  * Porting ELF coredump to a target is a (quite) simple process.  First you
1823 * define USE_ELF_CORE_DUMP in target ELF code (where init_thread() for
1824 * the target resides):
1826 * #define USE_ELF_CORE_DUMP
1828  * Next you define the type of register set used for dumping.  The ELF
1829  * specification says that it needs to be an array of elf_greg_t with a size of ELF_NREG.
1831 * typedef <target_regtype> target_elf_greg_t;
1832 * #define ELF_NREG <number of registers>
1833  * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1835  * The last step is to implement a target-specific function that copies registers
1836  * from the given cpu into the register set specified above.  The prototype is:
1838  * static void elf_core_copy_regs(target_elf_gregset_t *regs,
1839 * const CPUState *env);
1841 * Parameters:
1842 * regs - copy register values into here (allocated and zeroed by caller)
1843 * env - copy registers from here
1845  * An example for the ARM target is provided in this file.
1848 /* An ELF note in memory */
1849 struct memelfnote {
1850 const char *name;
1851 size_t namesz;
1852 size_t namesz_rounded;
1853 int type;
1854 size_t datasz;
1855 void *data;
1856 size_t notesz;
1859 struct target_elf_siginfo {
1860 int si_signo; /* signal number */
1861 int si_code; /* extra code */
1862 int si_errno; /* errno */
1865 struct target_elf_prstatus {
1866 struct target_elf_siginfo pr_info; /* Info associated with signal */
1867 short pr_cursig; /* Current signal */
1868 target_ulong pr_sigpend; /* XXX */
1869 target_ulong pr_sighold; /* XXX */
1870 target_pid_t pr_pid;
1871 target_pid_t pr_ppid;
1872 target_pid_t pr_pgrp;
1873 target_pid_t pr_sid;
1874 struct target_timeval pr_utime; /* XXX User time */
1875 struct target_timeval pr_stime; /* XXX System time */
1876 struct target_timeval pr_cutime; /* XXX Cumulative user time */
1877 struct target_timeval pr_cstime; /* XXX Cumulative system time */
1878 target_elf_gregset_t pr_reg; /* GP registers */
1879 int pr_fpvalid; /* XXX */
1882 #define ELF_PRARGSZ (80) /* Number of chars for args */
1884 struct target_elf_prpsinfo {
1885 char pr_state; /* numeric process state */
1886 char pr_sname; /* char for pr_state */
1887 char pr_zomb; /* zombie */
1888 char pr_nice; /* nice val */
1889 target_ulong pr_flag; /* flags */
1890 target_uid_t pr_uid;
1891 target_gid_t pr_gid;
1892 target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
1893 /* Lots missing */
1894 char pr_fname[16]; /* filename of executable */
1895 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
1898 /* Here is the structure in which status of each thread is captured. */
1899 struct elf_thread_status {
1900 QTAILQ_ENTRY(elf_thread_status) ets_link;
1901 struct target_elf_prstatus prstatus; /* NT_PRSTATUS */
1902 #if 0
1903 elf_fpregset_t fpu; /* NT_PRFPREG */
1904 struct task_struct *thread;
1905 elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */
1906 #endif
1907 struct memelfnote notes[1];
1908 int num_notes;
1911 struct elf_note_info {
1912 struct memelfnote *notes;
1913 struct target_elf_prstatus *prstatus; /* NT_PRSTATUS */
1914 struct target_elf_prpsinfo *psinfo; /* NT_PRPSINFO */
1916 QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list;
1917 #if 0
1919 * Current version of ELF coredump doesn't support
1920 * dumping fp regs etc.
1922 elf_fpregset_t *fpu;
1923 elf_fpxregset_t *xfpu;
1924 int thread_status_size;
1925 #endif
1926 int notes_size;
1927 int numnote;
1930 struct vm_area_struct {
1931 abi_ulong vma_start; /* start vaddr of memory region */
1932 abi_ulong vma_end; /* end vaddr of memory region */
1933 abi_ulong vma_flags; /* protection etc. flags for the region */
1934 QTAILQ_ENTRY(vm_area_struct) vma_link;
1937 struct mm_struct {
1938 QTAILQ_HEAD(, vm_area_struct) mm_mmap;
1939 int mm_count; /* number of mappings */
1942 static struct mm_struct *vma_init(void);
1943 static void vma_delete(struct mm_struct *);
1944 static int vma_add_mapping(struct mm_struct *, abi_ulong,
1945 abi_ulong, abi_ulong);
1946 static int vma_get_mapping_count(const struct mm_struct *);
1947 static struct vm_area_struct *vma_first(const struct mm_struct *);
1948 static struct vm_area_struct *vma_next(struct vm_area_struct *);
1949 static abi_ulong vma_dump_size(const struct vm_area_struct *);
1950 static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
1951 unsigned long flags);
1953 static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
1954 static void fill_note(struct memelfnote *, const char *, int,
1955 unsigned int, void *);
1956 static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
1957 static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
1958 static void fill_auxv_note(struct memelfnote *, const TaskState *);
1959 static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
1960 static size_t note_size(const struct memelfnote *);
1961 static void free_note_info(struct elf_note_info *);
1962 static int fill_note_info(struct elf_note_info *, long, const CPUState *);
1963 static void fill_thread_info(struct elf_note_info *, const CPUState *);
1964 static int core_dump_filename(const TaskState *, char *, size_t);
1966 static int dump_write(int, const void *, size_t);
1967 static int write_note(struct memelfnote *, int);
1968 static int write_note_info(struct elf_note_info *, int);
1970 #ifdef BSWAP_NEEDED
1971 static void bswap_prstatus(struct target_elf_prstatus *prstatus)
1973 prstatus->pr_info.si_signo = tswapl(prstatus->pr_info.si_signo);
1974 prstatus->pr_info.si_code = tswapl(prstatus->pr_info.si_code);
1975 prstatus->pr_info.si_errno = tswapl(prstatus->pr_info.si_errno);
1976 prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
1977 prstatus->pr_sigpend = tswapl(prstatus->pr_sigpend);
1978 prstatus->pr_sighold = tswapl(prstatus->pr_sighold);
1979 prstatus->pr_pid = tswap32(prstatus->pr_pid);
1980 prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
1981 prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
1982 prstatus->pr_sid = tswap32(prstatus->pr_sid);
1983 /* cpu times are not filled, so we skip them */
1984 /* regs should be in correct format already */
1985 prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
1988 static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
1990 psinfo->pr_flag = tswapl(psinfo->pr_flag);
1991 psinfo->pr_uid = tswap16(psinfo->pr_uid);
1992 psinfo->pr_gid = tswap16(psinfo->pr_gid);
1993 psinfo->pr_pid = tswap32(psinfo->pr_pid);
1994 psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
1995 psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
1996 psinfo->pr_sid = tswap32(psinfo->pr_sid);
1999 static void bswap_note(struct elf_note *en)
2001 bswap32s(&en->n_namesz);
2002 bswap32s(&en->n_descsz);
2003 bswap32s(&en->n_type);
2005 #else
2006 static inline void bswap_prstatus(struct target_elf_prstatus *p) { }
2007 static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {}
2008 static inline void bswap_note(struct elf_note *en) { }
2009 #endif /* BSWAP_NEEDED */
2012 * Minimal support for Linux memory regions. These are needed
2013 * when we are finding out what memory exactly belongs to the
2014 * emulated process. No locks are needed here, as long as the
2015 * thread that received the signal is stopped.
2016  */
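/* Editorial usage sketch (not code from elfload.c): the typical life cycle
 * of these helpers, mirroring what elf_core_dump() does further below. */
#if 0   /* sketch only */
static abi_ulong example_total_dump_size(void)
{
    struct mm_struct *mm;
    struct vm_area_struct *vma;
    abi_ulong total = 0;

    if ((mm = vma_init()) == NULL)
        return 0;

    /* vma_walker() is invoked once per guest mapping and records it in mm. */
    walk_memory_regions(mm, vma_walker);

    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma))
        total += vma_dump_size(vma);

    vma_delete(mm);
    return total;
}
#endif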
2018 static struct mm_struct *vma_init(void)
2020 struct mm_struct *mm;
2022 if ((mm = qemu_malloc(sizeof (*mm))) == NULL)
2023 return (NULL);
2025 mm->mm_count = 0;
2026 QTAILQ_INIT(&mm->mm_mmap);
2028 return (mm);
2031 static void vma_delete(struct mm_struct *mm)
2033 struct vm_area_struct *vma;
2035 while ((vma = vma_first(mm)) != NULL) {
2036 QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
2037 qemu_free(vma);
2039 qemu_free(mm);
2042 static int vma_add_mapping(struct mm_struct *mm, abi_ulong start,
2043 abi_ulong end, abi_ulong flags)
2045 struct vm_area_struct *vma;
2047 if ((vma = qemu_mallocz(sizeof (*vma))) == NULL)
2048 return (-1);
2050 vma->vma_start = start;
2051 vma->vma_end = end;
2052 vma->vma_flags = flags;
2054 QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
2055 mm->mm_count++;
2057 return (0);
2060 static struct vm_area_struct *vma_first(const struct mm_struct *mm)
2062 return (QTAILQ_FIRST(&mm->mm_mmap));
2065 static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
2067 return (QTAILQ_NEXT(vma, vma_link));
2070 static int vma_get_mapping_count(const struct mm_struct *mm)
2072 return (mm->mm_count);
2076 * Calculate file (dump) size of given memory region.
2078 static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
2080 /* if we cannot even read the first page, skip it */
2081 if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
2082 return (0);
2085 * Usually we don't dump executable pages as they contain
2086 * non-writable code that the debugger can read directly from
2087 * the target library etc. However, thread stacks are also marked
2088 * executable, so we read in the first page of the given region
2089 * and check whether it contains an ELF header. If there is
2090 * no ELF header, we dump the region.
2092 if (vma->vma_flags & PROT_EXEC) {
2093 char page[TARGET_PAGE_SIZE];
2095 copy_from_user(page, vma->vma_start, sizeof (page));
2096 if ((page[EI_MAG0] == ELFMAG0) &&
2097 (page[EI_MAG1] == ELFMAG1) &&
2098 (page[EI_MAG2] == ELFMAG2) &&
2099 (page[EI_MAG3] == ELFMAG3)) {
2101 * The mapping probably comes from an ELF binary. Don't dump
2102 * it.
2104 return (0);
2108 return (vma->vma_end - vma->vma_start);
2111 static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
2112 unsigned long flags)
2114 struct mm_struct *mm = (struct mm_struct *)priv;
2116 vma_add_mapping(mm, start, end, flags);
2117 return (0);
2120 static void fill_note(struct memelfnote *note, const char *name, int type,
2121 unsigned int sz, void *data)
2123 unsigned int namesz;
2125 namesz = strlen(name) + 1;
2126 note->name = name;
2127 note->namesz = namesz;
2128 note->namesz_rounded = roundup(namesz, sizeof (int32_t));
2129 note->type = type;
2130 note->datasz = roundup(sz, sizeof (int32_t));
2131 note->data = data;
2134 * We calculate the rounded-up note size here, as specified by
2135 * the ELF specification.
2137 note->notesz = sizeof (struct elf_note) +
2138 note->namesz_rounded + note->datasz;
2139 }
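/* Editorial worked example (sketch, not part of elfload.c): the sizes
 * fill_note() computes for a "CORE"/NT_PRSTATUS note. "CORE" plus the
 * terminating NUL gives namesz == 5, which rounds up to namesz_rounded == 8;
 * datasz is sizeof(struct target_elf_prstatus) rounded up to 4 bytes. */
#if 0   /* sketch only */
static size_t example_prstatus_note_size(struct target_elf_prstatus *prstatus)
{
    struct memelfnote note;

    fill_note(&note, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
    /* note.notesz == sizeof(struct elf_note) + 8
     *                + roundup(sizeof(*prstatus), sizeof(int32_t)) */
    return note_size(&note);
}
#endif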
2141 static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
2142 uint32_t flags)
2144 (void) memset(elf, 0, sizeof(*elf));
2146 (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
2147 elf->e_ident[EI_CLASS] = ELF_CLASS;
2148 elf->e_ident[EI_DATA] = ELF_DATA;
2149 elf->e_ident[EI_VERSION] = EV_CURRENT;
2150 elf->e_ident[EI_OSABI] = ELF_OSABI;
2152 elf->e_type = ET_CORE;
2153 elf->e_machine = machine;
2154 elf->e_version = EV_CURRENT;
2155 elf->e_phoff = sizeof(struct elfhdr);
2156 elf->e_flags = flags;
2157 elf->e_ehsize = sizeof(struct elfhdr);
2158 elf->e_phentsize = sizeof(struct elf_phdr);
2159 elf->e_phnum = segs;
2161 bswap_ehdr(elf);
2164 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
2166 phdr->p_type = PT_NOTE;
2167 phdr->p_offset = offset;
2168 phdr->p_vaddr = 0;
2169 phdr->p_paddr = 0;
2170 phdr->p_filesz = sz;
2171 phdr->p_memsz = 0;
2172 phdr->p_flags = 0;
2173 phdr->p_align = 0;
2175 bswap_phdr(phdr, 1);
2178 static size_t note_size(const struct memelfnote *note)
2180 return (note->notesz);
2183 static void fill_prstatus(struct target_elf_prstatus *prstatus,
2184 const TaskState *ts, int signr)
2186 (void) memset(prstatus, 0, sizeof (*prstatus));
2187 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
2188 prstatus->pr_pid = ts->ts_tid;
2189 prstatus->pr_ppid = getppid();
2190 prstatus->pr_pgrp = getpgrp();
2191 prstatus->pr_sid = getsid(0);
2193 bswap_prstatus(prstatus);
2196 static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
2198 char *filename, *base_filename;
2199 unsigned int i, len;
2201 (void) memset(psinfo, 0, sizeof (*psinfo));
2203 len = ts->info->arg_end - ts->info->arg_start;
2204 if (len >= ELF_PRARGSZ)
2205 len = ELF_PRARGSZ - 1;
2206 if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
2207 return -EFAULT;
2208 for (i = 0; i < len; i++)
2209 if (psinfo->pr_psargs[i] == 0)
2210 psinfo->pr_psargs[i] = ' ';
2211 psinfo->pr_psargs[len] = 0;
2213 psinfo->pr_pid = getpid();
2214 psinfo->pr_ppid = getppid();
2215 psinfo->pr_pgrp = getpgrp();
2216 psinfo->pr_sid = getsid(0);
2217 psinfo->pr_uid = getuid();
2218 psinfo->pr_gid = getgid();
2220 filename = strdup(ts->bprm->filename);
2221 base_filename = strdup(basename(filename));
2222 (void) strncpy(psinfo->pr_fname, base_filename,
2223 sizeof(psinfo->pr_fname));
2224 free(base_filename);
2225 free(filename);
2227 bswap_psinfo(psinfo);
2228 return (0);
2231 static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
2233 elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
2234 elf_addr_t orig_auxv = auxv;
2235 abi_ulong val;
2236 void *ptr;
2237 int i, len;
2240 * The auxiliary vector is stored on the target process stack. It contains
2241 * {type, value} pairs that we need to dump into the note. This is not
2242 * strictly necessary, but we do it here for the sake of completeness.
2245 /* find out the length of the vector; AT_NULL is the terminator */
2246 i = len = 0;
2247 do {
2248 get_user_ual(val, auxv);
2249 i += 2;
2250 auxv += 2 * sizeof (elf_addr_t);
2251 } while (val != AT_NULL);
2252 len = i * sizeof (elf_addr_t);
2254 /* read in whole auxv vector and copy it to memelfnote */
2255 ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
2256 if (ptr != NULL) {
2257 fill_note(note, "CORE", NT_AUXV, len, ptr);
2258 unlock_user(ptr, auxv, len);
2263 * Construct the name of the coredump file. We use the following convention
2264 * for the name:
2265 * qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
2267 * Returns 0 in case of success, -1 otherwise (errno is set).
2269 static int core_dump_filename(const TaskState *ts, char *buf,
2270 size_t bufsize)
2272 char timestamp[64];
2273 char *filename = NULL;
2274 char *base_filename = NULL;
2275 struct timeval tv;
2276 struct tm tm;
2278 assert(bufsize >= PATH_MAX);
2280 if (gettimeofday(&tv, NULL) < 0) {
2281 (void) fprintf(stderr, "unable to get current timestamp: %s\n",
2282 strerror(errno));
2283 return (-1);
2286 filename = strdup(ts->bprm->filename);
2287 base_filename = strdup(basename(filename));
2288 (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S",
2289 localtime_r(&tv.tv_sec, &tm));
2290 (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core",
2291 base_filename, timestamp, (int)getpid());
2292 free(base_filename);
2293 free(filename);
2295 return (0);
2296 }
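/* Editorial illustration (sketch, hypothetical values): a guest binary
 * /bin/ls dumped by pid 1234 on 2010-06-01 12:00:00 would yield a name
 * following the convention documented above. */
#if 0   /* sketch only, standalone program */
#include <stdio.h>

int main(void)
{
    char buf[64];

    snprintf(buf, sizeof(buf), "qemu_%s_%s_%d.core",
             "ls", "20100601-120000", 1234);
    printf("%s\n", buf);   /* prints: qemu_ls_20100601-120000_1234.core */
    return 0;
}
#endif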
2298 static int dump_write(int fd, const void *ptr, size_t size)
2300 const char *bufp = (const char *)ptr;
2301 ssize_t bytes_written, bytes_left;
2302 struct rlimit dumpsize;
2303 off_t pos;
2305 bytes_written = 0;
2306 getrlimit(RLIMIT_CORE, &dumpsize);
2307 if ((pos = lseek(fd, 0, SEEK_CUR)) == -1) {
2308 if (errno == ESPIPE) { /* not a seekable stream */
2309 bytes_left = size;
2310 } else {
2311 return pos;
2313 } else {
2314 if (dumpsize.rlim_cur <= pos) {
2315 return -1;
2316 } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
2317 bytes_left = size;
2318 } else {
2319 size_t limit_left = dumpsize.rlim_cur - pos;
2320 bytes_left = limit_left >= size ? size : limit_left;
2325 * In normal conditions a single write(2) should do, but
2326 * in the case of a socket etc. this mechanism is more portable.
2328 do {
2329 bytes_written = write(fd, bufp, bytes_left);
2330 if (bytes_written < 0) {
2331 if (errno == EINTR)
2332 continue;
2333 return (-1);
2334 } else if (bytes_written == 0) { /* eof */
2335 return (-1);
2337 bufp += bytes_written;
2338 bytes_left -= bytes_written;
2339 } while (bytes_left > 0);
2341 return (0);
2344 static int write_note(struct memelfnote *men, int fd)
2346 struct elf_note en;
2348 en.n_namesz = men->namesz;
2349 en.n_type = men->type;
2350 en.n_descsz = men->datasz;
2352 bswap_note(&en);
2354 if (dump_write(fd, &en, sizeof(en)) != 0)
2355 return (-1);
2356 if (dump_write(fd, men->name, men->namesz_rounded) != 0)
2357 return (-1);
2358 if (dump_write(fd, men->data, men->datasz) != 0)
2359 return (-1);
2361 return (0);
2364 static void fill_thread_info(struct elf_note_info *info, const CPUState *env)
2366 TaskState *ts = (TaskState *)env->opaque;
2367 struct elf_thread_status *ets;
2369 ets = qemu_mallocz(sizeof (*ets));
2370 ets->num_notes = 1; /* only prstatus is dumped */
2371 fill_prstatus(&ets->prstatus, ts, 0);
2372 elf_core_copy_regs(&ets->prstatus.pr_reg, env);
2373 fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
2374 &ets->prstatus);
2376 QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
2378 info->notes_size += note_size(&ets->notes[0]);
2381 static int fill_note_info(struct elf_note_info *info,
2382 long signr, const CPUState *env)
2384 #define NUMNOTES 3
2385 CPUState *cpu = NULL;
2386 TaskState *ts = (TaskState *)env->opaque;
2387 int i;
2389 (void) memset(info, 0, sizeof (*info));
2391 QTAILQ_INIT(&info->thread_list);
2393 info->notes = qemu_mallocz(NUMNOTES * sizeof (struct memelfnote));
2394 if (info->notes == NULL)
2395 return (-ENOMEM);
2396 info->prstatus = qemu_mallocz(sizeof (*info->prstatus));
2397 if (info->prstatus == NULL)
2398 return (-ENOMEM);
2399 info->psinfo = qemu_mallocz(sizeof (*info->psinfo));
2400 if (info->psinfo == NULL)
2401 return (-ENOMEM);
2404 * First fill in status (and registers) of current thread
2405 * including process info & aux vector.
2407 fill_prstatus(info->prstatus, ts, signr);
2408 elf_core_copy_regs(&info->prstatus->pr_reg, env);
2409 fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
2410 sizeof (*info->prstatus), info->prstatus);
2411 fill_psinfo(info->psinfo, ts);
2412 fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
2413 sizeof (*info->psinfo), info->psinfo);
2414 fill_auxv_note(&info->notes[2], ts);
2415 info->numnote = NUMNOTES;
2417 info->notes_size = 0;
2418 for (i = 0; i < info->numnote; i++)
2419 info->notes_size += note_size(&info->notes[i]);
2421 /* read and fill status of all threads */
2422 cpu_list_lock();
2423 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2424 if (cpu == thread_env)
2425 continue;
2426 fill_thread_info(info, cpu);
2428 cpu_list_unlock();
2430 return (0);
2433 static void free_note_info(struct elf_note_info *info)
2435 struct elf_thread_status *ets;
2437 while (!QTAILQ_EMPTY(&info->thread_list)) {
2438 ets = QTAILQ_FIRST(&info->thread_list);
2439 QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
2440 qemu_free(ets);
2443 qemu_free(info->prstatus);
2444 qemu_free(info->psinfo);
2445 qemu_free(info->notes);
2448 static int write_note_info(struct elf_note_info *info, int fd)
2450 struct elf_thread_status *ets;
2451 int i, error = 0;
2453 /* write prstatus, psinfo and auxv for current thread */
2454 for (i = 0; i < info->numnote; i++)
2455 if ((error = write_note(&info->notes[i], fd)) != 0)
2456 return (error);
2458 /* write prstatus for each thread */
2459 QTAILQ_FOREACH(ets, &info->thread_list, ets_link) {
2461 if ((error = write_note(&ets->notes[0], fd)) != 0)
2462 return (error);
2465 return (0);
2469 * Write out ELF coredump.
2471 * See documentation of ELF object file format in:
2472 * http://www.caldera.com/developers/devspecs/gabi41.pdf
2474 * The coredump format in Linux is as follows:
2476 * 0 +----------------------+ \
2477 * | ELF header | ET_CORE |
2478 * +----------------------+ |
2479 * | ELF program headers | |--- headers
2480 * | - NOTE section | |
2481 * | - PT_LOAD sections | |
2482 * +----------------------+ /
2483 * | NOTEs: |
2484 * | - NT_PRSTATUS |
2485 * | - NT_PRPSINFO |
2486 * | - NT_AUXV |
2487 * +----------------------+ <-- aligned to target page
2488 * | Process memory dump |
2489 * : :
2490 * . .
2491 * : :
2492 * | |
2493 * +----------------------+
2495 * NT_PRSTATUS -> struct elf_prstatus (per thread)
2496 * NT_PRPSINFO -> struct elf_prpsinfo
2497 * NT_AUXV is an array of { type, value } pairs (see fill_auxv_note()).
2499 * The format follows the System V format as closely as possible. Current
2500 * version limitations are as follows:
2501 * - no floating point registers are dumped
2503 * The function returns 0 on success and a negative errno value otherwise.
2505 * TODO: make this work at runtime as well: it should be
2506 * possible to force a coredump from a running process and then
2507 * continue processing. For example, qemu could set up a SIGUSR2
2508 * handler (provided that the target process hasn't registered a
2509 * handler for it) that does the dump when the signal is received.
2510  */
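/* Editorial illustration (standalone sketch, not part of elfload.c): a tiny
 * reader that lists the program headers of a core produced by
 * elf_core_dump(), i.e. the PT_NOTE segment followed by the page-aligned
 * PT_LOAD segments drawn above. It assumes the host's <elf.h>, a 64-bit
 * ELF class and native byte order; "readelf -l" / "readelf -n" report the
 * same information. */
#if 0   /* sketch only, standalone program */
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    Elf64_Ehdr ehdr;
    Elf64_Phdr phdr;
    int fd, i;

    if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0) {
        fprintf(stderr, "usage: %s <corefile>\n", argv[0]);
        return 1;
    }
    if (read(fd, &ehdr, sizeof(ehdr)) != sizeof(ehdr) ||
        ehdr.e_type != ET_CORE) {
        fprintf(stderr, "not an ET_CORE file\n");
        return 1;
    }
    /* Program headers start at e_phoff, right after the ELF header. */
    for (i = 0; i < ehdr.e_phnum; i++) {
        if (pread(fd, &phdr, sizeof(phdr),
                  ehdr.e_phoff + i * sizeof(phdr)) != sizeof(phdr)) {
            break;
        }
        printf("%-5s off=0x%llx vaddr=0x%llx filesz=0x%llx memsz=0x%llx\n",
               phdr.p_type == PT_NOTE ? "NOTE" :
               phdr.p_type == PT_LOAD ? "LOAD" : "other",
               (unsigned long long)phdr.p_offset,
               (unsigned long long)phdr.p_vaddr,
               (unsigned long long)phdr.p_filesz,
               (unsigned long long)phdr.p_memsz);
    }
    close(fd);
    return 0;
}
#endif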
2511 static int elf_core_dump(int signr, const CPUState *env)
2513 const TaskState *ts = (const TaskState *)env->opaque;
2514 struct vm_area_struct *vma = NULL;
2515 char corefile[PATH_MAX];
2516 struct elf_note_info info;
2517 struct elfhdr elf;
2518 struct elf_phdr phdr;
2519 struct rlimit dumpsize;
2520 struct mm_struct *mm = NULL;
2521 off_t offset = 0, data_offset = 0;
2522 int segs = 0;
2523 int fd = -1;
2525 errno = 0;
2526 getrlimit(RLIMIT_CORE, &dumpsize);
2527 if (dumpsize.rlim_cur == 0)
2528 return 0;
2530 if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
2531 return (-errno);
2533 if ((fd = open(corefile, O_WRONLY | O_CREAT,
2534 S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
2535 return (-errno);
2538 * Walk through the target process's memory mappings and
2539 * set up a structure containing this information. After
2540 * this point the vma_xxx functions can be used.
2542 if ((mm = vma_init()) == NULL)
2543 goto out;
2545 walk_memory_regions(mm, vma_walker);
2546 segs = vma_get_mapping_count(mm);
2549 * Construct valid coredump ELF header. We also
2550 * add one more segment for notes.
2552 fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
2553 if (dump_write(fd, &elf, sizeof (elf)) != 0)
2554 goto out;
2556 /* fill in in-memory version of notes */
2557 if (fill_note_info(&info, signr, env) < 0)
2558 goto out;
2560 offset += sizeof (elf); /* elf header */
2561 offset += (segs + 1) * sizeof (struct elf_phdr); /* program headers */
2563 /* write out notes program header */
2564 fill_elf_note_phdr(&phdr, info.notes_size, offset);
2566 offset += info.notes_size;
2567 if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
2568 goto out;
2571 * The ELF specification wants data to start at a page boundary, so
2572 * we align it here.
2574 offset = roundup(offset, ELF_EXEC_PAGESIZE);
2577 * Write program headers for memory regions mapped in
2578 * the target process.
2580 for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2581 (void) memset(&phdr, 0, sizeof (phdr));
2583 phdr.p_type = PT_LOAD;
2584 phdr.p_offset = offset;
2585 phdr.p_vaddr = vma->vma_start;
2586 phdr.p_paddr = 0;
2587 phdr.p_filesz = vma_dump_size(vma);
2588 offset += phdr.p_filesz;
2589 phdr.p_memsz = vma->vma_end - vma->vma_start;
2590 phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
2591 if (vma->vma_flags & PROT_WRITE)
2592 phdr.p_flags |= PF_W;
2593 if (vma->vma_flags & PROT_EXEC)
2594 phdr.p_flags |= PF_X;
2595 phdr.p_align = ELF_EXEC_PAGESIZE;
bswap_phdr(&phdr, 1); /* keep PT_LOAD headers in target byte order, as for the ELF and note headers */
2597 dump_write(fd, &phdr, sizeof (phdr));
2601 * Next we write notes just after program headers. No
2602 * alignment needed here.
2604 if (write_note_info(&info, fd) < 0)
2605 goto out;
2607 /* align data to page boundary */
2608 data_offset = lseek(fd, 0, SEEK_CUR);
2609 data_offset = TARGET_PAGE_ALIGN(data_offset);
2610 if (lseek(fd, data_offset, SEEK_SET) != data_offset)
2611 goto out;
2614 * Finally we can dump process memory into corefile as well.
2616 for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2617 abi_ulong addr;
2618 abi_ulong end;
2620 end = vma->vma_start + vma_dump_size(vma);
2622 for (addr = vma->vma_start; addr < end;
2623 addr += TARGET_PAGE_SIZE) {
2624 char page[TARGET_PAGE_SIZE];
2625 int error;
2628 * Read in page from target process memory and
2629 * write it to coredump file.
2631 error = copy_from_user(page, addr, sizeof (page));
2632 if (error != 0) {
2633 (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
2634 addr);
2635 errno = -error;
2636 goto out;
2638 if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
2639 goto out;
2643 out:
2644 free_note_info(&info);
2645 if (mm != NULL)
2646 vma_delete(mm);
2647 (void) close(fd);
2649 if (errno != 0)
2650 return (-errno);
2651 return (0);
2653 #endif /* USE_ELF_CORE_DUMP */
2655 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
2657 init_thread(regs, infop);