/* This is the Linux kernel elf-loading code, ported into user space */

#include <sys/resource.h>

#define ELF_OSABI   ELFOSABI_SYSV
/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
    ADDR_NO_RANDOMIZE  = 0x0040000,  /* disable randomization of VA space */
    FDPIC_FUNCPTRS     = 0x0080000,  /* userspace function ptrs point to descriptors */
    MMAP_PAGE_ZERO     = 0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC  = 0x0400000,
    ADDR_LIMIT_32BIT   = 0x0800000,
    SHORT_INODE        = 0x1000000,
    WHOLE_SECONDS      = 0x2000000,
    STICKY_TIMEOUTS    = 0x4000000,
    ADDR_LIMIT_3GB     = 0x8000000,
};

/*
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
enum {
    PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4        = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3        = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3     = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5        = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386    = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4       = 0x0005 | STICKY_TIMEOUTS,
    PER_SUNOS       = 0x0006 | STICKY_TIMEOUTS,
    PER_XENIX       = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32      = 0x0009 | STICKY_TIMEOUTS,  /* IRIX5 32-bit */
    PER_IRIXN32     = 0x000a | STICKY_TIMEOUTS,  /* IRIX6 new 32-bit */
    PER_IRIX64      = 0x000b | STICKY_TIMEOUTS,  /* IRIX6 64-bit */
    PER_SOLARIS     = 0x000d | STICKY_TIMEOUTS,
    PER_UW7         = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4        = 0x000f,                    /* OSF/1 v4 */
};

/*
 * Return the base personality without flags.
 */
#define personality(pers)   (pers & PER_MASK)
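/*
 * Illustrative note (not part of the original source): with the values
 * above, personality(PER_SVR4) masks off the STICKY_TIMEOUTS and
 * MMAP_PAGE_ZERO emulation flags in the upper bytes and yields the base
 * personality 0x0001, assuming PER_MASK covers just the low byte as the
 * comment above implies.
 */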
/* this flag is ineffective under linux too, should be deleted */
#define MAP_DENYWRITE 0

/* should probably go in elf.h */
#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = (thread_env->cpuid_version >> 8) & 0xff;

    elf_platform[1] = '0' + family;
    return elf_platform;
}
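/*
 * Worked example (added for illustration, not in the original): a CPU
 * reporting cpuid_version 0x0633 has family (0x0633 >> 8) & 0xff == 6,
 * so ELF_PLATFORM evaluates to the string "i686".
 */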
#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    return thread_env->cpuid_features;
}
#define ELF_START_MMAP 0x2aaaaab000ULL

#define elf_check_arch(x) ( ((x) == ELF_ARCH) )

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_X86_64

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
}

typedef target_ulong    target_elf_greg_t;
typedef uint32_t        target_uid_t;
typedef uint32_t        target_gid_t;
typedef int32_t         target_pid_t;

typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 29 as there should be place for
 * TRAPNO and ERR "registers" as well but linux doesn't dump those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
{
    (*regs)[0] = env->regs[15];
    (*regs)[1] = env->regs[14];
    (*regs)[2] = env->regs[13];
    (*regs)[3] = env->regs[12];
    (*regs)[4] = env->regs[R_EBP];
    (*regs)[5] = env->regs[R_EBX];
    (*regs)[6] = env->regs[11];
    (*regs)[7] = env->regs[10];
    (*regs)[8] = env->regs[9];
    (*regs)[9] = env->regs[8];
    (*regs)[10] = env->regs[R_EAX];
    (*regs)[11] = env->regs[R_ECX];
    (*regs)[12] = env->regs[R_EDX];
    (*regs)[13] = env->regs[R_ESI];
    (*regs)[14] = env->regs[R_EDI];
    (*regs)[15] = env->regs[R_EAX]; /* XXX */
    (*regs)[16] = env->eip;
    (*regs)[17] = env->segs[R_CS].selector & 0xffff;
    (*regs)[18] = env->eflags;
    (*regs)[19] = env->regs[R_ESP];
    (*regs)[20] = env->segs[R_SS].selector & 0xffff;
    (*regs)[21] = env->segs[R_FS].selector & 0xffff;
    (*regs)[22] = env->segs[R_GS].selector & 0xffff;
    (*regs)[23] = env->segs[R_DS].selector & 0xffff;
    (*regs)[24] = env->segs[R_ES].selector & 0xffff;
    (*regs)[25] = env->segs[R_FS].selector & 0xffff;
    (*regs)[26] = env->segs[R_GS].selector & 0xffff;
}
#define ELF_START_MMAP 0x80000000

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_386

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a means for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 tells we have no such handler. */
    regs->edx = 0;
}
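/*
 * Illustration (added, not part of the original source): a guest dynamic
 * linker that follows this ABI would typically do something like
 *
 *     if (edx_value)
 *         atexit((void (*)(void)) edx_value);
 *
 * on entry, which is why the loader clears %edx when it has no DT_FINI
 * handler to register.  "edx_value" is a hypothetical name used only for
 * this example.
 */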
typedef target_ulong    target_elf_greg_t;
typedef uint16_t        target_uid_t;
typedef uint16_t        target_gid_t;
typedef int32_t         target_pid_t;

typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 19 as there should be place for
 * TRAPNO and ERR "registers" as well but linux doesn't dump those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
{
    (*regs)[0] = env->regs[R_EBX];
    (*regs)[1] = env->regs[R_ECX];
    (*regs)[2] = env->regs[R_EDX];
    (*regs)[3] = env->regs[R_ESI];
    (*regs)[4] = env->regs[R_EDI];
    (*regs)[5] = env->regs[R_EBP];
    (*regs)[6] = env->regs[R_EAX];
    (*regs)[7] = env->segs[R_DS].selector & 0xffff;
    (*regs)[8] = env->segs[R_ES].selector & 0xffff;
    (*regs)[9] = env->segs[R_FS].selector & 0xffff;
    (*regs)[10] = env->segs[R_GS].selector & 0xffff;
    (*regs)[11] = env->regs[R_EAX]; /* XXX */
    (*regs)[12] = env->eip;
    (*regs)[13] = env->segs[R_CS].selector & 0xffff;
    (*regs)[14] = env->eflags;
    (*regs)[15] = env->regs[R_ESP];
    (*regs)[16] = env->segs[R_SS].selector & 0xffff;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE   4096
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_ARM )

#define ELF_CLASS   ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA    ELFDATA2MSB
#else
#define ELF_DATA    ELFDATA2LSB
#endif
#define ELF_ARCH    EM_ARM

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));
    regs->ARM_cpsr = 0x10;
    if (infop->entry & 1)
        regs->ARM_cpsr |= CPSR_T;
    regs->ARM_pc = infop->entry & 0xfffffffe;
    regs->ARM_sp = infop->start_stack;
    /* FIXME - what to do for failure of get_user()? */
    get_user_ual(regs->ARM_r2, stack + 8); /* envp */
    get_user_ual(regs->ARM_r1, stack + 4); /* envp */
    /* XXX: it seems that r0 is zeroed after ! */
    /* For uClinux PIC binaries.  */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->ARM_r10 = infop->start_data;
}

typedef uint32_t target_elf_greg_t;
typedef uint16_t target_uid_t;
typedef uint16_t target_gid_t;
typedef int32_t  target_pid_t;

typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
{
    (*regs)[0] = env->regs[0];
    (*regs)[1] = env->regs[1];
    (*regs)[2] = env->regs[2];
    (*regs)[3] = env->regs[3];
    (*regs)[4] = env->regs[4];
    (*regs)[5] = env->regs[5];
    (*regs)[6] = env->regs[6];
    (*regs)[7] = env->regs[7];
    (*regs)[8] = env->regs[8];
    (*regs)[9] = env->regs[9];
    (*regs)[10] = env->regs[10];
    (*regs)[11] = env->regs[11];
    (*regs)[12] = env->regs[12];
    (*regs)[13] = env->regs[13];
    (*regs)[14] = env->regs[14];
    (*regs)[15] = env->regs[15];
    (*regs)[16] = cpsr_read((CPUState *)env);
    (*regs)[17] = env->regs[0]; /* XXX */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE   4096

enum {
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
    ARM_HWCAP_ARM_JAVA      = 1 << 8,
    ARM_HWCAP_ARM_IWMMXT    = 1 << 9,
    ARM_HWCAP_ARM_THUMBEE   = 1 << 10,
    ARM_HWCAP_ARM_NEON      = 1 << 11,
    ARM_HWCAP_ARM_VFPv3     = 1 << 12,
    ARM_HWCAP_ARM_VFPv3D16  = 1 << 13,
};

#define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF          \
                   | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
                   | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP         \
                   | ARM_HWCAP_ARM_NEON | ARM_HWCAP_ARM_VFPv3 )
#ifdef TARGET_SPARC64

#define ELF_START_MMAP 0x80000000

#ifndef TARGET_ABI32
#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
#else
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
#endif

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_SPARCV9

#define STACK_BIAS  2047

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
#ifdef TARGET_ABI32
    regs->u_regs[14] = infop->start_stack - 16 * 4;
#else
    if (personality(infop->personality) == PER_LINUX32)
        regs->u_regs[14] = infop->start_stack - 16 * 4;
    else
        regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
#endif
}

#else /* !TARGET_SPARC64 */

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARC )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_SPARC

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
}

#endif /* TARGET_SPARC64 */
#define ELF_START_MMAP 0x80000000

#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS   ELFCLASS64

#else

#define elf_check_arch(x) ( (x) == EM_PPC )

#define ELF_CLASS   ELFCLASS32

#endif

#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA    ELFDATA2MSB
#else
#define ELF_DATA    ELFDATA2LSB
#endif
#define ELF_ARCH    EM_PPC

/* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
   See arch/powerpc/include/asm/cputable.h.  */
enum {
    QEMU_PPC_FEATURE_32 = 0x80000000,
    QEMU_PPC_FEATURE_64 = 0x40000000,
    QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
    QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
    QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
    QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
    QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
    QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
    QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
    QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
    QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
    QEMU_PPC_FEATURE_NO_TB = 0x00100000,
    QEMU_PPC_FEATURE_POWER4 = 0x00080000,
    QEMU_PPC_FEATURE_POWER5 = 0x00040000,
    QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
    QEMU_PPC_FEATURE_CELL = 0x00010000,
    QEMU_PPC_FEATURE_BOOKE = 0x00008000,
    QEMU_PPC_FEATURE_SMT = 0x00004000,
    QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
    QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
    QEMU_PPC_FEATURE_PA6T = 0x00000800,
    QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
    QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
    QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
    QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
    QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
    QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
    QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    CPUState *e = thread_env;
    uint32_t features = 0;

    /* We don't have to be terribly complete here; the high points are
       Altivec/FP/SPE support.  Anything else is just a bonus.  */
#define GET_FEATURE(flag, feature)                                  \
    do { if (e->insns_flags & flag) features |= feature; } while(0)
    GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
    GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
    GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
    GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
    GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
    GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
    GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
    GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
#undef GET_FEATURE

    return features;
}

/*
 * We need to put in some extra aux table entries to tell glibc what
 * the cache block size is, so it can use the dcbz instruction safely.
 */
#define AT_DCACHEBSIZE 19
#define AT_ICACHEBSIZE 20
#define AT_UCACHEBSIZE 21
/* A special ignored type value for PPC, for glibc compatibility.  */
#define AT_IGNOREPPC 22
/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
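/*
 * Added illustration (not part of the original source): with the values
 * used below, ARCH_DLINFO contributes DLINFO_ARCH_ITEMS == 5 {id, value}
 * pairs to the auxiliary vector -- the three cache-block-size entries plus
 * the two AT_IGNOREPPC padding entries that keep glibc's 16-byte-alignment
 * expectation satisfied.
 */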
#define DLINFO_ARCH_ITEMS       5
#define ARCH_DLINFO                                     \
do {                                                    \
        NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20);              \
        NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20);              \
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                 \
        /*                                              \
         * Now handle glibc compatibility.              \
         */                                             \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
 } while (0)

static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    abi_ulong pos = infop->start_stack;
    abi_ulong tmp;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    abi_ulong entry, toc;
#endif

    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    entry = ldq_raw(infop->entry) + infop->load_addr;
    toc = ldq_raw(infop->entry + 8) + infop->load_addr;
    _regs->gpr[2] = toc;
    infop->entry = entry;
#endif
    _regs->nip = infop->entry;
    /* Note that this isn't exactly what the regular kernel does,
     * but this is what the ABI wants and is needed to allow
     * execution of PPC BSD programs.
     */
    /* FIXME - what to do for failure of get_user()? */
    get_user_ual(_regs->gpr[3], pos);
    pos += sizeof(abi_ulong);
    for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong))
        get_user_ual(tmp, pos);
}

#define ELF_EXEC_PAGESIZE   4096
#ifdef TARGET_MIPS

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_MIPS )

#ifdef TARGET_MIPS64
#define ELF_CLASS   ELFCLASS64
#else
#define ELF_CLASS   ELFCLASS32
#endif
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA    ELFDATA2MSB
#else
#define ELF_DATA    ELFDATA2LSB
#endif
#define ELF_ARCH    EM_MIPS

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->cp0_status = 2 << CP0St_KSU;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE   4096

#endif /* TARGET_MIPS */

#ifdef TARGET_MICROBLAZE

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_XILINX_MICROBLAZE )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_MIPS

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->r1 = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE   4096

#endif /* TARGET_MICROBLAZE */
#ifdef TARGET_SH4

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SH )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_SH

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE   4096

#endif /* TARGET_SH4 */

#ifdef TARGET_CRIS

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_CRIS )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_CRIS

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->erp = infop->entry;
}

#define ELF_EXEC_PAGESIZE   8192

#endif /* TARGET_CRIS */

#ifdef TARGET_M68K

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_68K )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_68K

/* ??? Does this need to do anything?
   #define ELF_PLAT_INIT(_r) */

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->usp = infop->start_stack;
    regs->pc = infop->entry;
}

#define ELF_EXEC_PAGESIZE   8192

#endif /* TARGET_M68K */

#ifdef TARGET_ALPHA

#define ELF_START_MMAP (0x30000000000ULL)

#define elf_check_arch(x) ( (x) == ELF_ARCH )

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_ALPHA

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->usp = infop->start_stack;
    regs->unique = infop->start_data; /* ? */
    printf("Set unique value to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
           regs->unique, infop->start_data);
}

#define ELF_EXEC_PAGESIZE   8192

#endif /* TARGET_ALPHA */
#ifndef ELF_PLATFORM
#define ELF_PLATFORM (NULL)
#endif

#ifdef TARGET_ABI32
#undef ELF_CLASS
#define ELF_CLASS ELFCLASS32
#undef bswaptls
#define bswaptls(ptr) bswap32s(ptr)
#endif

struct exec
{
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
};

#define N_MAGIC(exec) ((exec).a_info & 0xffff)

/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)

/* max code+data+bss+brk space allocated to ET_DYN executables */
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)

/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

#define DLINFO_ITEMS 12
static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
{
    memcpy(to, from, n);
}

static int load_aout_interp(void * exptr, int interp_fd);

#ifdef BSWAP_NEEDED
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
}

static void bswap_phdr(struct elf_phdr *phdr)
{
    bswap32s(&phdr->p_type);            /* Segment type */
    bswaptls(&phdr->p_offset);          /* Segment file offset */
    bswaptls(&phdr->p_vaddr);           /* Segment virtual address */
    bswaptls(&phdr->p_paddr);           /* Segment physical address */
    bswaptls(&phdr->p_filesz);          /* Segment size in file */
    bswaptls(&phdr->p_memsz);           /* Segment size in memory */
    bswap32s(&phdr->p_flags);           /* Segment flags */
    bswaptls(&phdr->p_align);           /* Segment alignment */
}

static void bswap_shdr(struct elf_shdr *shdr)
{
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswaptls(&shdr->sh_flags);
    bswaptls(&shdr->sh_addr);
    bswaptls(&shdr->sh_offset);
    bswaptls(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswaptls(&shdr->sh_addralign);
    bswaptls(&shdr->sh_entsize);
}

static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}
#endif /* BSWAP_NEEDED */

#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(int, const CPUState *);

#ifdef BSWAP_NEEDED
static void bswap_note(struct elf_note *en)
{
    bswap32s(&en->n_namesz);
    bswap32s(&en->n_descsz);
    bswap32s(&en->n_type);
}
#endif /* BSWAP_NEEDED */

#endif /* USE_ELF_CORE_DUMP */
/*
 * 'copy_elf_strings()' copies argument/envelope strings from user
 * memory to free pages in kernel mem.  These are in a format ready
 * to be put directly into the top of new user memory.
 */
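/*
 * Added note (not in the original): the "format" referred to above is
 * built top-down -- p starts at the high end of the argument pages and is
 * decremented as each NUL-terminated string is copied in, so the strings
 * end up packed back to back just below the eventual initial stack
 * pointer, ready for the argv[]/envp[] pointer arrays to be built over
 * them later.
 */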
static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
                                  abi_ulong p)
{
    char *tmp, *tmp1, *pag = NULL;
    int len, offset = 0;

    if (!p) {
        return 0;       /* bullet-proofing */
    }
    while (argc-- > 0) {
        tmp = argv[argc];
        if (!tmp) {
            fprintf(stderr, "VFS: argc is wrong");
            exit(-1);
        }

        if (p < len) {  /* this shouldn't happen - 128kB */
            return 0;
        }
        while (len) {

            if (--offset < 0) {
                offset = p % TARGET_PAGE_SIZE;
                pag = (char *)page[p/TARGET_PAGE_SIZE];
                if (!pag) {
                    pag = (char *)malloc(TARGET_PAGE_SIZE);
                    memset(pag, 0, TARGET_PAGE_SIZE);
                    page[p/TARGET_PAGE_SIZE] = pag;
                }
            }
            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;
            }
            else {
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
            }
        }
    }
    return p;
}
static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
                                 struct image_info *info)
{
    abi_ulong stack_base, size, error;
    int i;

    /* Create enough stack to hold everything.  If we don't use
     * it for args, we'll use it for something else...
     */
    size = x86_stack_size;
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS,
                        -1, 0);

    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    p += stack_base;

    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
        if (bprm->page[i]) {
            /* FIXME - check return value of memcpy_to_target() for failure */
            memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
            free(bprm->page[i]);
        }
        stack_base += TARGET_PAGE_SIZE;
    }
    return p;
}
static void set_brk(abi_ulong start, abi_ulong end)
{
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);
    if (end <= start)
        return;
    if(target_mmap(start, end - start,
                   PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
        perror("cannot mmap brk");
        exit(-1);
    }
}
/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss).  This would contain the junk from the file that
   should not be in memory. */
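/*
 * Worked example (added for illustration, addresses hypothetical): with
 * 4 KiB target pages, if the data segment ends at elf_bss == 0x0804a678,
 * the rest of that page (0x0804a678 up to 0x0804afff) must be cleared by
 * hand, because the file-backed mapping leaves whatever bytes the file
 * happened to contain there.
 */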
static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
{
    abi_ulong nbyte;

    if (elf_bss >= last_bss)
        return;

    /* XXX: this is really a hack : if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped. A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        abi_ulong end_addr, end_addr1;
        end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
            ~(qemu_real_host_page_size - 1);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)g2h(end_addr1), end_addr - end_addr1,
                 PROT_READ|PROT_WRITE|PROT_EXEC,
                 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        }
    }

    nbyte = elf_bss & (qemu_host_page_size-1);
    if (nbyte) {
        nbyte = qemu_host_page_size - nbyte;
        do {
            /* FIXME - what to do if put_user() fails? */
            put_user_u8(0, elf_bss);
            elf_bss++;
        } while (--nbyte);
    }
}
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr * exec,
                                   abi_ulong load_addr,
                                   abi_ulong load_bias,
                                   abi_ulong interp_load_addr, int ibcs,
                                   struct image_info *info)
{
    abi_ulong sp;
    int size;
    abi_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(elf_addr_t);

    sp = p;
    u_platform = 0;
    k_platform = ELF_PLATFORM;
    if (k_platform) {
        size_t len = strlen(k_platform) + 1;
        sp -= (len + n - 1) & ~(n - 1);
        u_platform = sp;
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(sp, k_platform, len);
    }
    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp &~ (abi_ulong)15;
    size = (DLINFO_ITEMS + 1) * 2;
    if (k_platform)
        size += 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
#endif
    size += envc + argc + 2;
    size += (!ibcs ? 3 : 1);    /* argc itself */
    size *= n;
    if (size & 15)
        sp -= 16 - (size & 15);

    /* This is correct because Linux defines
     * elf_addr_t as Elf32_Off / Elf64_Off
     */
#define NEW_AUX_ENT(id, val) do {               \
            sp -= n; put_user_ual(val, sp);     \
            sp -= n; put_user_ual(id, sp);      \
          } while(0)
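/*
 * Added illustration (not part of the original source): NEW_AUX_ENT pushes
 * the value first and then the id, so after e.g. NEW_AUX_ENT(AT_PAGESZ, 4096)
 * the guest stack holds, from the lowered sp upwards, the pair
 * { AT_PAGESZ, 4096 } -- i.e. entries read in ascending memory order as
 * {id, value}, which is the layout auxiliary-vector consumers expect.
 */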
    NEW_AUX_ENT (AT_NULL, 0);

    /* There must be exactly DLINFO_ITEMS entries here.  */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
    if (k_platform)
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

    info->saved_auxv = sp;

    sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
    return sp;
}
static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
                                 int interpreter_fd,
                                 abi_ulong *interp_load_addr)
{
    struct elf_phdr *elf_phdata  =  NULL;
    struct elf_phdr *eppnt;
    abi_ulong load_addr = 0;
    int load_addr_set = 0;
    int retval;
    abi_ulong last_bss, elf_bss;
    abi_ulong error;
    int i;

    elf_bss = 0;
    last_bss = 0;
    error = 0;

#ifdef BSWAP_NEEDED
    bswap_ehdr(interp_elf_ex);
#endif
    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~((abi_ulong)0UL);
    }

    /* Now read in all of the header information */

    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
        return ~(abi_ulong)0UL;

    elf_phdata =  (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);

    if (!elf_phdata)
        return ~((abi_ulong)0UL);

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        free(elf_phdata);
        return ~((abi_ulong)0UL);
    }

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
    if (retval >= 0) {
        retval = read(interpreter_fd,
                      (char *) elf_phdata,
                      sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
    }
    if (retval < 0) {
        perror("load_elf_interp");
        exit(-1);
    }

#ifdef BSWAP_NEEDED
    eppnt = elf_phdata;
    for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
        bswap_phdr(eppnt);
    }
#endif

    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid hardcoding the interpreter load
           address in qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,
                            -1, 0);
        load_addr = error;
        load_addr_set = 1;
    }

    eppnt = elf_phdata;
    for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
            int elf_prot = 0;
            abi_ulong vaddr = 0;
            abi_ulong k;

            if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            }
            error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                elf_prot,
                                elf_type,
                                interpreter_fd,
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));

            if (error == -1) {
                /* Real error */
                close(interpreter_fd);
                free(elf_phdata);
                return ~((abi_ulong)0UL);
            }

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
                load_addr = error;
                load_addr_set = 1;
            }

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss) elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss) last_bss = k;
        }

    /* Now use mmap to map the library into memory. */

    close(interpreter_fd);

    /*
     * Now fill out the bss section.  First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last
     * bss page.
     */
    padzero(elf_bss, last_bss);
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss-elf_bss,
                    PROT_READ|PROT_WRITE|PROT_EXEC,
                    MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    }
    free(elf_phdata);

    *interp_load_addr = load_addr;
    return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
}
static int symfind(const void *s0, const void *s1)
{
    struct elf_sym *key = (struct elf_sym *)s0;
    struct elf_sym *sym = (struct elf_sym *)s1;
    int result = 0;

    if (key->st_value < sym->st_value) {
        result = -1;
    } else if (key->st_value >= sym->st_value + sym->st_size) {
        result = 1;
    }
    return result;
}

static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
{
#if ELF_CLASS == ELFCLASS32
    struct elf_sym *syms = s->disas_symtab.elf32;
#else
    struct elf_sym *syms = s->disas_symtab.elf64;
#endif

    /* binary search */
    struct elf_sym key;
    struct elf_sym *sym;

    key.st_value = orig_addr;

    sym = bsearch(&key, syms, s->disas_num_syms, sizeof(*syms), symfind);
    if (sym != NULL) {
        return s->disas_strtab + sym->st_name;
    }

    return "";
}

/* FIXME: This should use elf_ops.h */
static int symcmp(const void *s0, const void *s1)
{
    struct elf_sym *sym0 = (struct elf_sym *)s0;
    struct elf_sym *sym1 = (struct elf_sym *)s1;
    return (sym0->st_value < sym1->st_value)
        ? -1
        : ((sym0->st_value > sym1->st_value) ? 1 : 0);
}

/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
{
    unsigned int i, nsyms;
    struct elf_shdr sechdr, symtab, strtab;
    char *strings;
    struct syminfo *s;
    struct elf_sym *syms;

    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
            return;
#ifdef BSWAP_NEEDED
        bswap_shdr(&sechdr);
#endif
        if (sechdr.sh_type == SHT_SYMTAB) {
            symtab = sechdr;
            lseek(fd, hdr->e_shoff
                  + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab))
                != sizeof(strtab))
                return;
#ifdef BSWAP_NEEDED
            bswap_shdr(&strtab);
#endif
            goto found;
        }
    }
    return; /* Shouldn't happen... */

 found:
    /* Now know where the strtab and symtab are.  Snarf them. */
    s = malloc(sizeof(*s));
    syms = malloc(symtab.sh_size);
    if (!syms)
        return;
    s->disas_strtab = strings = malloc(strtab.sh_size);
    if (!s->disas_strtab)
        return;

    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, syms, symtab.sh_size) != symtab.sh_size)
        return;

    nsyms = symtab.sh_size / sizeof(struct elf_sym);

    i = 0;
    while (i < nsyms) {
#ifdef BSWAP_NEEDED
        bswap_sym(syms + i);
#endif
        // Throw away entries which we do not need.
        if (syms[i].st_shndx == SHN_UNDEF ||
            syms[i].st_shndx >= SHN_LORESERVE ||
            ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
            nsyms--;
            if (i < nsyms) {
                syms[i] = syms[nsyms];
            }
            continue;
        }
#if defined(TARGET_ARM) || defined (TARGET_MIPS)
        /* The bottom address bit marks a Thumb or MIPS16 symbol.  */
        syms[i].st_value &= ~(target_ulong)1;
#endif
        i++;
    }
    syms = realloc(syms, nsyms * sizeof(*syms));

    qsort(syms, nsyms, sizeof(*syms), symcmp);

    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
        return;
    s->disas_num_syms = nsyms;
#if ELF_CLASS == ELFCLASS32
    s->disas_symtab.elf32 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
#else
    s->disas_symtab.elf64 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
#endif
}
int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
                    struct image_info * info)
{
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    struct exec interp_ex;
    int interpreter_fd = -1; /* avoid warning */
    abi_ulong load_addr, load_bias;
    int load_addr_set = 0;
    unsigned int interpreter_type = INTERPRETER_NONE;
    unsigned char ibcs2_interpreter;
    int i;
    int retval;
    abi_ulong mapped_addr;
    struct elf_phdr * elf_ppnt;
    struct elf_phdr *elf_phdata;
    abi_ulong elf_bss, k, elf_brk;
    char * elf_interpreter;
    abi_ulong elf_entry, interp_load_addr = 0;
    abi_ulong start_code, end_code, start_data, end_data;
    abi_ulong reloc_func_desc = 0;
    abi_ulong elf_stack;
    char passed_fileno[6];

    ibcs2_interpreter = 0;
    load_addr = 0;
    load_bias = 0;

    elf_ex = *((struct elfhdr *) bprm->buf);          /* exec-header */
#ifdef BSWAP_NEEDED
    bswap_ehdr(&elf_ex);
#endif

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (! elf_check_arch(elf_ex.e_machine))) {
        return -ENOEXEC;
    }

    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
    bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
    if (elf_phdata == NULL) {
        return -ENOMEM;
    }

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    if (retval > 0) {
        retval = read(bprm->fd, (char *) elf_phdata,
                      elf_ex.e_phentsize * elf_ex.e_phnum);
    }

    if (retval < 0) {
        perror("load_elf_binary");
        exit(-1);
    }

#ifdef BSWAP_NEEDED
    elf_ppnt = elf_phdata;
    for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
        bswap_phdr(elf_ppnt);
    }
#endif
    elf_ppnt = elf_phdata;

    elf_bss = 0;
    elf_brk = 0;

    elf_stack = ~((abi_ulong)0UL);
    elf_interpreter = NULL;
    start_code = ~((abi_ulong)0UL);
    end_code = 0;
    start_data = 0;
    end_data = 0;
    interp_ex.a_info = 0;

    for(i=0;i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if ( elf_interpreter != NULL )
            {
                free(elf_phdata);
                free(elf_interpreter);
            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary
             */
            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);

            if (elf_interpreter == NULL) {
                free(elf_phdata);
                close(bprm->fd);
                return -ENOMEM;
            }

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            if (retval >= 0) {
                retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
            }
            if (retval < 0) {
                perror("load_elf_binary2");
                exit(-1);
            }

            /* If the program interpreter is one of these two,
               then assume an iBCS2 image.  Otherwise assume
               a native linux image. */

            /* JRP - Need to add X86 lib dir stuff here... */

            if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
                strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
                ibcs2_interpreter = 1;
            }

            printf("Using ELF interpreter %s\n", path(elf_interpreter));

            retval = open(path(elf_interpreter), O_RDONLY);
            if (retval >= 0) {
                interpreter_fd = retval;
            }
            else {
                perror(elf_interpreter);
                exit(-1);
                /* retval = -errno; */
            }

            retval = lseek(interpreter_fd, 0, SEEK_SET);
            if (retval >= 0) {
                retval = read(interpreter_fd,bprm->buf,128);
            }
            if (retval >= 0) {
                interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
                interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
            }
            if (retval < 0) {
                perror("load_elf_binary3");
                exit(-1);
                free(elf_interpreter);
            }
        }
        elf_ppnt++;
    }

    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter){
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

        /* Now figure out which format our binary is */
        if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
            (N_MAGIC(interp_ex) != QMAGIC)) {
            interpreter_type = INTERPRETER_ELF;
        }

        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp((char *)&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
            interpreter_type &= ~INTERPRETER_ELF;
        }

        if (!interpreter_type) {
            free(elf_interpreter);
            free(elf_phdata);
            close(bprm->fd);
            return -ELIBBAD;
        }
    }

    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */
    {
        char * passed_p;

        if (interpreter_type == INTERPRETER_AOUT) {
            snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
            passed_p = passed_fileno;

            if (elf_interpreter) {
                bprm->p = copy_elf_strings(1,&passed_p,bprm->page,bprm->p);
            }
        }
        if (elf_interpreter) {
            free(elf_interpreter);
        }
    }

    /* OK, This is the point of no return */
    info->start_mmap = (abi_ulong)ELF_START_MMAP;
    elf_entry = (abi_ulong) elf_ex.e_entry;

#if defined(CONFIG_USE_GUEST_BASE)
    /*
     * In case where user has not explicitly set the guest_base, we
     * probe here whether we should set it automatically.
     */
    if (!have_guest_base) {
        /*
         * Go through ELF program header table and find out whether
         * any of the segments drop below our current mmap_min_addr and
         * in that case set guest_base to corresponding address.
         */
        for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
             i++, elf_ppnt++) {
            if (elf_ppnt->p_type != PT_LOAD)
                continue;
            if (HOST_PAGE_ALIGN(elf_ppnt->p_vaddr) < mmap_min_addr) {
                guest_base = HOST_PAGE_ALIGN(mmap_min_addr);
                break;
            }
        }
    }
#endif /* CONFIG_USE_GUEST_BASE */

    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;

    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory.  At this point, we assume that
     * the image should be loaded at fixed address, not at a variable
     * address.
     */
    for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        int elf_prot = 0;
        int elf_flags = 0;
        abi_ulong error;

        if (elf_ppnt->p_type != PT_LOAD)
            continue;

        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec.  This
               is because the brk will follow the loader, and is not movable.  */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
                                -1, 0);
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
        }

        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            elf_prot,
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            bprm->fd,
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));

#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif

        if (!load_addr_set) {
            load_addr_set = 1;
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
                reloc_func_desc = load_bias;
            }
        }
        k = elf_ppnt->p_vaddr;
        if (k < start_code)
            start_code = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if (k > elf_bss)
            elf_bss = k;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
            end_code = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;
    }

    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    start_data += load_bias;
    end_data += load_bias;
    if (elf_interpreter) {
        if (interpreter_type & 1) {
            elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
        }
        else if (interpreter_type & 2) {
            elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
                                        &interp_load_addr);
        }
        reloc_func_desc = interp_load_addr;

        close(interpreter_fd);

        if (elf_entry == ~((abi_ulong)0UL)) {
            printf("Unable to load interpreter\n");
            free(elf_phdata);
            exit(-1);
        }
    }

    free(elf_phdata);

    if (qemu_log_enabled())
        load_symbols(&elf_ex, bprm->fd);

    if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
    info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);

#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;
#endif
    bprm->p = create_elf_tables(bprm->p,
                                bprm->argc,
                                bprm->envc,
                                &elf_ex,
                                load_addr, load_bias,
                                interp_load_addr,
                                (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
                                info);
    info->load_addr = reloc_func_desc;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->start_data = start_data;
    info->end_data = end_data;
    info->start_stack = bprm->p;

    /* Calling set_brk effectively mmaps the pages that we need for the bss
       and break sections */
    set_brk(elf_bss, elf_brk);

    padzero(elf_bss, elf_brk);

    printf("(start_brk) %x\n" , info->start_brk);
    printf("(end_code) %x\n" , info->end_code);
    printf("(start_code) %x\n" , info->start_code);
    printf("(end_data) %x\n" , info->end_data);
    printf("(start_stack) %x\n" , info->start_stack);
    printf("(brk) %x\n" , info->brk);

    if ( info->personality == PER_SVR4 )
    {
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh. */
        mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                                  MAP_FIXED | MAP_PRIVATE, -1, 0);
    }

    info->entry = elf_entry;

#ifdef USE_ELF_CORE_DUMP
    bprm->core_dump = &elf_core_dump;
#endif

    return 0;
}
#ifdef USE_ELF_CORE_DUMP

/*
 * Definitions to generate Intel SVR4-like core files.
 * These mostly have the same names as the SVR4 types with "target_elf_"
 * tacked on the front to prevent clashes with linux definitions,
 * and the typedef forms have been avoided.  This is mostly like
 * the SVR4 structure, but more Linuxy, with things that Linux does
 * not support and which gdb doesn't really use excluded.
 *
 * Fields we don't dump (their contents is zero) in linux-user qemu
 * are marked with XXX.
 *
 * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
 *
 * Porting ELF coredump for a target is a (quite) simple process.  First
 * you define USE_ELF_CORE_DUMP in the target ELF code (where init_thread()
 * for the target resides):
 *
 * #define USE_ELF_CORE_DUMP
 *
 * Next you define the type of register set used for dumping.  The ELF
 * specification says that it needs to be an array of elf_greg_t that has
 * size of ELF_NREG.
 *
 * typedef <target_regtype> target_elf_greg_t;
 * #define ELF_NREG <number of registers>
 * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
 *
 * Then define the following types to match the target types.  Actual types
 * can be found from the linux kernel (arch/<ARCH>/include/asm/posix_types.h):
 *
 * typedef <target_uid_type> target_uid_t;
 * typedef <target_gid_type> target_gid_t;
 * typedef <target_pid_type> target_pid_t;
 *
 * The last step is to implement a target-specific function that copies
 * registers from the given cpu into the register set just specified.
 * The prototype is:
 *
 * static void elf_core_copy_regs(target_elf_gregset_t *regs,
 *                                const CPUState *env);
 *
 * Parameters:
 *     regs - copy register values into here (allocated and zeroed by caller)
 *     env - copy registers from here
 *
 * Example for ARM target is provided in this file.
 */
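/*
 * Added for illustration (not part of the original source): putting the
 * steps above together for a hypothetical 16-register, 32-bit target, the
 * per-target section of this file would carry roughly the following.  The
 * register count, types and copy order are made up for this sketch; a real
 * port must match the target kernel's core-dump layout.
 *
 *   #define USE_ELF_CORE_DUMP
 *   #define ELF_NREG 16
 *   typedef uint32_t          target_elf_greg_t;
 *   typedef uint16_t          target_uid_t;
 *   typedef uint16_t          target_gid_t;
 *   typedef int32_t           target_pid_t;
 *   typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
 *
 *   static void elf_core_copy_regs(target_elf_gregset_t *regs,
 *                                  const CPUState *env)
 *   {
 *       int i;
 *       for (i = 0; i < ELF_NREG; i++) {
 *           (*regs)[i] = env->regs[i];   hypothetical 1:1 layout
 *       }
 *   }
 */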
/* An ELF note in memory */
struct memelfnote {
    const char *name;
    size_t     namesz;
    size_t     namesz_rounded;
    int        type;
    size_t     datasz;
    void       *data;
    size_t     notesz;
};

struct target_elf_siginfo {
    int  si_signo; /* signal number */
    int  si_code;  /* extra code */
    int  si_errno; /* errno */
};

struct target_elf_prstatus {
    struct target_elf_siginfo pr_info;      /* Info associated with signal */
    short               pr_cursig;          /* Current signal */
    target_ulong        pr_sigpend;         /* XXX */
    target_ulong        pr_sighold;         /* XXX */
    target_pid_t        pr_pid;
    target_pid_t        pr_ppid;
    target_pid_t        pr_pgrp;
    target_pid_t        pr_sid;
    struct target_timeval pr_utime;         /* XXX User time */
    struct target_timeval pr_stime;         /* XXX System time */
    struct target_timeval pr_cutime;        /* XXX Cumulative user time */
    struct target_timeval pr_cstime;        /* XXX Cumulative system time */
    target_elf_gregset_t pr_reg;            /* GP registers */
    int                 pr_fpvalid;         /* XXX */
};

#define ELF_PRARGSZ     (80) /* Number of chars for args */

struct target_elf_prpsinfo {
    char         pr_state;       /* numeric process state */
    char         pr_sname;       /* char for pr_state */
    char         pr_zomb;        /* zombie */
    char         pr_nice;        /* nice val */
    target_ulong pr_flag;        /* flags */
    target_uid_t pr_uid;
    target_gid_t pr_gid;
    target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
    char         pr_fname[16];           /* filename of executable */
    char         pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};

/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status {
    QTAILQ_ENTRY(elf_thread_status)  ets_link;
    struct target_elf_prstatus prstatus;   /* NT_PRSTATUS */
    elf_fpregset_t fpu;                    /* NT_PRFPREG */
    struct task_struct *thread;
    elf_fpxregset_t xfpu;                  /* ELF_CORE_XFPREG_TYPE */
    struct memelfnote notes[1];
    int num_notes;
};

struct elf_note_info {
    struct memelfnote   *notes;
    struct target_elf_prstatus *prstatus;  /* NT_PRSTATUS */
    struct target_elf_prpsinfo *psinfo;    /* NT_PRPSINFO */

    QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list;
    /*
     * Current version of ELF coredump doesn't support
     * dumping fp regs etc.
     */
    elf_fpregset_t *fpu;
    elf_fpxregset_t *xfpu;
    int thread_status_size;
    int notes_size;
    int numnote;
};

struct vm_area_struct {
    abi_ulong   vma_start;  /* start vaddr of memory region */
    abi_ulong   vma_end;    /* end vaddr of memory region */
    abi_ulong   vma_flags;  /* protection etc. flags for the region */
    QTAILQ_ENTRY(vm_area_struct) vma_link;
};

struct mm_struct {
    QTAILQ_HEAD(, vm_area_struct) mm_mmap;
    int mm_count;           /* number of mappings */
};

static struct mm_struct *vma_init(void);
static void vma_delete(struct mm_struct *);
static int vma_add_mapping(struct mm_struct *, abi_ulong,
                           abi_ulong, abi_ulong);
static int vma_get_mapping_count(const struct mm_struct *);
static struct vm_area_struct *vma_first(const struct mm_struct *);
static struct vm_area_struct *vma_next(struct vm_area_struct *);
static abi_ulong vma_dump_size(const struct vm_area_struct *);
static int vma_walker(void *priv, unsigned long start, unsigned long end,
                      unsigned long flags);

static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
static void fill_note(struct memelfnote *, const char *, int,
                      unsigned int, void *);
static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
static void fill_auxv_note(struct memelfnote *, const TaskState *);
static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
static size_t note_size(const struct memelfnote *);
static void free_note_info(struct elf_note_info *);
static int fill_note_info(struct elf_note_info *, long, const CPUState *);
static void fill_thread_info(struct elf_note_info *, const CPUState *);
static int core_dump_filename(const TaskState *, char *, size_t);

static int dump_write(int, const void *, size_t);
static int write_note(struct memelfnote *, int);
static int write_note_info(struct elf_note_info *, int);

#ifdef BSWAP_NEEDED
static void bswap_prstatus(struct target_elf_prstatus *);
static void bswap_psinfo(struct target_elf_prpsinfo *);

static void bswap_prstatus(struct target_elf_prstatus *prstatus)
{
    prstatus->pr_info.si_signo = tswapl(prstatus->pr_info.si_signo);
    prstatus->pr_info.si_code = tswapl(prstatus->pr_info.si_code);
    prstatus->pr_info.si_errno = tswapl(prstatus->pr_info.si_errno);
    prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
    prstatus->pr_sigpend = tswapl(prstatus->pr_sigpend);
    prstatus->pr_sighold = tswapl(prstatus->pr_sighold);
    prstatus->pr_pid = tswap32(prstatus->pr_pid);
    prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
    prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
    prstatus->pr_sid = tswap32(prstatus->pr_sid);
    /* cpu times are not filled, so we skip them */
    /* regs should be in correct format already */
    prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
}

static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
{
    psinfo->pr_flag = tswapl(psinfo->pr_flag);
    psinfo->pr_uid = tswap16(psinfo->pr_uid);
    psinfo->pr_gid = tswap16(psinfo->pr_gid);
    psinfo->pr_pid = tswap32(psinfo->pr_pid);
    psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
    psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
    psinfo->pr_sid = tswap32(psinfo->pr_sid);
}
#endif /* BSWAP_NEEDED */
/*
 * Minimal support for linux memory regions.  These are needed
 * when we are finding out what memory exactly belongs to the
 * emulated process.  No locks needed here, as long as the
 * thread that received the signal is stopped.
 */
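/*
 * Added usage sketch (not in the original): the mm_struct/vm_area_struct
 * helpers below are typically driven roughly like this when a core dump is
 * produced -- build the list of mappings, count and measure them, then
 * free the list:
 *
 *   struct mm_struct *mm = vma_init();
 *   walk_memory_regions(mm, vma_walker);
 *   segs = vma_get_mapping_count(mm);
 *   for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma))
 *       size += vma_dump_size(vma);
 *   vma_delete(mm);
 *
 * The exact call sequence lives in elf_core_dump(), which is outside the
 * excerpt shown here.
 */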
static struct mm_struct *vma_init(void)
{
    struct mm_struct *mm;

    if ((mm = qemu_malloc(sizeof (*mm))) == NULL)
        return (NULL);

    mm->mm_count = 0;
    QTAILQ_INIT(&mm->mm_mmap);

    return (mm);
}

static void vma_delete(struct mm_struct *mm)
{
    struct vm_area_struct *vma;

    while ((vma = vma_first(mm)) != NULL) {
        QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
        qemu_free(vma);
    }
    qemu_free(mm);
}

static int vma_add_mapping(struct mm_struct *mm, abi_ulong start,
                           abi_ulong end, abi_ulong flags)
{
    struct vm_area_struct *vma;

    if ((vma = qemu_mallocz(sizeof (*vma))) == NULL)
        return (-1);

    vma->vma_start = start;
    vma->vma_end = end;
    vma->vma_flags = flags;

    QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
    mm->mm_count++;

    return (0);
}

static struct vm_area_struct *vma_first(const struct mm_struct *mm)
{
    return (QTAILQ_FIRST(&mm->mm_mmap));
}

static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
{
    return (QTAILQ_NEXT(vma, vma_link));
}

static int vma_get_mapping_count(const struct mm_struct *mm)
{
    return (mm->mm_count);
}

/*
 * Calculate file (dump) size of given memory region.
 */
static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
{
    /* if we cannot even read the first page, skip it */
    if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
        return (0);

    /*
     * Usually we don't dump executable pages as they contain
     * non-writable code that debugger can read directly from
     * target library etc.  However, thread stacks are marked
     * also executable so we read in first page of given region
     * and check whether it contains elf header.  If there is
     * no elf header, we dump it.
     */
    if (vma->vma_flags & PROT_EXEC) {
        char page[TARGET_PAGE_SIZE];

        copy_from_user(page, vma->vma_start, sizeof (page));
        if ((page[EI_MAG0] == ELFMAG0) &&
            (page[EI_MAG1] == ELFMAG1) &&
            (page[EI_MAG2] == ELFMAG2) &&
            (page[EI_MAG3] == ELFMAG3)) {
            /*
             * Mappings are possibly from ELF binary.  Don't dump
             * them.
             */
            return (0);
        }
    }

    return (vma->vma_end - vma->vma_start);
}

static int vma_walker(void *priv, unsigned long start, unsigned long end,
                      unsigned long flags)
{
    struct mm_struct *mm = (struct mm_struct *)priv;

    /*
     * Don't dump anything that qemu has reserved for internal use.
     */
    if (flags & PAGE_RESERVED)
        return (0);

    vma_add_mapping(mm, start, end, flags);
    return (0);
}
static void fill_note(struct memelfnote *note, const char *name, int type,
                      unsigned int sz, void *data)
{
    unsigned int namesz;

    namesz = strlen(name) + 1;
    note->name = name;
    note->namesz = namesz;
    note->namesz_rounded = roundup(namesz, sizeof (int32_t));
    note->type = type;
    note->datasz = roundup(sz, sizeof (int32_t));
    note->data = data;

    /*
     * We calculate rounded up note size here as specified by
     * ELF document.
     */
    note->notesz = sizeof (struct elf_note) +
        note->namesz_rounded + note->datasz;
}

static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
                            uint32_t flags)
{
    (void) memset(elf, 0, sizeof(*elf));

    (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
    elf->e_ident[EI_CLASS] = ELF_CLASS;
    elf->e_ident[EI_DATA] = ELF_DATA;
    elf->e_ident[EI_VERSION] = EV_CURRENT;
    elf->e_ident[EI_OSABI] = ELF_OSABI;

    elf->e_type = ET_CORE;
    elf->e_machine = machine;
    elf->e_version = EV_CURRENT;
    elf->e_phoff = sizeof(struct elfhdr);
    elf->e_flags = flags;
    elf->e_ehsize = sizeof(struct elfhdr);
    elf->e_phentsize = sizeof(struct elf_phdr);
    elf->e_phnum = segs;
}

static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
{
    phdr->p_type = PT_NOTE;
    phdr->p_offset = offset;
    phdr->p_filesz = sz;
}

static size_t note_size(const struct memelfnote *note)
{
    return (note->notesz);
}

static void fill_prstatus(struct target_elf_prstatus *prstatus,
                          const TaskState *ts, int signr)
{
    (void) memset(prstatus, 0, sizeof (*prstatus));
    prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
    prstatus->pr_pid = ts->ts_tid;
    prstatus->pr_ppid = getppid();
    prstatus->pr_pgrp = getpgrp();
    prstatus->pr_sid = getsid(0);

#ifdef BSWAP_NEEDED
    bswap_prstatus(prstatus);
#endif
}

static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
{
    char *filename, *base_filename;
    unsigned int i, len;

    (void) memset(psinfo, 0, sizeof (*psinfo));

    len = ts->info->arg_end - ts->info->arg_start;
    if (len >= ELF_PRARGSZ)
        len = ELF_PRARGSZ - 1;
    if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
        return -EFAULT;
    for (i = 0; i < len; i++)
        if (psinfo->pr_psargs[i] == 0)
            psinfo->pr_psargs[i] = ' ';
    psinfo->pr_psargs[len] = 0;

    psinfo->pr_pid = getpid();
    psinfo->pr_ppid = getppid();
    psinfo->pr_pgrp = getpgrp();
    psinfo->pr_sid = getsid(0);
    psinfo->pr_uid = getuid();
    psinfo->pr_gid = getgid();

    filename = strdup(ts->bprm->filename);
    base_filename = strdup(basename(filename));
    (void) strncpy(psinfo->pr_fname, base_filename,
                   sizeof(psinfo->pr_fname));
    free(base_filename);
    free(filename);

#ifdef BSWAP_NEEDED
    bswap_psinfo(psinfo);
#endif
    return (0);
}
static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
{
    elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
    elf_addr_t orig_auxv = auxv;
    abi_ulong val;
    void *ptr;
    int i, len;

    /*
     * Auxiliary vector is stored in target process stack.  It contains
     * {type, value} pairs that we need to dump into note.  This is not
     * strictly necessary but we do it here for sake of completeness.
     */

    /* find out length of the vector, AT_NULL is terminator */
    i = 0;
    do {
        get_user_ual(val, auxv);
        i += 2;
        auxv += 2 * sizeof (elf_addr_t);
    } while (val != AT_NULL);
    len = i * sizeof (elf_addr_t);

    /* read in whole auxv vector and copy it to memelfnote */
    ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
    if (ptr != NULL) {
        fill_note(note, "CORE", NT_AUXV, len, ptr);
        unlock_user(ptr, auxv, len);
    }
}
/*
 * Constructs the name of the coredump file.  We have the following
 * convention:
 *
 *     qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
 *
 * Returns 0 in case of success, -1 otherwise (errno is set).
 */
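/*
 * Example (added for illustration, values hypothetical): dumping a binary
 * named "a.out" from pid 1234 on 2010-01-01 12:00:00 would produce a file
 * called "qemu_a.out_20100101-120000_1234.core" in the current directory.
 */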
2229 static int core_dump_filename(const TaskState
*ts
, char *buf
,
2233 char *filename
= NULL
;
2234 char *base_filename
= NULL
;
2238 assert(bufsize
>= PATH_MAX
);
2240 if (gettimeofday(&tv
, NULL
) < 0) {
2241 (void) fprintf(stderr
, "unable to get current timestamp: %s",
2246 filename
= strdup(ts
->bprm
->filename
);
2247 base_filename
= strdup(basename(filename
));
2248 (void) strftime(timestamp
, sizeof (timestamp
), "%Y%m%d-%H%M%S",
2249 localtime_r(&tv
.tv_sec
, &tm
));
2250 (void) snprintf(buf
, bufsize
, "qemu_%s_%s_%d.core",
2251 base_filename
, timestamp
, (int)getpid());
2252 free(base_filename
);
static int dump_write(int fd, const void *ptr, size_t size)
{
    const char *bufp = (const char *)ptr;
    ssize_t bytes_written, bytes_left;
    struct rlimit dumpsize;
    off_t pos;

    bytes_written = 0;
    getrlimit(RLIMIT_CORE, &dumpsize);
    if ((pos = lseek(fd, 0, SEEK_CUR)) == -1) {
        if (errno == ESPIPE) { /* not a seekable stream */
            bytes_left = size;
        } else {
            return pos;
        }
    } else {
        if (dumpsize.rlim_cur <= pos) {
            return -1;
        } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
            bytes_left = size;
        } else {
            size_t limit_left = dumpsize.rlim_cur - pos;

            bytes_left = limit_left >= size ? size : limit_left;
        }
    }

    /*
     * In normal conditions, a single write(2) should do but
     * in case of socket etc. this mechanism is more portable.
     */
    do {
        bytes_written = write(fd, bufp, bytes_left);
        if (bytes_written < 0) {
            if (errno == EINTR)
                continue;
            return (-1);
        } else if (bytes_written == 0) { /* eof */
            return (-1);
        }
        bufp += bytes_written;
        bytes_left -= bytes_written;
    } while (bytes_left > 0);

    return (0);
}

static int write_note(struct memelfnote *men, int fd)
{
    struct elf_note en;

    en.n_namesz = men->namesz;
    en.n_type = men->type;
    en.n_descsz = men->datasz;

    if (dump_write(fd, &en, sizeof(en)) != 0)
        return (-1);
    if (dump_write(fd, men->name, men->namesz_rounded) != 0)
        return (-1);
    if (dump_write(fd, men->data, men->datasz) != 0)
        return (-1);

    return (0);
}

static void fill_thread_info(struct elf_note_info *info, const CPUState *env)
{
    TaskState *ts = (TaskState *)env->opaque;
    struct elf_thread_status *ets;

    ets = qemu_mallocz(sizeof (*ets));
    ets->num_notes = 1; /* only prstatus is dumped */
    fill_prstatus(&ets->prstatus, ts, 0);
    elf_core_copy_regs(&ets->prstatus.pr_reg, env);
    fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
              &ets->prstatus);

    QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);

    info->notes_size += note_size(&ets->notes[0]);
}

static int fill_note_info(struct elf_note_info *info,
                          long signr, const CPUState *env)
{
#define NUMNOTES 3 /* prstatus, psinfo and auxv */
    CPUState *cpu = NULL;
    TaskState *ts = (TaskState *)env->opaque;
    int i;

    (void) memset(info, 0, sizeof (*info));

    QTAILQ_INIT(&info->thread_list);

    info->notes = qemu_mallocz(NUMNOTES * sizeof (struct memelfnote));
    if (info->notes == NULL)
        return (-ENOMEM);
    info->prstatus = qemu_mallocz(sizeof (*info->prstatus));
    if (info->prstatus == NULL)
        return (-ENOMEM);
    info->psinfo = qemu_mallocz(sizeof (*info->psinfo));
    if (info->psinfo == NULL)
        return (-ENOMEM);

    /*
     * First fill in status (and registers) of current thread
     * including process info & aux vector.
     */
    fill_prstatus(info->prstatus, ts, signr);
    elf_core_copy_regs(&info->prstatus->pr_reg, env);
    fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
              sizeof (*info->prstatus), info->prstatus);
    fill_psinfo(info->psinfo, ts);
    fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
              sizeof (*info->psinfo), info->psinfo);
    fill_auxv_note(&info->notes[2], ts);
    info->numnote = NUMNOTES;

    info->notes_size = 0;
    for (i = 0; i < info->numnote; i++)
        info->notes_size += note_size(&info->notes[i]);

    /* read and fill status of all threads */
    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        if (cpu == thread_env)
            continue; /* current thread was handled above */
        fill_thread_info(info, cpu);
    }

    return (0);
}

static void free_note_info(struct elf_note_info *info)
{
    struct elf_thread_status *ets;

    while (!QTAILQ_EMPTY(&info->thread_list)) {
        ets = QTAILQ_FIRST(&info->thread_list);
        QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
        qemu_free(ets);
    }

    qemu_free(info->prstatus);
    qemu_free(info->psinfo);
    qemu_free(info->notes);
}

static int write_note_info(struct elf_note_info *info, int fd)
{
    struct elf_thread_status *ets;
    int i, error = 0;

    /* write prstatus, psinfo and auxv for current thread */
    for (i = 0; i < info->numnote; i++)
        if ((error = write_note(&info->notes[i], fd)) != 0)
            return (error);

    /* write prstatus for each additional thread */
    for (ets = info->thread_list.tqh_first; ets != NULL;
         ets = ets->ets_link.tqe_next) {
        if ((error = write_note(&ets->notes[0], fd)) != 0)
            return (error);
    }

    return (0);
}

/*
 * Write out ELF coredump.
 *
 * See documentation of the ELF object file format in:
 * http://www.caldera.com/developers/devspecs/gabi41.pdf
 *
 * The coredump format used by Linux is the following:
 *
 * 0   +----------------------+         \
 *     | ELF header           | ET_CORE  |
 *     +----------------------+          |
 *     | ELF program headers  |          |--- headers
 *     | - NOTE section       |          |
 *     | - PT_LOAD sections   |          |
 *     +----------------------+         /
 *     | NOTEs:               |
 *     | - NT_PRSTATUS        |
 *     | - NT_PRPSINFO        |
 *     | - NT_AUXV            |
 *     +----------------------+ <-- aligned to target page
 *     | Process memory dump  |
 *     :                      :
 *     +----------------------+
 *
 * NT_PRSTATUS -> struct elf_prstatus (per thread)
 * NT_PRPSINFO -> struct elf_prpsinfo
 * NT_AUXV is an array of { type, value } pairs (see fill_auxv_note()).
 *
 * The format follows the System V format as closely as possible. Current
 * limitations of this version:
 *     - no floating point registers are dumped
 *
 * The function returns 0 in case of success, negative errno otherwise.
 *
 * TODO: make this work also during runtime: it should be possible to
 * force a coredump from a running process and then continue processing.
 * For example qemu could set up a SIGUSR2 handler (provided that the
 * target process hasn't registered a handler for it) that does the dump
 * when the signal is received.
 */
static int elf_core_dump(int signr, const CPUState *env)
{
    const TaskState *ts = (const TaskState *)env->opaque;
    struct vm_area_struct *vma = NULL;
    char corefile[PATH_MAX];
    struct elf_note_info info;
    struct elfhdr elf;
    struct elf_phdr phdr;
    struct rlimit dumpsize;
    struct mm_struct *mm = NULL;
    off_t offset = 0, data_offset = 0;
    int segs = 0;
    int fd = -1;

    errno = 0;
    getrlimit(RLIMIT_CORE, &dumpsize);
    if (dumpsize.rlim_cur == 0)
        return 0;

    if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
        return (-errno);

    if ((fd = open(corefile, O_WRONLY | O_CREAT,
                   S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
        return (-errno);

    /*
     * Walk through target process memory mappings and
     * set up a structure containing this information. After
     * this point vma_xxx functions can be used.
     */
    if ((mm = vma_init()) == NULL)
        goto out;

    walk_memory_regions(mm, vma_walker);
    segs = vma_get_mapping_count(mm);

    /*
     * Construct a valid coredump ELF header. We also
     * add one more segment for notes.
     */
    fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
    if (dump_write(fd, &elf, sizeof (elf)) != 0)
        goto out;

    /* fill in in-memory version of notes */
    if (fill_note_info(&info, signr, env) < 0)
        goto out;

    offset += sizeof (elf);                             /* elf header */
    offset += (segs + 1) * sizeof (struct elf_phdr);    /* program headers */

    /* write out notes program header */
    fill_elf_note_phdr(&phdr, info.notes_size, offset);

    offset += info.notes_size;
    if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
        goto out;

    /*
     * ELF specification wants data to start at page boundary so
     * we align it here.
     */
    offset = roundup(offset, ELF_EXEC_PAGESIZE);

    /*
     * Write program headers for memory regions mapped in
     * the target process.
     */
    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
        (void) memset(&phdr, 0, sizeof (phdr));

        phdr.p_type = PT_LOAD;
        phdr.p_offset = offset;
        phdr.p_vaddr = vma->vma_start;
        phdr.p_paddr = 0;
        phdr.p_filesz = vma_dump_size(vma);
        offset += phdr.p_filesz;
        phdr.p_memsz = vma->vma_end - vma->vma_start;
        phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
        if (vma->vma_flags & PROT_WRITE)
            phdr.p_flags |= PF_W;
        if (vma->vma_flags & PROT_EXEC)
            phdr.p_flags |= PF_X;
        phdr.p_align = ELF_EXEC_PAGESIZE;

        dump_write(fd, &phdr, sizeof (phdr));
    }

    /*
     * Next we write notes just after program headers. No
     * alignment needed here.
     */
    if (write_note_info(&info, fd) < 0)
        goto out;

    /* align data to page boundary */
    data_offset = lseek(fd, 0, SEEK_CUR);
    data_offset = TARGET_PAGE_ALIGN(data_offset);
    if (lseek(fd, data_offset, SEEK_SET) != data_offset)
        goto out;

    /*
     * Finally we can dump process memory into the corefile as well.
     */
    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
        abi_ulong addr;
        abi_ulong end;

        end = vma->vma_start + vma_dump_size(vma);

        for (addr = vma->vma_start; addr < end;
             addr += TARGET_PAGE_SIZE) {
            char page[TARGET_PAGE_SIZE];
            int error;

            /*
             * Read in page from target process memory and
             * write it to coredump file.
             */
            error = copy_from_user(page, addr, sizeof (page));
            if (error != 0) {
                (void) fprintf(stderr, "unable to dump " TARGET_FMT_lx "\n",
                               addr);
                errno = -error;
                goto out;
            }
            if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
                goto out;
        }
    }

out:
    free_note_info(&info);
    if (mm != NULL)
        vma_delete(mm);
    (void) close(fd);

    if (errno != 0)
        return (-errno);
    return (0);
}

#endif /* USE_ELF_CORE_DUMP */

static int load_aout_interp(void * exptr, int interp_fd)
{
    printf("a.out interpreter not yet supported\n");
    return(0);
}

void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);
}