1 /* This is the Linux kernel elf-loading code, ported into user space */
11 #include <sys/resource.h>
28 #define ELF_OSABI ELFOSABI_SYSV
30 /* from personality.h */
33 * Flags for bug emulation.
35 * These occupy the top three bytes.
38 ADDR_NO_RANDOMIZE
= 0x0040000, /* disable randomization of VA space */
39 FDPIC_FUNCPTRS
= 0x0080000, /* userspace function ptrs point to descriptors
42 MMAP_PAGE_ZERO
= 0x0100000,
43 ADDR_COMPAT_LAYOUT
= 0x0200000,
44 READ_IMPLIES_EXEC
= 0x0400000,
45 ADDR_LIMIT_32BIT
= 0x0800000,
46 SHORT_INODE
= 0x1000000,
47 WHOLE_SECONDS
= 0x2000000,
48 STICKY_TIMEOUTS
= 0x4000000,
49 ADDR_LIMIT_3GB
= 0x8000000,
55 * These go in the low byte. Avoid using the top bit, it will
56 * conflict with error returns.
60 PER_LINUX_32BIT
= 0x0000 | ADDR_LIMIT_32BIT
,
61 PER_LINUX_FDPIC
= 0x0000 | FDPIC_FUNCPTRS
,
62 PER_SVR4
= 0x0001 | STICKY_TIMEOUTS
| MMAP_PAGE_ZERO
,
63 PER_SVR3
= 0x0002 | STICKY_TIMEOUTS
| SHORT_INODE
,
64 PER_SCOSVR3
= 0x0003 | STICKY_TIMEOUTS
|
65 WHOLE_SECONDS
| SHORT_INODE
,
66 PER_OSR5
= 0x0003 | STICKY_TIMEOUTS
| WHOLE_SECONDS
,
67 PER_WYSEV386
= 0x0004 | STICKY_TIMEOUTS
| SHORT_INODE
,
68 PER_ISCR4
= 0x0005 | STICKY_TIMEOUTS
,
70 PER_SUNOS
= 0x0006 | STICKY_TIMEOUTS
,
71 PER_XENIX
= 0x0007 | STICKY_TIMEOUTS
| SHORT_INODE
,
73 PER_LINUX32_3GB
= 0x0008 | ADDR_LIMIT_3GB
,
74 PER_IRIX32
= 0x0009 | STICKY_TIMEOUTS
,/* IRIX5 32-bit */
75 PER_IRIXN32
= 0x000a | STICKY_TIMEOUTS
,/* IRIX6 new 32-bit */
76 PER_IRIX64
= 0x000b | STICKY_TIMEOUTS
,/* IRIX6 64-bit */
78 PER_SOLARIS
= 0x000d | STICKY_TIMEOUTS
,
79 PER_UW7
= 0x000e | STICKY_TIMEOUTS
| MMAP_PAGE_ZERO
,
80 PER_OSF4
= 0x000f, /* OSF/1 v4 */
86 * Return the base personality without flags.
88 #define personality(pers) (pers & PER_MASK)
90 /* this flag is uneffective under linux too, should be deleted */
92 #define MAP_DENYWRITE 0
95 /* should probably go in elf.h */
100 typedef target_ulong target_elf_greg_t
;
102 typedef uint16_t target_uid_t
;
103 typedef uint16_t target_gid_t
;
105 typedef uint32_t target_uid_t
;
106 typedef uint32_t target_gid_t
;
108 typedef int32_t target_pid_t
;
112 #define ELF_PLATFORM get_elf_platform()
114 static const char *get_elf_platform(void)
116 static char elf_platform
[] = "i386";
117 int family
= (thread_env
->cpuid_version
>> 8) & 0xff;
121 elf_platform
[1] = '0' + family
;
125 #define ELF_HWCAP get_elf_hwcap()
127 static uint32_t get_elf_hwcap(void)
129 return thread_env
->cpuid_features
;
133 #define ELF_START_MMAP 0x2aaaaab000ULL
134 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
136 #define ELF_CLASS ELFCLASS64
137 #define ELF_DATA ELFDATA2LSB
138 #define ELF_ARCH EM_X86_64
140 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
143 regs
->rsp
= infop
->start_stack
;
144 regs
->rip
= infop
->entry
;
148 typedef target_elf_greg_t target_elf_gregset_t
[ELF_NREG
];
151 * Note that ELF_NREG should be 29 as there should be place for
152 * TRAPNO and ERR "registers" as well but linux doesn't dump
155 * See linux kernel: arch/x86/include/asm/elf.h
157 static void elf_core_copy_regs(target_elf_gregset_t
*regs
, const CPUState
*env
)
159 (*regs
)[0] = env
->regs
[15];
160 (*regs
)[1] = env
->regs
[14];
161 (*regs
)[2] = env
->regs
[13];
162 (*regs
)[3] = env
->regs
[12];
163 (*regs
)[4] = env
->regs
[R_EBP
];
164 (*regs
)[5] = env
->regs
[R_EBX
];
165 (*regs
)[6] = env
->regs
[11];
166 (*regs
)[7] = env
->regs
[10];
167 (*regs
)[8] = env
->regs
[9];
168 (*regs
)[9] = env
->regs
[8];
169 (*regs
)[10] = env
->regs
[R_EAX
];
170 (*regs
)[11] = env
->regs
[R_ECX
];
171 (*regs
)[12] = env
->regs
[R_EDX
];
172 (*regs
)[13] = env
->regs
[R_ESI
];
173 (*regs
)[14] = env
->regs
[R_EDI
];
174 (*regs
)[15] = env
->regs
[R_EAX
]; /* XXX */
175 (*regs
)[16] = env
->eip
;
176 (*regs
)[17] = env
->segs
[R_CS
].selector
& 0xffff;
177 (*regs
)[18] = env
->eflags
;
178 (*regs
)[19] = env
->regs
[R_ESP
];
179 (*regs
)[20] = env
->segs
[R_SS
].selector
& 0xffff;
180 (*regs
)[21] = env
->segs
[R_FS
].selector
& 0xffff;
181 (*regs
)[22] = env
->segs
[R_GS
].selector
& 0xffff;
182 (*regs
)[23] = env
->segs
[R_DS
].selector
& 0xffff;
183 (*regs
)[24] = env
->segs
[R_ES
].selector
& 0xffff;
184 (*regs
)[25] = env
->segs
[R_FS
].selector
& 0xffff;
185 (*regs
)[26] = env
->segs
[R_GS
].selector
& 0xffff;
190 #define ELF_START_MMAP 0x80000000
193 * This is used to ensure we don't load something for the wrong architecture.
195 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
198 * These are used to set parameters in the core dumps.
200 #define ELF_CLASS ELFCLASS32
201 #define ELF_DATA ELFDATA2LSB
202 #define ELF_ARCH EM_386
204 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
206 regs
->esp
= infop
->start_stack
;
207 regs
->eip
= infop
->entry
;
209 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
210 starts %edx contains a pointer to a function which might be
211 registered using `atexit'. This provides a mean for the
212 dynamic linker to call DT_FINI functions for shared libraries
213 that have been loaded before the code runs.
215 A value of 0 tells we have no such handler. */
220 typedef target_elf_greg_t target_elf_gregset_t
[ELF_NREG
];
223 * Note that ELF_NREG should be 19 as there should be place for
224 * TRAPNO and ERR "registers" as well but linux doesn't dump
227 * See linux kernel: arch/x86/include/asm/elf.h
229 static void elf_core_copy_regs(target_elf_gregset_t
*regs
, const CPUState
*env
)
231 (*regs
)[0] = env
->regs
[R_EBX
];
232 (*regs
)[1] = env
->regs
[R_ECX
];
233 (*regs
)[2] = env
->regs
[R_EDX
];
234 (*regs
)[3] = env
->regs
[R_ESI
];
235 (*regs
)[4] = env
->regs
[R_EDI
];
236 (*regs
)[5] = env
->regs
[R_EBP
];
237 (*regs
)[6] = env
->regs
[R_EAX
];
238 (*regs
)[7] = env
->segs
[R_DS
].selector
& 0xffff;
239 (*regs
)[8] = env
->segs
[R_ES
].selector
& 0xffff;
240 (*regs
)[9] = env
->segs
[R_FS
].selector
& 0xffff;
241 (*regs
)[10] = env
->segs
[R_GS
].selector
& 0xffff;
242 (*regs
)[11] = env
->regs
[R_EAX
]; /* XXX */
243 (*regs
)[12] = env
->eip
;
244 (*regs
)[13] = env
->segs
[R_CS
].selector
& 0xffff;
245 (*regs
)[14] = env
->eflags
;
246 (*regs
)[15] = env
->regs
[R_ESP
];
247 (*regs
)[16] = env
->segs
[R_SS
].selector
& 0xffff;
251 #define USE_ELF_CORE_DUMP
252 #define ELF_EXEC_PAGESIZE 4096
258 #define ELF_START_MMAP 0x80000000
260 #define elf_check_arch(x) ( (x) == EM_ARM )
262 #define ELF_CLASS ELFCLASS32
263 #ifdef TARGET_WORDS_BIGENDIAN
264 #define ELF_DATA ELFDATA2MSB
266 #define ELF_DATA ELFDATA2LSB
268 #define ELF_ARCH EM_ARM
270 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
272 abi_long stack
= infop
->start_stack
;
273 memset(regs
, 0, sizeof(*regs
));
274 regs
->ARM_cpsr
= 0x10;
275 if (infop
->entry
& 1)
276 regs
->ARM_cpsr
|= CPSR_T
;
277 regs
->ARM_pc
= infop
->entry
& 0xfffffffe;
278 regs
->ARM_sp
= infop
->start_stack
;
279 /* FIXME - what to for failure of get_user()? */
280 get_user_ual(regs
->ARM_r2
, stack
+ 8); /* envp */
281 get_user_ual(regs
->ARM_r1
, stack
+ 4); /* envp */
282 /* XXX: it seems that r0 is zeroed after ! */
284 /* For uClinux PIC binaries. */
285 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
286 regs
->ARM_r10
= infop
->start_data
;
290 typedef target_elf_greg_t target_elf_gregset_t
[ELF_NREG
];
292 static void elf_core_copy_regs(target_elf_gregset_t
*regs
, const CPUState
*env
)
294 (*regs
)[0] = tswapl(env
->regs
[0]);
295 (*regs
)[1] = tswapl(env
->regs
[1]);
296 (*regs
)[2] = tswapl(env
->regs
[2]);
297 (*regs
)[3] = tswapl(env
->regs
[3]);
298 (*regs
)[4] = tswapl(env
->regs
[4]);
299 (*regs
)[5] = tswapl(env
->regs
[5]);
300 (*regs
)[6] = tswapl(env
->regs
[6]);
301 (*regs
)[7] = tswapl(env
->regs
[7]);
302 (*regs
)[8] = tswapl(env
->regs
[8]);
303 (*regs
)[9] = tswapl(env
->regs
[9]);
304 (*regs
)[10] = tswapl(env
->regs
[10]);
305 (*regs
)[11] = tswapl(env
->regs
[11]);
306 (*regs
)[12] = tswapl(env
->regs
[12]);
307 (*regs
)[13] = tswapl(env
->regs
[13]);
308 (*regs
)[14] = tswapl(env
->regs
[14]);
309 (*regs
)[15] = tswapl(env
->regs
[15]);
311 (*regs
)[16] = tswapl(cpsr_read((CPUState
*)env
));
312 (*regs
)[17] = tswapl(env
->regs
[0]); /* XXX */
315 #define USE_ELF_CORE_DUMP
316 #define ELF_EXEC_PAGESIZE 4096
320 ARM_HWCAP_ARM_SWP
= 1 << 0,
321 ARM_HWCAP_ARM_HALF
= 1 << 1,
322 ARM_HWCAP_ARM_THUMB
= 1 << 2,
323 ARM_HWCAP_ARM_26BIT
= 1 << 3,
324 ARM_HWCAP_ARM_FAST_MULT
= 1 << 4,
325 ARM_HWCAP_ARM_FPA
= 1 << 5,
326 ARM_HWCAP_ARM_VFP
= 1 << 6,
327 ARM_HWCAP_ARM_EDSP
= 1 << 7,
328 ARM_HWCAP_ARM_JAVA
= 1 << 8,
329 ARM_HWCAP_ARM_IWMMXT
= 1 << 9,
330 ARM_HWCAP_ARM_THUMBEE
= 1 << 10,
331 ARM_HWCAP_ARM_NEON
= 1 << 11,
332 ARM_HWCAP_ARM_VFPv3
= 1 << 12,
333 ARM_HWCAP_ARM_VFPv3D16
= 1 << 13,
336 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
337 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
338 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP \
339 | ARM_HWCAP_ARM_NEON | ARM_HWCAP_ARM_VFPv3 )
344 #ifdef TARGET_SPARC64
346 #define ELF_START_MMAP 0x80000000
349 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
351 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
354 #define ELF_CLASS ELFCLASS64
355 #define ELF_DATA ELFDATA2MSB
356 #define ELF_ARCH EM_SPARCV9
358 #define STACK_BIAS 2047
360 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
365 regs
->pc
= infop
->entry
;
366 regs
->npc
= regs
->pc
+ 4;
369 regs
->u_regs
[14] = infop
->start_stack
- 16 * 4;
371 if (personality(infop
->personality
) == PER_LINUX32
)
372 regs
->u_regs
[14] = infop
->start_stack
- 16 * 4;
374 regs
->u_regs
[14] = infop
->start_stack
- 16 * 8 - STACK_BIAS
;
379 #define ELF_START_MMAP 0x80000000
381 #define elf_check_arch(x) ( (x) == EM_SPARC )
383 #define ELF_CLASS ELFCLASS32
384 #define ELF_DATA ELFDATA2MSB
385 #define ELF_ARCH EM_SPARC
387 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
390 regs
->pc
= infop
->entry
;
391 regs
->npc
= regs
->pc
+ 4;
393 regs
->u_regs
[14] = infop
->start_stack
- 16 * 4;
401 #define ELF_START_MMAP 0x80000000
403 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
405 #define elf_check_arch(x) ( (x) == EM_PPC64 )
407 #define ELF_CLASS ELFCLASS64
411 #define elf_check_arch(x) ( (x) == EM_PPC )
413 #define ELF_CLASS ELFCLASS32
417 #ifdef TARGET_WORDS_BIGENDIAN
418 #define ELF_DATA ELFDATA2MSB
420 #define ELF_DATA ELFDATA2LSB
422 #define ELF_ARCH EM_PPC
424 /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
425 See arch/powerpc/include/asm/cputable.h. */
427 QEMU_PPC_FEATURE_32
= 0x80000000,
428 QEMU_PPC_FEATURE_64
= 0x40000000,
429 QEMU_PPC_FEATURE_601_INSTR
= 0x20000000,
430 QEMU_PPC_FEATURE_HAS_ALTIVEC
= 0x10000000,
431 QEMU_PPC_FEATURE_HAS_FPU
= 0x08000000,
432 QEMU_PPC_FEATURE_HAS_MMU
= 0x04000000,
433 QEMU_PPC_FEATURE_HAS_4xxMAC
= 0x02000000,
434 QEMU_PPC_FEATURE_UNIFIED_CACHE
= 0x01000000,
435 QEMU_PPC_FEATURE_HAS_SPE
= 0x00800000,
436 QEMU_PPC_FEATURE_HAS_EFP_SINGLE
= 0x00400000,
437 QEMU_PPC_FEATURE_HAS_EFP_DOUBLE
= 0x00200000,
438 QEMU_PPC_FEATURE_NO_TB
= 0x00100000,
439 QEMU_PPC_FEATURE_POWER4
= 0x00080000,
440 QEMU_PPC_FEATURE_POWER5
= 0x00040000,
441 QEMU_PPC_FEATURE_POWER5_PLUS
= 0x00020000,
442 QEMU_PPC_FEATURE_CELL
= 0x00010000,
443 QEMU_PPC_FEATURE_BOOKE
= 0x00008000,
444 QEMU_PPC_FEATURE_SMT
= 0x00004000,
445 QEMU_PPC_FEATURE_ICACHE_SNOOP
= 0x00002000,
446 QEMU_PPC_FEATURE_ARCH_2_05
= 0x00001000,
447 QEMU_PPC_FEATURE_PA6T
= 0x00000800,
448 QEMU_PPC_FEATURE_HAS_DFP
= 0x00000400,
449 QEMU_PPC_FEATURE_POWER6_EXT
= 0x00000200,
450 QEMU_PPC_FEATURE_ARCH_2_06
= 0x00000100,
451 QEMU_PPC_FEATURE_HAS_VSX
= 0x00000080,
452 QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT
= 0x00000040,
454 QEMU_PPC_FEATURE_TRUE_LE
= 0x00000002,
455 QEMU_PPC_FEATURE_PPC_LE
= 0x00000001,
458 #define ELF_HWCAP get_elf_hwcap()
460 static uint32_t get_elf_hwcap(void)
462 CPUState
*e
= thread_env
;
463 uint32_t features
= 0;
465 /* We don't have to be terribly complete here; the high points are
466 Altivec/FP/SPE support. Anything else is just a bonus. */
467 #define GET_FEATURE(flag, feature) \
468 do {if (e->insns_flags & flag) features |= feature; } while(0)
469 GET_FEATURE(PPC_64B
, QEMU_PPC_FEATURE_64
);
470 GET_FEATURE(PPC_FLOAT
, QEMU_PPC_FEATURE_HAS_FPU
);
471 GET_FEATURE(PPC_ALTIVEC
, QEMU_PPC_FEATURE_HAS_ALTIVEC
);
472 GET_FEATURE(PPC_SPE
, QEMU_PPC_FEATURE_HAS_SPE
);
473 GET_FEATURE(PPC_SPE_SINGLE
, QEMU_PPC_FEATURE_HAS_EFP_SINGLE
);
474 GET_FEATURE(PPC_SPE_DOUBLE
, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE
);
475 GET_FEATURE(PPC_BOOKE
, QEMU_PPC_FEATURE_BOOKE
);
476 GET_FEATURE(PPC_405_MAC
, QEMU_PPC_FEATURE_HAS_4xxMAC
);
483 * We need to put in some extra aux table entries to tell glibc what
484 * the cache block size is, so it can use the dcbz instruction safely.
486 #define AT_DCACHEBSIZE 19
487 #define AT_ICACHEBSIZE 20
488 #define AT_UCACHEBSIZE 21
489 /* A special ignored type value for PPC, for glibc compatibility. */
490 #define AT_IGNOREPPC 22
492 * The requirements here are:
493 * - keep the final alignment of sp (sp & 0xf)
494 * - make sure the 32-bit value at the first 16 byte aligned position of
495 * AUXV is greater than 16 for glibc compatibility.
496 * AT_IGNOREPPC is used for that.
497 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
498 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
500 #define DLINFO_ARCH_ITEMS 5
501 #define ARCH_DLINFO \
503 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
504 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
505 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
507 * Now handle glibc compatibility. \
509 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
510 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
513 static inline void init_thread(struct target_pt_regs
*_regs
, struct image_info
*infop
)
515 _regs
->gpr
[1] = infop
->start_stack
;
516 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
517 _regs
->gpr
[2] = ldq_raw(infop
->entry
+ 8) + infop
->load_addr
;
518 infop
->entry
= ldq_raw(infop
->entry
) + infop
->load_addr
;
520 _regs
->nip
= infop
->entry
;
523 /* See linux kernel: arch/powerpc/include/asm/elf.h. */
525 typedef target_elf_greg_t target_elf_gregset_t
[ELF_NREG
];
527 static void elf_core_copy_regs(target_elf_gregset_t
*regs
, const CPUState
*env
)
530 target_ulong ccr
= 0;
532 for (i
= 0; i
< ARRAY_SIZE(env
->gpr
); i
++) {
533 (*regs
)[i
] = tswapl(env
->gpr
[i
]);
536 (*regs
)[32] = tswapl(env
->nip
);
537 (*regs
)[33] = tswapl(env
->msr
);
538 (*regs
)[35] = tswapl(env
->ctr
);
539 (*regs
)[36] = tswapl(env
->lr
);
540 (*regs
)[37] = tswapl(env
->xer
);
542 for (i
= 0; i
< ARRAY_SIZE(env
->crf
); i
++) {
543 ccr
|= env
->crf
[i
] << (32 - ((i
+ 1) * 4));
545 (*regs
)[38] = tswapl(ccr
);
548 #define USE_ELF_CORE_DUMP
549 #define ELF_EXEC_PAGESIZE 4096
555 #define ELF_START_MMAP 0x80000000
557 #define elf_check_arch(x) ( (x) == EM_MIPS )
560 #define ELF_CLASS ELFCLASS64
562 #define ELF_CLASS ELFCLASS32
564 #ifdef TARGET_WORDS_BIGENDIAN
565 #define ELF_DATA ELFDATA2MSB
567 #define ELF_DATA ELFDATA2LSB
569 #define ELF_ARCH EM_MIPS
571 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
573 regs
->cp0_status
= 2 << CP0St_KSU
;
574 regs
->cp0_epc
= infop
->entry
;
575 regs
->regs
[29] = infop
->start_stack
;
578 /* See linux kernel: arch/mips/include/asm/elf.h. */
580 typedef target_elf_greg_t target_elf_gregset_t
[ELF_NREG
];
582 /* See linux kernel: arch/mips/include/asm/reg.h. */
589 TARGET_EF_R26
= TARGET_EF_R0
+ 26,
590 TARGET_EF_R27
= TARGET_EF_R0
+ 27,
591 TARGET_EF_LO
= TARGET_EF_R0
+ 32,
592 TARGET_EF_HI
= TARGET_EF_R0
+ 33,
593 TARGET_EF_CP0_EPC
= TARGET_EF_R0
+ 34,
594 TARGET_EF_CP0_BADVADDR
= TARGET_EF_R0
+ 35,
595 TARGET_EF_CP0_STATUS
= TARGET_EF_R0
+ 36,
596 TARGET_EF_CP0_CAUSE
= TARGET_EF_R0
+ 37
599 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */
600 static void elf_core_copy_regs(target_elf_gregset_t
*regs
, const CPUState
*env
)
604 for (i
= 0; i
< TARGET_EF_R0
; i
++) {
607 (*regs
)[TARGET_EF_R0
] = 0;
609 for (i
= 1; i
< ARRAY_SIZE(env
->active_tc
.gpr
); i
++) {
610 (*regs
)[TARGET_EF_R0
+ i
] = tswapl(env
->active_tc
.gpr
[i
]);
613 (*regs
)[TARGET_EF_R26
] = 0;
614 (*regs
)[TARGET_EF_R27
] = 0;
615 (*regs
)[TARGET_EF_LO
] = tswapl(env
->active_tc
.LO
[0]);
616 (*regs
)[TARGET_EF_HI
] = tswapl(env
->active_tc
.HI
[0]);
617 (*regs
)[TARGET_EF_CP0_EPC
] = tswapl(env
->active_tc
.PC
);
618 (*regs
)[TARGET_EF_CP0_BADVADDR
] = tswapl(env
->CP0_BadVAddr
);
619 (*regs
)[TARGET_EF_CP0_STATUS
] = tswapl(env
->CP0_Status
);
620 (*regs
)[TARGET_EF_CP0_CAUSE
] = tswapl(env
->CP0_Cause
);
623 #define USE_ELF_CORE_DUMP
624 #define ELF_EXEC_PAGESIZE 4096
626 #endif /* TARGET_MIPS */
628 #ifdef TARGET_MICROBLAZE
630 #define ELF_START_MMAP 0x80000000
632 #define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)
634 #define ELF_CLASS ELFCLASS32
635 #define ELF_DATA ELFDATA2MSB
636 #define ELF_ARCH EM_MICROBLAZE
638 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
640 regs
->pc
= infop
->entry
;
641 regs
->r1
= infop
->start_stack
;
645 #define ELF_EXEC_PAGESIZE 4096
647 #define USE_ELF_CORE_DUMP
649 typedef target_elf_greg_t target_elf_gregset_t
[ELF_NREG
];
651 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */
652 static void elf_core_copy_regs(target_elf_gregset_t
*regs
, const CPUState
*env
)
656 for (i
= 0; i
< 32; i
++) {
657 (*regs
)[pos
++] = tswapl(env
->regs
[i
]);
660 for (i
= 0; i
< 6; i
++) {
661 (*regs
)[pos
++] = tswapl(env
->sregs
[i
]);
665 #endif /* TARGET_MICROBLAZE */
669 #define ELF_START_MMAP 0x80000000
671 #define elf_check_arch(x) ( (x) == EM_SH )
673 #define ELF_CLASS ELFCLASS32
674 #define ELF_DATA ELFDATA2LSB
675 #define ELF_ARCH EM_SH
677 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
679 /* Check other registers XXXXX */
680 regs
->pc
= infop
->entry
;
681 regs
->regs
[15] = infop
->start_stack
;
684 /* See linux kernel: arch/sh/include/asm/elf.h. */
686 typedef target_elf_greg_t target_elf_gregset_t
[ELF_NREG
];
688 /* See linux kernel: arch/sh/include/asm/ptrace.h. */
694 TARGET_REG_MACH
= 20,
695 TARGET_REG_MACL
= 21,
696 TARGET_REG_SYSCALL
= 22
699 static inline void elf_core_copy_regs(target_elf_gregset_t
*regs
, const CPUState
*env
)
703 for (i
= 0; i
< 16; i
++) {
704 (*regs
[i
]) = tswapl(env
->gregs
[i
]);
707 (*regs
)[TARGET_REG_PC
] = tswapl(env
->pc
);
708 (*regs
)[TARGET_REG_PR
] = tswapl(env
->pr
);
709 (*regs
)[TARGET_REG_SR
] = tswapl(env
->sr
);
710 (*regs
)[TARGET_REG_GBR
] = tswapl(env
->gbr
);
711 (*regs
)[TARGET_REG_MACH
] = tswapl(env
->mach
);
712 (*regs
)[TARGET_REG_MACL
] = tswapl(env
->macl
);
713 (*regs
)[TARGET_REG_SYSCALL
] = 0; /* FIXME */
716 #define USE_ELF_CORE_DUMP
717 #define ELF_EXEC_PAGESIZE 4096
723 #define ELF_START_MMAP 0x80000000
725 #define elf_check_arch(x) ( (x) == EM_CRIS )
727 #define ELF_CLASS ELFCLASS32
728 #define ELF_DATA ELFDATA2LSB
729 #define ELF_ARCH EM_CRIS
731 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
733 regs
->erp
= infop
->entry
;
736 #define ELF_EXEC_PAGESIZE 8192
742 #define ELF_START_MMAP 0x80000000
744 #define elf_check_arch(x) ( (x) == EM_68K )
746 #define ELF_CLASS ELFCLASS32
747 #define ELF_DATA ELFDATA2MSB
748 #define ELF_ARCH EM_68K
750 /* ??? Does this need to do anything?
751 #define ELF_PLAT_INIT(_r) */
753 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
755 regs
->usp
= infop
->start_stack
;
757 regs
->pc
= infop
->entry
;
760 /* See linux kernel: arch/m68k/include/asm/elf.h. */
762 typedef target_elf_greg_t target_elf_gregset_t
[ELF_NREG
];
764 static void elf_core_copy_regs(target_elf_gregset_t
*regs
, const CPUState
*env
)
766 (*regs
)[0] = tswapl(env
->dregs
[1]);
767 (*regs
)[1] = tswapl(env
->dregs
[2]);
768 (*regs
)[2] = tswapl(env
->dregs
[3]);
769 (*regs
)[3] = tswapl(env
->dregs
[4]);
770 (*regs
)[4] = tswapl(env
->dregs
[5]);
771 (*regs
)[5] = tswapl(env
->dregs
[6]);
772 (*regs
)[6] = tswapl(env
->dregs
[7]);
773 (*regs
)[7] = tswapl(env
->aregs
[0]);
774 (*regs
)[8] = tswapl(env
->aregs
[1]);
775 (*regs
)[9] = tswapl(env
->aregs
[2]);
776 (*regs
)[10] = tswapl(env
->aregs
[3]);
777 (*regs
)[11] = tswapl(env
->aregs
[4]);
778 (*regs
)[12] = tswapl(env
->aregs
[5]);
779 (*regs
)[13] = tswapl(env
->aregs
[6]);
780 (*regs
)[14] = tswapl(env
->dregs
[0]);
781 (*regs
)[15] = tswapl(env
->aregs
[7]);
782 (*regs
)[16] = tswapl(env
->dregs
[0]); /* FIXME: orig_d0 */
783 (*regs
)[17] = tswapl(env
->sr
);
784 (*regs
)[18] = tswapl(env
->pc
);
785 (*regs
)[19] = 0; /* FIXME: regs->format | regs->vector */
788 #define USE_ELF_CORE_DUMP
789 #define ELF_EXEC_PAGESIZE 8192
795 #define ELF_START_MMAP (0x30000000000ULL)
797 #define elf_check_arch(x) ( (x) == ELF_ARCH )
799 #define ELF_CLASS ELFCLASS64
800 #define ELF_DATA ELFDATA2MSB
801 #define ELF_ARCH EM_ALPHA
803 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
805 regs
->pc
= infop
->entry
;
807 regs
->usp
= infop
->start_stack
;
810 #define ELF_EXEC_PAGESIZE 8192
812 #endif /* TARGET_ALPHA */
815 #define ELF_PLATFORM (NULL)
824 #define ELF_CLASS ELFCLASS32
826 #define bswaptls(ptr) bswap32s(ptr)
833 unsigned int a_info
; /* Use macros N_MAGIC, etc for access */
834 unsigned int a_text
; /* length of text, in bytes */
835 unsigned int a_data
; /* length of data, in bytes */
836 unsigned int a_bss
; /* length of uninitialized data area, in bytes */
837 unsigned int a_syms
; /* length of symbol table data in file, in bytes */
838 unsigned int a_entry
; /* start address */
839 unsigned int a_trsize
; /* length of relocation info for text, in bytes */
840 unsigned int a_drsize
; /* length of relocation info for data, in bytes */
844 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
850 /* max code+data+bss space allocated to elf interpreter */
851 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
853 /* max code+data+bss+brk space allocated to ET_DYN executables */
854 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
856 /* Necessary parameters */
857 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
858 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
859 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
861 #define INTERPRETER_NONE 0
862 #define INTERPRETER_AOUT 1
863 #define INTERPRETER_ELF 2
865 #define DLINFO_ITEMS 12
867 static inline void memcpy_fromfs(void * to
, const void * from
, unsigned long n
)
872 static int load_aout_interp(void * exptr
, int interp_fd
);
875 static void bswap_ehdr(struct elfhdr
*ehdr
)
877 bswap16s(&ehdr
->e_type
); /* Object file type */
878 bswap16s(&ehdr
->e_machine
); /* Architecture */
879 bswap32s(&ehdr
->e_version
); /* Object file version */
880 bswaptls(&ehdr
->e_entry
); /* Entry point virtual address */
881 bswaptls(&ehdr
->e_phoff
); /* Program header table file offset */
882 bswaptls(&ehdr
->e_shoff
); /* Section header table file offset */
883 bswap32s(&ehdr
->e_flags
); /* Processor-specific flags */
884 bswap16s(&ehdr
->e_ehsize
); /* ELF header size in bytes */
885 bswap16s(&ehdr
->e_phentsize
); /* Program header table entry size */
886 bswap16s(&ehdr
->e_phnum
); /* Program header table entry count */
887 bswap16s(&ehdr
->e_shentsize
); /* Section header table entry size */
888 bswap16s(&ehdr
->e_shnum
); /* Section header table entry count */
889 bswap16s(&ehdr
->e_shstrndx
); /* Section header string table index */
892 static void bswap_phdr(struct elf_phdr
*phdr
)
894 bswap32s(&phdr
->p_type
); /* Segment type */
895 bswaptls(&phdr
->p_offset
); /* Segment file offset */
896 bswaptls(&phdr
->p_vaddr
); /* Segment virtual address */
897 bswaptls(&phdr
->p_paddr
); /* Segment physical address */
898 bswaptls(&phdr
->p_filesz
); /* Segment size in file */
899 bswaptls(&phdr
->p_memsz
); /* Segment size in memory */
900 bswap32s(&phdr
->p_flags
); /* Segment flags */
901 bswaptls(&phdr
->p_align
); /* Segment alignment */
904 static void bswap_shdr(struct elf_shdr
*shdr
)
906 bswap32s(&shdr
->sh_name
);
907 bswap32s(&shdr
->sh_type
);
908 bswaptls(&shdr
->sh_flags
);
909 bswaptls(&shdr
->sh_addr
);
910 bswaptls(&shdr
->sh_offset
);
911 bswaptls(&shdr
->sh_size
);
912 bswap32s(&shdr
->sh_link
);
913 bswap32s(&shdr
->sh_info
);
914 bswaptls(&shdr
->sh_addralign
);
915 bswaptls(&shdr
->sh_entsize
);
918 static void bswap_sym(struct elf_sym
*sym
)
920 bswap32s(&sym
->st_name
);
921 bswaptls(&sym
->st_value
);
922 bswaptls(&sym
->st_size
);
923 bswap16s(&sym
->st_shndx
);
927 #ifdef USE_ELF_CORE_DUMP
928 static int elf_core_dump(int, const CPUState
*);
931 static void bswap_note(struct elf_note
*en
)
933 bswap32s(&en
->n_namesz
);
934 bswap32s(&en
->n_descsz
);
935 bswap32s(&en
->n_type
);
937 #endif /* BSWAP_NEEDED */
939 #endif /* USE_ELF_CORE_DUMP */
942 * 'copy_elf_strings()' copies argument/envelope strings from user
943 * memory to free pages in kernel mem. These are in a format ready
944 * to be put directly into the top of new user memory.
947 static abi_ulong
copy_elf_strings(int argc
,char ** argv
, void **page
,
950 char *tmp
, *tmp1
, *pag
= NULL
;
954 return 0; /* bullet-proofing */
959 fprintf(stderr
, "VFS: argc is wrong");
965 if (p
< len
) { /* this shouldn't happen - 128kB */
971 offset
= p
% TARGET_PAGE_SIZE
;
972 pag
= (char *)page
[p
/TARGET_PAGE_SIZE
];
974 pag
= (char *)malloc(TARGET_PAGE_SIZE
);
975 memset(pag
, 0, TARGET_PAGE_SIZE
);
976 page
[p
/TARGET_PAGE_SIZE
] = pag
;
981 if (len
== 0 || offset
== 0) {
982 *(pag
+ offset
) = *tmp
;
985 int bytes_to_copy
= (len
> offset
) ? offset
: len
;
986 tmp
-= bytes_to_copy
;
988 offset
-= bytes_to_copy
;
989 len
-= bytes_to_copy
;
990 memcpy_fromfs(pag
+ offset
, tmp
, bytes_to_copy
+ 1);
997 static abi_ulong
setup_arg_pages(abi_ulong p
, struct linux_binprm
*bprm
,
998 struct image_info
*info
)
1000 abi_ulong stack_base
, size
, error
;
1003 /* Create enough stack to hold everything. If we don't use
1004 * it for args, we'll use it for something else...
1006 size
= guest_stack_size
;
1007 if (size
< MAX_ARG_PAGES
*TARGET_PAGE_SIZE
)
1008 size
= MAX_ARG_PAGES
*TARGET_PAGE_SIZE
;
1009 error
= target_mmap(0,
1010 size
+ qemu_host_page_size
,
1011 PROT_READ
| PROT_WRITE
,
1012 MAP_PRIVATE
| MAP_ANONYMOUS
,
1018 /* we reserve one extra page at the top of the stack as guard */
1019 target_mprotect(error
+ size
, qemu_host_page_size
, PROT_NONE
);
1021 stack_base
= error
+ size
- MAX_ARG_PAGES
*TARGET_PAGE_SIZE
;
1024 for (i
= 0 ; i
< MAX_ARG_PAGES
; i
++) {
1025 if (bprm
->page
[i
]) {
1027 /* FIXME - check return value of memcpy_to_target() for failure */
1028 memcpy_to_target(stack_base
, bprm
->page
[i
], TARGET_PAGE_SIZE
);
1029 free(bprm
->page
[i
]);
1031 stack_base
+= TARGET_PAGE_SIZE
;
1036 static void set_brk(abi_ulong start
, abi_ulong end
)
1038 /* page-align the start and end addresses... */
1039 start
= HOST_PAGE_ALIGN(start
);
1040 end
= HOST_PAGE_ALIGN(end
);
1043 if(target_mmap(start
, end
- start
,
1044 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
1045 MAP_FIXED
| MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0) == -1) {
1046 perror("cannot mmap brk");
1052 /* We need to explicitly zero any fractional pages after the data
1053 section (i.e. bss). This would contain the junk from the file that
1054 should not be in memory. */
1055 static void padzero(abi_ulong elf_bss
, abi_ulong last_bss
)
1059 if (elf_bss
>= last_bss
)
1062 /* XXX: this is really a hack : if the real host page size is
1063 smaller than the target page size, some pages after the end
1064 of the file may not be mapped. A better fix would be to
1065 patch target_mmap(), but it is more complicated as the file
1066 size must be known */
1067 if (qemu_real_host_page_size
< qemu_host_page_size
) {
1068 abi_ulong end_addr
, end_addr1
;
1069 end_addr1
= (elf_bss
+ qemu_real_host_page_size
- 1) &
1070 ~(qemu_real_host_page_size
- 1);
1071 end_addr
= HOST_PAGE_ALIGN(elf_bss
);
1072 if (end_addr1
< end_addr
) {
1073 mmap((void *)g2h(end_addr1
), end_addr
- end_addr1
,
1074 PROT_READ
|PROT_WRITE
|PROT_EXEC
,
1075 MAP_FIXED
|MAP_PRIVATE
|MAP_ANONYMOUS
, -1, 0);
1079 nbyte
= elf_bss
& (qemu_host_page_size
-1);
1081 nbyte
= qemu_host_page_size
- nbyte
;
1083 /* FIXME - what to do if put_user() fails? */
1084 put_user_u8(0, elf_bss
);
1091 static abi_ulong
create_elf_tables(abi_ulong p
, int argc
, int envc
,
1092 struct elfhdr
* exec
,
1093 abi_ulong load_addr
,
1094 abi_ulong load_bias
,
1095 abi_ulong interp_load_addr
, int ibcs
,
1096 struct image_info
*info
)
1100 abi_ulong u_platform
;
1101 const char *k_platform
;
1102 const int n
= sizeof(elf_addr_t
);
1106 k_platform
= ELF_PLATFORM
;
1108 size_t len
= strlen(k_platform
) + 1;
1109 sp
-= (len
+ n
- 1) & ~(n
- 1);
1111 /* FIXME - check return value of memcpy_to_target() for failure */
1112 memcpy_to_target(sp
, k_platform
, len
);
1115 * Force 16 byte _final_ alignment here for generality.
1117 sp
= sp
&~ (abi_ulong
)15;
1118 size
= (DLINFO_ITEMS
+ 1) * 2;
1121 #ifdef DLINFO_ARCH_ITEMS
1122 size
+= DLINFO_ARCH_ITEMS
* 2;
1124 size
+= envc
+ argc
+ 2;
1125 size
+= (!ibcs
? 3 : 1); /* argc itself */
1128 sp
-= 16 - (size
& 15);
1130 /* This is correct because Linux defines
1131 * elf_addr_t as Elf32_Off / Elf64_Off
1133 #define NEW_AUX_ENT(id, val) do { \
1134 sp -= n; put_user_ual(val, sp); \
1135 sp -= n; put_user_ual(id, sp); \
1138 NEW_AUX_ENT (AT_NULL
, 0);
1140 /* There must be exactly DLINFO_ITEMS entries here. */
1141 NEW_AUX_ENT(AT_PHDR
, (abi_ulong
)(load_addr
+ exec
->e_phoff
));
1142 NEW_AUX_ENT(AT_PHENT
, (abi_ulong
)(sizeof (struct elf_phdr
)));
1143 NEW_AUX_ENT(AT_PHNUM
, (abi_ulong
)(exec
->e_phnum
));
1144 NEW_AUX_ENT(AT_PAGESZ
, (abi_ulong
)(TARGET_PAGE_SIZE
));
1145 NEW_AUX_ENT(AT_BASE
, (abi_ulong
)(interp_load_addr
));
1146 NEW_AUX_ENT(AT_FLAGS
, (abi_ulong
)0);
1147 NEW_AUX_ENT(AT_ENTRY
, load_bias
+ exec
->e_entry
);
1148 NEW_AUX_ENT(AT_UID
, (abi_ulong
) getuid());
1149 NEW_AUX_ENT(AT_EUID
, (abi_ulong
) geteuid());
1150 NEW_AUX_ENT(AT_GID
, (abi_ulong
) getgid());
1151 NEW_AUX_ENT(AT_EGID
, (abi_ulong
) getegid());
1152 NEW_AUX_ENT(AT_HWCAP
, (abi_ulong
) ELF_HWCAP
);
1153 NEW_AUX_ENT(AT_CLKTCK
, (abi_ulong
) sysconf(_SC_CLK_TCK
));
1155 NEW_AUX_ENT(AT_PLATFORM
, u_platform
);
1158 * ARCH_DLINFO must come last so platform specific code can enforce
1159 * special alignment requirements on the AUXV if necessary (eg. PPC).
1165 info
->saved_auxv
= sp
;
1167 sp
= loader_build_argptr(envc
, argc
, sp
, p
, !ibcs
);
1172 static abi_ulong
load_elf_interp(struct elfhdr
* interp_elf_ex
,
1174 abi_ulong
*interp_load_addr
)
1176 struct elf_phdr
*elf_phdata
= NULL
;
1177 struct elf_phdr
*eppnt
;
1178 abi_ulong load_addr
= 0;
1179 int load_addr_set
= 0;
1181 abi_ulong last_bss
, elf_bss
;
1190 bswap_ehdr(interp_elf_ex
);
1192 /* First of all, some simple consistency checks */
1193 if ((interp_elf_ex
->e_type
!= ET_EXEC
&&
1194 interp_elf_ex
->e_type
!= ET_DYN
) ||
1195 !elf_check_arch(interp_elf_ex
->e_machine
)) {
1196 return ~((abi_ulong
)0UL);
1200 /* Now read in all of the header information */
1202 if (sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
> TARGET_PAGE_SIZE
)
1203 return ~(abi_ulong
)0UL;
1205 elf_phdata
= (struct elf_phdr
*)
1206 malloc(sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
1209 return ~((abi_ulong
)0UL);
1212 * If the size of this structure has changed, then punt, since
1213 * we will be doing the wrong thing.
1215 if (interp_elf_ex
->e_phentsize
!= sizeof(struct elf_phdr
)) {
1217 return ~((abi_ulong
)0UL);
1220 retval
= lseek(interpreter_fd
, interp_elf_ex
->e_phoff
, SEEK_SET
);
1222 retval
= read(interpreter_fd
,
1223 (char *) elf_phdata
,
1224 sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
1227 perror("load_elf_interp");
1234 for (i
=0; i
<interp_elf_ex
->e_phnum
; i
++, eppnt
++) {
1239 if (interp_elf_ex
->e_type
== ET_DYN
) {
1240 /* in order to avoid hardcoding the interpreter load
1241 address in qemu, we allocate a big enough memory zone */
1242 error
= target_mmap(0, INTERP_MAP_SIZE
,
1243 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
1254 for(i
=0; i
<interp_elf_ex
->e_phnum
; i
++, eppnt
++)
1255 if (eppnt
->p_type
== PT_LOAD
) {
1256 int elf_type
= MAP_PRIVATE
| MAP_DENYWRITE
;
1258 abi_ulong vaddr
= 0;
1261 if (eppnt
->p_flags
& PF_R
) elf_prot
= PROT_READ
;
1262 if (eppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
1263 if (eppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
1264 if (interp_elf_ex
->e_type
== ET_EXEC
|| load_addr_set
) {
1265 elf_type
|= MAP_FIXED
;
1266 vaddr
= eppnt
->p_vaddr
;
1268 error
= target_mmap(load_addr
+TARGET_ELF_PAGESTART(vaddr
),
1269 eppnt
->p_filesz
+ TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
),
1273 eppnt
->p_offset
- TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
));
1277 close(interpreter_fd
);
1279 return ~((abi_ulong
)0UL);
1282 if (!load_addr_set
&& interp_elf_ex
->e_type
== ET_DYN
) {
1288 * Find the end of the file mapping for this phdr, and keep
1289 * track of the largest address we see for this.
1291 k
= load_addr
+ eppnt
->p_vaddr
+ eppnt
->p_filesz
;
1292 if (k
> elf_bss
) elf_bss
= k
;
1295 * Do the same thing for the memory mapping - between
1296 * elf_bss and last_bss is the bss section.
1298 k
= load_addr
+ eppnt
->p_memsz
+ eppnt
->p_vaddr
;
1299 if (k
> last_bss
) last_bss
= k
;
1302 /* Now use mmap to map the library into memory. */
1304 close(interpreter_fd
);
1307 * Now fill out the bss section. First pad the last page up
1308 * to the page boundary, and then perform a mmap to make sure
1309 * that there are zeromapped pages up to and including the last
1312 padzero(elf_bss
, last_bss
);
1313 elf_bss
= TARGET_ELF_PAGESTART(elf_bss
+ qemu_host_page_size
- 1); /* What we have mapped so far */
1315 /* Map the last of the bss segment */
1316 if (last_bss
> elf_bss
) {
1317 target_mmap(elf_bss
, last_bss
-elf_bss
,
1318 PROT_READ
|PROT_WRITE
|PROT_EXEC
,
1319 MAP_FIXED
|MAP_PRIVATE
|MAP_ANONYMOUS
, -1, 0);
1323 *interp_load_addr
= load_addr
;
1324 return ((abi_ulong
) interp_elf_ex
->e_entry
) + load_addr
;
1327 static int symfind(const void *s0
, const void *s1
)
1329 struct elf_sym
*key
= (struct elf_sym
*)s0
;
1330 struct elf_sym
*sym
= (struct elf_sym
*)s1
;
1332 if (key
->st_value
< sym
->st_value
) {
1334 } else if (key
->st_value
>= sym
->st_value
+ sym
->st_size
) {
1340 static const char *lookup_symbolxx(struct syminfo
*s
, target_ulong orig_addr
)
1342 #if ELF_CLASS == ELFCLASS32
1343 struct elf_sym
*syms
= s
->disas_symtab
.elf32
;
1345 struct elf_sym
*syms
= s
->disas_symtab
.elf64
;
1350 struct elf_sym
*sym
;
1352 key
.st_value
= orig_addr
;
1354 sym
= bsearch(&key
, syms
, s
->disas_num_syms
, sizeof(*syms
), symfind
);
1356 return s
->disas_strtab
+ sym
->st_name
;
1362 /* FIXME: This should use elf_ops.h */
1363 static int symcmp(const void *s0
, const void *s1
)
1365 struct elf_sym
*sym0
= (struct elf_sym
*)s0
;
1366 struct elf_sym
*sym1
= (struct elf_sym
*)s1
;
1367 return (sym0
->st_value
< sym1
->st_value
)
1369 : ((sym0
->st_value
> sym1
->st_value
) ? 1 : 0);
1372 /* Best attempt to load symbols from this ELF object. */
1373 static void load_symbols(struct elfhdr
*hdr
, int fd
)
1375 unsigned int i
, nsyms
;
1376 struct elf_shdr sechdr
, symtab
, strtab
;
1379 struct elf_sym
*syms
;
1381 lseek(fd
, hdr
->e_shoff
, SEEK_SET
);
1382 for (i
= 0; i
< hdr
->e_shnum
; i
++) {
1383 if (read(fd
, &sechdr
, sizeof(sechdr
)) != sizeof(sechdr
))
1386 bswap_shdr(&sechdr
);
1388 if (sechdr
.sh_type
== SHT_SYMTAB
) {
1390 lseek(fd
, hdr
->e_shoff
1391 + sizeof(sechdr
) * sechdr
.sh_link
, SEEK_SET
);
1392 if (read(fd
, &strtab
, sizeof(strtab
))
1396 bswap_shdr(&strtab
);
1401 return; /* Shouldn't happen... */
1404 /* Now know where the strtab and symtab are. Snarf them. */
1405 s
= malloc(sizeof(*s
));
1406 syms
= malloc(symtab
.sh_size
);
1409 s
->disas_strtab
= strings
= malloc(strtab
.sh_size
);
1410 if (!s
->disas_strtab
)
1413 lseek(fd
, symtab
.sh_offset
, SEEK_SET
);
1414 if (read(fd
, syms
, symtab
.sh_size
) != symtab
.sh_size
)
1417 nsyms
= symtab
.sh_size
/ sizeof(struct elf_sym
);
1422 bswap_sym(syms
+ i
);
1424 // Throw away entries which we do not need.
1425 if (syms
[i
].st_shndx
== SHN_UNDEF
||
1426 syms
[i
].st_shndx
>= SHN_LORESERVE
||
1427 ELF_ST_TYPE(syms
[i
].st_info
) != STT_FUNC
) {
1430 syms
[i
] = syms
[nsyms
];
1434 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
1435 /* The bottom address bit marks a Thumb or MIPS16 symbol. */
1436 syms
[i
].st_value
&= ~(target_ulong
)1;
1440 syms
= realloc(syms
, nsyms
* sizeof(*syms
));
1442 qsort(syms
, nsyms
, sizeof(*syms
), symcmp
);
1444 lseek(fd
, strtab
.sh_offset
, SEEK_SET
);
1445 if (read(fd
, strings
, strtab
.sh_size
) != strtab
.sh_size
)
1447 s
->disas_num_syms
= nsyms
;
1448 #if ELF_CLASS == ELFCLASS32
1449 s
->disas_symtab
.elf32
= syms
;
1450 s
->lookup_symbol
= lookup_symbolxx
;
1452 s
->disas_symtab
.elf64
= syms
;
1453 s
->lookup_symbol
= lookup_symbolxx
;
1459 int load_elf_binary(struct linux_binprm
* bprm
, struct target_pt_regs
* regs
,
1460 struct image_info
* info
)
1462 struct elfhdr elf_ex
;
1463 struct elfhdr interp_elf_ex
;
1464 struct exec interp_ex
;
1465 int interpreter_fd
= -1; /* avoid warning */
1466 abi_ulong load_addr
, load_bias
;
1467 int load_addr_set
= 0;
1468 unsigned int interpreter_type
= INTERPRETER_NONE
;
1469 unsigned char ibcs2_interpreter
;
1471 abi_ulong mapped_addr
;
1472 struct elf_phdr
* elf_ppnt
;
1473 struct elf_phdr
*elf_phdata
;
1474 abi_ulong elf_bss
, k
, elf_brk
;
1476 char * elf_interpreter
;
1477 abi_ulong elf_entry
, interp_load_addr
= 0;
1479 abi_ulong start_code
, end_code
, start_data
, end_data
;
1480 abi_ulong reloc_func_desc
= 0;
1481 abi_ulong elf_stack
;
1482 char passed_fileno
[6];
1484 ibcs2_interpreter
= 0;
1488 elf_ex
= *((struct elfhdr
*) bprm
->buf
); /* exec-header */
1490 bswap_ehdr(&elf_ex
);
1493 /* First of all, some simple consistency checks */
1494 if ((elf_ex
.e_type
!= ET_EXEC
&& elf_ex
.e_type
!= ET_DYN
) ||
1495 (! elf_check_arch(elf_ex
.e_machine
))) {
1499 bprm
->p
= copy_elf_strings(1, &bprm
->filename
, bprm
->page
, bprm
->p
);
1500 bprm
->p
= copy_elf_strings(bprm
->envc
,bprm
->envp
,bprm
->page
,bprm
->p
);
1501 bprm
->p
= copy_elf_strings(bprm
->argc
,bprm
->argv
,bprm
->page
,bprm
->p
);
1506 /* Now read in all of the header information */
1507 elf_phdata
= (struct elf_phdr
*)malloc(elf_ex
.e_phentsize
*elf_ex
.e_phnum
);
1508 if (elf_phdata
== NULL
) {
1512 retval
= lseek(bprm
->fd
, elf_ex
.e_phoff
, SEEK_SET
);
1514 retval
= read(bprm
->fd
, (char *) elf_phdata
,
1515 elf_ex
.e_phentsize
* elf_ex
.e_phnum
);
1519 perror("load_elf_binary");
1526 elf_ppnt
= elf_phdata
;
1527 for (i
=0; i
<elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
1528 bswap_phdr(elf_ppnt
);
1531 elf_ppnt
= elf_phdata
;
1537 elf_stack
= ~((abi_ulong
)0UL);
1538 elf_interpreter
= NULL
;
1539 start_code
= ~((abi_ulong
)0UL);
1543 interp_ex
.a_info
= 0;
1545 for(i
=0;i
< elf_ex
.e_phnum
; i
++) {
1546 if (elf_ppnt
->p_type
== PT_INTERP
) {
1547 if ( elf_interpreter
!= NULL
)
1550 free(elf_interpreter
);
1555 /* This is the program interpreter used for
1556 * shared libraries - for now assume that this
1557 * is an a.out format binary
1560 elf_interpreter
= (char *)malloc(elf_ppnt
->p_filesz
);
1562 if (elf_interpreter
== NULL
) {
1568 retval
= lseek(bprm
->fd
, elf_ppnt
->p_offset
, SEEK_SET
);
1570 retval
= read(bprm
->fd
, elf_interpreter
, elf_ppnt
->p_filesz
);
1573 perror("load_elf_binary2");
1577 /* If the program interpreter is one of these two,
1578 then assume an iBCS2 image. Otherwise assume
1579 a native linux image. */
1581 /* JRP - Need to add X86 lib dir stuff here... */
1583 if (strcmp(elf_interpreter
,"/usr/lib/libc.so.1") == 0 ||
1584 strcmp(elf_interpreter
,"/usr/lib/ld.so.1") == 0) {
1585 ibcs2_interpreter
= 1;
1589 printf("Using ELF interpreter %s\n", path(elf_interpreter
));
1592 retval
= open(path(elf_interpreter
), O_RDONLY
);
1594 interpreter_fd
= retval
;
1597 perror(elf_interpreter
);
1599 /* retval = -errno; */
1604 retval
= lseek(interpreter_fd
, 0, SEEK_SET
);
1606 retval
= read(interpreter_fd
,bprm
->buf
,128);
1610 interp_ex
= *((struct exec
*) bprm
->buf
); /* aout exec-header */
1611 interp_elf_ex
= *((struct elfhdr
*) bprm
->buf
); /* elf exec-header */
1614 perror("load_elf_binary3");
1617 free(elf_interpreter
);
1625 /* Some simple consistency checks for the interpreter */
1626 if (elf_interpreter
){
1627 interpreter_type
= INTERPRETER_ELF
| INTERPRETER_AOUT
;
1629 /* Now figure out which format our binary is */
1630 if ((N_MAGIC(interp_ex
) != OMAGIC
) && (N_MAGIC(interp_ex
) != ZMAGIC
) &&
1631 (N_MAGIC(interp_ex
) != QMAGIC
)) {
1632 interpreter_type
= INTERPRETER_ELF
;
1635 if (interp_elf_ex
.e_ident
[0] != 0x7f ||
1636 strncmp((char *)&interp_elf_ex
.e_ident
[1], "ELF",3) != 0) {
1637 interpreter_type
&= ~INTERPRETER_ELF
;
1640 if (!interpreter_type
) {
1641 free(elf_interpreter
);
1648 /* OK, we are done with that, now set up the arg stuff,
1649 and then start this sucker up */
1654 if (interpreter_type
== INTERPRETER_AOUT
) {
1655 snprintf(passed_fileno
, sizeof(passed_fileno
), "%d", bprm
->fd
);
1656 passed_p
= passed_fileno
;
1658 if (elf_interpreter
) {
1659 bprm
->p
= copy_elf_strings(1,&passed_p
,bprm
->page
,bprm
->p
);
1664 if (elf_interpreter
) {
1665 free(elf_interpreter
);
1673 /* OK, This is the point of no return */
1676 info
->start_mmap
= (abi_ulong
)ELF_START_MMAP
;
1678 elf_entry
= (abi_ulong
) elf_ex
.e_entry
;
1680 #if defined(CONFIG_USE_GUEST_BASE)
1682 * In case where user has not explicitly set the guest_base, we
1683 * probe here that should we set it automatically.
1685 if (!(have_guest_base
|| reserved_va
)) {
1687 * Go through ELF program header table and find the address
1688 * range used by loadable segments. Check that this is available on
1689 * the host, and if not find a suitable value for guest_base. */
1690 abi_ulong app_start
= ~0;
1691 abi_ulong app_end
= 0;
1693 unsigned long host_start
;
1694 unsigned long real_start
;
1695 unsigned long host_size
;
1696 for (i
= 0, elf_ppnt
= elf_phdata
; i
< elf_ex
.e_phnum
;
1698 if (elf_ppnt
->p_type
!= PT_LOAD
)
1700 addr
= elf_ppnt
->p_vaddr
;
1701 if (addr
< app_start
) {
1704 addr
+= elf_ppnt
->p_memsz
;
1705 if (addr
> app_end
) {
1710 /* If we don't have any loadable segments then something
1712 assert(app_start
< app_end
);
1714 /* Round addresses to page boundaries. */
1715 app_start
= app_start
& qemu_host_page_mask
;
1716 app_end
= HOST_PAGE_ALIGN(app_end
);
1717 if (app_start
< mmap_min_addr
) {
1718 host_start
= HOST_PAGE_ALIGN(mmap_min_addr
);
1720 host_start
= app_start
;
1721 if (host_start
!= app_start
) {
1722 fprintf(stderr
, "qemu: Address overflow loading ELF binary\n");
1726 host_size
= app_end
- app_start
;
1728 /* Do not use mmap_find_vma here because that is limited to the
1729 guest address space. We are going to make the
1730 guest address space fit whatever we're given. */
1731 real_start
= (unsigned long)mmap((void *)host_start
, host_size
,
1732 PROT_NONE
, MAP_ANONYMOUS
| MAP_PRIVATE
| MAP_NORESERVE
, -1, 0);
1733 if (real_start
== (unsigned long)-1) {
1734 fprintf(stderr
, "qemu: Virtual memory exausted\n");
1737 if (real_start
== host_start
) {
1740 /* That address didn't work. Unmap and try a different one.
1741 The address the host picked because is typically
1742 right at the top of the host address space and leaves the
1743 guest with no usable address space. Resort to a linear search.
1744 We already compensated for mmap_min_addr, so this should not
1745 happen often. Probably means we got unlucky and host address
1746 space randomization put a shared library somewhere
1748 munmap((void *)real_start
, host_size
);
1749 host_start
+= qemu_host_page_size
;
1750 if (host_start
== app_start
) {
1751 /* Theoretically possible if host doesn't have any
1752 suitably aligned areas. Normally the first mmap will
1754 fprintf(stderr
, "qemu: Unable to find space for application\n");
1758 qemu_log("Relocating guest address space from 0x" TARGET_ABI_FMT_lx
1759 " to 0x%lx\n", app_start
, real_start
);
1760 guest_base
= real_start
- app_start
;
1762 #endif /* CONFIG_USE_GUEST_BASE */
1764 /* Do this so that we can load the interpreter, if need be. We will
1765 change some of these later */
1767 bprm
->p
= setup_arg_pages(bprm
->p
, bprm
, info
);
1768 info
->start_stack
= bprm
->p
;
1770 /* Now we do a little grungy work by mmaping the ELF image into
1771 * the correct location in memory. At this point, we assume that
1772 * the image should be loaded at fixed address, not at a variable
1776 for(i
= 0, elf_ppnt
= elf_phdata
; i
< elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
1781 if (elf_ppnt
->p_type
!= PT_LOAD
)
1784 if (elf_ppnt
->p_flags
& PF_R
) elf_prot
|= PROT_READ
;
1785 if (elf_ppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
1786 if (elf_ppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
1787 elf_flags
= MAP_PRIVATE
| MAP_DENYWRITE
;
1788 if (elf_ex
.e_type
== ET_EXEC
|| load_addr_set
) {
1789 elf_flags
|= MAP_FIXED
;
1790 } else if (elf_ex
.e_type
== ET_DYN
) {
1791 /* Try and get dynamic programs out of the way of the default mmap
1792 base, as well as whatever program they might try to exec. This
1793 is because the brk will follow the loader, and is not movable. */
1794 /* NOTE: for qemu, we do a big mmap to get enough space
1795 without hardcoding any address */
1796 error
= target_mmap(0, ET_DYN_MAP_SIZE
,
1797 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
1803 load_bias
= TARGET_ELF_PAGESTART(error
- elf_ppnt
->p_vaddr
);
1806 error
= target_mmap(TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
),
1807 (elf_ppnt
->p_filesz
+
1808 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)),
1810 (MAP_FIXED
| MAP_PRIVATE
| MAP_DENYWRITE
),
1812 (elf_ppnt
->p_offset
-
1813 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)));
1819 #ifdef LOW_ELF_STACK
1820 if (TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
) < elf_stack
)
1821 elf_stack
= TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
);
1824 if (!load_addr_set
) {
1826 load_addr
= elf_ppnt
->p_vaddr
- elf_ppnt
->p_offset
;
1827 if (elf_ex
.e_type
== ET_DYN
) {
1828 load_bias
+= error
-
1829 TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
);
1830 load_addr
+= load_bias
;
1831 reloc_func_desc
= load_bias
;
1834 k
= elf_ppnt
->p_vaddr
;
1839 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_filesz
;
1842 if ((elf_ppnt
->p_flags
& PF_X
) && end_code
< k
)
1846 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_memsz
;
1847 if (k
> elf_brk
) elf_brk
= k
;
1850 elf_entry
+= load_bias
;
1851 elf_bss
+= load_bias
;
1852 elf_brk
+= load_bias
;
1853 start_code
+= load_bias
;
1854 end_code
+= load_bias
;
1855 start_data
+= load_bias
;
1856 end_data
+= load_bias
;
1858 if (elf_interpreter
) {
1859 if (interpreter_type
& 1) {
1860 elf_entry
= load_aout_interp(&interp_ex
, interpreter_fd
);
1862 else if (interpreter_type
& 2) {
1863 elf_entry
= load_elf_interp(&interp_elf_ex
, interpreter_fd
,
1866 reloc_func_desc
= interp_load_addr
;
1868 close(interpreter_fd
);
1869 free(elf_interpreter
);
1871 if (elf_entry
== ~((abi_ulong
)0UL)) {
1872 printf("Unable to load interpreter\n");
1881 if (qemu_log_enabled())
1882 load_symbols(&elf_ex
, bprm
->fd
);
1884 if (interpreter_type
!= INTERPRETER_AOUT
) close(bprm
->fd
);
1885 info
->personality
= (ibcs2_interpreter
? PER_SVR4
: PER_LINUX
);
1887 #ifdef LOW_ELF_STACK
1888 info
->start_stack
= bprm
->p
= elf_stack
- 4;
1890 bprm
->p
= create_elf_tables(bprm
->p
,
1894 load_addr
, load_bias
,
1896 (interpreter_type
== INTERPRETER_AOUT
? 0 : 1),
1898 info
->load_addr
= reloc_func_desc
;
1899 info
->start_brk
= info
->brk
= elf_brk
;
1900 info
->end_code
= end_code
;
1901 info
->start_code
= start_code
;
1902 info
->start_data
= start_data
;
1903 info
->end_data
= end_data
;
1904 info
->start_stack
= bprm
->p
;
1906 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1908 set_brk(elf_bss
, elf_brk
);
1910 padzero(elf_bss
, elf_brk
);
1913 printf("(start_brk) %x\n" , info
->start_brk
);
1914 printf("(end_code) %x\n" , info
->end_code
);
1915 printf("(start_code) %x\n" , info
->start_code
);
1916 printf("(end_data) %x\n" , info
->end_data
);
1917 printf("(start_stack) %x\n" , info
->start_stack
);
1918 printf("(brk) %x\n" , info
->brk
);
1921 if ( info
->personality
== PER_SVR4
)
1923 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1924 and some applications "depend" upon this behavior.
1925 Since we do not have the power to recompile these, we
1926 emulate the SVr4 behavior. Sigh. */
1927 mapped_addr
= target_mmap(0, qemu_host_page_size
, PROT_READ
| PROT_EXEC
,
1928 MAP_FIXED
| MAP_PRIVATE
, -1, 0);
1931 info
->entry
= elf_entry
;
1933 #ifdef USE_ELF_CORE_DUMP
1934 bprm
->core_dump
= &elf_core_dump
;
1940 #ifdef USE_ELF_CORE_DUMP
1943 * Definitions to generate Intel SVR4-like core files.
1944 * These mostly have the same names as the SVR4 types with "target_elf_"
1945 * tacked on the front to prevent clashes with linux definitions,
1946 * and the typedef forms have been avoided. This is mostly like
1947 * the SVR4 structure, but more Linuxy, with things that Linux does
1948 * not support and which gdb doesn't really use excluded.
1950 * Fields we don't dump (their contents is zero) in linux-user qemu
1951 * are marked with XXX.
1953 * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
1955 * Porting ELF coredump for target is (quite) simple process. First you
1956 * define USE_ELF_CORE_DUMP in target ELF code (where init_thread() for
1957 * the target resides):
1959 * #define USE_ELF_CORE_DUMP
1961 * Next you define type of register set used for dumping. ELF specification
1962 * says that it needs to be array of elf_greg_t that has size of ELF_NREG.
1964 * typedef <target_regtype> target_elf_greg_t;
1965 * #define ELF_NREG <number of registers>
1966 * typedef taret_elf_greg_t target_elf_gregset_t[ELF_NREG];
1968 * Last step is to implement target specific function that copies registers
1969 * from given cpu into just specified register set. Prototype is:
1971 * static void elf_core_copy_regs(taret_elf_gregset_t *regs,
1972 * const CPUState *env);
1975 * regs - copy register values into here (allocated and zeroed by caller)
1976 * env - copy registers from here
1978 * Example for ARM target is provided in this file.
1981 /* An ELF note in memory */
1985 size_t namesz_rounded
;
1992 struct target_elf_siginfo
{
1993 int si_signo
; /* signal number */
1994 int si_code
; /* extra code */
1995 int si_errno
; /* errno */
1998 struct target_elf_prstatus
{
1999 struct target_elf_siginfo pr_info
; /* Info associated with signal */
2000 short pr_cursig
; /* Current signal */
2001 target_ulong pr_sigpend
; /* XXX */
2002 target_ulong pr_sighold
; /* XXX */
2003 target_pid_t pr_pid
;
2004 target_pid_t pr_ppid
;
2005 target_pid_t pr_pgrp
;
2006 target_pid_t pr_sid
;
2007 struct target_timeval pr_utime
; /* XXX User time */
2008 struct target_timeval pr_stime
; /* XXX System time */
2009 struct target_timeval pr_cutime
; /* XXX Cumulative user time */
2010 struct target_timeval pr_cstime
; /* XXX Cumulative system time */
2011 target_elf_gregset_t pr_reg
; /* GP registers */
2012 int pr_fpvalid
; /* XXX */
2015 #define ELF_PRARGSZ (80) /* Number of chars for args */
2017 struct target_elf_prpsinfo
{
2018 char pr_state
; /* numeric process state */
2019 char pr_sname
; /* char for pr_state */
2020 char pr_zomb
; /* zombie */
2021 char pr_nice
; /* nice val */
2022 target_ulong pr_flag
; /* flags */
2023 target_uid_t pr_uid
;
2024 target_gid_t pr_gid
;
2025 target_pid_t pr_pid
, pr_ppid
, pr_pgrp
, pr_sid
;
2027 char pr_fname
[16]; /* filename of executable */
2028 char pr_psargs
[ELF_PRARGSZ
]; /* initial part of arg list */
2031 /* Here is the structure in which status of each thread is captured. */
2032 struct elf_thread_status
{
2033 QTAILQ_ENTRY(elf_thread_status
) ets_link
;
2034 struct target_elf_prstatus prstatus
; /* NT_PRSTATUS */
2036 elf_fpregset_t fpu
; /* NT_PRFPREG */
2037 struct task_struct
*thread
;
2038 elf_fpxregset_t xfpu
; /* ELF_CORE_XFPREG_TYPE */
2040 struct memelfnote notes
[1];
2044 struct elf_note_info
{
2045 struct memelfnote
*notes
;
2046 struct target_elf_prstatus
*prstatus
; /* NT_PRSTATUS */
2047 struct target_elf_prpsinfo
*psinfo
; /* NT_PRPSINFO */
2049 QTAILQ_HEAD(thread_list_head
, elf_thread_status
) thread_list
;
2052 * Current version of ELF coredump doesn't support
2053 * dumping fp regs etc.
2055 elf_fpregset_t
*fpu
;
2056 elf_fpxregset_t
*xfpu
;
2057 int thread_status_size
;
2063 struct vm_area_struct
{
2064 abi_ulong vma_start
; /* start vaddr of memory region */
2065 abi_ulong vma_end
; /* end vaddr of memory region */
2066 abi_ulong vma_flags
; /* protection etc. flags for the region */
2067 QTAILQ_ENTRY(vm_area_struct
) vma_link
;
2071 QTAILQ_HEAD(, vm_area_struct
) mm_mmap
;
2072 int mm_count
; /* number of mappings */
2075 static struct mm_struct
*vma_init(void);
2076 static void vma_delete(struct mm_struct
*);
2077 static int vma_add_mapping(struct mm_struct
*, abi_ulong
,
2078 abi_ulong
, abi_ulong
);
2079 static int vma_get_mapping_count(const struct mm_struct
*);
2080 static struct vm_area_struct
*vma_first(const struct mm_struct
*);
2081 static struct vm_area_struct
*vma_next(struct vm_area_struct
*);
2082 static abi_ulong
vma_dump_size(const struct vm_area_struct
*);
2083 static int vma_walker(void *priv
, abi_ulong start
, abi_ulong end
,
2084 unsigned long flags
);
2086 static void fill_elf_header(struct elfhdr
*, int, uint16_t, uint32_t);
2087 static void fill_note(struct memelfnote
*, const char *, int,
2088 unsigned int, void *);
2089 static void fill_prstatus(struct target_elf_prstatus
*, const TaskState
*, int);
2090 static int fill_psinfo(struct target_elf_prpsinfo
*, const TaskState
*);
2091 static void fill_auxv_note(struct memelfnote
*, const TaskState
*);
2092 static void fill_elf_note_phdr(struct elf_phdr
*, int, off_t
);
2093 static size_t note_size(const struct memelfnote
*);
2094 static void free_note_info(struct elf_note_info
*);
2095 static int fill_note_info(struct elf_note_info
*, long, const CPUState
*);
2096 static void fill_thread_info(struct elf_note_info
*, const CPUState
*);
2097 static int core_dump_filename(const TaskState
*, char *, size_t);
2099 static int dump_write(int, const void *, size_t);
2100 static int write_note(struct memelfnote
*, int);
2101 static int write_note_info(struct elf_note_info
*, int);
2104 static void bswap_prstatus(struct target_elf_prstatus
*);
2105 static void bswap_psinfo(struct target_elf_prpsinfo
*);
2107 static void bswap_prstatus(struct target_elf_prstatus
*prstatus
)
2109 prstatus
->pr_info
.si_signo
= tswapl(prstatus
->pr_info
.si_signo
);
2110 prstatus
->pr_info
.si_code
= tswapl(prstatus
->pr_info
.si_code
);
2111 prstatus
->pr_info
.si_errno
= tswapl(prstatus
->pr_info
.si_errno
);
2112 prstatus
->pr_cursig
= tswap16(prstatus
->pr_cursig
);
2113 prstatus
->pr_sigpend
= tswapl(prstatus
->pr_sigpend
);
2114 prstatus
->pr_sighold
= tswapl(prstatus
->pr_sighold
);
2115 prstatus
->pr_pid
= tswap32(prstatus
->pr_pid
);
2116 prstatus
->pr_ppid
= tswap32(prstatus
->pr_ppid
);
2117 prstatus
->pr_pgrp
= tswap32(prstatus
->pr_pgrp
);
2118 prstatus
->pr_sid
= tswap32(prstatus
->pr_sid
);
2119 /* cpu times are not filled, so we skip them */
2120 /* regs should be in correct format already */
2121 prstatus
->pr_fpvalid
= tswap32(prstatus
->pr_fpvalid
);
2124 static void bswap_psinfo(struct target_elf_prpsinfo
*psinfo
)
2126 psinfo
->pr_flag
= tswapl(psinfo
->pr_flag
);
2127 psinfo
->pr_uid
= tswap16(psinfo
->pr_uid
);
2128 psinfo
->pr_gid
= tswap16(psinfo
->pr_gid
);
2129 psinfo
->pr_pid
= tswap32(psinfo
->pr_pid
);
2130 psinfo
->pr_ppid
= tswap32(psinfo
->pr_ppid
);
2131 psinfo
->pr_pgrp
= tswap32(psinfo
->pr_pgrp
);
2132 psinfo
->pr_sid
= tswap32(psinfo
->pr_sid
);
2134 #endif /* BSWAP_NEEDED */
2137 * Minimal support for linux memory regions. These are needed
2138 * when we are finding out what memory exactly belongs to
2139 * emulated process. No locks needed here, as long as
2140 * thread that received the signal is stopped.
2143 static struct mm_struct
*vma_init(void)
2145 struct mm_struct
*mm
;
2147 if ((mm
= qemu_malloc(sizeof (*mm
))) == NULL
)
2151 QTAILQ_INIT(&mm
->mm_mmap
);
2156 static void vma_delete(struct mm_struct
*mm
)
2158 struct vm_area_struct
*vma
;
2160 while ((vma
= vma_first(mm
)) != NULL
) {
2161 QTAILQ_REMOVE(&mm
->mm_mmap
, vma
, vma_link
);
2167 static int vma_add_mapping(struct mm_struct
*mm
, abi_ulong start
,
2168 abi_ulong end
, abi_ulong flags
)
2170 struct vm_area_struct
*vma
;
2172 if ((vma
= qemu_mallocz(sizeof (*vma
))) == NULL
)
2175 vma
->vma_start
= start
;
2177 vma
->vma_flags
= flags
;
2179 QTAILQ_INSERT_TAIL(&mm
->mm_mmap
, vma
, vma_link
);
2185 static struct vm_area_struct
*vma_first(const struct mm_struct
*mm
)
2187 return (QTAILQ_FIRST(&mm
->mm_mmap
));
2190 static struct vm_area_struct
*vma_next(struct vm_area_struct
*vma
)
2192 return (QTAILQ_NEXT(vma
, vma_link
));
2195 static int vma_get_mapping_count(const struct mm_struct
*mm
)
2197 return (mm
->mm_count
);
2201 * Calculate file (dump) size of given memory region.
2203 static abi_ulong
vma_dump_size(const struct vm_area_struct
*vma
)
2205 /* if we cannot even read the first page, skip it */
2206 if (!access_ok(VERIFY_READ
, vma
->vma_start
, TARGET_PAGE_SIZE
))
2210 * Usually we don't dump executable pages as they contain
2211 * non-writable code that debugger can read directly from
2212 * target library etc. However, thread stacks are marked
2213 * also executable so we read in first page of given region
2214 * and check whether it contains elf header. If there is
2215 * no elf header, we dump it.
2217 if (vma
->vma_flags
& PROT_EXEC
) {
2218 char page
[TARGET_PAGE_SIZE
];
2220 copy_from_user(page
, vma
->vma_start
, sizeof (page
));
2221 if ((page
[EI_MAG0
] == ELFMAG0
) &&
2222 (page
[EI_MAG1
] == ELFMAG1
) &&
2223 (page
[EI_MAG2
] == ELFMAG2
) &&
2224 (page
[EI_MAG3
] == ELFMAG3
)) {
2226 * Mappings are possibly from ELF binary. Don't dump
2233 return (vma
->vma_end
- vma
->vma_start
);
2236 static int vma_walker(void *priv
, abi_ulong start
, abi_ulong end
,
2237 unsigned long flags
)
2239 struct mm_struct
*mm
= (struct mm_struct
*)priv
;
2241 vma_add_mapping(mm
, start
, end
, flags
);
2245 static void fill_note(struct memelfnote
*note
, const char *name
, int type
,
2246 unsigned int sz
, void *data
)
2248 unsigned int namesz
;
2250 namesz
= strlen(name
) + 1;
2252 note
->namesz
= namesz
;
2253 note
->namesz_rounded
= roundup(namesz
, sizeof (int32_t));
2255 note
->datasz
= roundup(sz
, sizeof (int32_t));;
2259 * We calculate rounded up note size here as specified by
2262 note
->notesz
= sizeof (struct elf_note
) +
2263 note
->namesz_rounded
+ note
->datasz
;
2266 static void fill_elf_header(struct elfhdr
*elf
, int segs
, uint16_t machine
,
2269 (void) memset(elf
, 0, sizeof(*elf
));
2271 (void) memcpy(elf
->e_ident
, ELFMAG
, SELFMAG
);
2272 elf
->e_ident
[EI_CLASS
] = ELF_CLASS
;
2273 elf
->e_ident
[EI_DATA
] = ELF_DATA
;
2274 elf
->e_ident
[EI_VERSION
] = EV_CURRENT
;
2275 elf
->e_ident
[EI_OSABI
] = ELF_OSABI
;
2277 elf
->e_type
= ET_CORE
;
2278 elf
->e_machine
= machine
;
2279 elf
->e_version
= EV_CURRENT
;
2280 elf
->e_phoff
= sizeof(struct elfhdr
);
2281 elf
->e_flags
= flags
;
2282 elf
->e_ehsize
= sizeof(struct elfhdr
);
2283 elf
->e_phentsize
= sizeof(struct elf_phdr
);
2284 elf
->e_phnum
= segs
;
2291 static void fill_elf_note_phdr(struct elf_phdr
*phdr
, int sz
, off_t offset
)
2293 phdr
->p_type
= PT_NOTE
;
2294 phdr
->p_offset
= offset
;
2297 phdr
->p_filesz
= sz
;
2307 static size_t note_size(const struct memelfnote
*note
)
2309 return (note
->notesz
);
2312 static void fill_prstatus(struct target_elf_prstatus
*prstatus
,
2313 const TaskState
*ts
, int signr
)
2315 (void) memset(prstatus
, 0, sizeof (*prstatus
));
2316 prstatus
->pr_info
.si_signo
= prstatus
->pr_cursig
= signr
;
2317 prstatus
->pr_pid
= ts
->ts_tid
;
2318 prstatus
->pr_ppid
= getppid();
2319 prstatus
->pr_pgrp
= getpgrp();
2320 prstatus
->pr_sid
= getsid(0);
2323 bswap_prstatus(prstatus
);
2327 static int fill_psinfo(struct target_elf_prpsinfo
*psinfo
, const TaskState
*ts
)
2329 char *filename
, *base_filename
;
2330 unsigned int i
, len
;
2332 (void) memset(psinfo
, 0, sizeof (*psinfo
));
2334 len
= ts
->info
->arg_end
- ts
->info
->arg_start
;
2335 if (len
>= ELF_PRARGSZ
)
2336 len
= ELF_PRARGSZ
- 1;
2337 if (copy_from_user(&psinfo
->pr_psargs
, ts
->info
->arg_start
, len
))
2339 for (i
= 0; i
< len
; i
++)
2340 if (psinfo
->pr_psargs
[i
] == 0)
2341 psinfo
->pr_psargs
[i
] = ' ';
2342 psinfo
->pr_psargs
[len
] = 0;
2344 psinfo
->pr_pid
= getpid();
2345 psinfo
->pr_ppid
= getppid();
2346 psinfo
->pr_pgrp
= getpgrp();
2347 psinfo
->pr_sid
= getsid(0);
2348 psinfo
->pr_uid
= getuid();
2349 psinfo
->pr_gid
= getgid();
2351 filename
= strdup(ts
->bprm
->filename
);
2352 base_filename
= strdup(basename(filename
));
2353 (void) strncpy(psinfo
->pr_fname
, base_filename
,
2354 sizeof(psinfo
->pr_fname
));
2355 free(base_filename
);
2359 bswap_psinfo(psinfo
);
2364 static void fill_auxv_note(struct memelfnote
*note
, const TaskState
*ts
)
2366 elf_addr_t auxv
= (elf_addr_t
)ts
->info
->saved_auxv
;
2367 elf_addr_t orig_auxv
= auxv
;
2373 * Auxiliary vector is stored in target process stack. It contains
2374 * {type, value} pairs that we need to dump into note. This is not
2375 * strictly necessary but we do it here for sake of completeness.
2378 /* find out lenght of the vector, AT_NULL is terminator */
2381 get_user_ual(val
, auxv
);
2383 auxv
+= 2 * sizeof (elf_addr_t
);
2384 } while (val
!= AT_NULL
);
2385 len
= i
* sizeof (elf_addr_t
);
2387 /* read in whole auxv vector and copy it to memelfnote */
2388 ptr
= lock_user(VERIFY_READ
, orig_auxv
, len
, 0);
2390 fill_note(note
, "CORE", NT_AUXV
, len
, ptr
);
2391 unlock_user(ptr
, auxv
, len
);
2396 * Constructs name of coredump file. We have following convention
2398 * qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
2400 * Returns 0 in case of success, -1 otherwise (errno is set).
2402 static int core_dump_filename(const TaskState
*ts
, char *buf
,
2406 char *filename
= NULL
;
2407 char *base_filename
= NULL
;
2411 assert(bufsize
>= PATH_MAX
);
2413 if (gettimeofday(&tv
, NULL
) < 0) {
2414 (void) fprintf(stderr
, "unable to get current timestamp: %s",
2419 filename
= strdup(ts
->bprm
->filename
);
2420 base_filename
= strdup(basename(filename
));
2421 (void) strftime(timestamp
, sizeof (timestamp
), "%Y%m%d-%H%M%S",
2422 localtime_r(&tv
.tv_sec
, &tm
));
2423 (void) snprintf(buf
, bufsize
, "qemu_%s_%s_%d.core",
2424 base_filename
, timestamp
, (int)getpid());
2425 free(base_filename
);
2431 static int dump_write(int fd
, const void *ptr
, size_t size
)
2433 const char *bufp
= (const char *)ptr
;
2434 ssize_t bytes_written
, bytes_left
;
2435 struct rlimit dumpsize
;
2439 getrlimit(RLIMIT_CORE
, &dumpsize
);
2440 if ((pos
= lseek(fd
, 0, SEEK_CUR
))==-1) {
2441 if (errno
== ESPIPE
) { /* not a seekable stream */
2447 if (dumpsize
.rlim_cur
<= pos
) {
2449 } else if (dumpsize
.rlim_cur
== RLIM_INFINITY
) {
2452 size_t limit_left
=dumpsize
.rlim_cur
- pos
;
2453 bytes_left
= limit_left
>= size
? size
: limit_left
;
2458 * In normal conditions, single write(2) should do but
2459 * in case of socket etc. this mechanism is more portable.
2462 bytes_written
= write(fd
, bufp
, bytes_left
);
2463 if (bytes_written
< 0) {
2467 } else if (bytes_written
== 0) { /* eof */
2470 bufp
+= bytes_written
;
2471 bytes_left
-= bytes_written
;
2472 } while (bytes_left
> 0);
2477 static int write_note(struct memelfnote
*men
, int fd
)
2481 en
.n_namesz
= men
->namesz
;
2482 en
.n_type
= men
->type
;
2483 en
.n_descsz
= men
->datasz
;
2489 if (dump_write(fd
, &en
, sizeof(en
)) != 0)
2491 if (dump_write(fd
, men
->name
, men
->namesz_rounded
) != 0)
2493 if (dump_write(fd
, men
->data
, men
->datasz
) != 0)
2499 static void fill_thread_info(struct elf_note_info
*info
, const CPUState
*env
)
2501 TaskState
*ts
= (TaskState
*)env
->opaque
;
2502 struct elf_thread_status
*ets
;
2504 ets
= qemu_mallocz(sizeof (*ets
));
2505 ets
->num_notes
= 1; /* only prstatus is dumped */
2506 fill_prstatus(&ets
->prstatus
, ts
, 0);
2507 elf_core_copy_regs(&ets
->prstatus
.pr_reg
, env
);
2508 fill_note(&ets
->notes
[0], "CORE", NT_PRSTATUS
, sizeof (ets
->prstatus
),
2511 QTAILQ_INSERT_TAIL(&info
->thread_list
, ets
, ets_link
);
2513 info
->notes_size
+= note_size(&ets
->notes
[0]);
2516 static int fill_note_info(struct elf_note_info
*info
,
2517 long signr
, const CPUState
*env
)
2520 CPUState
*cpu
= NULL
;
2521 TaskState
*ts
= (TaskState
*)env
->opaque
;
2524 (void) memset(info
, 0, sizeof (*info
));
2526 QTAILQ_INIT(&info
->thread_list
);
2528 info
->notes
= qemu_mallocz(NUMNOTES
* sizeof (struct memelfnote
));
2529 if (info
->notes
== NULL
)
2531 info
->prstatus
= qemu_mallocz(sizeof (*info
->prstatus
));
2532 if (info
->prstatus
== NULL
)
2534 info
->psinfo
= qemu_mallocz(sizeof (*info
->psinfo
));
2535 if (info
->prstatus
== NULL
)
2539 * First fill in status (and registers) of current thread
2540 * including process info & aux vector.
2542 fill_prstatus(info
->prstatus
, ts
, signr
);
2543 elf_core_copy_regs(&info
->prstatus
->pr_reg
, env
);
2544 fill_note(&info
->notes
[0], "CORE", NT_PRSTATUS
,
2545 sizeof (*info
->prstatus
), info
->prstatus
);
2546 fill_psinfo(info
->psinfo
, ts
);
2547 fill_note(&info
->notes
[1], "CORE", NT_PRPSINFO
,
2548 sizeof (*info
->psinfo
), info
->psinfo
);
2549 fill_auxv_note(&info
->notes
[2], ts
);
2552 info
->notes_size
= 0;
2553 for (i
= 0; i
< info
->numnote
; i
++)
2554 info
->notes_size
+= note_size(&info
->notes
[i
]);
2556 /* read and fill status of all threads */
2558 for (cpu
= first_cpu
; cpu
!= NULL
; cpu
= cpu
->next_cpu
) {
2559 if (cpu
== thread_env
)
2561 fill_thread_info(info
, cpu
);
2568 static void free_note_info(struct elf_note_info
*info
)
2570 struct elf_thread_status
*ets
;
2572 while (!QTAILQ_EMPTY(&info
->thread_list
)) {
2573 ets
= QTAILQ_FIRST(&info
->thread_list
);
2574 QTAILQ_REMOVE(&info
->thread_list
, ets
, ets_link
);
2578 qemu_free(info
->prstatus
);
2579 qemu_free(info
->psinfo
);
2580 qemu_free(info
->notes
);
2583 static int write_note_info(struct elf_note_info
*info
, int fd
)
2585 struct elf_thread_status
*ets
;
2588 /* write prstatus, psinfo and auxv for current thread */
2589 for (i
= 0; i
< info
->numnote
; i
++)
2590 if ((error
= write_note(&info
->notes
[i
], fd
)) != 0)
2593 /* write prstatus for each thread */
2594 for (ets
= info
->thread_list
.tqh_first
; ets
!= NULL
;
2595 ets
= ets
->ets_link
.tqe_next
) {
2596 if ((error
= write_note(&ets
->notes
[0], fd
)) != 0)
2604 * Write out ELF coredump.
2606 * See documentation of ELF object file format in:
2607 * http://www.caldera.com/developers/devspecs/gabi41.pdf
2609 * Coredump format in linux is following:
2611 * 0 +----------------------+ \
2612 * | ELF header | ET_CORE |
2613 * +----------------------+ |
2614 * | ELF program headers | |--- headers
2615 * | - NOTE section | |
2616 * | - PT_LOAD sections | |
2617 * +----------------------+ /
2622 * +----------------------+ <-- aligned to target page
2623 * | Process memory dump |
2628 * +----------------------+
2630 * NT_PRSTATUS -> struct elf_prstatus (per thread)
2631 * NT_PRSINFO -> struct elf_prpsinfo
2632 * NT_AUXV is array of { type, value } pairs (see fill_auxv_note()).
2634 * Format follows System V format as close as possible. Current
2635 * version limitations are as follows:
2636 * - no floating point registers are dumped
2638 * Function returns 0 in case of success, negative errno otherwise.
2640 * TODO: make this work also during runtime: it should be
2641 * possible to force coredump from running process and then
2642 * continue processing. For example qemu could set up SIGUSR2
2643 * handler (provided that target process haven't registered
2644 * handler for that) that does the dump when signal is received.
2646 static int elf_core_dump(int signr
, const CPUState
*env
)
2648 const TaskState
*ts
= (const TaskState
*)env
->opaque
;
2649 struct vm_area_struct
*vma
= NULL
;
2650 char corefile
[PATH_MAX
];
2651 struct elf_note_info info
;
2653 struct elf_phdr phdr
;
2654 struct rlimit dumpsize
;
2655 struct mm_struct
*mm
= NULL
;
2656 off_t offset
= 0, data_offset
= 0;
2661 getrlimit(RLIMIT_CORE
, &dumpsize
);
2662 if (dumpsize
.rlim_cur
== 0)
2665 if (core_dump_filename(ts
, corefile
, sizeof (corefile
)) < 0)
2668 if ((fd
= open(corefile
, O_WRONLY
| O_CREAT
,
2669 S_IRUSR
|S_IWUSR
|S_IRGRP
|S_IROTH
)) < 0)
2673 * Walk through target process memory mappings and
2674 * set up structure containing this information. After
2675 * this point vma_xxx functions can be used.
2677 if ((mm
= vma_init()) == NULL
)
2680 walk_memory_regions(mm
, vma_walker
);
2681 segs
= vma_get_mapping_count(mm
);
2684 * Construct valid coredump ELF header. We also
2685 * add one more segment for notes.
2687 fill_elf_header(&elf
, segs
+ 1, ELF_MACHINE
, 0);
2688 if (dump_write(fd
, &elf
, sizeof (elf
)) != 0)
2691 /* fill in in-memory version of notes */
2692 if (fill_note_info(&info
, signr
, env
) < 0)
2695 offset
+= sizeof (elf
); /* elf header */
2696 offset
+= (segs
+ 1) * sizeof (struct elf_phdr
); /* program headers */
2698 /* write out notes program header */
2699 fill_elf_note_phdr(&phdr
, info
.notes_size
, offset
);
2701 offset
+= info
.notes_size
;
2702 if (dump_write(fd
, &phdr
, sizeof (phdr
)) != 0)
2706 * ELF specification wants data to start at page boundary so
2709 offset
= roundup(offset
, ELF_EXEC_PAGESIZE
);
2712 * Write program headers for memory regions mapped in
2713 * the target process.
2715 for (vma
= vma_first(mm
); vma
!= NULL
; vma
= vma_next(vma
)) {
2716 (void) memset(&phdr
, 0, sizeof (phdr
));
2718 phdr
.p_type
= PT_LOAD
;
2719 phdr
.p_offset
= offset
;
2720 phdr
.p_vaddr
= vma
->vma_start
;
2722 phdr
.p_filesz
= vma_dump_size(vma
);
2723 offset
+= phdr
.p_filesz
;
2724 phdr
.p_memsz
= vma
->vma_end
- vma
->vma_start
;
2725 phdr
.p_flags
= vma
->vma_flags
& PROT_READ
? PF_R
: 0;
2726 if (vma
->vma_flags
& PROT_WRITE
)
2727 phdr
.p_flags
|= PF_W
;
2728 if (vma
->vma_flags
& PROT_EXEC
)
2729 phdr
.p_flags
|= PF_X
;
2730 phdr
.p_align
= ELF_EXEC_PAGESIZE
;
2732 dump_write(fd
, &phdr
, sizeof (phdr
));
2736 * Next we write notes just after program headers. No
2737 * alignment needed here.
2739 if (write_note_info(&info
, fd
) < 0)
2742 /* align data to page boundary */
2743 data_offset
= lseek(fd
, 0, SEEK_CUR
);
2744 data_offset
= TARGET_PAGE_ALIGN(data_offset
);
2745 if (lseek(fd
, data_offset
, SEEK_SET
) != data_offset
)
2749 * Finally we can dump process memory into corefile as well.
2751 for (vma
= vma_first(mm
); vma
!= NULL
; vma
= vma_next(vma
)) {
2755 end
= vma
->vma_start
+ vma_dump_size(vma
);
2757 for (addr
= vma
->vma_start
; addr
< end
;
2758 addr
+= TARGET_PAGE_SIZE
) {
2759 char page
[TARGET_PAGE_SIZE
];
2763 * Read in page from target process memory and
2764 * write it to coredump file.
2766 error
= copy_from_user(page
, addr
, sizeof (page
));
2768 (void) fprintf(stderr
, "unable to dump " TARGET_ABI_FMT_lx
"\n",
2773 if (dump_write(fd
, page
, TARGET_PAGE_SIZE
) < 0)
2779 free_note_info(&info
);
2789 #endif /* USE_ELF_CORE_DUMP */
2791 static int load_aout_interp(void * exptr
, int interp_fd
)
2793 printf("a.out interpreter not yet supported\n");
2797 void do_init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
2799 init_thread(regs
, infop
);