/* This is the Linux kernel elf-loading code, ported into user space */

#include <sys/resource.h>

#define ELF_OSABI   ELFOSABI_SYSV
/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
    ADDR_NO_RANDOMIZE  = 0x0040000,  /* disable randomization of VA space */
    FDPIC_FUNCPTRS     = 0x0080000,  /* userspace function ptrs point to descriptors */
    MMAP_PAGE_ZERO     = 0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC  = 0x0400000,
    ADDR_LIMIT_32BIT   = 0x0800000,
    SHORT_INODE        = 0x1000000,
    WHOLE_SECONDS      = 0x2000000,
    STICKY_TIMEOUTS    = 0x4000000,
    ADDR_LIMIT_3GB     = 0x8000000,
};
/*
 * Personality types.
 *
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
enum {
    PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4        = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3        = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3     = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5        = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386    = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4       = 0x0005 | STICKY_TIMEOUTS,
    PER_SUNOS       = 0x0006 | STICKY_TIMEOUTS,
    PER_XENIX       = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32      = 0x0009 | STICKY_TIMEOUTS,  /* IRIX5 32-bit */
    PER_IRIXN32     = 0x000a | STICKY_TIMEOUTS,  /* IRIX6 new 32-bit */
    PER_IRIX64      = 0x000b | STICKY_TIMEOUTS,  /* IRIX6 64-bit */
    PER_SOLARIS     = 0x000d | STICKY_TIMEOUTS,
    PER_UW7         = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4        = 0x000f,                    /* OSF/1 v4 */
};
/*
 * Return the base personality without flags.
 */
#define personality(pers)       (pers & PER_MASK)
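/*
 * Illustrative example (not part of the original source): a personality
 * value such as PER_SVR4 is the base identifier 0x0001 with the
 * STICKY_TIMEOUTS and MMAP_PAGE_ZERO emulation flags OR'ed into its top
 * bytes, so stripping the flags recovers the base identifier:
 *
 *     int base  = personality(PER_SVR4);       // 0x0001
 *     int page0 = PER_SVR4 & MMAP_PAGE_ZERO;   // non-zero: page 0 is mapped
 */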
/* this flag is ineffective under Linux too, so it should be deleted */
#ifndef MAP_DENYWRITE
#define MAP_DENYWRITE 0
#endif
/* should probably go in elf.h */
#include "elf.h"

typedef target_ulong    target_elf_greg_t;
#ifdef USE_UID16
typedef uint16_t        target_uid_t;
typedef uint16_t        target_gid_t;
#else
typedef uint32_t        target_uid_t;
typedef uint32_t        target_gid_t;
#endif
typedef int32_t         target_pid_t;
#ifdef TARGET_I386

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = (thread_env->cpuid_version >> 8) & 0xff;

    if (family > 6)
        family = 6;
    if (family >= 3)
        elf_platform[1] = '0' + family;
    return elf_platform;
}

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    return thread_env->cpuid_features;
}
#ifdef TARGET_X86_64
#define ELF_START_MMAP 0x2aaaaab000ULL
#define elf_check_arch(x) ( ((x) == ELF_ARCH) )

#define ELF_CLASS      ELFCLASS64
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_X86_64

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
}
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 29 as there should be place for
 * TRAPNO and ERR "registers" as well but linux doesn't dump
 * those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
{
    (*regs)[0] = env->regs[15];
    (*regs)[1] = env->regs[14];
    (*regs)[2] = env->regs[13];
    (*regs)[3] = env->regs[12];
    (*regs)[4] = env->regs[R_EBP];
    (*regs)[5] = env->regs[R_EBX];
    (*regs)[6] = env->regs[11];
    (*regs)[7] = env->regs[10];
    (*regs)[8] = env->regs[9];
    (*regs)[9] = env->regs[8];
    (*regs)[10] = env->regs[R_EAX];
    (*regs)[11] = env->regs[R_ECX];
    (*regs)[12] = env->regs[R_EDX];
    (*regs)[13] = env->regs[R_ESI];
    (*regs)[14] = env->regs[R_EDI];
    (*regs)[15] = env->regs[R_EAX]; /* XXX */
    (*regs)[16] = env->eip;
    (*regs)[17] = env->segs[R_CS].selector & 0xffff;
    (*regs)[18] = env->eflags;
    (*regs)[19] = env->regs[R_ESP];
    (*regs)[20] = env->segs[R_SS].selector & 0xffff;
    (*regs)[21] = env->segs[R_FS].selector & 0xffff;
    (*regs)[22] = env->segs[R_GS].selector & 0xffff;
    (*regs)[23] = env->segs[R_DS].selector & 0xffff;
    (*regs)[24] = env->segs[R_ES].selector & 0xffff;
    (*regs)[25] = env->segs[R_FS].selector & 0xffff;
    (*regs)[26] = env->segs[R_GS].selector & 0xffff;
}
#else /* !TARGET_X86_64 */

#define ELF_START_MMAP 0x80000000

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS       ELFCLASS32
#define ELF_DATA        ELFDATA2LSB
#define ELF_ARCH        EM_386

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a means for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 tells us that we have no such handler.  */
    regs->edx = 0;
}
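/*
 * Hypothetical sketch of what the SVR4 rule above means on the guest side
 * (this code is not part of QEMU): a C startup routine would register the
 * function passed in %edx before calling main, e.g.
 *
 *     void _start_c(int argc, char **argv, char **envp,
 *                   void (*rtld_fini)(void))
 *     {
 *         if (rtld_fini)
 *             atexit(rtld_fini);   // lets the dynamic linker run DT_FINI
 *         exit(main(argc, argv, envp));
 *     }
 *
 * Because init_thread() passes no such handler, %edx is left as 0.
 */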
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 19 as there should be place for
 * TRAPNO and ERR "registers" as well but linux doesn't dump
 * those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
{
    (*regs)[0] = env->regs[R_EBX];
    (*regs)[1] = env->regs[R_ECX];
    (*regs)[2] = env->regs[R_EDX];
    (*regs)[3] = env->regs[R_ESI];
    (*regs)[4] = env->regs[R_EDI];
    (*regs)[5] = env->regs[R_EBP];
    (*regs)[6] = env->regs[R_EAX];
    (*regs)[7] = env->segs[R_DS].selector & 0xffff;
    (*regs)[8] = env->segs[R_ES].selector & 0xffff;
    (*regs)[9] = env->segs[R_FS].selector & 0xffff;
    (*regs)[10] = env->segs[R_GS].selector & 0xffff;
    (*regs)[11] = env->regs[R_EAX]; /* XXX */
    (*regs)[12] = env->eip;
    (*regs)[13] = env->segs[R_CS].selector & 0xffff;
    (*regs)[14] = env->eflags;
    (*regs)[15] = env->regs[R_ESP];
    (*regs)[16] = env->segs[R_SS].selector & 0xffff;
}
#endif /* !TARGET_X86_64 */

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif /* TARGET_I386 */
#ifdef TARGET_ARM

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_ARM )

#define ELF_CLASS       ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
#define ELF_ARCH        EM_ARM

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;

    memset(regs, 0, sizeof(*regs));
    regs->ARM_cpsr = 0x10;
    if (infop->entry & 1)
        regs->ARM_cpsr |= CPSR_T;
    regs->ARM_pc = infop->entry & 0xfffffffe;
    regs->ARM_sp = infop->start_stack;
    /* FIXME - what to do for failure of get_user()? */
    get_user_ual(regs->ARM_r2, stack + 8); /* envp */
    get_user_ual(regs->ARM_r1, stack + 4); /* argv */
    /* XXX: it seems that r0 is zeroed after! */
    regs->ARM_r0 = 0;
    /* For uClinux PIC binaries.  */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->ARM_r10 = infop->start_data;
}
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
{
    (*regs)[0] = tswapl(env->regs[0]);
    (*regs)[1] = tswapl(env->regs[1]);
    (*regs)[2] = tswapl(env->regs[2]);
    (*regs)[3] = tswapl(env->regs[3]);
    (*regs)[4] = tswapl(env->regs[4]);
    (*regs)[5] = tswapl(env->regs[5]);
    (*regs)[6] = tswapl(env->regs[6]);
    (*regs)[7] = tswapl(env->regs[7]);
    (*regs)[8] = tswapl(env->regs[8]);
    (*regs)[9] = tswapl(env->regs[9]);
    (*regs)[10] = tswapl(env->regs[10]);
    (*regs)[11] = tswapl(env->regs[11]);
    (*regs)[12] = tswapl(env->regs[12]);
    (*regs)[13] = tswapl(env->regs[13]);
    (*regs)[14] = tswapl(env->regs[14]);
    (*regs)[15] = tswapl(env->regs[15]);

    (*regs)[16] = tswapl(cpsr_read((CPUState *)env));
    (*regs)[17] = tswapl(env->regs[0]); /* XXX */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096
enum {
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
    ARM_HWCAP_ARM_JAVA      = 1 << 8,
    ARM_HWCAP_ARM_IWMMXT    = 1 << 9,
    ARM_HWCAP_ARM_THUMBEE   = 1 << 10,
    ARM_HWCAP_ARM_NEON      = 1 << 11,
    ARM_HWCAP_ARM_VFPv3     = 1 << 12,
    ARM_HWCAP_ARM_VFPv3D16  = 1 << 13,
};

#define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF              \
                   | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT     \
                   | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP             \
                   | ARM_HWCAP_ARM_NEON | ARM_HWCAP_ARM_VFPv3 )

#endif /* TARGET_ARM */
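/*
 * For illustration only (not part of this file): a guest program sees the
 * ELF_HWCAP value above through the auxiliary vector, e.g. with glibc:
 *
 *     #include <sys/auxv.h>
 *
 *     unsigned long hwcap = getauxval(AT_HWCAP);
 *     if (hwcap & ARM_HWCAP_ARM_NEON) {
 *         // select a NEON-optimised code path
 *     }
 */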
#ifdef TARGET_SPARC
#ifdef TARGET_SPARC64

#define ELF_START_MMAP 0x80000000

#ifndef TARGET_ABI32
#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
#else
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
#endif

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_SPARCV9

#define STACK_BIAS              2047

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
#ifdef TARGET_ABI32
    regs->u_regs[14] = infop->start_stack - 16 * 4;
#else
    if (personality(infop->personality) == PER_LINUX32)
        regs->u_regs[14] = infop->start_stack - 16 * 4;
    else
        regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
#endif
}
#else /* !TARGET_SPARC64 */

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARC )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_SPARC

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
}

#endif /* !TARGET_SPARC64 */
#endif /* TARGET_SPARC */
#ifdef TARGET_PPC

#define ELF_START_MMAP 0x80000000

#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS       ELFCLASS64

#else

#define elf_check_arch(x) ( (x) == EM_PPC )

#define ELF_CLASS       ELFCLASS32

#endif

#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
#define ELF_ARCH        EM_PPC

/* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
   See arch/powerpc/include/asm/cputable.h.  */
enum {
    QEMU_PPC_FEATURE_32 = 0x80000000,
    QEMU_PPC_FEATURE_64 = 0x40000000,
    QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
    QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
    QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
    QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
    QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
    QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
    QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
    QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
    QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
    QEMU_PPC_FEATURE_NO_TB = 0x00100000,
    QEMU_PPC_FEATURE_POWER4 = 0x00080000,
    QEMU_PPC_FEATURE_POWER5 = 0x00040000,
    QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
    QEMU_PPC_FEATURE_CELL = 0x00010000,
    QEMU_PPC_FEATURE_BOOKE = 0x00008000,
    QEMU_PPC_FEATURE_SMT = 0x00004000,
    QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
    QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
    QEMU_PPC_FEATURE_PA6T = 0x00000800,
    QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
    QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
    QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
    QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
    QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,

    QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
    QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
};
#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    CPUState *e = thread_env;
    uint32_t features = 0;

    /* We don't have to be terribly complete here; the high points are
       Altivec/FP/SPE support.  Anything else is just a bonus.  */
#define GET_FEATURE(flag, feature)                                      \
    do { if (e->insns_flags & flag) features |= feature; } while(0)
    GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
    GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
    GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
    GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
    GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
    GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
    GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
    GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
#undef GET_FEATURE

    return features;
}
/*
 * We need to put in some extra aux table entries to tell glibc what
 * the cache block size is, so it can use the dcbz instruction safely.
 */
#define AT_DCACHEBSIZE          19
#define AT_ICACHEBSIZE          20
#define AT_UCACHEBSIZE          21
/* A special ignored type value for PPC, for glibc compatibility.  */
#define AT_IGNOREPPC            22
/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS       5
#define ARCH_DLINFO                                     \
do {                                                    \
        NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20);              \
        NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20);              \
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                 \
        /*                                              \
         * Now handle glibc compatibility.              \
         */                                             \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
} while (0)
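/*
 * Sketch of the effect of ARCH_DLINFO, assuming the NEW_AUX_ENT() helper
 * defined later in create_elf_tables(): each invocation pushes one
 * {id, value} pair of target words onto the guest stack, so the block
 * above contributes five auxiliary entries,
 *
 *     { AT_DCACHEBSIZE, 0x20 }
 *     { AT_ICACHEBSIZE, 0x20 }
 *     { AT_UCACHEBSIZE, 0    }
 *     { AT_IGNOREPPC,   AT_IGNOREPPC }
 *     { AT_IGNOREPPC,   AT_IGNOREPPC }
 *
 * which is why DLINFO_ARCH_ITEMS is 5.
 */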
static inline void init_thread(struct target_pt_regs *_regs,
                               struct image_info *infop)
{
    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    _regs->gpr[2] = ldq_raw(infop->entry + 8) + infop->load_addr;
    infop->entry = ldq_raw(infop->entry) + infop->load_addr;
#endif
    _regs->nip = infop->entry;
}
/* See linux kernel: arch/powerpc/include/asm/elf.h.  */
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
{
    int i;
    target_ulong ccr = 0;

    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        (*regs)[i] = tswapl(env->gpr[i]);
    }

    (*regs)[32] = tswapl(env->nip);
    (*regs)[33] = tswapl(env->msr);
    (*regs)[35] = tswapl(env->ctr);
    (*regs)[36] = tswapl(env->lr);
    (*regs)[37] = tswapl(env->xer);

    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        ccr |= env->crf[i] << (32 - ((i + 1) * 4));
    }
    (*regs)[38] = tswapl(ccr);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif /* TARGET_PPC */
#ifdef TARGET_MIPS

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_MIPS )

#ifdef TARGET_MIPS64
#define ELF_CLASS   ELFCLASS64
#else
#define ELF_CLASS   ELFCLASS32
#endif
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
#define ELF_ARCH    EM_MIPS

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->cp0_status = 2 << CP0St_KSU;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
}

/* See linux kernel: arch/mips/include/asm/elf.h.  */
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
/* See linux kernel: arch/mips/include/asm/reg.h.  */
enum {
    TARGET_EF_R26 = TARGET_EF_R0 + 26,
    TARGET_EF_R27 = TARGET_EF_R0 + 27,
    TARGET_EF_LO = TARGET_EF_R0 + 32,
    TARGET_EF_HI = TARGET_EF_R0 + 33,
    TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
    TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
    TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
    TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
};
/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
{
    int i;

    for (i = 0; i < TARGET_EF_R0; i++) {
        (*regs)[i] = 0;
    }
    (*regs)[TARGET_EF_R0] = 0;

    for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
        (*regs)[TARGET_EF_R0 + i] = tswapl(env->active_tc.gpr[i]);
    }

    (*regs)[TARGET_EF_R26] = 0;
    (*regs)[TARGET_EF_R27] = 0;
    (*regs)[TARGET_EF_LO] = tswapl(env->active_tc.LO[0]);
    (*regs)[TARGET_EF_HI] = tswapl(env->active_tc.HI[0]);
    (*regs)[TARGET_EF_CP0_EPC] = tswapl(env->active_tc.PC);
    (*regs)[TARGET_EF_CP0_BADVADDR] = tswapl(env->CP0_BadVAddr);
    (*regs)[TARGET_EF_CP0_STATUS] = tswapl(env->CP0_Status);
    (*regs)[TARGET_EF_CP0_CAUSE] = tswapl(env->CP0_Cause);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

#endif /* TARGET_MIPS */
#ifdef TARGET_MICROBLAZE

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_XILINX_MICROBLAZE )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_XILINX_MICROBLAZE

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->r1 = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE        4096

#endif /* TARGET_MICROBLAZE */
#ifdef TARGET_SH4

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SH )

#define ELF_CLASS ELFCLASS32
#define ELF_DATA  ELFDATA2LSB
#define ELF_ARCH  EM_SH

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack;
}

/* See linux kernel: arch/sh/include/asm/elf.h.  */
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/sh/include/asm/ptrace.h.  */
enum {
    TARGET_REG_PC = 16,
    TARGET_REG_PR = 17,
    TARGET_REG_SR = 18,
    TARGET_REG_GBR = 19,
    TARGET_REG_MACH = 20,
    TARGET_REG_MACL = 21,
    TARGET_REG_SYSCALL = 22
};
static inline void elf_core_copy_regs(target_elf_gregset_t *regs,
                                      const CPUState *env)
{
    int i;

    for (i = 0; i < 16; i++) {
        (*regs)[i] = tswapl(env->gregs[i]);
    }

    (*regs)[TARGET_REG_PC] = tswapl(env->pc);
    (*regs)[TARGET_REG_PR] = tswapl(env->pr);
    (*regs)[TARGET_REG_SR] = tswapl(env->sr);
    (*regs)[TARGET_REG_GBR] = tswapl(env->gbr);
    (*regs)[TARGET_REG_MACH] = tswapl(env->mach);
    (*regs)[TARGET_REG_MACL] = tswapl(env->macl);
    (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

#endif /* TARGET_SH4 */
#ifdef TARGET_CRIS

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_CRIS )

#define ELF_CLASS ELFCLASS32
#define ELF_DATA  ELFDATA2LSB
#define ELF_ARCH  EM_CRIS

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->erp = infop->entry;
}

#define ELF_EXEC_PAGESIZE        8192

#endif /* TARGET_CRIS */
#ifdef TARGET_M68K

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_68K )

#define ELF_CLASS       ELFCLASS32
#define ELF_DATA        ELFDATA2MSB
#define ELF_ARCH        EM_68K

/* ??? Does this need to do anything?
   #define ELF_PLAT_INIT(_r) */

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->usp = infop->start_stack;
    regs->pc = infop->entry;
}

/* See linux kernel: arch/m68k/include/asm/elf.h.  */
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
{
    (*regs)[0] = tswapl(env->dregs[1]);
    (*regs)[1] = tswapl(env->dregs[2]);
    (*regs)[2] = tswapl(env->dregs[3]);
    (*regs)[3] = tswapl(env->dregs[4]);
    (*regs)[4] = tswapl(env->dregs[5]);
    (*regs)[5] = tswapl(env->dregs[6]);
    (*regs)[6] = tswapl(env->dregs[7]);
    (*regs)[7] = tswapl(env->aregs[0]);
    (*regs)[8] = tswapl(env->aregs[1]);
    (*regs)[9] = tswapl(env->aregs[2]);
    (*regs)[10] = tswapl(env->aregs[3]);
    (*regs)[11] = tswapl(env->aregs[4]);
    (*regs)[12] = tswapl(env->aregs[5]);
    (*regs)[13] = tswapl(env->aregs[6]);
    (*regs)[14] = tswapl(env->dregs[0]);
    (*regs)[15] = tswapl(env->aregs[7]);
    (*regs)[16] = tswapl(env->dregs[0]); /* FIXME: orig_d0 */
    (*regs)[17] = tswapl(env->sr);
    (*regs)[18] = tswapl(env->pc);
    (*regs)[19] = 0; /* FIXME: regs->format | regs->vector */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       8192
#ifdef TARGET_ALPHA

#define ELF_START_MMAP (0x30000000000ULL)

#define elf_check_arch(x) ( (x) == ELF_ARCH )

#define ELF_CLASS      ELFCLASS64
#define ELF_DATA       ELFDATA2MSB
#define ELF_ARCH       EM_ALPHA

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->usp = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE        8192

#endif /* TARGET_ALPHA */
#ifndef ELF_PLATFORM
#define ELF_PLATFORM (NULL)
#endif

#ifdef TARGET_ABI32
#undef ELF_CLASS
#define ELF_CLASS ELFCLASS32
#undef bswaptls
#define bswaptls(ptr) bswap32s(ptr)
#endif

struct exec
{
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
};

#define N_MAGIC(exec) ((exec).a_info & 0xffff)
/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)

/* max code+data+bss+brk space allocated to ET_DYN executables */
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)

/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
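/*
 * Worked example (illustrative): with a 4096-byte TARGET_ELF_EXEC_PAGESIZE,
 *
 *     TARGET_ELF_PAGESTART(0x0804a123)  == 0x0804a000
 *     TARGET_ELF_PAGEOFFSET(0x0804a123) == 0x123
 *
 * i.e. the macros split a virtual address into its page base and the
 * offset within that page, which is what the mmap() calls below need.
 */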
#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

#define DLINFO_ITEMS 12

static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
{
    memcpy(to, from, n);
}

static int load_aout_interp(void * exptr, int interp_fd);
#ifdef BSWAP_NEEDED
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
}

static void bswap_phdr(struct elf_phdr *phdr)
{
    bswap32s(&phdr->p_type);            /* Segment type */
    bswaptls(&phdr->p_offset);          /* Segment file offset */
    bswaptls(&phdr->p_vaddr);           /* Segment virtual address */
    bswaptls(&phdr->p_paddr);           /* Segment physical address */
    bswaptls(&phdr->p_filesz);          /* Segment size in file */
    bswaptls(&phdr->p_memsz);           /* Segment size in memory */
    bswap32s(&phdr->p_flags);           /* Segment flags */
    bswaptls(&phdr->p_align);           /* Segment alignment */
}

static void bswap_shdr(struct elf_shdr *shdr)
{
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswaptls(&shdr->sh_flags);
    bswaptls(&shdr->sh_addr);
    bswaptls(&shdr->sh_offset);
    bswaptls(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswaptls(&shdr->sh_addralign);
    bswaptls(&shdr->sh_entsize);
}

static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}
#endif /* BSWAP_NEEDED */

#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(int, const CPUState *);

#ifdef BSWAP_NEEDED
static void bswap_note(struct elf_note *en)
{
    bswap32s(&en->n_namesz);
    bswap32s(&en->n_descsz);
    bswap32s(&en->n_type);
}
#endif /* BSWAP_NEEDED */

#endif /* USE_ELF_CORE_DUMP */
/*
 * 'copy_elf_strings()' copies argument/envelope strings from user
 * memory to free pages in kernel mem.  These are in a format ready
 * to be put directly into the top of new user memory.
 */
static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
                                  abi_ulong p)
{
    char *tmp, *tmp1, *pag = NULL;
    int len, offset = 0;

    if (!p) {
        return 0;       /* bullet-proofing */
    }
    while (argc-- > 0) {
        tmp = argv[argc];
        if (!tmp) {
            fprintf(stderr, "VFS: argc is wrong");
            exit(-1);
        }
        tmp1 = tmp;
        while (*tmp++);
        len = tmp - tmp1;
        if (p < len) {  /* this shouldn't happen - 128kB */
            return 0;
        }
        while (len) {
            --p; --tmp; --len;
            if (--offset < 0) {
                offset = p % TARGET_PAGE_SIZE;
                pag = (char *)page[p/TARGET_PAGE_SIZE];
                if (!pag) {
                    pag = (char *)malloc(TARGET_PAGE_SIZE);
                    memset(pag, 0, TARGET_PAGE_SIZE);
                    page[p/TARGET_PAGE_SIZE] = pag;
                    if (!pag)
                        return 0;
                }
            }
            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;
            }
            else {
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
            }
        }
    }
    return p;
}
static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
                                 struct image_info *info)
{
    abi_ulong stack_base, size, error;
    int i;

    /* Create enough stack to hold everything.  If we don't use
     * it for args, we'll use it for something else...
     */
    size = guest_stack_size;
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS,
                        -1, 0);
    if (error == -1) {
        perror("mmap stack");
        exit(-1);
    }

    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    p += stack_base;

    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
        if (bprm->page[i]) {
            /* FIXME - check return value of memcpy_to_target() for failure */
            memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
            free(bprm->page[i]);
        }
        stack_base += TARGET_PAGE_SIZE;
    }
    return p;
}
static void set_brk(abi_ulong start, abi_ulong end)
{
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);
    if (end <= start)
        return;
    if (target_mmap(start, end - start,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
        perror("cannot mmap brk");
        exit(-1);
    }
}
/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss).  This would contain the junk from the file that
   should not be in memory. */
static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
{
    abi_ulong nbyte;

    if (elf_bss >= last_bss)
        return;

    /* XXX: this is really a hack : if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped.  A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known. */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        abi_ulong end_addr, end_addr1;
        end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
            ~(qemu_real_host_page_size - 1);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)g2h(end_addr1), end_addr - end_addr1,
                 PROT_READ|PROT_WRITE|PROT_EXEC,
                 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        }
    }

    nbyte = elf_bss & (qemu_host_page_size-1);
    if (nbyte) {
        nbyte = qemu_host_page_size - nbyte;
        do {
            /* FIXME - what to do if put_user() fails? */
            put_user_u8(0, elf_bss);
            elf_bss++;
        } while (--nbyte);
    }
}
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr * exec,
                                   abi_ulong load_addr,
                                   abi_ulong load_bias,
                                   abi_ulong interp_load_addr, int ibcs,
                                   struct image_info *info)
{
    abi_ulong sp;
    int size;
    abi_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(elf_addr_t);

    sp = p;
    u_platform = 0;
    k_platform = ELF_PLATFORM;
    if (k_platform) {
        size_t len = strlen(k_platform) + 1;
        sp -= (len + n - 1) & ~(n - 1);
        u_platform = sp;
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(sp, k_platform, len);
    }
    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp &~ (abi_ulong)15;
    size = (DLINFO_ITEMS + 1) * 2;
    if (k_platform)
        size += 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
#endif
    size += envc + argc + 2;
    size += (!ibcs ? 3 : 1);    /* argc itself */
    size *= n;
    if (size & 15)
        sp -= 16 - (size & 15);

    /* This is correct because Linux defines
     * elf_addr_t as Elf32_Off / Elf64_Off
     */
#define NEW_AUX_ENT(id, val) do {               \
        sp -= n; put_user_ual(val, sp);         \
        sp -= n; put_user_ual(id, sp);          \
    } while(0)

    NEW_AUX_ENT (AT_NULL, 0);

    /* There must be exactly DLINFO_ITEMS entries here.  */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
    if (k_platform)
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

    info->saved_auxv = sp;

    sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
    return sp;
}
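/*
 * Rough picture of the guest stack produced above (layout sketch only;
 * exact padding depends on the target and on loader_build_argptr()):
 *
 *     high addresses:  argument/environment strings, platform string
 *                      auxiliary vector ({id, value} pairs, AT_NULL last)
 *                      envp[] pointers, NULL-terminated
 *                      argv[] pointers, NULL-terminated
 *     sp ->            argc
 *
 * The aux vector was filled in by the NEW_AUX_ENT() calls above;
 * loader_build_argptr() fills in argc, argv[] and envp[].
 */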
static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
                                 int interpreter_fd,
                                 abi_ulong *interp_load_addr)
{
    struct elf_phdr *elf_phdata  =  NULL;
    struct elf_phdr *eppnt;
    abi_ulong load_addr = 0;
    int load_addr_set = 0;
    int retval;
    abi_ulong last_bss, elf_bss;
    abi_ulong error;
    int i;

    elf_bss = 0;
    last_bss = 0;
    error = 0;

#ifdef BSWAP_NEEDED
    bswap_ehdr(interp_elf_ex);
#endif
    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~((abi_ulong)0UL);
    }

    /* Now read in all of the header information */

    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
        return ~(abi_ulong)0UL;

    elf_phdata = (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);

    if (!elf_phdata)
        return ~((abi_ulong)0UL);

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        free(elf_phdata);
        return ~((abi_ulong)0UL);
    }

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
    if (retval >= 0) {
        retval = read(interpreter_fd,
                      (char *) elf_phdata,
                      sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
    }
    if (retval < 0) {
        perror("load_elf_interp");
        exit(-1);
    }

#ifdef BSWAP_NEEDED
    eppnt = elf_phdata;
    for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
        bswap_phdr(eppnt);
    }
#endif

    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid hardcoding the interpreter load
           address in qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,
                            -1, 0);
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }
        load_addr = error;
        load_addr_set = 1;
    }

    eppnt = elf_phdata;
    for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
            int elf_prot = 0;
            abi_ulong vaddr = 0;
            abi_ulong k;

            if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            }
            error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                elf_prot,
                                elf_type,
                                interpreter_fd,
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));

            if (error == -1) {
                /* Real error */
                close(interpreter_fd);
                free(elf_phdata);
                return ~((abi_ulong)0UL);
            }

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
                load_addr = error;
                load_addr_set = 1;
            }

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss) elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss) last_bss = k;
        }

    /* Now use mmap to map the library into memory. */

    close(interpreter_fd);

    /*
     * Now fill out the bss section.  First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last
     * bss page.
     */
    padzero(elf_bss, last_bss);
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss-elf_bss,
                    PROT_READ|PROT_WRITE|PROT_EXEC,
                    MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    }
    free(elf_phdata);

    *interp_load_addr = load_addr;
    return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
}
static int symfind(const void *s0, const void *s1)
{
    struct elf_sym *key = (struct elf_sym *)s0;
    struct elf_sym *sym = (struct elf_sym *)s1;
    int result = 0;

    if (key->st_value < sym->st_value) {
        result = -1;
    } else if (key->st_value >= sym->st_value + sym->st_size) {
        result = 1;
    }
    return result;
}

static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
{
#if ELF_CLASS == ELFCLASS32
    struct elf_sym *syms = s->disas_symtab.elf32;
#else
    struct elf_sym *syms = s->disas_symtab.elf64;
#endif

    /* binary search */
    struct elf_sym key;
    struct elf_sym *sym;

    key.st_value = orig_addr;

    sym = bsearch(&key, syms, s->disas_num_syms, sizeof(*syms), symfind);
    if (sym != NULL) {
        return s->disas_strtab + sym->st_name;
    }

    return "";
}
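/*
 * Illustrative use of the lookup path above (hypothetical values): after
 * load_symbols() has sorted the table, a PC such as 0x1003c that falls
 * inside a function symbol { st_value = 0x10020, st_size = 0x40 } makes
 * bsearch()/symfind() return that symbol, and the function name is then
 * read from the string table at disas_strtab + st_name.
 */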
/* FIXME: This should use elf_ops.h */
static int symcmp(const void *s0, const void *s1)
{
    struct elf_sym *sym0 = (struct elf_sym *)s0;
    struct elf_sym *sym1 = (struct elf_sym *)s1;
    return (sym0->st_value < sym1->st_value)
        ? -1
        : ((sym0->st_value > sym1->st_value) ? 1 : 0);
}
/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
{
    unsigned int i, nsyms;
    struct elf_shdr sechdr, symtab, strtab;
    char *strings;
    struct syminfo *s;
    struct elf_sym *syms;

    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
            return;
#ifdef BSWAP_NEEDED
        bswap_shdr(&sechdr);
#endif
        if (sechdr.sh_type == SHT_SYMTAB) {
            symtab = sechdr;
            lseek(fd, hdr->e_shoff
                  + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab))
                != sizeof(strtab))
                return;
#ifdef BSWAP_NEEDED
            bswap_shdr(&strtab);
#endif
            goto found;
        }
    }
    return; /* Shouldn't happen... */

 found:
    /* Now we know where the strtab and symtab are.  Snarf them. */
    s = malloc(sizeof(*s));
    syms = malloc(symtab.sh_size);
    if (!syms)
        return;
    s->disas_strtab = strings = malloc(strtab.sh_size);
    if (!s->disas_strtab)
        return;

    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, syms, symtab.sh_size) != symtab.sh_size)
        return;

    nsyms = symtab.sh_size / sizeof(struct elf_sym);
    for (i = 0; i < nsyms; ) {
#ifdef BSWAP_NEEDED
        bswap_sym(syms + i);
#endif
        // Throw away entries which we do not need.
        if (syms[i].st_shndx == SHN_UNDEF ||
            syms[i].st_shndx >= SHN_LORESERVE ||
            ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
            nsyms--;
            if (i < nsyms) {
                syms[i] = syms[nsyms];
            }
            continue;
        }
#if defined(TARGET_ARM) || defined (TARGET_MIPS)
        /* The bottom address bit marks a Thumb or MIPS16 symbol.  */
        syms[i].st_value &= ~(target_ulong)1;
#endif
        i++;
    }
    syms = realloc(syms, nsyms * sizeof(*syms));

    qsort(syms, nsyms, sizeof(*syms), symcmp);

    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
        return;
    s->disas_num_syms = nsyms;
#if ELF_CLASS == ELFCLASS32
    s->disas_symtab.elf32 = syms;
    s->lookup_symbol = lookup_symbolxx;
#else
    s->disas_symtab.elf64 = syms;
    s->lookup_symbol = lookup_symbolxx;
#endif
    s->next = syminfos;
    syminfos = s;
}
int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
                    struct image_info * info)
{
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    struct exec interp_ex;
    int interpreter_fd = -1; /* avoid warning */
    abi_ulong load_addr, load_bias;
    int load_addr_set = 0;
    unsigned int interpreter_type = INTERPRETER_NONE;
    unsigned char ibcs2_interpreter;
    int i;
    int retval;
    abi_ulong mapped_addr;
    struct elf_phdr * elf_ppnt;
    struct elf_phdr *elf_phdata;
    abi_ulong elf_bss, k, elf_brk;
    char * elf_interpreter;
    char * passed_p;
    abi_ulong elf_entry, interp_load_addr = 0;
    abi_ulong start_code, end_code, start_data, end_data;
    abi_ulong reloc_func_desc = 0;
    abi_ulong elf_stack;
    char passed_fileno[6];

    ibcs2_interpreter = 0;
    load_addr = 0;
    load_bias = 0;
    elf_ex = *((struct elfhdr *) bprm->buf);          /* exec-header */
#ifdef BSWAP_NEEDED
    bswap_ehdr(&elf_ex);
#endif

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (! elf_check_arch(elf_ex.e_machine))) {
        return -ENOEXEC;
    }

    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
    if (elf_phdata == NULL) {
        return -ENOMEM;
    }

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    if (retval >= 0) {
        retval = read(bprm->fd, (char *) elf_phdata,
                      elf_ex.e_phentsize * elf_ex.e_phnum);
    }

    if (retval < 0) {
        perror("load_elf_binary");
        exit(-1);
    }

#ifdef BSWAP_NEEDED
    elf_ppnt = elf_phdata;
    for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
        bswap_phdr(elf_ppnt);
    }
#endif
    elf_ppnt = elf_phdata;

    elf_bss = 0;
    elf_brk = 0;

    elf_stack = ~((abi_ulong)0UL);
    elf_interpreter = NULL;
    start_code = ~((abi_ulong)0UL);
    end_code = 0;
    start_data = 0;
    end_data = 0;
    interp_ex.a_info = 0;

    for(i=0;i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if ( elf_interpreter != NULL )
            {
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return -EINVAL;
            }

            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary
             */
            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);

            if (elf_interpreter == NULL) {
                free(elf_phdata);
                close(bprm->fd);
                return -ENOMEM;
            }

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            if (retval >= 0) {
                retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
            }
            if (retval < 0) {
                perror("load_elf_binary2");
                exit(-1);
            }

            /* If the program interpreter is one of these two,
               then assume an iBCS2 image.  Otherwise assume
               a native linux image. */

            /* JRP - Need to add X86 lib dir stuff here... */

            if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
                strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
                ibcs2_interpreter = 1;
            }

            printf("Using ELF interpreter %s\n", path(elf_interpreter));

            if (retval >= 0) {
                retval = open(path(elf_interpreter), O_RDONLY);
                if (retval >= 0) {
                    interpreter_fd = retval;
                }
                else {
                    perror(elf_interpreter);
                    exit(-1);
                    /* retval = -errno; */
                }
            }

            if (retval >= 0) {
                retval = lseek(interpreter_fd, 0, SEEK_SET);
                if (retval >= 0) {
                    retval = read(interpreter_fd, bprm->buf, 128);
                }
            }
            if (retval >= 0) {
                interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
                interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
            }
            if (retval < 0) {
                perror("load_elf_binary3");
                exit(-1);
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
            }
        }
        elf_ppnt++;
    }

    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter) {
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

        /* Now figure out which format our binary is */
        if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
            (N_MAGIC(interp_ex) != QMAGIC)) {
            interpreter_type = INTERPRETER_ELF;
        }

        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp((char *)&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
            interpreter_type &= ~INTERPRETER_ELF;
        }

        if (!interpreter_type) {
            free(elf_interpreter);
            free(elf_phdata);
            close(bprm->fd);
            return -ELIBBAD;
        }
    }

    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */

    if (interpreter_type == INTERPRETER_AOUT) {
        snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
        passed_p = passed_fileno;

        if (elf_interpreter) {
            bprm->p = copy_elf_strings(1, &passed_p, bprm->page, bprm->p);
        }
    }
    if (!bprm->p) {
        if (elf_interpreter) {
            free(elf_interpreter);
        }
        free(elf_phdata);
        close(bprm->fd);
        return -E2BIG;
    }

    /* OK, This is the point of no return */
    info->start_mmap = (abi_ulong)ELF_START_MMAP;
    elf_entry = (abi_ulong) elf_ex.e_entry;

#if defined(CONFIG_USE_GUEST_BASE)
    /*
     * In case where user has not explicitly set the guest_base, we
     * probe here that should we set it automatically.
     */
    if (!have_guest_base) {
        /*
         * Go through ELF program header table and find out whether
         * any of the segments drop below our current mmap_min_addr and
         * in that case set guest_base to corresponding address.
         */
        for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
             i++, elf_ppnt++) {
            if (elf_ppnt->p_type != PT_LOAD)
                continue;
            if (HOST_PAGE_ALIGN(elf_ppnt->p_vaddr) < mmap_min_addr) {
                guest_base = HOST_PAGE_ALIGN(mmap_min_addr);
                break;
            }
        }
    }
#endif /* CONFIG_USE_GUEST_BASE */

    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;

    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory.  At this point, we assume that
     * the image should be loaded at fixed address, not at a variable
     * address.
     */
    for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        int elf_prot = 0;
        int elf_flags = 0;
        abi_ulong error;

        if (elf_ppnt->p_type != PT_LOAD)
            continue;

        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec.  This
               is because the brk will follow the loader, and is not movable.  */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
                                -1, 0);
            if (error == -1) {
                perror("mmap");
                exit(-1);
            }
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
        }

        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            elf_prot,
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            bprm->fd,
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }

#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif

        if (!load_addr_set) {
            load_addr_set = 1;
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
                reloc_func_desc = load_bias;
            }
        }
        k = elf_ppnt->p_vaddr;
        if (k < start_code)
            start_code = k;
        if (start_data < k)
            start_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if (k > elf_bss)
            elf_bss = k;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
            end_code = k;
        if (end_data < k)
            end_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;
    }

    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    start_data += load_bias;
    end_data += load_bias;

    if (elf_interpreter) {
        if (interpreter_type & 1) {
            elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
        }
        else if (interpreter_type & 2) {
            elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
                                        &interp_load_addr);
            reloc_func_desc = interp_load_addr;
        }

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~((abi_ulong)0UL)) {
            printf("Unable to load interpreter\n");
            free(elf_phdata);
            exit(-1);
        }
    }

    free(elf_phdata);

    if (qemu_log_enabled())
        load_symbols(&elf_ex, bprm->fd);

    if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
    info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);

#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;
#endif
    bprm->p = create_elf_tables(bprm->p,
                                bprm->argc,
                                bprm->envc,
                                &elf_ex,
                                load_addr, load_bias,
                                interp_load_addr,
                                (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
                                info);
    info->load_addr = reloc_func_desc;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->start_data = start_data;
    info->end_data = end_data;
    info->start_stack = bprm->p;

    /* Calling set_brk effectively mmaps the pages that we need for the bss
       and break sections */
    set_brk(elf_bss, elf_brk);

    padzero(elf_bss, elf_brk);

    printf("(start_brk) %x\n" , info->start_brk);
    printf("(end_code) %x\n" , info->end_code);
    printf("(start_code) %x\n" , info->start_code);
    printf("(end_data) %x\n" , info->end_data);
    printf("(start_stack) %x\n" , info->start_stack);
    printf("(brk) %x\n" , info->brk);

    if ( info->personality == PER_SVR4 )
    {
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh.  */
        mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                                  MAP_FIXED | MAP_PRIVATE, -1, 0);
    }

    info->entry = elf_entry;

#ifdef USE_ELF_CORE_DUMP
    bprm->core_dump = &elf_core_dump;
#endif

    return 0;
}
#ifdef USE_ELF_CORE_DUMP

/*
 * Definitions to generate Intel SVR4-like core files.
 * These mostly have the same names as the SVR4 types with "target_elf_"
 * tacked on the front to prevent clashes with linux definitions,
 * and the typedef forms have been avoided.  This is mostly like
 * the SVR4 structure, but more Linuxy, with things that Linux does
 * not support and which gdb doesn't really use excluded.
 *
 * Fields we don't dump (their contents is zero) in linux-user qemu
 * are marked with XXX.
 *
 * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
 *
 * Porting ELF coredump to a new target is a (quite) simple process.
 * First you define USE_ELF_CORE_DUMP in the target ELF code (where
 * init_thread() for the target resides):
 *
 *     #define USE_ELF_CORE_DUMP
 *
 * Next you define the type of register set used for dumping.  The ELF
 * specification says that it needs to be an array of elf_greg_t that has
 * size of ELF_NREG.
 *
 *     typedef <target_regtype> target_elf_greg_t;
 *     #define ELF_NREG <number of registers>
 *     typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
 *
 * The last step is to implement a target specific function that copies the
 * registers from a given cpu into that register set.  The prototype is:
 *
 *     static void elf_core_copy_regs(target_elf_gregset_t *regs,
 *                                    const CPUState *env);
 *
 * Parameters:
 *     regs - copy register values into here (allocated and zeroed by caller)
 *     env  - copy registers from here
 *
 * An example for the ARM target is provided in this file.
 */
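/*
 * A minimal sketch of the three steps above for a hypothetical target
 * (all names here are placeholders, not a real QEMU target):
 *
 *     #define USE_ELF_CORE_DUMP
 *     #define ELF_NREG 32
 *     typedef target_ulong target_elf_greg_t;
 *     typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
 *
 *     static void elf_core_copy_regs(target_elf_gregset_t *regs,
 *                                    const CPUState *env)
 *     {
 *         int i;
 *         for (i = 0; i < ELF_NREG; i++)
 *             (*regs)[i] = tswapl(env->regs[i]);  // assumes an env->regs[] array
 *     }
 */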
/* An ELF note in memory */
struct memelfnote {
    const char *name;
    size_t     namesz;
    size_t     namesz_rounded;
    int        type;
    size_t     datasz;
    void       *data;
    size_t     notesz;
};

struct target_elf_siginfo {
    int  si_signo; /* signal number */
    int  si_code;  /* extra code */
    int  si_errno; /* errno */
};

struct target_elf_prstatus {
    struct target_elf_siginfo pr_info;      /* Info associated with signal */
    short              pr_cursig;    /* Current signal */
    target_ulong       pr_sigpend;   /* XXX */
    target_ulong       pr_sighold;   /* XXX */
    target_pid_t       pr_pid;
    target_pid_t       pr_ppid;
    target_pid_t       pr_pgrp;
    target_pid_t       pr_sid;
    struct target_timeval pr_utime;  /* XXX User time */
    struct target_timeval pr_stime;  /* XXX System time */
    struct target_timeval pr_cutime; /* XXX Cumulative user time */
    struct target_timeval pr_cstime; /* XXX Cumulative system time */
    target_elf_gregset_t pr_reg;     /* GP registers */
    int                pr_fpvalid;   /* XXX */
};

#define ELF_PRARGSZ     (80) /* Number of chars for args */

struct target_elf_prpsinfo {
    char         pr_state;       /* numeric process state */
    char         pr_sname;       /* char for pr_state */
    char         pr_zomb;        /* zombie */
    char         pr_nice;        /* nice val */
    target_ulong pr_flag;        /* flags */
    target_uid_t pr_uid;
    target_gid_t pr_gid;
    target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
    char         pr_fname[16];           /* filename of executable */
    char         pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};

/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status {
    QTAILQ_ENTRY(elf_thread_status)  ets_link;
    struct target_elf_prstatus prstatus;   /* NT_PRSTATUS */
    elf_fpregset_t fpu;             /* NT_PRFPREG */
    struct task_struct *thread;
    elf_fpxregset_t xfpu;           /* ELF_CORE_XFPREG_TYPE */
    struct memelfnote notes[1];
};

struct elf_note_info {
    struct memelfnote   *notes;
    struct target_elf_prstatus *prstatus;  /* NT_PRSTATUS */
    struct target_elf_prpsinfo *psinfo;    /* NT_PRPSINFO */

    QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list;
    /*
     * Current version of ELF coredump doesn't support
     * dumping fp regs etc.
     */
    elf_fpregset_t *fpu;
    elf_fpxregset_t *xfpu;
    int thread_status_size;
};
struct vm_area_struct {
    abi_ulong   vma_start;  /* start vaddr of memory region */
    abi_ulong   vma_end;    /* end vaddr of memory region */
    abi_ulong   vma_flags;  /* protection etc. flags for the region */
    QTAILQ_ENTRY(vm_area_struct) vma_link;
};

struct mm_struct {
    QTAILQ_HEAD(, vm_area_struct) mm_mmap;
    int mm_count;           /* number of mappings */
};

static struct mm_struct *vma_init(void);
static void vma_delete(struct mm_struct *);
static int vma_add_mapping(struct mm_struct *, abi_ulong,
                           abi_ulong, abi_ulong);
static int vma_get_mapping_count(const struct mm_struct *);
static struct vm_area_struct *vma_first(const struct mm_struct *);
static struct vm_area_struct *vma_next(struct vm_area_struct *);
static abi_ulong vma_dump_size(const struct vm_area_struct *);
static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
                      unsigned long flags);

static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
static void fill_note(struct memelfnote *, const char *, int,
                      unsigned int, void *);
static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
static void fill_auxv_note(struct memelfnote *, const TaskState *);
static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
static size_t note_size(const struct memelfnote *);
static void free_note_info(struct elf_note_info *);
static int fill_note_info(struct elf_note_info *, long, const CPUState *);
static void fill_thread_info(struct elf_note_info *, const CPUState *);
static int core_dump_filename(const TaskState *, char *, size_t);

static int dump_write(int, const void *, size_t);
static int write_note(struct memelfnote *, int);
static int write_note_info(struct elf_note_info *, int);
#ifdef BSWAP_NEEDED
static void bswap_prstatus(struct target_elf_prstatus *);
static void bswap_psinfo(struct target_elf_prpsinfo *);

static void bswap_prstatus(struct target_elf_prstatus *prstatus)
{
    prstatus->pr_info.si_signo = tswapl(prstatus->pr_info.si_signo);
    prstatus->pr_info.si_code = tswapl(prstatus->pr_info.si_code);
    prstatus->pr_info.si_errno = tswapl(prstatus->pr_info.si_errno);
    prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
    prstatus->pr_sigpend = tswapl(prstatus->pr_sigpend);
    prstatus->pr_sighold = tswapl(prstatus->pr_sighold);
    prstatus->pr_pid = tswap32(prstatus->pr_pid);
    prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
    prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
    prstatus->pr_sid = tswap32(prstatus->pr_sid);
    /* cpu times are not filled, so we skip them */
    /* regs should be in correct format already */
    prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
}

static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
{
    psinfo->pr_flag = tswapl(psinfo->pr_flag);
    psinfo->pr_uid = tswap16(psinfo->pr_uid);
    psinfo->pr_gid = tswap16(psinfo->pr_gid);
    psinfo->pr_pid = tswap32(psinfo->pr_pid);
    psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
    psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
    psinfo->pr_sid = tswap32(psinfo->pr_sid);
}
#endif /* BSWAP_NEEDED */
/*
 * Minimal support for linux memory regions.  These are needed
 * when we are finding out what memory exactly belongs to
 * emulated process.  No locks needed here, as long as
 * thread that received the signal is stopped.
 */

static struct mm_struct *vma_init(void)
{
    struct mm_struct *mm;

    if ((mm = qemu_malloc(sizeof (*mm))) == NULL)
        return (NULL);

    mm->mm_count = 0;
    QTAILQ_INIT(&mm->mm_mmap);

    return (mm);
}

static void vma_delete(struct mm_struct *mm)
{
    struct vm_area_struct *vma;

    while ((vma = vma_first(mm)) != NULL) {
        QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
        qemu_free(vma);
    }
    qemu_free(mm);
}

static int vma_add_mapping(struct mm_struct *mm, abi_ulong start,
                           abi_ulong end, abi_ulong flags)
{
    struct vm_area_struct *vma;

    if ((vma = qemu_mallocz(sizeof (*vma))) == NULL)
        return (-1);

    vma->vma_start = start;
    vma->vma_end = end;
    vma->vma_flags = flags;

    QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
    mm->mm_count++;

    return (0);
}

static struct vm_area_struct *vma_first(const struct mm_struct *mm)
{
    return (QTAILQ_FIRST(&mm->mm_mmap));
}

static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
{
    return (QTAILQ_NEXT(vma, vma_link));
}

static int vma_get_mapping_count(const struct mm_struct *mm)
{
    return (mm->mm_count);
}
/*
 * Calculate file (dump) size of given memory region.
 */
static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
{
    /* if we cannot even read the first page, skip it */
    if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
        return (0);

    /*
     * Usually we don't dump executable pages as they contain
     * non-writable code that debugger can read directly from
     * target library etc.  However, thread stacks are marked
     * also executable so we read in first page of given region
     * and check whether it contains elf header.  If there is
     * no elf header, we dump it.
     */
    if (vma->vma_flags & PROT_EXEC) {
        char page[TARGET_PAGE_SIZE];

        copy_from_user(page, vma->vma_start, sizeof (page));
        if ((page[EI_MAG0] == ELFMAG0) &&
            (page[EI_MAG1] == ELFMAG1) &&
            (page[EI_MAG2] == ELFMAG2) &&
            (page[EI_MAG3] == ELFMAG3)) {
            /*
             * Mappings are possibly from ELF binary.  Don't dump
             * them.
             */
            return (0);
        }
    }

    return (vma->vma_end - vma->vma_start);
}
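/*
 * Illustration of the policy above (hypothetical mapping): a 128 KiB
 * read/write data mapping is dumped in full (vma_end - vma_start), while a
 * read/exec mapping whose first page starts with "\177ELF" is assumed to be
 * a file-backed binary or library and contributes a dump size of 0.
 */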
static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
                      unsigned long flags)
{
    struct mm_struct *mm = (struct mm_struct *)priv;

    /*
     * Don't dump anything that qemu has reserved for internal use.
     */
    if (flags & PAGE_RESERVED)
        return (0);

    vma_add_mapping(mm, start, end, flags);
    return (0);
}
static void fill_note(struct memelfnote *note, const char *name, int type,
                      unsigned int sz, void *data)
{
    unsigned int namesz;

    namesz = strlen(name) + 1;
    note->name = name;
    note->namesz = namesz;
    note->namesz_rounded = roundup(namesz, sizeof (int32_t));
    note->type = type;
    note->datasz = roundup(sz, sizeof (int32_t));
    note->data = data;

    /*
     * We calculate rounded up note size here as specified by
     * the note layout (name and data padded to 4-byte boundaries).
     */
    note->notesz = sizeof (struct elf_note) +
        note->namesz_rounded + note->datasz;
}
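/*
 * Example of the size bookkeeping above (illustrative numbers): a
 * NT_PRSTATUS note whose name is "CORE" has namesz = 5, which rounds up
 * to 8, and a payload of, say, 148 bytes stays 148 (already a multiple of
 * 4), so notesz = sizeof(struct elf_note) + 8 + 148.
 */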
static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
                            uint32_t flags)
{
    (void) memset(elf, 0, sizeof(*elf));

    (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
    elf->e_ident[EI_CLASS] = ELF_CLASS;
    elf->e_ident[EI_DATA] = ELF_DATA;
    elf->e_ident[EI_VERSION] = EV_CURRENT;
    elf->e_ident[EI_OSABI] = ELF_OSABI;

    elf->e_type = ET_CORE;
    elf->e_machine = machine;
    elf->e_version = EV_CURRENT;
    elf->e_phoff = sizeof(struct elfhdr);
    elf->e_flags = flags;
    elf->e_ehsize = sizeof(struct elfhdr);
    elf->e_phentsize = sizeof(struct elf_phdr);
    elf->e_phnum = segs;
}
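
/*
 * Describe the note segment: a PT_NOTE entry that occupies 'sz' bytes
 * in the file starting at 'offset' and is not mapped into memory.
 */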
static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
{
    phdr->p_type = PT_NOTE;
    phdr->p_offset = offset;
    phdr->p_vaddr = 0;
    phdr->p_paddr = 0;
    phdr->p_filesz = sz;
    phdr->p_memsz = 0;
    phdr->p_flags = 0;
    phdr->p_align = 0;
}
static size_t note_size(const struct memelfnote *note)
{
    return (note->notesz);
}
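
/*
 * Fill NT_PRSTATUS contents for one thread: the pending signal and the
 * thread/parent/group/session ids.  Register state is copied in
 * separately by elf_core_copy_regs() at the call sites.
 */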
static void fill_prstatus(struct target_elf_prstatus *prstatus,
                          const TaskState *ts, int signr)
{
    (void) memset(prstatus, 0, sizeof (*prstatus));
    prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
    prstatus->pr_pid = ts->ts_tid;
    prstatus->pr_ppid = getppid();
    prstatus->pr_pgrp = getpgrp();
    prstatus->pr_sid = getsid(0);

#ifdef BSWAP_NEEDED
    bswap_prstatus(prstatus);
#endif
}
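
/*
 * Fill NT_PRPSINFO: the command line (NUL bytes replaced with spaces
 * and truncated to ELF_PRARGSZ), process ids, credentials, and the
 * basename of the executable.
 */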
static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
{
    char *filename, *base_filename;
    unsigned int i, len;

    (void) memset(psinfo, 0, sizeof (*psinfo));

    len = ts->info->arg_end - ts->info->arg_start;
    if (len >= ELF_PRARGSZ)
        len = ELF_PRARGSZ - 1;
    if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
        return (-EFAULT);
    for (i = 0; i < len; i++)
        if (psinfo->pr_psargs[i] == 0)
            psinfo->pr_psargs[i] = ' ';
    psinfo->pr_psargs[len] = 0;

    psinfo->pr_pid = getpid();
    psinfo->pr_ppid = getppid();
    psinfo->pr_pgrp = getpgrp();
    psinfo->pr_sid = getsid(0);
    psinfo->pr_uid = getuid();
    psinfo->pr_gid = getgid();

    filename = strdup(ts->bprm->filename);
    base_filename = strdup(basename(filename));
    (void) strncpy(psinfo->pr_fname, base_filename,
                   sizeof(psinfo->pr_fname));
    free(base_filename);
    free(filename);

#ifdef BSWAP_NEEDED
    bswap_psinfo(psinfo);
#endif
    return (0);
}
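
/*
 * The auxiliary vector on the target stack is a sequence of
 * { a_type, a_val } pairs, e.g.
 *
 *     AT_PHDR, <addr>, AT_PAGESZ, <size>, ..., AT_NULL, 0
 *
 * The AT_NULL entry terminates the vector; everything up to and
 * including it is copied into the NT_AUXV note.
 */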
static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
{
    elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
    elf_addr_t orig_auxv = auxv;
    abi_ulong val;
    void *ptr;
    int i, len;

    /*
     * Auxiliary vector is stored in target process stack.  It contains
     * {type, value} pairs that we need to dump into the note.  This is
     * not strictly necessary but we do it here for sake of completeness.
     */

    /* find out length of the vector, AT_NULL is terminator */
    i = 0;
    do {
        get_user_ual(val, auxv);
        i += 2;
        auxv += 2 * sizeof (elf_addr_t);
    } while (val != AT_NULL);
    len = i * sizeof (elf_addr_t);

    /* read in whole auxv vector and copy it to memelfnote */
    ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
    if (ptr != NULL) {
        fill_note(note, "CORE", NT_AUXV, len, ptr);
        unlock_user(ptr, auxv, len);
    }
}
/*
 * Construct the name of the coredump file.  We use the following
 * naming convention:
 *
 *     qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
 *
 * Returns 0 in case of success, -1 otherwise (errno is set).
 */
static int core_dump_filename(const TaskState *ts, char *buf,
                              size_t bufsize)
{
    char timestamp[64];
    char *filename = NULL;
    char *base_filename = NULL;
    struct timeval tv;
    struct tm tm;

    assert(bufsize >= PATH_MAX);

    if (gettimeofday(&tv, NULL) < 0) {
        (void) fprintf(stderr, "unable to get current timestamp: %s",
                       strerror(errno));
        return (-1);
    }

    filename = strdup(ts->bprm->filename);
    base_filename = strdup(basename(filename));
    (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S",
                    localtime_r(&tv.tv_sec, &tm));
    (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core",
                    base_filename, timestamp, (int)getpid());
    free(base_filename);
    free(filename);

    return (0);
}
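
/*
 * Write 'size' bytes to the dump file while honouring RLIMIT_CORE: if
 * the current file offset is already at or past the limit the write
 * fails, otherwise only the bytes that still fit are written.  Short
 * writes are retried in a loop so the routine also works for
 * non-regular files such as pipes or sockets.
 */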
static int dump_write(int fd, const void *ptr, size_t size)
{
    const char *bufp = (const char *)ptr;
    ssize_t bytes_written, bytes_left;
    struct rlimit dumpsize;
    off_t pos;

    bytes_written = 0;
    getrlimit(RLIMIT_CORE, &dumpsize);
    if ((pos = lseek(fd, 0, SEEK_CUR)) == -1) {
        if (errno == ESPIPE) { /* not a seekable stream */
            bytes_left = size;
        } else {
            return (pos);
        }
    } else {
        if (dumpsize.rlim_cur <= pos) {
            return (-1);
        } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
            bytes_left = size;
        } else {
            size_t limit_left = dumpsize.rlim_cur - pos;

            bytes_left = limit_left >= size ? size : limit_left;
        }
    }

    /*
     * In normal conditions, a single write(2) should do, but
     * in case of a socket etc. this mechanism is more portable.
     */
    do {
        bytes_written = write(fd, bufp, bytes_left);
        if (bytes_written < 0) {
            return (-1);
        } else if (bytes_written == 0) { /* eof */
            return (-1);
        }
        bufp += bytes_written;
        bytes_left -= bytes_written;
    } while (bytes_left > 0);

    return (0);
}
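
/*
 * Emit one note to the file.  The on-disk layout is the fixed-size
 * struct elf_note header followed by the name and then the data, each
 * padded to a 4-byte boundary (namesz_rounded / datasz as computed in
 * fill_note()).
 */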
static int write_note(struct memelfnote *men, int fd)
{
    struct elf_note en;

    en.n_namesz = men->namesz;
    en.n_type = men->type;
    en.n_descsz = men->datasz;

    if (dump_write(fd, &en, sizeof(en)) != 0)
        return (-1);
    if (dump_write(fd, men->name, men->namesz_rounded) != 0)
        return (-1);
    if (dump_write(fd, men->data, men->datasz) != 0)
        return (-1);

    return (0);
}
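
/*
 * Record the state of one additional thread: allocate an
 * elf_thread_status, fill its NT_PRSTATUS note and queue it on
 * info->thread_list so that write_note_info() dumps it later.
 */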
static void fill_thread_info(struct elf_note_info *info, const CPUState *env)
{
    TaskState *ts = (TaskState *)env->opaque;
    struct elf_thread_status *ets;

    ets = qemu_mallocz(sizeof (*ets));
    ets->num_notes = 1; /* only prstatus is dumped */
    fill_prstatus(&ets->prstatus, ts, 0);
    elf_core_copy_regs(&ets->prstatus.pr_reg, env);
    fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
              &ets->prstatus);

    QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);

    info->notes_size += note_size(&ets->notes[0]);
}
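
/*
 * Build all notes in memory: NT_PRSTATUS, NT_PRPSINFO and NT_AUXV for
 * the current thread (hence NUMNOTES == 3), plus one NT_PRSTATUS per
 * additional thread via fill_thread_info().
 */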
static int fill_note_info(struct elf_note_info *info,
                          long signr, const CPUState *env)
{
#define NUMNOTES 3
    CPUState *cpu = NULL;
    TaskState *ts = (TaskState *)env->opaque;
    int i;

    (void) memset(info, 0, sizeof (*info));

    QTAILQ_INIT(&info->thread_list);

    info->notes = qemu_mallocz(NUMNOTES * sizeof (struct memelfnote));
    if (info->notes == NULL)
        return (-ENOMEM);
    info->prstatus = qemu_mallocz(sizeof (*info->prstatus));
    if (info->prstatus == NULL)
        return (-ENOMEM);
    info->psinfo = qemu_mallocz(sizeof (*info->psinfo));
    if (info->psinfo == NULL)
        return (-ENOMEM);

    /*
     * First fill in status (and registers) of current thread
     * including process info & aux vector.
     */
    fill_prstatus(info->prstatus, ts, signr);
    elf_core_copy_regs(&info->prstatus->pr_reg, env);
    fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
              sizeof (*info->prstatus), info->prstatus);
    fill_psinfo(info->psinfo, ts);
    fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
              sizeof (*info->psinfo), info->psinfo);
    fill_auxv_note(&info->notes[2], ts);
    info->numnote = NUMNOTES;

    info->notes_size = 0;
    for (i = 0; i < info->numnote; i++)
        info->notes_size += note_size(&info->notes[i]);

    /* read and fill status of all threads */
    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        if (cpu == thread_env)
            continue;
        fill_thread_info(info, cpu);
    }

    return (0);
}
static void free_note_info(struct elf_note_info *info)
{
    struct elf_thread_status *ets;

    while (!QTAILQ_EMPTY(&info->thread_list)) {
        ets = QTAILQ_FIRST(&info->thread_list);
        QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
        qemu_free(ets);
    }

    qemu_free(info->prstatus);
    qemu_free(info->psinfo);
    qemu_free(info->notes);
}
static int write_note_info(struct elf_note_info *info, int fd)
{
    struct elf_thread_status *ets;
    int i, error = 0;

    /* write prstatus, psinfo and auxv for current thread */
    for (i = 0; i < info->numnote; i++)
        if ((error = write_note(&info->notes[i], fd)) != 0)
            return (error);

    /* write prstatus for each thread */
    for (ets = info->thread_list.tqh_first; ets != NULL;
         ets = ets->ets_link.tqe_next) {
        if ((error = write_note(&ets->notes[0], fd)) != 0)
            return (error);
    }

    return (0);
}
/*
 * Write out ELF coredump.
 *
 * See documentation of ELF object file format in:
 * http://www.caldera.com/developers/devspecs/gabi41.pdf
 *
 * The coredump format used by linux is the following:
 *
 * 0   +----------------------+         \
 *     | ELF header           | ET_CORE  |
 *     +----------------------+          |
 *     | ELF program headers  |          |--- headers
 *     | - NOTE section       |          |
 *     | - PT_LOAD sections   |          |
 *     +----------------------+         /
 *     | NOTEs:               |
 *     | - NT_PRSTATUS        |
 *     | - NT_PRPSINFO        |
 *     | - NT_AUXV            |
 *     +----------------------+ <-- aligned to target page
 *     | Process memory dump  |
 *     :                      :
 *     :                      :
 *     +----------------------+
 *
 * NT_PRSTATUS -> struct elf_prstatus (per thread)
 * NT_PRPSINFO -> struct elf_prpsinfo
 * NT_AUXV is an array of { type, value } pairs (see fill_auxv_note()).
 *
 * The format follows the System V format as closely as possible.
 * Current version limitations are as follows:
 *     - no floating point registers are dumped
 *
 * Function returns 0 in case of success, negative errno otherwise.
 *
 * TODO: make this work also during runtime: it should be
 * possible to force a coredump from a running process and then
 * continue processing.  For example qemu could set up a SIGUSR2
 * handler (provided that the target process hasn't registered a
 * handler for that signal) that does the dump when the signal is
 * received.
 */
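
/*
 * The resulting file can be inspected with standard tools, e.g.
 *
 *     readelf -n <corefile>            # list the notes
 *     gdb <target-binary> <corefile>
 */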
static int elf_core_dump(int signr, const CPUState *env)
{
    const TaskState *ts = (const TaskState *)env->opaque;
    struct vm_area_struct *vma = NULL;
    char corefile[PATH_MAX];
    struct elf_note_info info;
    struct elfhdr elf;
    struct elf_phdr phdr;
    struct rlimit dumpsize;
    struct mm_struct *mm = NULL;
    off_t offset = 0, data_offset = 0;
    int segs = 0;
    int fd = -1;

    errno = 0;
    getrlimit(RLIMIT_CORE, &dumpsize);
    if (dumpsize.rlim_cur == 0)
        return 0;

    if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
        return (-errno);

    if ((fd = open(corefile, O_WRONLY | O_CREAT,
                   S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
        return (-errno);

    /*
     * Walk through target process memory mappings and
     * set up structure containing this information.  After
     * this point vma_xxx functions can be used.
     */
    if ((mm = vma_init()) == NULL)
        goto out;

    walk_memory_regions(mm, vma_walker);
    segs = vma_get_mapping_count(mm);

    /*
     * Construct valid coredump ELF header.  We also
     * add one more segment for notes.
     */
    fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
    if (dump_write(fd, &elf, sizeof (elf)) != 0)
        goto out;

    /* fill in in-memory version of notes */
    if (fill_note_info(&info, signr, env) < 0)
        goto out;

    offset += sizeof (elf);                             /* elf header */
    offset += (segs + 1) * sizeof (struct elf_phdr);    /* program headers */
    /* write out notes program header */
    fill_elf_note_phdr(&phdr, info.notes_size, offset);

    offset += info.notes_size;
    if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
        goto out;

    /*
     * ELF specification wants data to start at page boundary so
     * we align it here.
     */
    offset = roundup(offset, ELF_EXEC_PAGESIZE);

    /*
     * Write program headers for memory regions mapped in
     * the target process.
     */
    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
        (void) memset(&phdr, 0, sizeof (phdr));

        phdr.p_type = PT_LOAD;
        phdr.p_offset = offset;
        phdr.p_vaddr = vma->vma_start;
        phdr.p_paddr = 0;
        phdr.p_filesz = vma_dump_size(vma);
        offset += phdr.p_filesz;
        phdr.p_memsz = vma->vma_end - vma->vma_start;
        phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
        if (vma->vma_flags & PROT_WRITE)
            phdr.p_flags |= PF_W;
        if (vma->vma_flags & PROT_EXEC)
            phdr.p_flags |= PF_X;
        phdr.p_align = ELF_EXEC_PAGESIZE;

        dump_write(fd, &phdr, sizeof (phdr));
    }

    /*
     * Next we write notes just after program headers.  No
     * alignment needed here.
     */
    if (write_note_info(&info, fd) < 0)
        goto out;

    /* align data to page boundary */
    data_offset = lseek(fd, 0, SEEK_CUR);
    data_offset = TARGET_PAGE_ALIGN(data_offset);
    if (lseek(fd, data_offset, SEEK_SET) != data_offset)
        goto out;

    /*
     * Finally we can dump process memory into corefile as well.
     */
    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
        abi_ulong addr;
        abi_ulong end;

        end = vma->vma_start + vma_dump_size(vma);

        for (addr = vma->vma_start; addr < end;
             addr += TARGET_PAGE_SIZE) {
            char page[TARGET_PAGE_SIZE];
            int error;

            /*
             * Read in page from target process memory and
             * write it to coredump file.
             */
            error = copy_from_user(page, addr, sizeof (page));
            if (error != 0) {
                (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
                               addr);
                errno = -error;
                goto out;
            }
            if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
                goto out;
        }
    }

 out:
    free_note_info(&info);
    if (mm != NULL)
        vma_delete(mm);
    (void) close(fd);

    if (errno != 0)
        return (-errno);
    return (0);
}

#endif /* USE_ELF_CORE_DUMP */
static int load_aout_interp(void * exptr, int interp_fd)
{
    printf("a.out interpreter not yet supported\n");
    return(0);
}
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);
}