set ELF_HWCAP for SPARC and SPARC64
[qemu.git] / linux-user / elfload.c
blob443d246adafa43e0fa0369fff6e6868d0f7ab771
1 /* This is the Linux kernel elf-loading code, ported into user space */
2 #include <sys/time.h>
3 #include <sys/param.h>
5 #include <stdio.h>
6 #include <sys/types.h>
7 #include <fcntl.h>
8 #include <errno.h>
9 #include <unistd.h>
10 #include <sys/mman.h>
11 #include <sys/resource.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <time.h>
16 #include "qemu.h"
17 #include "disas.h"
19 #ifdef _ARCH_PPC64
20 #undef ARCH_DLINFO
21 #undef ELF_PLATFORM
22 #undef ELF_HWCAP
23 #undef ELF_CLASS
24 #undef ELF_DATA
25 #undef ELF_ARCH
26 #endif
28 #define ELF_OSABI ELFOSABI_SYSV
30 /* from personality.h */
33 * Flags for bug emulation.
35 * These occupy the top three bytes.
37 enum {
38 ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */
39 FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to
40 descriptors (signal handling) */
41 MMAP_PAGE_ZERO = 0x0100000,
42 ADDR_COMPAT_LAYOUT = 0x0200000,
43 READ_IMPLIES_EXEC = 0x0400000,
44 ADDR_LIMIT_32BIT = 0x0800000,
45 SHORT_INODE = 0x1000000,
46 WHOLE_SECONDS = 0x2000000,
47 STICKY_TIMEOUTS = 0x4000000,
48 ADDR_LIMIT_3GB = 0x8000000,
52 * Personality types.
54 * These go in the low byte. Avoid using the top bit, it will
55 * conflict with error returns.
57 enum {
58 PER_LINUX = 0x0000,
59 PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
60 PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
61 PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
62 PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
63 PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
64 PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
65 PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
66 PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
67 PER_BSD = 0x0006,
68 PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
69 PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
70 PER_LINUX32 = 0x0008,
71 PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
72 PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
73 PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
74 PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
75 PER_RISCOS = 0x000c,
76 PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
77 PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
78 PER_OSF4 = 0x000f, /* OSF/1 v4 */
79 PER_HPUX = 0x0010,
80 PER_MASK = 0x00ff,
84 * Return the base personality without flags.
86 #define personality(pers) (pers & PER_MASK)
88 /* this flag is uneffective under linux too, should be deleted */
89 #ifndef MAP_DENYWRITE
90 #define MAP_DENYWRITE 0
91 #endif
93 /* should probably go in elf.h */
94 #ifndef ELIBBAD
95 #define ELIBBAD 80
96 #endif
98 #ifdef TARGET_WORDS_BIGENDIAN
99 #define ELF_DATA ELFDATA2MSB
100 #else
101 #define ELF_DATA ELFDATA2LSB
102 #endif
104 typedef target_ulong target_elf_greg_t;
105 #ifdef USE_UID16
106 typedef target_ushort target_uid_t;
107 typedef target_ushort target_gid_t;
108 #else
109 typedef target_uint target_uid_t;
110 typedef target_uint target_gid_t;
111 #endif
112 typedef target_int target_pid_t;
114 #ifdef TARGET_I386
116 #define ELF_PLATFORM get_elf_platform()
118 static const char *get_elf_platform(void)
120 static char elf_platform[] = "i386";
121 int family = (thread_env->cpuid_version >> 8) & 0xff;
122 if (family > 6)
123 family = 6;
124 if (family >= 3)
125 elf_platform[1] = '0' + family;
126 return elf_platform;
129 #define ELF_HWCAP get_elf_hwcap()
131 static uint32_t get_elf_hwcap(void)
133 return thread_env->cpuid_features;
136 #ifdef TARGET_X86_64
137 #define ELF_START_MMAP 0x2aaaaab000ULL
138 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
140 #define ELF_CLASS ELFCLASS64
141 #define ELF_ARCH EM_X86_64
143 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
145 regs->rax = 0;
146 regs->rsp = infop->start_stack;
147 regs->rip = infop->entry;
150 #define ELF_NREG 27
151 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
154 * Note that ELF_NREG should be 29 as there should be place for
155 * TRAPNO and ERR "registers" as well but linux doesn't dump
156 * those.
158 * See linux kernel: arch/x86/include/asm/elf.h
160 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
162 (*regs)[0] = env->regs[15];
163 (*regs)[1] = env->regs[14];
164 (*regs)[2] = env->regs[13];
165 (*regs)[3] = env->regs[12];
166 (*regs)[4] = env->regs[R_EBP];
167 (*regs)[5] = env->regs[R_EBX];
168 (*regs)[6] = env->regs[11];
169 (*regs)[7] = env->regs[10];
170 (*regs)[8] = env->regs[9];
171 (*regs)[9] = env->regs[8];
172 (*regs)[10] = env->regs[R_EAX];
173 (*regs)[11] = env->regs[R_ECX];
174 (*regs)[12] = env->regs[R_EDX];
175 (*regs)[13] = env->regs[R_ESI];
176 (*regs)[14] = env->regs[R_EDI];
177 (*regs)[15] = env->regs[R_EAX]; /* XXX */
178 (*regs)[16] = env->eip;
179 (*regs)[17] = env->segs[R_CS].selector & 0xffff;
180 (*regs)[18] = env->eflags;
181 (*regs)[19] = env->regs[R_ESP];
182 (*regs)[20] = env->segs[R_SS].selector & 0xffff;
183 (*regs)[21] = env->segs[R_FS].selector & 0xffff;
184 (*regs)[22] = env->segs[R_GS].selector & 0xffff;
185 (*regs)[23] = env->segs[R_DS].selector & 0xffff;
186 (*regs)[24] = env->segs[R_ES].selector & 0xffff;
187 (*regs)[25] = env->segs[R_FS].selector & 0xffff;
188 (*regs)[26] = env->segs[R_GS].selector & 0xffff;
191 #else
193 #define ELF_START_MMAP 0x80000000
196 * This is used to ensure we don't load something for the wrong architecture.
198 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
201 * These are used to set parameters in the core dumps.
203 #define ELF_CLASS ELFCLASS32
204 #define ELF_ARCH EM_386
206 static inline void init_thread(struct target_pt_regs *regs,
207 struct image_info *infop)
209 regs->esp = infop->start_stack;
210 regs->eip = infop->entry;
212 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
213 starts %edx contains a pointer to a function which might be
214 registered using `atexit'. This provides a mean for the
215 dynamic linker to call DT_FINI functions for shared libraries
216 that have been loaded before the code runs.
218 A value of 0 tells we have no such handler. */
219 regs->edx = 0;
222 #define ELF_NREG 17
223 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
226 * Note that ELF_NREG should be 19 as there should be place for
227 * TRAPNO and ERR "registers" as well but linux doesn't dump
228 * those.
230 * See linux kernel: arch/x86/include/asm/elf.h
232 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
234 (*regs)[0] = env->regs[R_EBX];
235 (*regs)[1] = env->regs[R_ECX];
236 (*regs)[2] = env->regs[R_EDX];
237 (*regs)[3] = env->regs[R_ESI];
238 (*regs)[4] = env->regs[R_EDI];
239 (*regs)[5] = env->regs[R_EBP];
240 (*regs)[6] = env->regs[R_EAX];
241 (*regs)[7] = env->segs[R_DS].selector & 0xffff;
242 (*regs)[8] = env->segs[R_ES].selector & 0xffff;
243 (*regs)[9] = env->segs[R_FS].selector & 0xffff;
244 (*regs)[10] = env->segs[R_GS].selector & 0xffff;
245 (*regs)[11] = env->regs[R_EAX]; /* XXX */
246 (*regs)[12] = env->eip;
247 (*regs)[13] = env->segs[R_CS].selector & 0xffff;
248 (*regs)[14] = env->eflags;
249 (*regs)[15] = env->regs[R_ESP];
250 (*regs)[16] = env->segs[R_SS].selector & 0xffff;
252 #endif
254 #define USE_ELF_CORE_DUMP
255 #define ELF_EXEC_PAGESIZE 4096
257 #endif
259 #ifdef TARGET_ARM
261 #define ELF_START_MMAP 0x80000000
263 #define elf_check_arch(x) ( (x) == EM_ARM )
265 #define ELF_CLASS ELFCLASS32
266 #define ELF_ARCH EM_ARM
268 static inline void init_thread(struct target_pt_regs *regs,
269 struct image_info *infop)
271 abi_long stack = infop->start_stack;
272 memset(regs, 0, sizeof(*regs));
273 regs->ARM_cpsr = 0x10;
274 if (infop->entry & 1)
275 regs->ARM_cpsr |= CPSR_T;
276 regs->ARM_pc = infop->entry & 0xfffffffe;
277 regs->ARM_sp = infop->start_stack;
278 /* FIXME - what to for failure of get_user()? */
279 get_user_ual(regs->ARM_r2, stack + 8); /* envp */
280 get_user_ual(regs->ARM_r1, stack + 4); /* envp */
281 /* XXX: it seems that r0 is zeroed after ! */
282 regs->ARM_r0 = 0;
283 /* For uClinux PIC binaries. */
284 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
285 regs->ARM_r10 = infop->start_data;
288 #define ELF_NREG 18
289 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
291 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
293 (*regs)[0] = tswapl(env->regs[0]);
294 (*regs)[1] = tswapl(env->regs[1]);
295 (*regs)[2] = tswapl(env->regs[2]);
296 (*regs)[3] = tswapl(env->regs[3]);
297 (*regs)[4] = tswapl(env->regs[4]);
298 (*regs)[5] = tswapl(env->regs[5]);
299 (*regs)[6] = tswapl(env->regs[6]);
300 (*regs)[7] = tswapl(env->regs[7]);
301 (*regs)[8] = tswapl(env->regs[8]);
302 (*regs)[9] = tswapl(env->regs[9]);
303 (*regs)[10] = tswapl(env->regs[10]);
304 (*regs)[11] = tswapl(env->regs[11]);
305 (*regs)[12] = tswapl(env->regs[12]);
306 (*regs)[13] = tswapl(env->regs[13]);
307 (*regs)[14] = tswapl(env->regs[14]);
308 (*regs)[15] = tswapl(env->regs[15]);
310 (*regs)[16] = tswapl(cpsr_read((CPUState *)env));
311 (*regs)[17] = tswapl(env->regs[0]); /* XXX */
314 #define USE_ELF_CORE_DUMP
315 #define ELF_EXEC_PAGESIZE 4096
317 enum
319 ARM_HWCAP_ARM_SWP = 1 << 0,
320 ARM_HWCAP_ARM_HALF = 1 << 1,
321 ARM_HWCAP_ARM_THUMB = 1 << 2,
322 ARM_HWCAP_ARM_26BIT = 1 << 3,
323 ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
324 ARM_HWCAP_ARM_FPA = 1 << 5,
325 ARM_HWCAP_ARM_VFP = 1 << 6,
326 ARM_HWCAP_ARM_EDSP = 1 << 7,
327 ARM_HWCAP_ARM_JAVA = 1 << 8,
328 ARM_HWCAP_ARM_IWMMXT = 1 << 9,
329 ARM_HWCAP_ARM_THUMBEE = 1 << 10,
330 ARM_HWCAP_ARM_NEON = 1 << 11,
331 ARM_HWCAP_ARM_VFPv3 = 1 << 12,
332 ARM_HWCAP_ARM_VFPv3D16 = 1 << 13,
335 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
336 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
337 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP \
338 | ARM_HWCAP_ARM_NEON | ARM_HWCAP_ARM_VFPv3 )
340 #endif
342 #ifdef TARGET_UNICORE32
344 #define ELF_START_MMAP 0x80000000
346 #define elf_check_arch(x) ((x) == EM_UNICORE32)
348 #define ELF_CLASS ELFCLASS32
349 #define ELF_DATA ELFDATA2LSB
350 #define ELF_ARCH EM_UNICORE32
352 static inline void init_thread(struct target_pt_regs *regs,
353 struct image_info *infop)
355 abi_long stack = infop->start_stack;
356 memset(regs, 0, sizeof(*regs));
357 regs->UC32_REG_asr = 0x10;
358 regs->UC32_REG_pc = infop->entry & 0xfffffffe;
359 regs->UC32_REG_sp = infop->start_stack;
360 /* FIXME - what to for failure of get_user()? */
361 get_user_ual(regs->UC32_REG_02, stack + 8); /* envp */
362 get_user_ual(regs->UC32_REG_01, stack + 4); /* envp */
363 /* XXX: it seems that r0 is zeroed after ! */
364 regs->UC32_REG_00 = 0;
367 #define ELF_NREG 34
368 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
370 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
372 (*regs)[0] = env->regs[0];
373 (*regs)[1] = env->regs[1];
374 (*regs)[2] = env->regs[2];
375 (*regs)[3] = env->regs[3];
376 (*regs)[4] = env->regs[4];
377 (*regs)[5] = env->regs[5];
378 (*regs)[6] = env->regs[6];
379 (*regs)[7] = env->regs[7];
380 (*regs)[8] = env->regs[8];
381 (*regs)[9] = env->regs[9];
382 (*regs)[10] = env->regs[10];
383 (*regs)[11] = env->regs[11];
384 (*regs)[12] = env->regs[12];
385 (*regs)[13] = env->regs[13];
386 (*regs)[14] = env->regs[14];
387 (*regs)[15] = env->regs[15];
388 (*regs)[16] = env->regs[16];
389 (*regs)[17] = env->regs[17];
390 (*regs)[18] = env->regs[18];
391 (*regs)[19] = env->regs[19];
392 (*regs)[20] = env->regs[20];
393 (*regs)[21] = env->regs[21];
394 (*regs)[22] = env->regs[22];
395 (*regs)[23] = env->regs[23];
396 (*regs)[24] = env->regs[24];
397 (*regs)[25] = env->regs[25];
398 (*regs)[26] = env->regs[26];
399 (*regs)[27] = env->regs[27];
400 (*regs)[28] = env->regs[28];
401 (*regs)[29] = env->regs[29];
402 (*regs)[30] = env->regs[30];
403 (*regs)[31] = env->regs[31];
405 (*regs)[32] = cpu_asr_read((CPUState *)env);
406 (*regs)[33] = env->regs[0]; /* XXX */
409 #define USE_ELF_CORE_DUMP
410 #define ELF_EXEC_PAGESIZE 4096
412 #define ELF_HWCAP (UC32_HWCAP_CMOV | UC32_HWCAP_UCF64)
414 #endif
416 #ifdef TARGET_SPARC
417 #ifdef TARGET_SPARC64
419 #define ELF_START_MMAP 0x80000000
420 #define ELF_HWCAP (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
421 | HWCAP_SPARC_MULDIV | HWCAP_SPARC_V9)
422 #ifndef TARGET_ABI32
423 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
424 #else
425 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
426 #endif
428 #define ELF_CLASS ELFCLASS64
429 #define ELF_ARCH EM_SPARCV9
431 #define STACK_BIAS 2047
433 static inline void init_thread(struct target_pt_regs *regs,
434 struct image_info *infop)
436 #ifndef TARGET_ABI32
437 regs->tstate = 0;
438 #endif
439 regs->pc = infop->entry;
440 regs->npc = regs->pc + 4;
441 regs->y = 0;
442 #ifdef TARGET_ABI32
443 regs->u_regs[14] = infop->start_stack - 16 * 4;
444 #else
445 if (personality(infop->personality) == PER_LINUX32)
446 regs->u_regs[14] = infop->start_stack - 16 * 4;
447 else
448 regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
449 #endif
452 #else
453 #define ELF_START_MMAP 0x80000000
454 #define ELF_HWCAP (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
455 | HWCAP_SPARC_MULDIV)
456 #define elf_check_arch(x) ( (x) == EM_SPARC )
458 #define ELF_CLASS ELFCLASS32
459 #define ELF_ARCH EM_SPARC
461 static inline void init_thread(struct target_pt_regs *regs,
462 struct image_info *infop)
464 regs->psr = 0;
465 regs->pc = infop->entry;
466 regs->npc = regs->pc + 4;
467 regs->y = 0;
468 regs->u_regs[14] = infop->start_stack - 16 * 4;
471 #endif
472 #endif
474 #ifdef TARGET_PPC
476 #define ELF_START_MMAP 0x80000000
478 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
480 #define elf_check_arch(x) ( (x) == EM_PPC64 )
482 #define ELF_CLASS ELFCLASS64
484 #else
486 #define elf_check_arch(x) ( (x) == EM_PPC )
488 #define ELF_CLASS ELFCLASS32
490 #endif
492 #define ELF_ARCH EM_PPC
494 /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
495 See arch/powerpc/include/asm/cputable.h. */
496 enum {
497 QEMU_PPC_FEATURE_32 = 0x80000000,
498 QEMU_PPC_FEATURE_64 = 0x40000000,
499 QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
500 QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
501 QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
502 QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
503 QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
504 QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
505 QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
506 QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
507 QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
508 QEMU_PPC_FEATURE_NO_TB = 0x00100000,
509 QEMU_PPC_FEATURE_POWER4 = 0x00080000,
510 QEMU_PPC_FEATURE_POWER5 = 0x00040000,
511 QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
512 QEMU_PPC_FEATURE_CELL = 0x00010000,
513 QEMU_PPC_FEATURE_BOOKE = 0x00008000,
514 QEMU_PPC_FEATURE_SMT = 0x00004000,
515 QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
516 QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
517 QEMU_PPC_FEATURE_PA6T = 0x00000800,
518 QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
519 QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
520 QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
521 QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
522 QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
524 QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
525 QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
528 #define ELF_HWCAP get_elf_hwcap()
530 static uint32_t get_elf_hwcap(void)
532 CPUState *e = thread_env;
533 uint32_t features = 0;
535 /* We don't have to be terribly complete here; the high points are
536 Altivec/FP/SPE support. Anything else is just a bonus. */
537 #define GET_FEATURE(flag, feature) \
538 do {if (e->insns_flags & flag) features |= feature; } while(0)
539 GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
540 GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
541 GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
542 GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
543 GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
544 GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
545 GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
546 GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
547 #undef GET_FEATURE
549 return features;
553 * The requirements here are:
554 * - keep the final alignment of sp (sp & 0xf)
555 * - make sure the 32-bit value at the first 16 byte aligned position of
556 * AUXV is greater than 16 for glibc compatibility.
557 * AT_IGNOREPPC is used for that.
558 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
559 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
561 #define DLINFO_ARCH_ITEMS 5
562 #define ARCH_DLINFO \
563 do { \
564 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
565 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
566 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
567 /* \
568 * Now handle glibc compatibility. \
569 */ \
570 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
571 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
572 } while (0)
574 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
576 _regs->gpr[1] = infop->start_stack;
577 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
578 _regs->gpr[2] = ldq_raw(infop->entry + 8) + infop->load_addr;
579 infop->entry = ldq_raw(infop->entry) + infop->load_addr;
580 #endif
581 _regs->nip = infop->entry;
584 /* See linux kernel: arch/powerpc/include/asm/elf.h. */
585 #define ELF_NREG 48
586 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
588 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
590 int i;
591 target_ulong ccr = 0;
593 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
594 (*regs)[i] = tswapl(env->gpr[i]);
597 (*regs)[32] = tswapl(env->nip);
598 (*regs)[33] = tswapl(env->msr);
599 (*regs)[35] = tswapl(env->ctr);
600 (*regs)[36] = tswapl(env->lr);
601 (*regs)[37] = tswapl(env->xer);
603 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
604 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
606 (*regs)[38] = tswapl(ccr);
609 #define USE_ELF_CORE_DUMP
610 #define ELF_EXEC_PAGESIZE 4096
612 #endif
614 #ifdef TARGET_MIPS
616 #define ELF_START_MMAP 0x80000000
618 #define elf_check_arch(x) ( (x) == EM_MIPS )
620 #ifdef TARGET_MIPS64
621 #define ELF_CLASS ELFCLASS64
622 #else
623 #define ELF_CLASS ELFCLASS32
624 #endif
625 #define ELF_ARCH EM_MIPS
627 static inline void init_thread(struct target_pt_regs *regs,
628 struct image_info *infop)
630 regs->cp0_status = 2 << CP0St_KSU;
631 regs->cp0_epc = infop->entry;
632 regs->regs[29] = infop->start_stack;
635 /* See linux kernel: arch/mips/include/asm/elf.h. */
636 #define ELF_NREG 45
637 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
639 /* See linux kernel: arch/mips/include/asm/reg.h. */
640 enum {
641 #ifdef TARGET_MIPS64
642 TARGET_EF_R0 = 0,
643 #else
644 TARGET_EF_R0 = 6,
645 #endif
646 TARGET_EF_R26 = TARGET_EF_R0 + 26,
647 TARGET_EF_R27 = TARGET_EF_R0 + 27,
648 TARGET_EF_LO = TARGET_EF_R0 + 32,
649 TARGET_EF_HI = TARGET_EF_R0 + 33,
650 TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
651 TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
652 TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
653 TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
656 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */
657 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
659 int i;
661 for (i = 0; i < TARGET_EF_R0; i++) {
662 (*regs)[i] = 0;
664 (*regs)[TARGET_EF_R0] = 0;
666 for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
667 (*regs)[TARGET_EF_R0 + i] = tswapl(env->active_tc.gpr[i]);
670 (*regs)[TARGET_EF_R26] = 0;
671 (*regs)[TARGET_EF_R27] = 0;
672 (*regs)[TARGET_EF_LO] = tswapl(env->active_tc.LO[0]);
673 (*regs)[TARGET_EF_HI] = tswapl(env->active_tc.HI[0]);
674 (*regs)[TARGET_EF_CP0_EPC] = tswapl(env->active_tc.PC);
675 (*regs)[TARGET_EF_CP0_BADVADDR] = tswapl(env->CP0_BadVAddr);
676 (*regs)[TARGET_EF_CP0_STATUS] = tswapl(env->CP0_Status);
677 (*regs)[TARGET_EF_CP0_CAUSE] = tswapl(env->CP0_Cause);
680 #define USE_ELF_CORE_DUMP
681 #define ELF_EXEC_PAGESIZE 4096
683 #endif /* TARGET_MIPS */
685 #ifdef TARGET_MICROBLAZE
687 #define ELF_START_MMAP 0x80000000
689 #define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)
691 #define ELF_CLASS ELFCLASS32
692 #define ELF_ARCH EM_MICROBLAZE
694 static inline void init_thread(struct target_pt_regs *regs,
695 struct image_info *infop)
697 regs->pc = infop->entry;
698 regs->r1 = infop->start_stack;
702 #define ELF_EXEC_PAGESIZE 4096
704 #define USE_ELF_CORE_DUMP
705 #define ELF_NREG 38
706 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
/* See linux kernel: arch/microblaze/include/asm/elf.h (register dump layout). */
709 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
711 int i, pos = 0;
713 for (i = 0; i < 32; i++) {
714 (*regs)[pos++] = tswapl(env->regs[i]);
717 for (i = 0; i < 6; i++) {
718 (*regs)[pos++] = tswapl(env->sregs[i]);
722 #endif /* TARGET_MICROBLAZE */
724 #ifdef TARGET_SH4
726 #define ELF_START_MMAP 0x80000000
728 #define elf_check_arch(x) ( (x) == EM_SH )
730 #define ELF_CLASS ELFCLASS32
731 #define ELF_ARCH EM_SH
733 static inline void init_thread(struct target_pt_regs *regs,
734 struct image_info *infop)
736 /* Check other registers XXXXX */
737 regs->pc = infop->entry;
738 regs->regs[15] = infop->start_stack;
741 /* See linux kernel: arch/sh/include/asm/elf.h. */
742 #define ELF_NREG 23
743 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
745 /* See linux kernel: arch/sh/include/asm/ptrace.h. */
746 enum {
747 TARGET_REG_PC = 16,
748 TARGET_REG_PR = 17,
749 TARGET_REG_SR = 18,
750 TARGET_REG_GBR = 19,
751 TARGET_REG_MACH = 20,
752 TARGET_REG_MACL = 21,
753 TARGET_REG_SYSCALL = 22
756 static inline void elf_core_copy_regs(target_elf_gregset_t *regs,
757 const CPUState *env)
759 int i;
761 for (i = 0; i < 16; i++) {
762 (*regs[i]) = tswapl(env->gregs[i]);
765 (*regs)[TARGET_REG_PC] = tswapl(env->pc);
766 (*regs)[TARGET_REG_PR] = tswapl(env->pr);
767 (*regs)[TARGET_REG_SR] = tswapl(env->sr);
768 (*regs)[TARGET_REG_GBR] = tswapl(env->gbr);
769 (*regs)[TARGET_REG_MACH] = tswapl(env->mach);
770 (*regs)[TARGET_REG_MACL] = tswapl(env->macl);
771 (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
774 #define USE_ELF_CORE_DUMP
775 #define ELF_EXEC_PAGESIZE 4096
777 #endif
779 #ifdef TARGET_CRIS
781 #define ELF_START_MMAP 0x80000000
783 #define elf_check_arch(x) ( (x) == EM_CRIS )
785 #define ELF_CLASS ELFCLASS32
786 #define ELF_ARCH EM_CRIS
788 static inline void init_thread(struct target_pt_regs *regs,
789 struct image_info *infop)
791 regs->erp = infop->entry;
794 #define ELF_EXEC_PAGESIZE 8192
796 #endif
798 #ifdef TARGET_M68K
800 #define ELF_START_MMAP 0x80000000
802 #define elf_check_arch(x) ( (x) == EM_68K )
804 #define ELF_CLASS ELFCLASS32
805 #define ELF_ARCH EM_68K
807 /* ??? Does this need to do anything?
808 #define ELF_PLAT_INIT(_r) */
810 static inline void init_thread(struct target_pt_regs *regs,
811 struct image_info *infop)
813 regs->usp = infop->start_stack;
814 regs->sr = 0;
815 regs->pc = infop->entry;
818 /* See linux kernel: arch/m68k/include/asm/elf.h. */
819 #define ELF_NREG 20
820 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
822 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
824 (*regs)[0] = tswapl(env->dregs[1]);
825 (*regs)[1] = tswapl(env->dregs[2]);
826 (*regs)[2] = tswapl(env->dregs[3]);
827 (*regs)[3] = tswapl(env->dregs[4]);
828 (*regs)[4] = tswapl(env->dregs[5]);
829 (*regs)[5] = tswapl(env->dregs[6]);
830 (*regs)[6] = tswapl(env->dregs[7]);
831 (*regs)[7] = tswapl(env->aregs[0]);
832 (*regs)[8] = tswapl(env->aregs[1]);
833 (*regs)[9] = tswapl(env->aregs[2]);
834 (*regs)[10] = tswapl(env->aregs[3]);
835 (*regs)[11] = tswapl(env->aregs[4]);
836 (*regs)[12] = tswapl(env->aregs[5]);
837 (*regs)[13] = tswapl(env->aregs[6]);
838 (*regs)[14] = tswapl(env->dregs[0]);
839 (*regs)[15] = tswapl(env->aregs[7]);
840 (*regs)[16] = tswapl(env->dregs[0]); /* FIXME: orig_d0 */
841 (*regs)[17] = tswapl(env->sr);
842 (*regs)[18] = tswapl(env->pc);
843 (*regs)[19] = 0; /* FIXME: regs->format | regs->vector */
846 #define USE_ELF_CORE_DUMP
847 #define ELF_EXEC_PAGESIZE 8192
849 #endif
851 #ifdef TARGET_ALPHA
853 #define ELF_START_MMAP (0x30000000000ULL)
855 #define elf_check_arch(x) ( (x) == ELF_ARCH )
857 #define ELF_CLASS ELFCLASS64
858 #define ELF_ARCH EM_ALPHA
860 static inline void init_thread(struct target_pt_regs *regs,
861 struct image_info *infop)
863 regs->pc = infop->entry;
864 regs->ps = 8;
865 regs->usp = infop->start_stack;
868 #define ELF_EXEC_PAGESIZE 8192
870 #endif /* TARGET_ALPHA */
872 #ifdef TARGET_S390X
874 #define ELF_START_MMAP (0x20000000000ULL)
876 #define elf_check_arch(x) ( (x) == ELF_ARCH )
878 #define ELF_CLASS ELFCLASS64
879 #define ELF_DATA ELFDATA2MSB
880 #define ELF_ARCH EM_S390
882 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
884 regs->psw.addr = infop->entry;
885 regs->psw.mask = PSW_MASK_64 | PSW_MASK_32;
886 regs->gprs[15] = infop->start_stack;
889 #endif /* TARGET_S390X */
891 #ifndef ELF_PLATFORM
892 #define ELF_PLATFORM (NULL)
893 #endif
895 #ifndef ELF_HWCAP
896 #define ELF_HWCAP 0
897 #endif
899 #ifdef TARGET_ABI32
900 #undef ELF_CLASS
901 #define ELF_CLASS ELFCLASS32
902 #undef bswaptls
903 #define bswaptls(ptr) bswap32s(ptr)
904 #endif
906 #include "elf.h"
908 struct exec
910 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
911 unsigned int a_text; /* length of text, in bytes */
912 unsigned int a_data; /* length of data, in bytes */
913 unsigned int a_bss; /* length of uninitialized data area, in bytes */
914 unsigned int a_syms; /* length of symbol table data in file, in bytes */
915 unsigned int a_entry; /* start address */
916 unsigned int a_trsize; /* length of relocation info for text, in bytes */
917 unsigned int a_drsize; /* length of relocation info for data, in bytes */
921 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
922 #define OMAGIC 0407
923 #define NMAGIC 0410
924 #define ZMAGIC 0413
925 #define QMAGIC 0314
927 /* Necessary parameters */
928 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
929 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
930 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
932 #define DLINFO_ITEMS 13
/* Kernel-heritage helper: in user-mode emulation a "copy from user
 * space" is just a plain memcpy. */
static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
}
939 #ifdef BSWAP_NEEDED
940 static void bswap_ehdr(struct elfhdr *ehdr)
942 bswap16s(&ehdr->e_type); /* Object file type */
943 bswap16s(&ehdr->e_machine); /* Architecture */
944 bswap32s(&ehdr->e_version); /* Object file version */
945 bswaptls(&ehdr->e_entry); /* Entry point virtual address */
946 bswaptls(&ehdr->e_phoff); /* Program header table file offset */
947 bswaptls(&ehdr->e_shoff); /* Section header table file offset */
948 bswap32s(&ehdr->e_flags); /* Processor-specific flags */
949 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
950 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
951 bswap16s(&ehdr->e_phnum); /* Program header table entry count */
952 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
953 bswap16s(&ehdr->e_shnum); /* Section header table entry count */
954 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
957 static void bswap_phdr(struct elf_phdr *phdr, int phnum)
959 int i;
960 for (i = 0; i < phnum; ++i, ++phdr) {
961 bswap32s(&phdr->p_type); /* Segment type */
962 bswap32s(&phdr->p_flags); /* Segment flags */
963 bswaptls(&phdr->p_offset); /* Segment file offset */
964 bswaptls(&phdr->p_vaddr); /* Segment virtual address */
965 bswaptls(&phdr->p_paddr); /* Segment physical address */
966 bswaptls(&phdr->p_filesz); /* Segment size in file */
967 bswaptls(&phdr->p_memsz); /* Segment size in memory */
968 bswaptls(&phdr->p_align); /* Segment alignment */
972 static void bswap_shdr(struct elf_shdr *shdr, int shnum)
974 int i;
975 for (i = 0; i < shnum; ++i, ++shdr) {
976 bswap32s(&shdr->sh_name);
977 bswap32s(&shdr->sh_type);
978 bswaptls(&shdr->sh_flags);
979 bswaptls(&shdr->sh_addr);
980 bswaptls(&shdr->sh_offset);
981 bswaptls(&shdr->sh_size);
982 bswap32s(&shdr->sh_link);
983 bswap32s(&shdr->sh_info);
984 bswaptls(&shdr->sh_addralign);
985 bswaptls(&shdr->sh_entsize);
989 static void bswap_sym(struct elf_sym *sym)
991 bswap32s(&sym->st_name);
992 bswaptls(&sym->st_value);
993 bswaptls(&sym->st_size);
994 bswap16s(&sym->st_shndx);
996 #else
997 static inline void bswap_ehdr(struct elfhdr *ehdr) { }
998 static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
999 static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
1000 static inline void bswap_sym(struct elf_sym *sym) { }
1001 #endif
1003 #ifdef USE_ELF_CORE_DUMP
1004 static int elf_core_dump(int, const CPUState *);
1005 #endif /* USE_ELF_CORE_DUMP */
1006 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias);
1008 /* Verify the portions of EHDR within E_IDENT for the target.
1009 This can be performed before bswapping the entire header. */
1010 static bool elf_check_ident(struct elfhdr *ehdr)
1012 return (ehdr->e_ident[EI_MAG0] == ELFMAG0
1013 && ehdr->e_ident[EI_MAG1] == ELFMAG1
1014 && ehdr->e_ident[EI_MAG2] == ELFMAG2
1015 && ehdr->e_ident[EI_MAG3] == ELFMAG3
1016 && ehdr->e_ident[EI_CLASS] == ELF_CLASS
1017 && ehdr->e_ident[EI_DATA] == ELF_DATA
1018 && ehdr->e_ident[EI_VERSION] == EV_CURRENT);
1021 /* Verify the portions of EHDR outside of E_IDENT for the target.
1022 This has to wait until after bswapping the header. */
1023 static bool elf_check_ehdr(struct elfhdr *ehdr)
1025 return (elf_check_arch(ehdr->e_machine)
1026 && ehdr->e_ehsize == sizeof(struct elfhdr)
1027 && ehdr->e_phentsize == sizeof(struct elf_phdr)
1028 && ehdr->e_shentsize == sizeof(struct elf_shdr)
1029 && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN));
1033 * 'copy_elf_strings()' copies argument/envelope strings from user
1034 * memory to free pages in kernel mem. These are in a format ready
1035 * to be put directly into the top of new user memory.
1038 static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
1039 abi_ulong p)
1041 char *tmp, *tmp1, *pag = NULL;
1042 int len, offset = 0;
1044 if (!p) {
1045 return 0; /* bullet-proofing */
1047 while (argc-- > 0) {
1048 tmp = argv[argc];
1049 if (!tmp) {
1050 fprintf(stderr, "VFS: argc is wrong");
1051 exit(-1);
1053 tmp1 = tmp;
1054 while (*tmp++);
1055 len = tmp - tmp1;
1056 if (p < len) { /* this shouldn't happen - 128kB */
1057 return 0;
1059 while (len) {
1060 --p; --tmp; --len;
1061 if (--offset < 0) {
1062 offset = p % TARGET_PAGE_SIZE;
1063 pag = (char *)page[p/TARGET_PAGE_SIZE];
1064 if (!pag) {
1065 pag = (char *)malloc(TARGET_PAGE_SIZE);
1066 memset(pag, 0, TARGET_PAGE_SIZE);
1067 page[p/TARGET_PAGE_SIZE] = pag;
1068 if (!pag)
1069 return 0;
1072 if (len == 0 || offset == 0) {
1073 *(pag + offset) = *tmp;
1075 else {
1076 int bytes_to_copy = (len > offset) ? offset : len;
1077 tmp -= bytes_to_copy;
1078 p -= bytes_to_copy;
1079 offset -= bytes_to_copy;
1080 len -= bytes_to_copy;
1081 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
1085 return p;
/* Allocate the guest stack, install a guard page at its low end, and
   copy the argument/environment pages prepared by copy_elf_strings()
   into place at the top.  Returns P converted from a relative offset
   into an absolute guest address inside the new stack.  */
static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
                                 struct image_info *info)
{
    abi_ulong stack_base, size, error, guard;
    int i;

    /* Create enough stack to hold everything.  If we don't use
       it for args, we'll use it for something else.  */
    size = guest_stack_size;
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE) {
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    }
    /* The guard must cover at least one whole host page or the
       mprotect below would leave part of a host page accessible.  */
    guard = TARGET_PAGE_SIZE;
    if (guard < qemu_real_host_page_size) {
        guard = qemu_real_host_page_size;
    }

    error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (error == -1) {
        perror("mmap stack");
        exit(-1);
    }

    /* We reserve one extra page at the top of the stack as guard.
       (The guest stack grows downward, so the guard sits at the low
       end of the mapping, i.e. at the limit of downward growth.)  */
    target_mprotect(error, guard, PROT_NONE);

    info->stack_limit = error + guard;
    /* The prepared argument pages occupy the top MAX_ARG_PAGES pages. */
    stack_base = info->stack_limit + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    p += stack_base;

    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
        if (bprm->page[i]) {
            info->rss++;
            /* FIXME - check return value of memcpy_to_target() for failure */
            memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
            /* Scratch page no longer needed once copied into the guest. */
            free(bprm->page[i]);
        }
        stack_base += TARGET_PAGE_SIZE;
    }
    return p;
}
/* Map and zero the bss.  We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  ELF_BSS is the end of the file
   data (start of bss), LAST_BSS the end of the segment's memory image;
   PROT is the protection of the enclosing PT_LOAD segment.  */
static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
{
    uintptr_t host_start, host_map_start, host_end;

    last_bss = TARGET_PAGE_ALIGN(last_bss);

    /* ??? There is confusion between qemu_real_host_page_size and
       qemu_host_page_size here and elsewhere in target_mmap, which
       may lead to the end of the data section mapping from the file
       not being mapped.  At least there was an explicit test and
       comment for that here, suggesting that "the file size must
       be known".  The comment probably pre-dates the introduction
       of the fstat system call in target_mmap which does in fact
       find out the size.  What isn't clear is if the workaround
       here is still actually needed.  For now, continue with it,
       but merge it with the "normal" mmap that would allocate the bss.  */

    host_start = (uintptr_t) g2h(elf_bss);
    host_end = (uintptr_t) g2h(last_bss);
    /* Round up to the first whole host page inside the bss range.  */
    host_map_start = (host_start + qemu_real_host_page_size - 1);
    host_map_start &= -qemu_real_host_page_size;

    if (host_map_start < host_end) {
        /* Whole host pages of bss: map fresh anonymous (zeroed) memory. */
        void *p = mmap((void *)host_map_start, host_end - host_map_start,
                       prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            perror("cannot mmap brk");
            exit(-1);
        }

        /* Since we didn't use target_mmap, make sure to record
           the validity of the pages with qemu.  */
        page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot|PAGE_VALID);
    }

    /* The fractional head of the bss shares a host page with file data;
       it is already mapped, so just zero it by hand.  */
    if (host_start < host_map_start) {
        memset((void *)host_start, 0, host_map_start - host_start);
    }
}
#ifdef CONFIG_USE_FDPIC
/* Push an elf32_fdpic_loadmap onto the guest stack: one 12-byte
   elf32_fdpic_loadseg entry per PT_LOAD segment, preceded by a 4-byte
   header (version, nsegs).  Records the map address and FDPIC
   personality in INFO and returns the new stack pointer.  */
static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp)
{
    struct elf32_fdpic_loadseg *segs = info->loadsegs;
    uint16_t idx;

    /* elf32_fdpic_loadseg entries, written last-to-first so they end
       up in ascending order on the downward-growing stack.  */
    for (idx = info->nsegs; idx-- > 0; ) {
        sp -= 12;
        put_user_u32(segs[idx].addr, sp + 0);
        put_user_u32(segs[idx].p_vaddr, sp + 4);
        put_user_u32(segs[idx].p_memsz, sp + 8);
    }

    /* elf32_fdpic_loadmap header.  */
    sp -= 4;
    put_user_u16(0, sp + 0);            /* version */
    put_user_u16(info->nsegs, sp + 2);  /* nsegs */

    info->personality = PER_LINUX_FDPIC;
    info->loadmap_addr = sp;

    return sp;
}
#endif
/* Build the initial guest stack image above the copied strings:
   (optionally) the FDPIC load maps, the platform string, the AT_RANDOM
   seed bytes, the auxiliary vector, and finally envp/argv/argc via
   loader_build_argptr().  P is the current stack top holding the
   strings; returns the final stack pointer.  */
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr *exec,
                                   struct image_info *info,
                                   struct image_info *interp_info)
{
    abi_ulong sp;
    int size;
    int i;
    abi_ulong u_rand_bytes;
    uint8_t k_rand_bytes[16];
    abi_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(elf_addr_t);   /* size of one auxv slot */

    sp = p;

#ifdef CONFIG_USE_FDPIC
    /* Needs to be before we load the env/argc/... */
    if (elf_is_fdpic(exec)) {
        /* Need 4 byte alignment for these structs */
        sp &= ~3;
        sp = loader_build_fdpic_loadmap(info, sp);
        info->other_info = interp_info;
        if (interp_info) {
            interp_info->other_info = info;
            sp = loader_build_fdpic_loadmap(interp_info, sp);
        }
    }
#endif

    /* Copy the AT_PLATFORM string (if any) onto the stack, aligned to
       an auxv slot, and remember its guest address.  */
    u_platform = 0;
    k_platform = ELF_PLATFORM;
    if (k_platform) {
        size_t len = strlen(k_platform) + 1;
        sp -= (len + n - 1) & ~(n - 1);
        u_platform = sp;
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(sp, k_platform, len);
    }

    /*
     * Generate 16 random bytes for userspace PRNG seeding (not
     * cryptographically secure but that's not the aim of QEMU).
     */
    srand((unsigned int) time(NULL));
    for (i = 0; i < 16; i++) {
        k_rand_bytes[i] = rand();
    }
    sp -= 16;
    u_rand_bytes = sp;
    /* FIXME - check return value of memcpy_to_target() for failure */
    memcpy_to_target(sp, k_rand_bytes, 16);

    /*
     * Force 16 byte _final_ alignment here for generality.
     * Compute the total size of everything still to be pushed (auxv
     * entries, argv/envp pointer arrays, argc) and pre-adjust SP so
     * that after all pushes it ends up 16-byte aligned.
     */
    sp = sp &~ (abi_ulong)15;
    size = (DLINFO_ITEMS + 1) * 2;      /* auxv entries incl. AT_NULL */
    if (k_platform)
        size += 2;                      /* AT_PLATFORM */
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;      /* arch-specific auxv entries */
#endif
    size += envc + argc + 2;            /* pointer arrays + 2 NULLs */
    size += 1;  /* argc itself */
    size *= n;
    if (size & 15)
        sp -= 16 - (size & 15);

    /* This is correct because Linux defines
     * elf_addr_t as Elf32_Off / Elf64_Off
     */
#define NEW_AUX_ENT(id, val) do {               \
        sp -= n; put_user_ual(val, sp);         \
        sp -= n; put_user_ual(id, sp);          \
    } while(0)

    /* Pushed first so it ends up at the highest address: the auxv
       terminator.  */
    NEW_AUX_ENT (AT_NULL, 0);

    /* There must be exactly DLINFO_ITEMS entries here. */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, info->entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
    NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes);

    if (k_platform)
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

    info->saved_auxv = sp;

    /* Push the envp/argv pointer arrays and argc below the auxv.  */
    sp = loader_build_argptr(envc, argc, sp, p, 0);
    return sp;
}
static void probe_guest_base(const char *image_name,
                             abi_ulong loaddr, abi_ulong hiaddr)
{
    /* Probe for a suitable guest base address, if the user has not set
     * it explicitly, and set guest_base appropriately.
     * In case of error we will print a suitable message and exit.
     */
#if defined(CONFIG_USE_GUEST_BASE)
    const char *errmsg;
    if (!have_guest_base && !reserved_va) {
        unsigned long host_start, real_start, host_size;

        /* Round addresses to page boundaries.  */
        loaddr &= qemu_host_page_mask;
        hiaddr = HOST_PAGE_ALIGN(hiaddr);

        if (loaddr < mmap_min_addr) {
            host_start = HOST_PAGE_ALIGN(mmap_min_addr);
        } else {
            host_start = loaddr;
            /* Detect truncation when abi_ulong is wider than the
               host's unsigned long.  */
            if (host_start != loaddr) {
                errmsg = "Address overflow loading ELF binary";
                goto exit_errmsg;
            }
        }
        host_size = hiaddr - loaddr;
        while (1) {
            /* Do not use mmap_find_vma here because that is limited to the
               guest address space.  We are going to make the
               guest address space fit whatever we're given.  */
            real_start = (unsigned long)
                mmap((void *)host_start, host_size, PROT_NONE,
                     MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
            if (real_start == (unsigned long)-1) {
                goto exit_perror;
            }
            /* Without MAP_FIXED the kernel may have placed the mapping
               elsewhere; only accept the exact address we asked for.  */
            if (real_start == host_start) {
                break;
            }
            /* That address didn't work.  Unmap and try a different one.
               The address the host picked because is typically right at
               the top of the host address space and leaves the guest with
               no usable address space.  Resort to a linear search.  We
               already compensated for mmap_min_addr, so this should not
               happen often.  Probably means we got unlucky and host
               address space randomization put a shared library somewhere
               inconvenient.  */
            munmap((void *)real_start, host_size);
            host_start += qemu_host_page_size;
            if (host_start == loaddr) {
                /* Theoretically possible if host doesn't have any suitably
                   aligned areas.  Normally the first mmap will fail.  */
                errmsg = "Unable to find space for application";
                goto exit_errmsg;
            }
        }
        qemu_log("Relocating guest address space from 0x"
                 TARGET_ABI_FMT_lx " to 0x%lx\n",
                 loaddr, real_start);
        guest_base = real_start - loaddr;
    }
    return;

exit_perror:
    errmsg = strerror(errno);
exit_errmsg:
    fprintf(stderr, "%s: %s\n", image_name, errmsg);
    exit(-1);
#endif
}
1384 /* Load an ELF image into the address space.
1386 IMAGE_NAME is the filename of the image, to use in error messages.
1387 IMAGE_FD is the open file descriptor for the image.
1389 BPRM_BUF is a copy of the beginning of the file; this of course
1390 contains the elf file header at offset 0. It is assumed that this
1391 buffer is sufficiently aligned to present no problems to the host
1392 in accessing data at aligned offsets within the buffer.
1394 On return: INFO values will be filled in, as necessary or available. */
1396 static void load_elf_image(const char *image_name, int image_fd,
1397 struct image_info *info, char **pinterp_name,
1398 char bprm_buf[BPRM_BUF_SIZE])
1400 struct elfhdr *ehdr = (struct elfhdr *)bprm_buf;
1401 struct elf_phdr *phdr;
1402 abi_ulong load_addr, load_bias, loaddr, hiaddr, error;
1403 int i, retval;
1404 const char *errmsg;
1406 /* First of all, some simple consistency checks */
1407 errmsg = "Invalid ELF image for this architecture";
1408 if (!elf_check_ident(ehdr)) {
1409 goto exit_errmsg;
1411 bswap_ehdr(ehdr);
1412 if (!elf_check_ehdr(ehdr)) {
1413 goto exit_errmsg;
1416 i = ehdr->e_phnum * sizeof(struct elf_phdr);
1417 if (ehdr->e_phoff + i <= BPRM_BUF_SIZE) {
1418 phdr = (struct elf_phdr *)(bprm_buf + ehdr->e_phoff);
1419 } else {
1420 phdr = (struct elf_phdr *) alloca(i);
1421 retval = pread(image_fd, phdr, i, ehdr->e_phoff);
1422 if (retval != i) {
1423 goto exit_read;
1426 bswap_phdr(phdr, ehdr->e_phnum);
1428 #ifdef CONFIG_USE_FDPIC
1429 info->nsegs = 0;
1430 info->pt_dynamic_addr = 0;
1431 #endif
1433 /* Find the maximum size of the image and allocate an appropriate
1434 amount of memory to handle that. */
1435 loaddr = -1, hiaddr = 0;
1436 for (i = 0; i < ehdr->e_phnum; ++i) {
1437 if (phdr[i].p_type == PT_LOAD) {
1438 abi_ulong a = phdr[i].p_vaddr;
1439 if (a < loaddr) {
1440 loaddr = a;
1442 a += phdr[i].p_memsz;
1443 if (a > hiaddr) {
1444 hiaddr = a;
1446 #ifdef CONFIG_USE_FDPIC
1447 ++info->nsegs;
1448 #endif
1452 load_addr = loaddr;
1453 if (ehdr->e_type == ET_DYN) {
1454 /* The image indicates that it can be loaded anywhere. Find a
1455 location that can hold the memory space required. If the
1456 image is pre-linked, LOADDR will be non-zero. Since we do
1457 not supply MAP_FIXED here we'll use that address if and
1458 only if it remains available. */
1459 load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
1460 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
1461 -1, 0);
1462 if (load_addr == -1) {
1463 goto exit_perror;
1465 } else if (pinterp_name != NULL) {
1466 /* This is the main executable. Make sure that the low
1467 address does not conflict with MMAP_MIN_ADDR or the
1468 QEMU application itself. */
1469 probe_guest_base(image_name, loaddr, hiaddr);
1471 load_bias = load_addr - loaddr;
1473 #ifdef CONFIG_USE_FDPIC
1475 struct elf32_fdpic_loadseg *loadsegs = info->loadsegs =
1476 qemu_malloc(sizeof(*loadsegs) * info->nsegs);
1478 for (i = 0; i < ehdr->e_phnum; ++i) {
1479 switch (phdr[i].p_type) {
1480 case PT_DYNAMIC:
1481 info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias;
1482 break;
1483 case PT_LOAD:
1484 loadsegs->addr = phdr[i].p_vaddr + load_bias;
1485 loadsegs->p_vaddr = phdr[i].p_vaddr;
1486 loadsegs->p_memsz = phdr[i].p_memsz;
1487 ++loadsegs;
1488 break;
1492 #endif
1494 info->load_bias = load_bias;
1495 info->load_addr = load_addr;
1496 info->entry = ehdr->e_entry + load_bias;
1497 info->start_code = -1;
1498 info->end_code = 0;
1499 info->start_data = -1;
1500 info->end_data = 0;
1501 info->brk = 0;
1503 for (i = 0; i < ehdr->e_phnum; i++) {
1504 struct elf_phdr *eppnt = phdr + i;
1505 if (eppnt->p_type == PT_LOAD) {
1506 abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em;
1507 int elf_prot = 0;
1509 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
1510 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1511 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1513 vaddr = load_bias + eppnt->p_vaddr;
1514 vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr);
1515 vaddr_ps = TARGET_ELF_PAGESTART(vaddr);
1517 error = target_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po,
1518 elf_prot, MAP_PRIVATE | MAP_FIXED,
1519 image_fd, eppnt->p_offset - vaddr_po);
1520 if (error == -1) {
1521 goto exit_perror;
1524 vaddr_ef = vaddr + eppnt->p_filesz;
1525 vaddr_em = vaddr + eppnt->p_memsz;
1527 /* If the load segment requests extra zeros (e.g. bss), map it. */
1528 if (vaddr_ef < vaddr_em) {
1529 zero_bss(vaddr_ef, vaddr_em, elf_prot);
1532 /* Find the full program boundaries. */
1533 if (elf_prot & PROT_EXEC) {
1534 if (vaddr < info->start_code) {
1535 info->start_code = vaddr;
1537 if (vaddr_ef > info->end_code) {
1538 info->end_code = vaddr_ef;
1541 if (elf_prot & PROT_WRITE) {
1542 if (vaddr < info->start_data) {
1543 info->start_data = vaddr;
1545 if (vaddr_ef > info->end_data) {
1546 info->end_data = vaddr_ef;
1548 if (vaddr_em > info->brk) {
1549 info->brk = vaddr_em;
1552 } else if (eppnt->p_type == PT_INTERP && pinterp_name) {
1553 char *interp_name;
1555 if (*pinterp_name) {
1556 errmsg = "Multiple PT_INTERP entries";
1557 goto exit_errmsg;
1559 interp_name = malloc(eppnt->p_filesz);
1560 if (!interp_name) {
1561 goto exit_perror;
1564 if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) {
1565 memcpy(interp_name, bprm_buf + eppnt->p_offset,
1566 eppnt->p_filesz);
1567 } else {
1568 retval = pread(image_fd, interp_name, eppnt->p_filesz,
1569 eppnt->p_offset);
1570 if (retval != eppnt->p_filesz) {
1571 goto exit_perror;
1574 if (interp_name[eppnt->p_filesz - 1] != 0) {
1575 errmsg = "Invalid PT_INTERP entry";
1576 goto exit_errmsg;
1578 *pinterp_name = interp_name;
1582 if (info->end_data == 0) {
1583 info->start_data = info->end_code;
1584 info->end_data = info->end_code;
1585 info->brk = info->end_code;
1588 if (qemu_log_enabled()) {
1589 load_symbols(ehdr, image_fd, load_bias);
1592 close(image_fd);
1593 return;
1595 exit_read:
1596 if (retval >= 0) {
1597 errmsg = "Incomplete read of file header";
1598 goto exit_errmsg;
1600 exit_perror:
1601 errmsg = strerror(errno);
1602 exit_errmsg:
1603 fprintf(stderr, "%s: %s\n", image_name, errmsg);
1604 exit(-1);
1607 static void load_elf_interp(const char *filename, struct image_info *info,
1608 char bprm_buf[BPRM_BUF_SIZE])
1610 int fd, retval;
1612 fd = open(path(filename), O_RDONLY);
1613 if (fd < 0) {
1614 goto exit_perror;
1617 retval = read(fd, bprm_buf, BPRM_BUF_SIZE);
1618 if (retval < 0) {
1619 goto exit_perror;
1621 if (retval < BPRM_BUF_SIZE) {
1622 memset(bprm_buf + retval, 0, BPRM_BUF_SIZE - retval);
1625 load_elf_image(filename, fd, info, NULL, bprm_buf);
1626 return;
1628 exit_perror:
1629 fprintf(stderr, "%s: %s\n", filename, strerror(errno));
1630 exit(-1);
1633 static int symfind(const void *s0, const void *s1)
1635 struct elf_sym *key = (struct elf_sym *)s0;
1636 struct elf_sym *sym = (struct elf_sym *)s1;
1637 int result = 0;
1638 if (key->st_value < sym->st_value) {
1639 result = -1;
1640 } else if (key->st_value >= sym->st_value + sym->st_size) {
1641 result = 1;
1643 return result;
1646 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
1648 #if ELF_CLASS == ELFCLASS32
1649 struct elf_sym *syms = s->disas_symtab.elf32;
1650 #else
1651 struct elf_sym *syms = s->disas_symtab.elf64;
1652 #endif
1654 // binary search
1655 struct elf_sym key;
1656 struct elf_sym *sym;
1658 key.st_value = orig_addr;
1660 sym = bsearch(&key, syms, s->disas_num_syms, sizeof(*syms), symfind);
1661 if (sym != NULL) {
1662 return s->disas_strtab + sym->st_name;
1665 return "";
1668 /* FIXME: This should use elf_ops.h */
1669 static int symcmp(const void *s0, const void *s1)
1671 struct elf_sym *sym0 = (struct elf_sym *)s0;
1672 struct elf_sym *sym1 = (struct elf_sym *)s1;
1673 return (sym0->st_value < sym1->st_value)
1674 ? -1
1675 : ((sym0->st_value > sym1->st_value) ? 1 : 0);
/* Best attempt to load symbols from this ELF object.  Reads the
   symtab/strtab, keeps only defined STT_FUNC symbols (rebased by
   LOAD_BIAS), sorts them, and registers the result on the global
   syminfos list for the disassembler.  Failures are silent: symbols
   are an optional debugging aid.  */
static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias)
{
    int i, shnum, nsyms, sym_idx = 0, str_idx = 0;
    struct elf_shdr *shdr;
    char *strings = NULL;
    struct syminfo *s = NULL;
    struct elf_sym *new_syms, *syms = NULL;

    shnum = hdr->e_shnum;
    i = shnum * sizeof(struct elf_shdr);
    shdr = (struct elf_shdr *)alloca(i);
    if (pread(fd, shdr, i, hdr->e_shoff) != i) {
        return;
    }

    bswap_shdr(shdr, shnum);
    for (i = 0; i < shnum; ++i) {
        if (shdr[i].sh_type == SHT_SYMTAB) {
            sym_idx = i;
            /* sh_link of a symtab section is the index of its strtab. */
            str_idx = shdr[i].sh_link;
            goto found;
        }
    }

    /* There will be no symbol table if the file was stripped.  */
    return;

 found:
    /* Now know where the strtab and symtab are.  Snarf them.  */
    s = malloc(sizeof(*s));
    if (!s) {
        goto give_up;
    }

    i = shdr[str_idx].sh_size;
    s->disas_strtab = strings = malloc(i);
    if (!strings || pread(fd, strings, i, shdr[str_idx].sh_offset) != i) {
        goto give_up;
    }

    i = shdr[sym_idx].sh_size;
    syms = malloc(i);
    if (!syms || pread(fd, syms, i, shdr[sym_idx].sh_offset) != i) {
        goto give_up;
    }

    nsyms = i / sizeof(struct elf_sym);
    /* In-place filter: unwanted entries are overwritten with the last
       element and nsyms shrinks; i only advances on a kept entry.  */
    for (i = 0; i < nsyms; ) {
        bswap_sym(syms + i);
        /* Throw away entries which we do not need.  */
        if (syms[i].st_shndx == SHN_UNDEF
            || syms[i].st_shndx >= SHN_LORESERVE
            || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
            if (i < --nsyms) {
                syms[i] = syms[nsyms];
            }
        } else {
#if defined(TARGET_ARM) || defined (TARGET_MIPS)
            /* The bottom address bit marks a Thumb or MIPS16 symbol.  */
            syms[i].st_value &= ~(target_ulong)1;
#endif
            syms[i].st_value += load_bias;
            i++;
        }
    }

    /* No "useful" symbol.  */
    if (nsyms == 0) {
        goto give_up;
    }

    /* Attempt to free the storage associated with the local symbols
       that we threw away.  Whether or not this has any effect on the
       memory allocation depends on the malloc implementation and how
       many symbols we managed to discard.  */
    new_syms = realloc(syms, nsyms * sizeof(*syms));
    if (new_syms == NULL) {
        goto give_up;
    }
    syms = new_syms;

    qsort(syms, nsyms, sizeof(*syms), symcmp);

    s->disas_num_syms = nsyms;
#if ELF_CLASS == ELFCLASS32
    s->disas_symtab.elf32 = syms;
#else
    s->disas_symtab.elf64 = syms;
#endif
    s->lookup_symbol = lookup_symbolxx;
    s->next = syminfos;
    syminfos = s;

    return;

 give_up:
    /* free(NULL) is a no-op, so this is safe at any failure point.  */
    free(s);
    free(strings);
    free(syms);
}
/* Top-level ELF loader entry point: load the main image, copy the
   argument/environment strings, set up the stack, load the dynamic
   linker (if any), and build the initial stack tables.  Returns 0;
   any failure exits with a message.  */
int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
                    struct image_info * info)
{
    struct image_info interp_info;
    struct elfhdr elf_ex;
    char *elf_interpreter = NULL;

    info->start_mmap = (abi_ulong)ELF_START_MMAP;
    info->mmap = 0;
    info->rss = 0;

    load_elf_image(bprm->filename, bprm->fd, info,
                   &elf_interpreter, bprm->buf);

    /* ??? We need a copy of the elf header for passing to create_elf_tables.
       If we do nothing, we'll have overwritten this when we re-use bprm->buf
       when we load the interpreter.  */
    elf_ex = *(struct elfhdr *)bprm->buf;

    /* Copied in reverse order so they end up filename, envp, argv
       from the top of the stack down.  */
    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
    bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);
    if (!bprm->p) {
        fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG));
        exit(-1);
    }

    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */
    bprm->p = setup_arg_pages(bprm->p, bprm, info);

    if (elf_interpreter) {
        /* Note this reuses bprm->buf, clobbering the main image's
           header -- hence the elf_ex copy above.  */
        load_elf_interp(elf_interpreter, &interp_info, bprm->buf);

        /* If the program interpreter is one of these two, then assume
           an iBCS2 image.  Otherwise assume a native linux image.  */

        if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0
            || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
            info->personality = PER_SVR4;

            /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
               and some applications "depend" upon this behavior.  Since
               we do not have the power to recompile these, we emulate
               the SVr4 behavior.  Sigh.  */
            target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                        MAP_FIXED | MAP_PRIVATE, -1, 0);
        }
    }

    bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex,
                                info, (elf_interpreter ? &interp_info : NULL));
    info->start_stack = bprm->p;

    /* If we have an interpreter, set that as the program's entry point.
       Copy the load_addr as well, to help PPC64 interpret the entry
       point as a function descriptor.  Do this after creating elf tables
       so that we copy the original program entry point into the AUXV.  */
    if (elf_interpreter) {
        info->load_addr = interp_info.load_addr;
        info->entry = interp_info.entry;
        free(elf_interpreter);
    }

#ifdef USE_ELF_CORE_DUMP
    bprm->core_dump = &elf_core_dump;
#endif

    return 0;
}
1851 #ifdef USE_ELF_CORE_DUMP
1853 * Definitions to generate Intel SVR4-like core files.
1854 * These mostly have the same names as the SVR4 types with "target_elf_"
1855 * tacked on the front to prevent clashes with linux definitions,
1856 * and the typedef forms have been avoided. This is mostly like
1857 * the SVR4 structure, but more Linuxy, with things that Linux does
1858 * not support and which gdb doesn't really use excluded.
1860 * Fields we don't dump (their contents is zero) in linux-user qemu
1861 * are marked with XXX.
1863 * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
1865 * Porting ELF coredump for target is (quite) simple process. First you
1866 * define USE_ELF_CORE_DUMP in target ELF code (where init_thread() for
1867 * the target resides):
1869 * #define USE_ELF_CORE_DUMP
1871 * Next you define type of register set used for dumping. ELF specification
1872 * says that it needs to be array of elf_greg_t that has size of ELF_NREG.
1874 * typedef <target_regtype> target_elf_greg_t;
1875 * #define ELF_NREG <number of registers>
 * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1878 * Last step is to implement target specific function that copies registers
1879 * from given cpu into just specified register set. Prototype is:
 * static void elf_core_copy_regs(target_elf_gregset_t *regs,
1882 * const CPUState *env);
1884 * Parameters:
1885 * regs - copy register values into here (allocated and zeroed by caller)
1886 * env - copy registers from here
1888 * Example for ARM target is provided in this file.
/* An ELF note in memory.  Describes one record of the core file's
   PT_NOTE segment before it is serialized by write_note().  */
struct memelfnote {
    const char *name;           /* note owner string, e.g. "CORE" */
    size_t namesz;              /* strlen(name) + 1 */
    size_t namesz_rounded;      /* namesz padded to a 4-byte multiple */
    int type;                   /* note type, e.g. NT_PRSTATUS */
    size_t datasz;              /* descriptor size in bytes */
    size_t datasz_rounded;      /* datasz padded to a 4-byte multiple */
    void *data;                 /* descriptor payload (not owned) */
    size_t notesz;              /* total on-disk size incl. header+padding */
};
/* Target-format view of the siginfo embedded in NT_PRSTATUS.  */
struct target_elf_siginfo {
    target_int  si_signo;      /* signal number */
    target_int  si_code;       /* extra code */
    target_int  si_errno;      /* errno */
};
/* NT_PRSTATUS payload in target layout.  Fields marked XXX are left
   zero by this implementation.  */
struct target_elf_prstatus {
    struct target_elf_siginfo pr_info;      /* Info associated with signal */
    target_short       pr_cursig;           /* Current signal */
    target_ulong       pr_sigpend;          /* XXX */
    target_ulong       pr_sighold;          /* XXX */
    target_pid_t       pr_pid;
    target_pid_t       pr_ppid;
    target_pid_t       pr_pgrp;
    target_pid_t       pr_sid;
    struct target_timeval pr_utime;         /* XXX User time */
    struct target_timeval pr_stime;         /* XXX System time */
    struct target_timeval pr_cutime;        /* XXX Cumulative user time */
    struct target_timeval pr_cstime;        /* XXX Cumulative system time */
    target_elf_gregset_t pr_reg;            /* GP registers */
    target_int         pr_fpvalid;          /* XXX */
};
#define ELF_PRARGSZ     (80) /* Number of chars for args */

/* NT_PRPSINFO payload in target layout: basic process description.  */
struct target_elf_prpsinfo {
    char         pr_state;       /* numeric process state */
    char         pr_sname;       /* char for pr_state */
    char         pr_zomb;        /* zombie */
    char         pr_nice;        /* nice val */
    target_ulong pr_flag;        /* flags */
    target_uid_t pr_uid;
    target_gid_t pr_gid;
    target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
    /* Lots missing */
    char    pr_fname[16];           /* filename of executable */
    char    pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status {
    QTAILQ_ENTRY(elf_thread_status)  ets_link;
    struct target_elf_prstatus prstatus;   /* NT_PRSTATUS */
#if 0
    elf_fpregset_t fpu;             /* NT_PRFPREG */
    struct task_struct *thread;
    elf_fpxregset_t xfpu;           /* ELF_CORE_XFPREG_TYPE */
#endif
    struct memelfnote notes[1];     /* only NT_PRSTATUS is emitted */
    int num_notes;
};
/* Aggregate state for one core-dump: the process-wide notes plus the
   per-thread status list.  */
struct elf_note_info {
    struct memelfnote *notes;
    struct target_elf_prstatus *prstatus;  /* NT_PRSTATUS */
    struct target_elf_prpsinfo *psinfo;    /* NT_PRPSINFO */

    QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list;
#if 0
    /*
     * Current version of ELF coredump doesn't support
     * dumping fp regs etc.
     */
    elf_fpregset_t *fpu;
    elf_fpxregset_t *xfpu;
    int thread_status_size;
#endif
    int notes_size;     /* total byte size of all notes */
    int numnote;        /* number of entries in notes[] */
};
/* One guest memory region, as reported by walk_memory_regions().  */
struct vm_area_struct {
    abi_ulong   vma_start;  /* start vaddr of memory region */
    abi_ulong   vma_end;    /* end vaddr of memory region */
    abi_ulong   vma_flags;  /* protection etc. flags for the region */
    QTAILQ_ENTRY(vm_area_struct) vma_link;
};

/* Ordered list of all guest memory regions.  */
struct mm_struct {
    QTAILQ_HEAD(, vm_area_struct) mm_mmap;
    int mm_count;           /* number of mappings */
};
/* Forward declarations for the core-dump implementation below.  */

/* Minimal VMA-list bookkeeping.  */
static struct mm_struct *vma_init(void);
static void vma_delete(struct mm_struct *);
static int vma_add_mapping(struct mm_struct *, abi_ulong,
                           abi_ulong, abi_ulong);
static int vma_get_mapping_count(const struct mm_struct *);
static struct vm_area_struct *vma_first(const struct mm_struct *);
static struct vm_area_struct *vma_next(struct vm_area_struct *);
static abi_ulong vma_dump_size(const struct vm_area_struct *);
static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
                      unsigned long flags);

/* Construction of the ELF header and note records.  */
static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
static void fill_note(struct memelfnote *, const char *, int,
                      unsigned int, void *);
static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
static void fill_auxv_note(struct memelfnote *, const TaskState *);
static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
static size_t note_size(const struct memelfnote *);
static void free_note_info(struct elf_note_info *);
static int fill_note_info(struct elf_note_info *, long, const CPUState *);
static void fill_thread_info(struct elf_note_info *, const CPUState *);
static int core_dump_filename(const TaskState *, char *, size_t);

/* Writing the dump file itself.  */
static int dump_write(int, const void *, size_t);
static int write_note(struct memelfnote *, int);
static int write_note_info(struct elf_note_info *, int);
#ifdef BSWAP_NEEDED
/* Convert a prstatus note to target byte order.  Each field is swapped
   at its declared width: the three si_* members are target_int, so they
   must use tswap32 -- tswapl operates at target-long width and on
   64-bit targets would rotate the value into the discarded high half
   (compare pr_fpvalid below, which is also target_int and already uses
   tswap32).  */
static void bswap_prstatus(struct target_elf_prstatus *prstatus)
{
    prstatus->pr_info.si_signo = tswap32(prstatus->pr_info.si_signo);
    prstatus->pr_info.si_code = tswap32(prstatus->pr_info.si_code);
    prstatus->pr_info.si_errno = tswap32(prstatus->pr_info.si_errno);
    prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
    prstatus->pr_sigpend = tswapl(prstatus->pr_sigpend);
    prstatus->pr_sighold = tswapl(prstatus->pr_sighold);
    prstatus->pr_pid = tswap32(prstatus->pr_pid);
    prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
    prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
    prstatus->pr_sid = tswap32(prstatus->pr_sid);
    /* cpu times are not filled, so we skip them */
    /* regs should be in correct format already */
    prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
}

/* Convert a prpsinfo note to target byte order.  */
static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
{
    psinfo->pr_flag = tswapl(psinfo->pr_flag);
    psinfo->pr_uid = tswap16(psinfo->pr_uid);
    psinfo->pr_gid = tswap16(psinfo->pr_gid);
    psinfo->pr_pid = tswap32(psinfo->pr_pid);
    psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
    psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
    psinfo->pr_sid = tswap32(psinfo->pr_sid);
}

/* Convert an ELF note header to target byte order.  */
static void bswap_note(struct elf_note *en)
{
    bswap32s(&en->n_namesz);
    bswap32s(&en->n_descsz);
    bswap32s(&en->n_type);
}
#else
/* Host and target byte order agree: swapping is a no-op.  */
static inline void bswap_prstatus(struct target_elf_prstatus *p) { }
static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {}
static inline void bswap_note(struct elf_note *en) { }
#endif /* BSWAP_NEEDED */
2056 * Minimal support for linux memory regions. These are needed
2057 * when we are finding out what memory exactly belongs to
2058 * emulated process. No locks needed here, as long as
2059 * thread that received the signal is stopped.
2062 static struct mm_struct *vma_init(void)
2064 struct mm_struct *mm;
2066 if ((mm = qemu_malloc(sizeof (*mm))) == NULL)
2067 return (NULL);
2069 mm->mm_count = 0;
2070 QTAILQ_INIT(&mm->mm_mmap);
2072 return (mm);
2075 static void vma_delete(struct mm_struct *mm)
2077 struct vm_area_struct *vma;
2079 while ((vma = vma_first(mm)) != NULL) {
2080 QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
2081 qemu_free(vma);
2083 qemu_free(mm);
2086 static int vma_add_mapping(struct mm_struct *mm, abi_ulong start,
2087 abi_ulong end, abi_ulong flags)
2089 struct vm_area_struct *vma;
2091 if ((vma = qemu_mallocz(sizeof (*vma))) == NULL)
2092 return (-1);
2094 vma->vma_start = start;
2095 vma->vma_end = end;
2096 vma->vma_flags = flags;
2098 QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
2099 mm->mm_count++;
2101 return (0);
2104 static struct vm_area_struct *vma_first(const struct mm_struct *mm)
2106 return (QTAILQ_FIRST(&mm->mm_mmap));
2109 static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
2111 return (QTAILQ_NEXT(vma, vma_link));
2114 static int vma_get_mapping_count(const struct mm_struct *mm)
2116 return (mm->mm_count);
/*
 * Calculate file (dump) size of given memory region.  Returns 0 for
 * regions that should be skipped (unreadable, or executable mappings
 * that look like they were loaded from an ELF file).
 */
static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
{
    /* if we cannot even read the first page, skip it */
    if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
        return (0);

    /*
     * Usually we don't dump executable pages as they contain
     * non-writable code that debugger can read directly from
     * target library etc. However, thread stacks are marked
     * also executable so we read in first page of given region
     * and check whether it contains elf header. If there is
     * no elf header, we dump it.
     */
    if (vma->vma_flags & PROT_EXEC) {
        char page[TARGET_PAGE_SIZE];

        /* NOTE(review): copy_from_user result is ignored; access_ok
           above has already vetted the first page.  */
        copy_from_user(page, vma->vma_start, sizeof (page));
        if ((page[EI_MAG0] == ELFMAG0) &&
            (page[EI_MAG1] == ELFMAG1) &&
            (page[EI_MAG2] == ELFMAG2) &&
            (page[EI_MAG3] == ELFMAG3)) {
            /*
             * Mappings are possibly from ELF binary. Don't dump
             * them.
             */
            return (0);
        }
    }

    return (vma->vma_end - vma->vma_start);
}
2155 static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
2156 unsigned long flags)
2158 struct mm_struct *mm = (struct mm_struct *)priv;
2160 vma_add_mapping(mm, start, end, flags);
2161 return (0);
2164 static void fill_note(struct memelfnote *note, const char *name, int type,
2165 unsigned int sz, void *data)
2167 unsigned int namesz;
2169 namesz = strlen(name) + 1;
2170 note->name = name;
2171 note->namesz = namesz;
2172 note->namesz_rounded = roundup(namesz, sizeof (int32_t));
2173 note->type = type;
2174 note->datasz = sz;
2175 note->datasz_rounded = roundup(sz, sizeof (int32_t));
2177 note->data = data;
2180 * We calculate rounded up note size here as specified by
2181 * ELF document.
2183 note->notesz = sizeof (struct elf_note) +
2184 note->namesz_rounded + note->datasz_rounded;
2187 static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
2188 uint32_t flags)
2190 (void) memset(elf, 0, sizeof(*elf));
2192 (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
2193 elf->e_ident[EI_CLASS] = ELF_CLASS;
2194 elf->e_ident[EI_DATA] = ELF_DATA;
2195 elf->e_ident[EI_VERSION] = EV_CURRENT;
2196 elf->e_ident[EI_OSABI] = ELF_OSABI;
2198 elf->e_type = ET_CORE;
2199 elf->e_machine = machine;
2200 elf->e_version = EV_CURRENT;
2201 elf->e_phoff = sizeof(struct elfhdr);
2202 elf->e_flags = flags;
2203 elf->e_ehsize = sizeof(struct elfhdr);
2204 elf->e_phentsize = sizeof(struct elf_phdr);
2205 elf->e_phnum = segs;
2207 bswap_ehdr(elf);
2210 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
2212 phdr->p_type = PT_NOTE;
2213 phdr->p_offset = offset;
2214 phdr->p_vaddr = 0;
2215 phdr->p_paddr = 0;
2216 phdr->p_filesz = sz;
2217 phdr->p_memsz = 0;
2218 phdr->p_flags = 0;
2219 phdr->p_align = 0;
2221 bswap_phdr(phdr, 1);
2224 static size_t note_size(const struct memelfnote *note)
2226 return (note->notesz);
2229 static void fill_prstatus(struct target_elf_prstatus *prstatus,
2230 const TaskState *ts, int signr)
2232 (void) memset(prstatus, 0, sizeof (*prstatus));
2233 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
2234 prstatus->pr_pid = ts->ts_tid;
2235 prstatus->pr_ppid = getppid();
2236 prstatus->pr_pgrp = getpgrp();
2237 prstatus->pr_sid = getsid(0);
2239 bswap_prstatus(prstatus);
2242 static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
2244 char *filename, *base_filename;
2245 unsigned int i, len;
2247 (void) memset(psinfo, 0, sizeof (*psinfo));
2249 len = ts->info->arg_end - ts->info->arg_start;
2250 if (len >= ELF_PRARGSZ)
2251 len = ELF_PRARGSZ - 1;
2252 if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
2253 return -EFAULT;
2254 for (i = 0; i < len; i++)
2255 if (psinfo->pr_psargs[i] == 0)
2256 psinfo->pr_psargs[i] = ' ';
2257 psinfo->pr_psargs[len] = 0;
2259 psinfo->pr_pid = getpid();
2260 psinfo->pr_ppid = getppid();
2261 psinfo->pr_pgrp = getpgrp();
2262 psinfo->pr_sid = getsid(0);
2263 psinfo->pr_uid = getuid();
2264 psinfo->pr_gid = getgid();
2266 filename = strdup(ts->bprm->filename);
2267 base_filename = strdup(basename(filename));
2268 (void) strncpy(psinfo->pr_fname, base_filename,
2269 sizeof(psinfo->pr_fname));
2270 free(base_filename);
2271 free(filename);
2273 bswap_psinfo(psinfo);
2274 return (0);
/*
 * Build an NT_AUXV note from the auxiliary vector saved on the target
 * process stack.  On any failure to map the vector the note is simply
 * not filled in (the function returns silently).
 */
static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
{
    elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
    elf_addr_t orig_auxv = auxv;   /* remember the start for lock_user() */
    abi_ulong val;
    void *ptr;
    int i, len;

    /*
     * Auxiliary vector is stored in target process stack. It contains
     * {type, value} pairs that we need to dump into note. This is not
     * strictly necessary but we do it here for sake of completeness.
     */

    /* find out length of the vector, AT_NULL is terminator */
    /* NOTE(review): if the target stack is corrupt and no AT_NULL entry
     * exists, this loop does not terminate — assumes a well-formed auxv. */
    i = len = 0;
    do {
        get_user_ual(val, auxv);   /* read the type field of each pair */
        i += 2;
        auxv += 2 * sizeof (elf_addr_t);
    } while (val != AT_NULL);
    len = i * sizeof (elf_addr_t);   /* total bytes, AT_NULL pair included */

    /* read in whole auxv vector and copy it to memelfnote */
    ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
    if (ptr != NULL) {
        fill_note(note, "CORE", NT_AUXV, len, ptr);
        /* NOTE(review): auxv here points one past the vector end (it was
         * advanced by the loop above), not at orig_auxv — verify this is
         * what unlock_user() expects for a region locked at orig_auxv. */
        unlock_user(ptr, auxv, len);
    }
}
2309 * Constructs name of coredump file. We have following convention
2310 * for the name:
2311 * qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
2313 * Returns 0 in case of success, -1 otherwise (errno is set).
2315 static int core_dump_filename(const TaskState *ts, char *buf,
2316 size_t bufsize)
2318 char timestamp[64];
2319 char *filename = NULL;
2320 char *base_filename = NULL;
2321 struct timeval tv;
2322 struct tm tm;
2324 assert(bufsize >= PATH_MAX);
2326 if (gettimeofday(&tv, NULL) < 0) {
2327 (void) fprintf(stderr, "unable to get current timestamp: %s",
2328 strerror(errno));
2329 return (-1);
2332 filename = strdup(ts->bprm->filename);
2333 base_filename = strdup(basename(filename));
2334 (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S",
2335 localtime_r(&tv.tv_sec, &tm));
2336 (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core",
2337 base_filename, timestamp, (int)getpid());
2338 free(base_filename);
2339 free(filename);
2341 return (0);
/*
 * Write @size bytes from @ptr to @fd, clamping the amount written to the
 * RLIMIT_CORE soft limit when the descriptor is seekable.  Unseekable
 * streams (pipes, sockets) are written without a limit check.
 *
 * Returns 0 on success, -1 on error or when the core limit has already
 * been reached.
 */
static int dump_write(int fd, const void *ptr, size_t size)
{
    const char *cursor = (const char *)ptr;
    ssize_t written, remaining;
    struct rlimit core_limit;
    off_t file_pos;

    written = 0;
    /* NOTE(review): getrlimit() result is not checked here */
    getrlimit(RLIMIT_CORE, &core_limit);

    file_pos = lseek(fd, 0, SEEK_CUR);
    if (file_pos == -1) {
        if (errno != ESPIPE)
            return file_pos;
        /* not a seekable stream: write everything, no limit applies */
        remaining = size;
    } else if (core_limit.rlim_cur <= file_pos) {
        /* core limit already exhausted */
        return -1;
    } else if (core_limit.rlim_cur == RLIM_INFINITY) {
        remaining = size;
    } else {
        size_t room = core_limit.rlim_cur - file_pos;
        remaining = (room >= size) ? size : room;
    }

    /*
     * In normal conditions, single write(2) should do but
     * in case of socket etc. this mechanism is more portable.
     */
    do {
        written = write(fd, cursor, remaining);
        if (written < 0) {
            if (errno == EINTR)
                continue;
            return (-1);
        }
        if (written == 0)     /* eof */
            return (-1);
        cursor += written;
        remaining -= written;
    } while (remaining > 0);

    return (0);
}
2390 static int write_note(struct memelfnote *men, int fd)
2392 struct elf_note en;
2394 en.n_namesz = men->namesz;
2395 en.n_type = men->type;
2396 en.n_descsz = men->datasz;
2398 bswap_note(&en);
2400 if (dump_write(fd, &en, sizeof(en)) != 0)
2401 return (-1);
2402 if (dump_write(fd, men->name, men->namesz_rounded) != 0)
2403 return (-1);
2404 if (dump_write(fd, men->data, men->datasz_rounded) != 0)
2405 return (-1);
2407 return (0);
2410 static void fill_thread_info(struct elf_note_info *info, const CPUState *env)
2412 TaskState *ts = (TaskState *)env->opaque;
2413 struct elf_thread_status *ets;
2415 ets = qemu_mallocz(sizeof (*ets));
2416 ets->num_notes = 1; /* only prstatus is dumped */
2417 fill_prstatus(&ets->prstatus, ts, 0);
2418 elf_core_copy_regs(&ets->prstatus.pr_reg, env);
2419 fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
2420 &ets->prstatus);
2422 QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
2424 info->notes_size += note_size(&ets->notes[0]);
2427 static int fill_note_info(struct elf_note_info *info,
2428 long signr, const CPUState *env)
2430 #define NUMNOTES 3
2431 CPUState *cpu = NULL;
2432 TaskState *ts = (TaskState *)env->opaque;
2433 int i;
2435 (void) memset(info, 0, sizeof (*info));
2437 QTAILQ_INIT(&info->thread_list);
2439 info->notes = qemu_mallocz(NUMNOTES * sizeof (struct memelfnote));
2440 if (info->notes == NULL)
2441 return (-ENOMEM);
2442 info->prstatus = qemu_mallocz(sizeof (*info->prstatus));
2443 if (info->prstatus == NULL)
2444 return (-ENOMEM);
2445 info->psinfo = qemu_mallocz(sizeof (*info->psinfo));
2446 if (info->prstatus == NULL)
2447 return (-ENOMEM);
2450 * First fill in status (and registers) of current thread
2451 * including process info & aux vector.
2453 fill_prstatus(info->prstatus, ts, signr);
2454 elf_core_copy_regs(&info->prstatus->pr_reg, env);
2455 fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
2456 sizeof (*info->prstatus), info->prstatus);
2457 fill_psinfo(info->psinfo, ts);
2458 fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
2459 sizeof (*info->psinfo), info->psinfo);
2460 fill_auxv_note(&info->notes[2], ts);
2461 info->numnote = 3;
2463 info->notes_size = 0;
2464 for (i = 0; i < info->numnote; i++)
2465 info->notes_size += note_size(&info->notes[i]);
2467 /* read and fill status of all threads */
2468 cpu_list_lock();
2469 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2470 if (cpu == thread_env)
2471 continue;
2472 fill_thread_info(info, cpu);
2474 cpu_list_unlock();
2476 return (0);
2479 static void free_note_info(struct elf_note_info *info)
2481 struct elf_thread_status *ets;
2483 while (!QTAILQ_EMPTY(&info->thread_list)) {
2484 ets = QTAILQ_FIRST(&info->thread_list);
2485 QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
2486 qemu_free(ets);
2489 qemu_free(info->prstatus);
2490 qemu_free(info->psinfo);
2491 qemu_free(info->notes);
2494 static int write_note_info(struct elf_note_info *info, int fd)
2496 struct elf_thread_status *ets;
2497 int i, error = 0;
2499 /* write prstatus, psinfo and auxv for current thread */
2500 for (i = 0; i < info->numnote; i++)
2501 if ((error = write_note(&info->notes[i], fd)) != 0)
2502 return (error);
2504 /* write prstatus for each thread */
2505 for (ets = info->thread_list.tqh_first; ets != NULL;
2506 ets = ets->ets_link.tqe_next) {
2507 if ((error = write_note(&ets->notes[0], fd)) != 0)
2508 return (error);
2511 return (0);
/*
 * Write out ELF coredump.
 *
 * See documentation of ELF object file format in:
 * http://www.caldera.com/developers/devspecs/gabi41.pdf
 *
 * Coredump format in linux is following:
 *
 * 0   +----------------------+         \
 *     | ELF header           | ET_CORE  |
 *     +----------------------+          |
 *     | ELF program headers  |          |--- headers
 *     | - NOTE section       |          |
 *     | - PT_LOAD sections   |          |
 *     +----------------------+         /
 *     | NOTEs:               |
 *     | - NT_PRSTATUS        |
 *     | - NT_PRPSINFO        |
 *     | - NT_AUXV            |
 *     +----------------------+ <-- aligned to target page
 *     | Process memory dump  |
 *     :                      :
 *     .                      .
 *     :                      :
 *     |                      |
 *     +----------------------+
 *
 * NT_PRSTATUS -> struct elf_prstatus (per thread)
 * NT_PRPSINFO -> struct elf_prpsinfo
 * NT_AUXV is array of { type, value } pairs (see fill_auxv_note()).
 *
 * Format follows System V format as close as possible. Current
 * version limitations are as follows:
 *     - no floating point registers are dumped
 *
 * Function returns 0 in case of success, negative errno otherwise.
 *
 * TODO: make this work also during runtime: it should be
 * possible to force coredump from running process and then
 * continue processing. For example qemu could set up SIGUSR2
 * handler (provided that target process hasn't registered
 * handler for that) that does the dump when signal is received.
 */
static int elf_core_dump(int signr, const CPUState *env)
{
    const TaskState *ts = (const TaskState *)env->opaque;
    struct vm_area_struct *vma = NULL;
    char corefile[PATH_MAX];
    struct elf_note_info info;
    struct elfhdr elf;
    struct elf_phdr phdr;
    struct rlimit dumpsize;
    struct mm_struct *mm = NULL;
    off_t offset = 0, data_offset = 0;
    int segs = 0;
    int fd = -1;

    /* errno doubles as the error status for the common "out" path below */
    errno = 0;
    getrlimit(RLIMIT_CORE, &dumpsize);
    if (dumpsize.rlim_cur == 0)
        return 0;   /* core dumps disabled: silently succeed */

    if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
        return (-errno);

    if ((fd = open(corefile, O_WRONLY | O_CREAT,
                   S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
        return (-errno);

    /*
     * Walk through target process memory mappings and
     * set up structure containing this information. After
     * this point vma_xxx functions can be used.
     */
    /* NOTE(review): a goto out taken before fill_note_info() runs means
     * free_note_info(&info) operates on an uninitialized struct — verify
     * the early-failure paths here. */
    if ((mm = vma_init()) == NULL)
        goto out;

    walk_memory_regions(mm, vma_walker);
    segs = vma_get_mapping_count(mm);

    /*
     * Construct valid coredump ELF header. We also
     * add one more segment for notes.
     */
    fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
    if (dump_write(fd, &elf, sizeof (elf)) != 0)
        goto out;

    /* fill in in-memory version of notes */
    if (fill_note_info(&info, signr, env) < 0)
        goto out;

    offset += sizeof (elf);                          /* elf header */
    offset += (segs + 1) * sizeof (struct elf_phdr); /* program headers */

    /* write out notes program header */
    fill_elf_note_phdr(&phdr, info.notes_size, offset);

    offset += info.notes_size;
    if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
        goto out;

    /*
     * ELF specification wants data to start at page boundary so
     * we align it here.
     */
    data_offset = offset = roundup(offset, ELF_EXEC_PAGESIZE);

    /*
     * Write program headers for memory regions mapped in
     * the target process.
     */
    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
        (void) memset(&phdr, 0, sizeof (phdr));

        phdr.p_type = PT_LOAD;
        phdr.p_offset = offset;
        phdr.p_vaddr = vma->vma_start;
        phdr.p_paddr = 0;
        phdr.p_filesz = vma_dump_size(vma);
        offset += phdr.p_filesz;
        phdr.p_memsz = vma->vma_end - vma->vma_start;
        /* translate PROT_* mapping flags into ELF segment flags */
        phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
        if (vma->vma_flags & PROT_WRITE)
            phdr.p_flags |= PF_W;
        if (vma->vma_flags & PROT_EXEC)
            phdr.p_flags |= PF_X;
        phdr.p_align = ELF_EXEC_PAGESIZE;

        bswap_phdr(&phdr, 1);
        /* NOTE(review): dump_write() result is ignored here, unlike the
         * other writes in this function. */
        dump_write(fd, &phdr, sizeof (phdr));
    }

    /*
     * Next we write notes just after program headers. No
     * alignment needed here.
     */
    if (write_note_info(&info, fd) < 0)
        goto out;

    /* align data to page boundary */
    if (lseek(fd, data_offset, SEEK_SET) != data_offset)
        goto out;

    /*
     * Finally we can dump process memory into corefile as well.
     */
    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
        abi_ulong addr;
        abi_ulong end;

        end = vma->vma_start + vma_dump_size(vma);

        for (addr = vma->vma_start; addr < end;
             addr += TARGET_PAGE_SIZE) {
            char page[TARGET_PAGE_SIZE];
            int error;

            /*
             * Read in page from target process memory and
             * write it to coredump file.
             */
            error = copy_from_user(page, addr, sizeof (page));
            if (error != 0) {
                (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
                               addr);
                errno = -error;
                goto out;
            }
            if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
                goto out;
        }
    }

 out:
    free_note_info(&info);
    if (mm != NULL)
        vma_delete(mm);
    (void) close(fd);

    if (errno != 0)
        return (-errno);
    return (0);
}
2698 #endif /* USE_ELF_CORE_DUMP */
2700 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
2702 init_thread(regs, infop);