/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>
static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
static int load_elf_library(struct file *);
static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);

#ifndef elf_addr_t
#define elf_addr_t unsigned long
#endif
/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file);
#else
#define elf_core_dump	NULL
#endif

#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
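/*
 * Illustrative example (not from the original source): with
 * ELF_MIN_ALIGN == 4096 (0x1000), a virtual address of 0x12345 gives
 * ELF_PAGESTART(0x12345)  == 0x12000 (round down to the page start),
 * ELF_PAGEOFFSET(0x12345) == 0x345   (offset within the page), and
 * ELF_PAGEALIGN(0x12345)  == 0x13000 (round up to the next page).
 */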
static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE
};

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
static int set_brk(unsigned long start, unsigned long end)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		unsigned long addr;
		down_write(&current->mm->mmap_sem);
		addr = do_brk(start, end - start);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(addr))
			return addr;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}
/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory
 */
static int padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *) elf_bss, nbyte))
			return -EFAULT;
	}
	return 0;
}
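/*
 * Illustrative example (not from the original source): if the bss starts
 * at elf_bss == 0x0804a123 with ELF_MIN_ALIGN == 0x1000, padzero() clears
 * the 0xedd bytes from 0x0804a123 up to the page boundary at 0x0804b000,
 * so stale file contents in that partial page cannot leak into the bss.
 */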
/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
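/*
 * Illustrative note (not from the original source): on the common
 * downward-growing stack, STACK_ALLOC(sp, len) decrements sp by len
 * bytes and yields the new (lower) address, STACK_ADD(sp, items) steps
 * down by whole elf_addr_t slots, and STACK_ROUND() masks the result
 * to a 16-byte boundary as most ABIs require.
 */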
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
		int interp_aout, unsigned long load_addr,
		unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	const char *k_platform = ELF_PLATFORM;
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	struct task_struct *tsk = current;

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		/*
		 * In some cases (e.g. Hyper-Threading), we want to avoid L1
		 * evictions by the processes running on the same package. One
		 * thing we can do is to shuffle the initial stack for them.
		 */
		p = arch_align_stack(p);

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
	do { \
		elf_info[ei_index++] = id; \
		elf_info[ei_index++] = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, tsk->uid);
	NEW_AUX_ENT(AT_EUID, tsk->euid);
	NEW_AUX_ENT(AT_GID, tsk->gid);
	NEW_AUX_ENT(AT_EGID, tsk->egid);
	NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

	/* And advance past the AT_NULL entry. */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1);
	if (interp_aout) {
		items += 3; /* a.out interpreters require argv & envp too */
	} else {
		items += 1; /* ELF interpreters only put argc on the stack */
	}
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;
	if (interp_aout) {
		argv = sp + 2;
		envp = argv + argc + 1;
		__put_user((elf_addr_t)(unsigned long)argv, sp++);
		__put_user((elf_addr_t)(unsigned long)envp, sp++);
	} else {
		argv = sp;
		envp = argv + argc + 1;
	}

	/* Populate argv and envp */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, argv++);
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	if (__put_user(0, argv))
		return -EFAULT;
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, envp++);
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	if (__put_user(0, envp))
		return -EFAULT;
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	sp = (elf_addr_t __user *)envp + 1;
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
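/*
 * Illustrative sketch (not from the original source) of the userspace
 * stack that create_elf_tables() leaves behind for an ELF executable,
 * from the final stack pointer upward:
 *
 *	argc
 *	argv[0] ... argv[argc-1], NULL
 *	envp[0] ... envp[envc-1], NULL
 *	auxv pairs (AT_HWCAP, AT_PAGESZ, ..., terminated by AT_NULL)
 *	argument/environment strings and the platform string above
 *
 * The dynamic linker locates the auxv entries by walking past the
 * argv and envp arrays.
 */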
#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
		struct elf_phdr *eppnt, int prot, int type)
{
	unsigned long map_addr;
	unsigned long pageoffset = ELF_PAGEOFFSET(eppnt->p_vaddr);

	down_write(&current->mm->mmap_sem);
	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (eppnt->p_filesz + pageoffset)
		map_addr = do_mmap(filep, ELF_PAGESTART(addr),
				   eppnt->p_filesz + pageoffset, prot, type,
				   eppnt->p_offset - pageoffset);
	else
		map_addr = ELF_PAGESTART(addr);
	up_write(&current->mm->mmap_sem);
	return(map_addr);
}

#endif /* !elf_map */
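/*
 * Illustrative example (not from the original source): for a PT_LOAD
 * header with p_vaddr == 0x8049f04, p_offset == 0xf04 and
 * p_filesz == 0x100, pageoffset is 0xf04, so elf_map() maps 0x1004
 * bytes at ELF_PAGESTART(addr) starting from file offset 0; whole
 * pages are mapped even though the segment itself begins mid-page.
 */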
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */
static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter, unsigned long *interp_load_addr)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	int retval, i, size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (interp_elf_ex->e_phnum < 1 ||
	    interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */
	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
			     (char *)elf_phdata, size);
	error = -EIO;
	if (retval != size) {
		if (retval < 0)
			error = retval;
		goto out_close;
	}

	eppnt = elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R)
				elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W)
				elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X)
				elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;

			map_addr = elf_map(interpreter, load_addr + vaddr,
					   eppnt, elf_prot, elf_type);
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out_close;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}

	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_close;
	}

	/* What we have mapped so far */
	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);

	/* Map the last of the bss segment */
	if (last_bss > elf_bss) {
		down_write(&current->mm->mmap_sem);
		error = do_brk(elf_bss, last_bss - elf_bss);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(error))
			goto out_close;
	}

	*interp_load_addr = load_addr;
	error = ((unsigned long)interp_elf_ex->e_entry) + load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}
static unsigned long load_aout_interp(struct exec *interp_ex,
		struct file *interpreter)
{
	unsigned long text_data, elf_entry = ~0UL;
	char __user * addr;
	loff_t offset;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		offset = 32;
		addr = (char __user *)0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char __user *)N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	down_write(&current->mm->mmap_sem);
	do_brk(0, text_data);
	up_write(&current->mm->mmap_sem);
	if (!interpreter->f_op || !interpreter->f_op->read)
		goto out;
	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
		goto out;
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + text_data);

	down_write(&current->mm->mmap_sem);
	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
		interp_ex->a_bss);
	up_write(&current->mm->mmap_sem);
	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}
/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

#ifndef STACK_RND_MASK
#define STACK_RND_MASK 0x7ff		/* with 4K pages 8MB of VA */
#endif

static unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned int random_variable = 0;

	if ((current->flags & PF_RANDOMIZE) &&
	    !(current->personality & ADDR_NO_RANDOMIZE)) {
		random_variable = get_random_int() & STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
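/*
 * Illustrative example (not from the original source): with 4K pages
 * (PAGE_SHIFT == 12) and the default STACK_RND_MASK of 0x7ff, the
 * random offset is one of 2048 page-aligned values in [0, 0x7ff000],
 * i.e. the stack top moves by up to 8MB minus one page.
 */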
static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry, interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	char passed_fileno[6];
	struct files_struct *files;
	int have_pt_gnu_stack, executable_stack = EXSTACK_DEFAULT;
	unsigned long def_flags = 0;
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
		struct exec interp_ex;
	} *loc;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}
	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *)bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op || !bprm->file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */
	if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (loc->elf_ex.e_phnum < 1 ||
	    loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
	retval = -ENOMEM;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
			     (char *)elf_phdata, size);
	if (retval != size) {
		if (retval >= 0)
			retval = -EIO;
		goto out_free_ph;
	}

	files = current->files; /* Refcounted so ok */
	retval = unshare_files();
	if (retval < 0)
		goto out_free_ph;
	if (files == current->files) {
		put_files_struct(files);
		files = NULL;
	}

	/* exec will make our files private anyway, but for the a.out
	   loader stuff we need to do it earlier */
	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_fh;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);
	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */
			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX ||
			    elf_ppnt->p_filesz < 2)
				goto out_free_file;

			retval = -ENOMEM;
			elf_interpreter = kmalloc(elf_ppnt->p_filesz,
						  GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					     elf_interpreter,
					     elf_ppnt->p_filesz);
			if (retval != elf_ppnt->p_filesz) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_interp;
			}
			/* make sure path is NULL terminated */
			retval = -ENOEXEC;
			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
				goto out_free_interp;

			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;

			/*
			 * The early SET_PERSONALITY here is so that the lookup
			 * for the interpreter happens in the namespace of the
			 * to-be-execed image.  SET_PERSONALITY can select an
			 * alternate root.
			 *
			 * However, SET_PERSONALITY is NOT allowed to switch
			 * this task into the new image's memory mapping
			 * policy - that is, TASK_SIZE must still evaluate to
			 * that which is appropriate to the execing application.
			 * This is because exit_mmap() needs to have TASK_SIZE
			 * evaluate to the size of the old image.
			 *
			 * So if (say) a 64-bit application is execing a 32-bit
			 * application it is the architecture's responsibility
			 * to defer changing the value of TASK_SIZE until the
			 * switch really is going to happen - do this in
			 * flush_thread().	- akpm
			 */
			SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;
			retval = kernel_read(interpreter, 0, bprm->buf,
					     BINPRM_BUF_SIZE);
			if (retval != BINPRM_BUF_SIZE) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_dentry;
			}

			/* Get the exec headers */
			loc->interp_ex = *((struct exec *)bprm->buf);
			loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
			break;
		}
		elf_ppnt++;
	}
	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		if (elf_ppnt->p_type == PT_GNU_STACK) {
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;
		}
	have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
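	/*
	 * Illustrative note (not from the original source): toolchains
	 * emit PT_GNU_STACK with PF_X set (e.g. via "ld -z execstack")
	 * to request an executable stack; without the header,
	 * executable_stack stays EXSTACK_DEFAULT and the architecture's
	 * default policy applies.
	 */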
	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
			// FIXME - ratelimit this before re-enabling
			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
		/* Verify the interpreter has a valid arch */
		if ((interpreter_type == INTERPRETER_ELF) &&
		    !elf_check_arch(&loc->interp_elf_ex))
			goto out_free_dentry;
	} else {
		/* Executables without an interpreter also need a personality */
		SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	}

	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */
	if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
		char *passed_p = passed_fileno;
		sprintf(passed_fileno, "%d", elf_exec_fileno);

		if (elf_interpreter) {
			retval = copy_strings_kernel(1, &passed_p, bprm);
			if (retval)
				goto out_free_dentry;
			bprm->argc++;
		}
	}
	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Discard our unneeded old files struct */
	if (files) {
		put_files_struct(files);
		files = NULL;
	}

	/* OK, This is the point of no return */
	current->mm->start_data = 0;
	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->mmap = NULL;
	current->flags &= ~PF_FORKNOEXEC;
	current->mm->def_flags = def_flags;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;
	arch_pick_mmap_layout(current->mm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->free_area_cache = current->mm->mmap_base;
	current->mm->cached_hole_size = 0;
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}

	current->mm->start_stack = bprm->p;
	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory.  At this point, we assume that
	   the image should be loaded at fixed address, not at a variable
	   address. */
	for(i = 0, elf_ppnt = elf_phdata;
	    i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk (elf_bss + load_bias,
					  elf_brk + load_bias);
			if (retval) {
				send_sig(SIGKILL, current, 0);
				goto out_free_dentry;
			}
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		if (elf_ppnt->p_flags & PF_R)
			elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W)
			elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X)
			elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (loc->elf_ex.e_type == ET_DYN) {
			/* Try and get dynamic programs out of the way of the
			 * default mmap base, as well as whatever program they
			 * might try to exec.  This is because the brk will
			 * follow the loader, and is not movable.  */
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags);
		if (BAD_ADDR(error)) {
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
				             ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}
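	/*
	 * Illustrative example (not from the original source): for an
	 * ET_DYN binary whose first PT_LOAD has p_vaddr == 0, load_bias
	 * becomes ELF_PAGESTART(ELF_ET_DYN_BASE) and every segment, the
	 * entry point and the brk are shifted by that amount below; for
	 * ET_EXEC, load_bias stays 0 and segments are mapped MAP_FIXED
	 * at their linked addresses.
	 */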
	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk);
	if (retval) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		send_sig(SIGSEGV, current, 0);
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}
	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT)
			elf_entry = load_aout_interp(&loc->interp_ex,
						     interpreter);
		else
			elf_entry = load_elf_interp(&loc->interp_elf_ex,
						    interpreter,
						    &interp_load_addr);
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(elf_phdata);

	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);

	set_binfmt(&elf_format);
#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	create_elf_tables(bprm, &loc->elf_ex,
			  (interpreter_type == INTERPRETER_AOUT),
			  load_addr, interp_load_addr);
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;
	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_fh:
	if (files) {
		put_files_struct(current->files);
		current->files = files;
	}
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	up_write(&current->mm->mmap_sem);
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
			    ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len) {
		down_write(&current->mm->mmap_sem);
		do_brk(len, bss - len);
		up_write(&current->mm->mmap_sem);
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}

static int dump_seek(struct file *file, loff_t off)
{
	if (file->f_op->llseek) {
		if (file->f_op->llseek(file, off, 0) != off)
			return 0;
	} else
		file->f_pos = off;
	return 1;
}
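/*
 * Illustrative note (not from the original source): these helpers run
 * with set_fs(KERNEL_DS) in effect (see elf_core_dump() below), which
 * is why kernel buffers can be handed straight to f_op->write().
 */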
/*
 * Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 *
 * I think we should skip something. But I am not sure how. H.J.
 */
static int maydump(struct vm_area_struct *vma)
{
	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & (VM_IO | VM_RESERVED))
		return 0;

	/* Dump shared memory only if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED)
		return vma->vm_file->f_dentry->d_inode->i_nlink == 0;

	/* If it hasn't been written to, don't write it out */
	if (!vma->anon_vma)
		return 0;

	return 1;
}
/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}
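/*
 * Illustrative example (not from the original source): a "CORE"
 * NT_PRSTATUS note with a 144-byte payload takes
 * sizeof(struct elf_note) + roundup(5, 4) + roundup(144, 4)
 * = 12 + 8 + 144 = 164 bytes; the name and the descriptor are both
 * padded to 4-byte boundaries as the ELF note format requires.
 */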
#define DUMP_WRITE(addr, nr)	\
	do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
#define DUMP_SEEK(off)	\
	do { if (!dump_seek(file, (off))) return 0; } while(0)

static int writenote(struct memelfnote *men, struct file *file)
{
	struct elf_note en;

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);
	/* XXX - cast from long long to long to avoid need for libgcc.a */
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
	DUMP_WRITE(men->data, men->datasz);
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */

	return 1;
}
#undef DUMP_WRITE
#undef DUMP_SEEK

#define DUMP_WRITE(addr, nr)	\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off)	\
	if (!dump_seek(file, (off))) \
		goto end_coredump;
static void fill_elf_header(struct elfhdr *elf, int segs)
{
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_shoff = 0;
	elf->e_flags = ELF_CORE_EFLAGS;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;
	return;
}
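/*
 * Illustrative note (not from the original source): the resulting core
 * file begins with this header, followed at e_phoff by one PT_NOTE
 * program header plus (roughly) one PT_LOAD header per VMA, with no
 * section header table (e_shoff == 0); architectures may contribute
 * extra headers via ELF_CORE_EXTRA_PHDRS.
 */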
static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
	return;
}

static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
	return;
}
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = p->pid;
	prstatus->pr_ppid = p->parent->pid;
	prstatus->pr_pgrp = process_group(p);
	prstatus->pr_sid = p->signal->session;
	if (thread_group_leader(p)) {
		/*
		 * This is the record for the group leader.  Add in the
		 * cumulative times of previous dead threads.  This total
		 * won't include the time of each live thread whose state
		 * is included in the core dump.  The final total reported
		 * to our parent process when it calls wait4 will include
		 * those sums as well as the little bit more time it takes
		 * this and each other thread to finish dying after the
		 * core dump synchronization phase.
		 */
		cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
				   &prstatus->pr_utime);
		cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
				   &prstatus->pr_stime);
	} else {
		cputime_to_timeval(p->utime, &prstatus->pr_utime);
		cputime_to_timeval(p->stime, &prstatus->pr_stime);
	}
	cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
	cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
			   (const char __user *)mm->arg_start, len))
		return -EFAULT;
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = p->pid;
	psinfo->pr_ppid = p->parent->pid;
	psinfo->pr_pgrp = process_group(p);
	psinfo->pr_sid = p->signal->session;

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	SET_UID(psinfo->pr_uid, p->uid);
	SET_GID(psinfo->pr_gid, p->gid);
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* NT_PRXFPREG */
#endif
	struct memelfnote notes[3];
	int num_notes;
};

/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then create
 * a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
		  &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
								&t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
			  &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu),
			  &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
{
#define	NUM_NOTES	6
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	size_t size = 0;
	int i;
	struct vm_area_struct *vma;
	struct elfhdr *elf = NULL;
	loff_t offset = 0, dataoff;
	unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
	int numnote;
	struct memelfnote *notes = NULL;
	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
	struct task_struct *g, *p;
	LIST_HEAD(thread_list);
	struct list_head *t;
	elf_fpregset_t *fpu = NULL;
#ifdef	ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu = NULL;
#endif
	int thread_status_size = 0;
	elf_addr_t *auxv;

	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated. So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto cleanup;
	prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
	if (!prstatus)
		goto cleanup;
	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (!psinfo)
		goto cleanup;
	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
	if (!notes)
		goto cleanup;
	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
	if (!fpu)
		goto cleanup;
#ifdef	ELF_CORE_COPY_XFPREGS
	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
	if (!xfpu)
		goto cleanup;
#endif
	if (signr) {
		struct elf_thread_status *tmp;
		read_lock(&tasklist_lock);
		do_each_thread(g,p)
			if (current->mm == p->mm && current != p) {
				tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
				if (!tmp) {
					read_unlock(&tasklist_lock);
					goto cleanup;
				}
				INIT_LIST_HEAD(&tmp->list);
				tmp->thread = p;
				list_add(&tmp->list, &thread_list);
			}
		while_each_thread(g,p);
		read_unlock(&tasklist_lock);
		list_for_each(t, &thread_list) {
			struct elf_thread_status *tmp;
			int sz;

			tmp = list_entry(t, struct elf_thread_status, list);
			sz = elf_dump_thread_status(signr, tmp);
			thread_status_size += sz;
		}
	}
	/* now collect the dump for the current */
	memset(prstatus, 0, sizeof(*prstatus));
	fill_prstatus(prstatus, current, signr);
	elf_core_copy_regs(&prstatus->pr_reg, regs);

	segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
	segs += ELF_CORE_EXTRA_PHDRS;
#endif

	/* Set up header */
	fill_elf_header(elf, segs + 1);	/* including notes section */

	has_dumped = 1;
	current->flags |= PF_DUMPCORE;

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
	fill_psinfo(psinfo, current->group_leader, current->mm);
	fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	numnote = 2;

	auxv = (elf_addr_t *)current->mm->saved_auxv;

	i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(&notes[numnote++], "CORE", NT_AUXV,
		  i * sizeof(elf_addr_t), auxv);

	/* Try to dump the FPU. */
	if ((prstatus->pr_fpvalid =
	     elf_core_copy_task_fpregs(current, regs, fpu)))
		fill_note(notes + numnote++,
			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef	ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, xfpu))
		fill_note(notes + numnote++,
			  "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
#endif
	fs = get_fs();
	set_fs(KERNEL_DS);

	DUMP_WRITE(elf, sizeof(*elf));
	offset += sizeof(*elf);				/* Elf header */
	offset += (segs+1) * sizeof(struct elf_phdr);	/* Program headers */

	/* Write notes phdr entry */
	{
		struct elf_phdr phdr;
		int sz = 0;

		for (i = 0; i < numnote; i++)
			sz += notesize(notes + i);

		sz += thread_status_size;

		fill_elf_note_phdr(&phdr, sz, offset);
		offset += sz;
		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	/* Page-align dumped data */
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	/* Write program headers for segments dump */
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct elf_phdr phdr;
		size_t sz;

		sz = vma->vm_end - vma->vm_start;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = maydump(vma) ? sz : 0;
		phdr.p_memsz = sz;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));
	}

#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
	ELF_CORE_WRITE_EXTRA_PHDRS;
#endif
	/* write out the notes section */
	for (i = 0; i < numnote; i++)
		if (!writenote(notes + i, file))
			goto end_coredump;

	/* write out the thread status notes section */
	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp =
				list_entry(t, struct elf_thread_status, list);

		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file))
				goto end_coredump;
	}

	DUMP_SEEK(dataoff);

	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		unsigned long addr;

		if (!maydump(vma))
			continue;

		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			struct page *page;
			struct vm_area_struct *vma;

			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
						&page, &vma) <= 0) {
				DUMP_SEEK(file->f_pos + PAGE_SIZE);
			} else {
				if (page == ZERO_PAGE(addr)) {
					DUMP_SEEK(file->f_pos + PAGE_SIZE);
				} else {
					void *kaddr;
					flush_cache_page(vma, addr,
							 page_to_pfn(page));
					kaddr = kmap(page);
					if ((size += PAGE_SIZE) > limit ||
					    !dump_write(file, kaddr,
					    PAGE_SIZE)) {
						kunmap(page);
						page_cache_release(page);
						goto end_coredump;
					}
					kunmap(page);
				}
				page_cache_release(page);
			}
		}
	}

#ifdef ELF_CORE_WRITE_EXTRA_DATA
	ELF_CORE_WRITE_EXTRA_DATA;
#endif

	if (file->f_pos != offset) {
		/* Sanity check */
		printk(KERN_WARNING
		       "elf_core_dump: file->f_pos (%Ld) != offset (%Ld)\n",
		       file->f_pos, offset);
	}
end_coredump:
	set_fs(fs);

cleanup:
	while (!list_empty(&thread_list)) {
		struct list_head *tmp = thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	kfree(elf);
	kfree(prstatus);
	kfree(psinfo);
	kfree(notes);
	kfree(fpu);
#ifdef	ELF_CORE_COPY_XFPREGS
	kfree(xfpu);
#endif
	return has_dumped;
#undef NUM_NOTES
}

#endif		/* USE_ELF_CORE_DUMP */
static int __init init_elf_binfmt(void)
{
	return register_binfmt(&elf_format);
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the COFF and ELF loaders. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");