Fix prototype of SMP version of synchronize_irq.
[linux-2.6/linux-mips.git] / fs / binfmt_elf.c
blob80bd7747dfcce90fc84347ad513e1e9e0f5e31f9
1 /*
2 * linux/fs/binfmt_elf.c
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
7 * Tools".
9 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/fs.h>
15 #include <linux/stat.h>
16 #include <linux/time.h>
17 #include <linux/mm.h>
18 #include <linux/mman.h>
19 #include <linux/a.out.h>
20 #include <linux/errno.h>
21 #include <linux/signal.h>
22 #include <linux/binfmts.h>
23 #include <linux/string.h>
24 #include <linux/file.h>
25 #include <linux/fcntl.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/shm.h>
29 #include <linux/personality.h>
30 #include <linux/elfcore.h>
31 #include <linux/init.h>
32 #include <linux/highuid.h>
33 #include <linux/smp.h>
34 #include <linux/smp_lock.h>
35 #include <linux/compiler.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
39 #include <asm/uaccess.h>
40 #include <asm/param.h>
41 #include <asm/pgalloc.h>
43 #include <linux/elf.h>
45 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
46 static int load_elf_library(struct file*);
47 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
48 extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
50 #ifndef elf_addr_t
51 #define elf_addr_t unsigned long
52 #endif
55 * If we don't support core dumping, then supply a NULL so we
56 * don't even try.
58 #ifdef USE_ELF_CORE_DUMP
59 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
60 #else
61 #define elf_core_dump NULL
62 #endif
64 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
65 # define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
66 #else
67 # define ELF_MIN_ALIGN PAGE_SIZE
68 #endif
70 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
71 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
72 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
/*
 * Registration record for the ELF binary-format handler.  Hooked into
 * the binfmt list by the module init code; exec uses .load_binary,
 * uselib() uses .load_shlib, and the core-dump path uses .core_dump.
 */
static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,	/* NULL if !USE_ELF_CORE_DUMP */
	.min_coredump	= ELF_EXEC_PAGESIZE
};
82 #define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
84 static void set_brk(unsigned long start, unsigned long end)
86 start = ELF_PAGEALIGN(start);
87 end = ELF_PAGEALIGN(end);
88 if (end > start)
89 do_brk(start, end - start);
90 current->mm->start_brk = current->mm->brk = end;
94 /* We need to explicitly zero any fractional pages
95 after the data section (i.e. bss). This would
96 contain the junk from the file that should not
97 be in memory */
100 static void padzero(unsigned long elf_bss)
102 unsigned long nbyte;
104 nbyte = ELF_PAGEOFFSET(elf_bss);
105 if (nbyte) {
106 nbyte = ELF_MIN_ALIGN - nbyte;
107 clear_user((void *) elf_bss, nbyte);
111 /* Let's use some macros to make this stack manipulation a litle clearer */
112 #ifdef CONFIG_STACK_GROWSUP
113 #define STACK_ADD(sp, items) ((elf_addr_t *)(sp) + (items))
114 #define STACK_ROUND(sp, items) \
115 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
116 #define STACK_ALLOC(sp, len) ({ elf_addr_t *old_sp = sp; sp += len; old_sp; })
117 #else
118 #define STACK_ADD(sp, items) ((elf_addr_t *)(sp) - (items))
119 #define STACK_ROUND(sp, items) \
120 (((unsigned long) (sp - items)) &~ 15UL)
121 #define STACK_ALLOC(sp, len) sp -= len
122 #endif
/*
 * Build the initial userspace stack image for the new ELF process:
 * argc, the argv[] and envp[] pointer arrays, the platform string and
 * the ELF auxiliary vector, laid out per the ELF ABI.  On entry,
 * bprm->p points just below the argument/environment strings that
 * exec has already copied onto the stack.
 */
static void
create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
		int interp_aout, unsigned long load_addr,
		unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t *argv, *envp;
	elf_addr_t *sp, *u_platform;
	const char *k_platform = ELF_PLATFORM;
	int items;
	elf_addr_t elf_info[40];	/* auxv staging area: id/value pairs */
	int ei_index = 0;

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

#ifdef CONFIG_X86_HT
		/*
		 * In some cases (e.g. Hyper-Threading), we want to avoid L1
		 * evictions by the processes running on the same package. One
		 * thing we can do is to shuffle the initial stack for them.
		 *
		 * The conditionals here are unneeded, but kept in to make the
		 * code behaviour the same as pre change unless we have
		 * hyperthreaded processors. This should be cleaned up
		 * before 2.6
		 */
		if (smp_num_siblings > 1)
			STACK_ALLOC(p, ((current->pid % 64) << 7));
#endif
		u_platform = (elf_addr_t *) STACK_ALLOC(p, len);
		/* NOTE(review): __copy_to_user() result is ignored here. */
		__copy_to_user(u_platform, k_platform, len);
	}

	/* Create the ELF interpreter info */
#define NEW_AUX_ENT(id, val) \
	do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, (elf_addr_t) current->uid);
	NEW_AUX_ENT(AT_EUID, (elf_addr_t) current->euid);
	NEW_AUX_ENT(AT_GID, (elf_addr_t) current->gid);
	NEW_AUX_ENT(AT_EGID, (elf_addr_t) current->egid);
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(long)u_platform);
	}
	/* AT_NULL terminates the auxiliary vector. */
	NEW_AUX_ENT(AT_NULL, 0);
#undef NEW_AUX_ENT

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1);
	if (interp_aout) {
		items += 3; /* a.out interpreters require argv & envp too */
	} else {
		items += 1; /* ELF interpreters only put argc on the stack */
	}
	/* 16-byte align the final stack pointer. */
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t *)bprm->p;
#endif

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	__put_user(argc, sp++);
	if (interp_aout) {
		argv = sp + 2;
		envp = argv + argc + 1;
		__put_user((elf_addr_t)(long)argv, sp++);
		__put_user((elf_addr_t)(long)envp, sp++);
	} else {
		argv = sp;
		envp = argv + argc + 1;
	}

	/* Populate argv and envp by walking the strings already copied
	 * to the stack, starting at arg_start. */
	p = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, argv++);
		len = strnlen_user((void *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return;	/* NOTE(review): leaves the tables truncated */
		p += len;
	}
	__put_user(0, argv);
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, envp++);
		len = strnlen_user((void *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return;
		p += len;
	}
	__put_user(0, envp);
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	sp = (elf_addr_t *)envp + 1;
	/* NOTE(review): copy_to_user() result is ignored here. */
	copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t));
}
257 #ifndef elf_map
259 static unsigned long elf_map(struct file *filep, unsigned long addr,
260 struct elf_phdr *eppnt, int prot, int type)
262 unsigned long map_addr;
264 down_write(&current->mm->mmap_sem);
265 map_addr = do_mmap(filep, ELF_PAGESTART(addr),
266 eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr), prot, type,
267 eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr));
268 up_write(&current->mm->mmap_sem);
269 return(map_addr);
272 #endif /* !elf_map */
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

/*
 * Map the ELF interpreter (dynamic linker) named by the binary's
 * PT_INTERP segment.  On success returns the interpreter's entry
 * address and stores its load base in *interp_load_addr; on failure
 * returns a value for which BAD_ADDR() is true.
 */
static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
				     struct file * interpreter,
				     unsigned long *interp_load_addr)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	int retval, i, size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */

	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
	error = retval;
	/* NOTE(review): a short read (0 <= retval < size) is not detected
	 * here; only retval < 0 is treated as failure — confirm upstream. */
	if (retval < 0)
		goto out_close;

	eppnt = elf_phdata;
	for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			/* After the first segment of an ET_DYN interpreter,
			 * all later segments go at fixed offsets from it. */
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;

			map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
			/* NOTE(review): on this path 'error' still holds the
			 * kernel_read() byte count, not a meaningful error. */
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Find the end of the file mapping for this phdr, and keep
			 * track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}

	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	padzero(elf_bss);
	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */

	/* Map the last of the bss segment */
	if (last_bss > elf_bss)
		do_brk(elf_bss, last_bss - elf_bss);

	*interp_load_addr = load_addr;
	error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}
387 static unsigned long load_aout_interp(struct exec * interp_ex,
388 struct file * interpreter)
390 unsigned long text_data, elf_entry = ~0UL;
391 char * addr;
392 loff_t offset;
394 current->mm->end_code = interp_ex->a_text;
395 text_data = interp_ex->a_text + interp_ex->a_data;
396 current->mm->end_data = text_data;
397 current->mm->brk = interp_ex->a_bss + text_data;
399 switch (N_MAGIC(*interp_ex)) {
400 case OMAGIC:
401 offset = 32;
402 addr = (char *) 0;
403 break;
404 case ZMAGIC:
405 case QMAGIC:
406 offset = N_TXTOFF(*interp_ex);
407 addr = (char *) N_TXTADDR(*interp_ex);
408 break;
409 default:
410 goto out;
413 do_brk(0, text_data);
414 if (!interpreter->f_op || !interpreter->f_op->read)
415 goto out;
416 if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
417 goto out;
418 flush_icache_range((unsigned long)addr,
419 (unsigned long)addr + text_data);
421 do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
422 interp_ex->a_bss);
423 elf_entry = interp_ex->a_entry;
425 out:
426 return elf_entry;
430 * These are the functions used to load ELF style executables and shared
431 * libraries. There is no binary dependent code anywhere else.
434 #define INTERPRETER_NONE 0
435 #define INTERPRETER_AOUT 1
436 #define INTERPRETER_ELF 2
/*
 * Load an ELF executable: validate the ELF header, read the program
 * headers, find and load the PT_INTERP interpreter (ELF or a.out),
 * flush the old image (point of no return), map all PT_LOAD segments,
 * build the new stack tables and start the thread at the entry point.
 * Returns 0 on success or a negative errno before the point of no
 * return; after it, failure kills the task with a signal.
 */
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	unsigned long error;
	struct elf_phdr * elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry, interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	struct elfhdr elf_ex;
	struct elfhdr interp_elf_ex;
	struct exec interp_ex;
	char passed_fileno[6];

	/* Get the exec-header */
	elf_ex = *((struct elfhdr *) bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&elf_ex))
		goto out;
	if (!bprm->file->f_op||!bprm->file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	retval = -ENOMEM;
	if (elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = elf_ex.e_phnum * sizeof(struct elf_phdr);
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	/* NOTE(review): a short read (0 <= retval < size) is not detected
	 * here; only retval < 0 is treated as failure. */
	retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *) elf_phdata, size);
	if (retval < 0)
		goto out_free_ph;

	/* Keep the executable open on an fd; a.out interpreters get its
	 * number as an extra argv[] entry (passed_fileno below). */
	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_ph;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	/* Scan the program headers for a PT_INTERP entry and load the
	 * interpreter's headers into interp_ex / interp_elf_ex. */
	for (i = 0; i < elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */

			retval = -ENOMEM;
			if (elf_ppnt->p_filesz > PATH_MAX)
				goto out_free_file;
			elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
							   GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					   elf_interpreter,
					   elf_ppnt->p_filesz);
			if (retval < 0)
				goto out_free_interp;
			/* NOTE(review): nothing verifies that the PT_INTERP
			 * string is NUL-terminated before the strcmp()s
			 * below — presumably trusted input; confirm. */
			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;

			/*
			 * The early SET_PERSONALITY here is so that the lookup
			 * for the interpreter happens in the namespace of the
			 * to-be-execed image.  SET_PERSONALITY can select an
			 * alternate root.
			 *
			 * However, SET_PERSONALITY is NOT allowed to switch
			 * this task into the new images's memory mapping
			 * policy - that is, TASK_SIZE must still evaluate to
			 * that which is appropriate to the execing application.
			 * This is because exit_mmap() needs to have TASK_SIZE
			 * evaluate to the size of the old image.
			 *
			 * So if (say) a 64-bit application is execing a 32-bit
			 * application it is the architecture's responsibility
			 * to defer changing the value of TASK_SIZE until the
			 * switch really is going to happen - do this in
			 * flush_thread().	- akpm
			 */
			SET_PERSONALITY(elf_ex, ibcs2_interpreter);

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;
			retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
			if (retval < 0)
				goto out_free_dentry;

			/* Get the exec headers */
			interp_ex = *((struct exec *) bprm->buf);
			interp_elf_ex = *((struct elfhdr *) bprm->buf);
			break;
		}
		elf_ppnt++;
	}

	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(interp_ex) != OMAGIC) &&
		    (N_MAGIC(interp_ex) != ZMAGIC) &&
		    (N_MAGIC(interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (memcmp(interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
			// FIXME - ratelimit this before re-enabling
			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
	} else {
		/* Executables without an interpreter also need a personality  */
		SET_PERSONALITY(elf_ex, ibcs2_interpreter);
	}

	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */

	if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
		char *passed_p = passed_fileno;
		sprintf(passed_fileno, "%d", elf_exec_fileno);

		if (elf_interpreter) {
			retval = copy_strings_kernel(1, &passed_p, bprm);
			if (retval)
				goto out_free_dentry;
			bprm->argc++;
		}
	}

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* OK, This is the point of no return */
	current->mm->start_data = 0;
	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->mmap = NULL;
	current->flags &= ~PF_FORKNOEXEC;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(elf_ex, ibcs2_interpreter);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->rss = 0;
	current->mm->free_area_cache = TASK_UNMAPPED_BASE;
	retval = setup_arg_pages(bprm);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}

	current->mm->start_stack = bprm->p;

	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory.  At this point, we assume that
	   the image should be loaded at fixed address, not at a variable
	   address. */

	for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			set_brk (elf_bss + load_bias, elf_brk + load_bias);
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				clear_user((void *) elf_bss + load_bias, nbyte);
			}
		}

		if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (elf_ex.e_type == ET_DYN) {
			/* Try and get dynamic programs out of the way of the default mmap
			   base, as well as whatever program they might try to exec.  This
			   is because the brk will follow the loader, and is not movable.  */
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
		/* NOTE(review): a failed elf_map() here is silently skipped
		 * ('continue') rather than treated as a fatal error. */
		if (BAD_ADDR(error))
			continue;

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (elf_ex.e_type == ET_DYN) {
				/* Fold the kernel's actual placement of the
				 * first segment into load_bias. */
				load_bias += error -
				             ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_addr;
			}
		}
		/* Track code/data extents for the mm bookkeeping below. */
		k = elf_ppnt->p_vaddr;
		if (k < start_code) start_code = k;
		if (start_data < k) start_data = k;

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}

	elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT)
			elf_entry = load_aout_interp(&interp_ex,
						     interpreter);
		else
			elf_entry = load_elf_interp(&interp_elf_ex,
						    interpreter,
						    &interp_load_addr);

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);

		if (BAD_ADDR(elf_entry)) {
			printk(KERN_ERR "Unable to load interpreter\n");
			kfree(elf_phdata);
			send_sig(SIGSEGV, current, 0);
			retval = -ENOEXEC; /* Nobody gets to see this, but.. */
			goto out;
		}
		reloc_func_desc = interp_load_addr;
	} else {
		elf_entry = elf_ex.e_entry;
	}

	kfree(elf_phdata);

	/* a.out interpreters keep elf_exec_fileno open (they were passed
	 * its number in argv); everyone else is done with it. */
	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);

	set_binfmt(&elf_format);

	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	create_elf_tables(bprm, &elf_ex, (interpreter_type == INTERPRETER_AOUT),
			load_addr, interp_load_addr);
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections */
	set_brk(elf_bss, elf_brk);

	padzero(elf_bss);

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior.  Sigh.  */
		/* N.B. Shouldn't the size here be PAGE_SIZE?? */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically links apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}
	retval = 0;
out:
	return retval;

	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	fput(interpreter);
out_free_interp:
	if (elf_interpreter)
		kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
830 /* This is really simpleminded and specialized - we are loading an
831 a.out library that is given an ELF header. */
833 static int load_elf_library(struct file *file)
835 struct elf_phdr *elf_phdata;
836 unsigned long elf_bss, bss, len;
837 int retval, error, i, j;
838 struct elfhdr elf_ex;
840 error = -ENOEXEC;
841 retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
842 if (retval != sizeof(elf_ex))
843 goto out;
845 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
846 goto out;
848 /* First of all, some simple consistency checks */
849 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
850 !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
851 goto out;
853 /* Now read in all of the header information */
855 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
856 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
858 error = -ENOMEM;
859 elf_phdata = (struct elf_phdr *) kmalloc(j, GFP_KERNEL);
860 if (!elf_phdata)
861 goto out;
863 error = -ENOEXEC;
864 retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, j);
865 if (retval != j)
866 goto out_free_ph;
868 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
869 if ((elf_phdata + i)->p_type == PT_LOAD) j++;
870 if (j != 1)
871 goto out_free_ph;
873 while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
875 /* Now use mmap to map the library into memory. */
876 down_write(&current->mm->mmap_sem);
877 error = do_mmap(file,
878 ELF_PAGESTART(elf_phdata->p_vaddr),
879 (elf_phdata->p_filesz +
880 ELF_PAGEOFFSET(elf_phdata->p_vaddr)),
881 PROT_READ | PROT_WRITE | PROT_EXEC,
882 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
883 (elf_phdata->p_offset -
884 ELF_PAGEOFFSET(elf_phdata->p_vaddr)));
885 up_write(&current->mm->mmap_sem);
886 if (error != ELF_PAGESTART(elf_phdata->p_vaddr))
887 goto out_free_ph;
889 elf_bss = elf_phdata->p_vaddr + elf_phdata->p_filesz;
890 padzero(elf_bss);
892 len = ELF_PAGESTART(elf_phdata->p_filesz + elf_phdata->p_vaddr + ELF_MIN_ALIGN - 1);
893 bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
894 if (bss > len)
895 do_brk(len, bss - len);
896 error = 0;
898 out_free_ph:
899 kfree(elf_phdata);
900 out:
901 return error;
905 * Note that some platforms still use traditional core dumps and not
906 * the ELF core dump. Each platform can select it as appropriate.
908 #ifdef USE_ELF_CORE_DUMP
911 * ELF core dumper
913 * Modelled on fs/exec.c:aout_core_dump()
914 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
917 * These are the only things you should do on a core-file: use only these
918 * functions to write out all the necessary info.
920 static int dump_write(struct file *file, const void *addr, int nr)
922 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
925 static int dump_seek(struct file *file, off_t off)
927 if (file->f_op->llseek) {
928 if (file->f_op->llseek(file, off, 0) != off)
929 return 0;
930 } else
931 file->f_pos = off;
932 return 1;
/*
 * Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 *
 * I think we should skip something. But I am not sure how. H.J.
 */
static int maydump(struct vm_area_struct *vma)
{
	/*
	 * If we may not read the contents, don't allow us to dump
	 * them either. "dump_write()" can't handle it anyway.
	 */
	if (!(vma->vm_flags & VM_READ))
		return 0;

	/* Do not dump I/O mapped devices! -DaveM */
	if (vma->vm_flags & VM_IO)
		return 0;
#if 1
	/* Writable or growable (heap/stack-like) mappings are dumped. */
	if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
		return 1;
	/* Everything else readable (e.g. file-backed text) is skipped.
	 * NOTE(review): VM_READ was already required above, so this test
	 * matches every remaining vma and the final "return 1" appears
	 * unreachable — presumably intentional, but worth confirming. */
	if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
		return 0;
#endif
	return 1;
}
963 #define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
/* An ELF note in memory */
struct memelfnote
{
	const char *name;	/* note owner, e.g. "CORE" or "LINUX" */
	int type;		/* note type: NT_PRSTATUS, NT_PRPSINFO, ... */
	unsigned int datasz;	/* size of *data in bytes */
	void *data;		/* payload, emitted by writenote() */
};
974 static int notesize(struct memelfnote *en)
976 int sz;
978 sz = sizeof(struct elf_note);
979 sz += roundup(strlen(en->name) + 1, 4);
980 sz += roundup(en->datasz, 4);
982 return sz;
985 #define DUMP_WRITE(addr, nr) \
986 do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
987 #define DUMP_SEEK(off) \
988 do { if (!dump_seek(file, (off))) return 0; } while(0)
990 static int writenote(struct memelfnote *men, struct file *file)
992 struct elf_note en;
994 en.n_namesz = strlen(men->name) + 1;
995 en.n_descsz = men->datasz;
996 en.n_type = men->type;
998 DUMP_WRITE(&en, sizeof(en));
999 DUMP_WRITE(men->name, en.n_namesz);
1000 /* XXX - cast from long long to long to avoid need for libgcc.a */
1001 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1002 DUMP_WRITE(men->data, men->datasz);
1003 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1005 return 1;
1007 #undef DUMP_WRITE
1008 #undef DUMP_SEEK
1010 #define DUMP_WRITE(addr, nr) \
1011 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
1012 goto end_coredump;
1013 #define DUMP_SEEK(off) \
1014 if (!dump_seek(file, (off))) \
1015 goto end_coredump;
/*
 * Fill the fixed part of the ET_CORE ELF header for a core dump with
 * 'segs' program headers and no section headers.
 */
static inline void fill_elf_header(struct elfhdr *elf, int segs)
{
	/* Identification: magic, class, data encoding, version, zero pad. */
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	/* Program headers immediately follow the ELF header. */
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_shoff = 0;
#ifdef ELF_CORE_EFLAGS
	elf->e_flags = ELF_CORE_EFLAGS;
#else
	elf->e_flags = 0;
#endif
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
	/* No section header table in a core file. */
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;
	return;
}
1045 static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
1047 phdr->p_type = PT_NOTE;
1048 phdr->p_offset = offset;
1049 phdr->p_vaddr = 0;
1050 phdr->p_paddr = 0;
1051 phdr->p_filesz = sz;
1052 phdr->p_memsz = 0;
1053 phdr->p_flags = 0;
1054 phdr->p_align = 0;
1055 return;
1058 static void fill_note(struct memelfnote *note, const char *name, int type,
1059 unsigned int sz, void *data)
1061 note->name = name;
1062 note->type = type;
1063 note->datasz = sz;
1064 note->data = data;
1065 return;
/*
 * fill up all the fields in prstatus from the given task struct, except registers
 * which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
			  struct task_struct *p, long signr)
{
	/* The signal that triggered the dump, in both fields. */
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	/* Only the first word of the pending/blocked signal sets fits
	 * the prstatus ABI. */
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = p->pid;
	prstatus->pr_ppid = p->parent->pid;
	prstatus->pr_pgrp = p->pgrp;
	prstatus->pr_sid = p->session;
	/* CPU times, converted from jiffies to the timeval ABI format. */
	jiffies_to_timeval(p->utime, &prstatus->pr_utime);
	jiffies_to_timeval(p->stime, &prstatus->pr_stime);
	jiffies_to_timeval(p->cutime, &prstatus->pr_cutime);
	jiffies_to_timeval(p->cstime, &prstatus->pr_cstime);
}
/*
 * Fill the NT_PRPSINFO note: process identity, scheduler state and the
 * command line, read from the dumping task itself.
 */
static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p)
{
	int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	/* Command line lives in the task's own user memory between
	 * arg_start and arg_end; clamp to the fixed pr_psargs buffer. */
	len = p->mm->arg_end - p->mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	/* NOTE(review): copy_from_user() result is ignored; on a fault
	 * the buffer simply keeps the zeroes from the memset above. */
	copy_from_user(&psinfo->pr_psargs,
		      (const char *)p->mm->arg_start, len);
	/* argv strings are NUL-separated in memory; join with spaces. */
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = p->pid;
	psinfo->pr_ppid = p->parent->pid;
	psinfo->pr_pgrp = p->pgrp;
	psinfo->pr_sid = p->session;

	/* Encode task state as an index plus a one-letter code. */
	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	/* Legacy 16-bit uid/gid for the old prpsinfo layout. */
	psinfo->pr_uid = NEW_TO_OLD_UID(p->uid);
	psinfo->pr_gid = NEW_TO_OLD_GID(p->gid);
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return;
}
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;		/* link in the per-dump thread list */
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* NT_PRXFPREG */
#endif
	struct memelfnote notes[3];	/* one memelfnote per payload above */
	int num_notes;			/* how many entries of notes[] are valid */
};
/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then
 * create a single section for them in the final core file.
 *
 * Returns the total on-file size of the notes collected for @p, or 0 on
 * allocation failure (the caller treats 0 as "abort the dump").
 */
static int elf_dump_thread_status(long signr, struct task_struct * p, struct list_head * thread_list)
{
	struct elf_thread_status *t;
	int sz = 0;

	/* GFP_ATOMIC: the caller holds the tasklist_lock read lock
	 * (see elf_core_dump), so we must not sleep here. */
	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return 0;
	memset(t, 0, sizeof(*t));

	INIT_LIST_HEAD(&t->list);
	t->num_notes = 0;

	/* notes[0]: the thread's prstatus incl. its register set. */
	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	/* notes[1]: FPU state, only if the thread has used the FPU. */
	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, &t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	/* notes[2]: extended FPU state, where the arch provides it. */
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	list_add(&t->list, thread_list);
	return sz;
}
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 *
 * Returns non-zero once anything has been dumped.
 *
 * NOTE(review): DUMP_WRITE()/DUMP_SEEK() are macros defined earlier in
 * this file (out of view here); they appear to account against 'size' /
 * 'limit' and to 'goto end_coredump' on failure -- confirm before
 * restructuring, as they hide control flow.
 */
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
{
#define NUM_NOTES	5
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	size_t size = 0;	/* bytes emitted so far (updated inside DUMP_WRITE) */
	int i;
	struct vm_area_struct *vma;
	struct elfhdr *elf = NULL;
	off_t offset = 0, dataoff;
	unsigned long limit = current->rlim[RLIMIT_CORE].rlim_cur;
	int numnote = NUM_NOTES;
	struct memelfnote *notes = NULL;
	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
	struct task_struct *g, *p;
	LIST_HEAD(thread_list);
	struct list_head *t;
	elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu = NULL;
#endif
	int thread_status_size = 0;

	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated.  So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto cleanup;
	prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
	if (!prstatus)
		goto cleanup;
	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (!psinfo)
		goto cleanup;
	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
	if (!notes)
		goto cleanup;
	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
	if (!fpu)
		goto cleanup;
#ifdef ELF_CORE_COPY_XFPREGS
	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
	if (!xfpu)
		goto cleanup;
#endif

	/* capture the status of all other threads */
	if (signr) {
		read_lock(&tasklist_lock);
		do_each_thread(g,p)
			/* other threads sharing our mm, i.e. our thread group */
			if (current->mm == p->mm && current != p) {
				int sz = elf_dump_thread_status(signr, p, &thread_list);
				if (!sz) {
					/* allocation failure inside the helper */
					read_unlock(&tasklist_lock);
					goto cleanup;
				} else
					thread_status_size += sz;
			}
		while_each_thread(g,p);
		read_unlock(&tasklist_lock);
	}

	/* now collect the dump for the current */
	memset(prstatus, 0, sizeof(*prstatus));
	fill_prstatus(prstatus, current, signr);
	elf_core_copy_regs(&prstatus->pr_reg, regs);

	segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
	segs += ELF_CORE_EXTRA_PHDRS;
#endif

	/* Set up header */
	fill_elf_header(elf, segs+1);	/* including notes section */

	has_dumped = 1;
	current->flags |= PF_DUMPCORE;

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);

	fill_psinfo(psinfo, current->group_leader);
	fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);

	/* Try to dump the FPU. */
	if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, fpu)))
		fill_note(notes +3, "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
	else
		--numnote;	/* notes[3] unused: no FPU state to dump */
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, xfpu))
		fill_note(notes +4, "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
	else
		--numnote;
#else
	numnote--;	/* arch has no extended FPU note; notes[4] unused */
#endif

	/* Writes below go through the file ops with kernel-space buffers. */
	fs = get_fs();
	set_fs(KERNEL_DS);

	DUMP_WRITE(elf, sizeof(*elf));
	offset += sizeof(*elf);				/* Elf header */
	offset += (segs+1) * sizeof(struct elf_phdr);	/* Program headers */

	/* Write notes phdr entry */
	{
		struct elf_phdr phdr;
		int sz = 0;

		for (i = 0; i < numnote; i++)
			sz += notesize(notes + i);

		sz += thread_status_size;

		fill_elf_note_phdr(&phdr, sz, offset);
		offset += sz;
		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	/* Page-align dumped data */
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	/* Write program headers for segments dump */
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct elf_phdr phdr;
		size_t sz;

		sz = vma->vm_end - vma->vm_start;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		/* segments we choose not to dump keep a zero file size */
		phdr.p_filesz = maydump(vma) ? sz : 0;
		phdr.p_memsz = sz;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));
	}

#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
	ELF_CORE_WRITE_EXTRA_PHDRS;
#endif

	/* write out the notes section */
	for (i = 0; i < numnote; i++)
		if (!writenote(notes + i, file))
			goto end_coredump;

	/* write out the thread status notes section */
	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file))
				goto end_coredump;
	}

	DUMP_SEEK(dataoff);

	/* Second pass: dump the memory of each eligible VMA, page by page. */
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		unsigned long addr;

		if (!maydump(vma))
			continue;

		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			struct page* page;
			/* deliberately shadows the outer vma: holds the vma
			 * that get_user_pages() resolves addr into */
			struct vm_area_struct *vma;

			/* pull one page (read access; assumes the final '1'
			 * is the 'force' flag -- TODO confirm against this
			 * kernel's get_user_pages signature) */
			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
						&page, &vma) <= 0) {
				/* unmapped: leave a hole in the core file */
				DUMP_SEEK (file->f_pos + PAGE_SIZE);
			} else {
				if (page == ZERO_PAGE(addr)) {
					/* all-zero page: seek, keep file sparse */
					DUMP_SEEK (file->f_pos + PAGE_SIZE);
				} else {
					void *kaddr;
					flush_cache_page(vma, addr);
					kaddr = kmap(page);
					DUMP_WRITE(kaddr, PAGE_SIZE);
					kunmap(page);
				}
				page_cache_release(page);
			}
		}
	}

#ifdef ELF_CORE_WRITE_EXTRA_DATA
	ELF_CORE_WRITE_EXTRA_DATA;
#endif

	if ((off_t) file->f_pos != offset) {
		/* Sanity check */
		printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
		       (off_t) file->f_pos, offset);
	}

end_coredump:
	set_fs(fs);

cleanup:
	/* free the per-thread status records collected above */
	while(!list_empty(&thread_list)) {
		struct list_head *tmp = thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	/* kfree(NULL) is a no-op, so partial-allocation failures are fine */
	kfree(elf);
	kfree(prstatus);
	kfree(psinfo);
	kfree(notes);
	kfree(fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(xfpu);
#endif
	return has_dumped;
#undef NUM_NOTES
}
1431 #endif /* USE_ELF_CORE_DUMP */
1433 static int __init init_elf_binfmt(void)
1435 return register_binfmt(&elf_format);
1438 static void __exit exit_elf_binfmt(void)
1440 /* Remove the COFF and ELF loaders. */
1441 unregister_binfmt(&elf_format);
/* Module plumbing: register/unregister the loader, declare the license. */
module_init(init_elf_binfmt)
module_exit(exit_elf_binfmt)
MODULE_LICENSE("GPL");