/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/pgalloc.h>

#include <linux/elf.h>
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
static int load_elf_library(struct file*);
static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);

#ifndef elf_addr_t
#define elf_addr_t unsigned long
#endif
/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
#else
#define elf_core_dump NULL
#endif
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
# define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
#else
# define ELF_MIN_ALIGN PAGE_SIZE
#endif
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
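
/*
 * Example, assuming ELF_MIN_ALIGN is 4096 (0x1000):
 *   ELF_PAGESTART(0x08048123)  == 0x08048000   (round down to page start)
 *   ELF_PAGEOFFSET(0x08048123) == 0x123        (offset within the page)
 *   ELF_PAGEALIGN(0x08048123)  == 0x08049000   (round up to next page)
 */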
static struct linux_binfmt elf_format = {
    .module       = THIS_MODULE,
    .load_binary  = load_elf_binary,
    .load_shlib   = load_elf_library,
    .core_dump    = elf_core_dump,
    .min_coredump = ELF_EXEC_PAGESIZE
};

#define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
static int set_brk(unsigned long start, unsigned long end)
{
    start = ELF_PAGEALIGN(start);
    end = ELF_PAGEALIGN(end);
    if (end > start) {
        unsigned long addr = do_brk(start, end - start);
        if (BAD_ADDR(addr))
            return addr;
    }
    current->mm->start_brk = current->mm->brk = end;
    return 0;
}
/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory */

static void padzero(unsigned long elf_bss)
{
    unsigned long nbyte;

    nbyte = ELF_PAGEOFFSET(elf_bss);
    if (nbyte) {
        nbyte = ELF_MIN_ALIGN - nbyte;
        clear_user((void *) elf_bss, nbyte);
    }
}
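
/*
 * Example: with 4096-byte pages, padzero(0x0804a123) computes
 * nbyte = 0x1000 - 0x123 = 0xedd and zeroes the 0xedd bytes from
 * 0x0804a123 up to the next page boundary at 0x0804b000, so stale
 * file data mapped past the end of .data cannot leak into .bss.
 */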
/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t *)(sp) + (items))
#define STACK_ROUND(sp, items) \
    ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ elf_addr_t *old_sp = (elf_addr_t *)sp; sp += len; old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t *)(sp) - (items))
#define STACK_ROUND(sp, items) \
    (((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
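
/*
 * On the usual grows-down stack, STACK_ALLOC(sp, len) moves sp down by
 * len bytes and yields the new (lower) address, STACK_ADD(sp, items)
 * reserves 'items' elf_addr_t slots below sp, and STACK_ROUND masks the
 * result to a 16-byte boundary, which most ABIs require of the initial
 * stack pointer.  The CONFIG_STACK_GROWSUP variants mirror this in the
 * opposite direction.
 */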
static void
create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
        int interp_aout, unsigned long load_addr,
        unsigned long interp_load_addr)
{
    unsigned long p = bprm->p;
    int argc = bprm->argc;
    int envc = bprm->envc;
    elf_addr_t *argv, *envp;
    elf_addr_t *sp, *u_platform;
    const char *k_platform = ELF_PLATFORM;
    int items;
    elf_addr_t *elf_info;
    int ei_index = 0;
    struct task_struct *tsk = current;

    /*
     * If this architecture has a platform capability string, copy it
     * to userspace.  In some cases (Sparc), this info is impossible
     * for userspace to get any other way, in others (i386) it is
     * merely difficult.
     */

    u_platform = NULL;
    if (k_platform) {
        size_t len = strlen(k_platform) + 1;

#ifdef CONFIG_X86_HT
        /*
         * In some cases (e.g. Hyper-Threading), we want to avoid L1
         * evictions by the processes running on the same package. One
         * thing we can do is to shuffle the initial stack for them.
         *
         * The conditionals here are unneeded, but kept in to make the
         * code behaviour the same as pre change unless we have
         * hyperthreaded processors. This should be cleaned up
         * before 2.6
         */
        if (smp_num_siblings > 1)
            STACK_ALLOC(p, ((current->pid % 64) << 7));
#endif
        u_platform = (elf_addr_t *)STACK_ALLOC(p, len);
        __copy_to_user(u_platform, k_platform, len);
    }

    /* Create the ELF interpreter info */
    elf_info = (elf_addr_t *) current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
    do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)

#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come first so PPC can do its special alignment of
     * AUXV.
     */
    ARCH_DLINFO;
#endif
    NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
    NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
    NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
    NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
    NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
    NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
    NEW_AUX_ENT(AT_BASE, interp_load_addr);
    NEW_AUX_ENT(AT_FLAGS, 0);
    NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
    NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
    NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
    NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
    NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
    NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
    if (k_platform) {
        NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(long)u_platform);
    }
#undef NEW_AUX_ENT

    /* AT_NULL is zero; clear the rest too */
    memset(&elf_info[ei_index], 0,
           sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

    /* And advance past the AT_NULL entry.  */
    ei_index += 2;

    sp = STACK_ADD(p, ei_index);

    items = (argc + 1) + (envc + 1);
    if (interp_aout) {
        items += 3; /* a.out interpreters require argv & envp too */
    } else {
        items += 1; /* ELF interpreters only put argc on the stack */
    }
    bprm->p = STACK_ROUND(sp, items);

    /* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
    sp = (elf_addr_t *)bprm->p - items - ei_index;
    bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
#else
    sp = (elf_addr_t *)bprm->p;
#endif

    /* Now, let's put argc (and argv, envp if appropriate) on the stack */
    __put_user(argc, sp++);
    if (interp_aout) {
        argv = sp + 2;
        envp = argv + argc + 1;
        __put_user((elf_addr_t)(long)argv, sp++);
        __put_user((elf_addr_t)(long)envp, sp++);
    } else {
        argv = sp;
        envp = argv + argc + 1;
    }

    /* Populate argv and envp */
    p = current->mm->arg_start;
    while (argc-- > 0) {
        size_t len;
        __put_user((elf_addr_t)p, argv++);
        len = strnlen_user((void *)p, PAGE_SIZE*MAX_ARG_PAGES);
        if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
            return;
        p += len;
    }
    __put_user(0, argv);
    current->mm->arg_end = current->mm->env_start = p;
    while (envc-- > 0) {
        size_t len;
        __put_user((elf_addr_t)p, envp++);
        len = strnlen_user((void *)p, PAGE_SIZE*MAX_ARG_PAGES);
        if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
            return;
        p += len;
    }
    __put_user(0, envp);
    current->mm->env_end = p;

    /* Put the elf_info on the stack in the right place.  */
    sp = (elf_addr_t *)envp + 1;
    copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t));
}
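
/*
 * For the common ELF (non-a.out) case the resulting initial stack,
 * reading upwards from the final stack pointer, looks like:
 *
 *	argc
 *	argv[0] ... argv[argc-1]	(pointers into the string area)
 *	NULL
 *	envp[0] ... envp[n-1]
 *	NULL
 *	auxv type/value pairs		(the elf_info built above)
 *	AT_NULL terminator
 *	argument and environment strings, platform string
 */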
#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
        struct elf_phdr *eppnt, int prot, int type)
{
    unsigned long map_addr;

    down_write(&current->mm->mmap_sem);
    map_addr = do_mmap(filep, ELF_PAGESTART(addr),
               eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr), prot, type,
               eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr));
    up_write(&current->mm->mmap_sem);
    return(map_addr);
}

#endif /* !elf_map */
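
/*
 * Note that elf_map() deliberately maps from the start of the page
 * containing 'addr': the request is rounded down with ELF_PAGESTART()
 * and the file offset is rounded down by the same ELF_PAGEOFFSET()
 * amount, so the virtual address and the file offset stay congruent
 * modulo the page size, which is what mmap requires.
 */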
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
                     struct file * interpreter,
                     unsigned long *interp_load_addr)
{
    struct elf_phdr *elf_phdata;
    struct elf_phdr *eppnt;
    unsigned long load_addr = 0;
    int load_addr_set = 0;
    unsigned long last_bss = 0, elf_bss = 0;
    unsigned long error = ~0UL;
    int retval, i, size;

    /* First of all, some simple consistency checks */
    if (interp_elf_ex->e_type != ET_EXEC &&
        interp_elf_ex->e_type != ET_DYN)
        goto out;
    if (!elf_check_arch(interp_elf_ex))
        goto out;
    if (!interpreter->f_op || !interpreter->f_op->mmap)
        goto out;

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
        goto out;
    if (interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
        goto out;

    /* Now read in all of the header information */

    size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
    if (size > ELF_MIN_ALIGN)
        goto out;
    elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
    if (!elf_phdata)
        goto out;

    retval = kernel_read(interpreter, interp_elf_ex->e_phoff, (char *) elf_phdata, size);
    error = retval;
    if (retval < 0)
        goto out_close;

    eppnt = elf_phdata;
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
            int elf_prot = 0;
            unsigned long vaddr = 0;
            unsigned long k, map_addr;

            if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            vaddr = eppnt->p_vaddr;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
                elf_type |= MAP_FIXED;

            map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
            error = map_addr;
            if (BAD_ADDR(map_addr))
                goto out_close;

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
                load_addr = map_addr - ELF_PAGESTART(vaddr);
                load_addr_set = 1;
            }

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss)
                elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss)
                last_bss = k;
        }
    }

    /*
     * Now fill out the bss section.  First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zero-mapped pages up to and including the
     * last bss page.
     */
    padzero(elf_bss);
    elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        error = do_brk(elf_bss, last_bss - elf_bss);
        if (BAD_ADDR(error))
            goto out_close;
    }

    *interp_load_addr = load_addr;
    error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;

out_close:
    kfree(elf_phdata);
out:
    return error;
}
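
/*
 * On success the value returned above is the interpreter's entry point
 * (e_entry biased by the chosen load address); on failure it is either
 * a negative errno from kernel_read()/do_brk() or the initial ~0UL,
 * all of which trip the BAD_ADDR() check in the caller.
 */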
static unsigned long load_aout_interp(struct exec * interp_ex,
        struct file * interpreter)
{
    unsigned long text_data, elf_entry = ~0UL;
    char * addr;
    loff_t offset;

    current->mm->end_code = interp_ex->a_text;
    text_data = interp_ex->a_text + interp_ex->a_data;
    current->mm->end_data = text_data;
    current->mm->brk = interp_ex->a_bss + text_data;

    switch (N_MAGIC(*interp_ex)) {
    case OMAGIC:
        offset = 32;
        addr = (char *) 0;
        break;
    case ZMAGIC:
    case QMAGIC:
        offset = N_TXTOFF(*interp_ex);
        addr = (char *) N_TXTADDR(*interp_ex);
        break;
    default:
        goto out;
    }

    do_brk(0, text_data);
    if (!interpreter->f_op || !interpreter->f_op->read)
        goto out;
    if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
        goto out;
    flush_icache_range((unsigned long)addr,
                       (unsigned long)addr + text_data);

    do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
        interp_ex->a_bss);
    elf_entry = interp_ex->a_entry;

out:
    return elf_entry;
}
/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
    struct file *interpreter = NULL; /* to shut gcc up */
    unsigned long load_addr = 0, load_bias = 0;
    int load_addr_set = 0;
    char * elf_interpreter = NULL;
    unsigned int interpreter_type = INTERPRETER_NONE;
    unsigned char ibcs2_interpreter = 0;
    unsigned long error;
    struct elf_phdr * elf_ppnt, *elf_phdata;
    unsigned long elf_bss, elf_brk;
    int elf_exec_fileno;
    int retval, i;
    unsigned int size;
    unsigned long elf_entry, interp_load_addr = 0;
    unsigned long start_code, end_code, start_data, end_data;
    unsigned long reloc_func_desc = 0;
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    struct exec interp_ex;
    char passed_fileno[6];
    struct files_struct *files;
    /* Get the exec-header */
    elf_ex = *((struct elfhdr *) bprm->buf);

    retval = -ENOEXEC;
    /* First of all, some simple consistency checks */
    if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
        goto out;

    if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN)
        goto out;
    if (!elf_check_arch(&elf_ex))
        goto out;
    if (!bprm->file->f_op || !bprm->file->f_op->mmap)
        goto out;

    /* Now read in all of the header information */

    retval = -ENOMEM;
    if (elf_ex.e_phentsize != sizeof(struct elf_phdr))
        goto out;
    if (elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
        goto out;
    size = elf_ex.e_phnum * sizeof(struct elf_phdr);
    elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
    if (!elf_phdata)
        goto out;

    retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *) elf_phdata, size);
    if (retval < 0)
        goto out_free_ph;
    files = current->files;	/* Refcounted so ok */
    if (unshare_files() < 0)
        goto out_free_ph;
    if (files == current->files) {
        put_files_struct(files);
        files = NULL;
    }

    /* exec will make our files private anyway, but for the a.out
       loader stuff we need to do it earlier */

    retval = get_unused_fd();
    if (retval < 0)
        goto out_free_fh;
    get_file(bprm->file);
    fd_install(elf_exec_fileno = retval, bprm->file);
    elf_ppnt = elf_phdata;
    elf_bss = 0;
    elf_brk = 0;

    start_code = ~0UL;
    end_code = 0;
    start_data = 0;
    end_data = 0;

    for (i = 0; i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary
             */

            retval = -ENOMEM;
            if (elf_ppnt->p_filesz > PATH_MAX)
                goto out_free_file;
            elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
                               GFP_KERNEL);
            if (!elf_interpreter)
                goto out_free_file;

            retval = kernel_read(bprm->file, elf_ppnt->p_offset,
                       elf_interpreter,
                       elf_ppnt->p_filesz);
            if (retval < 0)
                goto out_free_interp;
            /* If the program interpreter is one of these two,
             * then assume an iBCS2 image. Otherwise assume
             * a native linux image.
             */
            if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 ||
                strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0)
                ibcs2_interpreter = 1;

            /*
             * The early SET_PERSONALITY here is so that the lookup
             * for the interpreter happens in the namespace of the
             * to-be-execed image.  SET_PERSONALITY can select an
             * alternate root.
             *
             * However, SET_PERSONALITY is NOT allowed to switch
             * this task into the new image's memory mapping
             * policy - that is, TASK_SIZE must still evaluate to
             * that which is appropriate to the execing application.
             * This is because exit_mmap() needs to have TASK_SIZE
             * evaluate to the size of the old image.
             *
             * So if (say) a 64-bit application is execing a 32-bit
             * application it is the architecture's responsibility
             * to defer changing the value of TASK_SIZE until the
             * switch really is going to happen - do this in
             * flush_thread().	- akpm
             */
            SET_PERSONALITY(elf_ex, ibcs2_interpreter);

            interpreter = open_exec(elf_interpreter);
            retval = PTR_ERR(interpreter);
            if (IS_ERR(interpreter))
                goto out_free_interp;
            retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
            if (retval < 0)
                goto out_free_dentry;

            /* Get the exec headers */
            interp_ex = *((struct exec *) bprm->buf);
            interp_elf_ex = *((struct elfhdr *) bprm->buf);
            break;
        }
        elf_ppnt++;
    }
    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter) {
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

        /* Now figure out which format our binary is */
        if ((N_MAGIC(interp_ex) != OMAGIC) &&
            (N_MAGIC(interp_ex) != ZMAGIC) &&
            (N_MAGIC(interp_ex) != QMAGIC))
            interpreter_type = INTERPRETER_ELF;

        if (memcmp(interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
            interpreter_type &= ~INTERPRETER_ELF;

        retval = -ELIBBAD;
        if (!interpreter_type)
            goto out_free_dentry;

        /* Make sure only one type was selected */
        if ((interpreter_type & INTERPRETER_ELF) &&
             interpreter_type != INTERPRETER_ELF) {
            // FIXME - ratelimit this before re-enabling
            // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
            interpreter_type = INTERPRETER_ELF;
        }
        /* Verify the interpreter has a valid arch */
        if ((interpreter_type == INTERPRETER_ELF) &&
            !elf_check_arch(&interp_elf_ex))
            goto out_free_dentry;
    } else {
        /* Executables without an interpreter also need a personality */
        SET_PERSONALITY(elf_ex, ibcs2_interpreter);
    }
    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */

    if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
        char *passed_p = passed_fileno;
        sprintf(passed_fileno, "%d", elf_exec_fileno);

        if (elf_interpreter) {
            retval = copy_strings_kernel(1, &passed_p, bprm);
            if (retval)
                goto out_free_dentry;
            bprm->argc++;
        }
    }

    /* Flush all traces of the currently running executable */
    retval = flush_old_exec(bprm);
    if (retval)
        goto out_free_dentry;

    /* Discard our unneeded old files struct */
    if (files) {
        steal_locks(files);
        put_files_struct(files);
        files = NULL;
    }
    /* OK, This is the point of no return */
    current->mm->start_data = 0;
    current->mm->end_data = 0;
    current->mm->end_code = 0;
    current->mm->mmap = NULL;
    current->flags &= ~PF_FORKNOEXEC;

    /* Do this immediately, since STACK_TOP as used in setup_arg_pages
       may depend on the personality.  */
    SET_PERSONALITY(elf_ex, ibcs2_interpreter);

    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */
    current->mm->rss = 0;
    current->mm->free_area_cache = TASK_UNMAPPED_BASE;
    retval = setup_arg_pages(bprm);
    if (retval < 0) {
        send_sig(SIGKILL, current, 0);
        goto out_free_dentry;
    }

    current->mm->start_stack = bprm->p;
    /* Now we do a little grungy work by mmaping the ELF image into
       the correct location in memory.  At this point, we assume that
       the image should be loaded at fixed address, not at a variable
       address. */

    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        int elf_prot = 0, elf_flags;
        unsigned long k, vaddr;

        if (elf_ppnt->p_type != PT_LOAD)
            continue;

        if (unlikely (elf_brk > elf_bss)) {
            unsigned long nbyte;

            /* There was a PT_LOAD segment with p_memsz > p_filesz
               before this one. Map anonymous pages, if needed,
               and clear the area.  */
            retval = set_brk (elf_bss + load_bias,
                      elf_brk + load_bias);
            if (retval) {
                send_sig(SIGKILL, current, 0);
                goto out_free_dentry;
            }
            nbyte = ELF_PAGEOFFSET(elf_bss);
            if (nbyte) {
                nbyte = ELF_MIN_ALIGN - nbyte;
                if (nbyte > elf_brk - elf_bss)
                    nbyte = elf_brk - elf_bss;
                clear_user((void *) elf_bss + load_bias, nbyte);
            }
        }

        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;

        elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;

        vaddr = elf_ppnt->p_vaddr;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec.  This
               is because the brk will follow the loader, and is not movable.  */
            load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
        }

        error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
        if (BAD_ADDR(error))
            continue;

        if (!load_addr_set) {
            load_addr_set = 1;
            load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                         ELF_PAGESTART(load_bias + vaddr);
                load_addr += load_bias;
                reloc_func_desc = load_bias;
            }
        }
        k = elf_ppnt->p_vaddr;
        if (k < start_code) start_code = k;
        if (start_data < k) start_data = k;

        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

        if (k > elf_bss)
            elf_bss = k;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
            end_code = k;
        if (end_data < k)
            end_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk)
            elf_brk = k;
    }
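
    /*
     * At this point load_addr is where the first PT_LOAD segment
     * actually landed (minus its file offset) and, for ET_DYN
     * objects, load_bias is the delta that must be added to every
     * p_vaddr in the file; it stays zero for ET_EXEC, whose program
     * headers already hold absolute addresses.
     */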
    elf_ex.e_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    start_data += load_bias;
    end_data += load_bias;

    /* Calling set_brk effectively mmaps the pages that we need
     * for the bss and break sections.  We must do this before
     * mapping in the interpreter, to make sure it doesn't wind
     * up getting placed where the bss needs to go.
     */
    retval = set_brk(elf_bss, elf_brk);
    if (retval) {
        send_sig(SIGKILL, current, 0);
        goto out_free_dentry;
    }
    padzero(elf_bss);
    if (elf_interpreter) {
        if (interpreter_type == INTERPRETER_AOUT)
            elf_entry = load_aout_interp(&interp_ex,
                             interpreter);
        else
            elf_entry = load_elf_interp(&interp_elf_ex,
                            interpreter,
                            &interp_load_addr);
        if (BAD_ADDR(elf_entry)) {
            printk(KERN_ERR "Unable to load interpreter\n");
            send_sig(SIGSEGV, current, 0);
            retval = -ENOEXEC; /* Nobody gets to see this, but.. */
            goto out_free_dentry;
        }
        reloc_func_desc = interp_load_addr;

        allow_write_access(interpreter);
        fput(interpreter);
        kfree(elf_interpreter);
    } else {
        elf_entry = elf_ex.e_entry;
    }

    kfree(elf_phdata);

    if (interpreter_type != INTERPRETER_AOUT)
        sys_close(elf_exec_fileno);
    set_binfmt(&elf_format);

    compute_creds(bprm);
    current->flags &= ~PF_FORKNOEXEC;
    create_elf_tables(bprm, &elf_ex, (interpreter_type == INTERPRETER_AOUT),
            load_addr, interp_load_addr);
    /* N.B. passed_fileno might not be initialized? */
    if (interpreter_type == INTERPRETER_AOUT)
        current->mm->arg_start += strlen(passed_fileno) + 1;
    current->mm->end_code = end_code;
    current->mm->start_code = start_code;
    current->mm->start_data = start_data;
    current->mm->end_data = end_data;
    current->mm->start_stack = bprm->p;

    if (current->personality & MMAP_PAGE_ZERO) {
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh.  */
        down_write(&current->mm->mmap_sem);
        error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
                MAP_FIXED | MAP_PRIVATE, 0);
        up_write(&current->mm->mmap_sem);
    }
#ifdef ELF_PLAT_INIT
    /*
     * The ABI may specify that certain registers be set up in special
     * ways (on i386 %edx is the address of a DT_FINI function, for
     * example).  In addition, it may also specify (eg, PowerPC64 ELF)
     * that the e_entry field is the address of the function descriptor
     * for the startup routine, rather than the address of the startup
     * routine itself.  This macro performs whatever initialization to
     * the regs structure is required as well as any relocations to the
     * function descriptor entries when executing dynamically linked apps.
     */
    ELF_PLAT_INIT(regs, reloc_func_desc);
#endif
    start_thread(regs, elf_entry, bprm->p);
    if (unlikely(current->ptrace & PT_PTRACED)) {
        if (current->ptrace & PT_TRACE_EXEC)
            ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
        else
            send_sig(SIGTRAP, current, 0);
    }
    retval = 0;
out:
    return retval;
    /* error cleanup */
out_free_dentry:
    allow_write_access(interpreter);
    if (interpreter)
        fput(interpreter);
out_free_interp:
    if (elf_interpreter)
        kfree(elf_interpreter);
out_free_file:
    sys_close(elf_exec_fileno);
out_free_fh:
    if (files) {
        put_files_struct(current->files);
        current->files = files;
    }
out_free_ph:
    kfree(elf_phdata);
    goto out;
}
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */

static int load_elf_library(struct file *file)
{
    struct elf_phdr *elf_phdata;
    struct elf_phdr *eppnt;
    unsigned long elf_bss, bss, len;
    int retval, error, i, j;
    struct elfhdr elf_ex;

    error = -ENOEXEC;
    retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
    if (retval != sizeof(elf_ex))
        goto out;

    if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
        goto out;

    /* First of all, some simple consistency checks */
    if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
        !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
        goto out;

    /* Now read in all of the header information */

    j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
    /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

    error = -ENOMEM;
    elf_phdata = (struct elf_phdr *) kmalloc(j, GFP_KERNEL);
    if (!elf_phdata)
        goto out;

    error = -ENOEXEC;
    retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, j);
    if (retval != j)
        goto out_free_ph;

    for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
        if ((elf_phdata + i)->p_type == PT_LOAD) j++;
    if (j != 1)
        goto out_free_ph;

    /* Walk with a separate cursor so kfree() below still gets the
       pointer kmalloc() returned. */
    eppnt = elf_phdata;
    while (eppnt->p_type != PT_LOAD) eppnt++;

    /* Now use mmap to map the library into memory. */
    down_write(&current->mm->mmap_sem);
    error = do_mmap(file,
            ELF_PAGESTART(eppnt->p_vaddr),
            (eppnt->p_filesz +
             ELF_PAGEOFFSET(eppnt->p_vaddr)),
            PROT_READ | PROT_WRITE | PROT_EXEC,
            MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
            (eppnt->p_offset -
             ELF_PAGEOFFSET(eppnt->p_vaddr)));
    up_write(&current->mm->mmap_sem);
    if (error != ELF_PAGESTART(eppnt->p_vaddr))
        goto out_free_ph;

    elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
    padzero(elf_bss);

    len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
    bss = eppnt->p_memsz + eppnt->p_vaddr;
    if (bss > len)
        do_brk(len, bss - len);
    error = 0;

out_free_ph:
    kfree(elf_phdata);
out:
    return error;
}
/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */
#ifdef USE_ELF_CORE_DUMP

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
    return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}

static int dump_seek(struct file *file, off_t off)
{
    if (file->f_op->llseek) {
        if (file->f_op->llseek(file, off, 0) != off)
            return 0;
    } else
        file->f_pos = off;
    return 1;
}
/*
 * Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 *
 * I think we should skip something. But I am not sure how. H.J.
 */
static int maydump(struct vm_area_struct *vma)
{
    /*
     * If we may not read the contents, don't allow us to dump
     * them either. "dump_write()" can't handle it anyway.
     */
    if (!(vma->vm_flags & VM_READ))
        return 0;

    /* Do not dump I/O mapped devices! -DaveM */
    if (vma->vm_flags & VM_IO)
        return 0;
#if 1
    if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
        return 1;
    if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
        return 0;
#endif
    return 1;
}
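
/*
 * Net effect of the tests above: a segment is dumped only if it is
 * readable, not an I/O mapping, and either writable or a growable
 * (stack) area.  Read-only text and shared file mappings are skipped,
 * since their contents can be recovered from the files on disk.
 */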
#define roundup(x, y)  ((((x)+((y)-1))/(y))*(y))

/* An ELF note in memory */
struct memelfnote
{
    const char *name;
    int type;
    unsigned int datasz;
    void *data;
};

static int notesize(struct memelfnote *en)
{
    int sz;

    sz = sizeof(struct elf_note);
    sz += roundup(strlen(en->name) + 1, 4);
    sz += roundup(en->datasz, 4);

    return sz;
}
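
/*
 * The on-disk layout of a note matches this size computation: the
 * fixed elf_note header, then the NUL-terminated name, then the
 * descriptor data, with the name and data each padded up to a 4-byte
 * boundary (e.g. the 5-byte name "CORE" occupies 8 bytes).
 */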
#define DUMP_WRITE(addr, nr)	\
    do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
#define DUMP_SEEK(off)	\
    do { if (!dump_seek(file, (off))) return 0; } while(0)

static int writenote(struct memelfnote *men, struct file *file)
{
    struct elf_note en;

    en.n_namesz = strlen(men->name) + 1;
    en.n_descsz = men->datasz;
    en.n_type = men->type;

    DUMP_WRITE(&en, sizeof(en));
    DUMP_WRITE(men->name, en.n_namesz);
    /* XXX - cast from long long to long to avoid need for libgcc.a */
    DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
    DUMP_WRITE(men->data, men->datasz);
    DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */

    return 1;
}
#undef DUMP_WRITE
#undef DUMP_SEEK

#define DUMP_WRITE(addr, nr)	\
    if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
        goto end_coredump;
#define DUMP_SEEK(off)	\
    if (!dump_seek(file, (off))) \
        goto end_coredump;
static inline void fill_elf_header(struct elfhdr *elf, int segs)
{
    memcpy(elf->e_ident, ELFMAG, SELFMAG);
    elf->e_ident[EI_CLASS] = ELF_CLASS;
    elf->e_ident[EI_DATA] = ELF_DATA;
    elf->e_ident[EI_VERSION] = EV_CURRENT;
    elf->e_ident[EI_OSABI] = ELF_OSABI;
    memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

    elf->e_type = ET_CORE;
    elf->e_machine = ELF_ARCH;
    elf->e_version = EV_CURRENT;
    elf->e_entry = 0;
    elf->e_phoff = sizeof(struct elfhdr);
    elf->e_shoff = 0;
    elf->e_flags = 0;
    elf->e_ehsize = sizeof(struct elfhdr);
    elf->e_phentsize = sizeof(struct elf_phdr);
    elf->e_phnum = segs;
    elf->e_shentsize = 0;
    elf->e_shnum = 0;
    elf->e_shstrndx = 0;
    return;
}

static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
{
    phdr->p_type = PT_NOTE;
    phdr->p_offset = offset;
    phdr->p_vaddr = 0;
    phdr->p_paddr = 0;
    phdr->p_filesz = sz;
    phdr->p_memsz = 0;
    phdr->p_flags = 0;
    phdr->p_align = 0;
    return;
}
static void fill_note(struct memelfnote *note, const char *name, int type,
        unsigned int sz, void *data)
{
    note->name = name;
    note->type = type;
    note->datasz = sz;
    note->data = data;
    return;
}

/*
 * fill up all the fields in prstatus from the given task struct, except registers
 * which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
            struct task_struct *p, long signr)
{
    prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
    prstatus->pr_sigpend = p->pending.signal.sig[0];
    prstatus->pr_sighold = p->blocked.sig[0];
    prstatus->pr_pid = p->pid;
    prstatus->pr_ppid = p->parent->pid;
    prstatus->pr_pgrp = process_group(p);
    prstatus->pr_sid = p->session;
    jiffies_to_timeval(p->utime, &prstatus->pr_utime);
    jiffies_to_timeval(p->stime, &prstatus->pr_stime);
    jiffies_to_timeval(p->cutime, &prstatus->pr_cutime);
    jiffies_to_timeval(p->cstime, &prstatus->pr_cstime);
}
static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
            struct mm_struct *mm)
{
    int i, len;

    /* first copy the parameters from user space */
    memset(psinfo, 0, sizeof(struct elf_prpsinfo));

    len = mm->arg_end - mm->arg_start;
    if (len >= ELF_PRARGSZ)
        len = ELF_PRARGSZ - 1;
    copy_from_user(&psinfo->pr_psargs,
               (const char *)mm->arg_start, len);
    for (i = 0; i < len; i++)
        if (psinfo->pr_psargs[i] == 0)
            psinfo->pr_psargs[i] = ' ';
    psinfo->pr_psargs[len] = 0;

    psinfo->pr_pid = p->pid;
    psinfo->pr_ppid = p->parent->pid;
    psinfo->pr_pgrp = process_group(p);
    psinfo->pr_sid = p->session;

    i = p->state ? ffz(~p->state) + 1 : 0;
    psinfo->pr_state = i;
    psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
    psinfo->pr_zomb = psinfo->pr_sname == 'Z';
    psinfo->pr_nice = task_nice(p);
    psinfo->pr_flag = p->flags;
    SET_UID(psinfo->pr_uid, p->uid);
    SET_GID(psinfo->pr_gid, p->gid);
    strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

    return;
}
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
    struct list_head list;
    struct elf_prstatus prstatus;	/* NT_PRSTATUS */
    elf_fpregset_t fpu;			/* NT_PRFPREG */
#ifdef ELF_CORE_COPY_XFPREGS
    elf_fpxregset_t xfpu;		/* NT_PRXFPREG */
#endif
    struct memelfnote notes[3];
    int num_notes;
};

/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then
 * create a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct task_struct * p, struct list_head * thread_list)
{
    struct elf_thread_status *t;
    int sz = 0;

    t = kmalloc(sizeof(*t), GFP_ATOMIC);
    if (!t)
        return 0;
    memset(t, 0, sizeof(*t));

    INIT_LIST_HEAD(&t->list);
    t->num_notes = 0;

    fill_prstatus(&t->prstatus, p, signr);
    elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

    fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
    t->num_notes++;
    sz += notesize(&t->notes[0]);

    if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
        fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
        t->num_notes++;
        sz += notesize(&t->notes[1]);
    }

#ifdef ELF_CORE_COPY_XFPREGS
    if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
        fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
        t->num_notes++;
        sz += notesize(&t->notes[2]);
    }
#endif
    list_add(&t->list, thread_list);
    return sz;
}
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
{
#define NUM_NOTES 6
    int has_dumped = 0;
    mm_segment_t fs;
    int segs;
    size_t size = 0;
    int i;
    struct vm_area_struct *vma;
    struct elfhdr *elf = NULL;
    off_t offset = 0, dataoff;
    unsigned long limit = current->rlim[RLIMIT_CORE].rlim_cur;
    int numnote;
    struct memelfnote *notes = NULL;
    struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
    struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
    struct task_struct *g, *p;
    LIST_HEAD(thread_list);
    struct list_head *t;
    elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
    elf_fpxregset_t *xfpu = NULL;
#endif
    int thread_status_size = 0;
    elf_addr_t *auxv;

    /*
     * We no longer stop all VM operations.
     *
     * This is because those processes that could possibly change map_count or
     * the mmap / vma pages are now blocked in do_exit on current finishing
     * this core dump.
     *
     * Only ptrace can touch these memory addresses, but it doesn't change
     * the map_count or the pages allocated.  So no possibility of crashing
     * exists while dumping the mm->vm_next areas to the core file.
     */

    /* alloc memory for large data structures: too large to be on stack */
    elf = kmalloc(sizeof(*elf), GFP_KERNEL);
    if (!elf)
        goto cleanup;
    prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
    if (!prstatus)
        goto cleanup;
    psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
    if (!psinfo)
        goto cleanup;
    notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
    if (!notes)
        goto cleanup;
    fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
    if (!fpu)
        goto cleanup;
#ifdef ELF_CORE_COPY_XFPREGS
    xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
    if (!xfpu)
        goto cleanup;
#endif
    /* capture the status of all other threads */
    if (signr) {
        read_lock(&tasklist_lock);
        do_each_thread(g,p)
            if (current->mm == p->mm && current != p) {
                int sz = elf_dump_thread_status(signr, p, &thread_list);
                if (!sz) {
                    read_unlock(&tasklist_lock);
                    goto cleanup;
                } else
                    thread_status_size += sz;
            }
        while_each_thread(g,p);
        read_unlock(&tasklist_lock);
    }

    /* now collect the dump for the current */
    memset(prstatus, 0, sizeof(*prstatus));
    fill_prstatus(prstatus, current, signr);
    elf_core_copy_regs(&prstatus->pr_reg, regs);

    segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
    segs += ELF_CORE_EXTRA_PHDRS;
#endif

    /* Set up header */
    fill_elf_header(elf, segs+1);	/* including notes section */

    has_dumped = 1;
    current->flags |= PF_DUMPCORE;

    /*
     * Set up the notes in similar form to SVR4 core dumps made
     * with info from their /proc.
     */

    fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);

    fill_psinfo(psinfo, current->group_leader, current->mm);
    fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

    fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);

    numnote = 3;

    auxv = (elf_addr_t *) current->mm->saved_auxv;

    i = 0;
    do
        i += 2;
    while (auxv[i - 2] != AT_NULL);
    fill_note(&notes[numnote++], "CORE", NT_AUXV,
          i * sizeof (elf_addr_t), auxv);
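
    /*
     * saved_auxv was filled in by create_elf_tables() as (type, value)
     * pairs ending with AT_NULL, so the loop above counts elf_addr_t
     * entries two at a time up to and including that terminator.
     */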
    /* Try to dump the FPU. */
    if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
        fill_note(notes + numnote++,
              "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
    if (elf_core_copy_task_xfpregs(current, xfpu))
        fill_note(notes + numnote++,
              "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
#endif

    fs = get_fs();
    set_fs(KERNEL_DS);

    DUMP_WRITE(elf, sizeof(*elf));
    offset += sizeof(*elf);				/* Elf header */
    offset += (segs+1) * sizeof(struct elf_phdr);	/* Program headers */

    /* Write notes phdr entry */
    {
        struct elf_phdr phdr;
        int sz = 0;

        for (i = 0; i < numnote; i++)
            sz += notesize(notes + i);

        sz += thread_status_size;

        fill_elf_note_phdr(&phdr, sz, offset);
        offset += sz;
        DUMP_WRITE(&phdr, sizeof(phdr));
    }

    /* Page-align dumped data */
    dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
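
    /*
     * The running offset so far gives the core file layout:
     *
     *	ELF header
     *	program headers (one PT_NOTE, one PT_LOAD per vma, arch extras)
     *	note data (prstatus, psinfo, task_struct, auxv, fpu, threads)
     *	padding up to ELF_EXEC_PAGESIZE
     *	page-aligned memory segment contents (second pass below)
     */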
    /* Write program headers for segments dump */
    for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
        struct elf_phdr phdr;
        size_t sz;

        sz = vma->vm_end - vma->vm_start;

        phdr.p_type = PT_LOAD;
        phdr.p_offset = offset;
        phdr.p_vaddr = vma->vm_start;
        phdr.p_paddr = 0;
        phdr.p_filesz = maydump(vma) ? sz : 0;
        phdr.p_memsz = sz;
        offset += phdr.p_filesz;
        phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
        if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
        if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
        phdr.p_align = ELF_EXEC_PAGESIZE;

        DUMP_WRITE(&phdr, sizeof(phdr));
    }

#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
    ELF_CORE_WRITE_EXTRA_PHDRS;
#endif
    /* write out the notes section */
    for (i = 0; i < numnote; i++)
        if (!writenote(notes + i, file))
            goto end_coredump;

    /* write out the thread status notes section */
    list_for_each(t, &thread_list) {
        struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
        for (i = 0; i < tmp->num_notes; i++)
            if (!writenote(&tmp->notes[i], file))
                goto end_coredump;
    }

    DUMP_SEEK(dataoff);

    for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
        unsigned long addr;

        if (!maydump(vma))
            continue;

        for (addr = vma->vm_start;
             addr < vma->vm_end;
             addr += PAGE_SIZE) {
            struct page* page;
            struct vm_area_struct *vma;

            if (get_user_pages(current, current->mm, addr, 1, 0, 1,
                        &page, &vma) <= 0) {
                DUMP_SEEK (file->f_pos + PAGE_SIZE);
            } else {
                if (page == ZERO_PAGE(addr)) {
                    DUMP_SEEK (file->f_pos + PAGE_SIZE);
                } else {
                    void *kaddr;
                    flush_cache_page(vma, addr);
                    kaddr = kmap(page);
                    if ((size += PAGE_SIZE) > limit ||
                        !dump_write(file, kaddr,
                        PAGE_SIZE)) {
                        kunmap(page);
                        page_cache_release(page);
                        goto end_coredump;
                    }
                    kunmap(page);
                }
                page_cache_release(page);
            }
        }
    }

#ifdef ELF_CORE_WRITE_EXTRA_DATA
    ELF_CORE_WRITE_EXTRA_DATA;
#endif
    if ((off_t) file->f_pos != offset) {
        /* Sanity check */
        printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
               (off_t) file->f_pos, offset);
    }

end_coredump:
    set_fs(fs);

cleanup:
    while (!list_empty(&thread_list)) {
        struct list_head *tmp = thread_list.next;
        list_del(tmp);
        kfree(list_entry(tmp, struct elf_thread_status, list));
    }

    kfree(elf);
    kfree(prstatus);
    kfree(psinfo);
    kfree(notes);
    kfree(fpu);
#ifdef ELF_CORE_COPY_XFPREGS
    kfree(xfpu);
#endif
    return has_dumped;
#undef NUM_NOTES
}

#endif		/* USE_ELF_CORE_DUMP */
static int __init init_elf_binfmt(void)
{
    return register_binfmt(&elf_format);
}

static void __exit exit_elf_binfmt(void)
{
    /* Remove the ELF loader. */
    unregister_binfmt(&elf_format);
}

module_init(init_elf_binfmt)
module_exit(exit_elf_binfmt)
MODULE_LICENSE("GPL");