/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>

#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/pgalloc.h>

#include <linux/elf.h>

static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
static int load_elf_library(struct file*);
static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);

#ifndef elf_addr_t
#define elf_addr_t unsigned long
#endif

/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
#else
#define elf_core_dump   NULL
#endif

#if ELF_EXEC_PAGESIZE > PAGE_SIZE
# define ELF_MIN_ALIGN  ELF_EXEC_PAGESIZE
#else
# define ELF_MIN_ALIGN  PAGE_SIZE
#endif

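/*
 * Round to ELF_MIN_ALIGN rather than the hardware page size: an
 * architecture may define an ELF ABI page size (ELF_EXEC_PAGESIZE) that
 * is larger than PAGE_SIZE, and segments must be placed at that
 * granularity.
 */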
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

static struct linux_binfmt elf_format = {
        .module         = THIS_MODULE,
        .load_binary    = load_elf_binary,
        .load_shlib     = load_elf_library,
        .core_dump      = elf_core_dump,
        .min_coredump   = ELF_EXEC_PAGESIZE
};

#define BAD_ADDR(x)     ((unsigned long)(x) > TASK_SIZE)

static void set_brk(unsigned long start, unsigned long end)
{
        start = ELF_PAGEALIGN(start);
        end = ELF_PAGEALIGN(end);
        if (end > start)
                do_brk(start, end - start);
        current->mm->start_brk = current->mm->brk = end;
}

/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory */

static void padzero(unsigned long elf_bss)
{
        unsigned long nbyte;

        nbyte = ELF_PAGEOFFSET(elf_bss);
        if (nbyte) {
                nbyte = ELF_MIN_ALIGN - nbyte;
                clear_user((void *) elf_bss, nbyte);
        }
}

/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t *)(sp) + (items))
#define STACK_ROUND(sp, items) \
        ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ elf_addr_t *old_sp = (elf_addr_t *)sp; sp += len; old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t *)(sp) - (items))
#define STACK_ROUND(sp, items) \
        (((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) sp -= len
#endif

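/*
 * create_elf_tables() lays out what the new image expects to find at the
 * top of its stack: argc, the argv and envp pointer arrays, and the ELF
 * auxiliary vector.  The argument and environment strings themselves were
 * already copied in by the generic exec code; STACK_ROUND keeps the final
 * stack pointer 16-byte aligned.
 */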
static void
create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
                int interp_aout, unsigned long load_addr,
                unsigned long interp_load_addr)
{
        unsigned long p = bprm->p;
        int argc = bprm->argc;
        int envc = bprm->envc;
        elf_addr_t *argv, *envp;
        elf_addr_t *sp, *u_platform;
        const char *k_platform = ELF_PLATFORM;
        int items;
        elf_addr_t *elf_info;
        int ei_index = 0;
        struct task_struct *tsk = current;

        /*
         * If this architecture has a platform capability string, copy it
         * to userspace.  In some cases (Sparc), this info is impossible
         * for userspace to get any other way, in others (i386) it is
         * merely difficult.
         */

        u_platform = NULL;
        if (k_platform) {
                size_t len = strlen(k_platform) + 1;

#ifdef CONFIG_X86_HT
                /*
                 * In some cases (e.g. Hyper-Threading), we want to avoid L1
                 * evictions by the processes running on the same package. One
                 * thing we can do is to shuffle the initial stack for them.
                 *
                 * The conditionals here are unneeded, but kept in to make the
                 * code behaviour the same as pre change unless we have
                 * hyperthreaded processors. This should be cleaned up
                 * before 2.6
                 */

                if (smp_num_siblings > 1)
                        STACK_ALLOC(p, ((current->pid % 64) << 7));
#endif
                u_platform = (elf_addr_t *) STACK_ALLOC(p, len);
                __copy_to_user(u_platform, k_platform, len);
        }

        /* Create the ELF interpreter info */
        elf_info = (elf_addr_t *) current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
        do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)

#ifdef ARCH_DLINFO
        /*
         * ARCH_DLINFO must come first so PPC can do its special alignment of
         * AUXV.
         */
        ARCH_DLINFO;
#endif
        NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
        NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
        NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
        NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
        NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
        NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
        NEW_AUX_ENT(AT_BASE, interp_load_addr);
        NEW_AUX_ENT(AT_FLAGS, 0);
        NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
        NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
        NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
        NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
        NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
        NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
        if (k_platform) {
                NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(long)u_platform);
        }
#undef NEW_AUX_ENT
        /* AT_NULL is zero; clear the rest too */
        memset(&elf_info[ei_index], 0,
               sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

        /* And advance past the AT_NULL entry.  */
        ei_index += 2;

        sp = STACK_ADD(p, ei_index);

        items = (argc + 1) + (envc + 1);
        if (interp_aout) {
                items += 3; /* a.out interpreters require argv & envp too */
        } else {
                items += 1; /* ELF interpreters only put argc on the stack */
        }
        bprm->p = STACK_ROUND(sp, items);

        /* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
        sp = (elf_addr_t *)bprm->p - items - ei_index;
        bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
#else
        sp = (elf_addr_t *)bprm->p;
#endif

        /* Now, let's put argc (and argv, envp if appropriate) on the stack */
        __put_user(argc, sp++);
        if (interp_aout) {
                argv = sp + 2;
                envp = argv + argc + 1;
                __put_user((elf_addr_t)(long)argv, sp++);
                __put_user((elf_addr_t)(long)envp, sp++);
        } else {
                argv = sp;
                envp = argv + argc + 1;
        }

        /* Populate argv and envp */
        p = current->mm->arg_start;
        while (argc-- > 0) {
                size_t len;
                __put_user((elf_addr_t)p, argv++);
                len = strnlen_user((void *)p, PAGE_SIZE*MAX_ARG_PAGES);
                if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
                        return;
                p += len;
        }
        __put_user(0, argv);
        current->mm->arg_end = current->mm->env_start = p;
        while (envc-- > 0) {
                size_t len;
                __put_user((elf_addr_t)p, envp++);
                len = strnlen_user((void *)p, PAGE_SIZE*MAX_ARG_PAGES);
                if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
                        return;
                p += len;
        }
        __put_user(0, envp);
        current->mm->env_end = p;

        /* Put the elf_info on the stack in the right place.  */
        sp = (elf_addr_t *)envp + 1;
        copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t));
}

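/*
 * Default elf_map() (an architecture may provide its own): map one ELF
 * segment.  p_vaddr and p_offset are congruent modulo ELF_MIN_ALIGN, so
 * the start address is rounded down to a page boundary and the same
 * slack is added to both the mapping length and the file offset.
 */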
#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
                        struct elf_phdr *eppnt, int prot, int type)
{
        unsigned long map_addr;

        down_write(&current->mm->mmap_sem);
        map_addr = do_mmap(filep, ELF_PAGESTART(addr),
                           eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr), prot, type,
                           eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr));
        up_write(&current->mm->mmap_sem);
        return(map_addr);
}

#endif /* !elf_map */

/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
                                     struct file * interpreter,
                                     unsigned long *interp_load_addr)
{
        struct elf_phdr *elf_phdata;
        struct elf_phdr *eppnt;
        unsigned long load_addr = 0;
        int load_addr_set = 0;
        unsigned long last_bss = 0, elf_bss = 0;
        unsigned long error = ~0UL;
        int retval, i, size;

        /* First of all, some simple consistency checks */
        if (interp_elf_ex->e_type != ET_EXEC &&
            interp_elf_ex->e_type != ET_DYN)
                goto out;
        if (!elf_check_arch(interp_elf_ex))
                goto out;
        if (!interpreter->f_op || !interpreter->f_op->mmap)
                goto out;

        /*
         * If the size of this structure has changed, then punt, since
         * we will be doing the wrong thing.
         */
        if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
                goto out;
        if (interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
                goto out;

        /* Now read in all of the header information */

        size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
        if (size > ELF_MIN_ALIGN)
                goto out;
        elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
        if (!elf_phdata)
                goto out;

        retval = kernel_read(interpreter, interp_elf_ex->e_phoff, (char *)elf_phdata, size);
        error = retval;
        if (retval < 0)
                goto out_close;

        eppnt = elf_phdata;
        for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
                if (eppnt->p_type == PT_LOAD) {
                        int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
                        int elf_prot = 0;
                        unsigned long vaddr = 0;
                        unsigned long k, map_addr;

                        if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
                        if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
                        if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
                        vaddr = eppnt->p_vaddr;
                        if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
                                elf_type |= MAP_FIXED;

                        map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
                        if (BAD_ADDR(map_addr))
                                goto out_close;

                        if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
                                load_addr = map_addr - ELF_PAGESTART(vaddr);
                                load_addr_set = 1;
                        }

                        /*
                         * Find the end of the file mapping for this phdr, and keep
                         * track of the largest address we see for this.
                         */
                        k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
                        if (k > elf_bss)
                                elf_bss = k;

                        /*
                         * Do the same thing for the memory mapping - between
                         * elf_bss and last_bss is the bss section.
                         */
                        k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
                        if (k > last_bss)
                                last_bss = k;
                }
        }

        /*
         * Now fill out the bss section.  First pad the last page up
         * to the page boundary, and then perform a mmap to make sure
         * that there are zero-mapped pages up to and including the
         * last bss page.
         */
        padzero(elf_bss);
        elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);   /* What we have mapped so far */

        /* Map the last of the bss segment */
        if (last_bss > elf_bss)
                do_brk(elf_bss, last_bss - elf_bss);

        *interp_load_addr = load_addr;
        error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;

out_close:
        kfree(elf_phdata);
out:
        return error;
}

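/*
 * Load an a.out format interpreter (e.g. the traditional ld.so) on behalf
 * of an ELF executable.  Returns the interpreter's entry point, or ~0UL
 * on failure.
 */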
static unsigned long load_aout_interp(struct exec * interp_ex,
                                      struct file * interpreter)
{
        unsigned long text_data, elf_entry = ~0UL;
        char * addr;
        loff_t offset;

        current->mm->end_code = interp_ex->a_text;
        text_data = interp_ex->a_text + interp_ex->a_data;
        current->mm->end_data = text_data;
        current->mm->brk = interp_ex->a_bss + text_data;

        switch (N_MAGIC(*interp_ex)) {
        case OMAGIC:
                offset = 32;
                addr = (char *) 0;
                break;
        case ZMAGIC:
        case QMAGIC:
                offset = N_TXTOFF(*interp_ex);
                addr = (char *) N_TXTADDR(*interp_ex);
                break;
        default:
                goto out;
        }

        do_brk(0, text_data);
        if (!interpreter->f_op || !interpreter->f_op->read)
                goto out;
        if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
                goto out;
        flush_icache_range((unsigned long)addr,
                           (unsigned long)addr + text_data);

        do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
                interp_ex->a_bss);
        elf_entry = interp_ex->a_entry;

out:
        return elf_entry;
}

/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

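/*
 * While the interpreter's headers are being probed below,
 * INTERPRETER_AOUT and INTERPRETER_ELF are treated as a bit mask and
 * narrowed down to a single value once the magic numbers have been
 * examined.
 */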
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
        struct file *interpreter = NULL; /* to shut gcc up */
        unsigned long load_addr = 0, load_bias = 0;
        int load_addr_set = 0;
        char * elf_interpreter = NULL;
        unsigned int interpreter_type = INTERPRETER_NONE;
        unsigned char ibcs2_interpreter = 0;
        unsigned long error;
        struct elf_phdr * elf_ppnt, *elf_phdata;
        unsigned long elf_bss, elf_brk;
        int elf_exec_fileno;
        int retval, i;
        unsigned int size;
        unsigned long elf_entry, interp_load_addr = 0;
        unsigned long start_code, end_code, start_data, end_data;
        unsigned long reloc_func_desc = 0;
        struct elfhdr elf_ex;
        struct elfhdr interp_elf_ex;
        struct exec interp_ex;
        char passed_fileno[6];

        /* Get the exec-header */
        elf_ex = *((struct elfhdr *) bprm->buf);

        retval = -ENOEXEC;
        /* First of all, some simple consistency checks */
        if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
                goto out;

        if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN)
                goto out;
        if (!elf_check_arch(&elf_ex))
                goto out;
        if (!bprm->file->f_op || !bprm->file->f_op->mmap)
                goto out;

        /* Now read in all of the header information */

        retval = -ENOMEM;
        if (elf_ex.e_phentsize != sizeof(struct elf_phdr))
                goto out;
        if (elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
                goto out;
        size = elf_ex.e_phnum * sizeof(struct elf_phdr);
        elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
        if (!elf_phdata)
                goto out;

        retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *) elf_phdata, size);
        if (retval < 0)
                goto out_free_ph;

        retval = get_unused_fd();
        if (retval < 0)
                goto out_free_ph;
        get_file(bprm->file);
        fd_install(elf_exec_fileno = retval, bprm->file);

        elf_ppnt = elf_phdata;
        elf_bss = 0;
        elf_brk = 0;

        start_code = ~0UL;
        end_code = 0;
        start_data = 0;
        end_data = 0;

        for (i = 0; i < elf_ex.e_phnum; i++) {
                if (elf_ppnt->p_type == PT_INTERP) {
                        /* This is the program interpreter used for
                         * shared libraries - for now assume that this
                         * is an a.out format binary
                         */

                        retval = -ENOMEM;
                        if (elf_ppnt->p_filesz > PATH_MAX)
                                goto out_free_file;
                        elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
                                                           GFP_KERNEL);
                        if (!elf_interpreter)
                                goto out_free_file;

                        retval = kernel_read(bprm->file, elf_ppnt->p_offset,
                                           elf_interpreter,
                                           elf_ppnt->p_filesz);
                        if (retval < 0)
                                goto out_free_interp;
                        /* If the program interpreter is one of these two,
                         * then assume an iBCS2 image. Otherwise assume
                         * a native linux image.
                         */
                        if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 ||
                            strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0)
                                ibcs2_interpreter = 1;

                        /*
                         * The early SET_PERSONALITY here is so that the lookup
                         * for the interpreter happens in the namespace of the
                         * to-be-execed image.  SET_PERSONALITY can select an
                         * alternate root.
                         *
                         * However, SET_PERSONALITY is NOT allowed to switch
                         * this task into the new image's memory mapping
                         * policy - that is, TASK_SIZE must still evaluate to
                         * that which is appropriate to the execing application.
                         * This is because exit_mmap() needs to have TASK_SIZE
                         * evaluate to the size of the old image.
                         *
                         * So if (say) a 64-bit application is execing a 32-bit
                         * application it is the architecture's responsibility
                         * to defer changing the value of TASK_SIZE until the
                         * switch really is going to happen - do this in
                         * flush_thread().      - akpm
                         */
                        SET_PERSONALITY(elf_ex, ibcs2_interpreter);

                        interpreter = open_exec(elf_interpreter);
                        retval = PTR_ERR(interpreter);
                        if (IS_ERR(interpreter))
                                goto out_free_interp;
                        retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
                        if (retval < 0)
                                goto out_free_dentry;

                        /* Get the exec headers */
                        interp_ex = *((struct exec *) bprm->buf);
                        interp_elf_ex = *((struct elfhdr *) bprm->buf);
                        break;
                }
                elf_ppnt++;
        }

        /* Some simple consistency checks for the interpreter */
        if (elf_interpreter) {
                interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

                /* Now figure out which format our binary is */
                if ((N_MAGIC(interp_ex) != OMAGIC) &&
                    (N_MAGIC(interp_ex) != ZMAGIC) &&
                    (N_MAGIC(interp_ex) != QMAGIC))
                        interpreter_type = INTERPRETER_ELF;

                if (memcmp(interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
                        interpreter_type &= ~INTERPRETER_ELF;

                retval = -ELIBBAD;
                if (!interpreter_type)
                        goto out_free_dentry;

                /* Make sure only one type was selected */
                if ((interpreter_type & INTERPRETER_ELF) &&
                     interpreter_type != INTERPRETER_ELF) {
                        // FIXME - ratelimit this before re-enabling
                        // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
                        interpreter_type = INTERPRETER_ELF;
                }
        } else {
                /* Executables without an interpreter also need a personality */
                SET_PERSONALITY(elf_ex, ibcs2_interpreter);
        }

        /* OK, we are done with that, now set up the arg stuff,
           and then start this sucker up */

        if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
                char *passed_p = passed_fileno;
                sprintf(passed_fileno, "%d", elf_exec_fileno);

                if (elf_interpreter) {
                        retval = copy_strings_kernel(1, &passed_p, bprm);
                        if (retval)
                                goto out_free_dentry;
                        bprm->argc++;
                }
        }

        /* Flush all traces of the currently running executable */
        retval = flush_old_exec(bprm);
        if (retval)
                goto out_free_dentry;

        /* OK, This is the point of no return */
        current->mm->start_data = 0;
        current->mm->end_data = 0;
        current->mm->end_code = 0;
        current->mm->mmap = NULL;
        current->flags &= ~PF_FORKNOEXEC;

        /* Do this immediately, since STACK_TOP as used in setup_arg_pages
           may depend on the personality.  */
        SET_PERSONALITY(elf_ex, ibcs2_interpreter);

        /* Do this so that we can load the interpreter, if need be.  We will
           change some of these later */
        current->mm->rss = 0;
        current->mm->free_area_cache = TASK_UNMAPPED_BASE;
        retval = setup_arg_pages(bprm);
        if (retval < 0) {
                send_sig(SIGKILL, current, 0);
                goto out_free_dentry;
        }

        current->mm->start_stack = bprm->p;

        /* Now we do a little grungy work by mmaping the ELF image into
           the correct location in memory.  At this point, we assume that
           the image should be loaded at fixed address, not at a variable
           address. */

        for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
                int elf_prot = 0, elf_flags;
                unsigned long k, vaddr;

                if (elf_ppnt->p_type != PT_LOAD)
                        continue;

                if (unlikely (elf_brk > elf_bss)) {
                        unsigned long nbyte;

                        /* There was a PT_LOAD segment with p_memsz > p_filesz
                           before this one. Map anonymous pages, if needed,
                           and clear the area.  */
                        set_brk (elf_bss + load_bias, elf_brk + load_bias);
                        nbyte = ELF_PAGEOFFSET(elf_bss);
                        if (nbyte) {
                                nbyte = ELF_MIN_ALIGN - nbyte;
                                if (nbyte > elf_brk - elf_bss)
                                        nbyte = elf_brk - elf_bss;
                                clear_user((void *) elf_bss + load_bias, nbyte);
                        }
                }

                if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
                if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
                if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;

                elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;

                vaddr = elf_ppnt->p_vaddr;
                if (elf_ex.e_type == ET_EXEC || load_addr_set) {
                        elf_flags |= MAP_FIXED;
                } else if (elf_ex.e_type == ET_DYN) {
                        /* Try and get dynamic programs out of the way of the default mmap
                           base, as well as whatever program they might try to exec.  This
                           is because the brk will follow the loader, and is not movable.  */
                        load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
                }

                error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
                if (BAD_ADDR(error))
                        continue;

                if (!load_addr_set) {
                        load_addr_set = 1;
                        load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
                        if (elf_ex.e_type == ET_DYN) {
                                load_bias += error -
                                             ELF_PAGESTART(load_bias + vaddr);
                                load_addr += load_bias;
                                reloc_func_desc = load_bias;
                        }
                }
                k = elf_ppnt->p_vaddr;
                if (k < start_code) start_code = k;
                if (start_data < k) start_data = k;

                k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

                if (k > elf_bss)
                        elf_bss = k;
                if ((elf_ppnt->p_flags & PF_X) && end_code < k)
                        end_code = k;
                if (end_data < k)
                        end_data = k;
                k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
                if (k > elf_brk)
                        elf_brk = k;
        }

        elf_ex.e_entry += load_bias;
        elf_bss += load_bias;
        elf_brk += load_bias;
        start_code += load_bias;
        end_code += load_bias;
        start_data += load_bias;
        end_data += load_bias;

        if (elf_interpreter) {
                if (interpreter_type == INTERPRETER_AOUT)
                        elf_entry = load_aout_interp(&interp_ex,
                                                     interpreter);
                else
                        elf_entry = load_elf_interp(&interp_elf_ex,
                                                    interpreter,
                                                    &interp_load_addr);

                allow_write_access(interpreter);
                fput(interpreter);
                kfree(elf_interpreter);

                if (BAD_ADDR(elf_entry)) {
                        printk(KERN_ERR "Unable to load interpreter\n");
                        kfree(elf_phdata);
                        send_sig(SIGSEGV, current, 0);
                        retval = -ENOEXEC; /* Nobody gets to see this, but.. */
                        goto out;
                }
                reloc_func_desc = interp_load_addr;
        } else {
                elf_entry = elf_ex.e_entry;
        }

        kfree(elf_phdata);

        if (interpreter_type != INTERPRETER_AOUT)
                sys_close(elf_exec_fileno);

        set_binfmt(&elf_format);

        compute_creds(bprm);
        current->flags &= ~PF_FORKNOEXEC;
        create_elf_tables(bprm, &elf_ex, (interpreter_type == INTERPRETER_AOUT),
                        load_addr, interp_load_addr);
        /* N.B. passed_fileno might not be initialized? */
        if (interpreter_type == INTERPRETER_AOUT)
                current->mm->arg_start += strlen(passed_fileno) + 1;
        current->mm->end_code = end_code;
        current->mm->start_code = start_code;
        current->mm->start_data = start_data;
        current->mm->end_data = end_data;
        current->mm->start_stack = bprm->p;

        /* Calling set_brk effectively mmaps the pages that we need
         * for the bss and break sections
         */
        set_brk(elf_bss, elf_brk);

        padzero(elf_bss);

        if (current->personality & MMAP_PAGE_ZERO) {
                /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
                   and some applications "depend" upon this behavior.
                   Since we do not have the power to recompile these, we
                   emulate the SVr4 behavior.  Sigh.  */
                /* N.B. Shouldn't the size here be PAGE_SIZE?? */
                down_write(&current->mm->mmap_sem);
                error = do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC,
                                MAP_FIXED | MAP_PRIVATE, 0);
                up_write(&current->mm->mmap_sem);
        }

#ifdef ELF_PLAT_INIT
        /*
         * The ABI may specify that certain registers be set up in special
         * ways (on i386 %edx is the address of a DT_FINI function, for
         * example.  In addition, it may also specify (eg, PowerPC64 ELF)
         * that the e_entry field is the address of the function descriptor
         * for the startup routine, rather than the address of the startup
         * routine itself.  This macro performs whatever initialization to
         * the regs structure is required as well as any relocations to the
         * function descriptor entries when executing dynamically linked apps.
         */
        ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

        start_thread(regs, elf_entry, bprm->p);
        if (unlikely(current->ptrace & PT_PTRACED)) {
                if (current->ptrace & PT_TRACE_EXEC)
                        ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
                else
                        send_sig(SIGTRAP, current, 0);
        }
        retval = 0;
out:
        return retval;

        /* error cleanup */
out_free_dentry:
        allow_write_access(interpreter);
        fput(interpreter);
out_free_interp:
        if (elf_interpreter)
                kfree(elf_interpreter);
out_free_file:
        sys_close(elf_exec_fileno);
out_free_ph:
        kfree(elf_phdata);
        goto out;
}

/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */

static int load_elf_library(struct file *file)
{
        struct elf_phdr *elf_phdata;
        struct elf_phdr *eppnt;
        unsigned long elf_bss, bss, len;
        int retval, error, i, j;
        struct elfhdr elf_ex;

        error = -ENOEXEC;
        retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
        if (retval != sizeof(elf_ex))
                goto out;

        if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
                goto out;

        /* First of all, some simple consistency checks */
        if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
            !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
                goto out;

        /* Now read in all of the header information */

        j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
        /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

        error = -ENOMEM;
        elf_phdata = (struct elf_phdr *) kmalloc(j, GFP_KERNEL);
        if (!elf_phdata)
                goto out;

        error = -ENOEXEC;
        retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, j);
        if (retval != j)
                goto out_free_ph;

        for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
                if ((elf_phdata + i)->p_type == PT_LOAD) j++;
        if (j != 1)
                goto out_free_ph;

        /* Walk with a cursor so elf_phdata stays valid for the kfree below */
        eppnt = elf_phdata;
        while (eppnt->p_type != PT_LOAD)
                eppnt++;

        /* Now use mmap to map the library into memory. */
        down_write(&current->mm->mmap_sem);
        error = do_mmap(file,
                        ELF_PAGESTART(eppnt->p_vaddr),
                        (eppnt->p_filesz +
                         ELF_PAGEOFFSET(eppnt->p_vaddr)),
                        PROT_READ | PROT_WRITE | PROT_EXEC,
                        MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
                        (eppnt->p_offset -
                         ELF_PAGEOFFSET(eppnt->p_vaddr)));
        up_write(&current->mm->mmap_sem);
        if (error != ELF_PAGESTART(eppnt->p_vaddr))
                goto out_free_ph;

        elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
        padzero(elf_bss);

        len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
        bss = eppnt->p_memsz + eppnt->p_vaddr;
        if (bss > len)
                do_brk(len, bss - len);
        error = 0;

out_free_ph:
        kfree(elf_phdata);
out:
        return error;
}

/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */
#ifdef USE_ELF_CORE_DUMP

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
        return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}

static int dump_seek(struct file *file, off_t off)
{
        if (file->f_op->llseek) {
                if (file->f_op->llseek(file, off, 0) != off)
                        return 0;
        } else
                file->f_pos = off;
        return 1;
}

/*
 * Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 *
 * I think we should skip something. But I am not sure how. H.J.
 */
static int maydump(struct vm_area_struct *vma)
{
        /*
         * If we may not read the contents, don't allow us to dump
         * them either. "dump_write()" can't handle it anyway.
         */
        if (!(vma->vm_flags & VM_READ))
                return 0;

        /* Do not dump I/O mapped devices! -DaveM */
        if (vma->vm_flags & VM_IO)
                return 0;
#if 1
        if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
                return 1;
        if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
                return 0;
#endif
        return 1;
}

#define roundup(x, y)  ((((x)+((y)-1))/(y))*(y))

/* An ELF note in memory */
struct memelfnote
{
        const char *name;
        int type;
        unsigned int datasz;
        void *data;
};

static int notesize(struct memelfnote *en)
{
        int sz;

        sz = sizeof(struct elf_note);
        sz += roundup(strlen(en->name) + 1, 4);
        sz += roundup(en->datasz, 4);

        return sz;
}

#define DUMP_WRITE(addr, nr)    \
        do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
#define DUMP_SEEK(off)  \
        do { if (!dump_seek(file, (off))) return 0; } while(0)

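/*
 * Emit one note: the elf_note header, then the name, then the descriptor,
 * with both name and descriptor padded out to 4-byte boundaries as the
 * ELF note format requires.
 */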
static int writenote(struct memelfnote *men, struct file *file)
{
        struct elf_note en;

        en.n_namesz = strlen(men->name) + 1;
        en.n_descsz = men->datasz;
        en.n_type = men->type;

        DUMP_WRITE(&en, sizeof(en));
        DUMP_WRITE(men->name, en.n_namesz);
        /* XXX - cast from long long to long to avoid need for libgcc.a */
        DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));      /* XXX */
        DUMP_WRITE(men->data, men->datasz);
        DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));      /* XXX */

        return 1;
}
#undef DUMP_WRITE
#undef DUMP_SEEK

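/*
 * Redefined for elf_core_dump(): this DUMP_WRITE also counts the bytes
 * written against the RLIMIT_CORE limit, and truncates the dump once the
 * limit would be exceeded.
 */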
#define DUMP_WRITE(addr, nr)    \
        if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
                goto end_coredump;
#define DUMP_SEEK(off)  \
        if (!dump_seek(file, (off))) \
                goto end_coredump;

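/*
 * Fill in an ELF header for the core file: ET_CORE, with one program
 * header per dumped segment (the caller includes the note segment in
 * segs) and no section table.
 */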
static inline void fill_elf_header(struct elfhdr *elf, int segs)
{
        memcpy(elf->e_ident, ELFMAG, SELFMAG);
        elf->e_ident[EI_CLASS] = ELF_CLASS;
        elf->e_ident[EI_DATA] = ELF_DATA;
        elf->e_ident[EI_VERSION] = EV_CURRENT;
        elf->e_ident[EI_OSABI] = ELF_OSABI;
        memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

        elf->e_type = ET_CORE;
        elf->e_machine = ELF_ARCH;
        elf->e_version = EV_CURRENT;
        elf->e_entry = 0;
        elf->e_phoff = sizeof(struct elfhdr);
        elf->e_shoff = 0;
        elf->e_flags = 0;
        elf->e_ehsize = sizeof(struct elfhdr);
        elf->e_phentsize = sizeof(struct elf_phdr);
        elf->e_phnum = segs;
        elf->e_shentsize = 0;
        elf->e_shnum = 0;
        elf->e_shstrndx = 0;
        return;
}

static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
{
        phdr->p_type = PT_NOTE;
        phdr->p_offset = offset;
        phdr->p_vaddr = 0;
        phdr->p_paddr = 0;
        phdr->p_filesz = sz;
        phdr->p_memsz = 0;
        phdr->p_flags = 0;
        phdr->p_align = 0;
        return;
}

static void fill_note(struct memelfnote *note, const char *name, int type,
                unsigned int sz, void *data)
{
        note->name = name;
        note->type = type;
        note->datasz = sz;
        note->data = data;
        return;
}

/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
                          struct task_struct *p, long signr)
{
        prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
        prstatus->pr_sigpend = p->pending.signal.sig[0];
        prstatus->pr_sighold = p->blocked.sig[0];
        prstatus->pr_pid = p->pid;
        prstatus->pr_ppid = p->parent->pid;
        prstatus->pr_pgrp = process_group(p);
        prstatus->pr_sid = process_session(p);
        jiffies_to_timeval(p->utime, &prstatus->pr_utime);
        jiffies_to_timeval(p->stime, &prstatus->pr_stime);
        jiffies_to_timeval(p->cutime, &prstatus->pr_cutime);
        jiffies_to_timeval(p->cstime, &prstatus->pr_cstime);
}

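/*
 * Fill in the process-wide info for NT_PRPSINFO: the command line
 * (copied from user space), credentials, state and nice value.
 */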
static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
                        struct mm_struct *mm)
{
        int i, len;

        /* first copy the parameters from user space */
        memset(psinfo, 0, sizeof(struct elf_prpsinfo));

        len = mm->arg_end - mm->arg_start;
        if (len >= ELF_PRARGSZ)
                len = ELF_PRARGSZ-1;
        copy_from_user(&psinfo->pr_psargs,
                       (const char *)mm->arg_start, len);
        for (i = 0; i < len; i++)
                if (psinfo->pr_psargs[i] == 0)
                        psinfo->pr_psargs[i] = ' ';
        psinfo->pr_psargs[len] = 0;

        psinfo->pr_pid = p->pid;
        psinfo->pr_ppid = p->parent->pid;
        psinfo->pr_pgrp = process_group(p);
        psinfo->pr_sid = process_session(p);

        i = p->state ? ffz(~p->state) + 1 : 0;
        psinfo->pr_state = i;
        psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
        psinfo->pr_zomb = psinfo->pr_sname == 'Z';
        psinfo->pr_nice = task_nice(p);
        psinfo->pr_flag = p->flags;
        psinfo->pr_uid = NEW_TO_OLD_UID(p->uid);
        psinfo->pr_gid = NEW_TO_OLD_GID(p->gid);
        strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

        return;
}

/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
        struct list_head list;
        struct elf_prstatus prstatus;   /* NT_PRSTATUS */
        elf_fpregset_t fpu;             /* NT_PRFPREG */
#ifdef ELF_CORE_COPY_XFPREGS
        elf_fpxregset_t xfpu;           /* NT_PRXFPREG */
#endif
        struct memelfnote notes[3];
        int num_notes;
};

/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then
 * create a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct task_struct * p, struct list_head * thread_list)
{
        struct elf_thread_status *t;
        int sz = 0;

        t = kmalloc(sizeof(*t), GFP_ATOMIC);
        if (!t)
                return 0;
        memset(t, 0, sizeof(*t));

        INIT_LIST_HEAD(&t->list);
        t->num_notes = 0;

        fill_prstatus(&t->prstatus, p, signr);
        elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

        fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
        t->num_notes++;
        sz += notesize(&t->notes[0]);

        if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
                fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
                t->num_notes++;
                sz += notesize(&t->notes[1]);
        }

#ifdef ELF_CORE_COPY_XFPREGS
        if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
                fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
                t->num_notes++;
                sz += notesize(&t->notes[2]);
        }
#endif
        list_add(&t->list, thread_list);
        return sz;
}

/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
{
#define NUM_NOTES       6
        int has_dumped = 0;
        mm_segment_t fs;
        int segs;
        size_t size = 0;
        int i;
        struct vm_area_struct *vma;
        struct elfhdr *elf = NULL;
        off_t offset = 0, dataoff;
        unsigned long limit = current->rlim[RLIMIT_CORE].rlim_cur;
        int numnote;
        struct memelfnote *notes = NULL;
        struct elf_prstatus *prstatus = NULL;   /* NT_PRSTATUS */
        struct elf_prpsinfo *psinfo = NULL;     /* NT_PRPSINFO */
        struct task_struct *g, *p;
        LIST_HEAD(thread_list);
        struct list_head *t;
        elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
        elf_fpxregset_t *xfpu = NULL;
#endif
        int thread_status_size = 0;
        elf_addr_t *auxv;

        /*
         * We no longer stop all VM operations.
         *
         * This is because those processes that could possibly change map_count
         * or the mmap / vma pages are now blocked in do_exit on current
         * finishing this core dump.
         *
         * Only ptrace can touch these memory addresses, but it doesn't change
         * the map_count or the pages allocated.  So no possibility of crashing
         * exists while dumping the mm->vm_next areas to the core file.
         */

        /* alloc memory for large data structures: too large to be on stack */
        elf = kmalloc(sizeof(*elf), GFP_KERNEL);
        if (!elf)
                goto cleanup;
        prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
        if (!prstatus)
                goto cleanup;
        psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
        if (!psinfo)
                goto cleanup;
        notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
        if (!notes)
                goto cleanup;
        fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
        if (!fpu)
                goto cleanup;
#ifdef ELF_CORE_COPY_XFPREGS
        xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
        if (!xfpu)
                goto cleanup;
#endif

        /* capture the status of all other threads */
        if (signr) {
                read_lock(&tasklist_lock);
                do_each_thread(g,p)
                        if (current->mm == p->mm && current != p) {
                                int sz = elf_dump_thread_status(signr, p, &thread_list);
                                if (!sz) {
                                        read_unlock(&tasklist_lock);
                                        goto cleanup;
                                } else
                                        thread_status_size += sz;
                        }
                while_each_thread(g,p);
                read_unlock(&tasklist_lock);
        }

        /* now collect the dump for the current */
        memset(prstatus, 0, sizeof(*prstatus));
        fill_prstatus(prstatus, current, signr);
        elf_core_copy_regs(&prstatus->pr_reg, regs);

        segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
        segs += ELF_CORE_EXTRA_PHDRS;
#endif

        /* Set up header */
        fill_elf_header(elf, segs+1);   /* including notes section */

        has_dumped = 1;
        current->flags |= PF_DUMPCORE;

        /*
         * Set up the notes in similar form to SVR4 core dumps made
         * with info from their /proc.
         */

        fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);

        fill_psinfo(psinfo, current->group_leader, current->mm);
        fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

        fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);

        numnote = 3;

        auxv = (elf_addr_t *) current->mm->saved_auxv;

        i = 0;
        do
                i += 2;
        while (auxv[i - 2] != AT_NULL);
        fill_note(&notes[numnote++], "CORE", NT_AUXV,
                  i * sizeof (elf_addr_t), auxv);

        /* Try to dump the FPU. */
        if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
                fill_note(notes + numnote++,
                          "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
        if (elf_core_copy_task_xfpregs(current, xfpu))
                fill_note(notes + numnote++,
                          "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
#endif

        fs = get_fs();
        set_fs(KERNEL_DS);

        DUMP_WRITE(elf, sizeof(*elf));
        offset += sizeof(*elf);                         /* Elf header */
        offset += (segs+1) * sizeof(struct elf_phdr);   /* Program headers */

        /* Write notes phdr entry */
        {
                struct elf_phdr phdr;
                int sz = 0;

                for (i = 0; i < numnote; i++)
                        sz += notesize(notes + i);

                sz += thread_status_size;

                fill_elf_note_phdr(&phdr, sz, offset);
                offset += sz;
                DUMP_WRITE(&phdr, sizeof(phdr));
        }

        /* Page-align dumped data */
        dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

        /* Write program headers for segments dump */
        for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
                struct elf_phdr phdr;
                size_t sz;

                sz = vma->vm_end - vma->vm_start;

                phdr.p_type = PT_LOAD;
                phdr.p_offset = offset;
                phdr.p_vaddr = vma->vm_start;
                phdr.p_paddr = 0;
                phdr.p_filesz = maydump(vma) ? sz : 0;
                phdr.p_memsz = sz;
                offset += phdr.p_filesz;
                phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
                if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
                if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
                phdr.p_align = ELF_EXEC_PAGESIZE;

                DUMP_WRITE(&phdr, sizeof(phdr));
        }

#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
        ELF_CORE_WRITE_EXTRA_PHDRS;
#endif

        /* write out the notes section */
        for (i = 0; i < numnote; i++)
                if (!writenote(notes + i, file))
                        goto end_coredump;

        /* write out the thread status notes section */
        list_for_each(t, &thread_list) {
                struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
                for (i = 0; i < tmp->num_notes; i++)
                        if (!writenote(&tmp->notes[i], file))
                                goto end_coredump;
        }

        DUMP_SEEK(dataoff);

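        /*
         * Dump the data pages.  Walk each dumpable VMA a page at a time;
         * pages we may not dump, and pages backed by the shared zero page,
         * become holes (seeks) in the core file instead of written data.
         */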
        for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
                unsigned long addr;

                if (!maydump(vma))
                        continue;

                for (addr = vma->vm_start;
                     addr < vma->vm_end;
                     addr += PAGE_SIZE) {
                        struct page* page;
                        struct vm_area_struct *vma;

                        if (get_user_pages(current, current->mm, addr, 1, 0, 1,
                                           &page, &vma) <= 0) {
                                DUMP_SEEK (file->f_pos + PAGE_SIZE);
                        } else {
                                if (page == ZERO_PAGE(addr)) {
                                        DUMP_SEEK (file->f_pos + PAGE_SIZE);
                                } else {
                                        void *kaddr;
                                        flush_cache_page(vma, addr);
                                        kaddr = kmap(page);
                                        DUMP_WRITE(kaddr, PAGE_SIZE);
                                        kunmap(page);
                                }
                                page_cache_release(page);
                        }
                }
        }

#ifdef ELF_CORE_WRITE_EXTRA_DATA
        ELF_CORE_WRITE_EXTRA_DATA;
#endif

        if ((off_t) file->f_pos != offset) {
                /* Sanity check */
                printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
                       (off_t) file->f_pos, offset);
        }

end_coredump:
        set_fs(fs);

cleanup:
        while (!list_empty(&thread_list)) {
                struct list_head *tmp = thread_list.next;
                list_del(tmp);
                kfree(list_entry(tmp, struct elf_thread_status, list));
        }

        kfree(elf);
        kfree(prstatus);
        kfree(psinfo);
        kfree(notes);
        kfree(fpu);
#ifdef ELF_CORE_COPY_XFPREGS
        kfree(xfpu);
#endif
        return has_dumped;
#undef NUM_NOTES
}

#endif          /* USE_ELF_CORE_DUMP */

static int __init init_elf_binfmt(void)
{
        return register_binfmt(&elf_format);
}

static void __exit exit_elf_binfmt(void)
{
        /* Remove the ELF loader. */
        unregister_binfmt(&elf_format);
}

module_init(init_elf_binfmt)
module_exit(exit_elf_binfmt)
MODULE_LICENSE("GPL");