/* binfmt_elf_fdpic.c: FDPIC ELF binary format
 *
 * Copyright (C) 2003, 2004, 2006 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 * Derived from binfmt_elf.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/highuid.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/init.h>
#include <linux/elf.h>
#include <linux/elf-fdpic.h>
#include <linux/elfcore.h>

#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/pgalloc.h>

typedef char *elf_caddr_t;

#if 0
#define kdebug(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ )
#else
#define kdebug(fmt, ...) do {} while(0)
#endif

#if 0
#define kdcore(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ )
#else
#define kdcore(fmt, ...) do {} while(0)
#endif

MODULE_LICENSE("GPL");

static int load_elf_fdpic_binary(struct linux_binprm *, struct pt_regs *);
static int elf_fdpic_fetch_phdrs(struct elf_fdpic_params *, struct file *);
static int elf_fdpic_map_file(struct elf_fdpic_params *, struct file *,
			      struct mm_struct *, const char *);

static int create_elf_fdpic_tables(struct linux_binprm *, struct mm_struct *,
				   struct elf_fdpic_params *,
				   struct elf_fdpic_params *);

#ifndef CONFIG_MMU
static int elf_fdpic_transfer_args_to_stack(struct linux_binprm *,
					    unsigned long *);
static int elf_fdpic_map_file_constdisp_on_uclinux(struct elf_fdpic_params *,
						   struct file *,
						   struct mm_struct *);
#endif

static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *,
					     struct file *, struct mm_struct *);

#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
static int elf_fdpic_core_dump(long, struct pt_regs *, struct file *, unsigned long limit);
#endif

static struct linux_binfmt elf_fdpic_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_fdpic_binary,
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
	.core_dump	= elf_fdpic_core_dump,
#endif
	.min_coredump	= ELF_EXEC_PAGESIZE,
};

static int __init init_elf_fdpic_binfmt(void)
{
	return register_binfmt(&elf_fdpic_format);
}

static void __exit exit_elf_fdpic_binfmt(void)
{
	unregister_binfmt(&elf_fdpic_format);
}

core_initcall(init_elf_fdpic_binfmt);
module_exit(exit_elf_fdpic_binfmt);

static int is_elf_fdpic(struct elfhdr *hdr, struct file *file)
{
	if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0)
		return 0;
	if (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN)
		return 0;
	if (!elf_check_arch(hdr) || !elf_check_fdpic(hdr))
		return 0;
	if (!file->f_op || !file->f_op->mmap)
		return 0;
	return 1;
}

/*****************************************************************************/
/*
 * read the program headers table into memory
 */
static int elf_fdpic_fetch_phdrs(struct elf_fdpic_params *params,
				 struct file *file)
{
	struct elf32_phdr *phdr;
	unsigned long size;
	int retval, loop;

	if (params->hdr.e_phentsize != sizeof(struct elf_phdr))
		return -ENOMEM;
	if (params->hdr.e_phnum > 65536U / sizeof(struct elf_phdr))
		return -ENOMEM;

	size = params->hdr.e_phnum * sizeof(struct elf_phdr);
	params->phdrs = kmalloc(size, GFP_KERNEL);
	if (!params->phdrs)
		return -ENOMEM;

	retval = kernel_read(file, params->hdr.e_phoff,
			     (char *) params->phdrs, size);
	if (unlikely(retval != size))
		return retval < 0 ? retval : -ENOEXEC;

	/* determine stack size for this binary */
	phdr = params->phdrs;
	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
		if (phdr->p_type != PT_GNU_STACK)
			continue;

		if (phdr->p_flags & PF_X)
			params->flags |= ELF_FDPIC_FLAG_EXEC_STACK;
		else
			params->flags |= ELF_FDPIC_FLAG_NOEXEC_STACK;

		params->stack_size = phdr->p_memsz;
		break;
	}

	return 0;
}

/*****************************************************************************/
/*
 * load an fdpic binary into various bits of memory
 */
static int load_elf_fdpic_binary(struct linux_binprm *bprm,
				 struct pt_regs *regs)
{
	struct elf_fdpic_params exec_params, interp_params;
	struct elf_phdr *phdr;
	unsigned long stack_size, entryaddr;
#ifndef CONFIG_MMU
	unsigned long fullsize;
#endif
#ifdef ELF_FDPIC_PLAT_INIT
	unsigned long dynaddr;
#endif
	struct file *interpreter = NULL; /* to shut gcc up */
	char *interpreter_name = NULL;
	int executable_stack;
	int retval, i;

	kdebug("____ LOAD %d ____", current->pid);

	memset(&exec_params, 0, sizeof(exec_params));
	memset(&interp_params, 0, sizeof(interp_params));

	exec_params.hdr = *(struct elfhdr *) bprm->buf;
	exec_params.flags = ELF_FDPIC_FLAG_PRESENT | ELF_FDPIC_FLAG_EXECUTABLE;

	/* check that this is a binary we know how to deal with */
	retval = -ENOEXEC;
	if (!is_elf_fdpic(&exec_params.hdr, bprm->file))
		goto error;

	/* read the program header table */
	retval = elf_fdpic_fetch_phdrs(&exec_params, bprm->file);
	if (retval < 0)
		goto error;

	/* scan for a program header that specifies an interpreter */
	phdr = exec_params.phdrs;

	for (i = 0; i < exec_params.hdr.e_phnum; i++, phdr++) {
		switch (phdr->p_type) {
		case PT_INTERP:
			retval = -ENOMEM;
			if (phdr->p_filesz > PATH_MAX)
				goto error;
			retval = -ENOENT;
			if (phdr->p_filesz < 2)
				goto error;

			/* read the name of the interpreter into memory */
			interpreter_name = kmalloc(phdr->p_filesz, GFP_KERNEL);
			if (!interpreter_name)
				goto error;

			retval = kernel_read(bprm->file,
					     phdr->p_offset,
					     interpreter_name,
					     phdr->p_filesz);
			if (unlikely(retval != phdr->p_filesz)) {
				if (retval >= 0)
					retval = -ENOEXEC;
				goto error;
			}

			retval = -ENOENT;
			if (interpreter_name[phdr->p_filesz - 1] != '\0')
				goto error;

			kdebug("Using ELF interpreter %s", interpreter_name);

			/* replace the program with the interpreter */
			interpreter = open_exec(interpreter_name);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter)) {
				interpreter = NULL;
				goto error;
			}

			/*
			 * If the binary is not readable then enforce
			 * mm->dumpable = 0 regardless of the interpreter's
			 * permissions.
			 */
			if (file_permission(interpreter, MAY_READ) < 0)
				bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

			retval = kernel_read(interpreter, 0, bprm->buf,
					     BINPRM_BUF_SIZE);
			if (unlikely(retval != BINPRM_BUF_SIZE)) {
				if (retval >= 0)
					retval = -ENOEXEC;
				goto error;
			}

			interp_params.hdr = *((struct elfhdr *) bprm->buf);
			break;

		case PT_LOAD:
#ifdef CONFIG_MMU
			if (exec_params.load_addr == 0)
				exec_params.load_addr = phdr->p_vaddr;
#endif
			break;
		}
	}

	if (elf_check_const_displacement(&exec_params.hdr))
		exec_params.flags |= ELF_FDPIC_FLAG_CONSTDISP;

	/* perform insanity checks on the interpreter */
	if (interpreter_name) {
		retval = -ELIBBAD;
		if (!is_elf_fdpic(&interp_params.hdr, interpreter))
			goto error;

		interp_params.flags = ELF_FDPIC_FLAG_PRESENT;

		/* read the interpreter's program header table */
		retval = elf_fdpic_fetch_phdrs(&interp_params, interpreter);
		if (retval < 0)
			goto error;
	}

	stack_size = exec_params.stack_size;
	if (stack_size < interp_params.stack_size)
		stack_size = interp_params.stack_size;

	if (exec_params.flags & ELF_FDPIC_FLAG_EXEC_STACK)
		executable_stack = EXSTACK_ENABLE_X;
	else if (exec_params.flags & ELF_FDPIC_FLAG_NOEXEC_STACK)
		executable_stack = EXSTACK_DISABLE_X;
	else if (interp_params.flags & ELF_FDPIC_FLAG_EXEC_STACK)
		executable_stack = EXSTACK_ENABLE_X;
	else if (interp_params.flags & ELF_FDPIC_FLAG_NOEXEC_STACK)
		executable_stack = EXSTACK_DISABLE_X;
	else
		executable_stack = EXSTACK_DEFAULT;

	retval = -ENOEXEC;
	if (stack_size == 0)
		goto error;

	if (elf_check_const_displacement(&interp_params.hdr))
		interp_params.flags |= ELF_FDPIC_FLAG_CONSTDISP;

	/* flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto error;

	/* there's now no turning back... the old userspace image is dead,
	 * defunct, deceased, etc. after this point we have to exit via
	 * error_kill */
	set_personality(PER_LINUX_FDPIC);
	set_binfmt(&elf_fdpic_format);

	current->mm->start_code = 0;
	current->mm->end_code = 0;
	current->mm->start_stack = 0;
	current->mm->start_data = 0;
	current->mm->end_data = 0;
	current->mm->context.exec_fdpic_loadmap = 0;
	current->mm->context.interp_fdpic_loadmap = 0;

	current->flags &= ~PF_FORKNOEXEC;

#ifdef CONFIG_MMU
	elf_fdpic_arch_lay_out_mm(&exec_params,
				  &interp_params,
				  &current->mm->start_stack,
				  &current->mm->start_brk);

	retval = setup_arg_pages(bprm, current->mm->start_stack,
				 executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto error_kill;
	}
#endif

	/* load the executable and interpreter into memory */
	retval = elf_fdpic_map_file(&exec_params, bprm->file, current->mm,
				    "executable");
	if (retval < 0)
		goto error_kill;

	if (interpreter_name) {
		retval = elf_fdpic_map_file(&interp_params, interpreter,
					    current->mm, "interpreter");
		if (retval < 0) {
			printk(KERN_ERR "Unable to load interpreter\n");
			goto error_kill;
		}

		allow_write_access(interpreter);
		fput(interpreter);
		interpreter = NULL;
	}

#ifdef CONFIG_MMU
	if (!current->mm->start_brk)
		current->mm->start_brk = current->mm->end_data;

	current->mm->brk = current->mm->start_brk =
		PAGE_ALIGN(current->mm->start_brk);

#else
	/* create a stack and brk area big enough for everyone
	 * - the brk heap starts at the bottom and works up
	 * - the stack starts at the top and works down
	 */
	stack_size = (stack_size + PAGE_SIZE - 1) & PAGE_MASK;
	if (stack_size < PAGE_SIZE * 2)
		stack_size = PAGE_SIZE * 2;

	down_write(&current->mm->mmap_sem);
	current->mm->start_brk = do_mmap(NULL, 0, stack_size,
					 PROT_READ | PROT_WRITE | PROT_EXEC,
					 MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN,
					 0);

	if (IS_ERR_VALUE(current->mm->start_brk)) {
		up_write(&current->mm->mmap_sem);
		retval = current->mm->start_brk;
		current->mm->start_brk = 0;
		goto error_kill;
	}

	/* expand the stack mapping to use up the entire allocation granule */
	fullsize = ksize((char *) current->mm->start_brk);
	if (!IS_ERR_VALUE(do_mremap(current->mm->start_brk, stack_size,
				    fullsize, 0, 0)))
		stack_size = fullsize;
	up_write(&current->mm->mmap_sem);

	current->mm->brk = current->mm->start_brk;
	current->mm->context.end_brk = current->mm->start_brk;
	current->mm->context.end_brk +=
		(stack_size > PAGE_SIZE) ? (stack_size - PAGE_SIZE) : 0;
	current->mm->start_stack = current->mm->start_brk + stack_size;
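
	/*
	 * Note: on NOMMU the brk heap and the initial stack share the single
	 * anonymous mapping set up above - brk grows upwards from start_brk
	 * while the stack grows downwards from start_stack, and
	 * context.end_brk caps the heap so that at least one page is left
	 * for the stack.
	 */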
#endif

	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	if (create_elf_fdpic_tables(bprm, current->mm,
				    &exec_params, &interp_params) < 0)
		goto error_kill;

	kdebug("- start_code  %lx", current->mm->start_code);
	kdebug("- end_code    %lx", current->mm->end_code);
	kdebug("- start_data  %lx", current->mm->start_data);
	kdebug("- end_data    %lx", current->mm->end_data);
	kdebug("- start_brk   %lx", current->mm->start_brk);
	kdebug("- brk         %lx", current->mm->brk);
	kdebug("- start_stack %lx", current->mm->start_stack);

#ifdef ELF_FDPIC_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example).  This macro performs whatever initialization to
	 * the regs structure is required.
	 */
	dynaddr = interp_params.dynamic_addr ?: exec_params.dynamic_addr;
	ELF_FDPIC_PLAT_INIT(regs, exec_params.map_addr, interp_params.map_addr,
			    dynaddr);
#endif

	/* everything is now ready... get the userspace context ready to roll */
	entryaddr = interp_params.entry_addr ?: exec_params.entry_addr;
	start_thread(regs, entryaddr, current->mm->start_stack);

	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}

	retval = 0;

error:
	if (interpreter) {
		allow_write_access(interpreter);
		fput(interpreter);
	}
	kfree(interpreter_name);
	kfree(exec_params.phdrs);
	kfree(exec_params.loadmap);
	kfree(interp_params.phdrs);
	kfree(interp_params.loadmap);
	return retval;

	/* unrecoverable error - kill the process */
error_kill:
	send_sig(SIGSEGV, current, 0);
	goto error;
}

/*****************************************************************************/
/*
 * present useful information to the program
 */
static int create_elf_fdpic_tables(struct linux_binprm *bprm,
				   struct mm_struct *mm,
				   struct elf_fdpic_params *exec_params,
				   struct elf_fdpic_params *interp_params)
{
	unsigned long sp, csp, nitems;
	elf_caddr_t __user *argv, *envp;
	size_t platform_len = 0, len;
	char *k_platform;
	char __user *u_platform, *p;
	long hwcap;
	int loop;

	/* we're going to shovel a whole load of stuff onto the stack */
#ifdef CONFIG_MMU
	sp = bprm->p;
#else
	sp = mm->start_stack;

	/* stack the program arguments and environment */
	if (elf_fdpic_transfer_args_to_stack(bprm, &sp) < 0)
		return -EFAULT;
#endif

	/* get hold of platform and hardware capabilities masks for the machine
	 * we are running on.  In some cases (Sparc), this info is impossible
	 * to get, in others (i386) it is merely difficult.
	 */
	hwcap = ELF_HWCAP;
	k_platform = ELF_PLATFORM;
	u_platform = NULL;

	if (k_platform) {
		platform_len = strlen(k_platform) + 1;
		sp -= platform_len;
		u_platform = (char __user *) sp;
		if (__copy_to_user(u_platform, k_platform, platform_len) != 0)
			return -EFAULT;
	}

#if defined(__i386__) && defined(CONFIG_SMP)
	/* in some cases (e.g. Hyper-Threading), we want to avoid L1 evictions
	 * by the processes running on the same package. One thing we can do is
	 * to shuffle the initial stack for them.
	 *
	 * the conditionals here are unneeded, but kept in to make the code
	 * behaviour the same as pre change unless we have hyperthreaded
	 * processors. This keeps Mr Marcelo Person happier but should be
	 * removed for 2.5
	 */
	if (smp_num_siblings > 1)
		sp = sp - ((current->pid % 64) << 7);
#endif

	sp &= ~7UL;

	/* stack the load map(s) */
	len = sizeof(struct elf32_fdpic_loadmap);
	len += sizeof(struct elf32_fdpic_loadseg) * exec_params->loadmap->nsegs;
	sp = (sp - len) & ~7UL;
	exec_params->map_addr = sp;

	if (copy_to_user((void __user *) sp, exec_params->loadmap, len) != 0)
		return -EFAULT;

	current->mm->context.exec_fdpic_loadmap = (unsigned long) sp;

	if (interp_params->loadmap) {
		len = sizeof(struct elf32_fdpic_loadmap);
		len += sizeof(struct elf32_fdpic_loadseg) *
			interp_params->loadmap->nsegs;
		sp = (sp - len) & ~7UL;
		interp_params->map_addr = sp;

		if (copy_to_user((void __user *) sp, interp_params->loadmap,
				 len) != 0)
			return -EFAULT;

		current->mm->context.interp_fdpic_loadmap = (unsigned long) sp;
	}

	/* force 16 byte _final_ alignment here for generality */
#define DLINFO_ITEMS 13
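/* the 13 fixed entries stacked below are AT_HWCAP, AT_PAGESZ, AT_CLKTCK,
 * AT_PHDR, AT_PHENT, AT_PHNUM, AT_BASE, AT_FLAGS, AT_ENTRY, AT_UID, AT_EUID,
 * AT_GID and AT_EGID */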

	nitems = 1 + DLINFO_ITEMS + (k_platform ? 1 : 0);
#ifdef DLINFO_ARCH_ITEMS
	nitems += DLINFO_ARCH_ITEMS;
#endif

	csp = sp;
	sp -= nitems * 2 * sizeof(unsigned long);
	sp -= (bprm->envc + 1) * sizeof(char *);	/* envv[] */
	sp -= (bprm->argc + 1) * sizeof(char *);	/* argv[] */
	sp -= 1 * sizeof(unsigned long);		/* argc */

	csp -= sp & 15UL;
	sp -= sp & 15UL;

	/* put the ELF interpreter info on the stack */
#define NEW_AUX_ENT(nr, id, val)					\
	do {								\
		struct { unsigned long _id, _val; } __user *ent;	\
									\
		ent = (void __user *) csp;				\
		__put_user((id), &ent[nr]._id);				\
		__put_user((val), &ent[nr]._val);			\
	} while (0)

	csp -= 2 * sizeof(unsigned long);
	NEW_AUX_ENT(0, AT_NULL, 0);
	if (k_platform) {
		csp -= 2 * sizeof(unsigned long);
		NEW_AUX_ENT(0, AT_PLATFORM,
			    (elf_addr_t) (unsigned long) u_platform);
	}

	csp -= DLINFO_ITEMS * 2 * sizeof(unsigned long);
	NEW_AUX_ENT( 0, AT_HWCAP,	hwcap);
	NEW_AUX_ENT( 1, AT_PAGESZ,	PAGE_SIZE);
	NEW_AUX_ENT( 2, AT_CLKTCK,	CLOCKS_PER_SEC);
	NEW_AUX_ENT( 3, AT_PHDR,	exec_params->ph_addr);
	NEW_AUX_ENT( 4, AT_PHENT,	sizeof(struct elf_phdr));
	NEW_AUX_ENT( 5, AT_PHNUM,	exec_params->hdr.e_phnum);
	NEW_AUX_ENT( 6, AT_BASE,	interp_params->elfhdr_addr);
	NEW_AUX_ENT( 7, AT_FLAGS,	0);
	NEW_AUX_ENT( 8, AT_ENTRY,	exec_params->entry_addr);
	NEW_AUX_ENT( 9, AT_UID,		(elf_addr_t) current->uid);
	NEW_AUX_ENT(10, AT_EUID,	(elf_addr_t) current->euid);
	NEW_AUX_ENT(11, AT_GID,		(elf_addr_t) current->gid);
	NEW_AUX_ENT(12, AT_EGID,	(elf_addr_t) current->egid);

#ifdef ARCH_DLINFO
	/* ARCH_DLINFO must come last so platform specific code can enforce
	 * special alignment requirements on the AUXV if necessary (eg. PPC).
	 */
	ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

	/* allocate room for argv[] and envv[] */
	csp -= (bprm->envc + 1) * sizeof(elf_caddr_t);
	envp = (elf_caddr_t __user *) csp;
	csp -= (bprm->argc + 1) * sizeof(elf_caddr_t);
	argv = (elf_caddr_t __user *) csp;

	/* stack argc */
	csp -= sizeof(unsigned long);
	__put_user(bprm->argc, (unsigned long __user *) csp);

	BUG_ON(csp != sp);
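
	/*
	 * At this point the initial stack, from the final sp upwards, looks
	 * roughly like this:
	 *
	 *	argc
	 *	argv[0..argc-1], NULL
	 *	envp[0..envc-1], NULL
	 *	auxiliary vector (AT_* id/value pairs, AT_NULL last)
	 *	FDPIC load map(s)
	 *	platform string, argument and environment strings
	 */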

	/* fill in the argv[] array */
#ifdef CONFIG_MMU
	current->mm->arg_start = bprm->p;
#else
	current->mm->arg_start = current->mm->start_stack -
		(MAX_ARG_PAGES * PAGE_SIZE - bprm->p);
#endif

	p = (char __user *) current->mm->arg_start;
	for (loop = bprm->argc; loop > 0; loop--) {
		__put_user((elf_caddr_t) p, argv++);
		len = strnlen_user(p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	__put_user(NULL, argv);
	current->mm->arg_end = (unsigned long) p;

	/* fill in the envv[] array */
	current->mm->env_start = (unsigned long) p;
	for (loop = bprm->envc; loop > 0; loop--) {
		__put_user((elf_caddr_t)(unsigned long) p, envp++);
		len = strnlen_user(p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	__put_user(NULL, envp);
	current->mm->env_end = (unsigned long) p;

	mm->start_stack = (unsigned long) sp;
	return 0;
}

/*****************************************************************************/
/*
 * transfer the program arguments and environment from the holding pages onto
 * the stack
 */
#ifndef CONFIG_MMU
static int elf_fdpic_transfer_args_to_stack(struct linux_binprm *bprm,
					    unsigned long *_sp)
{
	unsigned long index, stop, sp;
	char *src;
	int ret = 0;

	stop = bprm->p >> PAGE_SHIFT;
	sp = *_sp;

	for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
		src = kmap(bprm->page[index]);
		sp -= PAGE_SIZE;
		if (copy_to_user((void *) sp, src, PAGE_SIZE) != 0)
			ret = -EFAULT;
		kunmap(bprm->page[index]);
		if (ret < 0)
			goto out;
	}

	*_sp = (*_sp - (MAX_ARG_PAGES * PAGE_SIZE - bprm->p)) & ~15;

out:
	return ret;
}
#endif

/*****************************************************************************/
/*
 * load the appropriate binary image (executable or interpreter) into memory
 * - we assume no MMU is available
 * - if no other PIC bits are set in params->hdr->e_flags
 *   - we assume that the LOADable segments in the binary are independently
 *     relocatable
 *   - we assume R/O executable segments are shareable
 * - else
 *   - we assume the loadable parts of the image to require fixed displacement
 *   - the image is not shareable
 */
static int elf_fdpic_map_file(struct elf_fdpic_params *params,
			      struct file *file,
			      struct mm_struct *mm,
			      const char *what)
{
	struct elf32_fdpic_loadmap *loadmap;
#ifdef CONFIG_MMU
	struct elf32_fdpic_loadseg *mseg;
#endif
	struct elf32_fdpic_loadseg *seg;
	struct elf32_phdr *phdr;
	unsigned long load_addr, stop;
	unsigned nloads, tmp;
	size_t size;
	int loop, ret;

	/* allocate a load map table */
	nloads = 0;
	for (loop = 0; loop < params->hdr.e_phnum; loop++)
		if (params->phdrs[loop].p_type == PT_LOAD)
			nloads++;

	if (nloads == 0)
		return -ELIBBAD;

	size = sizeof(*loadmap) + nloads * sizeof(*seg);
	loadmap = kzalloc(size, GFP_KERNEL);
	if (!loadmap)
		return -ENOMEM;

	params->loadmap = loadmap;

	loadmap->version = ELF32_FDPIC_LOADMAP_VERSION;
	loadmap->nsegs = nloads;

	load_addr = params->load_addr;
	seg = loadmap->segs;

	/* map the requested LOADs into the memory space */
	switch (params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) {
	case ELF_FDPIC_FLAG_CONSTDISP:
	case ELF_FDPIC_FLAG_CONTIGUOUS:
#ifndef CONFIG_MMU
		ret = elf_fdpic_map_file_constdisp_on_uclinux(params, file, mm);
		if (ret < 0)
			return ret;
		break;
#endif
	default:
		ret = elf_fdpic_map_file_by_direct_mmap(params, file, mm);
		if (ret < 0)
			return ret;
		break;
	}

	/* map the entry point */
	if (params->hdr.e_entry) {
		seg = loadmap->segs;
		for (loop = loadmap->nsegs; loop > 0; loop--, seg++) {
			if (params->hdr.e_entry >= seg->p_vaddr &&
			    params->hdr.e_entry < seg->p_vaddr + seg->p_memsz) {
				params->entry_addr =
					(params->hdr.e_entry - seg->p_vaddr) +
					seg->addr;
				break;
			}
		}
	}

	/* determine where the program header table has wound up if mapped */
	stop = params->hdr.e_phoff;
	stop += params->hdr.e_phnum * sizeof (struct elf_phdr);
	phdr = params->phdrs;

	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
		if (phdr->p_type != PT_LOAD)
			continue;

		if (phdr->p_offset > params->hdr.e_phoff ||
		    phdr->p_offset + phdr->p_filesz < stop)
			continue;

		seg = loadmap->segs;
		for (loop = loadmap->nsegs; loop > 0; loop--, seg++) {
			if (phdr->p_vaddr >= seg->p_vaddr &&
			    phdr->p_vaddr + phdr->p_filesz <=
			    seg->p_vaddr + seg->p_memsz) {
				params->ph_addr =
					(phdr->p_vaddr - seg->p_vaddr) +
					seg->addr +
					params->hdr.e_phoff - phdr->p_offset;
				break;
			}
		}

		break;
	}

	/* determine where the dynamic section has wound up if there is one */
	phdr = params->phdrs;
	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
		if (phdr->p_type != PT_DYNAMIC)
			continue;

		seg = loadmap->segs;
		for (loop = loadmap->nsegs; loop > 0; loop--, seg++) {
			if (phdr->p_vaddr >= seg->p_vaddr &&
			    phdr->p_vaddr + phdr->p_memsz <=
			    seg->p_vaddr + seg->p_memsz) {
				params->dynamic_addr =
					(phdr->p_vaddr - seg->p_vaddr) +
					seg->addr;

				/* check the dynamic section contains at least
				 * one item, and that the last item is a NULL
				 * entry */
				if (phdr->p_memsz == 0 ||
				    phdr->p_memsz % sizeof(Elf32_Dyn) != 0)
					goto dynamic_error;

				tmp = phdr->p_memsz / sizeof(Elf32_Dyn);
				if (((Elf32_Dyn *)
				     params->dynamic_addr)[tmp - 1].d_tag != 0)
					goto dynamic_error;
				break;
			}
		}

		break;
	}

	/* now elide adjacent segments in the load map on MMU linux
	 * - on uClinux the holes between may actually be filled with system
	 *   stuff or stuff from other processes
	 */
#ifdef CONFIG_MMU
	nloads = loadmap->nsegs;
	mseg = loadmap->segs;
	seg = mseg + 1;
	for (loop = 1; loop < nloads; loop++) {
		/* see if we have a candidate for merging */
		if (seg->p_vaddr - mseg->p_vaddr == seg->addr - mseg->addr) {
			load_addr = PAGE_ALIGN(mseg->addr + mseg->p_memsz);
			if (load_addr == (seg->addr & PAGE_MASK)) {
				mseg->p_memsz +=
					load_addr -
					(mseg->addr + mseg->p_memsz);
				mseg->p_memsz += seg->addr & ~PAGE_MASK;
				mseg->p_memsz += seg->p_memsz;
				loadmap->nsegs--;
				continue;
			}
		}

		mseg++;
		if (mseg != seg)
			*mseg = *seg;
	}
#endif
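
	/* for illustration: two LOADs merge when they were mapped with the
	 * same displacement (addr - p_vaddr) and the second begins on the
	 * page immediately following the page-aligned end of the first,
	 * e.g. hypothetical segments at va 0x8000 -> 0x10008000 and
	 * va 0x9000 -> 0x10009000 collapse into a single loadmap entry
	 */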

	kdebug("Mapped Object [%s]:", what);
	kdebug("- elfhdr   : %lx", params->elfhdr_addr);
	kdebug("- entry    : %lx", params->entry_addr);
	kdebug("- PHDR[]   : %lx", params->ph_addr);
	kdebug("- DYNAMIC[]: %lx", params->dynamic_addr);
	seg = loadmap->segs;
	for (loop = 0; loop < loadmap->nsegs; loop++, seg++)
		kdebug("- LOAD[%d] : %08x-%08x [va=%x ms=%x]",
		       loop,
		       seg->addr, seg->addr + seg->p_memsz - 1,
		       seg->p_vaddr, seg->p_memsz);

	return 0;

dynamic_error:
	printk("ELF FDPIC %s with invalid DYNAMIC section (inode=%lu)\n",
	       what, file->f_path.dentry->d_inode->i_ino);
	return -ELIBBAD;
}

/*****************************************************************************/
/*
 * map a file with constant displacement under uClinux
 */
#ifndef CONFIG_MMU
static int elf_fdpic_map_file_constdisp_on_uclinux(
	struct elf_fdpic_params *params,
	struct file *file,
	struct mm_struct *mm)
{
	struct elf32_fdpic_loadseg *seg;
	struct elf32_phdr *phdr;
	unsigned long load_addr, base = ULONG_MAX, top = 0, maddr = 0, mflags;
	loff_t fpos;
	int loop, ret;

	load_addr = params->load_addr;
	seg = params->loadmap->segs;

	/* determine the bounds of the contiguous overall allocation we must
	 * make */
	phdr = params->phdrs;
	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
		if (params->phdrs[loop].p_type != PT_LOAD)
			continue;

		if (base > phdr->p_vaddr)
			base = phdr->p_vaddr;
		if (top < phdr->p_vaddr + phdr->p_memsz)
			top = phdr->p_vaddr + phdr->p_memsz;
	}

	/* allocate one big anon block for everything */
	mflags = MAP_PRIVATE;
	if (params->flags & ELF_FDPIC_FLAG_EXECUTABLE)
		mflags |= MAP_EXECUTABLE;

	down_write(&mm->mmap_sem);
	maddr = do_mmap(NULL, load_addr, top - base,
			PROT_READ | PROT_WRITE | PROT_EXEC, mflags, 0);
	up_write(&mm->mmap_sem);
	if (IS_ERR_VALUE(maddr))
		return (int) maddr;

	if (load_addr != 0)
		load_addr += PAGE_ALIGN(top - base);

	/* and then load the file segments into it */
	phdr = params->phdrs;
	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
		if (params->phdrs[loop].p_type != PT_LOAD)
			continue;

		fpos = phdr->p_offset;

		seg->addr = maddr + (phdr->p_vaddr - base);
		seg->p_vaddr = phdr->p_vaddr;
		seg->p_memsz = phdr->p_memsz;

		ret = file->f_op->read(file, (void *) seg->addr,
				       phdr->p_filesz, &fpos);
		if (ret < 0)
			return ret;

		/* map the ELF header address if in this segment */
		if (phdr->p_offset == 0)
			params->elfhdr_addr = seg->addr;

		/* clear any space allocated but not loaded */
		if (phdr->p_filesz < phdr->p_memsz)
			clear_user((void *) (seg->addr + phdr->p_filesz),
				   phdr->p_memsz - phdr->p_filesz);

		if (mm) {
			if (phdr->p_flags & PF_X) {
				if (!mm->start_code) {
					mm->start_code = seg->addr;
					mm->end_code = seg->addr +
						phdr->p_memsz;
				}
			} else if (!mm->start_data) {
				mm->start_data = seg->addr;
#ifndef CONFIG_MMU
				mm->end_data = seg->addr + phdr->p_memsz;
#endif
			}

#ifdef CONFIG_MMU
			if (seg->addr + phdr->p_memsz > mm->end_data)
				mm->end_data = seg->addr + phdr->p_memsz;
#endif
		}

		seg++;
	}

	return 0;
}
#endif

/*****************************************************************************/
/*
 * map a binary by direct mmap() of the individual PT_LOAD segments
 */
static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
					     struct file *file,
					     struct mm_struct *mm)
{
	struct elf32_fdpic_loadseg *seg;
	struct elf32_phdr *phdr;
	unsigned long load_addr, delta_vaddr;
	int loop, dvset;

	load_addr = params->load_addr;
	delta_vaddr = 0;
	dvset = 0;

	seg = params->loadmap->segs;

	/* deal with each load segment separately */
	phdr = params->phdrs;
	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
		unsigned long maddr, disp, excess, excess1;
		int prot = 0, flags;

		if (phdr->p_type != PT_LOAD)
			continue;

		kdebug("[LOAD] va=%lx of=%lx fs=%lx ms=%lx",
		       (unsigned long) phdr->p_vaddr,
		       (unsigned long) phdr->p_offset,
		       (unsigned long) phdr->p_filesz,
		       (unsigned long) phdr->p_memsz);

		/* determine the mapping parameters */
		if (phdr->p_flags & PF_R) prot |= PROT_READ;
		if (phdr->p_flags & PF_W) prot |= PROT_WRITE;
		if (phdr->p_flags & PF_X) prot |= PROT_EXEC;

		flags = MAP_PRIVATE | MAP_DENYWRITE;
		if (params->flags & ELF_FDPIC_FLAG_EXECUTABLE)
			flags |= MAP_EXECUTABLE;

		maddr = 0;

		switch (params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) {
		case ELF_FDPIC_FLAG_INDEPENDENT:
			/* PT_LOADs are independently locatable */
			break;

		case ELF_FDPIC_FLAG_HONOURVADDR:
			/* the specified virtual address must be honoured */
			maddr = phdr->p_vaddr;
			flags |= MAP_FIXED;
			break;

		case ELF_FDPIC_FLAG_CONSTDISP:
			/* constant displacement
			 * - can be mapped anywhere, but must be mapped as a
			 *   unit
			 */
			if (!dvset) {
				maddr = load_addr;
				delta_vaddr = phdr->p_vaddr;
				dvset = 1;
			} else {
				maddr = load_addr + phdr->p_vaddr - delta_vaddr;
				flags |= MAP_FIXED;
			}
			break;

		case ELF_FDPIC_FLAG_CONTIGUOUS:
			/* contiguity handled later */
			break;

		default:
			BUG();
		}

		maddr &= PAGE_MASK;

		/* create the mapping */
		disp = phdr->p_vaddr & ~PAGE_MASK;
		down_write(&mm->mmap_sem);
		maddr = do_mmap(file, maddr, phdr->p_memsz + disp, prot, flags,
				phdr->p_offset - disp);
		up_write(&mm->mmap_sem);

		kdebug("mmap[%d] <file> sz=%lx pr=%x fl=%x of=%lx --> %08lx",
		       loop, phdr->p_memsz + disp, prot, flags,
		       phdr->p_offset - disp, maddr);

		if (IS_ERR_VALUE(maddr))
			return (int) maddr;

		if ((params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) ==
		    ELF_FDPIC_FLAG_CONTIGUOUS)
			load_addr += PAGE_ALIGN(phdr->p_memsz + disp);

		seg->addr = maddr + disp;
		seg->p_vaddr = phdr->p_vaddr;
		seg->p_memsz = phdr->p_memsz;

		/* map the ELF header address if in this segment */
		if (phdr->p_offset == 0)
			params->elfhdr_addr = seg->addr;

		/* clear the bit between beginning of mapping and beginning of
		 * PT_LOAD */
		if (prot & PROT_WRITE && disp > 0) {
			kdebug("clear[%d] ad=%lx sz=%lx", loop, maddr, disp);
			clear_user((void __user *) maddr, disp);
			maddr += disp;
		}

		/* clear any space allocated but not loaded
		 * - on uClinux we can just clear the lot
		 * - on MMU linux we'll get a SIGBUS beyond the last page
		 *   extant in the file
		 */
		excess = phdr->p_memsz - phdr->p_filesz;
		excess1 = PAGE_SIZE - ((maddr + phdr->p_filesz) & ~PAGE_MASK);
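
		/*
		 * For example: with 4KB pages, if p_filesz ends 0x300 bytes
		 * into its final page and p_memsz extends two pages further,
		 * then excess1 = 0xd00 (the writable remainder of the last
		 * file-backed page, cleared below) and the rest of the excess
		 * is provided by a fresh anonymous mapping.
		 */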

#ifdef CONFIG_MMU
		if (excess > excess1) {
			unsigned long xaddr = maddr + phdr->p_filesz + excess1;
			unsigned long xmaddr;

			flags |= MAP_FIXED | MAP_ANONYMOUS;
			down_write(&mm->mmap_sem);
			xmaddr = do_mmap(NULL, xaddr, excess - excess1,
					 prot, flags, 0);
			up_write(&mm->mmap_sem);

			kdebug("mmap[%d] <anon>"
			       " ad=%lx sz=%lx pr=%x fl=%x of=0 --> %08lx",
			       loop, xaddr, excess - excess1, prot, flags,
			       xmaddr);

			if (xmaddr != xaddr)
				return -ENOMEM;
		}

		if (prot & PROT_WRITE && excess1 > 0) {
			kdebug("clear[%d] ad=%lx sz=%lx",
			       loop, maddr + phdr->p_filesz, excess1);
			clear_user((void __user *) maddr + phdr->p_filesz,
				   excess1);
		}

#else
		if (excess > 0) {
			kdebug("clear[%d] ad=%lx sz=%lx",
			       loop, maddr + phdr->p_filesz, excess);
			clear_user((void *) maddr + phdr->p_filesz, excess);
		}
#endif

		if (mm) {
			if (phdr->p_flags & PF_X) {
				if (!mm->start_code) {
					mm->start_code = maddr;
					mm->end_code = maddr + phdr->p_memsz;
				}
			} else if (!mm->start_data) {
				mm->start_data = maddr;
				mm->end_data = maddr + phdr->p_memsz;
			}
		}

		seg++;
	}

	return 0;
}

/*****************************************************************************/
/*
 * ELF-FDPIC core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *
 * Modelled on fs/binfmt_elf.c core dumper
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)

/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}

static int dump_seek(struct file *file, loff_t off)
{
	if (file->f_op->llseek) {
		if (file->f_op->llseek(file, off, SEEK_SET) != off)
			return 0;
	} else {
		file->f_pos = off;
	}
	return 1;
}

/*
 * Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 *
 * I think we should skip something. But I am not sure how. H.J.
 */
static int maydump(struct vm_area_struct *vma, unsigned long mm_flags)
{
	int dump_ok;

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & (VM_IO | VM_RESERVED)) {
		kdcore("%08lx: %08lx: no (IO)", vma->vm_start, vma->vm_flags);
		return 0;
	}

	/* If we may not read the contents, don't allow us to dump
	 * them either. "dump_write()" can't handle it anyway.
	 */
	if (!(vma->vm_flags & VM_READ)) {
		kdcore("%08lx: %08lx: no (!read)", vma->vm_start, vma->vm_flags);
		return 0;
	}

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0) {
			dump_ok = test_bit(MMF_DUMP_ANON_SHARED, &mm_flags);
			kdcore("%08lx: %08lx: %s (share)", vma->vm_start,
			       vma->vm_flags, dump_ok ? "yes" : "no");
			return dump_ok;
		}

		dump_ok = test_bit(MMF_DUMP_MAPPED_SHARED, &mm_flags);
		kdcore("%08lx: %08lx: %s (share)", vma->vm_start,
		       vma->vm_flags, dump_ok ? "yes" : "no");
		return dump_ok;
	}

#ifdef CONFIG_MMU
	/* By default, if it hasn't been written to, don't write it out */
	if (!vma->anon_vma) {
		dump_ok = test_bit(MMF_DUMP_MAPPED_PRIVATE, &mm_flags);
		kdcore("%08lx: %08lx: %s (!anon)", vma->vm_start,
		       vma->vm_flags, dump_ok ? "yes" : "no");
		return dump_ok;
	}
#endif

	dump_ok = test_bit(MMF_DUMP_ANON_PRIVATE, &mm_flags);
	kdcore("%08lx: %08lx: %s", vma->vm_start, vma->vm_flags,
	       dump_ok ? "yes" : "no");
	return dump_ok;
}

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}
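
/* each note written below is laid out as an elf_note header (n_namesz,
 * n_descsz, n_type) followed by the NUL-terminated name and then the
 * descriptor data, each padded to a 4-byte boundary - which is what
 * notesize() accounts for above
 */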

/* #define DEBUG */

#define DUMP_WRITE(addr, nr)				\
	do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
#define DUMP_SEEK(off)					\
	do { if (!dump_seek(file, (off))) return 0; } while(0)

static int writenote(struct memelfnote *men, struct file *file)
{
	struct elf_note en;

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);
	/* XXX - cast from long long to long to avoid need for libgcc.a */
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
	DUMP_WRITE(men->data, men->datasz);
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */

	return 1;
}

#undef DUMP_WRITE
#undef DUMP_SEEK

#define DUMP_WRITE(addr, nr)						\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr)))	\
		goto end_coredump;
#define DUMP_SEEK(off)							\
	if (!dump_seek(file, (off)))					\
		goto end_coredump;

static inline void fill_elf_fdpic_header(struct elfhdr *elf, int segs)
{
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_shoff = 0;
	elf->e_flags = ELF_FDPIC_CORE_EFLAGS;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;
	return;
}

static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
	return;
}

static inline void fill_note(struct memelfnote *note, const char *name, int type,
			     unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
	return;
}

/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
			  struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = task_pid_vnr(p);
	prstatus->pr_ppid = task_pid_vnr(p->parent);
	prstatus->pr_pgrp = task_pgrp_vnr(p);
	prstatus->pr_sid = task_session_vnr(p);
	if (thread_group_leader(p)) {
		/*
		 * This is the record for the group leader.  Add in the
		 * cumulative times of previous dead threads.  This total
		 * won't include the time of each live thread whose state
		 * is included in the core dump.  The final total reported
		 * to our parent process when it calls wait4 will include
		 * those sums as well as the little bit more time it takes
		 * this and each other thread to finish dying after the
		 * core dump synchronization phase.
		 */
		cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
				   &prstatus->pr_utime);
		cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
				   &prstatus->pr_stime);
	} else {
		cputime_to_timeval(p->utime, &prstatus->pr_utime);
		cputime_to_timeval(p->stime, &prstatus->pr_stime);
	}
	cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
	cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);

	prstatus->pr_exec_fdpic_loadmap = p->mm->context.exec_fdpic_loadmap;
	prstatus->pr_interp_fdpic_loadmap = p->mm->context.interp_fdpic_loadmap;
}

static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ - 1;
	if (copy_from_user(&psinfo->pr_psargs,
			   (const char __user *) mm->arg_start, len))
		return -EFAULT;
	for (i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = task_pid_vnr(p);
	psinfo->pr_ppid = task_pid_vnr(p->parent);
	psinfo->pr_pgrp = task_pgrp_vnr(p);
	psinfo->pr_sid = task_session_vnr(p);

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	SET_UID(psinfo->pr_uid, p->uid);
	SET_GID(psinfo->pr_gid, p->gid);
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}

/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* ELF_CORE_XFPREG_TYPE */
#endif
	struct memelfnote notes[3];
	int num_notes;
};

/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then create
 * a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	struct task_struct *p = t->thread;
	int sz = 0;

	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
		  &t->prstatus);
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu);
	if (t->prstatus.pr_fpvalid) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
			  &t->fpu);
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif

	return sz;
}

/*
 * dump the segments for an MMU process
 */
#ifdef CONFIG_MMU
static int elf_fdpic_dump_segments(struct file *file, size_t *size,
				   unsigned long *limit, unsigned long mm_flags)
{
	struct vm_area_struct *vma;

	for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
		unsigned long addr;

		if (!maydump(vma, mm_flags))
			continue;

		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE
		     ) {
			struct vm_area_struct *vma;
			struct page *page;

			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
					   &page, &vma) <= 0) {
				DUMP_SEEK(file->f_pos + PAGE_SIZE);
			}
			else if (page == ZERO_PAGE(0)) {
				page_cache_release(page);
				DUMP_SEEK(file->f_pos + PAGE_SIZE);
			}
			else {
				void *kaddr;

				flush_cache_page(vma, addr, page_to_pfn(page));
				kaddr = kmap(page);
				if ((*size += PAGE_SIZE) > *limit ||
				    !dump_write(file, kaddr, PAGE_SIZE)
				    ) {
					kunmap(page);
					page_cache_release(page);
					return -EIO;
				}
				kunmap(page);
				page_cache_release(page);
			}
		}
	}

	return 0;

end_coredump:
	return -EFBIG;
}
#endif

/*
 * dump the segments for a NOMMU process
 */
#ifndef CONFIG_MMU
static int elf_fdpic_dump_segments(struct file *file, size_t *size,
				   unsigned long *limit, unsigned long mm_flags)
{
	struct vm_list_struct *vml;

	for (vml = current->mm->context.vmlist; vml; vml = vml->next) {
		struct vm_area_struct *vma = vml->vma;

		if (!maydump(vma, mm_flags))
			continue;

		if ((*size += PAGE_SIZE) > *limit)
			return -EFBIG;

		if (!dump_write(file, (void *) vma->vm_start,
				vma->vm_end - vma->vm_start))
			return -EIO;
	}

	return 0;
}
#endif

/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
			       struct file *file, unsigned long limit)
{
#define	NUM_NOTES	6
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	size_t size = 0;
	int i;
	struct vm_area_struct *vma;
	struct elfhdr *elf = NULL;
	loff_t offset = 0, dataoff;
	int numnote;
	struct memelfnote *notes = NULL;
	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
	struct task_struct *g, *p;
	LIST_HEAD(thread_list);
	struct list_head *t;
	elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu = NULL;
#endif
	int thread_status_size = 0;
#ifndef CONFIG_MMU
	struct vm_list_struct *vml;
#endif
	elf_addr_t *auxv;
	unsigned long mm_flags;

	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated. So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto cleanup;
	prstatus = kzalloc(sizeof(*prstatus), GFP_KERNEL);
	if (!prstatus)
		goto cleanup;
	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (!psinfo)
		goto cleanup;
	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
	if (!notes)
		goto cleanup;
	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
	if (!fpu)
		goto cleanup;
#ifdef ELF_CORE_COPY_XFPREGS
	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
	if (!xfpu)
		goto cleanup;
#endif

	if (signr) {
		struct elf_thread_status *tmp;
		rcu_read_lock();
		do_each_thread(g, p)
			if (current->mm == p->mm && current != p) {
				tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
				if (!tmp) {
					rcu_read_unlock();
					goto cleanup;
				}
				tmp->thread = p;
				list_add(&tmp->list, &thread_list);
			}
		while_each_thread(g, p);
		rcu_read_unlock();
		list_for_each(t, &thread_list) {
			struct elf_thread_status *tmp;
			int sz;

			tmp = list_entry(t, struct elf_thread_status, list);
			sz = elf_dump_thread_status(signr, tmp);
			thread_status_size += sz;
		}
	}

	/* now collect the dump for the current */
	fill_prstatus(prstatus, current, signr);
	elf_core_copy_regs(&prstatus->pr_reg, regs);

#ifdef CONFIG_MMU
	segs = current->mm->map_count;
#else
	segs = 0;
	for (vml = current->mm->context.vmlist; vml; vml = vml->next)
		segs++;
#endif
#ifdef ELF_CORE_EXTRA_PHDRS
	segs += ELF_CORE_EXTRA_PHDRS;
#endif

	/* Set up header */
	fill_elf_fdpic_header(elf, segs + 1);	/* including notes section */

	has_dumped = 1;
	current->flags |= PF_DUMPCORE;

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
	fill_psinfo(psinfo, current->group_leader, current->mm);
	fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	numnote = 2;

	auxv = (elf_addr_t *) current->mm->saved_auxv;

	i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(&notes[numnote++], "CORE", NT_AUXV,
		  i * sizeof(elf_addr_t), auxv);

	/* Try to dump the FPU. */
	if ((prstatus->pr_fpvalid =
	     elf_core_copy_task_fpregs(current, regs, fpu)))
		fill_note(notes + numnote++,
			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, xfpu))
		fill_note(notes + numnote++,
			  "LINUX", ELF_CORE_XFPREG_TYPE, sizeof(*xfpu), xfpu);
#endif

	fs = get_fs();
	set_fs(KERNEL_DS);

	DUMP_WRITE(elf, sizeof(*elf));
	offset += sizeof(*elf);				/* Elf header */
	offset += (segs+1) * sizeof(struct elf_phdr);	/* Program headers */

	/* Write notes phdr entry */
	{
		struct elf_phdr phdr;
		int sz = 0;

		for (i = 0; i < numnote; i++)
			sz += notesize(notes + i);

		sz += thread_status_size;

		fill_elf_note_phdr(&phdr, sz, offset);
		offset += sz;
		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	/* Page-align dumped data */
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	/*
	 * We must use the same mm->flags while dumping core to avoid
	 * inconsistency between the program headers and bodies, otherwise an
	 * unusable core file can be generated.
	 */
	mm_flags = current->mm->flags;

	/* write program headers for segments dump */
	for (
#ifdef CONFIG_MMU
		vma = current->mm->mmap; vma; vma = vma->vm_next
#else
		vml = current->mm->context.vmlist; vml; vml = vml->next
#endif
	     ) {
		struct elf_phdr phdr;
		size_t sz;

#ifndef CONFIG_MMU
		vma = vml->vma;
#endif

		sz = vma->vm_end - vma->vm_start;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = maydump(vma, mm_flags) ? sz : 0;
		phdr.p_memsz = sz;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));
	}

#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
	ELF_CORE_WRITE_EXTRA_PHDRS;
#endif

	/* write out the notes section */
	for (i = 0; i < numnote; i++)
		if (!writenote(notes + i, file))
			goto end_coredump;

	/* write out the thread status notes section */
	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp =
			list_entry(t, struct elf_thread_status, list);

		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file))
				goto end_coredump;
	}

	DUMP_SEEK(dataoff);

	if (elf_fdpic_dump_segments(file, &size, &limit, mm_flags) < 0)
		goto end_coredump;

#ifdef ELF_CORE_WRITE_EXTRA_DATA
	ELF_CORE_WRITE_EXTRA_DATA;
#endif

	if (file->f_pos != offset) {
		/* Sanity check */
		printk(KERN_WARNING
		       "elf_core_dump: file->f_pos (%lld) != offset (%lld)\n",
		       file->f_pos, offset);
	}

end_coredump:
	set_fs(fs);

cleanup:
	while (!list_empty(&thread_list)) {
		struct list_head *tmp = thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	kfree(elf);
	kfree(prstatus);
	kfree(psinfo);
	kfree(notes);
	kfree(fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(xfpu);
#endif
	return has_dumped;
#undef NUM_NOTES
}

#endif		/* USE_ELF_CORE_DUMP */