[PATCH] kconfig documentation update
[linux-2.6/history.git] / fs / binfmt_elf.c
blob20a1e5ecc08d05ced1a8ba9fd486a7ff01153251
1 /*
2 * linux/fs/binfmt_elf.c
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
7 * Tools".
9 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
12 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/stat.h>
16 #include <linux/time.h>
17 #include <linux/mm.h>
18 #include <linux/mman.h>
19 #include <linux/a.out.h>
20 #include <linux/errno.h>
21 #include <linux/signal.h>
22 #include <linux/binfmts.h>
23 #include <linux/string.h>
24 #include <linux/file.h>
25 #include <linux/fcntl.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/shm.h>
29 #include <linux/personality.h>
30 #include <linux/elfcore.h>
31 #include <linux/init.h>
32 #include <linux/highuid.h>
33 #include <linux/smp.h>
34 #include <linux/smp_lock.h>
35 #include <linux/compiler.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
39 #include <asm/uaccess.h>
40 #include <asm/param.h>
41 #include <asm/pgalloc.h>
43 #include <linux/elf.h>
/* Entry points registered with the binfmt machinery (see elf_format below). */
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
static int load_elf_library(struct file*);
/* Maps one PT_LOAD segment of an ELF file; see definition below. */
static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
/* Arch-provided: copies the FPU state of a task into an ELF core-dump record. */
extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
/* Architectures may override the width of auxv/stack entries; default to long. */
#ifndef elf_addr_t
#define elf_addr_t unsigned long
#endif
/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
#else
#define elf_core_dump	NULL
#endif
/* Segment alignment granularity: the larger of the ELF ABI page size
 * and the hardware page size. */
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
# define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
# define ELF_MIN_ALIGN	PAGE_SIZE
#endif

/* Round down / get offset within / round up to an ELF_MIN_ALIGN boundary. */
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
/* The binfmt descriptor through which exec() dispatches to this loader. */
static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE
};
/* True if x is outside the user address space (also catches mmap error returns,
 * which are small negative values and thus > TASK_SIZE when cast unsigned). */
#define BAD_ADDR(x)	((unsigned long)(x) > TASK_SIZE)
84 static void set_brk(unsigned long start, unsigned long end)
86 start = ELF_PAGEALIGN(start);
87 end = ELF_PAGEALIGN(end);
88 if (end > start)
89 do_brk(start, end - start);
90 current->mm->start_brk = current->mm->brk = end;
94 /* We need to explicitly zero any fractional pages
95 after the data section (i.e. bss). This would
96 contain the junk from the file that should not
97 be in memory */
100 static void padzero(unsigned long elf_bss)
102 unsigned long nbyte;
104 nbyte = ELF_PAGEOFFSET(elf_bss);
105 if (nbyte) {
106 nbyte = ELF_MIN_ALIGN - nbyte;
107 clear_user((void *) elf_bss, nbyte);
/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef ARCH_STACK_GROWSUP
/* Stack grows toward higher addresses: advance sp to make room. */
#define STACK_ADD(sp, items) ((elf_addr_t *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
/* Reserve len bytes and yield the address of the reserved region. */
#define STACK_ALLOC(sp, len) ({ elf_addr_t *old_sp = sp; sp += len; old_sp; })
#else
/* Stack grows toward lower addresses: items are allocated by subtracting. */
#define STACK_ADD(sp, items) ((elf_addr_t *)(sp) - (items))
/* Keep the final stack pointer 16-byte aligned, as many ABIs require. */
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
/* Reserve len bytes; sp itself is the address of the reserved region. */
#define STACK_ALLOC(sp, len) sp -= len
#endif
/*
 * Lay out the initial userspace stack for a freshly exec'd ELF image:
 * argc, argv[] pointers, envp[] pointers, and the ELF auxiliary vector
 * (plus the a.out-style argv/envp slots when the interpreter is a.out).
 * On return bprm->p is the final, 16-byte-rounded stack pointer.
 *
 * NOTE(review): the user-copy helpers (__put_user/__copy_to_user/
 * copy_to_user) have their return values ignored throughout; presumably
 * a fault here leaves the process to die on its first instruction —
 * confirm against callers before relying on it.
 */
static void
create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
		int interp_aout, unsigned long load_addr,
		unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t *argv, *envp;
	elf_addr_t *sp, *u_platform;
	const char *k_platform = ELF_PLATFORM;
	int items;
	elf_addr_t elf_info[40];	/* room for 20 NEW_AUX_ENT pairs */
	int ei_index = 0;

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */

	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

#ifdef CONFIG_X86_HT
		/*
		 * In some cases (e.g. Hyper-Threading), we want to avoid L1
		 * evictions by the processes running on the same package. One
		 * thing we can do is to shuffle the initial stack for them.
		 *
		 * The conditionals here are unneeded, but kept in to make the
		 * code behaviour the same as pre change unless we have
		 * hyperthreaded processors. This should be cleaned up
		 * before 2.6
		 */
		if (smp_num_siblings > 1)
			STACK_ALLOC(p, ((current->pid % 64) << 7));
#endif
		u_platform = (elf_addr_t *) STACK_ALLOC(p, len);
		__copy_to_user(u_platform, k_platform, len);
	}

	/* Create the ELF interpreter info */
#define NEW_AUX_ENT(id, val) \
	do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, (elf_addr_t) current->uid);
	NEW_AUX_ENT(AT_EUID, (elf_addr_t) current->euid);
	NEW_AUX_ENT(AT_GID, (elf_addr_t) current->gid);
	NEW_AUX_ENT(AT_EGID, (elf_addr_t) current->egid);
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(long)u_platform);
	}
	NEW_AUX_ENT(AT_NULL, 0);	/* terminator; must be last */
#undef NEW_AUX_ENT

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1);	/* +1 each for NULL terminators */
	if (interp_aout) {
		items += 3; /* a.out interpreters require argv & envp too */
	} else {
		items += 1; /* ELF interpreters only put argc on the stack */
	}
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef ARCH_STACK_GROWSUP
	sp = (elf_addr_t *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t *)bprm->p;
#endif

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	__put_user(argc, sp++);
	if (interp_aout) {
		/* a.out ld.so wants explicit argv/envp pointers after argc. */
		argv = sp + 2;
		envp = argv + argc + 1;
		__put_user((elf_addr_t)(long)argv, sp++);
		__put_user((elf_addr_t)(long)envp, sp++);
	} else {
		argv = sp;
		envp = argv + argc + 1;
	}

	/* Populate argv and envp */
	p = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, argv++);
		len = strnlen_user((void *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return;
		p += len;
	}
	/* NOTE(review): NULL here is a pointer constant stored into an
	 * elf_addr_t slot; later kernels write 0 instead — confirm on
	 * 64-bit/compat before changing. */
	__put_user(NULL, argv);
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, envp++);
		len = strnlen_user((void *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return;
		p += len;
	}
	__put_user(NULL, envp);
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	sp = (elf_addr_t *)envp + 1;
	copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t));
}
#ifndef elf_map

/*
 * mmap one PT_LOAD segment: the file window is widened so the mapping
 * starts on an ELF_MIN_ALIGN boundary (addr and p_offset are assumed
 * congruent modulo ELF_MIN_ALIGN).  Returns the address do_mmap chose,
 * or an error value (callers test with BAD_ADDR()).
 */
static inline unsigned long
elf_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type)
{
	unsigned long map_addr;

	down_write(&current->mm->mmap_sem);
	map_addr = do_mmap(filep, ELF_PAGESTART(addr),
			   eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr), prot, type,
			   eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr));
	up_write(&current->mm->mmap_sem);
	return(map_addr);
}

#endif /* !elf_map */
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

/*
 * Map an ELF program interpreter (dynamic linker) into the current mm.
 * On success returns the interpreter's entry point (relocated by
 * load_addr for ET_DYN) and stores its load address through
 * *interp_load_addr; on failure returns ~0UL (caught by BAD_ADDR()).
 */
static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
				     struct file * interpreter,
				     unsigned long *interp_load_addr)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;	/* default: BAD_ADDR() on any early exit */
	int retval, i, size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	/* Bound e_phnum so the kmalloc below cannot be driven huge. */
	if (interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */

	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
	error = retval;
	if (retval < 0)
		goto out_close;

	eppnt = elf_phdata;
	for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			/* Once the first segment is placed, the rest must land
			   at fixed offsets relative to it. */
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;

			map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Find the end of the file mapping for this phdr, and keep
			 * track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}

	/* Now use mmap to map the library into memory. */

	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	padzero(elf_bss);
	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */

	/* Map the last of the bss segment */
	if (last_bss > elf_bss)
		do_brk(elf_bss, last_bss - elf_bss);

	*interp_load_addr = load_addr;
	error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}
/*
 * Load an a.out-format program interpreter.  Reads text+data straight
 * into the brk area and extends the brk over the bss.  Returns the
 * interpreter entry point, or ~0UL on failure (caught by BAD_ADDR()).
 */
static unsigned long load_aout_interp(struct exec * interp_ex,
				      struct file * interpreter)
{
	unsigned long text_data, elf_entry = ~0UL;
	char * addr;
	loff_t offset;
	int retval;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		/* OMAGIC: image starts right after the 32-byte exec header,
		   loaded at address 0. */
		offset = 32;
		addr = (char *) 0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char *) N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	/* Anonymous pages to hold text+data, then read the image into them. */
	do_brk(0, text_data);
	retval = -ENOEXEC;
	if (!interpreter->f_op || !interpreter->f_op->read)
		goto out;
	retval = interpreter->f_op->read(interpreter, addr, text_data, &offset);
	if (retval < 0)
		goto out;
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + text_data);

	/* Extend the brk over the bss, rounded to an alignment boundary. */
	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
		interp_ex->a_bss);
	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}
/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

/* Interpreter classification; used as a bitmask while deciding, then
 * narrowed to exactly one value. */
#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2
/*
 * Load an ELF executable into the current process: validate the headers,
 * read the program headers, locate and open any PT_INTERP interpreter,
 * flush the old image (point of no return), map all PT_LOAD segments,
 * load the interpreter (ELF or a.out), build the stack/auxv tables and
 * start the new thread.  Returns 0 on success or a negative errno; after
 * flush_old_exec() failures kill the process instead of returning cleanly.
 */
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	unsigned long error;
	struct elf_phdr * elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry, interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	struct elfhdr elf_ex;
	struct elfhdr interp_elf_ex;
	struct exec interp_ex;
	char passed_fileno[6];	/* big enough for an fd as decimal string */

	/* Get the exec-header */
	elf_ex = *((struct elfhdr *) bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&elf_ex))
		goto out;
	if (!bprm->file->f_op||!bprm->file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	retval = -ENOMEM;
	if (elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	/* Bound e_phnum so size below cannot overflow or grow unreasonable. */
	if (elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = elf_ex.e_phnum * sizeof(struct elf_phdr);
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *) elf_phdata, size);
	if (retval < 0)
		goto out_free_ph;

	/* Keep an fd on the executable; a.out interpreters receive it in argv. */
	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_ph;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	/* First pass over the phdrs: find and open the PT_INTERP interpreter. */
	for (i = 0; i < elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */

			retval = -ENOMEM;
			if (elf_ppnt->p_filesz > PATH_MAX)
				goto out_free_file;
			elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
							   GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					   elf_interpreter,
					   elf_ppnt->p_filesz);
			if (retval < 0)
				goto out_free_interp;
			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;

			SET_PERSONALITY(elf_ex, ibcs2_interpreter);

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;
			retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
			if (retval < 0)
				goto out_free_dentry;

			/* Get the exec headers; both views of the same buffer,
			   disambiguated below. */
			interp_ex = *((struct exec *) bprm->buf);
			interp_elf_ex = *((struct elfhdr *) bprm->buf);
			break;
		}
		elf_ppnt++;
	}

	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(interp_ex) != OMAGIC) &&
		    (N_MAGIC(interp_ex) != ZMAGIC) &&
		    (N_MAGIC(interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (memcmp(interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
			// FIXME - ratelimit this before re-enabling
			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
	} else {
		/* Executables without an interpreter also need a personality  */
		SET_PERSONALITY(elf_ex, ibcs2_interpreter);
	}

	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */

	if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
		char *passed_p = passed_fileno;
		sprintf(passed_fileno, "%d", elf_exec_fileno);

		if (elf_interpreter) {
			retval = copy_strings_kernel(1, &passed_p, bprm);
			if (retval)
				goto out_free_dentry;
			bprm->argc++;
		}
	}

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* OK, This is the point of no return */
	current->mm->start_data = 0;
	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->mmap = NULL;
	current->flags &= ~PF_FORKNOEXEC;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(elf_ex, ibcs2_interpreter);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->rss = 0;
	current->mm->free_area_cache = TASK_UNMAPPED_BASE;
	retval = setup_arg_pages(bprm);
	if (retval < 0) {
		/* Past the point of no return: the old image is gone. */
		send_sig(SIGKILL, current, 0);
		return retval;
	}

	current->mm->start_stack = bprm->p;

	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory.  At this point, we assume that
	   the image should be loaded at fixed address, not at a variable
	   address. */

	for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			set_brk (elf_bss + load_bias, elf_brk + load_bias);
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				clear_user((void *) elf_bss + load_bias, nbyte);
			}
		}

		if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (elf_ex.e_type == ET_DYN) {
			/* Try and get dynamic programs out of the way of the default mmap
			   base, as well as whatever program they might try to exec.  This
			   is because the brk will follow the loader, and is not movable.  */
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
		}

		/* NOTE(review): a failed segment map is skipped silently here
		   rather than aborting the exec — confirm intent. */
		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
		if (BAD_ADDR(error))
			continue;

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (elf_ex.e_type == ET_DYN) {
				/* Fix up the bias to where the first segment
				   actually landed. */
				load_bias += error -
					     ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code) start_code = k;
		if (start_data < k) start_data = k;

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}

	/* All addresses tracked above were file-relative; rebase them. */
	elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT)
			elf_entry = load_aout_interp(&interp_ex,
						     interpreter);
		else
			elf_entry = load_elf_interp(&interp_elf_ex,
						    interpreter,
						    &interp_load_addr);

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);

		if (BAD_ADDR(elf_entry)) {
			printk(KERN_ERR "Unable to load interpreter\n");
			kfree(elf_phdata);
			send_sig(SIGSEGV, current, 0);
			return 0;
		}
	} else {
		elf_entry = elf_ex.e_entry;
	}

	kfree(elf_phdata);

	/* a.out interpreters find the executable via the fd we pass in argv. */
	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);

	set_binfmt(&elf_format);

	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	create_elf_tables(bprm, &elf_ex, (interpreter_type == INTERPRETER_AOUT),
			load_addr, interp_load_addr);
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections
	 */
	set_brk(elf_bss, elf_brk);

	padzero(elf_bss);

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior.  Sigh.  */
		/* N.B. Shouldn't the size here be PAGE_SIZE?? */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  This macro performs whatever initialization to
	 * the regs structure is required.
	 */
	ELF_PLAT_INIT(regs);
#endif

	start_thread(regs, elf_entry, bprm->p);
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}
	retval = 0;
out:
	return retval;

	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	fput(interpreter);
out_free_interp:
	if (elf_interpreter)
		kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
808 /* This is really simpleminded and specialized - we are loading an
809 a.out library that is given an ELF header. */
811 static int load_elf_library(struct file *file)
813 struct elf_phdr *elf_phdata;
814 unsigned long elf_bss, bss, len;
815 int retval, error, i, j;
816 struct elfhdr elf_ex;
818 error = -ENOEXEC;
819 retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
820 if (retval != sizeof(elf_ex))
821 goto out;
823 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
824 goto out;
826 /* First of all, some simple consistency checks */
827 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
828 !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
829 goto out;
831 /* Now read in all of the header information */
833 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
834 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
836 error = -ENOMEM;
837 elf_phdata = (struct elf_phdr *) kmalloc(j, GFP_KERNEL);
838 if (!elf_phdata)
839 goto out;
841 error = -ENOEXEC;
842 retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, j);
843 if (retval != j)
844 goto out_free_ph;
846 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
847 if ((elf_phdata + i)->p_type == PT_LOAD) j++;
848 if (j != 1)
849 goto out_free_ph;
851 while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
853 /* Now use mmap to map the library into memory. */
854 down_write(&current->mm->mmap_sem);
855 error = do_mmap(file,
856 ELF_PAGESTART(elf_phdata->p_vaddr),
857 (elf_phdata->p_filesz +
858 ELF_PAGEOFFSET(elf_phdata->p_vaddr)),
859 PROT_READ | PROT_WRITE | PROT_EXEC,
860 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
861 (elf_phdata->p_offset -
862 ELF_PAGEOFFSET(elf_phdata->p_vaddr)));
863 up_write(&current->mm->mmap_sem);
864 if (error != ELF_PAGESTART(elf_phdata->p_vaddr))
865 goto out_free_ph;
867 elf_bss = elf_phdata->p_vaddr + elf_phdata->p_filesz;
868 padzero(elf_bss);
870 len = ELF_PAGESTART(elf_phdata->p_filesz + elf_phdata->p_vaddr + ELF_MIN_ALIGN - 1);
871 bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
872 if (bss > len)
873 do_brk(len, bss - len);
874 error = 0;
876 out_free_ph:
877 kfree(elf_phdata);
878 out:
879 return error;
883 * Note that some platforms still use traditional core dumps and not
884 * the ELF core dump. Each platform can select it as appropriate.
886 #ifdef USE_ELF_CORE_DUMP
889 * ELF core dumper
891 * Modelled on fs/exec.c:aout_core_dump()
892 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
895 * These are the only things you should do on a core-file: use only these
896 * functions to write out all the necessary info.
898 static int dump_write(struct file *file, const void *addr, int nr)
900 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
903 static int dump_seek(struct file *file, off_t off)
905 if (file->f_op->llseek) {
906 if (file->f_op->llseek(file, off, 0) != off)
907 return 0;
908 } else
909 file->f_pos = off;
910 return 1;
/*
 * Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 *
 * I think we should skip something. But I am not sure how. H.J.
 */
static inline int maydump(struct vm_area_struct *vma)
{
	/*
	 * If we may not read the contents, don't allow us to dump
	 * them either. "dump_write()" can't handle it anyway.
	 */
	if (!(vma->vm_flags & VM_READ))
		return 0;

	/* Do not dump I/O mapped devices! -DaveM */
	if (vma->vm_flags & VM_IO)
		return 0;
#if 1
	/* Dump anything writable or stack-like ... */
	if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
		return 1;
	/* ... otherwise skip.  VM_READ is guaranteed set here (checked
	 * above), so this test always matches and the final return 1
	 * below is unreachable while this #if 1 block is in effect. */
	if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
		return 0;
#endif
	return 1;
}
/* Round x up to the next multiple of y (y need not be a power of two). */
#define roundup(x, y)  ((((x)+((y)-1))/(y))*(y))

/* An ELF note in memory */
struct memelfnote
{
	const char *name;	/* note owner, e.g. "CORE" */
	int type;		/* NT_* note type */
	unsigned int datasz;	/* size of the payload at data */
	void *data;		/* payload (not owned by this struct) */
};
952 static int notesize(struct memelfnote *en)
954 int sz;
956 sz = sizeof(struct elf_note);
957 sz += roundup(strlen(en->name) + 1, 4);
958 sz += roundup(en->datasz, 4);
960 return sz;
/* Helpers for writenote(): bail out with 0 on any short write or seek. */
#define DUMP_WRITE(addr, nr)	\
	do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
#define DUMP_SEEK(off)	\
	do { if (!dump_seek(file, (off))) return 0; } while(0)
/*
 * Emit one note record (header, name, payload) to the core file,
 * 4-byte aligning the name and payload.  Returns 1 on success,
 * 0 on any write/seek failure (via DUMP_WRITE/DUMP_SEEK).
 */
static int writenote(struct memelfnote *men, struct file *file)
{
	struct elf_note en;

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);
	/* XXX - cast from long long to long to avoid need for libgcc.a */
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
	DUMP_WRITE(men->data, men->datasz);
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */

	return 1;
}
#undef DUMP_WRITE
#undef DUMP_SEEK

/* Redefined for elf_core_dump(): enforce the coredump size limit and
 * jump to end_coredump on failure.  Note: these are bare if statements,
 * not do-while(0) — beware when using them under an unbraced else. */
#define DUMP_WRITE(addr, nr)	\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off)	\
	if (!dump_seek(file, (off))) \
		goto end_coredump;
995 static inline void fill_elf_header(struct elfhdr *elf, int segs)
997 memcpy(elf->e_ident, ELFMAG, SELFMAG);
998 elf->e_ident[EI_CLASS] = ELF_CLASS;
999 elf->e_ident[EI_DATA] = ELF_DATA;
1000 elf->e_ident[EI_VERSION] = EV_CURRENT;
1001 memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
1003 elf->e_type = ET_CORE;
1004 elf->e_machine = ELF_ARCH;
1005 elf->e_version = EV_CURRENT;
1006 elf->e_entry = 0;
1007 elf->e_phoff = sizeof(struct elfhdr);
1008 elf->e_shoff = 0;
1009 elf->e_flags = 0;
1010 elf->e_ehsize = sizeof(struct elfhdr);
1011 elf->e_phentsize = sizeof(struct elf_phdr);
1012 elf->e_phnum = segs;
1013 elf->e_shentsize = 0;
1014 elf->e_shnum = 0;
1015 elf->e_shstrndx = 0;
1016 return;
1019 static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
1021 phdr->p_type = PT_NOTE;
1022 phdr->p_offset = offset;
1023 phdr->p_vaddr = 0;
1024 phdr->p_paddr = 0;
1025 phdr->p_filesz = sz;
1026 phdr->p_memsz = 0;
1027 phdr->p_flags = 0;
1028 phdr->p_align = 0;
1029 return;
1032 static inline void fill_note(struct memelfnote *note, const char *name, int type,
1033 unsigned int sz, void *data)
1035 note->name = name;
1036 note->type = type;
1037 note->datasz = sz;
1038 note->data = data;
1039 return;
/*
 * fill up all the fields in prstatus from the given task struct, except registers
 * which need to be filled up separately.
 */
static inline void fill_prstatus(struct elf_prstatus *prstatus, struct task_struct *p, long signr)
{
	/* signr is both the current signal and the si_signo of the info. */
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	/* Only the first word of each signal set is reported. */
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = p->pid;
	prstatus->pr_ppid = p->parent->pid;
	prstatus->pr_pgrp = p->pgrp;
	prstatus->pr_sid = p->session;
	/* CPU times are kept in jiffies; convert for the core format. */
	jiffies_to_timeval(p->utime, &prstatus->pr_utime);
	jiffies_to_timeval(p->stime, &prstatus->pr_stime);
	jiffies_to_timeval(p->cutime, &prstatus->pr_cutime);
	jiffies_to_timeval(p->cstime, &prstatus->pr_cstime);
}
/*
 * Fill the NT_PRPSINFO record: command line (from user memory), ids,
 * scheduling state and command name of task p.
 */
static inline void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p)
{
	int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = p->mm->arg_end - p->mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	/* NOTE(review): copy_from_user return value is ignored; on fault
	 * pr_psargs simply stays (partially) zeroed — confirm acceptable. */
	copy_from_user(&psinfo->pr_psargs,
		       (const char *)p->mm->arg_start, len);
	/* argv strings are NUL-separated in memory; join with spaces. */
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = p->pid;
	psinfo->pr_ppid = p->parent->pid;
	psinfo->pr_pgrp = p->pgrp;
	psinfo->pr_sid = p->session;

	/* Map the task state bitmask to an index, then to a ps-style letter. */
	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDZTD"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	/* Legacy 16-bit uid/gid fields in the core format. */
	psinfo->pr_uid = NEW_TO_OLD_UID(p->uid);
	psinfo->pr_gid = NEW_TO_OLD_GID(p->gid);
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
}
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;		/* link in the per-dump thread list */
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* NT_PRXFPREG */
#endif
	struct memelfnote notes[3];	/* up to 3 notes built from the above */
	int num_notes;			/* how many of notes[] are valid */
};
/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every threads pr_status and then
 * create a single section for them in the final core file.
 *
 * Returns the total on-file size of the notes built for thread p,
 * or 0 if the status record could not be allocated.
 */
static int elf_dump_thread_status(long signr, struct task_struct * p, struct list_head * thread_list)
{
	struct elf_thread_status *t;
	int sz = 0;

	/* GFP_ATOMIC: presumably called with a lock held — do not sleep. */
	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return 0;
	memset(t, 0, sizeof(*t));

	INIT_LIST_HEAD(&t->list);
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	/* FPU state is optional: only emitted if the task ever used the FPU. */
	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, &t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	list_add(&t->list, thread_list);
	return sz;
}
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 *
 * Returns nonzero once anything has been written to @file.
 * NOTE(review): DUMP_WRITE()/DUMP_SEEK() are macros defined elsewhere
 * in this file; they appear to bail out to end_coredump on error and
 * presumably consult 'size'/'limit' below for RLIMIT_CORE truncation
 * -- confirm against their definitions.
 */
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
{
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;			/* number of VMAs to dump */
	size_t size = 0;		/* see NOTE above: used by the DUMP_* macros */
	int i;
	struct vm_area_struct *vma;
	struct elfhdr elf;
	off_t offset = 0, dataoff;
	unsigned long limit = current->rlim[RLIMIT_CORE].rlim_cur;
	int numnote = 5;		/* shrinks when FPU/XFPU notes are absent */
	struct memelfnote notes[5];
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo psinfo;	/* NT_PRPSINFO */
	struct task_struct *g, *p;
	LIST_HEAD(thread_list);		/* elf_thread_status records for siblings */
	struct list_head *t;
	elf_fpregset_t fpu;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;
#endif
	int thread_status_size = 0;

	/* We no longer stop all vm operations.
	 *
	 * This is because those processes that could possibly change the
	 * map_count or the mmap / vma pages are now blocked in do_exit on
	 * current finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated.  So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/* capture the status of all other threads */
	if (signr) {
		read_lock(&tasklist_lock);
		do_each_thread(g,p)
			if (current->mm == p->mm && current != p) {
				int sz = elf_dump_thread_status(signr, p, &thread_list);
				if (!sz) {
					/* allocation failure: abandon the dump */
					read_unlock(&tasklist_lock);
					goto cleanup;
				} else
					thread_status_size += sz;
			}
		while_each_thread(g,p);
		read_unlock(&tasklist_lock);
	}

	/* now collect the dump for the current */
	memset(&prstatus, 0, sizeof(prstatus));
	fill_prstatus(&prstatus, current, signr);
	elf_core_copy_regs(&prstatus.pr_reg, regs);

	segs = current->mm->map_count;

	/* Set up header */
	fill_elf_header(&elf, segs+1);	/* including notes section */

	/* The dump writes kernel-space buffers through the file ops,
	 * so lift the user-address-limit check for the duration. */
	fs = get_fs();
	set_fs(KERNEL_DS);

	has_dumped = 1;
	current->flags |= PF_DUMPCORE;

	DUMP_WRITE(&elf, sizeof(elf));
	offset += sizeof(elf);				/* Elf header */
	offset += (segs+1) * sizeof(struct elf_phdr);	/* Program headers */

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(&notes[0], "CORE", NT_PRSTATUS, sizeof(prstatus), &prstatus);

	fill_psinfo(&psinfo, current->group_leader);
	fill_note(&notes[1], "CORE", NT_PRPSINFO, sizeof(psinfo), &psinfo);

	fill_note(&notes[2], "CORE", NT_TASKSTRUCT, sizeof(*current), current);

	/* Try to dump the FPU. */
	if ((prstatus.pr_fpvalid = elf_core_copy_task_fpregs(current, &fpu)))
		fill_note(&notes[3], "CORE", NT_PRFPREG, sizeof(fpu), &fpu);
	else
		--numnote;
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, &xfpu))
		fill_note(&notes[4], "LINUX", NT_PRXFPREG, sizeof(xfpu), &xfpu);
	else
		--numnote;
#else
	/* No extended FP state on this arch: notes[4] is never used. */
	numnote --;
#endif

	/* Write notes phdr entry */
	{
		struct elf_phdr phdr;
		int sz = 0;

		for(i = 0; i < numnote; i++)
			sz += notesize(&notes[i]);

		sz += thread_status_size;

		fill_elf_note_phdr(&phdr, sz, offset);
		offset += sz;
		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	/* Page-align dumped data */
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	/* Write program headers for segments dump */
	for(vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct elf_phdr phdr;
		size_t sz;

		sz = vma->vm_end - vma->vm_start;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		/* maydump() decides whether the segment contents are
		 * written; the program header is emitted either way
		 * (p_filesz == 0 marks an omitted segment). */
		phdr.p_filesz = maydump(vma) ? sz : 0;
		phdr.p_memsz = sz;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	/* write out the notes section */
	for(i = 0; i < numnote; i++)
		if (!writenote(&notes[i], file))
			goto end_coredump;

	/* write out the thread status notes section */
	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file))
				goto end_coredump;
	}

	DUMP_SEEK(dataoff);

	for(vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		unsigned long addr;

		if (!maydump(vma))
			continue;

		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			struct page* page;
			struct vm_area_struct *vma;	/* shadows outer vma for this page */

			/* get_user_pages(..., force=1) pulls the page in so
			 * swapped-out data gets dumped too; an unmapped or
			 * zero page becomes a hole (seek) in the file. */
			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
						&page, &vma) <= 0) {
				DUMP_SEEK (file->f_pos + PAGE_SIZE);
			} else {
				if (page == ZERO_PAGE(addr)) {
					DUMP_SEEK (file->f_pos + PAGE_SIZE);
				} else {
					void *kaddr;
					flush_cache_page(vma, addr);
					kaddr = kmap(page);
					DUMP_WRITE(kaddr, PAGE_SIZE);
					flush_page_to_ram(page);
					kunmap(page);
				}
				page_cache_release(page);
			}
		}
	}

	if ((off_t) file->f_pos != offset) {
		/* Sanity check */
		printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
		       (off_t) file->f_pos, offset);
	}

end_coredump:
	set_fs(fs);

cleanup:
	/* Free the per-thread status records collected above. */
	while(!list_empty(&thread_list)) {
		struct list_head *tmp = thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	return has_dumped;
}
1362 #endif /* USE_ELF_CORE_DUMP */
1364 static int __init init_elf_binfmt(void)
1366 return register_binfmt(&elf_format);
1369 static void __exit exit_elf_binfmt(void)
1371 /* Remove the COFF and ELF loaders. */
1372 unregister_binfmt(&elf_format);
/* Wire the loader up as built-in code or as a loadable module. */
module_init(init_elf_binfmt)
module_exit(exit_elf_binfmt)
MODULE_LICENSE("GPL");