/*
 *  linux/fs/exec.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/swap.h>
#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/cn_proc.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
int core_uses_pid;
char core_pattern[65] = "core";
int suid_dumpable = 0;

EXPORT_SYMBOL(suid_dumpable);
/* The maximal length of core_pattern is also specified in sysctl.c */

static struct linux_binfmt *formats;
static DEFINE_RWLOCK(binfmt_lock);
int register_binfmt(struct linux_binfmt * fmt)
{
        struct linux_binfmt ** tmp = &formats;

        if (!fmt)
                return -EINVAL;
        if (fmt->next)
                return -EBUSY;
        write_lock(&binfmt_lock);
        while (*tmp) {
                if (fmt == *tmp) {
                        write_unlock(&binfmt_lock);
                        return -EBUSY;
                }
                tmp = &(*tmp)->next;
        }
        fmt->next = formats;
        formats = fmt;
        write_unlock(&binfmt_lock);
        return 0;
}

EXPORT_SYMBOL(register_binfmt);
int unregister_binfmt(struct linux_binfmt * fmt)
{
        struct linux_binfmt ** tmp = &formats;

        write_lock(&binfmt_lock);
        while (*tmp) {
                if (fmt == *tmp) {
                        *tmp = fmt->next;
                        write_unlock(&binfmt_lock);
                        return 0;
                }
                tmp = &(*tmp)->next;
        }
        write_unlock(&binfmt_lock);
        return -EINVAL;
}

EXPORT_SYMBOL(unregister_binfmt);
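/*
 * Usage sketch (illustrative, not part of this file): a binary-format
 * module typically registers itself from its module init hook and
 * unregisters on exit. The names example_format and load_example_binary
 * below are hypothetical; only register_binfmt()/unregister_binfmt() and
 * the struct linux_binfmt fields are real:
 *
 *      static int load_example_binary(struct linux_binprm *bprm,
 *                                     struct pt_regs *regs);
 *
 *      static struct linux_binfmt example_format = {
 *              .module         = THIS_MODULE,
 *              .load_binary    = load_example_binary,
 *      };
 *
 *      static int __init init_example_binfmt(void)
 *      {
 *              return register_binfmt(&example_format);
 *      }
 *
 *      static void __exit exit_example_binfmt(void)
 *      {
 *              unregister_binfmt(&example_format);
 *      }
 */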
static inline void put_binfmt(struct linux_binfmt * fmt)
{
        module_put(fmt->module);
}

/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that we take the address to load from from the file itself.
 */
asmlinkage long sys_uselib(const char __user * library)
{
        struct file * file;
        struct nameidata nd;
        int error;

        error = __user_path_lookup_open(library, LOOKUP_FOLLOW, &nd, FMODE_READ);
        if (error)
                goto out;

        error = -EINVAL;
        if (!S_ISREG(nd.dentry->d_inode->i_mode))
                goto exit;

        error = vfs_permission(&nd, MAY_READ | MAY_EXEC);
        if (error)
                goto exit;

        file = nameidata_to_filp(&nd, O_RDONLY);
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto out;

        error = -ENOEXEC;
        if (file->f_op) {
                struct linux_binfmt * fmt;

                read_lock(&binfmt_lock);
                for (fmt = formats ; fmt ; fmt = fmt->next) {
                        if (!fmt->load_shlib)
                                continue;
                        if (!try_module_get(fmt->module))
                                continue;
                        read_unlock(&binfmt_lock);
                        error = fmt->load_shlib(file);
                        read_lock(&binfmt_lock);
                        put_binfmt(fmt);
                        if (error != -ENOEXEC)
                                break;
                }
                read_unlock(&binfmt_lock);
        }
        fput(file);
out:
        return error;
exit:
        release_open_intent(&nd);
        path_release(&nd);
        goto out;
}
/*
 * count() counts the number of strings in array ARGV.
 */
static int count(char __user * __user * argv, int max)
{
        int i = 0;

        if (argv != NULL) {
                for (;;) {
                        char __user * p;

                        if (get_user(p, argv))
                                return -EFAULT;
                        if (!p)
                                break;
                        argv++;
                        if (++i > max)
                                return -E2BIG;
                        cond_resched();
                }
        }
        return i;
}
/*
 * 'copy_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 */
static int copy_strings(int argc, char __user * __user * argv,
                        struct linux_binprm *bprm)
{
        struct page *kmapped_page = NULL;
        char *kaddr = NULL;
        int ret;

        while (argc-- > 0) {
                char __user *str;
                int len;
                unsigned long pos;

                if (get_user(str, argv+argc) ||
                                !(len = strnlen_user(str, bprm->p))) {
                        ret = -EFAULT;
                        goto out;
                }

                if (bprm->p < len) {
                        ret = -E2BIG;
                        goto out;
                }

                bprm->p -= len;
                /* XXX: add architecture specific overflow check here. */
                pos = bprm->p;

                while (len > 0) {
                        int i, new, err;
                        int offset, bytes_to_copy;
                        struct page *page;

                        offset = pos % PAGE_SIZE;
                        i = pos/PAGE_SIZE;
                        page = bprm->page[i];
                        new = 0;
                        if (!page) {
                                page = alloc_page(GFP_HIGHUSER);
                                bprm->page[i] = page;
                                if (!page) {
                                        ret = -ENOMEM;
                                        goto out;
                                }
                                new = 1;
                        }

                        if (page != kmapped_page) {
                                if (kmapped_page)
                                        kunmap(kmapped_page);
                                kmapped_page = page;
                                kaddr = kmap(kmapped_page);
                        }
                        if (new && offset)
                                memset(kaddr, 0, offset);
                        bytes_to_copy = PAGE_SIZE - offset;
                        if (bytes_to_copy > len) {
                                bytes_to_copy = len;
                                if (new)
                                        memset(kaddr+offset+len, 0,
                                                PAGE_SIZE-offset-len);
                        }
                        err = copy_from_user(kaddr+offset, str, bytes_to_copy);
                        if (err) {
                                ret = -EFAULT;
                                goto out;
                        }

                        pos += bytes_to_copy;
                        str += bytes_to_copy;
                        len -= bytes_to_copy;
                }
        }
        ret = 0;
out:
        if (kmapped_page)
                kunmap(kmapped_page);
        return ret;
}

/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, char ** argv, struct linux_binprm *bprm)
{
        int r;
        mm_segment_t oldfs = get_fs();
        set_fs(KERNEL_DS);
        r = copy_strings(argc, (char __user * __user *)argv, bprm);
        set_fs(oldfs);
        return r;
}

EXPORT_SYMBOL(copy_strings_kernel);
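/*
 * Layout note (derived from copy_strings() above and do_execve() below):
 * bprm->p starts at PAGE_SIZE*MAX_ARG_PAGES - sizeof(void *) and moves
 * downward as each string is copied, so bprm->page[MAX_ARG_PAGES-1] is
 * populated first and the strings end up packed against the top of the
 * arg-page area, ready to sit at the top of the new user stack:
 *
 *      bprm->page[0] ..... bprm->page[i] ..... bprm->page[MAX_ARG_PAGES-1]
 *                          ^ bprm->p grows down    ^ strings packed here
 */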
#ifdef CONFIG_MMU

/*
 * This routine is used to map in a page into an address space: needed by
 * execve() for the initial stack and environment pages.
 *
 * vma->vm_mm->mmap_sem is held for writing.
 */
void install_arg_page(struct vm_area_struct *vma,
                        struct page *page, unsigned long address)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t * pte;
        spinlock_t *ptl;

        if (unlikely(anon_vma_prepare(vma)))
                goto out;

        flush_dcache_page(page);
        pte = get_locked_pte(mm, address, &ptl);
        if (!pte)
                goto out;
        if (!pte_none(*pte)) {
                pte_unmap_unlock(pte, ptl);
                goto out;
        }
        inc_mm_counter(mm, anon_rss);
        lru_cache_add_active(page);
        set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
                                        page, vma->vm_page_prot))));
        page_add_new_anon_rmap(page, vma, address);
        pte_unmap_unlock(pte, ptl);

        /* no need for flush_tlb */
        return;
out:
        __free_page(page);
        force_sig(SIGKILL, current);
}

#define EXTRA_STACK_VM_PAGES    20      /* random */
int setup_arg_pages(struct linux_binprm *bprm,
                    unsigned long stack_top,
                    int executable_stack)
{
        unsigned long stack_base;
        struct vm_area_struct *mpnt;
        struct mm_struct *mm = current->mm;
        int i, ret;
        long arg_size;

#ifdef CONFIG_STACK_GROWSUP
        /* Move the argument and environment strings to the bottom of the
         * stack space.
         */
        int offset, j;
        char *to, *from;

        /* Start by shifting all the pages down */
        i = 0;
        for (j = 0; j < MAX_ARG_PAGES; j++) {
                struct page *page = bprm->page[j];
                if (!page)
                        continue;
                bprm->page[i++] = page;
        }

        /* Now move them within their pages */
        offset = bprm->p % PAGE_SIZE;
        to = kmap(bprm->page[0]);
        for (j = 1; j < i; j++) {
                memmove(to, to + offset, PAGE_SIZE - offset);
                from = kmap(bprm->page[j]);
                memcpy(to + PAGE_SIZE - offset, from, offset);
                kunmap(bprm->page[j - 1]);
                to = from;
        }
        memmove(to, to + offset, PAGE_SIZE - offset);
        kunmap(bprm->page[j - 1]);

        /* Limit stack size to 1GB */
        stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
        if (stack_base > (1 << 30))
                stack_base = 1 << 30;
        stack_base = PAGE_ALIGN(stack_top - stack_base);

        /* Adjust bprm->p to point to the end of the strings. */
        bprm->p = stack_base + PAGE_SIZE * i - offset;

        mm->arg_start = stack_base;
        arg_size = i << PAGE_SHIFT;

        /* zero pages that were copied above */
        while (i < MAX_ARG_PAGES)
                bprm->page[i++] = NULL;
#else
        stack_base = arch_align_stack(stack_top - MAX_ARG_PAGES*PAGE_SIZE);
        stack_base = PAGE_ALIGN(stack_base);
        bprm->p += stack_base;
        mm->arg_start = bprm->p;
        arg_size = stack_top - (PAGE_MASK & (unsigned long) mm->arg_start);
#endif

        arg_size += EXTRA_STACK_VM_PAGES * PAGE_SIZE;

        if (bprm->loader)
                bprm->loader += stack_base;
        bprm->exec += stack_base;

        mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!mpnt)
                return -ENOMEM;

        memset(mpnt, 0, sizeof(*mpnt));

        down_write(&mm->mmap_sem);
        {
                mpnt->vm_mm = mm;
#ifdef CONFIG_STACK_GROWSUP
                mpnt->vm_start = stack_base;
                mpnt->vm_end = stack_base + arg_size;
#else
                mpnt->vm_end = stack_top;
                mpnt->vm_start = mpnt->vm_end - arg_size;
#endif
                /* Adjust stack execute permissions; explicitly enable
                 * for EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X
                 * and leave alone (arch default) otherwise. */
                if (unlikely(executable_stack == EXSTACK_ENABLE_X))
                        mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC;
                else if (executable_stack == EXSTACK_DISABLE_X)
                        mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
                else
                        mpnt->vm_flags = VM_STACK_FLAGS;
                mpnt->vm_flags |= mm->def_flags;
                mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
                if ((ret = insert_vm_struct(mm, mpnt))) {
                        up_write(&mm->mmap_sem);
                        kmem_cache_free(vm_area_cachep, mpnt);
                        return ret;
                }
                mm->stack_vm = mm->total_vm = vma_pages(mpnt);
        }

        for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
                struct page *page = bprm->page[i];
                if (page) {
                        bprm->page[i] = NULL;
                        install_arg_page(mpnt, page, stack_base);
                }
                stack_base += PAGE_SIZE;
        }
        up_write(&mm->mmap_sem);

        return 0;
}

EXPORT_SYMBOL(setup_arg_pages);
#define free_arg_pages(bprm) do { } while (0)

#else

static inline void free_arg_pages(struct linux_binprm *bprm)
{
        int i;

        for (i = 0; i < MAX_ARG_PAGES; i++) {
                if (bprm->page[i])
                        __free_page(bprm->page[i]);
                bprm->page[i] = NULL;
        }
}

#endif /* CONFIG_MMU */
struct file *open_exec(const char *name)
{
        struct nameidata nd;
        int err;
        struct file *file;

        err = path_lookup_open(name, LOOKUP_FOLLOW, &nd, FMODE_READ);
        file = ERR_PTR(err);

        if (!err) {
                struct inode *inode = nd.dentry->d_inode;
                file = ERR_PTR(-EACCES);
                if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
                    S_ISREG(inode->i_mode)) {
                        int err = vfs_permission(&nd, MAY_EXEC);
                        if (!err && !(inode->i_mode & 0111))
                                err = -EACCES;
                        file = ERR_PTR(err);
                        if (!err) {
                                file = nameidata_to_filp(&nd, O_RDONLY);
                                if (!IS_ERR(file)) {
                                        err = deny_write_access(file);
                                        if (err) {
                                                fput(file);
                                                file = ERR_PTR(err);
                                        }
                                }
out:
                                return file;
                        }
                }
                release_open_intent(&nd);
                path_release(&nd);
        }
        goto out;
}

EXPORT_SYMBOL(open_exec);
int kernel_read(struct file *file, unsigned long offset,
        char *addr, unsigned long count)
{
        mm_segment_t old_fs;
        loff_t pos = offset;
        int result;

        old_fs = get_fs();
        set_fs(get_ds());
        /* The cast to a user pointer is valid due to the set_fs() */
        result = vfs_read(file, (void __user *)addr, count, &pos);
        set_fs(old_fs);
        return result;
}

EXPORT_SYMBOL(kernel_read);
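/*
 * Usage sketch (illustrative; the offset and size variables below are
 * placeholders): a binfmt handler that needs more of the image than the
 * BINPRM_BUF_SIZE bytes buffered by prepare_binprm() can pull it in with
 * kernel_read(), e.g. to fetch a program-header table:
 *
 *      char *hdrs = kmalloc(size, GFP_KERNEL);
 *      if (!hdrs)
 *              return -ENOMEM;
 *      retval = kernel_read(bprm->file, offset, hdrs, size);
 *      if (retval != size) {
 *              if (retval >= 0)
 *                      retval = -EIO;
 *              goto out_free_hdrs;
 *      }
 */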
static int exec_mmap(struct mm_struct *mm)
{
        struct task_struct *tsk;
        struct mm_struct * old_mm, *active_mm;

        /* Notify parent that we're no longer interested in the old VM */
        tsk = current;
        old_mm = current->mm;
        mm_release(tsk, old_mm);

        if (old_mm) {
                /*
                 * Make sure that if there is a core dump in progress
                 * for the old mm, we get out and die instead of going
                 * through with the exec. We must hold mmap_sem around
                 * checking core_waiters and changing tsk->mm. The
                 * core-inducing thread will increment core_waiters for
                 * each thread whose ->mm == old_mm.
                 */
                down_read(&old_mm->mmap_sem);
                if (unlikely(old_mm->core_waiters)) {
                        up_read(&old_mm->mmap_sem);
                        return -EINTR;
                }
        }
        task_lock(tsk);
        active_mm = tsk->active_mm;
        tsk->mm = mm;
        tsk->active_mm = mm;
        activate_mm(active_mm, mm);
        task_unlock(tsk);
        arch_pick_mmap_layout(mm);
        if (old_mm) {
                up_read(&old_mm->mmap_sem);
                BUG_ON(active_mm != old_mm);
                mmput(old_mm);
                return 0;
        }
        mmdrop(active_mm);
        return 0;
}
/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes. (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static inline int de_thread(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
        spinlock_t *lock = &oldsighand->siglock;
        struct task_struct *leader = NULL;
        int count;

        /*
         * If we don't share sighandlers, then we aren't sharing anything
         * and we can just re-use it all.
         */
        if (atomic_read(&oldsighand->count) <= 1) {
                BUG_ON(atomic_read(&sig->count) != 1);
                exit_itimers(sig);
                return 0;
        }

        newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
        if (!newsighand)
                return -ENOMEM;

        if (thread_group_empty(current))
                goto no_thread_group;

        /*
         * Kill all other threads in the thread group.
         * We must hold tasklist_lock to call zap_other_threads.
         */
        read_lock(&tasklist_lock);
        spin_lock_irq(lock);
        if (sig->flags & SIGNAL_GROUP_EXIT) {
                /*
                 * Another group action in progress, just
                 * return so that the signal is processed.
                 */
                spin_unlock_irq(lock);
                read_unlock(&tasklist_lock);
                kmem_cache_free(sighand_cachep, newsighand);
                return -EAGAIN;
        }
        zap_other_threads(current);
        read_unlock(&tasklist_lock);

        /*
         * Account for the thread group leader hanging around:
         */
        count = 1;
        if (!thread_group_leader(current)) {
                count = 2;
                /*
                 * The SIGALRM timer survives the exec, but needs to point
                 * at us as the new group leader now. We have a race with
                 * a timer firing now getting the old leader, so we need to
                 * synchronize with any firing (by calling del_timer_sync)
                 * before we can safely let the old group leader die.
                 */
                sig->real_timer.data = (unsigned long)current;
                spin_unlock_irq(lock);
                if (del_timer_sync(&sig->real_timer))
                        add_timer(&sig->real_timer);
                spin_lock_irq(lock);
        }
        while (atomic_read(&sig->count) > count) {
                sig->group_exit_task = current;
                sig->notify_count = count;
                __set_current_state(TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(lock);
                schedule();
                spin_lock_irq(lock);
        }
        sig->group_exit_task = NULL;
        sig->notify_count = 0;
        spin_unlock_irq(lock);

        /*
         * At this point all other threads have exited, all we have to
         * do is to wait for the thread group leader to become inactive,
         * and to assume its PID:
         */
        if (!thread_group_leader(current)) {
                struct task_struct *parent;
                struct dentry *proc_dentry1, *proc_dentry2;
                unsigned long ptrace;

                /*
                 * Wait for the thread group leader to be a zombie.
                 * It should already be zombie at this point, most
                 * of the time.
                 */
                leader = current->group_leader;
                while (leader->exit_state != EXIT_ZOMBIE)
                        yield();

                spin_lock(&leader->proc_lock);
                spin_lock(&current->proc_lock);
                proc_dentry1 = proc_pid_unhash(current);
                proc_dentry2 = proc_pid_unhash(leader);
                write_lock_irq(&tasklist_lock);

                BUG_ON(leader->tgid != current->tgid);
                BUG_ON(current->pid == current->tgid);
                /*
                 * An exec() starts a new thread group with the
                 * TGID of the previous thread group. Rehash the
                 * two threads with a switched PID, and release
                 * the former thread group leader:
                 */
                ptrace = leader->ptrace;
                parent = leader->parent;
                if (unlikely(ptrace) && unlikely(parent == current)) {
                        /*
                         * Joker was ptracing his own group leader,
                         * and now he wants to be his own parent!
                         * We can't have that.
                         */
                        ptrace = 0;
                }

                ptrace_unlink(current);
                ptrace_unlink(leader);
                remove_parent(current);
                remove_parent(leader);

                switch_exec_pids(leader, current);

                current->parent = current->real_parent = leader->real_parent;
                leader->parent = leader->real_parent = child_reaper;
                current->group_leader = current;
                leader->group_leader = leader;

                add_parent(current, current->parent);
                add_parent(leader, leader->parent);
                if (ptrace) {
                        current->ptrace = ptrace;
                        __ptrace_link(current, parent);
                }

                list_del(&current->tasks);
                list_add_tail(&current->tasks, &init_task.tasks);
                current->exit_signal = SIGCHLD;

                BUG_ON(leader->exit_state != EXIT_ZOMBIE);
                leader->exit_state = EXIT_DEAD;

                write_unlock_irq(&tasklist_lock);
                spin_unlock(&leader->proc_lock);
                spin_unlock(&current->proc_lock);
                proc_pid_flush(proc_dentry1);
                proc_pid_flush(proc_dentry2);
        }

        /*
         * There may be one thread left which is just exiting,
         * but it's safe to stop telling the group to kill themselves.
         */
        sig->flags = 0;

no_thread_group:
        exit_itimers(sig);
        if (leader)
                release_task(leader);

        BUG_ON(atomic_read(&sig->count) != 1);

        if (atomic_read(&oldsighand->count) == 1) {
                /*
                 * Now that we nuked the rest of the thread group,
                 * it turns out we are not sharing sighand any more either.
                 * So we can just keep it.
                 */
                kmem_cache_free(sighand_cachep, newsighand);
        } else {
                /*
                 * Move our state over to newsighand and switch it in.
                 */
                spin_lock_init(&newsighand->siglock);
                atomic_set(&newsighand->count, 1);
                memcpy(newsighand->action, oldsighand->action,
                       sizeof(newsighand->action));

                write_lock_irq(&tasklist_lock);
                spin_lock(&oldsighand->siglock);
                spin_lock(&newsighand->siglock);

                current->sighand = newsighand;
                recalc_sigpending();

                spin_unlock(&newsighand->siglock);
                spin_unlock(&oldsighand->siglock);
                write_unlock_irq(&tasklist_lock);

                if (atomic_dec_and_test(&oldsighand->count))
                        kmem_cache_free(sighand_cachep, oldsighand);
        }

        BUG_ON(!thread_group_leader(current));
        return 0;
}
/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */
static inline void flush_old_files(struct files_struct * files)
{
        long j = -1;
        struct fdtable *fdt;

        spin_lock(&files->file_lock);
        for (;;) {
                unsigned long set, i;

                j++;
                i = j * __NFDBITS;
                fdt = files_fdtable(files);
                if (i >= fdt->max_fds || i >= fdt->max_fdset)
                        break;
                set = fdt->close_on_exec->fds_bits[j];
                if (!set)
                        continue;
                fdt->close_on_exec->fds_bits[j] = 0;
                spin_unlock(&files->file_lock);
                for ( ; set ; i++, set >>= 1) {
                        if (set & 1) {
                                sys_close(i);
                        }
                }
                spin_lock(&files->file_lock);
        }
        spin_unlock(&files->file_lock);
}
void get_task_comm(char *buf, struct task_struct *tsk)
{
        /* buf must be at least sizeof(tsk->comm) in size */
        task_lock(tsk);
        strncpy(buf, tsk->comm, sizeof(tsk->comm));
        task_unlock(tsk);
}

void set_task_comm(struct task_struct *tsk, char *buf)
{
        task_lock(tsk);
        strlcpy(tsk->comm, buf, sizeof(tsk->comm));
        task_unlock(tsk);
}
int flush_old_exec(struct linux_binprm * bprm)
{
        char * name;
        int i, ch, retval;
        struct files_struct *files;
        char tcomm[sizeof(current->comm)];

        /*
         * Make sure we have a private signal table and that
         * we are unassociated from the previous thread group.
         */
        retval = de_thread(current);
        if (retval)
                goto out;

        /*
         * Make sure we have private file handles. Ask the
         * fork helper to do the work for us and the exit
         * helper to do the cleanup of the old one.
         */
        files = current->files;         /* refcounted so safe to hold */
        retval = unshare_files();
        if (retval)
                goto out;
        /*
         * Release all of the old mmap stuff
         */
        retval = exec_mmap(bprm->mm);
        if (retval)
                goto mmap_failed;

        bprm->mm = NULL;                /* We're using it now */

        /* This is the point of no return */
        steal_locks(files);
        put_files_struct(files);

        current->sas_ss_sp = current->sas_ss_size = 0;

        if (current->euid == current->uid && current->egid == current->gid)
                current->mm->dumpable = 1;
        else
                current->mm->dumpable = suid_dumpable;

        name = bprm->filename;

        /* Copies the binary name from after last slash */
        for (i = 0; (ch = *(name++)) != '\0';) {
                if (ch == '/')
                        i = 0; /* overwrite what we wrote */
                else
                        if (i < (sizeof(tcomm) - 1))
                                tcomm[i++] = ch;
        }
        tcomm[i] = '\0';
        set_task_comm(current, tcomm);

        current->flags &= ~PF_RANDOMIZE;
        flush_thread();

        if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
            file_permission(bprm->file, MAY_READ) ||
            (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
                suid_keys(current);
                current->mm->dumpable = suid_dumpable;
        }

        /* An exec changes our domain. We are no longer part of the thread
           group */

        current->self_exec_id++;

        flush_signal_handlers(current, 0);
        flush_old_files(current->files);

        return 0;

mmap_failed:
        put_files_struct(current->files);
        current->files = files;
out:
        return retval;
}

EXPORT_SYMBOL(flush_old_exec);
/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 */
int prepare_binprm(struct linux_binprm *bprm)
{
        int mode;
        struct inode * inode = bprm->file->f_dentry->d_inode;
        int retval;

        mode = inode->i_mode;
        /*
         * Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
         * generic_permission lets a non-executable through
         */
        if (!(mode & 0111))     /* with at least _one_ execute bit set */
                return -EACCES;
        if (bprm->file->f_op == NULL)
                return -EACCES;

        bprm->e_uid = current->euid;
        bprm->e_gid = current->egid;

        if (!(bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)) {
                /* Set-uid? */
                if (mode & S_ISUID) {
                        current->personality &= ~PER_CLEAR_ON_SETID;
                        bprm->e_uid = inode->i_uid;
                }

                /* Set-gid? */
                /*
                 * If setgid is set but no group execute bit then this
                 * is a candidate for mandatory locking, not a setgid
                 * executable.
                 */
                if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
                        current->personality &= ~PER_CLEAR_ON_SETID;
                        bprm->e_gid = inode->i_gid;
                }
        }

        /* fill in binprm security blob */
        retval = security_bprm_set(bprm);
        if (retval)
                return retval;

        memset(bprm->buf, 0, BINPRM_BUF_SIZE);
        return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);
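/*
 * After a successful prepare_binprm(), bprm->buf holds the first
 * BINPRM_BUF_SIZE (128) bytes of the image, which is what the registered
 * handlers key off: e.g. "\177ELF" for binfmt_elf or "#!" for
 * binfmt_script (illustrative examples, not an exhaustive list).
 */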
static inline int unsafe_exec(struct task_struct *p)
{
        int unsafe = 0;
        if (p->ptrace & PT_PTRACED) {
                if (p->ptrace & PT_PTRACE_CAP)
                        unsafe |= LSM_UNSAFE_PTRACE_CAP;
                else
                        unsafe |= LSM_UNSAFE_PTRACE;
        }
        if (atomic_read(&p->fs->count) > 1 ||
            atomic_read(&p->files->count) > 1 ||
            atomic_read(&p->sighand->count) > 1)
                unsafe |= LSM_UNSAFE_SHARE;

        return unsafe;
}

void compute_creds(struct linux_binprm *bprm)
{
        int unsafe;

        if (bprm->e_uid != current->uid)
                suid_keys(current);
        exec_keys(current);

        task_lock(current);
        unsafe = unsafe_exec(current);
        security_bprm_apply_creds(bprm, unsafe);
        task_unlock(current);
        security_bprm_post_apply_creds(bprm);
}

EXPORT_SYMBOL(compute_creds);
void remove_arg_zero(struct linux_binprm *bprm)
{
        if (bprm->argc) {
                unsigned long offset;
                char * kaddr;
                struct page *page;

                offset = bprm->p % PAGE_SIZE;
                goto inside;

                while (bprm->p++, *(kaddr+offset++)) {
                        if (offset != PAGE_SIZE)
                                continue;
                        offset = 0;
                        kunmap_atomic(kaddr, KM_USER0);
inside:
                        page = bprm->page[bprm->p/PAGE_SIZE];
                        kaddr = kmap_atomic(page, KM_USER0);
                }
                kunmap_atomic(kaddr, KM_USER0);
                bprm->argc--;
        }
}

EXPORT_SYMBOL(remove_arg_zero);
/*
 * cycle the list of binary formats handler, until one recognizes the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
        int try, retval;
        struct linux_binfmt *fmt;
#ifdef __alpha__
        /* handle /sbin/loader.. */
        {
            struct exec * eh = (struct exec *) bprm->buf;

            if (!bprm->loader && eh->fh.f_magic == 0x183 &&
                (eh->fh.f_flags & 0x3000) == 0x3000)
            {
                struct file * file;
                unsigned long loader;

                allow_write_access(bprm->file);
                fput(bprm->file);
                bprm->file = NULL;

                loader = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);

                file = open_exec("/sbin/loader");
                retval = PTR_ERR(file);
                if (IS_ERR(file))
                        return retval;

                /* Remember if the application is TASO. */
                bprm->sh_bang = eh->ah.entry < 0x100000000UL;

                bprm->file = file;
                bprm->loader = loader;
                retval = prepare_binprm(bprm);
                if (retval < 0)
                        return retval;
                /* should call search_binary_handler recursively here,
                   but it does not matter */
            }
        }
#endif
        retval = security_bprm_check(bprm);
        if (retval)
                return retval;

        /* kernel module loader fixup */
        /* so we don't try to run modprobe in kernel space. */
        set_fs(USER_DS);
        retval = -ENOENT;
        for (try = 0; try < 2; try++) {
                read_lock(&binfmt_lock);
                for (fmt = formats ; fmt ; fmt = fmt->next) {
                        int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
                        if (!fn)
                                continue;
                        if (!try_module_get(fmt->module))
                                continue;
                        read_unlock(&binfmt_lock);
                        retval = fn(bprm, regs);
                        if (retval >= 0) {
                                put_binfmt(fmt);
                                allow_write_access(bprm->file);
                                if (bprm->file)
                                        fput(bprm->file);
                                bprm->file = NULL;
                                current->did_exec = 1;
                                proc_exec_connector(current);
                                return retval;
                        }
                        read_lock(&binfmt_lock);
                        put_binfmt(fmt);
                        if (retval != -ENOEXEC || bprm->mm == NULL)
                                break;
                        if (!bprm->file) {
                                read_unlock(&binfmt_lock);
                                return retval;
                        }
                }
                read_unlock(&binfmt_lock);
                if (retval != -ENOEXEC || bprm->mm == NULL) {
                        break;
#ifdef CONFIG_KMOD
                } else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
                        if (printable(bprm->buf[0]) &&
                            printable(bprm->buf[1]) &&
                            printable(bprm->buf[2]) &&
                            printable(bprm->buf[3]))
                                break; /* -ENOEXEC */
                        request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#endif
                }
        }
        return retval;
}

EXPORT_SYMBOL(search_binary_handler);
/*
 * sys_execve() executes a new program.
 */
int do_execve(char * filename,
        char __user *__user *argv,
        char __user *__user *envp,
        struct pt_regs * regs)
{
        struct linux_binprm *bprm;
        struct file *file;
        int retval;
        int i;

        retval = -ENOMEM;
        bprm = kmalloc(sizeof(*bprm), GFP_KERNEL);
        if (!bprm)
                goto out_ret;
        memset(bprm, 0, sizeof(*bprm));

        file = open_exec(filename);
        retval = PTR_ERR(file);
        if (IS_ERR(file))
                goto out_kfree;

        sched_exec();

        bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);

        bprm->file = file;
        bprm->filename = filename;
        bprm->interp = filename;
        bprm->mm = mm_alloc();
        retval = -ENOMEM;
        if (!bprm->mm)
                goto out_file;

        retval = init_new_context(current, bprm->mm);
        if (retval < 0)
                goto out_mm;

        bprm->argc = count(argv, bprm->p / sizeof(void *));
        if ((retval = bprm->argc) < 0)
                goto out_mm;

        bprm->envc = count(envp, bprm->p / sizeof(void *));
        if ((retval = bprm->envc) < 0)
                goto out_mm;

        retval = security_bprm_alloc(bprm);
        if (retval)
                goto out;

        retval = prepare_binprm(bprm);
        if (retval < 0)
                goto out;

        retval = copy_strings_kernel(1, &bprm->filename, bprm);
        if (retval < 0)
                goto out;

        bprm->exec = bprm->p;
        retval = copy_strings(bprm->envc, envp, bprm);
        if (retval < 0)
                goto out;

        retval = copy_strings(bprm->argc, argv, bprm);
        if (retval < 0)
                goto out;

        retval = search_binary_handler(bprm, regs);
        if (retval >= 0) {
                free_arg_pages(bprm);

                /* execve success */
                security_bprm_free(bprm);
                acct_update_integrals(current);
                kfree(bprm);
                return retval;
        }

out:
        /* Something went wrong, return the inode and free the argument pages*/
        for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
                struct page * page = bprm->page[i];
                if (page)
                        __free_page(page);
        }

        if (bprm->security)
                security_bprm_free(bprm);

out_mm:
        if (bprm->mm)
                mmdrop(bprm->mm);

out_file:
        if (bprm->file) {
                allow_write_access(bprm->file);
                fput(bprm->file);
        }

out_kfree:
        kfree(bprm);

out_ret:
        return retval;
}
int set_binfmt(struct linux_binfmt *new)
{
        struct linux_binfmt *old = current->binfmt;

        if (new) {
                if (!try_module_get(new->module))
                        return -1;
        }
        current->binfmt = new;
        if (old)
                module_put(old->module);
        return 0;
}

EXPORT_SYMBOL(set_binfmt);
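/*
 * Putting the exported pieces together: a minimal load_binary()
 * implementation follows roughly this shape. This is a sketch only --
 * the magic check, example_format, and the start_thread() arguments are
 * placeholders, and real handlers such as binfmt_elf do considerably
 * more work (mapping segments, setting up the ELF tables, etc.):
 *
 *      static int load_example_binary(struct linux_binprm *bprm,
 *                                     struct pt_regs *regs)
 *      {
 *              int retval;
 *
 *              if (memcmp(bprm->buf, "EXMP", 4) != 0)  // not our format
 *                      return -ENOEXEC;
 *
 *              retval = flush_old_exec(bprm);          // point of no return
 *              if (retval)
 *                      return retval;
 *
 *              set_binfmt(&example_format);
 *
 *              retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
 *              if (retval < 0)
 *                      return retval;
 *
 *              compute_creds(bprm);
 *
 *              // ... map the text/data of the image, then transfer control:
 *              start_thread(regs, entry_point, bprm->p);
 *              return 0;
 *      }
 */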
#define CORENAME_MAX_SIZE 64

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static void format_corename(char *corename, const char *pattern, long signr)
{
        const char *pat_ptr = pattern;
        char *out_ptr = corename;
        char *const out_end = corename + CORENAME_MAX_SIZE;
        int rc;
        int pid_in_pattern = 0;

        /* Repeat as long as we have more pattern to process and more output
           space */
        while (*pat_ptr) {
                if (*pat_ptr != '%') {
                        if (out_ptr == out_end)
                                goto out;
                        *out_ptr++ = *pat_ptr++;
                } else {
                        switch (*++pat_ptr) {
                        case 0:
                                goto out;
                        /* Double percent, output one percent */
                        case '%':
                                if (out_ptr == out_end)
                                        goto out;
                                *out_ptr++ = '%';
                                break;
                        /* pid */
                        case 'p':
                                pid_in_pattern = 1;
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%d", current->tgid);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        /* uid */
                        case 'u':
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%d", current->uid);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        /* gid */
                        case 'g':
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%d", current->gid);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        /* signal that caused the coredump */
                        case 's':
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%ld", signr);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        /* UNIX time of coredump */
                        case 't': {
                                struct timeval tv;
                                do_gettimeofday(&tv);
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%lu", tv.tv_sec);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        }
                        /* hostname */
                        case 'h':
                                down_read(&uts_sem);
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%s", system_utsname.nodename);
                                up_read(&uts_sem);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        /* executable */
                        case 'e':
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%s", current->comm);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        default:
                                break;
                        }
                        ++pat_ptr;
                }
        }
        /* Backward compatibility with core_uses_pid:
         *
         * If core_pattern does not include a %p (as is the default)
         * and core_uses_pid is set, then .%pid will be appended to
         * the filename */
        if (!pid_in_pattern
            && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
                rc = snprintf(out_ptr, out_end - out_ptr,
                              ".%d", current->tgid);
                if (rc > out_end - out_ptr)
                        goto out;
                out_ptr += rc;
        }
out:
        *out_ptr = 0;
}
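/*
 * Example expansions (hypothetical values: comm "sh", tgid 4209,
 * signal 11, hostname "box"):
 *
 *      core_pattern            resulting corename
 *      "core"                  "core"   (plus ".4209" if core_uses_pid)
 *      "core.%p"               "core.4209"
 *      "%e-%s.core"            "sh-11.core"
 *      "/var/cores/%h/%e.%p"   "/var/cores/box/sh.4209"
 */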
static void zap_threads(struct mm_struct *mm)
{
        struct task_struct *g, *p;
        struct task_struct *tsk = current;
        struct completion *vfork_done = tsk->vfork_done;
        int traced = 0;

        /*
         * Make sure nobody is waiting for us to release the VM,
         * otherwise we can deadlock when we wait on each other
         */
        if (vfork_done) {
                tsk->vfork_done = NULL;
                complete(vfork_done);
        }

        read_lock(&tasklist_lock);
        do_each_thread(g,p)
                if (mm == p->mm && p != tsk) {
                        force_sig_specific(SIGKILL, p);
                        mm->core_waiters++;
                        if (unlikely(p->ptrace) &&
                            unlikely(p->parent->mm == mm))
                                traced = 1;
                }
        while_each_thread(g,p);

        read_unlock(&tasklist_lock);

        if (unlikely(traced)) {
                /*
                 * We are zapping a thread and the thread it ptraces.
                 * If the tracee went into a ptrace stop for exit tracing,
                 * we could deadlock since the tracer is waiting for this
                 * coredump to finish. Detach them so they can both die.
                 */
                write_lock_irq(&tasklist_lock);
                do_each_thread(g,p) {
                        if (mm == p->mm && p != tsk &&
                            p->ptrace && p->parent->mm == mm) {
                                __ptrace_unlink(p);
                        }
                } while_each_thread(g,p);
                write_unlock_irq(&tasklist_lock);
        }
}
static void coredump_wait(struct mm_struct *mm)
{
        DECLARE_COMPLETION(startup_done);
        int core_waiters;

        mm->core_startup_done = &startup_done;

        zap_threads(mm);
        core_waiters = mm->core_waiters;
        up_write(&mm->mmap_sem);

        if (core_waiters)
                wait_for_completion(&startup_done);
        BUG_ON(mm->core_waiters);
}
int do_coredump(long signr, int exit_code, struct pt_regs * regs)
{
        char corename[CORENAME_MAX_SIZE + 1];
        struct mm_struct *mm = current->mm;
        struct linux_binfmt * binfmt;
        struct inode * inode;
        struct file * file;
        int retval = 0;
        int fsuid = current->fsuid;
        int flag = 0;

        binfmt = current->binfmt;
        if (!binfmt || !binfmt->core_dump)
                goto fail;
        down_write(&mm->mmap_sem);
        if (!mm->dumpable) {
                up_write(&mm->mmap_sem);
                goto fail;
        }

        /*
         * We cannot trust fsuid as being the "true" uid of the
         * process nor do we know its entire history. We only know it
         * was tainted so we dump it as root in mode 2.
         */
        if (mm->dumpable == 2) {        /* Setuid core dump mode */
                flag = O_EXCL;          /* Stop rewrite attacks */
                current->fsuid = 0;     /* Dump root private */
        }
        mm->dumpable = 0;

        retval = -EAGAIN;
        spin_lock_irq(&current->sighand->siglock);
        if (!(current->signal->flags & SIGNAL_GROUP_EXIT)) {
                current->signal->flags = SIGNAL_GROUP_EXIT;
                current->signal->group_exit_code = exit_code;
                retval = 0;
        }
        spin_unlock_irq(&current->sighand->siglock);
        if (retval) {
                up_write(&mm->mmap_sem);
                goto fail;
        }

        init_completion(&mm->core_done);
        coredump_wait(mm);

        /*
         * Clear any false indication of pending signals that might
         * be seen by the filesystem code called to write the core file.
         */
        current->signal->group_stop_count = 0;
        clear_thread_flag(TIF_SIGPENDING);

        if (current->signal->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
                goto fail_unlock;

        /*
         * lock_kernel() because format_corename() is controlled by sysctl,
         * which uses lock_kernel()
         */
        lock_kernel();
        format_corename(corename, core_pattern, signr);
        unlock_kernel();
        file = filp_open(corename, O_CREAT | O_RDWR | O_NOFOLLOW | O_LARGEFILE | flag, 0600);
        if (IS_ERR(file))
                goto fail_unlock;
        inode = file->f_dentry->d_inode;
        if (inode->i_nlink > 1)
                goto close_fail;        /* multiple links - don't dump */
        if (d_unhashed(file->f_dentry))
                goto close_fail;

        if (!S_ISREG(inode->i_mode))
                goto close_fail;
        if (!file->f_op)
                goto close_fail;
        if (!file->f_op->write)
                goto close_fail;
        if (do_truncate(file->f_dentry, 0, file) != 0)
                goto close_fail;

        retval = binfmt->core_dump(signr, regs, file);

        if (retval)
                current->signal->group_exit_code |= 0x80;
close_fail:
        filp_close(file, NULL);
fail_unlock:
        current->fsuid = fsuid;
        complete_all(&mm->core_done);
fail:
        return retval;
}