/*
 * linux/fs/exec.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale.  Use mmap instead,
 * current->executable is only used by the procfs.  This allows a dispatch
 * table to check for several different types of binary formats.  We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/swap.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/rmap.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

int core_uses_pid;
char core_pattern[128] = "core";
int suid_dumpable = 0;

EXPORT_SYMBOL(suid_dumpable);
/* The maximal length of core_pattern is also specified in sysctl.c */

static struct linux_binfmt *formats;
static DEFINE_RWLOCK(binfmt_lock);
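
/*
 * Register a binary format handler on the global formats list so that
 * execve() can try it when loading a new image; fails with -EBUSY if the
 * handler is already on the list.
 */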
int register_binfmt(struct linux_binfmt * fmt)
{
	struct linux_binfmt ** tmp = &formats;

	if (!fmt)
		return -EINVAL;
	if (fmt->next)
		return -EBUSY;
	write_lock(&binfmt_lock);
	while (*tmp) {
		if (fmt == *tmp) {
			write_unlock(&binfmt_lock);
			return -EBUSY;
		}
		tmp = &(*tmp)->next;
	}
	fmt->next = formats;
	formats = fmt;
	write_unlock(&binfmt_lock);
	return 0;
}

EXPORT_SYMBOL(register_binfmt);

int unregister_binfmt(struct linux_binfmt * fmt)
{
	struct linux_binfmt ** tmp = &formats;

	write_lock(&binfmt_lock);
	while (*tmp) {
		if (fmt == *tmp) {
			*tmp = fmt->next;
			write_unlock(&binfmt_lock);
			return 0;
		}
		tmp = &(*tmp)->next;
	}
	write_unlock(&binfmt_lock);
	return -EINVAL;
}

EXPORT_SYMBOL(unregister_binfmt);

static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}

/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that the address to load from is taken from the file itself.
 */
asmlinkage long sys_uselib(const char __user * library)
{
	struct file * file;
	struct nameidata nd;
	int error;

	error = __user_path_lookup_open(library, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
	if (error)
		goto out;

	error = -EINVAL;
	if (!S_ISREG(nd.dentry->d_inode->i_mode))
		goto exit;

	error = vfs_permission(&nd, MAY_READ | MAY_EXEC);
	if (error)
		goto exit;

	file = nameidata_to_filp(&nd, O_RDONLY);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -ENOEXEC;
	if (file->f_op) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		for (fmt = formats ; fmt ; fmt = fmt->next) {
			if (!fmt->load_shlib)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
	fput(file);
out:
	return error;
exit:
	release_open_intent(&nd);
	path_release(&nd);
	goto out;
}

/*
 * count() counts the number of strings in array ARGV.
 */
static int count(char __user * __user * argv, int max)
{
	int i = 0;

	if (argv != NULL) {
		for (;;) {
			char __user * p;

			if (get_user(p, argv))
				return -EFAULT;
			if (!p)
				break;
			argv++;
			if (++i > max)
				return -E2BIG;
			cond_resched();
		}
	}
	return i;
}

/*
 * 'copy_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 */
static int copy_strings(int argc, char __user * __user * argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	int ret;

	while (argc-- > 0) {
		char __user *str;
		int len;
		unsigned long pos;

		if (get_user(str, argv+argc) ||
				!(len = strnlen_user(str, bprm->p))) {
			ret = -EFAULT;
			goto out;
		}

		if (bprm->p < len) {
			ret = -E2BIG;
			goto out;
		}

		bprm->p -= len;
		/* XXX: add architecture specific overflow check here. */
		pos = bprm->p;

		while (len > 0) {
			int i, new, err;
			int offset, bytes_to_copy;
			struct page *page;

			offset = pos % PAGE_SIZE;
			i = pos/PAGE_SIZE;
			page = bprm->page[i];
			new = 0;
			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				bprm->page[i] = page;
				if (!page) {
					ret = -ENOMEM;
					goto out;
				}
				new = 1;
			}

			if (page != kmapped_page) {
				if (kmapped_page)
					kunmap(kmapped_page);
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
			}
			if (new && offset)
				memset(kaddr, 0, offset);
			bytes_to_copy = PAGE_SIZE - offset;
			if (bytes_to_copy > len) {
				bytes_to_copy = len;
				if (new)
					memset(kaddr+offset+len, 0,
						PAGE_SIZE-offset-len);
			}
			err = copy_from_user(kaddr+offset, str, bytes_to_copy);
			if (err) {
				ret = -EFAULT;
				goto out;
			}

			pos += bytes_to_copy;
			str += bytes_to_copy;
			len -= bytes_to_copy;
		}
	}
	ret = 0;
out:
	if (kmapped_page)
		kunmap(kmapped_page);
	return ret;
}

/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, char **argv, struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	set_fs(KERNEL_DS);
	r = copy_strings(argc, (char __user * __user *)argv, bprm);
	set_fs(oldfs);
	return r;
}

EXPORT_SYMBOL(copy_strings_kernel);

#ifdef CONFIG_MMU
/*
 * This routine is used to map in a page into an address space: needed by
 * execve() for the initial stack and environment pages.
 *
 * vma->vm_mm->mmap_sem is held for writing.
 */
void install_arg_page(struct vm_area_struct *vma,
			struct page *page, unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t * pte;
	spinlock_t *ptl;

	if (unlikely(anon_vma_prepare(vma)))
		goto out;

	flush_dcache_page(page);
	pte = get_locked_pte(mm, address, &ptl);
	if (!pte)
		goto out;
	if (!pte_none(*pte)) {
		pte_unmap_unlock(pte, ptl);
		goto out;
	}
	inc_mm_counter(mm, anon_rss);
	lru_cache_add_active(page);
	set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
					page, vma->vm_page_prot))));
	page_add_new_anon_rmap(page, vma, address);
	pte_unmap_unlock(pte, ptl);

	/* no need for flush_tlb */
	return;
out:
	__free_page(page);
	force_sig(SIGKILL, current);
}

#define EXTRA_STACK_VM_PAGES	20	/* random */

int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long stack_base;
	struct vm_area_struct *mpnt;
	struct mm_struct *mm = current->mm;
	int i, ret;
	long arg_size;

#ifdef CONFIG_STACK_GROWSUP
	/* Move the argument and environment strings to the bottom of the
	 * stack space.
	 */
	int offset, j;
	char *to, *from;

	/* Start by shifting all the pages down */
	i = 0;
	for (j = 0; j < MAX_ARG_PAGES; j++) {
		struct page *page = bprm->page[j];
		if (!page)
			continue;
		bprm->page[i++] = page;
	}

	/* Now move them within their pages */
	offset = bprm->p % PAGE_SIZE;
	to = kmap(bprm->page[0]);
	for (j = 1; j < i; j++) {
		memmove(to, to + offset, PAGE_SIZE - offset);
		from = kmap(bprm->page[j]);
		memcpy(to + PAGE_SIZE - offset, from, offset);
		kunmap(bprm->page[j - 1]);
		to = from;
	}
	memmove(to, to + offset, PAGE_SIZE - offset);
	kunmap(bprm->page[j - 1]);

	/* Limit stack size to 1GB */
	stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;
	stack_base = PAGE_ALIGN(stack_top - stack_base);

	/* Adjust bprm->p to point to the end of the strings. */
	bprm->p = stack_base + PAGE_SIZE * i - offset;

	mm->arg_start = stack_base;
	arg_size = i << PAGE_SHIFT;

	/* zero pages that were copied above */
	while (i < MAX_ARG_PAGES)
		bprm->page[i++] = NULL;
#else
	stack_base = arch_align_stack(stack_top - MAX_ARG_PAGES*PAGE_SIZE);
	stack_base = PAGE_ALIGN(stack_base);
	bprm->p += stack_base;
	mm->arg_start = bprm->p;
	arg_size = stack_top - (PAGE_MASK & (unsigned long) mm->arg_start);
#endif

	arg_size += EXTRA_STACK_VM_PAGES * PAGE_SIZE;

	if (bprm->loader)
		bprm->loader += stack_base;
	bprm->exec += stack_base;

	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	memset(mpnt, 0, sizeof(*mpnt));

	down_write(&mm->mmap_sem);
	{
		mpnt->vm_mm = mm;
#ifdef CONFIG_STACK_GROWSUP
		mpnt->vm_start = stack_base;
		mpnt->vm_end = stack_base + arg_size;
#else
		mpnt->vm_end = stack_top;
		mpnt->vm_start = mpnt->vm_end - arg_size;
#endif
		/* Adjust stack execute permissions; explicitly enable
		 * for EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X
		 * and leave alone (arch default) otherwise. */
		if (unlikely(executable_stack == EXSTACK_ENABLE_X))
			mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC;
		else if (executable_stack == EXSTACK_DISABLE_X)
			mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
		else
			mpnt->vm_flags = VM_STACK_FLAGS;
		mpnt->vm_flags |= mm->def_flags;
		mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
		if ((ret = insert_vm_struct(mm, mpnt))) {
			up_write(&mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, mpnt);
			return ret;
		}
		mm->stack_vm = mm->total_vm = vma_pages(mpnt);
	}

	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page *page = bprm->page[i];
		if (page) {
			bprm->page[i] = NULL;
			install_arg_page(mpnt, page, stack_base);
		}
		stack_base += PAGE_SIZE;
	}
	up_write(&mm->mmap_sem);

	return 0;
}

EXPORT_SYMBOL(setup_arg_pages);
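
/*
 * With an MMU, free_arg_pages() is a no-op: setup_arg_pages() has already
 * handed every argument page over to the new mm, so there is nothing left
 * in bprm->page[] to free.
 */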
#define free_arg_pages(bprm) do { } while (0)

#else

static inline void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++) {
		if (bprm->page[i])
			__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

#endif /* CONFIG_MMU */

struct file *open_exec(const char *name)
{
	struct nameidata nd;
	int err;
	struct file *file;

	err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
	file = ERR_PTR(err);

	if (!err) {
		struct inode *inode = nd.dentry->d_inode;
		file = ERR_PTR(-EACCES);
		if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
		    S_ISREG(inode->i_mode)) {
			int err = vfs_permission(&nd, MAY_EXEC);
			file = ERR_PTR(err);
			if (!err) {
				file = nameidata_to_filp(&nd, O_RDONLY);
				if (!IS_ERR(file)) {
					err = deny_write_access(file);
					if (err) {
						fput(file);
						file = ERR_PTR(err);
					}
				}
out:
				return file;
			}
		}
		release_open_intent(&nd);
		path_release(&nd);
	}
	goto out;
}

EXPORT_SYMBOL(open_exec);

int kernel_read(struct file *file, unsigned long offset,
	char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);
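
/*
 * Install the new mm as the address space of the current task, waking up
 * anybody blocked in vfork() and dropping (or just un-borrowing) the old mm.
 * Bails out with -EINTR if a core dump of the old mm is in progress.
 */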
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	if (old_mm) {
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec.  We must hold mmap_sem around
		 * checking core_waiters and changing tsk->mm.  The
		 * core-inducing thread will increment core_waiters for
		 * each thread whose ->mm == old_mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_waiters)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}

/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;
	struct task_struct *leader = NULL;
	int count;

	/*
	 * If we don't share sighandlers, then we aren't sharing anything
	 * and we can just re-use it all.
	 */
	if (atomic_read(&oldsighand->count) <= 1) {
		BUG_ON(atomic_read(&sig->count) != 1);
		exit_itimers(sig);
		return 0;
	}

	newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	if (!newsighand)
		return -ENOMEM;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 * We must hold tasklist_lock to call zap_other_threads.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irq(lock);
	if (sig->flags & SIGNAL_GROUP_EXIT) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		read_unlock(&tasklist_lock);
		kmem_cache_free(sighand_cachep, newsighand);
		return -EAGAIN;
	}

	/*
	 * child_reaper ignores SIGKILL, change it now.
	 * Reparenting needs write_lock on tasklist_lock,
	 * so it is safe to do it under read_lock.
	 */
	if (unlikely(tsk->group_leader == child_reaper(tsk)))
		tsk->nsproxy->pid_ns->child_reaper = tsk;

	zap_other_threads(tsk);
	read_unlock(&tasklist_lock);

	/*
	 * Account for the thread group leader hanging around:
	 */
	count = 1;
	if (!thread_group_leader(tsk)) {
		count = 2;
		/*
		 * The SIGALRM timer survives the exec, but needs to point
		 * at us as the new group leader now.  We have a race with
		 * a timer firing now getting the old leader, so we need to
		 * synchronize with any firing (by calling del_timer_sync)
		 * before we can safely let the old group leader die.
		 */
		sig->tsk = tsk;
		spin_unlock_irq(lock);
		if (hrtimer_cancel(&sig->real_timer))
			hrtimer_restart(&sig->real_timer);
		spin_lock_irq(lock);
	}
	while (atomic_read(&sig->count) > count) {
		sig->group_exit_task = tsk;
		sig->notify_count = count;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	sig->group_exit_task = NULL;
	sig->notify_count = 0;
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		/*
		 * Wait for the thread group leader to be a zombie.
		 * It should already be zombie at this point, most
		 * of the time.
		 */
		leader = tsk->group_leader;
		while (leader->exit_state != EXIT_ZOMBIE)
			yield();

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead.  But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;

		write_lock_irq(&tasklist_lock);

		BUG_ON(leader->tgid != tsk->tgid);
		BUG_ON(tsk->pid == tsk->tgid);
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 * Note: The old leader also uses this pid until release_task
		 *       is called.  Odd but simple and correct.
		 */
		detach_pid(tsk, PIDTYPE_PID);
		tsk->pid = leader->pid;
		attach_pid(tsk, PIDTYPE_PID, tsk->pid);
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);
		list_replace_rcu(&leader->tasks, &tsk->tasks);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;

		write_unlock_irq(&tasklist_lock);
	}

	/*
	 * There may be one thread left which is just exiting,
	 * but it's safe to stop telling the group to kill themselves.
	 */
	sig->flags = 0;

no_thread_group:
	exit_itimers(sig);
	if (leader)
		release_task(leader);

	BUG_ON(atomic_read(&sig->count) != 1);

	if (atomic_read(&oldsighand->count) == 1) {
		/*
		 * Now that we nuked the rest of the thread group,
		 * it turns out we are not sharing sighand any more either.
		 * So we can just keep it.
		 */
		kmem_cache_free(sighand_cachep, newsighand);
	} else {
		/*
		 * Move our state over to newsighand and switch it in.
		 */
		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING);

		rcu_assign_pointer(tsk->sighand, newsighand);
		recalc_sigpending();

		spin_unlock(&newsighand->siglock);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		if (atomic_dec_and_test(&oldsighand->count))
			kmem_cache_free(sighand_cachep, oldsighand);
	}

	BUG_ON(!thread_group_leader(tsk));
	return 0;
}

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */
static void flush_old_files(struct files_struct * files)
{
	long j = -1;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		fdt = files_fdtable(files);
		if (i >= fdt->max_fds)
			break;
		set = fdt->close_on_exec->fds_bits[j];
		if (!set)
			continue;
		fdt->close_on_exec->fds_bits[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1) {
				sys_close(i);
			}
		}
		spin_lock(&files->file_lock);
	}
	spin_unlock(&files->file_lock);
}

void get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	strncpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
}

void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
}

int flush_old_exec(struct linux_binprm * bprm)
{
	char * name;
	int i, ch, retval;
	struct files_struct *files;
	char tcomm[sizeof(current->comm)];

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	/*
	 * Make sure we have private file handles. Ask the
	 * fork helper to do the work for us and the exit
	 * helper to do the cleanup of the old one.
	 */
	files = current->files;		/* refcounted so safe to hold */
	retval = unshare_files();
	if (retval)
		goto out;
	/*
	 * Release all of the old mmap stuff
	 */
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto mmap_failed;

	bprm->mm = NULL;		/* We're using it now */

	/* This is the point of no return */
	put_files_struct(files);

	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current->euid == current->uid && current->egid == current->gid)
		current->mm->dumpable = 1;
	else
		current->mm->dumpable = suid_dumpable;

	name = bprm->filename;

	/* Copies the binary name from after the last slash */
	for (i = 0; (ch = *(name++)) != '\0';) {
		if (ch == '/')
			i = 0; /* overwrite what we wrote */
		else
			if (i < (sizeof(tcomm) - 1))
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
	set_task_comm(current, tcomm);

	current->flags &= ~PF_RANDOMIZE;
	flush_thread();

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc
	 */
	current->mm->task_size = TASK_SIZE;

	if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
	    file_permission(bprm->file, MAY_READ) ||
	    (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
		suid_keys(current);
		current->mm->dumpable = suid_dumpable;
	}

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	flush_old_files(current->files);

	return 0;

mmap_failed:
	reset_files_struct(current, files);
out:
	return retval;
}

EXPORT_SYMBOL(flush_old_exec);

/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	int mode;
	struct inode * inode = bprm->file->f_path.dentry->d_inode;
	int retval;

	mode = inode->i_mode;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	bprm->e_uid = current->euid;
	bprm->e_gid = current->egid;

	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			current->personality &= ~PER_CLEAR_ON_SETID;
			bprm->e_uid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			current->personality &= ~PER_CLEAR_ON_SETID;
			bprm->e_gid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set(bprm);
	if (retval)
		return retval;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);
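
/*
 * Work out whether it is safe to let this exec proceed with elevated
 * privileges: being ptraced or sharing fs/files/sighand structures with
 * another task is reported to the security module as LSM_UNSAFE_* flags.
 */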
static int unsafe_exec(struct task_struct *p)
{
	int unsafe = 0;
	if (p->ptrace & PT_PTRACED) {
		if (p->ptrace & PT_PTRACE_CAP)
			unsafe |= LSM_UNSAFE_PTRACE_CAP;
		else
			unsafe |= LSM_UNSAFE_PTRACE;
	}
	if (atomic_read(&p->fs->count) > 1 ||
	    atomic_read(&p->files->count) > 1 ||
	    atomic_read(&p->sighand->count) > 1)
		unsafe |= LSM_UNSAFE_SHARE;

	return unsafe;
}

void compute_creds(struct linux_binprm *bprm)
{
	int unsafe;

	if (bprm->e_uid != current->uid)
		suid_keys(current);
	exec_keys(current);

	task_lock(current);
	unsafe = unsafe_exec(current);
	security_bprm_apply_creds(bprm, unsafe);
	task_unlock(current);
	security_bprm_post_apply_creds(bprm);
}

EXPORT_SYMBOL(compute_creds);
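
/*
 * Skip past argv[0] in the saved argument pages: interpreters such as
 * binfmt_script call this so that they can push their own argv[0] before
 * re-running the binary handlers.
 */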
void remove_arg_zero(struct linux_binprm *bprm)
{
	if (bprm->argc) {
		unsigned long offset;
		char * kaddr;
		struct page *page;

		offset = bprm->p % PAGE_SIZE;
		goto inside;

		while (bprm->p++, *(kaddr+offset++)) {
			if (offset != PAGE_SIZE)
				continue;
			offset = 0;
			kunmap_atomic(kaddr, KM_USER0);
inside:
			page = bprm->page[bprm->p/PAGE_SIZE];
			kaddr = kmap_atomic(page, KM_USER0);
		}
		kunmap_atomic(kaddr, KM_USER0);
		bprm->argc--;
	}
}

EXPORT_SYMBOL(remove_arg_zero);

/*
 * cycle through the list of binary format handlers, until one recognizes
 * the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	int try, retval;
	struct linux_binfmt *fmt;
#ifdef __alpha__
	/* handle /sbin/loader.. */
	{
	    struct exec * eh = (struct exec *) bprm->buf;

	    if (!bprm->loader && eh->fh.f_magic == 0x183 &&
		(eh->fh.f_flags & 0x3000) == 0x3000)
	    {
		struct file * file;
		unsigned long loader;

		allow_write_access(bprm->file);
		fput(bprm->file);
		bprm->file = NULL;

		loader = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);

		file = open_exec("/sbin/loader");
		retval = PTR_ERR(file);
		if (IS_ERR(file))
			return retval;

		/* Remember if the application is TASO.  */
		bprm->sh_bang = eh->ah.entry < 0x100000000UL;

		bprm->file = file;
		bprm->loader = loader;
		retval = prepare_binprm(bprm);
		if (retval < 0)
			return retval;
		/* should call search_binary_handler recursively here,
		   but it does not matter */
	    }
	}
#endif
	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	/* kernel module loader fixup */
	/* so we don't try to run modprobe in kernel space. */
	set_fs(USER_DS);

	retval = audit_bprm(bprm);
	if (retval)
		return retval;

	retval = -ENOENT;
	for (try = 0; try < 2; try++) {
		read_lock(&binfmt_lock);
		for (fmt = formats ; fmt ; fmt = fmt->next) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			if (retval >= 0) {
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				proc_exec_connector(current);
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
#ifdef CONFIG_KMOD
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#endif
		}
	}
	return retval;
}

EXPORT_SYMBOL(search_binary_handler);

/*
 * sys_execve() executes a new program.
 */
int do_execve(char * filename,
	char __user *__user *argv,
	char __user *__user *envp,
	struct pt_regs * regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	int retval;
	int i;

	retval = -ENOMEM;
	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_ret;

	file = open_exec(filename);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_kfree;

	sched_exec();

	bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;
	bprm->mm = mm_alloc();
	retval = -ENOMEM;
	if (!bprm->mm)
		goto out_file;

	retval = init_new_context(current, bprm->mm);
	if (retval < 0)
		goto out_mm;

	bprm->argc = count(argv, bprm->p / sizeof(void *));
	if ((retval = bprm->argc) < 0)
		goto out_mm;

	bprm->envc = count(envp, bprm->p / sizeof(void *));
	if ((retval = bprm->envc) < 0)
		goto out_mm;

	retval = security_bprm_alloc(bprm);
	if (retval)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;

	retval = search_binary_handler(bprm, regs);
	if (retval >= 0) {
		free_arg_pages(bprm);

		/* execve success */
		security_bprm_free(bprm);
		acct_update_integrals(current);
		kfree(bprm);
		return retval;
	}

out:
	/* Something went wrong, return the inode and free the argument pages */
	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page * page = bprm->page[i];
		if (page)
			__free_page(page);
	}

	if (bprm->security)
		security_bprm_free(bprm);

out_mm:
	if (bprm->mm)
		mmdrop(bprm->mm);

out_file:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}

out_kfree:
	kfree(bprm);

out_ret:
	return retval;
}

int set_binfmt(struct linux_binfmt *new)
{
	struct linux_binfmt *old = current->binfmt;

	if (new) {
		if (!try_module_get(new->module))
			return -1;
	}
	current->binfmt = new;
	if (old)
		module_put(old->module);
	return 0;
}

EXPORT_SYMBOL(set_binfmt);

#define CORENAME_MAX_SIZE 64

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static void format_corename(char *corename, const char *pattern, long signr)
{
	const char *pat_ptr = pattern;
	char *out_ptr = corename;
	char *const out_end = corename + CORENAME_MAX_SIZE;
	int rc;
	int pid_in_pattern = 0;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (out_ptr == out_end)
				goto out;
			*out_ptr++ = *pat_ptr++;
		} else {
			switch (*++pat_ptr) {
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				if (out_ptr == out_end)
					goto out;
				*out_ptr++ = '%';
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->tgid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* uid */
			case 'u':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->uid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* gid */
			case 'g':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->gid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* signal that caused the coredump */
			case 's':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%ld", signr);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", tv.tv_sec);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", utsname()->nodename);
				up_read(&uts_sem);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* executable */
			case 'e':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", current->comm);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			default:
				break;
			}
			++pat_ptr;
		}
	}
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename */
	if (!pid_in_pattern
	    && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
		rc = snprintf(out_ptr, out_end - out_ptr,
			      ".%d", current->tgid);
		if (rc > out_end - out_ptr)
			goto out;
		out_ptr += rc;
	}
out:
	*out_ptr = 0;
}
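
/*
 * Mark every thread that is using this mm (except the caller) as killed:
 * set SIGNAL_GROUP_EXIT, queue SIGKILL for each of them and bump
 * mm->core_waiters so the dumping thread knows how many exits to wait for.
 */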
static void zap_process(struct task_struct *start)
{
	struct task_struct *t;

	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_stop_count = 0;

	t = start;
	do {
		if (t != current && t->mm) {
			t->mm->core_waiters++;
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
		}
	} while ((t = next_thread(t)) != start);
}

static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
				int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int err = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
		tsk->signal->group_exit_code = exit_code;
		zap_process(tsk);
		err = 0;
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (err)
		return err;

	if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
		goto done;

	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;

		p = g;
		do {
			if (p->mm) {
				if (p->mm == mm) {
					/*
					 * p->sighand can't disappear, but
					 * may be changed by de_thread()
					 */
					lock_task_sighand(p, &flags);
					zap_process(p);
					unlock_task_sighand(p, &flags);
				}
				break;
			}
		} while ((p = next_thread(p)) != g);
	}
	rcu_read_unlock();
done:
	return mm->core_waiters;
}
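
/*
 * Called with mm->mmap_sem held for writing; kills the other users of the
 * mm via zap_threads() and then waits (through the startup_done completion)
 * until they have all stopped using it before the core dump proceeds.
 */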
static int coredump_wait(int exit_code)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct completion startup_done;
	struct completion *vfork_done;
	int core_waiters;

	init_completion(&mm->core_done);
	init_completion(&startup_done);
	mm->core_startup_done = &startup_done;

	core_waiters = zap_threads(tsk, mm, exit_code);
	up_write(&mm->mmap_sem);

	if (unlikely(core_waiters < 0))
		goto fail;

	/*
	 * Make sure nobody is waiting for us to release the VM,
	 * otherwise we can deadlock when we wait on each other
	 */
	vfork_done = tsk->vfork_done;
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	if (core_waiters)
		wait_for_completion(&startup_done);
fail:
	BUG_ON(mm->core_waiters);
	return core_waiters;
}
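
/*
 * Write out a core dump for the current process: build the core file name
 * from core_pattern (or hand it to a usermode helper when the pattern
 * starts with '|') and let the binary format's core_dump handler do the
 * actual writing.
 */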
int do_coredump(long signr, int exit_code, struct pt_regs * regs)
{
	char corename[CORENAME_MAX_SIZE + 1];
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	struct inode * inode;
	struct file * file;
	int retval = 0;
	int fsuid = current->fsuid;
	int flag = 0;
	int ispipe = 0;

	binfmt = current->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	down_write(&mm->mmap_sem);
	if (!mm->dumpable) {
		up_write(&mm->mmap_sem);
		goto fail;
	}

	/*
	 * We cannot trust fsuid as being the "true" uid of the
	 * process nor do we know its entire history. We only know it
	 * was tainted so we dump it as root in mode 2.
	 */
	if (mm->dumpable == 2) {	/* Setuid core dump mode */
		flag = O_EXCL;		/* Stop rewrite attacks */
		current->fsuid = 0;	/* Dump root private */
	}
	mm->dumpable = 0;

	retval = coredump_wait(exit_code);
	if (retval < 0)
		goto fail;

	/*
	 * Clear any false indication of pending signals that might
	 * be seen by the filesystem code called to write the core file.
	 */
	clear_thread_flag(TIF_SIGPENDING);

	if (current->signal->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
		goto fail_unlock;

	/*
	 * lock_kernel() because format_corename() is controlled by sysctl, which
	 * uses lock_kernel()
	 */
	lock_kernel();
	format_corename(corename, core_pattern, signr);
	unlock_kernel();
	if (corename[0] == '|') {
		/* SIGPIPE can happen, but it's just never processed */
		if (call_usermodehelper_pipe(corename+1, NULL, NULL, &file)) {
			printk(KERN_INFO "Core dump to %s pipe failed\n",
			       corename);
			goto fail_unlock;
		}
		ispipe = 1;
	} else
		file = filp_open(corename,
				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
				 0600);
	if (IS_ERR(file))
		goto fail_unlock;
	inode = file->f_path.dentry->d_inode;
	if (inode->i_nlink > 1)
		goto close_fail;	/* multiple links - don't dump */
	if (!ispipe && d_unhashed(file->f_path.dentry))
		goto close_fail;

	/* AK: actually i see no reason to not allow this for named pipes etc.,
	   but keep the previous behaviour for now. */
	if (!ispipe && !S_ISREG(inode->i_mode))
		goto close_fail;
	if (!file->f_op)
		goto close_fail;
	if (!file->f_op->write)
		goto close_fail;
	if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0)
		goto close_fail;

	retval = binfmt->core_dump(signr, regs, file);

	if (retval)
		current->signal->group_exit_code |= 0x80;
close_fail:
	filp_close(file, NULL);
fail_unlock:
	current->fsuid = fsuid;
	complete_all(&mm->core_done);
fail:
	return retval;
}