/*
 *  linux/fs/exec.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/swap.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/rmap.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

int core_uses_pid;
char core_pattern[128] = "core";
int suid_dumpable = 0;

EXPORT_SYMBOL(suid_dumpable);
/* The maximal length of core_pattern is also specified in sysctl.c */

static struct linux_binfmt *formats;
static DEFINE_RWLOCK(binfmt_lock);

int register_binfmt(struct linux_binfmt * fmt)
{
	struct linux_binfmt ** tmp = &formats;

	if (!fmt)
		return -EINVAL;
	if (fmt->next)
		return -EBUSY;
	write_lock(&binfmt_lock);
	while (*tmp) {
		if (fmt == *tmp) {
			write_unlock(&binfmt_lock);
			return -EBUSY;
		}
		tmp = &(*tmp)->next;
	}
	fmt->next = formats;
	formats = fmt;
	write_unlock(&binfmt_lock);
	return 0;
}

EXPORT_SYMBOL(register_binfmt);

int unregister_binfmt(struct linux_binfmt * fmt)
{
	struct linux_binfmt ** tmp = &formats;

	write_lock(&binfmt_lock);
	while (*tmp) {
		if (fmt == *tmp) {
			*tmp = fmt->next;
			write_unlock(&binfmt_lock);
			return 0;
		}
		tmp = &(*tmp)->next;
	}
	write_unlock(&binfmt_lock);
	return -EINVAL;
}

EXPORT_SYMBOL(unregister_binfmt);
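/*
 * Illustrative sketch (not part of the original file): a module that wants
 * the exec path to recognize a new image type fills in a struct
 * linux_binfmt and registers it with the API above.  The handler name,
 * magic bytes and load_example_image() below are hypothetical:
 *
 *	static int example_load_binary(struct linux_binprm *bprm,
 *				       struct pt_regs *regs)
 *	{
 *		if (bprm->buf[0] != 'E' || bprm->buf[1] != 'X')
 *			return -ENOEXEC;	  (not ours: let others try)
 *		return load_example_image(bprm, regs);	  (hypothetical)
 *	}
 *
 *	static struct linux_binfmt example_format = {
 *		.module      = THIS_MODULE,
 *		.load_binary = example_load_binary,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_binfmt(&example_format);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_binfmt(&example_format);
 *	}
 *
 * Returning -ENOEXEC from load_binary is what makes
 * search_binary_handler() move on to the next registered format.
 */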
static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}

/*
 * Note that a shared library must be both readable and executable for
 * security reasons.
 *
 * Also note that we take the address to load from from the file itself.
 */
asmlinkage long sys_uselib(const char __user * library)
{
	struct file * file;
	struct nameidata nd;
	int error;

	error = __user_path_lookup_open(library, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
	if (error)
		goto out;

	error = -EINVAL;
	if (!S_ISREG(nd.dentry->d_inode->i_mode))
		goto exit;

	error = vfs_permission(&nd, MAY_READ | MAY_EXEC);
	if (error)
		goto exit;

	file = nameidata_to_filp(&nd, O_RDONLY);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -ENOEXEC;
	if (file->f_op) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		for (fmt = formats ; fmt ; fmt = fmt->next) {
			if (!fmt->load_shlib)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
	fput(file);
out:
	return error;
exit:
	release_open_intent(&nd);
	path_release(&nd);
	goto out;
}
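/*
 * Illustrative note (not in the original file): sys_uselib backs the
 * ancient uselib(2) call used by pre-ELF (a.out) shared-library loaders;
 * no modern program calls it.  From userspace it looks roughly like:
 *
 *	int err = syscall(SYS_uselib, "/lib/libc.so.4");
 *
 * with SYS_uselib from <sys/syscall.h> (library path hypothetical).  The
 * library must be a regular file that is both readable and executable,
 * per the checks above.
 */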
/*
 * count() counts the number of strings in array ARGV.
 */
static int count(char __user * __user * argv, int max)
{
	int i = 0;

	if (argv != NULL) {
		for (;;) {
			char __user * p;

			if (get_user(p, argv))
				return -EFAULT;
			if (!p)
				break;
			argv++;
			if (++i > max)
				return -E2BIG;
			cond_resched();
		}
	}
	return i;
}

/*
 * 'copy_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 */
static int copy_strings(int argc, char __user * __user * argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	int ret;

	while (argc-- > 0) {
		char __user *str;
		int len;
		unsigned long pos;

		if (get_user(str, argv+argc) ||
				!(len = strnlen_user(str, bprm->p))) {
			ret = -EFAULT;
			goto out;
		}

		if (bprm->p < len) {
			ret = -E2BIG;
			goto out;
		}

		bprm->p -= len;
		/* XXX: add architecture specific overflow check here. */
		pos = bprm->p;

		while (len > 0) {
			int i, new, err;
			int offset, bytes_to_copy;
			struct page *page;

			offset = pos % PAGE_SIZE;
			i = pos/PAGE_SIZE;
			page = bprm->page[i];
			new = 0;
			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				bprm->page[i] = page;
				if (!page) {
					ret = -ENOMEM;
					goto out;
				}
				new = 1;
			}

			if (page != kmapped_page) {
				if (kmapped_page)
					kunmap(kmapped_page);
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
			}
			if (new && offset)
				memset(kaddr, 0, offset);
			bytes_to_copy = PAGE_SIZE - offset;
			if (bytes_to_copy > len) {
				bytes_to_copy = len;
				if (new)
					memset(kaddr+offset+len, 0,
						PAGE_SIZE-offset-len);
			}
			err = copy_from_user(kaddr+offset, str, bytes_to_copy);
			if (err) {
				ret = -EFAULT;
				goto out;
			}

			pos += bytes_to_copy;
			str += bytes_to_copy;
			len -= bytes_to_copy;
		}
	}
	ret = 0;
out:
	if (kmapped_page)
		kunmap(kmapped_page);
	return ret;
}

/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, char ** argv, struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	set_fs(KERNEL_DS);
	r = copy_strings(argc, (char __user * __user *)argv, bprm);
	set_fs(oldfs);
	return r;
}

EXPORT_SYMBOL(copy_strings_kernel);
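/*
 * Usage sketch (this mirrors the order do_execve() below uses): the
 * filename already lives in kernel memory, so it is pushed with the
 * _kernel variant, while argv/envp are user pointers and go through
 * copy_strings() directly:
 *
 *	retval = copy_strings_kernel(1, &bprm->filename, bprm);
 *	bprm->exec = bprm->p;
 *	retval = copy_strings(bprm->envc, envp, bprm);
 *	retval = copy_strings(bprm->argc, argv, bprm);
 *
 * (error checks between the calls omitted here; see do_execve() below.)
 */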
#ifdef CONFIG_MMU
/*
 * This routine is used to map in a page into an address space: needed by
 * execve() for the initial stack and environment pages.
 *
 * vma->vm_mm->mmap_sem is held for writing.
 */
void install_arg_page(struct vm_area_struct *vma,
			struct page *page, unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t * pte;
	spinlock_t *ptl;

	if (unlikely(anon_vma_prepare(vma)))
		goto out;

	flush_dcache_page(page);
	pte = get_locked_pte(mm, address, &ptl);
	if (!pte)
		goto out;
	if (!pte_none(*pte)) {
		pte_unmap_unlock(pte, ptl);
		goto out;
	}
	inc_mm_counter(mm, anon_rss);
	lru_cache_add_active(page);
	set_pte_at(mm, address, pte, pte_mkdirty(
			pte_mkwrite(mk_pte(page, vma->vm_page_prot))));
	page_add_new_anon_rmap(page, vma, address);
	pte_unmap_unlock(pte, ptl);

	/* no need for flush_tlb */
	return;
out:
	__free_page(page);
	force_sig(SIGKILL, current);
}

#define EXTRA_STACK_VM_PAGES	20	/* random */

int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long stack_base;
	struct vm_area_struct *mpnt;
	struct mm_struct *mm = current->mm;
	int i, ret;
	long arg_size;

#ifdef CONFIG_STACK_GROWSUP
	/* Move the argument and environment strings to the bottom of the
	 * stack space.
	 */
	int offset, j;
	char *to, *from;

	/* Start by shifting all the pages down */
	i = 0;
	for (j = 0; j < MAX_ARG_PAGES; j++) {
		struct page *page = bprm->page[j];
		if (!page)
			continue;
		bprm->page[i++] = page;
	}

	/* Now move them within their pages */
	offset = bprm->p % PAGE_SIZE;
	to = kmap(bprm->page[0]);
	for (j = 1; j < i; j++) {
		memmove(to, to + offset, PAGE_SIZE - offset);
		from = kmap(bprm->page[j]);
		memcpy(to + PAGE_SIZE - offset, from, offset);
		kunmap(bprm->page[j - 1]);
		to = from;
	}
	memmove(to, to + offset, PAGE_SIZE - offset);
	kunmap(bprm->page[j - 1]);

	/* Limit stack size to 1GB */
	stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;
	stack_base = PAGE_ALIGN(stack_top - stack_base);

	/* Adjust bprm->p to point to the end of the strings. */
	bprm->p = stack_base + PAGE_SIZE * i - offset;

	mm->arg_start = stack_base;
	arg_size = i << PAGE_SHIFT;

	/* zero pages that were copied above */
	while (i < MAX_ARG_PAGES)
		bprm->page[i++] = NULL;
#else
	stack_base = arch_align_stack(stack_top - MAX_ARG_PAGES*PAGE_SIZE);
	stack_base = PAGE_ALIGN(stack_base);
	bprm->p += stack_base;
	mm->arg_start = bprm->p;
	arg_size = stack_top - (PAGE_MASK & (unsigned long) mm->arg_start);
#endif

	arg_size += EXTRA_STACK_VM_PAGES * PAGE_SIZE;

	if (bprm->loader)
		bprm->loader += stack_base;
	bprm->exec += stack_base;

	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	memset(mpnt, 0, sizeof(*mpnt));

	down_write(&mm->mmap_sem);
	{
		mpnt->vm_mm = mm;
#ifdef CONFIG_STACK_GROWSUP
		mpnt->vm_start = stack_base;
		mpnt->vm_end = stack_base + arg_size;
#else
		mpnt->vm_end = stack_top;
		mpnt->vm_start = mpnt->vm_end - arg_size;
#endif
		/* Adjust stack execute permissions; explicitly enable
		 * for EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X
		 * and leave alone (arch default) otherwise. */
		if (unlikely(executable_stack == EXSTACK_ENABLE_X))
			mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC;
		else if (executable_stack == EXSTACK_DISABLE_X)
			mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
		else
			mpnt->vm_flags = VM_STACK_FLAGS;
		mpnt->vm_flags |= mm->def_flags;
		mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
		if ((ret = insert_vm_struct(mm, mpnt))) {
			up_write(&mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, mpnt);
			return ret;
		}
		mm->stack_vm = mm->total_vm = vma_pages(mpnt);
	}

	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page *page = bprm->page[i];
		if (page) {
			bprm->page[i] = NULL;
			install_arg_page(mpnt, page, stack_base);
		}
		stack_base += PAGE_SIZE;
	}
	up_write(&mm->mmap_sem);

	return 0;
}

EXPORT_SYMBOL(setup_arg_pages);
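/*
 * Usage sketch: a binary-format handler calls setup_arg_pages() once it
 * knows where the stack should end and whether it must be executable.
 * binfmt_elf of this era does roughly:
 *
 *	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
 *				 executable_stack);
 *	if (retval < 0)
 *		goto out_free_dentry;	  (binfmt_elf's own error label)
 */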
#define free_arg_pages(bprm) do { } while (0)

#else

static inline void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++) {
		if (bprm->page[i])
			__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

#endif /* CONFIG_MMU */

struct file *open_exec(const char *name)
{
	struct nameidata nd;
	int err;
	struct file *file;

	err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
	file = ERR_PTR(err);

	if (!err) {
		struct inode *inode = nd.dentry->d_inode;
		file = ERR_PTR(-EACCES);
		if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
		    S_ISREG(inode->i_mode)) {
			int err = vfs_permission(&nd, MAY_EXEC);
			file = ERR_PTR(err);
			if (!err) {
				file = nameidata_to_filp(&nd, O_RDONLY);
				if (!IS_ERR(file)) {
					err = deny_write_access(file);
					if (err) {
						fput(file);
						file = ERR_PTR(err);
					}
				}
out:
				return file;
			}
		}
		release_open_intent(&nd);
		path_release(&nd);
	}
	goto out;
}

EXPORT_SYMBOL(open_exec);
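/*
 * Usage sketch: open_exec() returns either a file with write access
 * denied or an ERR_PTR.  do_execve() below and the alpha /sbin/loader
 * fixup in search_binary_handler() both follow the same pattern:
 *
 *	file = open_exec(filename);
 *	retval = PTR_ERR(file);
 *	if (IS_ERR(file))
 *		goto out_kfree;		  (do_execve's error label)
 */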
int kernel_read(struct file *file, unsigned long offset,
	char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);
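/*
 * Usage sketch (as prepare_binprm() below does): pull the first
 * BINPRM_BUF_SIZE bytes of the image into bprm->buf so the format
 * handlers can examine the magic:
 *
 *	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
 *	retval = kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
 */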
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	if (old_mm) {
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec. We must hold mmap_sem around
		 * checking core_waiters and changing tsk->mm. The
		 * core-inducing thread will increment core_waiters for
		 * each thread whose ->mm == old_mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_waiters)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}

/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes. (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;
	struct task_struct *leader = NULL;
	int count;

	/*
	 * If we don't share sighandlers, then we aren't sharing anything
	 * and we can just re-use it all.
	 */
	if (atomic_read(&oldsighand->count) <= 1) {
		BUG_ON(atomic_read(&sig->count) != 1);
		exit_itimers(sig);
		return 0;
	}

	newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	if (!newsighand)
		return -ENOMEM;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 * We must hold tasklist_lock to call zap_other_threads.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irq(lock);
	if (sig->flags & SIGNAL_GROUP_EXIT) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		read_unlock(&tasklist_lock);
		kmem_cache_free(sighand_cachep, newsighand);
		return -EAGAIN;
	}

	/*
	 * child_reaper ignores SIGKILL, change it now.
	 * Reparenting needs write_lock on tasklist_lock,
	 * so it is safe to do it under read_lock.
	 */
	if (unlikely(tsk->group_leader == child_reaper(tsk)))
		tsk->nsproxy->pid_ns->child_reaper = tsk;

	zap_other_threads(tsk);
	read_unlock(&tasklist_lock);

	/*
	 * Account for the thread group leader hanging around:
	 */
	count = 1;
	if (!thread_group_leader(tsk)) {
		count = 2;
		/*
		 * The SIGALRM timer survives the exec, but needs to point
		 * at us as the new group leader now. We have a race with
		 * a timer firing now getting the old leader, so we need to
		 * synchronize with any firing (by calling del_timer_sync)
		 * before we can safely let the old group leader die.
		 */
		sig->tsk = tsk;
		spin_unlock_irq(lock);
		if (hrtimer_cancel(&sig->real_timer))
			hrtimer_restart(&sig->real_timer);
		spin_lock_irq(lock);
	}
	while (atomic_read(&sig->count) > count) {
		sig->group_exit_task = tsk;
		sig->notify_count = count;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	sig->group_exit_task = NULL;
	sig->notify_count = 0;
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		/*
		 * Wait for the thread group leader to be a zombie.
		 * It should already be zombie at this point, most
		 * of the time.
		 */
		leader = tsk->group_leader;
		while (leader->exit_state != EXIT_ZOMBIE)
			yield();

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead. But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;

		write_lock_irq(&tasklist_lock);

		BUG_ON(leader->tgid != tsk->tgid);
		BUG_ON(tsk->pid == tsk->tgid);
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 * Note: The old leader also uses this pid until release_task
		 *       is called. Odd but simple and correct.
		 */
		detach_pid(tsk, PIDTYPE_PID);
		tsk->pid = leader->pid;
		attach_pid(tsk, PIDTYPE_PID, tsk->pid);
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);
		list_replace_rcu(&leader->tasks, &tsk->tasks);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;

		write_unlock_irq(&tasklist_lock);
	}

	/*
	 * There may be one thread left which is just exiting,
	 * but it's safe to stop telling the group to kill themselves.
	 */
	sig->flags = 0;

no_thread_group:
	exit_itimers(sig);
	if (leader)
		release_task(leader);

	BUG_ON(atomic_read(&sig->count) != 1);

	if (atomic_read(&oldsighand->count) == 1) {
		/*
		 * Now that we nuked the rest of the thread group,
		 * it turns out we are not sharing sighand any more either.
		 * So we can just keep it.
		 */
		kmem_cache_free(sighand_cachep, newsighand);
	} else {
		/*
		 * Move our state over to newsighand and switch it in.
		 */
		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING);

		rcu_assign_pointer(tsk->sighand, newsighand);
		recalc_sigpending();

		spin_unlock(&newsighand->siglock);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		if (atomic_dec_and_test(&oldsighand->count))
			kmem_cache_free(sighand_cachep, oldsighand);
	}

	BUG_ON(!thread_group_leader(tsk));
	return 0;
}

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */

static void flush_old_files(struct files_struct * files)
{
	long j = -1;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		fdt = files_fdtable(files);
		if (i >= fdt->max_fds)
			break;
		set = fdt->close_on_exec->fds_bits[j];
		if (!set)
			continue;
		fdt->close_on_exec->fds_bits[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1) {
				sys_close(i);
			}
		}
		spin_lock(&files->file_lock);
	}
	spin_unlock(&files->file_lock);
}

void get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	strncpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
}

void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
}
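/*
 * Usage sketch: comm is a fixed-size array, so callers go through these
 * locked helpers instead of touching tsk->comm directly.  flush_old_exec()
 * below builds the new name from the exec'd filename and then calls:
 *
 *	char tcomm[sizeof(current->comm)];
 *	get_task_comm(tcomm, current);	  (read the old name, locked)
 *	set_task_comm(current, tcomm);	  (write a name back, truncated)
 *
 * Longer names are silently truncated by the strlcpy() above.
 */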
int flush_old_exec(struct linux_binprm * bprm)
{
	char * name;
	int i, ch, retval;
	struct files_struct *files;
	char tcomm[sizeof(current->comm)];

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	/*
	 * Make sure we have private file handles. Ask the
	 * fork helper to do the work for us and the exit
	 * helper to do the cleanup of the old one.
	 */
	files = current->files;		/* refcounted so safe to hold */
	retval = unshare_files();
	if (retval)
		goto out;
	/*
	 * Release all of the old mmap stuff
	 */
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto mmap_failed;

	bprm->mm = NULL;		/* We're using it now */

	/* This is the point of no return */
	put_files_struct(files);

	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current->euid == current->uid && current->egid == current->gid)
		current->mm->dumpable = 1;
	else
		current->mm->dumpable = suid_dumpable;

	name = bprm->filename;

	/* Copies the binary name from after last slash */
	for (i = 0; (ch = *(name++)) != '\0';) {
		if (ch == '/')
			i = 0; /* overwrite what we wrote */
		else
			if (i < (sizeof(tcomm) - 1))
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
	set_task_comm(current, tcomm);

	current->flags &= ~PF_RANDOMIZE;
	flush_thread();

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc
	 */
	current->mm->task_size = TASK_SIZE;

	if (bprm->e_uid != current->euid || bprm->e_gid != current->egid) {
		suid_keys(current);
		current->mm->dumpable = suid_dumpable;
		current->pdeath_signal = 0;
	} else if (file_permission(bprm->file, MAY_READ) ||
			(bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
		suid_keys(current);
		current->mm->dumpable = suid_dumpable;
	}

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	flush_old_files(current->files);

	return 0;

mmap_failed:
	reset_files_struct(current, files);
out:
	return retval;
}

EXPORT_SYMBOL(flush_old_exec);

/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	int mode;
	struct inode * inode = bprm->file->f_path.dentry->d_inode;
	int retval;

	mode = inode->i_mode;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	bprm->e_uid = current->euid;
	bprm->e_gid = current->egid;

	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			current->personality &= ~PER_CLEAR_ON_SETID;
			bprm->e_uid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			current->personality &= ~PER_CLEAR_ON_SETID;
			bprm->e_gid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set(bprm);
	if (retval)
		return retval;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);

static int unsafe_exec(struct task_struct *p)
{
	int unsafe = 0;
	if (p->ptrace & PT_PTRACED) {
		if (p->ptrace & PT_PTRACE_CAP)
			unsafe |= LSM_UNSAFE_PTRACE_CAP;
		else
			unsafe |= LSM_UNSAFE_PTRACE;
	}
	if (atomic_read(&p->fs->count) > 1 ||
	    atomic_read(&p->files->count) > 1 ||
	    atomic_read(&p->sighand->count) > 1)
		unsafe |= LSM_UNSAFE_SHARE;

	return unsafe;
}

void compute_creds(struct linux_binprm *bprm)
{
	int unsafe;

	if (bprm->e_uid != current->uid) {
		suid_keys(current);
		current->pdeath_signal = 0;
	}
	exec_keys(current);

	task_lock(current);
	unsafe = unsafe_exec(current);
	security_bprm_apply_creds(bprm, unsafe);
	task_unlock(current);
	security_bprm_post_apply_creds(bprm);
}

EXPORT_SYMBOL(compute_creds);

void remove_arg_zero(struct linux_binprm *bprm)
{
	if (bprm->argc) {
		unsigned long offset;
		char * kaddr;
		struct page *page;

		offset = bprm->p % PAGE_SIZE;
		goto inside;

		while (bprm->p++, *(kaddr+offset++)) {
			if (offset != PAGE_SIZE)
				continue;
			offset = 0;
			kunmap_atomic(kaddr, KM_USER0);
inside:
			page = bprm->page[bprm->p/PAGE_SIZE];
			kaddr = kmap_atomic(page, KM_USER0);
		}
		kunmap_atomic(kaddr, KM_USER0);
		bprm->argc--;
	}
}

EXPORT_SYMBOL(remove_arg_zero);
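/*
 * Usage sketch: script/interpreter handlers pop the original argv[0] and
 * push replacement strings in its place; binfmt_script does roughly:
 *
 *	remove_arg_zero(bprm);
 *	retval = copy_strings_kernel(1, &i_name, bprm);
 *	bprm->argc++;
 *
 * (i_name is binfmt_script's parsed "#!" interpreter name; error checks
 * omitted here.)
 */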
/*
 * cycle through the list of binary format handlers, until one recognizes
 * the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	int try, retval;
	struct linux_binfmt *fmt;
#ifdef __alpha__
	/* handle /sbin/loader.. */
	{
	    struct exec * eh = (struct exec *) bprm->buf;

	    if (!bprm->loader && eh->fh.f_magic == 0x183 &&
		(eh->fh.f_flags & 0x3000) == 0x3000)
	    {
		struct file * file;
		unsigned long loader;

		allow_write_access(bprm->file);
		fput(bprm->file);
		bprm->file = NULL;

		loader = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);

		file = open_exec("/sbin/loader");
		retval = PTR_ERR(file);
		if (IS_ERR(file))
			return retval;

		/* Remember if the application is TASO. */
		bprm->sh_bang = eh->ah.entry < 0x100000000UL;

		bprm->file = file;
		bprm->loader = loader;
		retval = prepare_binprm(bprm);
		if (retval < 0)
			return retval;
		/* should call search_binary_handler recursively here,
		   but it does not matter */
	    }
	}
#endif
	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	/* kernel module loader fixup */
	/* so we don't try to run modprobe in kernel space. */
	set_fs(USER_DS);

	retval = audit_bprm(bprm);
	if (retval)
		return retval;

	retval = -ENOENT;
	for (try = 0; try < 2; try++) {
		read_lock(&binfmt_lock);
		for (fmt = formats ; fmt ; fmt = fmt->next) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			if (retval >= 0) {
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				proc_exec_connector(current);
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
#ifdef CONFIG_KMOD
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#endif
		}
	}
	return retval;
}

EXPORT_SYMBOL(search_binary_handler);
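/*
 * Note on the CONFIG_KMOD branch above (a reading of the code, not a
 * documented ABI): on the second pass, an image whose first four bytes
 * are not printable text triggers
 *
 *	request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
 *
 * so a format handler can make itself demand-loadable by providing a
 * module alias of the form "binfmt-XXXX" built from bytes 2-3 of the
 * image.
 */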
/*
 * sys_execve() executes a new program.
 */
int do_execve(char * filename,
	char __user *__user *argv,
	char __user *__user *envp,
	struct pt_regs * regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	int retval;
	int i;

	retval = -ENOMEM;
	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_ret;

	file = open_exec(filename);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_kfree;

	sched_exec();

	bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;
	bprm->mm = mm_alloc();
	retval = -ENOMEM;
	if (!bprm->mm)
		goto out_file;

	retval = init_new_context(current, bprm->mm);
	if (retval < 0)
		goto out_mm;

	bprm->argc = count(argv, bprm->p / sizeof(void *));
	if ((retval = bprm->argc) < 0)
		goto out_mm;

	bprm->envc = count(envp, bprm->p / sizeof(void *));
	if ((retval = bprm->envc) < 0)
		goto out_mm;

	retval = security_bprm_alloc(bprm);
	if (retval)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;

	retval = search_binary_handler(bprm, regs);
	if (retval >= 0) {
		free_arg_pages(bprm);

		/* execve success */
		security_bprm_free(bprm);
		acct_update_integrals(current);
		kfree(bprm);
		return retval;
	}

out:
	/* Something went wrong, return the inode and free the argument pages */
	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page * page = bprm->page[i];
		if (page)
			__free_page(page);
	}

	if (bprm->security)
		security_bprm_free(bprm);

out_mm:
	if (bprm->mm)
		mmdrop(bprm->mm);

out_file:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}

out_kfree:
	kfree(bprm);

out_ret:
	return retval;
}
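/*
 * Usage sketch: each architecture's sys_execve() is a thin wrapper that
 * copies the filename into kernel memory and passes its register frame
 * down.  On i386 of this era it is roughly (ptrace/sysenter details
 * trimmed):
 *
 *	asmlinkage int sys_execve(struct pt_regs regs)
 *	{
 *		int error;
 *		char * filename;
 *
 *		filename = getname((char __user *) regs.ebx);
 *		error = PTR_ERR(filename);
 *		if (IS_ERR(filename))
 *			goto out;
 *		error = do_execve(filename,
 *				(char __user * __user *) regs.ecx,
 *				(char __user * __user *) regs.edx,
 *				&regs);
 *		putname(filename);
 *	out:
 *		return error;
 *	}
 */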
int set_binfmt(struct linux_binfmt *new)
{
	struct linux_binfmt *old = current->binfmt;

	if (new) {
		if (!try_module_get(new->module))
			return -1;
	}
	current->binfmt = new;
	if (old)
		module_put(old->module);
	return 0;
}

EXPORT_SYMBOL(set_binfmt);
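/*
 * Usage sketch: a load_binary implementation claims the process for its
 * format once it has committed to the new image; binfmt_elf, for example,
 * does
 *
 *	set_binfmt(&elf_format);
 *
 * so that a later do_coredump() calls the matching core_dump method.
 */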
#define CORENAME_MAX_SIZE 64

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(char *corename, const char *pattern, long signr)
{
	const char *pat_ptr = pattern;
	char *out_ptr = corename;
	char *const out_end = corename + CORENAME_MAX_SIZE;
	int rc;
	int pid_in_pattern = 0;
	int ispipe = 0;

	if (*pattern == '|')
		ispipe = 1;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (out_ptr == out_end)
				goto out;
			*out_ptr++ = *pat_ptr++;
		} else {
			switch (*++pat_ptr) {
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				if (out_ptr == out_end)
					goto out;
				*out_ptr++ = '%';
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->tgid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* uid */
			case 'u':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->uid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* gid */
			case 'g':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->gid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* signal that caused the coredump */
			case 's':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%ld", signr);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", tv.tv_sec);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", utsname()->nodename);
				up_read(&uts_sem);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* executable */
			case 'e':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", current->comm);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			default:
				break;
			}
			++pat_ptr;
		}
	}
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern
	    && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
		rc = snprintf(out_ptr, out_end - out_ptr,
			      ".%d", current->tgid);
		if (rc > out_end - out_ptr)
			goto out;
		out_ptr += rc;
	}
out:
	*out_ptr = 0;
	return ispipe;
}
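/*
 * Illustrative expansions (PID, name, and helper path are hypothetical):
 *
 *	core_pattern = "core"		  -> "core.4567" (core_uses_pid set)
 *	core_pattern = "core.%e.%p"	  -> "core.bash.4567"
 *	core_pattern = "|/sbin/dumper %s" -> ispipe, so do_coredump() below
 *					     hands "/sbin/dumper 11" to a
 *					     usermode helper instead of
 *					     opening a file
 */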
static void zap_process(struct task_struct *start)
{
	struct task_struct *t;

	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_stop_count = 0;

	t = start;
	do {
		if (t != current && t->mm) {
			t->mm->core_waiters++;
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
		}
	} while ((t = next_thread(t)) != start);
}

static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
				int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int err = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
		tsk->signal->group_exit_code = exit_code;
		zap_process(tsk);
		err = 0;
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (err)
		return err;

	if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
		goto done;

	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;

		p = g;
		do {
			if (p->mm) {
				if (p->mm == mm) {
					/*
					 * p->sighand can't disappear, but
					 * may be changed by de_thread()
					 */
					lock_task_sighand(p, &flags);
					zap_process(p);
					unlock_task_sighand(p, &flags);
				}
				break;
			}
		} while ((p = next_thread(p)) != g);
	}
	rcu_read_unlock();
done:
	return mm->core_waiters;
}

static int coredump_wait(int exit_code)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct completion startup_done;
	struct completion *vfork_done;
	int core_waiters;

	init_completion(&mm->core_done);
	init_completion(&startup_done);
	mm->core_startup_done = &startup_done;

	core_waiters = zap_threads(tsk, mm, exit_code);
	up_write(&mm->mmap_sem);

	if (unlikely(core_waiters < 0))
		goto fail;

	/*
	 * Make sure nobody is waiting for us to release the VM,
	 * otherwise we can deadlock when we wait on each other
	 */
	vfork_done = tsk->vfork_done;
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	if (core_waiters)
		wait_for_completion(&startup_done);
fail:
	BUG_ON(mm->core_waiters);
	return core_waiters;
}

int do_coredump(long signr, int exit_code, struct pt_regs * regs)
{
	char corename[CORENAME_MAX_SIZE + 1];
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	struct inode * inode;
	struct file * file;
	int retval = 0;
	int fsuid = current->fsuid;
	int flag = 0;
	int ispipe = 0;

	binfmt = current->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	down_write(&mm->mmap_sem);
	if (!mm->dumpable) {
		up_write(&mm->mmap_sem);
		goto fail;
	}

	/*
	 * We cannot trust fsuid as being the "true" uid of the
	 * process nor do we know its entire history. We only know it
	 * was tainted so we dump it as root in mode 2.
	 */
	if (mm->dumpable == 2) {	/* Setuid core dump mode */
		flag = O_EXCL;		/* Stop rewrite attacks */
		current->fsuid = 0;	/* Dump root private */
	}
	mm->dumpable = 0;

	retval = coredump_wait(exit_code);
	if (retval < 0)
		goto fail;

	/*
	 * Clear any false indication of pending signals that might
	 * be seen by the filesystem code called to write the core file.
	 */
	clear_thread_flag(TIF_SIGPENDING);

	if (current->signal->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
		goto fail_unlock;

	/*
	 * lock_kernel() because format_corename() is controlled by sysctl,
	 * which uses lock_kernel()
	 */
	lock_kernel();
	ispipe = format_corename(corename, core_pattern, signr);
	unlock_kernel();
	if (ispipe) {
		/* SIGPIPE can happen, but it's just never processed */
		if (call_usermodehelper_pipe(corename+1, NULL, NULL, &file)) {
			printk(KERN_INFO "Core dump to %s pipe failed\n",
			       corename);
			goto fail_unlock;
		}
	} else
		file = filp_open(corename,
				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
				 0600);
	if (IS_ERR(file))
		goto fail_unlock;
	inode = file->f_path.dentry->d_inode;
	if (inode->i_nlink > 1)
		goto close_fail;	/* multiple links - don't dump */
	if (!ispipe && d_unhashed(file->f_path.dentry))
		goto close_fail;

	/* AK: actually I see no reason to not allow this for named pipes etc.,
	   but keep the previous behaviour for now. */
	if (!ispipe && !S_ISREG(inode->i_mode))
		goto close_fail;
	if (!file->f_op)
		goto close_fail;
	if (!file->f_op->write)
		goto close_fail;
	if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0)
		goto close_fail;

	retval = binfmt->core_dump(signr, regs, file);

	if (retval)
		current->signal->group_exit_code |= 0x80;
close_fail:
	filp_close(file, NULL);
fail_unlock:
	current->fsuid = fsuid;
	complete_all(&mm->core_done);
fail:
	return retval;
}