/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * #!-checking implemented by tytso.
 *
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>

#include <trace/events/task.h>

#include <trace/events/sched.h>
int suid_dumpable = 0;

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);
void __register_binfmt(struct linux_binfmt * fmt, int insert)
{
	write_lock(&binfmt_lock);
	insert ? list_add(&fmt->lh, &formats) :
		list_add_tail(&fmt->lh, &formats);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(__register_binfmt);
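
/*
 * Callers normally go through the register_binfmt()/insert_binfmt()
 * wrappers in <linux/binfmts.h>, which pass insert = 0 or 1 respectively;
 * e.g. the ELF loader registers itself with register_binfmt(&elf_format).
 * (Illustrative usage; see binfmts.h for the exact wrappers.)
 */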

void unregister_binfmt(struct linux_binfmt * fmt)
{
	write_lock(&binfmt_lock);
	list_del(&fmt->lh);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);

static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}

/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that we take the load address from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
	struct file *file;
	struct filename *tmp = getname(library);
	int error = PTR_ERR(tmp);
	static const struct open_flags uselib_flags = {
		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
		.acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
		.intent = LOOKUP_OPEN
	};

	if (IS_ERR(tmp))
		goto out;

	file = do_filp_open(AT_FDCWD, tmp, &uselib_flags, LOOKUP_FOLLOW);
	putname(tmp);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -EINVAL;
	if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
		goto exit;

	error = -EACCES;
	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	fsnotify_open(file);

	error = -ENOEXEC;
	if (file->f_op) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			if (!fmt->load_shlib)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
exit:
	fput(file);
out:
	return error;
}
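
/*
 * Note on the loop above: a loader returning -ENOEXEC means "not my
 * format, try the next handler"; any other value, success or failure,
 * ends the search. The same convention drives search_binary_handler()
 * below.
 */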

#ifdef CONFIG_MMU

/*
 * The nascent bprm->mm is not visible until exec_mmap() but it can
 * use a lot of memory, account these pages in current->mm temporarily
 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
 * change the counter back via acct_arg_size(0).
 */
static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
	struct mm_struct *mm = current->mm;
	long diff = (long)(pages - bprm->vma_pages);

	if (!mm || !diff)
		return;

	bprm->vma_pages = pages;
	add_mm_counter(mm, MM_ANONPAGES, diff);
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;
	int ret;

#ifdef CONFIG_STACK_GROWSUP
	if (write) {
		ret = expand_downwards(bprm->vma, pos);
		if (ret < 0)
			return NULL;
	}
#endif
	ret = get_user_pages(current, bprm->mm, pos,
			1, write, 1, &page, NULL);
	if (ret <= 0)
		return NULL;

	if (write) {
		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
		struct rlimit *rlim;

		acct_arg_size(bprm, size / PAGE_SIZE);

		/*
		 * We've historically supported up to 32 pages (ARG_MAX)
		 * of argument strings even with small stacks
		 */
		if (size <= ARG_MAX)
			return page;

		/*
		 * Limit to 1/4-th the stack size for the argv+env strings.
		 * This ensures that:
		 *  - the remaining binfmt code will not run out of stack space,
		 *  - the program will have a reasonable amount of stack left
		 *    to work from.
		 */
		rlim = current->signal->rlim;
		if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
			put_page(page);
			return NULL;
		}
	}

	return page;
}
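
/*
 * Worked example (assuming 4 KiB pages): with the common 8 MiB
 * RLIMIT_STACK, the check above caps argv+env at 2 MiB, while the
 * ARG_MAX test before it still guarantees the historical 32-page
 * (128 KiB) minimum even for tiny stack limits.
 */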

static void put_arg_page(struct page *page)
{
	put_page(page);
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	down_write(&mm->mmap_sem);
	vma->vm_mm = mm;

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
	BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;
	vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	INIT_LIST_HEAD(&vma->anon_vma_chain);

	err = insert_vm_struct(mm, vma);
	if (err)
		goto err;

	mm->stack_vm = mm->total_vm = 1;
	up_write(&mm->mmap_sem);
	bprm->p = vma->vm_end - sizeof(void *);
	return 0;
err:
	up_write(&mm->mmap_sem);
	bprm->vma = NULL;
	kmem_cache_free(vm_area_cachep, vma);
	return err;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= MAX_ARG_STRLEN;
}
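
/*
 * MAX_ARG_STRLEN is defined in <linux/binfmts.h> as 32 * PAGE_SIZE, so
 * a single argument or environment string may not exceed 128 KiB with
 * 4 KiB pages.
 */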

#else /* !CONFIG_MMU */

static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;

	page = bprm->page[pos / PAGE_SIZE];
	if (!page && write) {
		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
		if (!page)
			return NULL;
		bprm->page[pos / PAGE_SIZE] = page;
	}

	return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
	if (bprm->page[i]) {
		__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

static void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++)
		free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
	return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= bprm->p;
}

#endif /* CONFIG_MMU */

/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct. We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values. We'll update
 * them later in setup_arg_pages().
 */
int bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct mm_struct *mm = NULL;

	bprm->mm = mm = mm_alloc();
	err = -ENOMEM;
	if (!mm)
		goto err;

	err = init_new_context(current, mm);
	if (err)
		goto err;

	err = __bprm_mm_init(bprm);
	if (err)
		goto err;

	return 0;

err:
	if (mm) {
		bprm->mm = NULL;
		mmdrop(mm);
	}

	return err;
}

struct user_arg_ptr {
#ifdef CONFIG_COMPAT
	bool is_compat;
#endif
	union {
		const char __user *const __user *native;
#ifdef CONFIG_COMPAT
		const compat_uptr_t __user *compat;
#endif
	} ptr;
};

static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
{
	const char __user *native;

#ifdef CONFIG_COMPAT
	if (unlikely(argv.is_compat)) {
		compat_uptr_t compat;

		if (get_user(compat, argv.ptr.compat + nr))
			return ERR_PTR(-EFAULT);

		return compat_ptr(compat);
	}
#endif

	if (get_user(native, argv.ptr.native + nr))
		return ERR_PTR(-EFAULT);

	return native;
}
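
/*
 * Example: on a 64-bit kernel, a 32-bit task's argv[] is an array of
 * 32-bit compat_uptr_t values, so argv.ptr.compat + nr indexes in
 * 4-byte steps and compat_ptr() widens each entry back to a full user
 * pointer before it is dereferenced.
 */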

/*
 * count() counts the number of strings in array ARGV.
 */
static int count(struct user_arg_ptr argv, int max)
{
	int i = 0;

	if (argv.ptr.native != NULL) {
		for (;;) {
			const char __user *p = get_user_arg_ptr(argv, i);

			if (!p)
				break;

			if (IS_ERR(p))
				return -EFAULT;

			if (i >= max)
				return -E2BIG;
			++i;

			if (fatal_signal_pending(current))
				return -ERESTARTNOHAND;
			cond_resched();
		}
	}
	return i;
}

/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack.  The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, struct user_arg_ptr argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	unsigned long kpos = 0;
	int ret;

	while (argc-- > 0) {
		const char __user *str;
		int len;
		unsigned long pos;

		ret = -EFAULT;
		str = get_user_arg_ptr(argv, argc);
		if (IS_ERR(str))
			goto out;

		len = strnlen_user(str, MAX_ARG_STRLEN);
		if (!len)
			goto out;

		ret = -E2BIG;
		if (!valid_arg_len(bprm, len))
			goto out;

		/* We're going to work our way backwards. */
		pos = bprm->p;
		str += len;
		bprm->p -= len;

		while (len > 0) {
			int offset, bytes_to_copy;

			if (fatal_signal_pending(current)) {
				ret = -ERESTARTNOHAND;
				goto out;
			}
			cond_resched();

			offset = pos % PAGE_SIZE;
			if (offset == 0)
				offset = PAGE_SIZE;

			bytes_to_copy = offset;
			if (bytes_to_copy > len)
				bytes_to_copy = len;

			offset -= bytes_to_copy;
			pos -= bytes_to_copy;
			str -= bytes_to_copy;
			len -= bytes_to_copy;

			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
				struct page *page;

				page = get_arg_page(bprm, pos, 1);
				if (!page) {
					ret = -E2BIG;
					goto out;
				}

				if (kmapped_page) {
					flush_kernel_dcache_page(kmapped_page);
					kunmap(kmapped_page);
					put_arg_page(kmapped_page);
				}
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
				kpos = pos & PAGE_MASK;
				flush_arg_page(bprm, kpos, kmapped_page);
			}
			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
				ret = -EFAULT;
				goto out;
			}
		}
	}
	ret = 0;
out:
	if (kmapped_page) {
		flush_kernel_dcache_page(kmapped_page);
		kunmap(kmapped_page);
		put_arg_page(kmapped_page);
	}
	return ret;
}

/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, const char *const *__argv,
			struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	struct user_arg_ptr argv = {
		.ptr.native = (const char __user *const __user *)__argv,
	};

	set_fs(KERNEL_DS);
	r = copy_strings(argc, argv, bprm);
	set_fs(oldfs);

	return r;
}
EXPORT_SYMBOL(copy_strings_kernel);
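
/*
 * Typical use (see do_execve_common() below): the executable's path is
 * pushed as the very first string with
 *	copy_strings_kernel(1, &bprm->filename, bprm);
 * since bprm->filename lives in kernel memory, not user space.
 */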

#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location.  The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges.  This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	struct mmu_gather tlb;

	BUG_ON(new_start > new_end);

	/*
	 * ensure there are no vmas between where we want to go
	 * and where we are
	 */
	if (vma != find_vma(mm, new_start))
		return -EFAULT;

	/*
	 * cover the whole range: [new_start, old_end)
	 */
	if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
		return -ENOMEM;

	/*
	 * move the page tables downwards, on failure we rely on
	 * process cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length, false))
		return -ENOMEM;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, 0);
	if (new_end > old_start) {
		/*
		 * when the old and new regions overlap clear from new_end.
		 */
		free_pgd_range(&tlb, new_end, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	} else {
		/*
		 * otherwise, clean from old_start; this is done to not touch
		 * the address space in [new_end, old_start) some architectures
		 * have constraints on va-space that make this illegal (IA64) -
		 * for the others it's just a little faster.
		 */
		free_pgd_range(&tlb, old_start, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	}
	tlb_finish_mmu(&tlb, new_end, old_end);

	/*
	 * Shrink the vma to just the new range.  Always succeeds.
	 */
	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

	return 0;
}

/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long ret;
	unsigned long stack_shift;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = bprm->vma;
	struct vm_area_struct *prev = NULL;
	unsigned long vm_flags;
	unsigned long stack_base;
	unsigned long stack_size;
	unsigned long stack_expand;
	unsigned long rlim_stack;

#ifdef CONFIG_STACK_GROWSUP
	/* Limit stack size to 1GB */
	stack_base = rlimit_max(RLIMIT_STACK);
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;

	/* Make sure we didn't let the argument array grow too large. */
	if (vma->vm_end - vma->vm_start > stack_base)
		return -ENOMEM;

	stack_base = PAGE_ALIGN(stack_top - stack_base);

	stack_shift = vma->vm_start - stack_base;
	mm->arg_start = bprm->p - stack_shift;
	bprm->p = vma->vm_end - stack_shift;
#else
	stack_top = arch_align_stack(stack_top);
	stack_top = PAGE_ALIGN(stack_top);

	if (unlikely(stack_top < mmap_min_addr) ||
	    unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
		return -ENOMEM;

	stack_shift = vma->vm_end - stack_top;

	bprm->p -= stack_shift;
	mm->arg_start = bprm->p;
#endif

	if (bprm->loader)
		bprm->loader -= stack_shift;
	bprm->exec -= stack_shift;

	down_write(&mm->mmap_sem);
	vm_flags = VM_STACK_FLAGS;

	/*
	 * Adjust stack execute permissions; explicitly enable for
	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
	 * (arch default) otherwise.
	 */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		vm_flags |= VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		vm_flags &= ~VM_EXEC;
	vm_flags |= mm->def_flags;
	vm_flags |= VM_STACK_INCOMPLETE_SETUP;

	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
			vm_flags);
	if (ret)
		goto out_unlock;
	BUG_ON(prev != vma);

	/* Move stack pages down in memory. */
	if (stack_shift) {
		ret = shift_arg_pages(vma, stack_shift);
		if (ret)
			goto out_unlock;
	}

	/* mprotect_fixup is overkill to remove the temporary stack flags */
	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;

	stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
	stack_size = vma->vm_end - vma->vm_start;
	/*
	 * Align this down to a page boundary as expand_stack
	 * will align it up.
	 */
	rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_start + rlim_stack;
	else
		stack_base = vma->vm_end + stack_expand;
#else
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_end - rlim_stack;
	else
		stack_base = vma->vm_start - stack_expand;
#endif
	current->mm->start_stack = bprm->p;
	ret = expand_stack(vma, stack_base);
	if (ret)
		ret = -EFAULT;

out_unlock:
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(setup_arg_pages);

#endif /* CONFIG_MMU */

struct file *open_exec(const char *name)
{
	struct file *file;
	int err;
	struct filename tmp = { .name = name };
	static const struct open_flags open_exec_flags = {
		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
		.acc_mode = MAY_EXEC | MAY_OPEN,
		.intent = LOOKUP_OPEN
	};

	file = do_filp_open(AT_FDCWD, &tmp, &open_exec_flags, LOOKUP_FOLLOW);
	if (IS_ERR(file))
		goto out;

	err = -EACCES;
	if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
		goto exit;

	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	fsnotify_open(file);

	err = deny_write_access(file);
	if (err)
		goto exit;

out:
	return file;

exit:
	fput(file);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(open_exec);
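
/*
 * deny_write_access() is what makes writing to a running binary fail
 * with ETXTBSY: it takes a "deny write" reference on the inode, dropped
 * again via allow_write_access() on the error paths and in
 * search_binary_handler().
 */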

int kernel_read(struct file *file, loff_t offset,
		char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);
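
/*
 * Example caller: prepare_binprm() below uses
 *	kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
 * to pull the first 128 bytes of the image into bprm->buf for the
 * binfmt handlers to sniff.
 */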

static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	if (old_mm) {
		sync_mm_rss(old_mm);
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec.  We must hold mmap_sem around
		 * checking core_state and changing tsk->mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_state)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
		mm_update_next_owner(old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}

/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 */
	spin_lock_irq(lock);
	if (signal_group_exit(sig)) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		return -EAGAIN;
	}

	sig->group_exit_task = tsk;
	sig->notify_count = zap_other_threads(tsk);
	if (!thread_group_leader(tsk))
		sig->notify_count--;

	while (sig->notify_count) {
		__set_current_state(TASK_KILLABLE);
		spin_unlock_irq(lock);
		schedule();
		if (unlikely(__fatal_signal_pending(tsk)))
			goto killed;
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		struct task_struct *leader = tsk->group_leader;

		sig->notify_count = -1;	/* for exit_notify() */
		for (;;) {
			write_lock_irq(&tasklist_lock);
			if (likely(leader->exit_state))
				break;
			__set_current_state(TASK_KILLABLE);
			write_unlock_irq(&tasklist_lock);
			schedule();
			if (unlikely(__fatal_signal_pending(tsk)))
				goto killed;
		}

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead.  But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;

		BUG_ON(!same_thread_group(leader, tsk));
		BUG_ON(has_group_leader_pid(tsk));
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 * Note: The old leader also uses this pid until release_task
		 *       is called.  Odd but simple and correct.
		 */
		detach_pid(tsk, PIDTYPE_PID);
		tsk->pid = leader->pid;
		attach_pid(tsk, PIDTYPE_PID, task_pid(leader));
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);

		list_replace_rcu(&leader->tasks, &tsk->tasks);
		list_replace_init(&leader->sibling, &tsk->sibling);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;
		leader->exit_signal = -1;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;

		/*
		 * We are going to release_task()->ptrace_unlink() silently,
		 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
		 * the tracer won't block again waiting for this thread.
		 */
		if (unlikely(leader->ptrace))
			__wake_up_parent(leader, leader->parent);
		write_unlock_irq(&tasklist_lock);

		release_task(leader);
	}

	sig->group_exit_task = NULL;
	sig->notify_count = 0;

no_thread_group:
	/* we have changed execution domain */
	tsk->exit_signal = SIGCHLD;

	exit_itimers(sig);
	flush_itimer_signals();

	if (atomic_read(&oldsighand->count) != 1) {
		struct sighand_struct *newsighand;
		/*
		 * This ->sighand is shared with the CLONE_SIGHAND
		 * but not CLONE_THREAD task, switch to the new one.
		 */
		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
		if (!newsighand)
			return -ENOMEM;

		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		rcu_assign_pointer(tsk->sighand, newsighand);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}

	BUG_ON(!thread_group_leader(tsk));
	return 0;

killed:
	/* protects against exit_notify() and __exit_signal() */
	read_lock(&tasklist_lock);
	sig->group_exit_task = NULL;
	sig->notify_count = 0;
	read_unlock(&tasklist_lock);
	return -EAGAIN;
}

char *get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	strncpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
	return buf;
}
EXPORT_SYMBOL_GPL(get_task_comm);

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */

void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);

	trace_task_rename(tsk, buf);

	/*
	 * Threads may access current->comm without holding
	 * the task lock, so write the string carefully.
	 * Readers without a lock may see incomplete new
	 * names but are safe from non-terminating string reads.
	 */
	memset(tsk->comm, 0, TASK_COMM_LEN);
	wmb();
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
	perf_event_comm(tsk);
}
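
/*
 * tsk->comm is TASK_COMM_LEN (16) bytes including the trailing NUL, so
 * strlcpy() above silently truncates names longer than 15 characters;
 * e.g. "my_very_long_tool" becomes "my_very_long_to".
 */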

static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
{
	int i, ch;

	/* Copies the binary name from after last slash */
	for (i = 0; (ch = *(fn++)) != '\0';) {
		if (ch == '/')
			i = 0; /* overwrite what we wrote */
		else if (i < len - 1)
			tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
}

int flush_old_exec(struct linux_binprm * bprm)
{
	int retval;

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	set_mm_exe_file(bprm->mm, bprm->file);

	filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
	/*
	 * Release all of the old mmap stuff
	 */
	acct_arg_size(bprm, 0);
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto out;

	bprm->mm = NULL;		/* We're using it now */

	set_fs(USER_DS);
	current->flags &=
		~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | PF_NOFREEZE);
	flush_thread();
	current->personality &= ~bprm->per_clear;

	return 0;

out:
	return retval;
}
EXPORT_SYMBOL(flush_old_exec);

void would_dump(struct linux_binprm *bprm, struct file *file)
{
	if (inode_permission(file->f_path.dentry->d_inode, MAY_READ) < 0)
		bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
}
EXPORT_SYMBOL(would_dump);

void setup_new_exec(struct linux_binprm * bprm)
{
	arch_pick_mmap_layout(current->mm);

	/* This is the point of no return */
	current->sas_ss_sp = current->sas_ss_size = 0;

	if (uid_eq(current_euid(), current_uid()) && gid_eq(current_egid(), current_gid()))
		set_dumpable(current->mm, SUID_DUMPABLE_ENABLED);
	else
		set_dumpable(current->mm, suid_dumpable);

	set_task_comm(current, bprm->tcomm);

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc
	 */
	current->mm->task_size = TASK_SIZE;

	/* install the new credentials */
	if (!uid_eq(bprm->cred->uid, current_euid()) ||
	    !gid_eq(bprm->cred->gid, current_egid())) {
		current->pdeath_signal = 0;
	} else {
		would_dump(bprm, bprm->file);
		if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)
			set_dumpable(current->mm, suid_dumpable);
	}

	/*
	 * Flush performance counters when crossing a
	 * security domain:
	 */
	if (!get_dumpable(current->mm))
		perf_event_exit_task(current);

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	do_close_on_exec(current->files);
}
EXPORT_SYMBOL(setup_new_exec);

/*
 * Prepare credentials and lock ->cred_guard_mutex.
 * install_exec_creds() commits the new creds and drops the lock.
 * Or, if exec fails before, free_bprm() should release ->cred and
 * unlock.
 */
int prepare_bprm_creds(struct linux_binprm *bprm)
{
	if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
		return -ERESTARTNOINTR;

	bprm->cred = prepare_exec_creds();
	if (likely(bprm->cred))
		return 0;

	mutex_unlock(&current->signal->cred_guard_mutex);
	return -ENOMEM;
}

void free_bprm(struct linux_binprm *bprm)
{
	free_arg_pages(bprm);
	if (bprm->cred) {
		mutex_unlock(&current->signal->cred_guard_mutex);
		abort_creds(bprm->cred);
	}
	kfree(bprm);
}

/*
 * install the new credentials for this executable
 */
void install_exec_creds(struct linux_binprm *bprm)
{
	security_bprm_committing_creds(bprm);

	commit_creds(bprm->cred);
	bprm->cred = NULL;
	/*
	 * cred_guard_mutex must be held at least to this point to prevent
	 * ptrace_attach() from altering our determination of the task's
	 * credentials; any time after this it may be unlocked.
	 */
	security_bprm_committed_creds(bprm);
	mutex_unlock(&current->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(install_exec_creds);

/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold ->cred_guard_mutex to protect against
 *   PTRACE_ATTACH
 */
static int check_unsafe_exec(struct linux_binprm *bprm)
{
	struct task_struct *p = current, *t;
	unsigned n_fs;
	int res = 0;

	if (p->ptrace) {
		if (p->ptrace & PT_PTRACE_CAP)
			bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
		else
			bprm->unsafe |= LSM_UNSAFE_PTRACE;
	}

	/*
	 * This isn't strictly necessary, but it makes it harder for LSMs to
	 * mess up.
	 */
	if (current->no_new_privs)
		bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;

	n_fs = 1;
	spin_lock(&p->fs->lock);
	rcu_read_lock();
	for (t = next_thread(p); t != p; t = next_thread(t)) {
		if (t->fs == p->fs)
			n_fs++;
	}
	rcu_read_unlock();

	if (p->fs->users > n_fs) {
		bprm->unsafe |= LSM_UNSAFE_SHARE;
	} else {
		res = -EAGAIN;
		if (!p->fs->in_exec) {
			p->fs->in_exec = 1;
			res = 1;
		}
	}
	spin_unlock(&p->fs->lock);

	return res;
}

/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	umode_t mode;
	struct inode * inode = bprm->file->f_path.dentry->d_inode;
	int retval;

	mode = inode->i_mode;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	/* clear any previous set[ug]id data from a previous binary */
	bprm->cred->euid = current_euid();
	bprm->cred->egid = current_egid();

	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
	    !current->no_new_privs) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			if (!kuid_has_mapping(bprm->cred->user_ns, inode->i_uid))
				return -EPERM;
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->euid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			if (!kgid_has_mapping(bprm->cred->user_ns, inode->i_gid))
				return -EPERM;
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->egid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set_creds(bprm);
	if (retval)
		return retval;
	bprm->cred_prepared = 1;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);

/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
	int ret = 0;
	unsigned long offset;
	char *kaddr;
	struct page *page;

	if (!bprm->argc)
		return 0;

	do {
		offset = bprm->p & ~PAGE_MASK;
		page = get_arg_page(bprm, bprm->p, 0);
		if (!page) {
			ret = -EFAULT;
			goto out;
		}
		kaddr = kmap_atomic(page);

		for (; offset < PAGE_SIZE && kaddr[offset];
				offset++, bprm->p++)
			;

		kunmap_atomic(kaddr);
		put_arg_page(page);

		if (offset == PAGE_SIZE)
			free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
	} while (offset == PAGE_SIZE);

	bprm->p++;
	bprm->argc--;
	ret = 0;

out:
	return ret;
}
EXPORT_SYMBOL(remove_arg_zero);
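
/*
 * Example consumer: a script loader such as binfmt_script calls
 * remove_arg_zero() to drop the original argv[0] before pushing the
 * interpreter path (and optional #! argument) in its place.
 */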

/*
 * cycle the list of binary format handlers, until one recognizes the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	unsigned int depth = bprm->recursion_depth;
	int try, retval;
	struct linux_binfmt *fmt;
	pid_t old_pid, old_vpid;

	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	retval = audit_bprm(bprm);
	if (retval)
		return retval;

	/* Need to fetch pid before load_binary changes it */
	old_pid = current->pid;
	rcu_read_lock();
	old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
	rcu_read_unlock();

	retval = -ENOENT;
	for (try = 0; try < 2; try++) {
		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			/*
			 * Restore the depth counter to its starting value
			 * in this call, so we don't have to rely on every
			 * load_binary function to restore it on return.
			 */
			bprm->recursion_depth = depth;
			if (retval >= 0) {
				if (depth == 0) {
					trace_sched_process_exec(current, old_pid, bprm);
					ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
				}
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				proc_exec_connector(current);
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
#ifdef CONFIG_MODULES
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			if (try)
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
		}
#else
		break;
#endif
	}
	return retval;
}

EXPORT_SYMBOL(search_binary_handler);
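
/*
 * Worked example of the request_module() call above: for an ELF image
 * bprm->buf starts with 0x7f 'E' 'L' 'F', so buf[2..3] are 'L' (0x4c)
 * and 'F' (0x46) and, on a little-endian machine, the module requested
 * is "binfmt-464c".
 */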

/*
 * sys_execve() executes a new program.
 */
static int do_execve_common(const char *filename,
				struct user_arg_ptr argv,
				struct user_arg_ptr envp,
				struct pt_regs *regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	struct files_struct *displaced;
	bool clear_in_exec;
	int retval;
	const struct cred *cred = current_cred();

	/*
	 * We move the actual failure in case of RLIMIT_NPROC excess from
	 * set*uid() to execve() because too many poorly written programs
	 * don't check setuid() return code.  Here we additionally recheck
	 * whether NPROC limit is still exceeded.
	 */
	if ((current->flags & PF_NPROC_EXCEEDED) &&
	    atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
		retval = -EAGAIN;
		goto out_ret;
	}

	/* We're below the limit (still or again), so we don't want to make
	 * further execve() calls fail. */
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = unshare_files(&displaced);
	if (retval)
		goto out_ret;

	retval = -ENOMEM;
	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_files;

	retval = prepare_bprm_creds(bprm);
	if (retval)
		goto out_free;

	retval = check_unsafe_exec(bprm);
	if (retval < 0)
		goto out_free;
	clear_in_exec = retval;
	current->in_execve = 1;

	file = open_exec(filename);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_unmark;

	sched_exec();

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;

	retval = bprm_mm_init(bprm);
	if (retval)
		goto out_file;

	bprm->argc = count(argv, MAX_ARG_STRINGS);
	if ((retval = bprm->argc) < 0)
		goto out;

	bprm->envc = count(envp, MAX_ARG_STRINGS);
	if ((retval = bprm->envc) < 0)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;

	retval = search_binary_handler(bprm, regs);
	if (retval < 0)
		goto out;

	/* execve succeeded */
	current->fs->in_exec = 0;
	current->in_execve = 0;
	acct_update_integrals(current);
	free_bprm(bprm);
	if (displaced)
		put_files_struct(displaced);
	return retval;

out:
	if (bprm->mm) {
		acct_arg_size(bprm, 0);
		mmput(bprm->mm);
	}

out_file:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}

out_unmark:
	if (clear_in_exec)
		current->fs->in_exec = 0;
	current->in_execve = 0;

out_free:
	free_bprm(bprm);

out_files:
	if (displaced)
		reset_files_struct(displaced);
out_ret:
	return retval;
}
,
1568 const char __user
*const __user
*__argv
,
1569 const char __user
*const __user
*__envp
,
1570 struct pt_regs
*regs
)
1572 struct user_arg_ptr argv
= { .ptr
.native
= __argv
};
1573 struct user_arg_ptr envp
= { .ptr
.native
= __envp
};
1574 return do_execve_common(filename
, argv
, envp
, regs
);

#ifdef CONFIG_COMPAT
int compat_do_execve(const char *filename,
	const compat_uptr_t __user *__argv,
	const compat_uptr_t __user *__envp,
	struct pt_regs *regs)
{
	struct user_arg_ptr argv = {
		.is_compat = true,
		.ptr.compat = __argv,
	};
	struct user_arg_ptr envp = {
		.is_compat = true,
		.ptr.compat = __envp,
	};
	return do_execve_common(filename, argv, envp, regs);
}
#endif

void set_binfmt(struct linux_binfmt *new)
{
	struct mm_struct *mm = current->mm;

	if (mm->binfmt)
		module_put(mm->binfmt->module);

	mm->binfmt = new;
	if (new)
		__module_get(new->module);
}

EXPORT_SYMBOL(set_binfmt);

/*
 * set_dumpable converts traditional three-value dumpable to two flags and
 * stores them into mm->flags.  It modifies lower two bits of mm->flags, but
 * these bits are not changed atomically.  So get_dumpable can observe the
 * intermediate state.  To avoid doing unexpected behavior, get_dumpable
 * returns either the old dumpable or the new one by paying attention to
 * the order of modifying the bits.
 *
 * dumpable |   mm->flags (binary)
 * old  new | initial interim  final
 * ---------+-----------------------
 *  0    1  | 00      01       01
 *  0    2  | 00      10(*)    11
 *  1    0  | 01      00       00
 *  1    2  | 01      11       11
 *  2    0  | 11      10(*)    00
 *  2    1  | 11      11       01
 *
 * (*) get_dumpable regards interim value of 10 as 11.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
	switch (value) {
	case SUID_DUMPABLE_DISABLED:
		clear_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case SUID_DUMPABLE_ENABLED:
		set_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case SUID_DUMPABLE_SAFE:
		set_bit(MMF_DUMP_SECURELY, &mm->flags);
		smp_wmb();
		set_bit(MMF_DUMPABLE, &mm->flags);
		break;
	}
}

int __get_dumpable(unsigned long mm_flags)
{
	int ret;

	ret = mm_flags & MMF_DUMPABLE_MASK;
	return (ret > SUID_DUMPABLE_ENABLED) ? SUID_DUMPABLE_SAFE : ret;
}

int get_dumpable(struct mm_struct *mm)
{
	return __get_dumpable(mm->flags);
}
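
/*
 * The three dumpable states are SUID_DUMPABLE_DISABLED (0),
 * SUID_DUMPABLE_ENABLED (1) and SUID_DUMPABLE_SAFE (2); __get_dumpable()
 * folds the transient "10" bit pattern into SAFE, matching the (*) note
 * in the table above.
 */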

#ifdef __ARCH_WANT_SYS_EXECVE
SYSCALL_DEFINE3(execve,
		const char __user *, filename,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp)
{
	struct filename *path = getname(filename);
	int error = PTR_ERR(path);
	if (!IS_ERR(path)) {
		error = do_execve(path->name, argv, envp, current_pt_regs());
		putname(path);
	}
	return error;
}
#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_execve(const char __user * filename,
	const compat_uptr_t __user * argv,
	const compat_uptr_t __user * envp)
{
	struct filename *path = getname(filename);
	int error = PTR_ERR(path);
	if (!IS_ERR(path)) {
		error = compat_do_execve(path->name, argv, envp,
					 current_pt_regs());
		putname(path);
	}
	return error;
}
#endif /* CONFIG_COMPAT */
#endif /* __ARCH_WANT_SYS_EXECVE */

#ifdef __ARCH_WANT_KERNEL_EXECVE
int kernel_execve(const char *filename,
		  const char *const argv[],
		  const char *const envp[])
{
	struct pt_regs *p = current_pt_regs();
	int ret;

	ret = do_execve(filename,
			(const char __user *const __user *)argv,
			(const char __user *const __user *)envp, p);
	if (ret < 0)
		return ret;

	/*
	 * We were successful.  We won't be returning to our caller, but
	 * instead to user space by manipulating the kernel stack.
	 */
	ret_from_kernel_execve(p);
}
#endif