/*
 *  linux/fs/exec.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>

#include <trace/events/task.h>

#include <trace/events/sched.h>
int suid_dumpable = 0;

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);
void __register_binfmt(struct linux_binfmt * fmt, int insert)
{
	write_lock(&binfmt_lock);
	insert ? list_add(&fmt->lh, &formats) :
		 list_add_tail(&fmt->lh, &formats);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(__register_binfmt);
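/*
 * Illustrative sketch (not part of this file): a binary-format handler
 * normally registers itself through the register_binfmt()/insert_binfmt()
 * wrappers in <linux/binfmts.h>, which call __register_binfmt() with
 * insert == 0 or 1 respectively. The handler name and magic check below
 * are hypothetical:
 *
 *	static int example_load_binary(struct linux_binprm *bprm)
 *	{
 *		if (memcmp(bprm->buf, "XMPL", 4) != 0)
 *			return -ENOEXEC;	// not ours; try next format
 *		// ... set up the new image ...
 *		return 0;
 *	}
 *
 *	static struct linux_binfmt example_format = {
 *		.module      = THIS_MODULE,
 *		.load_binary = example_load_binary,
 *	};
 *
 *	register_binfmt(&example_format);	// appends: insert == 0
 */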
void unregister_binfmt(struct linux_binfmt * fmt)
{
	write_lock(&binfmt_lock);
	list_del(&fmt->lh);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);
static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}
/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that we take the address to load from from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
	struct file *file;
	struct filename *tmp = getname(library);
	int error = PTR_ERR(tmp);
	static const struct open_flags uselib_flags = {
		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
		.acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
		.intent = LOOKUP_OPEN,
		.lookup_flags = LOOKUP_FOLLOW,
	};

	if (IS_ERR(tmp))
		goto out;

	file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
	putname(tmp);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -EINVAL;
	if (!S_ISREG(file_inode(file)->i_mode))
		goto exit;

	error = -EACCES;
	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	fsnotify_open(file);

	error = -ENOEXEC;
	if (file->f_op) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			if (!fmt->load_shlib)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
exit:
	fput(file);
out:
	return error;
}
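/*
 * Usage note (illustrative): uselib(2) predates mmap-based shared
 * libraries and is only used by ancient libc binaries. On architectures
 * that still wire it up, a userspace call would look roughly like:
 *
 *	// hypothetical example, assuming __NR_uselib is defined
 *	syscall(__NR_uselib, "/lib/libc.so.4");
 *
 * The library must be a regular file on a mount without MNT_NOEXEC, and
 * some binfmt's ->load_shlib() must recognize its format, otherwise the
 * call fails with -ENOEXEC.
 */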
#ifdef CONFIG_MMU

/*
 * The nascent bprm->mm is not visible until exec_mmap() but it can
 * use a lot of memory, so account these pages in current->mm temporarily
 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
 * change the counter back via acct_arg_size(0).
 */
static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
	struct mm_struct *mm = current->mm;
	long diff = (long)(pages - bprm->vma_pages);

	if (!mm || !diff)
		return;

	bprm->vma_pages = pages;
	add_mm_counter(mm, MM_ANONPAGES, diff);
}
static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;
	int ret;

#ifdef CONFIG_STACK_GROWSUP
	if (write) {
		ret = expand_downwards(bprm->vma, pos);
		if (ret < 0)
			return NULL;
	}
#endif
	ret = get_user_pages(current, bprm->mm, pos,
			1, write, 1, &page, NULL);
	if (ret <= 0)
		return NULL;

	if (write) {
		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
		struct rlimit *rlim;

		acct_arg_size(bprm, size / PAGE_SIZE);

		/*
		 * We've historically supported up to 32 pages (ARG_MAX)
		 * of argument strings even with small stacks
		 */
		if (size <= ARG_MAX)
			return page;

		/*
		 * Limit to 1/4-th the stack size for the argv+env strings.
		 * This ensures that:
		 *  - the remaining binfmt code will not run out of stack space,
		 *  - the program will have a reasonable amount of stack left
		 *    to work with.
		 */
		rlim = current->signal->rlim;
		if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
			put_page(page);
			return NULL;
		}
	}

	return page;
}
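/*
 * Worked example for the check above (illustrative numbers): with the
 * common RLIMIT_STACK soft limit of 8 MiB, argv+env may occupy at most
 * 8 MiB / 4 = 2 MiB once the historical 32-page ARG_MAX allowance is
 * exhausted; a write that would push the stack vma past that quarter of
 * the rlimit makes get_arg_page() fail and the exec returns -E2BIG.
 */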
static void put_arg_page(struct page *page)
{
	put_page(page);
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}
static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	down_write(&mm->mmap_sem);
	vma->vm_mm = mm;

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
	BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;
	vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	INIT_LIST_HEAD(&vma->anon_vma_chain);

	err = insert_vm_struct(mm, vma);
	if (err)
		goto err;

	mm->stack_vm = mm->total_vm = 1;
	up_write(&mm->mmap_sem);
	bprm->p = vma->vm_end - sizeof(void *);
	return 0;
err:
	up_write(&mm->mmap_sem);
	bprm->vma = NULL;
	kmem_cache_free(vm_area_cachep, vma);
	return err;
}
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= MAX_ARG_STRLEN;
}

#else
static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;

	page = bprm->page[pos / PAGE_SIZE];
	if (!page && write) {
		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
		if (!page)
			return NULL;
		bprm->page[pos / PAGE_SIZE] = page;
	}

	return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
	if (bprm->page[i]) {
		__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

static void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++)
		free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
	return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= bprm->p;
}

#endif /* CONFIG_MMU */
/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct. We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values. We'll update
 * them later in setup_arg_pages().
 */
static int bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct mm_struct *mm = NULL;

	bprm->mm = mm = mm_alloc();
	err = -ENOMEM;
	if (!mm)
		goto err;

	err = init_new_context(current, mm);
	if (err)
		goto err;

	err = __bprm_mm_init(bprm);
	if (err)
		goto err;

	return 0;

err:
	if (mm) {
		bprm->mm = NULL;
		mmdrop(mm);
	}

	return err;
}
struct user_arg_ptr {
#ifdef CONFIG_COMPAT
	bool is_compat;
#endif
	union {
		const char __user *const __user *native;
#ifdef CONFIG_COMPAT
		const compat_uptr_t __user *compat;
#endif
	} ptr;
};
static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
{
	const char __user *native;

#ifdef CONFIG_COMPAT
	if (unlikely(argv.is_compat)) {
		compat_uptr_t compat;

		if (get_user(compat, argv.ptr.compat + nr))
			return ERR_PTR(-EFAULT);

		return compat_ptr(compat);
	}
#endif

	if (get_user(native, argv.ptr.native + nr))
		return ERR_PTR(-EFAULT);

	return native;
}
/*
 * count() counts the number of strings in array ARGV.
 */
static int count(struct user_arg_ptr argv, int max)
{
	int i = 0;

	if (argv.ptr.native != NULL) {
		for (;;) {
			const char __user *p = get_user_arg_ptr(argv, i);

			if (!p)
				break;

			if (IS_ERR(p))
				return -EFAULT;

			if (i >= max)
				return -E2BIG;
			++i;

			if (fatal_signal_pending(current))
				return -ERESTARTNOHAND;
			cond_resched();
		}
	}
	return i;
}
/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack. The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, struct user_arg_ptr argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	unsigned long kpos = 0;
	int ret;

	while (argc-- > 0) {
		const char __user *str;
		int len;
		unsigned long pos;

		ret = -EFAULT;
		str = get_user_arg_ptr(argv, argc);
		if (IS_ERR(str))
			goto out;

		len = strnlen_user(str, MAX_ARG_STRLEN);
		if (!len)
			goto out;

		ret = -E2BIG;
		if (!valid_arg_len(bprm, len))
			goto out;

		/* We're going to work our way backwards. */
		pos = bprm->p;
		str += len;
		bprm->p -= len;

		while (len > 0) {
			int offset, bytes_to_copy;

			if (fatal_signal_pending(current)) {
				ret = -ERESTARTNOHAND;
				goto out;
			}
			cond_resched();

			offset = pos % PAGE_SIZE;
			if (offset == 0)
				offset = PAGE_SIZE;

			bytes_to_copy = offset;
			if (bytes_to_copy > len)
				bytes_to_copy = len;

			offset -= bytes_to_copy;
			pos -= bytes_to_copy;
			str -= bytes_to_copy;
			len -= bytes_to_copy;

			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
				struct page *page;

				page = get_arg_page(bprm, pos, 1);
				if (!page) {
					ret = -E2BIG;
					goto out;
				}

				if (kmapped_page) {
					flush_kernel_dcache_page(kmapped_page);
					kunmap(kmapped_page);
					put_arg_page(kmapped_page);
				}
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
				kpos = pos & PAGE_MASK;
				flush_arg_page(bprm, kpos, kmapped_page);
			}
			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
				ret = -EFAULT;
				goto out;
			}
		}
	}
	ret = 0;
out:
	if (kmapped_page) {
		flush_kernel_dcache_page(kmapped_page);
		kunmap(kmapped_page);
		put_arg_page(kmapped_page);
	}
	return ret;
}
/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, const char *const *__argv,
			struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	struct user_arg_ptr argv = {
		.ptr.native = (const char __user *const __user *)__argv,
	};

	set_fs(KERNEL_DS);
	r = copy_strings(argc, argv, bprm);
	set_fs(oldfs);

	return r;
}
EXPORT_SYMBOL(copy_strings_kernel);
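/*
 * Usage sketch (hypothetical variable names): binfmt handlers use
 * copy_strings_kernel() to push kernel-generated strings onto the nascent
 * stack. A script handler that has parsed a "#!" line might do:
 *
 *	retval = copy_strings_kernel(1, &i_name, bprm);	// interpreter path
 *	if (retval < 0)
 *		return retval;
 *	bprm->argc++;
 *
 * The temporary set_fs(KERNEL_DS) above is what lets copy_strings()'s
 * copy_from_user() read these kernel-space pointers.
 */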
#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location. The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges. This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	struct mmu_gather tlb;

	BUG_ON(new_start > new_end);

	/*
	 * ensure there are no vmas between where we want to go
	 * and where we are
	 */
	if (vma != find_vma(mm, new_start))
		return -EFAULT;

	/*
	 * cover the whole range: [new_start, old_end)
	 */
	if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
		return -ENOMEM;

	/*
	 * move the page tables downwards, on failure we rely on
	 * process cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length, false))
		return -ENOMEM;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, 0);
	if (new_end > old_start) {
		/*
		 * when the old and new regions overlap clear from new_end.
		 */
		free_pgd_range(&tlb, new_end, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
	} else {
		/*
		 * otherwise, clean from old_start; this is done to not touch
		 * the address space in [new_end, old_start); some architectures
		 * have constraints on va-space that make this illegal (IA64) -
		 * for the others it's just a little faster.
		 */
		free_pgd_range(&tlb, old_start, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
	}
	tlb_finish_mmu(&tlb, new_end, old_end);

	/*
	 * Shrink the vma to just the new range.  Always succeeds.
	 */
	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

	return 0;
}
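/*
 * Worked example with illustrative numbers: for a one-page temporary
 * stack vma [0xffffe000, 0xfffff000) and shift = 0x2000, step 2 grows the
 * vma to [0xffffc000, 0xfffff000), step 3 moves the page tables so the
 * page appears at 0xffffc000, and step 5 shrinks the vma to
 * [0xffffc000, 0xffffd000). Because new_end (0xffffd000) is below
 * old_start (0xffffe000) the ranges do not overlap, so the pgd range is
 * cleared from old_start rather than new_end.
 */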
/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long ret;
	unsigned long stack_shift;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = bprm->vma;
	struct vm_area_struct *prev = NULL;
	unsigned long vm_flags;
	unsigned long stack_base;
	unsigned long stack_size;
	unsigned long stack_expand;
	unsigned long rlim_stack;

#ifdef CONFIG_STACK_GROWSUP
	/* Limit stack size to 1GB */
	stack_base = rlimit_max(RLIMIT_STACK);
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;

	/* Make sure we didn't let the argument array grow too large. */
	if (vma->vm_end - vma->vm_start > stack_base)
		return -ENOMEM;

	stack_base = PAGE_ALIGN(stack_top - stack_base);

	stack_shift = vma->vm_start - stack_base;
	mm->arg_start = bprm->p - stack_shift;
	bprm->p = vma->vm_end - stack_shift;
#else
	stack_top = arch_align_stack(stack_top);
	stack_top = PAGE_ALIGN(stack_top);

	if (unlikely(stack_top < mmap_min_addr) ||
	    unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
		return -ENOMEM;

	stack_shift = vma->vm_end - stack_top;

	bprm->p -= stack_shift;
	mm->arg_start = bprm->p;
#endif

	if (bprm->loader)
		bprm->loader -= stack_shift;
	bprm->exec -= stack_shift;

	down_write(&mm->mmap_sem);
	vm_flags = VM_STACK_FLAGS;

	/*
	 * Adjust stack execute permissions; explicitly enable for
	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
	 * (arch default) otherwise.
	 */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		vm_flags |= VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		vm_flags &= ~VM_EXEC;
	vm_flags |= mm->def_flags;
	vm_flags |= VM_STACK_INCOMPLETE_SETUP;

	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
			vm_flags);
	if (ret)
		goto out_unlock;
	BUG_ON(prev != vma);

	/* Move stack pages down in memory. */
	if (stack_shift) {
		ret = shift_arg_pages(vma, stack_shift);
		if (ret)
			goto out_unlock;
	}

	/* mprotect_fixup is overkill to remove the temporary stack flags */
	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;

	stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
	stack_size = vma->vm_end - vma->vm_start;
	/*
	 * Align this down to a page boundary as expand_stack
	 * will align it up.
	 */
	rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_start + rlim_stack;
	else
		stack_base = vma->vm_end + stack_expand;
#else
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_end - rlim_stack;
	else
		stack_base = vma->vm_start - stack_expand;
#endif
	current->mm->start_stack = bprm->p;
	ret = expand_stack(vma, stack_base);
	if (ret)
		ret = -EFAULT;

out_unlock:
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(setup_arg_pages);

#endif /* CONFIG_MMU */
struct file *open_exec(const char *name)
{
	struct file *file;
	int err;
	struct filename tmp = { .name = name };
	static const struct open_flags open_exec_flags = {
		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
		.acc_mode = MAY_EXEC | MAY_OPEN,
		.intent = LOOKUP_OPEN,
		.lookup_flags = LOOKUP_FOLLOW,
	};

	file = do_filp_open(AT_FDCWD, &tmp, &open_exec_flags);
	if (IS_ERR(file))
		goto out;

	err = -EACCES;
	if (!S_ISREG(file_inode(file)->i_mode))
		goto exit;

	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	fsnotify_open(file);

	err = deny_write_access(file);
	if (err)
		goto exit;

out:
	return file;

exit:
	fput(file);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(open_exec);
int kernel_read(struct file *file, loff_t offset,
		char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);
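/*
 * Usage sketch: binfmt loaders read image headers through this helper,
 * e.g. (hypothetical, error handling elided):
 *
 *	struct elf64_hdr ehdr;
 *	if (kernel_read(bprm->file, 0, (char *)&ehdr, sizeof(ehdr))
 *	    != sizeof(ehdr))
 *		return -EIO;
 *
 * prepare_binprm() below uses it the same way to fill bprm->buf with the
 * first BINPRM_BUF_SIZE bytes of the image.
 */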
ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
{
	ssize_t res = file->f_op->read(file, (void __user *)addr, len, &pos);
	if (res > 0)
		flush_icache_range(addr, addr + len);
	return res;
}
EXPORT_SYMBOL(read_code);
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	if (old_mm) {
		sync_mm_rss(old_mm);
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec.  We must hold mmap_sem around
		 * checking core_state and changing tsk->mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_state)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
		mm_update_next_owner(old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}
/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 */
	spin_lock_irq(lock);
	if (signal_group_exit(sig)) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		return -EAGAIN;
	}

	sig->group_exit_task = tsk;
	sig->notify_count = zap_other_threads(tsk);
	if (!thread_group_leader(tsk))
		sig->notify_count--;

	while (sig->notify_count) {
		__set_current_state(TASK_KILLABLE);
		spin_unlock_irq(lock);
		schedule();
		if (unlikely(__fatal_signal_pending(tsk)))
			goto killed;
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		struct task_struct *leader = tsk->group_leader;

		sig->notify_count = -1;	/* for exit_notify() */
		for (;;) {
			threadgroup_change_begin(tsk);
			write_lock_irq(&tasklist_lock);
			if (likely(leader->exit_state))
				break;
			__set_current_state(TASK_KILLABLE);
			write_unlock_irq(&tasklist_lock);
			threadgroup_change_end(tsk);
			schedule();
			if (unlikely(__fatal_signal_pending(tsk)))
				goto killed;
		}

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead.  But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;

		BUG_ON(!same_thread_group(leader, tsk));
		BUG_ON(has_group_leader_pid(tsk));
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 * Note: The old leader also uses this pid until release_task
		 *       is called.  Odd but simple and correct.
		 */
		detach_pid(tsk, PIDTYPE_PID);
		tsk->pid = leader->pid;
		attach_pid(tsk, PIDTYPE_PID, task_pid(leader));
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);

		list_replace_rcu(&leader->tasks, &tsk->tasks);
		list_replace_init(&leader->sibling, &tsk->sibling);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;
		leader->exit_signal = -1;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;

		/*
		 * We are going to release_task()->ptrace_unlink() silently,
		 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
		 * the tracer won't block again waiting for this thread.
		 */
		if (unlikely(leader->ptrace))
			__wake_up_parent(leader, leader->parent);
		write_unlock_irq(&tasklist_lock);
		threadgroup_change_end(tsk);

		release_task(leader);
	}

	sig->group_exit_task = NULL;
	sig->notify_count = 0;

no_thread_group:
	/* we have changed execution domain */
	tsk->exit_signal = SIGCHLD;

	exit_itimers(sig);
	flush_itimer_signals();

	if (atomic_read(&oldsighand->count) != 1) {
		struct sighand_struct *newsighand;
		/*
		 * This ->sighand is shared with the CLONE_SIGHAND
		 * but not CLONE_THREAD task, switch to the new one.
		 */
		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
		if (!newsighand)
			return -ENOMEM;

		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		rcu_assign_pointer(tsk->sighand, newsighand);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}

	BUG_ON(!thread_group_leader(tsk));
	return 0;

killed:
	/* protects against exit_notify() and __exit_signal() */
	read_lock(&tasklist_lock);
	sig->group_exit_task = NULL;
	sig->notify_count = 0;
	read_unlock(&tasklist_lock);
	return -EAGAIN;
}
char *get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	strncpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
	return buf;
}
EXPORT_SYMBOL_GPL(get_task_comm);
/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */

void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);
	trace_task_rename(tsk, buf);
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
	perf_event_comm(tsk);
}
static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
{
	int i, ch;

	/* Copies the binary name from after last slash */
	for (i = 0; (ch = *(fn++)) != '\0';) {
		if (ch == '/')
			i = 0; /* overwrite what we wrote */
		else
			if (i < len - 1)
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
}
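/*
 * Example: filename_to_taskname(tcomm, "/usr/local/bin/my-daemon", 16)
 * yields "my-daemon"; a name longer than len - 1 bytes is silently
 * truncated, since bprm->tcomm is only TASK_COMM_LEN (16) bytes.
 */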
int flush_old_exec(struct linux_binprm * bprm)
{
	int retval;

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	set_mm_exe_file(bprm->mm, bprm->file);

	filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
	/*
	 * Release all of the old mmap stuff
	 */
	acct_arg_size(bprm, 0);
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto out;

	bprm->mm = NULL;		/* We're using it now */

	set_fs(USER_DS);
	current->flags &=
		~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | PF_NOFREEZE);
	flush_thread();
	current->personality &= ~bprm->per_clear;

	return 0;

out:
	return retval;
}
EXPORT_SYMBOL(flush_old_exec);
void would_dump(struct linux_binprm *bprm, struct file *file)
{
	if (inode_permission(file_inode(file), MAY_READ) < 0)
		bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
}
EXPORT_SYMBOL(would_dump);
void setup_new_exec(struct linux_binprm * bprm)
{
	arch_pick_mmap_layout(current->mm);

	/* This is the point of no return */
	current->sas_ss_sp = current->sas_ss_size = 0;

	if (uid_eq(current_euid(), current_uid()) && gid_eq(current_egid(), current_gid()))
		set_dumpable(current->mm, SUID_DUMP_USER);
	else
		set_dumpable(current->mm, suid_dumpable);

	set_task_comm(current, bprm->tcomm);

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc
	 */
	current->mm->task_size = TASK_SIZE;

	/* install the new credentials */
	if (!uid_eq(bprm->cred->uid, current_euid()) ||
	    !gid_eq(bprm->cred->gid, current_egid())) {
		current->pdeath_signal = 0;
	} else {
		would_dump(bprm, bprm->file);
		if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)
			set_dumpable(current->mm, suid_dumpable);
	}

	/* An exec changes our domain. We are no longer part of the thread
	   group */
	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	do_close_on_exec(current->files);
}
EXPORT_SYMBOL(setup_new_exec);
/*
 * Prepare credentials and lock ->cred_guard_mutex.
 * install_exec_creds() commits the new creds and drops the lock.
 * Or, if exec fails before, free_bprm() should release ->cred and
 * unlock.
 */
int prepare_bprm_creds(struct linux_binprm *bprm)
{
	if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
		return -ERESTARTNOINTR;

	bprm->cred = prepare_exec_creds();
	if (likely(bprm->cred))
		return 0;

	mutex_unlock(&current->signal->cred_guard_mutex);
	return -ENOMEM;
}
void free_bprm(struct linux_binprm *bprm)
{
	free_arg_pages(bprm);
	if (bprm->cred) {
		mutex_unlock(&current->signal->cred_guard_mutex);
		abort_creds(bprm->cred);
	}
	/* If a binfmt changed the interp, free it. */
	if (bprm->interp != bprm->filename)
		kfree(bprm->interp);
	kfree(bprm);
}
int bprm_change_interp(char *interp, struct linux_binprm *bprm)
{
	/* If a binfmt changed the interp, free it first. */
	if (bprm->interp != bprm->filename)
		kfree(bprm->interp);
	bprm->interp = kstrdup(interp, GFP_KERNEL);
	if (!bprm->interp)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(bprm_change_interp);
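/*
 * Usage sketch (hypothetical variable name): binfmt_script and
 * binfmt_misc call this after parsing an interpreter name, so that the
 * recursive search_binary_handler() pass loads the interpreter instead
 * of the original file:
 *
 *	retval = bprm_change_interp(i_name, bprm);
 *	if (retval < 0)
 *		return retval;
 */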
/*
 * install the new credentials for this executable
 */
void install_exec_creds(struct linux_binprm *bprm)
{
	security_bprm_committing_creds(bprm);

	commit_creds(bprm->cred);
	bprm->cred = NULL;

	/*
	 * Disable monitoring for regular users
	 * when executing setuid binaries. Must
	 * wait until new credentials are committed
	 * by commit_creds() above
	 */
	if (get_dumpable(current->mm) != SUID_DUMP_USER)
		perf_event_exit_task(current);
	/*
	 * cred_guard_mutex must be held at least to this point to prevent
	 * ptrace_attach() from altering our determination of the task's
	 * credentials; any time after this it may be unlocked.
	 */
	security_bprm_committed_creds(bprm);
	mutex_unlock(&current->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(install_exec_creds);
/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold ->cred_guard_mutex to protect against
 *   PTRACE_ATTACH
 */
static int check_unsafe_exec(struct linux_binprm *bprm)
{
	struct task_struct *p = current, *t;
	unsigned n_fs;
	int res = 0;

	if (p->ptrace) {
		if (p->ptrace & PT_PTRACE_CAP)
			bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
		else
			bprm->unsafe |= LSM_UNSAFE_PTRACE;
	}

	/*
	 * This isn't strictly necessary, but it makes it harder for LSMs to
	 * mess up.
	 */
	if (current->no_new_privs)
		bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;

	n_fs = 1;
	spin_lock(&p->fs->lock);
	rcu_read_lock();
	for (t = next_thread(p); t != p; t = next_thread(t)) {
		if (t->fs == p->fs)
			n_fs++;
	}
	rcu_read_unlock();

	if (p->fs->users > n_fs) {
		bprm->unsafe |= LSM_UNSAFE_SHARE;
	} else {
		res = -EAGAIN;
		if (!p->fs->in_exec) {
			p->fs->in_exec = 1;
			res = 1;
		}
	}
	spin_unlock(&p->fs->lock);

	return res;
}
/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	umode_t mode;
	struct inode * inode = file_inode(bprm->file);
	int retval;

	mode = inode->i_mode;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	/* clear any previous set[ug]id data from a previous binary */
	bprm->cred->euid = current_euid();
	bprm->cred->egid = current_egid();

	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
	    !current->no_new_privs &&
	    kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
	    kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->euid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->egid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set_creds(bprm);
	if (retval)
		return retval;
	bprm->cred_prepared = 1;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);
/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
	int ret = 0;
	unsigned long offset;
	char *kaddr;
	struct page *page;

	if (!bprm->argc)
		return 0;

	do {
		offset = bprm->p & ~PAGE_MASK;
		page = get_arg_page(bprm, bprm->p, 0);
		if (!page) {
			ret = -EFAULT;
			goto out;
		}
		kaddr = kmap_atomic(page);

		for (; offset < PAGE_SIZE && kaddr[offset];
				offset++, bprm->p++)
			;

		kunmap_atomic(kaddr);
		put_arg_page(page);

		if (offset == PAGE_SIZE)
			free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
	} while (offset == PAGE_SIZE);

	bprm->p++;
	bprm->argc--;
	ret = 0;

out:
	return ret;
}
EXPORT_SYMBOL(remove_arg_zero);
/*
 * cycle the list of binary formats handler, until one recognizes the image
 */
int search_binary_handler(struct linux_binprm *bprm)
{
	unsigned int depth = bprm->recursion_depth;
	int try, retval;
	struct linux_binfmt *fmt;
	pid_t old_pid, old_vpid;

	/* This allows 4 levels of binfmt rewrites before failing hard. */
	if (depth > 5)
		return -ELOOP;

	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	retval = audit_bprm(bprm);
	if (retval)
		return retval;

	/* Need to fetch pid before load_binary changes it */
	old_pid = current->pid;
	rcu_read_lock();
	old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
	rcu_read_unlock();

	retval = -ENOENT;
	for (try=0; try<2; try++) {
		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			int (*fn)(struct linux_binprm *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			bprm->recursion_depth = depth + 1;
			retval = fn(bprm);
			bprm->recursion_depth = depth;
			if (retval >= 0) {
				if (depth == 0) {
					trace_sched_process_exec(current, old_pid, bprm);
					ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
				}
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				proc_exec_connector(current);
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
#ifdef CONFIG_MODULES
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			if (try)
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
		}
#else
		break;
#endif
	}
	return retval;
}
EXPORT_SYMBOL(search_binary_handler);
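/*
 * Note on the request_module() alias above: it is formed from bytes 2 and
 * 3 of the image, read as an unsigned short. For example, an ELF image
 * starts with 0x7f 'E' 'L' 'F'; 0x7f is not printable per the macro, so a
 * kernel lacking a built-in ELF loader would request "binfmt-464c" on a
 * little-endian machine (0x464c is "LF" read as a u16).
 */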
/*
 * sys_execve() executes a new program.
 */
static int do_execve_common(const char *filename,
				struct user_arg_ptr argv,
				struct user_arg_ptr envp)
{
	struct linux_binprm *bprm;
	struct file *file;
	struct files_struct *displaced;
	bool clear_in_exec;
	int retval;
	const struct cred *cred = current_cred();

	/*
	 * We move the actual failure in case of RLIMIT_NPROC excess from
	 * set*uid() to execve() because too many poorly written programs
	 * don't check setuid() return code.  Here we additionally recheck
	 * whether NPROC limit is still exceeded.
	 */
	if ((current->flags & PF_NPROC_EXCEEDED) &&
	    atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
		retval = -EAGAIN;
		goto out_ret;
	}

	/* We're below the limit (still or again), so we don't want to make
	 * further execve() calls fail. */
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = unshare_files(&displaced);
	if (retval)
		goto out_ret;

	retval = -ENOMEM;
	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_files;

	retval = prepare_bprm_creds(bprm);
	if (retval)
		goto out_free;

	retval = check_unsafe_exec(bprm);
	if (retval < 0)
		goto out_free;
	clear_in_exec = retval;
	current->in_execve = 1;

	file = open_exec(filename);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_unmark;

	sched_exec();

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;

	retval = bprm_mm_init(bprm);
	if (retval)
		goto out_file;

	bprm->argc = count(argv, MAX_ARG_STRINGS);
	if ((retval = bprm->argc) < 0)
		goto out;

	bprm->envc = count(envp, MAX_ARG_STRINGS);
	if ((retval = bprm->envc) < 0)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;

	retval = search_binary_handler(bprm);
	if (retval < 0)
		goto out;

	/* execve succeeded */
	current->fs->in_exec = 0;
	current->in_execve = 0;
	acct_update_integrals(current);
	free_bprm(bprm);
	if (displaced)
		put_files_struct(displaced);
	return retval;

out:
	if (bprm->mm) {
		acct_arg_size(bprm, 0);
		mmput(bprm->mm);
	}

out_file:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}

out_unmark:
	if (clear_in_exec)
		current->fs->in_exec = 0;
	current->in_execve = 0;

out_free:
	free_bprm(bprm);

out_files:
	if (displaced)
		reset_files_struct(displaced);
out_ret:
	return retval;
}
int do_execve(const char *filename,
	const char __user *const __user *__argv,
	const char __user *const __user *__envp)
{
	struct user_arg_ptr argv = { .ptr.native = __argv };
	struct user_arg_ptr envp = { .ptr.native = __envp };
	return do_execve_common(filename, argv, envp);
}
#ifdef CONFIG_COMPAT
static int compat_do_execve(const char *filename,
	const compat_uptr_t __user *__argv,
	const compat_uptr_t __user *__envp)
{
	struct user_arg_ptr argv = {
		.is_compat = true,
		.ptr.compat = __argv,
	};
	struct user_arg_ptr envp = {
		.is_compat = true,
		.ptr.compat = __envp,
	};
	return do_execve_common(filename, argv, envp);
}
#endif
void set_binfmt(struct linux_binfmt *new)
{
	struct mm_struct *mm = current->mm;

	if (mm->binfmt)
		module_put(mm->binfmt->module);

	mm->binfmt = new;
	if (new)
		__module_get(new->module);
}
EXPORT_SYMBOL(set_binfmt);
/*
 * set_dumpable converts traditional three-value dumpable to two flags and
 * stores them into mm->flags.  It modifies lower two bits of mm->flags, but
 * these bits are not changed atomically.  So get_dumpable can observe the
 * intermediate state.  To avoid unexpected behavior, get_dumpable returns
 * either the old dumpable or the new one by paying attention to the order of
 * modifying the bits.
 *
 * dumpable |   mm->flags (binary)
 * old  new | initial interim  final
 * ---------+-----------------------
 *  0    1  | 00      01       01
 *  0    2  | 00      10(*)    11
 *  1    0  | 01      00       00
 *  1    2  | 01      11       11
 *  2    0  | 11      10(*)    00
 *  2    1  | 11      11       01
 *
 * (*) get_dumpable regards interim value of 10 as 11.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
	switch (value) {
	case SUID_DUMP_DISABLE:
		clear_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case SUID_DUMP_USER:
		set_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case SUID_DUMP_ROOT:
		set_bit(MMF_DUMP_SECURELY, &mm->flags);
		smp_wmb();
		set_bit(MMF_DUMPABLE, &mm->flags);
		break;
	}
}
int __get_dumpable(unsigned long mm_flags)
{
	int ret;

	ret = mm_flags & MMF_DUMPABLE_MASK;
	return (ret > SUID_DUMP_USER) ? SUID_DUMP_ROOT : ret;
}
int get_dumpable(struct mm_struct *mm)
{
	return __get_dumpable(mm->flags);
}
SYSCALL_DEFINE3(execve,
		const char __user *, filename,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp)
{
	struct filename *path = getname(filename);
	int error = PTR_ERR(path);
	if (!IS_ERR(path)) {
		error = do_execve(path->name, argv, envp);
		putname(path);
	}
	return error;
}
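/*
 * Userspace view (illustrative): the syscall defined above is what libc's
 * execve() wrapper invokes, e.g.:
 *
 *	char *argv[] = { "/bin/ls", "-l", NULL };
 *	char *envp[] = { "PATH=/bin:/usr/bin", NULL };
 *	execve("/bin/ls", argv, envp);	// only returns on failure
 */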
#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_execve(const char __user * filename,
	const compat_uptr_t __user * argv,
	const compat_uptr_t __user * envp)
{
	struct filename *path = getname(filename);
	int error = PTR_ERR(path);
	if (!IS_ERR(path)) {
		error = compat_do_execve(path->name, argv, envp);
		putname(path);
	}
	return error;
}
#endif