/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * #!-checking implemented by tytso.
 *
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
unsigned int core_pipe_limit;
int suid_dumpable = 0;

static atomic_t call_count = ATOMIC_INIT(1);
/* The maximal length of core_pattern is also specified in sysctl.c */

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);
int __register_binfmt(struct linux_binfmt * fmt, int insert)
{
	if (!fmt)
		return -EINVAL;
	write_lock(&binfmt_lock);
	insert ? list_add(&fmt->lh, &formats) :
		 list_add_tail(&fmt->lh, &formats);
	write_unlock(&binfmt_lock);
	return 0;
}

EXPORT_SYMBOL(__register_binfmt);
void unregister_binfmt(struct linux_binfmt * fmt)
{
	write_lock(&binfmt_lock);
	list_del(&fmt->lh);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);
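/*
 * Illustrative sketch (not part of this file): a minimal out-of-tree binary
 * format handler registering itself with the dispatch table above. The
 * "example" names and the trivial magic check are hypothetical; only
 * register_binfmt()/unregister_binfmt() and the linux_binfmt fields used
 * here come from this file and <linux/binfmts.h>.
 */
#if 0
static int example_load_binary(struct linux_binprm *bprm,
			       struct pt_regs *regs)
{
	/* Not our magic: let search_binary_handler() try the next format. */
	if (memcmp(bprm->buf, "EXMP", 4) != 0)
		return -ENOEXEC;

	/* A real handler would set up the mm, stack and registers here. */
	return -ENOEXEC;
}

static struct linux_binfmt example_format = {
	.module		= THIS_MODULE,
	.load_binary	= example_load_binary,
};

static int __init example_init(void)
{
	/* register_binfmt() appends to the tail of the formats list */
	return register_binfmt(&example_format);
}

static void __exit example_exit(void)
{
	unregister_binfmt(&example_format);
}
#endif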
static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}
/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that we take the address to load from, from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
	struct file *file;
	char *tmp = getname(library);
	int error = PTR_ERR(tmp);
	static const struct open_flags uselib_flags = {
		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
		.acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
		.intent = LOOKUP_OPEN
	};

	if (IS_ERR(tmp))
		goto out;

	file = do_filp_open(AT_FDCWD, tmp, &uselib_flags, LOOKUP_FOLLOW);
	putname(tmp);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -EINVAL;
	if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
		goto exit;

	error = -EACCES;
	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	fsnotify_open(file);

	error = -ENOEXEC;
	if (file->f_op) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			if (!fmt->load_shlib)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
exit:
	fput(file);
out:
	return error;
}
#ifdef CONFIG_MMU

/*
 * The nascent bprm->mm is not visible until exec_mmap() but it can
 * use a lot of memory, account these pages in current->mm temporarily
 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
 * change the counter back via acct_arg_size(0).
 */
static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
	struct mm_struct *mm = current->mm;
	long diff = (long)(pages - bprm->vma_pages);

	if (!mm || !diff)
		return;

	bprm->vma_pages = pages;

#ifdef SPLIT_RSS_COUNTING
	add_mm_counter(mm, MM_ANONPAGES, diff);
#else
	spin_lock(&mm->page_table_lock);
	add_mm_counter(mm, MM_ANONPAGES, diff);
	spin_unlock(&mm->page_table_lock);
#endif
}
static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;
	int ret;

#ifdef CONFIG_STACK_GROWSUP
	if (write) {
		ret = expand_downwards(bprm->vma, pos);
		if (ret < 0)
			return NULL;
	}
#endif
	ret = get_user_pages(current, bprm->mm, pos,
			1, write, 1, &page, NULL);
	if (ret <= 0)
		return NULL;

	if (write) {
		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
		struct rlimit *rlim;

		acct_arg_size(bprm, size / PAGE_SIZE);

		/*
		 * We've historically supported up to 32 pages (ARG_MAX)
		 * of argument strings even with small stacks
		 */
		if (size <= ARG_MAX)
			return page;

		/*
		 * Limit to 1/4-th the stack size for the argv+env strings.
		 * This ensures that:
		 *  - the remaining binfmt code will not run out of stack space,
		 *  - the program will have a reasonable amount of stack left
		 *    to work from.
		 */
		rlim = current->signal->rlim;
		if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
			put_page(page);
			return NULL;
		}
	}

	return page;
}
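/*
 * Worked example for the check above (numbers illustrative): with the common
 * RLIMIT_STACK soft limit of 8 MiB, the argv+env strings may grow to at most
 * 8 MiB / 4 = 2 MiB before get_arg_page() starts failing, while the
 * historical ARG_MAX floor of 32 pages (128 KiB with 4 KiB pages) is always
 * allowed regardless of the rlimit.
 */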
static void put_arg_page(struct page *page)
{
	put_page(page);
}
static void free_arg_page(struct linux_binprm *bprm, int i)
{
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}
static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}
static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	down_write(&mm->mmap_sem);
	vma->vm_mm = mm;

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
	BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;
	vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	INIT_LIST_HEAD(&vma->anon_vma_chain);

	err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
	if (err)
		goto err;

	err = insert_vm_struct(mm, vma);
	if (err)
		goto err;

	mm->stack_vm = mm->total_vm = 1;
	up_write(&mm->mmap_sem);
	bprm->p = vma->vm_end - sizeof(void *);
	return 0;
err:
	up_write(&mm->mmap_sem);
	bprm->vma = NULL;
	kmem_cache_free(vm_area_cachep, vma);
	return err;
}
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= MAX_ARG_STRLEN;
}
#else

static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
}
static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;

	page = bprm->page[pos / PAGE_SIZE];
	if (!page && write) {
		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
		if (!page)
			return NULL;
		bprm->page[pos / PAGE_SIZE] = page;
	}

	return page;
}
static void put_arg_page(struct page *page)
{
}
static void free_arg_page(struct linux_binprm *bprm, int i)
{
	if (bprm->page[i]) {
		__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}
static void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++)
		free_arg_page(bprm, i);
}
static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
}
static int __bprm_mm_init(struct linux_binprm *bprm)
{
	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
	return 0;
}
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= bprm->p;
}
#endif /* CONFIG_MMU */
/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct.  We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values.  We'll update
 * them later in setup_arg_pages().
 */
int bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct mm_struct *mm = NULL;

	bprm->mm = mm = mm_alloc();
	err = -ENOMEM;
	if (!mm)
		goto err;

	err = init_new_context(current, mm);
	if (err)
		goto err;

	err = __bprm_mm_init(bprm);
	if (err)
		goto err;

	return 0;

err:
	if (mm) {
		bprm->mm = NULL;
		mmdrop(mm);
	}

	return err;
}
struct user_arg_ptr {
#ifdef CONFIG_COMPAT
	bool is_compat;
#endif
	union {
		const char __user *const __user *native;
#ifdef CONFIG_COMPAT
		compat_uptr_t __user *compat;
#endif
	} ptr;
};
static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
{
	const char __user *native;

#ifdef CONFIG_COMPAT
	if (unlikely(argv.is_compat)) {
		compat_uptr_t compat;

		if (get_user(compat, argv.ptr.compat + nr))
			return ERR_PTR(-EFAULT);

		return compat_ptr(compat);
	}
#endif

	if (get_user(native, argv.ptr.native + nr))
		return ERR_PTR(-EFAULT);

	return native;
}
/*
 * count() counts the number of strings in array ARGV.
 */
static int count(struct user_arg_ptr argv, int max)
{
	int i = 0;

	if (argv.ptr.native != NULL) {
		for (;;) {
			const char __user *p = get_user_arg_ptr(argv, i);

			if (!p)
				break;

			if (IS_ERR(p))
				return -EFAULT;

			if (i++ >= max)
				return -E2BIG;

			if (fatal_signal_pending(current))
				return -ERESTARTNOHAND;
			cond_resched();
		}
	}
	return i;
}
/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack.  The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, struct user_arg_ptr argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	unsigned long kpos = 0;
	int ret;

	while (argc-- > 0) {
		const char __user *str;
		int len;
		unsigned long pos;

		ret = -EFAULT;
		str = get_user_arg_ptr(argv, argc);
		if (IS_ERR(str))
			goto out;

		len = strnlen_user(str, MAX_ARG_STRLEN);
		if (!len)
			goto out;

		ret = -E2BIG;
		if (!valid_arg_len(bprm, len))
			goto out;

		/* We're going to work our way backwards. */
		pos = bprm->p;
		str += len;
		bprm->p -= len;

		while (len > 0) {
			int offset, bytes_to_copy;

			if (fatal_signal_pending(current)) {
				ret = -ERESTARTNOHAND;
				goto out;
			}
			cond_resched();

			offset = pos % PAGE_SIZE;
			if (offset == 0)
				offset = PAGE_SIZE;

			bytes_to_copy = offset;
			if (bytes_to_copy > len)
				bytes_to_copy = len;

			offset -= bytes_to_copy;
			pos -= bytes_to_copy;
			str -= bytes_to_copy;
			len -= bytes_to_copy;

			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
				struct page *page;

				page = get_arg_page(bprm, pos, 1);
				if (!page) {
					ret = -E2BIG;
					goto out;
				}

				if (kmapped_page) {
					flush_kernel_dcache_page(kmapped_page);
					kunmap(kmapped_page);
					put_arg_page(kmapped_page);
				}
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
				kpos = pos & PAGE_MASK;
				flush_arg_page(bprm, kpos, kmapped_page);
			}
			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
				ret = -EFAULT;
				goto out;
			}
		}
	}
	ret = 0;
out:
	if (kmapped_page) {
		flush_kernel_dcache_page(kmapped_page);
		kunmap(kmapped_page);
		put_arg_page(kmapped_page);
	}
	return ret;
}
/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, const char *const *__argv,
			struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	struct user_arg_ptr argv = {
		.ptr.native = (const char __user *const  __user *)__argv,
	};

	set_fs(KERNEL_DS);
	r = copy_strings(argc, argv, bprm);
	set_fs(oldfs);

	return r;
}
EXPORT_SYMBOL(copy_strings_kernel);
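/*
 * The set_fs(KERNEL_DS) dance above is what makes the __user cast in the
 * initializer legal: copy_strings() may then copy_from_user() from what is
 * really a kernel pointer. Illustrative call (not part of this file), with a
 * hypothetical kernel string:
 */
#if 0
	static const char *interp = "/bin/sh";
	int r = copy_strings_kernel(1, &interp, bprm);
#endif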
#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location. The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges. This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	struct mmu_gather tlb;

	BUG_ON(new_start > new_end);

	/*
	 * ensure there are no vmas between where we want to go
	 * and where we are
	 */
	if (vma != find_vma(mm, new_start))
		return -EFAULT;

	/*
	 * cover the whole range: [new_start, old_end)
	 */
	if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
		return -ENOMEM;

	/*
	 * move the page tables downwards, on failure we rely on
	 * process cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length))
		return -ENOMEM;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, 0);
	if (new_end > old_start) {
		/*
		 * when the old and new regions overlap clear from new_end.
		 */
		free_pgd_range(&tlb, new_end, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	} else {
		/*
		 * otherwise, clean from old_start; this is done to not touch
		 * the address space in [new_end, old_start) some architectures
		 * have constraints on va-space that make this illegal (IA64) -
		 * for the others it's just a little faster.
		 */
		free_pgd_range(&tlb, old_start, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	}
	tlb_finish_mmu(&tlb, new_end, old_end);

	/*
	 * Shrink the vma to just the new range.  Always succeeds.
	 */
	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

	return 0;
}
/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long ret;
	unsigned long stack_shift;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = bprm->vma;
	struct vm_area_struct *prev = NULL;
	unsigned long vm_flags;
	unsigned long stack_base;
	unsigned long stack_size;
	unsigned long stack_expand;
	unsigned long rlim_stack;

#ifdef CONFIG_STACK_GROWSUP
	/* Limit stack size to 1GB */
	stack_base = rlimit_max(RLIMIT_STACK);
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;

	/* Make sure we didn't let the argument array grow too large. */
	if (vma->vm_end - vma->vm_start > stack_base)
		return -ENOMEM;

	stack_base = PAGE_ALIGN(stack_top - stack_base);

	stack_shift = vma->vm_start - stack_base;
	mm->arg_start = bprm->p - stack_shift;
	bprm->p = vma->vm_end - stack_shift;
#else
	stack_top = arch_align_stack(stack_top);
	stack_top = PAGE_ALIGN(stack_top);

	if (unlikely(stack_top < mmap_min_addr) ||
	    unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
		return -ENOMEM;

	stack_shift = vma->vm_end - stack_top;

	bprm->p -= stack_shift;
	mm->arg_start = bprm->p;
#endif

	if (bprm->loader)
		bprm->loader -= stack_shift;
	bprm->exec -= stack_shift;

	down_write(&mm->mmap_sem);
	vm_flags = VM_STACK_FLAGS;

	/*
	 * Adjust stack execute permissions; explicitly enable for
	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
	 * (arch default) otherwise.
	 */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		vm_flags |= VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		vm_flags &= ~VM_EXEC;
	vm_flags |= mm->def_flags;
	vm_flags |= VM_STACK_INCOMPLETE_SETUP;

	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
			vm_flags);
	if (ret)
		goto out_unlock;
	BUG_ON(prev != vma);

	/* Move stack pages down in memory. */
	if (stack_shift) {
		ret = shift_arg_pages(vma, stack_shift);
		if (ret)
			goto out_unlock;
	}

	/* mprotect_fixup is overkill to remove the temporary stack flags */
	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;

	stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
	stack_size = vma->vm_end - vma->vm_start;
	/*
	 * Align this down to a page boundary as expand_stack
	 * will align it up.
	 */
	rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_start + rlim_stack;
	else
		stack_base = vma->vm_end + stack_expand;
#else
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_end - rlim_stack;
	else
		stack_base = vma->vm_start - stack_expand;
#endif
	current->mm->start_stack = bprm->p;
	ret = expand_stack(vma, stack_base);
	if (ret)
		ret = -EFAULT;

out_unlock:
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(setup_arg_pages);
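/*
 * Illustrative call site (not part of this file), modeled on what an
 * ELF-style loader does once it knows where the stack should live;
 * randomize_stack_top() and executable_stack are the caller's business and
 * are shown here only as assumptions:
 */
#if 0
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0)
		goto out_free_dentry;
#endif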
#endif /* CONFIG_MMU */
struct file *open_exec(const char *name)
{
	struct file *file;
	int err;
	static const struct open_flags open_exec_flags = {
		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
		.acc_mode = MAY_EXEC | MAY_OPEN,
		.intent = LOOKUP_OPEN
	};

	file = do_filp_open(AT_FDCWD, name, &open_exec_flags, LOOKUP_FOLLOW);
	if (IS_ERR(file))
		goto out;

	err = -EACCES;
	if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
		goto exit;

	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	fsnotify_open(file);

	err = deny_write_access(file);
	if (err)
		goto exit;

out:
	return file;

exit:
	fput(file);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(open_exec);
int kernel_read(struct file *file, loff_t offset,
		char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}
EXPORT_SYMBOL(kernel_read);
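/*
 * Illustrative sketch (not part of this file): binfmt handlers use
 * kernel_read() to pull in parts of the image beyond the BINPRM_BUF_SIZE
 * bytes that prepare_binprm() already buffered. "elf_ex", "phdr" and "size"
 * are hypothetical names for the caller's header state:
 */
#if 0
	retval = kernel_read(file, elf_ex.e_phoff, (char *)phdr, size);
	if (retval != size) {
		if (retval >= 0)
			retval = -EIO;
		goto out;
	}
#endif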
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	sync_mm_rss(tsk, old_mm);
	mm_release(tsk, old_mm);

	if (old_mm) {
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec.  We must hold mmap_sem around
		 * checking core_state and changing tsk->mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_state)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	if (old_mm && tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
		atomic_dec(&old_mm->oom_disable_count);
		atomic_inc(&tsk->mm->oom_disable_count);
	}
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		mm_update_next_owner(old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}
/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 */
	spin_lock_irq(lock);
	if (signal_group_exit(sig)) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		return -EAGAIN;
	}

	sig->group_exit_task = tsk;
	sig->notify_count = zap_other_threads(tsk);
	if (!thread_group_leader(tsk))
		sig->notify_count--;

	while (sig->notify_count) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		struct task_struct *leader = tsk->group_leader;

		sig->notify_count = -1;	/* for exit_notify() */
		for (;;) {
			write_lock_irq(&tasklist_lock);
			if (likely(leader->exit_state))
				break;
			__set_current_state(TASK_UNINTERRUPTIBLE);
			write_unlock_irq(&tasklist_lock);
			schedule();
		}

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead.  But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;

		BUG_ON(!same_thread_group(leader, tsk));
		BUG_ON(has_group_leader_pid(tsk));
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 * Note: The old leader also uses this pid until release_task
		 *       is called.  Odd but simple and correct.
		 */
		detach_pid(tsk, PIDTYPE_PID);
		tsk->pid = leader->pid;
		attach_pid(tsk, PIDTYPE_PID, task_pid(leader));
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);

		list_replace_rcu(&leader->tasks, &tsk->tasks);
		list_replace_init(&leader->sibling, &tsk->sibling);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;
		leader->exit_signal = -1;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;

		/*
		 * We are going to release_task()->ptrace_unlink() silently,
		 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
		 * the tracer won't block again waiting for this thread.
		 */
		if (unlikely(leader->ptrace))
			__wake_up_parent(leader, leader->parent);
		write_unlock_irq(&tasklist_lock);

		release_task(leader);
	}

	sig->group_exit_task = NULL;
	sig->notify_count = 0;

no_thread_group:
	if (current->mm)
		setmax_mm_hiwater_rss(&sig->maxrss, current->mm);

	flush_itimer_signals();

	if (atomic_read(&oldsighand->count) != 1) {
		struct sighand_struct *newsighand;
		/*
		 * This ->sighand is shared with the CLONE_SIGHAND
		 * but not CLONE_THREAD task, switch to the new one.
		 */
		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
		if (!newsighand)
			return -ENOMEM;

		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		rcu_assign_pointer(tsk->sighand, newsighand);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}

	BUG_ON(!thread_group_leader(tsk));
	return 0;
}
/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */
static void flush_old_files(struct files_struct * files)
{
	long j = -1;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		fdt = files_fdtable(files);
		if (i >= fdt->max_fds)
			break;
		set = fdt->close_on_exec->fds_bits[j];
		if (!set)
			continue;
		fdt->close_on_exec->fds_bits[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1)
				sys_close(i);
		}
		spin_lock(&files->file_lock);
	}
	spin_unlock(&files->file_lock);
}
char *get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	strncpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
	return buf;
}
EXPORT_SYMBOL_GPL(get_task_comm);
void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);

	/*
	 * Threads may access current->comm without holding
	 * the task lock, so write the string carefully.
	 * Readers without a lock may see incomplete new
	 * names but are safe from non-terminating string reads.
	 */
	memset(tsk->comm, 0, TASK_COMM_LEN);
	wmb();
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
	perf_event_comm(tsk);
}
int flush_old_exec(struct linux_binprm * bprm)
{
	int retval;

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	set_mm_exe_file(bprm->mm, bprm->file);

	/*
	 * Release all of the old mmap stuff
	 */
	acct_arg_size(bprm, 0);
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto out;

	bprm->mm = NULL;		/* We're using it now */

	set_fs(USER_DS);
	current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD);
	flush_thread();
	current->personality &= ~bprm->per_clear;

	return 0;

out:
	return retval;
}
EXPORT_SYMBOL(flush_old_exec);
void would_dump(struct linux_binprm *bprm, struct file *file)
{
	if (inode_permission(file->f_path.dentry->d_inode, MAY_READ) < 0)
		bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
}
EXPORT_SYMBOL(would_dump);
void setup_new_exec(struct linux_binprm * bprm)
{
	int i, ch;
	const char *name;
	char tcomm[sizeof(current->comm)];

	arch_pick_mmap_layout(current->mm);

	/* This is the point of no return */
	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current_euid() == current_uid() && current_egid() == current_gid())
		set_dumpable(current->mm, 1);
	else
		set_dumpable(current->mm, suid_dumpable);

	name = bprm->filename;

	/* Copies the binary name from after last slash */
	for (i = 0; (ch = *(name++)) != '\0';) {
		if (ch == '/')
			i = 0; /* overwrite what we wrote */
		else
			if (i < (sizeof(tcomm) - 1))
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
	set_task_comm(current, tcomm);

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc
	 */
	current->mm->task_size = TASK_SIZE;

	/* install the new credentials */
	if (bprm->cred->uid != current_euid() ||
	    bprm->cred->gid != current_egid()) {
		current->pdeath_signal = 0;
	} else {
		would_dump(bprm, bprm->file);
		if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)
			set_dumpable(current->mm, suid_dumpable);
	}

	/*
	 * Flush performance counters when crossing a
	 * security domain:
	 */
	if (!get_dumpable(current->mm))
		perf_event_exit_task(current);

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	flush_old_files(current->files);
}
EXPORT_SYMBOL(setup_new_exec);
/*
 * Prepare credentials and lock ->cred_guard_mutex.
 * install_exec_creds() commits the new creds and drops the lock.
 * Or, if exec fails before, free_bprm() should release ->cred and
 * unlock.
 */
int prepare_bprm_creds(struct linux_binprm *bprm)
{
	if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
		return -ERESTARTNOINTR;

	bprm->cred = prepare_exec_creds();
	if (likely(bprm->cred))
		return 0;

	mutex_unlock(&current->signal->cred_guard_mutex);
	return -ENOMEM;
}
void free_bprm(struct linux_binprm *bprm)
{
	free_arg_pages(bprm);
	if (bprm->cred) {
		mutex_unlock(&current->signal->cred_guard_mutex);
		abort_creds(bprm->cred);
	}
	kfree(bprm);
}
/*
 * install the new credentials for this executable
 */
void install_exec_creds(struct linux_binprm *bprm)
{
	security_bprm_committing_creds(bprm);

	commit_creds(bprm->cred);
	bprm->cred = NULL;
	/*
	 * cred_guard_mutex must be held at least to this point to prevent
	 * ptrace_attach() from altering our determination of the task's
	 * credentials; any time after this it may be unlocked.
	 */
	security_bprm_committed_creds(bprm);
	mutex_unlock(&current->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(install_exec_creds);
/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold ->cred_guard_mutex to protect against
 *   PTRACE_ATTACH
 */
int check_unsafe_exec(struct linux_binprm *bprm)
{
	struct task_struct *p = current, *t;
	unsigned n_fs;
	int res = 0;

	if (p->ptrace) {
		if (p->ptrace & PT_PTRACE_CAP)
			bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
		else
			bprm->unsafe |= LSM_UNSAFE_PTRACE;
	}

	n_fs = 1;
	spin_lock(&p->fs->lock);
	rcu_read_lock();
	for (t = next_thread(p); t != p; t = next_thread(t)) {
		if (t->fs == p->fs)
			n_fs++;
	}
	rcu_read_unlock();

	if (p->fs->users > n_fs) {
		bprm->unsafe |= LSM_UNSAFE_SHARE;
	} else {
		res = -EAGAIN;
		if (!p->fs->in_exec) {
			p->fs->in_exec = 1;
			res = 1;
		}
	}
	spin_unlock(&p->fs->lock);

	return res;
}
/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	umode_t mode;
	struct inode * inode = bprm->file->f_path.dentry->d_inode;
	int retval;

	mode = inode->i_mode;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	/* clear any previous set[ug]id data from a previous binary */
	bprm->cred->euid = current_euid();
	bprm->cred->egid = current_egid();

	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->euid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->egid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set_creds(bprm);
	if (retval)
		return retval;
	bprm->cred_prepared = 1;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}
EXPORT_SYMBOL(prepare_binprm);
/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
	int ret = 0;
	unsigned long offset;
	char *kaddr;
	struct page *page;

	if (!bprm->argc)
		return 0;

	do {
		offset = bprm->p & ~PAGE_MASK;
		page = get_arg_page(bprm, bprm->p, 0);
		if (!page) {
			ret = -EFAULT;
			goto out;
		}
		kaddr = kmap_atomic(page, KM_USER0);

		for (; offset < PAGE_SIZE && kaddr[offset];
				offset++, bprm->p++)
			;

		kunmap_atomic(kaddr, KM_USER0);
		put_arg_page(page);

		if (offset == PAGE_SIZE)
			free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
	} while (offset == PAGE_SIZE);

	bprm->p++;
	bprm->argc--;
	ret = 0;

out:
	return ret;
}
EXPORT_SYMBOL(remove_arg_zero);
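/*
 * Illustrative sketch (not part of this file): an interpreter handler in the
 * style of binfmt_script rewrites argv with remove_arg_zero() and
 * copy_strings_kernel(). "i_name" and "i_arg" are hypothetical pointers into
 * the parsed "#!" line.
 */
#if 0
	retval = remove_arg_zero(bprm);	/* drop the script's own argv[0] */
	if (retval)
		return retval;
	retval = copy_strings_kernel(1, &bprm->interp, bprm); /* script path */
	if (retval < 0)
		return retval;
	bprm->argc++;
	if (i_arg) {
		retval = copy_strings_kernel(1, &i_arg, bprm); /* optional arg */
		if (retval < 0)
			return retval;
		bprm->argc++;
	}
	retval = copy_strings_kernel(1, &i_name, bprm);	/* interpreter name */
	if (retval)
		return retval;
	bprm->argc++;
#endif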
/*
 * cycle the list of binary format handlers, until one recognizes the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	unsigned int depth = bprm->recursion_depth;
	int try, retval;
	struct linux_binfmt *fmt;
	pid_t old_pid;

	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	retval = audit_bprm(bprm);
	if (retval)
		return retval;

	/* Need to fetch pid before load_binary changes it */
	rcu_read_lock();
	old_pid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
	rcu_read_unlock();

	retval = -ENOENT;
	for (try = 0; try < 2; try++) {
		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			/*
			 * Restore the depth counter to its starting value
			 * in this call, so we don't have to rely on every
			 * load_binary function to restore it on return.
			 */
			bprm->recursion_depth = depth;
			if (retval >= 0) {
				if (depth == 0)
					ptrace_event(PTRACE_EVENT_EXEC,
							old_pid);
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				proc_exec_connector(current);
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
#ifdef CONFIG_MODULES
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			if (try)
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#undef printable
		}
#else
		break;
#endif
	}
	return retval;
}
EXPORT_SYMBOL(search_binary_handler);
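/*
 * Example of the request_module() fallback above: for an image whose first
 * bytes are not printable text, the two bytes at bprm->buf[2..3] are
 * formatted into a module alias. A file beginning with the (hypothetical)
 * bytes 01 01 07 02 would trigger request_module("binfmt-0207") on a
 * little-endian machine, giving a module with that alias a chance to
 * register before the second pass of the loop.
 */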
/*
 * sys_execve() executes a new program.
 */
static int do_execve_common(const char *filename,
				struct user_arg_ptr argv,
				struct user_arg_ptr envp,
				struct pt_regs *regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	struct files_struct *displaced;
	bool clear_in_exec;
	int retval;

	retval = unshare_files(&displaced);
	if (retval)
		goto out_ret;

	retval = -ENOMEM;
	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_files;

	retval = prepare_bprm_creds(bprm);
	if (retval)
		goto out_free;

	retval = check_unsafe_exec(bprm);
	if (retval < 0)
		goto out_free;
	clear_in_exec = retval;
	current->in_execve = 1;

	file = open_exec(filename);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_unmark;

	sched_exec();

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;

	retval = bprm_mm_init(bprm);
	if (retval)
		goto out_file;

	bprm->argc = count(argv, MAX_ARG_STRINGS);
	if ((retval = bprm->argc) < 0)
		goto out;

	bprm->envc = count(envp, MAX_ARG_STRINGS);
	if ((retval = bprm->envc) < 0)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;

	retval = search_binary_handler(bprm, regs);
	if (retval < 0)
		goto out;

	/* execve succeeded */
	current->fs->in_exec = 0;
	current->in_execve = 0;
	acct_update_integrals(current);
	free_bprm(bprm);
	if (displaced)
		put_files_struct(displaced);
	return retval;

out:
	if (bprm->mm) {
		acct_arg_size(bprm, 0);
		mmput(bprm->mm);
	}

out_file:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}

out_unmark:
	if (clear_in_exec)
		current->fs->in_exec = 0;
	current->in_execve = 0;

out_free:
	free_bprm(bprm);

out_files:
	if (displaced)
		reset_files_struct(displaced);
out_ret:
	return retval;
}
int do_execve(const char *filename,
	const char __user *const __user *__argv,
	const char __user *const __user *__envp,
	struct pt_regs *regs)
{
	struct user_arg_ptr argv = { .ptr.native = __argv };
	struct user_arg_ptr envp = { .ptr.native = __envp };
	return do_execve_common(filename, argv, envp, regs);
}
#ifdef CONFIG_COMPAT
int compat_do_execve(char *filename,
	compat_uptr_t __user *__argv,
	compat_uptr_t __user *__envp,
	struct pt_regs *regs)
{
	struct user_arg_ptr argv = {
		.is_compat = true,
		.ptr.compat = __argv,
	};
	struct user_arg_ptr envp = {
		.is_compat = true,
		.ptr.compat = __envp,
	};
	return do_execve_common(filename, argv, envp, regs);
}
#endif
void set_binfmt(struct linux_binfmt *new)
{
	struct mm_struct *mm = current->mm;

	if (mm->binfmt)
		module_put(mm->binfmt->module);

	mm->binfmt = new;
	if (new)
		__module_get(new->module);
}

EXPORT_SYMBOL(set_binfmt);
static int expand_corename(struct core_name *cn)
{
	char *old_corename = cn->corename;

	cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
	cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);

	if (!cn->corename) {
		kfree(old_corename);
		return -ENOMEM;
	}

	return 0;
}
static int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	char *cur;
	int need;
	int ret;
	va_list arg;

	va_start(arg, fmt);
	need = vsnprintf(NULL, 0, fmt, arg);
	va_end(arg);

	if (likely(need < cn->size - cn->used - 1))
		goto out_printf;

	ret = expand_corename(cn);
	if (ret)
		goto expand_fail;

out_printf:
	cur = cn->corename + cn->used;
	va_start(arg, fmt);
	vsnprintf(cur, need + 1, fmt, arg);
	va_end(arg);
	cn->used += need;
	return 0;

expand_fail:
	return ret;
}
static int cn_print_exe_file(struct core_name *cn)
{
	struct file *exe_file;
	char *pathbuf, *path, *p;
	int ret;

	exe_file = get_mm_exe_file(current->mm);
	if (!exe_file)
		return cn_printf(cn, "(unknown)");

	pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
	if (!pathbuf) {
		ret = -ENOMEM;
		goto put_exe_file;
	}

	path = d_path(&exe_file->f_path, pathbuf, PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		goto free_buf;
	}

	for (p = path; *p; p++)
		if (*p == '/')
			*p = '!';

	ret = cn_printf(cn, "%s", path);

free_buf:
	kfree(pathbuf);
put_exe_file:
	fput(exe_file);
	return ret;
}
/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(struct core_name *cn, long signr)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	int pid_in_pattern = 0;
	int err = 0;

	cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
	cn->corename = kmalloc(cn->size, GFP_KERNEL);
	cn->used = 0;

	if (!cn->corename)
		return -ENOMEM;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
					      task_tgid_vnr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%d", cred->uid);
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%d", cred->gid);
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%ld", signr);
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				err = cn_printf(cn, "%lu", tv.tv_sec);
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				err = cn_printf(cn, "%s",
					      utsname()->nodename);
				up_read(&uts_sem);
				break;
			/* executable */
			case 'e':
				err = cn_printf(cn, "%s", current->comm);
				break;
			case 'E':
				err = cn_print_exe_file(cn);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
					      rlimit(RLIMIT_CORE));
				break;
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return err;
	}

	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
		if (err)
			return err;
	}
out:
	return ispipe;
}
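/*
 * Illustrative expansions of the specifiers handled above (PIDs and names
 * are made up): with core_pattern = "core.%e.%p", a crash of pid 1234
 * running "bash" yields "core.bash.1234"; with the default pattern "core"
 * and core_uses_pid set, the fallback above produces "core.1234". A leading
 * '|' as in "|/usr/lib/helper %p %s" selects the pipe path in do_coredump()
 * instead of a filesystem path.
 */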
static int zap_process(struct task_struct *start, int exit_code)
{
	struct task_struct *t;
	int nr = 0;

	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_exit_code = exit_code;
	start->signal->group_stop_count = 0;

	t = start;
	do {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		if (t != current && t->mm) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	} while_each_thread(start, t);

	return nr;
}
static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
				struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		nr = zap_process(tsk, exit_code);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
	/*
	 * We should find and kill all tasks which use this mm, and we should
	 * count them correctly into ->nr_threads. We don't take tasklist
	 * lock, but this is safe wrt:
	 *
	 * fork:
	 *	None of sub-threads can fork after zap_process(leader). All
	 *	processes which were created before this point should be
	 *	visible to zap_threads() because copy_process() adds the new
	 *	process to the tail of init_task.tasks list, and lock/unlock
	 *	of ->siglock provides a memory barrier.
	 *
	 * do_exit:
	 *	The caller holds mm->mmap_sem. This means that the task which
	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
	 *	its ->mm.
	 *
	 * de_thread:
	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
	 *	we must see either old or new leader, this does not matter.
	 *	However, it can change p->sighand, so lock_task_sighand(p)
	 *	must be used. Since p->mm != NULL and we hold ->mmap_sem
	 *	it can't fail.
	 *
	 *	Note also that "g" can be the old leader with ->mm == NULL
	 *	and already unhashed and thus removed from ->thread_group.
	 *	This is OK, __unhash_process()->list_del_rcu() does not
	 *	clear the ->next pointer, we will find the new leader via
	 *	next_thread().
	 */
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;
		p = g;
		do {
			if (p->mm) {
				if (unlikely(p->mm == mm)) {
					lock_task_sighand(p, &flags);
					nr += zap_process(p, exit_code);
					unlock_task_sighand(p, &flags);
				}
				break;
			}
		} while_each_thread(g, p);
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}
, struct core_state
*core_state
)
1886 struct task_struct
*tsk
= current
;
1887 struct mm_struct
*mm
= tsk
->mm
;
1888 struct completion
*vfork_done
;
1889 int core_waiters
= -EBUSY
;
1891 init_completion(&core_state
->startup
);
1892 core_state
->dumper
.task
= tsk
;
1893 core_state
->dumper
.next
= NULL
;
1895 down_write(&mm
->mmap_sem
);
1896 if (!mm
->core_state
)
1897 core_waiters
= zap_threads(tsk
, mm
, core_state
, exit_code
);
1898 up_write(&mm
->mmap_sem
);
1900 if (unlikely(core_waiters
< 0))
1904 * Make sure nobody is waiting for us to release the VM,
1905 * otherwise we can deadlock when we wait on each other
1907 vfork_done
= tsk
->vfork_done
;
1909 tsk
->vfork_done
= NULL
;
1910 complete(vfork_done
);
1914 wait_for_completion(&core_state
->startup
);
1916 return core_waiters
;
static void coredump_finish(struct mm_struct *mm)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see exit_mm(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}

	mm->core_state = NULL;
}
/*
 * set_dumpable converts traditional three-value dumpable to two flags and
 * stores them into mm->flags.  It modifies lower two bits of mm->flags, but
 * these bits are not changed atomically.  So get_dumpable can observe the
 * intermediate state.  To avoid doing unexpected behavior, make get_dumpable
 * return either the old dumpable or the new one by paying attention to the
 * order of modifying the bits.
 *
 * dumpable |   mm->flags (binary)
 * old  new | initial interim  final
 * ---------+-----------------------
 *  0    1  |   00      01      01
 *  0    2  |   00      10(*)   11
 *  1    0  |   01      00      00
 *  1    2  |   01      11      11
 *  2    0  |   11      10(*)   00
 *  2    1  |   11      11      01
 *
 * (*) get_dumpable regards interim value of 10 as 11.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
	switch (value) {
	case 0:
		clear_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 1:
		set_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 2:
		set_bit(MMF_DUMP_SECURELY, &mm->flags);
		smp_wmb();
		set_bit(MMF_DUMPABLE, &mm->flags);
		break;
	}
}
static int __get_dumpable(unsigned long mm_flags)
{
	int ret;

	ret = mm_flags & MMF_DUMPABLE_MASK;
	return (ret >= 2) ? 2 : ret;
}
int get_dumpable(struct mm_struct *mm)
{
	return __get_dumpable(mm->flags);
}
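/*
 * The three dumpable values map to the fs.suid_dumpable sysctl: 0 (default)
 * suppresses dumps of processes that changed credentials, 1 dumps them under
 * the process's own fsuid, and 2 makes do_coredump() write the dump as root
 * with O_EXCL, per the mode-2 handling below. Illustrative shell usage
 * (path as commonly documented):
 *
 *	echo 2 > /proc/sys/fs/suid_dumpable
 */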
static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe;

	pipe = file->f_path.dentry->d_inode->i_pipe;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;

	while ((pipe->readers > 1) && (!signal_pending(current))) {
		wake_up_interruptible_sync(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		pipe_wait(pipe);
	}

	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}
/*
 * umh_pipe_setup
 * helper function to customize the process used
 * to collect the core in userspace.  Specifically
 * it sets up a pipe and installs it as fd 0 (stdin)
 * for the process.  Returns 0 on success, or
 * PTR_ERR on failure.
 * Note that it also sets the core limit to 1.  This
 * is a special value that we use to trap recursive
 * core dumps.
 */
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
	struct file *rp, *wp;
	struct fdtable *fdt;
	struct coredump_params *cp = (struct coredump_params *)info->data;
	struct files_struct *cf = current->files;

	wp = create_write_pipe(0);
	if (IS_ERR(wp))
		return PTR_ERR(wp);

	rp = create_read_pipe(wp, 0);
	if (IS_ERR(rp)) {
		free_write_pipe(wp);
		return PTR_ERR(rp);
	}

	cp->file = wp;

	sys_close(0);
	fd_install(0, rp);
	spin_lock(&cf->file_lock);
	fdt = files_fdtable(cf);
	FD_SET(0, fdt->open_fds);
	FD_CLR(0, fdt->close_on_exec);
	spin_unlock(&cf->file_lock);

	/* and disallow core files too */
	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return 0;
}
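/*
 * Illustrative configuration (not part of this file) that exercises this
 * helper: a piped core_pattern spawns the listed program via
 * call_usermodehelper_fns() with umh_pipe_setup() as the init callback, so
 * the dump arrives on the helper's stdin. The helper path is hypothetical:
 *
 *	echo "|/usr/local/bin/core-collect %p %s" > /proc/sys/kernel/core_pattern
 */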
void do_coredump(long signr, int exit_code, struct pt_regs *regs)
{
	struct core_state core_state;
	struct core_name cn;
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int flag = 0;
	int ispipe;
	static atomic_t core_dump_count = ATOMIC_INIT(0);
	struct coredump_params cprm = {
		.signr = signr,
		.regs = regs,
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to avoid
		 * inconsistency of bit flags, since this flag is not protected
		 * by any locks.
		 */
		.mm_flags = mm->flags,
	};

	audit_core_dumps(signr);

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (!__get_dumpable(cprm.mm_flags))
		goto fail;

	cred = prepare_creds();
	if (!cred)
		goto fail;
	/*
	 * We cannot trust fsuid as being the "true" uid of the
	 * process nor do we know its entire history. We only know it
	 * was tainted so we dump it as root in mode 2.
	 */
	if (__get_dumpable(cprm.mm_flags) == 2) {
		/* Setuid core dump mode */
		flag = O_EXCL;		/* Stop rewrite attacks */
		cred->fsuid = 0;	/* Dump root private */
	}

	retval = coredump_wait(exit_code, &core_state);
	if (retval < 0)
		goto fail_creds;

	old_cred = override_creds(cred);

	/*
	 * Clear any false indication of pending signals that might
	 * be seen by the filesystem code called to write the core file.
	 */
	clear_thread_flag(TIF_SIGPENDING);

	ispipe = format_corename(&cn, signr);

	if (ispipe == -ENOMEM) {
		printk(KERN_WARNING "format_corename failed\n");
		printk(KERN_WARNING "Aborting core\n");
		goto fail_corename;
	}

	if (ispipe) {
		int dump_count;
		char **helper_argv;

		if (cprm.limit == 1) {
			/*
			 * Normally core limits are irrelevant to pipes, since
			 * we're not writing to the file system, but we use
			 * cprm.limit of 1 here as a special value. Any
			 * non-1 limit gets set to RLIM_INFINITY below, but
			 * a limit of 0 skips the dump.  This is a consistent
			 * way to catch recursive crashes.  We can still crash
			 * if the core_pattern binary sets RLIM_CORE = !1,
			 * but it runs as root, and can do lots of stupid things.
			 * Note that we use task_tgid_vnr here to grab the pid
			 * of the process group leader.  That way we get the
			 * right pid if a thread in a multi-threaded
			 * core_pattern process dies.
			 */
			printk(KERN_WARNING
				"Process %d(%s) has RLIMIT_CORE set to 1\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}
		cprm.limit = RLIM_INFINITY;

		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
			       task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_dropcount;
		}

		helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_dropcount;
		}

		retval = call_usermodehelper_fns(helper_argv[0], helper_argv,
					NULL, UMH_WAIT_EXEC, umh_pipe_setup,
					NULL, &cprm);
		argv_free(helper_argv);
		if (retval) {
			printk(KERN_INFO "Core dump to %s pipe failed\n",
			       cn.corename);
			goto close_fail;
		}
	} else {
		struct inode *inode;

		if (cprm.limit < binfmt->min_coredump)
			goto fail_unlock;

		cprm.file = filp_open(cn.corename,
				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
				 0600);
		if (IS_ERR(cprm.file))
			goto fail_unlock;

		inode = cprm.file->f_path.dentry->d_inode;
		if (inode->i_nlink > 1)
			goto close_fail;
		if (d_unhashed(cprm.file->f_path.dentry))
			goto close_fail;
		/*
		 * AK: actually i see no reason to not allow this for named
		 * pipes etc, but keep the previous behaviour for now.
		 */
		if (!S_ISREG(inode->i_mode))
			goto close_fail;
		/*
		 * Don't allow local users to get cute and trick others into
		 * coredumping into their pre-created files.
		 */
		if (inode->i_uid != current_fsuid())
			goto close_fail;
		if (!cprm.file->f_op || !cprm.file->f_op->write)
			goto close_fail;
		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
			goto close_fail;
	}

	retval = binfmt->core_dump(&cprm);
	if (retval)
		current->signal->group_exit_code |= 0x80;

	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(cprm.file);
close_fail:
	if (cprm.file)
		filp_close(cprm.file, NULL);
fail_dropcount:
	if (ispipe)
		atomic_dec(&core_dump_count);
fail_unlock:
	kfree(cn.corename);
fail_corename:
	coredump_finish(mm);
	revert_creds(old_cred);
fail_creds:
	put_cred(cred);
fail:
	return;
}
/*
 * Core dumping helper functions. These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
int dump_write(struct file *file, const void *addr, int nr)
{
	return access_ok(VERIFY_READ, addr, nr) &&
		file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}
EXPORT_SYMBOL(dump_write);
int dump_seek(struct file *file, loff_t off)
{
	int ret = 1;

	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
			return 0;
	} else {
		char *buf = (char *)get_zeroed_page(GFP_KERNEL);

		if (!buf)
			return 0;
		while (off > 0) {
			unsigned long n = off;

			if (n > PAGE_SIZE)
				n = PAGE_SIZE;
			if (!dump_write(file, buf, n)) {
				ret = 0;
				break;
			}
			off -= n;
		}
		free_page((unsigned long)buf);
	}
	return ret;
}
EXPORT_SYMBOL(dump_seek);
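/*
 * Illustrative sketch (not part of this file): a binfmt's ->core_dump()
 * method emitting a header and then padding out to a segment boundary with
 * the two helpers above. "struct example_hdr", its magic and the offset are
 * hypothetical; a nonzero return reports that something was dumped.
 */
#if 0
static int example_core_dump(struct coredump_params *cprm)
{
	struct example_hdr hdr = { .magic = 0x45584d50 };
	loff_t dataoff = PAGE_SIZE;

	if (!dump_write(cprm->file, &hdr, sizeof(hdr)))
		return 0;	/* write failed or was truncated */
	if (!dump_seek(cprm->file, dataoff - sizeof(hdr)))
		return 0;
	/* ...write out the memory segments... */
	return 1;
}
#endif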