/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * #!-checking implemented by tytso.
 *
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale.  Use mmap instead,
 * current->executable is only used by the procfs.  This allows a dispatch
 * table to check for several different types of binary formats.  We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
unsigned int core_pipe_limit;
int suid_dumpable = 0;

/* The maximal length of core_pattern is also specified in sysctl.c */
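/*
 * Usage sketch (not part of this file): these knobs are runtime-tunable
 * through procfs, e.g. from a shell:
 *
 *	echo "core.%e.%p" > /proc/sys/kernel/core_pattern
 *	echo 1 > /proc/sys/kernel/core_uses_pid
 *	echo 4 > /proc/sys/kernel/core_pipe_limit
 *	echo 2 > /proc/sys/fs/suid_dumpable
 *
 * A core_pattern starting with '|' makes do_coredump() below feed the
 * dump to a user-mode helper over a pipe instead of writing a file.
 */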
static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);
int __register_binfmt(struct linux_binfmt * fmt, int insert)
{
	if (!fmt)
		return -EINVAL;

	write_lock(&binfmt_lock);
	insert ? list_add(&fmt->lh, &formats) :
		 list_add_tail(&fmt->lh, &formats);
	write_unlock(&binfmt_lock);
	return 0;
}

EXPORT_SYMBOL(__register_binfmt);
void unregister_binfmt(struct linux_binfmt * fmt)
{
	write_lock(&binfmt_lock);
	list_del(&fmt->lh);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);
static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}
/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that we take the address to load from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
	struct file *file;
	char *tmp = getname(library);
	int error = PTR_ERR(tmp);

	if (IS_ERR(tmp))
		goto out;

	file = do_filp_open(AT_FDCWD, tmp,
				O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
				MAY_READ | MAY_EXEC | MAY_OPEN);
	putname(tmp);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -EINVAL;
	if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
		goto exit;

	error = -EACCES;
	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	fsnotify_open(file);

	error = -ENOEXEC;
	if (file->f_op) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			if (!fmt->load_shlib)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
exit:
	fput(file);
out:
	return error;
}
#ifdef CONFIG_MMU

void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
	struct mm_struct *mm = current->mm;
	long diff = (long)(pages - bprm->vma_pages);

	if (!mm || !diff)
		return;

	bprm->vma_pages = pages;

#ifdef SPLIT_RSS_COUNTING
	add_mm_counter(mm, MM_ANONPAGES, diff);
#else
	spin_lock(&mm->page_table_lock);
	add_mm_counter(mm, MM_ANONPAGES, diff);
	spin_unlock(&mm->page_table_lock);
#endif
}
struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;
	int ret;

#ifdef CONFIG_STACK_GROWSUP
	if (write) {
		ret = expand_stack_downwards(bprm->vma, pos);
		if (ret < 0)
			return NULL;
	}
#endif
	ret = get_user_pages(current, bprm->mm, pos,
			1, write, 1, &page, NULL);
	if (ret <= 0)
		return NULL;

	if (write) {
		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
		struct rlimit *rlim;

		acct_arg_size(bprm, size / PAGE_SIZE);

		/*
		 * We've historically supported up to 32 pages (ARG_MAX)
		 * of argument strings even with small stacks
		 */
		if (size <= ARG_MAX)
			return page;

		/*
		 * Limit to 1/4-th the stack size for the argv+env strings.
		 * This ensures that:
		 *  - the remaining binfmt code will not run out of stack space,
		 *  - the program will have a reasonable amount of stack left
		 *    to work from.
		 */
		rlim = current->signal->rlim;
		if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
			put_page(page);
			return NULL;
		}
	}

	return page;
}
static void put_arg_page(struct page *page)
{
	put_page(page);
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}
static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	down_write(&mm->mmap_sem);
	vma->vm_mm = mm;

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
	BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;
	vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	err = insert_vm_struct(mm, vma);
	if (err)
		goto err;

	mm->stack_vm = mm->total_vm = 1;
	up_write(&mm->mmap_sem);
	bprm->p = vma->vm_end - sizeof(void *);
	return 0;
err:
	up_write(&mm->mmap_sem);
	bprm->vma = NULL;
	kmem_cache_free(vm_area_cachep, vma);
	return err;
}
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= MAX_ARG_STRLEN;
}

#else
void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
}
struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;

	page = bprm->page[pos / PAGE_SIZE];
	if (!page && write) {
		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
		if (!page)
			return NULL;
		bprm->page[pos / PAGE_SIZE] = page;
	}

	return page;
}
static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
	if (bprm->page[i]) {
		__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

static void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++)
		free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
}
static int __bprm_mm_init(struct linux_binprm *bprm)
{
	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
	return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= bprm->p;
}
#endif /* CONFIG_MMU */
/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct.  We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values.  We'll update
 * them later in setup_arg_pages().
 */
int bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct mm_struct *mm = NULL;

	bprm->mm = mm = mm_alloc();
	err = -ENOMEM;
	if (!mm)
		goto err;

	err = init_new_context(current, mm);
	if (err)
		goto err;

	err = __bprm_mm_init(bprm);
	if (err)
		goto err;

	return 0;

err:
	if (mm) {
		bprm->mm = NULL;
		mmdrop(mm);
	}

	return err;
}
/*
 * count() counts the number of strings in array ARGV.
 */
static int count(const char __user * const __user * argv, int max)
{
	int i = 0;

	if (argv != NULL) {
		for (;;) {
			const char __user * p;

			if (get_user(p, argv))
				return -EFAULT;
			if (!p)
				break;
			argv++;
			if (i++ >= max)
				return -E2BIG;

			if (fatal_signal_pending(current))
				return -ERESTARTNOHAND;
			cond_resched();
		}
	}
	return i;
}
/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack.  The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, const char __user *const __user *argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	unsigned long kpos = 0;
	int ret;

	while (argc-- > 0) {
		const char __user *str;
		int len;
		unsigned long pos;

		if (get_user(str, argv+argc) ||
				!(len = strnlen_user(str, MAX_ARG_STRLEN))) {
			ret = -EFAULT;
			goto out;
		}

		if (!valid_arg_len(bprm, len)) {
			ret = -E2BIG;
			goto out;
		}

		/* We're going to work our way backwards. */
		pos = bprm->p;
		str += len;
		bprm->p -= len;

		while (len > 0) {
			int offset, bytes_to_copy;

			if (fatal_signal_pending(current)) {
				ret = -ERESTARTNOHAND;
				goto out;
			}
			cond_resched();

			offset = pos % PAGE_SIZE;
			if (offset == 0)
				offset = PAGE_SIZE;

			bytes_to_copy = offset;
			if (bytes_to_copy > len)
				bytes_to_copy = len;

			offset -= bytes_to_copy;
			pos -= bytes_to_copy;
			str -= bytes_to_copy;
			len -= bytes_to_copy;

			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
				struct page *page;

				page = get_arg_page(bprm, pos, 1);
				if (!page) {
					ret = -E2BIG;
					goto out;
				}

				if (kmapped_page) {
					flush_kernel_dcache_page(kmapped_page);
					kunmap(kmapped_page);
					put_arg_page(kmapped_page);
				}
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
				kpos = pos & PAGE_MASK;
				flush_arg_page(bprm, kpos, kmapped_page);
			}
			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
				ret = -EFAULT;
				goto out;
			}
		}
	}
	ret = 0;
out:
	if (kmapped_page) {
		flush_kernel_dcache_page(kmapped_page);
		kunmap(kmapped_page);
		put_arg_page(kmapped_page);
	}
	return ret;
}
/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, const char *const *argv,
			struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	set_fs(KERNEL_DS);
	r = copy_strings(argc, (const char __user *const __user *)argv, bprm);
	set_fs(oldfs);
	return r;
}
EXPORT_SYMBOL(copy_strings_kernel);
#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location.  The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges.  This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	struct mmu_gather *tlb;

	BUG_ON(new_start > new_end);

	/*
	 * ensure there are no vmas between where we want to go
	 * and where we are
	 */
	if (vma != find_vma(mm, new_start))
		return -EFAULT;

	/*
	 * cover the whole range: [new_start, old_end)
	 */
	if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
		return -ENOMEM;

	/*
	 * move the page tables downwards, on failure we rely on
	 * process cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length))
		return -ENOMEM;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	if (new_end > old_start) {
		/*
		 * when the old and new regions overlap clear from new_end.
		 */
		free_pgd_range(tlb, new_end, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	} else {
		/*
		 * otherwise, clean from old_start; this is done to not touch
		 * the address space in [new_end, old_start), because some
		 * architectures have constraints on va-space that make this
		 * illegal (IA64); for the others it's just a little faster.
		 */
		free_pgd_range(tlb, old_start, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	}
	tlb_finish_mmu(tlb, new_end, old_end);

	/*
	 * Shrink the vma to just the new range.  Always succeeds.
	 */
	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

	return 0;
}
/*
 * Finalizes the stack vm_area_struct.  The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long ret;
	unsigned long stack_shift;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = bprm->vma;
	struct vm_area_struct *prev = NULL;
	unsigned long vm_flags;
	unsigned long stack_base;
	unsigned long stack_size;
	unsigned long stack_expand;
	unsigned long rlim_stack;

#ifdef CONFIG_STACK_GROWSUP
	/* Limit stack size to 1GB */
	stack_base = rlimit_max(RLIMIT_STACK);
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;

	/* Make sure we didn't let the argument array grow too large. */
	if (vma->vm_end - vma->vm_start > stack_base)
		return -ENOMEM;

	stack_base = PAGE_ALIGN(stack_top - stack_base);

	stack_shift = vma->vm_start - stack_base;
	mm->arg_start = bprm->p - stack_shift;
	bprm->p = vma->vm_end - stack_shift;
#else
	stack_top = arch_align_stack(stack_top);
	stack_top = PAGE_ALIGN(stack_top);

	if (unlikely(stack_top < mmap_min_addr) ||
	    unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
		return -ENOMEM;

	stack_shift = vma->vm_end - stack_top;

	bprm->p -= stack_shift;
	mm->arg_start = bprm->p;
#endif

	if (bprm->loader)
		bprm->loader -= stack_shift;
	bprm->exec -= stack_shift;

	down_write(&mm->mmap_sem);
	vm_flags = VM_STACK_FLAGS;

	/*
	 * Adjust stack execute permissions; explicitly enable for
	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
	 * (arch default) otherwise.
	 */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		vm_flags |= VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		vm_flags &= ~VM_EXEC;
	vm_flags |= mm->def_flags;
	vm_flags |= VM_STACK_INCOMPLETE_SETUP;

	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
			vm_flags);
	if (ret)
		goto out_unlock;
	BUG_ON(prev != vma);

	/* Move stack pages down in memory. */
	if (stack_shift) {
		ret = shift_arg_pages(vma, stack_shift);
		if (ret)
			goto out_unlock;
	}

	/* mprotect_fixup is overkill to remove the temporary stack flags */
	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;

	stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
	stack_size = vma->vm_end - vma->vm_start;
	/*
	 * Align this down to a page boundary as expand_stack
	 * will align it up.
	 */
	rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_start + rlim_stack;
	else
		stack_base = vma->vm_end + stack_expand;
#else
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_end - rlim_stack;
	else
		stack_base = vma->vm_start - stack_expand;
#endif
	current->mm->start_stack = bprm->p;
	ret = expand_stack(vma, stack_base);
	if (ret)
		ret = -EFAULT;

out_unlock:
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(setup_arg_pages);
#endif /* CONFIG_MMU */
struct file *open_exec(const char *name)
{
	struct file *file;
	int err;

	file = do_filp_open(AT_FDCWD, name,
				O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
				MAY_EXEC | MAY_OPEN);
	if (IS_ERR(file))
		goto out;

	err = -EACCES;
	if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
		goto exit;

	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	fsnotify_open(file);

	err = deny_write_access(file);
	if (err)
		goto exit;

out:
	return file;

exit:
	fput(file);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(open_exec);
int kernel_read(struct file *file, loff_t offset,
		char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	sync_mm_rss(tsk, old_mm);
	mm_release(tsk, old_mm);

	if (old_mm) {
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec.  We must hold mmap_sem around
		 * checking core_state and changing tsk->mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_state)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		mm_update_next_owner(old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}
/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 */
	spin_lock_irq(lock);
	if (signal_group_exit(sig)) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		return -EAGAIN;
	}

	sig->group_exit_task = tsk;
	sig->notify_count = zap_other_threads(tsk);
	if (!thread_group_leader(tsk))
		sig->notify_count--;

	while (sig->notify_count) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		struct task_struct *leader = tsk->group_leader;

		sig->notify_count = -1;	/* for exit_notify() */
		for (;;) {
			write_lock_irq(&tasklist_lock);
			if (likely(leader->exit_state))
				break;
			__set_current_state(TASK_UNINTERRUPTIBLE);
			write_unlock_irq(&tasklist_lock);
			schedule();
		}

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead.  But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;

		BUG_ON(!same_thread_group(leader, tsk));
		BUG_ON(has_group_leader_pid(tsk));
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 * Note: The old leader also uses this pid until release_task
		 *       is called.  Odd but simple and correct.
		 */
		detach_pid(tsk, PIDTYPE_PID);
		tsk->pid = leader->pid;
		attach_pid(tsk, PIDTYPE_PID, task_pid(leader));
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);

		list_replace_rcu(&leader->tasks, &tsk->tasks);
		list_replace_init(&leader->sibling, &tsk->sibling);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;
		write_unlock_irq(&tasklist_lock);

		release_task(leader);
	}

	sig->group_exit_task = NULL;
	sig->notify_count = 0;

no_thread_group:
	if (current->mm)
		setmax_mm_hiwater_rss(&sig->maxrss, current->mm);

	exit_itimers(sig);
	flush_itimer_signals();

	if (atomic_read(&oldsighand->count) != 1) {
		struct sighand_struct *newsighand;
		/*
		 * This ->sighand is shared with the CLONE_SIGHAND
		 * but not CLONE_THREAD task, switch to the new one.
		 */
		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
		if (!newsighand)
			return -ENOMEM;

		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		rcu_assign_pointer(tsk->sighand, newsighand);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}

	BUG_ON(!thread_group_leader(tsk));
	return 0;
}
/*
 * This function flushes out all traces of the currently running executable
 * so that a new one can be started
 */
static void flush_old_files(struct files_struct * files)
{
	long j = -1;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		fdt = files_fdtable(files);
		if (i >= fdt->max_fds)
			break;
		set = fdt->close_on_exec->fds_bits[j];
		if (!set)
			continue;
		fdt->close_on_exec->fds_bits[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1)
				sys_close(i);
		}
		spin_lock(&files->file_lock);
	}
	spin_unlock(&files->file_lock);
}
char *get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	strncpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
	return buf;
}
void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);

	/*
	 * Threads may access current->comm without holding
	 * the task lock, so write the string carefully.
	 * Readers without a lock may see incomplete new
	 * names but are safe from non-terminating string reads.
	 */
	memset(tsk->comm, 0, TASK_COMM_LEN);
	wmb();
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
	perf_event_comm(tsk);
}
int flush_old_exec(struct linux_binprm * bprm)
{
	int retval;

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	set_mm_exe_file(bprm->mm, bprm->file);

	/*
	 * Release all of the old mmap stuff
	 */
	acct_arg_size(bprm, 0);
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto out;

	bprm->mm = NULL;		/* We're using it now */

	current->flags &= ~PF_RANDOMIZE;
	flush_thread();
	current->personality &= ~bprm->per_clear;

	return 0;

out:
	return retval;
}
EXPORT_SYMBOL(flush_old_exec);
void setup_new_exec(struct linux_binprm * bprm)
{
	int i, ch;
	const char *name;
	char tcomm[sizeof(current->comm)];

	arch_pick_mmap_layout(current->mm);

	/* This is the point of no return */
	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current_euid() == current_uid() && current_egid() == current_gid())
		set_dumpable(current->mm, 1);
	else
		set_dumpable(current->mm, suid_dumpable);

	name = bprm->filename;

	/* Copies the binary name from after last slash */
	for (i=0; (ch = *(name++)) != '\0';) {
		if (ch == '/')
			i = 0; /* overwrite what we wrote */
		else
			if (i < (sizeof(tcomm) - 1))
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
	set_task_comm(current, tcomm);

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc
	 */
	current->mm->task_size = TASK_SIZE;

	/* install the new credentials */
	if (bprm->cred->uid != current_euid() ||
	    bprm->cred->gid != current_egid()) {
		current->pdeath_signal = 0;
	} else if (file_permission(bprm->file, MAY_READ) ||
		   bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP) {
		set_dumpable(current->mm, suid_dumpable);
	}

	/*
	 * Flush performance counters when crossing a
	 * security domain:
	 */
	if (!get_dumpable(current->mm))
		perf_event_exit_task(current);

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	flush_old_files(current->files);
}
EXPORT_SYMBOL(setup_new_exec);
/*
 * Prepare credentials and lock ->cred_guard_mutex.
 * install_exec_creds() commits the new creds and drops the lock.
 * Or, if exec fails before, free_bprm() should release ->cred
 * and unlock.
 */
int prepare_bprm_creds(struct linux_binprm *bprm)
{
	if (mutex_lock_interruptible(&current->cred_guard_mutex))
		return -ERESTARTNOINTR;

	bprm->cred = prepare_exec_creds();
	if (likely(bprm->cred))
		return 0;

	mutex_unlock(&current->cred_guard_mutex);
	return -ENOMEM;
}
void free_bprm(struct linux_binprm *bprm)
{
	free_arg_pages(bprm);
	if (bprm->cred) {
		mutex_unlock(&current->cred_guard_mutex);
		abort_creds(bprm->cred);
	}
	kfree(bprm);
}
/*
 * install the new credentials for this executable
 */
void install_exec_creds(struct linux_binprm *bprm)
{
	security_bprm_committing_creds(bprm);

	commit_creds(bprm->cred);
	bprm->cred = NULL;
	/*
	 * cred_guard_mutex must be held at least to this point to prevent
	 * ptrace_attach() from altering our determination of the task's
	 * credentials; any time after this it may be unlocked.
	 */
	security_bprm_committed_creds(bprm);
	mutex_unlock(&current->cred_guard_mutex);
}
EXPORT_SYMBOL(install_exec_creds);
/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold current->cred_guard_mutex to protect against
 *   PTRACE_ATTACH
 */
int check_unsafe_exec(struct linux_binprm *bprm)
{
	struct task_struct *p = current, *t;
	unsigned n_fs;
	int res = 0;

	bprm->unsafe = tracehook_unsafe_exec(p);

	n_fs = 1;
	spin_lock(&p->fs->lock);
	rcu_read_lock();
	for (t = next_thread(p); t != p; t = next_thread(t)) {
		if (t->fs == p->fs)
			n_fs++;
	}
	rcu_read_unlock();

	if (p->fs->users > n_fs) {
		bprm->unsafe |= LSM_UNSAFE_SHARE;
	} else {
		res = -EAGAIN;
		if (!p->fs->in_exec) {
			p->fs->in_exec = 1;
			res = 1;
		}
	}
	spin_unlock(&p->fs->lock);

	return res;
}
/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	umode_t mode;
	struct inode * inode = bprm->file->f_path.dentry->d_inode;
	int retval;

	mode = inode->i_mode;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	/* clear any previous set[ug]id data from a previous binary */
	bprm->cred->euid = current_euid();
	bprm->cred->egid = current_egid();

	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->euid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->egid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set_creds(bprm);
	if (retval)
		return retval;
	bprm->cred_prepared = 1;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);
/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
	int ret = 0;
	unsigned long offset;
	char *kaddr;
	struct page *page;

	if (!bprm->argc)
		return 0;

	do {
		offset = bprm->p & ~PAGE_MASK;
		page = get_arg_page(bprm, bprm->p, 0);
		if (!page) {
			ret = -EFAULT;
			goto out;
		}
		kaddr = kmap_atomic(page, KM_USER0);

		for (; offset < PAGE_SIZE && kaddr[offset];
				offset++, bprm->p++)
			;

		kunmap_atomic(kaddr, KM_USER0);
		put_arg_page(page);

		if (offset == PAGE_SIZE)
			free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
	} while (offset == PAGE_SIZE);

	bprm->p++;
	bprm->argc--;
	ret = 0;

out:
	return ret;
}
EXPORT_SYMBOL(remove_arg_zero);
/*
 * cycle the list of binary formats handler, until one recognizes the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	unsigned int depth = bprm->recursion_depth;
	int try, retval;
	struct linux_binfmt *fmt;

	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	/* kernel module loader fixup */
	/* so we don't try to run modprobe in kernel space. */
	set_fs(USER_DS);

	retval = audit_bprm(bprm);
	if (retval)
		return retval;

	retval = -ENOENT;
	for (try=0; try<2; try++) {
		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			/*
			 * Restore the depth counter to its starting value
			 * in this call, so we don't have to rely on every
			 * load_binary function to restore it on return.
			 */
			bprm->recursion_depth = depth;
			if (retval >= 0) {
				if (depth == 0)
					tracehook_report_exec(fmt, bprm, regs);
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				proc_exec_connector(current);
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
#ifdef CONFIG_MODULES
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#endif
		}
	}
	return retval;
}

EXPORT_SYMBOL(search_binary_handler);
/*
 * sys_execve() executes a new program.
 */
int do_execve(const char * filename,
	const char __user *const __user *argv,
	const char __user *const __user *envp,
	struct pt_regs * regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	struct files_struct *displaced;
	bool clear_in_exec;
	int retval;

	retval = unshare_files(&displaced);
	if (retval)
		goto out_ret;

	retval = -ENOMEM;
	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_files;

	retval = prepare_bprm_creds(bprm);
	if (retval)
		goto out_free;

	retval = check_unsafe_exec(bprm);
	if (retval < 0)
		goto out_free;
	clear_in_exec = retval;
	current->in_execve = 1;

	file = open_exec(filename);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_unmark;

	sched_exec();

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;

	retval = bprm_mm_init(bprm);
	if (retval)
		goto out_file;

	bprm->argc = count(argv, MAX_ARG_STRINGS);
	if ((retval = bprm->argc) < 0)
		goto out;

	bprm->envc = count(envp, MAX_ARG_STRINGS);
	if ((retval = bprm->envc) < 0)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;

	current->flags &= ~PF_KTHREAD;
	retval = search_binary_handler(bprm, regs);
	if (retval < 0)
		goto out;

	/* execve succeeded */
	current->fs->in_exec = 0;
	current->in_execve = 0;
	acct_update_integrals(current);
	free_bprm(bprm);
	if (displaced)
		put_files_struct(displaced);
	return retval;

out:
	if (bprm->mm) {
		acct_arg_size(bprm, 0);
		mmput(bprm->mm);
	}

out_file:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}

out_unmark:
	if (clear_in_exec)
		current->fs->in_exec = 0;
	current->in_execve = 0;

out_free:
	free_bprm(bprm);

out_files:
	if (displaced)
		reset_files_struct(displaced);
out_ret:
	return retval;
}
void set_binfmt(struct linux_binfmt *new)
{
	struct mm_struct *mm = current->mm;

	if (mm->binfmt)
		module_put(mm->binfmt->module);

	mm->binfmt = new;
	if (new)
		__module_get(new->module);
}

EXPORT_SYMBOL(set_binfmt);
/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(char *corename, long signr)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	char *out_ptr = corename;
	char *const out_end = corename + CORENAME_MAX_SIZE;
	int rc;
	int pid_in_pattern = 0;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (out_ptr == out_end)
				goto out;
			*out_ptr++ = *pat_ptr++;
		} else {
			switch (*++pat_ptr) {
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				if (out_ptr == out_end)
					goto out;
				*out_ptr++ = '%';
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", task_tgid_vnr(current));
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* uid */
			case 'u':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", cred->uid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* gid */
			case 'g':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", cred->gid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* signal that caused the coredump */
			case 's':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%ld", signr);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", tv.tv_sec);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", utsname()->nodename);
				up_read(&uts_sem);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* executable */
			case 'e':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", current->comm);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* core limit size */
			case 'c':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", rlimit(RLIMIT_CORE));
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			default:
				break;
			}
			++pat_ptr;
		}
	}
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		rc = snprintf(out_ptr, out_end - out_ptr,
			      ".%d", task_tgid_vnr(current));
		if (rc > out_end - out_ptr)
			goto out;
		out_ptr += rc;
	}
out:
	*out_ptr = 0;
	return ispipe;
}
*start
, int exit_code
)
1611 struct task_struct
*t
;
1614 start
->signal
->flags
= SIGNAL_GROUP_EXIT
;
1615 start
->signal
->group_exit_code
= exit_code
;
1616 start
->signal
->group_stop_count
= 0;
1620 if (t
!= current
&& t
->mm
) {
1621 sigaddset(&t
->pending
.signal
, SIGKILL
);
1622 signal_wake_up(t
, 1);
1625 } while_each_thread(start
, t
);
static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
				struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		nr = zap_process(tsk, exit_code);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
	/*
	 * We should find and kill all tasks which use this mm, and we should
	 * count them correctly into ->nr_threads. We don't take tasklist
	 * lock, but this is safe wrt:
	 *
	 * fork:
	 *	None of sub-threads can fork after zap_process(leader). All
	 *	processes which were created before this point should be
	 *	visible to zap_threads() because copy_process() adds the new
	 *	process to the tail of init_task.tasks list, and lock/unlock
	 *	of ->siglock provides a memory barrier.
	 *
	 * do_exit:
	 *	The caller holds mm->mmap_sem. This means that the task which
	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
	 *	its ->mm.
	 *
	 * de_thread:
	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
	 *	we must see either old or new leader, this does not matter.
	 *	However, it can change p->sighand, so lock_task_sighand(p)
	 *	must be used. Since p->mm != NULL and we hold ->mmap_sem
	 *	it can't fail.
	 *
	 *	Note also that "g" can be the old leader with ->mm == NULL
	 *	and already unhashed and thus removed from ->thread_group.
	 *	This is OK, __unhash_process()->list_del_rcu() does not
	 *	clear the ->next pointer, we will find the new leader via
	 *	next_thread().
	 */
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;
		p = g;
		do {
			if (p->mm) {
				if (unlikely(p->mm == mm)) {
					lock_task_sighand(p, &flags);
					nr += zap_process(p, exit_code);
					unlock_task_sighand(p, &flags);
				}
				break;
			}
		} while_each_thread(g, p);
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}
static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct completion *vfork_done;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	down_write(&mm->mmap_sem);
	if (!mm->core_state)
		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	up_write(&mm->mmap_sem);

	if (unlikely(core_waiters < 0))
		goto fail;

	/*
	 * Make sure nobody is waiting for us to release the VM,
	 * otherwise we can deadlock when we wait on each other
	 */
	vfork_done = tsk->vfork_done;
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	if (core_waiters)
		wait_for_completion(&core_state->startup);
fail:
	return core_waiters;
}
static void coredump_finish(struct mm_struct *mm)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see exit_mm(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}

	mm->core_state = NULL;
}
/*
 * set_dumpable converts traditional three-value dumpable to two flags and
 * stores them into mm->flags.  It modifies the lower two bits of mm->flags,
 * but these bits are not changed atomically, so get_dumpable can observe
 * the intermediate state.  To avoid unexpected behavior, get_dumpable
 * returns either the old dumpable value or the new one by paying attention
 * to the order of modifying the bits.
 *
 * dumpable |   mm->flags (binary)
 * old  new | initial interim  final
 * ---------+-----------------------
 *  0    1  |   00      01      01
 *  0    2  |   00      10(*)   11
 *  1    0  |   01      00      00
 *  1    2  |   01      11      11
 *  2    0  |   11      10(*)   00
 *  2    1  |   11      11      01
 *
 * (*) get_dumpable regards interim value of 10 as 11.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
	switch (value) {
	case 0:
		clear_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 1:
		set_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 2:
		set_bit(MMF_DUMP_SECURELY, &mm->flags);
		smp_wmb();
		set_bit(MMF_DUMPABLE, &mm->flags);
		break;
	}
}
static int __get_dumpable(unsigned long mm_flags)
{
	int ret;

	ret = mm_flags & MMF_DUMPABLE_MASK;
	return (ret >= 2) ? 2 : ret;
}

int get_dumpable(struct mm_struct *mm)
{
	return __get_dumpable(mm->flags);
}
static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe;

	pipe = file->f_path.dentry->d_inode->i_pipe;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;

	while ((pipe->readers > 1) && (!signal_pending(current))) {
		wake_up_interruptible_sync(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		pipe_wait(pipe);
	}

	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}
/*
 * umh_pipe_setup
 * helper function to customize the process used
 * to collect the core in userspace.  Specifically
 * it sets up a pipe and installs it as fd 0 (stdin)
 * for the process.  Returns 0 on success, or
 * PTR_ERR on failure.
 * Note that it also sets the core limit to 1.  This
 * is a special value that we use to trap recursive
 * core dumps
 */
static int umh_pipe_setup(struct subprocess_info *info)
{
	struct file *rp, *wp;
	struct fdtable *fdt;
	struct coredump_params *cp = (struct coredump_params *)info->data;
	struct files_struct *cf = current->files;

	wp = create_write_pipe(0);
	if (IS_ERR(wp))
		return PTR_ERR(wp);

	rp = create_read_pipe(wp, 0);
	if (IS_ERR(rp)) {
		free_write_pipe(wp);
		return PTR_ERR(rp);
	}

	cp->file = wp;

	sys_close(0);
	fd_install(0, rp);
	spin_lock(&cf->file_lock);
	fdt = files_fdtable(cf);
	FD_SET(0, fdt->open_fds);
	FD_CLR(0, fdt->close_on_exec);
	spin_unlock(&cf->file_lock);

	/* and disallow core files too */
	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return 0;
}
void do_coredump(long signr, int exit_code, struct pt_regs *regs)
{
	struct core_state core_state;
	char corename[CORENAME_MAX_SIZE + 1];
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int flag = 0;
	int ispipe;
	static atomic_t core_dump_count = ATOMIC_INIT(0);
	struct coredump_params cprm = {
		.signr = signr,
		.regs = regs,
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to avoid
		 * inconsistency of bit flags, since this flag is not protected
		 * by any locks.
		 */
		.mm_flags = mm->flags,
	};

	audit_core_dumps(signr);

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (!__get_dumpable(cprm.mm_flags))
		goto fail;

	cred = prepare_creds();
	if (!cred)
		goto fail;
	/*
	 * We cannot trust fsuid as being the "true" uid of the
	 * process nor do we know its entire history. We only know it
	 * was tainted so we dump it as root in mode 2.
	 */
	if (__get_dumpable(cprm.mm_flags) == 2) {
		/* Setuid core dump mode */
		flag = O_EXCL;		/* Stop rewrite attacks */
		cred->fsuid = 0;	/* Dump root private */
	}

	retval = coredump_wait(exit_code, &core_state);
	if (retval < 0)
		goto fail_creds;

	old_cred = override_creds(cred);

	/*
	 * Clear any false indication of pending signals that might
	 * be seen by the filesystem code called to write the core file.
	 */
	clear_thread_flag(TIF_SIGPENDING);

	ispipe = format_corename(corename, signr);

	if (ispipe) {
		int dump_count;
		char **helper_argv;

		if (cprm.limit == 1) {
			/*
			 * Normally core limits are irrelevant to pipes, since
			 * we're not writing to the file system, but we use
			 * cprm.limit of 1 here as a special value. Any
			 * non-1 limit gets set to RLIM_INFINITY below, but
			 * a limit of 0 skips the dump.  This is a consistent
			 * way to catch recursive crashes.  We can still crash
			 * if the core_pattern binary sets RLIM_CORE = !1,
			 * but it runs as root, and can do lots of stupid things.
			 * Note that we use task_tgid_vnr here to grab the pid
			 * of the process group leader.  That way we get the
			 * right pid if a thread in a multi-threaded
			 * core_pattern process dies.
			 */
			printk(KERN_WARNING
				"Process %d(%s) has RLIMIT_CORE set to 1\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}
		cprm.limit = RLIM_INFINITY;

		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
			       task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_dropcount;
		}

		helper_argv = argv_split(GFP_KERNEL, corename+1, NULL);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_dropcount;
		}

		retval = call_usermodehelper_fns(helper_argv[0], helper_argv,
					NULL, UMH_WAIT_EXEC, umh_pipe_setup,
					NULL, &cprm);
		argv_free(helper_argv);
		if (retval) {
			printk(KERN_INFO "Core dump to %s pipe failed\n",
			       corename);
			goto close_fail;
		}
	} else {
		struct inode *inode;

		if (cprm.limit < binfmt->min_coredump)
			goto fail_unlock;

		cprm.file = filp_open(corename,
				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
				 0600);
		if (IS_ERR(cprm.file))
			goto fail_unlock;

		inode = cprm.file->f_path.dentry->d_inode;
		if (inode->i_nlink > 1)
			goto close_fail;
		if (d_unhashed(cprm.file->f_path.dentry))
			goto close_fail;
		/*
		 * AK: actually i see no reason to not allow this for named
		 * pipes etc, but keep the previous behaviour for now.
		 */
		if (!S_ISREG(inode->i_mode))
			goto close_fail;
		/*
		 * Don't allow local users get cute and trick others to coredump
		 * into their pre-created files.
		 */
		if (inode->i_uid != current_fsuid())
			goto close_fail;
		if (!cprm.file->f_op || !cprm.file->f_op->write)
			goto close_fail;
		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
			goto close_fail;
	}

	retval = binfmt->core_dump(&cprm);
	if (retval)
		current->signal->group_exit_code |= 0x80;

	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(cprm.file);
close_fail:
	if (cprm.file)
		filp_close(cprm.file, NULL);
fail_dropcount:
	if (ispipe)
		atomic_dec(&core_dump_count);
fail_unlock:
	coredump_finish(mm);
	revert_creds(old_cred);
fail_creds:
	put_cred(cred);
fail:
	return;
}
/*
 * Core dumping helper functions.  These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
int dump_write(struct file *file, const void *addr, int nr)
{
	return access_ok(VERIFY_READ, addr, nr) &&
		file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}
EXPORT_SYMBOL(dump_write);

int dump_seek(struct file *file, loff_t off)
{
	int ret = 1;

	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
			return 0;
	} else {
		char *buf = (char *)get_zeroed_page(GFP_KERNEL);

		if (!buf)
			return 0;
		while (off > 0) {
			unsigned long n = off;

			if (n > PAGE_SIZE)
				n = PAGE_SIZE;
			if (!dump_write(file, buf, n)) {
				ret = 0;
				break;
			}
			off -= n;
		}
		free_page((unsigned long)buf);
	}
	return ret;
}
EXPORT_SYMBOL(dump_seek);