/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * #!-checking implemented by tytso.
 *
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale.  Use mmap instead,
 * current->executable is only used by the procfs.  This allows a dispatch
 * table to check for several different types of binary formats.  We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>

char core_pattern[CORENAME_MAX_SIZE] = "core";
unsigned int core_pipe_limit;
int suid_dumpable = 0;

/* The maximal length of core_pattern is also specified in sysctl.c */

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);

int __register_binfmt(struct linux_binfmt * fmt, int insert)
{
	if (!fmt)
		return -EINVAL;
	write_lock(&binfmt_lock);
	insert ? list_add(&fmt->lh, &formats) :
		 list_add_tail(&fmt->lh, &formats);
	write_unlock(&binfmt_lock);
	return 0;
}

EXPORT_SYMBOL(__register_binfmt);

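/*
 * A minimal usage sketch (not part of this file): a handler module
 * would normally go through the register_binfmt()/insert_binfmt()
 * wrappers from <linux/binfmts.h> rather than call __register_binfmt()
 * directly.  "example_format" and "example_load_binary" below are
 * hypothetical names used only for illustration:
 *
 *	static int example_load_binary(struct linux_binprm *bprm,
 *				       struct pt_regs *regs);
 *
 *	static struct linux_binfmt example_format = {
 *		.module		= THIS_MODULE,
 *		.load_binary	= example_load_binary,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_binfmt(&example_format);
 *	}
 */
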
void unregister_binfmt(struct linux_binfmt * fmt)
{
	write_lock(&binfmt_lock);
	list_del(&fmt->lh);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);

static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}

/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that the address to load from is taken from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
	struct file *file;
	char *tmp = getname(library);
	int error = PTR_ERR(tmp);

	if (IS_ERR(tmp))
		goto out;

	file = do_filp_open(AT_FDCWD, tmp,
				O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
				MAY_READ | MAY_EXEC | MAY_OPEN);
	putname(tmp);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -EINVAL;
	if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
		goto exit;

	error = -EACCES;
	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	error = -ENOEXEC;
	if (file->f_op) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			if (!fmt->load_shlib)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
exit:
	fput(file);
out:
	return error;
}

#ifdef CONFIG_MMU

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;
	int ret;

#ifdef CONFIG_STACK_GROWSUP
	if (write) {
		ret = expand_stack_downwards(bprm->vma, pos);
		if (ret < 0)
			return NULL;
	}
#endif
	ret = get_user_pages(current, bprm->mm, pos,
			1, write, 1, &page, NULL);
	if (ret <= 0)
		return NULL;

	if (write) {
		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
		struct rlimit *rlim;

		/*
		 * We've historically supported up to 32 pages (ARG_MAX)
		 * of argument strings even with small stacks
		 */
		if (size <= ARG_MAX)
			return page;

		/*
		 * Limit to 1/4-th the stack size for the argv+env strings.
		 * This ensures that:
		 *  - the remaining binfmt code will not run out of stack space,
		 *  - the program will have a reasonable amount of stack left
		 *    to work from.
		 */
		rlim = current->signal->rlim;
		if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
			put_page(page);
			return NULL;
		}
	}

	return page;
}

static void put_arg_page(struct page *page)
{
	put_page(page);
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	down_write(&mm->mmap_sem);
	vma->vm_mm = mm;

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
	BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;
	vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	err = insert_vm_struct(mm, vma);
	if (err)
		goto err;

	mm->stack_vm = mm->total_vm = 1;
	up_write(&mm->mmap_sem);
	bprm->p = vma->vm_end - sizeof(void *);
	return 0;
err:
	up_write(&mm->mmap_sem);
	bprm->vma = NULL;
	kmem_cache_free(vm_area_cachep, vma);
	return err;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= MAX_ARG_STRLEN;
}

#else

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;

	page = bprm->page[pos / PAGE_SIZE];
	if (!page && write) {
		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
		if (!page)
			return NULL;
		bprm->page[pos / PAGE_SIZE] = page;
	}

	return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
	if (bprm->page[i]) {
		__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

static void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++)
		free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
	return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= bprm->p;
}

#endif /* CONFIG_MMU */

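/*
 * Worked example of the two limits above (assuming 4 KiB pages): with
 * an MMU, MAX_ARG_STRLEN is 32 * PAGE_SIZE, so a single argument or
 * environment string may be up to 128 KiB, and the aggregate size is
 * only capped later by the RLIMIT_STACK/4 check in get_arg_page().
 * Without an MMU, everything must fit below bprm->p, i.e. within the
 * fixed MAX_ARG_PAGES-page argument area.
 */
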
/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct.  We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values.  We'll update
 * them later in setup_arg_pages().
 */
int bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct mm_struct *mm = NULL;

	bprm->mm = mm = mm_alloc();
	err = -ENOMEM;
	if (!mm)
		goto err;

	err = init_new_context(current, mm);
	if (err)
		goto err;

	err = __bprm_mm_init(bprm);
	if (err)
		goto err;

	return 0;

err:
	if (mm) {
		bprm->mm = NULL;
		mmdrop(mm);
	}

	return err;
}

/*
 * count() counts the number of strings in array ARGV.
 */
static int count(const char __user * const __user * argv, int max)
{
	int i = 0;

	if (argv != NULL) {
		for (;;) {
			const char __user * p;

			if (get_user(p, argv))
				return -EFAULT;
			if (!p)
				break;
			argv++;
			if (i++ >= max)
				return -E2BIG;
		}
	}
	return i;
}

/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack.  The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, const char __user *const __user *argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	unsigned long kpos = 0;
	int ret;

	while (argc-- > 0) {
		const char __user *str;
		int len;
		unsigned long pos;

		if (get_user(str, argv+argc) ||
				!(len = strnlen_user(str, MAX_ARG_STRLEN))) {
			ret = -EFAULT;
			goto out;
		}

		if (!valid_arg_len(bprm, len)) {
			ret = -E2BIG;
			goto out;
		}

		/* We're going to work our way backwards. */
		pos = bprm->p;
		str += len;
		bprm->p -= len;

		while (len > 0) {
			int offset, bytes_to_copy;

			offset = pos % PAGE_SIZE;
			if (offset == 0)
				offset = PAGE_SIZE;

			bytes_to_copy = offset;
			if (bytes_to_copy > len)
				bytes_to_copy = len;

			offset -= bytes_to_copy;
			pos -= bytes_to_copy;
			str -= bytes_to_copy;
			len -= bytes_to_copy;

			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
				struct page *page;

				page = get_arg_page(bprm, pos, 1);
				if (!page) {
					ret = -E2BIG;
					goto out;
				}

				if (kmapped_page) {
					flush_kernel_dcache_page(kmapped_page);
					kunmap(kmapped_page);
					put_arg_page(kmapped_page);
				}
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
				kpos = pos & PAGE_MASK;
				flush_arg_page(bprm, kpos, kmapped_page);
			}
			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
				ret = -EFAULT;
				goto out;
			}
		}
	}
	ret = 0;
out:
	if (kmapped_page) {
		flush_kernel_dcache_page(kmapped_page);
		kunmap(kmapped_page);
		put_arg_page(kmapped_page);
	}
	return ret;
}

/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, const char *const *argv,
			struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	set_fs(KERNEL_DS);
	r = copy_strings(argc, (const char __user *const __user *)argv, bprm);
	set_fs(oldfs);
	return r;
}
EXPORT_SYMBOL(copy_strings_kernel);

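/*
 * Usage sketch: do_execve() below pushes the executable's own name as
 * the deepest string on the new stack with
 *
 *	copy_strings_kernel(1, &bprm->filename, bprm);
 *
 * The temporary set_fs(KERNEL_DS) is what lets copy_strings()'s __user
 * accessors accept these kernel pointers; the old segment is restored
 * before returning.
 */
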
#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location.  The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges.  This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	struct mmu_gather *tlb;

	BUG_ON(new_start > new_end);

	/*
	 * ensure there are no vmas between where we want to go
	 * and where we are
	 */
	if (vma != find_vma(mm, new_start))
		return -EFAULT;

	/*
	 * cover the whole range: [new_start, old_end)
	 */
	if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
		return -ENOMEM;

	/*
	 * move the page tables downwards, on failure we rely on
	 * process cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length))
		return -ENOMEM;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	if (new_end > old_start) {
		/*
		 * when the old and new regions overlap clear from new_end.
		 */
		free_pgd_range(tlb, new_end, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	} else {
		/*
		 * otherwise, clean from old_start; this is done to not touch
		 * the address space in [new_end, old_start) some architectures
		 * have constraints on va-space that make this illegal (IA64) -
		 * for the others it's just a little faster.
		 */
		free_pgd_range(tlb, old_start, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	}
	tlb_finish_mmu(tlb, new_end, old_end);

	/*
	 * Shrink the vma to just the new range.  Always succeeds.
	 */
	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

	return 0;
}

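/*
 * Worked example (hypothetical addresses): if the temporary stack vma
 * is [0xbffff000, 0xc0000000) and the binfmt asks for shift = 0x10000,
 * then new_start = 0xbffef000 and new_end = 0xbfff0000.  Step 2 grows
 * the vma to [0xbffef000, 0xc0000000), step 3 moves one page worth of
 * page tables down by the shift, and step 5 shrinks the vma to the
 * final [0xbffef000, 0xbfff0000).
 */
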
/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long ret;
	unsigned long stack_shift;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = bprm->vma;
	struct vm_area_struct *prev = NULL;
	unsigned long vm_flags;
	unsigned long stack_base;
	unsigned long stack_size;
	unsigned long stack_expand;
	unsigned long rlim_stack;

#ifdef CONFIG_STACK_GROWSUP
	/* Limit stack size to 1GB */
	stack_base = rlimit_max(RLIMIT_STACK);
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;

	/* Make sure we didn't let the argument array grow too large. */
	if (vma->vm_end - vma->vm_start > stack_base)
		return -ENOMEM;

	stack_base = PAGE_ALIGN(stack_top - stack_base);

	stack_shift = vma->vm_start - stack_base;
	mm->arg_start = bprm->p - stack_shift;
	bprm->p = vma->vm_end - stack_shift;
#else
	stack_top = arch_align_stack(stack_top);
	stack_top = PAGE_ALIGN(stack_top);
	stack_shift = vma->vm_end - stack_top;

	bprm->p -= stack_shift;
	mm->arg_start = bprm->p;
#endif

	if (bprm->loader)
		bprm->loader -= stack_shift;
	bprm->exec -= stack_shift;

	down_write(&mm->mmap_sem);
	vm_flags = VM_STACK_FLAGS;

	/*
	 * Adjust stack execute permissions; explicitly enable for
	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
	 * (arch default) otherwise.
	 */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		vm_flags |= VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		vm_flags &= ~VM_EXEC;
	vm_flags |= mm->def_flags;
	vm_flags |= VM_STACK_INCOMPLETE_SETUP;

	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
			vm_flags);
	if (ret)
		goto out_unlock;
	BUG_ON(prev != vma);

	/* Move stack pages down in memory. */
	if (stack_shift) {
		ret = shift_arg_pages(vma, stack_shift);
		if (ret)
			goto out_unlock;
	}

	/* mprotect_fixup is overkill to remove the temporary stack flags */
	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;

	stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
	stack_size = vma->vm_end - vma->vm_start;
	/*
	 * Align this down to a page boundary as expand_stack
	 * will align it up.
	 */
	rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_start + rlim_stack;
	else
		stack_base = vma->vm_end + stack_expand;
#else
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_end - rlim_stack;
	else
		stack_base = vma->vm_start - stack_expand;
#endif
	current->mm->start_stack = bprm->p;
	ret = expand_stack(vma, stack_base);
	if (ret)
		ret = -EFAULT;

out_unlock:
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(setup_arg_pages);

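/*
 * Caller sketch: a binfmt invokes this once it has chosen the stack
 * placement; binfmt_elf, for instance, does roughly (error handling
 * omitted):
 *
 *	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
 *				 executable_stack);
 */
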
#endif /* CONFIG_MMU */

struct file *open_exec(const char *name)
{
	struct file *file;
	int err;

	file = do_filp_open(AT_FDCWD, name,
				O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
				MAY_EXEC | MAY_OPEN);
	if (IS_ERR(file))
		goto out;

	err = -EACCES;
	if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
		goto exit;

	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	err = deny_write_access(file);
	if (err)
		goto exit;

out:
	return file;

exit:
	fput(file);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(open_exec);

int kernel_read(struct file *file, loff_t offset,
		char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);

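/*
 * Usage sketch: prepare_binprm() below uses this to sniff the first
 * BINPRM_BUF_SIZE (128) bytes of the image into bprm->buf:
 *
 *	kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
 */
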
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	sync_mm_rss(tsk, old_mm);
	mm_release(tsk, old_mm);

	if (old_mm) {
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec.  We must hold mmap_sem around
		 * checking core_state and changing tsk->mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_state)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		mm_update_next_owner(old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}

/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 */
	spin_lock_irq(lock);
	if (signal_group_exit(sig)) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		return -EAGAIN;
	}

	sig->group_exit_task = tsk;
	sig->notify_count = zap_other_threads(tsk);
	if (!thread_group_leader(tsk))
		sig->notify_count--;

	while (sig->notify_count) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		struct task_struct *leader = tsk->group_leader;

		sig->notify_count = -1;	/* for exit_notify() */
		for (;;) {
			write_lock_irq(&tasklist_lock);
			if (likely(leader->exit_state))
				break;
			__set_current_state(TASK_UNINTERRUPTIBLE);
			write_unlock_irq(&tasklist_lock);
			schedule();
		}

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead.  But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;

		BUG_ON(!same_thread_group(leader, tsk));
		BUG_ON(has_group_leader_pid(tsk));
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 * Note: The old leader also uses this pid until release_task
		 *       is called.  Odd but simple and correct.
		 */
		detach_pid(tsk, PIDTYPE_PID);
		tsk->pid = leader->pid;
		attach_pid(tsk, PIDTYPE_PID, task_pid(leader));
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);

		list_replace_rcu(&leader->tasks, &tsk->tasks);
		list_replace_init(&leader->sibling, &tsk->sibling);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;
		write_unlock_irq(&tasklist_lock);

		release_task(leader);
	}

	sig->group_exit_task = NULL;
	sig->notify_count = 0;

no_thread_group:
	if (current->mm)
		setmax_mm_hiwater_rss(&sig->maxrss, current->mm);

	exit_itimers(sig);
	flush_itimer_signals();

	if (atomic_read(&oldsighand->count) != 1) {
		struct sighand_struct *newsighand;
		/*
		 * This ->sighand is shared with the CLONE_SIGHAND
		 * but not CLONE_THREAD task, switch to the new one.
		 */
		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
		if (!newsighand)
			return -ENOMEM;

		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		rcu_assign_pointer(tsk->sighand, newsighand);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}

	BUG_ON(!thread_group_leader(tsk));
	return 0;
}

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */
static void flush_old_files(struct files_struct * files)
{
	long j = -1;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		fdt = files_fdtable(files);
		if (i >= fdt->max_fds)
			break;
		set = fdt->close_on_exec->fds_bits[j];
		if (!set)
			continue;
		fdt->close_on_exec->fds_bits[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1)
				sys_close(i);
		}
		spin_lock(&files->file_lock);
	}
	spin_unlock(&files->file_lock);
}

char *get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	strncpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
	return buf;
}

void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);

	/*
	 * Threads may access current->comm without holding
	 * the task lock, so write the string carefully.
	 * Readers without a lock may see incomplete new
	 * names but are safe from non-terminating string reads.
	 */
	memset(tsk->comm, 0, TASK_COMM_LEN);
	wmb();
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
	perf_event_comm(tsk);
}

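/*
 * Usage sketch (hypothetical caller); note that comm names are
 * truncated to TASK_COMM_LEN - 1 characters plus the terminating NUL:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, current);
 *	set_task_comm(current, "worker");
 */
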
int flush_old_exec(struct linux_binprm * bprm)
{
	int retval;

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	set_mm_exe_file(bprm->mm, bprm->file);

	/*
	 * Release all of the old mmap stuff
	 */
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto out;

	bprm->mm = NULL;		/* We're using it now */

	current->flags &= ~PF_RANDOMIZE;
	flush_thread();
	current->personality &= ~bprm->per_clear;

	return 0;

out:
	return retval;
}
EXPORT_SYMBOL(flush_old_exec);

void setup_new_exec(struct linux_binprm * bprm)
{
	int i, ch;
	const char *name;
	char tcomm[sizeof(current->comm)];

	arch_pick_mmap_layout(current->mm);

	/* This is the point of no return */
	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current_euid() == current_uid() && current_egid() == current_gid())
		set_dumpable(current->mm, 1);
	else
		set_dumpable(current->mm, suid_dumpable);

	name = bprm->filename;

	/* Copies the binary name from after last slash */
	for (i=0; (ch = *(name++)) != '\0';) {
		if (ch == '/')
			i = 0; /* overwrite what we wrote */
		else
			if (i < (sizeof(tcomm) - 1))
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
	set_task_comm(current, tcomm);

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc
	 */
	current->mm->task_size = TASK_SIZE;

	/* install the new credentials */
	if (bprm->cred->uid != current_euid() ||
	    bprm->cred->gid != current_egid()) {
		current->pdeath_signal = 0;
	} else if (file_permission(bprm->file, MAY_READ) ||
		   bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP) {
		set_dumpable(current->mm, suid_dumpable);
	}

	/*
	 * Flush performance counters when crossing a
	 * security domain:
	 */
	if (!get_dumpable(current->mm))
		perf_event_exit_task(current);

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	flush_old_files(current->files);
}
EXPORT_SYMBOL(setup_new_exec);

/*
 * Prepare credentials and lock ->cred_guard_mutex.
 * install_exec_creds() commits the new creds and drops the lock.
 * Or, if exec fails before, free_bprm() should release ->cred and
 * unlock.
 */
int prepare_bprm_creds(struct linux_binprm *bprm)
{
	if (mutex_lock_interruptible(&current->cred_guard_mutex))
		return -ERESTARTNOINTR;

	bprm->cred = prepare_exec_creds();
	if (likely(bprm->cred))
		return 0;

	mutex_unlock(&current->cred_guard_mutex);
	return -ENOMEM;
}

void free_bprm(struct linux_binprm *bprm)
{
	free_arg_pages(bprm);
	if (bprm->cred) {
		mutex_unlock(&current->cred_guard_mutex);
		abort_creds(bprm->cred);
	}
	kfree(bprm);
}

/*
 * install the new credentials for this executable
 */
void install_exec_creds(struct linux_binprm *bprm)
{
	security_bprm_committing_creds(bprm);

	commit_creds(bprm->cred);
	bprm->cred = NULL;
	/*
	 * cred_guard_mutex must be held at least to this point to prevent
	 * ptrace_attach() from altering our determination of the task's
	 * credentials; any time after this it may be unlocked.
	 */
	security_bprm_committed_creds(bprm);
	mutex_unlock(&current->cred_guard_mutex);
}
EXPORT_SYMBOL(install_exec_creds);

/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold current->cred_guard_mutex to protect against
 *   PTRACE_ATTACH
 */
int check_unsafe_exec(struct linux_binprm *bprm)
{
	struct task_struct *p = current, *t;
	unsigned n_fs;
	int res = 0;

	bprm->unsafe = tracehook_unsafe_exec(p);

	n_fs = 1;
	spin_lock(&p->fs->lock);
	rcu_read_lock();
	for (t = next_thread(p); t != p; t = next_thread(t)) {
		if (t->fs == p->fs)
			n_fs++;
	}
	rcu_read_unlock();

	if (p->fs->users > n_fs) {
		bprm->unsafe |= LSM_UNSAFE_SHARE;
	} else {
		res = -EAGAIN;
		if (!p->fs->in_exec) {
			p->fs->in_exec = 1;
			res = 1;
		}
	}
	spin_unlock(&p->fs->lock);

	return res;
}

/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	umode_t mode;
	struct inode * inode = bprm->file->f_path.dentry->d_inode;
	int retval;

	mode = inode->i_mode;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	/* clear any previous set[ug]id data from a previous binary */
	bprm->cred->euid = current_euid();
	bprm->cred->egid = current_egid();

	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->euid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->egid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set_creds(bprm);
	if (retval)
		return retval;
	bprm->cred_prepared = 1;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);

/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
	int ret = 0;
	unsigned long offset;
	char *kaddr;
	struct page *page;

	if (!bprm->argc)
		return 0;

	do {
		offset = bprm->p & ~PAGE_MASK;
		page = get_arg_page(bprm, bprm->p, 0);
		if (!page) {
			ret = -EFAULT;
			goto out;
		}
		kaddr = kmap_atomic(page, KM_USER0);

		for (; offset < PAGE_SIZE && kaddr[offset];
				offset++, bprm->p++)
			;

		kunmap_atomic(kaddr, KM_USER0);
		put_arg_page(page);

		if (offset == PAGE_SIZE)
			free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
	} while (offset == PAGE_SIZE);

	bprm->p++;
	bprm->argc--;

out:
	return ret;
}
EXPORT_SYMBOL(remove_arg_zero);

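/*
 * Worked example: binfmt_script uses this when splicing in an
 * interpreter.  If the strings at bprm->p are "script.sh\0-x\0", a call
 * to remove_arg_zero() advances bprm->p just past "script.sh\0" and
 * decrements bprm->argc; the interpreter path can then be pushed as
 * the new argv[0] with copy_strings_kernel().
 */
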
/*
 * cycle the list of binary formats handler, until one recognizes the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	unsigned int depth = bprm->recursion_depth;
	int try, retval;
	struct linux_binfmt *fmt;

	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	/* kernel module loader fixup */
	/* so we don't try to run modprobe in kernel space. */
	set_fs(USER_DS);

	retval = audit_bprm(bprm);
	if (retval)
		return retval;

	retval = -ENOENT;
	for (try=0; try<2; try++) {
		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			/*
			 * Restore the depth counter to its starting value
			 * in this call, so we don't have to rely on every
			 * load_binary function to restore it on return.
			 */
			bprm->recursion_depth = depth;
			if (retval >= 0) {
				if (depth == 0)
					tracehook_report_exec(fmt, bprm, regs);
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				proc_exec_connector(current);
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
		}
		read_unlock(&binfmt_lock);
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
#ifdef CONFIG_MODULES
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#endif
		}
	}
	return retval;
}

EXPORT_SYMBOL(search_binary_handler);

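/*
 * Example of the modprobe fallback above: for an unrecognized,
 * non-printable image whose header bytes 2-3 read as the 16-bit value
 * 0x0b01 (hypothetical), the kernel requests the module alias
 * "binfmt-0b01"; a loadable handler for such a format would declare a
 * matching MODULE_ALIAS so it can be demand-loaded.
 */
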
/*
 * sys_execve() executes a new program.
 */
int do_execve(const char * filename,
	const char __user *const __user *argv,
	const char __user *const __user *envp,
	struct pt_regs * regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	struct files_struct *displaced;
	bool clear_in_exec;
	int retval;

	retval = unshare_files(&displaced);
	if (retval)
		goto out_ret;

	retval = -ENOMEM;
	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_files;

	retval = prepare_bprm_creds(bprm);
	if (retval)
		goto out_free;

	retval = check_unsafe_exec(bprm);
	if (retval < 0)
		goto out_free;
	clear_in_exec = retval;
	current->in_execve = 1;

	file = open_exec(filename);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_unmark;

	sched_exec();

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;

	retval = bprm_mm_init(bprm);
	if (retval)
		goto out_file;

	bprm->argc = count(argv, MAX_ARG_STRINGS);
	if ((retval = bprm->argc) < 0)
		goto out;

	bprm->envc = count(envp, MAX_ARG_STRINGS);
	if ((retval = bprm->envc) < 0)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;

	current->flags &= ~PF_KTHREAD;
	retval = search_binary_handler(bprm, regs);
	if (retval < 0)
		goto out;

	/* execve succeeded */
	current->fs->in_exec = 0;
	current->in_execve = 0;
	acct_update_integrals(current);
	free_bprm(bprm);
	if (displaced)
		put_files_struct(displaced);
	return retval;

out:
	if (bprm->mm)
		mmput(bprm->mm);

out_file:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}

out_unmark:
	if (clear_in_exec)
		current->fs->in_exec = 0;
	current->in_execve = 0;

out_free:
	free_bprm(bprm);

out_files:
	if (displaced)
		reset_files_struct(displaced);
out_ret:
	return retval;
}

void set_binfmt(struct linux_binfmt *new)
{
	struct mm_struct *mm = current->mm;

	if (mm->binfmt)
		module_put(mm->binfmt->module);

	mm->binfmt = new;
	if (new)
		__module_get(new->module);
}

EXPORT_SYMBOL(set_binfmt);

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(char *corename, long signr)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	char *out_ptr = corename;
	char *const out_end = corename + CORENAME_MAX_SIZE;
	int rc;
	int pid_in_pattern = 0;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (out_ptr == out_end)
				goto out;
			*out_ptr++ = *pat_ptr++;
		} else {
			switch (*++pat_ptr) {
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				if (out_ptr == out_end)
					goto out;
				*out_ptr++ = '%';
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", task_tgid_vnr(current));
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* uid */
			case 'u':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", cred->uid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* gid */
			case 'g':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", cred->gid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* signal that caused the coredump */
			case 's':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%ld", signr);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", tv.tv_sec);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", utsname()->nodename);
				up_read(&uts_sem);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* executable */
			case 'e':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", current->comm);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* core limit size */
			case 'c':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", rlimit(RLIMIT_CORE));
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			default:
				break;
			}
			++pat_ptr;
		}
	}
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		rc = snprintf(out_ptr, out_end - out_ptr,
			      ".%d", task_tgid_vnr(current));
		if (rc > out_end - out_ptr)
			goto out;
		out_ptr += rc;
	}
out:
	*out_ptr = 0;
	return ispipe;
}

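/*
 * Example (hypothetical values): with core_pattern "core-%e-%p-%t", a
 * dump of "myapp" (tgid 4242) at time 1300000000 expands to
 * "core-myapp-4242-1300000000".  With a leading '|', e.g.
 * "|/usr/local/bin/dumper %p", ispipe is returned true and
 * do_coredump() below feeds the expanded remainder to the usermode
 * helper instead of opening a file.
 */
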
static int zap_process(struct task_struct *start, int exit_code)
{
	struct task_struct *t;
	int nr = 0;

	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_exit_code = exit_code;
	start->signal->group_stop_count = 0;

	t = start;
	do {
		if (t != current && t->mm) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	} while_each_thread(start, t);

	return nr;
}

static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
				struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		nr = zap_process(tsk, exit_code);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
	/*
	 * We should find and kill all tasks which use this mm, and we should
	 * count them correctly into ->nr_threads. We don't take tasklist
	 * lock, but this is safe wrt:
	 *
	 * fork:
	 *	None of sub-threads can fork after zap_process(leader). All
	 *	processes which were created before this point should be
	 *	visible to zap_threads() because copy_process() adds the new
	 *	process to the tail of init_task.tasks list, and lock/unlock
	 *	of ->siglock provides a memory barrier.
	 *
	 * exit:
	 *	The caller holds mm->mmap_sem. This means that the task which
	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
	 *	its ->mm.
	 *
	 * de_thread:
	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
	 *	we must see either old or new leader, this does not matter.
	 *	However, it can change p->sighand, so lock_task_sighand(p)
	 *	must be used. Since p->mm != NULL and we hold ->mmap_sem
	 *	it can't fail.
	 *
	 *	Note also that "g" can be the old leader with ->mm == NULL
	 *	and already unhashed and thus removed from ->thread_group.
	 *	This is OK, __unhash_process()->list_del_rcu() does not
	 *	clear the ->next pointer, we will find the new leader via
	 *	next_thread().
	 */
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;
		p = g;
		do {
			if (p->mm) {
				if (unlikely(p->mm == mm)) {
					lock_task_sighand(p, &flags);
					nr += zap_process(p, exit_code);
					unlock_task_sighand(p, &flags);
				}
				break;
			}
		} while_each_thread(g, p);
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}

static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct completion *vfork_done;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	down_write(&mm->mmap_sem);
	if (!mm->core_state)
		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	up_write(&mm->mmap_sem);

	if (unlikely(core_waiters < 0))
		goto fail;

	/*
	 * Make sure nobody is waiting for us to release the VM,
	 * otherwise we can deadlock when we wait on each other
	 */
	vfork_done = tsk->vfork_done;
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	if (core_waiters)
		wait_for_completion(&core_state->startup);
fail:
	return core_waiters;
}

static void coredump_finish(struct mm_struct *mm)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see exit_mm(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}

	mm->core_state = NULL;
}

/*
 * set_dumpable converts traditional three-value dumpable to two flags and
 * stores them into mm->flags.  It modifies lower two bits of mm->flags, but
 * these bits are not changed atomically, so get_dumpable can observe the
 * intermediate state.  To avoid unexpected behavior, get_dumpable returns
 * either the old dumpable value or the new one by paying attention to the
 * order of modifying the bits.
 *
 * dumpable |   mm->flags (binary)
 * old  new | initial interim  final
 * ---------+-----------------------
 *  0    1  | 00      01       01
 *  0    2  | 00      10(*)    11
 *  1    0  | 01      00       00
 *  1    2  | 01      11       11
 *  2    0  | 11      10(*)    00
 *  2    1  | 11      11       01
 *  2    2  | 11      11       11
 *
 * (*) get_dumpable regards interim value of 10 as 11.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
	switch (value) {
	case 0:
		clear_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 1:
		set_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 2:
		set_bit(MMF_DUMP_SECURELY, &mm->flags);
		smp_wmb();
		set_bit(MMF_DUMPABLE, &mm->flags);
		break;
	}
}

static int __get_dumpable(unsigned long mm_flags)
{
	int ret;

	ret = mm_flags & MMF_DUMPABLE_MASK;
	return (ret >= 2) ? 2 : ret;
}

int get_dumpable(struct mm_struct *mm)
{
	return __get_dumpable(mm->flags);
}

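/*
 * Example: the three dumpable values are 0 (never dump), 1 (dump
 * normally) and 2 (dump set-id binaries as root, with O_EXCL; see
 * do_coredump() below).  An administrator opts in to mode 2 with e.g.
 *
 *	echo 2 > /proc/sys/fs/suid_dumpable
 */
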
static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe;

	pipe = file->f_path.dentry->d_inode->i_pipe;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;

	while ((pipe->readers > 1) && (!signal_pending(current))) {
		wake_up_interruptible_sync(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		pipe_wait(pipe);
	}

	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}

/*
 * umh_pipe_setup
 * helper function to customize the process used
 * to collect the core in userspace.  Specifically
 * it sets up a pipe and installs it as fd 0 (stdin)
 * for the process.  Returns 0 on success, or
 * PTR_ERR on failure.
 * Note that it also sets the core limit to 1.  This
 * is a special value that we use to trap recursive
 * core dumps
 */
static int umh_pipe_setup(struct subprocess_info *info)
{
	struct file *rp, *wp;
	struct fdtable *fdt;
	struct coredump_params *cp = (struct coredump_params *)info->data;
	struct files_struct *cf = current->files;

	wp = create_write_pipe(0);
	if (IS_ERR(wp))
		return PTR_ERR(wp);

	rp = create_read_pipe(wp, 0);
	if (IS_ERR(rp)) {
		free_write_pipe(wp);
		return PTR_ERR(rp);
	}

	cp->file = wp;

	sys_close(0);
	fd_install(0, rp);
	spin_lock(&cf->file_lock);
	fdt = files_fdtable(cf);
	FD_SET(0, fdt->open_fds);
	FD_CLR(0, fdt->close_on_exec);
	spin_unlock(&cf->file_lock);

	/* and disallow core files too */
	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return 0;
}

void do_coredump(long signr, int exit_code, struct pt_regs *regs)
{
	struct core_state core_state;
	char corename[CORENAME_MAX_SIZE + 1];
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int flag = 0;
	int ispipe;
	static atomic_t core_dump_count = ATOMIC_INIT(0);
	struct coredump_params cprm = {
		.signr = signr,
		.regs = regs,
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to avoid
		 * inconsistency of bit flags, since this flag is not protected
		 * by any locks.
		 */
		.mm_flags = mm->flags,
	};

	audit_core_dumps(signr);

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (!__get_dumpable(cprm.mm_flags))
		goto fail;

	cred = prepare_creds();
	if (!cred)
		goto fail;
	/*
	 * We cannot trust fsuid as being the "true" uid of the
	 * process nor do we know its entire history. We only know it
	 * was tainted so we dump it as root in mode 2.
	 */
	if (__get_dumpable(cprm.mm_flags) == 2) {
		/* Setuid core dump mode */
		flag = O_EXCL;		/* Stop rewrite attacks */
		cred->fsuid = 0;	/* Dump root private */
	}

	retval = coredump_wait(exit_code, &core_state);
	if (retval < 0)
		goto fail_creds;

	old_cred = override_creds(cred);

	/*
	 * Clear any false indication of pending signals that might
	 * be seen by the filesystem code called to write the core file.
	 */
	clear_thread_flag(TIF_SIGPENDING);

	ispipe = format_corename(corename, signr);

	if (ispipe) {
		int dump_count;
		char **helper_argv;

		if (cprm.limit == 1) {
			/*
			 * Normally core limits are irrelevant to pipes, since
			 * we're not writing to the file system, but we use
			 * cprm.limit of 1 here as a special value. Any
			 * non-1 limit gets set to RLIM_INFINITY below, but
			 * a limit of 0 skips the dump.  This is a consistent
			 * way to catch recursive crashes.  We can still crash
			 * if the core_pattern binary sets RLIM_CORE = !1,
			 * but it runs as root, and can do lots of stupid things.
			 *
			 * Note that we use task_tgid_vnr here to grab the pid
			 * of the process group leader.  That way we get the
			 * right pid if a thread in a multi-threaded
			 * core_pattern process dies.
			 */
			printk(KERN_WARNING
				"Process %d(%s) has RLIMIT_CORE set to 1\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}
		cprm.limit = RLIM_INFINITY;

		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
			       task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_dropcount;
		}

		helper_argv = argv_split(GFP_KERNEL, corename+1, NULL);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_dropcount;
		}

		retval = call_usermodehelper_fns(helper_argv[0], helper_argv,
					NULL, UMH_WAIT_EXEC, umh_pipe_setup,
					NULL, &cprm);
		argv_free(helper_argv);
		if (retval) {
			printk(KERN_INFO "Core dump to %s pipe failed\n",
			       corename);
			goto close_fail;
		}
	} else {
		struct inode *inode;

		if (cprm.limit < binfmt->min_coredump)
			goto fail_unlock;

		cprm.file = filp_open(corename,
				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
				 0600);
		if (IS_ERR(cprm.file))
			goto fail_unlock;

		inode = cprm.file->f_path.dentry->d_inode;
		if (inode->i_nlink > 1)
			goto close_fail;
		if (d_unhashed(cprm.file->f_path.dentry))
			goto close_fail;
		/*
		 * AK: actually i see no reason to not allow this for named
		 * pipes etc, but keep the previous behaviour for now.
		 */
		if (!S_ISREG(inode->i_mode))
			goto close_fail;
		/*
		 * Don't allow local users to get cute and trick others
		 * into coredumping into their pre-created files.
		 */
		if (inode->i_uid != current_fsuid())
			goto close_fail;
		if (!cprm.file->f_op || !cprm.file->f_op->write)
			goto close_fail;
		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
			goto close_fail;
	}

	retval = binfmt->core_dump(&cprm);
	if (retval)
		current->signal->group_exit_code |= 0x80;

	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(cprm.file);
close_fail:
	if (cprm.file)
		filp_close(cprm.file, NULL);
fail_dropcount:
	if (ispipe)
		atomic_dec(&core_dump_count);
fail_unlock:
	coredump_finish(mm);
	revert_creds(old_cred);
fail_creds:
	put_cred(cred);
fail:
	return;
}