 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * #!-checking implemented by tytso.
 *
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/ima.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
char core_pattern[CORENAME_MAX_SIZE] = "core";
int suid_dumpable = 0;

/* The maximal length of core_pattern is also specified in sysctl.c */

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);
int register_binfmt(struct linux_binfmt * fmt)
{
	if (!fmt)
		return -EINVAL;
	write_lock(&binfmt_lock);
	list_add(&fmt->lh, &formats);
	write_unlock(&binfmt_lock);
	return 0;
}

EXPORT_SYMBOL(register_binfmt);

void unregister_binfmt(struct linux_binfmt * fmt)
{
	write_lock(&binfmt_lock);
	list_del(&fmt->lh);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);
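/*
 * Illustrative sketch, not part of the original file: how a module-based
 * binary-format handler would hook into the list above. demo_format and
 * demo_load_binary are hypothetical names used only for this example.
 */
#if 0
static int demo_load_binary(struct linux_binprm *bprm, struct pt_regs *regs);

static struct linux_binfmt demo_format = {
	.module      = THIS_MODULE,
	.load_binary = demo_load_binary,
};

static int __init demo_init(void)
{
	/* adds demo_format to &formats under binfmt_lock */
	return register_binfmt(&demo_format);
}

static void __exit demo_exit(void)
{
	unregister_binfmt(&demo_format);
}
#endif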
static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}
/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that we take the address to load from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
	struct file *file;
	struct nameidata nd;
	char *tmp = getname(library);
	int error = PTR_ERR(tmp);

	if (!IS_ERR(tmp)) {
		error = path_lookup_open(AT_FDCWD, tmp,
					 LOOKUP_FOLLOW, &nd,
					 FMODE_READ|FMODE_EXEC);
		putname(tmp);
	}
	if (error)
		goto out;
	error = -EINVAL;
	if (!S_ISREG(nd.path.dentry->d_inode->i_mode))
		goto exit;

	error = -EACCES;
	if (nd.path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;
	error = inode_permission(nd.path.dentry->d_inode,
				 MAY_READ | MAY_EXEC | MAY_OPEN);
	if (error)
		goto exit;
	error = ima_path_check(&nd.path, MAY_READ | MAY_EXEC | MAY_OPEN);
	if (error)
		goto exit;
	file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	fsnotify_open(file->f_path.dentry);
	error = -ENOEXEC;
	if (file->f_op) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			if (!fmt->load_shlib)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
	fput(file);
out:
	return error;
exit:
	release_open_intent(&nd);
	path_put(&nd.path);
	goto out;
}
#ifdef CONFIG_MMU

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;
	int ret;

#ifdef CONFIG_STACK_GROWSUP
	if (write) {
		ret = expand_stack_downwards(bprm->vma, pos);
		if (ret < 0)
			return NULL;
	}
#endif
	ret = get_user_pages(current, bprm->mm, pos,
			1, write, 1, &page, NULL);
	if (ret <= 0)
		return NULL;
	if (write) {
		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
		struct rlimit *rlim;

		/*
		 * We've historically supported up to 32 pages (ARG_MAX)
		 * of argument strings even with small stacks
		 */
		if (size <= ARG_MAX)
			return page;

		/*
		 * Limit to 1/4-th the stack size for the argv+env strings.
		 * This ensures that:
		 *  - the remaining binfmt code will not run out of stack space,
		 *  - the program will have a reasonable amount of stack left
		 *    to work from.
		 */
		rlim = current->signal->rlim;
		if (size > rlim[RLIMIT_STACK].rlim_cur / 4) {
			put_page(page);
			return NULL;
		}
	}

	return page;
}
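/*
 * Worked example (illustrative, not from the original source): with the
 * common 8 MiB RLIMIT_STACK, the check above starts failing once the
 * accumulated argv+env strings push the stack vma past 8 MiB / 4 = 2 MiB;
 * get_arg_page() then returns NULL and copy_strings() reports -E2BIG.
 */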
static void put_arg_page(struct page *page)
{
	put_page(page);
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}
static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	down_write(&mm->mmap_sem);
	vma->vm_mm = mm;

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;
	vma->vm_flags = VM_STACK_FLAGS;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	err = insert_vm_struct(mm, vma);
	if (err)
		goto err;

	mm->stack_vm = mm->total_vm = 1;
	up_write(&mm->mmap_sem);
	bprm->p = vma->vm_end - sizeof(void *);
	return 0;
err:
	up_write(&mm->mmap_sem);
	bprm->vma = NULL;
	kmem_cache_free(vm_area_cachep, vma);
	return err;
}
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= MAX_ARG_STRLEN;
}
#else

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;

	page = bprm->page[pos / PAGE_SIZE];
	if (!page && write) {
		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
		if (!page)
			return NULL;
		bprm->page[pos / PAGE_SIZE] = page;
	}

	return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
	if (bprm->page[i]) {
		__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}
static void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++)
		free_arg_page(bprm, i);
}
static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
	return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= bprm->p;
}

#endif /* CONFIG_MMU */
/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct. We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values. We'll update
 * them later in setup_arg_pages().
 */
int bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct mm_struct *mm = NULL;

	bprm->mm = mm = mm_alloc();
	err = -ENOMEM;
	if (!mm)
		goto err;

	err = init_new_context(current, mm);
	if (err)
		goto err;

	err = __bprm_mm_init(bprm);
	if (err)
		goto err;

	return 0;

err:
	if (mm) {
		bprm->mm = NULL;
		mmdrop(mm);
	}

	return err;
}
/*
 * count() counts the number of strings in array ARGV.
 */
static int count(char __user * __user * argv, int max)
{
	int i = 0;

	if (argv != NULL) {
		for (;;) {
			char __user * p;

			if (get_user(p, argv))
				return -EFAULT;
			if (!p)
				break;
			argv++;
			if (i++ >= max)
				return -E2BIG;
		}
	}
	return i;
}
/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack. The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, char __user * __user * argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	unsigned long kpos = 0;
	int ret;

	while (argc-- > 0) {
		char __user *str;
		int len;
		unsigned long pos;

		if (get_user(str, argv+argc) ||
				!(len = strnlen_user(str, MAX_ARG_STRLEN))) {
			ret = -EFAULT;
			goto out;
		}

		if (!valid_arg_len(bprm, len)) {
			ret = -E2BIG;
			goto out;
		}

		/* We're going to work our way backwards. */
		pos = bprm->p;
		str += len;
		bprm->p -= len;

		while (len > 0) {
			int offset, bytes_to_copy;

			offset = pos % PAGE_SIZE;
			if (offset == 0)
				offset = PAGE_SIZE;

			bytes_to_copy = offset;
			if (bytes_to_copy > len)
				bytes_to_copy = len;

			offset -= bytes_to_copy;
			pos -= bytes_to_copy;
			str -= bytes_to_copy;
			len -= bytes_to_copy;

			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
				struct page *page;

				page = get_arg_page(bprm, pos, 1);
				if (!page) {
					ret = -E2BIG;
					goto out;
				}

				if (kmapped_page) {
					flush_kernel_dcache_page(kmapped_page);
					kunmap(kmapped_page);
					put_arg_page(kmapped_page);
				}
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
				kpos = pos & PAGE_MASK;
				flush_arg_page(bprm, kpos, kmapped_page);
			}
			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
				ret = -EFAULT;
				goto out;
			}
		}
	}
	ret = 0;
out:
	if (kmapped_page) {
		flush_kernel_dcache_page(kmapped_page);
		kunmap(kmapped_page);
		put_arg_page(kmapped_page);
	}
	return ret;
}
/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, char ** argv, struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	set_fs(KERNEL_DS);
	r = copy_strings(argc, (char __user * __user *)argv, bprm);
	set_fs(oldfs);
	return r;
}

EXPORT_SYMBOL(copy_strings_kernel);
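/*
 * Illustrative use (sketch of the binfmt_script pattern, not from this
 * file): a script handler pushes the interpreter name - a kernel-space
 * string such as i_name - onto the new stack via:
 *
 *	retval = copy_strings_kernel(1, &i_name, bprm);
 */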
#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location. The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges. This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	struct mmu_gather *tlb;

	BUG_ON(new_start > new_end);

	/*
	 * ensure there are no vmas between where we want to go
	 * and where we are
	 */
	if (vma != find_vma(mm, new_start))
		return -EFAULT;

	/*
	 * cover the whole range: [new_start, old_end)
	 */
	vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL);

	/*
	 * move the page tables downwards, on failure we rely on
	 * process cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length))
		return -ENOMEM;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	if (new_end > old_start) {
		/*
		 * when the old and new regions overlap clear from new_end.
		 */
		free_pgd_range(tlb, new_end, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	} else {
		/*
		 * otherwise, clean from old_start; this is done to not touch
		 * the address space in [new_end, old_start), some architectures
		 * have constraints on va-space that make this illegal (IA64) -
		 * for the others it's just a little faster.
		 */
		free_pgd_range(tlb, old_start, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	}
	tlb_finish_mmu(tlb, new_end, old_end);

	/*
	 * shrink the vma to just the new range.
	 */
	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

	return 0;
}
#define EXTRA_STACK_VM_PAGES	20	/* random */

/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long ret;
	unsigned long stack_shift;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = bprm->vma;
	struct vm_area_struct *prev = NULL;
	unsigned long vm_flags;
	unsigned long stack_base;

#ifdef CONFIG_STACK_GROWSUP
	/* Limit stack size to 1GB */
	stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;

	/* Make sure we didn't let the argument array grow too large. */
	if (vma->vm_end - vma->vm_start > stack_base)
		return -ENOMEM;

	stack_base = PAGE_ALIGN(stack_top - stack_base);

	stack_shift = vma->vm_start - stack_base;
	mm->arg_start = bprm->p - stack_shift;
	bprm->p = vma->vm_end - stack_shift;
#else
	stack_top = arch_align_stack(stack_top);
	stack_top = PAGE_ALIGN(stack_top);
	stack_shift = vma->vm_end - stack_top;

	bprm->p -= stack_shift;
	mm->arg_start = bprm->p;
#endif

	if (bprm->loader)
		bprm->loader -= stack_shift;
	bprm->exec -= stack_shift;

	down_write(&mm->mmap_sem);
	vm_flags = VM_STACK_FLAGS;

	/*
	 * Adjust stack execute permissions; explicitly enable for
	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
	 * (arch default) otherwise.
	 */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		vm_flags |= VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		vm_flags &= ~VM_EXEC;
	vm_flags |= mm->def_flags;

	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
			vm_flags);
	if (ret)
		goto out_unlock;
	BUG_ON(prev != vma);

	/* Move stack pages down in memory. */
	if (stack_shift) {
		ret = shift_arg_pages(vma, stack_shift);
		if (ret) {
			up_write(&mm->mmap_sem);
			return ret;
		}
	}

#ifdef CONFIG_STACK_GROWSUP
	stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
#else
	stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
#endif
	ret = expand_stack(vma, stack_base);
	if (ret)
		ret = -EFAULT;

out_unlock:
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(setup_arg_pages);
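/*
 * Illustrative call site (sketch of what a binfmt handler such as
 * binfmt_elf does once it has picked the final stack location):
 *
 *	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
 *				 executable_stack);
 *	if (retval < 0)
 *		goto out_free;
 */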
#endif /* CONFIG_MMU */
struct file *open_exec(const char *name)
{
	struct nameidata nd;
	struct file *file;
	int err;

	err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd,
				FMODE_READ|FMODE_EXEC);
	if (err)
		goto out;

	err = -EACCES;
	if (!S_ISREG(nd.path.dentry->d_inode->i_mode))
		goto out_path_put;

	if (nd.path.mnt->mnt_flags & MNT_NOEXEC)
		goto out_path_put;

	err = inode_permission(nd.path.dentry->d_inode, MAY_EXEC | MAY_OPEN);
	if (err)
		goto out_path_put;
	err = ima_path_check(&nd.path, MAY_EXEC | MAY_OPEN);
	if (err)
		goto out_path_put;

	file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);
	if (IS_ERR(file))
		return file;

	fsnotify_open(file->f_path.dentry);

	err = deny_write_access(file);
	if (err) {
		fput(file);
		goto out;
	}

	return file;

 out_path_put:
	release_open_intent(&nd);
	path_put(&nd.path);
 out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(open_exec);
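/*
 * Illustrative use (sketch): binfmt handlers reuse open_exec() for
 * interpreters, e.g. binfmt_elf opening the ELF interpreter path:
 *
 *	interpreter = open_exec(elf_interpreter);
 *	retval = PTR_ERR(interpreter);
 *	if (IS_ERR(interpreter))
 *		goto out_free_interp;
 */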
int kernel_read(struct file *file, unsigned long offset,
	char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);
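/*
 * Illustrative use (hypothetical caller, not from this file): sniff a
 * binary's magic bytes from kernel space; the set_fs(get_ds()) above is
 * what makes the user-pointer cast legal for this kernel buffer.
 */
#if 0
	char buf[BINPRM_BUF_SIZE];
	int n = kernel_read(file, 0, buf, sizeof(buf));
	if (n >= 4 && memcmp(buf, "\177ELF", 4) == 0)
		printk(KERN_DEBUG "looks like an ELF image\n");
#endif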
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	if (old_mm) {
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec. We must hold mmap_sem around
		 * checking core_state and changing tsk->mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_state)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		mm_update_next_owner(old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}
/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes. (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;
	int count;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 */
	spin_lock_irq(lock);
	if (signal_group_exit(sig)) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		return -EAGAIN;
	}
	sig->group_exit_task = tsk;
	zap_other_threads(tsk);

	/* Account for the thread group leader hanging around: */
	count = thread_group_leader(tsk) ? 1 : 2;
	sig->notify_count = count;
	while (atomic_read(&sig->count) > count) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		struct task_struct *leader = tsk->group_leader;

		sig->notify_count = -1;	/* for exit_notify() */
		for (;;) {
			write_lock_irq(&tasklist_lock);
			if (likely(leader->exit_state))
				break;
			__set_current_state(TASK_UNINTERRUPTIBLE);
			write_unlock_irq(&tasklist_lock);
			schedule();
		}

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead. But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;
		BUG_ON(!same_thread_group(leader, tsk));
		BUG_ON(has_group_leader_pid(tsk));
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 * Note: The old leader also uses this pid until release_task
		 *       is called. Odd but simple and correct.
		 */
		detach_pid(tsk, PIDTYPE_PID);
		tsk->pid = leader->pid;
		attach_pid(tsk, PIDTYPE_PID, task_pid(leader));
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);
		list_replace_rcu(&leader->tasks, &tsk->tasks);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;
		write_unlock_irq(&tasklist_lock);

		release_task(leader);
	}

	sig->group_exit_task = NULL;
	sig->notify_count = 0;

no_thread_group:

	flush_itimer_signals();
	if (atomic_read(&oldsighand->count) != 1) {
		struct sighand_struct *newsighand;
		/*
		 * This ->sighand is shared with the CLONE_SIGHAND
		 * but not CLONE_THREAD task, switch to the new one.
		 */
		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
		if (!newsighand)
			return -ENOMEM;

		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		rcu_assign_pointer(tsk->sighand, newsighand);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}

	BUG_ON(!thread_group_leader(tsk));
	return 0;
}
/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */
static void flush_old_files(struct files_struct * files)
{
	long j = -1;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		fdt = files_fdtable(files);
		if (i >= fdt->max_fds)
			break;
		set = fdt->close_on_exec->fds_bits[j];
		if (!set)
			continue;
		fdt->close_on_exec->fds_bits[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1)
				sys_close(i);
		}
		spin_lock(&files->file_lock);
	}
	spin_unlock(&files->file_lock);
}
, struct task_struct
*tsk
)
941 /* buf must be at least sizeof(tsk->comm) in size */
943 strncpy(buf
, tsk
->comm
, sizeof(tsk
->comm
));
948 void set_task_comm(struct task_struct
*tsk
, char *buf
)
951 strlcpy(tsk
->comm
, buf
, sizeof(tsk
->comm
));
int flush_old_exec(struct linux_binprm * bprm)
{
	char * name;
	int i, ch, retval;
	char tcomm[sizeof(current->comm)];

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	set_mm_exe_file(bprm->mm, bprm->file);

	/*
	 * Release all of the old mmap stuff
	 */
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto out;

	bprm->mm = NULL;		/* We're using it now */

	/* This is the point of no return */
	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current_euid() == current_uid() && current_egid() == current_gid())
		set_dumpable(current->mm, 1);
	else
		set_dumpable(current->mm, suid_dumpable);

	name = bprm->filename;

	/* Copies the binary name from after last slash */
	for (i = 0; (ch = *(name++)) != '\0';) {
		if (ch == '/')
			i = 0; /* overwrite what we wrote */
		else
			if (i < (sizeof(tcomm) - 1))
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
	set_task_comm(current, tcomm);

	current->flags &= ~PF_RANDOMIZE;
	flush_thread();

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc
	 */
	current->mm->task_size = TASK_SIZE;

	/* install the new credentials */
	if (bprm->cred->uid != current_euid() ||
	    bprm->cred->gid != current_egid()) {
		current->pdeath_signal = 0;
	} else if (file_permission(bprm->file, MAY_READ) ||
		   bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP) {
		set_dumpable(current->mm, suid_dumpable);
	}

	current->personality &= ~bprm->per_clear;

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	flush_old_files(current->files);

	return 0;

out:
	return retval;
}

EXPORT_SYMBOL(flush_old_exec);
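/*
 * Illustrative call site (sketch): a load_binary implementation commits
 * to the new image roughly like this, after which failure means killing
 * the process rather than returning -ENOEXEC to try another handler:
 *
 *	retval = flush_old_exec(bprm);
 *	if (retval)
 *		goto out_free_dentry;
 */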
/*
 * install the new credentials for this executable
 */
void install_exec_creds(struct linux_binprm *bprm)
{
	security_bprm_committing_creds(bprm);

	commit_creds(bprm->cred);
	bprm->cred = NULL;

	/* cred_exec_mutex must be held at least to this point to prevent
	 * ptrace_attach() from altering our determination of the task's
	 * credentials; any time after this it may be unlocked */

	security_bprm_committed_creds(bprm);
}
EXPORT_SYMBOL(install_exec_creds);
/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold current->cred_exec_mutex to protect against
 *   PTRACE_ATTACH
 */
int check_unsafe_exec(struct linux_binprm *bprm)
{
	struct task_struct *p = current, *t;
	unsigned long flags;
	unsigned n_fs;
	int res = 0;

	bprm->unsafe = tracehook_unsafe_exec(p);

	n_fs = 1;
	write_lock(&p->fs->lock);
	lock_task_sighand(p, &flags);
	for (t = next_thread(p); t != p; t = next_thread(t)) {
		if (t->fs == p->fs)
			n_fs++;
	}

	if (p->fs->users > n_fs) {
		bprm->unsafe |= LSM_UNSAFE_SHARE;
	} else {
		if (p->fs->in_exec)
			res = -EAGAIN;
		p->fs->in_exec = 1;
	}

	unlock_task_sighand(p, &flags);
	write_unlock(&p->fs->lock);

	return res;
}
/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	int mode;
	struct inode * inode = bprm->file->f_path.dentry->d_inode;
	int retval;

	mode = inode->i_mode;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	/* clear any previous set[ug]id data from a previous binary */
	bprm->cred->euid = current_euid();
	bprm->cred->egid = current_egid();

	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->euid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->egid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set_creds(bprm);
	if (retval)
		return retval;
	bprm->cred_prepared = 1;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);
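/*
 * Illustrative re-use for a binary chain (sketch of the binfmt_script
 * pattern): after swapping bprm->file to the interpreter, a handler
 * refreshes the creds and header buffer, then restarts the search:
 *
 *	retval = prepare_binprm(bprm);
 *	if (retval < 0)
 *		return retval;
 *	return search_binary_handler(bprm, regs);
 */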
/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
	int ret = 0;
	unsigned long offset;
	char *kaddr;
	struct page *page;

	if (!bprm->argc)
		return 0;

	do {
		offset = bprm->p & ~PAGE_MASK;
		page = get_arg_page(bprm, bprm->p, 0);
		if (!page) {
			ret = -EFAULT;
			goto out;
		}
		kaddr = kmap_atomic(page, KM_USER0);

		for (; offset < PAGE_SIZE && kaddr[offset];
				offset++, bprm->p++)
			;

		kunmap_atomic(kaddr, KM_USER0);
		put_arg_page(page);

		if (offset == PAGE_SIZE)
			free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
	} while (offset == PAGE_SIZE);

	bprm->p++;
	bprm->argc--;
	ret = 0;

out:
	return ret;
}
EXPORT_SYMBOL(remove_arg_zero);
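/*
 * Illustrative use (sketch, binfmt_script-style): drop the original
 * argv[0] before substituting the interpreter's name:
 *
 *	retval = remove_arg_zero(bprm);
 *	if (retval)
 *		return retval;
 *	retval = copy_strings_kernel(1, &bprm->interp, bprm);
 */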
/*
 * cycle the list of binary formats handler, until one recognizes the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	unsigned int depth = bprm->recursion_depth;
	int try, retval;
	struct linux_binfmt *fmt;

	retval = security_bprm_check(bprm);
	if (retval)
		return retval;
	retval = ima_bprm_check(bprm);
	if (retval)
		return retval;

	/* kernel module loader fixup */
	/* so we don't try to run modprobe in kernel space. */
	set_fs(USER_DS);

	retval = audit_bprm(bprm);
	if (retval)
		return retval;

	retval = -ENOENT;
	for (try=0; try<2; try++) {
		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			/*
			 * Restore the depth counter to its starting value
			 * in this call, so we don't have to rely on every
			 * load_binary function to restore it on return.
			 */
			bprm->recursion_depth = depth;
			if (retval >= 0) {
				if (depth == 0)
					tracehook_report_exec(fmt, bprm, regs);
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				proc_exec_connector(current);
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
		}
		read_unlock(&binfmt_lock);
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
#ifdef CONFIG_MODULES
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#endif
		}
	}
	return retval;
}

EXPORT_SYMBOL(search_binary_handler);
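/*
 * Illustrative note (not from the original source): the request_module()
 * above builds a module alias from the two bytes at buf[2..3]. On a
 * little-endian machine, a header beginning 0x00 0x00 0x0b 0x01 would
 * request "binfmt-010b", giving an unloaded handler module one chance
 * before the exec finally fails with -ENOEXEC.
 */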
void free_bprm(struct linux_binprm *bprm)
{
	free_arg_pages(bprm);
	if (bprm->cred)
		abort_creds(bprm->cred);
	kfree(bprm);
}
/*
 * sys_execve() executes a new program.
 */
int do_execve(char * filename,
	char __user *__user *argv,
	char __user *__user *envp,
	struct pt_regs * regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	struct files_struct *displaced;
	int retval;

	retval = unshare_files(&displaced);
	if (retval)
		goto out_ret;

	retval = -ENOMEM;
	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_files;

	retval = mutex_lock_interruptible(&current->cred_exec_mutex);
	if (retval < 0)
		goto out_free;
	current->in_execve = 1;

	retval = -ENOMEM;
	bprm->cred = prepare_exec_creds();
	if (!bprm->cred)
		goto out_unlock;

	retval = check_unsafe_exec(bprm);
	if (retval)
		goto out_unlock;

	file = open_exec(filename);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_unmark;

	sched_exec();

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;

	retval = bprm_mm_init(bprm);
	if (retval)
		goto out_file;

	bprm->argc = count(argv, MAX_ARG_STRINGS);
	if ((retval = bprm->argc) < 0)
		goto out;

	bprm->envc = count(envp, MAX_ARG_STRINGS);
	if ((retval = bprm->envc) < 0)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;

	current->flags &= ~PF_KTHREAD;
	retval = search_binary_handler(bprm, regs);
	if (retval < 0)
		goto out;

	/* execve succeeded */
	write_lock(&current->fs->lock);
	current->fs->in_exec = 0;
	write_unlock(&current->fs->lock);
	current->in_execve = 0;
	mutex_unlock(&current->cred_exec_mutex);
	acct_update_integrals(current);
	free_bprm(bprm);
	if (displaced)
		put_files_struct(displaced);
	return retval;

out:
	if (bprm->mm)
		mmput(bprm->mm);

out_file:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}
out_unmark:
	write_lock(&current->fs->lock);
	current->fs->in_exec = 0;
	write_unlock(&current->fs->lock);

out_unlock:
	current->in_execve = 0;
	mutex_unlock(&current->cred_exec_mutex);

out_free:
	free_bprm(bprm);

out_files:
	if (displaced)
		reset_files_struct(displaced);
out_ret:
	return retval;
}
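/*
 * Illustrative sketch (arch code, not this file): a typical sys_execve
 * wrapper fetches the filename and forwards its register frame here.
 */
#if 0
	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
#endif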
int set_binfmt(struct linux_binfmt *new)
{
	struct linux_binfmt *old = current->binfmt;

	if (new) {
		if (!try_module_get(new->module))
			return -1;
	}
	current->binfmt = new;
	if (old)
		module_put(old->module);
	return 0;
}

EXPORT_SYMBOL(set_binfmt);
/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
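/*
 * Example (illustrative): with core_pattern = "/tmp/core.%e.%p", a crash
 * of "bash" with tgid 4242 produces "/tmp/core.bash.4242". A leading '|'
 * instead marks the pattern as a pipe to a user-mode helper, which is
 * why the return value below is "ispipe".
 */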
static int format_corename(char *corename, long signr)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	char *out_ptr = corename;
	char *const out_end = corename + CORENAME_MAX_SIZE;
	int rc;
	int pid_in_pattern = 0;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (out_ptr == out_end)
				goto out;
			*out_ptr++ = *pat_ptr++;
		} else {
			switch (*++pat_ptr) {
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				if (out_ptr == out_end)
					goto out;
				*out_ptr++ = '%';
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", task_tgid_vnr(current));
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* uid */
			case 'u':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", cred->uid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* gid */
			case 'g':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", cred->gid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* signal that caused the coredump */
			case 's':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%ld", signr);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", tv.tv_sec);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", utsname()->nodename);
				up_read(&uts_sem);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* executable */
			case 'e':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", current->comm);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* core limit size */
			case 'c':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", current->signal->rlim[RLIMIT_CORE].rlim_cur);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			default:
				break;
			}
			++pat_ptr;
		}
	}
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		rc = snprintf(out_ptr, out_end - out_ptr,
			      ".%d", task_tgid_vnr(current));
		if (rc > out_end - out_ptr)
			goto out;
		out_ptr += rc;
	}
out:
	*out_ptr = 0;
	return ispipe;
}
static int zap_process(struct task_struct *start)
{
	struct task_struct *t;
	int nr = 0;

	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_stop_count = 0;

	t = start;
	do {
		if (t != current && t->mm) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	} while_each_thread(start, t);

	return nr;
}
static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
				struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		tsk->signal->group_exit_code = exit_code;
		nr = zap_process(tsk);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
	/*
	 * We should find and kill all tasks which use this mm, and we should
	 * count them correctly into ->nr_threads. We don't take tasklist
	 * lock, but this is safe wrt:
	 *
	 * fork:
	 *	None of sub-threads can fork after zap_process(leader). All
	 *	processes which were created before this point should be
	 *	visible to zap_threads() because copy_process() adds the new
	 *	process to the tail of init_task.tasks list, and lock/unlock
	 *	of ->siglock provides a memory barrier.
	 *
	 * do_exit:
	 *	The caller holds mm->mmap_sem. This means that the task which
	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
	 *	its ->mm.
	 *
	 * de_thread:
	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
	 *	we must see either old or new leader, this does not matter.
	 *	However, it can change p->sighand, so lock_task_sighand(p)
	 *	must be used. Since p->mm != NULL and we hold ->mmap_sem
	 *	it can't fail.
	 *
	 *	Note also that "g" can be the old leader with ->mm == NULL
	 *	and already unhashed and thus removed from ->thread_group.
	 *	This is OK, __unhash_process()->list_del_rcu() does not
	 *	clear the ->next pointer, we will find the new leader via
	 *	next_thread().
	 */
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;
		p = g;
		do {
			if (p->mm) {
				if (unlikely(p->mm == mm)) {
					lock_task_sighand(p, &flags);
					nr += zap_process(p);
					unlock_task_sighand(p, &flags);
				}
				break;
			}
		} while_each_thread(g, p);
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}
static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct completion *vfork_done;
	int core_waiters;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;
	core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	up_write(&mm->mmap_sem);

	if (unlikely(core_waiters < 0))
		goto fail;

	/*
	 * Make sure nobody is waiting for us to release the VM,
	 * otherwise we can deadlock when we wait on each other
	 */
	vfork_done = tsk->vfork_done;
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	if (core_waiters)
		wait_for_completion(&core_state->startup);
fail:
	return core_waiters;
}
static void coredump_finish(struct mm_struct *mm)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see exit_mm(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}

	mm->core_state = NULL;
}
/*
 * set_dumpable converts traditional three-value dumpable to two flags and
 * stores them into mm->flags. It modifies lower two bits of mm->flags, but
 * these bits are not changed atomically. So get_dumpable can observe the
 * intermediate state. To avoid this, get_dumpable returns either the old
 * dumpable or the new one by paying attention to the order of modifying
 * the bits.
 *
 * dumpable |   mm->flags (binary)
 * old  new | initial interim  final
 * ---------+-----------------------
 *  0    1  | 00      01       01
 *  0    2  | 00      10(*)    11
 *  1    0  | 01      00       00
 *  1    2  | 01      11       11
 *  2    0  | 11      10(*)    00
 *  2    1  | 11      11       01
 *
 * (*) get_dumpable regards interim value of 10 as 11.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
	switch (value) {
	case 0:
		clear_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 1:
		set_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 2:
		set_bit(MMF_DUMP_SECURELY, &mm->flags);
		smp_wmb();
		set_bit(MMF_DUMPABLE, &mm->flags);
		break;
	}
}
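/*
 * Illustrative consequence of the ordering above: set_dumpable(mm, 2)
 * sets MMF_DUMP_SECURELY before MMF_DUMPABLE, so a racing get_dumpable()
 * sees either the final 11 or the interim 10, both reported as 2 - it
 * can never observe a state more permissive than the old or new value.
 */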
int get_dumpable(struct mm_struct *mm)
{
	int ret;

	ret = mm->flags & 0x3;
	return (ret >= 2) ? 2 : ret;
}
void do_coredump(long signr, int exit_code, struct pt_regs *regs)
{
	struct core_state core_state;
	char corename[CORENAME_MAX_SIZE + 1];
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	struct inode * inode;
	struct file * file;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int flag = 0;
	int ispipe = 0;
	unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
	char **helper_argv = NULL;
	int helper_argc = 0;
	char *delimit;

	audit_core_dumps(signr);

	binfmt = current->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;

	cred = prepare_creds();
	if (!cred) {
		retval = -ENOMEM;
		goto fail;
	}
	down_write(&mm->mmap_sem);
	/*
	 * If another thread got here first, or we are not dumpable, bail out.
	 */
	if (mm->core_state || !get_dumpable(mm)) {
		up_write(&mm->mmap_sem);
		put_cred(cred);
		goto fail;
	}

	/*
	 * We cannot trust fsuid as being the "true" uid of the
	 * process nor do we know its entire history. We only know it
	 * was tainted so we dump it as root in mode 2.
	 */
	if (get_dumpable(mm) == 2) {	/* Setuid core dump mode */
		flag = O_EXCL;		/* Stop rewrite attacks */
		cred->fsuid = 0;	/* Dump root private */
	}

	retval = coredump_wait(exit_code, &core_state);
	if (retval < 0) {
		put_cred(cred);
		goto fail;
	}

	old_cred = override_creds(cred);

	/*
	 * Clear any false indication of pending signals that might
	 * be seen by the filesystem code called to write the core file.
	 */
	clear_thread_flag(TIF_SIGPENDING);

	/*
	 * lock_kernel() because format_corename() is controlled by sysctl, which
	 * uses lock_kernel()
	 */
	lock_kernel();
	ispipe = format_corename(corename, signr);
	unlock_kernel();
	/*
	 * Don't bother to check the RLIMIT_CORE value if core_pattern points
	 * to a pipe. Since we're not writing directly to the filesystem
	 * RLIMIT_CORE doesn't really apply, as no actual core file will be
	 * created unless the pipe reader chooses to write out the core file
	 * at which point file size limits and permissions will be imposed
	 * as it does with any other process
	 */
	if ((!ispipe) && (core_limit < binfmt->min_coredump))
		goto fail_unlock;

	if (ispipe) {
		helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_unlock;
		}
		/* Terminate the string before the first option */
		delimit = strchr(corename, ' ');
		if (delimit)
			*delimit = '\0';
		delimit = strrchr(helper_argv[0], '/');
		if (delimit)
			delimit++;
		else
			delimit = helper_argv[0];
		if (!strcmp(delimit, current->comm)) {
			printk(KERN_NOTICE "Recursive core dump detected, "
					"aborting\n");
			goto fail_unlock;
		}

		core_limit = RLIM_INFINITY;

		/* SIGPIPE can happen, but it's just never processed */
		if (call_usermodehelper_pipe(corename+1, helper_argv, NULL,
				&file)) {
			printk(KERN_INFO "Core dump to %s pipe failed\n",
			       corename);
			goto fail_unlock;
		}
	} else
		file = filp_open(corename,
				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
				 0600);
	if (IS_ERR(file))
		goto fail_unlock;
	inode = file->f_path.dentry->d_inode;
	if (inode->i_nlink > 1)
		goto close_fail;	/* multiple links - don't dump */
	if (!ispipe && d_unhashed(file->f_path.dentry))
		goto close_fail;

	/* AK: actually i see no reason to not allow this for named pipes etc.,
	   but keep the previous behaviour for now. */
	if (!ispipe && !S_ISREG(inode->i_mode))
		goto close_fail;
	/*
	 * Don't allow local users to get cute and trick others to coredump
	 * into their pre-created files:
	 */
	if (inode->i_uid != current_fsuid())
		goto close_fail;
	if (!file->f_op)
		goto close_fail;
	if (!file->f_op->write)
		goto close_fail;
	if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0)
		goto close_fail;

	retval = binfmt->core_dump(signr, regs, file, core_limit);

	if (retval)
		current->signal->group_exit_code |= 0x80;
close_fail:
	filp_close(file, NULL);
fail_unlock:
	if (helper_argv)
		argv_free(helper_argv);

	revert_creds(old_cred);
	put_cred(cred);
	coredump_finish(mm);
fail:
	return;
}