/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * #!-checking implemented by tytso.
 *
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/ima.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>

/* for /sbin/loader handling in search_binary_handler() */
#include <linux/a.out.h>
char core_pattern[CORENAME_MAX_SIZE] = "core";
int suid_dumpable = 0;

/* The maximal length of core_pattern is also specified in sysctl.c */

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);
int register_binfmt(struct linux_binfmt * fmt)
{
	write_lock(&binfmt_lock);
	list_add(&fmt->lh, &formats);
	write_unlock(&binfmt_lock);
	return 0;
}

EXPORT_SYMBOL(register_binfmt);

void unregister_binfmt(struct linux_binfmt * fmt)
{
	write_lock(&binfmt_lock);
	list_del(&fmt->lh);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);

static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}
/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that we take the address to load from the file itself.
 */
asmlinkage long sys_uselib(const char __user * library)
{
	struct file *file;
	struct nameidata nd;
	char *tmp = getname(library);
	int error = PTR_ERR(tmp);

	error = path_lookup_open(AT_FDCWD, tmp,
				LOOKUP_FOLLOW, &nd,
				FMODE_READ|FMODE_EXEC);
	if (!S_ISREG(nd.path.dentry->d_inode->i_mode))
		goto exit;

	if (nd.path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	error = vfs_permission(&nd, MAY_READ | MAY_EXEC | MAY_OPEN);

	error = ima_path_check(&nd.path, MAY_READ | MAY_EXEC | MAY_OPEN);

	file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);
	error = PTR_ERR(file);
	struct linux_binfmt * fmt;

	read_lock(&binfmt_lock);
	list_for_each_entry(fmt, &formats, lh) {
		if (!fmt->load_shlib)
			continue;
		if (!try_module_get(fmt->module))
			continue;
		read_unlock(&binfmt_lock);
		error = fmt->load_shlib(file);
		read_lock(&binfmt_lock);
		put_binfmt(fmt);
		if (error != -ENOEXEC)
			break;
	}
	read_unlock(&binfmt_lock);

exit:
	release_open_intent(&nd);
#ifdef CONFIG_MMU

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;
	int ret;

#ifdef CONFIG_STACK_GROWSUP
	if (write) {
		ret = expand_stack_downwards(bprm->vma, pos);
		if (ret < 0)
			return NULL;
	}
#endif
	ret = get_user_pages(current, bprm->mm, pos,
			1, write, 1, &page, NULL);

	if (write) {
		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
		struct rlimit *rlim;

		/*
		 * We've historically supported up to 32 pages (ARG_MAX)
		 * of argument strings even with small stacks
		 */
		if (size <= ARG_MAX)
			return page;

		/*
		 * Limit to 1/4-th the stack size for the argv+env strings.
		 * This ensures that:
		 *  - we can at least run a program with a reasonably long argv,
		 *  - the remaining binfmt code will not run out of stack space,
		 *  - the program will have a reasonable amount of stack left
		 *    to work from.
		 */
		rlim = current->signal->rlim;
		if (size > rlim[RLIMIT_STACK].rlim_cur / 4) {
			put_page(page);
			return NULL;
		}
	}

	return page;
}
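/*
 * Editor's worked example (not in the original source): with the common
 * RLIMIT_STACK soft limit of 8 MiB, the check in get_arg_page() above
 * caps the argv+env strings at 8 MiB / 4 = 2 MiB, while the historical
 * ARG_MAX floor of 32 pages (128 KiB with 4 KiB pages) is always
 * allowed regardless of the limit.
 */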
static void put_arg_page(struct page *page)
{
	put_page(page);
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}
static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err = -ENOMEM;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		goto err;

	down_write(&mm->mmap_sem);
	vma->vm_mm = mm;

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;

	vma->vm_flags = VM_STACK_FLAGS;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	err = insert_vm_struct(mm, vma);
	if (err) {
		up_write(&mm->mmap_sem);
		goto err;
	}

	mm->stack_vm = mm->total_vm = 1;
	up_write(&mm->mmap_sem);

	bprm->p = vma->vm_end - sizeof(void *);

	return 0;

err:
	if (vma) {
		bprm->vma = NULL;
		kmem_cache_free(vm_area_cachep, vma);
	}

	return err;
}
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= MAX_ARG_STRLEN;
}

#else
static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;

	page = bprm->page[pos / PAGE_SIZE];
	if (!page && write) {
		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
		if (!page)
			return NULL;
		bprm->page[pos / PAGE_SIZE] = page;
	}

	return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
	if (bprm->page[i]) {
		__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

static void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++)
		free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
	return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= bprm->p;
}

#endif /* CONFIG_MMU */
/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct. We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values. We'll update
 * them later in setup_arg_pages().
 */
int bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct mm_struct *mm = NULL;

	bprm->mm = mm = mm_alloc();

	err = init_new_context(current, mm);

	err = __bprm_mm_init(bprm);
/*
 * count() counts the number of strings in array ARGV.
 */
static int count(char __user * __user * argv, int max)
		if (get_user(p, argv))
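/*
 * Editor's sketch (not the original body, which is largely elided above):
 * the surviving get_user() fragment suggests the usual shape of count(),
 * walking the user-space argv array until its NULL terminator and failing
 * with -E2BIG once more than max strings are seen. The name count_sketch
 * is hypothetical.
 */
#if 0	/* illustrative only */
static int count_sketch(char __user * __user * argv, int max)
{
	int i = 0;

	if (argv != NULL) {
		for (;;) {
			char __user * p;

			if (get_user(p, argv))
				return -EFAULT;
			if (!p)
				break;
			argv++;
			if (i++ >= max)
				return -E2BIG;
		}
	}
	return i;
}
#endif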
/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack. The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, char __user * __user * argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	unsigned long kpos = 0;
	int ret;

	while (argc-- > 0) {
		const char __user *str;
		int len;
		unsigned long pos;

		if (get_user(str, argv+argc) ||
		    !(len = strnlen_user(str, MAX_ARG_STRLEN))) {
			ret = -EFAULT;
			goto out;
		}

		if (!valid_arg_len(bprm, len)) {
			ret = -E2BIG;
			goto out;
		}

		/* We're going to work our way backwards. */
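		/*
		 * Editor's illustration (not in the original source):
		 * bprm->p grows downward, so the strings land back to
		 * front. For argv = { "ls", "-l" }, "-l\0" is copied
		 * first, then "ls\0" below it; ascending memory finally
		 * reads "ls\0-l\0" with bprm->p at the 'l' of "ls".
		 */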
		pos = bprm->p;
		str += len;
		bprm->p -= len;

		while (len > 0) {
			int offset, bytes_to_copy;

			offset = pos % PAGE_SIZE;
			if (offset == 0)
				offset = PAGE_SIZE;

			bytes_to_copy = offset;
			if (bytes_to_copy > len)
				bytes_to_copy = len;

			offset -= bytes_to_copy;
			pos -= bytes_to_copy;
			str -= bytes_to_copy;
			len -= bytes_to_copy;

			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
				struct page *page;

				page = get_arg_page(bprm, pos, 1);

				if (kmapped_page) {
					flush_kernel_dcache_page(kmapped_page);
					kunmap(kmapped_page);
					put_arg_page(kmapped_page);
				}
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
				kpos = pos & PAGE_MASK;
				flush_arg_page(bprm, kpos, kmapped_page);
			}
			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
				ret = -EFAULT;
				goto out;
			}
		}
	}
	ret = 0;
out:
	if (kmapped_page) {
		flush_kernel_dcache_page(kmapped_page);
		kunmap(kmapped_page);
		put_arg_page(kmapped_page);
	}
	return ret;
}
/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, char **argv, struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	set_fs(KERNEL_DS);
	r = copy_strings(argc, (char __user * __user *)argv, bprm);
	set_fs(oldfs);
	return r;
}
EXPORT_SYMBOL(copy_strings_kernel);

#ifdef CONFIG_MMU
/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location. The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges. This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 *
 * (A worked example of the address arithmetic is sketched below.)
 */
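/*
 * Editor's worked example of the arithmetic (not in the original source):
 * suppose the temporary stack vma is [0x7ffff000, 0x80000000) and the
 * binfmt code requests shift = 0x2000. Then:
 *
 *	new_start = old_start - shift = 0x7fffd000
 *	new_end   = old_end   - shift = 0x7fffe000
 *
 * Step 2 grows the vma to [0x7fffd000, 0x80000000), step 3 moves the page
 * tables down by 0x2000, and step 5 shrinks the vma to the final range
 * [0x7fffd000, 0x7fffe000).
 */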
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	struct mmu_gather *tlb;

	BUG_ON(new_start > new_end);
	/*
	 * ensure there are no vmas between where we want to go
	 * and where we are
	 */
	if (vma != find_vma(mm, new_start))
		return -EFAULT;

	/*
	 * cover the whole range: [new_start, old_end)
	 */
	vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL);

	/*
	 * move the page tables downwards, on failure we rely on
	 * process cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length))
		return -ENOMEM;

	tlb = tlb_gather_mmu(mm, 0);
	if (new_end > old_start) {
		/*
		 * when the old and new regions overlap clear from new_end.
		 */
		free_pgd_range(tlb, new_end, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	} else {
		/*
		 * otherwise, clean from old_start; this is done to not touch
		 * the address space in [new_end, old_start), some architectures
		 * have constraints on va-space that make this illegal (IA64) -
		 * for the others it's just a little faster.
		 */
		free_pgd_range(tlb, old_start, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	}
	tlb_finish_mmu(tlb, new_end, old_end);

	/*
	 * shrink the vma to just the new range.
	 */
	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

	return 0;
}
#define EXTRA_STACK_VM_PAGES	20	/* random */

/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long ret;
	unsigned long stack_shift;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = bprm->vma;
	struct vm_area_struct *prev = NULL;
	unsigned long vm_flags;
	unsigned long stack_base;
#ifdef CONFIG_STACK_GROWSUP
	/* Limit stack size to 1GB */
	stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;

	/* Make sure we didn't let the argument array grow too large. */
	if (vma->vm_end - vma->vm_start > stack_base)
		return -ENOMEM;

	stack_base = PAGE_ALIGN(stack_top - stack_base);

	stack_shift = vma->vm_start - stack_base;
	mm->arg_start = bprm->p - stack_shift;
	bprm->p = vma->vm_end - stack_shift;
#else
	stack_top = arch_align_stack(stack_top);
	stack_top = PAGE_ALIGN(stack_top);
	stack_shift = vma->vm_end - stack_top;

	bprm->p -= stack_shift;
	mm->arg_start = bprm->p;
#endif
	if (bprm->loader)
		bprm->loader -= stack_shift;
	bprm->exec -= stack_shift;

	down_write(&mm->mmap_sem);
	vm_flags = VM_STACK_FLAGS;

	/*
	 * Adjust stack execute permissions; explicitly enable for
	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
	 * (arch default) otherwise.
	 */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		vm_flags |= VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		vm_flags &= ~VM_EXEC;
	vm_flags |= mm->def_flags;

	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
			vm_flags);
	if (ret)
		goto out_unlock;
	/* Move stack pages down in memory. */
	if (stack_shift) {
		ret = shift_arg_pages(vma, stack_shift);
		if (ret) {
			up_write(&mm->mmap_sem);
			return ret;
		}
	}

#ifdef CONFIG_STACK_GROWSUP
	stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
#else
	stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
#endif
	ret = expand_stack(vma, stack_base);

out_unlock:
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(setup_arg_pages);

#endif /* CONFIG_MMU */
struct file *open_exec(const char *name)
{
	struct nameidata nd;
	struct file *file;
	int err;

	err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd,
				FMODE_READ|FMODE_EXEC);

	if (!S_ISREG(nd.path.dentry->d_inode->i_mode))
		goto out_path_put;

	if (nd.path.mnt->mnt_flags & MNT_NOEXEC)
		goto out_path_put;

	err = vfs_permission(&nd, MAY_EXEC | MAY_OPEN);

	err = ima_path_check(&nd.path, MAY_EXEC | MAY_OPEN);

	file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);

	err = deny_write_access(file);

	return file;

out_path_put:
	release_open_intent(&nd);
	path_put(&nd.path);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(open_exec);
int kernel_read(struct file *file, unsigned long offset,
	char *addr, unsigned long count)
{
	mm_segment_t old_fs = get_fs();
	loff_t pos = offset;
	int result;

	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);
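/*
 * Editor's usage note (not in the original source): prepare_binprm()
 * below uses this helper to read the first BINPRM_BUF_SIZE (128) bytes
 * of the executable for header sniffing:
 *
 *	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
 *	retval = kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
 */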
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	if (old_mm) {
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec. We must hold mmap_sem around
		 * checking core_state and changing tsk->mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_state)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		mm_update_next_owner(old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}
/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes. (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;
	int count;

	if (thread_group_empty(tsk))
		goto no_thread_group;
	/*
	 * Kill all other threads in the thread group.
	 */
	spin_lock_irq(lock);
	if (signal_group_exit(sig)) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		return -EAGAIN;
	}
	sig->group_exit_task = tsk;
	zap_other_threads(tsk);

	/* Account for the thread group leader hanging around: */
	count = thread_group_leader(tsk) ? 1 : 2;
	sig->notify_count = count;
	while (atomic_read(&sig->count) > count) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);
	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		struct task_struct *leader = tsk->group_leader;

		sig->notify_count = -1;	/* for exit_notify() */
		for (;;) {
			write_lock_irq(&tasklist_lock);
			if (likely(leader->exit_state))
				break;
			__set_current_state(TASK_UNINTERRUPTIBLE);
			write_unlock_irq(&tasklist_lock);
			schedule();
		}

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead. But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;

		BUG_ON(!same_thread_group(leader, tsk));
		BUG_ON(has_group_leader_pid(tsk));
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 * Note: The old leader also uses this pid until release_task
		 *       is called. Odd but simple and correct.
		 */
		detach_pid(tsk, PIDTYPE_PID);
		tsk->pid = leader->pid;
		attach_pid(tsk, PIDTYPE_PID, task_pid(leader));
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);
		list_replace_rcu(&leader->tasks, &tsk->tasks);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;
		write_unlock_irq(&tasklist_lock);

		release_task(leader);
	}

	sig->group_exit_task = NULL;
	sig->notify_count = 0;
no_thread_group:
	flush_itimer_signals();

	if (atomic_read(&oldsighand->count) != 1) {
		struct sighand_struct *newsighand;
		/*
		 * This ->sighand is shared with the CLONE_SIGHAND
		 * but not CLONE_THREAD task, switch to the new one.
		 */
		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
		if (!newsighand)
			return -ENOMEM;

		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		rcu_assign_pointer(tsk->sighand, newsighand);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}

	BUG_ON(!thread_group_leader(tsk));
	return 0;
}
/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started.
 */
static void flush_old_files(struct files_struct * files)
{
	long j = -1;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		fdt = files_fdtable(files);
		if (i >= fdt->max_fds)
			break;
		set = fdt->close_on_exec->fds_bits[j];
		if (!set)
			continue;
		fdt->close_on_exec->fds_bits[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1)
				sys_close(i);
		}
		spin_lock(&files->file_lock);
	}
	spin_unlock(&files->file_lock);
}
char *get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	strncpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
	return buf;
}

void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
}
int flush_old_exec(struct linux_binprm * bprm)
{
	char * name;
	int i, ch, retval;
	char tcomm[sizeof(current->comm)];

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);

	set_mm_exe_file(bprm->mm, bprm->file);

	/*
	 * Release all of the old mmap stuff
	 */
	retval = exec_mmap(bprm->mm);

	bprm->mm = NULL;		/* We're using it now */

	/* This is the point of no return */
	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current_euid() == current_uid() && current_egid() == current_gid())
		set_dumpable(current->mm, 1);
	else
		set_dumpable(current->mm, suid_dumpable);

	name = bprm->filename;

	/* Copies the binary name from after last slash */
	for (i = 0; (ch = *(name++)) != '\0';) {
		if (ch == '/')
			i = 0; /* overwrite what we wrote */
		else
			if (i < (sizeof(tcomm) - 1))
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
	set_task_comm(current, tcomm);

	current->flags &= ~PF_RANDOMIZE;
	flush_thread();

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc
	 */
	current->mm->task_size = TASK_SIZE;

	/* install the new credentials */
	if (bprm->cred->uid != current_euid() ||
	    bprm->cred->gid != current_egid()) {
		current->pdeath_signal = 0;
	} else if (file_permission(bprm->file, MAY_READ) ||
		   bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP) {
		set_dumpable(current->mm, suid_dumpable);
	}

	current->personality &= ~bprm->per_clear;

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	flush_old_files(current->files);

	return 0;
}

EXPORT_SYMBOL(flush_old_exec);
/*
 * install the new credentials for this executable
 */
void install_exec_creds(struct linux_binprm *bprm)
{
	security_bprm_committing_creds(bprm);

	commit_creds(bprm->cred);
	bprm->cred = NULL;

	/* cred_exec_mutex must be held at least to this point to prevent
	 * ptrace_attach() from altering our determination of the task's
	 * credentials; any time after this it may be unlocked */

	security_bprm_committed_creds(bprm);
}
EXPORT_SYMBOL(install_exec_creds);
/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold current->cred_exec_mutex to protect against
 *   PTRACE_ATTACH
 */
void check_unsafe_exec(struct linux_binprm *bprm)
{
	struct task_struct *p = current;

	bprm->unsafe = tracehook_unsafe_exec(p);

	if (atomic_read(&p->fs->count) > 1 ||
	    atomic_read(&p->files->count) > 1 ||
	    atomic_read(&p->sighand->count) > 1)
		bprm->unsafe |= LSM_UNSAFE_SHARE;
}
/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	umode_t mode;
	int retval;
	struct inode * inode = bprm->file->f_path.dentry->d_inode;

	mode = inode->i_mode;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	/* clear any previous set[ug]id data from a previous binary */
	bprm->cred->euid = current_euid();
	bprm->cred->egid = current_egid();

	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->euid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->egid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set_creds(bprm);
	if (retval)
		return retval;
	bprm->cred_prepared = 1;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);
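/*
 * Editor's illustration (not in the original source): how the mode bits
 * tested in prepare_binprm() map onto common chmod values.
 *
 *	-rwsr-xr-x (04755): S_ISUID set             -> euid = inode->i_uid
 *	-rwxr-sr-x (02755): S_ISGID and S_IXGRP set -> egid = inode->i_gid
 *	-rw-r-Sr-- (02644): S_ISGID without S_IXGRP -> mandatory-locking
 *	                    candidate; egid is left unchanged
 */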
/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
	int ret = 0;
	unsigned long offset;
	char *kaddr;
	struct page *page;

	do {
		offset = bprm->p & ~PAGE_MASK;
		page = get_arg_page(bprm, bprm->p, 0);

		kaddr = kmap_atomic(page, KM_USER0);

		for (; offset < PAGE_SIZE && kaddr[offset];
				offset++, bprm->p++)
			;

		kunmap_atomic(kaddr, KM_USER0);

		if (offset == PAGE_SIZE)
			free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
	} while (offset == PAGE_SIZE);

	bprm->p++;

	return ret;
}
EXPORT_SYMBOL(remove_arg_zero);
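/*
 * Editor's example (not in the original source): when "./script.sh"
 * starting with "#!/bin/sh" is executed, the strings at bprm->p begin
 * with "./script.sh\0...". binfmt_script uses remove_arg_zero() to chop
 * that first string off before pushing the interpreter name and the
 * script path as the new leading arguments.
 */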
/*
 * Cycle through the list of binary format handlers until one recognizes
 * the image.
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	unsigned int depth = bprm->recursion_depth;
	int try, retval;
	struct linux_binfmt *fmt;

	/* handle /sbin/loader.. */
	{
	    struct exec * eh = (struct exec *) bprm->buf;

	    if (!bprm->loader && eh->fh.f_magic == 0x183 &&
		(eh->fh.f_flags & 0x3000) == 0x3000)
	    {
		struct file * file;
		unsigned long loader;

		allow_write_access(bprm->file);

		loader = bprm->vma->vm_end - sizeof(void *);

		file = open_exec("/sbin/loader");
		retval = PTR_ERR(file);
		if (IS_ERR(file))
			return retval;

		/* Remember if the application is TASO.  */
		bprm->taso = eh->ah.entry < 0x100000000UL;

		bprm->loader = loader;
		retval = prepare_binprm(bprm);
		if (retval < 0)
			return retval;
		/* should call search_binary_handler recursively here,
		   but it does not matter */
	    }
	}

	retval = security_bprm_check(bprm);
	if (retval)
		return retval;
	retval = ima_bprm_check(bprm);
	if (retval)
		return retval;

	/* kernel module loader fixup */
	/* so we don't try to run modprobe in kernel space. */
	set_fs(USER_DS);

	retval = audit_bprm(bprm);
	if (retval)
		return retval;
	for (try = 0; try < 2; try++) {
		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			/*
			 * Restore the depth counter to its starting value
			 * in this call, so we don't have to rely on every
			 * load_binary function to restore it on return.
			 */
			bprm->recursion_depth = depth;
			if (retval >= 0) {
				tracehook_report_exec(fmt, bprm, regs);
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				current->did_exec = 1;
				proc_exec_connector(current);
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
#ifdef CONFIG_MODULES
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#endif
		}
	}
	return retval;
}

EXPORT_SYMBOL(search_binary_handler);
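/*
 * Editor's sketch (not part of the original source): the dispatch
 * convention search_binary_handler() relies on. A handler inspects
 * bprm->buf and returns -ENOEXEC to pass the image on to the next
 * registered format; any other return value ends the search. All names
 * here (example_load_binary, example_format, "EXMP") are hypothetical.
 */
#if 0	/* illustrative only */
static int example_load_binary(struct linux_binprm *bprm,
			       struct pt_regs *regs)
{
	/* not our magic? let the next registered format try */
	if (memcmp(bprm->buf, "EXMP", 4) != 0)
		return -ENOEXEC;

	/* ... flush_old_exec(), map segments, set up the stack ... */
	return 0;
}

static struct linux_binfmt example_format = {
	.module		= THIS_MODULE,
	.load_binary	= example_load_binary,
};

static int __init example_binfmt_init(void)
{
	return register_binfmt(&example_format);
}
#endif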
void free_bprm(struct linux_binprm *bprm)
{
	free_arg_pages(bprm);
	if (bprm->cred)
		abort_creds(bprm->cred);
	kfree(bprm);
}
/*
 * sys_execve() executes a new program.
 */
int do_execve(char * filename,
	char __user *__user *argv,
	char __user *__user *envp,
	struct pt_regs * regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	struct files_struct *displaced;
	int retval;

	retval = unshare_files(&displaced);

	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);

	retval = mutex_lock_interruptible(&current->cred_exec_mutex);

	bprm->cred = prepare_exec_creds();

	check_unsafe_exec(bprm);

	file = open_exec(filename);
	retval = PTR_ERR(file);

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;

	retval = bprm_mm_init(bprm);

	bprm->argc = count(argv, MAX_ARG_STRINGS);
	if ((retval = bprm->argc) < 0)
		goto out;

	bprm->envc = count(envp, MAX_ARG_STRINGS);
	if ((retval = bprm->envc) < 0)
		goto out;

	retval = prepare_binprm(bprm);

	retval = copy_strings_kernel(1, &bprm->filename, bprm);

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);

	retval = copy_strings(bprm->argc, argv, bprm);

	current->flags &= ~PF_KTHREAD;
	retval = search_binary_handler(bprm, regs);

	/* execve succeeded */
	mutex_unlock(&current->cred_exec_mutex);
	acct_update_integrals(current);
	free_bprm(bprm);
	if (displaced)
		put_files_struct(displaced);
	return retval;

out:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}

	mutex_unlock(&current->cred_exec_mutex);

	if (displaced)
		reset_files_struct(displaced);
	return retval;
}
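/*
 * Editor's usage sketch (not part of the original source): the userspace
 * view of this path, reached through the execve(2) wrapper.
 */
#if 0	/* illustrative only */
#include <unistd.h>

int main(void)
{
	char *argv[] = { "/bin/ls", "-l", NULL };
	char *envp[] = { "PATH=/bin", NULL };

	execve("/bin/ls", argv, envp);
	return 1;	/* reached only if the exec failed */
}
#endif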
int set_binfmt(struct linux_binfmt *new)
{
	struct linux_binfmt *old = current->binfmt;

	if (new) {
		if (!try_module_get(new->module))
			return -1;
	}
	current->binfmt = new;
	if (old)
		module_put(old->module);
	return 0;
}

EXPORT_SYMBOL(set_binfmt);
/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(char *corename, long signr)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	char *out_ptr = corename;
	char *const out_end = corename + CORENAME_MAX_SIZE;
	int rc;
	int pid_in_pattern = 0;
	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (out_ptr == out_end)
				goto out;
			*out_ptr++ = *pat_ptr++;
		} else {
			switch (*++pat_ptr) {
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				if (out_ptr == out_end)
					goto out;
				*out_ptr++ = '%';
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", task_tgid_vnr(current));
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* uid */
			case 'u':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", cred->uid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* gid */
			case 'g':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", cred->gid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* signal that caused the coredump */
			case 's':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%ld", signr);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", tv.tv_sec);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", utsname()->nodename);
				up_read(&uts_sem);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* executable */
			case 'e':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", current->comm);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* core limit size */
			case 'c':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", current->signal->rlim[RLIMIT_CORE].rlim_cur);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			default:
				break;
			}
			++pat_ptr;
		}
	}
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		rc = snprintf(out_ptr, out_end - out_ptr,
			      ".%d", task_tgid_vnr(current));
		if (rc > out_end - out_ptr)
			goto out;
		out_ptr += rc;
	}
out:
	*out_ptr = 0;
	return ispipe;
}
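/*
 * Editor's examples (not in the original source) of core_pattern
 * expansion by format_corename():
 *
 *	"core"         -> "core" (plus ".<pid>" when core_uses_pid is set)
 *	"core.%p"      -> "core.4242"       (%p: tgid of the dumping task)
 *	"%e-%s.core"   -> "myprog-11.core"  (%e: comm, %s: signal number)
 *	"|/sbin/dumper %p"                  (leading '|' pipes to a helper)
 */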
static int zap_process(struct task_struct *start)
{
	struct task_struct *t;
	int nr = 0;

	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_stop_count = 0;

	t = start;
	do {
		if (t != current && t->mm) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	} while_each_thread(start, t);

	return nr;
}
static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
				struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		tsk->signal->group_exit_code = exit_code;
		nr = zap_process(tsk);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
	/*
	 * We should find and kill all tasks which use this mm, and we should
	 * count them correctly into ->nr_threads. We don't take tasklist
	 * lock, but this is safe wrt:
	 *
	 * fork:
	 *	None of sub-threads can fork after zap_process(leader). All
	 *	processes which were created before this point should be
	 *	visible to zap_threads() because copy_process() adds the new
	 *	process to the tail of init_task.tasks list, and lock/unlock
	 *	of ->siglock provides a memory barrier.
	 *
	 * de_thread:
	 *	The caller holds mm->mmap_sem. This means that the task which
	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
	 *	its ->mm.
	 *
	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
	 *	we must see either old or new leader, this does not matter.
	 *	However, it can change p->sighand, so lock_task_sighand(p)
	 *	must be used. Since p->mm != NULL and we hold ->mmap_sem
	 *	it can't fail.
	 *
	 *	Note also that "g" can be the old leader with ->mm == NULL
	 *	and already unhashed and thus removed from ->thread_group.
	 *	This is OK, __unhash_process()->list_del_rcu() does not
	 *	clear the ->next pointer, we will find the new leader via
	 *	next_thread().
	 */
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;
		p = g;
		do {
			if (p->mm) {
				if (unlikely(p->mm == mm)) {
					lock_task_sighand(p, &flags);
					nr += zap_process(p);
					unlock_task_sighand(p, &flags);
				}
				break;
			}
		} while_each_thread(g, p);
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}
static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct completion *vfork_done;
	int core_waiters;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;
	core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	up_write(&mm->mmap_sem);

	if (unlikely(core_waiters < 0))
		goto fail;

	/*
	 * Make sure nobody is waiting for us to release the VM,
	 * otherwise we can deadlock when we wait on each other
	 */
	vfork_done = tsk->vfork_done;
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	if (core_waiters)
		wait_for_completion(&core_state->startup);
fail:
	return core_waiters;
}
static void coredump_finish(struct mm_struct *mm)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see exit_mm(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}

	mm->core_state = NULL;
}
/*
 * set_dumpable converts traditional three-value dumpable to two flags and
 * stores them into mm->flags. It modifies lower two bits of mm->flags, but
 * these bits are not changed atomically. So get_dumpable can observe the
 * intermediate state. To avoid doing unexpected behavior, make get_dumpable
 * return either the old dumpable or the new one by paying attention to the
 * order of modifying the bits.
 *
 * dumpable |   mm->flags (binary)
 * old  new | initial interim  final
 * ---------+-----------------------
 *  0    1  | 00      01       01
 *  0    2  | 00      10(*)    11
 *  1    0  | 01      00       00
 *  1    2  | 01      11       11
 *  2    0  | 11      10(*)    00
 *  2    1  | 11      11       01
 *
 * (*) get_dumpable regards interim value of 10 as 11.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
	switch (value) {
	case 0:
		clear_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 1:
		set_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 2:
		set_bit(MMF_DUMP_SECURELY, &mm->flags);
		smp_wmb();
		set_bit(MMF_DUMPABLE, &mm->flags);
		break;
	}
}
int get_dumpable(struct mm_struct *mm)
{
	int ret;

	ret = mm->flags & 0x3;
	return (ret >= 2) ? 2 : ret;
}
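/*
 * Editor's illustration (not in the original source): moving from
 * dumpable 0 (flags 00) to 2 (flags 11) passes through the interim
 * value 10. get_dumpable() above maps both 10 and 11 to 2, so a
 * concurrent reader observes either the old value (0) or the new one
 * (2), never an unrelated third state.
 */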
int do_coredump(long signr, int exit_code, struct pt_regs * regs)
{
	struct core_state core_state;
	char corename[CORENAME_MAX_SIZE + 1];
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	struct inode * inode;
	struct file * file;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int flag = 0;
	int ispipe = 0;
	unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
	char **helper_argv = NULL;
	int helper_argc = 0;
	char *delimit;

	audit_core_dumps(signr);

	binfmt = current->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;

	cred = prepare_creds();
	down_write(&mm->mmap_sem);
	/*
	 * If another thread got here first, or we are not dumpable, bail out.
	 */
	if (mm->core_state || !get_dumpable(mm)) {
		up_write(&mm->mmap_sem);
		goto fail;
	}

	/*
	 * We cannot trust fsuid as being the "true" uid of the
	 * process nor do we know its entire history. We only know it
	 * was tainted so we dump it as root in mode 2.
	 */
	if (get_dumpable(mm) == 2) {	/* Setuid core dump mode */
		flag = O_EXCL;		/* Stop rewrite attacks */
		cred->fsuid = 0;	/* Dump root private */
	}

	retval = coredump_wait(exit_code, &core_state);

	old_cred = override_creds(cred);

	/*
	 * Clear any false indication of pending signals that might
	 * be seen by the filesystem code called to write the core file.
	 */
	clear_thread_flag(TIF_SIGPENDING);
	/*
	 * lock_kernel() because format_corename() is controlled by sysctl,
	 * which uses lock_kernel()
	 */
	lock_kernel();
	ispipe = format_corename(corename, signr);
	unlock_kernel();
	/*
	 * Don't bother to check the RLIMIT_CORE value if core_pattern points
	 * to a pipe. Since we're not writing directly to the filesystem
	 * RLIMIT_CORE doesn't really apply, as no actual core file will be
	 * created unless the pipe reader chooses to write out the core file
	 * at which point file size limits and permissions will be imposed
	 * as it does with any other process
	 */
	if ((!ispipe) && (core_limit < binfmt->min_coredump))
		goto fail_unlock;

	if (ispipe) {
		helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc);
		/* Terminate the string before the first option */
		delimit = strchr(corename, ' ');
		if (delimit)
			*delimit = '\0';
		delimit = strrchr(helper_argv[0], '/');
		if (delimit)
			delimit++;
		else
			delimit = helper_argv[0];
		if (!strcmp(delimit, current->comm)) {
			printk(KERN_NOTICE "Recursive core dump detected, "
					"aborting\n");
			goto fail_unlock;
		}

		core_limit = RLIM_INFINITY;

		/* SIGPIPE can happen, but it's just never processed */
		if (call_usermodehelper_pipe(corename+1, helper_argv, NULL,
				&file)) {
			printk(KERN_INFO "Core dump to %s pipe failed\n",
			       corename);
			goto fail_unlock;
		}
	} else
		file = filp_open(corename,
				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
				 0600);	/* note: 2 == O_RDWR */
	if (IS_ERR(file))
		goto fail_unlock;
	inode = file->f_path.dentry->d_inode;
	if (inode->i_nlink > 1)
		goto close_fail;	/* multiple links - don't dump */
	if (!ispipe && d_unhashed(file->f_path.dentry))
		goto close_fail;

	/* AK: actually I see no reason to not allow this for named pipes etc.,
	   but keep the previous behaviour for now. */
	if (!ispipe && !S_ISREG(inode->i_mode))
		goto close_fail;
	/*
	 * Don't allow local users to get cute and trick others into
	 * coredumping into their pre-created files:
	 */
	if (inode->i_uid != current_fsuid())
		goto close_fail;
	if (!file->f_op->write)
		goto close_fail;
	if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0)
		goto close_fail;

	retval = binfmt->core_dump(signr, regs, file, core_limit);

	if (retval)
		current->signal->group_exit_code |= 0x80;
close_fail:
	filp_close(file, NULL);
fail_unlock:
	if (helper_argv)
		argv_free(helper_argv);

	revert_creds(old_cred);

	coredump_finish(mm);
fail:
	return retval;
}