/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * #!-checking implemented by tytso.
 *
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/swap.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/rmap.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
int suid_dumpable = 0;

EXPORT_SYMBOL(suid_dumpable);
/* The maximal length of core_pattern is also specified in sysctl.c */

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);
int register_binfmt(struct linux_binfmt * fmt)
{
	if (!fmt)
		return -EINVAL;
	write_lock(&binfmt_lock);
	list_add(&fmt->lh, &formats);
	write_unlock(&binfmt_lock);
	return 0;
}

EXPORT_SYMBOL(register_binfmt);
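/*
 * Illustrative sketch (not part of this file): a binary-format handler
 * typically registers itself from its module init hook; the handler and
 * function names below are hypothetical.
 *
 *	static struct linux_binfmt example_format = {
 *		.module      = THIS_MODULE,
 *		.load_binary = load_example_binary,
 *	};
 *
 *	static int __init init_example_binfmt(void)
 *	{
 *		return register_binfmt(&example_format);
 *	}
 */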
void unregister_binfmt(struct linux_binfmt * fmt)
{
	write_lock(&binfmt_lock);
	list_del(&fmt->lh);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);
static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}
/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that we take the address to load from the file itself.
 */
asmlinkage long sys_uselib(const char __user * library)
{
	struct file * file;
	struct nameidata nd;
	int error;

	error = __user_path_lookup_open(library, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
	if (error)
		goto out;

	error = -EINVAL;
	if (!S_ISREG(nd.dentry->d_inode->i_mode))
		goto exit;

	error = vfs_permission(&nd, MAY_READ | MAY_EXEC);
	if (error)
		goto exit;

	file = nameidata_to_filp(&nd, O_RDONLY);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -ENOEXEC;
	if (file->f_op) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			if (!fmt->load_shlib)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
	fput(file);
out:
	return error;
exit:
	release_open_intent(&nd);
	path_release(&nd);
	goto out;
}
#ifdef CONFIG_MMU

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;
	int ret;

#ifdef CONFIG_STACK_GROWSUP
	if (write) {
		ret = expand_stack_downwards(bprm->vma, pos);
		if (ret < 0)
			return NULL;
	}
#endif
	ret = get_user_pages(current, bprm->mm, pos,
			1, write, 1, &page, NULL);
	if (ret <= 0)
		return NULL;

	if (write) {
		struct rlimit *rlim = current->signal->rlim;
		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;

		/*
		 * Limit to 1/4-th the stack size for the argv+env strings.
		 * This ensures that:
		 *  - the remaining binfmt code will not run out of stack space,
		 *  - the program will have a reasonable amount of stack left
		 *    to work from.
		 */
		if (size > rlim[RLIMIT_STACK].rlim_cur / 4) {
			put_page(page);
			return NULL;
		}
	}

	return page;
}
static void put_arg_page(struct page *page)
{
	put_page(page);
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}
static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err = -ENOMEM;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		goto err;

	down_write(&mm->mmap_sem);
	vma->vm_mm = mm;

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;

	vma->vm_flags = VM_STACK_FLAGS;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
	err = insert_vm_struct(mm, vma);
	if (err) {
		up_write(&mm->mmap_sem);
		goto err;
	}

	mm->stack_vm = mm->total_vm = 1;
	up_write(&mm->mmap_sem);

	bprm->p = vma->vm_end - sizeof(void *);

	return 0;

err:
	if (vma) {
		bprm->vma = NULL;
		kmem_cache_free(vm_area_cachep, vma);
	}

	return err;
}
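/*
 * Illustrative layout after __bprm_mm_init() (a sketch; actual addresses
 * depend on the architecture):
 *
 *	STACK_TOP_MAX ------>	+-------------------+  <- vma->vm_end
 *				| NULL sentinel     |  <- bprm->p = vm_end - sizeof(void *)
 *				| argv/env strings  |     copied downwards from here
 *	vm_end - PAGE_SIZE ->	+-------------------+  <- vma->vm_start (grows as needed)
 */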
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= MAX_ARG_STRLEN;
}

#else
static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;

	page = bprm->page[pos / PAGE_SIZE];
	if (!page && write) {
		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
		if (!page)
			return NULL;
		bprm->page[pos / PAGE_SIZE] = page;
	}

	return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
	if (bprm->page[i]) {
		__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

static void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++)
		free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
	return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= bprm->p;
}

#endif /* CONFIG_MMU */
/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct. We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values. We'll update
 * them later in setup_arg_pages().
 */
int bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct mm_struct *mm = NULL;

	bprm->mm = mm = mm_alloc();
	err = -ENOMEM;
	if (!mm)
		goto err;

	err = init_new_context(current, mm);
	if (err)
		goto err;

	err = __bprm_mm_init(bprm);
	if (err)
		goto err;

	return 0;

err:
	if (mm) {
		bprm->mm = NULL;
		mmdrop(mm);
	}

	return err;
}
/*
 * count() counts the number of strings in array ARGV.
 */
static int count(char __user * __user * argv, int max)
{
	int i = 0;

	if (argv != NULL) {
		for (;;) {
			char __user * p;

			if (get_user(p, argv))
				return -EFAULT;
			if (!p)
				break;
			argv++;
			if (i++ >= max)
				return -E2BIG;
			cond_resched();
		}
	}
	return i;
}
/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack.  The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, char __user * __user * argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	unsigned long kpos = 0;
	int ret;

	while (argc-- > 0) {
		char __user *str;
		int len;
		unsigned long pos;

		if (get_user(str, argv+argc) ||
				!(len = strnlen_user(str, MAX_ARG_STRLEN))) {
			ret = -EFAULT;
			goto out;
		}

		if (!valid_arg_len(bprm, len)) {
			ret = -E2BIG;
			goto out;
		}

		/* We're going to work our way backwards. */
		pos = bprm->p;
		str += len;
		bprm->p -= len;

		while (len > 0) {
			int offset, bytes_to_copy;

			offset = pos % PAGE_SIZE;
			if (offset == 0)
				offset = PAGE_SIZE;

			bytes_to_copy = offset;
			if (bytes_to_copy > len)
				bytes_to_copy = len;

			offset -= bytes_to_copy;
			pos -= bytes_to_copy;
			str -= bytes_to_copy;
			len -= bytes_to_copy;

			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
				struct page *page;

				page = get_arg_page(bprm, pos, 1);
				if (!page) {
					ret = -E2BIG;
					goto out;
				}

				if (kmapped_page) {
					flush_kernel_dcache_page(kmapped_page);
					kunmap(kmapped_page);
					put_arg_page(kmapped_page);
				}
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
				kpos = pos & PAGE_MASK;
				flush_arg_page(bprm, kpos, kmapped_page);
			}
			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
				ret = -EFAULT;
				goto out;
			}
		}
	}
	ret = 0;
out:
	if (kmapped_page) {
		flush_kernel_dcache_page(kmapped_page);
		kunmap(kmapped_page);
		put_arg_page(kmapped_page);
	}
	return ret;
}
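/*
 * Worked example (hypothetical numbers, PAGE_SIZE = 4096): with
 * bprm->p = 0x2005 and a 10-byte string, the first pass copies the last
 * 5 bytes into page 2 at offsets 0-4, the second pass copies the first
 * 5 bytes into page 1 at offsets 4091-4095, and bprm->p ends up at
 * 0x1ffb, i.e. a string may straddle a page boundary.
 */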
/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, char **argv, struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	set_fs(KERNEL_DS);
	r = copy_strings(argc, (char __user * __user *)argv, bprm);
	set_fs(oldfs);
	return r;
}

EXPORT_SYMBOL(copy_strings_kernel);
#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location. The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges. This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
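/*
 * Worked example (hypothetical addresses): with old range
 * [0xbfffe000, 0xc0000000) and shift = 0x1000, the vma is first grown to
 * [0xbfffd000, 0xc0000000), the page tables are moved down one page, and
 * the vma is finally shrunk to [0xbfffd000, 0xbffff000).
 */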
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	struct mmu_gather *tlb;

	BUG_ON(new_start > new_end);

	/*
	 * ensure there are no vmas between where we want to go
	 * and where we are
	 */
	if (vma != find_vma(mm, new_start))
		return -EFAULT;

	/*
	 * cover the whole range: [new_start, old_end)
	 */
	vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL);

	/*
	 * move the page tables downwards, on failure we rely on
	 * process cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length))
		return -ENOMEM;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	if (new_end > old_start) {
		/*
		 * when the old and new regions overlap clear from new_end.
		 */
		free_pgd_range(&tlb, new_end, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	} else {
		/*
		 * otherwise, clean from old_start; this is done to not touch
		 * the address space in [new_end, old_start); some architectures
		 * have constraints on va-space that make this illegal (IA64) -
		 * for the others it's just a little faster.
		 */
		free_pgd_range(&tlb, old_start, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	}
	tlb_finish_mmu(tlb, new_end, old_end);

	/*
	 * shrink the vma to just the new range.
	 */
	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

	return 0;
}
#define EXTRA_STACK_VM_PAGES	20	/* random */

/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long ret;
	unsigned long stack_shift;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = bprm->vma;
	struct vm_area_struct *prev = NULL;
	unsigned long vm_flags;
	unsigned long stack_base;

#ifdef CONFIG_STACK_GROWSUP
	/* Limit stack size to 1GB */
	stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;

	/* Make sure we didn't let the argument array grow too large. */
	if (vma->vm_end - vma->vm_start > stack_base)
		return -ENOMEM;

	stack_base = PAGE_ALIGN(stack_top - stack_base);

	stack_shift = vma->vm_start - stack_base;
	mm->arg_start = bprm->p - stack_shift;
	bprm->p = vma->vm_end - stack_shift;
#else
	stack_top = arch_align_stack(stack_top);
	stack_top = PAGE_ALIGN(stack_top);
	stack_shift = vma->vm_end - stack_top;

	bprm->p -= stack_shift;
	mm->arg_start = bprm->p;
#endif

	if (bprm->loader)
		bprm->loader -= stack_shift;
	bprm->exec -= stack_shift;

	down_write(&mm->mmap_sem);
	vm_flags = vma->vm_flags;

	/*
	 * Adjust stack execute permissions; explicitly enable for
	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
	 * (arch default) otherwise.
	 */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		vm_flags |= VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		vm_flags &= ~VM_EXEC;
	vm_flags |= mm->def_flags;

	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
			vm_flags);
	if (ret)
		goto out_unlock;
	BUG_ON(prev != vma);

	/* Move stack pages down in memory. */
	if (stack_shift) {
		ret = shift_arg_pages(vma, stack_shift);
		if (ret) {
			up_write(&mm->mmap_sem);
			return ret;
		}
	}

#ifdef CONFIG_STACK_GROWSUP
	stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
#else
	stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
#endif
	ret = expand_stack(vma, stack_base);
	if (ret)
		ret = -EFAULT;

out_unlock:
	up_write(&mm->mmap_sem);
	return ret;
}

EXPORT_SYMBOL(setup_arg_pages);

#endif /* CONFIG_MMU */
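/*
 * Illustrative call site (a sketch, modeled on a typical binfmt loader
 * such as the ELF one):
 *
 *	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
 *				 executable_stack);
 *	if (retval < 0)
 *		goto out_free;
 */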
struct file *open_exec(const char *name)
{
	struct nameidata nd;
	int err;
	struct file *file;

	err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
	file = ERR_PTR(err);

	if (!err) {
		struct inode *inode = nd.dentry->d_inode;
		file = ERR_PTR(-EACCES);
		if (S_ISREG(inode->i_mode)) {
			int err = vfs_permission(&nd, MAY_EXEC);
			file = ERR_PTR(err);
			if (!err) {
				file = nameidata_to_filp(&nd, O_RDONLY);
				if (!IS_ERR(file)) {
					err = deny_write_access(file);
					if (err) {
						fput(file);
						file = ERR_PTR(err);
					}
				}
out:
				return file;
			}
		}
		release_open_intent(&nd);
		path_release(&nd);
	}
	goto out;
}

EXPORT_SYMBOL(open_exec);
int kernel_read(struct file *file, unsigned long offset,
	char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	if (old_mm) {
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec.  We must hold mmap_sem around
		 * checking core_waiters and changing tsk->mm.  The
		 * core-inducing thread will increment core_waiters for
		 * each thread whose ->mm == old_mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_waiters)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}
/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;
	struct task_struct *leader = NULL;
	int count;

	/*
	 * If we don't share sighandlers, then we aren't sharing anything
	 * and we can just re-use it all.
	 */
	if (atomic_read(&oldsighand->count) <= 1) {
		BUG_ON(!thread_group_leader(tsk));
		return 0;
	}

	newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	if (!newsighand)
		return -ENOMEM;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 * We must hold tasklist_lock to call zap_other_threads.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irq(lock);
	if (sig->flags & SIGNAL_GROUP_EXIT) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		read_unlock(&tasklist_lock);
		kmem_cache_free(sighand_cachep, newsighand);
		return -EAGAIN;
	}

	/*
	 * child_reaper ignores SIGKILL, change it now.
	 * Reparenting needs write_lock on tasklist_lock,
	 * so it is safe to do it under read_lock.
	 */
	if (unlikely(tsk->group_leader == child_reaper(tsk)))
		tsk->nsproxy->pid_ns->child_reaper = tsk;

	zap_other_threads(tsk);
	read_unlock(&tasklist_lock);

	/*
	 * Account for the thread group leader hanging around:
	 */
	count = 1;
	if (!thread_group_leader(tsk)) {
		count = 2;
		/*
		 * The SIGALRM timer survives the exec, but needs to point
		 * at us as the new group leader now.  We have a race with
		 * a timer firing now getting the old leader, so we need to
		 * synchronize with any firing (by calling del_timer_sync)
		 * before we can safely let the old group leader die.
		 */
		sig->tsk = tsk;
		spin_unlock_irq(lock);
		if (hrtimer_cancel(&sig->real_timer))
			hrtimer_restart(&sig->real_timer);
		spin_lock_irq(lock);
	}
	while (atomic_read(&sig->count) > count) {
		sig->group_exit_task = tsk;
		sig->notify_count = count;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	sig->group_exit_task = NULL;
	sig->notify_count = 0;
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		/*
		 * Wait for the thread group leader to be a zombie.
		 * It should already be zombie at this point, most
		 * of the time.
		 */
		leader = tsk->group_leader;
		while (leader->exit_state != EXIT_ZOMBIE)
			yield();

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead.  But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;

		write_lock_irq(&tasklist_lock);

		BUG_ON(leader->tgid != tsk->tgid);
		BUG_ON(tsk->pid == tsk->tgid);
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 * Note: The old leader also uses this pid until release_task
		 *       is called.  Odd but simple and correct.
		 */
		detach_pid(tsk, PIDTYPE_PID);
		tsk->pid = leader->pid;
		attach_pid(tsk, PIDTYPE_PID, find_pid(tsk->pid));
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);
		list_replace_rcu(&leader->tasks, &tsk->tasks);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;

		write_unlock_irq(&tasklist_lock);
	}

	/*
	 * There may be one thread left which is just exiting,
	 * but it's safe to stop telling the group to kill themselves.
	 */
	sig->flags = 0;

no_thread_group:
	if (leader)
		release_task(leader);

	if (atomic_read(&oldsighand->count) == 1) {
		/*
		 * Now that we nuked the rest of the thread group,
		 * it turns out we are not sharing sighand any more either.
		 * So we can just keep it.
		 */
		kmem_cache_free(sighand_cachep, newsighand);
	} else {
		/*
		 * Move our state over to newsighand and switch it in.
		 */
		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		rcu_assign_pointer(tsk->sighand, newsighand);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}

	BUG_ON(!thread_group_leader(tsk));
	return 0;
}
/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */
static void flush_old_files(struct files_struct * files)
{
	long j = -1;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		fdt = files_fdtable(files);
		if (i >= fdt->max_fds)
			break;
		set = fdt->close_on_exec->fds_bits[j];
		if (!set)
			continue;
		fdt->close_on_exec->fds_bits[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1)
				sys_close(i);
		}
		spin_lock(&files->file_lock);
	}
	spin_unlock(&files->file_lock);
}
void get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	strncpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
}

void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
}
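/*
 * Illustrative use (sketch): callers pass a buffer of at least
 * sizeof(tsk->comm) bytes, i.e. TASK_COMM_LEN:
 *
 *	char comm[TASK_COMM_LEN];
 *	get_task_comm(comm, current);
 */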
int flush_old_exec(struct linux_binprm * bprm)
{
	char * name;
	int i, ch, retval;
	struct files_struct *files;
	char tcomm[sizeof(current->comm)];

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	/*
	 * Make sure we have private file handles. Ask the
	 * fork helper to do the work for us and the exit
	 * helper to do the cleanup of the old one.
	 */
	files = current->files;		/* refcounted so safe to hold */
	retval = unshare_files();
	if (retval)
		goto out;

	/*
	 * Release all of the old mmap stuff
	 */
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto mmap_failed;

	bprm->mm = NULL;		/* We're using it now */

	/* This is the point of no return */
	put_files_struct(files);

	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current->euid == current->uid && current->egid == current->gid)
		set_dumpable(current->mm, 1);
	else
		set_dumpable(current->mm, suid_dumpable);

	name = bprm->filename;

	/* Copies the binary name from after last slash */
	for (i = 0; (ch = *(name++)) != '\0';) {
		if (ch == '/')
			i = 0; /* overwrite what we wrote */
		else
			if (i < (sizeof(tcomm) - 1))
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
	set_task_comm(current, tcomm);

	current->flags &= ~PF_RANDOMIZE;
	flush_thread();

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc
	 */
	current->mm->task_size = TASK_SIZE;

	if (bprm->e_uid != current->euid || bprm->e_gid != current->egid) {
		suid_keys(current);
		set_dumpable(current->mm, suid_dumpable);
		current->pdeath_signal = 0;
	} else if (file_permission(bprm->file, MAY_READ) ||
			(bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
		suid_keys(current);
		set_dumpable(current->mm, suid_dumpable);
	}

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	flush_old_files(current->files);

	return 0;

mmap_failed:
	reset_files_struct(current, files);
out:
	return retval;
}

EXPORT_SYMBOL(flush_old_exec);
/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	int mode;
	struct inode * inode = bprm->file->f_path.dentry->d_inode;
	int retval;

	mode = inode->i_mode;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	bprm->e_uid = current->euid;
	bprm->e_gid = current->egid;

	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			current->personality &= ~PER_CLEAR_ON_SETID;
			bprm->e_uid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			current->personality &= ~PER_CLEAR_ON_SETID;
			bprm->e_gid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set(bprm);
	if (retval)
		return retval;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);
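/*
 * Illustrative consumer (sketch, modeled on the #! script handler): a
 * format handler inspects the BINPRM_BUF_SIZE bytes read above to decide
 * whether the image is one it recognizes:
 *
 *	if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
 *		return -ENOEXEC;
 */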
static int unsafe_exec(struct task_struct *p)
{
	int unsafe = 0;
	if (p->ptrace & PT_PTRACED) {
		if (p->ptrace & PT_PTRACE_CAP)
			unsafe |= LSM_UNSAFE_PTRACE_CAP;
		else
			unsafe |= LSM_UNSAFE_PTRACE;
	}
	if (atomic_read(&p->fs->count) > 1 ||
	    atomic_read(&p->files->count) > 1 ||
	    atomic_read(&p->sighand->count) > 1)
		unsafe |= LSM_UNSAFE_SHARE;

	return unsafe;
}
void compute_creds(struct linux_binprm *bprm)
{
	int unsafe;

	if (bprm->e_uid != current->uid) {
		suid_keys(current);
		current->pdeath_signal = 0;
	}
	exec_keys(current);

	task_lock(current);
	unsafe = unsafe_exec(current);
	security_bprm_apply_creds(bprm, unsafe);
	task_unlock(current);
	security_bprm_post_apply_creds(bprm);
}

EXPORT_SYMBOL(compute_creds);
/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
	int ret = 0;
	unsigned long offset;
	char *kaddr;
	struct page *page;

	if (!bprm->argc)
		return 0;

	do {
		offset = bprm->p & ~PAGE_MASK;
		page = get_arg_page(bprm, bprm->p, 0);
		if (!page) {
			ret = -EFAULT;
			goto out;
		}
		kaddr = kmap_atomic(page, KM_USER0);

		for (; offset < PAGE_SIZE && kaddr[offset];
				offset++, bprm->p++)
			;

		kunmap_atomic(kaddr, KM_USER0);
		put_arg_page(page);

		if (offset == PAGE_SIZE)
			free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
	} while (offset == PAGE_SIZE);

	bprm->p++;
	bprm->argc--;
	ret = 0;

out:
	return ret;
}

EXPORT_SYMBOL(remove_arg_zero);
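/*
 * Illustrative use (a sketch, loosely modeled on the #! script handler):
 * an interpreter format drops the original argv[0] and pushes its own
 * strings in front before re-running the handler search:
 *
 *	retval = remove_arg_zero(bprm);
 *	if (retval)
 *		return retval;
 *	retval = copy_strings_kernel(1, &bprm->interp, bprm);
 *	if (retval < 0)
 *		return retval;
 *	bprm->argc++;
 */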
/*
 * cycle the list of binary formats handler, until one recognizes the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	int try, retval;
	struct linux_binfmt *fmt;
#ifdef __alpha__
	/* handle /sbin/loader.. */
	{
	    struct exec * eh = (struct exec *) bprm->buf;

	    if (!bprm->loader && eh->fh.f_magic == 0x183 &&
		(eh->fh.f_flags & 0x3000) == 0x3000)
	    {
		struct file * file;
		unsigned long loader;

		allow_write_access(bprm->file);
		fput(bprm->file);
		bprm->file = NULL;

		loader = bprm->vma->vm_end - sizeof(void *);

		file = open_exec("/sbin/loader");
		retval = PTR_ERR(file);
		if (IS_ERR(file))
			return retval;

		/* Remember if the application is TASO.  */
		bprm->sh_bang = eh->ah.entry < 0x100000000UL;

		bprm->file = file;
		bprm->loader = loader;
		retval = prepare_binprm(bprm);
		if (retval < 0)
			return retval;
		/* should call search_binary_handler recursively here,
		   but it does not matter */
	    }
	}
#endif
	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	/* kernel module loader fixup */
	/* so we don't try to run modprobe in kernel space. */
	set_fs(USER_DS);

	retval = audit_bprm(bprm);
	if (retval)
		return retval;

	retval = -ENOENT;
	for (try = 0; try < 2; try++) {
		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			if (retval >= 0) {
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				proc_exec_connector(current);
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
#ifdef CONFIG_KMOD
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#endif
		}
	}
	return retval;
}

EXPORT_SYMBOL(search_binary_handler);
/*
 * sys_execve() executes a new program.
 */
int do_execve(char * filename,
	char __user *__user *argv,
	char __user *__user *envp,
	struct pt_regs * regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	unsigned long env_p;
	int retval;

	retval = -ENOMEM;
	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_ret;

	file = open_exec(filename);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_kfree;

	sched_exec();

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;

	retval = bprm_mm_init(bprm);
	if (retval)
		goto out_file;

	bprm->argc = count(argv, MAX_ARG_STRINGS);
	if ((retval = bprm->argc) < 0)
		goto out_mm;

	bprm->envc = count(envp, MAX_ARG_STRINGS);
	if ((retval = bprm->envc) < 0)
		goto out_mm;

	retval = security_bprm_alloc(bprm);
	if (retval)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	env_p = bprm->p;
	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;
	bprm->argv_len = env_p - bprm->p;

	retval = search_binary_handler(bprm, regs);
	if (retval >= 0) {
		/* execve success */
		free_arg_pages(bprm);
		security_bprm_free(bprm);
		acct_update_integrals(current);
		kfree(bprm);
		return retval;
	}

out:
	free_arg_pages(bprm);
	if (bprm->security)
		security_bprm_free(bprm);

out_mm:
	if (bprm->mm)
		mmput(bprm->mm);

out_file:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}

out_kfree:
	kfree(bprm);

out_ret:
	return retval;
}
int set_binfmt(struct linux_binfmt *new)
{
	struct linux_binfmt *old = current->binfmt;

	if (new) {
		if (!try_module_get(new->module))
			return -1;
	}
	current->binfmt = new;
	if (old)
		module_put(old->module);
	return 0;
}

EXPORT_SYMBOL(set_binfmt);
/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(char *corename, const char *pattern, long signr)
{
	const char *pat_ptr = pattern;
	char *out_ptr = corename;
	char *const out_end = corename + CORENAME_MAX_SIZE;
	int rc;
	int pid_in_pattern = 0;
	int ispipe = 0;

	if (*pattern == '|')
		ispipe = 1;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (out_ptr == out_end)
				goto out;
			*out_ptr++ = *pat_ptr++;
		} else {
			switch (*++pat_ptr) {
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				if (out_ptr == out_end)
					goto out;
				*out_ptr++ = '%';
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->tgid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* uid */
			case 'u':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->uid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* gid */
			case 'g':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->gid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* signal that caused the coredump */
			case 's':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%ld", signr);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", tv.tv_sec);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", utsname()->nodename);
				up_read(&uts_sem);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* executable */
			case 'e':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", current->comm);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* core limit size */
			case 'c':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", current->signal->rlim[RLIMIT_CORE].rlim_cur);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			default:
				break;
			}
			++pat_ptr;
		}
	}
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern
	    && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
		rc = snprintf(out_ptr, out_end - out_ptr,
			      ".%d", current->tgid);
		if (rc > out_end - out_ptr)
			goto out;
		out_ptr += rc;
	}
out:
	*out_ptr = 0;
	return ispipe;
}
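/*
 * Example (hypothetical values): with core_pattern = "core.%e.%p", a dump
 * of pid 4242 running "myprog" produces "core.myprog.4242". A leading '|'
 * as in "|/usr/libexec/corehelper %p" makes this function return 1, and
 * do_coredump() below treats the name as a helper command line instead of
 * a filename.
 */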
static void zap_process(struct task_struct *start)
{
	struct task_struct *t;

	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_stop_count = 0;

	t = start;
	do {
		if (t != current && t->mm) {
			t->mm->core_waiters++;
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
		}
	} while ((t = next_thread(t)) != start);
}
static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
			      int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int err = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
		tsk->signal->group_exit_code = exit_code;
		zap_process(tsk);
		err = 0;
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (err)
		return err;

	if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
		goto done;

	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;

		p = g;
		do {
			if (p->mm) {
				if (p->mm == mm) {
					/*
					 * p->sighand can't disappear, but
					 * may be changed by de_thread()
					 */
					lock_task_sighand(p, &flags);
					zap_process(p);
					unlock_task_sighand(p, &flags);
				}
				break;
			}
		} while ((p = next_thread(p)) != g);
	}
	rcu_read_unlock();
done:
	return mm->core_waiters;
}
static int coredump_wait(int exit_code)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct completion startup_done;
	struct completion *vfork_done;
	int core_waiters;

	init_completion(&mm->core_done);
	init_completion(&startup_done);
	mm->core_startup_done = &startup_done;

	core_waiters = zap_threads(tsk, mm, exit_code);
	up_write(&mm->mmap_sem);

	if (unlikely(core_waiters < 0))
		goto fail;

	/*
	 * Make sure nobody is waiting for us to release the VM,
	 * otherwise we can deadlock when we wait on each other
	 */
	vfork_done = tsk->vfork_done;
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	if (core_waiters)
		wait_for_completion(&startup_done);
fail:
	BUG_ON(mm->core_waiters);
	return core_waiters;
}
/*
 * set_dumpable converts traditional three-value dumpable to two flags and
 * stores them into mm->flags.  It modifies lower two bits of mm->flags, but
 * these bits are not changed atomically.  So get_dumpable can observe the
 * intermediate state.  To avoid unexpected behavior, get_dumpable returns
 * either the old dumpable or the new one by paying attention to the order
 * of modifying the bits.
 *
 * dumpable |   mm->flags (binary)
 * old  new | initial interim  final
 * ---------+-----------------------
 *  0    1  | 00      01       01
 *  0    2  | 00      10(*)    11
 *  1    0  | 01      00       00
 *  1    2  | 01      11       11
 *  2    0  | 11      10(*)    00
 *  2    1  | 11      11       01
 *
 * (*) get_dumpable regards interim value of 10 as 11.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
	switch (value) {
	case 0:
		clear_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 1:
		set_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 2:
		set_bit(MMF_DUMP_SECURELY, &mm->flags);
		smp_wmb();
		set_bit(MMF_DUMPABLE, &mm->flags);
		break;
	}
}

EXPORT_SYMBOL_GPL(set_dumpable);
int get_dumpable(struct mm_struct *mm)
{
	int ret;

	ret = mm->flags & 0x3;
	return (ret >= 2) ? 2 : ret;
}
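/*
 * Note: flag values 2 (the starred interim state in the table above) and
 * 3 both map to dumpable == 2 here, which is why the non-atomic updates
 * in set_dumpable() are safe to observe mid-transition.
 */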
int do_coredump(long signr, int exit_code, struct pt_regs * regs)
{
	char corename[CORENAME_MAX_SIZE + 1];
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	struct inode * inode;
	struct file * file;
	int retval = 0;
	int fsuid = current->fsuid;
	int flag = 0;
	int ispipe = 0;
	unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
	char **helper_argv = NULL;
	int helper_argc = 0;
	char *delimit;

	audit_core_dumps(signr);

	binfmt = current->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	down_write(&mm->mmap_sem);
	if (!get_dumpable(mm)) {
		up_write(&mm->mmap_sem);
		goto fail;
	}

	/*
	 * We cannot trust fsuid as being the "true" uid of the
	 * process nor do we know its entire history. We only know it
	 * was tainted so we dump it as root in mode 2.
	 */
	if (get_dumpable(mm) == 2) {	/* Setuid core dump mode */
		flag = O_EXCL;		/* Stop rewrite attacks */
		current->fsuid = 0;	/* Dump root private */
	}
	set_dumpable(mm, 0);

	retval = coredump_wait(exit_code);
	if (retval < 0)
		goto fail;

	/*
	 * Clear any false indication of pending signals that might
	 * be seen by the filesystem code called to write the core file.
	 */
	clear_thread_flag(TIF_SIGPENDING);

	/*
	 * lock_kernel() because format_corename() is controlled by sysctl, which
	 * uses lock_kernel()
	 */
	lock_kernel();
	ispipe = format_corename(corename, core_pattern, signr);
	unlock_kernel();
	/*
	 * Don't bother to check the RLIMIT_CORE value if core_pattern points
	 * to a pipe.  Since we're not writing directly to the filesystem
	 * RLIMIT_CORE doesn't really apply, as no actual core file will be
	 * created unless the pipe reader chooses to write out the core file
	 * at which point file size limits and permissions will be imposed
	 * as it does with any other process
	 */
	if ((!ispipe) && (core_limit < binfmt->min_coredump))
		goto fail_unlock;

	if (ispipe) {
		helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc);
		/* Terminate the string before the first option */
		delimit = strchr(corename, ' ');
		if (delimit)
			*delimit = '\0';
		delimit = strrchr(helper_argv[0], '/');
		if (delimit)
			delimit++;
		else
			delimit = helper_argv[0];
		if (!strcmp(delimit, current->comm)) {
			printk(KERN_NOTICE "Recursive core dump detected, "
					"aborting\n");
			goto fail_unlock;
		}

		core_limit = RLIM_INFINITY;

		/* SIGPIPE can happen, but it's just never processed */
		if (call_usermodehelper_pipe(corename+1, helper_argv, NULL,
				&file)) {
			printk(KERN_INFO "Core dump to %s pipe failed\n",
			       corename);
			goto fail_unlock;
		}
	} else
		file = filp_open(corename,
				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
				 0600);
	if (IS_ERR(file))
		goto fail_unlock;
	inode = file->f_path.dentry->d_inode;
	if (inode->i_nlink > 1)
		goto close_fail;	/* multiple links - don't dump */
	if (!ispipe && d_unhashed(file->f_path.dentry))
		goto close_fail;

	/* AK: actually i see no reason to not allow this for named pipes etc.,
	   but keep the previous behaviour for now. */
	if (!ispipe && !S_ISREG(inode->i_mode))
		goto close_fail;
	if (!file->f_op)
		goto close_fail;
	if (!file->f_op->write)
		goto close_fail;
	if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0)
		goto close_fail;

	retval = binfmt->core_dump(signr, regs, file, core_limit);

	if (retval)
		current->signal->group_exit_code |= 0x80;
close_fail:
	filp_close(file, NULL);
fail_unlock:
	if (helper_argv)
		argv_free(helper_argv);

	current->fsuid = fsuid;
	complete_all(&mm->core_done);
fail:
	return retval;
}