/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/swap.h>
#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/rmap-locking.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
int core_uses_pid;
char core_pattern[65] = "core";
/* The maximal length of core_pattern is also specified in sysctl.c */

static struct linux_binfmt *formats;
static rwlock_t binfmt_lock = RW_LOCK_UNLOCKED;
int register_binfmt(struct linux_binfmt * fmt)
{
	struct linux_binfmt ** tmp = &formats;

	if (!fmt)
		return -EINVAL;
	if (fmt->next)
		return -EBUSY;
	write_lock(&binfmt_lock);
	while (*tmp) {
		if (fmt == *tmp) {
			write_unlock(&binfmt_lock);
			return -EBUSY;
		}
		tmp = &(*tmp)->next;
	}
	fmt->next = formats;
	formats = fmt;
	write_unlock(&binfmt_lock);
	return 0;
}

EXPORT_SYMBOL(register_binfmt);
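
/*
 * Illustrative sketch (not part of this file): a binary format handler
 * normally registers itself from its module init routine, the way
 * binfmt_elf and binfmt_script do.  The handler and function names here
 * are hypothetical:
 *
 *	static struct linux_binfmt example_format = {
 *		.module      = THIS_MODULE,
 *		.load_binary = load_example_binary,
 *	};
 *
 *	static int __init init_example_binfmt(void)
 *	{
 *		return register_binfmt(&example_format);
 *	}
 *
 *	static void __exit exit_example_binfmt(void)
 *	{
 *		unregister_binfmt(&example_format);
 *	}
 *
 *	module_init(init_example_binfmt);
 *	module_exit(exit_example_binfmt);
 */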
int unregister_binfmt(struct linux_binfmt * fmt)
{
	struct linux_binfmt ** tmp = &formats;

	write_lock(&binfmt_lock);
	while (*tmp) {
		if (fmt == *tmp) {
			*tmp = fmt->next;
			write_unlock(&binfmt_lock);
			return 0;
		}
		tmp = &(*tmp)->next;
	}
	write_unlock(&binfmt_lock);
	return -EINVAL;
}

EXPORT_SYMBOL(unregister_binfmt);
static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}
/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that we take the address to load from, from the file itself.
 */
asmlinkage long sys_uselib(const char __user * library)
{
	struct file * file;
	struct nameidata nd;
	int error;

	nd.intent.open.flags = O_RDONLY;
	error = __user_walk(library, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
	if (error)
		goto out;

	error = -EINVAL;
	if (!S_ISREG(nd.dentry->d_inode->i_mode))
		goto exit;

	error = permission(nd.dentry->d_inode, MAY_READ | MAY_EXEC, &nd);
	if (error)
		goto exit;

	file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -ENOEXEC;
	if (file->f_op) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		for (fmt = formats ; fmt ; fmt = fmt->next) {
			if (!fmt->load_shlib)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
	fput(file);
out:
	return error;
exit:
	path_release(&nd);
	goto out;
}
/*
 * count() counts the number of strings in array ARGV.
 */
static int count(char __user * __user * argv, int max)
{
	int i = 0;

	if (argv != NULL) {
		for (;;) {
			char __user * p;

			if (get_user(p, argv))
				return -EFAULT;
			if (!p)
				break;
			argv++;
			if (++i > max)
				return -E2BIG;
		}
	}
	return i;
}
/*
 * 'copy_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 */
int copy_strings(int argc, char __user * __user * argv, struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	int ret;

	while (argc-- > 0) {
		char __user *str;
		int len;
		unsigned long pos;

		if (get_user(str, argv+argc) ||
				!(len = strnlen_user(str, bprm->p))) {
			ret = -EFAULT;
			goto out;
		}

		if (bprm->p < len) {
			/* We're over the limit here */
			ret = -E2BIG;
			goto out;
		}

		bprm->p -= len;
		/* XXX: add architecture specific overflow check here. */
		pos = bprm->p;

		while (len > 0) {
			int i, new, err;
			int offset, bytes_to_copy;
			struct page *page;

			offset = pos % PAGE_SIZE;
			i = pos/PAGE_SIZE;
			page = bprm->page[i];
			new = 0;
			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				bprm->page[i] = page;
				if (!page) {
					ret = -ENOMEM;
					goto out;
				}
				new = 1;
			}

			if (page != kmapped_page) {
				if (kmapped_page)
					kunmap(kmapped_page);
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
			}
			if (new && offset)
				memset(kaddr, 0, offset);
			bytes_to_copy = PAGE_SIZE - offset;
			if (bytes_to_copy > len) {
				bytes_to_copy = len;
				if (new)
					memset(kaddr+offset+len, 0,
						PAGE_SIZE-offset-len);
			}
			err = copy_from_user(kaddr+offset, str, bytes_to_copy);
			if (err) {
				ret = -EFAULT;
				goto out;
			}

			pos += bytes_to_copy;
			str += bytes_to_copy;
			len -= bytes_to_copy;
		}
	}
	ret = 0;
out:
	if (kmapped_page)
		kunmap(kmapped_page);
	return ret;
}
/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, char ** argv, struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	set_fs(KERNEL_DS);
	r = copy_strings(argc, (char __user * __user *)argv, bprm);
	set_fs(oldfs);
	return r;
}

EXPORT_SYMBOL(copy_strings_kernel);
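
/*
 * Worked example (illustrative, i386-style numbers): do_execve() starts
 * with bprm->p = PAGE_SIZE*MAX_ARG_PAGES - sizeof(void *); with 4096-byte
 * pages, MAX_ARG_PAGES = 32 and 4-byte pointers that is 131068.
 * copy_strings() then fills the pages downwards: copying the string "ls"
 * (3 bytes including the NUL) moves bprm->p to 131065, which lands in
 * page bprm->p/PAGE_SIZE = 31 at offset bprm->p%PAGE_SIZE = 4089.
 * bprm->p stays relative to the bottom of the argument pages until
 * setup_arg_pages() rebases it into the new process's stack.
 */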
#ifdef CONFIG_MMU
/*
 * This routine is used to map in a page into an address space: needed by
 * execve() for the initial stack and environment pages.
 *
 * tsk->mmap_sem is held for writing.
 */
void put_dirty_page(struct task_struct *tsk, struct page *page,
			unsigned long address, pgprot_t prot)
{
	pgd_t * pgd;
	pmd_t * pmd;
	pte_t * pte;
	struct pte_chain *pte_chain;

	if (page_count(page) != 1)
		printk(KERN_ERR "mem_map disagrees with %p at %08lx\n",
				page, address);

	pgd = pgd_offset(tsk->mm, address);
	pte_chain = pte_chain_alloc(GFP_KERNEL);
	if (!pte_chain)
		goto out_sig;
	spin_lock(&tsk->mm->page_table_lock);
	pmd = pmd_alloc(tsk->mm, pgd, address);
	if (!pmd)
		goto out;
	pte = pte_alloc_map(tsk->mm, pmd, address);
	if (!pte)
		goto out;
	if (!pte_none(*pte)) {
		pte_unmap(pte);
		goto out;
	}
	lru_cache_add_active(page);
	flush_dcache_page(page);
	set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(page, prot))));
	pte_chain = page_add_rmap(page, pte, pte_chain);
	pte_unmap(pte);
	tsk->mm->rss++;
	spin_unlock(&tsk->mm->page_table_lock);

	/* no need for flush_tlb */
	pte_chain_free(pte_chain);
	return;
out:
	spin_unlock(&tsk->mm->page_table_lock);
out_sig:
	__free_page(page);
	force_sig(SIGKILL, tsk);
	pte_chain_free(pte_chain);
	return;
}
int setup_arg_pages(struct linux_binprm *bprm)
{
	unsigned long stack_base;
	struct vm_area_struct *mpnt;
	struct mm_struct *mm = current->mm;
	int i;
	long arg_size;

#ifdef CONFIG_STACK_GROWSUP
	/* Move the argument and environment strings to the bottom of the
	 * stack space.
	 */
	int offset, j;
	char *to, *from;

	/* Start by shifting all the pages down */
	i = 0;
	for (j = 0; j < MAX_ARG_PAGES; j++) {
		struct page *page = bprm->page[j];
		if (!page)
			continue;
		bprm->page[i++] = page;
	}

	/* Now move them within their pages */
	offset = bprm->p % PAGE_SIZE;
	to = kmap(bprm->page[0]);
	for (j = 1; j < i; j++) {
		memmove(to, to + offset, PAGE_SIZE - offset);
		from = kmap(bprm->page[j]);
		memcpy(to + PAGE_SIZE - offset, from, offset);
		kunmap(bprm->page[j - 1]);
		to = from;
	}
	memmove(to, to + offset, PAGE_SIZE - offset);
	kunmap(bprm->page[j - 1]);

	/* Adjust bprm->p to point to the end of the strings. */
	bprm->p = PAGE_SIZE * i - offset;

	/* Limit stack size to 1GB */
	stack_base = current->rlim[RLIMIT_STACK].rlim_max;
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;
	stack_base = PAGE_ALIGN(STACK_TOP - stack_base);

	mm->arg_start = stack_base;
	arg_size = i << PAGE_SHIFT;

	/* zero pages that were copied above */
	while (i < MAX_ARG_PAGES)
		bprm->page[i++] = NULL;
#else
	stack_base = STACK_TOP - MAX_ARG_PAGES * PAGE_SIZE;
	mm->arg_start = bprm->p + stack_base;
	arg_size = STACK_TOP - (PAGE_MASK & (unsigned long) mm->arg_start);
#endif

	bprm->p += stack_base;
	if (bprm->loader)
		bprm->loader += stack_base;
	bprm->exec += stack_base;

	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	if (security_vm_enough_memory(arg_size >> PAGE_SHIFT)) {
		kmem_cache_free(vm_area_cachep, mpnt);
		return -ENOMEM;
	}

	down_write(&mm->mmap_sem);
	{
		mpnt->vm_mm = mm;
#ifdef CONFIG_STACK_GROWSUP
		mpnt->vm_start = stack_base;
		mpnt->vm_end = PAGE_MASK &
			(PAGE_SIZE - 1 + (unsigned long) bprm->p);
#else
		mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
		mpnt->vm_end = STACK_TOP;
#endif
		mpnt->vm_page_prot = protection_map[VM_STACK_FLAGS & 0x7];
		mpnt->vm_flags = VM_STACK_FLAGS;
		mpnt->vm_ops = NULL;
		mpnt->vm_pgoff = 0;
		mpnt->vm_file = NULL;
		INIT_LIST_HEAD(&mpnt->shared);
		mpnt->vm_private_data = (void *) 0;
		insert_vm_struct(mm, mpnt);
		mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
	}

	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page *page = bprm->page[i];
		if (page) {
			bprm->page[i] = NULL;
			put_dirty_page(current, page, stack_base,
					mpnt->vm_page_prot);
		}
		stack_base += PAGE_SIZE;
	}
	up_write(&mm->mmap_sem);

	return 0;
}

EXPORT_SYMBOL(setup_arg_pages);
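
/*
 * Worked example (illustrative, i386-style numbers): with STACK_TOP =
 * 0xC0000000 and MAX_ARG_PAGES = 32, the non-GROWSUP branch gives
 * stack_base = 0xC0000000 - 32*4096 = 0xBFFE0000.  A relative bprm->p
 * of 131065 (0x1FFF9) then becomes the user address 0xBFFFFFF9, and the
 * new stack VMA spans (PAGE_MASK & bprm->p)..STACK_TOP, i.e.
 * 0xBFFFF000..0xC0000000, before the first expand_stack() grows it.
 */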
#define free_arg_pages(bprm) do { } while (0)

#else

static inline void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++) {
		if (bprm->page[i])
			__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

#endif /* CONFIG_MMU */
struct file *open_exec(const char *name)
{
	struct nameidata nd;
	int err = path_lookup(name, LOOKUP_FOLLOW, &nd);
	struct file *file = ERR_PTR(err);

	if (!err) {
		struct inode *inode = nd.dentry->d_inode;
		file = ERR_PTR(-EACCES);
		if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
		    S_ISREG(inode->i_mode)) {
			int err = permission(inode, MAY_EXEC, &nd);
			if (!err && !(inode->i_mode & 0111))
				err = -EACCES;
			file = ERR_PTR(err);
			if (!err) {
				file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
				if (!IS_ERR(file)) {
					err = deny_write_access(file);
					if (err) {
						fput(file);
						file = ERR_PTR(err);
					}
				}
out:
				return file;
			}
		}
		path_release(&nd);
	}
	goto out;
}

EXPORT_SYMBOL(open_exec);
int kernel_read(struct file *file, unsigned long offset,
	char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);
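
/*
 * Typical use (sketch): a binfmt handler that needs more of the image
 * than the BINPRM_BUF_SIZE bytes already in bprm->buf pulls it in with
 * kernel_read(), e.g. an ELF loader reading its program headers:
 *
 *	retval = kernel_read(bprm->file, elf_ex.e_phoff,
 *			     (char *)elf_phdata, size);
 *	if (retval < 0)
 *		goto out;
 *
 * (variable names follow binfmt_elf; shown here only for illustration.)
 */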
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Add it to the list of mm's */
	spin_lock(&mmlist_lock);
	list_add(&mm->mmlist, &init_mm.mmlist);
	mmlist_nr++;
	spin_unlock(&mmlist_lock);

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	if (old_mm) {
		if (active_mm != old_mm) BUG();
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}
/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static inline int de_thread(struct task_struct *tsk)
{
	struct signal_struct *newsig, *oldsig = tsk->signal;
	struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;
	int count;

	/*
	 * If we don't share sighandlers, then we aren't sharing anything
	 * and we can just re-use it all.
	 */
	if (atomic_read(&oldsighand->count) <= 1)
		return 0;

	newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	if (!newsighand)
		return -ENOMEM;

	spin_lock_init(&newsighand->siglock);
	atomic_set(&newsighand->count, 1);
	memcpy(newsighand->action, oldsighand->action, sizeof(newsighand->action));

	/*
	 * See if we need to allocate a new signal structure
	 */
	newsig = NULL;
	if (atomic_read(&oldsig->count) > 1) {
		newsig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
		if (!newsig) {
			kmem_cache_free(sighand_cachep, newsighand);
			return -ENOMEM;
		}
		atomic_set(&newsig->count, 1);
		newsig->group_exit = 0;
		newsig->group_exit_code = 0;
		newsig->group_exit_task = NULL;
		newsig->group_stop_count = 0;
		newsig->curr_target = NULL;
		init_sigpending(&newsig->shared_pending);

		newsig->pgrp = oldsig->pgrp;
		newsig->session = oldsig->session;
		newsig->leader = oldsig->leader;
		newsig->tty_old_pgrp = oldsig->tty_old_pgrp;
	}

	if (thread_group_empty(current))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 * We must hold tasklist_lock to call zap_other_threads.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irq(lock);
	if (oldsig->group_exit) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		read_unlock(&tasklist_lock);
		kmem_cache_free(sighand_cachep, newsighand);
		if (newsig)
			kmem_cache_free(signal_cachep, newsig);
		return -EAGAIN;
	}
	oldsig->group_exit = 1;
	zap_other_threads(current);
	read_unlock(&tasklist_lock);

	/*
	 * Account for the thread group leader hanging around:
	 */
	count = 2;
	if (current->pid == current->tgid)
		count = 1;
	while (atomic_read(&oldsig->count) > count) {
		oldsig->group_exit_task = current;
		oldsig->notify_count = count;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (current->pid != current->tgid) {
		struct task_struct *leader = current->group_leader, *parent;
		struct dentry *proc_dentry1, *proc_dentry2;
		unsigned long state, ptrace;

		/*
		 * Wait for the thread group leader to be a zombie.
		 * It should already be zombie at this point, most
		 * of the time.
		 */
		while (leader->state != TASK_ZOMBIE)
			yield();

		spin_lock(&leader->proc_lock);
		spin_lock(&current->proc_lock);
		proc_dentry1 = proc_pid_unhash(current);
		proc_dentry2 = proc_pid_unhash(leader);
		write_lock_irq(&tasklist_lock);

		if (leader->tgid != current->tgid)
			BUG();
		if (current->pid == current->tgid)
			BUG();
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */
		ptrace = leader->ptrace;
		parent = leader->parent;

		ptrace_unlink(current);
		ptrace_unlink(leader);
		remove_parent(current);
		remove_parent(leader);

		switch_exec_pids(leader, current);

		current->parent = current->real_parent = leader->real_parent;
		leader->parent = leader->real_parent = child_reaper;
		current->group_leader = current;
		leader->group_leader = leader;

		add_parent(current, current->parent);
		add_parent(leader, leader->parent);
		if (ptrace) {
			current->ptrace = ptrace;
			__ptrace_link(current, parent);
		}

		list_del(&current->tasks);
		list_add_tail(&current->tasks, &init_task.tasks);
		current->exit_signal = SIGCHLD;
		state = leader->state;

		write_unlock_irq(&tasklist_lock);
		spin_unlock(&leader->proc_lock);
		spin_unlock(&current->proc_lock);
		proc_pid_flush(proc_dentry1);
		proc_pid_flush(proc_dentry2);

		if (state != TASK_ZOMBIE)
			BUG();
		release_task(leader);
	}

no_thread_group:

	write_lock_irq(&tasklist_lock);
	spin_lock(&oldsighand->siglock);
	spin_lock(&newsighand->siglock);

	if (current == oldsig->curr_target)
		oldsig->curr_target = next_thread(current);
	if (newsig)
		current->signal = newsig;
	current->sighand = newsighand;
	init_sigpending(&current->pending);
	recalc_sigpending();

	spin_unlock(&newsighand->siglock);
	spin_unlock(&oldsighand->siglock);
	write_unlock_irq(&tasklist_lock);

	if (newsig && atomic_dec_and_test(&oldsig->count))
		kmem_cache_free(signal_cachep, oldsig);

	if (atomic_dec_and_test(&oldsighand->count))
		kmem_cache_free(sighand_cachep, oldsighand);

	if (!thread_group_empty(current))
		BUG();
	if (current->tgid != current->pid)
		BUG();
	return 0;
}
/*
 * This function flushes out all traces of the currently running executable
 * so that a new one can be started
 */

static inline void flush_old_files(struct files_struct * files)
{
	long j = -1;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		if (i >= files->max_fds || i >= files->max_fdset)
			break;
		set = files->close_on_exec->fds_bits[j];
		if (!set)
			continue;
		files->close_on_exec->fds_bits[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1) {
				sys_close(i);
			}
		}
		spin_lock(&files->file_lock);

	}
	spin_unlock(&files->file_lock);
}
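
/*
 * Userspace view (illustrative): a descriptor marked close-on-exec is
 * what the loop above closes.  E.g.:
 *
 *	fd = open("/etc/secret", O_RDONLY);
 *	fcntl(fd, F_SETFD, FD_CLOEXEC);
 *	execve("/bin/child", argv, envp);
 *
 * The new image never sees fd: flush_old_files() finds its bit set in
 * files->close_on_exec and sys_close()s it before the image runs.
 */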
int flush_old_exec(struct linux_binprm * bprm)
{
	char * name;
	int i, ch, retval;

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	/*
	 * Release all of the old mmap stuff
	 */
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto out;

	bprm->mm = NULL;		/* We're using it now */

	/* This is the point of no return */

	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current->euid == current->uid && current->egid == current->gid)
		current->mm->dumpable = 1;
	name = bprm->filename;
	for (i=0; (ch = *(name++)) != '\0';) {
		if (ch == '/')
			i = 0;
		else
			if (i < (sizeof(current->comm) - 1))
				current->comm[i++] = ch;
	}
	current->comm[i] = '\0';

	flush_thread();

	if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
	    permission(bprm->file->f_dentry->d_inode, MAY_READ, NULL))
		current->mm->dumpable = 0;

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	flush_old_files(current->files);
	exit_itimers(current);

	return 0;

out:
	return retval;
}

EXPORT_SYMBOL(flush_old_exec);
/*
 * We mustn't allow tracing of suid binaries, unless
 * the tracer has the capability to trace anything..
 */
static inline int must_not_trace_exec(struct task_struct * p)
{
	return (p->ptrace & PT_PTRACED) && !(p->ptrace & PT_PTRACE_CAP);
}
/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	int mode;
	struct inode * inode = bprm->file->f_dentry->d_inode;
	int retval;

	mode = inode->i_mode;
	/*
	 * Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
	 * vfs_permission lets a non-executable through
	 */
	if (!(mode & 0111))	/* with at least _one_ execute bit set */
		return -EACCES;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	bprm->e_uid = current->euid;
	bprm->e_gid = current->egid;

	if (!(bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)) {
		/* Set-uid? */
		if (mode & S_ISUID)
			bprm->e_uid = inode->i_uid;

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
			bprm->e_gid = inode->i_gid;
	}

	/* fill in binprm security blob */
	retval = security_bprm_set(bprm);
	if (retval)
		return retval;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);
/*
 * This function is used to produce the new IDs and capabilities
 * from the old ones and the file's capabilities.
 *
 * The formula used for evolving capabilities is:
 *
 *       pI' = pI
 * (***) pP' = (fP & X) | (fI & pI)
 *       pE' = pP' & fE          [NB. fE is 0 or ~0]
 *
 * I=Inheritable, P=Permitted, E=Effective // p=process, f=file
 * ' indicates post-exec(), and X is the global 'cap_bset'.
 *
 */

void compute_creds(struct linux_binprm *bprm)
{
	task_lock(current);
	if (bprm->e_uid != current->uid || bprm->e_gid != current->gid) {
		current->mm->dumpable = 0;

		if (must_not_trace_exec(current)
		    || atomic_read(&current->fs->count) > 1
		    || atomic_read(&current->files->count) > 1
		    || atomic_read(&current->sighand->count) > 1) {
			if (!capable(CAP_SETUID)) {
				bprm->e_uid = current->uid;
				bprm->e_gid = current->gid;
			}
		}
	}

	current->suid = current->euid = current->fsuid = bprm->e_uid;
	current->sgid = current->egid = current->fsgid = bprm->e_gid;

	task_unlock(current);

	security_bprm_compute_creds(bprm);
}

EXPORT_SYMBOL(compute_creds);
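
/*
 * Worked instance of the (***) formula above, with hypothetical file
 * capabilities (in this kernel they would come from a security module,
 * not from the filesystem):
 *
 *	X  = ~0                    (full cap_bset)
 *	pI = CAP_NET_RAW
 *	fP = CAP_NET_BIND_SERVICE, fI = 0, fE = ~0
 *
 *	pP' = (fP & X) | (fI & pI) = CAP_NET_BIND_SERVICE
 *	pE' = pP' & fE             = CAP_NET_BIND_SERVICE
 *
 * i.e. the exec'd image gains only the file's permitted bits, masked by
 * the global bounding set; the process's own inheritable bits matter
 * only where the file also inherits them.
 */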
void remove_arg_zero(struct linux_binprm *bprm)
{
	if (bprm->argc) {
		unsigned long offset;
		char * kaddr;
		struct page *page;

		offset = bprm->p % PAGE_SIZE;
		goto inside;

		while (bprm->p++, *(kaddr+offset++)) {
			if (offset != PAGE_SIZE)
				continue;
			offset = 0;
			kunmap_atomic(kaddr, KM_USER0);
inside:
			page = bprm->page[bprm->p/PAGE_SIZE];
			kaddr = kmap_atomic(page, KM_USER0);
		}
		kunmap_atomic(kaddr, KM_USER0);
		bprm->argc--;
	}
}

EXPORT_SYMBOL(remove_arg_zero);
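
/*
 * Usage sketch: the "#!" handler drops the script's own argv[0] and
 * pushes the interpreter pieces in its place, roughly as binfmt_script
 * does (abbreviated, for illustration only):
 *
 *	remove_arg_zero(bprm);
 *	retval = copy_strings_kernel(1, &bprm->interp, bprm);
 *	if (retval < 0)
 *		return retval;
 *	bprm->argc++;
 */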
/*
 * cycle the list of binary formats handler, until one recognizes the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	int try, retval = 0;
	struct linux_binfmt *fmt;
#ifdef __alpha__
	/* handle /sbin/loader.. */
	{
	    struct exec * eh = (struct exec *) bprm->buf;

	    if (!bprm->loader && eh->fh.f_magic == 0x183 &&
		(eh->fh.f_flags & 0x3000) == 0x3000)
	    {
		struct file * file;
		unsigned long loader;

		allow_write_access(bprm->file);
		fput(bprm->file);
		bprm->file = NULL;

		loader = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);

		file = open_exec("/sbin/loader");
		retval = PTR_ERR(file);
		if (IS_ERR(file))
			return retval;

		/* Remember if the application is TASO. */
		bprm->sh_bang = eh->ah.entry < 0x100000000;

		bprm->file = file;
		bprm->loader = loader;
		retval = prepare_binprm(bprm);
		if (retval < 0)
			return retval;
		/* should call search_binary_handler recursively here,
		   but it does not matter */
	    }
	}
#endif
	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	/* kernel module loader fixup */
	/* so we don't try to run modprobe in kernel space. */
	set_fs(USER_DS);
	for (try=0; try<2; try++) {
		read_lock(&binfmt_lock);
		for (fmt = formats ; fmt ; fmt = fmt->next) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			if (retval >= 0) {
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
#ifdef CONFIG_KMOD
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#endif
		}
	}
	return retval;
}

EXPORT_SYMBOL(search_binary_handler);
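
/*
 * The request_module() above turns the 16-bit word at bprm->buf[2] into
 * a module alias: a header whose bytes 2 and 3 are 0xeb 0xbe would
 * request "binfmt-beeb" on a little-endian box.  A (hypothetical)
 * out-of-tree handler could make itself demand-loadable with a
 * modprobe configuration line such as:
 *
 *	alias binfmt-beeb binfmt_example
 */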
/*
 * sys_execve() executes a new program.
 */
int do_execve(char * filename,
	char __user *__user *argv,
	char __user *__user *envp,
	struct pt_regs * regs)
{
	struct linux_binprm bprm;
	struct file *file;
	int retval;

	sched_balance_exec();

	file = open_exec(filename);

	retval = PTR_ERR(file);
	if (IS_ERR(file))
		return retval;

	bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
	memset(bprm.page, 0, MAX_ARG_PAGES*sizeof(bprm.page[0]));

	bprm.file = file;
	bprm.filename = filename;
	bprm.interp = filename;
	bprm.sh_bang = 0;
	bprm.loader = 0;
	bprm.exec = 0;
	bprm.security = NULL;
	bprm.mm = mm_alloc();
	retval = -ENOMEM;
	if (!bprm.mm)
		goto out_file;

	retval = init_new_context(current, bprm.mm);
	if (retval < 0)
		goto out_mm;

	bprm.argc = count(argv, bprm.p / sizeof(void *));
	if ((retval = bprm.argc) < 0)
		goto out_mm;

	bprm.envc = count(envp, bprm.p / sizeof(void *));
	if ((retval = bprm.envc) < 0)
		goto out_mm;

	retval = security_bprm_alloc(&bprm);
	if (retval)
		goto out;

	retval = prepare_binprm(&bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm.filename, &bprm);
	if (retval < 0)
		goto out;

	bprm.exec = bprm.p;
	retval = copy_strings(bprm.envc, envp, &bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm.argc, argv, &bprm);
	if (retval < 0)
		goto out;

	retval = search_binary_handler(&bprm, regs);
	if (retval >= 0) {
		free_arg_pages(&bprm);

		/* execve success */
		security_bprm_free(&bprm);
		return retval;
	}

out:
	/* Something went wrong, return the inode and free the argument pages*/
	free_arg_pages(&bprm);

	if (bprm.security)
		security_bprm_free(&bprm);

out_mm:
	if (bprm.mm)
		mmdrop(bprm.mm);

out_file:
	if (bprm.file) {
		allow_write_access(bprm.file);
		fput(bprm.file);
	}
	return retval;
}

EXPORT_SYMBOL(do_execve);
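
/*
 * Architectures wrap do_execve() in their own sys_execve(); the i386
 * version (arch/i386/kernel/process.c) is roughly the following, with
 * some error-path details trimmed:
 *
 *	asmlinkage int sys_execve(struct pt_regs regs)
 *	{
 *		int error;
 *		char * filename;
 *
 *		filename = getname((char __user *) regs.ebx);
 *		error = PTR_ERR(filename);
 *		if (IS_ERR(filename))
 *			goto out;
 *		error = do_execve(filename,
 *				(char __user * __user *) regs.ecx,
 *				(char __user * __user *) regs.edx,
 *				&regs);
 *		putname(filename);
 *	out:
 *		return error;
 *	}
 */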
int set_binfmt(struct linux_binfmt *new)
{
	struct linux_binfmt *old = current->binfmt;

	if (new) {
		if (!try_module_get(new->module))
			return -1;
	}
	current->binfmt = new;
	if (old)
		module_put(old->module);
	return 0;
}

EXPORT_SYMBOL(set_binfmt);
#define CORENAME_MAX_SIZE 64

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
void format_corename(char *corename, const char *pattern, long signr)
{
	const char *pat_ptr = pattern;
	char *out_ptr = corename;
	char *const out_end = corename + CORENAME_MAX_SIZE;
	int rc;
	int pid_in_pattern = 0;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (out_ptr == out_end)
				goto out;
			*out_ptr++ = *pat_ptr++;
		} else {
			switch (*++pat_ptr) {
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				if (out_ptr == out_end)
					goto out;
				*out_ptr++ = '%';
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->tgid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* uid */
			case 'u':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->uid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* gid */
			case 'g':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->gid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* signal that caused the coredump */
			case 's':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%ld", signr);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", tv.tv_sec);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", system_utsname.nodename);
				up_read(&uts_sem);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* executable */
			case 'e':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", current->comm);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			default:
				break;
			}
			++pat_ptr;
		}
	}
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename */
	if (!pid_in_pattern
	    && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
		rc = snprintf(out_ptr, out_end - out_ptr,
			      ".%d", current->tgid);
		if (rc > out_end - out_ptr)
			goto out;
		out_ptr += rc;
	}
out:
	*out_ptr = 0;
}
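
/*
 * Examples: with core_pattern = "/tmp/core-%e.%p", a crash of PID 4196
 * running as "crasher" yields "/tmp/core-crasher.4196".  With the
 * default pattern "core" and core_uses_pid set (or a multi-threaded
 * mm), the backward-compatibility branch above appends the pid,
 * giving "core.4196".
 */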
static void zap_threads(struct mm_struct *mm)
{
	struct task_struct *g, *p;
	struct task_struct *tsk = current;
	struct completion *vfork_done = tsk->vfork_done;

	/*
	 * Make sure nobody is waiting for us to release the VM,
	 * otherwise we can deadlock when we wait on each other
	 */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	read_lock(&tasklist_lock);
	do_each_thread(g,p)
		if (mm == p->mm && p != tsk) {
			force_sig_specific(SIGKILL, p);
			mm->core_waiters++;
		}
	while_each_thread(g,p);

	read_unlock(&tasklist_lock);
}
static void coredump_wait(struct mm_struct *mm)
{
	DECLARE_COMPLETION(startup_done);

	mm->core_waiters++; /* let other threads block */
	mm->core_startup_done = &startup_done;

	/* give other threads a chance to run: */
	yield();

	zap_threads(mm);
	if (--mm->core_waiters) {
		up_write(&mm->mmap_sem);
		wait_for_completion(&startup_done);
	} else
		up_write(&mm->mmap_sem);
	BUG_ON(mm->core_waiters);
}
int do_coredump(long signr, int exit_code, struct pt_regs * regs)
{
	char corename[CORENAME_MAX_SIZE + 1];
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	struct inode * inode;
	struct file * file;
	int retval = 0;

	lock_kernel();
	binfmt = current->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	down_write(&mm->mmap_sem);
	if (!mm->dumpable) {
		up_write(&mm->mmap_sem);
		goto fail;
	}
	mm->dumpable = 0;
	init_completion(&mm->core_done);
	current->signal->group_exit = 1;
	current->signal->group_exit_code = exit_code;
	coredump_wait(mm);

	if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
		goto fail_unlock;

	format_corename(corename, core_pattern, signr);
	file = filp_open(corename, O_CREAT | 2 | O_NOFOLLOW, 0600);
	if (IS_ERR(file))
		goto fail_unlock;
	inode = file->f_dentry->d_inode;
	if (inode->i_nlink > 1)
		goto close_fail;	/* multiple links - don't dump */
	if (d_unhashed(file->f_dentry))
		goto close_fail;

	if (!S_ISREG(inode->i_mode))
		goto close_fail;
	if (!file->f_op)
		goto close_fail;
	if (!file->f_op->write)
		goto close_fail;
	if (do_truncate(file->f_dentry, 0) != 0)
		goto close_fail;

	retval = binfmt->core_dump(signr, regs, file);

	if (retval)
		current->signal->group_exit_code |= 0x80;
close_fail:
	filp_close(file, NULL);
fail_unlock:
	complete_all(&mm->core_done);
fail:
	unlock_kernel();
	return retval;
}