/*
 *  linux/fs/exec.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#define __NO_VERSION__
#include <linux/module.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
static struct linux_binfmt *formats;
static rwlock_t binfmt_lock = RW_LOCK_UNLOCKED;
int register_binfmt(struct linux_binfmt * fmt)
{
	struct linux_binfmt ** tmp = &formats;

	if (!fmt)
		return -EINVAL;
	if (fmt->next)
		return -EBUSY;
	write_lock(&binfmt_lock);
	while (*tmp) {
		if (fmt == *tmp) {
			write_unlock(&binfmt_lock);
			return -EBUSY;
		}
		tmp = &(*tmp)->next;
	}
	fmt->next = formats;
	formats = fmt;
	write_unlock(&binfmt_lock);
	return 0;
}
int unregister_binfmt(struct linux_binfmt * fmt)
{
	struct linux_binfmt ** tmp = &formats;

	write_lock(&binfmt_lock);
	while (*tmp) {
		if (fmt == *tmp) {
			*tmp = fmt->next;
			write_unlock(&binfmt_lock);
			return 0;
		}
		tmp = &(*tmp)->next;
	}
	write_unlock(&binfmt_lock);
	return -EINVAL;
}
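
/*
 * Editorial sketch (not part of the original file): a binary format
 * handler registers itself through the interface above, usually from its
 * module init code.  The "foo" names below are hypothetical; binfmt_aout,
 * binfmt_elf and binfmt_misc follow this pattern.  The handler's
 * load_foo_binary() is what search_binary_handler() further down will
 * call; it is expected to return 0 (or a positive value) on success and
 * -ENOEXEC if the image is not its format.
 *
 *	static int load_foo_binary(struct linux_binprm *bprm,
 *				   struct pt_regs *regs);
 *
 *	static struct linux_binfmt foo_format = {
 *		NULL, THIS_MODULE, load_foo_binary, NULL, NULL, 0
 *	};
 *
 *	static int __init init_foo_binfmt(void)
 *	{
 *		return register_binfmt(&foo_format);
 *	}
 *
 *	static void __exit exit_foo_binfmt(void)
 *	{
 *		unregister_binfmt(&foo_format);
 *	}
 *
 *	module_init(init_foo_binfmt);
 *	module_exit(exit_foo_binfmt);
 */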
static inline void put_binfmt(struct linux_binfmt * fmt)
{
	if (fmt->module)
		__MOD_DEC_USE_COUNT(fmt->module);
}
/*
 * Note that a shared library must be both readable and executable for
 * security reasons.
 *
 * Also note that the address to load from is taken from the file itself.
 */
asmlinkage long sys_uselib(const char * library)
{
	struct file * file;
	struct nameidata nd;
	int error;

	error = user_path_walk(library, &nd);
	if (error)
		goto out;

	error = -EINVAL;
	if (!S_ISREG(nd.dentry->d_inode->i_mode))
		goto exit;

	error = permission(nd.dentry->d_inode, MAY_READ | MAY_EXEC);
	if (error)
		goto exit;

	file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -ENOEXEC;
	if (file->f_op && file->f_op->read) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		for (fmt = formats ; fmt ; fmt = fmt->next) {
			if (!fmt->load_shlib)
				continue;
			if (!try_inc_mod_count(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
	fput(file);
out:
	return error;
exit:
	path_release(&nd);
	goto out;
}
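
/*
 * Editorial usage note (not part of the original file): uselib() is a
 * Linux-specific system call whose historical consumer was the a.out
 * shared-library support in libc.  A userspace caller would reach the
 * function above roughly as (library path hypothetical):
 *
 *	syscall(__NR_uselib, "/lib/libfoo.so.1");
 */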
/*
 * count() counts the number of arguments/envelopes
 */
static int count(char ** argv, int max)
{
	int i = 0;

	if (argv != NULL) {
		for (;;) {
			char * p;
			int error;

			error = get_user(p,argv);
			if (error)
				return error;
			if (!p)
				break;
			argv++;
			if (++i > max)
				return -E2BIG;
		}
	}
	return i;
}
/*
 * 'copy_strings()' copies argument/envelope strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 */
int copy_strings(int argc, char ** argv, struct linux_binprm *bprm)
{
	while (argc-- > 0) {
		char *str;
		int len;
		unsigned long pos;

		if (get_user(str, argv+argc) || !str || !(len = strnlen_user(str, bprm->p)))
			return -EFAULT;
		if (bprm->p < len)
			return -E2BIG;

		bprm->p -= len;
		/* XXX: add architecture specific overflow check here. */

		pos = bprm->p;
		while (len > 0) {
			char *kaddr;
			int i, new, err;
			struct page *page;
			int offset, bytes_to_copy;

			offset = pos % PAGE_SIZE;
			i = pos/PAGE_SIZE;
			page = bprm->page[i];
			new = 0;
			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				bprm->page[i] = page;
				if (!page)
					return -ENOMEM;
				new = 1;
			}
			kaddr = kmap(page);

			if (new && offset)
				memset(kaddr, 0, offset);
			bytes_to_copy = PAGE_SIZE - offset;
			if (bytes_to_copy > len) {
				bytes_to_copy = len;
				if (new)
					memset(kaddr+offset+len, 0, PAGE_SIZE-offset-len);
			}
			err = copy_from_user(kaddr + offset, str, bytes_to_copy);
			kunmap(page);

			if (err)
				return -EFAULT;

			pos += bytes_to_copy;
			str += bytes_to_copy;
			len -= bytes_to_copy;
		}
	}
	return 0;
}
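
/*
 * Editorial worked example (not part of the original file), assuming
 * PAGE_SIZE == 4096 and MAX_ARG_PAGES == 32 as on i386: bprm->p starts
 * at 32*4096 - sizeof(void *) = 131068 and moves downward, and strings
 * are copied last-argument-first.  For argv = { "ls", "-l" }:
 *
 *	copy "-l\0" (3 bytes): bprm->p = 131068 - 3 = 131065
 *	copy "ls\0" (3 bytes): bprm->p = 131065 - 3 = 131062
 *
 * leaving "ls\0-l\0" packed back to back at the very top of the argument
 * pages, ready to sit at the top of the new process' stack.
 */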
/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, char ** argv, struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	set_fs(KERNEL_DS);
	r = copy_strings(argc, argv, bprm);
	set_fs(oldfs);
	return r;
}
/*
 * This routine is used to map a page into an address space: needed by
 * execve() for the initial stack and environment pages.
 */
void put_dirty_page(struct task_struct * tsk, struct page *page, unsigned long address)
{
	pgd_t * pgd;
	pmd_t * pmd;
	pte_t * pte;

	if (page_count(page) != 1)
		printk("mem_map disagrees with %p at %08lx\n", page, address);
	pgd = pgd_offset(tsk->mm, address);
	pmd = pmd_alloc(pgd, address);
	if (!pmd) {
		__free_page(page);
		force_sig(SIGKILL, tsk);
		return;
	}
	pte = pte_alloc(pmd, address);
	if (!pte) {
		__free_page(page);
		force_sig(SIGKILL, tsk);
		return;
	}
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		__free_page(page);
		return;
	}
	flush_dcache_page(page);
	flush_page_to_ram(page);
	set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(page, PAGE_COPY))));
	/* no need for flush_tlb */
}
int setup_arg_pages(struct linux_binprm *bprm)
{
	unsigned long stack_base;
	struct vm_area_struct *mpnt;
	int i;

	stack_base = STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;

	bprm->p += stack_base;
	if (bprm->loader)
		bprm->loader += stack_base;
	bprm->exec += stack_base;

	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	down(&current->mm->mmap_sem);
	{
		mpnt->vm_mm = current->mm;
		mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
		mpnt->vm_end = STACK_TOP;
		mpnt->vm_page_prot = PAGE_COPY;
		mpnt->vm_flags = VM_STACK_FLAGS;
		mpnt->vm_ops = NULL;
		mpnt->vm_pgoff = 0;
		mpnt->vm_file = NULL;
		mpnt->vm_private_data = (void *) 0;
		spin_lock(&current->mm->page_table_lock);
		insert_vm_struct(current->mm, mpnt);
		spin_unlock(&current->mm->page_table_lock);
		current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
	}

	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page *page = bprm->page[i];
		if (page) {
			bprm->page[i] = NULL;
			current->mm->rss++;
			put_dirty_page(current,page,stack_base);
		}
		stack_base += PAGE_SIZE;
	}
	up(&current->mm->mmap_sem);

	return 0;
}
struct file *open_exec(const char *name)
{
	struct nameidata nd;
	struct inode *inode;
	struct file *file;
	int err = 0;

	if (path_init(name, LOOKUP_FOLLOW|LOOKUP_POSITIVE, &nd))
		err = path_walk(name, &nd);
	file = ERR_PTR(err);
	if (!err) {
		inode = nd.dentry->d_inode;
		file = ERR_PTR(-EACCES);
		if (!IS_NOEXEC(inode) && S_ISREG(inode->i_mode)) {
			int err = permission(inode, MAY_EXEC);
			file = ERR_PTR(err);
			if (!err) {
				file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
				if (!IS_ERR(file)) {
					err = deny_write_access(file);
					if (err) {
						fput(file);
						file = ERR_PTR(err);
					}
				}
out:
				return file;
			}
		}
		path_release(&nd);
	}
	goto out;
}
int kernel_read(struct file *file, unsigned long offset,
	char * addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result = -ENOSYS;

	if (!file->f_op->read)
		goto fail;
	old_fs = get_fs();
	set_fs(get_ds());
	result = file->f_op->read(file, addr, count, &pos);
	set_fs(old_fs);
fail:
	return result;
}
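
/*
 * Editorial usage sketch (not part of the original file): binary format
 * handlers call kernel_read() to fetch parts of the image beyond the
 * BINPRM_BUF_SIZE bytes that prepare_binprm() leaves in bprm->buf, e.g.
 * (buffer and offset names hypothetical):
 *
 *	retval = kernel_read(bprm->file, phdr_offset, (char *)phdata, size);
 *	if (retval < 0)
 *		goto bad_binary;
 */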
static int exec_mmap(void)
{
	struct mm_struct * mm, * old_mm;

	old_mm = current->mm;
	if (old_mm && atomic_read(&old_mm->mm_users) == 1) {
		flush_cache_mm(old_mm);
		mm_release();
		exit_mmap(old_mm);
		flush_tlb_mm(old_mm);
		return 0;
	}

	mm = mm_alloc();
	if (mm) {
		struct mm_struct *active_mm = current->active_mm;

		if (init_new_context(current, mm)) {
			mmdrop(mm);
			return -ENOMEM;
		}
		task_lock(current);
		current->mm = mm;
		current->active_mm = mm;
		task_unlock(current);
		activate_mm(active_mm, mm);
		mm_release();
		if (old_mm) {
			if (active_mm != old_mm) BUG();
			mmput(old_mm);
			return 0;
		}
		mmdrop(active_mm);
		return 0;
	}
	return -ENOMEM;
}
/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes. (Other processes might share the signal
 * table via the CLONE_SIGNAL option to clone().)
 */
static inline int make_private_signals(void)
{
	struct signal_struct * newsig;

	if (atomic_read(&current->sig->count) <= 1)
		return 0;
	newsig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
	if (newsig == NULL)
		return -ENOMEM;
	spin_lock_init(&newsig->siglock);
	atomic_set(&newsig->count, 1);
	memcpy(newsig->action, current->sig->action, sizeof(newsig->action));
	spin_lock_irq(&current->sigmask_lock);
	current->sig = newsig;
	spin_unlock_irq(&current->sigmask_lock);
	return 0;
}
/*
 * If make_private_signals() made a copy of the signal table, decrement the
 * refcount of the original table, and free it if necessary.
 * We don't do that in make_private_signals() so that we can back off
 * in flush_old_exec() if an error occurs after calling make_private_signals().
 */
static inline void release_old_signals(struct signal_struct * oldsig)
{
	if (current->sig == oldsig)
		return;
	if (atomic_dec_and_test(&oldsig->count))
		kmem_cache_free(sigact_cachep, oldsig);
}
/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started.
 */
static inline void flush_old_files(struct files_struct * files)
{
	long j = -1;

	write_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		if (i >= files->max_fds || i >= files->max_fdset)
			break;
		set = files->close_on_exec->fds_bits[j];
		if (!set)
			continue;
		files->close_on_exec->fds_bits[j] = 0;
		write_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1) {
				sys_close(i);
			}
		}
		write_lock(&files->file_lock);
	}
	write_unlock(&files->file_lock);
}
/*
 * An execve() will automatically "de-thread" the process.
 * Note: we don't have to hold the tasklist_lock to test
 * whether we might need to do this. If we're not part of
 * a thread group, there is no way we can become one
 * dynamically. And if we are, we only need to protect the
 * unlink - even if we race with the last other thread exit,
 * at worst the list_del_init() might end up being a no-op.
 */
static inline void de_thread(struct task_struct *tsk)
{
	if (!list_empty(&tsk->thread_group)) {
		write_lock_irq(&tasklist_lock);
		list_del_init(&tsk->thread_group);
		write_unlock_irq(&tasklist_lock);
	}

	/* Minor oddity: this might stay the same. */
	tsk->tgid = tsk->pid;
}
int flush_old_exec(struct linux_binprm * bprm)
{
	char * name;
	int i, ch, retval;
	struct signal_struct * oldsig;

	/*
	 * Make sure we have a private signal table
	 */
	oldsig = current->sig;
	retval = make_private_signals();
	if (retval) goto flush_failed;

	/*
	 * Release all of the old mmap stuff
	 */
	retval = exec_mmap();
	if (retval) goto mmap_failed;

	/* This is the point of no return */
	release_old_signals(oldsig);

	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current->euid == current->uid && current->egid == current->gid)
		current->dumpable = 1;
	name = bprm->filename;
	for (i=0; (ch = *(name++)) != '\0';) {
		if (ch == '/')
			i = 0;
		else
			if (i < 15)
				current->comm[i++] = ch;
	}
	current->comm[i] = '\0';

	flush_thread();

	de_thread(current);

	if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
	    permission(bprm->file->f_dentry->d_inode,MAY_READ))
		current->dumpable = 0;

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current);
	flush_old_files(current->files);

	return 0;

mmap_failed:
flush_failed:
	spin_lock_irq(&current->sigmask_lock);
	if (current->sig != oldsig)
		kfree(current->sig);
	current->sig = oldsig;
	spin_unlock_irq(&current->sigmask_lock);
	return retval;
}
/*
 * We mustn't allow tracing of suid binaries, unless
 * the tracer has the capability to trace anything..
 */
static inline int must_not_trace_exec(struct task_struct * p)
{
	return (p->ptrace & PT_PTRACED) && !cap_raised(p->p_pptr->cap_effective, CAP_SYS_PTRACE);
}
/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	int mode;
	int id_change, cap_raised;
	struct inode * inode = bprm->file->f_dentry->d_inode;

	mode = inode->i_mode;
	/* Huh? We had already checked for MAY_EXEC, WTF do we check this? */
	if (!(mode & 0111))	/* with at least _one_ execute bit set */
		return -EACCES;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	bprm->e_uid = current->euid;
	bprm->e_gid = current->egid;
	id_change = cap_raised = 0;

	/* Set-uid? */
	if (mode & S_ISUID) {
		bprm->e_uid = inode->i_uid;
		if (bprm->e_uid != current->euid)
			id_change = 1;
	}

	/* Set-gid? */
	/*
	 * If setgid is set but no group execute bit then this
	 * is a candidate for mandatory locking, not a setgid
	 * executable.
	 */
	if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
		bprm->e_gid = inode->i_gid;
		if (!in_group_p(bprm->e_gid))
			id_change = 1;
	}

	/* We don't have VFS support for capabilities yet */
	cap_clear(bprm->cap_inheritable);
	cap_clear(bprm->cap_permitted);
	cap_clear(bprm->cap_effective);

	/* To support inheritance of root-permissions and suid-root
	 * executables under compatibility mode, we raise all three
	 * capability sets for the file.
	 *
	 * If only the real uid is 0, we only raise the inheritable
	 * and permitted sets of the executable file.
	 */

	if (!issecure(SECURE_NOROOT)) {
		if (bprm->e_uid == 0 || current->uid == 0) {
			cap_set_full(bprm->cap_inheritable);
			cap_set_full(bprm->cap_permitted);
		}
		if (bprm->e_uid == 0)
			cap_set_full(bprm->cap_effective);
	}

	/* Only if pP' is _not_ a subset of pP, do we consider there
	 * has been a capability related "change of capability". In
	 * such cases, we need to check that the elevation of
	 * privilege does not go against other system constraints.
	 * The new Permitted set is defined below -- see (***). */
	{
		kernel_cap_t permitted, working;

		permitted = cap_intersect(bprm->cap_permitted, cap_bset);
		working = cap_intersect(bprm->cap_inheritable,
					current->cap_inheritable);
		working = cap_combine(permitted, working);
		if (!cap_issubset(working, current->cap_permitted)) {
			cap_raised = 1;
		}
	}

	if (id_change || cap_raised) {
		/* We can't suid-execute if we're sharing parts of the executable */
		/* or if we're being traced (or if suid execs are not allowed)    */
		/* (current->mm->mm_users > 1 is ok, as we'll get a new mm anyway) */
		if (IS_NOSUID(inode)
		    || must_not_trace_exec(current)
		    || (atomic_read(&current->fs->count) > 1)
		    || (atomic_read(&current->sig->count) > 1)
		    || (atomic_read(&current->files->count) > 1)) {
			if (id_change && !capable(CAP_SETUID))
				return -EPERM;
			if (cap_raised && !capable(CAP_SETPCAP))
				return -EPERM;
		}
	}

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}
/*
 * This function is used to produce the new IDs and capabilities
 * from the old ones and the file's capabilities.
 *
 * The formula used for evolving capabilities is:
 *
 *        pI' = pI
 * (***)  pP' = (fP & X) | (fI & pI)
 *        pE' = pP' & fE          [NB. fE is 0 or ~0]
 *
 * I=Inheritable, P=Permitted, E=Effective // p=process, f=file
 * ' indicates post-exec(), and X is the global 'cap_bset'.
 */
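
/*
 * Editorial worked example (not part of the original file): for a
 * setuid-root executable under the !SECURE_NOROOT compatibility rules in
 * prepare_binprm() above, fI = fP = fE = ~0; taking X = cap_bset = ~0:
 *
 *        pP' = (~0 & ~0) | (~0 & pI) = ~0     (full permitted set)
 *        pE' = pP' & ~0              = ~0     (full effective set)
 *
 * For an ordinary binary run by a non-root user, fI = fP = fE = 0, so the
 * same formula gives pP' = pE' = 0: every capability is dropped across
 * the exec.
 */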
void compute_creds(struct linux_binprm *bprm)
{
	kernel_cap_t new_permitted, working;

	new_permitted = cap_intersect(bprm->cap_permitted, cap_bset);
	working = cap_intersect(bprm->cap_inheritable,
				current->cap_inheritable);
	new_permitted = cap_combine(new_permitted, working);

	/* For init, we want to retain the capabilities set
	 * in the init_task struct. Thus we skip the usual
	 * capability rules */
	if (current->pid != 1) {
		current->cap_permitted = new_permitted;
		current->cap_effective =
			cap_intersect(new_permitted, bprm->cap_effective);
	}

	/* AUD: Audit candidate if current->cap_effective is set */

	current->suid = current->euid = current->fsuid = bprm->e_uid;
	current->sgid = current->egid = current->fsgid = bprm->e_gid;
	if (current->euid != current->uid || current->egid != current->gid ||
	    !cap_issubset(new_permitted, current->cap_permitted))
		current->dumpable = 0;

	current->keep_capabilities = 0;
}
void remove_arg_zero(struct linux_binprm *bprm)
{
	if (bprm->argc) {
		unsigned long offset;
		char * kaddr;
		struct page *page;

		offset = bprm->p % PAGE_SIZE;
		goto inside;

		while (bprm->p++, *(kaddr+offset++)) {
			if (offset != PAGE_SIZE)
				continue;
			offset = 0;
			kunmap(page);
inside:
			page = bprm->page[bprm->p/PAGE_SIZE];
			kaddr = kmap(page);
		}
		kunmap(page);
		bprm->argc--;
	}
}
/*
 * Cycle through the list of binary format handlers, until one recognizes
 * the image.
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	int try, retval = 0;
	struct linux_binfmt *fmt;
#ifdef __alpha__
	/* handle /sbin/loader.. */
	{
		struct exec * eh = (struct exec *) bprm->buf;
		struct linux_binprm bprm_loader;

		if (!bprm->loader && eh->fh.f_magic == 0x183 &&
		    (eh->fh.f_flags & 0x3000) == 0x3000)
		{
			int i;
			char * dynloader[] = { "/sbin/loader" };
			struct file * file;

			allow_write_access(bprm->file);
			fput(bprm->file);
			bprm->file = NULL;

			bprm_loader.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
			for (i = 0 ; i < MAX_ARG_PAGES ; i++)	/* clear page-table */
				bprm_loader.page[i] = NULL;

			file = open_exec(dynloader[0]);
			retval = PTR_ERR(file);
			if (IS_ERR(file))
				return retval;
			bprm->file = file;
			bprm->loader = bprm_loader.p;
			retval = prepare_binprm(bprm);
			if (retval < 0)
				return retval;
			/* should call search_binary_handler recursively here,
			   but it does not matter */
		}
	}
#endif
	for (try=0; try<2; try++) {
		read_lock(&binfmt_lock);
		for (fmt = formats ; fmt ; fmt = fmt->next) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_inc_mod_count(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			if (retval >= 0) {
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
		if (retval != -ENOEXEC) {
			break;
#ifdef CONFIG_KMOD
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			char modname[20];
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			sprintf(modname, "binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
			request_module(modname);
#endif
		}
	}
	return retval;
}
/*
 * sys_execve() executes a new program.
 */
int do_execve(char * filename, char ** argv, char ** envp, struct pt_regs * regs)
{
	struct linux_binprm bprm;
	struct file *file;
	int retval;
	int i;

	file = open_exec(filename);

	retval = PTR_ERR(file);
	if (IS_ERR(file))
		return retval;

	bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
	memset(bprm.page, 0, MAX_ARG_PAGES*sizeof(bprm.page[0]));

	bprm.file = file;
	bprm.filename = filename;
	bprm.sh_bang = 0;
	bprm.loader = 0;
	bprm.exec = 0;
	if ((bprm.argc = count(argv, bprm.p / sizeof(void *))) < 0) {
		allow_write_access(file);
		fput(file);
		return bprm.argc;
	}

	if ((bprm.envc = count(envp, bprm.p / sizeof(void *))) < 0) {
		allow_write_access(file);
		fput(file);
		return bprm.envc;
	}

	retval = prepare_binprm(&bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm.filename, &bprm);
	if (retval < 0)
		goto out;

	bprm.exec = bprm.p;
	retval = copy_strings(bprm.envc, envp, &bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm.argc, argv, &bprm);
	if (retval < 0)
		goto out;

	retval = search_binary_handler(&bprm, regs);
	if (retval >= 0)
		/* execve success */
		return retval;

out:
	/* Something went wrong, return the inode and free the argument pages */
	allow_write_access(bprm.file);
	if (bprm.file)
		fput(bprm.file);

	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page * page = bprm.page[i];
		if (page)
			__free_page(page);
	}

	return retval;
}
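
/*
 * Editorial sketch (not part of the original file): do_execve() is
 * reached from each architecture's sys_execve() stub.  On i386
 * (arch/i386/kernel/process.c) the wrapper looks roughly like this:
 *
 *	asmlinkage int sys_execve(struct pt_regs regs)
 *	{
 *		int error;
 *		char * filename;
 *
 *		filename = getname((char *) regs.ebx);
 *		error = PTR_ERR(filename);
 *		if (IS_ERR(filename))
 *			goto out;
 *		error = do_execve(filename, (char **) regs.ecx,
 *				  (char **) regs.edx, &regs);
 *		if (error == 0)
 *			current->ptrace &= ~PT_DTRACE;
 *		putname(filename);
 *	out:
 *		return error;
 *	}
 */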
void set_binfmt(struct linux_binfmt *new)
{
	struct linux_binfmt *old = current->binfmt;
	if (new && new->module)
		__MOD_INC_USE_COUNT(new->module);
	current->binfmt = new;
	if (old && old->module)
		__MOD_DEC_USE_COUNT(old->module);
}
int do_coredump(long signr, struct pt_regs * regs)
{
	struct linux_binfmt * binfmt;
	char corename[6+sizeof(current->comm)];
	struct file * file;
	struct inode * inode;

	lock_kernel();
	binfmt = current->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (!current->dumpable || atomic_read(&current->mm->mm_users) != 1)
		goto fail;
	current->dumpable = 0;
	if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
		goto fail;

	memcpy(corename, "core.", 5);
#if 0
	memcpy(corename+5, current->comm, sizeof(current->comm));
#else
	corename[4] = '\0';
#endif
	/* 2 == O_RDWR */
	file = filp_open(corename, O_CREAT | 2 | O_TRUNC | O_NOFOLLOW, 0600);
	if (IS_ERR(file))
		goto fail;
	inode = file->f_dentry->d_inode;
	if (inode->i_nlink > 1)
		goto close_fail;	/* multiple links - don't dump */

	if (!S_ISREG(inode->i_mode))
		goto close_fail;
	if (!file->f_op)
		goto close_fail;
	if (!file->f_op->write)
		goto close_fail;
	if (!binfmt->core_dump(signr, regs, file))
		goto close_fail;
	unlock_kernel();
	filp_close(file, NULL);
	return 1;

close_fail:
	filp_close(file, NULL);
fail:
	unlock_kernel();
	return 0;
}