/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/namespace.h>
#include <linux/personality.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/security.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/ptrace.h>
#include <linux/mount.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
extern void exit_sem(struct task_struct *tsk);
/*
 * The idle threads do not count..
 * Protected by write_lock_irq(&tasklist_lock)
 */
int nr_threads;

int max_threads;

unsigned long total_forks;	/* Handle normal Linux uptimes. */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;  /* outer */
/*
 * A per-CPU task cache - this relies on the fact that
 * the very last portion of sys_exit() is executed with
 * preemption turned off.
 */
static task_t *task_cache[NR_CPUS] __cacheline_aligned;

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		total += per_cpu(process_counts, cpu);
	}
	return total;
}
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct()	kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk)	kmem_cache_free(task_struct_cachep, (tsk))
static kmem_cache_t *task_struct_cachep;
#endif

static void free_task(struct task_struct *tsk)
{
	/*
	 * The task cache is effectively disabled right now.
	 * Do we want it? The slab cache already has per-cpu
	 * stuff, but the thread info (usually an order-1 page
	 * allocation) doesn't.
	 */
	if (tsk != current) {
		free_thread_info(tsk->thread_info);
		free_task_struct(tsk);
	} else {
		int cpu = get_cpu();

		tsk = task_cache[cpu];
		if (tsk) {
			free_thread_info(tsk->thread_info);
			free_task_struct(tsk);
		}
		task_cache[cpu] = current;
		put_cpu();
	}
}
void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!(tsk->state & (TASK_DEAD | TASK_ZOMBIE)));
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	security_task_free(tsk);
	free_uid(tsk->user);
	free_task(tsk);
}
void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_tail(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__remove_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	__set_current_state(state);
	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	__set_current_state(state);
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue_tail(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	if (!list_empty(&wait->task_list)) {
		spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync)
{
	int ret = default_wake_function(wait, mode, sync);

	if (ret)
		list_del_init(&wait->task_list);
	return ret;
}
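/*
 * Illustrative sketch only (not part of the original file): the typical
 * sleep/wake loop a caller builds on prepare_to_wait()/finish_wait() above.
 * The wait queue head, the `example_ready' flag and the helper name are
 * hypothetical; DEFINE_WAIT() is assumed to initialize a wait_queue_t whose
 * wake function is autoremove_wake_function().
 */
#if 0	/* example, not compiled */
static DECLARE_WAIT_QUEUE_HEAD(example_waitq);
static int example_ready;

static void example_wait_until_ready(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		/* queue ourselves and mark the task as sleeping */
		prepare_to_wait(&example_waitq, &wait, TASK_UNINTERRUPTIBLE);
		if (example_ready)
			break;
		schedule();
	}
	/* back to TASK_RUNNING and off the queue in one call */
	finish_wait(&example_waitq, &wait);
}
#endif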
void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct",
				  sizeof(struct task_struct), 0,
				  SLAB_MUST_HWCACHE_ALIGN, NULL, NULL);
	if (!task_struct_cachep)
		panic("fork_init(): cannot create task_struct SLAB cache");
#endif

	/*
	 * The default maximum number of threads is set to a safe
	 * value: the thread structures can take up at most half
	 * of memory.
	 */
	max_threads = mempages / (THREAD_SIZE/PAGE_SIZE) / 8;
	/*
	 * we need to allow at least 20 threads to boot a system
	 */
	if (max_threads < 20)
		max_threads = 20;

	init_task.rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
}
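/*
 * Worked example of the sizing above (illustrative numbers only): with
 * 4 KB pages and 8 KB kernel stacks, THREAD_SIZE/PAGE_SIZE == 2.  On a
 * 128 MB machine mempages == 32768, so max_threads comes out as
 * 32768 / 2 / 8 == 2048; those 2048 stacks would occupy 16 MB, an eighth
 * of RAM, and init's RLIMIT_NPROC is set to half of that (1024).
 */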
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	int cpu = get_cpu();

	prepare_to_copy(orig);

	tsk = task_cache[cpu];
	task_cache[cpu] = NULL;
	put_cpu();
	if (!tsk) {
		tsk = alloc_task_struct();
		if (!tsk)
			return NULL;

		ti = alloc_thread_info(tsk);
		if (!ti) {
			free_task_struct(tsk);
			return NULL;
		}
	} else
		ti = tsk->thread_info;

	*ti = *orig->thread_info;
	*tsk = *orig;
	tsk->thread_info = ti;
	ti->task = tsk;

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_set(&tsk->usage, 2);
	return tsk;
}
#ifdef CONFIG_MMU
static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
{
	struct vm_area_struct * mpnt, *tmp, **pprev;
	int retval;
	unsigned long charge = 0;

	down_write(&oldmm->mmap_sem);
	flush_cache_mm(current->mm);
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	pprev = &mm->mmap;

	/*
	 * Add it to the mmlist after the parent.
	 * Doing it this way means that we can order the list,
	 * and fork() won't mess up the ordering significantly.
	 * Add it first so that swapoff can see any swap entries.
	 */
	spin_lock(&mmlist_lock);
	list_add(&mm->mmlist, &current->mm->mmlist);
	spin_unlock(&mmlist_lock);

	for (mpnt = current->mm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY)
			continue;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (!vm_enough_memory(len))
				goto fail_nomem;
			charge += len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		file = tmp->vm_file;
		INIT_LIST_HEAD(&tmp->shared);
		if (file) {
			struct inode *inode = file->f_dentry->d_inode;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);

			/* insert tmp into the share list, just after mpnt */
			down(&inode->i_mapping->i_shared_sem);
			list_add_tail(&tmp->shared, &mpnt->shared);
			up(&inode->i_mapping->i_shared_sem);
		}

		/*
		 * Link in the new vma and copy the page table entries:
		 * link in first so that swapoff can see swap entries.
		 */
		spin_lock(&mm->page_table_lock);
		*pprev = tmp;
		pprev = &tmp->vm_next;
		retval = copy_page_range(mm, current->mm, tmp);
		spin_unlock(&mm->page_table_lock);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto fail;
	}
	retval = 0;

out:
	flush_tlb_mm(current->mm);
	up_write(&oldmm->mmap_sem);
	return retval;
fail_nomem:
	retval = -ENOMEM;
fail:
	vm_unacct_memory(charge);
	goto out;
}
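/*
 * Worked example of the VM_ACCOUNT bookkeeping above (numbers are
 * illustrative): if the parent has two accountable VMAs of 512 KB each on
 * 4 KB pages, each pass charges len == 128 pages via vm_enough_memory(),
 * so charge accumulates to 256.  Should the copy fail part-way through,
 * the single vm_unacct_memory(charge) call on the failure path hands all
 * 256 pages of commitment back in one go.
 */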
static inline int mm_alloc_pgd(struct mm_struct * mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct * mm)
{
	pgd_free(mm->pgd);
}

#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */
spinlock_t mmlist_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

#include <linux/init_task.h>
static struct mm_struct * mm_init(struct mm_struct * mm)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	mm->core_waiters = 0;
	mm->page_table_lock = SPIN_LOCK_UNLOCKED;
	mm->ioctx_list_lock = RW_LOCK_UNLOCKED;
	mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
	mm->free_area_cache = TASK_UNMAPPED_BASE;

	if (likely(!mm_alloc_pgd(mm))) {
		mm->def_flags = 0;
		return mm;
	}
	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct * mm_alloc(void)
{
	struct mm_struct * mm;

	mm = allocate_mm();
	if (mm) {
		memset(mm, 0, sizeof(*mm));
		mm = mm_init(mm);
	}
	return mm;
}
/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
inline void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	free_mm(mm);
}
/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	if (atomic_dec_and_lock(&mm->mm_users, &mmlist_lock)) {
		list_del(&mm->mmlist);
		spin_unlock(&mmlist_lock);
		exit_mmap(mm);
		mmdrop(mm);
	}
}
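/*
 * Illustrative note (not part of the original file): mm_users counts
 * "real" users of the address space and is dropped with mmput(), while
 * mm_count counts references to the struct itself (e.g. a lazy-TLB kernel
 * thread that borrowed the mm) and is dropped with mmdrop().  A sketch of
 * the two pairings, assuming a hypothetical borrower:
 */
#if 0	/* example, not compiled */
	/* user reference: keeps the mappings alive */
	atomic_inc(&mm->mm_users);
	/* ... use the address space ... */
	mmput(mm);

	/* struct reference only: the mappings may already be torn down */
	atomic_inc(&mm->mm_count);
	/* ... lazy-TLB borrow ... */
	mmdrop(mm);
#endif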
/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	struct completion *vfork_done = tsk->vfork_done;

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/* notify parent sleeping on vfork() */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}
	if (tsk->clear_child_tid && atomic_read(&mm->mm_users) > 1) {
		u32 __user * tidptr = tsk->clear_child_tid;
		tsk->clear_child_tid = NULL;

		/*
		 * We don't check the error code - if userspace has
		 * not set up a proper pointer then tough luck.
		 */
		put_user(0, tidptr);
		sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL);
	}
}
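/*
 * Illustrative counterpart in userspace (not part of this file; the helper
 * below is hypothetical): a thread library that passed CLONE_CHILD_CLEARTID
 * with &tid to clone() can "join" the child by waiting on that same word,
 * because the code above zeroes it and issues a FUTEX_WAKE when the child
 * stops using the mm.
 */
#if 0	/* example, userspace pseudo-C, not compiled */
volatile int tid;	/* filled in via clone() with CLONE_CHILD_SETTID */

void join_child(void)
{
	while (tid != 0)
		syscall(SYS_futex, &tid, FUTEX_WAIT, tid, NULL, NULL, 0);
}
#endif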
static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
	struct mm_struct * mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->cmin_flt = tsk->cmaj_flt = 0;
	tsk->nswap = tsk->cnswap = 0;

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		/*
		 * There are cases where the PTL is held to ensure no
		 * new threads start up in user mode using an mm, which
		 * allows optimizing out ipis; the tlb_gather_mmu code
		 * is an example.
		 */
		spin_unlock_wait(&oldmm->page_table_lock);
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	/* Copy the current MM stuff.. */
	memcpy(mm, oldmm, sizeof(*mm));
	if (!mm_init(mm))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	retval = dup_mmap(mm, oldmm);
	if (retval)
		goto free_pt;

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

free_pt:
	mmput(mm);
fail_nomem:
	return retval;
fail_nocontext:
	mm_free_pgd(mm);
	free_mm(mm);
	return retval;
}
static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-) */
	if (fs) {
		atomic_set(&fs->count, 1);
		fs->lock = RW_LOCK_UNLOCKED;
		fs->umask = old->umask;
		read_lock(&old->lock);
		fs->rootmnt = mntget(old->rootmnt);
		fs->root = dget(old->root);
		fs->pwdmnt = mntget(old->pwdmnt);
		fs->pwd = dget(old->pwd);
		if (old->altroot) {
			fs->altrootmnt = mntget(old->altrootmnt);
			fs->altroot = dget(old->altroot);
		} else {
			fs->altrootmnt = NULL;
			fs->altroot = NULL;
		}
		read_unlock(&old->lock);
	}
	return fs;
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	return __copy_fs_struct(old);
}
static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
{
	if (clone_flags & CLONE_FS) {
		atomic_inc(&current->fs->count);
		return 0;
	}
	tsk->fs = __copy_fs_struct(current->fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}
static int count_open_files(struct files_struct *files, int size)
{
	int i;

	/* Find the last open fd */
	for (i = size/(8*sizeof(long)); i > 0; ) {
		if (files->open_fds->fds_bits[--i])
			break;
	}
	i = (i+1) * 8 * sizeof(long);
	return i;
}
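/*
 * Worked example for the scan above (illustrative numbers): with 64-bit
 * longs and size == 1024, the loop starts at i == 16 and walks the open-fd
 * bitmap downwards one long at a time.  If the highest word containing a
 * set bit is fds_bits[2], the loop stops with i == 2 and the function
 * returns (2+1) * 8 * sizeof(long) == 192, i.e. the open-file count is
 * rounded up to a whole word of 64 descriptors.
 */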
static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
	struct files_struct *oldf, *newf;
	struct file **old_fds, **new_fds;
	int open_files, nfds, size, i, error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	tsk->files = NULL;
	error = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	newf->file_lock	    = SPIN_LOCK_UNLOCKED;
	newf->max_fds	    = NR_OPEN_DEFAULT;
	newf->max_fdset	    = __FD_SETSIZE;
	newf->close_on_exec = &newf->close_on_exec_init;
	newf->open_fds	    = &newf->open_fds_init;
	newf->fd	    = &newf->fd_array[0];

	/* We don't yet have the oldf readlock, but even if the old
	   fdset gets grown now, we'll only copy up to "size" fds */
	size = oldf->max_fdset;
	if (size > __FD_SETSIZE) {
		spin_lock(&newf->file_lock);
		error = expand_fdset(newf, size-1);
		spin_unlock(&newf->file_lock);
		if (error)
			goto out_release;
	}
	spin_lock(&oldf->file_lock);

	open_files = count_open_files(oldf, size);

	/*
	 * Check whether we need to allocate a larger fd array.
	 * Note: we're not a clone task, so the open count won't
	 * change.
	 */
	nfds = NR_OPEN_DEFAULT;
	if (open_files > nfds) {
		spin_unlock(&oldf->file_lock);
		spin_lock(&newf->file_lock);
		error = expand_fd_array(newf, open_files-1);
		spin_unlock(&newf->file_lock);
		if (error)
			goto out_release;
		nfds = newf->max_fds;
		spin_lock(&oldf->file_lock);
	}

	old_fds = oldf->fd;
	new_fds = newf->fd;

	memcpy(newf->open_fds->fds_bits, oldf->open_fds->fds_bits, open_files/8);
	memcpy(newf->close_on_exec->fds_bits, oldf->close_on_exec->fds_bits, open_files/8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f)
			get_file(f);
		*new_fds++ = f;
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (newf->max_fds - open_files) * sizeof(struct file *);

	/* This is long word aligned thus could use an optimized version */
	memset(new_fds, 0, size);

	if (newf->max_fdset > open_files) {
		int left = (newf->max_fdset-open_files)/8;
		int start = open_files / (8 * sizeof(unsigned long));

		memset(&newf->open_fds->fds_bits[start], 0, left);
		memset(&newf->close_on_exec->fds_bits[start], 0, left);
	}

	tsk->files = newf;
	error = 0;
out:
	return error;

out_release:
	free_fdset (newf->close_on_exec, newf->max_fdset);
	free_fdset (newf->open_fds, newf->max_fdset);
	kmem_cache_free(files_cachep, newf);
	goto out;
}
static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	tsk->sighand = sig;
	if (!sig)
		return -ENOMEM;
	spin_lock_init(&sig->siglock);
	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}
static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		return 0;
	}
	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;
	atomic_set(&sig->count, 1);
	sig->group_exit = 0;
	sig->group_exit_code = 0;
	sig->group_exit_task = NULL;
	sig->group_stop_count = 0;
	sig->curr_target = NULL;
	init_sigpending(&sig->shared_pending);

	return 0;
}
static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long new_flags = p->flags;

	new_flags &= ~PF_SUPERPRIV;
	new_flags |= PF_FORKNOEXEC;
	if (!(clone_flags & CLONE_PTRACE))
		p->ptrace = 0;
	p->flags = new_flags;
}
asmlinkage long sys_set_tid_address(int __user *tidptr)
{
	current->clear_child_tid = tidptr;

	return current->pid;
}
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
struct task_struct *copy_process(unsigned long clone_flags,
				 unsigned long stack_start,
				 struct pt_regs *regs,
				 unsigned long stack_size,
				 int __user *parent_tidptr,
				 int __user *child_tidptr)
{
	int retval;
	struct task_struct *p = NULL;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);
	if ((clone_flags & CLONE_DETACHED) && !(clone_flags & CLONE_THREAD))
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	retval = -EAGAIN;
	if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
			goto bad_fork_free;
	}

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(p->thread_info->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

#ifdef CONFIG_PREEMPT
	/*
	 * schedule_tail drops this_rq()->lock so we compensate with a count
	 * of 1. Also, we want to start with kernel preemption disabled.
	 */
	p->thread_info->preempt_count = 1;
#endif
	p->state = TASK_UNINTERRUPTIBLE;

	copy_flags(clone_flags, p);
	if (clone_flags & CLONE_IDLETASK)
		p->pid = 0;
	else {
		p->pid = alloc_pidmap();
		if (p->pid == -1)
			goto bad_fork_cleanup;
	}
	if (clone_flags & CLONE_PARENT_SETTID)
		if (put_user(p->pid, parent_tidptr))
			goto bad_fork_cleanup;

	p->proc_dentry = NULL;

	INIT_LIST_HEAD(&p->run_list);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	INIT_LIST_HEAD(&p->posix_timers);
	init_waitqueue_head(&p->wait_chldexit);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);
	spin_lock_init(&p->switch_lock);
	spin_lock_init(&p->proc_lock);

	clear_tsk_thread_flag(p, TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
	p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
	init_timer(&p->real_timer);
	p->real_timer.data = (unsigned long) p;

	p->leader = 0;		/* session leadership doesn't inherit */
	p->utime = p->stime = 0;
	p->cutime = p->cstime = 0;
	p->lock_depth = -1;	/* -1 = no lock */
	p->start_time = get_jiffies_64();

	if ((retval = security_task_alloc(p)))
		goto bad_fork_cleanup;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_security;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_namespace(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespace;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);

	/* Our parent execution domain becomes current domain
	   These must match for thread signalling to apply */
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	if (clone_flags & CLONE_DETACHED)
		p->exit_signal = -1;
	else
		p->exit_signal = clone_flags & CSIGNAL;
	p->pdeath_signal = 0;

	/*
	 * Share the timeslice between parent and child, thus the
	 * total amount of pending timeslices in the system doesn't change,
	 * resulting in more scheduling fairness.
	 */
	p->time_slice = (current->time_slice + 1) >> 1;
	/*
	 * The remainder of the first timeslice might be recovered by
	 * the parent if the child exits early enough.
	 */
	p->first_time_slice = 1;
	current->time_slice >>= 1;
	p->last_run = jiffies;
	if (!current->time_slice) {
		/*
		 * This case is rare, it happens when the parent has only
		 * a single jiffy left from its timeslice. Taking the
		 * runqueue lock is not a problem.
		 */
		current->time_slice = 1;
		scheduler_tick(0, 0);
	}
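	/*
	 * Worked example of the split above (illustrative numbers): if the
	 * parent enters fork with time_slice == 11, the child gets
	 * (11 + 1) >> 1 == 6 ticks and the parent keeps 11 >> 1 == 5, so
	 * the pending ticks are merely redistributed, never increased.
	 * first_time_slice lets the parent reclaim the child's share if
	 * the child exits before using it.
	 */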
	/*
	 * Ok, add it to the run-queues and make it
	 * visible to the rest of the system.
	 */
	p->tgid = p->pid;
	p->group_leader = p;
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);
	/*
	 * Check for pending SIGKILL! The new thread should not be allowed
	 * to slip out of an OOM kill. (or normal SIGKILL.)
	 */
	if (sigismember(&current->pending.signal, SIGKILL)) {
		write_unlock_irq(&tasklist_lock);
		retval = -EINTR;
		goto bad_fork_cleanup_namespace;
	}

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & CLONE_PARENT)
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	if (clone_flags & CLONE_THREAD) {
		spin_lock(&current->sighand->siglock);
		/*
		 * Important: if an exit-all has been started then
		 * do not create this new thread - the whole thread
		 * group is supposed to exit anyway.
		 */
		if (current->signal->group_exit) {
			spin_unlock(&current->sighand->siglock);
			write_unlock_irq(&tasklist_lock);
			goto bad_fork_cleanup_namespace;
		}
		p->tgid = current->tgid;
		p->group_leader = current->group_leader;

		if (current->signal->group_stop_count > 0) {
			/*
			 * There is an all-stop in progress for the group.
			 * We ourselves will stop as soon as we check signals.
			 * Make the new thread part of that group stop too.
			 */
			current->signal->group_stop_count++;
			set_tsk_thread_flag(p, TIF_SIGPENDING);
		}

		spin_unlock(&current->sighand->siglock);
	}

	SET_LINKS(p);
	if (p->ptrace & PT_PTRACED)
		__ptrace_link(p, current->parent);

	attach_pid(p, PIDTYPE_PID, p->pid);
	if (thread_group_leader(p)) {
		attach_pid(p, PIDTYPE_TGID, p->tgid);
		attach_pid(p, PIDTYPE_PGID, p->pgrp);
		attach_pid(p, PIDTYPE_SID, p->session);
		__get_cpu_var(process_counts)++;
	} else
		link_pid(p, p->pids + PIDTYPE_TGID,
			 &p->group_leader->pids[PIDTYPE_TGID].pid);

	nr_threads++;
	write_unlock_irq(&tasklist_lock);
	retval = 0;
fork_out:
	if (retval)
		return ERR_PTR(retval);
	return p;

bad_fork_cleanup_namespace:
	exit_namespace(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	exit_signal(p);
bad_fork_cleanup_sighand:
	exit_sighand(p);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup:
	if (p->pid > 0)
		free_pidmap(p->pid);
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(p->thread_info->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	free_task(p);
	goto fork_out;
}
static inline int fork_traceflag (unsigned clone_flags)
{
	if (clone_flags & (CLONE_UNTRACED | CLONE_IDLETASK))
		return 0;
	else if (clone_flags & CLONE_VFORK) {
		if (current->ptrace & PT_TRACE_VFORK)
			return PTRACE_EVENT_VFORK;
	} else if ((clone_flags & CSIGNAL) != SIGCHLD) {
		if (current->ptrace & PT_TRACE_CLONE)
			return PTRACE_EVENT_CLONE;
	} else if (current->ptrace & PT_TRACE_FORK)
		return PTRACE_EVENT_FORK;

	return 0;
}
/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	     unsigned long stack_start,
	     struct pt_regs *regs,
	     unsigned long stack_size,
	     int __user *parent_tidptr,
	     int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long pid;

	if (unlikely(current->ptrace)) {
		trace = fork_traceflag (clone_flags);
		if (trace)
			clone_flags |= CLONE_PTRACE;
	}

	p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr);
	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	pid = IS_ERR(p) ? PTR_ERR(p) : p->pid;

	if (!IS_ERR(p)) {
		struct completion vfork;

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		if (p->ptrace & PT_PTRACED) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
		}

		wake_up_forked_process(p);	/* do this last */
		++total_forks;

		if (unlikely (trace)) {
			current->ptrace_message = pid;
			ptrace_notify ((trace << 8) | SIGTRAP);
		}

		if (clone_flags & CLONE_VFORK) {
			wait_for_completion(&vfork);
			if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE))
				ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
		} else
			/*
			 * Let the child process run first, to avoid most of the
			 * COW overhead when the child exec()s afterwards.
			 */
			set_need_resched();
	}
	return pid;
}
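/*
 * Illustrative sketch only (not part of this file): how an architecture's
 * fork/vfork entry points typically funnel into do_fork().  The register
 * names are i386-style and the exact stubs are per-arch assumptions, not
 * definitions made here.
 */
#if 0	/* example, not compiled */
asmlinkage int sys_fork(struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
}

asmlinkage int sys_vfork(struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs,
		       0, NULL, NULL);
}
#endif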
/* SLAB cache for signal_struct structures (tsk->signal) */
kmem_cache_t *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
kmem_cache_t *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
kmem_cache_t *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
kmem_cache_t *fs_cachep;

/* SLAB cache for vm_area_struct structures */
kmem_cache_t *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
kmem_cache_t *mm_cachep;

void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!sighand_cachep)
		panic("Cannot create sighand SLAB cache");

	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!signal_cachep)
		panic("Cannot create signal SLAB cache");

	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!files_cachep)
		panic("Cannot create files SLAB cache");

	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!fs_cachep)
		panic("Cannot create fs_struct SLAB cache");

	vm_area_cachep = kmem_cache_create("vm_area_struct",
			sizeof(struct vm_area_struct), 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!vm_area_cachep)
		panic("vma_init: Cannot alloc vm_area_struct SLAB cache");

	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!mm_cachep)
		panic("vma_init: Cannot alloc mm_struct SLAB cache");
}