/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/namespace.h>
#include <linux/security.h>
#include <linux/acct.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/mempolicy.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
extern void sem_exit (void);
extern struct task_struct *child_reaper;

int getrusage(struct task_struct *, int, struct rusage __user *);
static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	detach_pid(p, PIDTYPE_TGID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);
		if (p->pid)
			__get_cpu_var(process_counts)--;
	}

	REMOVE_LINKS(p);
}
void release_task(struct task_struct * p)
{
	int zap_leader;
	task_t *leader;
	struct dentry *proc_dentry;

repeat:
	BUG_ON(p->state < TASK_ZOMBIE);

	atomic_dec(&p->user->processes);
	spin_lock(&p->proc_lock);
	proc_dentry = proc_pid_unhash(p);
	write_lock_irq(&tasklist_lock);
	if (unlikely(p->ptrace))
		__ptrace_unlink(p);
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	__exit_signal(p);
	__exit_sighand(p);
	__unhash_process(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->state == TASK_ZOMBIE) {
		BUG_ON(leader->exit_signal == -1);
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = (leader->exit_signal == -1);
	}

	p->parent->cutime += p->utime + p->cutime;
	p->parent->cstime += p->stime + p->cstime;
	p->parent->cmin_flt += p->min_flt + p->cmin_flt;
	p->parent->cmaj_flt += p->maj_flt + p->cmaj_flt;
	p->parent->cnvcsw += p->nvcsw + p->cnvcsw;
	p->parent->cnivcsw += p->nivcsw + p->cnivcsw;
	sched_exit(p);
	write_unlock_irq(&tasklist_lock);
	spin_unlock(&p->proc_lock);
	proc_pid_flush(proc_dentry);
	release_thread(p);
	put_task_struct(p);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}
/*
 * we are using it only for SMP init
 */

void unhash_process(struct task_struct *p)
{
	struct dentry *proc_dentry;

	spin_lock(&p->proc_lock);
	proc_dentry = proc_pid_unhash(p);
	write_lock_irq(&tasklist_lock);
	__unhash_process(p);
	write_unlock_irq(&tasklist_lock);
	spin_unlock(&p->proc_lock);
	proc_pid_flush(proc_dentry);
}
/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 */
int session_of_pgrp(int pgrp)
{
	struct task_struct *p;
	struct list_head *l;
	struct pid *pid;
	int sid = -1;

	read_lock(&tasklist_lock);
	for_each_task_pid(pgrp, PIDTYPE_PGID, p, l, pid)
		if (p->signal->session > 0) {
			sid = p->signal->session;
			goto out;
		}
	p = find_task_by_pid(pgrp);
	if (p)
		sid = p->signal->session;
out:
	read_unlock(&tasklist_lock);

	return sid;
}
/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task)
{
	struct task_struct *p;
	struct list_head *l;
	struct pid *pid;
	int ret = 1;

	for_each_task_pid(pgrp, PIDTYPE_PGID, p, l, pid) {
		if (p == ignored_task
				|| p->state >= TASK_ZOMBIE
				|| p->real_parent->pid == 1)
			continue;
		if (process_group(p->real_parent) != pgrp
			    && p->real_parent->signal->session == p->signal->session) {
			ret = 0;
			break;
		}
	}
	return ret;	/* (sighing) "Often!" */
}
int is_orphaned_pgrp(int pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(pgrp, NULL);
	read_unlock(&tasklist_lock);

	return retval;
}
static inline int has_stopped_jobs(int pgrp)
{
	int retval = 0;
	struct task_struct *p;
	struct list_head *l;
	struct pid *pid;

	for_each_task_pid(pgrp, PIDTYPE_PGID, p, l, pid) {
		if (p->state != TASK_STOPPED)
			continue;

		/* If p is stopped by a debugger on a signal that won't
		   stop it, then don't count p as stopped.  This isn't
		   perfect but it's a good approximation. */
		if (unlikely (p->ptrace)
		    && p->exit_code != SIGSTOP
		    && p->exit_code != SIGTSTP
		    && p->exit_code != SIGTTOU
		    && p->exit_code != SIGTTIN)
			continue;

		retval = 1;
		break;
	}
	return retval;
}
/**
 * reparent_to_init() - Reparent the calling kernel thread to the init task.
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to init so that
 * it is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_init() gives the caller full capabilities.
 */
void reparent_to_init(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to init */
	REMOVE_LINKS(current);
	current->parent = child_reaper;
	current->real_parent = child_reaper;
	SET_LINKS(current);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	current->exit_signal = SIGCHLD;

	if ((current->policy == SCHED_NORMAL) && (task_nice(current) < 0))
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	security_task_reparent_to_init(current);
	memcpy(current->rlim, init_task.rlim, sizeof(*(current->rlim)));
	atomic_inc(&(INIT_USER->__count));
	switch_uid(INIT_USER);

	write_unlock_irq(&tasklist_lock);
}
void __set_special_pids(pid_t session, pid_t pgrp)
{
	struct task_struct *curr = current;

	if (curr->signal->session != session) {
		detach_pid(curr, PIDTYPE_SID);
		curr->signal->session = session;
		attach_pid(curr, PIDTYPE_SID, session);
	}
	if (process_group(curr) != pgrp) {
		detach_pid(curr, PIDTYPE_PGID);
		curr->signal->pgrp = pgrp;
		attach_pid(curr, PIDTYPE_PGID, pgrp);
	}
}
void set_special_pids(pid_t session, pid_t pgrp)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(session, pgrp);
	write_unlock_irq(&tasklist_lock);
}
/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
	if (sig < 1 || sig > _NSIG)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/* Kernel threads handle their own signals.
		   Let the signal code know it'll be handled, so
		   that they don't get converted to SIGKILL or
		   just silently dropped */
		current->sighand->action[(sig)-1].sa.sa_handler = (void *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);
int disallow_signal(int sig)
{
	if (sig < 1 || sig > _NSIG)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);
/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */
void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	exit_mm(current);

	set_special_pids(1, 1);
	current->signal->tty = NULL;

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);
	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_init();
}

EXPORT_SYMBOL(daemonize);
static inline void close_files(struct files_struct * files)
{
	int i, j;

	j = 0;
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= files->max_fdset || i >= files->max_fds)
			break;
		set = files->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&files->fd[i], NULL);
				if (file)
					filp_close(file, files);
			}
			i++;
			set >>= 1;
		}
	}
}
struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}
void fastcall put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 */
		if (files->fd != &files->fd_array[0])
			free_fd_array(files->fd, files->max_fds);
		if (files->max_fdset > __FD_SETSIZE) {
			free_fdset(files->open_fds, files->max_fdset);
			free_fdset(files->close_on_exec, files->max_fdset);
		}
		kmem_cache_free(files_cachep, files);
	}
}

EXPORT_SYMBOL(put_files_struct);
static inline void __exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}
void exit_files(struct task_struct *tsk)
{
	__exit_files(tsk);
}
static inline void __put_fs_struct(struct fs_struct *fs)
{
	/* No need to hold fs->lock if we are killing it */
	if (atomic_dec_and_test(&fs->count)) {
		dput(fs->root);
		mntput(fs->rootmnt);
		dput(fs->pwd);
		mntput(fs->pwdmnt);
		if (fs->altroot) {
			dput(fs->altroot);
			mntput(fs->altrootmnt);
		}
		kmem_cache_free(fs_cachep, fs);
	}
}
void put_fs_struct(struct fs_struct *fs)
{
	__put_fs_struct(fs);
}
static inline void __exit_fs(struct task_struct *tsk)
{
	struct fs_struct * fs = tsk->fs;

	if (fs) {
		task_lock(tsk);
		tsk->fs = NULL;
		task_unlock(tsk);
		__put_fs_struct(fs);
	}
}

void exit_fs(struct task_struct *tsk)
{
	__exit_fs(tsk);
}

EXPORT_SYMBOL_GPL(exit_fs);
/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static inline void __exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_waiters
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment core_waiters for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	if (mm->core_waiters) {
		up_read(&mm->mmap_sem);
		down_write(&mm->mmap_sem);
		if (!--mm->core_waiters)
			complete(mm->core_startup_done);
		up_write(&mm->mmap_sem);

		wait_for_completion(&mm->core_done);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	if (mm != tsk->active_mm) BUG();
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	task_unlock(tsk);
	mmput(mm);
}

void exit_mm(struct task_struct *tsk)
{
	__exit_mm(tsk);
}

EXPORT_SYMBOL(exit_mm);
static inline void choose_new_parent(task_t *p, task_t *reaper, task_t *child_reaper)
{
	/*
	 * Make sure we're not reparenting to ourselves and that
	 * the parent is not a zombie.
	 */
	if (p == reaper || reaper->state >= TASK_ZOMBIE)
		p->real_parent = child_reaper;
	else
		p->real_parent = reaper;
	if (p->parent == p->real_parent)
		BUG();
}
static inline void reparent_thread(task_t *p, task_t *father, int traced)
{
	/* We don't want people slaying init.  */
	if (p->exit_signal != -1)
		p->exit_signal = SIGCHLD;

	if (p->pdeath_signal)
		/* We already hold the tasklist_lock here.  */
		group_send_sig_info(p->pdeath_signal, (void *) 0, p);

	/* Move the child from its dying parent to the new one.  */
	if (unlikely(traced)) {
		/* Preserve ptrace links if someone else is tracing this child.  */
		list_del_init(&p->ptrace_list);
		if (p->parent != p->real_parent)
			list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
	} else {
		/* If this child is being traced, then we're the one tracing it
		 * anyway, so let go of it.
		 */
		p->ptrace = 0;
		list_del_init(&p->sibling);
		p->parent = p->real_parent;
		list_add_tail(&p->sibling, &p->parent->children);

		/* If we'd notified the old parent about this child's death,
		 * also notify the new parent.
		 */
		if (p->state == TASK_ZOMBIE && p->exit_signal != -1 &&
		    thread_group_empty(p))
			do_notify_parent(p, p->exit_signal);
	}

	/*
	 * process group orphan check
	 * Case ii: Our child is in a different pgrp
	 * than we are, and it was the only connection
	 * outside, so the child pgrp is now orphaned.
	 */
	if ((process_group(p) != process_group(father)) &&
	    (p->signal->session == father->signal->session)) {
		int pgrp = process_group(p);

		if (will_become_orphaned_pgrp(pgrp, NULL) && has_stopped_jobs(pgrp)) {
			__kill_pg_info(SIGHUP, (void *)1, pgrp);
			__kill_pg_info(SIGCONT, (void *)1, pgrp);
		}
	}
}
/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the global child reaper process (ie "init")
 */
static inline void forget_original_parent(struct task_struct * father)
{
	struct task_struct *p, *reaper = father;
	struct list_head *_p, *_n;

	reaper = father->group_leader;
	if (reaper == father)
		reaper = child_reaper;

	/*
	 * There are only two places where our children can be:
	 *
	 * - in our child list
	 * - in our ptraced child list
	 *
	 * Search them and reparent children.
	 */
	list_for_each_safe(_p, _n, &father->children) {
		p = list_entry(_p,struct task_struct,sibling);
		if (father == p->real_parent) {
			choose_new_parent(p, reaper, child_reaper);
			reparent_thread(p, father, 0);
		} else {
			ptrace_unlink(p);
			if (p->state == TASK_ZOMBIE && p->exit_signal != -1 &&
			    thread_group_empty(p))
				do_notify_parent(p, p->exit_signal);
		}
	}
	list_for_each_safe(_p, _n, &father->ptrace_children) {
		p = list_entry(_p,struct task_struct,ptrace_list);
		choose_new_parent(p, reaper, child_reaper);
		reparent_thread(p, father, 1);
	}
}
/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk)
{
	int state;
	struct task_struct *t;

	if (signal_pending(tsk) && !tsk->signal->group_exit
	    && !thread_group_empty(tsk)) {
		/*
		 * This occurs when there was a race between our exit
		 * syscall and a group signal choosing us as the one to
		 * wake up.  It could be that we are the only thread
		 * alerted to check for pending signals, but another thread
		 * should be woken now to take the signal since we will not.
		 * Now we'll wake all the threads in the group just to make
		 * sure someone gets all the pending signals.
		 */
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		for (t = next_thread(tsk); t != tsk; t = next_thread(t))
			if (!signal_pending(t) && !(t->flags & PF_EXITING)) {
				recalc_sigpending_tsk(t);
				if (signal_pending(t))
					signal_wake_up(t, 0);
			}
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */

	forget_original_parent(tsk);
	BUG_ON(!list_empty(&tsk->children));

	/*
	 * Check to see if any process groups have become orphaned
	 * as a result of our exiting, and if they have any stopped
	 * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 *
	 * Case i: Our father is in a different pgrp than we are
	 * and we were the only connection outside, so our pgrp
	 * is about to become orphaned.
	 */

	t = tsk->real_parent;

	if ((process_group(t) != process_group(tsk)) &&
	    (t->signal->session == tsk->signal->session) &&
	    will_become_orphaned_pgrp(process_group(tsk), tsk) &&
	    has_stopped_jobs(process_group(tsk))) {
		__kill_pg_info(SIGHUP, (void *)1, process_group(tsk));
		__kill_pg_info(SIGCONT, (void *)1, process_group(tsk));
	}

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */

	if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
	    ( tsk->parent_exec_id != t->self_exec_id  ||
	      tsk->self_exec_id != tsk->parent_exec_id)
	    && !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;

	/* If something other than our normal parent is ptracing us, then
	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
	 * only has special meaning to our real parent.
	 */
	if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
		int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
		do_notify_parent(tsk, signal);
	} else if (tsk->ptrace) {
		do_notify_parent(tsk, SIGCHLD);
	}

	state = TASK_ZOMBIE;
	if (tsk->exit_signal == -1 && tsk->ptrace == 0)
		state = TASK_DEAD;
	tsk->state = state;
	tsk->flags |= PF_DEAD;

	/*
	 * In the preemption case it must be impossible for the task
	 * to get runnable again, so use "_raw_" unlock to keep
	 * preempt_count elevated until we schedule().
	 *
	 * To avoid deadlock on SMP, interrupts must be unmasked.  If we
	 * don't, subsequently called functions (e.g, wait_task_inactive()
	 * via release_task()) will spin, with interrupt flags
	 * unwittingly blocked, until the other task sleeps.  That task
	 * may itself be waiting for smp_call_function() to answer and
	 * complete, and with interrupts blocked that will never happen.
	 */
	_raw_write_unlock(&tasklist_lock);
	local_irq_enable();

	/* If the process is dead, release it - nobody will wait for it */
	if (state == TASK_DEAD)
		release_task(tsk);
}
asmlinkage NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");
	if (unlikely(tsk->pid == 1))
		panic("Attempted to kill init!");
	if (tsk->io_context)
		exit_io_context();
	tsk->flags |= PF_EXITING;
	del_timer_sync(&tsk->real_timer);

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, current->pid,
				preempt_count());

	profile_exit_task(tsk);

	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
		current->ptrace_message = code;
		ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
	}

	acct_process(code);
	__exit_mm(tsk);

	exit_sem(tsk);
	__exit_files(tsk);
	__exit_fs(tsk);
	exit_namespace(tsk);
	exit_thread();
#ifdef CONFIG_NUMA
	mpol_free(tsk->mempolicy);
#endif

	if (tsk->signal->leader)
		disassociate_ctty(1);

	module_put(tsk->thread_info->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	tsk->exit_code = code;
	exit_notify(tsk);
	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;) ;
}

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);
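/*
 * Illustrative sketch (my_done and my_thread are made-up names): a
 * module whose cleanup must not return while its kernel thread still
 * runs lets the thread announce its own death:
 *
 *	static DECLARE_COMPLETION(my_done);
 *
 *	static int my_thread(void *unused)
 *	{
 *		daemonize("my_thread");
 *		do_work();
 *		complete_and_exit(&my_done, 0);
 *	}
 *
 * The cleanup path then does wait_for_completion(&my_done).  Because
 * complete() and do_exit() both run here, in core kernel text, the
 * thread never executes module code again once the completion fires.
 */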
asmlinkage long sys_exit(int error_code)
{
	do_exit((error_code&0xff)<<8);
}
task_t fastcall *next_thread(task_t *p)
{
	struct pid_link *link = p->pids + PIDTYPE_TGID;
	struct list_head *tmp, *head = &link->pidptr->task_list;

#ifdef CONFIG_SMP
	if (!p->sighand)
		BUG();
	if (!spin_is_locked(&p->sighand->siglock) &&
				!rwlock_is_locked(&tasklist_lock))
		BUG();
#endif
	tmp = link->pid_chain.next;
	if (tmp == head)
		tmp = head->next;

	return pid_task(tmp, PIDTYPE_TGID);
}

EXPORT_SYMBOL(next_thread);
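/*
 * Illustrative sketch (look_at() is a made-up name; the caller must
 * hold one of the locks the CONFIG_SMP check above insists on):
 * visiting every thread in p's group is the usual circular walk,
 *
 *	task_t *t = p;
 *	do {
 *		look_at(t);
 *		t = next_thread(t);
 *	} while (t != p);
 *
 * as exit_notify() above does when waking its sibling threads.
 */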
/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (current->signal->group_exit)
		exit_code = current->signal->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct signal_struct *const sig = current->signal;
		struct sighand_struct *const sighand = current->sighand;
		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);
		if (sig->group_exit)
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit = 1;
			sig->group_exit_code = exit_code;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}
/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
asmlinkage void sys_exit_group(int error_code)
{
	do_group_exit((error_code & 0xff) << 8);
}
static int eligible_child(pid_t pid, int options, task_t *p)
{
	if (pid > 0) {
		if (p->pid != pid)
			return 0;
	} else if (!pid) {
		if (process_group(p) != process_group(current))
			return 0;
	} else if (pid != -1) {
		if (process_group(p) != -pid)
			return 0;
	}

	/*
	 * Do not consider detached threads that are
	 * not ptraced:
	 */
	if (p->exit_signal == -1 && !p->ptrace)
		return 0;

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
	    && !(options & __WALL))
		return 0;

	/*
	 * Do not consider thread group leaders that are
	 * in a non-empty thread group:
	 */
	if (current->tgid != p->tgid && delay_group_leader(p))
		return 2;

	if (security_task_wait(p))
		return 0;

	return 1;
}
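/*
 * Example of the __WCLONE rule above (illustrative, from the waiter's
 * side): a child created with clone(2) and an exit signal other than
 * SIGCHLD is invisible to a plain waitpid(pid, &status, 0); its waiter
 * must pass __WCLONE (or __WALL) for eligible_child() to accept it.
 */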
/*
 * Handle sys_wait4 work for one task in state TASK_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(task_t *p, unsigned int __user *stat_addr, struct rusage __user *ru)
{
	unsigned long state;
	int retval;

	/*
	 * Try to move the task's state to DEAD
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->state, TASK_DEAD);
	if (state != TASK_ZOMBIE) {
		BUG_ON(state != TASK_DEAD);
		return 0;
	}
	if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
		/*
		 * This can only happen in a race with a ptraced thread
		 * dying on another processor.
		 */
		return 0;

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to TASK_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	if (!retval && stat_addr) {
		if (p->signal->group_exit)
			retval = put_user(p->signal->group_exit_code, stat_addr);
		else
			retval = put_user(p->exit_code, stat_addr);
	}
	if (retval) {
		p->state = TASK_ZOMBIE;
		return retval;
	}
	retval = p->pid;
	if (p->real_parent != p->parent) {
		write_lock_irq(&tasklist_lock);
		/* Double-check with lock held.  */
		if (p->real_parent != p->parent) {
			__ptrace_unlink(p);
			p->state = TASK_ZOMBIE;
			/* If this is a detached thread, this is where it goes away.  */
			if (p->exit_signal == -1) {
				/* release_task takes the lock itself.  */
				write_unlock_irq(&tasklist_lock);
				release_task(p);
			} else {
				do_notify_parent(p, p->exit_signal);
				write_unlock_irq(&tasklist_lock);
			}
			p = NULL;
		} else
			write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);
	BUG_ON(!retval);
	return retval;
}
/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(task_t *p, int delayed_group_leader,
			     unsigned int __user *stat_addr,
			     struct rusage __user *ru)
{
	int retval, exit_code;

	if (!p->exit_code)
		return 0;
	if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
	    p->signal && p->signal->group_stop_count > 0)
		/*
		 * A group stop is in progress and this is the group leader.
		 * We won't report until all threads have stopped.
		 */
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	read_unlock(&tasklist_lock);
	write_lock_irq(&tasklist_lock);

	/*
	 * This uses xchg to be atomic with the thread resuming and setting
	 * it.  It must also be done with the write lock held to prevent a
	 * race with the TASK_ZOMBIE case.
	 */
	exit_code = xchg(&p->exit_code, 0);
	if (unlikely(p->state > TASK_STOPPED)) {
		/*
		 * The task resumed and then died.  Let the next iteration
		 * catch it in TASK_ZOMBIE.  Note that exit_code might
		 * already be zero here if it resumed and did _exit(0).
		 * The task itself is dead and won't touch exit_code again;
		 * other processors in this function are locked out.
		 */
		p->exit_code = exit_code;
		exit_code = 0;
	}
	if (unlikely(exit_code == 0)) {
		/*
		 * Another thread in this function got to it first, or it
		 * resumed, or it resumed and then died.
		 */
		write_unlock_irq(&tasklist_lock);
		put_task_struct(p);
		read_lock(&tasklist_lock);
		return 0;
	}

	/* move to end of parent's list to avoid starvation */
	remove_parent(p);
	add_parent(p, p->parent);

	write_unlock_irq(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	if (!retval && stat_addr)
		retval = put_user((exit_code << 8) | 0x7f, stat_addr);
	if (!retval)
		retval = p->pid;
	put_task_struct(p);

	return retval;
}
asmlinkage long sys_wait4(pid_t pid, unsigned int __user *stat_addr, int options, struct rusage __user *ru)
{
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;
	int flag, retval;

	if (options & ~(WNOHANG|WUNTRACED|__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	add_wait_queue(&current->wait_chldexit,&wait);
repeat:
	flag = 0;
	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		struct task_struct *p;
		struct list_head *_p;
		int ret;

		list_for_each(_p,&tsk->children) {
			p = list_entry(_p,struct task_struct,sibling);

			ret = eligible_child(pid, options, p);
			if (!ret)
				continue;
			flag = 1;

			switch (p->state) {
			case TASK_STOPPED:
				if (!p->exit_code)
					continue;
				if (!(options & WUNTRACED) &&
				    !(p->ptrace & PT_PTRACED))
					continue;
				retval = wait_task_stopped(p, ret == 2,
							   stat_addr, ru);
				if (retval != 0) /* He released the lock.  */
					goto end_wait4;
				break;
			case TASK_ZOMBIE:
				/*
				 * Eligible but we cannot release it yet:
				 */
				if (ret == 2)
					continue;
				retval = wait_task_zombie(p, stat_addr, ru);
				if (retval != 0) /* He released the lock.  */
					goto end_wait4;
				break;
			}
		}
		if (!flag) {
			list_for_each (_p,&tsk->ptrace_children) {
				p = list_entry(_p,struct task_struct,ptrace_list);
				if (!eligible_child(pid, options, p))
					continue;
				flag = 1;
				break;
			}
		}
		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
		if (tsk->signal != current->signal)
			BUG();
	} while (tsk != current);
	read_unlock(&tasklist_lock);
	if (flag) {
		retval = 0;
		if (options & WNOHANG)
			goto end_wait4;
		retval = -ERESTARTSYS;
		if (signal_pending(current))
			goto end_wait4;
		schedule();
		goto repeat;
	}
	retval = -ECHILD;
end_wait4:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->wait_chldexit,&wait);
	return retval;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
asmlinkage long sys_waitpid(pid_t pid, unsigned __user *stat_addr, int options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif
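/*
 * Userspace view (illustrative): both entry points share wait4
 * semantics, so
 *
 *	pid_t pid = waitpid(-1, &status, WNOHANG);
 *
 * and
 *
 *	pid_t pid = wait4(-1, &status, WNOHANG, NULL);
 *
 * report the same child, with the status word encoded as in
 * wait_task_stopped()/wait_task_zombie() above.
 */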