/* Copyright (C) 1991, 1992 Linus Torvalds */
7 #include <linux/config.h>
8 #include <linux/malloc.h>
9 #include <linux/interrupt.h>
10 #include <linux/smp_lock.h>
11 #include <linux/module.h>
12 #ifdef CONFIG_BSD_PROCESS_ACCT
13 #include <linux/acct.h>
16 #include <asm/uaccess.h>
17 #include <asm/pgtable.h>
18 #include <asm/mmu_context.h>
20 extern void sem_exit (void);
21 extern struct task_struct
*child_reaper
;
23 int getrusage(struct task_struct
*, int, struct rusage
*);
/*
 * release() - final tear-down of an exited task's bookkeeping.
 * NOTE(review): this extraction is garbled: original source line numbers
 * are fused into the text and several interior lines (26-29, 31-34,
 * 38-46, 48, 50-51, ...) are missing, so the control flow shown here
 * is incomplete.
 */
25 static void release(struct task_struct
* p
)
/* Comment fragment: presumably waiting for the task to stop running on
 * another CPU -- TODO confirm against the full source. */
30 * Wait to make sure the process isn't active on any
/* Runqueue lock taken and dropped around a check in the missing lines. */
35 spin_lock_irq(&runqueue_lock
);
37 spin_unlock_irq(&runqueue_lock
);
/* Return the task's slot in the task array to the free pool. */
47 add_free_taskslot(p
->tarray_ptr
);
/* Task-list manipulation under the writer lock (body lines missing). */
49 write_lock_irq(&tasklist_lock
);
52 write_unlock_irq(&tasklist_lock
);
/* Fold the dead child's fault/swap counters into the caller's
 * cumulative (c*) counters, including the child's own children's. */
55 current
->cmin_flt
+= p
->min_flt
+ p
->cmin_flt
;
56 current
->cmaj_flt
+= p
->maj_flt
+ p
->cmaj_flt
;
57 current
->cnswap
+= p
->nswap
+ p
->cnswap
;
/* Diagnostic for the (unexpected) case of a task releasing itself. */
60 printk("task releasing itself\n");
/*
 * session_of_pgrp() - find the session id associated with a pgrp.
 * NOTE(review): extraction is incomplete -- the task-list loop header,
 * the initialisation of "fallback" and the return statement are not
 * visible here.
 */
65 * This checks not only the pgrp, but falls back on the pid if no
66 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
69 int session_of_pgrp(int pgrp
)
71 struct task_struct
*p
;
/* Scan the task list under the reader lock. */
75 read_lock(&tasklist_lock
);
/* Exact pgrp match: take that task's session. */
79 if (p
->pgrp
== pgrp
) {
80 fallback
= p
->session
;
/* Fallback assignment (its guarding condition is in the missing
 * lines -- presumably a pid match; TODO confirm). */
84 fallback
= p
->session
;
86 read_unlock(&tasklist_lock
);
/*
 * NOTE(review): extraction is garbled/incomplete -- the task-list loop
 * header, the "continue" after the skip condition, and the return
 * statement inside the early-exit branch are missing.
 */
91 * Determine if a process group is "orphaned", according to the POSIX
92 * definition in 2.2.2.52. Orphaned process groups are not to be affected
93 * by terminal-generated stop signals. Newly orphaned process groups are
94 * to receive a SIGHUP and a SIGCONT.
96 * "I ask you, have you ever known what it is to be an orphan?"
98 static int will_become_orphaned_pgrp(int pgrp
, struct task_struct
* ignored_task
)
100 struct task_struct
*p
;
102 read_lock(&tasklist_lock
);
/* Skip tasks that cannot keep the pgrp connected: the ignored task,
 * tasks outside the pgrp, zombies, and children of init (pid 1). */
104 if ((p
== ignored_task
) || (p
->pgrp
!= pgrp
) ||
105 (p
->state
== TASK_ZOMBIE
) ||
106 (p
->p_pptr
->pid
== 1))
/* A parent outside the pgrp but in the same session means the group
 * still has an outside connection -- not (becoming) orphaned. */
108 if ((p
->p_pptr
->pgrp
!= pgrp
) &&
109 (p
->p_pptr
->session
== p
->session
)) {
110 read_unlock(&tasklist_lock
);
114 read_unlock(&tasklist_lock
);
115 return 1; /* (sighing) "Often!" */
/*
 * is_orphaned_pgrp() - check whether a process group is currently
 * orphaned: delegate to will_become_orphaned_pgrp() with no task
 * excluded from consideration.
 */
int is_orphaned_pgrp(int pgrp)
{
	return will_become_orphaned_pgrp(pgrp, 0);
}
/*
 * has_stopped_jobs() - does the given pgrp contain a stopped task?
 * NOTE(review): extraction is incomplete -- the task-list loop, the
 * pgrp match, and the result variable are in missing lines; only the
 * TASK_STOPPED test and the locking survive here.
 */
123 static inline int has_stopped_jobs(int pgrp
)
126 struct task_struct
* p
;
128 read_lock(&tasklist_lock
);
/* Skip tasks that are not stopped. */
132 if (p
->state
!= TASK_STOPPED
)
137 read_unlock(&tasklist_lock
);
/*
 * forget_original_parent() - reparent the children of an exiting task:
 * any task whose original parent (p_opptr) is "father" is handed over
 * to child_reaper (init) and its exit signal reset to SIGCHLD.
 * NOTE(review): the task-list loop header and closing braces are
 * missing from this extraction.
 */
141 static inline void forget_original_parent(struct task_struct
* father
)
143 struct task_struct
* p
;
145 read_lock(&tasklist_lock
);
147 if (p
->p_opptr
== father
) {
148 p
->exit_signal
= SIGCHLD
;
149 p
->p_opptr
= child_reaper
; /* init */
/* Honour a requested parent-death signal immediately. */
150 if (p
->pdeath_signal
) send_sig(p
->pdeath_signal
, p
, 0);
153 read_unlock(&tasklist_lock
);
/*
 * close_files() - close every open fd recorded in a files_struct.
 * Walks the open_fds bitmap one word at a time; for each set bit the
 * corresponding struct file is closed via filp_close().
 * NOTE(review): the loop headers and the i/j index bookkeeping are in
 * lines missing from this extraction.
 */
156 static inline void close_files(struct files_struct
* files
)
/* Fetch one word of the open-fd bitmap. */
162 unsigned long set
= files
->open_fds
.fds_bits
[j
];
/* Stop once the index passes the fd table size. */
165 if (i
>= files
->max_fds
)
169 struct file
* file
= files
->fd
[i
];
172 filp_close(file
, files
);
181 extern kmem_cache_t
*files_cachep
;
/*
 * __exit_files() - drop the task's reference on its files_struct and,
 * on the last reference, free the fd array and the structure itself.
 * NOTE(review): several lines are missing from this extraction --
 * presumably a NULL check, the tsk->files reset, a close_files() call
 * and a non-page-sized free branch; TODO confirm against full source.
 */
183 static inline void __exit_files(struct task_struct
*tsk
)
185 struct files_struct
* files
= tsk
->files
;
/* Last user: tear everything down. */
189 if (atomic_dec_and_test(&files
->count
)) {
192 * Free the fd array as appropriate ...
/* A full-page fd array was allocated as a whole page. */
194 if (NR_OPEN
* sizeof(struct file
*) == PAGE_SIZE
)
195 free_page((unsigned long) files
->fd
);
/* The files_struct itself came from the slab cache. */
198 kmem_cache_free(files_cachep
, files
);
/* exit_files() - public wrapper around __exit_files(); its body is
 * entirely missing from this extraction. */
203 void exit_files(struct task_struct
*tsk
)
/*
 * __exit_fs() - drop the task's reference on its fs_struct and free
 * it on the last reference.
 * NOTE(review): the NULL check, the tsk->fs reset and the body of the
 * dec-and-test branch are missing from this extraction.
 */
208 static inline void __exit_fs(struct task_struct
*tsk
)
210 struct fs_struct
* fs
= tsk
->fs
;
214 if (atomic_dec_and_test(&fs
->count
)) {
/* exit_fs() - public wrapper around __exit_fs(); its body is entirely
 * missing from this extraction. */
222 void exit_fs(struct task_struct
*tsk
)
/*
 * __exit_sighand() - detach the task's signal_struct under
 * sigmask_lock and drop a reference, freeing it on the last one.
 * NOTE(review): the assignment inside the locked region and the free
 * call after the dec-and-test are missing from this extraction.
 */
227 static inline void __exit_sighand(struct task_struct
*tsk
)
229 struct signal_struct
* sig
= tsk
->sig
;
/* Serialise against concurrent signal delivery. */
234 spin_lock_irqsave(&tsk
->sigmask_lock
, flags
);
236 spin_unlock_irqrestore(&tsk
->sigmask_lock
, flags
);
237 if (atomic_dec_and_test(&sig
->count
))
/* exit_sighand() - public wrapper around __exit_sighand(); its body is
 * entirely missing from this extraction. */
244 void exit_sighand(struct task_struct
*tsk
)
/*
 * __exit_mm() - detach the task from its user address space and switch
 * it onto the kernel's page tables (swapper_pg_dir).
 * NOTE(review): the body of the if-branch -- presumably the mm
 * release/mmput path -- is missing from this extraction.
 */
249 static inline void __exit_mm(struct task_struct
* tsk
)
251 struct mm_struct
* mm
= tsk
->mm
;
253 /* Set us up to use the kernel mm state */
/* Only user tasks (not already on init_mm) need detaching. */
254 if (mm
!= &init_mm
) {
/* Point the task at the kernel master page directory. */
260 SET_PAGE_DIR(tsk
, swapper_pg_dir
);
/* exit_mm() - public wrapper around __exit_mm(); its body is entirely
 * missing from this extraction. */
266 void exit_mm(struct task_struct
*tsk
)
/*
 * exit_notify() - notify relatives that the current task is dying and
 * reparent its children.
 * NOTE(review): this extraction is garbled (original line numbers are
 * fused into the text, several lines missing), so loop braces, the
 * "continue" statements and local declarations such as "pgrp" are not
 * visible here.
 */
272 * Send signals to all our closest relatives so that they know
273 * to properly mourn us..
275 static void exit_notify(void)
277 struct task_struct
* p
;
/* Hand our children over to init first. */
279 forget_original_parent(current
);
281 * Check to see if any process groups have become orphaned
282 * as a result of our exiting, and if they have any stopped
283 * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
285 * Case i: Our father is in a different pgrp than we are
286 * and we were the only connection outside, so our pgrp
287 * is about to become orphaned.
289 if ((current
->p_pptr
->pgrp
!= current
->pgrp
) &&
290 (current
->p_pptr
->session
== current
->session
) &&
291 will_become_orphaned_pgrp(current
->pgrp
, current
) &&
292 has_stopped_jobs(current
->pgrp
)) {
293 kill_pg(current
->pgrp
,SIGHUP
,1);
294 kill_pg(current
->pgrp
,SIGCONT
,1);
297 /* Let father know we died */
298 notify_parent(current
, current
->exit_signal
);
301 * This loop does two things:
303 * A. Make init inherit all the child processes
304 * B. Check to see if any process groups have become orphaned
305 * as a result of our exiting, and if they have any stopped
306 * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
/* Detach children one at a time under the tasklist writer lock. */
309 write_lock_irq(&tasklist_lock
);
310 while (current
->p_cptr
!= NULL
) {
312 current
->p_cptr
= p
->p_osptr
;
/* The child is no longer traced by us. */
314 p
->flags
&= ~(PF_PTRACED
|PF_TRACESYS
);
/* Re-link the child into its original parent's child list. */
316 p
->p_pptr
= p
->p_opptr
;
317 p
->p_osptr
= p
->p_pptr
->p_cptr
;
319 p
->p_osptr
->p_ysptr
= p
;
320 p
->p_pptr
->p_cptr
= p
;
/* Zombies must be re-notified so the new parent can reap them. */
321 if (p
->state
== TASK_ZOMBIE
)
322 notify_parent(p
, p
->exit_signal
);
324 * process group orphan check
325 * Case ii: Our child is in a different pgrp
326 * than we are, and it was the only connection
327 * outside, so the child pgrp is now orphaned.
329 if ((p
->pgrp
!= current
->pgrp
) &&
330 (p
->session
== current
->session
)) {
/* Drop the lock while signalling; re-take it to continue the loop.
 * "pgrp" is presumably a local caching p->pgrp -- its declaration is
 * in the missing lines; TODO confirm. */
333 write_unlock_irq(&tasklist_lock
);
334 if (is_orphaned_pgrp(pgrp
) && has_stopped_jobs(pgrp
)) {
335 kill_pg(pgrp
,SIGHUP
,1);
336 kill_pg(pgrp
,SIGCONT
,1);
338 write_lock_irq(&tasklist_lock
);
341 write_unlock_irq(&tasklist_lock
);
/* Finally detach from the controlling terminal. */
344 disassociate_ctty(1);
/*
 * do_exit() - terminate the current task with the given exit code:
 * marks it PF_EXITING, tears down its interval timer, and turns it
 * into a TASK_ZOMBIE for the parent to reap.
 * NOTE(review): this extraction is missing many lines (presumably the
 * in_interrupt/idle checks' conditions, acct_process, the __exit_*
 * calls, exit_notify() and the final schedule()); only fragments are
 * visible here.
 */
347 NORET_TYPE
void do_exit(long code
)
349 struct task_struct
*tsk
= current
;
/* Dying in interrupt context or as the idle task is fatal. */
352 printk("Aiee, killing interrupt handler\n");
354 panic("Attempted to kill the idle task!");
355 tsk
->flags
|= PF_EXITING
;
/* Stop the real-interval timer before the task disappears. */
357 del_timer(&tsk
->real_timer
);
362 #ifdef CONFIG_BSD_PROCESS_ACCT
374 tsk
->state
= TASK_ZOMBIE
;
375 tsk
->exit_code
= code
;
377 #ifdef DEBUG_PROC_TREE
/* Drop module references held via the exec domain and binary format. */
380 if (tsk
->exec_domain
&& tsk
->exec_domain
->module
)
381 __MOD_DEC_USE_COUNT(tsk
->exec_domain
->module
);
382 if (tsk
->binfmt
&& tsk
->binfmt
->module
)
383 __MOD_DEC_USE_COUNT(tsk
->binfmt
->module
);
386 * In order to get rid of the "volatile function does return" message
387 * I did this little loop that confuses gcc to think do_exit really
388 * is volatile. In fact it's schedule() that is volatile in some
389 * circumstances: when current->state = ZOMBIE, schedule() never
392 * In fact the natural way to do all this is to have the label and the
393 * goto right after each other, but I put the fake_volatile label at
394 * the start of the function just in case something /really/ bad
395 * happens, and the schedule returns. This way we can try again. I'm
396 * not paranoid: it's just that everybody is out to get me.
401 asmlinkage
int sys_exit(int error_code
)
403 do_exit((error_code
&0xff)<<8);
/*
 * sys_wait4() - wait for a child to change state, optionally
 * collecting resource usage (ru) and exit status (stat_addr).
 * NOTE(review): this extraction is garbled and missing many lines
 * (the repeat label, the switch on p->state, the pid/pgid matching
 * branches, the release() call, schedule() and the exit path); only
 * fragments are visible here. "¤t" below is a mangled
 * "&current" -- an extraction artifact.
 */
406 asmlinkage
int sys_wait4(pid_t pid
,unsigned int * stat_addr
, int options
, struct rusage
* ru
)
409 DECLARE_WAITQUEUE(wait
, current
);
410 struct task_struct
*p
;
/* Reject unknown option bits up front. */
412 if (options
& ~(WNOHANG
|WUNTRACED
|__WCLONE
))
/* Sleep on our own wait_chldexit queue while waiting. */
415 add_wait_queue(¤t
->wait_chldexit
,&wait
);
/* Scan our children under the tasklist reader lock. */
418 read_lock(&tasklist_lock
);
419 for (p
= current
->p_cptr
; p
; p
= p
->p_osptr
) {
/* Presumably the pid==0 branch: match any child in our own process
 * group, per waitpid(2) semantics -- TODO confirm. */
424 if (p
->pgrp
!= current
->pgrp
)
426 } else if (pid
!= -1) {
430 /* wait for cloned processes iff the __WCLONE flag is set */
431 if ((p
->exit_signal
!= SIGCHLD
) ^ ((options
& __WCLONE
) != 0))
/* Stopped children are only reported with WUNTRACED or when traced. */
438 if (!(options
& WUNTRACED
) && !(p
->flags
& PF_PTRACED
))
440 read_unlock(&tasklist_lock
);
441 retval
= ru
? getrusage(p
, RUSAGE_BOTH
, ru
) : 0;
442 if (!retval
&& stat_addr
)
/* Stopped status encoding: (exit_code << 8) | 0x7f. */
443 retval
= put_user((p
->exit_code
<< 8) | 0x7f, stat_addr
);
/* Zombie child: fold its CPU times into ours before it is released. */
450 current
->times
.tms_cutime
+= p
->times
.tms_utime
+ p
->times
.tms_cutime
;
451 current
->times
.tms_cstime
+= p
->times
.tms_stime
+ p
->times
.tms_cstime
;
452 read_unlock(&tasklist_lock
);
453 retval
= ru
? getrusage(p
, RUSAGE_BOTH
, ru
) : 0;
454 if (!retval
&& stat_addr
)
455 retval
= put_user(p
->exit_code
, stat_addr
);
/* Ptraced child whose real parent differs: hand it back to the real
 * parent instead of releasing it, and notify via SIGCHLD. */
459 if (p
->p_opptr
!= p
->p_pptr
) {
460 write_lock_irq(&tasklist_lock
);
462 p
->p_pptr
= p
->p_opptr
;
464 write_unlock_irq(&tasklist_lock
);
465 notify_parent(p
, SIGCHLD
);
468 #ifdef DEBUG_PROC_TREE
476 read_unlock(&tasklist_lock
);
/* No eligible child changed state: fail fast with WNOHANG ... */
479 if (options
& WNOHANG
)
481 retval
= -ERESTARTSYS
;
482 if (signal_pending(current
))
/* ... otherwise sleep interruptibly until a child state change. */
484 current
->state
=TASK_INTERRUPTIBLE
;
490 remove_wait_queue(¤t
->wait_chldexit
,&wait
);
497 * sys_waitpid() remains for compatibility. waitpid() should be
498 * implemented by calling sys_wait4() from libc.a.
500 asmlinkage
int sys_waitpid(pid_t pid
,unsigned int * stat_addr
, int options
)
502 return sys_wait4(pid
, stat_addr
, options
, NULL
);