Import 2.3.11pre8
[davej-history.git] / kernel / exit.c

/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#ifdef CONFIG_BSD_PROCESS_ACCT
#include <linux/acct.h>
#endif

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

extern void sem_exit (void);
extern struct task_struct *child_reaper;

int getrusage(struct task_struct *, int, struct rusage *);

static void release(struct task_struct * p)
{
        if (p != current) {
#ifdef __SMP__
                /*
                 * Wait to make sure the process isn't on the
                 * runqueue (active on some other CPU still)
                 */
                do {
                        barrier();
                } while (p->has_cpu);
#endif
                free_uid(p);
                unhash_process(p);

                release_thread(p);
                current->cmin_flt += p->min_flt + p->cmin_flt;
                current->cmaj_flt += p->maj_flt + p->cmaj_flt;
                current->cnswap += p->nswap + p->cnswap;
                free_task_struct(p);
        } else {
                printk("task releasing itself\n");
        }
}

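/*
 * Note: release() above is the final reaping step for a dead child and
 * is reached from sys_wait4() once the zombie has been collected.  The
 * child's fault and swap counters (plus those it collected from its own
 * children) are folded into the parent first, so that
 * getrusage(RUSAGE_CHILDREN) stays accurate, and only then is the
 * task_struct freed.
 */
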
/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 */
int session_of_pgrp(int pgrp)
{
        struct task_struct *p;
        int fallback;

        fallback = -1;
        read_lock(&tasklist_lock);
        for_each_task(p) {
                if (p->session <= 0)
                        continue;
                if (p->pgrp == pgrp) {
                        fallback = p->session;
                        break;
                }
                if (p->pid == pgrp)
                        fallback = p->session;
        }
        read_unlock(&tasklist_lock);
        return fallback;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(int pgrp, struct task_struct * ignored_task)
{
        struct task_struct *p;

        read_lock(&tasklist_lock);
        for_each_task(p) {
                if ((p == ignored_task) || (p->pgrp != pgrp) ||
                    (p->state == TASK_ZOMBIE) ||
                    (p->p_pptr->pid == 1))
                        continue;
                if ((p->p_pptr->pgrp != pgrp) &&
                    (p->p_pptr->session == p->session)) {
                        read_unlock(&tasklist_lock);
                        return 0;
                }
        }
        read_unlock(&tasklist_lock);
        return 1;       /* (sighing) "Often!" */
}

int is_orphaned_pgrp(int pgrp)
{
        return will_become_orphaned_pgrp(pgrp, 0);
}

static inline int has_stopped_jobs(int pgrp)
{
        int retval = 0;
        struct task_struct * p;

        read_lock(&tasklist_lock);
        for_each_task(p) {
                if (p->pgrp != pgrp)
                        continue;
                if (p->state != TASK_STOPPED)
                        continue;
                retval = 1;
                break;
        }
        read_unlock(&tasklist_lock);
        return retval;
}

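/*
 * When a task exits, the children it originally forked must be handed
 * over to init (the global child_reaper) so that somebody can still
 * reap them.  forget_original_parent() below retargets the
 * original-parent pointer and, if the child asked for it (set with
 * prctl(PR_SET_PDEATHSIG)), delivers the parent-death signal.
 */
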
static inline void forget_original_parent(struct task_struct * father)
{
        struct task_struct * p;

        read_lock(&tasklist_lock);
        for_each_task(p) {
                if (p->p_opptr == father) {
                        p->exit_signal = SIGCHLD;
                        p->p_opptr = child_reaper;      /* init */
                        if (p->pdeath_signal)
                                send_sig(p->pdeath_signal, p, 0);
                }
        }
        read_unlock(&tasklist_lock);
}

static inline void close_files(struct files_struct * files)
{
        int i, j;

        j = 0;
        for (;;) {
                unsigned long set = files->open_fds.fds_bits[j];
                i = j * __NFDBITS;
                j++;
                if (i >= files->max_fds)
                        break;
                while (set) {
                        if (set & 1) {
                                struct file * file = xchg(&files->fd[i], NULL);
                                if (file)
                                        filp_close(file, files);
                        }
                        i++;
                        set >>= 1;
                }
        }
}

extern kmem_cache_t *files_cachep;

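/*
 * A files_struct may be shared between tasks (clone() with CLONE_FILES),
 * so it is reference counted: only the task that drops the last
 * reference actually closes the descriptors and frees the fd array and
 * the structure itself.
 */
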
static inline void __exit_files(struct task_struct *tsk)
{
        struct files_struct * files = xchg(&tsk->files, NULL);

        if (files) {
                if (atomic_dec_and_test(&files->count)) {
                        close_files(files);
                        /*
                         * Free the fd array as appropriate ...
                         */
                        if (NR_OPEN * sizeof(struct file *) == PAGE_SIZE)
                                free_page((unsigned long) files->fd);
                        else
                                kfree(files->fd);
                        kmem_cache_free(files_cachep, files);
                }
        }
}

void exit_files(struct task_struct *tsk)
{
        __exit_files(tsk);
}

static inline void __exit_fs(struct task_struct *tsk)
{
        struct fs_struct * fs = tsk->fs;

        if (fs) {
                tsk->fs = NULL;
                if (atomic_dec_and_test(&fs->count)) {
                        dput(fs->root);
                        dput(fs->pwd);
                        kfree(fs);
                }
        }
}

void exit_fs(struct task_struct *tsk)
{
        __exit_fs(tsk);
}

static inline void __exit_sighand(struct task_struct *tsk)
{
        struct signal_struct * sig = tsk->sig;

        if (sig) {
                unsigned long flags;

                spin_lock_irqsave(&tsk->sigmask_lock, flags);
                tsk->sig = NULL;
                spin_unlock_irqrestore(&tsk->sigmask_lock, flags);
                if (atomic_dec_and_test(&sig->count))
                        kfree(sig);
        }

        flush_signals(tsk);
}

void exit_sighand(struct task_struct *tsk)
{
        __exit_sighand(tsk);
}

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static inline void __exit_mm(struct task_struct * tsk)
{
        struct mm_struct * mm = tsk->mm;

        if (mm) {
                mm_release();
                atomic_inc(&mm->mm_count);
                tsk->mm = NULL;
                mmput(mm);
        }
}

void exit_mm(struct task_struct *tsk)
{
        __exit_mm(tsk);
}

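/*
 * Note that __exit_mm() above takes an extra reference (mm_count) before
 * clearing tsk->mm: the exiting task keeps borrowing the old page tables
 * as a lazy-TLB context until it finally schedules away, while mmput()
 * drops the user-space reference to the address space.
 */
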
/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(void)
{
        struct task_struct * p;

        forget_original_parent(current);
        /*
         * Check to see if any process groups have become orphaned
         * as a result of our exiting, and if they have any stopped
         * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
         *
         * Case i: Our father is in a different pgrp than we are
         * and we were the only connection outside, so our pgrp
         * is about to become orphaned.
         */
        if ((current->p_pptr->pgrp != current->pgrp) &&
            (current->p_pptr->session == current->session) &&
            will_become_orphaned_pgrp(current->pgrp, current) &&
            has_stopped_jobs(current->pgrp)) {
                kill_pg(current->pgrp,SIGHUP,1);
                kill_pg(current->pgrp,SIGCONT,1);
        }

        /* Let father know we died */
        notify_parent(current, current->exit_signal);

        /*
         * This loop does two things:
         *
         * A.  Make init inherit all the child processes
         * B.  Check to see if any process groups have become orphaned
         *     as a result of our exiting, and if they have any stopped
         *     jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
         */

        write_lock_irq(&tasklist_lock);
        while (current->p_cptr != NULL) {
                p = current->p_cptr;
                current->p_cptr = p->p_osptr;
                p->p_ysptr = NULL;
                p->flags &= ~(PF_PTRACED|PF_TRACESYS);

                p->p_pptr = p->p_opptr;
                p->p_osptr = p->p_pptr->p_cptr;
                if (p->p_osptr)
                        p->p_osptr->p_ysptr = p;
                p->p_pptr->p_cptr = p;
                if (p->state == TASK_ZOMBIE)
                        notify_parent(p, p->exit_signal);
                /*
                 * process group orphan check
                 * Case ii: Our child is in a different pgrp
                 * than we are, and it was the only connection
                 * outside, so the child pgrp is now orphaned.
                 */
                if ((p->pgrp != current->pgrp) &&
                    (p->session == current->session)) {
                        int pgrp = p->pgrp;

                        write_unlock_irq(&tasklist_lock);
                        if (is_orphaned_pgrp(pgrp) && has_stopped_jobs(pgrp)) {
                                kill_pg(pgrp,SIGHUP,1);
                                kill_pg(pgrp,SIGCONT,1);
                        }
                        write_lock_irq(&tasklist_lock);
                }
        }
        write_unlock_irq(&tasklist_lock);

        if (current->leader)
                disassociate_ctty(1);
}

NORET_TYPE void do_exit(long code)
{
        struct task_struct *tsk = current;

        if (in_interrupt())
                printk("Aiee, killing interrupt handler\n");
        if (!tsk->pid)
                panic("Attempted to kill the idle task!");
        tsk->flags |= PF_EXITING;
        start_bh_atomic();
        del_timer(&tsk->real_timer);
        end_bh_atomic();

        lock_kernel();
fake_volatile:
#ifdef CONFIG_BSD_PROCESS_ACCT
        acct_process(code);
#endif
        sem_exit();
        __exit_mm(tsk);
#if CONFIG_AP1000
        exit_msc(tsk);
#endif
        __exit_files(tsk);
        __exit_fs(tsk);
        __exit_sighand(tsk);
        exit_thread();
        tsk->state = TASK_ZOMBIE;
        tsk->exit_code = code;
        exit_notify();
#ifdef DEBUG_PROC_TREE
        audit_ptree();
#endif
        if (tsk->exec_domain && tsk->exec_domain->module)
                __MOD_DEC_USE_COUNT(tsk->exec_domain->module);
        if (tsk->binfmt && tsk->binfmt->module)
                __MOD_DEC_USE_COUNT(tsk->binfmt->module);
        schedule();
/*
 * In order to get rid of the "volatile function does return" message
 * I did this little loop that confuses gcc to think do_exit really
 * is volatile. In fact it's schedule() that is volatile in some
 * circumstances: when current->state = ZOMBIE, schedule() never
 * returns.
 *
 * In fact the natural way to do all this is to have the label and the
 * goto right after each other, but I put the fake_volatile label at
 * the start of the function just in case something /really/ bad
 * happens, and the schedule returns. This way we can try again. I'm
 * not paranoid: it's just that everybody is out to get me.
 */
        goto fake_volatile;
}

asmlinkage int sys_exit(int error_code)
{
        do_exit((error_code&0xff)<<8);
}

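/*
 * Status word encoding, as the parent later sees it through wait():
 * a normal exit stores (error_code & 0xff) << 8, i.e. the exit code in
 * bits 8-15 with a zero low byte, while a stopped child is reported by
 * sys_wait4() below as (stop signal << 8) | 0x7f.  This matches the
 * WIFEXITED/WIFSTOPPED conventions that libc decodes.
 */
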
asmlinkage int sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru)
{
        int flag, retval;
        DECLARE_WAITQUEUE(wait, current);
        struct task_struct *p;

        if (options & ~(WNOHANG|WUNTRACED|__WCLONE))
                return -EINVAL;

        add_wait_queue(&current->wait_chldexit,&wait);
repeat:
        flag = 0;
        read_lock(&tasklist_lock);
        for (p = current->p_cptr ; p ; p = p->p_osptr) {
                if (pid>0) {
                        if (p->pid != pid)
                                continue;
                } else if (!pid) {
                        if (p->pgrp != current->pgrp)
                                continue;
                } else if (pid != -1) {
                        if (p->pgrp != -pid)
                                continue;
                }
                /* wait for cloned processes iff the __WCLONE flag is set */
                if ((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
                        continue;
                flag = 1;
                switch (p->state) {
                        case TASK_STOPPED:
                                if (!p->exit_code)
                                        continue;
                                if (!(options & WUNTRACED) && !(p->flags & PF_PTRACED))
                                        continue;
                                read_unlock(&tasklist_lock);
                                retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
                                if (!retval && stat_addr)
                                        retval = put_user((p->exit_code << 8) | 0x7f, stat_addr);
                                if (!retval) {
                                        p->exit_code = 0;
                                        retval = p->pid;
                                }
                                goto end_wait4;
                        case TASK_ZOMBIE:
                                current->times.tms_cutime += p->times.tms_utime + p->times.tms_cutime;
                                current->times.tms_cstime += p->times.tms_stime + p->times.tms_cstime;
                                read_unlock(&tasklist_lock);
                                retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
                                if (!retval && stat_addr)
                                        retval = put_user(p->exit_code, stat_addr);
                                if (retval)
                                        goto end_wait4;
                                retval = p->pid;
                                if (p->p_opptr != p->p_pptr) {
                                        write_lock_irq(&tasklist_lock);
                                        REMOVE_LINKS(p);
                                        p->p_pptr = p->p_opptr;
                                        SET_LINKS(p);
                                        write_unlock_irq(&tasklist_lock);
                                        notify_parent(p, SIGCHLD);
                                } else
                                        release(p);
#ifdef DEBUG_PROC_TREE
                                audit_ptree();
#endif
                                goto end_wait4;
                        default:
                                continue;
                }
        }
        read_unlock(&tasklist_lock);
        if (flag) {
                retval = 0;
                if (options & WNOHANG)
                        goto end_wait4;
                retval = -ERESTARTSYS;
                if (signal_pending(current))
                        goto end_wait4;
                current->state = TASK_INTERRUPTIBLE;
                schedule();
                goto repeat;
        }
        retval = -ECHILD;
end_wait4:
        remove_wait_queue(&current->wait_chldexit,&wait);
        return retval;
}

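/*
 * The #ifndef below exists because the Alpha port has no waitpid()
 * system call of its own; its libc goes straight to wait4(), so the
 * legacy entry point is compiled out there.
 */
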
#ifndef __alpha__

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
asmlinkage int sys_waitpid(pid_t pid,unsigned int * stat_addr, int options)
{
        return sys_wait4(pid, stat_addr, options, NULL);
}

#endif