/*  $Id: signal.c,v 1.54 2000/09/05 21:44:54 davem Exp $
 *  arch/sparc64/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *  Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
 *  Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/bitops.h>
#include <asm/ptrace.h>
#include <asm/svr4.h>
#include <asm/pgtable.h>
#include <asm/fpumacro.h>
#include <asm/uctx.h>
#include <asm/siginfo.h>
#include <asm/visasm.h>
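
/* Everything except SIGKILL and SIGSTOP may sit in the blocked mask;
 * those two must always remain deliverable.
 */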
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

asmlinkage int sys_wait4(pid_t pid, unsigned long *stat_addr,
			 int options, unsigned long *ru);

asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
			 unsigned long orig_o0, int ret_from_syscall);

/* These are turned off for production... */
/* #define DEBUG_SIGNALS 1 */
/* #define DEBUG_SIGNALS_TRACE 1 */
/* #define DEBUG_SIGNALS_MAPS 1 */

int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from)
{
	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t));
	else {
		int err;

		/* If you change siginfo_t structure, please be sure
		   this code is fixed accordingly.
		   It should never copy any pad contained in the structure
		   to avoid security leaks, but must copy the generic
		   3 ints plus the relevant union member. */
		err = __put_user(*(long *)&from->si_signo, (long *)&to->si_signo);
		err |= __put_user((short)from->si_code, &to->si_code);
		switch (from->si_code >> 16) {
		case __SI_CHLD >> 16:
			err |= __put_user(from->si_utime, &to->si_utime);
			err |= __put_user(from->si_stime, &to->si_stime);
		case __SI_FAULT >> 16:
		case __SI_POLL >> 16:
			err |= __put_user(from->si_trapno, &to->si_trapno);
		default:
			err |= __put_user(from->si_addr, &to->si_addr);
			break;
		/* case __SI_RT: This is not generated by the kernel as of now. */
		}
		return err;
	}
}

/* {set, get}context() needed for 64-bit SparcLinux userland. */
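
/* sparc64_set_context() installs the machine context pointed to by %o0
 * (a struct ucontext in userspace).  A non-zero %o1 asks for the signal
 * mask stored in the ucontext to be restored as well.  A misaligned or
 * inaccessible pointer, or leftover saved register windows, terminates
 * the task with SIGSEGV rather than returning an error.
 */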

asmlinkage void sparc64_set_context(struct pt_regs *regs)
{
	struct ucontext *ucp = (struct ucontext *) regs->u_regs[UREG_I0];
	struct thread_struct *tp = &current->thread;
	mc_gregset_t *grp;
	unsigned long pc, npc, tstate;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	flush_user_windows();
	if(tp->w_saved ||
	   (((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
	   (!__access_ok((unsigned long)ucp, sizeof(*ucp))))
		goto do_sigsegv;
	grp = &ucp->uc_mcontext.mc_gregs;
	err = __get_user(pc, &((*grp)[MC_PC]));
	err |= __get_user(npc, &((*grp)[MC_NPC]));
	if(err || ((pc | npc) & 3))
		goto do_sigsegv;
	if(regs->u_regs[UREG_I1]) {
		sigset_t set;

		if (_NSIG_WORDS == 1) {
			if (__get_user(set.sig[0], &ucp->uc_sigmask.sig[0]))
				goto do_sigsegv;
		} else {
			if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
				goto do_sigsegv;
		}
		sigdelsetmask(&set, ~_BLOCKABLE);
		spin_lock_irq(&current->sigmask_lock);
		current->blocked = set;
		recalc_sigpending(current);
		spin_unlock_irq(&current->sigmask_lock);
	}
	regs->tpc = pc;
	regs->tnpc = npc;
	err |= __get_user(regs->y, &((*grp)[MC_Y]));
	err |= __get_user(tstate, &((*grp)[MC_TSTATE]));
	regs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ICC | TSTATE_XCC));
	err |= __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1]));
	err |= __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2]));
	err |= __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3]));
	err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
	err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
	err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));
	err |= __get_user(regs->u_regs[UREG_G7], (&(*grp)[MC_G7]));
	err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
	err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
	err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
	err |= __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3]));
	err |= __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4]));
	err |= __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5]));
	err |= __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6]));
	err |= __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7]));

	err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp));
	err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7));
	err |= __put_user(fp,
	      (&(((struct reg_window *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __put_user(i7,
	      (&(((struct reg_window *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));

	err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
	if(fenab) {
		unsigned long *fpregs = (unsigned long *)(((char *)current) + AOFF_task_fpregs);
		unsigned long fprs;

		fprs_write(0);
		err |= __get_user(fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs));
		if (fprs & FPRS_DL)
			err |= copy_from_user(fpregs,
					      &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs),
					      (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_from_user(fpregs+16,
				((unsigned long *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16,
				(sizeof(unsigned int) * 32));
		err |= __get_user(current->thread.xfsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
		err |= __get_user(current->thread.gsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
		regs->tstate &= ~TSTATE_PEF;
	}
	if (err)
		goto do_sigsegv;

	return;
do_sigsegv:
	do_exit(SIGSEGV);
}
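
/* sparc64_get_context() is the inverse operation: it dumps the caller's
 * register state and signal mask into the ucontext at %o0.  Note that the
 * trap is stepped over (tpc/tnpc advanced) before the PC values are stored,
 * so a later set_context() on this ucontext resumes after the call.
 */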

asmlinkage void sparc64_get_context(struct pt_regs *regs)
{
	struct ucontext *ucp = (struct ucontext *) regs->u_regs[UREG_I0];
	struct thread_struct *tp = &current->thread;
	mc_gregset_t *grp;
	mcontext_t *mcp;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	synchronize_user_stack();
	if(tp->w_saved || clear_user(ucp, sizeof(*ucp)))
		goto do_sigsegv;

#if 1
	fenab = 0; /* IMO get_context is like any other system call, thus modifies FPU state -jj */
#else
	fenab = (current->thread.fpsaved[0] & FPRS_FEF);
#endif

	mcp = &ucp->uc_mcontext;
	grp = &mcp->mc_gregs;

	/* Skip over the trap instruction, first. */
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;

	err = 0;
	if (_NSIG_WORDS == 1)
		err |= __put_user(current->blocked.sig[0],
				  (unsigned long *)&ucp->uc_sigmask);
	else
		err |= __copy_to_user(&ucp->uc_sigmask, &current->blocked,
				      sizeof(sigset_t));

	err |= __put_user(regs->tstate, &((*grp)[MC_TSTATE]));
	err |= __put_user(regs->tpc, &((*grp)[MC_PC]));
	err |= __put_user(regs->tnpc, &((*grp)[MC_NPC]));
	err |= __put_user(regs->y, &((*grp)[MC_Y]));
	err |= __put_user(regs->u_regs[UREG_G1], &((*grp)[MC_G1]));
	err |= __put_user(regs->u_regs[UREG_G2], &((*grp)[MC_G2]));
	err |= __put_user(regs->u_regs[UREG_G3], &((*grp)[MC_G3]));
	err |= __put_user(regs->u_regs[UREG_G4], &((*grp)[MC_G4]));
	err |= __put_user(regs->u_regs[UREG_G5], &((*grp)[MC_G5]));
	err |= __put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G6]));
	err |= __put_user(regs->u_regs[UREG_G7], &((*grp)[MC_G7]));
	err |= __put_user(regs->u_regs[UREG_I0], &((*grp)[MC_O0]));
	err |= __put_user(regs->u_regs[UREG_I1], &((*grp)[MC_O1]));
	err |= __put_user(regs->u_regs[UREG_I2], &((*grp)[MC_O2]));
	err |= __put_user(regs->u_regs[UREG_I3], &((*grp)[MC_O3]));
	err |= __put_user(regs->u_regs[UREG_I4], &((*grp)[MC_O4]));
	err |= __put_user(regs->u_regs[UREG_I5], &((*grp)[MC_O5]));
	err |= __put_user(regs->u_regs[UREG_I6], &((*grp)[MC_O6]));
	err |= __put_user(regs->u_regs[UREG_I7], &((*grp)[MC_O7]));

	err |= __get_user(fp,
		 (&(((struct reg_window *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __get_user(i7,
		 (&(((struct reg_window *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
	err |= __put_user(fp, &(mcp->mc_fp));
	err |= __put_user(i7, &(mcp->mc_i7));

	err |= __put_user(fenab, &(mcp->mc_fpregs.mcfpu_enab));
	if(fenab) {
		unsigned long *fpregs = (unsigned long *)(((char *)current) + AOFF_task_fpregs);
		unsigned long fprs;

		fprs = current->thread.fpsaved[0];
		if (fprs & FPRS_DL)
			err |= copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs,
					    (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_to_user(
				((unsigned long *)&(mcp->mc_fpregs.mcfpu_fregs))+16, fpregs+16,
				(sizeof(unsigned int) * 32));
		err |= __put_user(current->thread.xfsr[0], &(mcp->mc_fpregs.mcfpu_fsr));
		err |= __put_user(current->thread.gsr[0], &(mcp->mc_fpregs.mcfpu_gsr));
		err |= __put_user(fprs, &(mcp->mc_fpregs.mcfpu_fprs));
	}
	if (err)
		goto do_sigsegv;

	return;
do_sigsegv:
	do_exit(SIGSEGV);
}
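
/* Layout of the frame pushed onto the user stack for an RT signal: a
 * register window save area, the siginfo, the interrupted pt_regs, a
 * pointer to (and storage for) the optional FPU state, the alternate
 * stack description and the saved signal mask.
 */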

struct rt_signal_frame {
	struct sparc_stackf	ss;
	siginfo_t		info;
	struct pt_regs		regs;
	__siginfo_fpu_t *	fpu_save;
	stack_t			stack;
	sigset_t		mask;
	__siginfo_fpu_t		fpu_state;
};

/* Align macros */
#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7)))

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 * This is really tricky on the Sparc, watch out...
 */
asmlinkage void _sigpause_common(old_sigset_t set, struct pt_regs *regs)
{
	sigset_t saveset;

#ifdef CONFIG_SPARC32_COMPAT
	if (current->thread.flags & SPARC_FLAG_32BIT) {
		extern asmlinkage void _sigpause32_common(old_sigset_t32,
							  struct pt_regs *);
		_sigpause32_common(set, regs);
		return;
	}
#endif
	set &= _BLOCKABLE;
	spin_lock_irq(&current->sigmask_lock);
	saveset = current->blocked;
	siginitset(&current->blocked, set);
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	regs->tpc = regs->tnpc;
	regs->tnpc += 4;

	/* Condition codes and return value were set here for sigpause,
	 * and so got used by setup_frame, which again causes sigreturn()
	 * to return -EINTR.
	 */
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		/*
		 * Return -EINTR and set the condition code here,
		 * so the interrupted system call actually returns
		 * these.
		 */
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		regs->u_regs[UREG_I0] = EINTR;
		if (do_signal(&saveset, regs, 0, 0))
			return;
	}
}
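
/* Thin wrappers around _sigpause_common(); presumably these are the entry
 * points wired to the sigpause/sigsuspend traps, differing only in where
 * the temporary mask comes from.
 */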

asmlinkage void do_sigpause(unsigned int set, struct pt_regs *regs)
{
	_sigpause_common(set, regs);
}

asmlinkage void do_sigsuspend(struct pt_regs *regs)
{
	_sigpause_common(regs->u_regs[UREG_I0], regs);
}

asmlinkage void do_rt_sigsuspend(sigset_t *uset, size_t sigsetsize, struct pt_regs *regs)
{
	sigset_t oldset, set;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t)) {
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		regs->u_regs[UREG_I0] = EINVAL;
		return;
	}
	if (copy_from_user(&set, uset, sizeof(set))) {
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		regs->u_regs[UREG_I0] = EFAULT;
		return;
	}

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sigmask_lock);
	oldset = current->blocked;
	current->blocked = set;
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	regs->tpc = regs->tnpc;
	regs->tnpc += 4;

	/* Condition codes and return value were set here for sigpause,
	 * and so got used by setup_frame, which again causes sigreturn()
	 * to return -EINTR.
	 */
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		/*
		 * Return -EINTR and set the condition code here,
		 * so the interrupted system call actually returns
		 * these.
		 */
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		regs->u_regs[UREG_I0] = EINTR;
		if (do_signal(&oldset, regs, 0, 0))
			return;
	}
}
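
/* Reload the FPU image saved in the signal frame.  FPRS_DL and FPRS_DU
 * select the lower and upper halves of the floating-point register file.
 * The data is written to the thread's FPU save area rather than to the
 * hardware; with %tstate.PEF cleared, the next FPU access should trap and
 * reload it (this reading assumes the usual sparc64 lazy-FPU path).
 */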

static inline int
restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t *fpu)
{
	unsigned long *fpregs = (unsigned long *)(((char *)current) + AOFF_task_fpregs);
	unsigned long fprs;
	int err;

	err = __get_user(fprs, &fpu->si_fprs);
	fprs_write(0);
	regs->tstate &= ~TSTATE_PEF;
	if (fprs & FPRS_DL)
		err |= copy_from_user(fpregs, &fpu->si_float_regs[0],
				      (sizeof(unsigned int) * 32));
	if (fprs & FPRS_DU)
		err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32],
				      (sizeof(unsigned int) * 32));
	err |= __get_user(current->thread.xfsr[0], &fpu->si_fsr);
	err |= __get_user(current->thread.gsr[0], &fpu->si_gsr);
	current->thread.fpsaved[0] |= fprs;
	return err;
}
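
/* do_rt_sigreturn() unwinds the frame built by setup_rt_frame(): it
 * revalidates the frame pointer, restores the general registers (only the
 * icc condition codes of %tstate are taken from the frame), the optional
 * FPU state, the alternate stack settings and the saved signal mask.  Any
 * fault while reading the frame results in SIGSEGV.
 */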

void do_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_signal_frame *sf;
	unsigned long tpc, tnpc, tstate;
	__siginfo_fpu_t *fpu_save;
	sigset_t set;
	stack_t st;
	int err;

	synchronize_user_stack ();
	sf = (struct rt_signal_frame *)
		(regs->u_regs [UREG_FP] + STACK_BIAS);

	/* 1. Make sure we are not getting garbage from the user */
	if (((unsigned long) sf) & 3)
		goto segv;

	err = get_user(tpc, &sf->regs.tpc);
	err |= __get_user(tnpc, &sf->regs.tnpc);
	err |= ((tpc | tnpc) & 3);

	/* 2. Restore the state */
	err |= __get_user(regs->y, &sf->regs.y);
	err |= __get_user(tstate, &sf->regs.tstate);
	err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs));

	/* User can only change condition codes in %tstate. */
	regs->tstate &= ~(TSTATE_ICC);
	regs->tstate |= (tstate & TSTATE_ICC);

	err |= __get_user(fpu_save, &sf->fpu_save);
	if (fpu_save)
		err |= restore_fpu_state(regs, &sf->fpu_state);

	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
	err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));

	if (err)
		goto segv;

	regs->tpc = tpc;
	regs->tnpc = tnpc;

	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors. */
	do_sigaltstack(&st, NULL, (unsigned long)sf);

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sigmask_lock);
	current->blocked = set;
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);
	return;
segv:
	send_sig(SIGSEGV, current, 1);
}

/* Checks if the fp is valid */
static int invalid_frame_pointer(void *fp, int fplen)
{
	if (((unsigned long) fp) & 7)
		return 1;
	return 0;
}
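
/* Write the FPU image out to the __siginfo_fpu_t in the signal frame: the
 * register banks indicated by thread.fpsaved[0], plus %fsr, %gsr and the
 * FPRS value itself, so restore_fpu_state() can put everything back.  The
 * image is read from just past the saved pt_regs, presumably where
 * save_and_clear_fpu() left it.
 */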

static inline int
save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t *fpu)
{
	unsigned long *fpregs = (unsigned long *)(regs+1);
	unsigned long fprs;
	int err = 0;

	fprs = current->thread.fpsaved[0];
	if (fprs & FPRS_DL)
		err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
				    (sizeof(unsigned int) * 32));
	if (fprs & FPRS_DU)
		err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
				    (sizeof(unsigned int) * 32));
	err |= __put_user(current->thread.xfsr[0], &fpu->si_fsr);
	err |= __put_user(current->thread.gsr[0], &fpu->si_gsr);
	err |= __put_user(fprs, &fpu->si_fprs);

	return err;
}
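
/* Pick the stack on which the signal frame will be built: normally just
 * below the interrupted frame, but at the top of the alternate signal
 * stack when SA_ONSTACK is set and we are not already running on it.
 */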

static inline void *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, unsigned long framesize)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP] + STACK_BIAS;

	/* This is the X/Open sanctioned signal stack switching. */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		if (!on_sig_stack(sp) &&
		    !((current->sas_ss_sp + current->sas_ss_size) & 7))
			sp = current->sas_ss_sp + current->sas_ss_size;
	}
	return (void *)(sp - framesize);
}
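
/* Build the rt signal frame on the user stack and redirect the task into
 * the handler: the current pt_regs, FPU state (if live), sigaltstack info,
 * blocked mask, register window and siginfo are copied out, then the stack
 * pointer, the first two argument registers, %tpc/%tnpc and the return
 * address are rewritten so the next return to userland enters the handler
 * with ka_restorer as its way back into the kernel.
 */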

static inline void
setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
	       int signo, sigset_t *oldset, siginfo_t *info)
{
	struct rt_signal_frame *sf;
	int sigframe_size, err;

	/* 1. Make sure everything is clean */
	synchronize_user_stack();
	save_and_clear_fpu();

	sigframe_size = RT_ALIGNEDSZ;
	if (!(current->thread.fpsaved[0] & FPRS_FEF))
		sigframe_size -= sizeof(__siginfo_fpu_t);

	sf = (struct rt_signal_frame *)get_sigframe(ka, regs, sigframe_size);

	if (invalid_frame_pointer (sf, sigframe_size))
		goto sigill;

	if (current->thread.w_saved != 0) {
#ifdef DEBUG_SIGNALS
		printk ("%s[%d]: Invalid user stack frame for "
			"signal delivery.\n", current->comm, current->pid);
#endif
		goto sigill;
	}

	/* 2. Save the current process state */
	err = copy_to_user(&sf->regs, regs, sizeof (*regs));

	if (current->thread.fpsaved[0] & FPRS_FEF) {
		err |= save_fpu_state(regs, &sf->fpu_state);
		err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}

	/* Setup sigaltstack */
	err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);

	err |= copy_to_user(&sf->mask, oldset, sizeof(sigset_t));

	err |= copy_in_user((u64 *)sf,
			    (u64 *)(regs->u_regs[UREG_FP]+STACK_BIAS),
			    sizeof(struct reg_window));

	if (info)
		err |= copy_siginfo_to_user(&sf->info, info);
	else {
		err |= __put_user(signo, &sf->info.si_signo);
		err |= __put_user(SI_NOINFO, &sf->info.si_code);
	}
	if (err)
		goto sigsegv;

	/* 3. signal handler back-trampoline and parameters */
	regs->u_regs[UREG_FP] = ((unsigned long) sf) - STACK_BIAS;
	regs->u_regs[UREG_I0] = signo;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;

	/* 4. signal handler */
	regs->tpc = (unsigned long) ka->sa.sa_handler;
	regs->tnpc = (regs->tpc + 4);

	/* 5. return to kernel instructions */
	regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	return;

sigill:
	do_exit(SIGILL);
sigsegv:
	do_exit(SIGSEGV);
}
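
/* Deliver one signal: push the frame, then honour SA_ONESHOT (reset the
 * handler to SIG_DFL) and, unless SA_NOMASK is set, block the handler's
 * sa_mask plus the signal itself for the duration of the handler.
 */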

static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
				 siginfo_t *info,
				 sigset_t *oldset, struct pt_regs *regs)
{
	setup_rt_frame(ka, regs, signr, oldset, (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
	if(ka->sa.sa_flags & SA_ONESHOT)
		ka->sa.sa_handler = SIG_DFL;
	if(!(ka->sa.sa_flags & SA_NOMASK)) {
		spin_lock_irq(&current->sigmask_lock);
		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
		sigaddset(&current->blocked,signr);
		recalc_sigpending(current);
		spin_unlock_irq(&current->sigmask_lock);
	}
}
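
/* Decide what happens to an interrupted system call.  ERESTARTNOHAND always
 * fails with EINTR, ERESTARTSYS restarts only if the handler set SA_RESTART,
 * and ERESTARTNOINTR always restarts; restarting means reloading the original
 * %o0 and backing tpc/tnpc up by one instruction so the trap is re-executed.
 */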

static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
				   struct sigaction *sa)
{
	switch(regs->u_regs[UREG_I0]) {
	case ERESTARTNOHAND:
	no_system_call_restart:
		regs->u_regs[UREG_I0] = EINTR;
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		break;
	case ERESTARTSYS:
		if(!(sa->sa_flags & SA_RESTART))
			goto no_system_call_restart;
		/* fallthrough */
	case ERESTARTNOINTR:
		regs->u_regs[UREG_I0] = orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
}

#ifdef DEBUG_SIGNALS_MAPS

#define MAPS_LINE_FORMAT	"%016lx-%016lx %s %016lx %s %lu "

static inline void read_maps (void)
{
	struct vm_area_struct * map, * next;
	char * buffer;
	ssize_t i;

	buffer = (char*)__get_free_page(GFP_KERNEL);
	if (!buffer)
		return;

	for (map = current->mm->mmap ; map ; map = next ) {
		/* produce the next line */
		char *line;
		char str[5], *cp = str;
		int flags;
		kdev_t dev;
		unsigned long ino;

		/*
		 * Get the next vma now (but it won't be used if we sleep).
		 */
		next = map->vm_next;
		flags = map->vm_flags;

		*cp++ = flags & VM_READ ? 'r' : '-';
		*cp++ = flags & VM_WRITE ? 'w' : '-';
		*cp++ = flags & VM_EXEC ? 'x' : '-';
		*cp++ = flags & VM_MAYSHARE ? 's' : 'p';
		*cp++ = 0;

		dev = 0;
		ino = 0;
		if (map->vm_file != NULL) {
			dev = map->vm_file->f_dentry->d_inode->i_dev;
			ino = map->vm_file->f_dentry->d_inode->i_ino;
			line = d_path(map->vm_file->f_dentry,
				      map->vm_file->f_vfsmnt,
				      buffer, PAGE_SIZE);
		}
		printk(MAPS_LINE_FORMAT, map->vm_start, map->vm_end, str, map->vm_pgoff << PAGE_SHIFT,
		       kdevname(dev), ino);
		if (map->vm_file != NULL)
			printk("%s\n", line);
		else
			printk("\n");
	}
	free_page((unsigned long)buffer);
	return;
}

#endif

/* Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle.  Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
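
/* Main signal-delivery loop, run on the way back to userland.  Signals are
 * dequeued one at a time; ptrace stops, SIG_IGN and the default actions
 * (stop, core dump, exit) are handled here, and the first signal with a
 * real handler is delivered via handle_signal(), restarting the interrupted
 * system call first when that is requested.
 */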

asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
			 unsigned long orig_i0, int restart_syscall)
{
	unsigned long signr;
	siginfo_t info;
	struct k_sigaction *ka;

	if (!oldset)
		oldset = &current->blocked;

#ifdef CONFIG_SPARC32_COMPAT
	if (current->thread.flags & SPARC_FLAG_32BIT) {
		extern asmlinkage int do_signal32(sigset_t *, struct pt_regs *,
						  unsigned long, int);
		return do_signal32(oldset, regs, orig_i0, restart_syscall);
	}
#endif
	for (;;) {
		spin_lock_irq(&current->sigmask_lock);
		signr = dequeue_signal(&current->blocked, &info);
		spin_unlock_irq(&current->sigmask_lock);

		if (!signr) break;

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			current->exit_code = signr;
			current->state = TASK_STOPPED;
			notify_parent(current, SIGCHLD);
			schedule();
			if (!(signr = current->exit_code))
				continue;
			current->exit_code = 0;
			if (signr == SIGSTOP)
				continue;

			/* Update the siginfo structure.  Is this good? */
			if (signr != info.si_signo) {
				info.si_signo = signr;
				info.si_errno = 0;
				info.si_code = SI_USER;
				info.si_pid = current->p_pptr->pid;
				info.si_uid = current->p_pptr->uid;
			}

			/* If the (new) signal is now blocked, requeue it. */
			if (sigismember(&current->blocked, signr)) {
				send_sig_info(signr, &info, current);
				continue;
			}
		}

		ka = &current->sig->action[signr-1];

		if(ka->sa.sa_handler == SIG_IGN) {
			if(signr != SIGCHLD)
				continue;

			/* sys_wait4() grabs the master kernel lock, so
			 * we need not do so, that sucker should be
			 * threaded and would not be that difficult to
			 * do anyways.
			 */
			while(sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
				;
			continue;
		}
		if(ka->sa.sa_handler == SIG_DFL) {
			unsigned long exit_code = signr;

			if(current->pid == 1)
				continue;
			switch(signr) {
			case SIGCONT: case SIGCHLD: case SIGWINCH:
				continue;

			case SIGTSTP: case SIGTTIN: case SIGTTOU:
				if (is_orphaned_pgrp(current->pgrp))
					continue;

			case SIGSTOP:
				if (current->ptrace & PT_PTRACED)
					continue;
				current->state = TASK_STOPPED;
				current->exit_code = signr;
				if(!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags &
				     SA_NOCLDSTOP))
					notify_parent(current, SIGCHLD);
				schedule();
				continue;

			case SIGQUIT: case SIGILL: case SIGTRAP:
			case SIGABRT: case SIGFPE: case SIGSEGV:
			case SIGBUS: case SIGSYS: case SIGXCPU: case SIGXFSZ:
				if (do_coredump(signr, regs))
					exit_code |= 0x80;
#ifdef DEBUG_SIGNALS
				/* Very useful to debug the dynamic linker */
				printk ("Sig %d going...\n", (int)signr);
				show_regs (regs);
#ifdef DEBUG_SIGNALS_TRACE
				{
					struct reg_window *rw = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
					unsigned long ins[8];

					while(rw &&
					      !(((unsigned long) rw) & 0x3)) {
						copy_from_user(ins, &rw->ins[0], sizeof(ins));
						printk("Caller[%016lx](%016lx,%016lx,%016lx,%016lx,%016lx,%016lx)\n", ins[7], ins[0], ins[1], ins[2], ins[3], ins[4], ins[5]);
						rw = (struct reg_window *)(unsigned long)(ins[6] + STACK_BIAS);
					}
				}
#endif
#ifdef DEBUG_SIGNALS_MAPS
				printk("Maps:\n");
				read_maps();
#endif
#endif
				/* fall through */
			default:
				sigaddset(&current->pending.signal, signr);
				recalc_sigpending(current);
				current->flags |= PF_SIGNALED;
				do_exit(exit_code);
				/* NOT REACHED */
			}
		}
		if(restart_syscall)
			syscall_restart(orig_i0, regs, &ka->sa);
		handle_signal(signr, ka, &info, oldset, regs);
		return 1;
	}
	if(restart_syscall &&
	   (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
	    regs->u_regs[UREG_I0] == ERESTARTSYS ||
	    regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
		/* replay the system call when we are done */
		regs->u_regs[UREG_I0] = orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
	return 0;
}