[PATCH] convert signal handling of NODEFER to act like other Unix boxes.
/*
 *  linux/arch/m68knommu/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

/*
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 * 1997-12-01  Modified for POSIX.1b signals by Andreas Schwab
 *
 * mathemu support by Roman Zippel
 *  (Note: fpstate in the signal context is completely ignored for the emulator
 *         and the internal floating point format is put on stack)
 *
 * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
 * Atari :-) Current limitation: Only one sigstack can be active at one time.
 * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
 * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
 * signal handlers!
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/highuid.h>
#include <linux/tty.h>
#include <linux/personality.h>
#include <linux/binfmts.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/ucontext.h>

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs);

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */
asmlinkage int do_sigsuspend(struct pt_regs *regs)
{
        old_sigset_t mask = regs->d3;
        sigset_t saveset;

        mask &= _BLOCKABLE;
        spin_lock_irq(&current->sighand->siglock);
        saveset = current->blocked;
        siginitset(&current->blocked, mask);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        regs->d0 = -EINTR;
        while (1) {
                current->state = TASK_INTERRUPTIBLE;
                schedule();
                if (do_signal(&saveset, regs))
                        return -EINTR;
        }
}
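/*
 * Added commentary (not in the original source): the loop above parks the
 * task with -EINTR preloaded in d0 until do_signal() actually sets up a
 * handler frame; only then does -EINTR become sigsuspend()'s return value.
 * The saved mask (saveset) is handed to do_signal() so the signal frame
 * records the caller's pre-suspend blocked set and sigreturn restores it.
 * A minimal, hypothetical userspace sketch of the pattern this syscall
 * supports (child_exited is an assumed flag set by a SIGCHLD handler):
 *
 *      sigset_t block, old;
 *      sigemptyset(&block);
 *      sigaddset(&block, SIGCHLD);
 *      sigprocmask(SIG_BLOCK, &block, &old);   <- close the race window
 *      while (!child_exited)
 *              sigsuspend(&old);               <- atomically unblock and wait
 *      sigprocmask(SIG_SETMASK, &old, NULL);
 */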
asmlinkage int
do_rt_sigsuspend(struct pt_regs *regs)
{
        sigset_t *unewset = (sigset_t *)regs->d1;
        size_t sigsetsize = (size_t)regs->d2;
        sigset_t saveset, newset;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&newset, unewset, sizeof(newset)))
                return -EFAULT;
        sigdelsetmask(&newset, ~_BLOCKABLE);

        spin_lock_irq(&current->sighand->siglock);
        saveset = current->blocked;
        current->blocked = newset;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        regs->d0 = -EINTR;
        while (1) {
                current->state = TASK_INTERRUPTIBLE;
                schedule();
                if (do_signal(&saveset, regs))
                        return -EINTR;
        }
}
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction *act,
              struct old_sigaction *oact)
{
        struct k_sigaction new_ka, old_ka;
        int ret;

        if (act) {
                old_sigset_t mask;
                if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
                    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
                        return -EFAULT;
                __get_user(new_ka.sa.sa_flags, &act->sa_flags);
                __get_user(mask, &act->sa_mask);
                siginitset(&new_ka.sa.sa_mask, mask);
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

        if (!ret && oact) {
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
                    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
                        return -EFAULT;
                __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
                __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
        }

        return ret;
}

asmlinkage int
sys_sigaltstack(const stack_t *uss, stack_t *uoss)
{
        return do_sigaltstack(uss, uoss, rdusp());
}
/*
 * Do a signal return; undo the signal stack.
 *
 * Keep the return code on the stack quadword aligned!
 * That makes the cache flush below easier.
 */

struct sigframe
{
        char *pretcode;
        int sig;
        int code;
        struct sigcontext *psc;
        char retcode[8];
        unsigned long extramask[_NSIG_WORDS-1];
        struct sigcontext sc;
};

struct rt_sigframe
{
        char *pretcode;
        int sig;
        struct siginfo *pinfo;
        void *puc;
        char retcode[8];
        struct siginfo info;
        struct ucontext uc;
};
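/*
 * Added commentary: setup_frame()/setup_rt_frame() below build one of these
 * structures at the user stack pointer, so when the handler starts the m68k
 * calling convention finds the (possibly exec-domain translated) signal
 * number, the vector code and the sigcontext pointer (or the siginfo and
 * ucontext pointers) on the stack as its arguments, while pretcode points at
 * the retcode[] trampoline that issues the sigreturn trap once the handler
 * returns.
 */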
#ifdef CONFIG_FPU

static unsigned char fpu_version = 0;   /* version number of fpu, set by setup_frame */

static inline int restore_fpu_state(struct sigcontext *sc)
{
        int err = 1;

        if (FPU_IS_EMU) {
                /* restore registers */
                memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
                memcpy(current->thread.fp, sc->sc_fpregs, 24);
                return 0;
        }

        if (sc->sc_fpstate[0]) {
                /* Verify the frame format.  */
                if (sc->sc_fpstate[0] != fpu_version)
                        goto out;

                __asm__ volatile (".chip 68k/68881\n\t"
                                  "fmovemx %0,%/fp0-%/fp1\n\t"
                                  "fmoveml %1,%/fpcr/%/fpsr/%/fpiar\n\t"
                                  ".chip 68k"
                                  : /* no outputs */
                                  : "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl));
        }
        __asm__ volatile (".chip 68k/68881\n\t"
                          "frestore %0\n\t"
                          ".chip 68k" : : "m" (*sc->sc_fpstate));
        err = 0;

out:
        return err;
}
#define FPCONTEXT_SIZE  216
#define uc_fpstate      uc_filler[0]
#define uc_formatvec    uc_filler[FPCONTEXT_SIZE/4]
#define uc_extra        uc_filler[FPCONTEXT_SIZE/4+1]

static inline int rt_restore_fpu_state(struct ucontext *uc)
{
        unsigned char fpstate[FPCONTEXT_SIZE];
        int context_size = 0;
        fpregset_t fpregs;
        int err = 1;

        if (FPU_IS_EMU) {
                /* restore fpu control register */
                if (__copy_from_user(current->thread.fpcntl,
                                     &uc->uc_mcontext.fpregs.f_pcr, 12))
                        goto out;
                /* restore all other fpu registers */
                if (__copy_from_user(current->thread.fp,
                                     uc->uc_mcontext.fpregs.f_fpregs, 96))
                        goto out;
                return 0;
        }

        if (__get_user(*(long *)fpstate, (long *)&uc->uc_fpstate))
                goto out;
        if (fpstate[0]) {
                context_size = fpstate[1];

                /* Verify the frame format.  */
                if (fpstate[0] != fpu_version)
                        goto out;
                if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
                                     sizeof(fpregs)))
                        goto out;
                __asm__ volatile (".chip 68k/68881\n\t"
                                  "fmovemx %0,%/fp0-%/fp7\n\t"
                                  "fmoveml %1,%/fpcr/%/fpsr/%/fpiar\n\t"
                                  ".chip 68k"
                                  : /* no outputs */
                                  : "m" (*fpregs.f_fpregs),
                                    "m" (fpregs.f_pcr));
        }
        if (context_size &&
            __copy_from_user(fpstate + 4, (long *)&uc->uc_fpstate + 1,
                             context_size))
                goto out;
        __asm__ volatile (".chip 68k/68881\n\t"
                          "frestore %0\n\t"
                          ".chip 68k" : : "m" (*fpstate));
        err = 0;

out:
        return err;
}
#endif
static inline int
restore_sigcontext(struct pt_regs *regs, struct sigcontext *usc, void *fp,
                   int *pd0)
{
        int formatvec;
        struct sigcontext context;
        int err = 0;

        /* get previous context */
        if (copy_from_user(&context, usc, sizeof(context)))
                goto badframe;

        /* restore passed registers */
        regs->d1 = context.sc_d1;
        regs->a0 = context.sc_a0;
        regs->a1 = context.sc_a1;
        regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
        regs->pc = context.sc_pc;
        regs->orig_d0 = -1;             /* disable syscall checks */
        wrusp(context.sc_usp);
        formatvec = context.sc_formatvec;
        regs->format = formatvec >> 12;
        regs->vector = formatvec & 0xfff;

#ifdef CONFIG_FPU
        err = restore_fpu_state(&context);
#endif

        *pd0 = context.sc_d0;
        return err;

badframe:
        return 1;
}
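/*
 * Added commentary: only the condition-code byte of the status register is
 * taken from the user-supplied sigcontext ((regs->sr & 0xff00) |
 * (context.sc_sr & 0xff)), so a forged frame cannot put the CPU back into
 * supervisor state, and orig_d0 is overwritten with -1 so the return path
 * does not treat the restored d0 as an interrupted system call that needs
 * restarting.
 */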
static inline int
rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
                    struct ucontext *uc, int *pd0)
{
        int temp;
        greg_t *gregs = uc->uc_mcontext.gregs;
        unsigned long usp;
        int err;

        err = __get_user(temp, &uc->uc_mcontext.version);
        if (temp != MCONTEXT_VERSION)
                goto badframe;
        /* restore passed registers */
        err |= __get_user(regs->d0, &gregs[0]);
        err |= __get_user(regs->d1, &gregs[1]);
        err |= __get_user(regs->d2, &gregs[2]);
        err |= __get_user(regs->d3, &gregs[3]);
        err |= __get_user(regs->d4, &gregs[4]);
        err |= __get_user(regs->d5, &gregs[5]);
        err |= __get_user(sw->d6, &gregs[6]);
        err |= __get_user(sw->d7, &gregs[7]);
        err |= __get_user(regs->a0, &gregs[8]);
        err |= __get_user(regs->a1, &gregs[9]);
        err |= __get_user(regs->a2, &gregs[10]);
        err |= __get_user(sw->a3, &gregs[11]);
        err |= __get_user(sw->a4, &gregs[12]);
        err |= __get_user(sw->a5, &gregs[13]);
        err |= __get_user(sw->a6, &gregs[14]);
        err |= __get_user(usp, &gregs[15]);
        wrusp(usp);
        err |= __get_user(regs->pc, &gregs[16]);
        err |= __get_user(temp, &gregs[17]);
        regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
        regs->orig_d0 = -1;             /* disable syscall checks */
        regs->format = temp >> 12;
        regs->vector = temp & 0xfff;

        if (do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
                goto badframe;

        *pd0 = regs->d0;
        return err;

badframe:
        return 1;
}
asmlinkage int do_sigreturn(unsigned long __unused)
{
        struct switch_stack *sw = (struct switch_stack *) &__unused;
        struct pt_regs *regs = (struct pt_regs *) (sw + 1);
        unsigned long usp = rdusp();
        struct sigframe *frame = (struct sigframe *)(usp - 4);
        sigset_t set;
        int d0;

        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
            (_NSIG_WORDS > 1 &&
             __copy_from_user(&set.sig[1], &frame->extramask,
                              sizeof(frame->extramask))))
                goto badframe;

        sigdelsetmask(&set, ~_BLOCKABLE);
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = set;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        if (restore_sigcontext(regs, &frame->sc, frame + 1, &d0))
                goto badframe;
        return d0;

badframe:
        force_sig(SIGSEGV, current);
        return 0;
}
asmlinkage int do_rt_sigreturn(unsigned long __unused)
{
        struct switch_stack *sw = (struct switch_stack *) &__unused;
        struct pt_regs *regs = (struct pt_regs *) (sw + 1);
        unsigned long usp = rdusp();
        struct rt_sigframe *frame = (struct rt_sigframe *)(usp - 4);
        sigset_t set;
        int d0;

        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;

        sigdelsetmask(&set, ~_BLOCKABLE);
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = set;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        if (rt_restore_ucontext(regs, sw, &frame->uc, &d0))
                goto badframe;
        return d0;

badframe:
        force_sig(SIGSEGV, current);
        return 0;
}
#ifdef CONFIG_FPU
/*
 * Set up a signal frame.
 */

static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
{
        if (FPU_IS_EMU) {
                /* save registers */
                memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
                memcpy(sc->sc_fpregs, current->thread.fp, 24);
                return;
        }

        __asm__ volatile (".chip 68k/68881\n\t"
                          "fsave %0\n\t"
                          ".chip 68k"
                          : : "m" (*sc->sc_fpstate) : "memory");

        if (sc->sc_fpstate[0]) {
                fpu_version = sc->sc_fpstate[0];
                __asm__ volatile (".chip 68k/68881\n\t"
                                  "fmovemx %/fp0-%/fp1,%0\n\t"
                                  "fmoveml %/fpcr/%/fpsr/%/fpiar,%1\n\t"
                                  ".chip 68k"
                                  : /* no outputs */
                                  : "m" (*sc->sc_fpregs),
                                    "m" (*sc->sc_fpcntl)
                                  : "memory");
        }
}
static inline int rt_save_fpu_state(struct ucontext *uc, struct pt_regs *regs)
{
        unsigned char fpstate[FPCONTEXT_SIZE];
        int context_size = 0;
        int err = 0;

        if (FPU_IS_EMU) {
                /* save fpu control register */
                err |= copy_to_user(&uc->uc_mcontext.fpregs.f_pcr,
                                    current->thread.fpcntl, 12);
                /* save all other fpu registers */
                err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
                                    current->thread.fp, 96);
                return err;
        }

        __asm__ volatile (".chip 68k/68881\n\t"
                          "fsave %0\n\t"
                          ".chip 68k"
                          : : "m" (*fpstate) : "memory");

        err |= __put_user(*(long *)fpstate, (long *)&uc->uc_fpstate);
        if (fpstate[0]) {
                fpregset_t fpregs;
                context_size = fpstate[1];
                fpu_version = fpstate[0];
                __asm__ volatile (".chip 68k/68881\n\t"
                                  "fmovemx %/fp0-%/fp7,%0\n\t"
                                  "fmoveml %/fpcr/%/fpsr/%/fpiar,%1\n\t"
                                  ".chip 68k"
                                  : /* no outputs */
                                  : "m" (*fpregs.f_fpregs),
                                    "m" (fpregs.f_pcr)
                                  : "memory");
                err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
                                    sizeof(fpregs));
        }
        if (context_size)
                err |= copy_to_user((long *)&uc->uc_fpstate + 1, fpstate + 4,
                                    context_size);
        return err;
}

#endif
static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
                             unsigned long mask)
{
        sc->sc_mask = mask;
        sc->sc_usp = rdusp();
        sc->sc_d0 = regs->d0;
        sc->sc_d1 = regs->d1;
        sc->sc_a0 = regs->a0;
        sc->sc_a1 = regs->a1;
        sc->sc_sr = regs->sr;
        sc->sc_pc = regs->pc;
        sc->sc_formatvec = regs->format << 12 | regs->vector;
#ifdef CONFIG_FPU
        save_fpu_state(sc, regs);
#endif
}
static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs)
{
        struct switch_stack *sw = (struct switch_stack *)regs - 1;
        greg_t *gregs = uc->uc_mcontext.gregs;
        int err = 0;

        err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
        err |= __put_user(regs->d0, &gregs[0]);
        err |= __put_user(regs->d1, &gregs[1]);
        err |= __put_user(regs->d2, &gregs[2]);
        err |= __put_user(regs->d3, &gregs[3]);
        err |= __put_user(regs->d4, &gregs[4]);
        err |= __put_user(regs->d5, &gregs[5]);
        err |= __put_user(sw->d6, &gregs[6]);
        err |= __put_user(sw->d7, &gregs[7]);
        err |= __put_user(regs->a0, &gregs[8]);
        err |= __put_user(regs->a1, &gregs[9]);
        err |= __put_user(regs->a2, &gregs[10]);
        err |= __put_user(sw->a3, &gregs[11]);
        err |= __put_user(sw->a4, &gregs[12]);
        err |= __put_user(sw->a5, &gregs[13]);
        err |= __put_user(sw->a6, &gregs[14]);
        err |= __put_user(rdusp(), &gregs[15]);
        err |= __put_user(regs->pc, &gregs[16]);
        err |= __put_user(regs->sr, &gregs[17]);
#ifdef CONFIG_FPU
        err |= rt_save_fpu_state(uc, regs);
#endif
        return err;
}
static inline void push_cache (unsigned long vaddr)
{
}
static inline void *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
{
        unsigned long usp;

        /* Default to using normal stack.  */
        usp = rdusp();

        /* This is the X/Open sanctioned signal stack switching.  */
        if (ka->sa.sa_flags & SA_ONSTACK) {
                if (!on_sig_stack(usp))
                        usp = current->sas_ss_sp + current->sas_ss_size;
        }
        return (void *)((usp - frame_size) & -8UL);
}
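/*
 * Added commentary: this is the usual sigaltstack()/SA_ONSTACK switch - if
 * the handler asked for the alternate stack and we are not already running
 * on it, the frame is placed at the top of that stack instead of at the
 * current usp.  Either way the result is rounded down to an 8-byte boundary
 * ("& -8UL"), which is what keeps retcode[] quadword aligned as the comment
 * above the frame structures requests.
 */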
static void setup_frame (int sig, struct k_sigaction *ka,
                         sigset_t *set, struct pt_regs *regs)
{
        struct sigframe *frame;
        struct sigcontext context;
        int err = 0;

        frame = get_sigframe(ka, regs, sizeof(*frame));

        err |= __put_user((current_thread_info()->exec_domain
                           && current_thread_info()->exec_domain->signal_invmap
                           && sig < 32
                           ? current_thread_info()->exec_domain->signal_invmap[sig]
                           : sig),
                          &frame->sig);

        err |= __put_user(regs->vector, &frame->code);
        err |= __put_user(&frame->sc, &frame->psc);

        if (_NSIG_WORDS > 1)
                err |= copy_to_user(frame->extramask, &set->sig[1],
                                    sizeof(frame->extramask));

        setup_sigcontext(&context, regs, set->sig[0]);
        err |= copy_to_user (&frame->sc, &context, sizeof(context));

        /* Set up to return from userspace.  */
        err |= __put_user(frame->retcode, &frame->pretcode);
        /* moveq #,d0; trap #0 */
        err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
                          (long *)(frame->retcode));
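        /*
         * Added note on the encoding above: the 32-bit constant written into
         * retcode[] is two 16-bit instructions, 0x7000 + __NR_sigreturn
         * ("moveq #__NR_sigreturn,%d0") followed by 0x4e40 ("trap #0") - the
         * classic m68k sigreturn trampoline executed when the handler returns.
         */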
        if (err)
                goto give_sigsegv;

        push_cache ((unsigned long) &frame->retcode);

        /* Set up registers for signal handler */
        wrusp ((unsigned long) frame);
        regs->pc = (unsigned long) ka->sa.sa_handler;

adjust_stack:
        /* Prepare to skip over the extra stuff in the exception frame.  */
        if (regs->stkadj) {
                struct pt_regs *tregs =
                        (struct pt_regs *)((ulong)regs + regs->stkadj);
#if DEBUG
                printk(KERN_DEBUG "Performing stackadjust=%04x\n", regs->stkadj);
#endif
                /* This must be copied with decreasing addresses to
                   handle overlaps.  */
                tregs->vector = 0;
                tregs->format = 0;
                tregs->pc = regs->pc;
                tregs->sr = regs->sr;
        }
        return;

give_sigsegv:
        force_sigsegv(sig, current);
        goto adjust_stack;
}
static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
                            sigset_t *set, struct pt_regs *regs)
{
        struct rt_sigframe *frame;
        int err = 0;

        frame = get_sigframe(ka, regs, sizeof(*frame));

        err |= __put_user((current_thread_info()->exec_domain
                           && current_thread_info()->exec_domain->signal_invmap
                           && sig < 32
                           ? current_thread_info()->exec_domain->signal_invmap[sig]
                           : sig),
                          &frame->sig);
        err |= __put_user(&frame->info, &frame->pinfo);
        err |= __put_user(&frame->uc, &frame->puc);
        err |= copy_siginfo_to_user(&frame->info, info);

        /* Create the ucontext.  */
        err |= __put_user(0, &frame->uc.uc_flags);
        err |= __put_user(0, &frame->uc.uc_link);
        err |= __put_user((void *)current->sas_ss_sp,
                          &frame->uc.uc_stack.ss_sp);
        err |= __put_user(sas_ss_flags(rdusp()),
                          &frame->uc.uc_stack.ss_flags);
        err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
        err |= rt_setup_ucontext(&frame->uc, regs);
        err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));

        /* Set up to return from userspace.  */
        err |= __put_user(frame->retcode, &frame->pretcode);
        /* moveq #,d0; notb d0; trap #0 */
        err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
                          (long *)(frame->retcode + 0));
        err |= __put_user(0x4e40, (short *)(frame->retcode + 4));
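        /*
         * Added note: __NR_rt_sigreturn does not fit in moveq's signed 8-bit
         * immediate, so this trampoline is "moveq #(__NR_rt_sigreturn ^ 0xff),%d0"
         * (0x7000 + ...), then "not.b %d0" (0x4600) to recover the real syscall
         * number in the low byte, followed by the separately stored 0x4e40
         * ("trap #0").
         */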
        if (err)
                goto give_sigsegv;

        push_cache ((unsigned long) &frame->retcode);

        /* Set up registers for signal handler */
        wrusp ((unsigned long) frame);
        regs->pc = (unsigned long) ka->sa.sa_handler;

adjust_stack:
        /* Prepare to skip over the extra stuff in the exception frame.  */
        if (regs->stkadj) {
                struct pt_regs *tregs =
                        (struct pt_regs *)((ulong)regs + regs->stkadj);
#if DEBUG
                printk(KERN_DEBUG "Performing stackadjust=%04x\n", regs->stkadj);
#endif
                /* This must be copied with decreasing addresses to
                   handle overlaps.  */
                tregs->vector = 0;
                tregs->format = 0;
                tregs->pc = regs->pc;
                tregs->sr = regs->sr;
        }
        return;

give_sigsegv:
        force_sigsegv(sig, current);
        goto adjust_stack;
}
static inline void
handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
{
        switch (regs->d0) {
        case -ERESTARTNOHAND:
                if (!has_handler)
                        goto do_restart;
                regs->d0 = -EINTR;
                break;

        case -ERESTARTSYS:
                if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
                        regs->d0 = -EINTR;
                        break;
                }
                /* fallthrough */
        case -ERESTARTNOINTR:
        do_restart:
                regs->d0 = regs->orig_d0;
                regs->pc -= 2;
                break;
        }
}
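/*
 * Added commentary: the -ERESTART* codes are kernel-internal markers for
 * "this system call was interrupted by a signal" and are not meant to reach
 * userspace.  Restarting means reloading d0 with the original syscall number
 * (orig_d0) and backing pc up by 2 bytes - the size of the trap instruction -
 * so the same trap is re-executed on return to user mode; otherwise the
 * caller simply sees -EINTR.
 */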
/*
 * OK, we're invoking a handler
 */
static void
handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
              sigset_t *oldset, struct pt_regs *regs)
{
        /* are we from a system call? */
        if (regs->orig_d0 >= 0)
                /* If so, check system call restarting.. */
                handle_restart(regs, ka, 1);

        /* set up the stack frame */
        if (ka->sa.sa_flags & SA_SIGINFO)
                setup_rt_frame(sig, ka, info, oldset, regs);
        else
                setup_frame(sig, ka, oldset, regs);

        if (ka->sa.sa_flags & SA_ONESHOT)
                ka->sa.sa_handler = SIG_DFL;

        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
        if (!(ka->sa.sa_flags & SA_NODEFER))
                sigaddset(&current->blocked, sig);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
}
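/*
 * Added commentary: the blocking logic above is what the SA_NODEFER change in
 * the patch subject is about.  The handler's sa_mask is now always OR-ed into
 * the blocked set for the duration of the handler, and SA_NODEFER only skips
 * adding the signal being delivered itself - matching other Unix
 * implementations, whereas previously SA_NODEFER suppressed the whole mask
 * update.
 */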
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle.  Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
{
        struct k_sigaction ka;
        siginfo_t info;
        int signr;

        /*
         * We want the common case to go fast, which
         * is why we may in certain cases get here from
         * kernel mode. Just return without doing anything
         * if so.
         */
        if (!user_mode(regs))
                return 1;

        if (!oldset)
                oldset = &current->blocked;

        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Whee!  Actually deliver the signal.  */
                handle_signal(signr, &ka, &info, oldset, regs);
                return 1;
        }

        /* Did we come from a system call? */
        if (regs->orig_d0 >= 0) {
                /* Restart the system call - no handlers present */
                if (regs->d0 == -ERESTARTNOHAND
                    || regs->d0 == -ERESTARTSYS
                    || regs->d0 == -ERESTARTNOINTR) {
                        regs->d0 = regs->orig_d0;
                        regs->pc -= 2;
                } else if (regs->d0 == -ERESTART_RESTARTBLOCK) {
                        regs->d0 = __NR_restart_syscall;
                        regs->pc -= 2;
                }
        }
        return 0;
}