/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
#include <linux/freezer.h>

#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>

#include "ptrace.h"
#include "signal.h"
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn))

/*
 * With EABI, the syscall number has to be loaded into r7.
 */
#define MOV_R7_NR_SIGRETURN	(0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define MOV_R7_NR_RT_SIGRETURN	(0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
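/*
 * Decoded, each trampoline is roughly:
 *
 *   ARM:    mov  r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)   @ 0xe3a07000 | nr
 *           swi  #__NR_sigreturn                             @ 0xef000000 | nr (OABI encoding)
 *
 *   Thumb:  mov  r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)   @ 0x2700 | nr
 *           swi  0                                           @ 0xdf00
 *
 * and likewise for the rt_sigreturn variants.
 */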
const unsigned long sigreturn_codes[7] = {
	MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
};
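/*
 * setup_return() below indexes this table as idx = (thumb << 1), plus 3
 * when SA_SIGINFO selects the rt_ variants, copies entries [idx] and
 * [idx+1] into the frame's retcode words, and points the handler's
 * return address either there or at the kernel high-page copy of the
 * same sequence.
 */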
static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall);
/*
 * atomically swap in the new signal mask, and wait for a signal.
 */
asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask, struct pt_regs *regs)
{
	sigset_t saveset;

	mask &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;
	siginitset(&current->blocked, mask);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	regs->ARM_r0 = -EINTR;

	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		if (do_signal(&saveset, regs, 0))
			return regs->ARM_r0;
	}
}
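/*
 * Note: ARM_r0 is preloaded with -EINTR above, so once do_signal() has
 * delivered a handler the interrupted sigsuspend() returns -EINTR as
 * POSIX requires; the saved mask in 'saveset' is what the handler's
 * sigframe restores on sigreturn.
 */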
asmlinkage int
sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs)
{
	sigset_t saveset, newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	regs->ARM_r0 = -EINTR;

	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		if (do_signal(&saveset, regs, 0))
			return regs->ARM_r0;
	}
}
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
	      struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
			return -EFAULT;
		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
		__get_user(mask, &act->sa_mask);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
			return -EFAULT;
		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}
#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = CRUNCH_MAGIC;
	kframe->size = CRUNCH_STORAGE_SIZE;
	crunch_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}
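/*
 * The save/restore helpers above and below share one idiom: build the
 * frame in an on-stack bounce buffer that is forced to 64-bit alignment
 * ((kbuf + 8) & ~7 always lands inside the extra 8 bytes), fill or check
 * it there, and then move it to/from user space in a single
 * __copy_{to,from}_user() call.
 */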
static int restore_crunch_context(struct crunch_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != CRUNCH_MAGIC ||
	    kframe->size != CRUNCH_STORAGE_SIZE)
		return -1;
	crunch_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}
#endif
#ifdef CONFIG_IWMMXT

static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = IWMMXT_MAGIC;
	kframe->size = IWMMXT_STORAGE_SIZE;
	iwmmxt_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}
static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != IWMMXT_MAGIC ||
	    kframe->size != IWMMXT_STORAGE_SIZE)
		return -1;
	iwmmxt_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}

#endif
/*
 * Do a signal return; undo the signal stack.  These are aligned to 64-bit.
 */
struct sigframe {
	struct ucontext uc;
	unsigned long retcode[2];
};

struct rt_sigframe {
	struct siginfo info;
	struct sigframe sig;
};
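/*
 * Layout note: 'uc.uc_regspace' (used below via struct aux_sigframe) is
 * where the optional coprocessor state (Crunch, iWMMXt, eventually VFP)
 * is stacked; sys_rt_sigreturn() finds the same ucontext via
 * rt_sigframe.sig, which is why restore_sigframe() can serve both paths.
 */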
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	struct aux_sigframe __user *aux;
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0) {
		sigdelsetmask(&set, ~_BLOCKABLE);
		spin_lock_irq(&current->sighand->siglock);
		current->blocked = set;
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}

	__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	err |= !valid_user_regs(regs);

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= restore_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
//	if (err == 0)
//		err |= vfp_restore_state(&sf->aux.vfp);
#endif

	return err;
}
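/*
 * Each __get_user_error() above ORs its fault status into 'err', so a
 * single check covers the whole register block, and valid_user_regs()
 * rejects a frame whose saved CPSR would leave the task outside user
 * mode or with interrupts masked.
 */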
asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	/* Send SIGTRAP if we're single-stepping */
	if (current->ptrace & PT_SINGLESTEP) {
		ptrace_cancel_bpt(current);
		send_sig(SIGTRAP, current, 1);
	}

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, &frame->sig))
		goto badframe;

	if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
		goto badframe;

	/* Send SIGTRAP if we're single-stepping */
	if (current->ptrace & PT_SINGLESTEP) {
		ptrace_cancel_bpt(current);
		send_sig(SIGTRAP, current, 1);
	}

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
	struct aux_sigframe __user *aux;
	int err = 0;

	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
//	if (err == 0)
//		err |= vfp_save_state(&sf->aux.vfp);
#endif
	__put_user_error(0, &aux->end_magic, err);

	return err;
}
static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize)
{
	unsigned long sp = regs->ARM_sp;
	void __user *frame;

	/*
	 * This is the X/Open sanctioned signal stack switching.
	 */
	if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
		sp = current->sas_ss_sp + current->sas_ss_size;

	/*
	 * ATPCS B01 mandates 8-byte alignment
	 */
	frame = (void __user *)((sp - framesize) & ~7);

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(VERIFY_WRITE, frame, framesize))
		frame = NULL;

	return frame;
}
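/*
 * For example (values purely illustrative): with sp = 0xbef00f14 and
 * framesize = 0x2a8, (sp - framesize) = 0xbef00c6c and masking with ~7
 * gives 0xbef00c68, i.e. the frame is pushed below the current stack
 * pointer and rounded down to an 8-byte boundary.
 */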
static int
setup_return(struct pt_regs *regs, struct k_sigaction *ka,
	     unsigned long __user *rc, void __user *frame, int usig)
{
	unsigned long handler = (unsigned long)ka->sa.sa_handler;
	unsigned long retcode;
	int thumb = 0;
	unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;

	/*
	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
	 */
	if (ka->sa.sa_flags & SA_THIRTYTWO)
		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
	if (elf_hwcap & HWCAP_THUMB) {
		/*
		 * The LSB of the handler determines if we're going to
		 * be using THUMB or ARM mode for this signal handler.
		 */
		thumb = handler & 1;

		if (thumb)
			cpsr |= PSR_T_BIT;
		else
			cpsr &= ~PSR_T_BIT;
	}
#endif

	if (ka->sa.sa_flags & SA_RESTORER) {
		retcode = (unsigned long)ka->sa.sa_restorer;
	} else {
		unsigned int idx = thumb << 1;

		if (ka->sa.sa_flags & SA_SIGINFO)
			idx += 3;

		if (__put_user(sigreturn_codes[idx],   rc) ||
		    __put_user(sigreturn_codes[idx+1], rc+1))
			return 1;

		if (cpsr & MODE32_BIT) {
			/*
			 * 32-bit code can use the new high-page
			 * signal return code support.
			 */
			retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
		} else {
			/*
			 * Ensure that the instruction cache sees
			 * the return code written onto the stack.
			 */
			flush_icache_range((unsigned long)rc,
					   (unsigned long)(rc + 2));

			retcode = ((unsigned long)rc) + thumb;
		}
	}

	regs->ARM_r0 = usig;
	regs->ARM_sp = (unsigned long)frame;
	regs->ARM_lr = retcode;
	regs->ARM_pc = handler;
	regs->ARM_cpsr = cpsr;

	return 0;
}
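/*
 * Summary of the register state handed to user space: r0 carries the
 * (possibly translated) signal number, sp points at the freshly built
 * sigframe, lr at the sigreturn trampoline (restorer, kernel high page,
 * or the copy written onto the stack above), pc at the handler itself,
 * and CPSR selects ARM or Thumb according to the handler's LSB.
 */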
static int
setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	/*
	 * Set uc.uc_flags to a value which sc.trap_no would never have.
	 */
	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);

	err |= setup_sigframe(frame, regs, set);
	if (err == 0)
		err = setup_return(regs, ka, frame->retcode, frame, usig);

	return err;
}
static int
setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
	       sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
	stack_t stack;
	int err = 0;

	if (!frame)
		return 1;

	err |= copy_siginfo_to_user(&frame->info, info);

	__put_user_error(0, &frame->sig.uc.uc_flags, err);
	__put_user_error(NULL, &frame->sig.uc.uc_link, err);

	memset(&stack, 0, sizeof(stack));
	stack.ss_sp = (void __user *)current->sas_ss_sp;
	stack.ss_flags = sas_ss_flags(regs->ARM_sp);
	stack.ss_size = current->sas_ss_size;
	err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack));

	err |= setup_sigframe(&frame->sig, regs, set);
	if (err == 0)
		err = setup_return(regs, ka, frame->sig.retcode, frame, usig);

	if (err == 0) {
		/*
		 * For realtime signals we must also set the second and third
		 * arguments for the signal handler.
		 *  -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
		 */
		regs->ARM_r1 = (unsigned long)&frame->info;
		regs->ARM_r2 = (unsigned long)&frame->sig.uc;
	}

	return err;
}
static inline void restart_syscall(struct pt_regs *regs)
{
	regs->ARM_r0 = regs->ARM_ORIG_r0;
	regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
}
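/*
 * Restarting means putting the original first argument back into r0
 * (the syscall return path clobbered it with the -ERESTART* code) and
 * stepping the PC back over the SWI instruction - 2 bytes in Thumb
 * mode, 4 in ARM mode - so it is executed again on return to user space.
 */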
/*
 * OK, we're invoking a handler
 */
static void
handle_signal(unsigned long sig, struct k_sigaction *ka,
	      siginfo_t *info, sigset_t *oldset,
	      struct pt_regs * regs, int syscall)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;
	int usig = sig;
	int ret;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		switch (regs->ARM_r0) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->ARM_r0 = -EINTR;
			break;
		case -ERESTARTSYS:
			if (!(ka->sa.sa_flags & SA_RESTART)) {
				regs->ARM_r0 = -EINTR;
				break;
			}
			/* fallthrough */
		case -ERESTARTNOINTR:
			restart_syscall(regs);
		}
	}

	/*
	 * translate the signal
	 */
	if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
		usig = thread->exec_domain->signal_invmap[usig];

	/*
	 * Set up the stack frame
	 */
	if (ka->sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(usig, ka, info, oldset, regs);
	else
		ret = setup_frame(usig, ka, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	if (ret != 0) {
		force_sigsegv(sig, tsk);
		return;
	}

	/*
	 * Block the signal if we were successful.
	 */
	spin_lock_irq(&tsk->sighand->siglock);
	sigorsets(&tsk->blocked, &tsk->blocked,
		  &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&tsk->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&tsk->sighand->siglock);
}
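/*
 * Restart policy recap: -ERESTARTNOHAND and -ERESTART_RESTARTBLOCK always
 * become -EINTR once a handler runs, -ERESTARTSYS is restarted only if the
 * handler was installed with SA_RESTART, and -ERESTARTNOINTR is always
 * restarted.
 */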
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
{
	struct k_sigaction ka;
	siginfo_t info;
	int signr;

	/*
	 * We want the common case to go fast, which
	 * is why we may in certain cases get here from
	 * kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return 0;

	if (try_to_freeze())
		goto no_signal;

	if (current->ptrace & PT_SINGLESTEP)
		ptrace_cancel_bpt(current);

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		handle_signal(signr, &ka, &info, oldset, regs, syscall);
		if (current->ptrace & PT_SINGLESTEP)
			ptrace_set_bpt(current);
		return 1;
	}

 no_signal:
	/*
	 * No signal to deliver to the process - restart the syscall.
	 */
	if (syscall) {
		if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
			if (thumb_mode(regs)) {
				regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
				regs->ARM_pc -= 2;
			} else {
#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
				regs->ARM_r7 = __NR_restart_syscall;
				regs->ARM_pc -= 4;
#else
				u32 __user *usp;
				u32 swival = __NR_restart_syscall;

				regs->ARM_sp -= 12;
				usp = (u32 __user *)regs->ARM_sp;

				/*
				 * Either we support OABI only, or we have
				 * EABI with the OABI compat layer enabled.
				 * In the latter case we don't know if user
				 * space is EABI or not, and if not we must
				 * not clobber r7.  Always using the OABI
				 * syscall solves that issue and works for
				 * all those cases.
				 */
				swival = swival - __NR_SYSCALL_BASE + __NR_OABI_SYSCALL_BASE;

				put_user(regs->ARM_pc, &usp[0]);
				/* swi __NR_restart_syscall */
				put_user(0xef000000 | swival, &usp[1]);
				/* ldr	pc, [sp], #12 */
				put_user(0xe49df00c, &usp[2]);

				flush_icache_range((unsigned long)usp,
						   (unsigned long)(usp + 3));

				regs->ARM_pc = regs->ARM_sp + 4;
#endif
			}
		}
		if (regs->ARM_r0 == -ERESTARTNOHAND ||
		    regs->ARM_r0 == -ERESTARTSYS ||
		    regs->ARM_r0 == -ERESTARTNOINTR) {
			restart_syscall(regs);
		}
	}
	if (current->ptrace & PT_SINGLESTEP)
		ptrace_set_bpt(current);
	return 0;
}
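/*
 * do_notify_resume() is the C entry point used by the assembly
 * return-to-user path when work is pending; here only _TIF_SIGPENDING is
 * acted upon, with the task's current blocked set passed as the mask to
 * save into the sigframe when no sigsuspend()-style saved set exists.
 */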
asmlinkage void
do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	if (thread_flags & _TIF_SIGPENDING)
		do_signal(&current->blocked, regs, syscall);
}