/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/freezer.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
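/*
 * SIGKILL and SIGSTOP can never be blocked; masking a restored signal
 * set with _BLOCKABLE strips them out before it is installed.
 */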
/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RESTART		(0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE)
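/*
 * 0xef000000 is the ARM "swi" opcode; for OABI the syscall number is
 * OR'd straight into the instruction's comment field.
 */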
/*
 * With EABI, the syscall number has to be loaded into r7.
 */
#define MOV_R7_NR_SIGRETURN	(0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define MOV_R7_NR_RT_SIGRETURN	(0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
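/*
 * The Thumb sequence packs two halfwords into one word: "movs r7, #nr"
 * (0x27nn) followed by "swi 0" (0xdf00).
 */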
const unsigned long sigreturn_codes[7] = {
	MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
};
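/*
 * Indexing: entries 0-2 return from a plain signal, entries 3-5 from an
 * rt signal.  Within each group the EABI "mov r7" word and the OABI swi
 * come first for ARM mode, followed by the packed Thumb sequence.
 */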
/*
 * Either we support OABI only, or we have EABI with the OABI
 * compat layer enabled.  In the latter case we don't know if
 * user space is EABI or not, and if not we must not clobber r7.
 * Always using the OABI syscall solves that issue and works for
 * all those cases.
 */
const unsigned long syscall_restart_code[2] = {
	SWI_SYS_RESTART,	/* swi	__NR_restart_syscall */
	0xe49df004,		/* ldr	pc, [sp], #4 */
};
/*
 * atomically swap in the new signal mask, and wait for a signal.
 */
asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
{
	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	siginitset(&current->blocked, mask);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
	      struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
			return -EFAULT;
		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
		__get_user(mask, &act->sa_mask);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
			return -EFAULT;
		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}
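/*
 * The coprocessor save/restore helpers below stage the context in an
 * over-sized kernel buffer so the frame can be aligned on a 64-bit
 * boundary before a single copy to or from user space.
 */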
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = CRUNCH_MAGIC;
	kframe->size = CRUNCH_STORAGE_SIZE;
	crunch_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}
static int restore_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != CRUNCH_MAGIC ||
	    kframe->size != CRUNCH_STORAGE_SIZE)
		return -1;
	crunch_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}
static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = IWMMXT_MAGIC;
	kframe->size = IWMMXT_STORAGE_SIZE;
	iwmmxt_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}
static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != IWMMXT_MAGIC ||
	    kframe->size != IWMMXT_STORAGE_SIZE)
		return -1;
	iwmmxt_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}
/*
 * Do a signal return; undo the signal stack.  These are aligned to 64-bit.
 */
struct sigframe {
	struct ucontext uc;
	unsigned long retcode[2];
};

struct rt_sigframe {
	struct siginfo info;
	struct sigframe sig;
};
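/*
 * restore_sigframe() is the inverse of setup_sigframe(): it reinstalls
 * the blocked signal mask and the saved user register state from a
 * frame previously written to the user stack.
 */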
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	struct aux_sigframe __user *aux;
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0) {
		sigdelsetmask(&set, ~_BLOCKABLE);
		spin_lock_irq(&current->sighand->siglock);
		current->blocked = set;
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}

	__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	err |= !valid_user_regs(regs);

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
	if (err == 0)
		err |= restore_crunch_context(&aux->crunch);
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= restore_iwmmxt_context(&aux->iwmmxt);
//	err |= vfp_restore_state(&sf->aux.vfp);

	return err;
}
asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	single_step_trap(current);

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, &frame->sig))
		goto badframe;

	if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
		goto badframe;

	single_step_trap(current);

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
	struct aux_sigframe __user *aux;
	int err = 0;

	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
	if (err == 0)
		err |= preserve_crunch_context(&aux->crunch);
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= preserve_iwmmxt_context(&aux->iwmmxt);
//	err |= vfp_save_state(&sf->aux.vfp);
	__put_user_error(0, &aux->end_magic, err);

	return err;
}
static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize)
{
	unsigned long sp = regs->ARM_sp;
	void __user *frame;

	/*
	 * This is the X/Open sanctioned signal stack switching.
	 */
	if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
		sp = current->sas_ss_sp + current->sas_ss_size;

	/*
	 * ATPCS B01 mandates 8-byte alignment
	 */
	frame = (void __user *)((sp - framesize) & ~7);

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(VERIFY_WRITE, frame, framesize))
		frame = NULL;

	return frame;
}
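/*
 * setup_return() points the user context at the handler: sp gets the new
 * signal frame, lr the sigreturn trampoline, pc the handler entry point,
 * and CPSR is adjusted for ARM or Thumb execution as appropriate.
 */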
static int
setup_return(struct pt_regs *regs, struct k_sigaction *ka,
	     unsigned long __user *rc, void __user *frame, int usig)
{
	unsigned long handler = (unsigned long)ka->sa.sa_handler;
	unsigned long retcode;
	int thumb = 0;
	unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;

	/*
	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
	 */
	if (ka->sa.sa_flags & SA_THIRTYTWO)
		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
	if (elf_hwcap & HWCAP_THUMB) {
		/*
		 * The LSB of the handler determines if we're going to
		 * be using THUMB or ARM mode for this signal handler.
		 */
		thumb = handler & 1;

		if (thumb) {
			cpsr |= PSR_T_BIT;
#if __LINUX_ARM_ARCH__ >= 7
			/* clear the If-Then Thumb-2 execution state */
			cpsr &= ~PSR_IT_MASK;
#endif
		} else
			cpsr &= ~PSR_T_BIT;
	}
#endif

	if (ka->sa.sa_flags & SA_RESTORER) {
		retcode = (unsigned long)ka->sa.sa_restorer;
	} else {
		unsigned int idx = thumb << 1;

		if (ka->sa.sa_flags & SA_SIGINFO)
			idx += 3;

		if (__put_user(sigreturn_codes[idx],   rc) ||
		    __put_user(sigreturn_codes[idx+1], rc+1))
			return 1;

		if (cpsr & MODE32_BIT) {
			/*
			 * 32-bit code can use the new high-page
			 * signal return code support.
			 */
			retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
		} else {
			/*
			 * Ensure that the instruction cache sees
			 * the return code written onto the stack.
			 */
			flush_icache_range((unsigned long)rc,
					   (unsigned long)(rc + 2));

			retcode = ((unsigned long)rc) + thumb;
		}
	}

	regs->ARM_r0 = usig;
	regs->ARM_sp = (unsigned long)frame;
	regs->ARM_lr = retcode;
	regs->ARM_pc = handler;
	regs->ARM_cpsr = cpsr;

	return 0;
}
static int
setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	/*
	 * Set uc.uc_flags to a value which sc.trap_no would never have.
	 */
	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);

	err |= setup_sigframe(frame, regs, set);
	if (err == 0)
		err = setup_return(regs, ka, frame->retcode, frame, usig);

	return err;
}
static int
setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
	       sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
	stack_t stack;
	int err = 0;

	if (!frame)
		return 1;

	err |= copy_siginfo_to_user(&frame->info, info);

	__put_user_error(0, &frame->sig.uc.uc_flags, err);
	__put_user_error(NULL, &frame->sig.uc.uc_link, err);

	memset(&stack, 0, sizeof(stack));
	stack.ss_sp = (void __user *)current->sas_ss_sp;
	stack.ss_flags = sas_ss_flags(regs->ARM_sp);
	stack.ss_size = current->sas_ss_size;
	err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack));

	err |= setup_sigframe(&frame->sig, regs, set);
	if (err == 0)
		err = setup_return(regs, ka, frame->sig.retcode, frame, usig);

	if (err == 0) {
		/*
		 * For realtime signals we must also set the second and third
		 * arguments for the signal handler.
		 *   -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
		 */
		regs->ARM_r1 = (unsigned long)&frame->info;
		regs->ARM_r2 = (unsigned long)&frame->sig.uc;
	}

	return err;
}
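/*
 * Restart the interrupted system call: put back the original r0 (the
 * first syscall argument, clobbered by the error return value) and wind
 * the PC back over the SWI/SVC instruction so it is re-issued.
 */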
static inline void setup_syscall_restart(struct pt_regs *regs)
{
	regs->ARM_r0 = regs->ARM_ORIG_r0;
	regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
}
/*
 * OK, we're invoking a handler
 */
static int
handle_signal(unsigned long sig, struct k_sigaction *ka,
	      siginfo_t *info, sigset_t *oldset,
	      struct pt_regs *regs, int syscall)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;
	int usig = sig;
	int ret;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		switch (regs->ARM_r0) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->ARM_r0 = -EINTR;
			break;
		case -ERESTARTSYS:
			if (!(ka->sa.sa_flags & SA_RESTART)) {
				regs->ARM_r0 = -EINTR;
				break;
			}
			/* fallthrough */
		case -ERESTARTNOINTR:
			setup_syscall_restart(regs);
		}
	}

	/*
	 * translate the signal
	 */
	if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
		usig = thread->exec_domain->signal_invmap[usig];

	/*
	 * Set up the stack frame
	 */
	if (ka->sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(usig, ka, info, oldset, regs);
	else
		ret = setup_frame(usig, ka, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	if (ret != 0) {
		force_sigsegv(sig, tsk);
		return ret;
	}

	/*
	 * Block the signal if we were successful.
	 */
	spin_lock_irq(&tsk->sighand->siglock);
	sigorsets(&tsk->blocked, &tsk->blocked,
		  &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&tsk->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&tsk->sighand->siglock);

	return 0;
}
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static void do_signal(struct pt_regs *regs, int syscall)
{
	struct k_sigaction ka;
	siginfo_t info;
	int signr;

	/*
	 * We want the common case to go fast, which
	 * is why we may in certain cases get here from
	 * kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return;

	single_step_clear(current);

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		sigset_t *oldset;

		if (test_thread_flag(TIF_RESTORE_SIGMASK))
			oldset = &current->saved_sigmask;
		else
			oldset = &current->blocked;
		if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) {
			/*
			 * A signal was successfully delivered; the saved
			 * sigmask will have been stored in the signal frame,
			 * and will be restored by sigreturn, so we can simply
			 * clear the TIF_RESTORE_SIGMASK flag.
			 */
			if (test_thread_flag(TIF_RESTORE_SIGMASK))
				clear_thread_flag(TIF_RESTORE_SIGMASK);
		}
		single_step_set(current);
		return;
	}

	/*
	 * No signal to deliver to the process - restart the syscall.
	 */
	if (syscall) {
		if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
			if (thumb_mode(regs)) {
				regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
				regs->ARM_pc -= 2;
			} else {
#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
				regs->ARM_r7 = __NR_restart_syscall;
				regs->ARM_pc -= 4;
#else
				u32 __user *usp;

				regs->ARM_sp -= 4;
				usp = (u32 __user *)regs->ARM_sp;

				put_user(regs->ARM_pc, usp);
				regs->ARM_pc = KERN_RESTART_CODE;
#endif
			}
		}
		if (regs->ARM_r0 == -ERESTARTNOHAND ||
		    regs->ARM_r0 == -ERESTARTSYS ||
		    regs->ARM_r0 == -ERESTARTNOINTR) {
			setup_syscall_restart(regs);
		}

		/* If there's no signal to deliver, we just put the saved sigmask
		 * back.
		 */
		if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
			clear_thread_flag(TIF_RESTORE_SIGMASK);
			sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
		}
	}
	single_step_set(current);
}
asmlinkage void
do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	if (thread_flags & _TIF_SIGPENDING)
		do_signal(regs, syscall);

	if (thread_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		if (current->replacement_session_keyring)
			key_replace_session_keyring();
	}
}