/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>

#include "signal.h"
/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
/*
 * With EABI, the syscall number has to be loaded into r7.
 */
#define MOV_R7_NR_SIGRETURN	(0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define MOV_R7_NR_RT_SIGRETURN	(0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
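
/*
 * Trampoline instructions used by setup_return() below when the signal
 * handler was installed without SA_RESTORER: one ARM and one Thumb
 * sequence each for sigreturn and rt_sigreturn (the seventh slot stays
 * zero).
 */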
const unsigned long sigreturn_codes[7] = {
	MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
};
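
/*
 * The Crunch and iWMMXt helpers below stage the coprocessor state in a
 * kernel stack buffer that is aligned by hand to 8 bytes, because the
 * task copy/restore routines require 64-bit alignment, and then move it
 * to or from the user frame in a single copy.
 */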
#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = CRUNCH_MAGIC;
	kframe->size = CRUNCH_STORAGE_SIZE;
	crunch_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != CRUNCH_MAGIC ||
	    kframe->size != CRUNCH_STORAGE_SIZE)
		return -1;
	crunch_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}
#endif

#ifdef CONFIG_IWMMXT

static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = IWMMXT_MAGIC;
	kframe->size = IWMMXT_STORAGE_SIZE;
	iwmmxt_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != IWMMXT_MAGIC ||
	    kframe->size != IWMMXT_STORAGE_SIZE)
		return -1;
	iwmmxt_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}

#endif

#ifdef CONFIG_VFP

static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
	const unsigned long magic = VFP_MAGIC;
	const unsigned long size = VFP_STORAGE_SIZE;
	int err = 0;

	__put_user_error(magic, &frame->magic, err);
	__put_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;

	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
}

static int restore_vfp_context(struct vfp_sigframe __user *frame)
{
	unsigned long magic;
	unsigned long size;
	int err = 0;

	__get_user_error(magic, &frame->magic, err);
	__get_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;
	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
		return -EINVAL;

	return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
}

#endif

/*
 * Do a signal return; undo the signal stack.  These are aligned to 64-bit.
 */
struct sigframe {
	struct ucontext uc;
	unsigned long retcode[2];
};

struct rt_sigframe {
	struct siginfo info;
	struct sigframe sig;
};
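
/*
 * Read back the blocked signal mask, the core registers and any
 * coprocessor state from a user signal frame.  Returns non-zero if the
 * frame is unreadable or the restored registers are not valid
 * user-mode state.
 */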
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	struct aux_sigframe __user *aux;
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	err |= !valid_user_regs(regs);

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= restore_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= restore_vfp_context(&aux->vfp);
#endif

	return err;
}
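
/*
 * Handler for the sigreturn trampoline: pick up the signal frame that
 * setup_frame() pushed at the current stack pointer and restore the
 * interrupted user context from it.
 */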
asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
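
/*
 * As sys_sigreturn(), but for SA_SIGINFO handlers: the frame also
 * carries siginfo and the alternate signal stack settings.
 */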
asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, &frame->sig))
		goto badframe;

	if (restore_altstack(&frame->sig.uc.uc_stack))
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
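
/*
 * Save the calling context into the ucontext area of the frame: core
 * registers, fault information, the old signal mask and, in
 * uc_regspace, any coprocessor state.
 */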
static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
	struct aux_sigframe __user *aux;
	int err = 0;

	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= preserve_vfp_context(&aux->vfp);
#endif
	__put_user_error(0, &aux->end_magic, err);

	return err;
}
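
/*
 * Choose where the signal frame goes: on the current or the alternate
 * signal stack, rounded down to an 8-byte boundary.  Returns NULL if
 * the chosen area is not writable.
 */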
static inline void __user *
get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
{
	unsigned long sp = sigsp(regs->ARM_sp, ksig);
	void __user *frame;

	/*
	 * ATPCS B01 mandates 8-byte alignment
	 */
	frame = (void __user *)((sp - framesize) & ~7);

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(VERIFY_WRITE, frame, framesize))
		frame = NULL;

	return frame;
}

/*
 * Translate the signal number through the task's exec domain
 * mapping, if one is installed.
 */
static inline int map_sig(int sig)
{
	struct thread_info *thread = current_thread_info();
	if (sig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
		sig = thread->exec_domain->signal_invmap[sig];
	return sig;
}
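
/*
 * Arrange for the task to enter the signal handler: r0 carries the
 * (possibly translated) signal number, sp the new frame, lr the return
 * trampoline and pc the handler, with cpsr switched to ARM or Thumb
 * state as the handler's address requires.
 */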
static int
setup_return(struct pt_regs *regs, struct ksignal *ksig,
	     unsigned long __user *rc, void __user *frame)
{
	unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
	unsigned long retcode;
	int thumb = 0;
	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);

	cpsr |= PSR_ENDSTATE;

	/*
	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
	 */
	if (ksig->ka.sa.sa_flags & SA_THIRTYTWO)
		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
	if (elf_hwcap & HWCAP_THUMB) {
		/*
		 * The LSB of the handler determines if we're going to
		 * be using THUMB or ARM mode for this signal handler.
		 */
		thumb = handler & 1;

		if (thumb) {
			cpsr |= PSR_T_BIT;
#if __LINUX_ARM_ARCH__ >= 7
			/* clear the If-Then Thumb-2 execution state */
			cpsr &= ~PSR_IT_MASK;
#endif
		} else
			cpsr &= ~PSR_T_BIT;
	}
#endif

	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
		retcode = (unsigned long)ksig->ka.sa.sa_restorer;
	} else {
		unsigned int idx = thumb << 1;

		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			idx += 3;

		if (__put_user(sigreturn_codes[idx],   rc) ||
		    __put_user(sigreturn_codes[idx+1], rc+1))
			return 1;

		if (cpsr & MODE32_BIT) {
			/*
			 * 32-bit code can use the new high-page
			 * signal return code support.
			 */
			retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
		} else {
			/*
			 * Ensure that the instruction cache sees
			 * the return code written onto the stack.
			 */
			flush_icache_range((unsigned long)rc,
					   (unsigned long)(rc + 2));

			retcode = ((unsigned long)rc) + thumb;
		}
	}

	regs->ARM_r0 = map_sig(ksig->sig);
	regs->ARM_sp = (unsigned long)frame;
	regs->ARM_lr = retcode;
	regs->ARM_pc = handler;
	regs->ARM_cpsr = cpsr;

	return 0;
}
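
/*
 * Build the frame for a handler installed without SA_SIGINFO.
 */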
static int
setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	/*
	 * Set uc.uc_flags to a value which sc.trap_no would never have.
	 */
	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);

	err |= setup_sigframe(frame, regs, set);
	if (err == 0)
		err = setup_return(regs, ksig, frame->retcode, frame);

	return err;
}
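
/*
 * Build the frame for an SA_SIGINFO handler: siginfo plus a full
 * ucontext, with r1 and r2 pointing at them as the handler's second
 * and third arguments.
 */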
static int
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	err |= copy_siginfo_to_user(&frame->info, &ksig->info);

	__put_user_error(0, &frame->sig.uc.uc_flags, err);
	__put_user_error(NULL, &frame->sig.uc.uc_link, err);

	err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
	err |= setup_sigframe(&frame->sig, regs, set);
	if (err == 0)
		err = setup_return(regs, ksig, frame->sig.retcode, frame);

	if (err == 0) {
		/*
		 * For realtime signals we must also set the second and third
		 * arguments for the signal handler.
		 *   -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
		 */
		regs->ARM_r1 = (unsigned long)&frame->info;
		regs->ARM_r2 = (unsigned long)&frame->sig.uc;
	}

	return err;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int ret;

	/*
	 * Set up the stack frame
	 */
	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(ksig, oldset, regs);
	else
		ret = setup_frame(ksig, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	signal_setup_done(ret, ksig, 0);
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static int do_signal(struct pt_regs *regs, int syscall)
{
	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
	struct ksignal ksig;
	int restart = 0;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->ARM_pc;
		restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
		retval = regs->ARM_r0;

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PSW.
		 */
		switch (retval) {
		case -ERESTART_RESTARTBLOCK:
			restart -= 2;
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			restart++;
			regs->ARM_r0 = regs->ARM_ORIG_r0;
			regs->ARM_pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this
	 * point the debugger may change all our registers ...
	 */
	/*
	 * Depending on the signal settings we may need to revert the
	 * decision to restart the system call. But skip this if a
	 * debugger has chosen to restart at a different PC.
	 */
	if (get_signal(&ksig)) {
		/* handler */
		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
			if (retval == -ERESTARTNOHAND ||
			    retval == -ERESTART_RESTARTBLOCK ||
			    (retval == -ERESTARTSYS &&
			     !(ksig.ka.sa.sa_flags & SA_RESTART))) {
				regs->ARM_r0 = -EINTR;
				regs->ARM_pc = continue_addr;
			}
		}
		handle_signal(&ksig, regs);
	} else {
		/* no handler */
		restore_saved_sigmask();
		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
			regs->ARM_pc = continue_addr;
			return restart;
		}
	}
	return 0;
}
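
/*
 * Work loop run on the way back to user mode while TIF work flags are
 * set: reschedule, deliver pending signals via do_signal(), or handle
 * TIF_NOTIFY_RESUME.  A non-zero return value asks the caller to
 * restart the interrupted system call without delivering a handler.
 */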
asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	do {
		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
			schedule();
		} else {
			if (unlikely(!user_mode(regs)))
				return 0;
			local_irq_enable();
			if (thread_flags & _TIF_SIGPENDING) {
				int restart = do_signal(regs, syscall);
				if (unlikely(restart)) {
					/*
					 * Restart without handlers.
					 * Deal with it without leaving
					 * the kernel space.
					 */
					return restart;
				}
				syscall = 0;
			} else {
				clear_thread_flag(TIF_NOTIFY_RESUME);
				tracehook_notify_resume(regs);
			}
		}
		local_irq_disable();
		thread_flags = current_thread_info()->flags;
	} while (thread_flags & _TIF_WORK_MASK);
	return 0;
}