arch/powerpc/kernel/traps.c
/*
 *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#ifdef CONFIG_PPC32
#include <asm/reg.h>
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#ifdef CONFIG_FSL_BOOKE
#include <asm/dbell.h>
#endif

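/*
 * Entry points into an attached low-level debugger (xmon or kgdb).
 * They stay NULL when no debugger is registered; the debugger_*()
 * wrapper functions simply return 0 in that case, so these hooks cost
 * a single pointer test on the exception paths that consult them.
 */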
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
int (*__debugger)(struct pt_regs *regs);
int (*__debugger_ipi)(struct pt_regs *regs);
int (*__debugger_bpt)(struct pt_regs *regs);
int (*__debugger_sstep)(struct pt_regs *regs);
int (*__debugger_iabr_match)(struct pt_regs *regs);
int (*__debugger_dabr_match)(struct pt_regs *regs);
int (*__debugger_fault_handler)(struct pt_regs *regs);

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_dabr_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
        mutex_lock(&pmac_backlight_mutex);
        if (pmac_backlight) {
                struct backlight_properties *props;

                props = &pmac_backlight->props;
                props->brightness = props->max_brightness;
                props->power = FB_BLANK_UNBLANK;
                backlight_update_status(pmac_backlight);
        }
        mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

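/*
 * die() serializes oops output with its own spinlock and remembers the
 * owning CPU, so a recursive oops on the same CPU re-enters without
 * deadlocking; after three nested entries the register dump is
 * suppressed rather than printed again.
 */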
int die(const char *str, struct pt_regs *regs, long err)
{
        static struct {
                spinlock_t lock;
                u32 lock_owner;
                int lock_owner_depth;
        } die = {
                .lock = __SPIN_LOCK_UNLOCKED(die.lock),
                .lock_owner = -1,
                .lock_owner_depth = 0
        };
        static int die_counter;
        unsigned long flags;

        if (debugger(regs))
                return 1;

        oops_enter();

        if (die.lock_owner != raw_smp_processor_id()) {
                console_verbose();
                spin_lock_irqsave(&die.lock, flags);
                die.lock_owner = smp_processor_id();
                die.lock_owner_depth = 0;
                bust_spinlocks(1);
                if (machine_is(powermac))
                        pmac_backlight_unblank();
        } else {
                local_save_flags(flags);
        }

        if (++die.lock_owner_depth < 3) {
                printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
                printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
                printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
                printk("DEBUG_PAGEALLOC ");
#endif
#ifdef CONFIG_NUMA
                printk("NUMA ");
#endif
                printk("%s\n", ppc_md.name ? ppc_md.name : "");

                print_modules();
                show_regs(regs);
        } else {
                printk("Recursive die() failure, output suppressed\n");
        }

        bust_spinlocks(0);
        die.lock_owner = -1;
        add_taint(TAINT_DIE);
        spin_unlock_irqrestore(&die.lock, flags);

        if (kexec_should_crash(current) ||
            kexec_sr_activated(smp_processor_id()))
                crash_kexec(regs);
        crash_kexec_secondary(regs);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops)
                panic("Fatal exception");

        oops_exit();
        do_exit(err);

        return 0;
}

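/*
 * Deliver a signal to current for an exception taken in user mode, or
 * die() if the exception happened in kernel mode.  Signals that the
 * task leaves unhandled are logged (rate-limited) when
 * show_unhandled_signals is set.
 */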
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
        siginfo_t info;
        const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
                        "at %08lx nip %08lx lr %08lx code %x\n";
        const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
                        "at %016lx nip %016lx lr %016lx code %x\n";

        if (!user_mode(regs)) {
                if (die("Exception in kernel mode", regs, signr))
                        return;
        } else if (show_unhandled_signals &&
                   unhandled_signal(current, signr) &&
                   printk_ratelimit()) {
                printk(regs->msr & MSR_SF ? fmt64 : fmt32,
                       current->comm, current->pid, signr,
                       addr, regs->nip, regs->link, code);
        }

        memset(&info, 0, sizeof(info));
        info.si_signo = signr;
        info.si_code = code;
        info.si_addr = (void __user *) addr;
        force_sig_info(signr, &info, current);

        /*
         * Init gets no signals that it doesn't have a handler for.
         * That's all very well, but if it has caused a synchronous
         * exception and we ignore the resulting signal, it will just
         * generate the same exception over and over again and we get
         * nowhere.  Better to kill it and let the kernel panic.
         */
        if (is_global_init(current)) {
                __sighandler_t handler;

                spin_lock_irq(&current->sighand->siglock);
                handler = current->sighand->action[signr-1].sa.sa_handler;
                spin_unlock_irq(&current->sighand->siglock);
                if (handler == SIG_DFL) {
                        /* init has generated a synchronous exception
                           and it doesn't have a handler for the signal */
                        printk(KERN_CRIT "init has generated signal %d "
                               "but has no handler for it\n", signr);
                        do_exit(signr);
                }
        }
}

#ifdef CONFIG_PPC64
void system_reset_exception(struct pt_regs *regs)
{
        /* See if any machine dependent calls */
        if (ppc_md.system_reset_exception) {
                if (ppc_md.system_reset_exception(regs))
                        return;
        }

#ifdef CONFIG_KEXEC
        cpu_set(smp_processor_id(), cpus_in_sr);
#endif

        die("System Reset", regs, SIGABRT);

        /*
         * Some CPUs when released from the debugger will execute this path.
         * These CPUs entered the debugger via a soft-reset. If the CPU was
         * hung before entering the debugger it will return to the hung
         * state when exiting this function.  This causes a problem in
         * kdump since the hung CPU(s) will not respond to the IPI sent
         * from kdump. To prevent the problem we call crash_kexec_secondary()
         * here. If a kdump had not been initiated or we exit the debugger
         * with the "exit and recover" command (x) crash_kexec_secondary()
         * will return after 5ms and the CPU returns to its previous state.
         */
        crash_kexec_secondary(regs);

        /* Must die if the interrupt is not recoverable */
        if (!(regs->msr & MSR_RI))
                panic("Unrecoverable System Reset");

        /* What should we do here? We could issue a shutdown or hard reset. */
}
#endif

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
        unsigned long msr = regs->msr;
        const struct exception_table_entry *entry;
        unsigned int *nip = (unsigned int *)regs->nip;

        if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
            && (entry = search_exception_tables(regs->nip)) != NULL) {
                /*
                 * Check that it's a sync instruction, or somewhere
                 * in the twi; isync; nop sequence that inb/inw/inl uses.
                 * As the address is in the exception table
                 * we should be able to read the instr there.
                 * For the debug message, we look at the preceding
                 * load or store.
                 */
                if (*nip == 0x60000000)         /* nop */
                        nip -= 2;
                else if (*nip == 0x4c00012c)    /* isync */
                        --nip;
                if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
                        /* sync or twi */
                        unsigned int rb;

                        --nip;
                        rb = (*nip >> 11) & 0x1f;
                        printk(KERN_DEBUG "%s bad port %lx at %p\n",
                               (*nip & 0x100)? "OUT to": "IN from",
                               regs->gpr[rb] - _IO_BASE, nip);
                        regs->msr |= MSR_RI;
                        regs->nip = entry->fixup;
                        return 1;
                }
        }
#endif /* CONFIG_PPC32 */
        return 0;
}

#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)        ((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)     ((regs)->dsisr)
#else
#define get_mc_reason(regs)     (mfspr(SPRN_MCSR) & MCSR_MASK)
#endif
#define REASON_FP               ESR_FP
#define REASON_ILLEGAL          (ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED       ESR_PPR
#define REASON_TRAP             ESR_PTR

/* single-step stuff */
#define single_stepping(regs)   (current->thread.dbcr0 & DBCR0_IC)
#define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)        ((regs)->msr)
#define get_mc_reason(regs)     ((regs)->msr)
#define REASON_FP               0x100000
#define REASON_ILLEGAL          0x80000
#define REASON_PRIVILEGED       0x40000
#define REASON_TRAP             0x20000

#define single_stepping(regs)   ((regs)->msr & MSR_SE)
#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
#endif

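/*
 * The machine_check_* handlers below decode the platform-specific cause
 * bits (ESR/MCSR on 4xx and Book-E, SRR1 bits elsewhere) and print a
 * human-readable description.  They all return 0 ("not recovered");
 * machine_check_exception() reaches them through
 * cur_cpu_spec->machine_check unless the platform installed its own
 * ppc_md.machine_check_exception hook.
 */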
#if defined(CONFIG_4xx)
int machine_check_4xx(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        if (reason & ESR_IMCP) {
                printk("Instruction");
                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
        } else
                printk("Data");
        printk(" machine check in kernel mode.\n");

        return 0;
}

int machine_check_440A(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        printk("Machine check in kernel mode.\n");
        if (reason & ESR_IMCP) {
                printk("Instruction Synchronous Machine Check exception\n");
                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
        } else {
                u32 mcsr = mfspr(SPRN_MCSR);
                if (mcsr & MCSR_IB)
                        printk("Instruction Read PLB Error\n");
                if (mcsr & MCSR_DRB)
                        printk("Data Read PLB Error\n");
                if (mcsr & MCSR_DWB)
                        printk("Data Write PLB Error\n");
                if (mcsr & MCSR_TLBP)
                        printk("TLB Parity Error\n");
                if (mcsr & MCSR_ICP) {
                        flush_instruction_cache();
                        printk("I-Cache Parity Error\n");
                }
                if (mcsr & MCSR_DCSP)
                        printk("D-Cache Search Parity Error\n");
                if (mcsr & MCSR_DCFP)
                        printk("D-Cache Flush Parity Error\n");
                if (mcsr & MCSR_IMPE)
                        printk("Machine Check exception is imprecise\n");

                /* Clear MCSR */
                mtspr(SPRN_MCSR, mcsr);
        }
        return 0;
}
#elif defined(CONFIG_E500)
int machine_check_e500(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        printk("Machine check in kernel mode.\n");
        printk("Caused by (from MCSR=%lx): ", reason);

        if (reason & MCSR_MCP)
                printk("Machine Check Signal\n");
        if (reason & MCSR_ICPERR)
                printk("Instruction Cache Parity Error\n");
        if (reason & MCSR_DCP_PERR)
                printk("Data Cache Push Parity Error\n");
        if (reason & MCSR_DCPERR)
                printk("Data Cache Parity Error\n");
        if (reason & MCSR_BUS_IAERR)
                printk("Bus - Instruction Address Error\n");
        if (reason & MCSR_BUS_RAERR)
                printk("Bus - Read Address Error\n");
        if (reason & MCSR_BUS_WAERR)
                printk("Bus - Write Address Error\n");
        if (reason & MCSR_BUS_IBERR)
                printk("Bus - Instruction Data Error\n");
        if (reason & MCSR_BUS_RBERR)
                printk("Bus - Read Data Bus Error\n");
        if (reason & MCSR_BUS_WBERR)
                printk("Bus - Write Data Bus Error\n");
        if (reason & MCSR_BUS_IPERR)
                printk("Bus - Instruction Parity Error\n");
        if (reason & MCSR_BUS_RPERR)
                printk("Bus - Read Parity Error\n");

        return 0;
}
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        printk("Machine check in kernel mode.\n");
        printk("Caused by (from MCSR=%lx): ", reason);

        if (reason & MCSR_MCP)
                printk("Machine Check Signal\n");
        if (reason & MCSR_CP_PERR)
                printk("Cache Push Parity Error\n");
        if (reason & MCSR_CPERR)
                printk("Cache Parity Error\n");
        if (reason & MCSR_EXCP_ERR)
                printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
        if (reason & MCSR_BUS_IRERR)
                printk("Bus - Read Bus Error on instruction fetch\n");
        if (reason & MCSR_BUS_DRERR)
                printk("Bus - Read Bus Error on data load\n");
        if (reason & MCSR_BUS_WRERR)
                printk("Bus - Write Bus Error on buffered store or cache line push\n");

        return 0;
}
#else
int machine_check_generic(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        printk("Machine check in kernel mode.\n");
        printk("Caused by (from SRR1=%lx): ", reason);
        switch (reason & 0x601F0000) {
        case 0x80000:
                printk("Machine check signal\n");
                break;
        case 0:         /* for 601 */
        case 0x40000:
        case 0x140000:  /* 7450 MSS error and TEA */
                printk("Transfer error ack signal\n");
                break;
        case 0x20000:
                printk("Data parity error signal\n");
                break;
        case 0x10000:
                printk("Address parity error signal\n");
                break;
        case 0x20000000:
                printk("L1 Data Cache error\n");
                break;
        case 0x40000000:
                printk("L1 Instruction Cache error\n");
                break;
        case 0x00100000:
                printk("L2 data cache parity error\n");
                break;
        default:
                printk("Unknown values in msr\n");
        }
        return 0;
}
#endif /* everything else */

void machine_check_exception(struct pt_regs *regs)
{
        int recover = 0;

        /* See if any machine dependent calls. In theory, we would want
         * to call the CPU first, and call the ppc_md. one if the CPU
         * one returns a positive number. However there is existing code
         * that assumes the board gets a first chance, so let's keep it
         * that way for now and fix things later. --BenH.
         */
        if (ppc_md.machine_check_exception)
                recover = ppc_md.machine_check_exception(regs);
        else if (cur_cpu_spec->machine_check)
                recover = cur_cpu_spec->machine_check(regs);

        if (recover > 0)
                return;

        if (user_mode(regs)) {
                regs->msr |= MSR_RI;
                _exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
                return;
        }

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
        /* the qspan pci read routines can cause machine checks -- Cort
         *
         * yuck !!! that totally needs to go away ! There are better ways
         * to deal with that than having a wart in the mcheck handler.
         * -- BenH
         */
        bad_page_fault(regs, regs->dar, SIGBUS);
        return;
#endif

        if (debugger_fault_handler(regs)) {
                regs->msr |= MSR_RI;
                return;
        }

        if (check_io_access(regs))
                return;

        if (debugger_fault_handler(regs))
                return;
        die("Machine check", regs, SIGBUS);

        /* Must die if the interrupt is not recoverable */
        if (!(regs->msr & MSR_RI))
                panic("Unrecoverable Machine check");
}

void SMIException(struct pt_regs *regs)
{
        die("System Management Interrupt", regs, SIGABRT);
}

void unknown_exception(struct pt_regs *regs)
{
        printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
               regs->nip, regs->msr, regs->trap);

        _exception(SIGTRAP, regs, 0, 0);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
        if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
                       5, SIGTRAP) == NOTIFY_STOP)
                return;
        if (debugger_iabr_match(regs))
                return;
        _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
}

void RunModeException(struct pt_regs *regs)
{
        _exception(SIGTRAP, regs, 0, 0);
}

void __kprobes single_step_exception(struct pt_regs *regs)
{
        regs->msr &= ~(MSR_SE | MSR_BE);        /* Turn off 'trace' bits */

        if (notify_die(DIE_SSTEP, "single_step", regs, 5,
                       5, SIGTRAP) == NOTIFY_STOP)
                return;
        if (debugger_sstep(regs))
                return;

        _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
}

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
        if (single_stepping(regs)) {
                clear_single_step(regs);
                _exception(SIGTRAP, regs, TRAP_TRACE, 0);
        }
}

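/*
 * Translate an FPSCR image into a SIGFPE si_code.  A condition is
 * reported only when both its enable bit and its sticky exception bit
 * are set, checked in a fixed priority order: invalid operation,
 * overflow, underflow, divide-by-zero, inexact.
 */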
static inline int __parse_fpscr(unsigned long fpscr)
{
        int ret = 0;

        /* Invalid operation */
        if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
                ret = FPE_FLTINV;

        /* Overflow */
        else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
                ret = FPE_FLTOVF;

        /* Underflow */
        else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
                ret = FPE_FLTUND;

        /* Divide by zero */
        else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
                ret = FPE_FLTDIV;

        /* Inexact result */
        else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
                ret = FPE_FLTRES;

        return ret;
}

static void parse_fpe(struct pt_regs *regs)
{
        int code = 0;

        flush_fp_to_thread(current);

        code = __parse_fpscr(current->thread.fpscr.val);

        _exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 */

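/*
 * Emulate the load/store string instructions (lswi/lswx/stswi/stswx):
 * move num_bytes bytes between user memory starting at EA and
 * successive GPRs starting at rT, four bytes per register, wrapping
 * from r31 back to r0.
 */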
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
        u8 rT = (instword >> 21) & 0x1f;
        u8 rA = (instword >> 16) & 0x1f;
        u8 NB_RB = (instword >> 11) & 0x1f;
        u32 num_bytes;
        unsigned long EA;
        int pos = 0;

        /* Early out if we are an invalid form of lswx */
        if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
                if ((rT == rA) || (rT == NB_RB))
                        return -EINVAL;

        EA = (rA == 0) ? 0 : regs->gpr[rA];

        switch (instword & PPC_INST_STRING_MASK) {
        case PPC_INST_LSWX:
        case PPC_INST_STSWX:
                EA += NB_RB;
                num_bytes = regs->xer & 0x7f;
                break;
        case PPC_INST_LSWI:
        case PPC_INST_STSWI:
                num_bytes = (NB_RB == 0) ? 32 : NB_RB;
                break;
        default:
                return -EINVAL;
        }

        while (num_bytes != 0) {
                u8 val;
                u32 shift = 8 * (3 - (pos & 0x3));

                switch ((instword & PPC_INST_STRING_MASK)) {
                case PPC_INST_LSWX:
                case PPC_INST_LSWI:
                        if (get_user(val, (u8 __user *)EA))
                                return -EFAULT;
                        /* first time updating this reg,
                         * zero it out */
                        if (pos == 0)
                                regs->gpr[rT] = 0;
                        regs->gpr[rT] |= val << shift;
                        break;
                case PPC_INST_STSWI:
                case PPC_INST_STSWX:
                        val = regs->gpr[rT] >> shift;
                        if (put_user(val, (u8 __user *)EA))
                                return -EFAULT;
                        break;
                }
                /* move EA to next address */
                EA += 1;
                num_bytes--;

                /* manage our position within the register */
                if (++pos == 4) {
                        pos = 0;
                        if (++rT == 32)
                                rT = 0;
                }
        }

        return 0;
}

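/*
 * Emulate popcntb.  The three mask-and-add steps below are the classic
 * SWAR population count stopped at byte granularity, so each byte of
 * the result holds the number of 1 bits in the corresponding byte of rS.
 */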
static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
        u32 ra, rs;
        unsigned long tmp;

        ra = (instword >> 16) & 0x1f;
        rs = (instword >> 21) & 0x1f;

        tmp = regs->gpr[rs];
        tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
        tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
        tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
        regs->gpr[ra] = tmp;

        return 0;
}

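/*
 * Emulate isel rT,rA,rB,BC: rT gets (rA|0) if CR bit BC is set,
 * otherwise rB; rA == 0 means the literal value zero, as for other
 * (RA|0) operands.
 */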
static int emulate_isel(struct pt_regs *regs, u32 instword)
{
        u8 rT = (instword >> 21) & 0x1f;
        u8 rA = (instword >> 16) & 0x1f;
        u8 rB = (instword >> 11) & 0x1f;
        u8 BC = (instword >> 6) & 0x1f;
        u8 bit;
        unsigned long tmp;

        tmp = (rA == 0) ? 0 : regs->gpr[rA];
        bit = (regs->ccr >> (31 - BC)) & 0x1;

        regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

        return 0;
}

static int emulate_instruction(struct pt_regs *regs)
{
        u32 instword;
        u32 rd;

        if (!user_mode(regs) || (regs->msr & MSR_LE))
                return -EINVAL;
        CHECK_FULL_REGS(regs);

        if (get_user(instword, (u32 __user *)(regs->nip)))
                return -EFAULT;

        /* Emulate the mfspr rD, PVR. */
        if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
                rd = (instword >> 21) & 0x1f;
                regs->gpr[rd] = mfspr(SPRN_PVR);
                return 0;
        }

        /* Emulating the dcba insn is just a no-op. */
        if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA)
                return 0;

        /* Emulate the mcrxr insn. */
        if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
                int shift = (instword >> 21) & 0x1c;
                unsigned long msk = 0xf0000000UL >> shift;

                regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
                regs->xer &= ~0xf0000000UL;
                return 0;
        }

        /* Emulate load/store string insn. */
        if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING)
                return emulate_string_inst(regs, instword);

        /* Emulate the popcntb (Population Count Bytes) instruction. */
        if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
                return emulate_popcntb_inst(regs, instword);
        }

        /* Emulate isel (Integer Select) instruction */
        if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
                return emulate_isel(regs, instword);
        }

        return -EINVAL;
}

int is_valid_bugaddr(unsigned long addr)
{
        return is_kernel_addr(addr);
}

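/*
 * Program check: the reason bits distinguish IEEE FP exceptions, trap
 * instructions (planted by BUG()/WARN_ON() and by debuggers), and
 * privileged or illegal instructions.  WARN sites are recognized by
 * report_bug() and skipped by simply advancing the NIP past the trap.
 */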
void __kprobes program_check_exception(struct pt_regs *regs)
{
        unsigned int reason = get_reason(regs);
        extern int do_mathemu(struct pt_regs *regs);

        /* We can now get here via a FP Unavailable exception if the core
         * has no FPU, in that case the reason flags will be 0 */

        if (reason & REASON_FP) {
                /* IEEE FP exception */
                parse_fpe(regs);
                return;
        }
        if (reason & REASON_TRAP) {
                /* trap exception */
                if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
                                == NOTIFY_STOP)
                        return;
                if (debugger_bpt(regs))
                        return;

                if (!(regs->msr & MSR_PR) &&  /* not user-mode */
                    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
                        regs->nip += 4;
                        return;
                }
                _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
                return;
        }

        local_irq_enable();

#ifdef CONFIG_MATH_EMULATION
        /* (reason & REASON_ILLEGAL) would be the obvious thing here,
         * but there seems to be a hardware bug on the 405GP (RevD)
         * that means ESR is sometimes set incorrectly - either to
         * ESR_DST (!?) or 0.  In the process of chasing this with the
         * hardware people - not sure if it can happen on any illegal
         * instruction or only on FP instructions, whether there is a
         * pattern to occurrences etc. -dgibson 31/Mar/2003 */
        switch (do_mathemu(regs)) {
        case 0:
                emulate_single_step(regs);
                return;
        case 1: {
                int code = 0;
                code = __parse_fpscr(current->thread.fpscr.val);
                _exception(SIGFPE, regs, code, regs->nip);
                return;
        }
        case -EFAULT:
                _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
                return;
        }
        /* fall through on any other errors */
#endif /* CONFIG_MATH_EMULATION */

        /* Try to emulate it if we should. */
        if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
                switch (emulate_instruction(regs)) {
                case 0:
                        regs->nip += 4;
                        emulate_single_step(regs);
                        return;
                case -EFAULT:
                        _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
                        return;
                }
        }

        if (reason & REASON_PRIVILEGED)
                _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
        else
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}

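/*
 * Alignment interrupts are normally fixed up by emulating the access in
 * fix_alignment(), which returns 1 on success and -EFAULT if the
 * operand address was bad.  Anything that cannot be fixed up becomes
 * SIGBUS (BUS_ADRALN) for user mode or a kernel bad-page-fault report.
 */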
void alignment_exception(struct pt_regs *regs)
{
        int sig, code, fixed = 0;

        /* we don't implement logging of alignment exceptions */
        if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
                fixed = fix_alignment(regs);

        if (fixed == 1) {
                regs->nip += 4; /* skip over emulated instruction */
                emulate_single_step(regs);
                return;
        }

        /* Operand address was bad */
        if (fixed == -EFAULT) {
                sig = SIGSEGV;
                code = SEGV_ACCERR;
        } else {
                sig = SIGBUS;
                code = BUS_ADRALN;
        }
        if (user_mode(regs))
                _exception(sig, regs, code, regs->dar);
        else
                bad_page_fault(regs, regs->dar, sig);
}

void StackOverflow(struct pt_regs *regs)
{
        printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
               current, regs->gpr[1]);
        debugger(regs);
        show_regs(regs);
        panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
        printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
               regs->nip, regs->msr);
        debugger(regs);
        die("nonrecoverable exception", regs, SIGKILL);
}

void trace_syscall(struct pt_regs *regs)
{
        printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
               current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
               regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
        printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
               "%lx at %lx\n", regs->trap, regs->nip);
        die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
        if (user_mode(regs)) {
                /* A user program has executed an altivec instruction,
                   but this kernel doesn't support altivec. */
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
                return;
        }

        printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
               "%lx at %lx\n", regs->trap, regs->nip);
        die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
}

void vsx_unavailable_exception(struct pt_regs *regs)
{
        if (user_mode(regs)) {
                /* A user program has executed a VSX instruction,
                   but this kernel doesn't support VSX. */
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
                return;
        }

        printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
               "%lx at %lx\n", regs->trap, regs->nip);
        die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}

void performance_monitor_exception(struct pt_regs *regs)
{
        perf_irq(regs);
}

#ifdef CONFIG_8xx
void SoftwareEmulation(struct pt_regs *regs)
{
        extern int do_mathemu(struct pt_regs *);
        extern int Soft_emulate_8xx(struct pt_regs *);
#if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU)
        int errcode;
#endif

        CHECK_FULL_REGS(regs);

        if (!user_mode(regs)) {
                debugger(regs);
                die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
        }

#ifdef CONFIG_MATH_EMULATION
        errcode = do_mathemu(regs);

        switch (errcode) {
        case 0:
                emulate_single_step(regs);
                return;
        case 1: {
                int code = 0;
                code = __parse_fpscr(current->thread.fpscr.val);
                _exception(SIGFPE, regs, code, regs->nip);
                return;
        }
        case -EFAULT:
                _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
                return;
        default:
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
                return;
        }

#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
        errcode = Soft_emulate_8xx(regs);
        switch (errcode) {
        case 0:
                emulate_single_step(regs);
                return;
        case 1:
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
                return;
        case -EFAULT:
                _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
                return;
        }
#else
        _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
#endif
}
#endif /* CONFIG_8xx */

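/*
 * On 40x and Book-E parts, single-stepping and data address breakpoints
 * are reported through the debug interrupt (DBSR/DBCR0) rather than via
 * MSR[SE], so they get their own handler here.
 */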
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)

void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{
        if (debug_status & DBSR_IC) {   /* instruction completion */
                regs->msr &= ~MSR_DE;

                /* Disable instruction completion */
                mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
                /* Clear the instruction completion event */
                mtspr(SPRN_DBSR, DBSR_IC);

                if (notify_die(DIE_SSTEP, "single_step", regs, 5,
                               5, SIGTRAP) == NOTIFY_STOP) {
                        return;
                }

                if (debugger_sstep(regs))
                        return;

                if (user_mode(regs)) {
                        current->thread.dbcr0 &= ~DBCR0_IC;
                }

                _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
        } else if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
                regs->msr &= ~MSR_DE;

                if (user_mode(regs)) {
                        current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W |
                                                   DBCR0_IDM);
                } else {
                        /* Disable DAC interrupts */
                        mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R |
                                                DBSR_DAC1W | DBCR0_IDM));

                        /* Clear the DAC event */
                        mtspr(SPRN_DBSR, (DBSR_DAC1R | DBSR_DAC1W));
                }
                /* Setup and send the trap to the handler */
                do_dabr(regs, mfspr(SPRN_DAC1), debug_status);
        }
}
#endif /* CONFIG_40x || CONFIG_BOOKE */

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
        printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
               regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
        int err;

        if (!user_mode(regs)) {
                printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
                       " at %lx\n", regs->nip);
                die("Kernel VMX/Altivec assist exception", regs, SIGILL);
        }

        flush_altivec_to_thread(current);

        err = emulate_altivec(regs);
        if (err == 0) {
                regs->nip += 4;         /* skip emulated instruction */
                emulate_single_step(regs);
                return;
        }

        if (err == -EFAULT) {
                /* got an error reading the instruction */
                _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
        } else {
                /* didn't recognize the instruction */
                /* XXX quick hack for now: set the non-Java bit in the VSCR */
                if (printk_ratelimit())
                        printk(KERN_ERR "Unrecognized altivec instruction "
                               "in %s at %lx\n", current->comm, regs->nip);
                current->thread.vscr.u[3] |= 0x10000;
        }
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
void vsx_assist_exception(struct pt_regs *regs)
{
        if (!user_mode(regs)) {
                printk(KERN_EMERG "VSX assist exception in kernel mode"
                       " at %lx\n", regs->nip);
                die("Kernel VSX assist exception", regs, SIGILL);
        }

        flush_vsx_to_thread(current);
        printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip);
        _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_FSL_BOOKE

void doorbell_exception(struct pt_regs *regs)
{
#ifdef CONFIG_SMP
        int cpu = smp_processor_id();
        int msg;

        if (num_online_cpus() < 2)
                return;

        for (msg = 0; msg < 4; msg++)
                if (test_and_clear_bit(msg, &dbell_smp_message[cpu]))
                        smp_message_recv(msg);
#else
        printk(KERN_WARNING "Received doorbell on non-smp system\n");
#endif
}

void CacheLockingException(struct pt_regs *regs, unsigned long address,
                           unsigned long error_code)
{
        /* We treat cache locking instructions from the user
         * as priv ops, in the future we could try to do
         * something smarter
         */
        if (error_code & (ESR_DLK|ESR_ILK))
                _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
        return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
        extern int do_spe_mathemu(struct pt_regs *regs);
        unsigned long spefscr;
        int fpexc_mode;
        int code = 0;
        int err;

        preempt_disable();
        if (regs->msr & MSR_SPE)
                giveup_spe(current);
        preempt_enable();

        spefscr = current->thread.spefscr;
        fpexc_mode = current->thread.fpexc_mode;

        if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF))
                code = FPE_FLTOVF;
        else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND))
                code = FPE_FLTUND;
        else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
                code = FPE_FLTDIV;
        else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV))
                code = FPE_FLTINV;
        else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
                code = FPE_FLTRES;

        err = do_spe_mathemu(regs);
        if (err == 0) {
                regs->nip += 4;         /* skip emulated instruction */
                emulate_single_step(regs);
                return;
        }

        if (err == -EFAULT) {
                /* got an error reading the instruction */
                _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
        } else if (err == -EINVAL) {
                /* didn't recognize the instruction */
                printk(KERN_ERR "unrecognized spe instruction "
                       "in %s at %lx\n", current->comm, regs->nip);
        } else {
                _exception(SIGFPE, regs, code, regs->nip);
        }

        return;
}

void SPEFloatingPointRoundException(struct pt_regs *regs)
{
        extern int speround_handler(struct pt_regs *regs);
        int err;

        preempt_disable();
        if (regs->msr & MSR_SPE)
                giveup_spe(current);
        preempt_enable();

        regs->nip -= 4;
        err = speround_handler(regs);
        if (err == 0) {
                regs->nip += 4;         /* skip emulated instruction */
                emulate_single_step(regs);
                return;
        }

        if (err == -EFAULT) {
                /* got an error reading the instruction */
                _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
        } else if (err == -EINVAL) {
                /* didn't recognize the instruction */
                printk(KERN_ERR "unrecognized spe instruction "
                       "in %s at %lx\n", current->comm, regs->nip);
        } else {
                _exception(SIGFPE, regs, 0, regs->nip);
                return;
        }
}
#endif

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
        printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
               regs->trap, regs->nip);
        die("Unrecoverable exception", regs, SIGABRT);
}

#ifdef CONFIG_BOOKE_WDT
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
        /* Generic WatchdogHandler, implement your own */
        mtspr(SPRN_TCR, mfspr(SPRN_TCR) & (~TCR_WIE));
        return;
}

void WatchdogException(struct pt_regs *regs)
{
        printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
        WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
        printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
               regs->gpr[1], regs->nip);
        die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}