/* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
 * arch/sparc64/kernel/traps.c
 *
 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
 */

/*
 * I like traps on v9, :))))
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>  /* for jiffies */
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/mm.h>
#include <linux/init.h>

#include <asm/delay.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <asm/fpumacro.h>
#include <asm/lsu.h>
#include <asm/dcu.h>
#include <asm/estate.h>
#include <asm/chafsr.h>
#include <asm/sfafsr.h>
#include <asm/psrcompat.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/kdebug.h>
#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

struct notifier_block *sparc64die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);

int register_die_notifier(struct notifier_block *nb)
{
	int err = 0;
	unsigned long flags;
	spin_lock_irqsave(&die_notifier_lock, flags);
	err = notifier_chain_register(&sparc64die_chain, nb);
	spin_unlock_irqrestore(&die_notifier_lock, flags);
	return err;
}

/* When an irrecoverable trap occurs at tl > 0, the trap entry
 * code logs the trap state registers at every level in the trap
 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
 * is as follows:
 */
struct tl1_traplog {
	struct {
		unsigned long tstate;
		unsigned long tpc;
		unsigned long tnpc;
		unsigned long tt;
	} trapstack[4];
	unsigned long tl;
};

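/* For illustration: the C-level handlers below locate this log with a
 * plain pointer cast,
 *
 *	struct tl1_traplog *p = (struct tl1_traplog *)(regs + 1);
 *
 * relying on the trap entry code having stored the log immediately
 * after the pt_regs area it built on the stack.
 */
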
static void dump_tl1_traplog(struct tl1_traplog *p)
{
	int i;

	printk("TRAPLOG: Error at trap level 0x%lx, dumping trap stack.\n",
	       p->tl);
	for (i = 0; i < 4; i++) {
		printk(KERN_CRIT
		       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
		       "TNPC[%016lx] TT[%lx]\n",
		       i + 1,
		       p->trapstack[i].tstate, p->trapstack[i].tpc,
		       p->trapstack[i].tnpc, p->trapstack[i].tt);
	}
}

void do_call_debug(struct pt_regs *regs)
{
	notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
}

void bad_trap(struct pt_regs *regs, long lvl)
{
	char buffer[32];
	siginfo_t info;

	if (notify_die(DIE_TRAP, "bad trap", regs,
		       0, lvl, SIGTRAP) == NOTIFY_STOP)
		return;

	if (lvl < 0x100) {
		sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
		die_if_kernel(buffer, regs);
	}

	lvl -= 0x100;
	if (regs->tstate & TSTATE_PRIV) {
		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
		die_if_kernel(buffer, regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLTRP;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = lvl;
	force_sig_info(SIGILL, &info, current);
}

void bad_trap_tl1(struct pt_regs *regs, long lvl)
{
	char buffer[32];

	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
		       0, lvl, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	sprintf(buffer, "Bad trap %lx at tl>0", lvl);
	die_if_kernel(buffer, regs);
}

#ifdef CONFIG_DEBUG_BUGVERBOSE
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
#endif

void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("spitfire_insn_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Iax", regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}

void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_insn_access_exception(regs, sfsr, sfar);
}

void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
			       regs->tpc, entry->fixup);
#endif
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		/* Shit... */
		printk("spitfire_data_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Dax", regs);
	}

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}

void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_data_access_exception(regs, sfsr, sfar);
}

#ifdef CONFIG_PCI
/* This is really pathetic... */
extern volatile int pci_poke_in_progress;
extern volatile int pci_poke_cpu;
extern volatile int pci_poke_faulted;
#endif

/* When access exceptions happen, we must do this. */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em. */
	for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable in LSU. */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			       "i" (ASI_LSU_CONTROL)
			     : "memory");
}

static void spitfire_enable_estate_errors(void)
{
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (ESTATE_ERR_ALL),
			       "i" (ASI_ESTATE_ERROR_EN));
}

static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};

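/* For illustration: the UDB error registers report an 8-bit ECC
 * syndrome in their low byte, which indexes this table directly.
 * A UDB low byte of 0x01, for example, yields ecc_syndrome_table[1],
 * i.e. syndrome code 0x40, which is then handed to prom_getunumber()
 * below to name the failing memory module.
 */
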
static char *syndrome_unknown = "<Unknown>";

static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
{
	unsigned short scode;
	char memmod_str[64], *p;

	if (udbl & bit) {
		scode = ecc_syndrome_table[udbl & 0xff];
		if (prom_getunumber(scode, afar,
				    memmod_str, sizeof(memmod_str)) == -1)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}

	if (udbh & bit) {
		scode = ecc_syndrome_table[udbh & 0xff];
		if (prom_getunumber(scode, afar,
				    memmod_str, sizeof(memmod_str)) == -1)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}
}

static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{
	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Correctable ECC Error", regs,
		   0, TRAP_TYPE_CEE, SIGTRAP);

	/* The Correctable ECC Error trap does not disable I/D caches.  So
	 * we only have to restore the ESTATE Error Enable register.
	 */
	spitfire_enable_estate_errors();
}

static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
{
	siginfo_t info;

	printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
	       "AFAR[%lx] UDBL[%lx] UDBH[%lx] TT[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);

	/* XXX add more human friendly logging of the error status
	 * XXX as is implemented for cheetah
	 */

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Uncorrectable Error", regs,
		   0, tt, SIGTRAP);

	if (regs->tstate & TSTATE_PRIV) {
		if (tl1)
			dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
		die_if_kernel("UE", regs);
	}

	/* XXX need more intelligent processing here, such as is implemented
	 * XXX for cheetah errors, in fact if the E-cache still holds the
	 * XXX line with bad parity this will loop
	 */

	spitfire_clean_and_reenable_l1_caches();
	spitfire_enable_estate_errors();

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_OBJERR;
	info.si_addr = (void *)0;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}

void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
	unsigned long afsr, tt, udbh, udbl;
	int tl1;

	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

#ifdef CONFIG_PCI
	if (tt == TRAP_TYPE_DAE &&
	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();
		spitfire_enable_estate_errors();

		pci_poke_faulted = 1;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	if (afsr & SFAFSR_UE)
		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);

	if (tt == TRAP_TYPE_CEE) {
		/* Handle the case where we took a CEE trap, but ACK'd
		 * only the UE state in the UDB error registers.
		 */
		if (afsr & SFAFSR_UE) {
			if (udbh & UDBE_CE) {
				__asm__ __volatile__(
					"stxa %0, [%1] %2\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (udbh & UDBE_CE),
					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
			}
			if (udbl & UDBE_CE) {
				__asm__ __volatile__(
					"stxa %0, [%1] %2\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (udbl & UDBE_CE),
					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
			}
		}

		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
	}
}

int cheetah_pcache_forced_on;

void cheetah_enable_pcache(void)
{
	unsigned long dcr;

	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
	       smp_processor_id());

	__asm__ __volatile__("ldxa [%%g0] %1, %0"
			     : "=r" (dcr)
			     : "i" (ASI_DCU_CONTROL_REG));
	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
}

/* Cheetah error trap handling. */
static unsigned long ecache_flush_physbase;
static unsigned long ecache_flush_linesize;
static unsigned long ecache_flush_size;

/* WARNING: The error trap handlers in assembly know the precise
 *	    layout of the following structure.
 *
 * C-level handlers below use this information to log the error
 * and then determine how to recover (if possible).
 */
struct cheetah_err_info {
/*0x00*/u64 afsr;
/*0x08*/u64 afar;

	/* D-cache state */
/*0x10*/u64 dcache_data[4];	/* The actual data */
/*0x30*/u64 dcache_index;	/* D-cache index */
/*0x38*/u64 dcache_tag;		/* D-cache tag/valid */
/*0x40*/u64 dcache_utag;	/* D-cache microtag */
/*0x48*/u64 dcache_stag;	/* D-cache snooptag */

	/* I-cache state */
/*0x50*/u64 icache_data[8];	/* The actual insns + predecode */
/*0x90*/u64 icache_index;	/* I-cache index */
/*0x98*/u64 icache_tag;		/* I-cache phys tag */
/*0xa0*/u64 icache_utag;	/* I-cache microtag */
/*0xa8*/u64 icache_stag;	/* I-cache snooptag */
/*0xb0*/u64 icache_upper;	/* I-cache upper-tag */
/*0xb8*/u64 icache_lower;	/* I-cache lower-tag */

	/* E-cache state */
/*0xc0*/u64 ecache_data[4];	/* 32 bytes from staging registers */
/*0xe0*/u64 ecache_index;	/* E-cache index */
/*0xe8*/u64 ecache_tag;		/* E-cache tag/state */

/*0xf0*/u64 __pad[32 - 30];
};

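/* For illustration: the pad brings the structure to 32 u64s (0x100
 * bytes), so the pair of log entries kept per cpu (see below) spans
 * a fixed 0x200-byte region.  The assembly handlers mentioned in the
 * WARNING above depend on this exact layout.
 */
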
#define CHAFSR_INVALID		((u64)-1L)

/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.
 */

struct afsr_error_table {
	unsigned long mask;
	const char *name;
};

static const char CHAFSR_PERR_msg[] =
	"System interface protocol error";
static const char CHAFSR_IERR_msg[] =
	"Internal processor error";
static const char CHAFSR_ISAP_msg[] =
	"System request parity error on incoming address";
static const char CHAFSR_UCU_msg[] =
	"Uncorrectable E-cache ECC error for ifetch/data";
static const char CHAFSR_UCC_msg[] =
	"SW Correctable E-cache ECC error for ifetch/data";
static const char CHAFSR_UE_msg[] =
	"Uncorrectable system bus data ECC error for read";
static const char CHAFSR_EDU_msg[] =
	"Uncorrectable E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMU_msg[] =
	"Uncorrectable system bus MTAG error";
static const char CHAFSR_WDU_msg[] =
	"Uncorrectable E-cache ECC error for writeback";
static const char CHAFSR_CPU_msg[] =
	"Uncorrectable ECC error for copyout";
static const char CHAFSR_CE_msg[] =
	"HW corrected system bus data ECC error for read";
static const char CHAFSR_EDC_msg[] =
	"HW corrected E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMC_msg[] =
	"HW corrected system bus MTAG ECC error";
static const char CHAFSR_WDC_msg[] =
	"HW corrected E-cache ECC error for writeback";
static const char CHAFSR_CPC_msg[] =
	"HW corrected ECC error for copyout";
static const char CHAFSR_TO_msg[] =
	"Unmapped error from system bus";
static const char CHAFSR_BERR_msg[] =
	"Bus error response from system bus";
static const char CHAFSR_IVC_msg[] =
	"HW corrected system bus data ECC error for ivec read";
static const char CHAFSR_IVU_msg[] =
	"Uncorrectable system bus data ECC error for ivec read";
static struct afsr_error_table __cheetah_error_table[] = {
	{ CHAFSR_PERR,	CHAFSR_PERR_msg },
	{ CHAFSR_IERR,	CHAFSR_IERR_msg },
	{ CHAFSR_ISAP,	CHAFSR_ISAP_msg },
	{ CHAFSR_UCU,	CHAFSR_UCU_msg },
	{ CHAFSR_UCC,	CHAFSR_UCC_msg },
	{ CHAFSR_UE,	CHAFSR_UE_msg },
	{ CHAFSR_EDU,	CHAFSR_EDU_msg },
	{ CHAFSR_EMU,	CHAFSR_EMU_msg },
	{ CHAFSR_WDU,	CHAFSR_WDU_msg },
	{ CHAFSR_CPU,	CHAFSR_CPU_msg },
	{ CHAFSR_CE,	CHAFSR_CE_msg },
	{ CHAFSR_EDC,	CHAFSR_EDC_msg },
	{ CHAFSR_EMC,	CHAFSR_EMC_msg },
	{ CHAFSR_WDC,	CHAFSR_WDC_msg },
	{ CHAFSR_CPC,	CHAFSR_CPC_msg },
	{ CHAFSR_TO,	CHAFSR_TO_msg },
	{ CHAFSR_BERR,	CHAFSR_BERR_msg },
	/* These two do not update the AFAR. */
	{ CHAFSR_IVC,	CHAFSR_IVC_msg },
	{ CHAFSR_IVU,	CHAFSR_IVU_msg },
	{ 0,		NULL },
};

static const char CHPAFSR_DTO_msg[] =
	"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
	"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
	"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
	"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
	"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
	"System bus uncorrectable data ECC error due to prefetch/store-fill";
static struct afsr_error_table __cheetah_plus_error_table[] = {
	{ CHAFSR_PERR,	CHAFSR_PERR_msg },
	{ CHAFSR_IERR,	CHAFSR_IERR_msg },
	{ CHAFSR_ISAP,	CHAFSR_ISAP_msg },
	{ CHAFSR_UCU,	CHAFSR_UCU_msg },
	{ CHAFSR_UCC,	CHAFSR_UCC_msg },
	{ CHAFSR_UE,	CHAFSR_UE_msg },
	{ CHAFSR_EDU,	CHAFSR_EDU_msg },
	{ CHAFSR_EMU,	CHAFSR_EMU_msg },
	{ CHAFSR_WDU,	CHAFSR_WDU_msg },
	{ CHAFSR_CPU,	CHAFSR_CPU_msg },
	{ CHAFSR_CE,	CHAFSR_CE_msg },
	{ CHAFSR_EDC,	CHAFSR_EDC_msg },
	{ CHAFSR_EMC,	CHAFSR_EMC_msg },
	{ CHAFSR_WDC,	CHAFSR_WDC_msg },
	{ CHAFSR_CPC,	CHAFSR_CPC_msg },
	{ CHAFSR_TO,	CHAFSR_TO_msg },
	{ CHAFSR_BERR,	CHAFSR_BERR_msg },
	{ CHPAFSR_DTO,	CHPAFSR_DTO_msg },
	{ CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
	{ CHPAFSR_THCE,	CHPAFSR_THCE_msg },
	{ CHPAFSR_TSCE,	CHPAFSR_TSCE_msg },
	{ CHPAFSR_TUE,	CHPAFSR_TUE_msg },
	{ CHPAFSR_DUE,	CHPAFSR_DUE_msg },
	/* These two do not update the AFAR. */
	{ CHAFSR_IVC,	CHAFSR_IVC_msg },
	{ CHAFSR_IVU,	CHAFSR_IVU_msg },
	{ 0,		NULL },
};

static const char JPAFSR_JETO_msg[] =
	"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
	"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
	"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
	"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
	"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
	"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
	"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
	"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
	"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
	"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
	"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
	"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
	"Foreign read to DRAM incurring uncorrectable ECC error";
static struct afsr_error_table __jalapeno_error_table[] = {
	{ JPAFSR_JETO,	JPAFSR_JETO_msg },
	{ JPAFSR_SCE,	JPAFSR_SCE_msg },
	{ JPAFSR_JEIC,	JPAFSR_JEIC_msg },
	{ JPAFSR_JEIT,	JPAFSR_JEIT_msg },
	{ CHAFSR_PERR,	CHAFSR_PERR_msg },
	{ CHAFSR_IERR,	CHAFSR_IERR_msg },
	{ CHAFSR_ISAP,	CHAFSR_ISAP_msg },
	{ CHAFSR_UCU,	CHAFSR_UCU_msg },
	{ CHAFSR_UCC,	CHAFSR_UCC_msg },
	{ CHAFSR_UE,	CHAFSR_UE_msg },
	{ CHAFSR_EDU,	CHAFSR_EDU_msg },
	{ JPAFSR_OM,	JPAFSR_OM_msg },
	{ CHAFSR_WDU,	CHAFSR_WDU_msg },
	{ CHAFSR_CPU,	CHAFSR_CPU_msg },
	{ CHAFSR_CE,	CHAFSR_CE_msg },
	{ CHAFSR_EDC,	CHAFSR_EDC_msg },
	{ JPAFSR_ETP,	JPAFSR_ETP_msg },
	{ CHAFSR_WDC,	CHAFSR_WDC_msg },
	{ CHAFSR_CPC,	CHAFSR_CPC_msg },
	{ CHAFSR_TO,	CHAFSR_TO_msg },
	{ CHAFSR_BERR,	CHAFSR_BERR_msg },
	{ JPAFSR_UMS,	JPAFSR_UMS_msg },
	{ JPAFSR_RUE,	JPAFSR_RUE_msg },
	{ JPAFSR_RCE,	JPAFSR_RCE_msg },
	{ JPAFSR_BP,	JPAFSR_BP_msg },
	{ JPAFSR_WBP,	JPAFSR_WBP_msg },
	{ JPAFSR_FRC,	JPAFSR_FRC_msg },
	{ JPAFSR_FRU,	JPAFSR_FRU_msg },
	/* This one does not update the AFAR. */
	{ CHAFSR_IVU,	CHAFSR_IVU_msg },
	{ 0,		NULL },
};
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;

/* This is allocated at boot time based upon the largest hardware
 * cpu ID in the system.  We allocate two entries per cpu, one for
 * TL==0 logging and one for TL >= 1 logging.
 */
struct cheetah_err_info *cheetah_error_log;

static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
{
	struct cheetah_err_info *p;
	int cpu = smp_processor_id();

	if (!cheetah_error_log)
		return NULL;

	p = cheetah_error_log + (cpu * 2);
	if ((afsr & CHAFSR_TL1) != 0UL)
		p++;

	return p;
}

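/* For illustration: cpu N's TL==0 entry therefore lives at
 * cheetah_error_log[2 * N] and its TL>=1 entry at
 * cheetah_error_log[2 * N + 1], selected by the CHAFSR_TL1 bit
 * recorded in the AFSR value passed in by the trap handler.
 */
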
extern unsigned int tl0_icpe[], tl1_icpe[];
extern unsigned int tl0_dcpe[], tl1_dcpe[];
extern unsigned int tl0_fecc[], tl1_fecc[];
extern unsigned int tl0_cee[], tl1_cee[];
extern unsigned int tl0_iae[], tl1_iae[];
extern unsigned int tl0_dae[], tl1_dae[];
extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];

void __init cheetah_ecache_flush_init(void)
{
	unsigned long largest_size, smallest_linesize, order, ver;
	int node, i, instance;

	/* Scan all cpu device tree nodes, note two values:
	 * 1) largest E-cache size
	 * 2) smallest E-cache line size
	 */
	largest_size = 0UL;
	smallest_linesize = ~0UL;

	instance = 0;
	while (!cpu_find_by_instance(instance, &node, NULL)) {
		unsigned long val;

		val = prom_getintdefault(node, "ecache-size",
					 (2 * 1024 * 1024));
		if (val > largest_size)
			largest_size = val;
		val = prom_getintdefault(node, "ecache-line-size", 64);
		if (val < smallest_linesize)
			smallest_linesize = val;
		instance++;
	}

	if (largest_size == 0UL || smallest_linesize == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
			    "parameters.\n");
		prom_halt();
	}

	ecache_flush_size = (2 * largest_size);
	ecache_flush_linesize = smallest_linesize;

	ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);

	if (ecache_flush_physbase == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
			    "contiguous physical memory.\n",
			    ecache_flush_size);
		prom_halt();
	}

	/* Now allocate error trap reporting scoreboard. */
	node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
	for (order = 0; order < MAX_ORDER; order++) {
		if ((PAGE_SIZE << order) >= node)
			break;
	}
	cheetah_error_log = (struct cheetah_err_info *)
		__get_free_pages(GFP_KERNEL, order);
	if (!cheetah_error_log) {
		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
			    "error logging scoreboard (%d bytes).\n", node);
		prom_halt();
	}
	memset(cheetah_error_log, 0, PAGE_SIZE << order);

	/* Mark all AFSRs as invalid so that the trap handler will
	 * log new information there.
	 */
	for (i = 0; i < 2 * NR_CPUS; i++)
		cheetah_error_log[i].afsr = CHAFSR_INVALID;

	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	if ((ver >> 32) == 0x003e0016) {
		cheetah_error_table = &__jalapeno_error_table[0];
		cheetah_afsr_errors = JPAFSR_ERRORS;
	} else if ((ver >> 32) == 0x003e0015) {
		cheetah_error_table = &__cheetah_plus_error_table[0];
		cheetah_afsr_errors = CHPAFSR_ERRORS;
	} else {
		cheetah_error_table = &__cheetah_error_table[0];
		cheetah_afsr_errors = CHAFSR_ERRORS;
	}

	/* Now patch trap tables. */
	memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
	memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
	memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
	memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
	memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	if (tlb_type == cheetah_plus) {
		memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
		memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
		memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
		memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
	}
	flushi(PAGE_OFFSET);
}

static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
			     "   bne,pt	%%xcc, 1b\n\t"
			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}

static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	physaddr &= ~(8UL - 1UL);
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size >> 1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}

/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
	unsigned int icache_size, icache_line_size;
	unsigned long addr;

	icache_size = local_cpu_data().icache_size;
	icache_line_size = local_cpu_data().icache_line_size;

	/* Clear the valid bits in all the tags. */
	for (addr = 0; addr < icache_size; addr += icache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr | (2 << 3)),
				       "i" (ASI_IC_TAG));
	}
}

static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}

static void cheetah_flush_dcache(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr), "i" (ASI_DCACHE_TAG));
	}
}

/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32-bytes of data for that line.  Data of
 * all-zero + tag parity value of zero == correct parity.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		unsigned long tag = (addr >> 14);
		unsigned long line;

		__asm__ __volatile__("membar #Sync\n\t"
				     "stxa %0, [%1] %2\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (addr),
				       "i" (ASI_DCACHE_UTAG));
		for (line = addr; line < addr + dcache_line_size; line += 8)
			__asm__ __volatile__("membar #Sync\n\t"
					     "stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (line),
					       "i" (ASI_DCACHE_DATA));
	}
}

/* Conversion tables used to frob Cheetah AFSR syndrome values into
 * something palatable to the memory controller driver get_unumber
 * routine.
 */
#define MT0	137
#define MT1	138
#define MT2	139
#define NONE	254
#define MTC0	140
#define MTC1	141
#define MTC2	142
#define MTC3	143
#define C0	128
#define C1	129
#define C2	130
#define C3	131
#define C4	132
#define C5	133
#define C6	134
#define C7	135
#define C8	136
#define M2	144
#define M3	145
#define M4	146
#define M	147
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
static unsigned char cheetah_mtag_syntab[] = {
	NONE, MTC0,
	MTC1, NONE,
	MTC2, NONE,
	NONE, MT0,
	MTC3, NONE,
	NONE, MT1,
	NONE, MT2,
	NONE, NONE
};

/* Return the highest priority error condition mentioned. */
static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
{
	unsigned long tmp = 0;
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
			return tmp;
	}
	return tmp;
}

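/* For illustration: because the table walk above honors the priority
 * ordering, an AFSR with both CHAFSR_UE and CHAFSR_CE set reports
 * CHAFSR_UE here; the uncorrectable entry precedes the corrected one
 * in every variant of the table.
 */
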
static const char *cheetah_get_string(unsigned long bit)
{
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((bit & cheetah_error_table[i].mask) != 0UL)
			return cheetah_error_table[i].name;
	}
	return "???";
}

extern int chmc_getunumber(int, unsigned long, char *, int);

static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->tstate);
	printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE  | CHAFSR_CE  | \
			 CHAFSR_EDC | CHAFSR_EDU | \
			 CHAFSR_UCC | CHAFSR_UCU | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
	       "u[%016lx] l[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}

static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}

void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}

/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
			     "andn %0, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %2\n\t"
			     "membar #Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "casxa [%2] %3, %%g0, %%g0\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}

/* Return non-zero if PADDR is a valid physical memory address. */
static int cheetah_check_main_memory(unsigned long paddr)
{
	unsigned long vaddr = PAGE_OFFSET + paddr;

	if (vaddr > (unsigned long) high_memory)
		return 0;

	return kern_addr_valid(vaddr);
}

void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}

void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		(void) cheetah_recheck_errors(NULL);

		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			const struct exception_table_entry *entry;

			entry = search_exception_tables(regs->tpc);
			if (entry) {
				/* OK, kernel access to userspace. */
				recoverable = 1;

			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = entry->fixup;
					regs->tnpc = regs->tpc + 4;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}

/* Handle a D/I cache parity error trap.  TYPE is encoded as:
 *
 * Bit0:	0=dcache,1=icache
 * Bit1:	0=recoverable,1=unrecoverable
 *
 * The hardware has disabled both the I-cache and D-cache in
 * the %dcr register.
 */
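/* For illustration, the four possible TYPE values decode as:
 *
 *	0x0: recoverable D-cache parity error
 *	0x1: recoverable I-cache parity error
 *	0x2: unrecoverable D-cache parity error
 *	0x3: unrecoverable I-cache parity error
 */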
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
}

void do_fpe_common(struct pt_regs *regs)
{
	if (regs->tstate & TSTATE_PRIV) {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	} else {
		unsigned long fsr = current_thread_info()->xfsr[0];
		siginfo_t info;

		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *)regs->tpc;
		info.si_trapno = 0;
		info.si_code = __SI_FAULT;
		if ((fsr & 0x1c000) == (1 << 14)) {
			if (fsr & 0x10)
				info.si_code = FPE_FLTINV;
			else if (fsr & 0x08)
				info.si_code = FPE_FLTOVF;
			else if (fsr & 0x04)
				info.si_code = FPE_FLTUND;
			else if (fsr & 0x02)
				info.si_code = FPE_FLTDIV;
			else if (fsr & 0x01)
				info.si_code = FPE_FLTRES;
		}
		force_sig_info(SIGFPE, &info, current);
	}
}

void do_fpieee(struct pt_regs *regs)
{
	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
		       0, 0x24, SIGFPE) == NOTIFY_STOP)
		return;

	do_fpe_common(regs);
}

extern int do_mathemu(struct pt_regs *, struct fpustate *);

void do_fpother(struct pt_regs *regs)
{
	struct fpustate *f = FPUSTATE;
	int ret = 0;

	if (notify_die(DIE_TRAP, "fpu exception other", regs,
		       0, 0x25, SIGFPE) == NOTIFY_STOP)
		return;

	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
	case (2 << 14): /* unfinished_FPop */
	case (3 << 14): /* unimplemented_FPop */
		ret = do_mathemu(regs, f);
		break;
	}
	if (ret)
		return;
	do_fpe_common(regs);
}

void do_tof(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
		       0, 0x26, SIGEMT) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV)
		die_if_kernel("Penguin overflow trap from kernel mode", regs);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGEMT;
	info.si_errno = 0;
	info.si_code = EMT_TAGOVF;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGEMT, &info, current);
}

void do_div0(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "integer division by zero", regs,
		       0, 0x28, SIGFPE) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV)
		die_if_kernel("TL0: Kernel divide by zero.", regs);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = FPE_INTDIV;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGFPE, &info, current);
}

void instruction_dump(unsigned int *pc)
{
	int i;

	if ((((unsigned long) pc) & 3))
		return;

	printk("Instruction DUMP:");
	for (i = -3; i < 6; i++)
		printk("%c%08x%c", i ? ' ' : '<', pc[i], i ? ' ' : '>');
	printk("\n");
}

static void user_instruction_dump(unsigned int __user *pc)
{
	int i;
	unsigned int buf[9];

	if ((((unsigned long) pc) & 3))
		return;

	if (copy_from_user(buf, pc - 3, sizeof(buf)))
		return;

	/* buf[3] is the faulting instruction (*pc), so bracket that one,
	 * matching the convention used by instruction_dump() above.
	 */
	printk("Instruction DUMP:");
	for (i = 0; i < 9; i++)
		printk("%c%08x%c", i == 3 ? '<' : ' ', buf[i], i == 3 ? '>' : ' ');
	printk("\n");
}

1808 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
1810 unsigned long pc, fp, thread_base, ksp;
1811 void *tp = task_stack_page(tsk);
1812 struct reg_window *rw;
1813 int count = 0;
1815 ksp = (unsigned long) _ksp;
1817 if (tp == current_thread_info())
1818 flushw_all();
1820 fp = ksp + STACK_BIAS;
1821 thread_base = (unsigned long) tp;
1823 printk("Call Trace:");
1824 #ifdef CONFIG_KALLSYMS
1825 printk("\n");
1826 #endif
1827 do {
1828 /* Bogus frame pointer? */
1829 if (fp < (thread_base + sizeof(struct thread_info)) ||
1830 fp >= (thread_base + THREAD_SIZE))
1831 break;
1832 rw = (struct reg_window *)fp;
1833 pc = rw->ins[7];
1834 printk(" [%016lx] ", pc);
1835 print_symbol("%s\n", pc);
1836 fp = rw->ins[6] + STACK_BIAS;
1837 } while (++count < 16);
1838 #ifndef CONFIG_KALLSYMS
1839 printk("\n");
1840 #endif
void dump_stack(void)
{
	unsigned long *ksp;

	__asm__ __volatile__("mov %%fp, %0"
			     : "=r" (ksp));
	show_stack(current, ksp);
}

EXPORT_SYMBOL(dump_stack);

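/* Decide whether a saved register window can be trusted: it must be
 * 8-byte aligned and lie inside the task's own thread_union.  Frames
 * below PAGE_OFFSET are only believable for init_task, whose stack
 * sits in the kernel image rather than the linear mapping.
 */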
static inline int is_kernel_stack(struct task_struct *task,
				  struct reg_window *rw)
{
	unsigned long rw_addr = (unsigned long) rw;
	unsigned long thread_base, thread_end;

	if (rw_addr < PAGE_OFFSET) {
		if (task != &init_task)
			return 0;
	}

	thread_base = (unsigned long) task_stack_page(task);
	thread_end = thread_base + sizeof(union thread_union);
	if (rw_addr >= thread_base &&
	    rw_addr < thread_end &&
	    !(rw_addr & 0x7UL))
		return 1;

	return 0;
}

static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
{
	unsigned long fp = rw->ins[6];

	if (!fp)
		return NULL;

	return (struct reg_window *) (fp + STACK_BIAS);
}

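/* The sparc64 oops path: draw the penguin, dump the registers, then
 * either backtrace the kernel stack (capped at 30 frames, stopping
 * at the first frame that fails is_kernel_stack()) or dump the user
 * instruction stream.  The task is then killed: SIGKILL for traps
 * taken in privileged mode, SIGSEGV otherwise.
 */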
void die_if_kernel(char *str, struct pt_regs *regs)
{
	static int die_counter;
	extern void __show_regs(struct pt_regs * regs);
	extern void smp_report_regs(void);
	int count = 0;

	/* Amuse the user. */
	printk(
"              \\|/ ____ \\|/\n"
"              \"@'/ .. \\`@\"\n"
"              /_| \\__/ |_\\\n"
"                 \\__U_/\n");

	printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
	__asm__ __volatile__("flushw");
	__show_regs(regs);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.
		 */
		while (rw &&
		       count++ < 30 &&
		       is_kernel_stack(current, rw)) {
			printk("Caller[%016lx]", rw->ins[7]);
			print_symbol(": %s", rw->ins[7]);
			printk("\n");

			rw = kernel_stack_up(rw);
		}
		instruction_dump ((unsigned int *) regs->tpc);
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump ((unsigned int __user *) regs->tpc);
	}
#ifdef CONFIG_SMP
	smp_report_regs();
#endif

	if (regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}

extern int handle_popc(u32 insn, struct pt_regs *regs);
extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);

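/* Not every "illegal" opcode is hopeless: POPC and the quad LDF/STQ
 * forms are architecturally legal but trap as illegal on hardware
 * that does not implement them, so the emulation helpers above get
 * first shot at the faulting instruction before we settle for
 * SIGILL.
 */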
void do_illegal_instruction(struct pt_regs *regs)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	siginfo_t info;

	if (notify_die(DIE_TRAP, "illegal instruction", regs,
		       0, 0x10, SIGILL) == NOTIFY_STOP)
		return;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("Kernel illegal instruction", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
			if (handle_popc(insn, regs))
				return;
		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
			if (handle_ldf_stq(insn, regs))
				return;
		}
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *)pc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
}

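/* Unaligned access.  Kernel-mode accesses are fixed up in software
 * (kernel_unaligned_trap() decodes and emulates the load or store);
 * user mode just gets SIGBUS/BUS_ADRALN with the fault address in
 * si_addr.
 */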
void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		extern void kernel_unaligned_trap(struct pt_regs *regs,
						  unsigned int insn,
						  unsigned long sfar,
						  unsigned long sfsr);

		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc),
				      sfar, sfsr);
		return;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}

void do_privop(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "privileged operation", regs,
		       0, 0x11, SIGILL) == NOTIFY_STOP)
		return;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
}

void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}

/* Trap level 1 stuff or other traps we should never see... */
void do_cee(struct pt_regs *regs)
{
	die_if_kernel("TL0: Cache Error Exception", regs);
}

void do_cee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Cache Error Exception", regs);
}

void do_dae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Data Access Exception", regs);
}

void do_iae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Instruction Access Exception", regs);
}

void do_div0_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: DIV0 Exception", regs);
}

void do_fpdis_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Disabled", regs);
}

void do_fpieee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU IEEE Exception", regs);
}

void do_fpother_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Other Exception", regs);
}

void do_ill_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Illegal Instruction Exception", regs);
}

void do_irq_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: IRQ Exception", regs);
}

void do_lddfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: LDDF Exception", regs);
}

void do_stdfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: STDF Exception", regs);
}

void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}

void do_paw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}

void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}

void do_vaw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}

void do_tof_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}

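/* Software trap used by 32-bit tasks to read the old v8 %psr: fold
 * the 64-bit TSTATE down to a PSR image in the saved %i0 and step
 * the trap PCs past the trapping instruction.
 */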
void do_getpsr(struct pt_regs *regs)
{
	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}

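/* Deliberately never defined anywhere: if the assembler's TI_*
 * offsets stop matching the C layout of struct thread_info, the
 * call below survives constant folding and the build fails at link
 * time with a reference to this function.
 */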
extern void thread_info_offsets_are_bolixed_dave(void);

/* Only invoked on boot processor. */
void __init trap_init(void)
{
	/* Compile time sanity check. */
	if (TI_TASK != offsetof(struct thread_info, task) ||
	    TI_FLAGS != offsetof(struct thread_info, flags) ||
	    TI_CPU != offsetof(struct thread_info, cpu) ||
	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
	    TI_KSP != offsetof(struct thread_info, ksp) ||
	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
	    TI_KREGS != offsetof(struct thread_info, kregs) ||
	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
	    TI_GSR != offsetof(struct thread_info, gsr) ||
	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
	    TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
	    TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
	    TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
	    TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
	    TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
	    TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
	    (TI_FPREGS & (64 - 1)))
		thread_info_offsets_are_bolixed_dave();

	/* Attach to the address space of init_task.  On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
}