arch/blackfin/kernel/traps.c
/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */
#include <linux/bug.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/fs.h>
#include <linux/rbtree.h>
#include <asm/traps.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
#include <asm/dma.h>
#include <asm/blackfin.h>
#include <asm/irq_handler.h>
#include <linux/irq.h>
#include <asm/trace.h>
#include <asm/fixed_code.h>
#ifdef CONFIG_KGDB
# include <linux/kgdb.h>

# define CHK_DEBUGGER_TRAP() \
	do { \
		kgdb_handle_exception(trapnr, sig, info.si_code, fp); \
	} while (0)
# define CHK_DEBUGGER_TRAP_MAYBE() \
	do { \
		if (kgdb_connected) \
			CHK_DEBUGGER_TRAP(); \
	} while (0)
#else
# define CHK_DEBUGGER_TRAP() do { } while (0)
# define CHK_DEBUGGER_TRAP_MAYBE() do { } while (0)
#endif
#ifdef CONFIG_DEBUG_VERBOSE
#define verbose_printk(fmt, arg...) \
	printk(fmt, ##arg)
#else
#define verbose_printk(fmt, arg...) \
	({ if (0) printk(fmt, ##arg); 0; })
#endif
#if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE)
u32 last_seqstat;
#ifdef CONFIG_DEBUG_MMRS_MODULE
EXPORT_SYMBOL(last_seqstat);
#endif
#endif
/* Initiate the event table handler */
void __init trap_init(void)
{
	CSYNC();
	bfin_write_EVT3(trap);
	CSYNC();
}
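/* Decode "address" into a human readable string in "buf": a kernel symbol
 * (via kallsyms), a well known region (fixed code, MMRs, L1 ROM), or the
 * user-space mapping that contains it. */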
static void decode_address(char *buf, unsigned long address)
{
67 #ifdef CONFIG_DEBUG_VERBOSE
68 struct task_struct *p;
69 struct mm_struct *mm;
70 unsigned long flags, offset;
71 unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
72 struct rb_node *n;
74 #ifdef CONFIG_KALLSYMS
75 unsigned long symsize;
76 const char *symname;
77 char *modname;
78 char *delim = ":";
79 char namebuf[128];
80 #endif
82 buf += sprintf(buf, "<0x%08lx> ", address);
84 #ifdef CONFIG_KALLSYMS
85 /* look up the address and see if we are in kernel space */
86 symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);
88 if (symname) {
89 /* yeah! kernel space! */
90 if (!modname)
91 modname = delim = "";
92 sprintf(buf, "{ %s%s%s%s + 0x%lx }",
93 delim, modname, delim, symname,
94 (unsigned long)offset);
		return;
	}
#endif
99 if (address >= FIXED_CODE_START && address < FIXED_CODE_END) {
100 /* Problem in fixed code section? */
101 strcat(buf, "/* Maybe fixed code section */");
102 return;
104 } else if (address < CONFIG_BOOT_LOAD) {
105 /* Problem somewhere before the kernel start address */
106 strcat(buf, "/* Maybe null pointer? */");
107 return;
109 } else if (address >= COREMMR_BASE) {
110 strcat(buf, "/* core mmrs */");
111 return;
113 } else if (address >= SYSMMR_BASE) {
114 strcat(buf, "/* system mmrs */");
115 return;
117 } else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) {
118 strcat(buf, "/* on-chip L1 ROM */");
		strcat(buf, "/* on-chip L1 ROM */");
		return;
	}

	/*
	 * Don't walk any of the vmas if we are oopsing, it has been known
	 * to cause problems - corrupt vmas (kernel crashes) cause double faults
	 */
	if (oops_in_progress) {
127 strcat(buf, "/* kernel dynamic memory (maybe user-space) */");
		return;
	}

	/* looks like we're off in user-land, so let's walk all the
	 * mappings of all our processes and see if we can't be a whee
	 * bit more specific
	 */
	write_lock_irqsave(&tasklist_lock, flags);
136 for_each_process(p) {
137 mm = (in_atomic ? p->mm : get_task_mm(p));
138 if (!mm)
139 continue;
141 for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
142 struct vm_area_struct *vma;
144 vma = rb_entry(n, struct vm_area_struct, vm_rb);
146 if (address >= vma->vm_start && address < vma->vm_end) {
147 char _tmpbuf[256];
148 char *name = p->comm;
149 struct file *file = vma->vm_file;
151 if (file) {
152 char *d_name = d_path(&file->f_path, _tmpbuf,
153 sizeof(_tmpbuf));
154 if (!IS_ERR(d_name))
					name = d_name;
				}

				/* FLAT does not have its text aligned to the start of
				 * the map while FDPIC ELF does ...
				 */

				/* before we can check flat/fdpic, we need to
				 * make sure current is valid
				 */
				if ((unsigned long)current >= FIXED_CODE_START &&
166 !((unsigned long)current & 0x3)) {
167 if (current->mm &&
168 (address > current->mm->start_code) &&
169 (address < current->mm->end_code))
170 offset = address - current->mm->start_code;
171 else
172 offset = (address - vma->vm_start) +
173 (vma->vm_pgoff << PAGE_SHIFT);
175 sprintf(buf, "[ %s + 0x%lx ]", name, offset);
176 } else
177 sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
178 name, vma->vm_start, vma->vm_end);
				if (!in_atomic)
					mmput(mm);

				if (buf[0] == '\0')
					sprintf(buf, "[ %s ] dynamic memory", name);

				goto done;
			}
		}

		if (!in_atomic)
			mmput(mm);
	}

	/* we were unable to find this address anywhere */
	sprintf(buf, "/* kernel dynamic memory */");

done:
	write_unlock_irqrestore(&tasklist_lock, flags);
#else
	sprintf(buf, " ");
#endif
}
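/* Called from the double-fault path in assembly (see mach-common/entry.S):
 * report as much as we safely can about the original exception, then panic. */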
asmlinkage void double_fault_c(struct pt_regs *fp)
{
205 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
206 int j;
207 trace_buffer_save(j);
208 #endif
210 console_verbose();
211 oops_in_progress = 1;
212 #ifdef CONFIG_DEBUG_VERBOSE
213 printk(KERN_EMERG "Double Fault\n");
214 #ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
215 if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) {
216 unsigned int cpu = raw_smp_processor_id();
217 char buf[150];
218 decode_address(buf, cpu_pda[cpu].retx_doublefault);
219 printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n",
220 (unsigned int)cpu_pda[cpu].seqstat_doublefault & SEQSTAT_EXCAUSE, buf);
221 decode_address(buf, cpu_pda[cpu].dcplb_doublefault_addr);
222 printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %s\n", buf);
223 decode_address(buf, cpu_pda[cpu].icplb_doublefault_addr);
224 printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %s\n", buf);
226 decode_address(buf, fp->retx);
227 printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf);
228 } else
#endif
	{
		dump_bfin_process(fp);
		dump_bfin_mem(fp);
		show_regs(fp);
		dump_bfin_trace_buffer();
	}
#endif
	panic("Double Fault - unrecoverable event");
}

static int kernel_mode_regs(struct pt_regs *regs)
{
	return regs->ipend & 0xffc0;
}

asmlinkage notrace void trap_c(struct pt_regs *fp)
{
248 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
249 int j;
250 #endif
251 #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
252 unsigned int cpu = raw_smp_processor_id();
253 #endif
254 const char *strerror = NULL;
255 int sig = 0;
256 siginfo_t info;
257 unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE;
259 trace_buffer_save(j);
260 #if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE)
261 last_seqstat = (u32)fp->seqstat;
262 #endif
	/* Important - be very careful dereferencing pointers - will lead to
	 * double faults if the stack has become corrupt
	 */

	/* trap_c() will be called for exceptions. During exceptions
	 * processing, the pc value should be set with retx value.
	 * With this change we can cleanup some code in signal.c - TODO
	 */
	fp->orig_pc = fp->retx;
273 /* printk("exception: 0x%x, ipend=%x, reti=%x, retx=%x\n",
274 trapnr, fp->ipend, fp->pc, fp->retx); */
276 /* send the appropriate signal to the user program */
277 switch (trapnr) {
	/* This table works in conjunction with the one in ./mach-common/entry.S
280 * Some exceptions are handled there (in assembly, in exception space)
281 * Some are handled here, (in C, in interrupt space)
282 * Some, like CPLB, are handled in both, where the normal path is
283 * handled in assembly/exception space, and the error path is handled
	 * here
	 */

	/* 0x00 - Linux Syscall, getting here is an error */
288 /* 0x01 - userspace gdb breakpoint, handled here */
289 case VEC_EXCPT01:
290 info.si_code = TRAP_ILLTRAP;
291 sig = SIGTRAP;
292 CHK_DEBUGGER_TRAP_MAYBE();
293 /* Check if this is a breakpoint in kernel space */
294 if (kernel_mode_regs(fp))
295 goto traps_done;
296 else
297 break;
298 /* 0x03 - User Defined, userspace stack overflow */
299 case VEC_EXCPT03:
300 info.si_code = SEGV_STACKFLOW;
301 sig = SIGSEGV;
302 strerror = KERN_NOTICE EXC_0x03(KERN_NOTICE);
303 CHK_DEBUGGER_TRAP_MAYBE();
304 break;
305 /* 0x02 - KGDB initial connection and break signal trap */
306 case VEC_EXCPT02:
307 #ifdef CONFIG_KGDB
308 info.si_code = TRAP_ILLTRAP;
309 sig = SIGTRAP;
310 CHK_DEBUGGER_TRAP();
311 goto traps_done;
312 #endif
313 /* 0x04 - User Defined */
314 /* 0x05 - User Defined */
315 /* 0x06 - User Defined */
316 /* 0x07 - User Defined */
317 /* 0x08 - User Defined */
318 /* 0x09 - User Defined */
319 /* 0x0A - User Defined */
320 /* 0x0B - User Defined */
321 /* 0x0C - User Defined */
322 /* 0x0D - User Defined */
323 /* 0x0E - User Defined */
324 /* 0x0F - User Defined */
325 /* If we got here, it is most likely that someone was trying to use a
	 * custom exception handler, and it is not actually installed properly
	 */
	case VEC_EXCPT04 ... VEC_EXCPT15:
329 info.si_code = ILL_ILLPARAOP;
330 sig = SIGILL;
331 strerror = KERN_NOTICE EXC_0x04(KERN_NOTICE);
332 CHK_DEBUGGER_TRAP_MAYBE();
333 break;
334 /* 0x10 HW Single step, handled here */
335 case VEC_STEP:
336 info.si_code = TRAP_STEP;
337 sig = SIGTRAP;
338 CHK_DEBUGGER_TRAP_MAYBE();
339 /* Check if this is a single step in kernel space */
340 if (kernel_mode_regs(fp))
341 goto traps_done;
342 else
343 break;
344 /* 0x11 - Trace Buffer Full, handled here */
345 case VEC_OVFLOW:
346 info.si_code = TRAP_TRACEFLOW;
347 sig = SIGTRAP;
348 strerror = KERN_NOTICE EXC_0x11(KERN_NOTICE);
349 CHK_DEBUGGER_TRAP_MAYBE();
350 break;
351 /* 0x12 - Reserved, Caught by default */
352 /* 0x13 - Reserved, Caught by default */
353 /* 0x14 - Reserved, Caught by default */
354 /* 0x15 - Reserved, Caught by default */
355 /* 0x16 - Reserved, Caught by default */
356 /* 0x17 - Reserved, Caught by default */
357 /* 0x18 - Reserved, Caught by default */
358 /* 0x19 - Reserved, Caught by default */
359 /* 0x1A - Reserved, Caught by default */
360 /* 0x1B - Reserved, Caught by default */
361 /* 0x1C - Reserved, Caught by default */
362 /* 0x1D - Reserved, Caught by default */
363 /* 0x1E - Reserved, Caught by default */
364 /* 0x1F - Reserved, Caught by default */
365 /* 0x20 - Reserved, Caught by default */
366 /* 0x21 - Undefined Instruction, handled here */
367 case VEC_UNDEF_I:
368 #ifdef CONFIG_BUG
369 if (kernel_mode_regs(fp)) {
370 switch (report_bug(fp->pc, fp)) {
371 case BUG_TRAP_TYPE_NONE:
372 break;
373 case BUG_TRAP_TYPE_WARN:
374 dump_bfin_trace_buffer();
375 fp->pc += 2;
376 goto traps_done;
377 case BUG_TRAP_TYPE_BUG:
			/* call to panic() will dump trace, and it is
			 * off at this point, so it won't be clobbered
			 */
			panic("BUG()");
		}
	}
#endif
385 info.si_code = ILL_ILLOPC;
386 sig = SIGILL;
387 strerror = KERN_NOTICE EXC_0x21(KERN_NOTICE);
388 CHK_DEBUGGER_TRAP_MAYBE();
389 break;
390 /* 0x22 - Illegal Instruction Combination, handled here */
391 case VEC_ILGAL_I:
392 info.si_code = ILL_ILLPARAOP;
393 sig = SIGILL;
394 strerror = KERN_NOTICE EXC_0x22(KERN_NOTICE);
395 CHK_DEBUGGER_TRAP_MAYBE();
396 break;
397 /* 0x23 - Data CPLB protection violation, handled here */
398 case VEC_CPLB_VL:
399 info.si_code = ILL_CPLB_VI;
400 sig = SIGSEGV;
401 strerror = KERN_NOTICE EXC_0x23(KERN_NOTICE);
402 CHK_DEBUGGER_TRAP_MAYBE();
403 break;
404 /* 0x24 - Data access misaligned, handled here */
405 case VEC_MISALI_D:
406 info.si_code = BUS_ADRALN;
407 sig = SIGBUS;
408 strerror = KERN_NOTICE EXC_0x24(KERN_NOTICE);
409 CHK_DEBUGGER_TRAP_MAYBE();
410 break;
411 /* 0x25 - Unrecoverable Event, handled here */
412 case VEC_UNCOV:
413 info.si_code = ILL_ILLEXCPT;
414 sig = SIGILL;
415 strerror = KERN_NOTICE EXC_0x25(KERN_NOTICE);
416 CHK_DEBUGGER_TRAP_MAYBE();
417 break;
418 /* 0x26 - Data CPLB Miss, normal case is handled in _cplb_hdr,
419 error case is handled here */
420 case VEC_CPLB_M:
421 info.si_code = BUS_ADRALN;
422 sig = SIGBUS;
423 strerror = KERN_NOTICE EXC_0x26(KERN_NOTICE);
424 break;
425 /* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero, handled here */
426 case VEC_CPLB_MHIT:
427 info.si_code = ILL_CPLB_MULHIT;
428 sig = SIGSEGV;
429 #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
430 if (cpu_pda[cpu].dcplb_fault_addr < FIXED_CODE_START)
431 strerror = KERN_NOTICE "NULL pointer access\n";
432 else
433 #endif
434 strerror = KERN_NOTICE EXC_0x27(KERN_NOTICE);
435 CHK_DEBUGGER_TRAP_MAYBE();
436 break;
437 /* 0x28 - Emulation Watchpoint, handled here */
438 case VEC_WATCH:
439 info.si_code = TRAP_WATCHPT;
440 sig = SIGTRAP;
441 pr_debug(EXC_0x28(KERN_DEBUG));
442 CHK_DEBUGGER_TRAP_MAYBE();
443 /* Check if this is a watchpoint in kernel space */
444 if (kernel_mode_regs(fp))
445 goto traps_done;
446 else
447 break;
448 #ifdef CONFIG_BF535
449 /* 0x29 - Instruction fetch access error (535 only) */
450 case VEC_ISTRU_VL: /* ADSP-BF535 only (MH) */
451 info.si_code = BUS_OPFETCH;
452 sig = SIGBUS;
453 strerror = KERN_NOTICE "BF535: VEC_ISTRU_VL\n";
454 CHK_DEBUGGER_TRAP_MAYBE();
455 break;
456 #else
457 /* 0x29 - Reserved, Caught by default */
458 #endif
459 /* 0x2A - Instruction fetch misaligned, handled here */
460 case VEC_MISALI_I:
461 info.si_code = BUS_ADRALN;
462 sig = SIGBUS;
463 strerror = KERN_NOTICE EXC_0x2A(KERN_NOTICE);
464 CHK_DEBUGGER_TRAP_MAYBE();
465 break;
466 /* 0x2B - Instruction CPLB protection violation, handled here */
467 case VEC_CPLB_I_VL:
468 info.si_code = ILL_CPLB_VI;
469 sig = SIGBUS;
470 strerror = KERN_NOTICE EXC_0x2B(KERN_NOTICE);
471 CHK_DEBUGGER_TRAP_MAYBE();
472 break;
473 /* 0x2C - Instruction CPLB miss, handled in _cplb_hdr */
474 case VEC_CPLB_I_M:
475 info.si_code = ILL_CPLB_MISS;
476 sig = SIGBUS;
477 strerror = KERN_NOTICE EXC_0x2C(KERN_NOTICE);
478 break;
479 /* 0x2D - Instruction CPLB Multiple Hits, handled here */
480 case VEC_CPLB_I_MHIT:
481 info.si_code = ILL_CPLB_MULHIT;
482 sig = SIGSEGV;
483 #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
484 if (cpu_pda[cpu].icplb_fault_addr < FIXED_CODE_START)
485 strerror = KERN_NOTICE "Jump to NULL address\n";
486 else
487 #endif
488 strerror = KERN_NOTICE EXC_0x2D(KERN_NOTICE);
489 CHK_DEBUGGER_TRAP_MAYBE();
490 break;
491 /* 0x2E - Illegal use of Supervisor Resource, handled here */
492 case VEC_ILL_RES:
493 info.si_code = ILL_PRVOPC;
494 sig = SIGILL;
495 strerror = KERN_NOTICE EXC_0x2E(KERN_NOTICE);
496 CHK_DEBUGGER_TRAP_MAYBE();
497 break;
498 /* 0x2F - Reserved, Caught by default */
499 /* 0x30 - Reserved, Caught by default */
500 /* 0x31 - Reserved, Caught by default */
501 /* 0x32 - Reserved, Caught by default */
502 /* 0x33 - Reserved, Caught by default */
503 /* 0x34 - Reserved, Caught by default */
504 /* 0x35 - Reserved, Caught by default */
505 /* 0x36 - Reserved, Caught by default */
506 /* 0x37 - Reserved, Caught by default */
507 /* 0x38 - Reserved, Caught by default */
508 /* 0x39 - Reserved, Caught by default */
509 /* 0x3A - Reserved, Caught by default */
510 /* 0x3B - Reserved, Caught by default */
511 /* 0x3C - Reserved, Caught by default */
512 /* 0x3D - Reserved, Caught by default */
513 /* 0x3E - Reserved, Caught by default */
514 /* 0x3F - Reserved, Caught by default */
515 case VEC_HWERR:
516 info.si_code = BUS_ADRALN;
517 sig = SIGBUS;
518 switch (fp->seqstat & SEQSTAT_HWERRCAUSE) {
519 /* System MMR Error */
520 case (SEQSTAT_HWERRCAUSE_SYSTEM_MMR):
521 info.si_code = BUS_ADRALN;
522 sig = SIGBUS;
523 strerror = KERN_NOTICE HWC_x2(KERN_NOTICE);
524 break;
525 /* External Memory Addressing Error */
526 case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR):
527 if (ANOMALY_05000310) {
528 static unsigned long anomaly_rets;
530 if ((fp->pc >= (L1_CODE_START + L1_CODE_LENGTH - 512)) &&
531 (fp->pc < (L1_CODE_START + L1_CODE_LENGTH))) {
				/*
				 * A false hardware error will happen while fetching at
				 * the L1 instruction SRAM boundary.  Ignore it.
				 */
				anomaly_rets = fp->rets;
				goto traps_done;
			} else if (fp->rets == anomaly_rets) {
				/*
				 * While boundary code returns to a function, at the ret
				 * point, a new false hardware error might occur too based
				 * on tests.  Ignore it too.
				 */
				goto traps_done;
			} else if ((fp->rets >= (L1_CODE_START + L1_CODE_LENGTH - 512)) &&
				   (fp->rets < (L1_CODE_START + L1_CODE_LENGTH))) {
				/*
				 * If boundary code calls a function, at the entry point,
				 * a new false hardware error may happen based on tests.
				 * Ignore it too.
				 */
				goto traps_done;
			} else
				anomaly_rets = 0;
		}

		info.si_code = BUS_ADRERR;
558 sig = SIGBUS;
559 strerror = KERN_NOTICE HWC_x3(KERN_NOTICE);
560 break;
561 /* Performance Monitor Overflow */
562 case (SEQSTAT_HWERRCAUSE_PERF_FLOW):
563 strerror = KERN_NOTICE HWC_x12(KERN_NOTICE);
564 break;
565 /* RAISE 5 instruction */
566 case (SEQSTAT_HWERRCAUSE_RAISE_5):
567 printk(KERN_NOTICE HWC_x18(KERN_NOTICE));
568 break;
569 default: /* Reserved */
570 printk(KERN_NOTICE HWC_default(KERN_NOTICE));
			break;
		}
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/*
	 * We should be handling all known exception types above,
	 * if we get here we hit a reserved one, so panic
	 */
	default:
580 info.si_code = ILL_ILLPARAOP;
581 sig = SIGILL;
582 verbose_printk(KERN_EMERG "Caught Unhandled Exception, code = %08lx\n",
583 (fp->seqstat & SEQSTAT_EXCAUSE));
584 CHK_DEBUGGER_TRAP_MAYBE();
		break;
	}

	BUG_ON(sig == 0);
	/* If the fault was caused by a kernel thread, or interrupt handler
	 * we will kernel panic, so the system reboots.
	 */
	if (kernel_mode_regs(fp) || (current && !current->mm)) {
594 console_verbose();
		oops_in_progress = 1;
	}

	if (sig != SIGTRAP) {
599 if (strerror)
600 verbose_printk(strerror);
602 dump_bfin_process(fp);
603 dump_bfin_mem(fp);
604 show_regs(fp);
606 /* Print out the trace buffer if it makes sense */
607 #ifndef CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE
608 if (trapnr == VEC_CPLB_I_M || trapnr == VEC_CPLB_M)
609 verbose_printk(KERN_NOTICE "No trace since you do not have "
610 "CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE enabled\n\n");
611 else
612 #endif
613 dump_bfin_trace_buffer();
615 if (oops_in_progress) {
616 /* Dump the current kernel stack */
617 verbose_printk(KERN_NOTICE "Kernel Stack\n");
618 show_stack(current, NULL);
619 print_modules();
620 #ifndef CONFIG_ACCESS_CHECK
621 verbose_printk(KERN_EMERG "Please turn on "
622 "CONFIG_ACCESS_CHECK\n");
623 #endif
624 panic("Kernel exception");
625 } else {
626 #ifdef CONFIG_DEBUG_VERBOSE
627 unsigned long *stack;
628 /* Dump the user space stack */
629 stack = (unsigned long *)rdusp();
630 verbose_printk(KERN_NOTICE "Userspace Stack\n");
631 show_stack(NULL, stack);
#endif
		}
	}

#ifdef CONFIG_IPIPE
637 if (!ipipe_trap_notify(fp->seqstat & 0x3f, fp))
#endif
	{
		info.si_signo = sig;
		info.si_errno = 0;
		info.si_addr = (void __user *)fp->pc;
		force_sig_info(sig, &info, current);
	}

	if ((ANOMALY_05000461 && trapnr == VEC_HWERR && !access_ok(VERIFY_READ, fp->pc, 8)) ||
647 (ANOMALY_05000281 && trapnr == VEC_HWERR) ||
648 (ANOMALY_05000189 && (trapnr == VEC_CPLB_I_VL || trapnr == VEC_CPLB_VL)))
649 fp->pc = SAFE_USER_INSTRUCTION;
651 traps_done:
	trace_buffer_restore(j);
}

/* Typical exception handling routines */
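/* Last valid index into the expanded software trace buffer; also used as
 * the wrap-around point when walking it backwards below. */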
657 #define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)
/*
 * Similar to get_user, do some address checking, then dereference
 * Return true on success, false on bad address
 */
static bool get_instruction(unsigned short *val, unsigned short *address)
{
	unsigned long addr = (unsigned long)address;
667 /* Check for odd addresses */
668 if (addr & 0x1)
669 return false;
671 /* MMR region will never have instructions */
672 if (addr >= SYSMMR_BASE)
673 return false;
675 switch (bfin_mem_access_type(addr, 2)) {
676 case BFIN_MEM_ACCESS_CORE:
677 case BFIN_MEM_ACCESS_CORE_ONLY:
678 *val = *address;
679 return true;
680 case BFIN_MEM_ACCESS_DMA:
681 dma_memcpy(val, address, 2);
682 return true;
683 case BFIN_MEM_ACCESS_ITEST:
684 isram_memcpy(val, address, 2);
685 return true;
686 default: /* invalid access */
		return false;
	}
}

/*
 * decode the instruction if we are printing out the trace, as it
 * makes things easier to follow, without running it through objdump
 * These are the normal instructions which cause change of flow, which
 * would be at the source of the trace buffer
 */
#if defined(CONFIG_DEBUG_VERBOSE) && defined(CONFIG_DEBUG_BFIN_HWTRACE_ON)
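/* Pretty-print the 16-bit opcode at "address".  The ranges tested below are
 * the Blackfin change-of-flow encodings (RTS/RTI/RTX/RTN/RTE, register and
 * PC-relative JUMP/CALL, conditional branches, LSETUP); anything else is
 * printed as a raw hex value. */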
static void decode_instruction(unsigned short *address)
{
	unsigned short opcode;

	if (get_instruction(&opcode, address)) {
703 if (opcode == 0x0010)
704 verbose_printk("RTS");
705 else if (opcode == 0x0011)
706 verbose_printk("RTI");
707 else if (opcode == 0x0012)
708 verbose_printk("RTX");
709 else if (opcode == 0x0013)
710 verbose_printk("RTN");
711 else if (opcode == 0x0014)
712 verbose_printk("RTE");
713 else if (opcode == 0x0025)
714 verbose_printk("EMUEXCPT");
		else if (opcode >= 0x0040 && opcode <= 0x0047)
716 verbose_printk("STI R%i", opcode & 7);
717 else if (opcode >= 0x0050 && opcode <= 0x0057)
718 verbose_printk("JUMP (P%i)", opcode & 7);
719 else if (opcode >= 0x0060 && opcode <= 0x0067)
720 verbose_printk("CALL (P%i)", opcode & 7);
721 else if (opcode >= 0x0070 && opcode <= 0x0077)
722 verbose_printk("CALL (PC+P%i)", opcode & 7);
723 else if (opcode >= 0x0080 && opcode <= 0x0087)
724 verbose_printk("JUMP (PC+P%i)", opcode & 7);
725 else if (opcode >= 0x0090 && opcode <= 0x009F)
726 verbose_printk("RAISE 0x%x", opcode & 0xF);
727 else if (opcode >= 0x00A0 && opcode <= 0x00AF)
728 verbose_printk("EXCPT 0x%x", opcode & 0xF);
729 else if ((opcode >= 0x1000 && opcode <= 0x13FF) || (opcode >= 0x1800 && opcode <= 0x1BFF))
730 verbose_printk("IF !CC JUMP");
731 else if ((opcode >= 0x1400 && opcode <= 0x17ff) || (opcode >= 0x1c00 && opcode <= 0x1fff))
732 verbose_printk("IF CC JUMP");
733 else if (opcode >= 0x2000 && opcode <= 0x2fff)
734 verbose_printk("JUMP.S");
735 else if (opcode >= 0xe080 && opcode <= 0xe0ff)
736 verbose_printk("LSETUP");
737 else if (opcode >= 0xe200 && opcode <= 0xe2ff)
738 verbose_printk("JUMP.L");
739 else if (opcode >= 0xe300 && opcode <= 0xe3ff)
740 verbose_printk("CALL pcrel");
741 else
			verbose_printk("0x%04x", opcode);
	}
}
#endif
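/* Walk the hardware trace buffer (TBUF) and, when enabled, the expanded
 * software trace buffer, printing each target/source pair with decoded
 * addresses and instructions. */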
void dump_bfin_trace_buffer(void)
{
750 #ifdef CONFIG_DEBUG_VERBOSE
751 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
752 int tflags, i = 0;
753 char buf[150];
754 unsigned short *addr;
755 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
756 int j, index;
757 #endif
759 trace_buffer_save(tflags);
761 printk(KERN_NOTICE "Hardware Trace:\n");
763 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
764 printk(KERN_NOTICE "WARNING: Expanded trace turned on - can not trace exceptions\n");
765 #endif
767 if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) {
768 for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) {
769 decode_address(buf, (unsigned long)bfin_read_TBUF());
770 printk(KERN_NOTICE "%4i Target : %s\n", i, buf);
771 addr = (unsigned short *)bfin_read_TBUF();
772 decode_address(buf, (unsigned long)addr);
773 printk(KERN_NOTICE " Source : %s ", buf);
774 decode_instruction(addr);
			printk("\n");
		}
	}

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
780 if (trace_buff_offset)
781 index = trace_buff_offset / 4;
782 else
783 index = EXPAND_LEN;
785 j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128;
786 while (j) {
787 decode_address(buf, software_trace_buff[index]);
788 printk(KERN_NOTICE "%4i Target : %s\n", i, buf);
789 index -= 1;
790 if (index < 0 )
791 index = EXPAND_LEN;
792 decode_address(buf, software_trace_buff[index]);
793 printk(KERN_NOTICE " Source : %s ", buf);
794 decode_instruction((unsigned short *)software_trace_buff[index]);
795 printk("\n");
796 index -= 1;
797 if (index < 0)
798 index = EXPAND_LEN;
799 j--;
		i++;
	}
#endif
804 trace_buffer_restore(tflags);
805 #endif
#endif
}
EXPORT_SYMBOL(dump_bfin_trace_buffer);
810 #ifdef CONFIG_BUG
int is_valid_bugaddr(unsigned long addr)
{
	unsigned short opcode;

	if (!get_instruction(&opcode, (unsigned short *)addr))
		return 0;

	return opcode == BFIN_BUG_OPCODE;
}
#endif
/*
 * Checks to see if the address pointed to is either a
 * 16-bit CALL instruction, or a 32-bit CALL instruction
 */
static bool is_bfin_call(unsigned short *addr)
{
	unsigned short opcode = 0, *ins_addr;
829 ins_addr = (unsigned short *)addr;
831 if (!get_instruction(&opcode, ins_addr))
832 return false;
834 if ((opcode >= 0x0060 && opcode <= 0x0067) ||
835 (opcode >= 0x0070 && opcode <= 0x0077))
836 return true;
838 ins_addr--;
839 if (!get_instruction(&opcode, ins_addr))
840 return false;
842 if (opcode >= 0xE300 && opcode <= 0xE3FF)
843 return true;
	return false;
}
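/* Best-effort stack dump: scan the stack for words that look like return
 * addresses (the preceding instruction decodes as a CALL), follow a frame
 * pointer chain when one can be found, and print both raw memory and the
 * decoded return addresses. */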
void show_stack(struct task_struct *task, unsigned long *stack)
{
851 #ifdef CONFIG_PRINTK
852 unsigned int *addr, *endstack, *fp = 0, *frame;
853 unsigned short *ins_addr;
854 char buf[150];
855 unsigned int i, j, ret_addr, frame_no = 0;
	/*
	 * If we have been passed a specific stack, use that one otherwise
	 * if we have been passed a task structure, use that, otherwise
	 * use the stack of where the variable "stack" exists
	 */

	if (stack == NULL) {
864 if (task) {
865 /* We know this is a kernel stack, so this is the start/end */
866 stack = (unsigned long *)task->thread.ksp;
867 endstack = (unsigned int *)(((unsigned int)(stack) & ~(THREAD_SIZE - 1)) + THREAD_SIZE);
868 } else {
869 /* print out the existing stack info */
870 stack = (unsigned long *)&stack;
			endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);
		}
	} else
		endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);
876 printk(KERN_NOTICE "Stack info:\n");
877 decode_address(buf, (unsigned int)stack);
878 printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf);
880 if (!access_ok(VERIFY_READ, stack, (unsigned int)endstack - (unsigned int)stack)) {
881 printk(KERN_NOTICE "Invalid stack pointer\n");
		return;
	}

	/* First thing is to look for a frame pointer */
886 for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) {
887 if (*addr & 0x1)
888 continue;
889 ins_addr = (unsigned short *)*addr;
890 ins_addr--;
891 if (is_bfin_call(ins_addr))
892 fp = addr - 1;
894 if (fp) {
895 /* Let's check to see if it is a frame pointer */
896 while (fp >= (addr - 1) && fp < endstack
897 && fp && ((unsigned int) fp & 0x3) == 0)
898 fp = (unsigned int *)*fp;
899 if (fp == 0 || fp == endstack) {
900 fp = addr - 1;
				break;
			}
			fp = 0;
		}
	}
	if (fp) {
907 frame = fp;
908 printk(KERN_NOTICE " FP: (0x%p)\n", fp);
909 } else
		frame = 0;

	/*
	 * Now that we think we know where things are, we
	 * walk the stack again, this time printing things out
	 * in case there is no frame pointer, we still look for
	 * valid return addresses
	 */
919 /* First time print out data, next time, print out symbols */
920 for (j = 0; j <= 1; j++) {
921 if (j)
922 printk(KERN_NOTICE "Return addresses in stack:\n");
923 else
924 printk(KERN_NOTICE " Memory from 0x%08lx to %p", ((long unsigned int)stack & ~0xF), endstack);
926 fp = frame;
927 frame_no = 0;
929 for (addr = (unsigned int *)((unsigned int)stack & ~0xF), i = 0;
930 addr < endstack; addr++, i++) {
932 ret_addr = 0;
933 if (!j && i % 8 == 0)
934 printk(KERN_NOTICE "%p:",addr);
936 /* if it is an odd address, or zero, just skip it */
937 if (*addr & 0x1 || !*addr)
938 goto print;
940 ins_addr = (unsigned short *)*addr;
942 /* Go back one instruction, and see if it is a CALL */
943 ins_addr--;
944 ret_addr = is_bfin_call(ins_addr);
945 print:
946 if (!j && stack == (unsigned long *)addr)
947 printk("[%08x]", *addr);
948 else if (ret_addr)
949 if (j) {
950 decode_address(buf, (unsigned int)*addr);
951 if (frame == addr) {
952 printk(KERN_NOTICE " frame %2i : %s\n", frame_no, buf);
					continue;
				}
				printk(KERN_NOTICE " address : %s\n", buf);
956 } else
957 printk("<%08x>", *addr);
958 else if (fp == addr) {
959 if (j)
960 frame = addr+1;
961 else
962 printk("(%08x)", *addr);
964 fp = (unsigned int *)*addr;
965 frame_no++;
967 } else if (!j)
				printk(" %08x ", *addr);
		}
		if (!j)
			printk("\n");
	}
#endif
}
EXPORT_SYMBOL(show_stack);
void dump_stack(void)
{
979 unsigned long stack;
980 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
981 int tflags;
982 #endif
983 trace_buffer_save(tflags);
984 dump_bfin_trace_buffer();
985 show_stack(current, &stack);
	trace_buffer_restore(tflags);
}
EXPORT_SYMBOL(dump_stack);
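/* Describe the context we crashed in (exception, interrupt or kernel) and
 * the current process, validating pointers before dereferencing them since
 * we may already be handling corrupted state. */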
void dump_bfin_process(struct pt_regs *fp)
{
992 #ifdef CONFIG_DEBUG_VERBOSE
993 /* We should be able to look at fp->ipend, but we don't push it on the
994 * stack all the time, so do this until we fix that */
995 unsigned int context = bfin_read_IPEND();
997 if (oops_in_progress)
998 verbose_printk(KERN_EMERG "Kernel OOPS in progress\n");
1000 if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR)
1001 verbose_printk(KERN_NOTICE "HW Error context\n");
1002 else if (context & 0x0020)
1003 verbose_printk(KERN_NOTICE "Deferred Exception context\n");
1004 else if (context & 0x3FC0)
1005 verbose_printk(KERN_NOTICE "Interrupt context\n");
1006 else if (context & 0x4000)
1007 verbose_printk(KERN_NOTICE "Deferred Interrupt context\n");
1008 else if (context & 0x8000)
1009 verbose_printk(KERN_NOTICE "Kernel process context\n");
	/* Because we are crashing, and pointers could be bad, we check things
	 * pretty closely before we use them
	 */
	if ((unsigned long)current >= FIXED_CODE_START &&
1015 !((unsigned long)current & 0x3) && current->pid) {
1016 verbose_printk(KERN_NOTICE "CURRENT PROCESS:\n");
1017 if (current->comm >= (char *)FIXED_CODE_START)
1018 verbose_printk(KERN_NOTICE "COMM=%s PID=%d",
1019 current->comm, current->pid);
1020 else
1021 verbose_printk(KERN_NOTICE "COMM= invalid");
1023 printk(KERN_CONT " CPU=%d\n", current_thread_info()->cpu);
1024 if (!((unsigned long)current->mm & 0x3) && (unsigned long)current->mm >= FIXED_CODE_START)
1025 verbose_printk(KERN_NOTICE
1026 "TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n"
1027 " BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n",
1028 (void *)current->mm->start_code,
1029 (void *)current->mm->end_code,
1030 (void *)current->mm->start_data,
1031 (void *)current->mm->end_data,
1032 (void *)current->mm->end_data,
1033 (void *)current->mm->brk,
1034 (void *)current->mm->start_stack);
1035 else
1036 verbose_printk(KERN_NOTICE "invalid mm\n");
1037 } else
1038 verbose_printk(KERN_NOTICE
1039 "No Valid process in current context\n");
#endif
}
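/* Dump the instruction words surrounding the faulting PC, and note when the
 * fault looks like a deferred hardware error (interrupts were re-enabled
 * between the real cause and this report). */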
void dump_bfin_mem(struct pt_regs *fp)
{
1045 #ifdef CONFIG_DEBUG_VERBOSE
1046 unsigned short *addr, *erraddr, val = 0, err = 0;
1047 char sti = 0, buf[6];
1049 erraddr = (void *)fp->pc;
1051 verbose_printk(KERN_NOTICE "return address: [0x%p]; contents of:", erraddr);
1053 for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10;
1054 addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10;
1055 addr++) {
1056 if (!((unsigned long)addr & 0xF))
1057 verbose_printk(KERN_NOTICE "0x%p: ", addr);
1059 if (!get_instruction(&val, addr)) {
1060 val = 0;
1061 sprintf(buf, "????");
1062 } else
1063 sprintf(buf, "%04x", val);
1065 if (addr == erraddr) {
1066 verbose_printk("[%s]", buf);
1067 err = val;
1068 } else
1069 verbose_printk(" %s ", buf);
1071 /* Do any previous instructions turn on interrupts? */
1072 if (addr <= erraddr && /* in the past */
1073 ((val >= 0x0040 && val <= 0x0047) || /* STI instruction */
1074 val == 0x017b)) /* [SP++] = RETI */
			sti = 1;
	}

	verbose_printk("\n");
1080 /* Hardware error interrupts can be deferred */
1081 if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR &&
1082 oops_in_progress)){
1083 verbose_printk(KERN_NOTICE "Looks like this was a deferred error - sorry\n");
1084 #ifndef CONFIG_DEBUG_HWERR
1085 verbose_printk(KERN_NOTICE
1086 "The remaining message may be meaningless\n"
1087 "You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n");
1088 #else
1089 /* If we are handling only one peripheral interrupt
1090 * and current mm and pid are valid, and the last error
1091 * was in that user space process's text area
		 * print it out - because that is where the problem exists
		 */
		if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) &&
1095 (current->pid && current->mm)) {
1096 /* And the last RETI points to the current userspace context */
1097 if ((fp + 1)->pc >= current->mm->start_code &&
1098 (fp + 1)->pc <= current->mm->end_code) {
1099 verbose_printk(KERN_NOTICE "It might be better to look around here : \n");
1100 verbose_printk(KERN_NOTICE "-------------------------------------------\n");
1101 show_regs(fp + 1);
				verbose_printk(KERN_NOTICE "-------------------------------------------\n");
			}
		}
#endif
	}
#endif
}
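/* Print the full processor state: silicon/revision information, sequencer
 * status, pending interrupts, CPLB fault addresses and the register set. */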
void show_regs(struct pt_regs *fp)
{
1112 #ifdef CONFIG_DEBUG_VERBOSE
1113 char buf [150];
1114 struct irqaction *action;
1115 unsigned int i;
1116 unsigned long flags = 0;
1117 unsigned int cpu = raw_smp_processor_id();
1118 unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
1120 verbose_printk(KERN_NOTICE "\n");
1121 if (CPUID != bfin_cpuid())
1122 verbose_printk(KERN_NOTICE "Compiled for cpu family 0x%04x (Rev %d), "
1123 "but running on:0x%04x (Rev %d)\n",
1124 CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid());
1126 verbose_printk(KERN_NOTICE "ADSP-%s-0.%d",
1127 CPU, bfin_compiled_revid());
1129 if (bfin_compiled_revid() != bfin_revid())
1130 verbose_printk("(Detected 0.%d)", bfin_revid());
1132 verbose_printk(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n",
1133 get_cclk()/1000000, get_sclk()/1000000,
1134 #ifdef CONFIG_MPU
1135 "mpu on"
1136 #else
1137 "mpu off"
#endif
		);

	verbose_printk(KERN_NOTICE "%s", linux_banner);
1143 verbose_printk(KERN_NOTICE "\nSEQUENCER STATUS:\t\t%s\n", print_tainted());
1144 verbose_printk(KERN_NOTICE " SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n",
1145 (long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg);
1146 if (fp->ipend & EVT_IRPTEN)
1147 verbose_printk(KERN_NOTICE " Global Interrupts Disabled (IPEND[4])\n");
1148 if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 |
1149 EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR)))
1150 verbose_printk(KERN_NOTICE " Peripheral interrupts masked off\n");
1151 if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14)))
1152 verbose_printk(KERN_NOTICE " Kernel interrupts masked off\n");
1153 if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) {
1154 verbose_printk(KERN_NOTICE " HWERRCAUSE: 0x%lx\n",
1155 (fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14);
1156 #ifdef EBIU_ERRMST
1157 /* If the error was from the EBIU, print it out */
1158 if (bfin_read_EBIU_ERRMST() & CORE_ERROR) {
1159 verbose_printk(KERN_NOTICE " EBIU Error Reason : 0x%04x\n",
1160 bfin_read_EBIU_ERRMST());
1161 verbose_printk(KERN_NOTICE " EBIU Error Address : 0x%08x\n",
				bfin_read_EBIU_ERRADD());
		}
#endif
	}
	verbose_printk(KERN_NOTICE " EXCAUSE : 0x%lx\n",
1167 fp->seqstat & SEQSTAT_EXCAUSE);
1168 for (i = 2; i <= 15 ; i++) {
1169 if (fp->ipend & (1 << i)) {
1170 if (i != 4) {
1171 decode_address(buf, bfin_read32(EVT0 + 4*i));
1172 verbose_printk(KERN_NOTICE " physical IVG%i asserted : %s\n", i, buf);
1173 } else
				verbose_printk(KERN_NOTICE " interrupts disabled\n");
		}
	}

	/* if no interrupts are going off, don't print this out */
1179 if (fp->ipend & ~0x3F) {
1180 for (i = 0; i < (NR_IRQS - 1); i++) {
1181 if (!in_atomic)
1182 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
1184 action = irq_desc[i].action;
1185 if (!action)
1186 goto unlock;
1188 decode_address(buf, (unsigned int)action->handler);
1189 verbose_printk(KERN_NOTICE " logical irq %3d mapped : %s", i, buf);
1190 for (action = action->next; action; action = action->next) {
1191 decode_address(buf, (unsigned int)action->handler);
				verbose_printk(", %s", buf);
			}
			verbose_printk("\n");
unlock:
			if (!in_atomic)
				raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
		}
	}

	decode_address(buf, fp->rete);
1202 verbose_printk(KERN_NOTICE " RETE: %s\n", buf);
1203 decode_address(buf, fp->retn);
1204 verbose_printk(KERN_NOTICE " RETN: %s\n", buf);
1205 decode_address(buf, fp->retx);
1206 verbose_printk(KERN_NOTICE " RETX: %s\n", buf);
1207 decode_address(buf, fp->rets);
1208 verbose_printk(KERN_NOTICE " RETS: %s\n", buf);
1209 decode_address(buf, fp->pc);
1210 verbose_printk(KERN_NOTICE " PC : %s\n", buf);
1212 if (((long)fp->seqstat & SEQSTAT_EXCAUSE) &&
1213 (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) {
1214 decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
1215 verbose_printk(KERN_NOTICE "DCPLB_FAULT_ADDR: %s\n", buf);
1216 decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
		verbose_printk(KERN_NOTICE "ICPLB_FAULT_ADDR: %s\n", buf);
	}

	verbose_printk(KERN_NOTICE "PROCESSOR STATE:\n");
1221 verbose_printk(KERN_NOTICE " R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
1222 fp->r0, fp->r1, fp->r2, fp->r3);
1223 verbose_printk(KERN_NOTICE " R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
1224 fp->r4, fp->r5, fp->r6, fp->r7);
1225 verbose_printk(KERN_NOTICE " P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n",
1226 fp->p0, fp->p1, fp->p2, fp->p3);
1227 verbose_printk(KERN_NOTICE " P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n",
1228 fp->p4, fp->p5, fp->fp, (long)fp);
1229 verbose_printk(KERN_NOTICE " LB0: %08lx LT0: %08lx LC0: %08lx\n",
1230 fp->lb0, fp->lt0, fp->lc0);
1231 verbose_printk(KERN_NOTICE " LB1: %08lx LT1: %08lx LC1: %08lx\n",
1232 fp->lb1, fp->lt1, fp->lc1);
1233 verbose_printk(KERN_NOTICE " B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n",
1234 fp->b0, fp->l0, fp->m0, fp->i0);
1235 verbose_printk(KERN_NOTICE " B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n",
1236 fp->b1, fp->l1, fp->m1, fp->i1);
1237 verbose_printk(KERN_NOTICE " B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n",
1238 fp->b2, fp->l2, fp->m2, fp->i2);
1239 verbose_printk(KERN_NOTICE " B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n",
1240 fp->b3, fp->l3, fp->m3, fp->i3);
1241 verbose_printk(KERN_NOTICE "A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n",
1242 fp->a0w, fp->a0x, fp->a1w, fp->a1x);
1244 verbose_printk(KERN_NOTICE "USP : %08lx ASTAT: %08lx\n",
1245 rdusp(), fp->astat);
1247 verbose_printk(KERN_NOTICE "\n");
#endif
}

#ifdef CONFIG_SYS_BFIN_SPINLOCK_L1
1252 asmlinkage int sys_bfin_spinlock(int *spinlock)__attribute__((l1_text));
1253 #endif
1255 static DEFINE_SPINLOCK(bfin_spinlock_lock);
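/* Userspace test-and-set helper: under a kernel spinlock, return 1 if the
 * word at "p" is already set, otherwise set it to 1 and return 0 (or the
 * fault code if the pointer is bad). */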
asmlinkage int sys_bfin_spinlock(int *p)
{
1259 int ret, tmp = 0;
1261 spin_lock(&bfin_spinlock_lock); /* This would also hold kernel preemption. */
1262 ret = get_user(tmp, p);
1263 if (likely(ret == 0)) {
1264 if (unlikely(tmp))
1265 ret = 1;
1266 else
			put_user(1, p);
	}
	spin_unlock(&bfin_spinlock_lock);
	return ret;
}
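/* Install "handler" for the given exception vector, but only while the
 * ex_table slot still holds the ex_replaceable placeholder. */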
int bfin_request_exception(unsigned int exception, void (*handler)(void))
{
1275 void (*curr_handler)(void);
1277 if (exception > 0x3F)
1278 return -EINVAL;
1280 curr_handler = ex_table[exception];
1282 if (curr_handler != ex_replaceable)
1283 return -EBUSY;
1285 ex_table[exception] = handler;
	return 0;
}
EXPORT_SYMBOL(bfin_request_exception);

int bfin_free_exception(unsigned int exception, void (*handler)(void))
{
1293 void (*curr_handler)(void);
1295 if (exception > 0x3F)
1296 return -EINVAL;
1298 curr_handler = ex_table[exception];
1300 if (curr_handler != handler)
1301 return -EBUSY;
1303 ex_table[exception] = ex_replaceable;
	return 0;
}
EXPORT_SYMBOL(bfin_free_exception);
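/* Called by the CPLB management code when a fault cannot be resolved;
 * protection violations and address mismatches are left for trap_c() to
 * signal, everything else dumps state and panics. */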
void panic_cplb_error(int cplb_panic, struct pt_regs *fp)
{
1311 switch (cplb_panic) {
1312 case CPLB_NO_UNLOCKED:
1313 printk(KERN_EMERG "All CPLBs are locked\n");
1314 break;
1315 case CPLB_PROT_VIOL:
1316 return;
1317 case CPLB_NO_ADDR_MATCH:
1318 return;
1319 case CPLB_UNKNOWN_ERR:
1320 printk(KERN_EMERG "Unknown CPLB Exception\n");
		break;
	}

	oops_in_progress = 1;
1326 dump_bfin_process(fp);
1327 dump_bfin_mem(fp);
1328 show_regs(fp);
1329 dump_stack();
	panic("Unrecoverable event");
}