/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/rbtree.h>
#include <asm/traps.h>
#include <asm/cacheflush.h>
#include <asm/blackfin.h>
#include <asm/irq_handler.h>
#include <linux/irq.h>
#include <asm/trace.h>
#include <asm/fixed_code.h>
#ifdef CONFIG_KGDB
# include <linux/kgdb.h>

# define CHK_DEBUGGER_TRAP() \
    do { \
        kgdb_handle_exception(trapnr, sig, info.si_code, fp); \
    } while (0)
# define CHK_DEBUGGER_TRAP_MAYBE() \
    do { \
        if (kgdb_connected) \
            CHK_DEBUGGER_TRAP(); \
    } while (0)
#else
# define CHK_DEBUGGER_TRAP() do { } while (0)
# define CHK_DEBUGGER_TRAP_MAYBE() do { } while (0)
#endif
#ifdef CONFIG_DEBUG_VERBOSE
#define verbose_printk(fmt, arg...) \
    printk(fmt, ##arg)
#else
#define verbose_printk(fmt, arg...) \
    ({ if (0) printk(fmt, ##arg); 0; })
#endif
#if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE)
u32 last_seqstat;
#ifdef CONFIG_DEBUG_MMRS_MODULE
EXPORT_SYMBOL(last_seqstat);
#endif
#endif
/* Initiate the event table handler */
void __init trap_init(void)
{
    bfin_write_EVT3(trap);
}
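/*
 * decode_address() - render an address as a human-readable string: the
 * kernel symbol it falls in (via kallsyms), a well-known region (fixed
 * code, MMRs, on-chip L1 ROM), or the user-space mapping that contains
 * it, found by walking the VMAs of every process.
 */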
static void decode_address(char *buf, unsigned long address)
{
#ifdef CONFIG_DEBUG_VERBOSE
    struct task_struct *p;
    struct mm_struct *mm;
    unsigned long flags, offset;
    unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
    struct rb_node *n;

#ifdef CONFIG_KALLSYMS
    unsigned long symsize;
    const char *symname;
    char *modname;
    char *delim = ":";
    char namebuf[KSYM_NAME_LEN];
#endif

    buf += sprintf(buf, "<0x%08lx> ", address);

#ifdef CONFIG_KALLSYMS
    /* look up the address and see if we are in kernel space */
    symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);

    if (symname) {
        /* yeah! kernel space! */
        if (!modname)
            modname = delim = "";
        sprintf(buf, "{ %s%s%s%s + 0x%lx }",
            delim, modname, delim, symname,
            (unsigned long)offset);
        return;
    }
#endif

    if (address >= FIXED_CODE_START && address < FIXED_CODE_END) {
        /* Problem in fixed code section? */
        strcat(buf, "/* Maybe fixed code section */");
        return;

    } else if (address < CONFIG_BOOT_LOAD) {
        /* Problem somewhere before the kernel start address */
        strcat(buf, "/* Maybe null pointer? */");
        return;

    } else if (address >= COREMMR_BASE) {
        strcat(buf, "/* core mmrs */");
        return;

    } else if (address >= SYSMMR_BASE) {
        strcat(buf, "/* system mmrs */");
        return;

    } else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) {
        strcat(buf, "/* on-chip L1 ROM */");
        return;
    }

    /*
     * Don't walk any of the vmas if we are oopsing, it has been known
     * to cause problems - corrupt vmas (kernel crashes) cause double faults
     */
    if (oops_in_progress) {
        strcat(buf, "/* kernel dynamic memory (maybe user-space) */");
        return;
    }

    /* looks like we're off in user-land, so let's walk all the
     * mappings of all our processes and see if we can't be a whee
     * bit more specific
     */
    write_lock_irqsave(&tasklist_lock, flags);
    for_each_process(p) {
        mm = (in_atomic ? p->mm : get_task_mm(p));
        if (!mm)
            continue;

        for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
            struct vm_area_struct *vma;

            vma = rb_entry(n, struct vm_area_struct, vm_rb);

            if (address >= vma->vm_start && address < vma->vm_end) {
                char _tmpbuf[256];
                char *name = p->comm;
                struct file *file = vma->vm_file;

                if (file) {
                    char *d_name = d_path(&file->f_path, _tmpbuf,
                                          sizeof(_tmpbuf));
                    if (!IS_ERR(d_name))
                        name = d_name;
                }

                /* FLAT does not have its text aligned to the start of
                 * the map while FDPIC ELF does ...
                 */

                /* before we can check flat/fdpic, we need to
                 * make sure current is valid
                 */
                if ((unsigned long)current >= FIXED_CODE_START &&
                    !((unsigned long)current & 0x3)) {
                    if (current->mm &&
                        (address > current->mm->start_code) &&
                        (address < current->mm->end_code))
                        offset = address - current->mm->start_code;
                    else
                        offset = (address - vma->vm_start) +
                                 (vma->vm_pgoff << PAGE_SHIFT);

                    sprintf(buf, "[ %s + 0x%lx ]", name, offset);
                } else
                    sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
                        name, vma->vm_start, vma->vm_end);

                if (!in_atomic)
                    mmput(mm);

                if (buf[0] == '\0')
                    sprintf(buf, "[ %s ] dynamic memory", name);

                goto done;
            }
        }
        if (!in_atomic)
            mmput(mm);
    }

    /* we were unable to find this address anywhere */
    sprintf(buf, "/* kernel dynamic memory */");

done:
    write_unlock_irqrestore(&tasklist_lock, flags);
#else
    sprintf(buf, " ");
#endif
}
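/*
 * double_fault_c() - C portion of the double-fault path: dump whatever
 * state survived (the shadow copies in cpu_pda when
 * CONFIG_DEBUG_DOUBLEFAULT_PRINT is set), then panic, since a double
 * fault is not recoverable.
 */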
asmlinkage void double_fault_c(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
    int j;
    trace_buffer_save(j);
#endif

    oops_in_progress = 1;
#ifdef CONFIG_DEBUG_VERBOSE
    printk(KERN_EMERG "Double Fault\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
    if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) {
        unsigned int cpu = raw_smp_processor_id();
        char buf[150];
        decode_address(buf, cpu_pda[cpu].retx_doublefault);
        printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n",
            (unsigned int)cpu_pda[cpu].seqstat_doublefault & SEQSTAT_EXCAUSE, buf);
        decode_address(buf, cpu_pda[cpu].dcplb_doublefault_addr);
        printk(KERN_NOTICE "  DCPLB_FAULT_ADDR: %s\n", buf);
        decode_address(buf, cpu_pda[cpu].icplb_doublefault_addr);
        printk(KERN_NOTICE "  ICPLB_FAULT_ADDR: %s\n", buf);

        decode_address(buf, fp->retx);
        printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf);
    } else
#endif
    {
        dump_bfin_process(fp);
        dump_bfin_mem(fp);
        show_regs(fp);
        dump_bfin_trace_buffer();
    }
#endif
    panic("Double Fault - unrecoverable event");
}
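/*
 * kernel_mode_regs() - non-zero when the saved IPEND has any of bits
 * 6..15 set, i.e. the exception was raised while running at one of the
 * kernel interrupt levels rather than in user mode.
 */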
static int kernel_mode_regs(struct pt_regs *regs)
{
    return regs->ipend & 0xffc0;
}
asmlinkage notrace void trap_c(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
    int j;
#endif
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
    unsigned int cpu = raw_smp_processor_id();
#endif
    const char *strerror = NULL;
    int sig = 0;
    siginfo_t info;
    unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE;

    trace_buffer_save(j);
#if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE)
    last_seqstat = (u32)fp->seqstat;
#endif

    /* Important - be very careful dereferencing pointers - will lead to
     * double faults if the stack has become corrupt
     */

    /* trap_c() will be called for exceptions. During exceptions
     * processing, the pc value should be set with retx value.
     * With this change we can cleanup some code in signal.c - TODO
     */
    fp->orig_pc = fp->retx;
    /* printk("exception: 0x%x, ipend=%x, reti=%x, retx=%x\n",
        trapnr, fp->ipend, fp->pc, fp->retx); */

    /* send the appropriate signal to the user program */
    switch (trapnr) {
    /* This table works in conjunction with the one in ./mach-common/entry.S
     * Some exceptions are handled there (in assembly, in exception space)
     * Some are handled here, (in C, in interrupt space)
     * Some, like CPLB, are handled in both, where the normal path is
     * handled in assembly/exception space, and the error path is handled
     * here
     */

    /* 0x00 - Linux Syscall, getting here is an error */
    /* 0x01 - userspace gdb breakpoint, handled here */
    case VEC_EXCPT01:
        info.si_code = TRAP_ILLTRAP;
        sig = SIGTRAP;
        CHK_DEBUGGER_TRAP_MAYBE();
        /* Check if this is a breakpoint in kernel space */
        if (kernel_mode_regs(fp))
            goto traps_done;
        else
            break;
    /* 0x03 - User Defined, userspace stack overflow */
    case VEC_EXCPT03:
        info.si_code = SEGV_STACKFLOW;
        sig = SIGSEGV;
        strerror = KERN_NOTICE EXC_0x03(KERN_NOTICE);
        CHK_DEBUGGER_TRAP_MAYBE();
        break;
    /* 0x02 - KGDB initial connection and break signal trap */
    case VEC_EXCPT02:
#ifdef CONFIG_KGDB
        info.si_code = TRAP_ILLTRAP;
        sig = SIGTRAP;
        CHK_DEBUGGER_TRAP();
        goto traps_done;
#endif
    /* 0x04 - User Defined */
    /* 0x05 - User Defined */
    /* 0x06 - User Defined */
    /* 0x07 - User Defined */
    /* 0x08 - User Defined */
    /* 0x09 - User Defined */
    /* 0x0A - User Defined */
    /* 0x0B - User Defined */
    /* 0x0C - User Defined */
    /* 0x0D - User Defined */
    /* 0x0E - User Defined */
    /* 0x0F - User Defined */
    /* If we got here, it is most likely that someone was trying to use a
     * custom exception handler, and it is not actually installed properly
     */
    case VEC_EXCPT04 ... VEC_EXCPT15:
        info.si_code = ILL_ILLPARAOP;
        sig = SIGILL;
        strerror = KERN_NOTICE EXC_0x04(KERN_NOTICE);
        CHK_DEBUGGER_TRAP_MAYBE();
        break;
    /* 0x10 - HW Single step, handled here */
    case VEC_STEP:
        info.si_code = TRAP_STEP;
        sig = SIGTRAP;
        CHK_DEBUGGER_TRAP_MAYBE();
        /* Check if this is a single step in kernel space */
        if (kernel_mode_regs(fp))
            goto traps_done;
        else
            break;
    /* 0x11 - Trace Buffer Full, handled here */
    case VEC_OVFLOW:
        info.si_code = TRAP_TRACEFLOW;
        sig = SIGTRAP;
        strerror = KERN_NOTICE EXC_0x11(KERN_NOTICE);
        CHK_DEBUGGER_TRAP_MAYBE();
        break;
    /* 0x12 - Reserved, Caught by default */
    /* 0x13 - Reserved, Caught by default */
    /* 0x14 - Reserved, Caught by default */
    /* 0x15 - Reserved, Caught by default */
    /* 0x16 - Reserved, Caught by default */
    /* 0x17 - Reserved, Caught by default */
    /* 0x18 - Reserved, Caught by default */
    /* 0x19 - Reserved, Caught by default */
    /* 0x1A - Reserved, Caught by default */
    /* 0x1B - Reserved, Caught by default */
    /* 0x1C - Reserved, Caught by default */
    /* 0x1D - Reserved, Caught by default */
    /* 0x1E - Reserved, Caught by default */
    /* 0x1F - Reserved, Caught by default */
    /* 0x20 - Reserved, Caught by default */
    /* 0x21 - Undefined Instruction, handled here */
    case VEC_UNDEF_I:
#ifdef CONFIG_BUG
        if (kernel_mode_regs(fp)) {
            switch (report_bug(fp->pc, fp)) {
            case BUG_TRAP_TYPE_NONE:
                break;
            case BUG_TRAP_TYPE_WARN:
                dump_bfin_trace_buffer();
                fp->pc += 2;
                goto traps_done;
            case BUG_TRAP_TYPE_BUG:
                /* call to panic() will dump trace, and it is
                 * off at this point, so it won't be clobbered
                 */
                panic("BUG()");
            }
        }
#endif
        info.si_code = ILL_ILLOPC;
        sig = SIGILL;
        strerror = KERN_NOTICE EXC_0x21(KERN_NOTICE);
        CHK_DEBUGGER_TRAP_MAYBE();
        break;
    /* 0x22 - Illegal Instruction Combination, handled here */
    case VEC_ILGAL_I:
        info.si_code = ILL_ILLPARAOP;
        sig = SIGILL;
        strerror = KERN_NOTICE EXC_0x22(KERN_NOTICE);
        CHK_DEBUGGER_TRAP_MAYBE();
        break;
    /* 0x23 - Data CPLB protection violation, handled here */
    case VEC_CPLB_VL:
        info.si_code = ILL_CPLB_VI;
        sig = SIGSEGV;
        strerror = KERN_NOTICE EXC_0x23(KERN_NOTICE);
        CHK_DEBUGGER_TRAP_MAYBE();
        break;
    /* 0x24 - Data access misaligned, handled here */
    case VEC_MISALI_D:
        info.si_code = BUS_ADRALN;
        sig = SIGBUS;
        strerror = KERN_NOTICE EXC_0x24(KERN_NOTICE);
        CHK_DEBUGGER_TRAP_MAYBE();
        break;
    /* 0x25 - Unrecoverable Event, handled here */
    case VEC_UNCOV:
        info.si_code = ILL_ILLEXCPT;
        sig = SIGILL;
        strerror = KERN_NOTICE EXC_0x25(KERN_NOTICE);
        CHK_DEBUGGER_TRAP_MAYBE();
        break;
    /* 0x26 - Data CPLB Miss, normal case is handled in _cplb_hdr,
       error case is handled here */
    case VEC_CPLB_M:
        info.si_code = BUS_ADRALN;
        sig = SIGBUS;
        strerror = KERN_NOTICE EXC_0x26(KERN_NOTICE);
        break;
    /* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero, handled here */
    case VEC_CPLB_MHIT:
        info.si_code = ILL_CPLB_MULHIT;
        sig = SIGSEGV;
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
        if (cpu_pda[cpu].dcplb_fault_addr < FIXED_CODE_START)
            strerror = KERN_NOTICE "NULL pointer access\n";
        else
#endif
            strerror = KERN_NOTICE EXC_0x27(KERN_NOTICE);
        CHK_DEBUGGER_TRAP_MAYBE();
        break;
    /* 0x28 - Emulation Watchpoint, handled here */
    case VEC_WATCH:
        info.si_code = TRAP_WATCHPT;
        sig = SIGTRAP;
        pr_debug(EXC_0x28(KERN_DEBUG));
        CHK_DEBUGGER_TRAP_MAYBE();
        /* Check if this is a watchpoint in kernel space */
        if (kernel_mode_regs(fp))
            goto traps_done;
        else
            break;
#ifdef CONFIG_BF535
    /* 0x29 - Instruction fetch access error (535 only) */
    case VEC_ISTRU_VL:      /* ADSP-BF535 only (MH) */
        info.si_code = BUS_OPFETCH;
        sig = SIGBUS;
        strerror = KERN_NOTICE "BF535: VEC_ISTRU_VL\n";
        CHK_DEBUGGER_TRAP_MAYBE();
        break;
#else
    /* 0x29 - Reserved, Caught by default */
#endif
    /* 0x2A - Instruction fetch misaligned, handled here */
    case VEC_MISALI_I:
        info.si_code = BUS_ADRALN;
        sig = SIGBUS;
        strerror = KERN_NOTICE EXC_0x2A(KERN_NOTICE);
        CHK_DEBUGGER_TRAP_MAYBE();
        break;
    /* 0x2B - Instruction CPLB protection violation, handled here */
    case VEC_CPLB_I_VL:
        info.si_code = ILL_CPLB_VI;
        sig = SIGBUS;
        strerror = KERN_NOTICE EXC_0x2B(KERN_NOTICE);
        CHK_DEBUGGER_TRAP_MAYBE();
        break;
    /* 0x2C - Instruction CPLB miss, handled in _cplb_hdr */
    case VEC_CPLB_I_M:
        info.si_code = ILL_CPLB_MISS;
        sig = SIGBUS;
        strerror = KERN_NOTICE EXC_0x2C(KERN_NOTICE);
        break;
    /* 0x2D - Instruction CPLB Multiple Hits, handled here */
    case VEC_CPLB_I_MHIT:
        info.si_code = ILL_CPLB_MULHIT;
        sig = SIGSEGV;
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
        if (cpu_pda[cpu].icplb_fault_addr < FIXED_CODE_START)
            strerror = KERN_NOTICE "Jump to NULL address\n";
        else
#endif
            strerror = KERN_NOTICE EXC_0x2D(KERN_NOTICE);
        CHK_DEBUGGER_TRAP_MAYBE();
        break;
    /* 0x2E - Illegal use of Supervisor Resource, handled here */
    case VEC_ILL_RES:
        info.si_code = ILL_PRVOPC;
        sig = SIGILL;
        strerror = KERN_NOTICE EXC_0x2E(KERN_NOTICE);
        CHK_DEBUGGER_TRAP_MAYBE();
        break;
    /* 0x2F - Reserved, Caught by default */
    /* 0x30 - Reserved, Caught by default */
    /* 0x31 - Reserved, Caught by default */
    /* 0x32 - Reserved, Caught by default */
    /* 0x33 - Reserved, Caught by default */
    /* 0x34 - Reserved, Caught by default */
    /* 0x35 - Reserved, Caught by default */
    /* 0x36 - Reserved, Caught by default */
    /* 0x37 - Reserved, Caught by default */
    /* 0x38 - Reserved, Caught by default */
    /* 0x39 - Reserved, Caught by default */
    /* 0x3A - Reserved, Caught by default */
    /* 0x3B - Reserved, Caught by default */
    /* 0x3C - Reserved, Caught by default */
    /* 0x3D - Reserved, Caught by default */
    /* 0x3E - Reserved, Caught by default */
    /* 0x3F - Reserved, Caught by default */
    case VEC_HWERR:
        info.si_code = BUS_ADRALN;
        sig = SIGBUS;
        switch (fp->seqstat & SEQSTAT_HWERRCAUSE) {
        /* System MMR Error */
        case (SEQSTAT_HWERRCAUSE_SYSTEM_MMR):
            info.si_code = BUS_ADRALN;
            sig = SIGBUS;
            strerror = KERN_NOTICE HWC_x2(KERN_NOTICE);
            break;
        /* External Memory Addressing Error */
        case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR):
            if (ANOMALY_05000310) {
                static unsigned long anomaly_rets;

                if ((fp->pc >= (L1_CODE_START + L1_CODE_LENGTH - 512)) &&
                    (fp->pc < (L1_CODE_START + L1_CODE_LENGTH))) {
                    /*
                     * A false hardware error will happen while fetching at
                     * the L1 instruction SRAM boundary.  Ignore it.
                     */
                    anomaly_rets = fp->rets;
                    goto traps_done;
                } else if (fp->rets == anomaly_rets) {
                    /*
                     * While boundary code returns to a function, at the ret
                     * point, a new false hardware error might occur too based
                     * on tests.  Ignore it too.
                     */
                    goto traps_done;
                } else if ((fp->rets >= (L1_CODE_START + L1_CODE_LENGTH - 512)) &&
                           (fp->rets < (L1_CODE_START + L1_CODE_LENGTH))) {
                    /*
                     * If boundary code calls a function, at the entry point,
                     * a new false hardware error maybe happen based on tests.
                     * Ignore it too.
                     */
                    goto traps_done;
                } else
                    anomaly_rets = 0;
            }

            info.si_code = BUS_ADRERR;
            sig = SIGBUS;
            strerror = KERN_NOTICE HWC_x3(KERN_NOTICE);
            break;
        /* Performance Monitor Overflow */
        case (SEQSTAT_HWERRCAUSE_PERF_FLOW):
            strerror = KERN_NOTICE HWC_x12(KERN_NOTICE);
            break;
        /* RAISE 5 instruction */
        case (SEQSTAT_HWERRCAUSE_RAISE_5):
            printk(KERN_NOTICE HWC_x18(KERN_NOTICE));
            break;
        default:        /* Reserved */
            printk(KERN_NOTICE HWC_default(KERN_NOTICE));
            break;
        }
        CHK_DEBUGGER_TRAP_MAYBE();
        break;
    /*
     * We should be handling all known exception types above,
     * if we get here we hit a reserved one, so panic
     */
    default:
        info.si_code = ILL_ILLPARAOP;
        sig = SIGILL;
        verbose_printk(KERN_EMERG "Caught Unhandled Exception, code = %08lx\n",
            (fp->seqstat & SEQSTAT_EXCAUSE));
        CHK_DEBUGGER_TRAP_MAYBE();
        break;
    }
    /* If the fault was caused by a kernel thread, or interrupt handler
     * we will kernel panic, so the system reboots.
     */
    if (kernel_mode_regs(fp) || (current && !current->mm)) {
        console_verbose();
        oops_in_progress = 1;
    }

    if (sig != SIGTRAP) {
        if (strerror)
            verbose_printk(strerror);

        dump_bfin_process(fp);
        dump_bfin_mem(fp);
        show_regs(fp);

        /* Print out the trace buffer if it makes sense */
#ifndef CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE
        if (trapnr == VEC_CPLB_I_M || trapnr == VEC_CPLB_M)
            verbose_printk(KERN_NOTICE "No trace since you do not have "
                "CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE enabled\n\n");
        else
#endif
            dump_bfin_trace_buffer();

        if (oops_in_progress) {
            /* Dump the current kernel stack */
            verbose_printk(KERN_NOTICE "Kernel Stack\n");
            show_stack(current, NULL);
#ifndef CONFIG_ACCESS_CHECK
            verbose_printk(KERN_EMERG "Please turn on "
                "CONFIG_ACCESS_CHECK\n");
#endif
            panic("Kernel exception");
        } else {
#ifdef CONFIG_DEBUG_VERBOSE
            unsigned long *stack;
            /* Dump the user space stack */
            stack = (unsigned long *)rdusp();
            verbose_printk(KERN_NOTICE "Userspace Stack\n");
            show_stack(NULL, stack);
#endif
        }
    }

#ifdef CONFIG_IPIPE
    if (!ipipe_trap_notify(fp->seqstat & 0x3f, fp))
#endif
    {
        info.si_signo = sig;
        info.si_errno = 0;
        info.si_addr = (void __user *)fp->pc;
        force_sig_info(sig, &info, current);
    }

    if ((ANOMALY_05000461 && trapnr == VEC_HWERR && !access_ok(VERIFY_READ, fp->pc, 8)) ||
        (ANOMALY_05000281 && trapnr == VEC_HWERR) ||
        (ANOMALY_05000189 && (trapnr == VEC_CPLB_I_VL || trapnr == VEC_CPLB_VL)))
        fp->pc = SAFE_USER_INSTRUCTION;

 traps_done:
    trace_buffer_restore(j);
}
/* Typical exception handling routines */

#define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)

/*
 * Similar to get_user, do some address checking, then dereference
 * Return true on success, false on bad address
 */
static bool get_instruction(unsigned short *val, unsigned short *address)
{
    unsigned long addr = (unsigned long)address;

    /* Check for odd addresses */
    if (addr & 0x1)
        return false;

    /* MMR region will never have instructions */
    if (addr >= SYSMMR_BASE)
        return false;

    switch (bfin_mem_access_type(addr, 2)) {
    case BFIN_MEM_ACCESS_CORE:
    case BFIN_MEM_ACCESS_CORE_ONLY:
        *val = *address;
        return true;
    case BFIN_MEM_ACCESS_DMA:
        dma_memcpy(val, address, 2);
        return true;
    case BFIN_MEM_ACCESS_ITEST:
        isram_memcpy(val, address, 2);
        return true;
    default: /* invalid access */
        return false;
    }
}
/*
 * decode the instruction if we are printing out the trace, as it
 * makes things easier to follow, without running it through objdump
 * These are the normal instructions which cause change of flow, which
 * would be at the source of the trace buffer
 */
#if defined(CONFIG_DEBUG_VERBOSE) && defined(CONFIG_DEBUG_BFIN_HWTRACE_ON)
static void decode_instruction(unsigned short *address)
{
    unsigned short opcode;

    if (get_instruction(&opcode, address)) {
        if (opcode == 0x0010)
            verbose_printk("RTS");
        else if (opcode == 0x0011)
            verbose_printk("RTI");
        else if (opcode == 0x0012)
            verbose_printk("RTX");
        else if (opcode == 0x0013)
            verbose_printk("RTN");
        else if (opcode == 0x0014)
            verbose_printk("RTE");
        else if (opcode == 0x0025)
            verbose_printk("EMUEXCPT");
        else if (opcode >= 0x0040 && opcode <= 0x0047)
            verbose_printk("STI R%i", opcode & 7);
        else if (opcode >= 0x0050 && opcode <= 0x0057)
            verbose_printk("JUMP (P%i)", opcode & 7);
        else if (opcode >= 0x0060 && opcode <= 0x0067)
            verbose_printk("CALL (P%i)", opcode & 7);
        else if (opcode >= 0x0070 && opcode <= 0x0077)
            verbose_printk("CALL (PC+P%i)", opcode & 7);
        else if (opcode >= 0x0080 && opcode <= 0x0087)
            verbose_printk("JUMP (PC+P%i)", opcode & 7);
        else if (opcode >= 0x0090 && opcode <= 0x009F)
            verbose_printk("RAISE 0x%x", opcode & 0xF);
        else if (opcode >= 0x00A0 && opcode <= 0x00AF)
            verbose_printk("EXCPT 0x%x", opcode & 0xF);
        else if ((opcode >= 0x1000 && opcode <= 0x13FF) || (opcode >= 0x1800 && opcode <= 0x1BFF))
            verbose_printk("IF !CC JUMP");
        else if ((opcode >= 0x1400 && opcode <= 0x17ff) || (opcode >= 0x1c00 && opcode <= 0x1fff))
            verbose_printk("IF CC JUMP");
        else if (opcode >= 0x2000 && opcode <= 0x2fff)
            verbose_printk("JUMP.S");
        else if (opcode >= 0xe080 && opcode <= 0xe0ff)
            verbose_printk("LSETUP");
        else if (opcode >= 0xe200 && opcode <= 0xe2ff)
            verbose_printk("JUMP.L");
        else if (opcode >= 0xe300 && opcode <= 0xe3ff)
            verbose_printk("CALL pcrel");
        else
            verbose_printk("0x%04x", opcode);
    }
}
#endif
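/*
 * dump_bfin_trace_buffer() - print the hardware trace buffer (TBUF), one
 * "Target / Source" pair per change of flow, decoding each address and the
 * instruction at the source so the flow can be followed without objdump.
 * With CONFIG_DEBUG_BFIN_HWTRACE_EXPAND the software copy of the expanded
 * trace is printed as well.
 */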
void dump_bfin_trace_buffer(void)
{
#ifdef CONFIG_DEBUG_VERBOSE
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
    int tflags, i = 0;
    char buf[150];
    unsigned short *addr;
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
    int j, index;
#endif

    trace_buffer_save(tflags);

    printk(KERN_NOTICE "Hardware Trace:\n");

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
    printk(KERN_NOTICE "WARNING: Expanded trace turned on - can not trace exceptions\n");
#endif

    if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) {
        for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) {
            decode_address(buf, (unsigned long)bfin_read_TBUF());
            printk(KERN_NOTICE "%4i Target : %s\n", i, buf);
            addr = (unsigned short *)bfin_read_TBUF();
            decode_address(buf, (unsigned long)addr);
            printk(KERN_NOTICE "     Source : %s ", buf);
            decode_instruction(addr);
            printk("\n");
        }
    }

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
    if (trace_buff_offset)
        index = trace_buff_offset / 4;
    else
        index = EXPAND_LEN;

    j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128;
    while (j) {
        decode_address(buf, software_trace_buff[index]);
        if (index > 0)
            index -= 1;
        else
            index = EXPAND_LEN;
        printk(KERN_NOTICE "%4i Target : %s\n", i, buf);
        decode_address(buf, software_trace_buff[index]);
        printk(KERN_NOTICE "     Source : %s ", buf);
        decode_instruction((unsigned short *)software_trace_buff[index]);
        printk("\n");
        if (index > 0)
            index -= 1;
        else
            index = EXPAND_LEN;
        j--;
        i++;
    }
#endif

    trace_buffer_restore(tflags);
#endif
#endif
}
EXPORT_SYMBOL(dump_bfin_trace_buffer);
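/*
 * is_valid_bugaddr() - arch hook for the generic BUG() handling
 * (report_bug() in trap_c above): confirm that the trapping address really
 * holds the Blackfin BUG opcode before treating the trap as a kernel BUG.
 */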
int is_valid_bugaddr(unsigned long addr)
{
    unsigned short opcode;

    if (!get_instruction(&opcode, (unsigned short *)addr))
        return 0;

    return opcode == BFIN_BUG_OPCODE;
}
/*
 * Checks to see if the address pointed to is either a
 * 16-bit CALL instruction, or a 32-bit CALL instruction
 */
static bool is_bfin_call(unsigned short *addr)
{
    unsigned short opcode = 0, *ins_addr;
    ins_addr = (unsigned short *)addr;

    if (!get_instruction(&opcode, ins_addr))
        return false;

    if ((opcode >= 0x0060 && opcode <= 0x0067) ||
        (opcode >= 0x0070 && opcode <= 0x0077))
        return true;

    ins_addr--;
    if (!get_instruction(&opcode, ins_addr))
        return false;

    if (opcode >= 0xE300 && opcode <= 0xE3FF)
        return true;

    return false;
}
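/*
 * show_stack() - dump a stack: first look for a plausible frame pointer by
 * scanning for saved return addresses (words that point just past a CALL
 * instruction), then walk the stack twice - once printing the raw data,
 * once printing the decoded return addresses and frame boundaries.
 */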
void show_stack(struct task_struct *task, unsigned long *stack)
{
    unsigned int *addr, *endstack, *fp = 0, *frame;
    unsigned short *ins_addr;
    char buf[150];
    unsigned int i, j, ret_addr, frame_no = 0;

    /*
     * If we have been passed a specific stack, use that one otherwise
     *    if we have been passed a task structure, use that, otherwise
     *    use the stack of where the variable "stack" exists
     */
    if (stack == NULL) {
        if (task) {
            /* We know this is a kernel stack, so this is the start/end */
            stack = (unsigned long *)task->thread.ksp;
            endstack = (unsigned int *)(((unsigned int)(stack) & ~(THREAD_SIZE - 1)) + THREAD_SIZE);
        } else {
            /* print out the existing stack info */
            stack = (unsigned long *)&stack;
            endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);
        }
    } else
        endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);

    printk(KERN_NOTICE "Stack info:\n");
    decode_address(buf, (unsigned int)stack);
    printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf);

    if (!access_ok(VERIFY_READ, stack, (unsigned int)endstack - (unsigned int)stack)) {
        printk(KERN_NOTICE "Invalid stack pointer\n");
        return;
    }

    /* First thing is to look for a frame pointer */
    for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) {
        if (*addr & 0x1)
            continue;
        ins_addr = (unsigned short *)*addr;
        ins_addr--;
        if (is_bfin_call(ins_addr))
            fp = addr - 1;

        if (fp) {
            /* Let's check to see if it is a frame pointer */
            while (fp >= (addr - 1) && fp < endstack
                   && fp && ((unsigned int) fp & 0x3) == 0)
                fp = (unsigned int *)*fp;
            if (fp == 0 || fp == endstack) {
                fp = addr - 1;
                break;
            }
            fp = 0;
        }
    }
    if (fp) {
        frame = fp;
        printk(KERN_NOTICE " FP: (0x%p)\n", fp);
    } else
        frame = 0;
    /*
     * Now that we think we know where things are, we
     * walk the stack again, this time printing things out
     * in case there is no frame pointer, we still look for
     * valid return addresses
     */

    /* First time print out data, next time, print out symbols */
    for (j = 0; j <= 1; j++) {
        if (j)
            printk(KERN_NOTICE "Return addresses in stack:\n");
        else
            printk(KERN_NOTICE " Memory from 0x%08lx to %p", ((long unsigned int)stack & ~0xF), endstack);

        fp = frame;
        frame_no = 0;

        for (addr = (unsigned int *)((unsigned int)stack & ~0xF), i = 0;
             addr < endstack; addr++, i++) {

            ret_addr = 0;

            if (!j && i % 8 == 0)
                printk(KERN_NOTICE "%p:", addr);

            /* if it is an odd address, or zero, just skip it */
            if (*addr & 0x1 || !*addr)
                goto print;

            ins_addr = (unsigned short *)*addr;

            /* Go back one instruction, and see if it is a CALL */
            ins_addr--;
            ret_addr = is_bfin_call(ins_addr);
 print:
            if (!j && stack == (unsigned long *)addr)
                printk("[%08x]", *addr);
            else if (ret_addr) {
                if (j) {
                    decode_address(buf, (unsigned int)*addr);
                    if (fp == addr)
                        printk(KERN_NOTICE " frame %2i : %s\n", frame_no, buf);
                    else
                        printk(KERN_NOTICE "    address : %s\n", buf);
                } else
                    printk("<%08x>", *addr);
            } else if (fp == addr) {
                if (j)
                    frame_no++;
                else
                    printk("(%08x)", *addr);

                fp = (unsigned int *)*addr;
            } else if (!j)
                printk(" %08x ", *addr);
        }
        if (!j)
            printk("\n");
    }
}
EXPORT_SYMBOL(show_stack);
void dump_stack(void)
{
    unsigned long stack;
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
    int tflags;
#endif
    trace_buffer_save(tflags);
    dump_bfin_trace_buffer();
    show_stack(current, &stack);
    trace_buffer_restore(tflags);
}
EXPORT_SYMBOL(dump_stack);
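/*
 * dump_bfin_process() - describe the context the fault happened in
 * (exception, interrupt or kernel process, read from IPEND) and, if the
 * current task pointer still looks sane, its name, pid and memory layout.
 */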
void dump_bfin_process(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_VERBOSE
    /* We should be able to look at fp->ipend, but we don't push it on the
     * stack all the time, so do this until we fix that */
    unsigned int context = bfin_read_IPEND();

    if (oops_in_progress)
        verbose_printk(KERN_EMERG "Kernel OOPS in progress\n");

    if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR)
        verbose_printk(KERN_NOTICE "HW Error context\n");
    else if (context & 0x0020)
        verbose_printk(KERN_NOTICE "Deferred Exception context\n");
    else if (context & 0x3FC0)
        verbose_printk(KERN_NOTICE "Interrupt context\n");
    else if (context & 0x4000)
        verbose_printk(KERN_NOTICE "Deferred Interrupt context\n");
    else if (context & 0x8000)
        verbose_printk(KERN_NOTICE "Kernel process context\n");

    /* Because we are crashing, and pointers could be bad, we check things
     * pretty closely before we use them
     */
    if ((unsigned long)current >= FIXED_CODE_START &&
        !((unsigned long)current & 0x3) && current->pid) {
        verbose_printk(KERN_NOTICE "CURRENT PROCESS:\n");
        if (current->comm >= (char *)FIXED_CODE_START)
            verbose_printk(KERN_NOTICE "COMM=%s PID=%d",
                current->comm, current->pid);
        else
            verbose_printk(KERN_NOTICE "COMM= invalid");

        printk(KERN_CONT " CPU=%d\n", current_thread_info()->cpu);
        if (!((unsigned long)current->mm & 0x3) && (unsigned long)current->mm >= FIXED_CODE_START)
            verbose_printk(KERN_NOTICE
                "TEXT = 0x%p-0x%p  DATA = 0x%p-0x%p\n"
                " BSS = 0x%p-0x%p  USER-STACK = 0x%p\n\n",
                (void *)current->mm->start_code,
                (void *)current->mm->end_code,
                (void *)current->mm->start_data,
                (void *)current->mm->end_data,
                (void *)current->mm->end_data,
                (void *)current->mm->brk,
                (void *)current->mm->start_stack);
        else
            verbose_printk(KERN_NOTICE "invalid mm\n");
    } else
        verbose_printk(KERN_NOTICE
            "No Valid process in current context\n");
#endif
}
void dump_bfin_mem(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_VERBOSE
    unsigned short *addr, *erraddr, val = 0, err = 0;
    char sti = 0, buf[6];

    erraddr = (void *)fp->pc;

    verbose_printk(KERN_NOTICE "return address: [0x%p]; contents of:", erraddr);

    for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10;
         addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10;
         addr++) {
        if (!((unsigned long)addr & 0xF))
            verbose_printk(KERN_NOTICE "0x%p: ", addr);

        if (!get_instruction(&val, addr))
            sprintf(buf, "????");
        else
            sprintf(buf, "%04x", val);

        if (addr == erraddr) {
            verbose_printk("[%s]", buf);
            err = val;
        } else
            verbose_printk(" %s ", buf);

        /* Do any previous instructions turn on interrupts? */
        if (addr <= erraddr &&                     /* in the past */
            ((val >= 0x0040 && val <= 0x0047) ||   /* STI instruction */
             val == 0x017b))                       /* [SP++] = RETI */
            sti = 1;
    }

    verbose_printk("\n");

    /* Hardware error interrupts can be deferred */
    if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR &&
        oops_in_progress)) {
        verbose_printk(KERN_NOTICE "Looks like this was a deferred error - sorry\n");
#ifndef CONFIG_DEBUG_HWERR
        verbose_printk(KERN_NOTICE
            "The remaining message may be meaningless\n"
            "You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n");
#else
        /* If we are handling only one peripheral interrupt
         * and current mm and pid are valid, and the last error
         * was in that user space process's text area
         * print it out - because that is where the problem exists
         */
        if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) &&
            (current->pid && current->mm)) {
            /* And the last RETI points to the current userspace context */
            if ((fp + 1)->pc >= current->mm->start_code &&
                (fp + 1)->pc <= current->mm->end_code) {
                verbose_printk(KERN_NOTICE "It might be better to look around here :\n");
                verbose_printk(KERN_NOTICE "-------------------------------------------\n");
                show_regs(fp + 1);
                verbose_printk(KERN_NOTICE "-------------------------------------------\n");
            }
        }
#endif
    }
#endif
}
void show_regs(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_VERBOSE
    char buf[150];
    struct irqaction *action;
    unsigned int i;
    unsigned long flags = 0;
    unsigned int cpu = raw_smp_processor_id();
    unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();

    verbose_printk(KERN_NOTICE "\n");
    if (CPUID != bfin_cpuid())
        verbose_printk(KERN_NOTICE "Compiled for cpu family 0x%04x (Rev %d), "
            "but running on:0x%04x (Rev %d)\n",
            CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid());

    verbose_printk(KERN_NOTICE "ADSP-%s-0.%d",
        CPU, bfin_compiled_revid());

    if (bfin_compiled_revid() != bfin_revid())
        verbose_printk("(Detected 0.%d)", bfin_revid());

    verbose_printk(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n",
        get_cclk()/1000000, get_sclk()/1000000,
#ifdef CONFIG_MPU
        "mpu on"
#else
        "mpu off"
#endif
        );

    verbose_printk(KERN_NOTICE "%s", linux_banner);

    verbose_printk(KERN_NOTICE "\nSEQUENCER STATUS:\t\t%s\n", print_tainted());
    verbose_printk(KERN_NOTICE " SEQSTAT: %08lx  IPEND: %04lx  IMASK: %04lx  SYSCFG: %04lx\n",
        (long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg);
    if (fp->ipend & EVT_IRPTEN)
        verbose_printk(KERN_NOTICE "  Global Interrupts Disabled (IPEND[4])\n");
    if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 |
            EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR)))
        verbose_printk(KERN_NOTICE "  Peripheral interrupts masked off\n");
    if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14)))
        verbose_printk(KERN_NOTICE "  Kernel interrupts masked off\n");
    if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) {
        verbose_printk(KERN_NOTICE "  HWERRCAUSE: 0x%lx\n",
            (fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14);
        /* If the error was from the EBIU, print it out */
        if (bfin_read_EBIU_ERRMST() & CORE_ERROR) {
            verbose_printk(KERN_NOTICE "  EBIU Error Reason  : 0x%04x\n",
                bfin_read_EBIU_ERRMST());
            verbose_printk(KERN_NOTICE "  EBIU Error Address : 0x%08x\n",
                bfin_read_EBIU_ERRADD());
        }
    }
    verbose_printk(KERN_NOTICE "  EXCAUSE   : 0x%lx\n",
        fp->seqstat & SEQSTAT_EXCAUSE);
    for (i = 2; i <= 15 ; i++) {
        if (fp->ipend & (1 << i)) {
            if (i != 4) {
                decode_address(buf, bfin_read32(EVT0 + 4*i));
                verbose_printk(KERN_NOTICE "  physical IVG%i asserted : %s\n", i, buf);
            } else
                verbose_printk(KERN_NOTICE "  interrupts disabled\n");
        }
    }

    /* if no interrupts are going off, don't print this out */
    if (fp->ipend & ~0x3F) {
        for (i = 0; i < (NR_IRQS - 1); i++) {
            if (!in_atomic)
                raw_spin_lock_irqsave(&irq_desc[i].lock, flags);

            action = irq_desc[i].action;
            if (!action)
                goto unlock;

            decode_address(buf, (unsigned int)action->handler);
            verbose_printk(KERN_NOTICE "  logical irq %3d mapped  : %s", i, buf);
            for (action = action->next; action; action = action->next) {
                decode_address(buf, (unsigned int)action->handler);
                verbose_printk(", %s", buf);
            }
            verbose_printk("\n");
 unlock:
            if (!in_atomic)
                raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        }
    }

    decode_address(buf, fp->rete);
    verbose_printk(KERN_NOTICE " RETE: %s\n", buf);
    decode_address(buf, fp->retn);
    verbose_printk(KERN_NOTICE " RETN: %s\n", buf);
    decode_address(buf, fp->retx);
    verbose_printk(KERN_NOTICE " RETX: %s\n", buf);
    decode_address(buf, fp->rets);
    verbose_printk(KERN_NOTICE " RETS: %s\n", buf);
    decode_address(buf, fp->pc);
    verbose_printk(KERN_NOTICE " PC  : %s\n", buf);

    if (((long)fp->seqstat & SEQSTAT_EXCAUSE) &&
        (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) {
        decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
        verbose_printk(KERN_NOTICE "DCPLB_FAULT_ADDR: %s\n", buf);
        decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
        verbose_printk(KERN_NOTICE "ICPLB_FAULT_ADDR: %s\n", buf);
    }

    verbose_printk(KERN_NOTICE "PROCESSOR STATE:\n");
    verbose_printk(KERN_NOTICE " R0 : %08lx    R1 : %08lx    R2 : %08lx    R3 : %08lx\n",
        fp->r0, fp->r1, fp->r2, fp->r3);
    verbose_printk(KERN_NOTICE " R4 : %08lx    R5 : %08lx    R6 : %08lx    R7 : %08lx\n",
        fp->r4, fp->r5, fp->r6, fp->r7);
    verbose_printk(KERN_NOTICE " P0 : %08lx    P1 : %08lx    P2 : %08lx    P3 : %08lx\n",
        fp->p0, fp->p1, fp->p2, fp->p3);
    verbose_printk(KERN_NOTICE " P4 : %08lx    P5 : %08lx    FP : %08lx    SP : %08lx\n",
        fp->p4, fp->p5, fp->fp, (long)fp);
    verbose_printk(KERN_NOTICE " LB0: %08lx    LT0: %08lx    LC0: %08lx\n",
        fp->lb0, fp->lt0, fp->lc0);
    verbose_printk(KERN_NOTICE " LB1: %08lx    LT1: %08lx    LC1: %08lx\n",
        fp->lb1, fp->lt1, fp->lc1);
    verbose_printk(KERN_NOTICE " B0 : %08lx    L0 : %08lx    M0 : %08lx    I0 : %08lx\n",
        fp->b0, fp->l0, fp->m0, fp->i0);
    verbose_printk(KERN_NOTICE " B1 : %08lx    L1 : %08lx    M1 : %08lx    I1 : %08lx\n",
        fp->b1, fp->l1, fp->m1, fp->i1);
    verbose_printk(KERN_NOTICE " B2 : %08lx    L2 : %08lx    M2 : %08lx    I2 : %08lx\n",
        fp->b2, fp->l2, fp->m2, fp->i2);
    verbose_printk(KERN_NOTICE " B3 : %08lx    L3 : %08lx    M3 : %08lx    I3 : %08lx\n",
        fp->b3, fp->l3, fp->m3, fp->i3);
    verbose_printk(KERN_NOTICE "A0.w: %08lx   A0.x: %08lx   A1.w: %08lx   A1.x: %08lx\n",
        fp->a0w, fp->a0x, fp->a1w, fp->a1x);

    verbose_printk(KERN_NOTICE "USP : %08lx  ASTAT: %08lx\n",
        rdusp(), fp->astat);

    verbose_printk(KERN_NOTICE "\n");
#endif
}
#ifdef CONFIG_SYS_BFIN_SPINLOCK_L1
asmlinkage int sys_bfin_spinlock(int *spinlock)__attribute__((l1_text));
#endif

static DEFINE_SPINLOCK(bfin_spinlock_lock);

asmlinkage int sys_bfin_spinlock(int *p)
{
    int ret, tmp = 0;

    spin_lock(&bfin_spinlock_lock);    /* This would also hold kernel preemption. */
    ret = get_user(tmp, p);
    if (likely(ret == 0)) {
        if (unlikely(tmp))
            ret = 1;
        else
            put_user(1, p);
    }
    spin_unlock(&bfin_spinlock_lock);
    return ret;
}
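/*
 * bfin_request_exception()/bfin_free_exception() - let drivers claim one of
 * the user-defined exception vectors: a slot can only be taken while it
 * still points at ex_replaceable, and only the owning handler can free it.
 */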
int bfin_request_exception(unsigned int exception, void (*handler)(void))
{
    void (*curr_handler)(void);

    if (exception > 0x3F)
        return -EINVAL;

    curr_handler = ex_table[exception];

    if (curr_handler != ex_replaceable)
        return -EBUSY;

    ex_table[exception] = handler;

    return 0;
}
EXPORT_SYMBOL(bfin_request_exception);
int bfin_free_exception(unsigned int exception, void (*handler)(void))
{
    void (*curr_handler)(void);

    if (exception > 0x3F)
        return -EINVAL;

    curr_handler = ex_table[exception];

    if (curr_handler != handler)
        return -EBUSY;

    ex_table[exception] = ex_replaceable;

    return 0;
}
EXPORT_SYMBOL(bfin_free_exception);
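/*
 * panic_cplb_error() - called by the CPLB management code when a CPLB fault
 * cannot be recovered; print why, dump the process, memory and registers,
 * and panic.
 */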
void panic_cplb_error(int cplb_panic, struct pt_regs *fp)
{
    switch (cplb_panic) {
    case CPLB_NO_UNLOCKED:
        printk(KERN_EMERG "All CPLBs are locked\n");
        break;
    case CPLB_PROT_VIOL:
        break;
    case CPLB_NO_ADDR_MATCH:
        break;
    case CPLB_UNKNOWN_ERR:
        printk(KERN_EMERG "Unknown CPLB Exception\n");
        break;
    }

    oops_in_progress = 1;

    dump_bfin_process(fp);
    dump_bfin_mem(fp);
    show_regs(fp);
    dump_stack();
    panic("Unrecoverable event");
}