/* provide some functions which dump the trace buffer, in a nice way for people
 * to read it, and understand what is going on
 *
 * Copyright 2004-2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <linux/kernel.h>
#include <linux/hardirq.h>
#include <linux/thread_info.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/irq.h>

#include <asm/trace.h>
#include <asm/fixed_code.h>
#include <asm/traps.h>
#include <asm/irq_handler.h>

void decode_address(char *buf, unsigned long address)
{
	struct task_struct *p;
	struct mm_struct *mm;
	unsigned long flags, offset;
	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
	struct rb_node *n;

#ifdef CONFIG_KALLSYMS
	unsigned long symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[KSYM_NAME_LEN];
#endif

	buf += sprintf(buf, "<0x%08lx> ", address);

#ifdef CONFIG_KALLSYMS
	/* look up the address and see if we are in kernel space */
	symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);

	if (symname) {
		/* yeah! kernel space! */
		if (!modname)
			modname = delim = "";
		sprintf(buf, "{ %s%s%s%s + 0x%lx }",
			delim, modname, delim, symname,
			(unsigned long)offset);
		return;
	}
#endif

	if (address >= FIXED_CODE_START && address < FIXED_CODE_END) {
		/* Problem in fixed code section? */
		strcat(buf, "/* Maybe fixed code section */");
		return;

	} else if (address < CONFIG_BOOT_LOAD) {
		/* Problem somewhere before the kernel start address */
		strcat(buf, "/* Maybe null pointer? */");
		return;

	} else if (address >= COREMMR_BASE) {
		strcat(buf, "/* core mmrs */");
		return;

	} else if (address >= SYSMMR_BASE) {
		strcat(buf, "/* system mmrs */");
		return;

	} else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) {
		strcat(buf, "/* on-chip L1 ROM */");
		return;

	} else if (address >= L1_SCRATCH_START && address < L1_SCRATCH_START + L1_SCRATCH_LENGTH) {
		strcat(buf, "/* on-chip scratchpad */");
		return;

	} else if (address >= physical_mem_end && address < ASYNC_BANK0_BASE) {
		strcat(buf, "/* unconnected memory */");
		return;

	} else if (address >= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE && address < BOOT_ROM_START) {
		strcat(buf, "/* reserved memory */");
		return;

	} else if (address >= L1_DATA_A_START && address < L1_DATA_A_START + L1_DATA_A_LENGTH) {
		strcat(buf, "/* on-chip Data Bank A */");
		return;

	} else if (address >= L1_DATA_B_START && address < L1_DATA_B_START + L1_DATA_B_LENGTH) {
		strcat(buf, "/* on-chip Data Bank B */");
		return;
	}

	/*
	 * Don't walk any of the vmas if we are oopsing, it has been known
	 * to cause problems - corrupt vmas (kernel crashes) cause double faults
	 */
	if (oops_in_progress) {
		strcat(buf, "/* kernel dynamic memory (maybe user-space) */");
		return;
	}

	/* looks like we're off in user-land, so let's walk all the
	 * mappings of all our processes and see if we can't be a whee
	 * bit more specific
	 */
	write_lock_irqsave(&tasklist_lock, flags);
	for_each_process(p) {
		mm = (in_atomic ? p->mm : get_task_mm(p));
		if (!mm)
			continue;

		if (!down_read_trylock(&mm->mmap_sem)) {
			if (!in_atomic)
				mmput(mm);
			continue;
		}

		for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
			struct vm_area_struct *vma;

			vma = rb_entry(n, struct vm_area_struct, vm_rb);

			if (address >= vma->vm_start && address < vma->vm_end) {
				char _tmpbuf[256];
				char *name = p->comm;
				struct file *file = vma->vm_file;

				if (file) {
					char *d_name = d_path(&file->f_path, _tmpbuf,
						      sizeof(_tmpbuf));
					if (!IS_ERR(d_name))
						name = d_name;
				}

				/* FLAT does not have its text aligned to the start of
				 * the map while FDPIC ELF does ...
				 */

				/* before we can check flat/fdpic, we need to
				 * make sure current is valid
				 */
				if ((unsigned long)current >= FIXED_CODE_START &&
				    !((unsigned long)current & 0x3)) {
					if (current->mm &&
					    (address > current->mm->start_code) &&
					    (address < current->mm->end_code))
						offset = address - current->mm->start_code;
					else
						offset = (address - vma->vm_start) +
							 (vma->vm_pgoff << PAGE_SHIFT);

					sprintf(buf, "[ %s + 0x%lx ]", name, offset);
				} else
					sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
						name, vma->vm_start, vma->vm_end);

				up_read(&mm->mmap_sem);
				if (!in_atomic)
					mmput(mm);

				if (buf[0] == '\0')
					sprintf(buf, "[ %s ] dynamic memory", name);

				goto done;
			}
		}

		up_read(&mm->mmap_sem);
		if (!in_atomic)
			mmput(mm);
	}

	/*
	 * we were unable to find this address anywhere,
	 * or some MMs were skipped because they were in use.
	 */
	sprintf(buf, "/* kernel dynamic memory */");

done:
	write_unlock_irqrestore(&tasklist_lock, flags);
}
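
/*
 * Callers pass in a buffer large enough for the longest annotation
 * (show_regs() below uses roughly 150 bytes), for example:
 *
 *	char buf[150];
 *
 *	decode_address(buf, fp->pc);
 *	pr_notice(" PC : %s\n", buf);
 */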

#define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)
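
/*
 * EXPAND_LEN is the highest valid index into the expanded (software) trace
 * buffer: e.g. with CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN = 1 it works out to
 * (2 * 256) - 1 = 511, i.e. 512 32-bit entries.
 */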

/*
 * Similar to get_user, do some address checking, then dereference
 * Return true on success, false on bad address
 */
bool get_mem16(unsigned short *val, unsigned short *address)
{
	unsigned long addr = (unsigned long)address;

	/* Check for odd addresses */
	if (addr & 0x1)
		return false;

	switch (bfin_mem_access_type(addr, 2)) {
	case BFIN_MEM_ACCESS_CORE:
	case BFIN_MEM_ACCESS_CORE_ONLY:
		*val = *address;
		return true;
	case BFIN_MEM_ACCESS_DMA:
		dma_memcpy(val, address, 2);
		return true;
	case BFIN_MEM_ACCESS_ITEST:
		isram_memcpy(val, address, 2);
		return true;
	default: /* invalid access */
		return false;
	}
}
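
/*
 * Example (illustrative): fetching one 16-bit word from a possibly
 * unreadable address before printing it, much as dump_bfin_mem() does below:
 *
 *	unsigned short word;
 *
 *	if (get_mem16(&word, addr))
 *		pr_cont(" %04x ", word);
 *	else
 *		pr_cont(" ???? ");
 */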

bool get_instruction(unsigned int *val, unsigned short *address)
{
	unsigned long addr = (unsigned long)address;
	unsigned short opcode0, opcode1;

	/* Check for odd addresses */
	if (addr & 0x1)
		return false;

	/* MMR region will never have instructions */
	if (addr >= SYSMMR_BASE)
		return false;

	/* Scratchpad will never have instructions */
	if (addr >= L1_SCRATCH_START && addr < L1_SCRATCH_START + L1_SCRATCH_LENGTH)
		return false;

	/* Data banks will never have instructions */
	if (addr >= BOOT_ROM_START + BOOT_ROM_LENGTH && addr < L1_CODE_START)
		return false;

	if (!get_mem16(&opcode0, address))
		return false;

	/* was this a 32-bit instruction? If so, get the next 16 bits */
	if ((opcode0 & 0xc000) == 0xc000) {
		if (!get_mem16(&opcode1, address + 1))
			return false;
		*val = (opcode0 << 16) + opcode1;
	} else
		*val = opcode0;

	return true;
}
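
/*
 * Blackfin instructions are either 16 or 32 bits wide; a first 16-bit word
 * with both top bits set (0xc000) marks a 32-bit instruction, so e.g. the
 * LDSTidxI group (0xe4xxxxxx) needs the second halfword fetched as well,
 * while a plain 16-bit LDST opcode (0x9xxx) does not.
 */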

#if defined(CONFIG_DEBUG_BFIN_HWTRACE_ON)
/*
 * decode the instruction if we are printing out the trace, as it
 * makes things easier to follow, without running it through objdump
 * Decode the change of flow, and the common load/store instructions
 * which are the main cause for faults, and discontinuities in the trace
 * buffer
 */

#define ProgCtrl_opcode 0x0000
#define ProgCtrl_poprnd_bits 0
#define ProgCtrl_poprnd_mask 0xf
#define ProgCtrl_prgfunc_bits 4
#define ProgCtrl_prgfunc_mask 0xf
#define ProgCtrl_code_bits 8
#define ProgCtrl_code_mask 0xff
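
/*
 * Worked example: opcode 0x0035 has code = 0x00 (ProgCtrl), prgfunc = 3 and
 * poprnd = 5, so decode_ProgCtrl_0() below prints "CLI R5".
 */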

static void decode_ProgCtrl_0(unsigned int opcode)
{
	int poprnd  = ((opcode >> ProgCtrl_poprnd_bits) & ProgCtrl_poprnd_mask);
	int prgfunc = ((opcode >> ProgCtrl_prgfunc_bits) & ProgCtrl_prgfunc_mask);

	if (prgfunc == 0 && poprnd == 0)
		pr_cont("NOP");
	else if (prgfunc == 1 && poprnd == 0)
		pr_cont("RTS");
	else if (prgfunc == 1 && poprnd == 1)
		pr_cont("RTI");
	else if (prgfunc == 1 && poprnd == 2)
		pr_cont("RTX");
	else if (prgfunc == 1 && poprnd == 3)
		pr_cont("RTN");
	else if (prgfunc == 1 && poprnd == 4)
		pr_cont("RTE");
	else if (prgfunc == 2 && poprnd == 0)
		pr_cont("IDLE");
	else if (prgfunc == 2 && poprnd == 3)
		pr_cont("CSYNC");
	else if (prgfunc == 2 && poprnd == 4)
		pr_cont("SSYNC");
	else if (prgfunc == 2 && poprnd == 5)
		pr_cont("EMUEXCPT");
	else if (prgfunc == 3)
		pr_cont("CLI R%i", poprnd);
	else if (prgfunc == 4)
		pr_cont("STI R%i", poprnd);
	else if (prgfunc == 5)
		pr_cont("JUMP (P%i)", poprnd);
	else if (prgfunc == 6)
		pr_cont("CALL (P%i)", poprnd);
	else if (prgfunc == 7)
		pr_cont("CALL (PC + P%i)", poprnd);
	else if (prgfunc == 8)
		pr_cont("JUMP (PC + P%i)", poprnd);
	else if (prgfunc == 9)
		pr_cont("RAISE %i", poprnd);
	else if (prgfunc == 10)
		pr_cont("EXCPT %i", poprnd);
	else
		pr_cont("0x%04x", opcode);
}

#define BRCC_opcode 0x1000
#define BRCC_offset_bits 0
#define BRCC_offset_mask 0x3ff
#define BRCC_B_bits 10
#define BRCC_B_mask 0x1
#define BRCC_T_bits 11
#define BRCC_T_mask 0x1
#define BRCC_code_bits 12
#define BRCC_code_mask 0xf

static void decode_BRCC_0(unsigned int opcode)
{
	int B = ((opcode >> BRCC_B_bits) & BRCC_B_mask);
	int T = ((opcode >> BRCC_T_bits) & BRCC_T_mask);

	pr_cont("IF %sCC JUMP pcrel %s", T ? "" : "!", B ? "(BP)" : "");
}

#define CALLa_opcode 0xe2000000
#define CALLa_addr_bits 0
#define CALLa_addr_mask 0xffffff
#define CALLa_S_bits 24
#define CALLa_S_mask 0x1
#define CALLa_code_bits 25
#define CALLa_code_mask 0x7f

static void decode_CALLa_0(unsigned int opcode)
{
	int S = ((opcode >> (CALLa_S_bits - 16)) & CALLa_S_mask);

	if (S)
		pr_cont("CALL pcrel");
	else
		pr_cont("JUMP.L");
}

#define LoopSetup_opcode 0xe0800000
#define LoopSetup_eoffset_bits 0
#define LoopSetup_eoffset_mask 0x3ff
#define LoopSetup_dontcare_bits 10
#define LoopSetup_dontcare_mask 0x3
#define LoopSetup_reg_bits 12
#define LoopSetup_reg_mask 0xf
#define LoopSetup_soffset_bits 16
#define LoopSetup_soffset_mask 0xf
#define LoopSetup_c_bits 20
#define LoopSetup_c_mask 0x1
#define LoopSetup_rop_bits 21
#define LoopSetup_rop_mask 0x3
#define LoopSetup_code_bits 23
#define LoopSetup_code_mask 0x1ff

static void decode_LoopSetup_0(unsigned int opcode)
{
	int c   = ((opcode >> LoopSetup_c_bits) & LoopSetup_c_mask);
	int reg = ((opcode >> LoopSetup_reg_bits) & LoopSetup_reg_mask);
	int rop = ((opcode >> LoopSetup_rop_bits) & LoopSetup_rop_mask);

	pr_cont("LSETUP <> LC%i", c);
	if (rop & 1)
		pr_cont("= P%i", reg);
}

#define DspLDST_opcode 0x9c00
#define DspLDST_reg_bits 0
#define DspLDST_reg_mask 0x7
#define DspLDST_i_bits 3
#define DspLDST_i_mask 0x3
#define DspLDST_m_bits 5
#define DspLDST_m_mask 0x3
#define DspLDST_aop_bits 7
#define DspLDST_aop_mask 0x3
#define DspLDST_W_bits 9
#define DspLDST_W_mask 0x1
#define DspLDST_code_bits 10
#define DspLDST_code_mask 0x3f

static void decode_dspLDST_0(unsigned int opcode)
{
	int i   = ((opcode >> DspLDST_i_bits) & DspLDST_i_mask);
	int m   = ((opcode >> DspLDST_m_bits) & DspLDST_m_mask);
	int W   = ((opcode >> DspLDST_W_bits) & DspLDST_W_mask);
	int aop = ((opcode >> DspLDST_aop_bits) & DspLDST_aop_mask);
	int reg = ((opcode >> DspLDST_reg_bits) & DspLDST_reg_mask);

	if (W == 1)
		pr_cont(" = R%i", reg);
}

#define LDST_opcode 0x9000
#define LDST_reg_bits 0
#define LDST_reg_mask 0x7
#define LDST_ptr_bits 3
#define LDST_ptr_mask 0x7
#define LDST_Z_bits 6
#define LDST_Z_mask 0x1
#define LDST_aop_bits 7
#define LDST_aop_mask 0x3
#define LDST_W_bits 9
#define LDST_W_mask 0x1
#define LDST_sz_bits 10
#define LDST_sz_mask 0x3
#define LDST_code_bits 12
#define LDST_code_mask 0xf

static void decode_LDST_0(unsigned int opcode)
{
	int Z   = ((opcode >> LDST_Z_bits) & LDST_Z_mask);
	int W   = ((opcode >> LDST_W_bits) & LDST_W_mask);
	int sz  = ((opcode >> LDST_sz_bits) & LDST_sz_mask);
	int aop = ((opcode >> LDST_aop_bits) & LDST_aop_mask);
	int reg = ((opcode >> LDST_reg_bits) & LDST_reg_mask);
	int ptr = ((opcode >> LDST_ptr_bits) & LDST_ptr_mask);

	if (W == 0)
		pr_cont("%s%i = ", (sz == 0 && Z == 1) ? "P" : "R", reg);

	pr_cont("[P%i", ptr);

	switch (aop) {
	case 0:
		pr_cont("++]");
		break;
	case 1:
		pr_cont("--]");
		break;
	default:
		pr_cont("]");
		break;
	}

	if (W == 1)
		pr_cont(" = %s%i ", (sz == 0 && Z == 1) ? "P" : "R", reg);
}

#define LDSTii_opcode 0xa000
#define LDSTii_reg_bit 0
#define LDSTii_reg_mask 0x7
#define LDSTii_ptr_bit 3
#define LDSTii_ptr_mask 0x7
#define LDSTii_offset_bit 6
#define LDSTii_offset_mask 0xf
#define LDSTii_op_bit 10
#define LDSTii_op_mask 0x3
#define LDSTii_W_bit 12
#define LDSTii_W_mask 0x1
#define LDSTii_code_bit 13
#define LDSTii_code_mask 0x7

static void decode_LDSTii_0(unsigned int opcode)
{
	int reg = ((opcode >> LDSTii_reg_bit) & LDSTii_reg_mask);
	int ptr = ((opcode >> LDSTii_ptr_bit) & LDSTii_ptr_mask);
	int offset = ((opcode >> LDSTii_offset_bit) & LDSTii_offset_mask);
	int op = ((opcode >> LDSTii_op_bit) & LDSTii_op_mask);
	int W = ((opcode >> LDSTii_W_bit) & LDSTii_W_mask);

	if (W == 0)
		pr_cont("%s%i = %s[P%i + %i]", op == 3 ? "R" : "P", reg,
			op == 1 || op == 2 ? "" : "W", ptr, offset);
	else
		pr_cont("%s[P%i + %i] = %s%i", op == 0 ? "" : "W", ptr,
			offset, op == 3 ? "P" : "R", reg);
}

#define LDSTidxI_opcode 0xe4000000
#define LDSTidxI_offset_bits 0
#define LDSTidxI_offset_mask 0xffff
#define LDSTidxI_reg_bits 16
#define LDSTidxI_reg_mask 0x7
#define LDSTidxI_ptr_bits 19
#define LDSTidxI_ptr_mask 0x7
#define LDSTidxI_sz_bits 22
#define LDSTidxI_sz_mask 0x3
#define LDSTidxI_Z_bits 24
#define LDSTidxI_Z_mask 0x1
#define LDSTidxI_W_bits 25
#define LDSTidxI_W_mask 0x1
#define LDSTidxI_code_bits 26
#define LDSTidxI_code_mask 0x3f

static void decode_LDSTidxI_0(unsigned int opcode)
{
	int Z      = ((opcode >> LDSTidxI_Z_bits) & LDSTidxI_Z_mask);
	int W      = ((opcode >> LDSTidxI_W_bits) & LDSTidxI_W_mask);
	int sz     = ((opcode >> LDSTidxI_sz_bits) & LDSTidxI_sz_mask);
	int reg    = ((opcode >> LDSTidxI_reg_bits) & LDSTidxI_reg_mask);
	int ptr    = ((opcode >> LDSTidxI_ptr_bits) & LDSTidxI_ptr_mask);
	int offset = ((opcode >> LDSTidxI_offset_bits) & LDSTidxI_offset_mask);

	if (W == 0)
		pr_cont("%s%i = ", sz == 0 && Z == 1 ? "P" : "R", reg);

	pr_cont("[P%i + %s0x%x]", ptr, offset & 0x20 ? "-" : "",
		(offset & 0x1f) << 2);

	if (W == 0 && sz != 0) {
		/* byte/half-word loads also carry a zero/sign-extension flag (Z) */
	}

	if (W == 1)
		pr_cont("= %s%i", (sz == 0 && Z == 1) ? "P" : "R", reg);
}

static void decode_opcode(unsigned int opcode)
{
	if (opcode == BFIN_BUG_OPCODE)
		pr_cont("BUG");
	else if ((opcode & 0xffffff00) == ProgCtrl_opcode)
		decode_ProgCtrl_0(opcode);
	else if ((opcode & 0xfffff000) == BRCC_opcode)
		decode_BRCC_0(opcode);
	else if ((opcode & 0xfffff000) == 0x2000)
		pr_cont("JUMP.S");
	else if ((opcode & 0xfe000000) == CALLa_opcode)
		decode_CALLa_0(opcode);
	else if ((opcode & 0xff8000C0) == LoopSetup_opcode)
		decode_LoopSetup_0(opcode);
	else if ((opcode & 0xfffffc00) == DspLDST_opcode)
		decode_dspLDST_0(opcode);
	else if ((opcode & 0xfffff000) == LDST_opcode)
		decode_LDST_0(opcode);
	else if ((opcode & 0xffffe000) == LDSTii_opcode)
		decode_LDSTii_0(opcode);
	else if ((opcode & 0xfc000000) == LDSTidxI_opcode)
		decode_LDSTidxI_0(opcode);
	else if (opcode & 0xffff0000)
		pr_cont("0x%08x", opcode);
	else
		pr_cont("0x%04x", opcode);
}
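
/*
 * A multi-issue line on Blackfin is 64 bits wide: one 32-bit instruction
 * followed by two 16-bit instructions. decode_instruction() below walks the
 * extra slots at address + 2 and address + 3 (addresses count 16-bit words),
 * and skips opcodes starting 0xe8 (the LINK/UNLINK group), which happen to
 * have BIT_MULTI_INS set without being multi-issue.
 */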

#define BIT_MULTI_INS 0x08000000
static void decode_instruction(unsigned short *address)
{
	unsigned int opcode;

	if (!get_instruction(&opcode, address))
		return;

	decode_opcode(opcode);

	/* If things are a 32-bit instruction, it has the possibility of being
	 * a multi-issue instruction (a 32-bit, and 2 16-bit instructions)
	 * This test collides with the unlink instruction, so disallow that
	 */
	if ((opcode & 0xc0000000) == 0xc0000000 &&
	    (opcode & BIT_MULTI_INS) &&
	    (opcode & 0xe8000000) != 0xe8000000) {
		pr_cont(" || ");
		if (!get_instruction(&opcode, address + 2))
			return;
		decode_opcode(opcode);
		pr_cont(" || ");
		if (!get_instruction(&opcode, address + 3))
			return;
		decode_opcode(opcode);
	}
}

void dump_bfin_trace_buffer(void)
{
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int tflags, i = 0, fault = 0;
	char buf[150];
	unsigned short *addr;
	unsigned int cpu = raw_smp_processor_id();
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	int j, index;
#endif

	trace_buffer_save(tflags);

	pr_notice("Hardware Trace:\n");

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	pr_notice("WARNING: Expanded trace turned on - can not trace exceptions\n");
#endif
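
	/*
	 * TBUF is a FIFO of discontinuity (branch) addresses: each read of
	 * TBUF below pops one entry, entries come out as target/source pairs,
	 * and TBUFSTAT's TBUFCNT field reports how many are left.
	 */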

	if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) {
		for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) {
			addr = (unsigned short *)bfin_read_TBUF();
			decode_address(buf, (unsigned long)addr);
			pr_notice("%4i Target : %s\n", i, buf);
			/* Normally, the faulting instruction doesn't go into
			 * the trace buffer, (since it doesn't commit), so
			 * we print out the fault address here
			 */
			if (!fault && addr == ((unsigned short *)evt_ivhw)) {
				addr = (unsigned short *)bfin_read_TBUF();
				decode_address(buf, (unsigned long)addr);
				pr_notice(" FAULT : %s ", buf);
				decode_instruction(addr);
				pr_cont("\n");
				fault = 1;
				continue;
			}
			if (!fault && addr == (unsigned short *)trap &&
			    (cpu_pda[cpu].seqstat & SEQSTAT_EXCAUSE) > VEC_EXCPT15) {
				decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
				pr_notice(" FAULT : %s ", buf);
				decode_instruction((unsigned short *)cpu_pda[cpu].icplb_fault_addr);
				pr_cont("\n");
				fault = 1;
			}
			addr = (unsigned short *)bfin_read_TBUF();
			decode_address(buf, (unsigned long)addr);
			pr_notice(" Source : %s ", buf);
			decode_instruction(addr);
			pr_cont("\n");
		}
	}
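
	/*
	 * With CONFIG_DEBUG_BFIN_HWTRACE_EXPAND the hardware FIFO is
	 * periodically spilled into software_trace_buff[]; the loop below
	 * walks that buffer printing the same target/source pairs, with j
	 * counting pairs, i.e. ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN)
	 * * 256) entries / 2.
	 */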

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	if (trace_buff_offset)
		index = trace_buff_offset / 4;
	else
		index = EXPAND_LEN;

	j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128;
	while (j) {
		decode_address(buf, software_trace_buff[index]);
		pr_notice("%4i Target : %s\n", i, buf);
		index -= 1;
		if (index < 0)
			index = EXPAND_LEN;
		decode_address(buf, software_trace_buff[index]);
		pr_notice(" Source : %s ", buf);
		decode_instruction((unsigned short *)software_trace_buff[index]);
		pr_cont("\n");
		index -= 1;
		if (index < 0)
			index = EXPAND_LEN;
		j--;
		i++;
	}
#endif

	trace_buffer_restore(tflags);
#endif
}
EXPORT_SYMBOL(dump_bfin_trace_buffer);

void dump_bfin_process(struct pt_regs *fp)
{
	/* We should be able to look at fp->ipend, but we don't push it on the
	 * stack all the time, so do this until we fix that */
	unsigned int context = bfin_read_IPEND();

	if (oops_in_progress)
		pr_emerg("Kernel OOPS in progress\n");

	if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR)
		pr_notice("HW Error context\n");
	else if (context & 0x0020)
		pr_notice("Deferred Exception context\n");
	else if (context & 0x3FC0)
		pr_notice("Interrupt context\n");
	else if (context & 0x4000)
		pr_notice("Deferred Interrupt context\n");
	else if (context & 0x8000)
		pr_notice("Kernel process context\n");

	/* Because we are crashing, and pointers could be bad, we check things
	 * pretty closely before we use them
	 */
	if ((unsigned long)current >= FIXED_CODE_START &&
	    !((unsigned long)current & 0x3) && current->pid) {
		pr_notice("CURRENT PROCESS:\n");
		if (current->comm >= (char *)FIXED_CODE_START)
			pr_notice("COMM=%s PID=%d",
				current->comm, current->pid);
		else
			pr_notice("COMM= invalid");

		pr_cont(" CPU=%d\n", current_thread_info()->cpu);
		if (!((unsigned long)current->mm & 0x3) &&
		    (unsigned long)current->mm >= FIXED_CODE_START) {
			pr_notice("TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n",
				(void *)current->mm->start_code,
				(void *)current->mm->end_code,
				(void *)current->mm->start_data,
				(void *)current->mm->end_data);
			pr_notice(" BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n",
				(void *)current->mm->end_data,
				(void *)current->mm->brk,
				(void *)current->mm->start_stack);
		} else
			pr_notice("invalid mm\n");
	} else
		pr_notice("No Valid process in current context\n");
}

void dump_bfin_mem(struct pt_regs *fp)
{
	unsigned short *addr, *erraddr, val = 0, err = 0;
	char sti = 0, buf[6];

	erraddr = (void *)fp->pc;

	pr_notice("return address: [0x%p]; contents of:", erraddr);

	for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10;
	     addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10;
	     addr++) {
		if (!((unsigned long)addr & 0xF))
			pr_notice("0x%p: ", addr);

		if (!get_mem16(&val, addr)) {
			sprintf(buf, "????");
		} else
			sprintf(buf, "%04x", val);

		if (addr == erraddr) {
			pr_cont("[%s]", buf);
		} else
			pr_cont(" %s ", buf);

		/* Do any previous instructions turn on interrupts? */
		if (addr <= erraddr && /* in the past */
		    ((val >= 0x0040 && val <= 0x0047) || /* STI instruction */
		     val == 0x017b)) /* [SP++] = RETI */
			sti = 1;
	}
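
	/*
	 * 0x0040..0x0047 are the 16-bit encodings of STI R0..R7 (ProgCtrl
	 * with prgfunc = 4, see the decoder above), and 0x017b is the
	 * [SP++] = RETI pop that re-enables interrupts, so 'sti' flags any
	 * earlier instruction that could have turned interrupts back on.
	 */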

	/* Hardware error interrupts can be deferred */
	if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR &&
	    oops_in_progress)) {
		pr_notice("Looks like this was a deferred error - sorry\n");
#ifndef CONFIG_DEBUG_HWERR
		pr_notice("The remaining message may be meaningless\n");
		pr_notice("You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n");
#endif

		/* If we are handling only one peripheral interrupt
		 * and current mm and pid are valid, and the last error
		 * was in that user space process's text area
		 * print it out - because that is where the problem exists
		 */
		if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) &&
		    (current->pid && current->mm)) {
			/* And the last RETI points to the current userspace context */
			if ((fp + 1)->pc >= current->mm->start_code &&
			    (fp + 1)->pc <= current->mm->end_code) {
				pr_notice("It might be better to look around here :\n");
				pr_notice("-------------------------------------------\n");
				show_regs(fp + 1);
				pr_notice("-------------------------------------------\n");
			}
		}
	}
}

void show_regs(struct pt_regs *fp)
{
	char buf[150];
	struct irqaction *action;
	unsigned int i;
	unsigned long flags = 0;
	unsigned int cpu = raw_smp_processor_id();
	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();

	if (CPUID != bfin_cpuid())
		pr_notice("Compiled for cpu family 0x%04x (Rev %d), "
			"but running on:0x%04x (Rev %d)\n",
			CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid());

	pr_notice("ADSP-%s-0.%d",
		CPU, bfin_compiled_revid());

	if (bfin_compiled_revid() != bfin_revid())
		pr_cont("(Detected 0.%d)", bfin_revid());

	pr_cont(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n",
		get_cclk()/1000000, get_sclk()/1000000,
#ifdef CONFIG_MPU
		"mpu on"
#else
		"mpu off"
#endif
	);

	pr_notice("%s", linux_banner);

	pr_notice("\nSEQUENCER STATUS:\t\t%s\n", print_tainted());
	pr_notice(" SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n",
		(long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg);
	if (fp->ipend & EVT_IRPTEN)
		pr_notice(" Global Interrupts Disabled (IPEND[4])\n");
	if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 |
			EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR)))
		pr_notice(" Peripheral interrupts masked off\n");
	if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14)))
		pr_notice(" Kernel interrupts masked off\n");
	if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) {
		pr_notice(" HWERRCAUSE: 0x%lx\n",
			(fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14);
		/* If the error was from the EBIU, print it out */
		if (bfin_read_EBIU_ERRMST() & CORE_ERROR) {
			pr_notice(" EBIU Error Reason : 0x%04x\n",
				bfin_read_EBIU_ERRMST());
			pr_notice(" EBIU Error Address : 0x%08x\n",
				bfin_read_EBIU_ERRADD());
		}
	}
	pr_notice(" EXCAUSE : 0x%lx\n",
		fp->seqstat & SEQSTAT_EXCAUSE);
	for (i = 2; i <= 15 ; i++) {
		if (fp->ipend & (1 << i)) {
			if (i != 4) {
				decode_address(buf, bfin_read32(EVT0 + 4*i));
				pr_notice(" physical IVG%i asserted : %s\n", i, buf);
			} else
				pr_notice(" interrupts disabled\n");
		}
	}

	/* if no interrupts are going off, don't print this out */
	if (fp->ipend & ~0x3F) {
		for (i = 0; i < (NR_IRQS - 1); i++) {
			if (!in_atomic)
				raw_spin_lock_irqsave(&irq_desc[i].lock, flags);

			action = irq_desc[i].action;
			if (!action)
				goto unlock;

			decode_address(buf, (unsigned int)action->handler);
			pr_notice(" logical irq %3d mapped : %s", i, buf);
			for (action = action->next; action; action = action->next) {
				decode_address(buf, (unsigned int)action->handler);
				pr_cont(", %s", buf);
			}
			pr_cont("\n");
unlock:
			if (!in_atomic)
				raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
		}
	}

	decode_address(buf, fp->rete);
	pr_notice(" RETE: %s\n", buf);
	decode_address(buf, fp->retn);
	pr_notice(" RETN: %s\n", buf);
	decode_address(buf, fp->retx);
	pr_notice(" RETX: %s\n", buf);
	decode_address(buf, fp->rets);
	pr_notice(" RETS: %s\n", buf);
	decode_address(buf, fp->pc);
	pr_notice(" PC : %s\n", buf);

	if (((long)fp->seqstat & SEQSTAT_EXCAUSE) &&
	    (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) {
		decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
		pr_notice("DCPLB_FAULT_ADDR: %s\n", buf);
		decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
		pr_notice("ICPLB_FAULT_ADDR: %s\n", buf);
	}

	pr_notice("PROCESSOR STATE:\n");
	pr_notice(" R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
		fp->r0, fp->r1, fp->r2, fp->r3);
	pr_notice(" R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
		fp->r4, fp->r5, fp->r6, fp->r7);
	pr_notice(" P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n",
		fp->p0, fp->p1, fp->p2, fp->p3);
	pr_notice(" P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n",
		fp->p4, fp->p5, fp->fp, (long)fp);
	pr_notice(" LB0: %08lx LT0: %08lx LC0: %08lx\n",
		fp->lb0, fp->lt0, fp->lc0);
	pr_notice(" LB1: %08lx LT1: %08lx LC1: %08lx\n",
		fp->lb1, fp->lt1, fp->lc1);
	pr_notice(" B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n",
		fp->b0, fp->l0, fp->m0, fp->i0);
	pr_notice(" B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n",
		fp->b1, fp->l1, fp->m1, fp->i1);
	pr_notice(" B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n",
		fp->b2, fp->l2, fp->m2, fp->i2);
	pr_notice(" B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n",
		fp->b3, fp->l3, fp->m3, fp->i3);
	pr_notice("A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n",
		fp->a0w, fp->a0x, fp->a1w, fp->a1x);

	pr_notice("USP : %08lx ASTAT: %08lx\n",
		rdusp(), fp->astat);