/* provide some functions which dump the trace buffer, in a nice way for people
 * to read it, and understand what is going on
 *
 * Copyright 2004-2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */
9 #include <linux/kernel.h>
10 #include <linux/hardirq.h>
11 #include <linux/thread_info.h>
13 #include <linux/uaccess.h>
14 #include <linux/module.h>
15 #include <linux/kallsyms.h>
16 #include <linux/err.h>
19 #include <asm/trace.h>
20 #include <asm/fixed_code.h>
21 #include <asm/traps.h>
23 void decode_address(char *buf
, unsigned long address
)
25 struct task_struct
*p
;
27 unsigned long flags
, offset
;
28 unsigned char in_atomic
= (bfin_read_IPEND() & 0x10) || in_atomic();
31 #ifdef CONFIG_KALLSYMS
32 unsigned long symsize
;
39 buf
+= sprintf(buf
, "<0x%08lx> ", address
);
41 #ifdef CONFIG_KALLSYMS
42 /* look up the address and see if we are in kernel space */
43 symname
= kallsyms_lookup(address
, &symsize
, &offset
, &modname
, namebuf
);
46 /* yeah! kernel space! */
49 sprintf(buf
, "{ %s%s%s%s + 0x%lx }",
50 delim
, modname
, delim
, symname
,
51 (unsigned long)offset
);
56 if (address
>= FIXED_CODE_START
&& address
< FIXED_CODE_END
) {
57 /* Problem in fixed code section? */
58 strcat(buf
, "/* Maybe fixed code section */");
61 } else if (address
< CONFIG_BOOT_LOAD
) {
62 /* Problem somewhere before the kernel start address */
63 strcat(buf
, "/* Maybe null pointer? */");
66 } else if (address
>= COREMMR_BASE
) {
67 strcat(buf
, "/* core mmrs */");
70 } else if (address
>= SYSMMR_BASE
) {
71 strcat(buf
, "/* system mmrs */");
74 } else if (address
>= L1_ROM_START
&& address
< L1_ROM_START
+ L1_ROM_LENGTH
) {
75 strcat(buf
, "/* on-chip L1 ROM */");
80 * Don't walk any of the vmas if we are oopsing, it has been known
81 * to cause problems - corrupt vmas (kernel crashes) cause double faults
83 if (oops_in_progress
) {
84 strcat(buf
, "/* kernel dynamic memory (maybe user-space) */");
88 /* looks like we're off in user-land, so let's walk all the
89 * mappings of all our processes and see if we can't be a whee
92 write_lock_irqsave(&tasklist_lock
, flags
);
94 mm
= (in_atomic
? p
->mm
: get_task_mm(p
));
98 if (!down_read_trylock(&mm
->mmap_sem
)) {
104 for (n
= rb_first(&mm
->mm_rb
); n
; n
= rb_next(n
)) {
105 struct vm_area_struct
*vma
;
107 vma
= rb_entry(n
, struct vm_area_struct
, vm_rb
);
109 if (address
>= vma
->vm_start
&& address
< vma
->vm_end
) {
111 char *name
= p
->comm
;
112 struct file
*file
= vma
->vm_file
;
115 char *d_name
= d_path(&file
->f_path
, _tmpbuf
,
121 /* FLAT does not have its text aligned to the start of
122 * the map while FDPIC ELF does ...
125 /* before we can check flat/fdpic, we need to
126 * make sure current is valid
128 if ((unsigned long)current
>= FIXED_CODE_START
&&
129 !((unsigned long)current
& 0x3)) {
131 (address
> current
->mm
->start_code
) &&
132 (address
< current
->mm
->end_code
))
133 offset
= address
- current
->mm
->start_code
;
135 offset
= (address
- vma
->vm_start
) +
136 (vma
->vm_pgoff
<< PAGE_SHIFT
);
138 sprintf(buf
, "[ %s + 0x%lx ]", name
, offset
);
140 sprintf(buf
, "[ %s vma:0x%lx-0x%lx]",
141 name
, vma
->vm_start
, vma
->vm_end
);
143 up_read(&mm
->mmap_sem
);
148 sprintf(buf
, "[ %s ] dynamic memory", name
);
154 up_read(&mm
->mmap_sem
);
160 * we were unable to find this address anywhere,
161 * or some MMs were skipped because they were in use.
163 sprintf(buf
, "/* kernel dynamic memory */");
166 write_unlock_irqrestore(&tasklist_lock
, flags
);
169 #define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)
/*
 * Similar to get_user, do some address checking, then dereference.
 * Return true on success, false on bad address.
 */
175 bool get_instruction(unsigned short *val
, unsigned short *address
)
177 unsigned long addr
= (unsigned long)address
;
179 /* Check for odd addresses */
183 /* MMR region will never have instructions */
184 if (addr
>= SYSMMR_BASE
)
187 switch (bfin_mem_access_type(addr
, 2)) {
188 case BFIN_MEM_ACCESS_CORE
:
189 case BFIN_MEM_ACCESS_CORE_ONLY
:
192 case BFIN_MEM_ACCESS_DMA
:
193 dma_memcpy(val
, address
, 2);
195 case BFIN_MEM_ACCESS_ITEST
:
196 isram_memcpy(val
, address
, 2);
198 default: /* invalid access */
/*
 * decode the instruction if we are printing out the trace, as it
 * makes things easier to follow, without running it through objdump.
 * These are the normal instructions which cause change of flow, which
 * would be at the source of the trace buffer.
 */
209 #if defined(CONFIG_DEBUG_BFIN_HWTRACE_ON)
210 static void decode_instruction(unsigned short *address
)
212 unsigned short opcode
;
214 if (get_instruction(&opcode
, address
)) {
215 if (opcode
== 0x0010)
217 else if (opcode
== 0x0011)
219 else if (opcode
== 0x0012)
221 else if (opcode
== 0x0013)
223 else if (opcode
== 0x0014)
225 else if (opcode
== 0x0025)
227 else if (opcode
>= 0x0040 && opcode
<= 0x0047)
228 pr_cont("STI R%i", opcode
& 7);
229 else if (opcode
>= 0x0050 && opcode
<= 0x0057)
230 pr_cont("JUMP (P%i)", opcode
& 7);
231 else if (opcode
>= 0x0060 && opcode
<= 0x0067)
232 pr_cont("CALL (P%i)", opcode
& 7);
233 else if (opcode
>= 0x0070 && opcode
<= 0x0077)
234 pr_cont("CALL (PC+P%i)", opcode
& 7);
235 else if (opcode
>= 0x0080 && opcode
<= 0x0087)
236 pr_cont("JUMP (PC+P%i)", opcode
& 7);
237 else if (opcode
>= 0x0090 && opcode
<= 0x009F)
238 pr_cont("RAISE 0x%x", opcode
& 0xF);
239 else if (opcode
>= 0x00A0 && opcode
<= 0x00AF)
240 pr_cont("EXCPT 0x%x", opcode
& 0xF);
241 else if ((opcode
>= 0x1000 && opcode
<= 0x13FF) || (opcode
>= 0x1800 && opcode
<= 0x1BFF))
242 pr_cont("IF !CC JUMP");
243 else if ((opcode
>= 0x1400 && opcode
<= 0x17ff) || (opcode
>= 0x1c00 && opcode
<= 0x1fff))
244 pr_cont("IF CC JUMP");
245 else if (opcode
>= 0x2000 && opcode
<= 0x2fff)
247 else if (opcode
>= 0xe080 && opcode
<= 0xe0ff)
249 else if (opcode
>= 0xe200 && opcode
<= 0xe2ff)
251 else if (opcode
>= 0xe300 && opcode
<= 0xe3ff)
252 pr_cont("CALL pcrel");
254 pr_cont("0x%04x", opcode
);
260 void dump_bfin_trace_buffer(void)
262 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
265 unsigned short *addr
;
266 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
270 trace_buffer_save(tflags
);
272 pr_notice("Hardware Trace:\n");
274 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
275 pr_notice("WARNING: Expanded trace turned on - can not trace exceptions\n");
278 if (likely(bfin_read_TBUFSTAT() & TBUFCNT
)) {
279 for (; bfin_read_TBUFSTAT() & TBUFCNT
; i
++) {
280 decode_address(buf
, (unsigned long)bfin_read_TBUF());
281 pr_notice("%4i Target : %s\n", i
, buf
);
282 addr
= (unsigned short *)bfin_read_TBUF();
283 decode_address(buf
, (unsigned long)addr
);
284 pr_notice(" Source : %s ", buf
);
285 decode_instruction(addr
);
290 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
291 if (trace_buff_offset
)
292 index
= trace_buff_offset
/ 4;
296 j
= (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN
) * 128;
298 decode_address(buf
, software_trace_buff
[index
]);
299 pr_notice("%4i Target : %s\n", i
, buf
);
303 decode_address(buf
, software_trace_buff
[index
]);
304 pr_notice(" Source : %s ", buf
);
305 decode_instruction((unsigned short *)software_trace_buff
[index
]);
315 trace_buffer_restore(tflags
);
318 EXPORT_SYMBOL(dump_bfin_trace_buffer
);
320 void dump_bfin_process(struct pt_regs
*fp
)
322 /* We should be able to look at fp->ipend, but we don't push it on the
323 * stack all the time, so do this until we fix that */
324 unsigned int context
= bfin_read_IPEND();
326 if (oops_in_progress
)
327 pr_emerg("Kernel OOPS in progress\n");
329 if (context
& 0x0020 && (fp
->seqstat
& SEQSTAT_EXCAUSE
) == VEC_HWERR
)
330 pr_notice("HW Error context\n");
331 else if (context
& 0x0020)
332 pr_notice("Deferred Exception context\n");
333 else if (context
& 0x3FC0)
334 pr_notice("Interrupt context\n");
335 else if (context
& 0x4000)
336 pr_notice("Deferred Interrupt context\n");
337 else if (context
& 0x8000)
338 pr_notice("Kernel process context\n");
340 /* Because we are crashing, and pointers could be bad, we check things
341 * pretty closely before we use them
343 if ((unsigned long)current
>= FIXED_CODE_START
&&
344 !((unsigned long)current
& 0x3) && current
->pid
) {
345 pr_notice("CURRENT PROCESS:\n");
346 if (current
->comm
>= (char *)FIXED_CODE_START
)
347 pr_notice("COMM=%s PID=%d",
348 current
->comm
, current
->pid
);
350 pr_notice("COMM= invalid");
352 pr_cont(" CPU=%d\n", current_thread_info()->cpu
);
353 if (!((unsigned long)current
->mm
& 0x3) &&
354 (unsigned long)current
->mm
>= FIXED_CODE_START
) {
355 pr_notice("TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n",
356 (void *)current
->mm
->start_code
,
357 (void *)current
->mm
->end_code
,
358 (void *)current
->mm
->start_data
,
359 (void *)current
->mm
->end_data
);
360 pr_notice(" BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n",
361 (void *)current
->mm
->end_data
,
362 (void *)current
->mm
->brk
,
363 (void *)current
->mm
->start_stack
);
365 pr_notice("invalid mm\n");
367 pr_notice("No Valid process in current context\n");
370 void dump_bfin_mem(struct pt_regs
*fp
)
372 unsigned short *addr
, *erraddr
, val
= 0, err
= 0;
373 char sti
= 0, buf
[6];
375 erraddr
= (void *)fp
->pc
;
377 pr_notice("return address: [0x%p]; contents of:", erraddr
);
379 for (addr
= (unsigned short *)((unsigned long)erraddr
& ~0xF) - 0x10;
380 addr
< (unsigned short *)((unsigned long)erraddr
& ~0xF) + 0x10;
382 if (!((unsigned long)addr
& 0xF))
383 pr_notice("0x%p: ", addr
);
385 if (!get_instruction(&val
, addr
)) {
387 sprintf(buf
, "????");
389 sprintf(buf
, "%04x", val
);
391 if (addr
== erraddr
) {
392 pr_cont("[%s]", buf
);
395 pr_cont(" %s ", buf
);
397 /* Do any previous instructions turn on interrupts? */
398 if (addr
<= erraddr
&& /* in the past */
399 ((val
>= 0x0040 && val
<= 0x0047) || /* STI instruction */
400 val
== 0x017b)) /* [SP++] = RETI */
406 /* Hardware error interrupts can be deferred */
407 if (unlikely(sti
&& (fp
->seqstat
& SEQSTAT_EXCAUSE
) == VEC_HWERR
&&
409 pr_notice("Looks like this was a deferred error - sorry\n");
410 #ifndef CONFIG_DEBUG_HWERR
411 pr_notice("The remaining message may be meaningless\n");
412 pr_notice("You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n");
414 /* If we are handling only one peripheral interrupt
415 * and current mm and pid are valid, and the last error
416 * was in that user space process's text area
417 * print it out - because that is where the problem exists
419 if ((!(((fp
)->ipend
& ~0x30) & (((fp
)->ipend
& ~0x30) - 1))) &&
420 (current
->pid
&& current
->mm
)) {
421 /* And the last RETI points to the current userspace context */
422 if ((fp
+ 1)->pc
>= current
->mm
->start_code
&&
423 (fp
+ 1)->pc
<= current
->mm
->end_code
) {
424 pr_notice("It might be better to look around here :\n");
425 pr_notice("-------------------------------------------\n");
427 pr_notice("-------------------------------------------\n");
434 void show_regs(struct pt_regs
*fp
)
437 struct irqaction
*action
;
439 unsigned long flags
= 0;
440 unsigned int cpu
= raw_smp_processor_id();
441 unsigned char in_atomic
= (bfin_read_IPEND() & 0x10) || in_atomic();
444 if (CPUID
!= bfin_cpuid())
445 pr_notice("Compiled for cpu family 0x%04x (Rev %d), "
446 "but running on:0x%04x (Rev %d)\n",
447 CPUID
, bfin_compiled_revid(), bfin_cpuid(), bfin_revid());
449 pr_notice("ADSP-%s-0.%d",
450 CPU
, bfin_compiled_revid());
452 if (bfin_compiled_revid() != bfin_revid())
453 pr_cont("(Detected 0.%d)", bfin_revid());
455 pr_cont(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n",
456 get_cclk()/1000000, get_sclk()/1000000,
464 pr_notice("%s", linux_banner
);
466 pr_notice("\nSEQUENCER STATUS:\t\t%s\n", print_tainted());
467 pr_notice(" SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n",
468 (long)fp
->seqstat
, fp
->ipend
, cpu_pda
[raw_smp_processor_id()].ex_imask
, fp
->syscfg
);
469 if (fp
->ipend
& EVT_IRPTEN
)
470 pr_notice(" Global Interrupts Disabled (IPEND[4])\n");
471 if (!(cpu_pda
[raw_smp_processor_id()].ex_imask
& (EVT_IVG13
| EVT_IVG12
| EVT_IVG11
|
472 EVT_IVG10
| EVT_IVG9
| EVT_IVG8
| EVT_IVG7
| EVT_IVTMR
)))
473 pr_notice(" Peripheral interrupts masked off\n");
474 if (!(cpu_pda
[raw_smp_processor_id()].ex_imask
& (EVT_IVG15
| EVT_IVG14
)))
475 pr_notice(" Kernel interrupts masked off\n");
476 if ((fp
->seqstat
& SEQSTAT_EXCAUSE
) == VEC_HWERR
) {
477 pr_notice(" HWERRCAUSE: 0x%lx\n",
478 (fp
->seqstat
& SEQSTAT_HWERRCAUSE
) >> 14);
480 /* If the error was from the EBIU, print it out */
481 if (bfin_read_EBIU_ERRMST() & CORE_ERROR
) {
482 pr_notice(" EBIU Error Reason : 0x%04x\n",
483 bfin_read_EBIU_ERRMST());
484 pr_notice(" EBIU Error Address : 0x%08x\n",
485 bfin_read_EBIU_ERRADD());
489 pr_notice(" EXCAUSE : 0x%lx\n",
490 fp
->seqstat
& SEQSTAT_EXCAUSE
);
491 for (i
= 2; i
<= 15 ; i
++) {
492 if (fp
->ipend
& (1 << i
)) {
494 decode_address(buf
, bfin_read32(EVT0
+ 4*i
));
495 pr_notice(" physical IVG%i asserted : %s\n", i
, buf
);
497 pr_notice(" interrupts disabled\n");
501 /* if no interrupts are going off, don't print this out */
502 if (fp
->ipend
& ~0x3F) {
503 for (i
= 0; i
< (NR_IRQS
- 1); i
++) {
505 raw_spin_lock_irqsave(&irq_desc
[i
].lock
, flags
);
507 action
= irq_desc
[i
].action
;
511 decode_address(buf
, (unsigned int)action
->handler
);
512 pr_notice(" logical irq %3d mapped : %s", i
, buf
);
513 for (action
= action
->next
; action
; action
= action
->next
) {
514 decode_address(buf
, (unsigned int)action
->handler
);
515 pr_cont(", %s", buf
);
520 raw_spin_unlock_irqrestore(&irq_desc
[i
].lock
, flags
);
524 decode_address(buf
, fp
->rete
);
525 pr_notice(" RETE: %s\n", buf
);
526 decode_address(buf
, fp
->retn
);
527 pr_notice(" RETN: %s\n", buf
);
528 decode_address(buf
, fp
->retx
);
529 pr_notice(" RETX: %s\n", buf
);
530 decode_address(buf
, fp
->rets
);
531 pr_notice(" RETS: %s\n", buf
);
532 decode_address(buf
, fp
->pc
);
533 pr_notice(" PC : %s\n", buf
);
535 if (((long)fp
->seqstat
& SEQSTAT_EXCAUSE
) &&
536 (((long)fp
->seqstat
& SEQSTAT_EXCAUSE
) != VEC_HWERR
)) {
537 decode_address(buf
, cpu_pda
[cpu
].dcplb_fault_addr
);
538 pr_notice("DCPLB_FAULT_ADDR: %s\n", buf
);
539 decode_address(buf
, cpu_pda
[cpu
].icplb_fault_addr
);
540 pr_notice("ICPLB_FAULT_ADDR: %s\n", buf
);
543 pr_notice("PROCESSOR STATE:\n");
544 pr_notice(" R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
545 fp
->r0
, fp
->r1
, fp
->r2
, fp
->r3
);
546 pr_notice(" R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
547 fp
->r4
, fp
->r5
, fp
->r6
, fp
->r7
);
548 pr_notice(" P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n",
549 fp
->p0
, fp
->p1
, fp
->p2
, fp
->p3
);
550 pr_notice(" P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n",
551 fp
->p4
, fp
->p5
, fp
->fp
, (long)fp
);
552 pr_notice(" LB0: %08lx LT0: %08lx LC0: %08lx\n",
553 fp
->lb0
, fp
->lt0
, fp
->lc0
);
554 pr_notice(" LB1: %08lx LT1: %08lx LC1: %08lx\n",
555 fp
->lb1
, fp
->lt1
, fp
->lc1
);
556 pr_notice(" B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n",
557 fp
->b0
, fp
->l0
, fp
->m0
, fp
->i0
);
558 pr_notice(" B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n",
559 fp
->b1
, fp
->l1
, fp
->m1
, fp
->i1
);
560 pr_notice(" B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n",
561 fp
->b2
, fp
->l2
, fp
->m2
, fp
->i2
);
562 pr_notice(" B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n",
563 fp
->b3
, fp
->l3
, fp
->m3
, fp
->i3
);
564 pr_notice("A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n",
565 fp
->a0w
, fp
->a0x
, fp
->a1w
, fp
->a1x
);
567 pr_notice("USP : %08lx ASTAT: %08lx\n",