/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999 by Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
11 #include <linux/config.h>
12 #include <linux/init.h>
14 #include <linux/sched.h>
15 #include <linux/smp.h>
16 #include <linux/smp_lock.h>
17 #include <linux/spinlock.h>
19 #include <asm/branch.h>
20 #include <asm/cachectl.h>
21 #include <asm/pgtable.h>
23 #include <asm/bootinfo.h>
24 #include <asm/ptrace.h>
25 #include <asm/watch.h>
26 #include <asm/system.h>
27 #include <asm/uaccess.h>
28 #include <asm/mmu_context.h>
30 extern asmlinkage
void __xtlb_mod(void);
31 extern asmlinkage
void __xtlb_tlbl(void);
32 extern asmlinkage
void __xtlb_tlbs(void);
33 extern asmlinkage
void handle_adel(void);
34 extern asmlinkage
void handle_ades(void);
35 extern asmlinkage
void handle_ibe(void);
36 extern asmlinkage
void handle_dbe(void);
37 extern asmlinkage
void handle_sys(void);
38 extern asmlinkage
void handle_bp(void);
39 extern asmlinkage
void handle_ri(void);
40 extern asmlinkage
void handle_cpu(void);
41 extern asmlinkage
void handle_ov(void);
42 extern asmlinkage
void handle_tr(void);
43 extern asmlinkage
void handle_fpe(void);
44 extern asmlinkage
void handle_watch(void);
45 extern asmlinkage
void handle_reserved(void);
47 static char *cpu_names
[] = CPU_NAMES
;
49 char watch_available
= 0;
50 char dedicated_iv_available
= 0;
51 char vce_available
= 0;
52 char mips4_available
= 0;
54 int kstack_depth_to_print
= 24;
/*
 * This constant is for searching for possible module text segments.
 * MODULE_RANGE is a guess of how much space is likely to be vmalloced.
 */
#define MODULE_RANGE (8*1024*1024)
/*
 * show_stack(): dumps stack words from sp up to the next page boundary,
 * reading each word with __get_user() and printing it as %016lx; prints
 * "(Bad stack address)" on a faulting read.
 * NOTE(review): damaged paste -- embedded original line numbers remain and
 * several original lines (local declarations, loop close, function braces)
 * are missing.  Text kept byte-for-byte; comments only.
 */
63 * This routine abuses get_user()/put_user() to reference pointers
64 * with at least a bit of error checking ...
66 void show_stack(unsigned long *sp
)
75 while ((unsigned long) stack
& (PAGE_SIZE
- 1)) {
76 unsigned long stackdata
;
78 if (__get_user(stackdata
, stack
++)) {
79 printk(" (Bad stack address)");
83 printk(" %016lx", stackdata
);
/*
 * show_trace(): walks stack words up to the page boundary and prints any
 * value that falls inside kernel text (_stext.._etext) or the module/vmalloc
 * range (VMALLOC_START..+MODULE_RANGE) as a possible return address.
 * NOTE(review): damaged paste -- embedded original line numbers remain and
 * interior lines are missing.  Kept byte-for-byte; comments only.
 */
95 void show_trace(unsigned long *sp
)
99 unsigned long kernel_start
, kernel_end
;
100 unsigned long module_start
, module_end
;
101 extern char _stext
, _etext
;
106 kernel_start
= (unsigned long) &_stext
;
107 kernel_end
= (unsigned long) &_etext
;
108 module_start
= VMALLOC_START
;
109 module_end
= module_start
+ MODULE_RANGE
;
111 printk("\nCall Trace:");
113 while ((unsigned long) stack
& (PAGE_SIZE
-1)) {
116 if (__get_user(addr
, stack
++)) {
117 printk(" (Bad stack address)\n");
122 * If the address is either in the text segment of the
123 * kernel, or in the region which contains vmalloc'ed
124 * memory, it *may* be the address of a calling
125 * routine; if so, print it so that someone tracing
126 * down the cause of the crash will be able to figure
127 * out the call path that was taken.
130 if ((addr
>= kernel_start
&& addr
< kernel_end
) ||
131 (addr
>= module_start
&& addr
< module_end
)) {
133 /* Since our kernel is still at KSEG0,
134 * truncate the address so that ksymoops
137 printk(" [<%08x>]", (unsigned int) addr
);
/*
 * show_code(): disassembly-style dump of the 9 instruction words around pc
 * (pc-3 .. pc+5); the word at pc itself is bracketed <...>.  Prints
 * "(Bad address in epc)" if a word cannot be read.
 * NOTE(review): damaged paste -- local declarations and function braces are
 * missing and embedded line numbers remain.  Kept byte-for-byte.
 */
146 void show_code(unsigned int *pc
)
152 for(i
= -3 ; i
< 6 ; i
++) {
154 if (__get_user(insn
, pc
+ i
)) {
155 printk(" (Bad address in epc)\n");
158 printk("%c%08x%c",(i
?' ':'<'),insn
,(i
?' ':'>'));
/*
 * die(): kernel oops path.  Returns immediately in user mode; otherwise
 * takes die_lock, prints the message and error code, the current process,
 * then dumps stack (regs->regs[29] is the MIPS stack pointer), call trace
 * and code around the EPC.
 * NOTE(review): damaged paste -- interior lines (e.g. the final exit after
 * unlocking) are missing and embedded line numbers remain.  Kept
 * byte-for-byte; comments only.
 */
164 void die(const char * str
, struct pt_regs
* regs
, unsigned long err
)
166 if (user_mode(regs
)) /* Just return if in user mode. */
170 spin_lock_irq(&die_lock
);
171 printk("%s: %04lx\n", str
, err
& 0xffff);
173 printk("Process %s (pid: %d, stackpage=%08lx)\n",
174 current
->comm
, current
->pid
, (unsigned long) current
);
175 show_stack((unsigned long *) regs
->regs
[29]);
176 show_trace((unsigned long *) regs
->regs
[29]);
177 show_code((unsigned int *) regs
->cp0_epc
);
179 spin_unlock_irq(&die_lock
);
/*
 * die_if_kernel(): oops only when the trap happened in kernel mode.
 * NOTE(review): damaged paste -- the body line following the !user_mode()
 * test (presumably the die() call) is missing from this chunk; recover from
 * pristine source.  Kept byte-for-byte.
 */
183 void die_if_kernel(const char * str
, struct pt_regs
* regs
, unsigned long err
)
185 if (!user_mode(regs
))
189 void do_ov(struct pt_regs
*regs
)
191 if (compute_return_epc(regs
))
193 force_sig(SIGFPE
, current
);
#ifdef CONFIG_MIPS_FPE_MODULE
/* Hook allowing an external FPU-emulator module to claim FP exceptions. */
static void (*fpe_handler)(struct pt_regs *regs, unsigned int fcr31);

/*
 * Register_fpe/unregister_fpe are for debugging purposes only.  To make
 * this hack work a bit better there is no error checking.
 */
int register_fpe(void (*handler)(struct pt_regs *regs, unsigned int fcr31))
{
	fpe_handler = handler;
	return 0;
}

int unregister_fpe(void (*handler)(struct pt_regs *regs, unsigned int fcr31))
{
	/* NOTE(review): body reconstructed -- original lines missing from paste. */
	fpe_handler = NULL;
	return 0;
}
#endif
/*
 * do_fpe(): floating-point exception handler.  Visible behavior: optional
 * dispatch to a module-registered fpe_handler; a flush-to-zero retry path
 * keyed on fcr31 bit 17 (0x20000); for unimplemented-operation traps it
 * fetches the faulting instruction (delay-slot aware via CAUSEF_BD), logs
 * it, advances the EPC, and -- per the commented-out force_sig -- currently
 * only logs that SIGFPE *should* be sent rather than delivering it.
 * NOTE(review): damaged paste -- the inline __asm__ body, several control
 * lines and braces are missing; embedded line numbers remain.  Kept
 * byte-for-byte; comments only.
 */
217 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
219 void do_fpe(struct pt_regs
*regs
, unsigned long fcr31
)
223 extern void simfp(unsigned int);
225 #ifdef CONFIG_MIPS_FPE_MODULE
226 if (fpe_handler
!= NULL
) {
227 fpe_handler(regs
, fcr31
);
231 if (fcr31
& 0x20000) {
232 /* Retry instruction with flush to zero ... */
233 if (!(fcr31
& (1<<24))) {
234 printk("Setting flush to zero for %s.\n",
238 __asm__
__volatile__(
244 pc
= regs
->cp0_epc
+ ((regs
->cp0_cause
& CAUSEF_BD
) ? 4 : 0);
245 if (get_user(insn
, (unsigned int *)pc
)) {
246 /* XXX Can this happen? */
247 force_sig(SIGSEGV
, current
);
250 printk(KERN_DEBUG
"Unimplemented exception for insn %08x at 0x%08lx in %s.\n",
251 insn
, regs
->cp0_epc
, current
->comm
);
255 if (compute_return_epc(regs
))
257 //force_sig(SIGFPE, current);
258 printk(KERN_DEBUG
"Should send SIGFPE to %s\n", current
->comm
);
261 static inline int get_insn_opcode(struct pt_regs
*regs
, unsigned int *opcode
)
265 epc
= (unsigned int *) (unsigned long) regs
->cp0_epc
;
266 if (regs
->cp0_cause
& CAUSEF_BD
)
269 if (verify_area(VERIFY_READ
, epc
, 4)) {
270 force_sig(SIGSEGV
, current
);
278 void do_bp(struct pt_regs
*regs
)
280 unsigned int opcode
, bcode
;
283 * There is the ancient bug in the MIPS assemblers that the break
284 * code starts left to bit 16 instead to bit 6 in the opcode.
285 * Gas is bug-compatible ...
287 if (get_insn_opcode(regs
, &opcode
))
289 bcode
= ((opcode
>> 16) & ((1 << 20) - 1));
292 * (A short test says that IRIX 5.3 sends SIGTRAP for all break
293 * insns, even for break codes that indicate arithmetic failures.
296 force_sig(SIGTRAP
, current
);
299 void do_tr(struct pt_regs
*regs
)
301 unsigned int opcode
, bcode
;
303 if (get_insn_opcode(regs
, &opcode
))
305 bcode
= ((opcode
>> 6) & ((1 << 20) - 1));
308 * (A short test says that IRIX 5.3 sends SIGTRAP for all break
309 * insns, even for break codes that indicate arithmetic failures.
312 force_sig(SIGTRAP
, current
);
315 void do_ri(struct pt_regs
*regs
)
317 printk("Cpu%d[%s:%d] Illegal instruction at %08lx ra=%08lx\n",
318 smp_processor_id(), current
->comm
, current
->pid
, regs
->cp0_epc
,
320 if (compute_return_epc(regs
))
322 force_sig(SIGILL
, current
);
/*
 * do_cpu(): coprocessor-unusable exception.  Visible behavior: extracts the
 * coprocessor id from cause bits (CAUSEB_CE), enables the FPU (ST0_CU1) and
 * performs a lazy FPU context switch -- restoring state if the task used the
 * FPU before, otherwise initializing first use; non-FPU coprocessor ids get
 * SIGILL.  Two alternative ownership paths are visible (last_task_used_math
 * vs. PF_USEDFPU), presumably UP vs. SMP variants -- confirm.
 * NOTE(review): damaged paste -- the switch/#ifdef structure, braces and
 * several lines are missing; embedded line numbers remain.  Kept
 * byte-for-byte; comments only.
 */
325 void do_cpu(struct pt_regs
*regs
)
329 cpid
= (regs
->cp0_cause
>> CAUSEB_CE
) & 3;
333 regs
->cp0_status
|= ST0_CU1
;
335 if (last_task_used_math
== current
)
338 if (current
->used_math
) { /* Using the FPU again. */
339 lazy_fpu_switch(last_task_used_math
, current
);
340 } else { /* First time FPU user. */
341 lazy_fpu_switch(last_task_used_math
, 0);
343 current
->used_math
= 1;
345 last_task_used_math
= current
;
347 if (current
->used_math
) {
348 lazy_fpu_switch(0, current
);
351 current
->used_math
= 1;
353 current
->flags
|= PF_USEDFPU
;
358 force_sig(SIGILL
, current
);
/*
 * Watch exception handler.
 */
void do_watch(struct pt_regs *regs)
{
	/*
	 * We use the watch exception where available to detect stack
	 * overflows, so hitting this handler is treated as fatal.
	 */
	panic("Caught WATCH exception - probably caused by stack overflow.");
}
371 void do_reserved(struct pt_regs
*regs
)
374 * Game over - no way to handle this if it ever occurs. Most probably
375 * caused by a new unknown cpu type or after another deadly
376 * hard/software error.
378 panic("Caught reserved exception %ld - should not happen.",
379 (regs
->cp0_cause
& 0x1f) >> 2);
/*
 * watch_init(): installs handle_watch on exception vector 23 for CPUs that
 * have watch registers.
 * NOTE(review): damaged paste -- the cputype dispatch and the setting of
 * watch_available are missing from this chunk.  Kept byte-for-byte.
 */
382 static inline void watch_init(unsigned long cputype
)
394 set_except_vector(23, handle_watch
);
/*
 * setup_dedicated_int(): on CPUs with a dedicated interrupt vector, copies
 * the except_vec4 stub to KSEG0+0x200, sets CAUSEF_IV so interrupts use it,
 * and records dedicated_iv_available.
 * NOTE(review): damaged paste -- the switch case labels and braces are
 * missing from this chunk.  Kept byte-for-byte; comments only.
 */
401 * Some MIPS CPUs have a dedicated interrupt vector which reduces the
402 * interrupt processing overhead. Use it where available.
403 * FIXME: more CPUs than just the Nevada have this feature.
405 static inline void setup_dedicated_int(void)
407 extern void except_vec4(void);
409 switch(mips_cputype
) {
411 memcpy((void *)(KSEG0
+ 0x200), except_vec4
, 8);
412 set_cp0_cause(CAUSEF_IV
, CAUSEF_IV
);
413 dedicated_iv_available
= 1;
417 unsigned long exception_handlers
[32];
420 * As a side effect of the way this is implemented we're limited
421 * to interrupt handlers in the address range from
422 * KSEG0 <= x < KSEG0 + 256mb on the Nevada. Oh well ...
424 void set_except_vector(int n
, void *addr
)
426 unsigned long handler
= (unsigned long) addr
;
427 exception_handlers
[n
] = handler
;
428 if (n
== 0 && dedicated_iv_available
) {
429 *(volatile u32
*)(KSEG0
+0x200) = 0x08000000 |
430 (0x03ffffff & (handler
>> 2));
431 flush_icache_range(KSEG0
+0x200, KSEG0
+ 0x204);
/*
 * mips4_setup(): per-cputype enabling of MIPS IV features; the visible
 * action sets the ST0_XX status bit.
 * NOTE(review): damaged paste -- the switch case labels and any setting of
 * mips4_available are missing from this chunk.  Kept byte-for-byte.
 */
435 static inline void mips4_setup(void)
437 switch (mips_cputype
) {
444 set_cp0_status(ST0_XX
, ST0_XX
);
448 static inline void go_64(void)
452 bits
= ST0_KX
|ST0_SX
|ST0_UX
;
453 set_cp0_status(bits
, bits
);
454 printk("Entering 64-bit mode.\n");
457 void __init
trap_init(void)
459 extern char except_vec0
;
460 extern char except_vec1_r10k
;
461 extern char except_vec2_generic
;
462 extern char except_vec3_generic
, except_vec3_r4000
;
463 extern void bus_error_init(void);
466 /* Some firmware leaves the BEV flag set, clear it. */
467 set_cp0_status(ST0_BEV
, 0);
469 /* Copy the generic exception handler code to it's final destination. */
470 memcpy((void *)(KSEG0
+ 0x100), &except_vec2_generic
, 0x80);
471 memcpy((void *)(KSEG0
+ 0x180), &except_vec3_generic
, 0x80);
474 * Setup default vectors
476 for(i
= 0; i
<= 31; i
++)
477 set_except_vector(i
, handle_reserved
);
480 * Only some CPUs have the watch exceptions or a dedicated
483 watch_init(mips_cputype
);
484 setup_dedicated_int();
486 go_64(); /* In memoriam C128 ;-) */
489 * Handling the following exceptions depends mostly of the cpu type
491 switch(mips_cputype
) {
494 * The R10000 is in most aspects similar to the R4400. It
495 * should get some special optimizations.
497 write_32bit_cp0_register(CP0_FRAMEMASK
, 0);
498 set_cp0_status(ST0_XX
, ST0_XX
);
506 /* Fall through ... */
515 /* Debug TLB refill handler. */
516 memcpy((void *)KSEG0
, &except_vec0
, 0x80);
517 memcpy((void *)KSEG0
+ 0x080, &except_vec1_r10k
, 0x80);
519 /* Cache error vector */
520 memcpy((void *)(KSEG0
+ 0x100), (void *) KSEG0
, 0x80);
523 memcpy((void *)(KSEG0
+ 0x180), &except_vec3_r4000
,
526 memcpy((void *)(KSEG0
+ 0x180), &except_vec3_generic
,
530 set_except_vector(1, __xtlb_mod
);
531 set_except_vector(2, __xtlb_tlbl
);
532 set_except_vector(3, __xtlb_tlbs
);
533 set_except_vector(4, handle_adel
);
534 set_except_vector(5, handle_ades
);
536 /* DBE / IBE exception handler are system specific. */
539 set_except_vector(8, handle_sys
);
540 set_except_vector(9, handle_bp
);
541 set_except_vector(10, handle_ri
);
542 set_except_vector(11, handle_cpu
);
543 set_except_vector(12, handle_ov
);
544 set_except_vector(13, handle_tr
);
545 set_except_vector(15, handle_fpe
);
549 panic("unsupported CPU type %s.\n", cpu_names
[mips_cputype
]);
554 panic("Unknown CPU type");
556 flush_icache_range(KSEG0
, KSEG0
+ 0x200);
558 atomic_inc(&init_mm
.mm_count
); /* XXX UP? */
559 current
->active_mm
= &init_mm
;