arch/mips64/kernel/traps.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999 by Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>

#include <asm/branch.h>
#include <asm/cachectl.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/ptrace.h>
#include <asm/watch.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
extern asmlinkage void __xtlb_mod(void);
extern asmlinkage void __xtlb_tlbl(void);
extern asmlinkage void __xtlb_tlbs(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_reserved(void);
static char *cpu_names[] = CPU_NAMES;

char watch_available = 0;
char dedicated_iv_available = 0;
char vce_available = 0;
char mips4_available = 0;

int kstack_depth_to_print = 24;
/*
 * This constant is for searching for possible module text segments.
 * MODULE_RANGE is a guess of how much space is likely to be vmalloced.
 */
#define MODULE_RANGE (8*1024*1024)
/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
void show_stack(unsigned long *sp)
{
	int i;
	unsigned long *stack;

	stack = sp;
	i = 0;

	printk("Stack:");
	while ((unsigned long) stack & (PAGE_SIZE - 1)) {
		unsigned long stackdata;

		if (__get_user(stackdata, stack++)) {
			printk(" (Bad stack address)");
			break;
		}

		printk(" %016lx", stackdata);

		if (++i > 40) {
			printk(" ...");
			break;
		}

		if (i % 4 == 0)
			printk("\n       ");
	}
}
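/*
 * Note: the loop above terminates as soon as the walked pointer becomes
 * page aligned, so the dump covers the words from the given stack pointer
 * up to the end of the page that contains it -- presumably enough here,
 * where the kernel stack is not expected to cross a page boundary.
 */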
void show_trace(unsigned long *sp)
{
	int i;
	unsigned long *stack;
	unsigned long kernel_start, kernel_end;
	unsigned long module_start, module_end;
	extern char _stext, _etext;

	stack = sp;
	i = 0;

	kernel_start = (unsigned long) &_stext;
	kernel_end = (unsigned long) &_etext;
	module_start = VMALLOC_START;
	module_end = module_start + MODULE_RANGE;

	printk("\nCall Trace:");

	while ((unsigned long) stack & (PAGE_SIZE - 1)) {
		unsigned long addr;

		if (__get_user(addr, stack++)) {
			printk(" (Bad stack address)\n");
			break;
		}

		/*
		 * If the address is either in the text segment of the
		 * kernel, or in the region which contains vmalloc'ed
		 * memory, it *may* be the address of a calling
		 * routine; if so, print it so that someone tracing
		 * down the cause of the crash will be able to figure
		 * out the call path that was taken.
		 */
		if ((addr >= kernel_start && addr < kernel_end) ||
		    (addr >= module_start && addr < module_end)) {
			/* Since our kernel is still at KSEG0,
			 * truncate the address so that ksymoops
			 * understands it.
			 */
			printk(" [<%08x>]", (unsigned int) addr);
			if (++i > 40) {
				printk(" ...");
				break;
			}
		}
	}
}
void show_code(unsigned int *pc)
{
	long i;

	printk("\nCode:");

	for (i = -3; i < 6; i++) {
		unsigned int insn;

		if (__get_user(insn, pc + i)) {
			printk(" (Bad address in epc)\n");
			break;
		}
		printk("%c%08x%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
	}
}
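/*
 * Note: the dump covers three instruction words before and up to six words
 * after the faulting pc; the word at the pc itself (i == 0) is bracketed as
 * <xxxxxxxx> so it stands out in the oops output.
 */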
spinlock_t die_lock;

void die(const char * str, struct pt_regs * regs, unsigned long err)
{
	if (user_mode(regs))	/* Just return if in user mode.  */
		return;

	console_verbose();
	spin_lock_irq(&die_lock);
	printk("%s: %04lx\n", str, err & 0xffff);
	show_regs(regs);
	printk("Process %s (pid: %d, stackpage=%08lx)\n",
	       current->comm, current->pid, (unsigned long) current);
	show_stack((unsigned long *) regs->regs[29]);
	show_trace((unsigned long *) regs->regs[29]);
	show_code((unsigned int *) regs->cp0_epc);
	printk("\n");
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}
void die_if_kernel(const char * str, struct pt_regs * regs, unsigned long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}
void do_ov(struct pt_regs *regs)
{
	if (compute_return_epc(regs))
		return;
	force_sig(SIGFPE, current);
}
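/*
 * Note: compute_return_epc() (from <asm/branch.h>) advances the saved EPC
 * past the faulting instruction, emulating the branch when the exception
 * was taken in a branch delay slot; a non-zero return appears to mean the
 * new EPC could not be determined, in which case the handler simply bails
 * out without queuing a signal.
 */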
#ifdef CONFIG_MIPS_FPE_MODULE
static void (*fpe_handler)(struct pt_regs *regs, unsigned int fcr31);

/*
 * register_fpe()/unregister_fpe() are for debugging purposes only.  To keep
 * this hack simple there is no error checking.
 */
int register_fpe(void (*handler)(struct pt_regs *regs, unsigned int fcr31))
{
	fpe_handler = handler;
	return 0;
}

int unregister_fpe(void (*handler)(struct pt_regs *regs, unsigned int fcr31))
{
	fpe_handler = NULL;
	return 0;
}
#endif
/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	unsigned long pc;
	unsigned int insn;
	extern void simfp(unsigned int);

#ifdef CONFIG_MIPS_FPE_MODULE
	if (fpe_handler != NULL) {
		fpe_handler(regs, fcr31);
		return;
	}
#endif
	if (fcr31 & 0x20000) {
		/* Retry instruction with flush to zero ...  */
		if (!(fcr31 & (1<<24))) {
			printk("Setting flush to zero for %s.\n",
			       current->comm);
			fcr31 &= ~0x20000;
			fcr31 |= (1<<24);
			__asm__ __volatile__(
				"ctc1\t%0,$31"
				: /* No outputs */
				: "r" (fcr31));
			return;
		}
		pc = regs->cp0_epc + ((regs->cp0_cause & CAUSEF_BD) ? 4 : 0);
		if (get_user(insn, (unsigned int *)pc)) {
			/* XXX Can this happen?  */
			force_sig(SIGSEGV, current);
		}

		printk(KERN_DEBUG "Unimplemented exception for insn %08x at 0x%08lx in %s.\n",
		       insn, regs->cp0_epc, current->comm);
		simfp(insn);
	}

	if (compute_return_epc(regs))
		return;
	//force_sig(SIGFPE, current);
	printk(KERN_DEBUG "Should send SIGFPE to %s\n", current->comm);
}
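/*
 * A note on the magic FCSR constants used above (assuming the standard
 * MIPS FCR31 layout): bit 17 (0x20000) is the "unimplemented operation"
 * cause bit, and bit 24 is FS, the flush-denormals-to-zero bit.  So the
 * strategy is to first retry the instruction with FS set, and only if it
 * still traps fetch the opcode and hand it to the simfp() emulator.
 */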
static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode)
{
	unsigned int *epc;

	epc = (unsigned int *) (unsigned long) regs->cp0_epc;
	if (regs->cp0_cause & CAUSEF_BD)
		epc += 4;

	if (verify_area(VERIFY_READ, epc, 4)) {
		force_sig(SIGSEGV, current);
		return 1;
	}
	*opcode = *epc;

	return 0;
}
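/*
 * Note: when the exception was taken in a branch delay slot (Cause.BD set)
 * EPC points at the branch instruction and the real faulting instruction is
 * the one in the delay slot right behind it; beware that epc is an
 * (unsigned int *), so "epc += 4" above actually advances by four words.
 */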
void do_bp(struct pt_regs *regs)
{
	unsigned int opcode, bcode;

	/*
	 * There is an ancient bug in MIPS assemblers: the break code is
	 * placed starting at bit 16 instead of at bit 6 in the opcode.
	 * Gas is bug-compatible ...
	 */
	if (get_insn_opcode(regs, &opcode))
		return;
	bcode = ((opcode >> 16) & ((1 << 20) - 1));

	/*
	 * (A short test says that IRIX 5.3 sends SIGTRAP for all break
	 * insns, even for break codes that indicate arithmetic failures.
	 * Weird ...)
	 */
	force_sig(SIGTRAP, current);
}
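/*
 * Background for the shift counts used here and in do_tr() below: in the
 * break/trap encodings the code field nominally occupies bits 25..6 of the
 * opcode, but -- as the comment above says -- traditional assemblers (and
 * gas) place the operand of "break n" in the upper half of that field,
 * starting at bit 16.  That is why do_bp() shifts by 16 while do_tr()
 * shifts by 6.
 */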
void do_tr(struct pt_regs *regs)
{
	unsigned int opcode, bcode;

	if (get_insn_opcode(regs, &opcode))
		return;
	bcode = ((opcode >> 6) & ((1 << 20) - 1));

	/*
	 * (A short test says that IRIX 5.3 sends SIGTRAP for all break
	 * insns, even for break codes that indicate arithmetic failures.
	 * Weird ...)
	 */
	force_sig(SIGTRAP, current);
}
void do_ri(struct pt_regs *regs)
{
	printk("Cpu%d[%s:%d] Illegal instruction at %08lx ra=%08lx\n",
	       smp_processor_id(), current->comm, current->pid, regs->cp0_epc,
	       regs->regs[31]);
	if (compute_return_epc(regs))
		return;
	force_sig(SIGILL, current);
}
void do_cpu(struct pt_regs *regs)
{
	u32 cpid;

	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
	if (cpid != 1)
		goto bad_cid;

	regs->cp0_status |= ST0_CU1;
#ifndef CONFIG_SMP
	if (last_task_used_math == current)
		return;

	if (current->used_math) {		/* Using the FPU again.  */
		lazy_fpu_switch(last_task_used_math, current);
	} else {				/* First time FPU user.  */
		lazy_fpu_switch(last_task_used_math, 0);
		init_fpu();
		current->used_math = 1;
	}
	last_task_used_math = current;
#else
	if (current->used_math) {
		lazy_fpu_switch(0, current);
	} else {
		init_fpu();
		current->used_math = 1;
	}
	current->flags |= PF_USEDFPU;
#endif
	return;

bad_cid:
	force_sig(SIGILL, current);
}
void do_watch(struct pt_regs *regs)
{
	/*
	 * We use the watch exception where available to detect stack
	 * overflows.
	 */
	show_regs(regs);
	panic("Caught WATCH exception - probably caused by stack overflow.");
}
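/*
 * Note: the Watch exception (exception code 23, see watch_init() below) is
 * raised when a load or store hits the address programmed into the CP0
 * WatchLo/WatchHi registers; presumably those registers are pointed near
 * the bottom of the kernel stack elsewhere, which is what turns this into
 * the stack overflow detector described above.
 */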
void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.  Most probably
	 * caused by a new unknown cpu type or by another deadly
	 * hardware/software error.
	 */
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x1f) >> 2);
}
static inline void watch_init(unsigned long cputype)
{
	switch (cputype) {
	case CPU_R10000:
	case CPU_R4000MC:
	case CPU_R4400MC:
	case CPU_R4000SC:
	case CPU_R4400SC:
	case CPU_R4000PC:
	case CPU_R4400PC:
	case CPU_R4200:
	case CPU_R4300:
		set_except_vector(23, handle_watch);
		watch_available = 1;
		break;
	}
}
/*
 * Some MIPS CPUs have a dedicated interrupt vector which reduces the
 * interrupt processing overhead.  Use it where available.
 * FIXME: more CPUs than just the Nevada have this feature.
 */
static inline void setup_dedicated_int(void)
{
	extern void except_vec4(void);

	switch (mips_cputype) {
	case CPU_NEVADA:
		memcpy((void *)(KSEG0 + 0x200), except_vec4, 8);
		set_cp0_cause(CAUSEF_IV, CAUSEF_IV);
		dedicated_iv_available = 1;
	}
}
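/*
 * Note: setting the IV bit in the CP0 Cause register makes interrupts use a
 * separate vector at offset 0x200 from the exception vector base instead of
 * the general exception vector at offset 0x180, which is why except_vec4 is
 * copied to KSEG0 + 0x200 above.
 */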
unsigned long exception_handlers[32];

/*
 * As a side effect of the way this is implemented we're limited
 * to interrupt handlers in the address range from
 * KSEG0 <= x < KSEG0 + 256mb on the Nevada.  Oh well ...
 */
void set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;

	exception_handlers[n] = handler;
	if (n == 0 && dedicated_iv_available) {
		*(volatile u32 *)(KSEG0+0x200) = 0x08000000 |
		                                 (0x03ffffff & (handler >> 2));
		flush_icache_range(KSEG0+0x200, KSEG0 + 0x204);
	}
}
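/*
 * A note on the 256mb limit mentioned above: the word written to
 * KSEG0 + 0x200 is a MIPS "j" instruction -- 0x08000000 is the j opcode in
 * the top six bits and the low 26 bits hold the word address of the jump
 * target.  A j can only reach targets inside the current 256mb segment,
 * hence the restriction on where the interrupt handler may live.
 */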
static inline void mips4_setup(void)
{
	switch (mips_cputype) {
	case CPU_R5000:
	case CPU_R5000A:
	case CPU_NEVADA:
	case CPU_R8000:
	case CPU_R10000:
		mips4_available = 1;
		set_cp0_status(ST0_XX, ST0_XX);
	}
}
static inline void go_64(void)
{
	unsigned int bits;

	bits = ST0_KX|ST0_SX|ST0_UX;
	set_cp0_status(bits, bits);
	printk("Entering 64-bit mode.\n");
}
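/*
 * Note: KX, SX and UX are the Status register bits that enable the extended
 * 64-bit address spaces for kernel, supervisor and user mode respectively,
 * so after this point the CPU accepts 64-bit addresses in all three modes.
 */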
void __init trap_init(void)
{
	extern char except_vec0;
	extern char except_vec1_r10k;
	extern char except_vec2_generic;
	extern char except_vec3_generic, except_vec3_r4000;
	extern void bus_error_init(void);
	unsigned long i;

	/* Some firmware leaves the BEV flag set, clear it.  */
	set_cp0_status(ST0_BEV, 0);

	/* Copy the generic exception handler code to its final destination.  */
	memcpy((void *)(KSEG0 + 0x100), &except_vec2_generic, 0x80);
	memcpy((void *)(KSEG0 + 0x180), &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Only some CPUs have the watch exceptions or a dedicated
	 * interrupt vector.
	 */
	watch_init(mips_cputype);
	setup_dedicated_int();
	mips4_setup();
	go_64();			/* In memoriam C128 ;-)  */

	/*
	 * Handling the following exceptions depends mostly on the cpu type
	 */
	switch (mips_cputype) {
	case CPU_R10000:
		/*
		 * The R10000 is in most aspects similar to the R4400.  It
		 * should get some special optimizations.
		 */
		write_32bit_cp0_register(CP0_FRAMEMASK, 0);
		set_cp0_status(ST0_XX, ST0_XX);
		goto r4k;

	case CPU_R4000MC:
	case CPU_R4400MC:
	case CPU_R4000SC:
	case CPU_R4400SC:
		vce_available = 1;
		/* Fall through ...  */
	case CPU_R4000PC:
	case CPU_R4400PC:
	case CPU_R4200:
	case CPU_R4300:
	case CPU_R4600:
	case CPU_R5000:
	case CPU_NEVADA:
r4k:
		/* Debug TLB refill handler.  */
		memcpy((void *)KSEG0, &except_vec0, 0x80);
		memcpy((void *)KSEG0 + 0x080, &except_vec1_r10k, 0x80);

		/* Cache error vector  */
		memcpy((void *)(KSEG0 + 0x100), (void *) KSEG0, 0x80);

		if (vce_available) {
			memcpy((void *)(KSEG0 + 0x180), &except_vec3_r4000,
			       0x180);
		} else {
			memcpy((void *)(KSEG0 + 0x180), &except_vec3_generic,
			       0x100);
		}

		set_except_vector(1, __xtlb_mod);
		set_except_vector(2, __xtlb_tlbl);
		set_except_vector(3, __xtlb_tlbs);
		set_except_vector(4, handle_adel);
		set_except_vector(5, handle_ades);

		/* DBE / IBE exception handlers are system specific.  */
		bus_error_init();

		set_except_vector(8, handle_sys);
		set_except_vector(9, handle_bp);
		set_except_vector(10, handle_ri);
		set_except_vector(11, handle_cpu);
		set_except_vector(12, handle_ov);
		set_except_vector(13, handle_tr);
		set_except_vector(15, handle_fpe);
		break;

	case CPU_R8000:
		panic("unsupported CPU type %s.\n", cpu_names[mips_cputype]);
		break;

	case CPU_UNKNOWN:
	default:
		panic("Unknown CPU type");
	}

	flush_icache_range(KSEG0, KSEG0 + 0x200);

	atomic_inc(&init_mm.mm_count);	/* XXX UP?  */
	current->active_mm = &init_mm;
}
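/*
 * Note on the last two statements of trap_init(): they presumably follow
 * the usual lazy-TLB convention -- the boot thread has no mm of its own,
 * so it borrows init_mm as its active_mm and pins it with an extra
 * mm_count reference.
 */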