linux-2.6/linux-mips.git: arch/mips/kernel/traps.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
 * Copyright (C) 2002, 2003 Maciej W. Rozycki
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>

#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/siginfo.h>
#include <asm/system.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/watch.h>
#include <asm/types.h>

extern asmlinkage void handle_mod(void);
extern asmlinkage void handle_tlbl(void);
extern asmlinkage void handle_tlbs(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);

extern int fpu_emulator_cop1Handler(int xcptno, struct pt_regs *xcp,
        struct mips_fpu_soft_struct *ctx);

void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);

/*
 * These constants are for searching for possible module text segments.
 * MODULE_RANGE is a guess of how much space is likely to be vmalloced.
 */
#define MODULE_RANGE (8*1024*1024)

/*
 * If the address is either in the .text section of the
 * kernel, or in the vmalloc'ed module regions, it *may*
 * be the address of a calling routine.
 */

#ifdef CONFIG_MODULES

extern struct module *module_list;
extern struct module kernel_module;

static inline int kernel_text_address(long addr)
{
        extern char _stext, _etext;
        int retval = 0;
        struct module *mod;

        if (addr >= (long) &_stext && addr <= (long) &_etext)
                return 1;

        for (mod = module_list; mod != &kernel_module; mod = mod->next) {
                /* mod_bound tests for addr being inside the vmalloc'ed
                 * module area.  Of course it'd be better to test only
                 * for the .text subset... */
                if (mod_bound(addr, 0, mod)) {
                        retval = 1;
                        break;
                }
        }

        return retval;
}

#else

static inline int kernel_text_address(long addr)
{
        extern char _stext, _etext;

        return (addr >= (long) &_stext && addr <= (long) &_etext);
}

#endif

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
void show_stack(unsigned long *sp)
{
        int i;
        long stackdata;

        sp = sp ? sp : (unsigned long *) &sp;

        printk("Stack: ");
        i = 1;
        while ((unsigned long) sp & (PAGE_SIZE - 1)) {
                if (i && ((i % 8) == 0))
                        printk("\n");
                if (i > 40) {
                        printk(" ...");
                        break;
                }
                if (__get_user(stackdata, sp++)) {
                        printk(" (Bad stack address)");
                        break;
                }
                printk(" %08lx", stackdata);
                i++;
        }
        printk("\n");
}

void show_trace(unsigned long *sp)
{
        int i;
        long addr;

        sp = sp ? sp : (unsigned long *) &sp;

        printk("Call Trace: ");
        i = 1;
        while ((unsigned long) sp & (PAGE_SIZE - 1)) {
                if (__get_user(addr, sp++)) {
                        if (i && ((i % 6) == 0))
                                printk("\n");
                        printk(" (Bad stack address)\n");
                        break;
                }

                /*
                 * If the address is either in the text segment of the
                 * kernel, or in the region which contains vmalloc'ed
                 * memory, it *may* be the address of a calling
                 * routine; if so, print it so that someone tracing
                 * down the cause of the crash will be able to figure
                 * out the call path that was taken.
                 */
                if (kernel_text_address(addr)) {
                        if (i && ((i % 6) == 0))
                                printk("\n");
                        if (i > 40) {
                                printk(" ...");
                                break;
                        }
                        printk(" [<%08lx>]", addr);
                        i++;
                }
        }
        printk("\n");
}

void show_trace_task(struct task_struct *tsk)
{
        show_trace((unsigned long *)tsk->thread.reg29);
}

void show_code(unsigned int *pc)
{
        long i;

        printk("\nCode:");

        for(i = -3 ; i < 6 ; i++) {
                unsigned long insn;
                if (__get_user(insn, pc + i)) {
                        printk(" (Bad address in epc)\n");
                        break;
                }
                printk("%c%08lx%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
        }
}

void show_regs(struct pt_regs *regs)
{
        /*
         * Saved main processor registers
         */
        printk("$0 : %08x %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
               0, regs->regs[1], regs->regs[2], regs->regs[3],
               regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]);
        printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
               regs->regs[8], regs->regs[9], regs->regs[10], regs->regs[11],
               regs->regs[12], regs->regs[13], regs->regs[14], regs->regs[15]);
        printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
               regs->regs[16], regs->regs[17], regs->regs[18], regs->regs[19],
               regs->regs[20], regs->regs[21], regs->regs[22], regs->regs[23]);
        printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx\n",
               regs->regs[24], regs->regs[25],
               regs->regs[28], regs->regs[29], regs->regs[30], regs->regs[31]);
        printk("Hi : %08lx\n", regs->hi);
        printk("Lo : %08lx\n", regs->lo);

        /*
         * Saved cp0 registers
         */
        printk("epc   : %08lx %s\nStatus: %08lx\nCause : %08lx\n",
               regs->cp0_epc, print_tainted(), regs->cp0_status,
               regs->cp0_cause);
}

void show_registers(struct pt_regs *regs)
{
        show_regs(regs);
        printk("Process %s (pid: %d, stackpage=%08lx)\n",
               current->comm, current->pid, (unsigned long) current);
        show_stack((unsigned long *) regs->regs[29]);
        show_trace((unsigned long *) regs->regs[29]);
        show_code((unsigned int *) regs->cp0_epc);
        printk("\n");
}

static spinlock_t die_lock = SPIN_LOCK_UNLOCKED;

void __die(const char * str, struct pt_regs * regs, const char * file,
           const char * func, unsigned long line)
{
        console_verbose();
        spin_lock_irq(&die_lock);
        printk("%s", str);
        if (file && func)
                printk(" in %s:%s, line %ld", file, func, line);
        printk(":\n");
        show_registers(regs);
        spin_unlock_irq(&die_lock);
        do_exit(SIGSEGV);
}

void __die_if_kernel(const char * str, struct pt_regs * regs,
                     const char * file, const char * func, unsigned long line)
{
        if (!user_mode(regs))
                __die(str, regs, file, func, line);
}

extern const struct exception_table_entry __start___dbe_table[];
extern const struct exception_table_entry __stop___dbe_table[];

void __declare_dbe_table(void)
{
        __asm__ __volatile__(
        ".section\t__dbe_table,\"a\"\n\t"
        ".previous"
        );
}
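
/*
 * Binary search over an exception table sorted by faulting-instruction
 * address; returns the fixup address for `value' or 0 if nothing matches.
 */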
static inline unsigned long
search_one_table(const struct exception_table_entry *first,
                 const struct exception_table_entry *last,
                 unsigned long value)
{
        const struct exception_table_entry *mid;
        long diff;

        while (first < last) {
                mid = (last - first) / 2 + first;
                diff = mid->insn - value;
                if (diff < 0)
                        first = mid + 1;
                else
                        last = mid;
        }
        return (first == last && first->insn == value) ? first->nextinsn : 0;
}

extern spinlock_t modlist_lock;

static inline unsigned long
search_dbe_table(unsigned long addr)
{
        unsigned long ret = 0;

#ifndef CONFIG_MODULES
        /* There is only the kernel to search.  */
        ret = search_one_table(__start___dbe_table, __stop___dbe_table-1, addr);
        return ret;
#else
        unsigned long flags;

        /* The kernel is the last "module" -- no need to treat it special. */
        struct module *mp;
        struct archdata *ap;

        spin_lock_irqsave(&modlist_lock, flags);
        for (mp = module_list; mp != NULL; mp = mp->next) {
                if (!mod_member_present(mp, archdata_end) ||
                    !mod_archdata_member_present(mp, struct archdata,
                                                 dbe_table_end))
                        continue;
                ap = (struct archdata *)(mp->archdata_start);

                if (ap->dbe_table_start == NULL ||
                    !(mp->flags & (MOD_RUNNING | MOD_INITIALIZING)))
                        continue;
                ret = search_one_table(ap->dbe_table_start,
                                       ap->dbe_table_end - 1, addr);
                if (ret)
                        break;
        }
        spin_unlock_irqrestore(&modlist_lock, flags);
        return ret;
#endif
}

asmlinkage void do_be(struct pt_regs *regs)
{
        unsigned long new_epc;
        unsigned long fixup = 0;
        int data = regs->cp0_cause & 4;
        int action = MIPS_BE_FATAL;

        if (data && !user_mode(regs))
                fixup = search_dbe_table(regs->cp0_epc);

        if (fixup)
                action = MIPS_BE_FIXUP;

        if (board_be_handler)
                action = board_be_handler(regs, fixup != 0);

        switch (action) {
        case MIPS_BE_DISCARD:
                return;
        case MIPS_BE_FIXUP:
                if (fixup) {
                        new_epc = fixup_exception(dpf_reg, fixup,
                                                  regs->cp0_epc);
                        regs->cp0_epc = new_epc;
                        return;
                }
                break;
        default:
                break;
        }

        /*
         * Assume it would be too dangerous to continue ...
         */
        printk(KERN_ALERT "%s bus error, epc == %08lx, ra == %08lx\n",
               data ? "Data" : "Instruction",
               regs->cp0_epc, regs->regs[31]);
        die_if_kernel("Oops", regs);
        force_sig(SIGBUS, current);
}

/*
 * ll/sc emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
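
/*
 * Illustrative decode of the masks above (not from the original source):
 * ll/sc are I-type instructions,
 *
 *       31    26 25  21 20  16 15             0
 *      +--------+------+------+----------------+
 *      | opcode | base |  rt  |     offset     |
 *      +--------+------+------+----------------+
 *
 * OPCODE selects bits 31..26 (0x30 for ll, 0x38 for sc), BASE the
 * address register, RT the data register and OFFSET the signed 16-bit
 * displacement; e.g. "ll $2, 4($3)" encodes as 0xc0620004.
 */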

/*
 * The ll_bit is cleared by r*_switch.S
 */

unsigned long ll_bit;

static struct task_struct *ll_task = NULL;

static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
        unsigned long value, *vaddr;
        long offset;
        int signal = 0;

        /*
         * analyse the ll instruction that just caused a ri exception
         * and compute the referenced memory address.
         */

        /* sign extend offset */
        offset = opcode & OFFSET;
        offset <<= 16;
        offset >>= 16;

        vaddr = (unsigned long *)((long)(regs->regs[(opcode & BASE) >> 21]) + offset);

        if ((unsigned long)vaddr & 3) {
                signal = SIGBUS;
                goto sig;
        }
        if (get_user(value, vaddr)) {
                signal = SIGSEGV;
                goto sig;
        }

        if (ll_task == NULL || ll_task == current) {
                ll_bit = 1;
        } else {
                ll_bit = 0;
        }
        ll_task = current;

        regs->regs[(opcode & RT) >> 16] = value;

        compute_return_epc(regs);
        return;

sig:
        force_sig(signal, current);
}

static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
        unsigned long *vaddr, reg;
        long offset;
        int signal = 0;

        /*
         * analyse the sc instruction that just caused a ri exception
         * and compute the referenced memory address.
         */

        /* sign extend offset */
        offset = opcode & OFFSET;
        offset <<= 16;
        offset >>= 16;

        vaddr = (unsigned long *)((long)(regs->regs[(opcode & BASE) >> 21]) + offset);
        reg = (opcode & RT) >> 16;

        if ((unsigned long)vaddr & 3) {
                signal = SIGBUS;
                goto sig;
        }
        if (ll_bit == 0 || ll_task != current) {
                regs->regs[reg] = 0;
                compute_return_epc(regs);
                return;
        }

        if (put_user(regs->regs[reg], vaddr)) {
                signal = SIGSEGV;
                goto sig;
        }

        regs->regs[reg] = 1;

        compute_return_epc(regs);
        return;

sig:
        force_sig(signal, current);
}
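
/*
 * Illustrative context (not from the original source): the sequences
 * being emulated are ordinary load-linked/store-conditional loops, e.g.
 *
 *      1:      ll      t0, (a0)
 *              addu    t0, 1
 *              sc      t0, (a0)
 *              beqz    t0, 1b
 *
 * simulate_ll() records the link in ll_bit/ll_task; simulate_sc()
 * performs the store only if the current task still holds that link,
 * and otherwise writes 0 into rt so the loop retries.
 */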

/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors.  That's the theory.  In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode);       /* defined below */

static inline int simulate_llsc(struct pt_regs *regs)
{
        unsigned int opcode;

        if (unlikely(get_insn_opcode(regs, &opcode)))
                return -EFAULT;

        if ((opcode & OPCODE) == LL) {
                simulate_ll(regs, opcode);
                return 0;
        }
        if ((opcode & OPCODE) == SC) {
                simulate_sc(regs, opcode);
                return 0;
        }

        return -EFAULT;         /* Neither ll nor sc; let the caller raise SIGILL. */
}

asmlinkage void do_ov(struct pt_regs *regs)
{
        siginfo_t info;

        info.si_code = FPE_INTOVF;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_addr = (void *)regs->cp0_epc;
        force_sig_info(SIGFPE, &info, current);
}

/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
        if (fcr31 & FPU_CSR_UNI_X) {
                int sig;

                /*
                 * Unimplemented operation exception.  If we've got the full
                 * software emulator on-board, let's use it...
                 *
                 * Force FPU to dump state into task/thread context.  We're
                 * moving a lot of data here for what is probably a single
                 * instruction, but the alternative is to pre-decode the FP
                 * register operands before invoking the emulator, which seems
                 * a bit extreme for what should be an infrequent event.
                 */
                save_fp(current);

                /* Run the emulator */
                sig = fpu_emulator_cop1Handler(0, regs,
                                               &current->thread.fpu.soft);

                /*
                 * We can't allow the emulated instruction to leave any of
                 * the cause bits set in $fcr31.
                 */
                current->thread.fpu.soft.sr &= ~FPU_CSR_ALL_X;

                /* Restore the hardware register state */
                restore_fp(current);

                /* If something went wrong, signal */
                if (sig)
                        force_sig(sig, current);

                return;
        }

        force_sig(SIGFPE, current);
}
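
/*
 * Fetch the opcode of the faulting instruction.  If the exception was
 * taken in a branch delay slot (Cause.BD set), EPC points at the branch
 * and the instruction that trapped is the word following it, hence the
 * CAUSEF_BD adjustment below.
 */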
static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode)
{
        unsigned int *epc;

        epc = (unsigned int *) regs->cp0_epc +
              ((regs->cp0_cause & CAUSEF_BD) != 0);
        if (!get_user(*opcode, epc))
                return 0;

        force_sig(SIGSEGV, current);
        return 1;
}

asmlinkage void do_bp(struct pt_regs *regs)
{
        unsigned int opcode, bcode;
        siginfo_t info;

        if (get_insn_opcode(regs, &opcode))
                return;

        /*
         * There is an ancient bug in MIPS assemblers: the break
         * code is placed starting at bit 16 instead of bit 6 of the
         * opcode.  Gas is bug-compatible ...
         */
        bcode = ((opcode >> 16) & ((1 << 20) - 1));

        /*
         * (A short test says that IRIX 5.3 sends SIGTRAP for all break
         * insns, even for break codes that indicate arithmetic failures.
         * Weird ...)
         * But should we continue the brokenness???  --macro
         */
        switch (bcode) {
        case 6:
        case 7:
                if (bcode == 7)
                        info.si_code = FPE_INTDIV;
                else
                        info.si_code = FPE_INTOVF;
                info.si_signo = SIGFPE;
                info.si_errno = 0;
                info.si_addr = (void *)regs->cp0_epc;
                force_sig_info(SIGFPE, &info, current);
                break;
        default:
                force_sig(SIGTRAP, current);
        }
}

asmlinkage void do_tr(struct pt_regs *regs)
{
        unsigned int opcode, tcode = 0;
        siginfo_t info;

        if (get_insn_opcode(regs, &opcode))
                return;

        /* Immediate versions don't provide a code.  */
        if (!(opcode & OPCODE))
                tcode = ((opcode >> 6) & ((1 << 20) - 1));

        /*
         * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
         * insns, even for trap codes that indicate arithmetic failures.
         * Weird ...)
         * But should we continue the brokenness???  --macro
         */
        switch (tcode) {
        case 6:
        case 7:
                if (tcode == 7)
                        info.si_code = FPE_INTDIV;
                else
                        info.si_code = FPE_INTOVF;
                info.si_signo = SIGFPE;
                info.si_errno = 0;
                info.si_addr = (void *)regs->cp0_epc;
                force_sig_info(SIGFPE, &info, current);
                break;
        default:
                force_sig(SIGTRAP, current);
        }
}

asmlinkage void do_ri(struct pt_regs *regs)
{
        die_if_kernel("Reserved instruction in kernel code", regs);

        if (!cpu_has_llsc)
                if (!simulate_llsc(regs))
                        return;

        force_sig(SIGILL, current);
}
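
/*
 * A coprocessor unusable exception reports the number of the offending
 * coprocessor in the CE field of the Cause register; the shift by
 * CAUSEB_CE and the mask with 3 below extract it.
 */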
asmlinkage void do_cpu(struct pt_regs *regs)
{
        unsigned int cpid;

        die_if_kernel("do_cpu invoked from kernel context!", regs);

        cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

        switch (cpid) {
        case 0:
                if (cpu_has_llsc)
                        break;

                if (!simulate_llsc(regs))
                        return;
                break;

        case 1:
                own_fpu();
                if (current->used_math) {       /* Using the FPU again.  */
                        restore_fp(current);
                } else {                        /* First time FPU user.  */
                        init_fpu();
                        current->used_math = 1;
                }

                if (!cpu_has_fpu) {
                        int sig = fpu_emulator_cop1Handler(0, regs,
                                                &current->thread.fpu.soft);
                        if (sig)
                                force_sig(sig, current);
                }

                return;

        case 2:
        case 3:
                break;
        }

        force_sig(SIGILL, current);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
        force_sig(SIGILL, current);
}

asmlinkage void do_watch(struct pt_regs *regs)
{
        /*
         * We use the watch exception where available to detect stack
         * overflows.
         */
        dump_tlb_all();
        show_regs(regs);
        panic("Caught WATCH exception - probably caused by stack overflow.");
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
        show_regs(regs);
        dump_tlb_all();
        /*
         * Some chips may have other causes of machine check (e.g. SB1
         * graduation timer)
         */
        panic("Caught Machine Check exception - %scaused by multiple "
              "matching entries in the TLB.",
              (regs->cp0_status & ST0_TS) ? "" : "not ");
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
        /*
         * Game over - no way to handle this if it ever occurs.  Most probably
         * caused by a new unknown cpu type or after another deadly
         * hard/software error.
         */
        show_regs(regs);
        panic("Caught reserved exception %ld - should not happen.",
              (regs->cp0_cause & 0x7f) >> 2);
}

/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
        switch (current_cpu_data.cputype) {
        case CPU_5KC:
                /* Set the PE bit (bit 31) in the c0_ecc register. */
                printk(KERN_INFO "Enabling cache parity protection for "
                       "MIPS 5KC CPUs.\n");
                write_c0_ecc(read_c0_ecc() | 0x80000000);
                break;
        default:
                break;
        }
}

asmlinkage void cache_parity_error(void)
{
        unsigned int reg_val;

        /* For the moment, report the problem and hang. */
        printk("Cache error exception:\n");
        printk("cp0_errorepc == %08lx\n", read_c0_errorepc());
        reg_val = read_c0_cacheerr();
        printk("c0_cacheerr == %08x\n", reg_val);

        printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
               reg_val & (1<<30) ? "secondary" : "primary",
               reg_val & (1<<31) ? "data" : "insn");
        printk("Error bits: %s%s%s%s%s%s%s\n",
               reg_val & (1<<29) ? "ED " : "",
               reg_val & (1<<28) ? "ET " : "",
               reg_val & (1<<26) ? "EE " : "",
               reg_val & (1<<25) ? "EB " : "",
               reg_val & (1<<24) ? "EI " : "",
               reg_val & (1<<23) ? "E1 " : "",
               reg_val & (1<<22) ? "E0 " : "");
        printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
        if (reg_val & (1<<22))
                printk("DErrAddr0: 0x%08x\n", read_c0_derraddr0());

        if (reg_val & (1<<23))
                printk("DErrAddr1: 0x%08x\n", read_c0_derraddr1());
#endif

        panic("Can't handle the cache error!");
}

/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
        unsigned long depc, old_epc;
        unsigned int debug;

        printk("SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
        depc = read_c0_depc();
        debug = read_c0_debug();
        printk("c0_depc = %08lx, DEBUG = %08x\n", depc, debug);
        if (debug & 0x80000000) {
                /*
                 * In branch delay slot.
                 * We cheat a little bit here and use EPC to calculate the
                 * debug return address (DEPC).  EPC is restored after the
                 * calculation.
                 */
                old_epc = regs->cp0_epc;
                regs->cp0_epc = depc;
                __compute_return_epc(regs);
                depc = regs->cp0_epc;
                regs->cp0_epc = old_epc;
        } else
                depc += 4;
        write_c0_depc(depc);

#if 0
        printk("\n\n----- Enable EJTAG single stepping ----\n\n");
        write_c0_debug(debug | 0x100);
#endif
}

/*
 * NMI exception handler.
 */
void nmi_exception_handler(struct pt_regs *regs)
{
        printk("NMI taken!!!!\n");
        die("NMI", regs);
        while (1);
}

unsigned long exception_handlers[32];

/*
 * As a side effect of the way this is implemented we're limited
 * to interrupt handlers in the address range from
 * KSEG0 <= x < KSEG0 + 256mb on the Nevada.  Oh well ...
 */
void *set_except_vector(int n, void *addr)
{
        unsigned long handler = (unsigned long) addr;
        unsigned long old_handler = exception_handlers[n];

        exception_handlers[n] = handler;
        if (n == 0 && cpu_has_divec) {
                *(volatile u32 *)(KSEG0+0x200) = 0x08000000 |
                                                 (0x03ffffff & (handler >> 2));
                flush_icache_range(KSEG0+0x200, KSEG0 + 0x204);
        }
        return (void *)old_handler;
}
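
/*
 * Note on the 256mb limit above: the word written at KSEG0+0x200 is a
 * MIPS "j" instruction (opcode 0x08000000) whose 26-bit target field
 * carries handler >> 2, so the jump can only reach addresses within
 * the current 256mb segment.
 */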

asmlinkage int (*save_fp_context)(struct sigcontext *sc);
asmlinkage int (*restore_fp_context)(struct sigcontext *sc);

extern asmlinkage int _save_fp_context(struct sigcontext *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext *sc);

extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);

void __init per_cpu_trap_init(void)
{
        unsigned int cpu = smp_processor_id();

        /* Some firmware leaves the BEV flag set, clear it.  */
        clear_c0_status(ST0_CU1|ST0_CU2|ST0_CU3|ST0_BEV);

        /*
         * Some MIPS CPUs have a dedicated interrupt vector which reduces the
         * interrupt processing overhead.  Use it where available.
         */
        if (cpu_has_divec)
                set_c0_cause(CAUSEF_IV);

        cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
        write_c0_context(cpu << 23);
}

void __init trap_init(void)
{
        extern char except_vec1_generic;
        extern char except_vec3_generic, except_vec3_r4000;
        extern char except_vec_ejtag_debug;
        extern char except_vec4;
        unsigned long i;

        per_cpu_trap_init();

        /* Copy the generic exception handler code to its final destination. */
        memcpy((void *)(KSEG0 + 0x80), &except_vec1_generic, 0x80);

        /*
         * Setup default vectors
         */
        for (i = 0; i <= 31; i++)
                set_except_vector(i, handle_reserved);

        /*
         * Copy the EJTAG debug exception vector handler code to its final
         * destination.
         */
        if (cpu_has_ejtag)
                memcpy((void *)(KSEG0 + 0x300), &except_vec_ejtag_debug, 0x80);

        /*
         * Only some CPUs have the watch exceptions or a dedicated
         * interrupt vector.
         */
        if (cpu_has_watch)
                set_except_vector(23, handle_watch);

        /*
         * Some MIPS CPUs have a dedicated interrupt vector which reduces the
         * interrupt processing overhead.  Use it where available.
         */
        if (cpu_has_divec)
                memcpy((void *)(KSEG0 + 0x200), &except_vec4, 8);

        /*
         * Some CPUs can enable/disable cache parity detection, but they
         * do it in different ways.
         */
        parity_protection_init();

        /*
         * The Data Bus Errors / Instruction Bus Errors are signaled
         * by external hardware.  Therefore these two exceptions
         * may have board specific handlers.
         */
        if (board_be_init)
                board_be_init();

        set_except_vector(1, handle_mod);
        set_except_vector(2, handle_tlbl);
        set_except_vector(3, handle_tlbs);
        set_except_vector(4, handle_adel);
        set_except_vector(5, handle_ades);

        set_except_vector(6, handle_ibe);
        set_except_vector(7, handle_dbe);

        set_except_vector(8, handle_sys);
        set_except_vector(9, handle_bp);
        set_except_vector(10, handle_ri);
        set_except_vector(11, handle_cpu);
        set_except_vector(12, handle_ov);
        set_except_vector(13, handle_tr);
        set_except_vector(22, handle_mdmx);

        if (cpu_has_fpu && !cpu_has_nofpuex)
                set_except_vector(15, handle_fpe);

        if (cpu_has_mcheck)
                set_except_vector(24, handle_mcheck);

        if (cpu_has_vce)
                memcpy((void *)(KSEG0 + 0x180), &except_vec3_r4000, 0x80);
        else if (cpu_has_4kex)
                memcpy((void *)(KSEG0 + 0x180), &except_vec3_generic, 0x80);
        else
                memcpy((void *)(KSEG0 + 0x080), &except_vec3_generic, 0x80);

        if (current_cpu_data.cputype == CPU_R6000 ||
            current_cpu_data.cputype == CPU_R6000A) {
                /*
                 * The R6000 is the only R-series CPU that features a machine
                 * check exception (similar to the R4000 cache error) and
                 * unaligned ldc1/sdc1 exception.  The handlers have not been
                 * written yet.  Well, anyway there is no R6000 machine on the
                 * current list of targets for Linux/MIPS.
                 * (Duh, crap, there is someone with a triple R6k machine)
                 */
                //set_except_vector(14, handle_mc);
                //set_except_vector(15, handle_ndc);
        }

        if (cpu_has_fpu) {
                save_fp_context = _save_fp_context;
                restore_fp_context = _restore_fp_context;
        } else {
                save_fp_context = fpu_emulator_save_context;
                restore_fp_context = fpu_emulator_restore_context;
        }

        flush_icache_range(KSEG0, KSEG0 + 0x400);

        if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
                set_c0_status(ST0_XX);

        atomic_inc(&init_mm.mm_count);  /* XXX UP?  */
        current->active_mm = &init_mm;

        /* XXX Must be done for all CPUs */
        TLBMISS_HANDLER_SETUP();
}