/*
 *  linux/arch/parisc/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/kallsyms.h>
#include <linux/bug.h>

#include <asm/assembly.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <asm/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "../math-emu/math-emu.h"       /* for handle_fpe() */

#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
                          /*  dumped to the console via printk)          */

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
DEFINE_SPINLOCK(pa_dbit_lock);
#endif

void parisc_show_stack(struct task_struct *t, unsigned long *sp,
        struct pt_regs *regs);

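/* Format the low 'nbits' bits of 'x' into 'buf' as an ASCII string of
 * '0'/'1' characters, most significant bit first, and NUL-terminate it.
 * Used below to render the PSW and FPSR bit-by-bit.  Returns nbits. */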
static int printbinary(char *buf, unsigned long x, int nbits)
{
        unsigned long mask = 1UL << (nbits - 1);
        while (mask != 0) {
                *buf++ = (mask & x ? '1' : '0');
                mask >>= 1;
        }
        *buf = '\0';

        return nbits;
}

#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"  /* fpregs are 64-bit always */

#define PRINTREGS(lvl,r,f,fmt,x)        \
        printk("%s%s%02d-%02d  " fmt " " fmt " " fmt " " fmt "\n",     \
                lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],            \
                (r)[(x)+2], (r)[(x)+3])

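/* Dump the PSW (saved in gr[0] of the trap frame) as a labelled bit string,
 * then all 32 general registers, four per line. */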
static void print_gr(char *level, struct pt_regs *regs)
{
        int i;
        char buf[64];

        printk("%s\n", level);
        printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
        printbinary(buf, regs->gr[0], 32);
        printk("%sPSW: %s %s\n", level, buf, print_tainted());

        for (i = 0; i < 32; i += 4)
                PRINTREGS(level, regs->gr, "r", RFMT, i);
}

static void print_fr(char *level, struct pt_regs *regs)
{
        int i;
        char buf[64];
        struct { u32 sw[2]; } s;

        /* FR are 64bit everywhere. Need to use asm to get the content
         * of fpsr/fper1, and we assume that we won't have a FP Identify
         * in our way, otherwise we're screwed.
         * The fldd is used to restore the T-bit if there was one, as the
         * store clears it anyway.
         * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
        asm volatile ("fstd %%fr0,0(%1) \n\t"
                      "fldd 0(%1),%%fr0 \n\t"
                      : "=m" (s) : "r" (&s) : "r0");

        printk("%s\n", level);
        printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
        printbinary(buf, s.sw[0], 32);
        printk("%sFPSR: %s\n", level, buf);
        printk("%sFPER1: %08x\n", level, s.sw[1]);

        /* here we'll print fr0 again, tho it'll be meaningless */
        for (i = 0; i < 32; i += 4)
                PRINTREGS(level, regs->fr, "fr", FFMT, i);
}

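/* Dump a complete trap frame: PSW and general registers, space registers,
 * floating-point state (only meaningful, and only printed, for user-mode
 * frames), the interruption registers and a backtrace.  User faults are
 * logged at KERN_DEBUG, kernel faults at KERN_CRIT. */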
void show_regs(struct pt_regs *regs)
{
        int i;
        char *level;
        unsigned long cr30, cr31;

        level = user_mode(regs) ? KERN_DEBUG : KERN_CRIT;

        print_gr(level, regs);

        for (i = 0; i < 8; i += 4)
                PRINTREGS(level, regs->sr, "sr", RFMT, i);

        if (user_mode(regs))
                print_fr(level, regs);

        cr30 = mfctl(30);
        cr31 = mfctl(31);
        printk("%s\n", level);
        printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
               level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
        printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
               level, regs->iir, regs->isr, regs->ior);
        printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
               level, current_thread_info()->cpu, cr30, cr31);
        printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
        printk(level);
        print_symbol(" IAOQ[0]: %s\n", regs->iaoq[0]);
        printk(level);
        print_symbol(" IAOQ[1]: %s\n", regs->iaoq[1]);
        printk(level);
        print_symbol(" RP(r2): %s\n", regs->gr[2]);

        parisc_show_stack(current, NULL, regs);
}

void dump_stack(void)
{
        show_stack(NULL, NULL);
}

EXPORT_SYMBOL(dump_stack);

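/* Unwind the stack one frame at a time and print up to 16 frames whose
 * return address lies in kernel text; frames with other addresses are
 * skipped. */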
static void do_show_stack(struct unwind_frame_info *info)
{
        int i = 1;

        printk(KERN_CRIT "Backtrace:\n");
        while (i <= 16) {
                if (unwind_once(info) < 0 || info->ip == 0)
                        break;

                if (__kernel_text_address(info->ip)) {
                        printk("%s [<" RFMT ">] ", (i&0x3)==1 ? KERN_CRIT : "", info->ip);
#ifdef CONFIG_KALLSYMS
                        print_symbol("%s\n", info->ip);
#else
                        if ((i & 0x03) == 0)
                                printk("\n");
#endif
                        i++;
                }
        }
        printk("\n");
}

void parisc_show_stack(struct task_struct *task, unsigned long *sp,
        struct pt_regs *regs)
{
        struct unwind_frame_info info;
        struct task_struct *t;

        t = task ? task : current;
        if (regs) {
                unwind_frame_init(&info, t, regs);
                goto show_stack;
        }

        if (t == current) {
                unsigned long sp;

HERE:
                asm volatile ("copy %%r30, %0" : "=r"(sp));
                {
                        struct pt_regs r;

                        memset(&r, 0, sizeof(struct pt_regs));
                        r.iaoq[0] = (unsigned long)&&HERE;
                        r.gr[2] = (unsigned long)__builtin_return_address(0);
                        r.gr[30] = sp;

                        unwind_frame_init(&info, current, &r);
                }
        } else {
                unwind_frame_init_from_blocked_task(&info, t);
        }

show_stack:
        do_show_stack(&info);
}

void show_stack(struct task_struct *t, unsigned long *sp)
{
        return parisc_show_stack(t, sp, NULL);
}

int is_valid_bugaddr(unsigned long iaoq)
{
        return 1;
}

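/* Report a fatal fault.  If the fault happened in user mode this only logs
 * it (when err != 0) and returns; for kernel-mode faults it dumps the
 * register state and backtrace and then kills the current task, or panics
 * if the fault happened in interrupt context. */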
void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
        if (user_mode(regs)) {
                if (err == 0)
                        return; /* STFU */

                printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
                        current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
#ifdef PRINT_USER_FAULTS
                /* XXX for debugging only */
                show_regs(regs);
#endif
                return;
        }

        oops_in_progress = 1;

        /* Amuse the user in a SPARC fashion */
        if (err) printk(
KERN_CRIT "      _______________________________ \n"
KERN_CRIT "     < Your System ate a SPARC! Gah! >\n"
KERN_CRIT "      ------------------------------- \n"
KERN_CRIT "             \\   ^__^\n"
KERN_CRIT "                 (xx)\\_______\n"
KERN_CRIT "                 (__)\\       )\\/\\\n"
KERN_CRIT "                  U  ||----w |\n"
KERN_CRIT "                     ||     ||\n");

        /* unlock the pdc lock if necessary */
        pdc_emergency_unlock();

        /* maybe the kernel hasn't booted very far yet and hasn't been able
         * to initialize the serial or STI console. In that case we should
         * re-enable the pdc console, so that the user will be able to
         * identify the problem. */
        if (!console_drivers)
                pdc_console_restart();

        if (err)
                printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
                        current->comm, task_pid_nr(current), str, err);

        /* Wot's wrong wif bein' racy? */
        if (current->thread.flags & PARISC_KERNEL_DEATH) {
                printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
                local_irq_enable();
                while (1);
        }

        current->thread.flags |= PARISC_KERNEL_DEATH;

        show_regs(regs);
        dump_stack();
        add_taint(TAINT_DIE);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops) {
                printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
                ssleep(5);
                panic("Fatal exception");
        }

        do_exit(SIGSEGV);
}

int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
{
        return syscall(regs);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
        struct siginfo si;

        si.si_signo = SIGTRAP;
        si.si_errno = 0;
        si.si_code = wot;
        si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
        force_sig_info(SIGTRAP, &si, current);
}

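/* Trap 9: a BREAK instruction was executed.  The instruction word is in the
 * IIR: the low five bits hold the im5 field and bits 13..25 the im13 field
 * (gdb's "break 4,8" encodes to 0x10004).  A kernel-mode break with the
 * BUG() code is routed to report_bug(); anything else raises SIGTRAP in the
 * faulting process. */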
static void handle_break(struct pt_regs *regs)
{
        unsigned iir = regs->iir;

        if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
                /* check if a BUG() or WARN() trapped here.  */
                enum bug_trap_type tt;
                tt = report_bug(regs->iaoq[0] & ~3, regs);
                if (tt == BUG_TRAP_TYPE_WARN) {
                        regs->iaoq[0] += 4;
                        regs->iaoq[1] += 4;
                        return; /* return to next instruction when WARN_ON().  */
                }
                die_if_kernel("Unknown kernel breakpoint", regs,
                        (tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
        }

#ifdef PRINT_USER_FAULTS
        if (unlikely(iir != GDB_BREAK_INSN)) {
                printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
                        iir & 31, (iir>>13) & ((1<<13)-1),
                        task_pid_nr(current), current->comm);
                show_regs(regs);
        }
#endif

        /* send standard GDB signal */
        handle_gdb_break(regs, TRAP_BRKPT);
}

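/* Low-priority machine check handling.  cpu_lpmc is a function-pointer hook
 * so that model-specific code can substitute its own handler; the default
 * simply logs the trap number and register state. */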
static void default_trap(int code, struct pt_regs *regs)
{
        printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
        show_regs(regs);
}

void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;

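/* After an HPMC the architected state is not in a normal trap frame but in
 * the PIM (Processor Internal Memory) image that the HPMC path leaves in
 * hpmc_pim_data.  Rebuild a pt_regs from it, using the narrow (PA1.1) or
 * wide (PA2.0) layout depending on the CPU, so the usual register-dump and
 * unwind code can be reused after a machine check. */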
void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
        register int i;
        extern unsigned int hpmc_pim_data[];
        struct pdc_hpmc_pim_11 *pim_narrow;
        struct pdc_hpmc_pim_20 *pim_wide;

        if (boot_cpu_data.cpu_type >= pcxu) {

                pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

                /*
                 * Note: The following code will probably generate a
                 * bunch of truncation error warnings from the compiler.
                 * Could be handled with an ifdef, but perhaps there
                 * is a better way.
                 */

                regs->gr[0] = pim_wide->cr[22];

                for (i = 1; i < 32; i++)
                        regs->gr[i] = pim_wide->gr[i];

                for (i = 0; i < 32; i++)
                        regs->fr[i] = pim_wide->fr[i];

                for (i = 0; i < 8; i++)
                        regs->sr[i] = pim_wide->sr[i];

                regs->iasq[0] = pim_wide->cr[17];
                regs->iasq[1] = pim_wide->iasq_back;
                regs->iaoq[0] = pim_wide->cr[18];
                regs->iaoq[1] = pim_wide->iaoq_back;

                regs->sar = pim_wide->cr[11];
                regs->iir = pim_wide->cr[19];
                regs->isr = pim_wide->cr[20];
                regs->ior = pim_wide->cr[21];
        }
        else {
                pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

                regs->gr[0] = pim_narrow->cr[22];

                for (i = 1; i < 32; i++)
                        regs->gr[i] = pim_narrow->gr[i];

                for (i = 0; i < 32; i++)
                        regs->fr[i] = pim_narrow->fr[i];

                for (i = 0; i < 8; i++)
                        regs->sr[i] = pim_narrow->sr[i];

                regs->iasq[0] = pim_narrow->cr[17];
                regs->iasq[1] = pim_narrow->iasq_back;
                regs->iaoq[0] = pim_narrow->cr[18];
                regs->iaoq[1] = pim_narrow->iaoq_back;

                regs->sar = pim_narrow->cr[11];
                regs->iir = pim_narrow->cr[19];
                regs->isr = pim_narrow->cr[20];
                regs->ior = pim_narrow->cr[21];
        }

        /*
         * The following fields only have meaning if we came through
         * another path. So just zero them here.
         */

        regs->ksp = 0;
        regs->kpc = 0;
        regs->orig_r28 = 0;
}

/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMC's.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
        static DEFINE_SPINLOCK(terminate_lock);

        oops_in_progress = 1;

        set_eiem(0);
        local_irq_disable();
        spin_lock(&terminate_lock);

        /* unlock the pdc lock if necessary */
        pdc_emergency_unlock();

        /* restart pdc console if necessary */
        if (!console_drivers)
                pdc_console_restart();

        /* Not all paths will gutter the processor... */
        switch(code){

        case 1:
                transfer_pim_to_trap_frame(regs);
                break;

        default:
                /* Fall through */
                break;

        }

        {
                /* show_stack(NULL, (unsigned long *)regs->gr[30]); */
                struct unwind_frame_info info;
                unwind_frame_init(&info, current, regs);
                do_show_stack(&info);
        }

        printk("\n");
        printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
                        msg, code, regs, offset);
        show_regs(regs);

        spin_unlock(&terminate_lock);

        /* put soft power button back under hardware control;
         * if the user had pressed it once at any time, the
         * system will shut down immediately right here. */
        pdc_soft_power_button(0);

        /* Call kernel panic() so reboot timeouts work properly
         * FIXME: This function should be on the list of
         * panic notifiers, and we should call panic
         * directly from the location that we wish.
         * e.g. We should not call panic from
         * parisc_terminate, but rather the other way around.
         * This hack works, prints the panic message twice,
         * and it enables reboot timers!
         */
        panic(msg);
}

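/* Common C entry point for interruptions.  'code' is the hardware trap
 * number (1 = HPMC, 6/15/16/17 = TLB miss or page fault, and so on).
 * Traps that resolve to a page fault set fault_address/fault_space and
 * break out of the switch to reach do_page_fault() at the bottom; all
 * other traps are handled or signalled within the switch itself. */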
void handle_interruption(int code, struct pt_regs *regs)
{
        unsigned long fault_address = 0;
        unsigned long fault_space = 0;
        struct siginfo si;

        if (code == 1)
                pdc_console_restart();  /* switch back to pdc if HPMC */
        else
                local_irq_enable();

        /* Security check:
         * If the priority level is still user, and the
         * faulting space is not equal to the active space
         * then the user is attempting something in a space
         * that does not belong to them. Kill the process.
         *
         * This is normally the situation when the user
         * attempts to jump into the kernel space at the
         * wrong offset, be it at the gateway page or a
         * random location.
         *
         * We cannot normally signal the process because it
         * could *be* on the gateway page, and processes
         * executing on the gateway page can't have signals
         * delivered.
         *
         * We merely readjust the address into the user's
         * space, at a destination address of zero, and
         * allow processing to continue.
         */
        if (((unsigned long)regs->iaoq[0] & 3) &&
            ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
                /* Kill the user process later */
                regs->iaoq[0] = 0 | 3;
                regs->iaoq[1] = regs->iaoq[0] + 4;
                regs->iasq[0] = regs->iasq[1] = regs->sr[7];
                regs->gr[0] &= ~PSW_B;
                return;
        }

#if 0
        printk(KERN_CRIT "Interruption # %d\n", code);
#endif

        switch(code) {

        case  1:
                /* High-priority machine check (HPMC) */

                /* set up a new led state on systems shipped with a LED State panel */
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

                parisc_terminate("High Priority Machine Check (HPMC)",
                                regs, code, 0);
                /* NOT REACHED */

        case  2:
                /* Power failure interrupt */
                printk(KERN_CRIT "Power failure interrupt !\n");
                return;

        case  3:
                /* Recovery counter trap */
                regs->gr[0] &= ~PSW_R;
                if (user_space(regs))
                        handle_gdb_break(regs, TRAP_TRACE);
                /* else this must be the start of a syscall - just let it run */
                return;

        case  5:
                /* Low-priority machine check */
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

                flush_cache_all();
                flush_tlb_all();
                cpu_lpmc(5, regs);
                return;

        case  6:
                /* Instruction TLB miss fault/Instruction page fault */
                fault_address = regs->iaoq[0];
                fault_space   = regs->iasq[0];
                break;

        case  8:
                /* Illegal instruction trap */
                die_if_kernel("Illegal instruction", regs, code);
                si.si_code = ILL_ILLOPC;
                goto give_sigill;

        case  9:
                /* Break instruction trap */
                handle_break(regs);
                return;

        case 10:
                /* Privileged operation trap */
                die_if_kernel("Privileged operation", regs, code);
                si.si_code = ILL_PRVOPC;
                goto give_sigill;

        case 11:
                /* Privileged register trap */
                if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

                        /* This is a MFCTL cr26/cr27 to gr instruction.
                         * PCXS traps on this, so we need to emulate it.
                         */

                        if (regs->iir & 0x00200000)
                                regs->gr[regs->iir & 0x1f] = mfctl(27);
                        else
                                regs->gr[regs->iir & 0x1f] = mfctl(26);

                        regs->iaoq[0] = regs->iaoq[1];
                        regs->iaoq[1] += 4;
                        regs->iasq[0] = regs->iasq[1];
                        return;
                }

                die_if_kernel("Privileged register usage", regs, code);
                si.si_code = ILL_PRVREG;
        give_sigill:
                si.si_signo = SIGILL;
                si.si_errno = 0;
                si.si_addr = (void __user *) regs->iaoq[0];
                force_sig_info(SIGILL, &si, current);
                return;

        case 12:
                /* Overflow Trap, let the userland signal handler do the cleanup */
                si.si_signo = SIGFPE;
                si.si_code = FPE_INTOVF;
                si.si_addr = (void __user *) regs->iaoq[0];
                force_sig_info(SIGFPE, &si, current);
                return;

        case 13:
                /* Conditional Trap
                   The condition succeeds in an instruction which traps
                   on condition  */
                if (user_mode(regs)) {
                        si.si_signo = SIGFPE;
                        /* Set to zero, and let the userspace app figure it out from
                           the insn pointed to by si_addr */
                        si.si_code = 0;
                        si.si_addr = (void __user *) regs->iaoq[0];
                        force_sig_info(SIGFPE, &si, current);
                        return;
                }
                /* The kernel doesn't want to handle condition codes */
                break;

        case 14:
                /* Assist Exception Trap, i.e. floating point exception. */
                die_if_kernel("Floating point exception", regs, 0); /* quiet */
                handle_fpe(regs);
                return;

        case 15:
                /* Data TLB miss fault/Data page fault */
                /* Fall through */
        case 16:
                /* Non-access instruction TLB miss fault */
                /* The instruction TLB entry needed for the target address of the FIC
                   is absent, and hardware can't find it, so we get to cleanup */
                /* Fall through */
        case 17:
                /* Non-access data TLB miss fault/Non-access data page fault */
                /* FIXME:
                   Still need to add slow path emulation code here!
                   If the insn used a non-shadow register, then the tlb
                   handlers could not have their side-effect (e.g. probe
                   writing to a target register) emulated since rfir would
                   erase the changes to said register. Instead we have to
                   setup everything, call this function we are in, and emulate
                   by hand. Technically we need to emulate:
                   fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
                */
                fault_address = regs->ior;
                fault_space = regs->isr;
                break;

        case 18:
                /* PCXS only -- later cpu's split this into types 26,27 & 28 */
                /* Check for unaligned access */
                if (check_unaligned(regs)) {
                        handle_unaligned(regs);
                        return;
                }
                /* Fall Through */
        case 26:
                /* PCXL: Data memory access rights trap */
                fault_address = regs->ior;
                fault_space   = regs->isr;
                break;

        case 19:
                /* Data memory break trap */
                regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
                /* fall thru */
        case 21:
                /* Page reference trap */
                handle_gdb_break(regs, TRAP_HWBKPT);
                return;

        case 25:
                /* Taken branch trap */
                regs->gr[0] &= ~PSW_T;
                if (user_space(regs))
                        handle_gdb_break(regs, TRAP_BRANCH);
                /* else this must be the start of a syscall - just let it
                 * run.
                 */
                return;

        case  7:
                /* Instruction access rights */
                /* PCXL: Instruction memory protection trap */

                /*
                 * This could be caused by either: 1) a process attempting
                 * to execute within a vma that does not have execute
                 * permission, or 2) an access rights violation caused by a
                 * flush only translation set up by ptep_get_and_clear().
                 * So we check the vma permissions to differentiate the two.
                 * If the vma indicates we have execute permission, then
                 * the cause is the latter one. In this case, we need to
                 * call do_page_fault() to fix the problem.
                 */

                if (user_mode(regs)) {
                        struct vm_area_struct *vma;

                        down_read(&current->mm->mmap_sem);
                        vma = find_vma(current->mm,regs->iaoq[0]);
                        if (vma && (regs->iaoq[0] >= vma->vm_start)
                                && (vma->vm_flags & VM_EXEC)) {

                                fault_address = regs->iaoq[0];
                                fault_space = regs->iasq[0];

                                up_read(&current->mm->mmap_sem);
                                break; /* call do_page_fault() */
                        }
                        up_read(&current->mm->mmap_sem);
                }
                /* Fall Through */
        case 27:
                /* Data memory protection ID trap */
                die_if_kernel("Protection id trap", regs, code);
                si.si_code = SEGV_MAPERR;
                si.si_signo = SIGSEGV;
                si.si_errno = 0;
                if (code == 7)
                        si.si_addr = (void __user *) regs->iaoq[0];
                else
                        si.si_addr = (void __user *) regs->ior;
                force_sig_info(SIGSEGV, &si, current);
                return;

        case 28:
                /* Unaligned data reference trap */
                handle_unaligned(regs);
                return;

        default:
                if (user_mode(regs)) {
#ifdef PRINT_USER_FAULTS
                        printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
                            task_pid_nr(current), current->comm);
                        show_regs(regs);
#endif
                        /* SIGBUS, for lack of a better one. */
                        si.si_signo = SIGBUS;
                        si.si_code = BUS_OBJERR;
                        si.si_errno = 0;
                        si.si_addr = (void __user *) regs->ior;
                        force_sig_info(SIGBUS, &si, current);
                        return;
                }
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

                parisc_terminate("Unexpected interruption", regs, code, 0);
                /* NOT REACHED */
        }

        if (user_mode(regs)) {
                if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
#ifdef PRINT_USER_FAULTS
                        if (fault_space == 0)
                                printk(KERN_DEBUG "User Fault on Kernel Space ");
                        else
                                printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
                                       code);
                        printk("pid=%d command='%s'\n", task_pid_nr(current), current->comm);
                        show_regs(regs);
#endif
                        si.si_signo = SIGSEGV;
                        si.si_errno = 0;
                        si.si_code = SEGV_MAPERR;
                        si.si_addr = (void __user *) regs->ior;
                        force_sig_info(SIGSEGV, &si, current);
                        return;
                }
        }
        else {

                /*
                 * The kernel should never fault on its own address space.
                 */

                if (fault_space == 0)
                {
                        pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
                        parisc_terminate("Kernel Fault", regs, code, fault_address);
                }
        }

        do_page_fault(regs, code, fault_address);
}

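/* Wire the in-kernel HPMC handler into the interruption vector table.  The
 * "cows can fly" marker guards against installing over something that is
 * not the fault vector template.  The first vector slot is zeroed, word 7
 * of the following slot records the length of the os_hpmc handler, and
 * word 5 is set to the negated sum of the handler text plus that slot so
 * the whole thing checksums to zero, as the HPMC vector convention
 * expects. */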
int __init check_ivt(void *iva)
{
        extern const u32 os_hpmc[];
        extern const u32 os_hpmc_end[];

        int i;
        u32 check = 0;
        u32 *ivap;
        u32 *hpmcp;
        u32 length;

        if (strcmp((char *)iva, "cows can fly"))
                return -1;

        ivap = (u32 *)iva;

        for (i = 0; i < 8; i++)
                *ivap++ = 0;

        /* Compute Checksum for HPMC handler */

        length = os_hpmc_end - os_hpmc;
        ivap[7] = length;

        hpmcp = (u32 *)os_hpmc;

        for (i=0; i<length/4; i++)
                check += *hpmcp++;

        for (i=0; i<8; i++)
                check += ivap[i];

        ivap[5] = -check;

        return 0;
}

#ifndef CONFIG_64BIT
extern const void fault_vector_11;
#endif
extern const void fault_vector_20;

void __init trap_init(void)
{
        void *iva;

        if (boot_cpu_data.cpu_type >= pcxu)
                iva = (void *) &fault_vector_20;
        else
#ifdef CONFIG_64BIT
                panic("Can't boot 64-bit OS on PA1.1 processor!");
#else
                iva = (void *) &fault_vector_11;
#endif

        if (check_ivt(iva))
                panic("IVT invalid");
}