/*
 *  linux/arch/parisc/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/bug.h>

#include <asm/assembly.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <asm/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "../math-emu/math-emu.h"	/* for handle_fpe() */

#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
                          /*  dumped to the console via printk)          */

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
DEFINE_SPINLOCK(pa_dbit_lock);
#endif

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
        struct pt_regs *regs);
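
/*
 * printbinary() renders the low 'nbits' bits of 'x' into 'buf' as a
 * string of '0'/'1' characters (most significant bit first) and
 * NUL-terminates it; it is used below to display the PSW and FPSR
 * bit fields next to their per-bit legends.
 */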
static int printbinary(char *buf, unsigned long x, int nbits)
{
        unsigned long mask = 1UL << (nbits - 1);
        while (mask != 0) {
                *buf++ = (mask & x ? '1' : '0');
                mask >>= 1;
        }
        *buf = '\0';

        return nbits;
}

#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"	/* fpregs are 64-bit always */

#define PRINTREGS(lvl,r,f,fmt,x)	\
        printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n",	\
                lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
                (r)[(x)+2], (r)[(x)+3])
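
/*
 * print_gr() and print_fr() below dump the general and floating-point
 * register files at the given printk level, four registers per line
 * via PRINTREGS().
 */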
static void print_gr(char *level, struct pt_regs *regs)
{
        int i;
        char buf[64];

        printk("%s\n", level);
        printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
        printbinary(buf, regs->gr[0], 32);
        printk("%sPSW: %s %s\n", level, buf, print_tainted());

        for (i = 0; i < 32; i += 4)
                PRINTREGS(level, regs->gr, "r", RFMT, i);
}

static void print_fr(char *level, struct pt_regs *regs)
{
        int i;
        char buf[64];
        struct { u32 sw[2]; } s;

        /* FR are 64bit everywhere. Need to use asm to get the content
         * of fpsr/fper1, and we assume that we won't have a FP Identify
         * in our way, otherwise we're screwed.
         * The fldd is used to restore the T-bit if there was one, as the
         * store clears it anyway.
         * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
        asm volatile ("fstd %%fr0,0(%1) \n\t"
                      "fldd 0(%1),%%fr0 \n\t"
                      : "=m" (s) : "r" (&s) : "r0");

        printk("%s\n", level);
        printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
        printbinary(buf, s.sw[0], 32);
        printk("%sFPSR: %s\n", level, buf);
        printk("%sFPER1: %08x\n", level, s.sw[1]);

        /* here we'll print fr0 again, tho it'll be meaningless */
        for (i = 0; i < 32; i += 4)
                PRINTREGS(level, regs->fr, "fr", FFMT, i);
}
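
/*
 * show_regs() is the top-level register dump used by die_if_kernel()
 * and parisc_terminate(); user-mode state is printed at KERN_DEBUG,
 * kernel-mode state at KERN_CRIT, and for kernel-mode dumps the
 * backtrace is appended as well.
 */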
void show_regs(struct pt_regs *regs)
{
        int i, user;
        char *level;
        unsigned long cr30, cr31;

        user = user_mode(regs);
        level = user ? KERN_DEBUG : KERN_CRIT;

        print_gr(level, regs);

        for (i = 0; i < 8; i += 4)
                PRINTREGS(level, regs->sr, "sr", RFMT, i);

        if (user)
                print_fr(level, regs);

        cr30 = mfctl(30);
        cr31 = mfctl(31);
        printk("%s\n", level);
        printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
                level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
        printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
                level, regs->iir, regs->isr, regs->ior);
        printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
                level, current_thread_info()->cpu, cr30, cr31);
        printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);

        if (user) {
                printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
                printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
                printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
        } else {
                printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
                printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
                printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);

                parisc_show_stack(current, NULL, regs);
        }
}

void dump_stack(void)
{
        show_stack(NULL, NULL);
}

EXPORT_SYMBOL(dump_stack);
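
/*
 * Walk the unwind state in 'info' and print up to 16 frames whose
 * return addresses lie in kernel text.
 */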
static void do_show_stack(struct unwind_frame_info *info)
{
        int i = 1;

        printk(KERN_CRIT "Backtrace:\n");
        while (i <= 16) {
                if (unwind_once(info) < 0 || info->ip == 0)
                        break;

                if (__kernel_text_address(info->ip)) {
                        printk(KERN_CRIT " [<" RFMT ">] %pS\n",
                                info->ip, (void *) info->ip);
                        i++;
                }
        }
        printk(KERN_CRIT "\n");
}

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
        struct pt_regs *regs)
{
        struct unwind_frame_info info;
        struct task_struct *t;

        t = task ? task : current;
        if (regs) {
                unwind_frame_init(&info, t, regs);
                goto show_stack;
        }

        if (t == current) {
                unsigned long sp;

HERE:
                asm volatile ("copy %%r30, %0" : "=r"(sp));
                {
                        struct pt_regs r;

                        memset(&r, 0, sizeof(struct pt_regs));
                        r.iaoq[0] = (unsigned long)&&HERE;
                        r.gr[2] = (unsigned long)__builtin_return_address(0);
                        r.gr[30] = sp;

                        unwind_frame_init(&info, current, &r);
                }
        } else {
                unwind_frame_init_from_blocked_task(&info, t);
        }

show_stack:
        do_show_stack(&info);
}

void show_stack(struct task_struct *t, unsigned long *sp)
{
        return parisc_show_stack(t, sp, NULL);
}

int is_valid_bugaddr(unsigned long iaoq)
{
        return 1;
}
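
/*
 * die_if_kernel(): for a user-mode fault just log it (when
 * PRINT_USER_FAULTS is enabled) and return; for a kernel-mode fault
 * dump state, taint the kernel and kill the current task (or panic
 * if we are in interrupt context or panic_on_oops is set).
 */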
void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
        if (user_mode(regs)) {
                if (err == 0)
                        return; /* STFU */

                printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
                        current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
#ifdef PRINT_USER_FAULTS
                /* XXX for debugging only */
                show_regs(regs);
#endif
                return;
        }

        oops_in_progress = 1;

        /* Amuse the user in a SPARC fashion */
        if (err) printk(
KERN_CRIT "      _______________________________ \n"
KERN_CRIT "     < Your System ate a SPARC! Gah! >\n"
KERN_CRIT "      ------------------------------- \n"
KERN_CRIT "             \\   ^__^\n"
KERN_CRIT "              \\  (xx)\\_______\n"
KERN_CRIT "                 (__)\\       )\\/\\\n"
KERN_CRIT "                  U  ||----w |\n"
KERN_CRIT "                     ||     ||\n");

        /* unlock the pdc lock if necessary */
        pdc_emergency_unlock();

        /* maybe the kernel hasn't booted very far yet and hasn't been able
         * to initialize the serial or STI console. In that case we should
         * re-enable the pdc console, so that the user will be able to
         * identify the problem. */
        if (!console_drivers)
                pdc_console_restart();

        if (err)
                printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
                        current->comm, task_pid_nr(current), str, err);

        /* Wot's wrong wif bein' racy? */
        if (current->thread.flags & PARISC_KERNEL_DEATH) {
                printk(KERN_CRIT "%s() recursion detected.\n", __func__);
                local_irq_enable();
                while (1);
        }
        current->thread.flags |= PARISC_KERNEL_DEATH;

        show_regs(regs);
        dump_stack();
        add_taint(TAINT_DIE);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops) {
                printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
                ssleep(5);
                panic("Fatal exception");
        }

        do_exit(SIGSEGV);
}

int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
{
        return syscall(regs);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
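/* GDB_BREAK_INSN 0x10004 decodes as im5 = 4, im13 = 8 under the field
 * extraction used in handle_break() below (iir & 31 and
 * (iir >> 13) & 0x1fff), i.e. the "break 4,8" instruction gdb plants. */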

static void handle_gdb_break(struct pt_regs *regs, int wot)
{
        struct siginfo si;

        si.si_signo = SIGTRAP;
        si.si_errno = 0;
        si.si_code = wot;
        si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
        force_sig_info(SIGTRAP, &si, current);
}

static void handle_break(struct pt_regs *regs)
{
        unsigned iir = regs->iir;

        if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
                /* check if a BUG() or WARN() trapped here. */
                enum bug_trap_type tt;
                tt = report_bug(regs->iaoq[0] & ~3, regs);
                if (tt == BUG_TRAP_TYPE_WARN) {
                        regs->iaoq[0] += 4;
                        regs->iaoq[1] += 4;
                        return; /* return to next instruction when WARN_ON(). */
                }
                die_if_kernel("Unknown kernel breakpoint", regs,
                        (tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
        }

#ifdef PRINT_USER_FAULTS
        if (unlikely(iir != GDB_BREAK_INSN)) {
                printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
                        iir & 31, (iir>>13) & ((1<<13)-1),
                        task_pid_nr(current), current->comm);
                show_regs(regs);
        }
#endif

        /* send standard GDB signal */
        handle_gdb_break(regs, TRAP_BRKPT);
}

static void default_trap(int code, struct pt_regs *regs)
{
        printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
        show_regs(regs);
}

void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;
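
/*
 * Copy the PIM (Processor Internal Memory) image recorded by firmware
 * at HPMC/LPMC time into a pt_regs, so the normal register and stack
 * dumping helpers can be used on it.  Which PIM layout applies
 * (narrow PA1.1 vs. wide PA2.0) depends on the CPU type.
 */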
void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
        register int i;
        extern unsigned int hpmc_pim_data[];
        struct pdc_hpmc_pim_11 *pim_narrow;
        struct pdc_hpmc_pim_20 *pim_wide;

        if (boot_cpu_data.cpu_type >= pcxu) {

                pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

                /*
                 * Note: The following code will probably generate a
                 * bunch of truncation error warnings from the compiler.
                 * Could be handled with an ifdef, but perhaps there
                 * is a better way.
                 */

                regs->gr[0] = pim_wide->cr[22];

                for (i = 1; i < 32; i++)
                        regs->gr[i] = pim_wide->gr[i];

                for (i = 0; i < 32; i++)
                        regs->fr[i] = pim_wide->fr[i];

                for (i = 0; i < 8; i++)
                        regs->sr[i] = pim_wide->sr[i];

                regs->iasq[0] = pim_wide->cr[17];
                regs->iasq[1] = pim_wide->iasq_back;
                regs->iaoq[0] = pim_wide->cr[18];
                regs->iaoq[1] = pim_wide->iaoq_back;

                regs->sar = pim_wide->cr[11];
                regs->iir = pim_wide->cr[19];
                regs->isr = pim_wide->cr[20];
                regs->ior = pim_wide->cr[21];
        }
        else {
                pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

                regs->gr[0] = pim_narrow->cr[22];

                for (i = 1; i < 32; i++)
                        regs->gr[i] = pim_narrow->gr[i];

                for (i = 0; i < 32; i++)
                        regs->fr[i] = pim_narrow->fr[i];

                for (i = 0; i < 8; i++)
                        regs->sr[i] = pim_narrow->sr[i];

                regs->iasq[0] = pim_narrow->cr[17];
                regs->iasq[1] = pim_narrow->iasq_back;
                regs->iaoq[0] = pim_narrow->cr[18];
                regs->iaoq[1] = pim_narrow->iaoq_back;

                regs->sar = pim_narrow->cr[11];
                regs->iir = pim_narrow->cr[19];
                regs->isr = pim_narrow->cr[20];
                regs->ior = pim_narrow->cr[21];
        }

        /*
         * The following fields only have meaning if we came through
         * another path. So just zero them here.
         */

        regs->ksp = 0;
        regs->kpc = 0;
        regs->orig_r28 = 0;
}

/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMC's.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
        static DEFINE_SPINLOCK(terminate_lock);

        oops_in_progress = 1;

        set_eiem(0);
        local_irq_disable();
        spin_lock(&terminate_lock);

        /* unlock the pdc lock if necessary */
        pdc_emergency_unlock();

        /* restart pdc console if necessary */
        if (!console_drivers)
                pdc_console_restart();

        /* Not all paths will gutter the processor... */
        switch(code){

        case 1:
                transfer_pim_to_trap_frame(regs);
                break;

        default:
                /* Fall through */
                break;

        }

        {
                /* show_stack(NULL, (unsigned long *)regs->gr[30]); */
                struct unwind_frame_info info;
                unwind_frame_init(&info, current, regs);
                do_show_stack(&info);
        }

        printk("\n");
        printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
                msg, code, regs, offset);
        show_regs(regs);

        spin_unlock(&terminate_lock);

        /* put soft power button back under hardware control;
         * if the user had pressed it once at any time, the
         * system will shut down immediately right here. */
        pdc_soft_power_button(0);

        /* Call kernel panic() so reboot timeouts work properly
         * FIXME: This function should be on the list of
         * panic notifiers, and we should call panic
         * directly from the location that we wish.
         * e.g. We should not call panic from
         * parisc_terminate, but rather the other way around.
         * This hack works, prints the panic message twice,
         * and it enables reboot timers!
         */
        panic(msg);
}
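
/*
 * handle_interruption() is the common C entry point for traps that
 * were not handled entirely in assembly.  'code' is the interruption
 * number; faults that may be resolvable fall through to
 * do_page_fault() at the bottom, everything else is either emulated,
 * signalled to the process, or terminates the machine.
 */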
void handle_interruption(int code, struct pt_regs *regs)
{
        unsigned long fault_address = 0;
        unsigned long fault_space = 0;
        struct siginfo si;

        if (code == 1)
                pdc_console_restart();  /* switch back to pdc if HPMC */
        else
                local_irq_enable();

        /* Security check:
         * If the priority level is still user, and the
         * faulting space is not equal to the active space
         * then the user is attempting something in a space
         * that does not belong to them. Kill the process.
         *
         * This is normally the situation when the user
         * attempts to jump into the kernel space at the
         * wrong offset, be it at the gateway page or a
         * random location.
         *
         * We cannot normally signal the process because it
         * could *be* on the gateway page, and processes
         * executing on the gateway page can't have signals
         * delivered.
         *
         * We merely readjust the address into the users
         * space, at a destination address of zero, and
         * allow processing to continue.
         */
        if (((unsigned long)regs->iaoq[0] & 3) &&
            ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
                /* Kill the user process later */
                regs->iaoq[0] = 0 | 3;
                regs->iaoq[1] = regs->iaoq[0] + 4;
                regs->iasq[0] = regs->iasq[1] = regs->sr[7];
                regs->gr[0] &= ~PSW_B;
                return;
        }

#if 0
        printk(KERN_CRIT "Interruption # %d\n", code);
#endif

        switch(code) {

        case  1:
                /* High-priority machine check (HPMC) */

                /* set up a new led state on systems shipped with a LED State panel */
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

                parisc_terminate("High Priority Machine Check (HPMC)",
                                regs, code, 0);
                /* NOT REACHED */

        case  2:
                /* Power failure interrupt */
                printk(KERN_CRIT "Power failure interrupt !\n");
                return;

        case  3:
                /* Recovery counter trap */
                regs->gr[0] &= ~PSW_R;
                if (user_space(regs))
                        handle_gdb_break(regs, TRAP_TRACE);
                /* else this must be the start of a syscall - just let it run */
                return;

        case  5:
                /* Low-priority machine check */
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

                flush_cache_all();
                flush_tlb_all();
                cpu_lpmc(5, regs);
                return;

        case  6:
                /* Instruction TLB miss fault/Instruction page fault */
                fault_address = regs->iaoq[0];
                fault_space = regs->iasq[0];
                break;

        case  8:
                /* Illegal instruction trap */
                die_if_kernel("Illegal instruction", regs, code);
                si.si_code = ILL_ILLOPC;
                goto give_sigill;

        case  9:
                /* Break instruction trap */
                handle_break(regs);
                return;

        case 10:
                /* Privileged operation trap */
                die_if_kernel("Privileged operation", regs, code);
                si.si_code = ILL_PRVOPC;
                goto give_sigill;

        case 11:
                /* Privileged register trap */
                if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

                        /* This is a MFCTL cr26/cr27 to gr instruction.
                         * PCXS traps on this, so we need to emulate it.
                         */

                        if (regs->iir & 0x00200000)
                                regs->gr[regs->iir & 0x1f] = mfctl(27);
                        else
                                regs->gr[regs->iir & 0x1f] = mfctl(26);

                        regs->iaoq[0] = regs->iaoq[1];
                        regs->iaoq[1] += 4;
                        regs->iasq[0] = regs->iasq[1];
                        return;
                }

                die_if_kernel("Privileged register usage", regs, code);
                si.si_code = ILL_PRVREG;
        give_sigill:
                si.si_signo = SIGILL;
                si.si_errno = 0;
                si.si_addr = (void __user *) regs->iaoq[0];
                force_sig_info(SIGILL, &si, current);
                return;

        case 12:
                /* Overflow Trap, let the userland signal handler do the cleanup */
                si.si_signo = SIGFPE;
                si.si_code = FPE_INTOVF;
                si.si_addr = (void __user *) regs->iaoq[0];
                force_sig_info(SIGFPE, &si, current);
                return;

        case 13:
                /* Conditional Trap
                   The condition succeeds in an instruction which traps
                   on condition */
                if (user_mode(regs)) {
                        si.si_signo = SIGFPE;
                        /* Set to zero, and let the userspace app figure it out from
                           the insn pointed to by si_addr */
                        si.si_code = 0;
                        si.si_addr = (void __user *) regs->iaoq[0];
                        force_sig_info(SIGFPE, &si, current);
                        return;
                }
                /* The kernel doesn't want to handle condition codes */
                break;

        case 14:
                /* Assist Exception Trap, i.e. floating point exception. */
                die_if_kernel("Floating point exception", regs, 0); /* quiet */
                handle_fpe(regs);
                return;

        case 15:
                /* Data TLB miss fault/Data page fault */
                /* Fall through */
        case 16:
                /* Non-access instruction TLB miss fault */
                /* The instruction TLB entry needed for the target address of the FIC
                   is absent, and hardware can't find it, so we get to cleanup */
                /* Fall through */
        case 17:
                /* Non-access data TLB miss fault/Non-access data page fault */
                /* FIXME:
                   Still need to add slow path emulation code here!
                   If the insn used a non-shadow register, then the tlb
                   handlers could not have their side-effect (e.g. probe
                   writing to a target register) emulated since rfir would
                   erase the changes to said register. Instead we have to
                   setup everything, call this function we are in, and emulate
                   by hand. Technically we need to emulate:
                   fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
                */
                fault_address = regs->ior;
                fault_space = regs->isr;
                break;

        case 18:
                /* PCXS only -- later cpu's split this into types 26,27 & 28 */
                /* Check for unaligned access */
                if (check_unaligned(regs)) {
                        handle_unaligned(regs);
                        return;
                }
                /* Fall Through */
        case 26:
                /* PCXL: Data memory access rights trap */
                fault_address = regs->ior;
                fault_space = regs->isr;
                break;

        case 19:
                /* Data memory break trap */
                regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
                /* fall thru */
        case 21:
                /* Page reference trap */
                handle_gdb_break(regs, TRAP_HWBKPT);
                return;

        case 25:
                /* Taken branch trap */
                regs->gr[0] &= ~PSW_T;
                if (user_space(regs))
                        handle_gdb_break(regs, TRAP_BRANCH);
                /* else this must be the start of a syscall - just let it
                 * run.
                 */
                return;

        case  7:
                /* Instruction access rights */
                /* PCXL: Instruction memory protection trap */

                /*
                 * This could be caused by either: 1) a process attempting
                 * to execute within a vma that does not have execute
                 * permission, or 2) an access rights violation caused by a
                 * flush only translation set up by ptep_get_and_clear().
                 * So we check the vma permissions to differentiate the two.
                 * If the vma indicates we have execute permission, then
                 * the cause is the latter one. In this case, we need to
                 * call do_page_fault() to fix the problem.
                 */

                if (user_mode(regs)) {
                        struct vm_area_struct *vma;

                        down_read(&current->mm->mmap_sem);
                        vma = find_vma(current->mm, regs->iaoq[0]);
                        if (vma && (regs->iaoq[0] >= vma->vm_start)
                                && (vma->vm_flags & VM_EXEC)) {

                                fault_address = regs->iaoq[0];
                                fault_space = regs->iasq[0];

                                up_read(&current->mm->mmap_sem);
                                break; /* call do_page_fault() */
                        }
                        up_read(&current->mm->mmap_sem);
                }
                /* Fall Through */
        case 27:
                /* Data memory protection ID trap */
                if (code == 27 && !user_mode(regs) &&
                        fixup_exception(regs))
                        return;

                die_if_kernel("Protection id trap", regs, code);
                si.si_code = SEGV_MAPERR;
                si.si_signo = SIGSEGV;
                si.si_errno = 0;
                if (code == 7)
                        si.si_addr = (void __user *) regs->iaoq[0];
                else
                        si.si_addr = (void __user *) regs->ior;
                force_sig_info(SIGSEGV, &si, current);
                return;

        case 28:
                /* Unaligned data reference trap */
                handle_unaligned(regs);
                return;

        default:
                if (user_mode(regs)) {
#ifdef PRINT_USER_FAULTS
                        printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
                                task_pid_nr(current), current->comm);
                        show_regs(regs);
#endif
                        /* SIGBUS, for lack of a better one. */
                        si.si_signo = SIGBUS;
                        si.si_code = BUS_OBJERR;
                        si.si_errno = 0;
                        si.si_addr = (void __user *) regs->ior;
                        force_sig_info(SIGBUS, &si, current);
                        return;
                }
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

                parisc_terminate("Unexpected interruption", regs, code, 0);
                /* NOT REACHED */
        }

        if (user_mode(regs)) {
                if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
#ifdef PRINT_USER_FAULTS
                        if (fault_space == 0)
                                printk(KERN_DEBUG "User Fault on Kernel Space ");
                        else
                                printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
                                        code);
                        printk("pid=%d command='%s'\n", task_pid_nr(current), current->comm);
                        show_regs(regs);
#endif
                        si.si_signo = SIGSEGV;
                        si.si_errno = 0;
                        si.si_code = SEGV_MAPERR;
                        si.si_addr = (void __user *) regs->ior;
                        force_sig_info(SIGSEGV, &si, current);
                        return;
                }
        } else {
                /*
                 * The kernel should never fault on its own address space.
                 */
                if (fault_space == 0) {
                        pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
                        parisc_terminate("Kernel Fault", regs, code, fault_address);
                }
        }

        do_page_fault(regs, code, fault_address);
}
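
/*
 * check_ivt(): sanity-check the chosen interruption vector (it must
 * begin with the "cows can fly" magic string), clear its first eight
 * words, then record the length of the os_hpmc handler and a checksum
 * correction word in the slots that follow.
 */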
int __init check_ivt(void *iva)
{
        extern u32 os_hpmc_size;
        extern const u32 os_hpmc[];

        int i;
        u32 check = 0;
        u32 *ivap;
        u32 *hpmcp;
        u32 length;

        if (strcmp((char *)iva, "cows can fly"))
                return -1;

        ivap = (u32 *)iva;

        for (i = 0; i < 8; i++)
                *ivap++ = 0;

        /* Compute Checksum for HPMC handler */
        length = os_hpmc_size;
        ivap[7] = length;

        hpmcp = (u32 *)os_hpmc;

        for (i = 0; i < length/4; i++)
                check += *hpmcp++;

        for (i = 0; i < 8; i++)
                check += ivap[i];

        ivap[5] = -check;

        return 0;
}

#ifndef CONFIG_64BIT
extern const void fault_vector_11;
#endif
extern const void fault_vector_20;
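
/*
 * trap_init(): pick the interruption vector that matches the boot CPU
 * (fault_vector_20 for PA2.0, fault_vector_11 for PA1.1) and let
 * check_ivt() validate it and fill in the HPMC length/checksum words.
 */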
void __init trap_init(void)
{
        void *iva;

        if (boot_cpu_data.cpu_type >= pcxu)
                iva = (void *) &fault_vector_20;
        else
#ifdef CONFIG_64BIT
                panic("Can't boot 64-bit OS on PA1.1 processor!");
#else
                iva = (void *) &fault_vector_11;
#endif

        if (check_ivt(iva))
                panic("IVT invalid");
}