tomato.git (RT-AC56 3.0.0.4.374.37 core): release/src-rt-6.x.4708/linux/linux-2.6.36/arch/parisc/kernel/traps.c

/*
 * linux/arch/parisc/traps.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/bug.h>

#include <asm/assembly.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <asm/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "../math-emu/math-emu.h"       /* for handle_fpe() */

#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
                          /*  dumped to the console via printk)          */

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
DEFINE_SPINLOCK(pa_dbit_lock);
#endif

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
        struct pt_regs *regs);

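/*
 * printbinary() renders the low 'nbits' bits of 'x' as ASCII '0'/'1'
 * characters, most significant bit first, and NUL-terminates the buffer;
 * e.g. printbinary(buf, 0x5, 4) leaves "0101" in buf.  The callers below
 * format a 32-bit PSW or FPSR word into a 64-byte buffer.
 */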
static int printbinary(char *buf, unsigned long x, int nbits)
{
        unsigned long mask = 1UL << (nbits - 1);
        while (mask != 0) {
                *buf++ = (mask & x ? '1' : '0');
                mask >>= 1;
        }
        *buf = '\0';

        return nbits;
}

#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"  /* fpregs are 64-bit always */

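/*
 * PRINTREGS() prints four consecutive entries of register array 'r' on
 * one line, prefixed with the log level and the register-name stem 'f';
 * e.g. PRINTREGS(level, regs->gr, "r", RFMT, 0) emits
 * "r00-03 <gr0> <gr1> <gr2> <gr3>" at the given level.
 */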
#define PRINTREGS(lvl,r,f,fmt,x) \
        printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n", \
                lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1], \
                (r)[(x)+2], (r)[(x)+3])

static void print_gr(char *level, struct pt_regs *regs)
{
        int i;
        char buf[64];

        printk("%s\n", level);
        printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
        printbinary(buf, regs->gr[0], 32);
        printk("%sPSW: %s %s\n", level, buf, print_tainted());

        for (i = 0; i < 32; i += 4)
                PRINTREGS(level, regs->gr, "r", RFMT, i);
}

static void print_fr(char *level, struct pt_regs *regs)
{
        int i;
        char buf[64];
        struct { u32 sw[2]; } s;

        /* FR are 64bit everywhere. Need to use asm to get the content
         * of fpsr/fper1, and we assume that we won't have a FP Identify
         * in our way, otherwise we're screwed.
         * The fldd is used to restore the T-bit if there was one, as the
         * store clears it anyway.
         * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
        asm volatile ("fstd %%fr0,0(%1) \n\t"
                      "fldd 0(%1),%%fr0 \n\t"
                      : "=m" (s) : "r" (&s) : "r0");

        printk("%s\n", level);
        printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
        printbinary(buf, s.sw[0], 32);
        printk("%sFPSR: %s\n", level, buf);
        printk("%sFPER1: %08x\n", level, s.sw[1]);

        /* here we'll print fr0 again, tho it'll be meaningless */
        for (i = 0; i < 32; i += 4)
                PRINTREGS(level, regs->fr, "fr", FFMT, i);
}

void show_regs(struct pt_regs *regs)
{
        int i, user;
        char *level;
        unsigned long cr30, cr31;

        user = user_mode(regs);
        level = user ? KERN_DEBUG : KERN_CRIT;

        print_gr(level, regs);

        for (i = 0; i < 8; i += 4)
                PRINTREGS(level, regs->sr, "sr", RFMT, i);

        if (user)
                print_fr(level, regs);

        cr30 = mfctl(30);
        cr31 = mfctl(31);
        printk("%s\n", level);
        printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
               level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
        printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
               level, regs->iir, regs->isr, regs->ior);
        printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
               level, current_thread_info()->cpu, cr30, cr31);
        printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);

        if (user) {
                printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
                printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
                printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
        } else {
                printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
                printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
                printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);

                parisc_show_stack(current, NULL, regs);
        }
}

void dump_stack(void)
{
        show_stack(NULL, NULL);
}

EXPORT_SYMBOL(dump_stack);

static void do_show_stack(struct unwind_frame_info *info)
{
        int i = 1;

        printk(KERN_CRIT "Backtrace:\n");
        while (i <= 16) {
                if (unwind_once(info) < 0 || info->ip == 0)
                        break;

                if (__kernel_text_address(info->ip)) {
                        printk(KERN_CRIT " [<" RFMT ">] %pS\n",
                                info->ip, (void *) info->ip);
                        i++;
                }
        }
        printk(KERN_CRIT "\n");
}

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
        struct pt_regs *regs)
{
        struct unwind_frame_info info;
        struct task_struct *t;

        t = task ? task : current;
        if (regs) {
                unwind_frame_init(&info, t, regs);
                goto show_stack;
        }

        if (t == current) {
                unsigned long sp;

HERE:
                asm volatile ("copy %%r30, %0" : "=r"(sp));
                {
                        struct pt_regs r;

                        memset(&r, 0, sizeof(struct pt_regs));
                        r.iaoq[0] = (unsigned long)&&HERE;
                        r.gr[2] = (unsigned long)__builtin_return_address(0);
                        r.gr[30] = sp;

                        unwind_frame_init(&info, current, &r);
                }
        } else {
                unwind_frame_init_from_blocked_task(&info, t);
        }

show_stack:
        do_show_stack(&info);
}

void show_stack(struct task_struct *t, unsigned long *sp)
{
        return parisc_show_stack(t, sp, NULL);
}

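/*
 * Generic BUG()/WARN() support: report_bug() (invoked from handle_break()
 * below) first asks the architecture whether the trapping address can
 * hold a BUG entry.  Any kernel IAOQ value is accepted here; the lookup
 * in the __bug_table then decides whether the break really came from a
 * BUG()/WARN() site.
 */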
int is_valid_bugaddr(unsigned long iaoq)
{
        return 1;
}

void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
        if (user_mode(regs)) {
                if (err == 0)
                        return; /* STFU */

                printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
                        current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
#ifdef PRINT_USER_FAULTS
                show_regs(regs);
#endif
                return;
        }

        oops_in_progress = 1;

        oops_enter();

        /* Amuse the user in a SPARC fashion */
        if (err) printk(KERN_CRIT
                        " _______________________________ \n"
                        " < Your System ate a SPARC! Gah! >\n"
                        " ------------------------------- \n"
                        "        \\   ^__^\n"
                        "         (__)\\       )\\/\\\n"
                        "          U  ||----w |\n"
                        "             ||     ||\n");

        /* unlock the pdc lock if necessary */
        pdc_emergency_unlock();

        /* maybe the kernel hasn't booted very far yet and hasn't been able
         * to initialize the serial or STI console. In that case we should
         * re-enable the pdc console, so that the user will be able to
         * identify the problem. */
        if (!console_drivers)
                pdc_console_restart();

        if (err)
                printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
                        current->comm, task_pid_nr(current), str, err);

        /* Wot's wrong wif bein' racy? */
        if (current->thread.flags & PARISC_KERNEL_DEATH) {
                printk(KERN_CRIT "%s() recursion detected.\n", __func__);
                local_irq_enable();
                while (1);
        }
        current->thread.flags |= PARISC_KERNEL_DEATH;

        show_regs(regs);
        dump_stack();
        add_taint(TAINT_DIE);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops) {
                printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
                ssleep(5);
                panic("Fatal exception");
        }

        oops_exit();
        do_exit(SIGSEGV);
}

int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
{
        return syscall(regs);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
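/*
 * 0x10004 decodes as "break 4,8" under the field extraction used in
 * handle_break() below: the low five bits of the IIR hold the first
 * immediate (4) and bits 13..25 hold the second (8).
 */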
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
        struct siginfo si;

        si.si_signo = SIGTRAP;
        si.si_errno = 0;
        si.si_code = wot;
        si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
        force_sig_info(SIGTRAP, &si, current);
}

static void handle_break(struct pt_regs *regs)
{
        unsigned iir = regs->iir;

        if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
                /* check if a BUG() or WARN() trapped here. */
                enum bug_trap_type tt;
                tt = report_bug(regs->iaoq[0] & ~3, regs);
                if (tt == BUG_TRAP_TYPE_WARN) {
                        regs->iaoq[0] += 4;
                        regs->iaoq[1] += 4;
                        return; /* return to next instruction when WARN_ON(). */
                }
                die_if_kernel("Unknown kernel breakpoint", regs,
                        (tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
        }

#ifdef PRINT_USER_FAULTS
        if (unlikely(iir != GDB_BREAK_INSN)) {
                printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
                        iir & 31, (iir>>13) & ((1<<13)-1),
                        task_pid_nr(current), current->comm);
                show_regs(regs);
        }
#endif

        /* send standard GDB signal */
        handle_gdb_break(regs, TRAP_BRKPT);
}

static void default_trap(int code, struct pt_regs *regs)
{
        printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
        show_regs(regs);
}

void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;

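/*
 * After an HPMC the firmware preserves the processor state in PIM
 * (Processor Internal Memory).  The low-level HPMC handler is expected
 * to have copied that dump into hpmc_pim_data[]; this routine rebuilds a
 * struct pt_regs from the wide (PA2.0) or narrow (PA1.1) PIM layout so
 * that the usual show_regs()/backtrace code in parisc_terminate() can be
 * reused.
 */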
void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
        register int i;
        extern unsigned int hpmc_pim_data[];
        struct pdc_hpmc_pim_11 *pim_narrow;
        struct pdc_hpmc_pim_20 *pim_wide;

        if (boot_cpu_data.cpu_type >= pcxu) {

                pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

                /*
                 * Note: The following code will probably generate a
                 * bunch of truncation error warnings from the compiler.
                 * Could be handled with an ifdef, but perhaps there
                 * is a better way.
                 */

                regs->gr[0] = pim_wide->cr[22];

                for (i = 1; i < 32; i++)
                        regs->gr[i] = pim_wide->gr[i];

                for (i = 0; i < 32; i++)
                        regs->fr[i] = pim_wide->fr[i];

                for (i = 0; i < 8; i++)
                        regs->sr[i] = pim_wide->sr[i];

                regs->iasq[0] = pim_wide->cr[17];
                regs->iasq[1] = pim_wide->iasq_back;
                regs->iaoq[0] = pim_wide->cr[18];
                regs->iaoq[1] = pim_wide->iaoq_back;

                regs->sar = pim_wide->cr[11];
                regs->iir = pim_wide->cr[19];
                regs->isr = pim_wide->cr[20];
                regs->ior = pim_wide->cr[21];
        }
        else {
                pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

                regs->gr[0] = pim_narrow->cr[22];

                for (i = 1; i < 32; i++)
                        regs->gr[i] = pim_narrow->gr[i];

                for (i = 0; i < 32; i++)
                        regs->fr[i] = pim_narrow->fr[i];

                for (i = 0; i < 8; i++)
                        regs->sr[i] = pim_narrow->sr[i];

                regs->iasq[0] = pim_narrow->cr[17];
                regs->iasq[1] = pim_narrow->iasq_back;
                regs->iaoq[0] = pim_narrow->cr[18];
                regs->iaoq[1] = pim_narrow->iaoq_back;

                regs->sar = pim_narrow->cr[11];
                regs->iir = pim_narrow->cr[19];
                regs->isr = pim_narrow->cr[20];
                regs->ior = pim_narrow->cr[21];
        }

        /*
         * The following fields only have meaning if we came through
         * another path. So just zero them here.
         */

        regs->ksp = 0;
        regs->kpc = 0;
        regs->orig_r28 = 0;
}

/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMC's.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
        static DEFINE_SPINLOCK(terminate_lock);

        oops_in_progress = 1;

        set_eiem(0);
        local_irq_disable();
        spin_lock(&terminate_lock);

        /* unlock the pdc lock if necessary */
        pdc_emergency_unlock();

        /* restart pdc console if necessary */
        if (!console_drivers)
                pdc_console_restart();

        /* Not all paths will gutter the processor... */
        switch(code){

        case 1:
                transfer_pim_to_trap_frame(regs);
                break;

        default:
                /* Fall through */
                break;

        }

        {
                /* show_stack(NULL, (unsigned long *)regs->gr[30]); */
                struct unwind_frame_info info;
                unwind_frame_init(&info, current, regs);
                do_show_stack(&info);
        }

        printk("\n");
        printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
                        msg, code, regs, offset);
        show_regs(regs);

        spin_unlock(&terminate_lock);

        /* put soft power button back under hardware control;
         * if the user had pressed it once at any time, the
         * system will shut down immediately right here. */
        pdc_soft_power_button(0);

        panic(msg);
}

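/*
 * handle_interruption() is the common C-level dispatcher for hardware
 * interruption codes, entered with the code and the trap frame saved by
 * the low-level entry code.  Codes it cannot dispose of directly either
 * raise a signal, fall through to do_page_fault() with fault_address and
 * fault_space filled in, or terminate the machine.
 */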
void notrace handle_interruption(int code, struct pt_regs *regs)
{
        unsigned long fault_address = 0;
        unsigned long fault_space = 0;
        struct siginfo si;

        if (code == 1)
                pdc_console_restart();  /* switch back to pdc if HPMC */
        else
                local_irq_enable();

        /* Security check:
         * If the priority level is still user, and the
         * faulting space is not equal to the active space
         * then the user is attempting something in a space
         * that does not belong to them. Kill the process.
         *
         * This is normally the situation when the user
         * attempts to jump into the kernel space at the
         * wrong offset, be it at the gateway page or a
         * random location.
         *
         * We cannot normally signal the process because it
         * could *be* on the gateway page, and processes
         * executing on the gateway page can't have signals
         * delivered.
         *
         * We merely readjust the address into the users
         * space, at a destination address of zero, and
         * allow processing to continue.
         */
        if (((unsigned long)regs->iaoq[0] & 3) &&
            ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
                /* Kill the user process later */
                regs->iaoq[0] = 0 | 3;
                regs->iaoq[1] = regs->iaoq[0] + 4;
                regs->iasq[0] = regs->iasq[1] = regs->sr[7];
                regs->gr[0] &= ~PSW_B;
                return;
        }

        switch(code) {

        case 1:
                /* High-priority machine check (HPMC) */

                /* set up a new led state on systems shipped with a LED State panel */
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

                parisc_terminate("High Priority Machine Check (HPMC)",
                                regs, code, 0);
                /* NOT REACHED */

        case 2:
                /* Power failure interrupt */
                printk(KERN_CRIT "Power failure interrupt !\n");
                return;

        case 3:
                /* Recovery counter trap */
                regs->gr[0] &= ~PSW_R;
                if (user_space(regs))
                        handle_gdb_break(regs, TRAP_TRACE);
                /* else this must be the start of a syscall - just let it run */
                return;

        case 5:
                /* Low-priority machine check */
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

                flush_cache_all();
                flush_tlb_all();
                cpu_lpmc(5, regs);
                return;

        case 6:
                /* Instruction TLB miss fault/Instruction page fault */
                fault_address = regs->iaoq[0];
                fault_space = regs->iasq[0];
                break;

        case 8:
                /* Illegal instruction trap */
                die_if_kernel("Illegal instruction", regs, code);
                si.si_code = ILL_ILLOPC;
                goto give_sigill;

        case 9:
                /* Break instruction trap */
                handle_break(regs);
                return;

        case 10:
                /* Privileged operation trap */
                die_if_kernel("Privileged operation", regs, code);
                si.si_code = ILL_PRVOPC;
                goto give_sigill;

        case 11:
                /* Privileged register trap */
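                /*
                 * The mask/match test below picks out a "mfctl %cr26/%cr27,%t"
                 * encoding: the 0x00200000 bit of the IIR selects cr27 over
                 * cr26 and the low five bits name the target general register.
                 * After emulating the move, the instruction queue is stepped
                 * past the trapping instruction (the back element becomes the
                 * front, and the back element advances by 4).
                 */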
                if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

                        /* This is a MFCTL cr26/cr27 to gr instruction.
                         * PCXS traps on this, so we need to emulate it.
                         */

                        if (regs->iir & 0x00200000)
                                regs->gr[regs->iir & 0x1f] = mfctl(27);
                        else
                                regs->gr[regs->iir & 0x1f] = mfctl(26);

                        regs->iaoq[0] = regs->iaoq[1];
                        regs->iaoq[1] += 4;
                        regs->iasq[0] = regs->iasq[1];
                        return;
                }

                die_if_kernel("Privileged register usage", regs, code);
                si.si_code = ILL_PRVREG;
        give_sigill:
                si.si_signo = SIGILL;
                si.si_errno = 0;
                si.si_addr = (void __user *) regs->iaoq[0];
                force_sig_info(SIGILL, &si, current);
                return;

        case 12:
                /* Overflow Trap, let the userland signal handler do the cleanup */
                si.si_signo = SIGFPE;
                si.si_code = FPE_INTOVF;
                si.si_addr = (void __user *) regs->iaoq[0];
                force_sig_info(SIGFPE, &si, current);
                return;

        case 13:
                /* Conditional Trap
                   The condition succeeds in an instruction which traps
                   on condition */
                if(user_mode(regs)){
                        si.si_signo = SIGFPE;
                        /* Set to zero, and let the userspace app figure it out from
                           the insn pointed to by si_addr */
                        si.si_code = 0;
                        si.si_addr = (void __user *) regs->iaoq[0];
                        force_sig_info(SIGFPE, &si, current);
                        return;
                }
                /* The kernel doesn't want to handle condition codes */
                break;

        case 14:
                /* Assist Exception Trap, i.e. floating point exception. */
                die_if_kernel("Floating point exception", regs, 0); /* quiet */
                handle_fpe(regs);
                return;

        case 15:
                /* Data TLB miss fault/Data page fault */
                /* Fall through */
        case 16:
                /* Non-access instruction TLB miss fault */
                /* The instruction TLB entry needed for the target address of the FIC
                   is absent, and hardware can't find it, so we get to cleanup */
                /* Fall through */
        case 17:
                /* Non-access data TLB miss fault/Non-access data page fault */
                fault_address = regs->ior;
                fault_space = regs->isr;
                break;

        case 18:
                /* PCXS only -- later cpu's split this into types 26,27 & 28 */
                /* Check for unaligned access */
                if (check_unaligned(regs)) {
                        handle_unaligned(regs);
                        return;
                }
                /* Fall Through */
        case 26:
                /* PCXL: Data memory access rights trap */
                fault_address = regs->ior;
                fault_space = regs->isr;
                break;

        case 19:
                /* Data memory break trap */
                regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
                /* fall thru */
        case 21:
                /* Page reference trap */
                handle_gdb_break(regs, TRAP_HWBKPT);
                return;

        case 25:
                /* Taken branch trap */
                regs->gr[0] &= ~PSW_T;
                if (user_space(regs))
                        handle_gdb_break(regs, TRAP_BRANCH);
                /* else this must be the start of a syscall - just let it
                 * run.
                 */
                return;

        case 7:
                /* Instruction access rights */
                /* PCXL: Instruction memory protection trap */

                /*
                 * This could be caused by either: 1) a process attempting
                 * to execute within a vma that does not have execute
                 * permission, or 2) an access rights violation caused by a
                 * flush only translation set up by ptep_get_and_clear().
                 * So we check the vma permissions to differentiate the two.
                 * If the vma indicates we have execute permission, then
                 * the cause is the latter one. In this case, we need to
                 * call do_page_fault() to fix the problem.
                 */

                if (user_mode(regs)) {
                        struct vm_area_struct *vma;

                        down_read(&current->mm->mmap_sem);
                        vma = find_vma(current->mm,regs->iaoq[0]);
                        if (vma && (regs->iaoq[0] >= vma->vm_start)
                                && (vma->vm_flags & VM_EXEC)) {

                                fault_address = regs->iaoq[0];
                                fault_space = regs->iasq[0];

                                up_read(&current->mm->mmap_sem);
                                break; /* call do_page_fault() */
                        }
                        up_read(&current->mm->mmap_sem);
                }
                /* Fall Through */
        case 27:
                /* Data memory protection ID trap */
                if (code == 27 && !user_mode(regs) &&
                        fixup_exception(regs))
                        return;

                die_if_kernel("Protection id trap", regs, code);
                si.si_code = SEGV_MAPERR;
                si.si_signo = SIGSEGV;
                si.si_errno = 0;
                if (code == 7)
                        si.si_addr = (void __user *) regs->iaoq[0];
                else
                        si.si_addr = (void __user *) regs->ior;
                force_sig_info(SIGSEGV, &si, current);
                return;

        case 28:
                /* Unaligned data reference trap */
                handle_unaligned(regs);
                return;

        default:
                if (user_mode(regs)) {
#ifdef PRINT_USER_FAULTS
                        printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
                            task_pid_nr(current), current->comm);
                        show_regs(regs);
#endif
                        /* SIGBUS, for lack of a better one. */
                        si.si_signo = SIGBUS;
                        si.si_code = BUS_OBJERR;
                        si.si_errno = 0;
                        si.si_addr = (void __user *) regs->ior;
                        force_sig_info(SIGBUS, &si, current);
                        return;
                }
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

                parisc_terminate("Unexpected interruption", regs, code, 0);
                /* NOT REACHED */
        }

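        /*
         * Anything that falls out of the switch above is a memory fault:
         * fault_address and fault_space now describe it.  A user-mode fault
         * on a space that is not the process's own gets SIGSEGV, a kernel
         * fault on kernel space terminates the machine, and everything else
         * is handed to do_page_fault().
         */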
        if (user_mode(regs)) {
                if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
#ifdef PRINT_USER_FAULTS
                        if (fault_space == 0)
                                printk(KERN_DEBUG "User Fault on Kernel Space ");
                        else
                                printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
                                       code);
                        printk(KERN_CONT "pid=%d command='%s'\n",
                               task_pid_nr(current), current->comm);
                        show_regs(regs);
#endif
                        si.si_signo = SIGSEGV;
                        si.si_errno = 0;
                        si.si_code = SEGV_MAPERR;
                        si.si_addr = (void __user *) regs->ior;
                        force_sig_info(SIGSEGV, &si, current);
                        return;
                }
        }
        else {

                /*
                 * The kernel should never fault on its own address space.
                 */

                if (fault_space == 0)
                {
                        pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
                        parisc_terminate("Kernel Fault", regs, code, fault_address);
                }
        }

        do_page_fault(regs, code, fault_address);
}

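/*
 * check_ivt() patches the interruption vector table in place: after
 * verifying the "cows can fly" marker expected at the IVA it clears those
 * first eight words, then records the size of the os_hpmc handler and a
 * negated checksum in the following slot so that the slot words plus the
 * handler code sum to zero; presumably this is the consistency check that
 * firmware applies before branching to the OS HPMC handler.
 */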
int __init check_ivt(void *iva)
{
        extern u32 os_hpmc_size;
        extern const u32 os_hpmc[];

        int i;
        u32 check = 0;
        u32 *ivap;
        u32 *hpmcp;
        u32 length;

        if (strcmp((char *)iva, "cows can fly"))
                return -1;

        ivap = (u32 *)iva;

        for (i = 0; i < 8; i++)
                *ivap++ = 0;

        /* Compute Checksum for HPMC handler */
        length = os_hpmc_size;
        ivap[7] = length;

        hpmcp = (u32 *)os_hpmc;

        for (i=0; i<length/4; i++)
                check += *hpmcp++;

        for (i=0; i<8; i++)
                check += ivap[i];

        ivap[5] = -check;

        return 0;
}

#ifndef CONFIG_64BIT
extern const void fault_vector_11;
#endif
extern const void fault_vector_20;

void __init trap_init(void)
{
        void *iva;

        if (boot_cpu_data.cpu_type >= pcxu)
                iva = (void *) &fault_vector_20;
        else
#ifdef CONFIG_64BIT
                panic("Can't boot 64-bit OS on PA1.1 processor!");
#else
                iva = (void *) &fault_vector_11;
#endif

        if (check_ivt(iva))
                panic("IVT invalid");
}