/*
 * linux/arch/parisc/traps.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
 */
/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/kallsyms.h>

#include <asm/assembly.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <asm/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>

#include "../math-emu/math-emu.h"       /* for handle_fpe() */

#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
                          /*  dumped to the console via printk)          */

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
DEFINE_SPINLOCK(pa_dbit_lock);
#endif
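
/* Render the low 'nbits' bits of 'x' into 'buf' as an ASCII '0'/'1'
 * string, most significant bit first, and NUL-terminate it.  show_regs()
 * below uses this to print the PSW underneath its bit-name legend. */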
int printbinary(char *buf, unsigned long x, int nbits)
{
        unsigned long mask = 1UL << (nbits - 1);
        while (mask != 0) {
                *buf++ = (mask & x ? '1' : '0');
                mask >>= 1;
        }
        *buf = '\0';

        return nbits;
}

#ifdef __LP64__
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif

void show_regs(struct pt_regs *regs)
{
        int i;
        char buf[128], *p;
        char *level;
        unsigned long cr30;
        unsigned long cr31;

        level = user_mode(regs) ? KERN_DEBUG : KERN_CRIT;

        printk("%s\n", level); /* don't want to have that pretty register dump messed up */

        printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
        printbinary(buf, regs->gr[0], 32);
        printk("%sPSW: %s %s\n", level, buf, print_tainted());

        for (i = 0; i < 32; i += 4) {
                int j;
                p = buf;
                p += sprintf(p, "%sr%02d-%02d ", level, i, i + 3);
                for (j = 0; j < 4; j++) {
                        p += sprintf(p, " " RFMT, (i+j) == 0 ? 0 : regs->gr[i + j]);
                }
                printk("%s\n", buf);
        }

        for (i = 0; i < 8; i += 4) {
                int j;
                p = buf;
                p += sprintf(p, "%ssr%d-%d ", level, i, i + 3);
                for (j = 0; j < 4; j++) {
                        p += sprintf(p, " " RFMT, regs->sr[i + j]);
                }
                printk("%s\n", buf);
        }

#if RIDICULOUSLY_VERBOSE
        for (i = 0; i < 32; i += 2)
                printk("%sFR%02d : %016lx  FR%2d : %016lx", level, i,
                        regs->fr[i], i+1, regs->fr[i+1]);
#endif

        cr30 = mfctl(30);
        cr31 = mfctl(31);
        printk("%s\n", level);
        printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
               level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
        printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
               level, regs->iir, regs->isr, regs->ior);
        printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
               level, current_thread_info()->cpu, cr30, cr31);
        printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
        printk(level);
        print_symbol(" IAOQ[0]: %s\n", regs->iaoq[0]);
        printk(level);
        print_symbol(" IAOQ[1]: %s\n", regs->iaoq[1]);
        printk(level);
        print_symbol(" RP(r2): %s\n", regs->gr[2]);
}

void dump_stack(void)
{
        show_stack(NULL, NULL);
}

EXPORT_SYMBOL(dump_stack);

static void do_show_stack(struct unwind_frame_info *info)
{
        int i = 1;

        printk("Backtrace:\n");
        while (i <= 16) {
                if (unwind_once(info) < 0 || info->ip == 0)
                        break;

                if (__kernel_text_address(info->ip)) {
                        printk(" [<" RFMT ">] ", info->ip);
#ifdef CONFIG_KALLSYMS
                        print_symbol("%s\n", info->ip);
#else
                        if ((i & 0x03) == 0)
                                printk("\n");
#endif
                        i++;
                }
        }

        printk("\n");
}

void show_stack(struct task_struct *task, unsigned long *s)
{
        struct unwind_frame_info info;
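
        /* With no task given, fabricate a throw-away pt_regs describing this
         * very spot -- the current stack pointer in gr[30], our return
         * address in gr[2], and the address of the HERE label as the
         * instruction offset -- so the unwinder has a starting frame for the
         * running context. */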
        if (!task) {
                unsigned long sp;
                struct pt_regs *r;

HERE:
                asm volatile ("copy %%r30, %0" : "=r"(sp));
                r = (struct pt_regs *)kmalloc(sizeof(struct pt_regs), GFP_KERNEL);
                if (!r)
                        return;
                memset(r, 0, sizeof(struct pt_regs));
                r->iaoq[0] = (unsigned long)&&HERE;
                r->gr[2] = (unsigned long)__builtin_return_address(0);
                r->gr[30] = sp;
                unwind_frame_init(&info, current, r);
                kfree(r);
        } else {
                unwind_frame_init_from_blocked_task(&info, task);
        }

        do_show_stack(&info);
}

void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
        if (user_mode(regs)) {
                if (err == 0)
                        return; /* STFU */

                printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
                        current->comm, current->pid, str, err, regs->iaoq[0]);
#ifdef PRINT_USER_FAULTS
                /* XXX for debugging only */
                show_regs(regs);
#endif
                return;
        }

        oops_in_progress = 1;

        /* Amuse the user in a SPARC fashion */
        printk(
"    _______________________________ \n"
"   < Your System ate a SPARC! Gah! >\n"
"    ------------------------------- \n"
"           \\   ^__^\n"
"            \\  (xx)\\_______\n"
"               (__)\\       )\\/\\\n"
"                U  ||----w |\n"
"                   ||     ||\n");

        /* unlock the pdc lock if necessary */
        pdc_emergency_unlock();

        /* maybe the kernel hasn't booted very far yet and hasn't been able
         * to initialize the serial or STI console. In that case we should
         * re-enable the pdc console, so that the user will be able to
         * identify the problem. */
        if (!console_drivers)
                pdc_console_restart();

        printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
                current->comm, current->pid, str, err);
        show_regs(regs);

        /* Wot's wrong wif bein' racy? */
        if (current->thread.flags & PARISC_KERNEL_DEATH) {
                printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
                local_irq_enable();
                while (1);
        }

        current->thread.flags |= PARISC_KERNEL_DEATH;
        do_exit(SIGSEGV);
}

int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
{
        return syscall(regs);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
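/* The BREAK instruction has a zero major opcode; as encoded here the
 * 13-bit immediate sits at bit 13 and the 5-bit immediate in the low
 * bits, so "break 4,8" comes out as (8 << 13) | 4 == 0x10004. */
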
void handle_gdb_break(struct pt_regs *regs, int wot)
{
        struct siginfo si;

        si.si_code = wot;
        si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
        si.si_signo = SIGTRAP;
        si.si_errno = 0;
        force_sig_info(SIGTRAP, &si, current);
}

void handle_break(unsigned iir, struct pt_regs *regs)
{
        struct siginfo si;

        switch(iir) {
        case 0x00:
#ifdef PRINT_USER_FAULTS
                printk(KERN_DEBUG "break 0,0: pid=%d command='%s'\n",
                       current->pid, current->comm);
#endif
                die_if_kernel("Breakpoint", regs, 0);
#ifdef PRINT_USER_FAULTS
                show_regs(regs);
#endif
                si.si_code = TRAP_BRKPT;
                si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
                si.si_signo = SIGTRAP;
                force_sig_info(SIGTRAP, &si, current);
                break;

        case GDB_BREAK_INSN:
                die_if_kernel("Breakpoint", regs, 0);
                handle_gdb_break(regs, TRAP_BRKPT);
                break;

        default:
#ifdef PRINT_USER_FAULTS
                printk(KERN_DEBUG "break %#08x: pid=%d command='%s'\n",
                       iir, current->pid, current->comm);
                show_regs(regs);
#endif
                si.si_signo = SIGTRAP;
                si.si_code = TRAP_BRKPT;
                si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
                force_sig_info(SIGTRAP, &si, current);
                return;
        }
}

int handle_toc(void)
{
        printk(KERN_CRIT "TOC call.\n");
        return 0;
}

static void default_trap(int code, struct pt_regs *regs)
{
        printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
        show_regs(regs);
}
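
/* Hook for the low-priority machine check handler; it defaults to the
 * plain register dump above but can be repointed by CPU-specific setup
 * code. */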
void (*cpu_lpmc) (int code, struct pt_regs *regs) = default_trap;
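
/* Rebuild a pt_regs from the PIM (processor state image) that was captured
 * into hpmc_pim_data at machine-check time, so the normal register-dump
 * code can display it.  The wide (PA 2.0) and narrow (PA 1.1) PIM layouts
 * are handled separately below. */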
void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
        register int i;
        extern unsigned int hpmc_pim_data[];
        struct pdc_hpmc_pim_11 *pim_narrow;
        struct pdc_hpmc_pim_20 *pim_wide;

        if (boot_cpu_data.cpu_type >= pcxu) {

                pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

                /*
                 * Note: The following code will probably generate a
                 * bunch of truncation error warnings from the compiler.
                 * Could be handled with an ifdef, but perhaps there
                 * is a better way.
                 */

                regs->gr[0] = pim_wide->cr[22];

                for (i = 1; i < 32; i++)
                        regs->gr[i] = pim_wide->gr[i];

                for (i = 0; i < 32; i++)
                        regs->fr[i] = pim_wide->fr[i];

                for (i = 0; i < 8; i++)
                        regs->sr[i] = pim_wide->sr[i];

                regs->iasq[0] = pim_wide->cr[17];
                regs->iasq[1] = pim_wide->iasq_back;
                regs->iaoq[0] = pim_wide->cr[18];
                regs->iaoq[1] = pim_wide->iaoq_back;

                regs->sar = pim_wide->cr[11];
                regs->iir = pim_wide->cr[19];
                regs->isr = pim_wide->cr[20];
                regs->ior = pim_wide->cr[21];
        }
        else {
                pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

                regs->gr[0] = pim_narrow->cr[22];

                for (i = 1; i < 32; i++)
                        regs->gr[i] = pim_narrow->gr[i];

                for (i = 0; i < 32; i++)
                        regs->fr[i] = pim_narrow->fr[i];

                for (i = 0; i < 8; i++)
                        regs->sr[i] = pim_narrow->sr[i];

                regs->iasq[0] = pim_narrow->cr[17];
                regs->iasq[1] = pim_narrow->iasq_back;
                regs->iaoq[0] = pim_narrow->cr[18];
                regs->iaoq[1] = pim_narrow->iaoq_back;

                regs->sar = pim_narrow->cr[11];
                regs->iir = pim_narrow->cr[19];
                regs->isr = pim_narrow->cr[20];
                regs->ior = pim_narrow->cr[21];
        }

        /*
         * The following fields only have meaning if we came through
         * another path. So just zero them here.
         */

        regs->ksp = 0;
        regs->kpc = 0;
        regs->orig_r28 = 0;
}

/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMC's.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
        static DEFINE_SPINLOCK(terminate_lock);

        oops_in_progress = 1;

        set_eiem(0);
        local_irq_disable();
        spin_lock(&terminate_lock);

        /* unlock the pdc lock if necessary */
        pdc_emergency_unlock();

        /* restart pdc console if necessary */
        if (!console_drivers)
                pdc_console_restart();

        /* Not all paths will gutter the processor... */
        switch(code){

        case 1:
                transfer_pim_to_trap_frame(regs);
                break;

        default:
                /* Fall through */
                break;

        }

        {
                /* show_stack(NULL, (unsigned long *)regs->gr[30]); */
                struct unwind_frame_info info;
                unwind_frame_init(&info, current, regs);
                do_show_stack(&info);
        }

        printk("\n");
        printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
                        msg, code, regs, offset);
        show_regs(regs);

        spin_unlock(&terminate_lock);

        /* put soft power button back under hardware control;
         * if the user had pressed it once at any time, the
         * system will shut down immediately right here. */
        pdc_soft_power_button(0);

        /* Call kernel panic() so reboot timeouts work properly
         * FIXME: This function should be on the list of
         * panic notifiers, and we should call panic
         * directly from the location that we wish.
         * e.g. We should not call panic from
         * parisc_terminate, but rather the other way around.
         * This hack works, prints the panic message twice,
         * and it enables reboot timers!
         */
        panic(msg);
}

void handle_interruption(int code, struct pt_regs *regs)
{
        unsigned long fault_address = 0;
        unsigned long fault_space = 0;
        struct siginfo si;

        if (code == 1)
                pdc_console_restart();  /* switch back to pdc if HPMC */
        else
                local_irq_enable();

        /* Security check:
         * If the priority level is still user, and the
         * faulting space is not equal to the active space
         * then the user is attempting something in a space
         * that does not belong to them. Kill the process.
         *
         * This is normally the situation when the user
         * attempts to jump into the kernel space at the
         * wrong offset, be it at the gateway page or a
         * random location.
         *
         * We cannot normally signal the process because it
         * could *be* on the gateway page, and processes
         * executing on the gateway page can't have signals
         * delivered.
         *
         * We merely readjust the address into the user's
         * space, at a destination address of zero, and
         * allow processing to continue.
         */
        if (((unsigned long)regs->iaoq[0] & 3) &&
            ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
                /* Kill the user process later */
                regs->iaoq[0] = 0 | 3;
                regs->iaoq[1] = regs->iaoq[0] + 4;
                regs->iasq[0] = regs->iasq[1] = regs->sr[7];
                regs->gr[0] &= ~PSW_B;
                return;
        }

#if 0
        printk(KERN_CRIT "Interruption # %d\n", code);
#endif

        switch(code) {

        case  1:
                /* High-priority machine check (HPMC) */

                /* set up a new led state on systems shipped with a LED State panel */
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

                parisc_terminate("High Priority Machine Check (HPMC)",
                                regs, code, 0);
                /* NOT REACHED */

        case  2:
                /* Power failure interrupt */
                printk(KERN_CRIT "Power failure interrupt !\n");
                return;

        case  3:
                /* Recovery counter trap */
                regs->gr[0] &= ~PSW_R;
                if (user_space(regs))
                        handle_gdb_break(regs, TRAP_TRACE);
                /* else this must be the start of a syscall - just let it run */
                return;

        case  5:
                /* Low-priority machine check */
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

                flush_all_caches();
                cpu_lpmc(5, regs);
                return;

        case  6:
                /* Instruction TLB miss fault/Instruction page fault */
                fault_address = regs->iaoq[0];
                fault_space   = regs->iasq[0];
                break;

        case  8:
                /* Illegal instruction trap */
                die_if_kernel("Illegal instruction", regs, code);
                si.si_code = ILL_ILLOPC;
                goto give_sigill;

        case  9:
                /* Break instruction trap */
                handle_break(regs->iir, regs);
                return;

        case 10:
                /* Privileged operation trap */
                die_if_kernel("Privileged operation", regs, code);
                si.si_code = ILL_PRVOPC;
                goto give_sigill;

        case 11:
                /* Privileged register trap */
                if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

                        /* This is a MFCTL cr26/cr27 to gr instruction.
                         * PCXS traps on this, so we need to emulate it.
                         */
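                        /* The mask leaves only bit 0x00200000 (cr27 vs. cr26
                         * selector) and the low five bits (the destination
                         * GR) free to vary; everything else must match the
                         * mfctl encoding checked above. */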
                        if (regs->iir & 0x00200000)
                                regs->gr[regs->iir & 0x1f] = mfctl(27);
                        else
                                regs->gr[regs->iir & 0x1f] = mfctl(26);

                        regs->iaoq[0] = regs->iaoq[1];
                        regs->iaoq[1] += 4;
                        regs->iasq[0] = regs->iasq[1];
                        return;
                }

                die_if_kernel("Privileged register usage", regs, code);
                si.si_code = ILL_PRVREG;
        give_sigill:
                si.si_signo = SIGILL;
                si.si_errno = 0;
                si.si_addr = (void __user *) regs->iaoq[0];
                force_sig_info(SIGILL, &si, current);
                return;

        case 12:
                /* Overflow Trap, let the userland signal handler do the cleanup */
                si.si_signo = SIGFPE;
                si.si_code = FPE_INTOVF;
                si.si_addr = (void __user *) regs->iaoq[0];
                force_sig_info(SIGFPE, &si, current);
                return;

        case 13:
                /* Conditional Trap
                 * The condition succeeds in an instruction which traps
                 * on condition. */
                if (user_mode(regs)) {
                        si.si_signo = SIGFPE;
                        /* Set to zero, and let the userspace app figure it out from
                         * the insn pointed to by si_addr */
                        si.si_code = 0;
                        si.si_addr = (void __user *) regs->iaoq[0];
                        force_sig_info(SIGFPE, &si, current);
                        return;
                }
                /* The kernel doesn't want to handle condition codes */
                break;

        case 14:
                /* Assist Exception Trap, i.e. floating point exception. */
                die_if_kernel("Floating point exception", regs, 0); /* quiet */
                handle_fpe(regs);
                return;

        case 15:
                /* Data TLB miss fault/Data page fault */
                /* Fall through */
        case 16:
                /* Non-access instruction TLB miss fault */
                /* The instruction TLB entry needed for the target address of the FIC
                   is absent, and hardware can't find it, so we get to cleanup */
                /* Fall through */
        case 17:
                /* Non-access data TLB miss fault/Non-access data page fault */
                /* FIXME:
                   Still need to add slow path emulation code here!
                   If the insn used a non-shadow register, then the tlb
                   handlers could not have their side-effect (e.g. probe
                   writing to a target register) emulated since rfir would
                   erase the changes to said register. Instead we have to
                   setup everything, call this function we are in, and emulate
                   by hand. Technically we need to emulate:
                   fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
                */
                fault_address = regs->ior;
                fault_space = regs->isr;
                break;

        case 18:
                /* PCXS only -- later cpu's split this into types 26,27 & 28 */
                /* Check for unaligned access */
                if (check_unaligned(regs)) {
                        handle_unaligned(regs);
                        return;
                }
                /* Fall Through */
        case 26:
                /* PCXL: Data memory access rights trap */
                fault_address = regs->ior;
                fault_space   = regs->isr;
                break;

        case 19:
                /* Data memory break trap */
                regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
                /* fall thru */
        case 21:
                /* Page reference trap */
                handle_gdb_break(regs, TRAP_HWBKPT);
                return;

        case 25:
                /* Taken branch trap */
                regs->gr[0] &= ~PSW_T;
                if (user_space(regs))
                        handle_gdb_break(regs, TRAP_BRANCH);
                /* else this must be the start of a syscall - just let it
                 * run.
                 */
                return;

        case  7:
                /* Instruction access rights */
                /* PCXL: Instruction memory protection trap */

                /*
                 * This could be caused by either: 1) a process attempting
                 * to execute within a vma that does not have execute
                 * permission, or 2) an access rights violation caused by a
                 * flush only translation set up by ptep_get_and_clear().
                 * So we check the vma permissions to differentiate the two.
                 * If the vma indicates we have execute permission, then
                 * the cause is the latter one. In this case, we need to
                 * call do_page_fault() to fix the problem.
                 */

                if (user_mode(regs)) {
                        struct vm_area_struct *vma;

                        down_read(&current->mm->mmap_sem);
                        vma = find_vma(current->mm, regs->iaoq[0]);
                        if (vma && (regs->iaoq[0] >= vma->vm_start)
                                && (vma->vm_flags & VM_EXEC)) {

                                fault_address = regs->iaoq[0];
                                fault_space = regs->iasq[0];

                                up_read(&current->mm->mmap_sem);
                                break; /* call do_page_fault() */
                        }
                        up_read(&current->mm->mmap_sem);
                }
                /* Fall Through */
        case 27:
                /* Data memory protection ID trap */
                die_if_kernel("Protection id trap", regs, code);
                si.si_code = SEGV_MAPERR;
                si.si_signo = SIGSEGV;
                si.si_errno = 0;
                if (code == 7)
                        si.si_addr = (void __user *) regs->iaoq[0];
                else
                        si.si_addr = (void __user *) regs->ior;
                force_sig_info(SIGSEGV, &si, current);
                return;

        case 28:
                /* Unaligned data reference trap */
                handle_unaligned(regs);
                return;

        default:
                if (user_mode(regs)) {
#ifdef PRINT_USER_FAULTS
                        printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
                               current->pid, current->comm);
                        show_regs(regs);
#endif
                        /* SIGBUS, for lack of a better one. */
                        si.si_signo = SIGBUS;
                        si.si_code = BUS_OBJERR;
                        si.si_errno = 0;
                        si.si_addr = (void __user *) regs->ior;
                        force_sig_info(SIGBUS, &si, current);
                        return;
                }
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

                parisc_terminate("Unexpected interruption", regs, code, 0);
                /* NOT REACHED */
        }

        if (user_mode(regs)) {
                if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
#ifdef PRINT_USER_FAULTS
                        if (fault_space == 0)
                                printk(KERN_DEBUG "User Fault on Kernel Space ");
                        else
                                printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
                                       code);
                        printk("pid=%d command='%s'\n", current->pid, current->comm);
                        show_regs(regs);
#endif
                        si.si_signo = SIGSEGV;
                        si.si_errno = 0;
                        si.si_code = SEGV_MAPERR;
                        si.si_addr = (void __user *) regs->ior;
                        force_sig_info(SIGSEGV, &si, current);
                        return;
                }
        }
        else {
                /*
                 * The kernel should never fault on its own address space.
                 */
                if (fault_space == 0) {
                        pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
                        parisc_terminate("Kernel Fault", regs, code, fault_address);
                }
        }

        do_page_fault(regs, code, fault_address);
}
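
/* Sanity-check and finish the interruption vector table at boot.  The
 * vector is expected to begin with the "cows can fly" marker string laid
 * down by the assembly fault vector, and the HPMC handler's length and a
 * compensating checksum are patched into the vector's reserved words
 * (the checksum convention expected for the HPMC handler entry). */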
int __init check_ivt(void *iva)
{
        int i;
        u32 check = 0;
        u32 *ivap;
        u32 *hpmcp;
        u32 length;
        extern void os_hpmc(void);
        extern void os_hpmc_end(void);

        if (strcmp((char *)iva, "cows can fly"))
                return -1;

        ivap = (u32 *)iva;

        for (i = 0; i < 8; i++)
                *ivap++ = 0;

        /* Compute Checksum for HPMC handler */

        length = (u32)((unsigned long)os_hpmc_end - (unsigned long)os_hpmc);
        ivap[7] = length;

        hpmcp = (u32 *)os_hpmc;

        for (i = 0; i < length/4; i++)
                check += *hpmcp++;

        for (i = 0; i < 8; i++)
                check += ivap[i];

        ivap[5] = -check;
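        /* Storing the negated sum makes the handler words plus the vector
         * words summed above total zero, which is the consistency check
         * applied to the installed HPMC handler. */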

        return 0;
}

#ifndef __LP64__
extern const void fault_vector_11;
#endif
extern const void fault_vector_20;

void __init trap_init(void)
{
        void *iva;

        if (boot_cpu_data.cpu_type >= pcxu)
                iva = (void *) &fault_vector_20;
        else
#ifdef __LP64__
                panic("Can't boot 64-bit OS on PA1.1 processor!");
#else
                iva = (void *) &fault_vector_11;
#endif

        if (check_ivt(iva))
                panic("IVT invalid");
}