/*
 *  arch/s390/kernel/traps.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/s390_ext.h>
#include <asm/lowcore.h>

/* Called from entry.S only */
extern void handle_per_exception(struct pt_regs *regs);

typedef void pgm_check_handler_t(struct pt_regs *, long);
pgm_check_handler_t *pgm_check_table[128];
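
/*
 * The hardware stores a program-interruption code in the lowcore when a
 * program check is recognized; the first-level handler in entry.S uses
 * that code as an index into pgm_check_table to reach the right handler.
 */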

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_PROCESS_DEBUG
int sysctl_userprocess_debug = 1;
#else
int sysctl_userprocess_debug = 0;
#endif
#endif

extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
extern pgm_check_handler_t do_pseudo_page_fault;
#ifdef CONFIG_PFAULT
extern int pfault_init(void);
extern void pfault_fini(void);
extern void pfault_interrupt(struct pt_regs *regs, __u16 error_code);
static ext_int_info_t ext_int_pfault;
#endif
extern pgm_check_handler_t do_monitor_call;
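
/*
 * General register 15 is the stack pointer in the s390 ABI, so loading
 * its address with "la" yields the current stack pointer.
 */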
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_ARCH_S390X
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_ARCH_S390X */
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_ARCH_S390X */

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflowed
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
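
/*
 * Each stack frame starts with a back-chain slot that points to the
 * caller's frame; gprs[8] of struct stack_frame is the saved r14, i.e.
 * the return address that gets printed for every frame.
 */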
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
        struct stack_frame *sf;
        struct pt_regs *regs;

        while (1) {
                sp = sp & PSW_ADDR_INSN;
                if (sp < low || sp > high - sizeof(*sf))
                        return sp;
                sf = (struct stack_frame *) sp;
                printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
                print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
                /* Follow the backchain. */
                while (1) {
                        low = sp;
                        sp = sf->back_chain & PSW_ADDR_INSN;
                        if (!sp)
                                break;
                        if (sp <= low || sp > high - sizeof(*sf))
                                return sp;
                        sf = (struct stack_frame *) sp;
                        printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
                        print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
                }
                /* Zero backchain detected, check for interrupt frame. */
                sp = (unsigned long) (sf + 1);
                if (sp <= low || sp > high - sizeof(*regs))
                        return sp;
                regs = (struct pt_regs *) sp;
                printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
                print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
                low = sp;
                sp = regs->gprs[15];
        }
}

void show_trace(struct task_struct *task, unsigned long * stack)
{
        register unsigned long __r15 asm ("15");
        unsigned long sp;

        sp = (unsigned long) stack;
        if (!sp)
                sp = task ? task->thread.ksp : __r15;
        printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
        sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
                          S390_lowcore.panic_stack);
#endif
        sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
                          S390_lowcore.async_stack);
        if (task)
                __show_trace(sp, (unsigned long) task->thread_info,
                             (unsigned long) task->thread_info + THREAD_SIZE);
        else
                __show_trace(sp, S390_lowcore.thread_info,
                             S390_lowcore.thread_info + THREAD_SIZE);
        printk("\n");
}
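
/*
 * Dump up to kstack_depth_to_print raw stack words starting at sp (or at
 * the current stack pointer when sp is NULL), stopping early at the
 * THREAD_SIZE boundary, and then print the call trace.
 */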
void show_stack(struct task_struct *task, unsigned long *sp)
{
        register unsigned long * __r15 asm ("15");
        unsigned long *stack;
        int i;

        // debugging aid: "show_stack(NULL);" prints the
        // back trace for this cpu.

        if (!sp)
                sp = task ? (unsigned long *) task->thread.ksp : __r15;

        stack = sp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
                        break;
                if (i && ((i * sizeof (long) % 32) == 0))
                        printk("\n       ");
                printk("%p ", (void *)*stack++);
        }
        printk("\n");
        show_trace(task, sp);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        show_stack(0, 0);
}

EXPORT_SYMBOL(dump_stack);
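
/*
 * Print the PSW (mask and address), the general purpose registers and a
 * dump of the instruction stream around the faulting address.  The
 * PSW_MASK_PSTATE bit in the PSW mask tells user state from kernel state.
 */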
void show_registers(struct pt_regs *regs)
{
        mm_segment_t old_fs;
        char *mode;
        int i;

        mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
        printk("%s PSW : %p %p",
               mode, (void *) regs->psw.mask,
               (void *) regs->psw.addr);
        print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
        printk("%s GPRS: " FOURLONG, mode,
               regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
        printk("           " FOURLONG,
               regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
        printk("           " FOURLONG,
               regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
        printk("           " FOURLONG,
               regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);

#if 0
        /* FIXME: this isn't needed any more but it changes the ksymoops
         * input. To remove or not to remove ... */
        save_access_regs(regs->acrs);
        printk("%s ACRS: %08x %08x %08x %08x\n", mode,
               regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]);
        printk("           %08x %08x %08x %08x\n",
               regs->acrs[4], regs->acrs[5], regs->acrs[6], regs->acrs[7]);
        printk("           %08x %08x %08x %08x\n",
               regs->acrs[8], regs->acrs[9], regs->acrs[10], regs->acrs[11]);
        printk("           %08x %08x %08x %08x\n",
               regs->acrs[12], regs->acrs[13], regs->acrs[14], regs->acrs[15]);
#endif

        /*
         * Print the first 20 bytes of the instruction stream at the
         * time of the fault.
         */
        old_fs = get_fs();
        if (regs->psw.mask & PSW_MASK_PSTATE)
                set_fs(USER_DS);
        else
                set_fs(KERNEL_DS);
        printk("%s Code: ", mode);
        for (i = 0; i < 20; i++) {
                unsigned char c;
                if (__get_user(c, (char __user *)(regs->psw.addr + i))) {
                        printk(" Bad PSW.");
                        break;
                }
                printk("%02x ", c);
        }
        set_fs(old_fs);

        printk("\n");
}

/* This is called from fs/proc/array.c */
char *task_show_regs(struct task_struct *task, char *buffer)
{
        struct pt_regs *regs;

        regs = __KSTK_PTREGS(task);
        buffer += sprintf(buffer, "task: %p, ksp: %p\n",
                          task, (void *)task->thread.ksp);
        buffer += sprintf(buffer, "User PSW : %p %p\n",
                          (void *) regs->psw.mask, (void *)regs->psw.addr);

        buffer += sprintf(buffer, "User GPRS: " FOURLONG,
                          regs->gprs[0], regs->gprs[1],
                          regs->gprs[2], regs->gprs[3]);
        buffer += sprintf(buffer, "           " FOURLONG,
                          regs->gprs[4], regs->gprs[5],
                          regs->gprs[6], regs->gprs[7]);
        buffer += sprintf(buffer, "           " FOURLONG,
                          regs->gprs[8], regs->gprs[9],
                          regs->gprs[10], regs->gprs[11]);
        buffer += sprintf(buffer, "           " FOURLONG,
                          regs->gprs[12], regs->gprs[13],
                          regs->gprs[14], regs->gprs[15]);
        buffer += sprintf(buffer, "User ACRS: %08x %08x %08x %08x\n",
                          task->thread.acrs[0], task->thread.acrs[1],
                          task->thread.acrs[2], task->thread.acrs[3]);
        buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
                          task->thread.acrs[4], task->thread.acrs[5],
                          task->thread.acrs[6], task->thread.acrs[7]);
        buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
                          task->thread.acrs[8], task->thread.acrs[9],
                          task->thread.acrs[10], task->thread.acrs[11]);
        buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
                          task->thread.acrs[12], task->thread.acrs[13],
                          task->thread.acrs[14], task->thread.acrs[15]);
        return buffer;
}
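
/*
 * die_lock serializes oops output from multiple CPUs; bust_spinlocks()
 * makes sure the messages reach the console even if it is locked.
 */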
spinlock_t die_lock = SPIN_LOCK_UNLOCKED;

void die(const char * str, struct pt_regs * regs, long err)
{
        static int die_counter;

        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
        printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
        show_regs(regs);
        bust_spinlocks(0);
        spin_unlock_irq(&die_lock);
        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception: panic_on_oops");
        do_exit(SIGSEGV);
}
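
/*
 * Common trap handling: for faults raised in user state deliver the signal
 * to the current task; for faults in kernel state consult the exception
 * table and either branch to the fixup or die().
 */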
static void inline do_trap(long interruption_code, int signr, char *str,
                           struct pt_regs *regs, siginfo_t *info)
{
        /*
         * We got all needed information from the lowcore and can
         * now safely switch on interrupts.
         */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                local_irq_enable();

        if (regs->psw.mask & PSW_MASK_PSTATE) {
                struct task_struct *tsk = current;

                tsk->thread.trap_no = interruption_code & 0xffff;
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
#ifndef CONFIG_SYSCTL
#ifdef CONFIG_PROCESS_DEBUG
                printk("User process fault: interruption code 0x%lX\n",
                       interruption_code);
                show_regs(regs);
#endif
#else
                if (sysctl_userprocess_debug) {
                        printk("User process fault: interruption code 0x%lX\n",
                               interruption_code);
                        show_regs(regs);
                }
#endif
        } else {
                const struct exception_table_entry *fixup;
                fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
                if (fixup)
                        regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
                else
                        die(str, regs, interruption_code);
        }
}
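
/*
 * regs->psw.addr points behind the instruction that caused the program
 * check; subtracting the instruction-length code saved in the lowcore
 * gives the address of that instruction.
 */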
static inline void *get_check_address(struct pt_regs *regs)
{
        return (void *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
}

void do_single_step(struct pt_regs *regs)
{
        if ((current->ptrace & PT_PTRACED) != 0)
                force_sig(SIGTRAP, current);
}
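
/*
 * DO_ERROR and DO_ERROR_INFO generate the trivial handlers: they build an
 * optional siginfo and forward the exception to do_trap().
 */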
#define DO_ERROR(signr, str, name) \
asmlinkage void name(struct pt_regs * regs, long interruption_code) \
{ \
        do_trap(interruption_code, signr, str, regs, NULL); \
}

#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
asmlinkage void name(struct pt_regs * regs, long interruption_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void *)siaddr; \
        do_trap(interruption_code, signr, str, regs, &info); \
}

DO_ERROR(SIGSEGV, "Unknown program exception", default_trap_handler)

DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception,
              ILL_ILLADR, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "execute exception", execute_exception,
              ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception,
              FPE_INTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "fixpoint overflow exception", overflow_exception,
              FPE_INTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP overflow exception", hfp_overflow_exception,
              FPE_FLTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP underflow exception", hfp_underflow_exception,
              FPE_FLTUND, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP significance exception", hfp_significance_exception,
              FPE_FLTRES, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP divide exception", hfp_divide_exception,
              FPE_FLTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP square root exception", hfp_sqrt_exception,
              FPE_FLTINV, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "operand exception", operand_exception,
              ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op,
              ILL_PRVOPC, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception,
              ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "translation exception", translation_exception,
              ILL_ILLOPN, get_check_address(regs))

static inline void
do_fp_trap(struct pt_regs *regs, void *location,
           int fpc, long interruption_code)
{
        siginfo_t si;

        si.si_signo = SIGFPE;
        si.si_errno = 0;
        si.si_addr = location;
        si.si_code = 0;
        /* FPC[2] is Data Exception Code */
        if ((fpc & 0x00000300) == 0) {
                /* bits 6 and 7 of DXC are 0 iff IEEE exception */
                if (fpc & 0x8000) /* invalid fp operation */
                        si.si_code = FPE_FLTINV;
                else if (fpc & 0x4000) /* div by 0 */
                        si.si_code = FPE_FLTDIV;
                else if (fpc & 0x2000) /* overflow */
                        si.si_code = FPE_FLTOVF;
                else if (fpc & 0x1000) /* underflow */
                        si.si_code = FPE_FLTUND;
                else if (fpc & 0x0800) /* inexact */
                        si.si_code = FPE_FLTRES;
        }
        current->thread.ieee_instruction_pointer = (addr_t) location;
        do_trap(interruption_code, SIGFPE,
                "floating point exception", regs, &si);
}
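
/*
 * An operation exception in user space is either the ptrace breakpoint
 * opcode (turned into SIGTRAP for traced tasks) or, with CONFIG_MATHEMU,
 * a floating point instruction handed to the math emulator; everything
 * else becomes SIGILL.
 */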
asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code)
{
        __u8 opcode[6];
        __u16 *location;
        int signal = 0;

        location = (__u16 *) get_check_address(regs);

        /*
         * We got all needed information from the lowcore and can
         * now safely switch on interrupts.
         */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                local_irq_enable();

        if (regs->psw.mask & PSW_MASK_PSTATE) {
                get_user(*((__u16 *) opcode), (__u16 __user *) location);
                if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
                        if (current->ptrace & PT_PTRACED)
                                force_sig(SIGTRAP, current);
                        else
                                signal = SIGILL;
#ifdef CONFIG_MATHEMU
                } else if (opcode[0] == 0xb3) {
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_b3(opcode, regs);
                } else if (opcode[0] == 0xed) {
                        get_user(*((__u32 *) (opcode+2)),
                                 (__u32 *)(location+1));
                        signal = math_emu_ed(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb299) {
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_srnm(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb29c) {
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_stfpc(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb29d) {
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_lfpc(opcode, regs);
#endif
                } else
                        signal = SIGILL;
        } else
                signal = SIGILL;

        if (signal == SIGFPE)
                do_fp_trap(regs, location,
                           current->thread.fp_regs.fpc, interruption_code);
        else if (signal)
                do_trap(interruption_code, signal,
                        "illegal operation", regs, NULL);
}
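
/*
 * With CONFIG_MATHEMU a specification exception raised by one of the
 * floating point load/store forms in user space is passed to the math
 * emulator; without the emulator it is reported as SIGILL.
 */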
#ifdef CONFIG_MATHEMU
asmlinkage void
specification_exception(struct pt_regs * regs, long interruption_code)
{
        __u8 opcode[6];
        __u16 *location = NULL;
        int signal = 0;

        location = (__u16 *) get_check_address(regs);

        /*
         * We got all needed information from the lowcore and can
         * now safely switch on interrupts.
         */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                local_irq_enable();

        if (regs->psw.mask & PSW_MASK_PSTATE) {
                get_user(*((__u16 *) opcode), location);
                switch (opcode[0]) {
                case 0x28: /* LDR Rx,Ry */
                        signal = math_emu_ldr(opcode);
                        break;
                case 0x38: /* LER Rx,Ry */
                        signal = math_emu_ler(opcode);
                        break;
                case 0x60: /* STD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_std(opcode, regs);
                        break;
                case 0x68: /* LD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ld(opcode, regs);
                        break;
                case 0x70: /* STE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ste(opcode, regs);
                        break;
                case 0x78: /* LE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_le(opcode, regs);
                        break;
                default:
                        signal = SIGILL;
                        break;
                }
        } else
                signal = SIGILL;

        if (signal == SIGFPE)
                do_fp_trap(regs, location,
                           current->thread.fp_regs.fpc, interruption_code);
        else if (signal) {
                siginfo_t info;
                info.si_signo = signal;
                info.si_errno = 0;
                info.si_code = ILL_ILLOPN;
                info.si_addr = location;
                do_trap(interruption_code, signal,
                        "specification exception", regs, &info);
        }
}
#else
DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
              ILL_ILLOPN, get_check_address(regs));
#endif
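
/*
 * A data exception sets the data-exception code (DXC) in the FPC register.
 * On machines with IEEE floating point the FPC is stored and an IEEE DXC is
 * translated into a SIGFPE with the matching si_code by do_fp_trap(); old
 * machines without IEEE may instead need the math emulator.
 */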
asmlinkage void data_exception(struct pt_regs * regs, long interruption_code)
{
        __u16 *location;
        int signal = 0;

        location = (__u16 *) get_check_address(regs);

        /*
         * We got all needed information from the lowcore and can
         * now safely switch on interrupts.
         */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                local_irq_enable();

        if (MACHINE_HAS_IEEE)
                __asm__ volatile ("stfpc %0\n\t"
                                  : "=m" (current->thread.fp_regs.fpc));

#ifdef CONFIG_MATHEMU
        else if (regs->psw.mask & PSW_MASK_PSTATE) {
                __u8 opcode[6];
                get_user(*((__u16 *) opcode), location);
                switch (opcode[0]) {
                case 0x28: /* LDR Rx,Ry */
                        signal = math_emu_ldr(opcode);
                        break;
                case 0x38: /* LER Rx,Ry */
                        signal = math_emu_ler(opcode);
                        break;
                case 0x60: /* STD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_std(opcode, regs);
                        break;
                case 0x68: /* LD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ld(opcode, regs);
                        break;
                case 0x70: /* STE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ste(opcode, regs);
                        break;
                case 0x78: /* LE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_le(opcode, regs);
                        break;
                case 0xb3:
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_b3(opcode, regs);
                        break;
                case 0xed:
                        get_user(*((__u32 *) (opcode+2)),
                                 (__u32 *)(location+1));
                        signal = math_emu_ed(opcode, regs);
                        break;
                case 0xb2:
                        if (opcode[1] == 0x99) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_srnm(opcode, regs);
                        } else if (opcode[1] == 0x9c) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_stfpc(opcode, regs);
                        } else if (opcode[1] == 0x9d) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_lfpc(opcode, regs);
                        } else
                                signal = SIGILL;
                        break;
                default:
                        signal = SIGILL;
                        break;
                }
        }
#endif
        if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
                signal = SIGFPE;
        else
                signal = SIGILL;
        if (signal == SIGFPE)
                do_fp_trap(regs, location,
                           current->thread.fp_regs.fpc, interruption_code);
        else if (signal) {
                siginfo_t info;
                info.si_signo = signal;
                info.si_errno = 0;
                info.si_code = ILL_ILLOPN;
                info.si_addr = location;
                do_trap(interruption_code, signal,
                        "data exception", regs, &info);
        }
}

asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
{
        die("Kernel stack overflow", regs, 0);
        panic("Corrupt kernel stack, can't continue.");
}

/* init is done in lowcore.S and head.S */
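
/*
 * trap_init() wires each program-interruption code to its handler.  Codes
 * without a specific handler fall back to default_trap_handler, which
 * reports an unknown program exception with SIGSEGV.
 */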
void __init trap_init(void)
{
        int i;

        for (i = 0; i < 128; i++)
                pgm_check_table[i] = &default_trap_handler;
        pgm_check_table[1] = &illegal_op;
        pgm_check_table[2] = &privileged_op;
        pgm_check_table[3] = &execute_exception;
        pgm_check_table[4] = &do_protection_exception;
        pgm_check_table[5] = &addressing_exception;
        pgm_check_table[6] = &specification_exception;
        pgm_check_table[7] = &data_exception;
        pgm_check_table[8] = &overflow_exception;
        pgm_check_table[9] = &divide_exception;
        pgm_check_table[0x0A] = &overflow_exception;
        pgm_check_table[0x0B] = &divide_exception;
        pgm_check_table[0x0C] = &hfp_overflow_exception;
        pgm_check_table[0x0D] = &hfp_underflow_exception;
        pgm_check_table[0x0E] = &hfp_significance_exception;
        pgm_check_table[0x0F] = &hfp_divide_exception;
        pgm_check_table[0x10] = &do_dat_exception;
        pgm_check_table[0x11] = &do_dat_exception;
        pgm_check_table[0x12] = &translation_exception;
        pgm_check_table[0x13] = &special_op_exception;
#ifndef CONFIG_ARCH_S390X
        pgm_check_table[0x14] = &do_pseudo_page_fault;
#else /* CONFIG_ARCH_S390X */
        pgm_check_table[0x38] = &do_dat_exception;
        pgm_check_table[0x39] = &do_dat_exception;
        pgm_check_table[0x3A] = &do_dat_exception;
        pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_ARCH_S390X */
        pgm_check_table[0x15] = &operand_exception;
        pgm_check_table[0x1C] = &privileged_op;
        pgm_check_table[0x1D] = &hfp_sqrt_exception;
        pgm_check_table[0x40] = &do_monitor_call;

        if (MACHINE_IS_VM) {
                /*
                 * First try to get pfault pseudo page faults going.
                 * If this isn't available turn on pagex page faults.
                 */
#ifdef CONFIG_PFAULT
                /* request the 0x2603 external interrupt */
                if (register_early_external_interrupt(0x2603, pfault_interrupt,
                                                      &ext_int_pfault) != 0)
                        panic("Couldn't request external interrupt 0x2603");

                if (pfault_init() == 0)
                        return;

                /* Tough luck, no pfault. */
                unregister_early_external_interrupt(0x2603, pfault_interrupt,
                                                    &ext_int_pfault);
#endif
#ifndef CONFIG_ARCH_S390X
                cpcmd("SET PAGEX ON", NULL, 0);
#endif
        }
}