/*
 *  arch/s390/kernel/traps.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/tracehook.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <linux/utsname.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/s390_ext.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include "entry.h"
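/*
 * Program-check handler table, indexed by the s390 program interruption
 * code.  The entries are installed in trap_init() below.
 */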
pgm_check_handler_t *pgm_check_table[128];

int show_unhandled_signals;

extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
extern pgm_check_handler_t do_asce_exception;
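/* Read the current stack pointer from general register 15. */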
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */
/*
 * For show_trace we have three different stacks to consider:
 *  - the panic stack which is used if the kernel stack has overflown
 *  - the asynchronous interrupt stack (cpu related)
 *  - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
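/* In each stack frame, gprs[8] is the saved return address (r14). */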
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		low = sp;
		sp = regs->gprs[15];
	}
}
static void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}
void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long *__r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;

	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		if (i && ((i * sizeof (long) % 32) == 0))
			printk("\n ");
		printk(LONG, *stack++);
	}
	printk("\n");
	show_trace(task, sp);
}
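/* Print the breaking-event address (the last branch taken), 64-bit only. */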
static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
	print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
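/*
 * Extract a field from the PSW mask: "(~bits + 1) & bits" is the lowest
 * set bit of the mask, so the division right-aligns the selected bits.
 */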
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}
void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
	printk("%s PSW : %p %p",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr);
	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
	printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk(" " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk(" " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk(" " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);

	show_code(regs);
}
void show_regs(struct pt_regs *regs)
{
	print_modules();
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!(regs->psw.mask & PSW_MASK_PSTATE))
		show_trace(NULL, (unsigned long *) regs->gprs[15]);
	show_last_breaking_event(regs);
}
/* This is called from fs/proc/array.c */
void task_show_regs(struct seq_file *m, struct task_struct *task)
{
	struct pt_regs *regs;

	regs = task_pt_regs(task);
	seq_printf(m, "task: %p, ksp: %p\n",
		   task, (void *)task->thread.ksp);
	seq_printf(m, "User PSW : %p %p\n",
		   (void *) regs->psw.mask, (void *)regs->psw.addr);

	seq_printf(m, "User GPRS: " FOURLONG,
		   regs->gprs[0], regs->gprs[1],
		   regs->gprs[2], regs->gprs[3]);
	seq_printf(m, " " FOURLONG,
		   regs->gprs[4], regs->gprs[5],
		   regs->gprs[6], regs->gprs[7]);
	seq_printf(m, " " FOURLONG,
		   regs->gprs[8], regs->gprs[9],
		   regs->gprs[10], regs->gprs[11]);
	seq_printf(m, " " FOURLONG,
		   regs->gprs[12], regs->gprs[13],
		   regs->gprs[14], regs->gprs[15]);
	seq_printf(m, "User ACRS: %08x %08x %08x %08x\n",
		   task->thread.acrs[0], task->thread.acrs[1],
		   task->thread.acrs[2], task->thread.acrs[3]);
	seq_printf(m, " %08x %08x %08x %08x\n",
		   task->thread.acrs[4], task->thread.acrs[5],
		   task->thread.acrs[6], task->thread.acrs[7]);
	seq_printf(m, " %08x %08x %08x %08x\n",
		   task->thread.acrs[8], task->thread.acrs[9],
		   task->thread.acrs[10], task->thread.acrs[11]);
	seq_printf(m, " %08x %08x %08x %08x\n",
		   task->thread.acrs[12], task->thread.acrs[13],
		   task->thread.acrs[14], task->thread.acrs[15]);
}
static DEFINE_SPINLOCK(die_lock);

void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;

	oops_enter();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}
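/*
 * Rate-limited diagnostic for unhandled faults in user space, printed
 * only when show_unhandled_signals is set.
 */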
static inline void report_user_fault(struct pt_regs *regs, long int_code,
				     int signr)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk("User process fault: interruption code 0x%lX ", int_code);
	print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
	printk("\n");
	show_regs(regs);
}
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}
static inline void __kprobes do_trap(long interruption_code, int signr,
				     char *str, struct pt_regs *regs,
				     siginfo_t *info)
{
	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (notify_die(DIE_TRAP, str, regs, interruption_code,
		       interruption_code, signr) == NOTIFY_STOP)
		return;

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		struct task_struct *tsk = current;

		tsk->thread.trap_no = interruption_code & 0xffff;
		force_sig_info(signr, info, tsk);
		report_user_fault(regs, interruption_code, signr);
	} else {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		else {
			enum bug_trap_type btt;

			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
			if (btt == BUG_TRAP_TYPE_WARN)
				return;
			die(str, regs, interruption_code);
		}
	}
}
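/*
 * The program-check old PSW points past the faulting instruction; back
 * up by the stored instruction-length code to get its start address.
 */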
static inline void __user *get_check_address(struct pt_regs *regs)
{
	return (void __user *)((regs->psw.addr - S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
}
void __kprobes do_single_step(struct pt_regs *regs)
{
	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
		       SIGTRAP) == NOTIFY_STOP) {
		return;
	}
	if (tracehook_consider_fatal_signal(current, SIGTRAP))
		force_sig(SIGTRAP, current);
}
static void default_trap_handler(struct pt_regs * regs, long interruption_code)
{
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		local_irq_enable();
		report_user_fault(regs, interruption_code, SIGSEGV);
		do_exit(SIGSEGV);
	} else
		die("Unknown program exception", regs, interruption_code);
}
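/*
 * DO_ERROR_INFO generates a trivial handler that fills in a siginfo
 * structure and forwards the fault to do_trap().
 */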
#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
static void name(struct pt_regs * regs, long interruption_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = siaddr; \
	do_trap(interruption_code, signr, str, regs, &info); \
}
DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception,
	      ILL_ILLADR, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "execute exception", execute_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception,
	      FPE_INTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "fixpoint overflow exception", overflow_exception,
	      FPE_INTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP overflow exception", hfp_overflow_exception,
	      FPE_FLTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP underflow exception", hfp_underflow_exception,
	      FPE_FLTUND, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP significance exception", hfp_significance_exception,
	      FPE_FLTRES, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP divide exception", hfp_divide_exception,
	      FPE_FLTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP square root exception", hfp_sqrt_exception,
	      FPE_FLTINV, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "operand exception", operand_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op,
	      ILL_PRVOPC, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "translation exception", translation_exception,
	      ILL_ILLOPN, get_check_address(regs))
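/*
 * Translate the data-exception code (DXC) from the floating point
 * control register into a si_code and deliver SIGFPE through do_trap().
 */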
static inline void
do_fp_trap(struct pt_regs *regs, void __user *location,
	   int fpc, long interruption_code)
{
	siginfo_t si;

	si.si_signo = SIGFPE;
	si.si_errno = 0;
	si.si_addr = location;
	si.si_code = 0;
	/* FPC[2] is Data Exception Code */
	if ((fpc & 0x00000300) == 0) {
		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
		if (fpc & 0x8000) /* invalid fp operation */
			si.si_code = FPE_FLTINV;
		else if (fpc & 0x4000) /* div by 0 */
			si.si_code = FPE_FLTDIV;
		else if (fpc & 0x2000) /* overflow */
			si.si_code = FPE_FLTOVF;
		else if (fpc & 0x1000) /* underflow */
			si.si_code = FPE_FLTUND;
		else if (fpc & 0x0800) /* inexact */
			si.si_code = FPE_FLTRES;
	}
	current->thread.ieee_instruction_pointer = (addr_t) location;
	do_trap(interruption_code, SIGFPE,
		"floating point exception", regs, &si);
}
static void illegal_op(struct pt_regs * regs, long interruption_code)
{
	siginfo_t info;
	__u8 opcode[6];
	__u16 __user *location;
	int signal = 0;

	location = get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			if (tracehook_consider_fatal_signal(current, SIGTRAP))
				force_sig(SIGTRAP, current);
			else
				signal = SIGILL;
#ifdef CONFIG_MATHEMU
		} else if (opcode[0] == 0xb3) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_b3(opcode, regs);
		} else if (opcode[0] == 0xed) {
			if (get_user(*((__u32 *) (opcode+2)),
				     (__u32 __user *)(location+1)))
				return;
			signal = math_emu_ed(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb299) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_srnm(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29c) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_stfpc(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29d) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_lfpc(opcode, regs);
#endif
		} else
			signal = SIGILL;
	} else {
		/*
		 * If we get an illegal op in kernel mode, send it through the
		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
		 */
		if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}

#ifdef CONFIG_MATHEMU
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal == SIGSEGV) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void __user *) location;
		do_trap(interruption_code, signal,
			"user address fault", regs, &info);
	} else
#endif
	if (signal) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPC;
		info.si_addr = (void __user *) location;
		do_trap(interruption_code, signal,
			"illegal operation", regs, &info);
	}
}
#ifdef CONFIG_MATHEMU
asmlinkage void
specification_exception(struct pt_regs * regs, long interruption_code)
{
	__u8 opcode[6];
	__u16 __user *location = NULL;
	int signal = 0;

	location = (__u16 __user *) get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		default:
			signal = SIGILL;
			break;
		}
	} else
		signal = SIGILL;

	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(interruption_code, signal,
			"specification exception", regs, &info);
	}
}
#else
DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
	      ILL_ILLOPN, get_check_address(regs));
#endif
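/*
 * Data exception handler: store the current FPC so the DXC can be
 * inspected; without IEEE hardware CONFIG_MATHEMU may emulate the
 * instruction instead.
 */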
static void data_exception(struct pt_regs * regs, long interruption_code)
{
	__u16 __user *location;
	int signal = 0;

	location = get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (MACHINE_HAS_IEEE)
		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));

#ifdef CONFIG_MATHEMU
	else if (regs->psw.mask & PSW_MASK_PSTATE) {
		__u8 opcode[6];
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		case 0xb3:
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_b3(opcode, regs);
			break;
		case 0xed:
			get_user(*((__u32 *) (opcode+2)),
				 (__u32 __user *)(location+1));
			signal = math_emu_ed(opcode, regs);
			break;
		case 0xb2:
			if (opcode[1] == 0x99) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_srnm(opcode, regs);
			} else if (opcode[1] == 0x9c) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_stfpc(opcode, regs);
			} else if (opcode[1] == 0x9d) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_lfpc(opcode, regs);
			} else
				signal = SIGILL;
			break;
		default:
			signal = SIGILL;
			break;
		}
	}
#endif
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(interruption_code, signal,
			"data exception", regs, &info);
	}
}
static void space_switch_exception(struct pt_regs * regs, long int_code)
{
	siginfo_t info;

	/* Set user psw back to home space mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = get_check_address(regs);
	do_trap(int_code, SIGILL, "space switch event", regs, &info);
}
asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}
/* init is done in lowcore.S and head.S */
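/*
 * Fill the program-check handler table; indices are s390 program
 * interruption codes (e.g. 0x01 operation exception, 0x04 protection,
 * 0x10/0x11 DAT faults).
 */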
void __init trap_init(void)
{
	int i;

	for (i = 0; i < 128; i++)
		pgm_check_table[i] = &default_trap_handler;
	pgm_check_table[1] = &illegal_op;
	pgm_check_table[2] = &privileged_op;
	pgm_check_table[3] = &execute_exception;
	pgm_check_table[4] = &do_protection_exception;
	pgm_check_table[5] = &addressing_exception;
	pgm_check_table[6] = &specification_exception;
	pgm_check_table[7] = &data_exception;
	pgm_check_table[8] = &overflow_exception;
	pgm_check_table[9] = &divide_exception;
	pgm_check_table[0x0A] = &overflow_exception;
	pgm_check_table[0x0B] = &divide_exception;
	pgm_check_table[0x0C] = &hfp_overflow_exception;
	pgm_check_table[0x0D] = &hfp_underflow_exception;
	pgm_check_table[0x0E] = &hfp_significance_exception;
	pgm_check_table[0x0F] = &hfp_divide_exception;
	pgm_check_table[0x10] = &do_dat_exception;
	pgm_check_table[0x11] = &do_dat_exception;
	pgm_check_table[0x12] = &translation_exception;
	pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
	pgm_check_table[0x38] = &do_asce_exception;
	pgm_check_table[0x39] = &do_dat_exception;
	pgm_check_table[0x3A] = &do_dat_exception;
	pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
	pgm_check_table[0x15] = &operand_exception;
	pgm_check_table[0x1C] = &space_switch_exception;
	pgm_check_table[0x1D] = &hfp_sqrt_exception;
	pfault_irq_init();
}