/*
 *  arch/s390/kernel/traps.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/tracehook.h>
#include <linux/timer.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <linux/utsname.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/s390_ext.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
pgm_check_handler_t *pgm_check_table[128];

#ifdef CONFIG_PROCESS_DEBUG
int sysctl_userprocess_debug = 1;
#else
int sysctl_userprocess_debug = 0;
#endif

extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
extern pgm_check_handler_t do_asce_exception;

#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */
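/*
 * Note: stack_pointer expands to the current value of general register 15,
 * which the s390 ABI reserves as the stack pointer.  kstack_depth_to_print
 * only limits how many longwords show_stack() dumps before it hands over
 * to show_trace().
 */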
/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflown
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
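/*
 * Frame layout assumed by __show_trace() below: each s390 stack frame
 * begins with a back_chain slot pointing to the caller's frame, followed
 * by the register save area; gprs[8] of that save area is the saved r14,
 * i.e. the return address.  A zero back_chain ends the chain and, on the
 * interrupt stacks, is followed by the saved pt_regs of the interrupted
 * context.
 */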
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
        struct stack_frame *sf;
        struct pt_regs *regs;

        while (1) {
                sp = sp & PSW_ADDR_INSN;
                if (sp < low || sp > high - sizeof(*sf))
                        return sp;
                sf = (struct stack_frame *) sp;
                printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
                print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
                /* Follow the backchain. */
                while (1) {
                        low = sp;
                        sp = sf->back_chain & PSW_ADDR_INSN;
                        if (!sp)
                                break;
                        if (sp <= low || sp > high - sizeof(*sf))
                                return sp;
                        sf = (struct stack_frame *) sp;
                        printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
                        print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
                }
                /* Zero backchain detected, check for interrupt frame. */
                sp = (unsigned long) (sf + 1);
                if (sp <= low || sp > high - sizeof(*regs))
                        return sp;
                regs = (struct pt_regs *) sp;
                printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
                print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
                low = sp;
                sp = regs->gprs[15];
        }
}
static void show_trace(struct task_struct *task, unsigned long *stack)
{
        register unsigned long __r15 asm ("15");
        unsigned long sp;

        sp = (unsigned long) stack;
        if (!sp)
                sp = task ? task->thread.ksp : __r15;
        printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
        sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
                          S390_lowcore.panic_stack);
#endif
        sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
                          S390_lowcore.async_stack);
        if (task)
                __show_trace(sp, (unsigned long) task_stack_page(task),
                             (unsigned long) task_stack_page(task) + THREAD_SIZE);
        else
                __show_trace(sp, S390_lowcore.thread_info,
                             S390_lowcore.thread_info + THREAD_SIZE);
        printk("\n");
        if (!task)
                task = current;
        debug_show_held_locks(task);
}
void show_stack(struct task_struct *task, unsigned long *sp)
{
        register unsigned long * __r15 asm ("15");
        unsigned long *stack;
        int i;

        if (!sp)
                stack = task ? (unsigned long *) task->thread.ksp : __r15;
        else
                stack = sp;

        for (i = 0; i < kstack_depth_to_print; i++) {
                if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
                        break;
                if (i && ((i * sizeof (long) % 32) == 0))
                        printk("\n       ");
                printk(LONG, *stack++);
        }
        printk("\n");
        show_trace(task, sp);
}
static void show_last_breaking_event(struct pt_regs *regs)
{
        printk("Last Breaking-Event-Address:\n");
        printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
        print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
}
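/*
 * Note on show_last_breaking_event(): this assumes the low-level program
 * check entry code has saved the breaking-event-address register (the
 * address of the last instruction that broke sequential execution) in
 * regs->args[0], which is why args[0] is printed like a code address.
 */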
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        printk("CPU: %d %s %s %.*s\n",
               task_thread_info(current)->cpu, print_tainted(),
               init_utsname()->release,
               (int)strcspn(init_utsname()->version, " "),
               init_utsname()->version);
        printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
               current->comm, current->pid, current,
               (void *) current->thread.ksp);
        show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
        return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}
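/*
 * How mask_bits() works: (~bits + 1) & bits isolates the lowest set bit of
 * the mask, and dividing the masked PSW value by that bit shifts the field
 * down to bit 0.  For example, with bits == 0xc000 the divisor is 0x4000,
 * so a two-bit field comes out as a value in the range 0..3.
 */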
void show_registers(struct pt_regs *regs)
{
        char *mode;

        mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
        printk("%s PSW : %p %p",
               mode, (void *) regs->psw.mask,
               (void *) regs->psw.addr);
        print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
        printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
               "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
               mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
               mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
               mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
               mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
               mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
        printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS));
#endif
        printk("\n%s GPRS: " FOURLONG, mode,
               regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
        printk("           " FOURLONG,
               regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
        printk("           " FOURLONG,
               regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
        printk("           " FOURLONG,
               regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
}
void show_regs(struct pt_regs *regs)
{
        printk("CPU: %d %s %s %.*s\n",
               task_thread_info(current)->cpu, print_tainted(),
               init_utsname()->release,
               (int)strcspn(init_utsname()->version, " "),
               init_utsname()->version);
        printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
               current->comm, current->pid, current,
               (void *) current->thread.ksp);
        show_registers(regs);
        /* Show stack backtrace if pt_regs is from kernel mode */
        if (!(regs->psw.mask & PSW_MASK_PSTATE))
                show_trace(NULL, (unsigned long *) regs->gprs[15]);
        show_last_breaking_event(regs);
}
/* This is called from fs/proc/array.c */
void task_show_regs(struct seq_file *m, struct task_struct *task)
{
        struct pt_regs *regs;

        regs = task_pt_regs(task);
        seq_printf(m, "task: %p, ksp: %p\n",
                   task, (void *)task->thread.ksp);
        seq_printf(m, "User PSW : %p %p\n",
                   (void *) regs->psw.mask, (void *)regs->psw.addr);

        seq_printf(m, "User GPRS: " FOURLONG,
                   regs->gprs[0], regs->gprs[1],
                   regs->gprs[2], regs->gprs[3]);
        seq_printf(m, "           " FOURLONG,
                   regs->gprs[4], regs->gprs[5],
                   regs->gprs[6], regs->gprs[7]);
        seq_printf(m, "           " FOURLONG,
                   regs->gprs[8], regs->gprs[9],
                   regs->gprs[10], regs->gprs[11]);
        seq_printf(m, "           " FOURLONG,
                   regs->gprs[12], regs->gprs[13],
                   regs->gprs[14], regs->gprs[15]);
        seq_printf(m, "User ACRS: %08x %08x %08x %08x\n",
                   task->thread.acrs[0], task->thread.acrs[1],
                   task->thread.acrs[2], task->thread.acrs[3]);
        seq_printf(m, "           %08x %08x %08x %08x\n",
                   task->thread.acrs[4], task->thread.acrs[5],
                   task->thread.acrs[6], task->thread.acrs[7]);
        seq_printf(m, "           %08x %08x %08x %08x\n",
                   task->thread.acrs[8], task->thread.acrs[9],
                   task->thread.acrs[10], task->thread.acrs[11]);
        seq_printf(m, "           %08x %08x %08x %08x\n",
                   task->thread.acrs[12], task->thread.acrs[13],
                   task->thread.acrs[14], task->thread.acrs[15]);
}
static DEFINE_SPINLOCK(die_lock);

void die(const char * str, struct pt_regs * regs, long err)
{
        static int die_counter;

        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
        printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk("DEBUG_PAGEALLOC");
#endif
        printk("\n");
        notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
        show_regs(regs);
        bust_spinlocks(0);
        add_taint(TAINT_DIE);
        spin_unlock_irq(&die_lock);
        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception: panic_on_oops");
        do_exit(SIGSEGV);
}
static inline void
report_user_fault(long interruption_code, struct pt_regs *regs)
{
#if defined(CONFIG_SYSCTL)
        if (!sysctl_userprocess_debug)
                return;
#endif
#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
        printk("User process fault: interruption code 0x%lX\n",
               interruption_code);
        show_regs(regs);
#endif
}

int is_valid_bugaddr(unsigned long addr)
{
        return 1;
}
static void __kprobes inline do_trap(long interruption_code, int signr,
                                     char *str, struct pt_regs *regs,
                                     siginfo_t *info)
{
        /*
         * We got all needed information from the lowcore and can
         * now safely switch on interrupts.
         */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                local_irq_enable();

        if (notify_die(DIE_TRAP, str, regs, interruption_code,
                       interruption_code, signr) == NOTIFY_STOP)
                return;

        if (regs->psw.mask & PSW_MASK_PSTATE) {
                struct task_struct *tsk = current;

                tsk->thread.trap_no = interruption_code & 0xffff;
                force_sig_info(signr, info, tsk);
                report_user_fault(interruption_code, regs);
        } else {
                const struct exception_table_entry *fixup;
                fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
                if (fixup)
                        regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
                else {
                        enum bug_trap_type btt;

                        btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
                        if (btt == BUG_TRAP_TYPE_WARN)
                                return;
                        die(str, regs, interruption_code);
                }
        }
}
static inline void __user *get_check_address(struct pt_regs *regs)
{
        return (void __user *)((regs->psw.addr - S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
}
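/*
 * get_check_address(): the program-check old PSW address points past the
 * instruction that raised the exception, so subtracting the instruction
 * length code stored in the lowcore (pgm_ilc) yields the address of the
 * faulting instruction itself.
 */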
void __kprobes do_single_step(struct pt_regs *regs)
{
        if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
                       SIGTRAP) == NOTIFY_STOP) {
                return;
        }
        if (tracehook_consider_fatal_signal(current, SIGTRAP))
                force_sig(SIGTRAP, current);
}
static void default_trap_handler(struct pt_regs * regs, long interruption_code)
{
        if (regs->psw.mask & PSW_MASK_PSTATE) {
                local_irq_enable();
                report_user_fault(interruption_code, regs);
                do_exit(SIGSEGV);
        } else
                die("Unknown program exception", regs, interruption_code);
}
#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
static void name(struct pt_regs * regs, long interruption_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = siaddr; \
        do_trap(interruption_code, signr, str, regs, &info); \
}

DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception,
              ILL_ILLADR, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "execute exception", execute_exception,
              ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception,
              FPE_INTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "fixpoint overflow exception", overflow_exception,
              FPE_INTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP overflow exception", hfp_overflow_exception,
              FPE_FLTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP underflow exception", hfp_underflow_exception,
              FPE_FLTUND, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP significance exception", hfp_significance_exception,
              FPE_FLTRES, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP divide exception", hfp_divide_exception,
              FPE_FLTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP square root exception", hfp_sqrt_exception,
              FPE_FLTINV, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "operand exception", operand_exception,
              ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op,
              ILL_PRVOPC, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception,
              ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "translation exception", translation_exception,
              ILL_ILLOPN, get_check_address(regs))
static inline void
do_fp_trap(struct pt_regs *regs, void __user *location,
           int fpc, long interruption_code)
{
        siginfo_t si;

        si.si_signo = SIGFPE;
        si.si_errno = 0;
        si.si_addr = location;
        si.si_code = 0;
        /* FPC[2] is Data Exception Code */
        if ((fpc & 0x00000300) == 0) {
                /* bits 6 and 7 of DXC are 0 iff IEEE exception */
                if (fpc & 0x8000) /* invalid fp operation */
                        si.si_code = FPE_FLTINV;
                else if (fpc & 0x4000) /* div by 0 */
                        si.si_code = FPE_FLTDIV;
                else if (fpc & 0x2000) /* overflow */
                        si.si_code = FPE_FLTOVF;
                else if (fpc & 0x1000) /* underflow */
                        si.si_code = FPE_FLTUND;
                else if (fpc & 0x0800) /* inexact */
                        si.si_code = FPE_FLTRES;
        }
        current->thread.ieee_instruction_pointer = (addr_t) location;
        do_trap(interruption_code, SIGFPE,
                "floating point exception", regs, &si);
}
static void illegal_op(struct pt_regs * regs, long interruption_code)
{
        siginfo_t info;
        __u8 opcode[6];
        __u16 __user *location;
        int signal = 0;

        location = get_check_address(regs);

        /*
         * We got all needed information from the lowcore and can
         * now safely switch on interrupts.
         */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                local_irq_enable();

        if (regs->psw.mask & PSW_MASK_PSTATE) {
                if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
                        return;
                if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
                        if (tracehook_consider_fatal_signal(current, SIGTRAP))
                                force_sig(SIGTRAP, current);
                        else
                                signal = SIGILL;
#ifdef CONFIG_MATHEMU
                } else if (opcode[0] == 0xb3) {
                        if (get_user(*((__u16 *) (opcode+2)), location+1))
                                return;
                        signal = math_emu_b3(opcode, regs);
                } else if (opcode[0] == 0xed) {
                        if (get_user(*((__u32 *) (opcode+2)),
                                     (__u32 __user *)(location+1)))
                                return;
                        signal = math_emu_ed(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb299) {
                        if (get_user(*((__u16 *) (opcode+2)), location+1))
                                return;
                        signal = math_emu_srnm(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb29c) {
                        if (get_user(*((__u16 *) (opcode+2)), location+1))
                                return;
                        signal = math_emu_stfpc(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb29d) {
                        if (get_user(*((__u16 *) (opcode+2)), location+1))
                                return;
                        signal = math_emu_lfpc(opcode, regs);
#endif
                } else
                        signal = SIGILL;
        } else {
                /*
                 * If we get an illegal op in kernel mode, send it through the
                 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
                 */
                if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
                               3, SIGTRAP) != NOTIFY_STOP)
                        signal = SIGILL;
        }

#ifdef CONFIG_MATHEMU
        if (signal == SIGFPE)
                do_fp_trap(regs, location,
                           current->thread.fp_regs.fpc, interruption_code);
        else if (signal == SIGSEGV) {
                info.si_signo = signal;
                info.si_errno = 0;
                info.si_code = SEGV_MAPERR;
                info.si_addr = (void __user *) location;
                do_trap(interruption_code, signal,
                        "user address fault", regs, &info);
        } else
#endif
        if (signal) {
                info.si_signo = signal;
                info.si_errno = 0;
                info.si_code = ILL_ILLOPC;
                info.si_addr = (void __user *) location;
                do_trap(interruption_code, signal,
                        "illegal operation", regs, &info);
        }
}
#ifdef CONFIG_MATHEMU
static void specification_exception(struct pt_regs * regs,
                                    long interruption_code)
{
        __u8 opcode[6];
        __u16 __user *location = NULL;
        int signal = 0;

        location = (__u16 __user *) get_check_address(regs);

        /*
         * We got all needed information from the lowcore and can
         * now safely switch on interrupts.
         */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                local_irq_enable();

        if (regs->psw.mask & PSW_MASK_PSTATE) {
                get_user(*((__u16 *) opcode), location);
                switch (opcode[0]) {
                case 0x28: /* LDR Rx,Ry */
                        signal = math_emu_ldr(opcode);
                        break;
                case 0x38: /* LER Rx,Ry */
                        signal = math_emu_ler(opcode);
                        break;
                case 0x60: /* STD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_std(opcode, regs);
                        break;
                case 0x68: /* LD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ld(opcode, regs);
                        break;
                case 0x70: /* STE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ste(opcode, regs);
                        break;
                case 0x78: /* LE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_le(opcode, regs);
                        break;
                default:
                        signal = SIGILL;
                        break;
                }
        } else
                signal = SIGILL;

        if (signal == SIGFPE)
                do_fp_trap(regs, location,
                           current->thread.fp_regs.fpc, interruption_code);
        else if (signal) {
                siginfo_t info;
                info.si_signo = signal;
                info.si_errno = 0;
                info.si_code = ILL_ILLOPN;
                info.si_addr = location;
                do_trap(interruption_code, signal,
                        "specification exception", regs, &info);
        }
}
#else
DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
              ILL_ILLOPN, get_check_address(regs));
#endif
static void data_exception(struct pt_regs * regs, long interruption_code)
{
        __u16 __user *location;
        int signal = 0;

        location = get_check_address(regs);

        /*
         * We got all needed information from the lowcore and can
         * now safely switch on interrupts.
         */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                local_irq_enable();

        if (MACHINE_HAS_IEEE)
                asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));

#ifdef CONFIG_MATHEMU
        else if (regs->psw.mask & PSW_MASK_PSTATE) {
                __u8 opcode[8];
                get_user(*((__u16 *) opcode), location);
                switch (opcode[0]) {
                case 0x28: /* LDR Rx,Ry */
                        signal = math_emu_ldr(opcode);
                        break;
                case 0x38: /* LER Rx,Ry */
                        signal = math_emu_ler(opcode);
                        break;
                case 0x60: /* STD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_std(opcode, regs);
                        break;
                case 0x68: /* LD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ld(opcode, regs);
                        break;
                case 0x70: /* STE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ste(opcode, regs);
                        break;
                case 0x78: /* LE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_le(opcode, regs);
                        break;
                case 0xb3:
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_b3(opcode, regs);
                        break;
                case 0xed:
                        get_user(*((__u32 *) (opcode+2)),
                                 (__u32 __user *)(location+1));
                        signal = math_emu_ed(opcode, regs);
                        break;
                case 0xb2:
                        if (opcode[1] == 0x99) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_srnm(opcode, regs);
                        } else if (opcode[1] == 0x9c) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_stfpc(opcode, regs);
                        } else if (opcode[1] == 0x9d) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_lfpc(opcode, regs);
                        } else
                                signal = SIGILL;
                        break;
                default:
                        signal = SIGILL;
                        break;
                }
        }
#endif
        if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
                signal = SIGFPE;
        else
                signal = SIGILL;
        if (signal == SIGFPE)
                do_fp_trap(regs, location,
                           current->thread.fp_regs.fpc, interruption_code);
        else if (signal) {
                siginfo_t info;
                info.si_signo = signal;
                info.si_errno = 0;
                info.si_code = ILL_ILLOPN;
                info.si_addr = location;
                do_trap(interruption_code, signal,
                        "data exception", regs, &info);
        }
}
static void space_switch_exception(struct pt_regs * regs, long int_code)
{
        siginfo_t info;

        /* Set user psw back to home space mode. */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                regs->psw.mask |= PSW_ASC_HOME;
        /* Send SIGILL. */
        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_PRVOPC;
        info.si_addr = get_check_address(regs);
        do_trap(int_code, SIGILL, "space switch event", regs, &info);
}
asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
{
        bust_spinlocks(1);
        printk("Kernel stack overflow.\n");
        show_regs(regs);
        bust_spinlocks(0);
        panic("Corrupt kernel stack, can't continue.");
}
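/*
 * pgm_check_table below is indexed by the program-interruption code that
 * the hardware stores in the lowcore; the low-level program check handler
 * uses it to dispatch to the right routine.  The DAT-related codes
 * (protection, segment/page translation and, on 64 bit, ASCE/region
 * translation) are routed to the fault handlers declared extern above and
 * implemented in arch/s390/mm/fault.c.
 */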
/* init is done in lowcore.S and head.S */

void __init trap_init(void)
{
        int i;

        for (i = 0; i < 128; i++)
                pgm_check_table[i] = &default_trap_handler;
        pgm_check_table[1] = &illegal_op;
        pgm_check_table[2] = &privileged_op;
        pgm_check_table[3] = &execute_exception;
        pgm_check_table[4] = &do_protection_exception;
        pgm_check_table[5] = &addressing_exception;
        pgm_check_table[6] = &specification_exception;
        pgm_check_table[7] = &data_exception;
        pgm_check_table[8] = &overflow_exception;
        pgm_check_table[9] = &divide_exception;
        pgm_check_table[0x0A] = &overflow_exception;
        pgm_check_table[0x0B] = &divide_exception;
        pgm_check_table[0x0C] = &hfp_overflow_exception;
        pgm_check_table[0x0D] = &hfp_underflow_exception;
        pgm_check_table[0x0E] = &hfp_significance_exception;
        pgm_check_table[0x0F] = &hfp_divide_exception;
        pgm_check_table[0x10] = &do_dat_exception;
        pgm_check_table[0x11] = &do_dat_exception;
        pgm_check_table[0x12] = &translation_exception;
        pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
        pgm_check_table[0x38] = &do_asce_exception;
        pgm_check_table[0x39] = &do_dat_exception;
        pgm_check_table[0x3A] = &do_dat_exception;
        pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
        pgm_check_table[0x15] = &operand_exception;
        pgm_check_table[0x1C] = &space_switch_exception;
        pgm_check_table[0x1D] = &hfp_sqrt_exception;
}