/*
 *  arch/s390/kernel/traps.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
17 #include <linux/sched.h>
18 #include <linux/kernel.h>
19 #include <linux/string.h>
20 #include <linux/errno.h>
21 #include <linux/tracehook.h>
22 #include <linux/timer.h>
24 #include <linux/smp.h>
25 #include <linux/init.h>
26 #include <linux/interrupt.h>
27 #include <linux/seq_file.h>
28 #include <linux/delay.h>
29 #include <linux/module.h>
30 #include <linux/kdebug.h>
31 #include <linux/kallsyms.h>
32 #include <linux/reboot.h>
33 #include <linux/kprobes.h>
34 #include <linux/bug.h>
35 #include <linux/utsname.h>
36 #include <asm/system.h>
37 #include <asm/uaccess.h>
39 #include <asm/atomic.h>
40 #include <asm/mathemu.h>
41 #include <asm/cpcmd.h>
42 #include <asm/s390_ext.h>
43 #include <asm/lowcore.h>
44 #include <asm/debug.h>
47 pgm_check_handler_t
*pgm_check_table
[128];
49 int show_unhandled_signals
;
51 extern pgm_check_handler_t do_protection_exception
;
52 extern pgm_check_handler_t do_dat_exception
;
53 extern pgm_check_handler_t do_asce_exception
;
/* Current kernel stack pointer: grab r15 directly. */
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

/*
 * Word width of a stack slot differs between 31- and 64-bit mode; the
 * 32-bit variants (and the #ifndef guard) were dropped by the mangling
 * and are restored here.
 */
#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */
/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflown
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
76 __show_trace(unsigned long sp
, unsigned long low
, unsigned long high
)
78 struct stack_frame
*sf
;
82 sp
= sp
& PSW_ADDR_INSN
;
83 if (sp
< low
|| sp
> high
- sizeof(*sf
))
85 sf
= (struct stack_frame
*) sp
;
86 printk("([<%016lx>] ", sf
->gprs
[8] & PSW_ADDR_INSN
);
87 print_symbol("%s)\n", sf
->gprs
[8] & PSW_ADDR_INSN
);
88 /* Follow the backchain. */
91 sp
= sf
->back_chain
& PSW_ADDR_INSN
;
94 if (sp
<= low
|| sp
> high
- sizeof(*sf
))
96 sf
= (struct stack_frame
*) sp
;
97 printk(" [<%016lx>] ", sf
->gprs
[8] & PSW_ADDR_INSN
);
98 print_symbol("%s\n", sf
->gprs
[8] & PSW_ADDR_INSN
);
100 /* Zero backchain detected, check for interrupt frame. */
101 sp
= (unsigned long) (sf
+ 1);
102 if (sp
<= low
|| sp
> high
- sizeof(*regs
))
104 regs
= (struct pt_regs
*) sp
;
105 printk(" [<%016lx>] ", regs
->psw
.addr
& PSW_ADDR_INSN
);
106 print_symbol("%s\n", regs
->psw
.addr
& PSW_ADDR_INSN
);
112 static void show_trace(struct task_struct
*task
, unsigned long *stack
)
114 register unsigned long __r15
asm ("15");
117 sp
= (unsigned long) stack
;
119 sp
= task
? task
->thread
.ksp
: __r15
;
120 printk("Call Trace:\n");
121 #ifdef CONFIG_CHECK_STACK
122 sp
= __show_trace(sp
, S390_lowcore
.panic_stack
- 4096,
123 S390_lowcore
.panic_stack
);
125 sp
= __show_trace(sp
, S390_lowcore
.async_stack
- ASYNC_SIZE
,
126 S390_lowcore
.async_stack
);
128 __show_trace(sp
, (unsigned long) task_stack_page(task
),
129 (unsigned long) task_stack_page(task
) + THREAD_SIZE
);
131 __show_trace(sp
, S390_lowcore
.thread_info
,
132 S390_lowcore
.thread_info
+ THREAD_SIZE
);
135 debug_show_held_locks(task
);
138 void show_stack(struct task_struct
*task
, unsigned long *sp
)
140 register unsigned long * __r15
asm ("15");
141 unsigned long *stack
;
145 stack
= task
? (unsigned long *) task
->thread
.ksp
: __r15
;
149 for (i
= 0; i
< kstack_depth_to_print
; i
++) {
150 if (((addr_t
) stack
& (THREAD_SIZE
-1)) == 0)
152 if (i
&& ((i
* sizeof (long) % 32) == 0))
154 printk(LONG
, *stack
++);
157 show_trace(task
, sp
);
/*
 * Print the breaking-event address saved by the entry code in args[0].
 * The breaking-event-address register only exists on 64-bit machines.
 */
static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
	print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}
170 * The architecture-independent dump_stack generator
172 void dump_stack(void)
174 printk("CPU: %d %s %s %.*s\n",
175 task_thread_info(current
)->cpu
, print_tainted(),
176 init_utsname()->release
,
177 (int)strcspn(init_utsname()->version
, " "),
178 init_utsname()->version
);
179 printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
180 current
->comm
, current
->pid
, current
,
181 (void *) current
->thread
.ksp
);
182 show_stack(NULL
, NULL
);
184 EXPORT_SYMBOL(dump_stack
);
186 static inline int mask_bits(struct pt_regs
*regs
, unsigned long bits
)
188 return (regs
->psw
.mask
& bits
) / ((~bits
+ 1) & bits
);
191 void show_registers(struct pt_regs
*regs
)
195 mode
= (regs
->psw
.mask
& PSW_MASK_PSTATE
) ? "User" : "Krnl";
196 printk("%s PSW : %p %p",
197 mode
, (void *) regs
->psw
.mask
,
198 (void *) regs
->psw
.addr
);
199 print_symbol(" (%s)\n", regs
->psw
.addr
& PSW_ADDR_INSN
);
200 printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
201 "P:%x AS:%x CC:%x PM:%x", mask_bits(regs
, PSW_MASK_PER
),
202 mask_bits(regs
, PSW_MASK_DAT
), mask_bits(regs
, PSW_MASK_IO
),
203 mask_bits(regs
, PSW_MASK_EXT
), mask_bits(regs
, PSW_MASK_KEY
),
204 mask_bits(regs
, PSW_MASK_MCHECK
), mask_bits(regs
, PSW_MASK_WAIT
),
205 mask_bits(regs
, PSW_MASK_PSTATE
), mask_bits(regs
, PSW_MASK_ASC
),
206 mask_bits(regs
, PSW_MASK_CC
), mask_bits(regs
, PSW_MASK_PM
));
208 printk(" EA:%x", mask_bits(regs
, PSW_BASE_BITS
));
210 printk("\n%s GPRS: " FOURLONG
, mode
,
211 regs
->gprs
[0], regs
->gprs
[1], regs
->gprs
[2], regs
->gprs
[3]);
213 regs
->gprs
[4], regs
->gprs
[5], regs
->gprs
[6], regs
->gprs
[7]);
215 regs
->gprs
[8], regs
->gprs
[9], regs
->gprs
[10], regs
->gprs
[11]);
217 regs
->gprs
[12], regs
->gprs
[13], regs
->gprs
[14], regs
->gprs
[15]);
222 void show_regs(struct pt_regs
*regs
)
225 printk("CPU: %d %s %s %.*s\n",
226 task_thread_info(current
)->cpu
, print_tainted(),
227 init_utsname()->release
,
228 (int)strcspn(init_utsname()->version
, " "),
229 init_utsname()->version
);
230 printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
231 current
->comm
, current
->pid
, current
,
232 (void *) current
->thread
.ksp
);
233 show_registers(regs
);
234 /* Show stack backtrace if pt_regs is from kernel mode */
235 if (!(regs
->psw
.mask
& PSW_MASK_PSTATE
))
236 show_trace(NULL
, (unsigned long *) regs
->gprs
[15]);
237 show_last_breaking_event(regs
);
240 /* This is called from fs/proc/array.c */
241 void task_show_regs(struct seq_file
*m
, struct task_struct
*task
)
243 struct pt_regs
*regs
;
245 regs
= task_pt_regs(task
);
246 seq_printf(m
, "task: %p, ksp: %p\n",
247 task
, (void *)task
->thread
.ksp
);
248 seq_printf(m
, "User PSW : %p %p\n",
249 (void *) regs
->psw
.mask
, (void *)regs
->psw
.addr
);
251 seq_printf(m
, "User GPRS: " FOURLONG
,
252 regs
->gprs
[0], regs
->gprs
[1],
253 regs
->gprs
[2], regs
->gprs
[3]);
254 seq_printf(m
, " " FOURLONG
,
255 regs
->gprs
[4], regs
->gprs
[5],
256 regs
->gprs
[6], regs
->gprs
[7]);
257 seq_printf(m
, " " FOURLONG
,
258 regs
->gprs
[8], regs
->gprs
[9],
259 regs
->gprs
[10], regs
->gprs
[11]);
260 seq_printf(m
, " " FOURLONG
,
261 regs
->gprs
[12], regs
->gprs
[13],
262 regs
->gprs
[14], regs
->gprs
[15]);
263 seq_printf(m
, "User ACRS: %08x %08x %08x %08x\n",
264 task
->thread
.acrs
[0], task
->thread
.acrs
[1],
265 task
->thread
.acrs
[2], task
->thread
.acrs
[3]);
266 seq_printf(m
, " %08x %08x %08x %08x\n",
267 task
->thread
.acrs
[4], task
->thread
.acrs
[5],
268 task
->thread
.acrs
[6], task
->thread
.acrs
[7]);
269 seq_printf(m
, " %08x %08x %08x %08x\n",
270 task
->thread
.acrs
[8], task
->thread
.acrs
[9],
271 task
->thread
.acrs
[10], task
->thread
.acrs
[11]);
272 seq_printf(m
, " %08x %08x %08x %08x\n",
273 task
->thread
.acrs
[12], task
->thread
.acrs
[13],
274 task
->thread
.acrs
[14], task
->thread
.acrs
[15]);
277 static DEFINE_SPINLOCK(die_lock
);
279 void die(const char * str
, struct pt_regs
* regs
, long err
)
281 static int die_counter
;
286 spin_lock_irq(&die_lock
);
288 printk("%s: %04lx [#%d] ", str
, err
& 0xffff, ++die_counter
);
289 #ifdef CONFIG_PREEMPT
295 #ifdef CONFIG_DEBUG_PAGEALLOC
296 printk("DEBUG_PAGEALLOC");
299 notify_die(DIE_OOPS
, str
, regs
, err
, current
->thread
.trap_no
, SIGSEGV
);
302 add_taint(TAINT_DIE
);
303 spin_unlock_irq(&die_lock
);
305 panic("Fatal exception in interrupt");
307 panic("Fatal exception: panic_on_oops");
312 static void inline report_user_fault(struct pt_regs
*regs
, long int_code
,
315 if ((task_pid_nr(current
) > 1) && !show_unhandled_signals
)
317 if (!unhandled_signal(current
, signr
))
319 if (!printk_ratelimit())
321 printk("User process fault: interruption code 0x%lX ", int_code
);
322 print_vma_addr("in ", regs
->psw
.addr
& PSW_ADDR_INSN
);
/*
 * Used by report_bug(); on s390 every address reached by a trap is a
 * potential BUG() site, so accept unconditionally.
 */
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}
332 static void __kprobes
inline do_trap(long interruption_code
, int signr
,
333 char *str
, struct pt_regs
*regs
,
337 * We got all needed information from the lowcore and can
338 * now safely switch on interrupts.
340 if (regs
->psw
.mask
& PSW_MASK_PSTATE
)
343 if (notify_die(DIE_TRAP
, str
, regs
, interruption_code
,
344 interruption_code
, signr
) == NOTIFY_STOP
)
347 if (regs
->psw
.mask
& PSW_MASK_PSTATE
) {
348 struct task_struct
*tsk
= current
;
350 tsk
->thread
.trap_no
= interruption_code
& 0xffff;
351 force_sig_info(signr
, info
, tsk
);
352 report_user_fault(regs
, interruption_code
, signr
);
354 const struct exception_table_entry
*fixup
;
355 fixup
= search_exception_tables(regs
->psw
.addr
& PSW_ADDR_INSN
);
357 regs
->psw
.addr
= fixup
->fixup
| PSW_ADDR_AMODE
;
359 enum bug_trap_type btt
;
361 btt
= report_bug(regs
->psw
.addr
& PSW_ADDR_INSN
, regs
);
362 if (btt
== BUG_TRAP_TYPE_WARN
)
364 die(str
, regs
, interruption_code
);
369 static inline void __user
*get_check_address(struct pt_regs
*regs
)
371 return (void __user
*)((regs
->psw
.addr
-S390_lowcore
.pgm_ilc
) & PSW_ADDR_INSN
);
374 void __kprobes
do_single_step(struct pt_regs
*regs
)
376 if (notify_die(DIE_SSTEP
, "sstep", regs
, 0, 0,
377 SIGTRAP
) == NOTIFY_STOP
){
380 if (tracehook_consider_fatal_signal(current
, SIGTRAP
))
381 force_sig(SIGTRAP
, current
);
384 static void default_trap_handler(struct pt_regs
* regs
, long interruption_code
)
386 if (regs
->psw
.mask
& PSW_MASK_PSTATE
) {
388 report_user_fault(regs
, interruption_code
, SIGSEGV
);
391 die("Unknown program exception", regs
, interruption_code
);
/*
 * Generate a trivial program-check handler that fills a siginfo and
 * forwards to do_trap().
 */
#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
static void name(struct pt_regs * regs, long interruption_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = siaddr; \
	do_trap(interruption_code, signr, str, regs, &info); \
}
405 DO_ERROR_INFO(SIGILL
, "addressing exception", addressing_exception
,
406 ILL_ILLADR
, get_check_address(regs
))
407 DO_ERROR_INFO(SIGILL
, "execute exception", execute_exception
,
408 ILL_ILLOPN
, get_check_address(regs
))
409 DO_ERROR_INFO(SIGFPE
, "fixpoint divide exception", divide_exception
,
410 FPE_INTDIV
, get_check_address(regs
))
411 DO_ERROR_INFO(SIGFPE
, "fixpoint overflow exception", overflow_exception
,
412 FPE_INTOVF
, get_check_address(regs
))
413 DO_ERROR_INFO(SIGFPE
, "HFP overflow exception", hfp_overflow_exception
,
414 FPE_FLTOVF
, get_check_address(regs
))
415 DO_ERROR_INFO(SIGFPE
, "HFP underflow exception", hfp_underflow_exception
,
416 FPE_FLTUND
, get_check_address(regs
))
417 DO_ERROR_INFO(SIGFPE
, "HFP significance exception", hfp_significance_exception
,
418 FPE_FLTRES
, get_check_address(regs
))
419 DO_ERROR_INFO(SIGFPE
, "HFP divide exception", hfp_divide_exception
,
420 FPE_FLTDIV
, get_check_address(regs
))
421 DO_ERROR_INFO(SIGFPE
, "HFP square root exception", hfp_sqrt_exception
,
422 FPE_FLTINV
, get_check_address(regs
))
423 DO_ERROR_INFO(SIGILL
, "operand exception", operand_exception
,
424 ILL_ILLOPN
, get_check_address(regs
))
425 DO_ERROR_INFO(SIGILL
, "privileged operation", privileged_op
,
426 ILL_PRVOPC
, get_check_address(regs
))
427 DO_ERROR_INFO(SIGILL
, "special operation exception", special_op_exception
,
428 ILL_ILLOPN
, get_check_address(regs
))
429 DO_ERROR_INFO(SIGILL
, "translation exception", translation_exception
,
430 ILL_ILLOPN
, get_check_address(regs
))
433 do_fp_trap(struct pt_regs
*regs
, void __user
*location
,
434 int fpc
, long interruption_code
)
438 si
.si_signo
= SIGFPE
;
440 si
.si_addr
= location
;
442 /* FPC[2] is Data Exception Code */
443 if ((fpc
& 0x00000300) == 0) {
444 /* bits 6 and 7 of DXC are 0 iff IEEE exception */
445 if (fpc
& 0x8000) /* invalid fp operation */
446 si
.si_code
= FPE_FLTINV
;
447 else if (fpc
& 0x4000) /* div by 0 */
448 si
.si_code
= FPE_FLTDIV
;
449 else if (fpc
& 0x2000) /* overflow */
450 si
.si_code
= FPE_FLTOVF
;
451 else if (fpc
& 0x1000) /* underflow */
452 si
.si_code
= FPE_FLTUND
;
453 else if (fpc
& 0x0800) /* inexact */
454 si
.si_code
= FPE_FLTRES
;
456 current
->thread
.ieee_instruction_pointer
= (addr_t
) location
;
457 do_trap(interruption_code
, SIGFPE
,
458 "floating point exception", regs
, &si
);
461 static void illegal_op(struct pt_regs
* regs
, long interruption_code
)
465 __u16 __user
*location
;
468 location
= get_check_address(regs
);
471 * We got all needed information from the lowcore and can
472 * now safely switch on interrupts.
474 if (regs
->psw
.mask
& PSW_MASK_PSTATE
)
477 if (regs
->psw
.mask
& PSW_MASK_PSTATE
) {
478 if (get_user(*((__u16
*) opcode
), (__u16 __user
*) location
))
480 if (*((__u16
*) opcode
) == S390_BREAKPOINT_U16
) {
481 if (tracehook_consider_fatal_signal(current
, SIGTRAP
))
482 force_sig(SIGTRAP
, current
);
485 #ifdef CONFIG_MATHEMU
486 } else if (opcode
[0] == 0xb3) {
487 if (get_user(*((__u16
*) (opcode
+2)), location
+1))
489 signal
= math_emu_b3(opcode
, regs
);
490 } else if (opcode
[0] == 0xed) {
491 if (get_user(*((__u32
*) (opcode
+2)),
492 (__u32 __user
*)(location
+1)))
494 signal
= math_emu_ed(opcode
, regs
);
495 } else if (*((__u16
*) opcode
) == 0xb299) {
496 if (get_user(*((__u16
*) (opcode
+2)), location
+1))
498 signal
= math_emu_srnm(opcode
, regs
);
499 } else if (*((__u16
*) opcode
) == 0xb29c) {
500 if (get_user(*((__u16
*) (opcode
+2)), location
+1))
502 signal
= math_emu_stfpc(opcode
, regs
);
503 } else if (*((__u16
*) opcode
) == 0xb29d) {
504 if (get_user(*((__u16
*) (opcode
+2)), location
+1))
506 signal
= math_emu_lfpc(opcode
, regs
);
512 * If we get an illegal op in kernel mode, send it through the
513 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
515 if (notify_die(DIE_BPT
, "bpt", regs
, interruption_code
,
516 3, SIGTRAP
) != NOTIFY_STOP
)
520 #ifdef CONFIG_MATHEMU
521 if (signal
== SIGFPE
)
522 do_fp_trap(regs
, location
,
523 current
->thread
.fp_regs
.fpc
, interruption_code
);
524 else if (signal
== SIGSEGV
) {
525 info
.si_signo
= signal
;
527 info
.si_code
= SEGV_MAPERR
;
528 info
.si_addr
= (void __user
*) location
;
529 do_trap(interruption_code
, signal
,
530 "user address fault", regs
, &info
);
534 info
.si_signo
= signal
;
536 info
.si_code
= ILL_ILLOPC
;
537 info
.si_addr
= (void __user
*) location
;
538 do_trap(interruption_code
, signal
,
539 "illegal operation", regs
, &info
);
544 #ifdef CONFIG_MATHEMU
546 specification_exception(struct pt_regs
* regs
, long interruption_code
)
549 __u16 __user
*location
= NULL
;
552 location
= (__u16 __user
*) get_check_address(regs
);
555 * We got all needed information from the lowcore and can
556 * now safely switch on interrupts.
558 if (regs
->psw
.mask
& PSW_MASK_PSTATE
)
561 if (regs
->psw
.mask
& PSW_MASK_PSTATE
) {
562 get_user(*((__u16
*) opcode
), location
);
564 case 0x28: /* LDR Rx,Ry */
565 signal
= math_emu_ldr(opcode
);
567 case 0x38: /* LER Rx,Ry */
568 signal
= math_emu_ler(opcode
);
570 case 0x60: /* STD R,D(X,B) */
571 get_user(*((__u16
*) (opcode
+2)), location
+1);
572 signal
= math_emu_std(opcode
, regs
);
574 case 0x68: /* LD R,D(X,B) */
575 get_user(*((__u16
*) (opcode
+2)), location
+1);
576 signal
= math_emu_ld(opcode
, regs
);
578 case 0x70: /* STE R,D(X,B) */
579 get_user(*((__u16
*) (opcode
+2)), location
+1);
580 signal
= math_emu_ste(opcode
, regs
);
582 case 0x78: /* LE R,D(X,B) */
583 get_user(*((__u16
*) (opcode
+2)), location
+1);
584 signal
= math_emu_le(opcode
, regs
);
593 if (signal
== SIGFPE
)
594 do_fp_trap(regs
, location
,
595 current
->thread
.fp_regs
.fpc
, interruption_code
);
598 info
.si_signo
= signal
;
600 info
.si_code
= ILL_ILLOPN
;
601 info
.si_addr
= location
;
602 do_trap(interruption_code
, signal
,
603 "specification exception", regs
, &info
);
607 DO_ERROR_INFO(SIGILL
, "specification exception", specification_exception
,
608 ILL_ILLOPN
, get_check_address(regs
));
611 static void data_exception(struct pt_regs
* regs
, long interruption_code
)
613 __u16 __user
*location
;
616 location
= get_check_address(regs
);
619 * We got all needed information from the lowcore and can
620 * now safely switch on interrupts.
622 if (regs
->psw
.mask
& PSW_MASK_PSTATE
)
625 if (MACHINE_HAS_IEEE
)
626 asm volatile("stfpc %0" : "=m" (current
->thread
.fp_regs
.fpc
));
628 #ifdef CONFIG_MATHEMU
629 else if (regs
->psw
.mask
& PSW_MASK_PSTATE
) {
631 get_user(*((__u16
*) opcode
), location
);
633 case 0x28: /* LDR Rx,Ry */
634 signal
= math_emu_ldr(opcode
);
636 case 0x38: /* LER Rx,Ry */
637 signal
= math_emu_ler(opcode
);
639 case 0x60: /* STD R,D(X,B) */
640 get_user(*((__u16
*) (opcode
+2)), location
+1);
641 signal
= math_emu_std(opcode
, regs
);
643 case 0x68: /* LD R,D(X,B) */
644 get_user(*((__u16
*) (opcode
+2)), location
+1);
645 signal
= math_emu_ld(opcode
, regs
);
647 case 0x70: /* STE R,D(X,B) */
648 get_user(*((__u16
*) (opcode
+2)), location
+1);
649 signal
= math_emu_ste(opcode
, regs
);
651 case 0x78: /* LE R,D(X,B) */
652 get_user(*((__u16
*) (opcode
+2)), location
+1);
653 signal
= math_emu_le(opcode
, regs
);
656 get_user(*((__u16
*) (opcode
+2)), location
+1);
657 signal
= math_emu_b3(opcode
, regs
);
660 get_user(*((__u32
*) (opcode
+2)),
661 (__u32 __user
*)(location
+1));
662 signal
= math_emu_ed(opcode
, regs
);
665 if (opcode
[1] == 0x99) {
666 get_user(*((__u16
*) (opcode
+2)), location
+1);
667 signal
= math_emu_srnm(opcode
, regs
);
668 } else if (opcode
[1] == 0x9c) {
669 get_user(*((__u16
*) (opcode
+2)), location
+1);
670 signal
= math_emu_stfpc(opcode
, regs
);
671 } else if (opcode
[1] == 0x9d) {
672 get_user(*((__u16
*) (opcode
+2)), location
+1);
673 signal
= math_emu_lfpc(opcode
, regs
);
683 if (current
->thread
.fp_regs
.fpc
& FPC_DXC_MASK
)
687 if (signal
== SIGFPE
)
688 do_fp_trap(regs
, location
,
689 current
->thread
.fp_regs
.fpc
, interruption_code
);
692 info
.si_signo
= signal
;
694 info
.si_code
= ILL_ILLOPN
;
695 info
.si_addr
= location
;
696 do_trap(interruption_code
, signal
,
697 "data exception", regs
, &info
);
701 static void space_switch_exception(struct pt_regs
* regs
, long int_code
)
705 /* Set user psw back to home space mode. */
706 if (regs
->psw
.mask
& PSW_MASK_PSTATE
)
707 regs
->psw
.mask
|= PSW_ASC_HOME
;
709 info
.si_signo
= SIGILL
;
711 info
.si_code
= ILL_PRVOPC
;
712 info
.si_addr
= get_check_address(regs
);
713 do_trap(int_code
, SIGILL
, "space switch event", regs
, &info
);
716 asmlinkage
void kernel_stack_overflow(struct pt_regs
* regs
)
719 printk("Kernel stack overflow.\n");
722 panic("Corrupt kernel stack, can't continue.");
725 /* init is done in lowcore.S and head.S */
727 void __init
trap_init(void)
731 for (i
= 0; i
< 128; i
++)
732 pgm_check_table
[i
] = &default_trap_handler
;
733 pgm_check_table
[1] = &illegal_op
;
734 pgm_check_table
[2] = &privileged_op
;
735 pgm_check_table
[3] = &execute_exception
;
736 pgm_check_table
[4] = &do_protection_exception
;
737 pgm_check_table
[5] = &addressing_exception
;
738 pgm_check_table
[6] = &specification_exception
;
739 pgm_check_table
[7] = &data_exception
;
740 pgm_check_table
[8] = &overflow_exception
;
741 pgm_check_table
[9] = ÷_exception
;
742 pgm_check_table
[0x0A] = &overflow_exception
;
743 pgm_check_table
[0x0B] = ÷_exception
;
744 pgm_check_table
[0x0C] = &hfp_overflow_exception
;
745 pgm_check_table
[0x0D] = &hfp_underflow_exception
;
746 pgm_check_table
[0x0E] = &hfp_significance_exception
;
747 pgm_check_table
[0x0F] = &hfp_divide_exception
;
748 pgm_check_table
[0x10] = &do_dat_exception
;
749 pgm_check_table
[0x11] = &do_dat_exception
;
750 pgm_check_table
[0x12] = &translation_exception
;
751 pgm_check_table
[0x13] = &special_op_exception
;
753 pgm_check_table
[0x38] = &do_asce_exception
;
754 pgm_check_table
[0x39] = &do_dat_exception
;
755 pgm_check_table
[0x3A] = &do_dat_exception
;
756 pgm_check_table
[0x3B] = &do_dat_exception
;
757 #endif /* CONFIG_64BIT */
758 pgm_check_table
[0x15] = &operand_exception
;
759 pgm_check_table
[0x1C] = &space_switch_exception
;
760 pgm_check_table
[0x1D] = &hfp_sqrt_exception
;