/*
 *  arch/s390/kernel/traps.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/delay.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/mathemu.h>
#if CONFIG_REMOTE_DEBUG
#include <asm/gdb-stub.h>
#endif
/* Called from entry.S only */
extern void handle_per_exception(struct pt_regs *regs);

typedef void pgm_check_handler_t(struct pt_regs *, long);
pgm_check_handler_t *pgm_check_table[128];

extern pgm_check_handler_t default_trap_handler;
extern pgm_check_handler_t do_page_fault;

asmlinkage int system_call(void);
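
/*
 * DO_ERROR() below expands to a minimal program-check handler: it records
 * the error code and trap number in the task's thread structure, lets
 * die_if_no_fixup() deal with faults raised in kernel mode (exception
 * table fixup or die), and then posts the given signal to the task.
 */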
#define DO_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void name(struct pt_regs * regs, long error_code) \
{ \
        tsk->thread.error_code = error_code; \
        tsk->thread.trap_no = trapnr; \
        die_if_no_fixup(str,regs,error_code); \
        force_sig(signr, tsk); \
}
/* TODO: define these as 'pgm_check_handler_t xxx;'
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
*/
int kstack_depth_to_print = 24;

/*
 * These constants are for searching for possible module text
 * segments.  VMALLOC_OFFSET comes from mm/vmalloc.c; MODULE_RANGE is
 * a guess of how much space is likely to be vmalloced.
 */
#define VMALLOC_OFFSET (8*1024*1024)
#define MODULE_RANGE (8*1024*1024)
void show_crashed_task_info(void)
{
        printk("CPU: %d\n", smp_processor_id());
        printk("Process %s (pid: %d, stackpage=%08X)\n",
               current->comm, current->pid, 4096+(addr_t)current);
        show_regs(current, NULL, NULL);
}
#if 0
static void show_registers(struct pt_regs *regs)
{
        printk("CPU: %d\nPSW: %08lx %08lx\n",
               smp_processor_id(), (unsigned long) regs->psw.mask,
               (unsigned long) regs->psw.addr);
        printk("GPRS:\n");

        printk("%08lx %08lx %08lx %08lx\n",
               regs->gprs[0], regs->gprs[1],
               regs->gprs[2], regs->gprs[3]);
        printk("%08lx %08lx %08lx %08lx\n",
               regs->gprs[4], regs->gprs[5],
               regs->gprs[6], regs->gprs[7]);
        printk("%08lx %08lx %08lx %08lx\n",
               regs->gprs[8], regs->gprs[9],
               regs->gprs[10], regs->gprs[11]);
        printk("%08lx %08lx %08lx %08lx\n",
               regs->gprs[12], regs->gprs[13],
               regs->gprs[14], regs->gprs[15]);
        printk("Process %s (pid: %d, stackpage=%08lx)\nStack: ",
               current->comm, current->pid, 4096+(unsigned long)current);
/*
        stack = (unsigned long *) esp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (((long) stack & 4095) == 0)
                        break;
                if (i && ((i % 8) == 0))
                        printk("\n       ");
                printk("%08lx ", get_seg_long(ss, stack++));
        }
        printk("\nCall Trace: ");
        stack = (unsigned long *) esp;
        i = 1;
        module_start = PAGE_OFFSET + (max_mapnr << PAGE_SHIFT);
        module_start = ((module_start + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1));
        module_end = module_start + MODULE_RANGE;
        while (((long) stack & 4095) != 0) {
                addr = get_seg_long(ss, stack++); */
                /*
                 * If the address is either in the text segment of the
                 * kernel, or in the region which contains vmalloc'ed
                 * memory, it *may* be the address of a calling
                 * routine; if so, print it so that someone tracing
                 * down the cause of the crash will be able to figure
                 * out the call path that was taken.
                 */
/*              if (((addr >= (unsigned long) &_stext) &&
                     (addr <= (unsigned long) &_etext)) ||
                    ((addr >= module_start) && (addr <= module_end))) {
                        if (i && ((i % 8) == 0))
                                printk("\n       ");
                        printk("[<%08lx>] ", addr);
                        i++;
                }
        } */
        printk("\nCode: ");
        for (i = 0; i < 20; i++)
                printk("%02x ", 0xff & get_seg_byte(regs->xcs & 0xffff, (i+(char *)regs->eip)));
        printk("\n");
}
#endif
spinlock_t die_lock;

void die(const char * str, struct pt_regs * regs, long err)
{
        console_verbose();
        spin_lock_irq(&die_lock);
        printk("%s: %04lx\n", str, err & 0xffff);
        show_crashed_task_info();
        spin_unlock_irq(&die_lock);
        do_exit(SIGSEGV);
}
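
/*
 * Kernel-mode faults may have an entry in the exception table: if
 * search_exception_table() finds a fixup address for the faulting
 * psw.addr, execution resumes there instead of the task being killed.
 */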
int check_for_fixup(struct pt_regs * regs)
{
        if (!(regs->psw.mask & PSW_PROBLEM_STATE)) {
                unsigned long fixup;
                fixup = search_exception_table(regs->psw.addr);
                if (fixup) {
                        regs->psw.addr = fixup;
                        return 1;
                }
        }
        return 0;
}
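
/*
 * do_debugger_trap() decides who gets a debug event: a ptraced
 * problem-state (user) task receives the signal directly, while for
 * kernel-mode events the remote GDB stub, if configured and
 * initialised, gets the first chance.  A non-zero return value means
 * nobody handled the trap.
 */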
int do_debugger_trap(struct pt_regs *regs, int signal)
{
        if (regs->psw.mask & PSW_PROBLEM_STATE) {
                if (current->flags & PF_PTRACED)
                        force_sig(signal, current);
                else
                        return 1;
        } else {
#if CONFIG_REMOTE_DEBUG
                if (gdb_stub_initialised) {
                        gdb_stub_handle_exception((gdb_pt_regs *)regs, signal);
                        return 0;
                }
#endif
                return 1;
        }
        return 0;
}
static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
        if (!(regs->psw.mask & PSW_PROBLEM_STATE)) {
                unsigned long fixup;
                fixup = search_exception_table(regs->psw.addr);
                if (fixup) {
                        regs->psw.addr = fixup;
                        return;
                }
                die(str, regs, err);
        }
}
asmlinkage void default_trap_handler(struct pt_regs * regs, long error_code)
{
        current->thread.error_code = error_code;
        current->thread.trap_no = error_code;
        die_if_no_fixup("Unknown program exception", regs, error_code);
        force_sig(SIGSEGV, current);
}
DO_ERROR(2, SIGILL, "privileged operation", privileged_op, current)
DO_ERROR(3, SIGILL, "execute exception", execute_exception, current)
DO_ERROR(5, SIGSEGV, "addressing exception", addressing_exception, current)
DO_ERROR(9, SIGFPE, "fixpoint divide exception", divide_exception, current)
DO_ERROR(0x12, SIGILL, "translation exception", translation_exception, current)
DO_ERROR(0x13, SIGILL, "special operand exception", special_op_exception, current)
DO_ERROR(0x15, SIGILL, "operand exception", operand_exception, current)
/* need to define
DO_ERROR( 6, SIGILL, "invalid operand", invalid_op, current)
DO_ERROR( 8, SIGSEGV, "double fault", double_fault, current)
DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun, last_task_used_math)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS, current)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present, current)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment, current)
DO_ERROR(17, SIGSEGV, "alignment check", alignment_check, current)
DO_ERROR(18, SIGSEGV, "reserved", reserved, current)
DO_ERROR(19, SIGSEGV, "cache flush denied", cache_flush_denied, current)
*/
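
/*
 * With CONFIG_IEEEFPU_EMULATION the illegal-op, specification and data
 * exception handlers peek at the faulting instruction and, for user
 * space, hand recognised floating-point opcodes to the math_emu_*
 * helpers (<asm/mathemu.h>); anything they cannot emulate still ends
 * up as a SIGILL.  Without the option the plain DO_ERROR() handlers
 * further down are used instead.
 */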
#ifdef CONFIG_IEEEFPU_EMULATION

asmlinkage void illegal_op(struct pt_regs * regs, long error_code)
{
        __u8 opcode[6];
        __u16 *location;
        int do_sig = 0;
        int problem_state = (regs->psw.mask & PSW_PROBLEM_STATE);

        lock_kernel();
        location = (__u16 *)(regs->psw.addr - S390_lowcore.pgm_ilc);
        if (problem_state)
                get_user(*((__u16 *) opcode), location);
        else
                *((__u16 *) opcode) = *((__u16 *) location);
        if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
                if (do_debugger_trap(regs, SIGTRAP))
                        do_sig = 1;
        } else if (problem_state) {
                if (opcode[0] == 0xb3) {
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        do_sig = math_emu_b3(opcode, regs);
                } else if (opcode[0] == 0xed) {
                        get_user(*((__u32 *) (opcode+2)),
                                 (__u32 *)(location+1));
                        do_sig = math_emu_ed(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb299) {
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        do_sig = math_emu_srnm(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb29c) {
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        do_sig = math_emu_stfpc(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb29d) {
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        do_sig = math_emu_lfpc(opcode, regs);
                } else
                        do_sig = 1;
        } else
                do_sig = 1;
        if (do_sig) {
                current->thread.error_code = error_code;
                current->thread.trap_no = 1;
                force_sig(SIGILL, current);
                die_if_no_fixup("illegal operation", regs, error_code);
        }
        unlock_kernel();
}
asmlinkage void specification_exception(struct pt_regs * regs, long error_code)
{
        __u8 opcode[6];
        __u16 *location;
        int do_sig = 0;

        lock_kernel();
        if (regs->psw.mask & 0x00010000L) {
                location = (__u16 *)(regs->psw.addr - S390_lowcore.pgm_ilc);
                get_user(*((__u16 *) opcode), location);
                switch (opcode[0]) {
                case 0x28: /* LDR Rx,Ry */
                        math_emu_ldr(opcode);
                        break;
                case 0x38: /* LER Rx,Ry */
                        math_emu_ler(opcode);
                        break;
                case 0x60: /* STD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        math_emu_std(opcode, regs);
                        break;
                case 0x68: /* LD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        math_emu_ld(opcode, regs);
                        break;
                case 0x70: /* STE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        math_emu_ste(opcode, regs);
                        break;
                case 0x78: /* LE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        math_emu_le(opcode, regs);
                        break;
                default:
                        do_sig = 1;
                        break;
                }
        } else
                do_sig = 1;
        if (do_sig) {
                current->thread.error_code = error_code;
                current->thread.trap_no = 1;
                force_sig(SIGILL, current);
                die_if_no_fixup("illegal operation", regs, error_code);
        }
        unlock_kernel();
}
asmlinkage void data_exception(struct pt_regs * regs, long error_code)
{
        __u8 opcode[6];
        __u16 *location;
        int do_sig = 0;

        lock_kernel();
        if (regs->psw.mask & 0x00010000L) {
                location = (__u16 *)(regs->psw.addr - S390_lowcore.pgm_ilc);
                get_user(*((__u16 *) opcode), location);
                switch (opcode[0]) {
                case 0x28: /* LDR Rx,Ry */
                        math_emu_ldr(opcode);
                        break;
                case 0x38: /* LER Rx,Ry */
                        math_emu_ler(opcode);
                        break;
                case 0x60: /* STD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        math_emu_std(opcode, regs);
                        break;
                case 0x68: /* LD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        math_emu_ld(opcode, regs);
                        break;
                case 0x70: /* STE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        math_emu_ste(opcode, regs);
                        break;
                case 0x78: /* LE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        math_emu_le(opcode, regs);
                        break;
                case 0xb3:
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        do_sig = math_emu_b3(opcode, regs);
                        break;
                case 0xed:
                        get_user(*((__u32 *) (opcode+2)),
                                 (__u32 *)(location+1));
                        do_sig = math_emu_ed(opcode, regs);
                        break;
                case 0xb2:
                        if (opcode[1] == 0x99) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                do_sig = math_emu_srnm(opcode, regs);
                        } else if (opcode[1] == 0x9c) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                do_sig = math_emu_stfpc(opcode, regs);
                        } else if (opcode[1] == 0x9d) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                do_sig = math_emu_lfpc(opcode, regs);
                        } else
                                do_sig = 1;
                        break;
                default:
                        do_sig = 1;
                        break;
                }
        } else
                do_sig = 1;
        if (do_sig) {
                current->thread.error_code = error_code;
                current->thread.trap_no = 1;
                force_sig(SIGILL, current);
                die_if_no_fixup("illegal operation", regs, error_code);
        }
        unlock_kernel();
}
#else
DO_ERROR(1, SIGILL, "illegal operation", illegal_op, current)
DO_ERROR(6, SIGILL, "specification exception", specification_exception, current)
DO_ERROR(7, SIGILL, "data exception", data_exception, current)
#endif /* CONFIG_IEEEFPU_EMULATION */
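
/*
 * trap_init() fills pgm_check_table[], which is indexed by the S/390
 * program interruption code.  The codes that produce page faults
 * (0x04, 0x10, 0x11) are wired to do_page_fault(); every code without
 * a dedicated handler falls back to default_trap_handler().
 */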
/* init is done in lowcore.S and head.S */

void __init trap_init(void)
{
        int i;

        for (i = 0; i < 128; i++)
                pgm_check_table[i] = &default_trap_handler;
        pgm_check_table[1] = &illegal_op;
        pgm_check_table[2] = &privileged_op;
        pgm_check_table[3] = &execute_exception;
        pgm_check_table[5] = &addressing_exception;
        pgm_check_table[6] = &specification_exception;
        pgm_check_table[7] = &data_exception;
        pgm_check_table[9] = &divide_exception;
        pgm_check_table[0x12] = &translation_exception;
        pgm_check_table[0x13] = &special_op_exception;
        pgm_check_table[0x15] = &operand_exception;
        pgm_check_table[4] = &do_page_fault;
        pgm_check_table[0x10] = &do_page_fault;
        pgm_check_table[0x11] = &do_page_fault;
        pgm_check_table[0x1C] = &privileged_op;
}
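
/*
 * PER (Program Event Recording) is the hardware single-step/breakpoint
 * facility used by ptrace on S/390.  For a problem-state task the PER
 * control words are copied from the lowcore into the task's per_info
 * before the debugger is notified via do_debugger_trap(SIGTRAP).
 */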
void handle_per_exception(struct pt_regs *regs)
{
        if (regs->psw.mask & PSW_PROBLEM_STATE) {
                per_struct *per_info = &current->thread.per_info;
                per_info->lowcore.words.perc_atmid = S390_lowcore.per_perc_atmid;
                per_info->lowcore.words.address = S390_lowcore.per_address;
                per_info->lowcore.words.access_id = S390_lowcore.per_access_id;
        }
        if (do_debugger_trap(regs, SIGTRAP)) {
                /* I've seen this happen; possibly a task structure being reused? */
                printk("Spurious per exception detected\n");
                printk("switching off per tracing for this task.\n");
                show_crashed_task_info();
                /* Hopefully switching off per tracing will help us survive */
                regs->psw.mask &= ~PSW_PER_MASK;
        }
}