/*
 *  linux/arch/i386/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  'Traps.c' handles hardware traps and faults after we have saved some
 *  state in 'asm.s'.
 */
11 #include <linux/config.h>
12 #include <linux/head.h>
13 #include <linux/sched.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/ptrace.h>
18 #include <linux/timer.h>
20 #include <linux/smp.h>
21 #include <linux/smp_lock.h>
22 #include <linux/init.h>
23 #include <linux/delay.h>
25 #include <asm/system.h>
26 #include <asm/uaccess.h>
28 #include <asm/spinlock.h>
29 #include <asm/atomic.h>
30 #include <asm/debugreg.h>
32 asmlinkage
int system_call(void);
33 asmlinkage
void lcall7(void);
34 struct desc_struct default_ldt
= { 0, 0 };
/*
 * Crank the console log level to maximum so that an oops dump is not
 * filtered out by the current loglevel.
 */
static inline void console_verbose(void)
{
	extern int console_loglevel;
	console_loglevel = 15;
}
/*
 * Generate a trivial trap handler: record the trap number and error
 * code in the task's TSS, raise the signal, and oops if there is no
 * exception-table fixup for a kernel-mode fault.
 */
#define DO_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	tsk->tss.error_code = error_code; \
	tsk->tss.trap_no = trapnr; \
	force_sig(signr, tsk); \
	die_if_no_fixup(str,regs,error_code); \
}
/*
 * Like DO_ERROR, but first give the vm86 emulation a chance to handle
 * the trap when the fault happened in virtual-8086 mode.
 * NOTE(review): the early "return;" after a successful vm86 trap was
 * lost in the paste; reconstructed from the "else fall through" comment.
 */
#define DO_VM86_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	if (regs->eflags & VM_MASK) { \
		if (!handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr)) \
			return; \
		/* else fall through */ \
	} \
	tsk->tss.error_code = error_code; \
	tsk->tss.trap_no = trapnr; \
	force_sig(signr, tsk); \
	die_if_kernel(str,regs,error_code); \
}
68 void page_exception(void);
70 asmlinkage
void divide_error(void);
71 asmlinkage
void debug(void);
72 asmlinkage
void nmi(void);
73 asmlinkage
void int3(void);
74 asmlinkage
void overflow(void);
75 asmlinkage
void bounds(void);
76 asmlinkage
void invalid_op(void);
77 asmlinkage
void device_not_available(void);
78 asmlinkage
void double_fault(void);
79 asmlinkage
void coprocessor_segment_overrun(void);
80 asmlinkage
void invalid_TSS(void);
81 asmlinkage
void segment_not_present(void);
82 asmlinkage
void stack_segment(void);
83 asmlinkage
void general_protection(void);
84 asmlinkage
void page_fault(void);
85 asmlinkage
void coprocessor_error(void);
86 asmlinkage
void reserved(void);
87 asmlinkage
void alignment_check(void);
88 asmlinkage
void spurious_interrupt_bug(void);
/* How many words of kernel stack show_registers() dumps. */
int kstack_depth_to_print = 24;
/*
 * These constants are for searching for possible module text
 * segments. VMALLOC_OFFSET comes from mm/vmalloc.c; MODULE_RANGE is
 * a guess of how much space is likely to be vmalloced.
 */
#define VMALLOC_OFFSET (8*1024*1024)
#define MODULE_RANGE (8*1024*1024)
100 static void show_registers(struct pt_regs
*regs
)
106 unsigned long *stack
, addr
, module_start
, module_end
;
107 extern char _stext
, _etext
;
109 esp
= (unsigned long) (1+regs
);
114 ss
= regs
->xss
& 0xffff;
116 printk("CPU: %d\nEIP: %04x:[<%08lx>]\nEFLAGS: %08lx\n",
117 smp_processor_id(), 0xffff & regs
->xcs
, regs
->eip
, regs
->eflags
);
118 printk("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
119 regs
->eax
, regs
->ebx
, regs
->ecx
, regs
->edx
);
120 printk("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
121 regs
->esi
, regs
->edi
, regs
->ebp
, esp
);
122 printk("ds: %04x es: %04x ss: %04x\n",
123 regs
->xds
& 0xffff, regs
->xes
& 0xffff, ss
);
125 printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
126 current
->comm
, current
->pid
, 0xffff & i
, 4096+(unsigned long)current
);
129 * When in-kernel, we also print out the stack and code at the
130 * time of the fault..
134 stack
= (unsigned long *) esp
;
135 for(i
=0; i
< kstack_depth_to_print
; i
++) {
136 if (((long) stack
& 4095) == 0)
138 if (i
&& ((i
% 8) == 0))
140 printk("%08lx ", *stack
++);
142 printk("\nCall Trace: ");
143 stack
= (unsigned long *) esp
;
145 module_start
= PAGE_OFFSET
+ (max_mapnr
<< PAGE_SHIFT
);
146 module_start
= ((module_start
+ VMALLOC_OFFSET
) & ~(VMALLOC_OFFSET
-1));
147 module_end
= module_start
+ MODULE_RANGE
;
148 while (((long) stack
& 4095) != 0) {
151 * If the address is either in the text segment of the
152 * kernel, or in the region which contains vmalloc'ed
153 * memory, it *may* be the address of a calling
154 * routine; if so, print it so that someone tracing
155 * down the cause of the crash will be able to figure
156 * out the call path that was taken.
158 if (((addr
>= (unsigned long) &_stext
) &&
159 (addr
<= (unsigned long) &_etext
)) ||
160 ((addr
>= module_start
) && (addr
<= module_end
))) {
161 if (i
&& ((i
% 8) == 0))
163 printk("[<%08lx>] ", addr
);
169 printk("%02x ", ((unsigned char *)regs
->eip
)[i
]);
176 void die(const char * str
, struct pt_regs
* regs
, long err
)
179 spin_lock_irq(&die_lock
);
180 printk("%s: %04lx\n", str
, err
& 0xffff);
181 show_registers(regs
);
182 spin_unlock_irq(&die_lock
);
186 static void die_if_kernel(const char * str
, struct pt_regs
* regs
, long err
)
188 if (!(regs
->eflags
& VM_MASK
) && !(3 & regs
->xcs
))
192 static void die_if_no_fixup(const char * str
, struct pt_regs
* regs
, long err
)
194 if (!(regs
->eflags
& VM_MASK
) && !(3 & regs
->xcs
))
197 fixup
= search_exception_table(regs
->eip
);
206 DO_VM86_ERROR( 0, SIGFPE
, "divide error", divide_error
, current
)
207 DO_VM86_ERROR( 3, SIGTRAP
, "int3", int3
, current
)
208 DO_VM86_ERROR( 4, SIGSEGV
, "overflow", overflow
, current
)
209 DO_VM86_ERROR( 5, SIGSEGV
, "bounds", bounds
, current
)
210 DO_ERROR( 6, SIGILL
, "invalid operand", invalid_op
, current
)
211 DO_VM86_ERROR( 7, SIGSEGV
, "device not available", device_not_available
, current
)
212 DO_ERROR( 8, SIGSEGV
, "double fault", double_fault
, current
)
213 DO_ERROR( 9, SIGFPE
, "coprocessor segment overrun", coprocessor_segment_overrun
, current
)
214 DO_ERROR(10, SIGSEGV
, "invalid TSS", invalid_TSS
, current
)
215 DO_ERROR(11, SIGBUS
, "segment not present", segment_not_present
, current
)
216 DO_ERROR(12, SIGBUS
, "stack segment", stack_segment
, current
)
217 DO_ERROR(17, SIGSEGV
, "alignment check", alignment_check
, current
)
218 DO_ERROR(18, SIGSEGV
, "reserved", reserved
, current
)
219 /* I don't have documents for this but it does seem to cover the cache
220 flush from user space exception some people get. */
221 DO_ERROR(19, SIGSEGV
, "cache flush denied", cache_flush_denied
, current
)
223 asmlinkage
void cache_flush_denied(struct pt_regs
* regs
, long error_code
)
225 if (regs
->eflags
& VM_MASK
) {
226 handle_vm86_fault((struct kernel_vm86_regs
*) regs
, error_code
);
229 die_if_kernel("cache flush denied",regs
,error_code
);
230 current
->tss
.error_code
= error_code
;
231 current
->tss
.trap_no
= 19;
232 force_sig(SIGSEGV
, current
);
235 asmlinkage
void do_general_protection(struct pt_regs
* regs
, long error_code
)
237 if (regs
->eflags
& VM_MASK
)
240 if (!(regs
->xcs
& 3))
243 current
->tss
.error_code
= error_code
;
244 current
->tss
.trap_no
= 13;
245 force_sig(SIGSEGV
, current
);
250 handle_vm86_fault((struct kernel_vm86_regs
*) regs
, error_code
);
257 fixup
= search_exception_table(regs
->eip
);
262 die("general protection fault", regs
, error_code
);
/* NMI cause: memory parity error - warn and keep going. */
static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
	printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
	printk("You probably have a hardware problem with your RAM chips\n");
}
/*
 * NMI cause: I/O check error. Dump state, then pulse the IOCK enable
 * bit in port 0x61 to re-arm the line.
 * NOTE(review): the declaration of i and the port-0x61 outb sequence
 * around the delay loop were lost in the paste; reconstructed - verify
 * against the original traps.c.
 */
static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
	unsigned long i;

	printk("NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	i = 2000;
	while (--i) udelay(1000);
	reason &= ~8;
	outb(reason, 0x61);
}
/* NMI with no recognized cause bits: log it and continue. */
static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
	printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
	printk("Dazed and confused, but trying to continue\n");
	printk("Do you have a strange power saving mode enabled?\n");
}
295 asmlinkage
void do_nmi(struct pt_regs
* regs
, long error_code
)
297 unsigned char reason
= inb(0x61);
298 extern atomic_t nmi_counter
;
300 atomic_inc(&nmi_counter
);
302 mem_parity_error(reason
, regs
);
304 io_check_error(reason
, regs
);
305 if (!(reason
& 0xc0))
306 unknown_nmi_error(reason
, regs
);
/*
 * Careful - we must not do a lock-kernel until we have checked that the
 * debug fault happened in user mode. Getting debug exceptions while
 * in the kernel has to be handled without locking, to avoid deadlocks..
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code - and we don't have to be careful about what values
 * can be written to the debug registers because there are no really
 * bad cases).
 */
322 asmlinkage
void do_debug(struct pt_regs
* regs
, long error_code
)
324 unsigned int condition
;
325 struct task_struct
*tsk
= current
;
327 if (regs
->eflags
& VM_MASK
)
330 __asm__
__volatile__("movl %%db6,%0" : "=r" (condition
));
332 /* Mask out spurious TF errors due to lazy TF clearing */
333 if (condition
& DR_STEP
) {
334 if ((tsk
->flags
& PF_PTRACED
) == 0)
338 /* Mast out spurious debug traps due to lazy DR7 setting */
339 if (condition
& (DR_TRAP0
|DR_TRAP1
|DR_TRAP2
|DR_TRAP3
)) {
340 if (!tsk
->tss
.debugreg
[7])
344 /* If this is a kernel mode trap, we need to reset db7 to allow us to continue sanely */
345 if ((regs
->xcs
& 3) == 0)
348 /* Ok, finally something we can handle */
349 tsk
->tss
.trap_no
= 1;
350 tsk
->tss
.error_code
= error_code
;
351 force_sig(SIGTRAP
, tsk
);
356 handle_vm86_trap((struct kernel_vm86_regs
*) regs
, error_code
, 1);
361 __asm__("movl %0,%%db7"
367 regs
->eflags
&= ~TF_MASK
;
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
376 void math_error(void)
378 struct task_struct
* task
;
384 * Save the info for the exception handler
386 __asm__
__volatile__("fnsave %0":"=m" (task
->tss
.i387
.hard
));
387 task
->flags
&=~PF_USEDFPU
;
390 task
->tss
.trap_no
= 16;
391 task
->tss
.error_code
= 0;
392 force_sig(SIGFPE
, task
);
396 asmlinkage
void do_coprocessor_error(struct pt_regs
* regs
, long error_code
)
402 asmlinkage
void do_spurious_interrupt_bug(struct pt_regs
* regs
,
406 /* No need to warn about this any longer. */
407 printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task.
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
418 asmlinkage
void math_state_restore(void)
420 __asm__
__volatile__("clts"); /* Allow maths ops (or we recurse) */
423 * SMP is actually simpler than uniprocessor for once. Because
424 * we can't pull the delayed FPU switching trick Linus does
425 * we simply have to do the restore each context switch and
426 * set the flag. switch_to() will always save the state in
427 * case we swap processors. We also don't use the coprocessor
428 * timer - IRQ 13 mode isn't used with SMP machines (thank god).
431 if(current
->used_math
)
432 __asm__("frstor %0": :"m" (current
->tss
.i387
));
436 * Our first FPU usage, clean the chip.
439 current
->used_math
= 1;
441 current
->flags
|=PF_USEDFPU
; /* So we fnsave on switch_to() */
444 #ifndef CONFIG_MATH_EMULATION
446 asmlinkage
void math_emulate(long arg
)
449 printk("math-emulation not enabled and no coprocessor found.\n");
450 printk("killing %s.\n",current
->comm
);
451 force_sig(SIGFPE
,current
);
456 #endif /* CONFIG_MATH_EMULATION */
458 __initfunc(void trap_init_f00f_bug(void))
466 * Allocate a new page in virtual address space,
467 * move the IDT into it and write protect this page.
469 page
= (unsigned long) vmalloc(PAGE_SIZE
);
470 memcpy((void *) page
, idt_table
, 256*8);
472 pgd
= pgd_offset(&init_mm
, page
);
473 pmd
= pmd_offset(pgd
, page
);
474 pte
= pte_offset(pmd
, page
);
475 *pte
= pte_wrprotect(*pte
);
479 * "idt" is magic - it overlaps the idt_descr
480 * variable so that updating idt will automatically
481 * update the idt descriptor..
483 idt
= (struct desc_struct
*)page
;
484 __asm__
__volatile__("lidt %0": "=m" (idt_descr
));
489 void __init
trap_init(void)
492 struct desc_struct
* p
;
494 if (readl(0x0FFFD9) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
496 set_call_gate(&default_ldt
,lcall7
);
497 set_trap_gate(0,÷_error
);
498 set_trap_gate(1,&debug
);
499 set_trap_gate(2,&nmi
);
500 set_system_gate(3,&int3
); /* int3-5 can be called from all */
501 set_system_gate(4,&overflow
);
502 set_system_gate(5,&bounds
);
503 set_trap_gate(6,&invalid_op
);
504 set_trap_gate(7,&device_not_available
);
505 set_trap_gate(8,&double_fault
);
506 set_trap_gate(9,&coprocessor_segment_overrun
);
507 set_trap_gate(10,&invalid_TSS
);
508 set_trap_gate(11,&segment_not_present
);
509 set_trap_gate(12,&stack_segment
);
510 set_trap_gate(13,&general_protection
);
511 set_trap_gate(14,&page_fault
);
512 set_trap_gate(15,&spurious_interrupt_bug
);
513 set_trap_gate(16,&coprocessor_error
);
514 set_trap_gate(17,&alignment_check
);
516 set_trap_gate(i
,&reserved
);
517 set_system_gate(0x80,&system_call
);
518 /* set up GDT task & ldt entries */
519 p
= gdt
+FIRST_TSS_ENTRY
;
520 set_tss_desc(p
, &init_task
.tss
);
522 set_ldt_desc(p
, &default_ldt
, 1);
524 for(i
=1 ; i
<NR_TASKS
; i
++) {
530 /* Clear NT, so that we won't have troubles with that later on */
531 __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");