/*
 *  linux/arch/x86-64/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *  Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>
#include <linux/uaccess.h>
#include <linux/bug.h>

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/kdebug.h>
#include <asm/processor.h>
#include <asm/unwind.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/stacktrace.h>
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);
ATOMIC_NOTIFIER_HEAD(die_chain);
EXPORT_SYMBOL(die_chain);

int register_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier); /* used modularly by kdb */

int unregister_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&die_chain, nb);
}
EXPORT_SYMBOL(unregister_die_notifier); /* used modularly by kdb */
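
/*
 * Illustrative sketch (not built as part of this file): how a module such
 * as a debugger would hook the die chain exported above.  The callback
 * receives the event code (DIE_OOPS, DIE_INT3, ...) and a struct die_args
 * from <asm/kdebug.h>; returning NOTIFY_STOP swallows the event.  The
 * handler and notifier names below are hypothetical.
 */
#if 0
static int example_die_handler(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_OOPS)
		printk(KERN_INFO "oops '%s' at rip %016lx\n",
		       args->str, args->regs->rip);
	return NOTIFY_DONE;
}

static struct notifier_block example_die_notifier = {
	.notifier_call = example_die_handler,
};

/* module init:  register_die_notifier(&example_die_notifier);   */
/* module exit:  unregister_die_notifier(&example_die_notifier); */
#endif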
static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	preempt_disable();
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_disable();
	/* Make sure to not schedule here because we could be running
	   on an exception stack. */
	preempt_enable_no_resched();
}
int kstack_depth_to_print = 12;

#ifdef CONFIG_KALLSYMS
void printk_address(unsigned long address)
{
	unsigned long offset = 0, symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[128];

	symname = kallsyms_lookup(address, &symsize, &offset,
					&modname, namebuf);
	if (!symname) {
		printk(" [<%016lx>]\n", address);
		return;
	}
	if (!modname)
		modname = delim = "";
	printk(" [<%016lx>] %s%s%s%s+0x%lx/0x%lx\n",
		address, delim, modname, delim, symname, offset, symsize);
}
#else
void printk_address(unsigned long address)
{
	printk(" [<%016lx>]\n", address);
}
#endif
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
					unsigned *usedp, char **idp)
{
	static char ids[][8] = {
		[DEBUG_STACK - 1] = "#DB",
		[NMI_STACK - 1] = "NMI",
		[DOUBLEFAULT_STACK - 1] = "#DF",
		[STACKFAULT_STACK - 1] = "#SS",
		[MCE_STACK - 1] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		[N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
#endif
	};
	unsigned k;

	/*
	 * Iterate over all exception stacks, and figure out whether
	 * 'stack' is in one of them:
	 */
	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
		/*
		 * Is 'stack' above this exception frame's end?
		 * If yes then skip to the next frame.
		 */
		if (stack >= end)
			continue;
		/*
		 * Is 'stack' above this exception frame's start address?
		 * If yes then we found the right frame.
		 */
		if (stack >= end - EXCEPTION_STKSZ) {
			/*
			 * Make sure we only iterate through an exception
			 * stack once. If it comes up for the second time
			 * then there's something wrong going on - just
			 * break out and return NULL:
			 */
			if (*usedp & (1U << k))
				break;
			*usedp |= 1U << k;
			*idp = ids[k];
			return (unsigned long *)end;
		}
		/*
		 * If this is a debug stack, and if it has a larger size than
		 * the usual exception stacks, then 'stack' might still
		 * be within the lower portion of the debug stack:
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
			unsigned j = N_EXCEPTION_STACKS - 1;

			/*
			 * Black magic. A large debug stack is composed of
			 * multiple exception stack entries, which we
			 * iterate through now. Don't look:
			 */
			do {
				++j;
				end -= EXCEPTION_STKSZ;
				ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
			} while (stack < end - EXCEPTION_STKSZ);
			if (*usedp & (1U << j))
				break;
			*usedp |= 1U << j;
			*idp = ids[j];
			return (unsigned long *)end;
		}
#endif
	}
	return NULL;
}
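
/*
 * Example of the renaming done by the "black magic" loop above: with
 * DEBUG_STKSZ twice EXCEPTION_STKSZ the table gains a single "#DB[?]"
 * slot; when 'stack' falls in the lower half of the debug stack, the loop
 * steps 'end' down by one EXCEPTION_STKSZ and patches index 4 of the id
 * string ('?'), so the trace segment is labelled "#DB[1]" rather than "#DB".
 */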
#define MSG(txt) ops->warning(data, txt)

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
	void *t = (void *)tinfo;
	return p > t && p < t + THREAD_SIZE - 3;
}
void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
		unsigned long *stack,
		struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu();
	unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
	unsigned used = 0;
	struct thread_info *tinfo;

	if (!tsk)
		tsk = current;

	if (!stack) {
		unsigned long dummy;
		stack = &dummy;
		if (tsk && tsk != current)
			stack = (unsigned long *)tsk->thread.rsp;
	}

	/*
	 * Print function call entries within a stack. 'cond' is the
	 * "end of stackframe" condition, that the 'stack++'
	 * iteration will eventually trigger.
	 */
#define HANDLE_STACK(cond) \
	do while (cond) { \
		unsigned long addr = *stack++; \
		/* Use unlocked access here because except for NMIs	\
		   we should be already protected against module unloads */ \
		if (__kernel_text_address(addr)) { \
			/* \
			 * If the address is either in the text segment of the \
			 * kernel, or in the region which contains vmalloc'ed \
			 * memory, it *may* be the address of a calling \
			 * routine; if so, print it so that someone tracing \
			 * down the cause of the crash will be able to figure \
			 * out the call path that was taken. \
			 */ \
			ops->address(data, addr); \
		} \
	} while (0)

	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions:
	 */
	for (;;) {
		char *id;
		unsigned long *estack_end;
		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);

		if (estack_end) {
			if (ops->stack(data, id) < 0)
				break;
			HANDLE_STACK (stack < estack_end);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) estack_end[-2];
			continue;
		}
		if (irqstack_end) {
			unsigned long *irqstack;
			irqstack = irqstack_end -
				(IRQSTACKSIZE - 64) / sizeof(*irqstack);

			if (stack >= irqstack && stack < irqstack_end) {
				if (ops->stack(data, "IRQ") < 0)
					break;
				HANDLE_STACK (stack < irqstack_end);
				/*
				 * We link to the next stack (which would be
				 * the process stack normally) the last
				 * pointer (index -1 to end) in the IRQ stack:
				 */
				stack = (unsigned long *) (irqstack_end[-1]);
				irqstack_end = NULL;
				ops->stack(data, "EOI");
				continue;
			}
		}
		break;
	}

	/*
	 * This handles the process stack:
	 */
	tinfo = task_thread_info(tsk);
	HANDLE_STACK (valid_stack_ptr(tinfo, stack));
#undef HANDLE_STACK
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);
static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	print_symbol(msg, symbol);
	printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
	printk("%s\n", msg);
}

static int print_trace_stack(void *data, char *name)
{
	printk(" <%s> ", name);
	return 0;
}

static void print_trace_address(void *data, unsigned long addr)
{
	printk_address(addr);
}

static struct stacktrace_ops print_trace_ops = {
	.warning = print_trace_warning,
	.warning_symbol = print_trace_warning_symbol,
	.stack = print_trace_stack,
	.address = print_trace_address,
};

void
show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack)
{
	printk("\nCall Trace:\n");
	dump_trace(tsk, regs, stack, &print_trace_ops, NULL);
	printk("\n");
}
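
/*
 * Illustrative sketch: dump_trace() plus struct stacktrace_ops form a small
 * callback interface, so consumers other than the printk-based
 * print_trace_ops above can walk the same stacks.  The example below (not
 * compiled here) merely counts the kernel text addresses found on the
 * current stack; the callback and function names are hypothetical, the ops
 * fields are the ones used in this file.
 */
#if 0
static void count_warning(void *data, char *msg) { }
static void count_warning_symbol(void *data, char *msg, unsigned long sym) { }

static int count_stack(void *data, char *name)
{
	return 0;		/* >= 0 means keep walking into the next stack */
}

static void count_address(void *data, unsigned long addr)
{
	(*(unsigned long *)data)++;
}

static struct stacktrace_ops count_trace_ops = {
	.warning	= count_warning,
	.warning_symbol	= count_warning_symbol,
	.stack		= count_stack,
	.address	= count_address,
};

static unsigned long count_current_stack_entries(void)
{
	unsigned long n = 0;
	dump_trace(current, NULL, NULL, &count_trace_ops, &n);
	return n;
}
#endif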
static void
_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *rsp)
{
	unsigned long *stack;
	int i;
	const int cpu = smp_processor_id();
	unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
	unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);

	// debugging aid: "show_stack(NULL, NULL);" prints the
	// back trace for this cpu.

	if (rsp == NULL) {
		if (tsk)
			rsp = (unsigned long *)tsk->thread.rsp;
		else
			rsp = (unsigned long *)&rsp;
	}

	stack = rsp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irqstack && stack <= irqstack_end) {
			if (stack == irqstack_end) {
				stack = (unsigned long *) (irqstack_end[-1]);
				printk(" <EOI> ");
			}
		} else {
			if (((long) stack & (THREAD_SIZE-1)) == 0)
				break;
		}
		if (i && ((i % 4) == 0))
			printk("\n");
		printk(" %016lx", *stack++);
		touch_nmi_watchdog();
	}
	show_trace(tsk, regs, rsp);
}

void show_stack(struct task_struct *tsk, unsigned long * rsp)
{
	_show_stack(tsk, NULL, rsp);
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long dummy;
	show_trace(NULL, NULL, &dummy);
}

EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
	int i;
	int in_kernel = !user_mode(regs);
	unsigned long rsp;
	const int cpu = smp_processor_id();
	struct task_struct *cur = cpu_pda(cpu)->pcurrent;

	rsp = regs->rsp;
	printk("CPU %d ", cpu);
	__show_regs(regs);
	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
		cur->comm, cur->pid, task_thread_info(cur), cur);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {
		printk("Stack: ");
		_show_stack(NULL, regs, (unsigned long*)rsp);

		printk("\nCode: ");
		if (regs->rip < PAGE_OFFSET)
			goto bad;

		for (i = 0; i < 20; i++) {
			unsigned char c;
			if (__get_user(c, &((unsigned char*)regs->rip)[i])) {
bad:
				printk(" Bad RIP value.");
				break;
			}
			printk("%02x ", c);
		}
	}
	printk("\n");
}
int is_valid_bugaddr(unsigned long rip)
{
	unsigned short ud2;

	if (__copy_from_user(&ud2, (const void __user *) rip, sizeof(ud2)))
		return 0;

	return ud2 == 0x0b0f;
}
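
/*
 * Note on the constant above: the ud2 instruction is the two bytes
 * 0x0f 0x0b, which read as a little-endian 16-bit value - the way
 * __copy_from_user() fills 'ud2' here - compare equal to 0x0b0f.
 */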
void out_of_line_bug(void)
{
	BUG();
}
EXPORT_SYMBOL(out_of_line_bug);
static DEFINE_SPINLOCK(die_lock);
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
	int cpu = smp_processor_id();
	unsigned long flags;

	/* racy, but better than risking deadlock. */
	local_irq_save(flags);
	if (!spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	bust_spinlocks(1);
	return flags;
}

void __kprobes oops_end(unsigned long flags)
{
	die_owner = -1;
	bust_spinlocks(0);
	die_nest_count--;
	if (die_nest_count)
		/* We still own the lock */
		local_irq_restore(flags);
	else
		/* Nest count reaches zero, release the lock. */
		spin_unlock_irqrestore(&die_lock, flags);
	if (panic_on_oops)
		panic("Fatal exception");
}
void __kprobes __die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;

	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
	show_registers(regs);
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	printk_address(regs->rip);
	printk(" RSP <%016lx>\n", regs->rsp);
	if (kexec_should_crash(current))
		crash_kexec(regs);
}

void die(const char * str, struct pt_regs * regs, long err)
{
	unsigned long flags = oops_begin();

	if (!user_mode(regs))
		report_bug(regs->rip);

	__die(str, regs, err);
	oops_end(flags);
	do_exit(SIGSEGV);
}
void __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	unsigned long flags = oops_begin();

	/*
	 * We are in trouble anyway, let's at least try
	 * to get a message out.
	 */
	printk(str, smp_processor_id());
	show_registers(regs);
	if (kexec_should_crash(current))
		crash_kexec(regs);
	if (do_panic || panic_on_oops)
		panic("Non maskable interrupt");
	oops_end(flags);
	do_exit(SIGSEGV);
}
static void __kprobes do_trap(int trapnr, int signr, char *str,
			      struct pt_regs * regs, long error_code,
			      siginfo_t *info)
{
	struct task_struct *tsk = current;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (user_mode(regs)) {
		if (exception_trace && unhandled_signal(tsk, signr))
			printk(KERN_INFO
			       "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
			       tsk->comm, tsk->pid, str,
			       regs->rip, regs->rsp, error_code);

		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	}

	/* kernel trap */
	{
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->rip);
		if (fixup)
			regs->rip = fixup->fixup;
		else
			die(str, regs, error_code);
		return;
	}
}
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
							== NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
							== NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, &info); \
}

DO_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->rip)
DO_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)
/* Runs on IST stack */
asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
			12, SIGBUS) == NOTIFY_STOP)
		return;
	preempt_conditional_sti(regs);
	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}
asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

	/* Return not checked because a double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 8;

	/* This is always a kernel trap and never fixable (and thus must
	   never return). */
	for (;;)
		die(str, regs, error_code);
}
asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
						long error_code)
{
	struct task_struct *tsk = current;

	conditional_sti(regs);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;

	if (user_mode(regs)) {
		if (exception_trace && unhandled_signal(tsk, SIGSEGV))
			printk(KERN_INFO
			       "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
			       tsk->comm, tsk->pid,
			       regs->rip, regs->rsp, error_code);

		force_sig(SIGSEGV, tsk);
		return;
	}

	/* kernel gp */
	{
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->rip);
		if (fixup) {
			regs->rip = fixup->fixup;
			return;
		}
		if (notify_die(DIE_GPF, "general protection fault", regs,
				error_code, 13, SIGSEGV) == NOTIFY_STOP)
			return;
		die("general protection fault", regs, error_code);
	}
}
static __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
		reason);
	printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

	/* Clear and disable the memory parity error line. */
	reason = (reason & 0xf) | 4;
	outb(reason, 0x61);
}

static __kprobes void
io_check_error(unsigned char reason, struct pt_regs * regs)
{
	printk("NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	mdelay(2000);
	reason &= ~8;
	outb(reason, 0x61);
}

static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
		reason);
	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}
/* Runs on IST stack. This code must keep interrupts off all the time.
   Nested NMIs are prevented by the CPU. */
asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int cpu;

	cpu = smp_processor_id();

	/* Only the BSP gets external NMIs from the system. */
	if (!cpu)
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
								== NOTIFY_STOP)
			return;
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog_tick(regs, reason))
			return;
		if (!do_nmi_callback(regs, cpu))
			unknown_nmi_error(reason, regs);

		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;

	/* AK: following checks seem to be broken on modern chipsets. FIXME */

	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
}
/* runs on IST stack. */
asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
{
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
		return;
	}
	preempt_conditional_sti(regs);
	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}
/* Help handler running on IST stack to switch back to user stack
   for scheduling or signal handling. The actual stack switch is done in
   entry.S */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Did already sync */
	if (eregs == (struct pt_regs *)eregs->rsp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/* Exception from kernel and interrupts are enabled. Move to
	   kernel process stack. */
	else if (eregs->eflags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}
/* runs on IST stack. */
asmlinkage void __kprobes do_debug(struct pt_regs * regs,
				   unsigned long error_code)
{
	unsigned long condition;
	struct task_struct *tsk = current;
	siginfo_t info;

	get_debugreg(condition, 6);

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
						SIGTRAP) == NOTIFY_STOP)
		return;

	preempt_conditional_sti(regs);

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7)
			goto clear_dr7;
	}

	tsk->thread.debugreg6 = condition;

	/* Mask out spurious TF errors due to lazy TF clearing */
	if (condition & DR_STEP) {
		/*
		 * The TF error should be masked out only if the current
		 * process is not traced and if the TRAP flag has been set
		 * previously by a tracing process (condition detected by
		 * the PT_DTRACE flag); remember that the i386 TRAP flag
		 * can be modified by the process itself in user mode,
		 * allowing programs to debug themselves without the ptrace()
		 * interface.
		 */
		if (!user_mode(regs))
			goto clear_TF_reenable;
		/*
		 * Was the TF flag set by a debugger? If so, clear it now,
		 * so that register information is correct.
		 */
		if (tsk->ptrace & PT_DTRACE) {
			regs->eflags &= ~TF_MASK;
			tsk->ptrace &= ~PT_DTRACE;
		}
	}

	/* Ok, finally something we can handle */
	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
	force_sig_info(SIGTRAP, &info, tsk);

clear_dr7:
	set_debugreg(0UL, 7);
	preempt_conditional_cli(regs);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->eflags &= ~TF_MASK;
	preempt_conditional_cli(regs);
}
static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
	const struct exception_table_entry *fixup;
	fixup = search_exception_tables(regs->rip);
	if (fixup) {
		regs->rip = fixup->fixup;
		return 1;
	}
	notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
	/* Illegal floating point operation in the kernel */
	current->thread.trap_no = trapnr;
	die(str, regs, 0);
	return 0;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
asmlinkage void do_coprocessor_error(struct pt_regs *regs)
{
	void __user *rip = (void __user *)(regs->rip);
	struct task_struct * task;
	siginfo_t info;
	unsigned short cwd, swd;

	conditional_sti(regs);
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel x87 math error", 16))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = rip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception.
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (swd & ~cwd & 0x3f) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}
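
/*
 * Worked example of the masking above (illustrative values): with a control
 * word of 0x037f every exception is masked (low six bits set), so
 * swd & ~cwd & 0x3f is always 0 and we fall through to the default case.
 * If the program unmasks divide-by-zero (clearing bit 2 of the control
 * word, cwd = 0x037b) and the status word then reports ZE (swd bit 2 set),
 * swd & ~cwd & 0x3f == 0x004 and si_code becomes FPE_FLTDIV.
 */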
asmlinkage void bad_intr(void)
{
	printk("bad interrupt");
}
asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
{
	void __user *rip = (void __user *)(regs->rip);
	struct task_struct * task;
	siginfo_t info;
	unsigned short mxcsr;

	conditional_sti(regs);
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel simd math error", 19))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = rip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}
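
/*
 * Worked example for the MXCSR case (illustrative values): the mask bits sit
 * at bits 7-12 and the flag bits at bits 0-5, in the same IE/DE/ZE/OE/UE/PE
 * order as the x87 words.  With the reset value 0x1f80 everything is masked
 * and the switch expression above is always 0.  If the divide-by-zero mask
 * (bit 9) is cleared and the ZE flag (bit 2) is set, mxcsr & 0x1f80 = 0x1d80,
 * shifting right by 7 gives 0x3b, its complement has bit 2 set, and ANDing
 * with the flag bits yields 0x004, i.e. FPE_FLTDIV.
 */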
asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
{
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
	struct task_struct *me = current;
	clts();			/* Allow maths ops (or we recurse) */

	if (!used_math())
		init_fpu(me);
	restore_fpu_checking(&me->thread.i387.fxsave);
	task_thread_info(me)->status |= TS_USEDFPU;
}
void __init trap_init(void)
{
	set_intr_gate(0, &divide_error);
	set_intr_gate_ist(1, &debug, DEBUG_STACK);
	set_intr_gate_ist(2, &nmi, NMI_STACK);
	set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */
	set_system_gate(4, &overflow);	/* int4 can be called from all */
	set_intr_gate(5, &bounds);
	set_intr_gate(6, &invalid_op);
	set_intr_gate(7, &device_not_available);
	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
	set_intr_gate(9, &coprocessor_segment_overrun);
	set_intr_gate(10, &invalid_TSS);
	set_intr_gate(11, &segment_not_present);
	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(13, &general_protection);
	set_intr_gate(14, &page_fault);
	set_intr_gate(15, &spurious_interrupt_bug);
	set_intr_gate(16, &coprocessor_error);
	set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
	set_intr_gate(19, &simd_coprocessor_error);

#ifdef CONFIG_IA32_EMULATION
	set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif

	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();
}
static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);

static int __init kstack_setup(char *s)
{
	if (!s)
		return -EINVAL;
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("kstack", kstack_setup);
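
/*
 * Usage note: both hooks above are consumed from the kernel command line,
 * e.g. booting with "oops=panic" makes every oops fatal via panic_on_oops,
 * and "kstack=32" (an example value) raises the number of raw stack words
 * printed by _show_stack() from the default of 12.
 */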