/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>
#include <linux/uaccess.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/utsname.h>
#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/processor.h>
#include <asm/unwind.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/stacktrace.h>
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);
static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	preempt_disable();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	/* Make sure to not schedule here because we could be running
	   on an exception stack. */
	preempt_enable_no_resched();
}
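/*
 * Illustrative usage sketch (not part of the original file): a trap handler
 * that may run on an IST stack brackets its body with the helpers above, so
 * interrupts are only re-enabled when the interrupted context had them
 * enabled and preemption stays disabled while on the exception stack:
 *
 *	asmlinkage void do_example_trap(struct pt_regs *regs, long error_code)
 *	{
 *		preempt_conditional_sti(regs);
 *		do_trap(12, SIGBUS, "example", regs, error_code, NULL);
 *		preempt_conditional_cli(regs);
 *	}
 *
 * "do_example_trap" is a hypothetical name; do_stack_segment() below follows
 * exactly this pattern.
 */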
int kstack_depth_to_print = 12;
#ifdef CONFIG_KALLSYMS
void printk_address(unsigned long address, int reliable)
{
	unsigned long offset = 0, symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[KSYM_NAME_LEN];
	char reliab[4] = "";

	symname = kallsyms_lookup(address, &symsize, &offset,
					&modname, namebuf);
	if (!symname) {
		printk(" [<%016lx>]\n", address);
		return;
	}
	if (!reliable)
		strcpy(reliab, "? ");
	if (!modname)
		modname = delim = "";
	printk(" [<%016lx>] %s%s%s%s%s+0x%lx/0x%lx\n",
		address, reliab, delim, modname, delim, symname, offset, symsize);
}
#else
void printk_address(unsigned long address, int reliable)
{
	printk(" [<%016lx>]\n", address);
}
#endif
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
					unsigned *usedp, char **idp)
{
	static char ids[][8] = {
		[DEBUG_STACK - 1] = "#DB",
		[NMI_STACK - 1] = "NMI",
		[DOUBLEFAULT_STACK - 1] = "#DF",
		[STACKFAULT_STACK - 1] = "#SS",
		[MCE_STACK - 1] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		[N_EXCEPTION_STACKS ...
		 N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
#endif
	};
	unsigned k;

	/*
	 * Iterate over all exception stacks, and figure out whether
	 * 'stack' is in one of them:
	 */
	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
		/*
		 * Is 'stack' above this exception frame's end?
		 * If yes then skip to the next frame.
		 */
		if (stack >= end)
			continue;
		/*
		 * Is 'stack' above this exception frame's start address?
		 * If yes then we found the right frame.
		 */
		if (stack >= end - EXCEPTION_STKSZ) {
			/*
			 * Make sure we only iterate through an exception
			 * stack once. If it comes up for the second time
			 * then there's something wrong going on - just
			 * break out and return NULL:
			 */
			if (*usedp & (1U << k))
				break;
			*usedp |= 1U << k;
			*idp = ids[k];
			return (unsigned long *)end;
		}
		/*
		 * If this is a debug stack, and if it has a larger size than
		 * the usual exception stacks, then 'stack' might still
		 * be within the lower portion of the debug stack:
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
			unsigned j = N_EXCEPTION_STACKS - 1;

			/*
			 * Black magic. A large debug stack is composed of
			 * multiple exception stack entries, which we
			 * iterate through now. Dont look:
			 */
			do {
				++j;
				end -= EXCEPTION_STKSZ;
				ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
			} while (stack < end - EXCEPTION_STKSZ);
			if (*usedp & (1U << j))
				break;
			*usedp |= 1U << j;
			*idp = ids[j];
			return (unsigned long *)end;
		}
#endif
	}
	return NULL;
}
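/*
 * Worked example (illustrative, not in the original file): the oversized
 * debug stack is reported through the extra "#DB[?]" entries above. In the
 * do/while loop the first extra entry has j == N_EXCEPTION_STACKS, so
 *	ids[j][4] = '1' + (j - N_EXCEPTION_STACKS)
 * overwrites the '?' (index 4 of "#DB[?]") and yields the label "#DB[1]";
 * each further EXCEPTION_STKSZ-sized chunk of the debug stack would be
 * labelled "#DB[2]", "#DB[3]", and so on.
 */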
#define MSG(txt) ops->warning(data, txt)
/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */
static inline int valid_stack_ptr(struct thread_info *tinfo,
			void *p, unsigned int size, void *end)
{
	void *t = (void *)tinfo;
	if (end) {
		if (p < end && p >= (end-THREAD_SIZE))
			return 1;
		else
			return 0;
	}
	return p > t && p < t + THREAD_SIZE - size;
}
/* The form of the top of the frame on the stack */
struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};
static inline unsigned long print_context_stack(struct thread_info *tinfo,
				unsigned long *stack, unsigned long bp,
				const struct stacktrace_ops *ops, void *data,
				unsigned long *end)
{
	struct stack_frame *frame = (struct stack_frame *)bp;

	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
		unsigned long addr = *stack;

		if (__kernel_text_address(addr)) {
			if ((unsigned long) stack == bp + 8) {
				ops->address(data, addr, 1);
				frame = frame->next_frame;
				bp = (unsigned long) frame;
			} else {
				ops->address(data, addr, bp == 0);
			}
		}
		stack++;
	}
	return bp;
}
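/*
 * Illustrative note (not in the original file): with CONFIG_FRAME_POINTER,
 * every function prologue pushes the caller's %rbp and then sets
 * %rbp = %rsp, so the saved registers form a linked list of struct
 * stack_frame on the stack:
 *
 *	[rbp]     -> next_frame       (the caller's saved %rbp)
 *	[rbp + 8] -> return_address
 *
 * That is why print_context_stack() treats a text address found exactly at
 * bp + 8 as "reliable" (third argument 1 to ops->address) and then follows
 * frame->next_frame, while any other text address found on the stack is
 * reported with reliable == (bp == 0), i.e. only trusted when no frame
 * pointer chain is being followed at all.
 */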
void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu();
	unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr;
	unsigned used = 0;
	struct thread_info *tinfo;

	if (!tsk)
		tsk = current;
	tinfo = task_thread_info(tsk);

	if (!stack) {
		unsigned long dummy;
		stack = &dummy;
		if (tsk && tsk != current)
			stack = (unsigned long *)tsk->thread.sp;
	}

#ifdef CONFIG_FRAME_POINTER
	if (!bp) {
		if (tsk == current) {
			/* Grab bp right from our regs */
			asm("movq %%rbp, %0" : "=r" (bp):);
		} else {
			/* bp is the last reg pushed by switch_to */
			bp = *(unsigned long *) tsk->thread.sp;
		}
	}
#endif

	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions
	 */
	for (;;) {
		char *id;
		unsigned long *estack_end;
		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);

		if (estack_end) {
			if (ops->stack(data, id) < 0)
				break;

			bp = print_context_stack(tinfo, stack, bp, ops,
							data, estack_end);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) estack_end[-2];
			continue;
		}
		if (irqstack_end) {
			unsigned long *irqstack;
			irqstack = irqstack_end -
				(IRQSTACKSIZE - 64) / sizeof(*irqstack);

			if (stack >= irqstack && stack < irqstack_end) {
				if (ops->stack(data, "IRQ") < 0)
					break;
				bp = print_context_stack(tinfo, stack, bp,
						ops, data, irqstack_end);
				/*
				 * We link to the next stack (which would be
				 * the process stack normally) the last
				 * pointer (index -1 to end) in the IRQ stack:
				 */
				stack = (unsigned long *) (irqstack_end[-1]);
				irqstack_end = NULL;
				ops->stack(data, "EOI");
				continue;
			}
		}
		break;
	}

	/*
	 * This handles the process stack:
	 */
	bp = print_context_stack(tinfo, stack, bp, ops, data, NULL);
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);
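/*
 * Usage sketch (hypothetical caller, shown for illustration only): a user of
 * dump_trace() supplies a struct stacktrace_ops whose callbacks receive the
 * discovered return addresses; the printing path below does exactly this via
 * print_trace_ops. A caller that merely counts entries could look roughly
 * like:
 *
 *	static void count_address(void *data, unsigned long addr, int reliable)
 *	{
 *		(*(unsigned long *)data)++;
 *	}
 *	...
 *	unsigned long n = 0;
 *	dump_trace(current, NULL, NULL, 0, &my_ops, &n);
 *
 * where my_ops is a stacktrace_ops with .address = count_address and the
 * remaining callbacks pointing at no-op handlers.
 */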
static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	print_symbol(msg, symbol);
	printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
	printk("%s\n", msg);
}

static int print_trace_stack(void *data, char *name)
{
	printk(" <%s> ", name);
	return 0;
}

static void print_trace_address(void *data, unsigned long addr, int reliable)
{
	touch_nmi_watchdog();
	printk_address(addr, reliable);
}

static const struct stacktrace_ops print_trace_ops = {
	.warning = print_trace_warning,
	.warning_symbol = print_trace_warning_symbol,
	.stack = print_trace_stack,
	.address = print_trace_address,
};
void
show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack,
		unsigned long bp)
{
	printk("\nCall Trace:\n");
	dump_trace(tsk, regs, stack, bp, &print_trace_ops, NULL);
	printk("\n");
}
static void
_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp,
		unsigned long bp)
{
	unsigned long *stack;
	int i;
	const int cpu = smp_processor_id();
	unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
	unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);

	// debugging aid: "show_stack(NULL, NULL);" prints the
	// back trace for this cpu.

	if (sp == NULL) {
		if (tsk)
			sp = (unsigned long *)tsk->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irqstack && stack <= irqstack_end) {
			if (stack == irqstack_end) {
				stack = (unsigned long *) (irqstack_end[-1]);
				printk(" <EOI> ");
			}
		} else {
			if (((long) stack & (THREAD_SIZE-1)) == 0)
				break;
		}
		if (i && ((i % 4) == 0))
			printk("\n");
		printk(" %016lx", *stack++);
		touch_nmi_watchdog();
	}
	printk("\n");
	show_trace(tsk, regs, sp, bp);
}
void show_stack(struct task_struct *tsk, unsigned long * sp)
{
	_show_stack(tsk, NULL, sp, 0);
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long dummy;
	unsigned long bp = 0;

#ifdef CONFIG_FRAME_POINTER
	if (!bp)
		asm("movq %%rbp, %0" : "=r" (bp):);
#endif

	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	show_trace(NULL, NULL, &dummy, bp);
}

EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
	int i;
	int in_kernel = !user_mode(regs);
	unsigned long sp;
	const int cpu = smp_processor_id();
	struct task_struct *cur = cpu_pda(cpu)->pcurrent;

	sp = regs->sp;
	printk("CPU %d ", cpu);
	__show_regs(regs);
	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
		cur->comm, cur->pid, task_thread_info(cur), cur);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {
		printk("Stack: ");
		_show_stack(NULL, regs, (unsigned long *)sp, regs->bp);

		printk("\nCode: ");
		if (regs->ip < PAGE_OFFSET)
			goto bad;

		for (i = 0; i < 20; i++) {
			unsigned char c;
			if (__get_user(c, &((unsigned char*)regs->ip)[i])) {
bad:
				printk(" Bad RIP value.");
				break;
			}
			printk("%02x ", c);
		}
	}
	printk("\n");
}
int is_valid_bugaddr(unsigned long ip)
{
	unsigned short ud2;

	if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
		return 0;

	return ud2 == 0x0b0f;
}
static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!__raw_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			__raw_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}

void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	die_owner = -1;
	bust_spinlocks(0);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		__raw_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	if (!regs) {
		oops_exit();
		return;
	}
	if (panic_on_oops)
		panic("Fatal exception");
	oops_exit();
	do_exit(signr);
}
int __kprobes __die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;

	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
		return 1;
	show_registers(regs);
	add_taint(TAINT_DIE);
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	printk_address(regs->ip, regs->bp);
	printk(" RSP <%016lx>\n", regs->sp);
	if (kexec_should_crash(current))
		crash_kexec(regs);
	return 0;
}
void die(const char * str, struct pt_regs * regs, long err)
{
	unsigned long flags = oops_begin();

	if (!user_mode(regs))
		report_bug(regs->ip, regs);

	if (__die(str, regs, err))
		regs = NULL;
	oops_end(flags, regs, SIGSEGV);
}
void __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	unsigned long flags = oops_begin();

	/*
	 * We are in trouble anyway, lets at least try
	 * to get a message out.
	 */
	printk(str, smp_processor_id());
	show_registers(regs);
	if (kexec_should_crash(current))
		crash_kexec(regs);
	if (do_panic || panic_on_oops)
		panic("Non maskable interrupt");
	oops_end(flags, NULL, SIGBUS);
	nmi_exit();
	local_irq_enable();
	do_exit(SIGBUS);
}
static void __kprobes do_trap(int trapnr, int signr, char *str,
			struct pt_regs * regs, long error_code,
			siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (user_mode(regs)) {
		/*
		 * We want error_code and trap_no set for userspace
		 * faults and kernelspace faults which result in
		 * die(), but not kernelspace faults which are fixed
		 * up. die() gives the process no chance to handle
		 * the signal and notice the kernel fault information,
		 * so that won't result in polluting the information
		 * about previously queued, but not yet delivered,
		 * faults. See also do_general_protection below.
		 */
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;

		if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
		    printk_ratelimit())
			printk(KERN_INFO
			       "%s[%d] trap %s ip:%lx sp:%lx error:%lx\n",
			       tsk->comm, tsk->pid, str,
			       regs->ip, regs->sp, error_code);

		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	}

	/* kernel trap */
	if (!fixup_exception(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		die(str, regs, error_code);
	}
	return;
}
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
							== NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	trace_hardirqs_fixup(); \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
							== NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, &info); \
}
DO_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->ip)
DO_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)
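/*
 * For reference (illustrative expansion, not additional code): the line
 * DO_ERROR( 4, SIGSEGV, "overflow", overflow) above expands to roughly
 *
 *	asmlinkage void do_overflow(struct pt_regs * regs, long error_code)
 *	{
 *		if (notify_die(DIE_TRAP, "overflow", regs, error_code,
 *						4, SIGSEGV) == NOTIFY_STOP)
 *			return;
 *		conditional_sti(regs);
 *		do_trap(4, SIGSEGV, "overflow", regs, error_code, NULL);
 *	}
 *
 * i.e. one handler per trap, differing only in trap number, signal and name.
 */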
/* Runs on IST stack */
asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
			12, SIGBUS) == NOTIFY_STOP)
		return;
	preempt_conditional_sti(regs);
	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}
asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

	/* Return not checked because a double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 8;

	/* This is always a kernel trap and never fixable (and thus must
	   never return). */
	for (;;)
		die(str, regs, error_code);
}
asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
						long error_code)
{
	struct task_struct *tsk = current;

	conditional_sti(regs);

	if (user_mode(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = 13;

		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit())
			printk(KERN_INFO
		       "%s[%d] general protection ip:%lx sp:%lx error:%lx\n",
			       tsk->comm, tsk->pid,
			       regs->ip, regs->sp, error_code);

		force_sig(SIGSEGV, tsk);
		return;
	}

	if (fixup_exception(regs))
		return;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;
	if (notify_die(DIE_GPF, "general protection fault", regs,
				error_code, 13, SIGSEGV) == NOTIFY_STOP)
		return;
	die("general protection fault", regs, error_code);
}
static __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
		reason);
	printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

	/* Clear and disable the memory parity error line. */
	reason = (reason & 0xf) | 4;
	outb(reason, 0x61);
}
static __kprobes void
io_check_error(unsigned char reason, struct pt_regs * regs)
{
	printk("NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	mdelay(2000);
	reason &= ~8;
	outb(reason, 0x61);
}
static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
		reason);
	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}
/* Runs on IST stack. This code must keep interrupts off all the time.
   Nested NMIs are prevented by the CPU. */
asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int cpu;

	cpu = smp_processor_id();

	/* Only the BSP gets external NMIs from the system. */
	if (!cpu)
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
								== NOTIFY_STOP)
			return;
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog_tick(regs, reason))
			return;
		if (!do_nmi_callback(regs, cpu))
			unknown_nmi_error(reason, regs);

		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;

	/* AK: following checks seem to be broken on modern chipsets. FIXME */
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
}
/* runs on IST stack. */
asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
{
	trace_hardirqs_fixup();

	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;

	preempt_conditional_sti(regs);
	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}
/* Help handler running on IST stack to switch back to user stack
   for scheduling or signal handling. The actual stack switch is done in
   entry.S */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Did already sync */
	if (eregs == (struct pt_regs *)eregs->sp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/* Exception from kernel and interrupts are enabled. Move to
	   kernel process stack. */
	else if (eregs->flags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}
/* runs on IST stack. */
asmlinkage void __kprobes do_debug(struct pt_regs * regs,
				   unsigned long error_code)
{
	unsigned long condition;
	struct task_struct *tsk = current;
	siginfo_t info;

	trace_hardirqs_fixup();

	get_debugreg(condition, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
	tsk->thread.debugctlmsr = 0;

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
						SIGTRAP) == NOTIFY_STOP)
		return;

	preempt_conditional_sti(regs);

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7)
			goto clear_dr7;
	}

	tsk->thread.debugreg6 = condition;

	/*
	 * Single-stepping through TF: make sure we ignore any events in
	 * kernel space (but re-enable TF when returning to user mode).
	 */
	if (condition & DR_STEP) {
		if (!user_mode(regs))
			goto clear_TF_reenable;
	}

	/* Ok, finally something we can handle */
	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL;
	force_sig_info(SIGTRAP, &info, tsk);

clear_dr7:
	set_debugreg(0UL, 7);
	preempt_conditional_cli(regs);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->flags &= ~X86_EFLAGS_TF;
	preempt_conditional_cli(regs);
}
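/*
 * Illustrative note (not in the original file): DR_TRAP0..DR_TRAP3 are the
 * B0-B3 "breakpoint hit" bits of DR6 and DR_STEP is the single-step (BS)
 * bit. Because DR7 is only written lazily, the CPU can report a B0-B3 hit
 * for a breakpoint this task never armed; when the task's own shadow copy
 * (tsk->thread.debugreg7) is zero, the handler above simply clears DR7 and
 * returns instead of delivering SIGTRAP.
 */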
static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
	if (fixup_exception(regs))
		return 1;

	notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
	/* Illegal floating point operation in the kernel */
	current->thread.trap_no = trapnr;
	die(str, regs, 0);
	return 0;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
asmlinkage void do_coprocessor_error(struct pt_regs *regs)
{
	void __user *ip = (void __user *)(regs->ip);
	struct task_struct * task;
	siginfo_t info;
	unsigned short cwd, swd;

	conditional_sti(regs);
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel x87 math error", 16))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (swd & ~cwd & 0x3f) {
	case 0x000: /* No unmasked exception */
	default: /* Multiple exceptions */
		break;
	case 0x001: /* Invalid Op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}
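/*
 * Worked example (illustrative): the low six bits of the control word are
 * the exception *mask* bits and the low six bits of the status word are the
 * exception *flag* bits, in the order IE, DE, ZE, OE, UE, PE. If a program
 * unmasks divide-by-zero (clears bit 2 of cwd, e.g. cwd == 0x037b) and then
 * divides by zero (swd has bit 2 set), the expression
 *	swd & ~cwd & 0x3f == 0x004
 * selects the "Zero Divide" case above and the task gets SIGFPE with
 * si_code FPE_FLTDIV.
 */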
asmlinkage void bad_intr(void)
{
	printk("bad interrupt");
}
asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
{
	void __user *ip = (void __user *)(regs->ip);
	struct task_struct * task;
	siginfo_t info;
	unsigned short mxcsr;

	conditional_sti(regs);
	if (!user_mode(regs) &&
			kernel_math_error(regs, "kernel simd math error", 19))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
	case 0x000: /* No unmasked exception */
	default: /* Multiple exceptions */
		break;
	case 0x001: /* Invalid Op */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}
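/*
 * Worked example (illustrative): in MXCSR the exception flags live in bits
 * 0-5 and the corresponding mask bits in bits 7-12, in the same IE, DE, ZE,
 * OE, UE, PE order. Shifting the mask field down by 7 lines it up with the
 * flag field, so with divide-by-zero unmasked (ZM, bit 9, clear) and the ZE
 * flag (bit 2) set:
 *	~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f) == 0x004
 * which selects the "Zero Divide" case above, again raising SIGFPE with
 * FPE_FLTDIV.
 */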
asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
{
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
	struct task_struct *me = current;

	clts();			/* Allow maths ops (or we recurse) */

	if (!used_math())
		init_fpu(me);
	restore_fpu_checking(&me->thread.i387.fxsave);
	task_thread_info(me)->status |= TS_USEDFPU;
	me->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);
void __init trap_init(void)
{
	set_intr_gate(0, &divide_error);
	set_intr_gate_ist(1, &debug, DEBUG_STACK);
	set_intr_gate_ist(2, &nmi, NMI_STACK);
	set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */
	set_system_gate(4, &overflow); /* int4 can be called from all */
	set_intr_gate(5, &bounds);
	set_intr_gate(6, &invalid_op);
	set_intr_gate(7, &device_not_available);
	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
	set_intr_gate(9, &coprocessor_segment_overrun);
	set_intr_gate(10, &invalid_TSS);
	set_intr_gate(11, &segment_not_present);
	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(13, &general_protection);
	set_intr_gate(14, &page_fault);
	set_intr_gate(15, &spurious_interrupt_bug);
	set_intr_gate(16, &coprocessor_error);
	set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
	set_intr_gate(19, &simd_coprocessor_error);

#ifdef CONFIG_IA32_EMULATION
	set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif

	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();
}
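/*
 * For reference (illustrative, not additional code): set_intr_gate() installs
 * a DPL-0 interrupt gate, so the vector can only be raised by the CPU or the
 * kernel, while set_system_gate() uses DPL 3 so userspace may trigger it
 * directly with "int $n" (hence the comments for int3 and int4 above). The
 * *_ist() variants additionally select one of the per-CPU IST stacks
 * (DEBUG_STACK, NMI_STACK, DOUBLEFAULT_STACK, ...) so those handlers always
 * run on a known-good stack.
 */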
static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);

static int __init kstack_setup(char *s)
{
	if (!s)
		return -EINVAL;
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("kstack", kstack_setup);