/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'syscall_exit':
 *	ptrace needs to have all regs on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 * "current" is in register %ebx during any slow entries.
 */
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000
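/*
 * Note: EM_386 is the ELF machine number for i386 and __AUDIT_ARCH_LE
 * marks the ABI as little-endian, so audit user space can decode the
 * recorded syscall arguments correctly.
 */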
#ifndef CONFIG_AUDITSYSCALL
#define sysenter_audit	syscall_trace_entry
#define sysexit_audit	syscall_exit_work
#endif
/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */
#define nr_syscalls ((syscall_table_size)/4)
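/*
 * sys_call_table is an array of 4-byte function pointers on 32-bit x86,
 * so dividing its size in bytes by 4 yields the number of system calls.
 */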
#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel		restore_nocheck
#endif
.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm
#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif
.macro SAVE_ALL
	cld
	pushl %fs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET fs, 0;*/
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0;*/
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0;*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	movl $(__USER_DS), %edx
	movl %edx, %ds
	movl %edx, %es
	movl $(__KERNEL_PERCPU), %edx
	movl %edx, %fs
.endm
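/*
 * The pushes above lay down a struct pt_regs on the kernel stack; the
 * PT_* offsets used throughout this file index into that frame.  %ds
 * and %es are reloaded with the flat user data selector and %fs with
 * the per-cpu segment so that C code sees a sane environment.
 */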
.macro RESTORE_INT_REGS
	popl %ebx
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE ebx
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE ecx
	popl %edx
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE edx
	popl %esi
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE esi
	popl %edi
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE edi
	popl %ebp
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE ebp
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE eax
.endm

.macro RESTORE_REGS
	RESTORE_INT_REGS
1:	popl %ds
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE ds;*/
2:	popl %es
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE es;*/
3:	popl %fs
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE fs;*/
.pushsection .fixup, "ax"
4:	movl $0, (%esp)
	jmp 1b
5:	movl $0, (%esp)
	jmp 2b
6:	movl $0, (%esp)
	jmp 3b
.section __ex_table, "a"
	.align 4
	.long 1b, 4b
	.long 2b, 5b
	.long 3b, 6b
.popsection
.endm
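/*
 * If popping a segment register faults (e.g. on a stale user selector),
 * the fixup entries above zero the saved selector on the stack and
 * retry the pop, so the task resumes with a null segment instead of
 * the kernel oopsing.
 */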
.macro RING0_INT_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 3*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_EC_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 4*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm
.macro RING0_PTREGS_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
	CFI_OFFSET eip, PT_EIP-PT_OLDESP
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
	CFI_OFFSET eax, PT_EAX-PT_OLDESP
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
	CFI_OFFSET edi, PT_EDI-PT_OLDESP
	CFI_OFFSET esi, PT_ESI-PT_OLDESP
	CFI_OFFSET edx, PT_EDX-PT_OLDESP
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
.endm
ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags
	CFI_ADJUST_CFA_OFFSET 4
	popfl
	CFI_ADJUST_CFA_OFFSET -4
	jmp syscall_exit
	CFI_ENDPROC
END(ret_from_fork)
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */
	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
END(ret_from_exception)
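/*
 * check_userspace folds the VM flag from EFLAGS and the RPL of the
 * saved CS into one word: any value below USER_RPL with VM clear means
 * we are returning to kernel mode, so a single compare picks the
 * kernel-resume path.
 */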
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_nocheck
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
END(resume_kernel)
#endif
/*
 * SYSENTER_RETURN points to after the "sysenter" instruction in
 * the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.
 */
	# sysenter call handler stub
ENTRY(ia32_sysenter_target)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_sp0(%esp),%esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace that until
	 * we have set up enough kernel state to call TRACE_IRQS_OFF -
	 * and we immediately enable interrupts at that point anyway.
	 */
	pushl $(__USER_DS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	pushfl
	orl $X86_EFLAGS_IF, (%esp)
	CFI_ADJUST_CFA_OFFSET 4
	pushl $(__USER_CS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0

	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)
	/*
	 * Load the potential sixth argument from the user stack.
	 * Careful about security.
	 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
	movl %ebp,PT_EBP(%esp)
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous
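/*
 * __ex_table holds (faulting insn, fixup) address pairs.  If the user
 * %ebp dereference at 1: faults, the page-fault handler finds the
 * entry and resumes execution at syscall_fault instead of oopsing.
 */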
	GET_THREAD_INFO(%ebp)

	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz sysenter_audit
sysenter_do_call:
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne sysexit_audit
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov  PT_FS(%esp), %fs
	ENABLE_INTERRUPTS_SYSEXIT
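/*
 * sysexit resumes user mode with %edx -> EIP and %ecx -> ESP, which is
 * why the saved PT_EIP and PT_OLDESP are loaded into exactly those two
 * registers above.
 */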
#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
	testw $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	addl $4,%esp
	CFI_ADJUST_CFA_OFFSET -4
	/* %esi already in 8(%esp)	   6th arg: 4th syscall arg */
	/* %edx already in 4(%esp)	   5th arg: 3rd syscall arg */
	/* %ecx already in 0(%esp)	   4th arg: 2nd syscall arg */
	movl %ebx,%ecx			/* 3rd arg: 1st syscall arg */
	movl %eax,%edx			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_I386,%eax	/* 1st arg: audit arch */
	call audit_syscall_entry
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	movl PT_EAX(%esp),%eax		/* reload syscall number */
	jmp sysenter_do_call

sysexit_audit:
	testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
	jne syscall_exit_work
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl %eax,%edx		/* second arg, syscall return value */
	cmpl $0,%eax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%eax		/* zero-extend that */
	inc %eax		/* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
	jne syscall_exit_work
	movl PT_EAX(%esp),%eax	/* reload syscall return value */
	jmp sysenter_exit
#endif
	.pushsection .fixup,"ax"
2:	movl $0,PT_FS(%esp)
	jmp 1b
.section __ex_table,"a"
	.align 4
	.long 1b,2b
.popsection
	CFI_ENDPROC
ENDPROC(ia32_sysenter_target)
	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation / emulation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work
restore_all:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
restore_nocheck:
	TRACE_IRQS_IRET
restore_nocheck_notrace:
	RESTORE_REGS
	addl $4, %esp			# skip orig_eax/error_code
	CFI_ADJUST_CFA_OFFSET -4
irq_return:
	INTERRUPT_RETURN
.section .fixup,"ax"
ENTRY(iret_exc)
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long irq_return,iret_exc
.previous
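/*
 * iret itself can fault, e.g. when restoring a stale user %cs or %ss;
 * the exception-table entry above redirects such a fault to iret_exc,
 * which reports it via do_iret_error rather than recursing.
 */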
ldt_ss:
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return
#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active.  Rather than try to fixup the high bits of
	 * ESP, bypass this code entirely.  This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, pv_info+PARAVIRT_enabled
	jne restore_nocheck
#endif
	/*
	 * If returning to userspace with a 16bit stack,
	 * try to fix the higher word of ESP, as the CPU
	 * won't restore it.
	 * This is an "official" bug of all the x86-compatible
	 * CPUs, which we can try to work around to make
	 * dosemu and wine happy.
	 */
	movl PT_OLDESP(%esp), %eax
	movl %esp, %edx
	call patch_espfix_desc
	pushl $__ESPFIX_SS
	CFI_ADJUST_CFA_OFFSET 4
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	DISABLE_INTERRUPTS(CLBR_EAX)
	TRACE_IRQS_OFF
	lss (%esp), %esp
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC
ENDPROC(system_call)
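/*
 * lss loads SS and ESP as a single unit from the two words just pushed,
 * switching to the espfix segment whose base (set up by
 * patch_espfix_desc) compensates for the truncated 16-bit stack pointer.
 */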
	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched
work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	movl %eax, %esp
#else
	movl %esp, %eax
#endif
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig
END(work_pending)
	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	call syscall_trace_enter
	/* What it returned is what we'll actually use. */
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit
END(syscall_trace_entry)
	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $_TIF_WORK_SYSCALL_EXIT, %cl
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
					# schedule() instead
	movl %esp, %eax
	call syscall_trace_leave
	jmp resume_userspace
END(syscall_exit_work)
	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_fault)

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_badsys)
.macro FIXUP_ESPFIX_STACK
	/* since we are on the wrong stack, this cannot be done in C :( */
	PER_CPU(gdt_page, %ebx)
	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
	addl %esp, %eax
	pushl $__KERNEL_DS
	CFI_ADJUST_CFA_OFFSET 4
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	lss (%esp), %esp
	CFI_ADJUST_CFA_OFFSET -8
.endm
.macro UNWIND_ESPFIX_STACK
	movl %ss, %eax
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	jne 27f
	movl $__KERNEL_DS, %eax
	movl %eax, %ds
	movl %eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
.endm
/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
.section .init.rodata,"a"
ENTRY(interrupt)
.text
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -4
      .endif
1:	pushl $(~vector+0x80)	/* Note: always in signed byte range */
	CFI_ADJUST_CFA_OFFSET 4
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.long 1b
      .text
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
END(irq_entries_start)
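/*
 * The value pushed is ~vector+0x80 so that the immediate always fits in
 * a signed byte and each pushl encodes in two bytes, keeping 7 stubs
 * within one 32-byte chunk.  common_interrupt subtracts 0x80 again,
 * leaving ~vector in orig_eax for the C handler to complement.
 */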
/*
 * The CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
ENDPROC(common_interrupt)
	CFI_ENDPROC
#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl $~(nr);			\
	CFI_ADJUST_CFA_OFFSET 4;	\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call fn;			\
	jmp ret_from_intr;		\
	CFI_ENDPROC;			\
ENDPROC(name)

#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)
/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"
ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(coprocessor_error)
ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_simd_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(simd_coprocessor_error)
ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_device_not_available
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(device_not_available)
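/*
 * The -1 pushed above lands in the orig_eax slot; it marks this frame
 * as "not a system call" so the signal code never tries to restart it.
 */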
#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
.section __ex_table,"a"
	.align 4
	.long native_iret, iret_exc
.previous
END(native_iret)

ENTRY(native_irq_enable_sysexit)
	sti
	sysexit
END(native_irq_enable_sysexit)
#endif
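/*
 * These are the native backends behind the INTERRUPT_RETURN and
 * ENABLE_INTERRUPTS_SYSEXIT macros; under paravirt the hypervisor's
 * replacements may be patched in over them.
 */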
ENTRY(overflow)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_overflow
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(overflow)

ENTRY(bounds)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_bounds
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(bounds)

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_invalid_op
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(invalid_op)
ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_segment_overrun
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(coprocessor_segment_overrun)
ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl $do_invalid_TSS
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(invalid_TSS)
ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl $do_segment_not_present
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(segment_not_present)
ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl $do_stack_segment
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(stack_segment)
ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl $do_alignment_check
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(alignment_check)
ENTRY(divide_error)
	RING0_INT_FRAME
	pushl $0			# no error code
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_divide_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(divide_error)
#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl machine_check_vector
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(machine_check)
#endif
ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_spurious_interrupt_bug
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(spurious_interrupt_bug)
ENTRY(kernel_thread_helper)
	pushl $0		# fake return address for unwinder
	CFI_STARTPROC
	movl %edx,%eax
	push %edx
	CFI_ADJUST_CFA_OFFSET 4
	call *%ebx
	push %eax
	CFI_ADJUST_CFA_OFFSET 4
	call do_exit
	ud2			# padding for call trace
	CFI_ENDPROC
ENDPROC(kernel_thread_helper)
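/*
 * Per kernel_thread()'s register setup, %ebx holds the thread function
 * and %edx its argument; whatever the function returns is handed
 * straight to do_exit() as the thread's exit code.
 */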
#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
   entrypoint expects, so fix it up before using the normal path. */
ENTRY(xen_sysenter_target)
	RING0_INT_FRAME
	addl $5*4, %esp		/* remove xen-provided frame */
	CFI_ADJUST_CFA_OFFSET -5*4
	jmp sysenter_past_esp
	CFI_ENDPROC
ENDPROC(xen_sysenter_target)
ENTRY(xen_hypervisor_callback)
	CFI_STARTPROC
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF

	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events.  This simulates
	 * iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts.
	 */
	movl PT_EIP(%esp),%eax
	cmpl $xen_iret_start_crit,%eax
	jb   1f
	cmpl $xen_iret_end_crit,%eax
	jae  1f

	jmp  xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov %esp, %eax
	call xen_evtchn_do_upcall
	jmp  ret_from_intr
	CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)
# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl $1,%eax
1:	mov 4(%esp),%ds
2:	mov 8(%esp),%es
3:	mov 12(%esp),%fs
4:	mov 16(%esp),%gs
	testl %eax,%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	lea 16(%esp),%esp
	CFI_ADJUST_CFA_OFFSET -16
	jz 5f
	addl $16,%esp
	jmp iret_exc		# EAX != 0 => Category 2 (Bad IRET)
5:	pushl $0		# EAX == 0 => Category 1 (Bad segment)
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	jmp ret_from_exception
	CFI_ENDPROC
.section __ex_table,"a"
ENDPROC(xen_failsafe_callback)
#endif	/* CONFIG_XEN */
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	popl %edx
	popl %ecx
	popl %eax

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	cmpl $ftrace_stub, ftrace_trace_function
	jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret
	/* taken from glibc */
trace:
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

	call *ftrace_trace_function

	popl %edx
	popl %ecx
	popl %eax
	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %edx
	lea 0x4(%ebp), %eax
	subl $MCOUNT_INSN_SIZE, %edx
	call prepare_ftrace_return
	popl %edx
	popl %ecx
	popl %eax
	ret
END(ftrace_graph_caller)
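/*
 * prepare_ftrace_return() is handed the location of the caller's return
 * address (%eax) and the traced function's own address (%edx); it swaps
 * the return address so the function returns into return_to_handler.
 */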
.globl return_to_handler
return_to_handler:
	pushl $0
	pushl %eax
	pushl %ecx
	pushl %edx
	call ftrace_return_to_handler
	movl %eax, 0xc(%esp)
	popl %edx
	popl %ecx
	popl %eax
	ret
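/*
 * ftrace_return_to_handler() returns the original return address; it is
 * stored into the slot reserved by the initial pushl $0, so the final
 * ret lands back in the real caller.
 */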
.section .rodata,"a"
#include "syscall_table_32.S"

syscall_table_size=(.-sys_call_table)
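/*
 * This size, measured in bytes from the start of sys_call_table, feeds
 * the nr_syscalls calculation at the top of this file.
 */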
/*
 * Some functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
ENTRY(page_fault)
	RING0_EC_FRAME
	pushl $do_page_fault
	CFI_ADJUST_CFA_OFFSET 4
	ALIGN
error_code:
	/* the function address is in %fs's slot on the stack */
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	cld
	pushl %fs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET fs, 0*/
	movl $(__KERNEL_PERCPU), %ecx
	movl %ecx, %fs
	UNWIND_ESPFIX_STACK
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_REGISTER es, ecx*/
	movl PT_FS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	mov  %ecx, PT_FS(%esp)
	/*CFI_REL_OFFSET fs, ES*/
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	TRACE_IRQS_OFF
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
END(page_fault)
/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack.  Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
	cmpw $__KERNEL_CS, 4(%esp)
	jne \ok
\label:
	movl TSS_sysenter_sp0 + \offset(%esp), %esp
	CFI_DEF_CFA esp, 0
	CFI_UNDEFINED eip
	pushfl
	CFI_ADJUST_CFA_OFFSET 4
	pushl $__KERNEL_CS
	CFI_ADJUST_CFA_OFFSET 4
	pushl $sysenter_past_esp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0
.endm
ENTRY(debug)
	RING0_INT_FRAME
	cmpl $ia32_sysenter_target,(%esp)
	jne debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
END(debug)
/*
 * NMI is doubly nasty.  It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack.  So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	RING0_INT_FRAME
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	je nmi_espfix_stack
	cmpl $ia32_sysenter_target,(%esp)
	je nmi_stack_fixup
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %esp,%eax
	/*
	 * Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	jae nmi_stack_correct
	cmpl $ia32_sysenter_target,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	/* We have a RING0_INT_FRAME here */
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_nocheck_notrace
	CFI_ENDPROC
nmi_stack_fixup:
	RING0_INT_FRAME
	FIX_STACK 12, nmi_stack_correct, 1
	jmp nmi_stack_correct
nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp nmi_stack_correct
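/*
 * The offset is 24 rather than 12 here because by the time the NMI
 * hits the debug path, two three-word exception frames (debug's and
 * the NMI's own) sit on top of the sysenter stack.
 */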
nmi_espfix_stack:
	/*
	 * We have a RING0_INT_FRAME here.
	 *
	 * Create the pointer to LSS back:
	 */
	pushl %ss
	CFI_ADJUST_CFA_OFFSET 4
	pushl %esp
	CFI_ADJUST_CFA_OFFSET 4
	addw $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl 16(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	.endr
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24
	jmp irq_return
	CFI_ENDPROC
END(nmi)
ENTRY(int3)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
END(int3)
ENTRY(general_protection)
	RING0_EC_FRAME
	pushl $do_general_protection
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(general_protection)
/*
 * End of kprobes section
 */
	.popsection