#ifndef _ASM_X86_PTRACE_H
#define _ASM_X86_PTRACE_H

#include <linux/compiler.h>	/* For __user */
#include <asm/ptrace-abi.h>
#include <asm/processor-flags.h>

#ifdef __KERNEL__
#include <asm/segment.h>
#endif

#ifndef __ASSEMBLY__

#ifdef __i386__
/* this struct defines the way the registers are stored on the
   stack during a system call. */

#ifndef __KERNEL__
struct pt_regs {
	long ebx;
	long ecx;
	long edx;
	long esi;
	long edi;
	long ebp;
	long eax;
	int xds;
	int xes;
	int xfs;
	int xgs;
	long orig_eax;
	long eip;
	int xcs;
	long eflags;
	long esp;
	int xss;
};

#else /* __KERNEL__ */
struct pt_regs {
	unsigned long bx;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long bp;
	unsigned long ax;
	unsigned long ds;
	unsigned long es;
	unsigned long fs;
	unsigned long gs;
	unsigned long orig_ax;
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
};

#endif /* __KERNEL__ */

#else /* __i386__ */
#ifndef __KERNEL__

struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long rbp;
	unsigned long rbx;
/* arguments: non interrupts/non tracing syscalls only save up to here */
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long rax;
	unsigned long rcx;
	unsigned long rdx;
	unsigned long rsi;
	unsigned long rdi;
	unsigned long orig_rax;
/* end of arguments */
/* cpu exception frame or undefined */
	unsigned long rip;
	unsigned long cs;
	unsigned long eflags;
	unsigned long rsp;
	unsigned long ss;
/* top of stack page */
};

#else /* __KERNEL__ */
struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long bp;
	unsigned long bx;
/* arguments: non interrupts/non tracing syscalls only save up to here */
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long ax;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long orig_ax;
/* end of arguments */
/* cpu exception frame or undefined */
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
/* top of stack page */
};

#endif /* __KERNEL__ */
#endif /* !__i386__ */
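
/*
 * Note added for clarity (not in the original header): the #ifndef
 * __KERNEL__ variants above are the layouts exported to user space and
 * keep the historic e/r-prefixed register names, while the kernel builds
 * use the prefix-less names for the same save area.  The numeric register
 * offsets used by the ptrace ABI come from <asm/ptrace-abi.h>, included
 * at the top of this file.
 */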
#ifdef __KERNEL__

#include <linux/init.h>

struct cpuinfo_x86;
struct task_struct;

extern unsigned long profile_pc(struct pt_regs *regs);

extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
			 int error_code, int si_code);
void signal_fault(struct pt_regs *regs, void __user *frame, char *where);

extern long syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_leave(struct pt_regs *);
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
	return regs->ax;
}
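
/*
 * Note added for clarity (not in the original header): the x86 syscall
 * ABI returns a system call's result in eax/rax, which the kernel-side
 * struct pt_regs exposes as ->ax on both 32- and 64-bit, so
 * regs_return_value() above is how generic code reads a syscall's return
 * value from a saved register set.
 */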
/*
 * user_mode_vm(regs) determines whether a register set came from user mode.
 * This is true if V8086 mode was enabled OR if the register set was from
 * protected mode with RPL-3 CS value.  This tricky test checks that with
 * one comparison.  Many places in the kernel can bypass this full check
 * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
 */
static inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
#else
	return !!(regs->cs & 3);
#endif
}
static inline int user_mode_vm(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
		USER_RPL;
#else
	return user_mode(regs);
#endif
}
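
/*
 * Worked example of the "tricky test" above (note added, not in the
 * original header): on 32-bit, SEGMENT_RPL_MASK is 0x3 and USER_RPL is 3,
 * while X86_VM_MASK is the EFLAGS VM bit (bit 17, 0x00020000).  If the CPU
 * was in V8086 mode the OR picks up that bit and the result is far above
 * USER_RPL; otherwise the VM term is 0 and the test collapses to
 * "CS RPL == 3", i.e. the plain user_mode() check.  One comparison thus
 * covers both cases.
 */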
static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->flags & X86_VM_MASK);
#else
	return 0;	/* No V86 mode support in long mode */
#endif
}
/*
 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
 * when it traps.  So regs will be the current sp.
 *
 * This is valid only for kernel mode traps.
 */
static inline unsigned long kernel_trap_sp(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (unsigned long)regs;
#else
	return regs->sp;
#endif
}
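
/*
 * Note added for clarity (not in the original header): on a same-privilege
 * trap a 32-bit CPU pushes only eflags/cs/eip (plus an error code for some
 * exceptions), and the entry code then builds the rest of struct pt_regs in
 * place on the kernel stack, so the address of the structure itself is the
 * kernel stack pointer at the time the registers were saved.  64-bit CPUs
 * always push ss/rsp, so the saved regs->sp can be used directly.
 */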
static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
	return regs->ip;
}

static inline unsigned long frame_pointer(struct pt_regs *regs)
{
	return regs->bp;
}

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	return regs->sp;
}
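
/*
 * Illustrative usage (not part of the original header): code that receives
 * a struct pt_regs, e.g. a trap handler or tracing hook, can report where
 * it was entered without caring about the 32/64-bit register names:
 *
 *	printk(KERN_DEBUG "trapped at %lx (frame %lx, user sp %lx)\n",
 *	       instruction_pointer(regs), frame_pointer(regs),
 *	       user_stack_pointer(regs));
 */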
/*
 * These are defined as per linux/ptrace.h, which see.
 */
#define arch_has_single_step()	(1)
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);

extern void user_enable_block_step(struct task_struct *);
#ifdef CONFIG_X86_DEBUGCTLMSR
#define arch_has_block_step()	(1)
#else
#define arch_has_block_step()	(boot_cpu_data.x86 >= 6)
#endif
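
/*
 * Note added for clarity (not in the original header): "block step" means
 * trapping once per taken branch rather than once per instruction, using
 * the BTF bit of the IA32_DEBUGCTL MSR; the fallback test approximates the
 * CPUs that provide that MSR (family 6 and later).
 */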
struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info);
extern int do_set_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info, int can_allocate);

extern void x86_ptrace_untrace(struct task_struct *);
extern void x86_ptrace_fork(struct task_struct *child,
			    unsigned long clone_flags);

#define arch_ptrace_untrace(tsk)	x86_ptrace_untrace(tsk)
#define arch_ptrace_fork(child, flags)	x86_ptrace_fork(child, flags)

#endif /* __KERNEL__ */

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PTRACE_H */