#ifndef _ASM_IA64_PTRACE_H
#define _ASM_IA64_PTRACE_H

/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2003 Intel Co
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Arun Sharma <arun.sharma@intel.com>
 *
 * 12/07/98	S. Eranian	added pt_regs & switch_stack
 * 12/21/98	D. Mosberger	updated to match latest code
 *  6/17/99	D. Mosberger	added second unat member to "struct switch_stack"
 *
 * When a user process is blocked, its state looks as follows:
 *
 *            +----------------------+  ------- IA64_STK_OFFSET
 *            |                      |    ^
 *            |   struct pt_regs     |    |
 *            |                      |    |
 *            +----------------------+    |
 *            |                      |    |
 *            |    memory stack      |    |
 *            |  (growing downwards) |    |
 *            //....................//    |
 *                                        |
 *            //....................//    |
 *            |                      |    |
 *            +----------------------+    |
 *            | struct switch_stack  |    |
 *            |                      |    |
 *            +----------------------+    |
 *            |                      |    |
 *            //....................//    |
 *                                        |
 *            //....................//    |
 *            |                      |    |
 *            |   register stack     |    |
 *            |  (growing upwards)   |    |
 *            |                      |    |
 *            +----------------------+    |  --- IA64_RBS_OFFSET
 *            |  struct thread_info  |    |   ^
 *            +----------------------+    |   |
 *            |                      |    |   |
 *            |  struct task_struct  |    |   |
 * current -> |                      |    |   |
 *            +----------------------+  -------
 *
 * Note that ar.ec is not saved explicitly in pt_regs or switch_stack.
 * This is because ar.ec is saved as part of ar.pfs.
 */
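/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * the picture above is what ia64_task_regs() and IA64_RBS_OFFSET below
 * encode.  For a blocked task "p", the user-level pt_regs sit at the very
 * top of the kernel stack and the register backing store starts just above
 * the thread_info:
 *
 *	struct pt_regs *regs =
 *		((struct pt_regs *) ((char *) p + IA64_STK_OFFSET)) - 1;
 *	unsigned long rbs = (unsigned long) p + IA64_RBS_OFFSET;
 */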
#include <linux/config.h>

#include <asm/fpu.h>
#include <asm/offsets.h>

/*
 * Base-2 logarithm of number of pages to allocate per task structure
 * (including register backing store and memory stack):
 */
#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
# define KERNEL_STACK_SIZE_ORDER	3
#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
# define KERNEL_STACK_SIZE_ORDER	2
#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
# define KERNEL_STACK_SIZE_ORDER	1
#else
# define KERNEL_STACK_SIZE_ORDER	0
#endif

#define IA64_RBS_OFFSET		((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 15) & ~15)
#define IA64_STK_OFFSET		((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)

#define KERNEL_STACK_SIZE	IA64_STK_OFFSET
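/*
 * Worked example (editorial addition): with 16KB pages,
 * KERNEL_STACK_SIZE_ORDER is 1, so each task gets
 *
 *	KERNEL_STACK_SIZE = (1 << 1) * 16KB = 32KB
 *
 * shared between the memory stack (growing down from IA64_STK_OFFSET) and
 * the register backing store (growing up from IA64_RBS_OFFSET).  The 4KB
 * and 8KB page-size cases likewise work out to 32KB; the 64KB page-size
 * case gives a 64KB stack.
 */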
#ifndef __ASSEMBLY__

#include <asm/current.h>
#include <asm/page.h>

/*
 * This struct defines the way the registers are saved on system
 * calls.
 *
 * We don't save all floating point registers because the kernel
 * is compiled to use only a very small subset, so the others are
 * untouched.
 *
 * THIS STRUCTURE MUST BE A MULTIPLE OF 16 BYTES IN SIZE
 * (because the memory stack pointer MUST ALWAYS be aligned this way)
 */
struct pt_regs {
	/* The following registers are saved by SAVE_MIN: */
	unsigned long b6;		/* scratch */
	unsigned long b7;		/* scratch */

	unsigned long ar_csd;		/* used by cmp8xchg16 (scratch) */
	unsigned long ar_ssd;		/* reserved for future use (scratch) */

	unsigned long r8;		/* scratch (return value register 0) */
	unsigned long r9;		/* scratch (return value register 1) */
	unsigned long r10;		/* scratch (return value register 2) */
	unsigned long r11;		/* scratch (return value register 3) */

	unsigned long cr_ipsr;		/* interrupted task's psr */
	unsigned long cr_iip;		/* interrupted task's instruction pointer */
	/*
	 * interrupted task's function state; if bit 63 is cleared, it
	 * contains syscall's ar.pfs.pfm:
	 */
	unsigned long cr_ifs;

	unsigned long ar_unat;		/* interrupted task's NaT register (preserved) */
	unsigned long ar_pfs;		/* prev function state */
	unsigned long ar_rsc;		/* RSE configuration */
	/* The following two are valid only if cr_ipsr.cpl > 0: */
	unsigned long ar_rnat;		/* RSE NaT */
	unsigned long ar_bspstore;	/* RSE bspstore */

	unsigned long pr;		/* 64 predicate registers (1 bit each) */
	unsigned long b0;		/* return pointer (bp) */
	unsigned long loadrs;		/* size of dirty partition << 16 */

	unsigned long r1;		/* the gp pointer */
	unsigned long r12;		/* interrupted task's memory stack pointer */
	unsigned long r13;		/* thread pointer */

	unsigned long ar_fpsr;		/* floating point status (preserved) */
	unsigned long r15;		/* scratch */

	/* The remaining registers are NOT saved for system calls. */

	unsigned long r14;		/* scratch */
	unsigned long r2;		/* scratch */
	unsigned long r3;		/* scratch */

	/* The following registers are saved by SAVE_REST: */
	unsigned long r16;		/* scratch */
	unsigned long r17;		/* scratch */
	unsigned long r18;		/* scratch */
	unsigned long r19;		/* scratch */
	unsigned long r20;		/* scratch */
	unsigned long r21;		/* scratch */
	unsigned long r22;		/* scratch */
	unsigned long r23;		/* scratch */
	unsigned long r24;		/* scratch */
	unsigned long r25;		/* scratch */
	unsigned long r26;		/* scratch */
	unsigned long r27;		/* scratch */
	unsigned long r28;		/* scratch */
	unsigned long r29;		/* scratch */
	unsigned long r30;		/* scratch */
	unsigned long r31;		/* scratch */

	unsigned long ar_ccv;		/* compare/exchange value (scratch) */

	/*
	 * Floating point registers that the kernel considers scratch:
	 */
	struct ia64_fpreg f6;		/* scratch */
	struct ia64_fpreg f7;		/* scratch */
	struct ia64_fpreg f8;		/* scratch */
	struct ia64_fpreg f9;		/* scratch */
	struct ia64_fpreg f10;		/* scratch */
	struct ia64_fpreg f11;		/* scratch */
};
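/*
 * Editorial sketch, not part of the original header: the "multiple of 16
 * bytes" requirement stated above is the kind of invariant that could be
 * checked at compile time, e.g.
 *
 *	extern char pt_regs_size_check
 *		[(sizeof(struct pt_regs) & 15) == 0 ? 1 : -1];
 *
 * since the memory stack pointer (r12) must stay 16-byte aligned.
 */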
/*
 * This structure contains the additional registers that need to be
 * preserved across a context switch.  This generally consists of
 * "preserved" registers.
 */
struct switch_stack {
	unsigned long caller_unat;	/* user NaT collection register (preserved) */
	unsigned long ar_fpsr;		/* floating-point status register */

	struct ia64_fpreg f2;		/* preserved */
	struct ia64_fpreg f3;		/* preserved */
	struct ia64_fpreg f4;		/* preserved */
	struct ia64_fpreg f5;		/* preserved */

	struct ia64_fpreg f12;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f13;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f14;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f15;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f16;		/* preserved */
	struct ia64_fpreg f17;		/* preserved */
	struct ia64_fpreg f18;		/* preserved */
	struct ia64_fpreg f19;		/* preserved */
	struct ia64_fpreg f20;		/* preserved */
	struct ia64_fpreg f21;		/* preserved */
	struct ia64_fpreg f22;		/* preserved */
	struct ia64_fpreg f23;		/* preserved */
	struct ia64_fpreg f24;		/* preserved */
	struct ia64_fpreg f25;		/* preserved */
	struct ia64_fpreg f26;		/* preserved */
	struct ia64_fpreg f27;		/* preserved */
	struct ia64_fpreg f28;		/* preserved */
	struct ia64_fpreg f29;		/* preserved */
	struct ia64_fpreg f30;		/* preserved */
	struct ia64_fpreg f31;		/* preserved */

	unsigned long r4;		/* preserved */
	unsigned long r5;		/* preserved */
	unsigned long r6;		/* preserved */
	unsigned long r7;		/* preserved */

	unsigned long b0;		/* so we can force a direct return in copy_thread */
	unsigned long b1;
	unsigned long b2;
	unsigned long b3;
	unsigned long b4;
	unsigned long b5;

	unsigned long ar_pfs;		/* previous function state */
	unsigned long ar_lc;		/* loop counter (preserved) */
	unsigned long ar_unat;		/* NaT bits for r4-r7 */
	unsigned long ar_rnat;		/* RSE NaT collection register */
	unsigned long ar_bspstore;	/* RSE dirty base (preserved) */
	unsigned long pr;		/* 64 predicate registers (1 bit each) */
};
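/*
 * Illustrative note (editorial, not from the original header): for a
 * blocked task, a switch_stack is what the context-switch path spills
 * onto the kernel memory stack (see the layout diagram above), so
 * debuggers reach the preserved state through it, e.g.
 *
 *	unsigned long preserved_r4 = sw->r4;	(hypothetical "sw" pointer)
 *	unsigned long loop_count   = sw->ar_lc;
 *
 * rather than through pt_regs, which only holds the scratch state.
 */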
#ifdef __KERNEL__
/*
 * We use the ia64_psr(regs)->ri to determine which of the three
 * instructions in bundle (16 bytes) took the sample. Generate
 * the canonical representation by adding to instruction pointer.
 */
# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
/* Conserve space in histogram by encoding slot bits in address
 * bits 2 and 3 rather than bits 0 and 1.
 */
#define profile_pc(regs)						\
({									\
	unsigned long __ip = instruction_pointer(regs);			\
	(__ip & ~3UL) + ((__ip & 3UL) << 2);				\
})
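/*
 * Worked example (editorial addition): with cr_iip = 0xa000000000010000
 * (bundle addresses are 16-byte aligned) and psr.ri = 2,
 *
 *	instruction_pointer() = 0xa000000000010002
 *	profile_pc()          = 0xa000000000010008
 *
 * i.e. the slot number moves from bits 0-1 into bits 2-3, so 4-byte
 * profiling buckets still keep the three slots of a bundle apart.
 */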
/* given a pointer to a task_struct, return the user's pt_regs */
# define ia64_task_regs(t)	(((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
# define ia64_psr(regs)		((struct ia64_psr *) &(regs)->cr_ipsr)
# define user_mode(regs)	(((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
# define user_stack(task,regs)	((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
# define fsys_mode(task,regs)					\
  ({								\
	struct task_struct *_task = (task);			\
	struct pt_regs *_regs = (regs);				\
	!user_mode(_regs) && user_stack(_task, _regs);		\
  })
/*
 * System call handlers that, upon successful completion, need to return a negative value
 * should call force_successful_syscall_return() right before returning.  On architectures
 * where the syscall convention provides for a separate error flag (e.g., alpha, ia64,
 * ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error
 * flag will not get set.  On architectures which do not support a separate error flag,
 * the macro is a no-op and the spurious error condition needs to be filtered out by some
 * other means (e.g., in user-level, by passing an extra argument to the syscall handler,
 * or something along those lines).
 *
 * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
 */
# define force_successful_syscall_return()	(ia64_task_regs(current)->r8 = 0)
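/*
 * Hedged usage sketch (editorial; the handler name is hypothetical): a
 * handler whose legitimate result happens to look like a negative errno
 * would do
 *
 *	asmlinkage long sys_example (unsigned long arg)
 *	{
 *		long ret = -5;			(a real result, not -EIO)
 *		force_successful_syscall_return();
 *		return ret;			(not reported as an error)
 *	}
 */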
struct task_struct;			/* forward decl */
struct unw_frame_info;			/* forward decl */

extern void show_regs (struct pt_regs *);
extern void ia64_do_show_stack (struct unw_frame_info *, void *);
extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
					    unsigned long *);
extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
		       unsigned long, long *);
extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long,
		       unsigned long, long);
extern void ia64_flush_fph (struct task_struct *);
extern void ia64_sync_fph (struct task_struct *);
extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
				unsigned long, unsigned long);

/* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */
extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat);
/* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */
extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat);
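/*
 * Illustrative sketch (editorial assumption about typical use): callers
 * convert between the unat-style collection saved on kernel entry and a
 * per-register bitmask, e.g.
 *
 *	unsigned long nat = ia64_get_scratch_nat_bits(pt, scratch_unat);
 *	int r9_is_nat = (nat >> 9) & 1;		bit N <=> rN, as noted above
 */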
extern void ia64_increment_ip (struct pt_regs *pt);
extern void ia64_decrement_ip (struct pt_regs *pt);

#endif /* !__KERNEL__ */

/* pt_all_user_regs is used for PTRACE_GETREGS and PTRACE_SETREGS */
struct pt_all_user_regs {
	unsigned long nat;
	unsigned long cr_iip;
	unsigned long cfm;
	unsigned long cr_ipsr;
	unsigned long pr;

	unsigned long gr[32];
	unsigned long br[8];
	unsigned long ar[128];
	struct ia64_fpreg fr[128];
};

#endif /* !__ASSEMBLY__ */
/* indices to application-registers array in pt_all_user_regs */
#define PT_AUR_RSC	16
#define PT_AUR_BSP	17
#define PT_AUR_BSPSTORE	18
#define PT_AUR_RNAT	19
#define PT_AUR_CCV	32
#define PT_AUR_UNAT	36
#define PT_AUR_FPSR	40
#define PT_AUR_PFS	64
#define PT_AUR_LC	65
#define PT_AUR_EC	66
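/*
 * Hedged example (editorial, not from the original header): a debugger
 * that has fetched a struct pt_all_user_regs via PTRACE_GETREGS indexes
 * the ar[] array with the PT_AUR_* constants above, e.g.
 *
 *	struct pt_all_user_regs uregs;
 *	... PTRACE_GETREGS fills uregs ...
 *	unsigned long bsp  = uregs.ar[PT_AUR_BSP];
 *	unsigned long pfs  = uregs.ar[PT_AUR_PFS];
 *	unsigned long fpsr = uregs.ar[PT_AUR_FPSR];
 */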
/*
 * The numbers chosen here are somewhat arbitrary but absolutely MUST
 * not overlap with any of the numbers assigned in <linux/ptrace.h>.
 */
#define PTRACE_SINGLEBLOCK	12	/* resume execution until next branch */
#define PTRACE_OLD_GETSIGINFO	13	/* (replaced by PTRACE_GETSIGINFO in <linux/ptrace.h>) */
#define PTRACE_OLD_SETSIGINFO	14	/* (replaced by PTRACE_SETSIGINFO in <linux/ptrace.h>) */
#define PTRACE_GETREGS		18	/* get all registers (pt_all_user_regs) in one shot */
#define PTRACE_SETREGS		19	/* set all registers (pt_all_user_regs) in one shot */

#define PTRACE_OLDSETOPTIONS	21

#endif /* _ASM_IA64_PTRACE_H */