#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H
/* ptrace.h */
/* structs and defines to help the user use the ptrace system call. */

/* has the defines to get at the registers. */

#define PTRACE_TRACEME           0
#define PTRACE_PEEKTEXT          1
#define PTRACE_PEEKDATA          2
#define PTRACE_PEEKUSR           3
#define PTRACE_POKETEXT          4
#define PTRACE_POKEDATA          5
#define PTRACE_POKEUSR           6
#define PTRACE_CONT              7
#define PTRACE_KILL              8
#define PTRACE_SINGLESTEP        9

#define PTRACE_ATTACH           16
#define PTRACE_DETACH           17

#define PTRACE_SYSCALL          24

/* 0x4200-0x4300 are reserved for architecture-independent additions. */
#define PTRACE_SETOPTIONS       0x4200
#define PTRACE_GETEVENTMSG      0x4201
#define PTRACE_GETSIGINFO       0x4202
#define PTRACE_SETSIGINFO       0x4203

/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD   0x00000001
#define PTRACE_O_TRACEFORK      0x00000002
#define PTRACE_O_TRACEVFORK     0x00000004
#define PTRACE_O_TRACECLONE     0x00000008
#define PTRACE_O_TRACEEXEC      0x00000010
#define PTRACE_O_TRACEVFORKDONE 0x00000020
#define PTRACE_O_TRACEEXIT      0x00000040

#define PTRACE_O_MASK           0x0000007f

/* Wait extended result codes for the above trace options. */
#define PTRACE_EVENT_FORK       1
#define PTRACE_EVENT_VFORK      2
#define PTRACE_EVENT_CLONE      3
#define PTRACE_EVENT_EXEC       4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT       6
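
/*
 * Illustrative sketch (not part of the original header): how a user-space
 * tracer typically combines the request constants above with
 * PTRACE_SETOPTIONS and the PTRACE_EVENT_* codes, which are reported in
 * the upper bytes of the wait status.  Assumes the usual user-space
 * <sys/ptrace.h>, <sys/wait.h> and <unistd.h> interfaces; "prog" and
 * "args" are placeholders.
 *
 *	int status;
 *	pid_t child = fork();
 *
 *	if (child == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		execvp(prog, args);
 *		_exit(127);
 *	}
 *	waitpid(child, &status, 0);	(child stops with SIGTRAP at exec)
 *	ptrace(PTRACE_SETOPTIONS, child, NULL,
 *	       (void *)(PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK));
 *	ptrace(PTRACE_CONT, child, NULL, NULL);
 *	waitpid(child, &status, 0);
 *	if (WIFSTOPPED(status) &&
 *	    (status >> 8) == (SIGTRAP | (PTRACE_EVENT_FORK << 8))) {
 *		unsigned long new_pid;
 *		ptrace(PTRACE_GETEVENTMSG, child, NULL, &new_pid);
 *	}
 */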

#include <asm/ptrace.h>

#ifdef __KERNEL__
/*
 * Ptrace flags
 *
 * The ownership rules for task->ptrace, which holds the ptrace flags,
 * are simple.  When a task is running it owns its task->ptrace flags.
 * When a task is stopped the ptracer owns task->ptrace.
 */

#define PT_PTRACED          0x00000001
#define PT_DTRACE           0x00000002	/* delayed trace (used on m68k, i386) */
#define PT_TRACESYSGOOD     0x00000004
#define PT_PTRACE_CAP       0x00000008	/* ptracer can follow suid-exec */
#define PT_TRACE_FORK       0x00000010
#define PT_TRACE_VFORK      0x00000020
#define PT_TRACE_CLONE      0x00000040
#define PT_TRACE_EXEC       0x00000080
#define PT_TRACE_VFORK_DONE 0x00000100
#define PT_TRACE_EXIT       0x00000200

#define PT_TRACE_MASK       0x000003f4

/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT   31
#define PT_SINGLESTEP       (1<<PT_SINGLESTEP_BIT)
#define PT_BLOCKSTEP_BIT    30
#define PT_BLOCKSTEP        (1<<PT_BLOCKSTEP_BIT)

#include <linux/compiler.h>		/* For unlikely. */
#include <linux/sched.h>		/* For struct task_struct. */


extern long arch_ptrace(struct task_struct *child, long request, long addr, long data);
extern int ptrace_traceme(void);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern int ptrace_attach(struct task_struct *tsk);
extern int ptrace_detach(struct task_struct *, unsigned int);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_check_attach(struct task_struct *task, int kill);
extern int ptrace_request(struct task_struct *child, long request, long addr, long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
			  struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer);
#define PTRACE_MODE_READ   1
#define PTRACE_MODE_ATTACH 2
/* Returns 0 on success, -errno on denial. */
extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
/* Returns true on success, false on denial. */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
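
/*
 * Illustrative sketch (not part of the original header): kernel code that
 * exposes another task's state is expected to gate it on ptrace_may_access()
 * with the appropriate PTRACE_MODE_* value.  The helper below is made up
 * for illustration only.
 *
 *	static int example_show_task_info(struct task_struct *task)
 *	{
 *		if (!ptrace_may_access(task, PTRACE_MODE_READ))
 *			return -EPERM;
 *		...read-only inspection of task...
 *		return 0;
 *	}
 */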

static inline int ptrace_reparented(struct task_struct *child)
{
	return child->real_parent != child->parent;
}

static inline void ptrace_unlink(struct task_struct *child)
{
	if (unlikely(child->ptrace))
		__ptrace_unlink(child);
}

int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data);
int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data);

/**
 * task_ptrace - return %PT_* flags that apply to a task
 * @task:	pointer to &task_struct in question
 *
 * Returns the %PT_* flags that apply to @task.
 */
static inline int task_ptrace(struct task_struct *task)
{
	return task->ptrace;
}

/**
 * ptrace_event - possibly stop for a ptrace event notification
 * @mask:	%PT_* bit to check in @current->ptrace
 * @event:	%PTRACE_EVENT_* value to report if @mask is set
 * @message:	value for %PTRACE_GETEVENTMSG to return
 *
 * This checks the @mask bit to see if ptrace wants stops for this event.
 * If so we stop, reporting @event and @message to the ptrace parent.
 *
 * Returns nonzero if we did a ptrace notification, zero if not.
 *
 * Called without locks.
 */
static inline int ptrace_event(int mask, int event, unsigned long message)
{
	if (mask && likely(!(current->ptrace & mask)))
		return 0;
	current->ptrace_message = message;
	ptrace_notify((event << 8) | SIGTRAP);
	return 1;
}
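
/*
 * Illustrative sketch (not part of the original header): callers pair a
 * PT_TRACE_* mask with the matching PTRACE_EVENT_* code, e.g. (simplified
 * from the fork path, where "new_pid" stands for the child's pid):
 *
 *	ptrace_event(PT_TRACE_FORK, PTRACE_EVENT_FORK, new_pid);
 *
 * Passing a zero @mask reports the event unconditionally.
 */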

/**
 * ptrace_init_task - initialize ptrace state for a new child
 * @child:	new child task
 * @ptrace:	true if child should be ptrace'd by parent's tracer
 *
 * This is called immediately after adding @child to its parent's children
 * list.  @ptrace is false in the normal case, and true to ptrace @child.
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
	INIT_LIST_HEAD(&child->ptrace_entry);
	INIT_LIST_HEAD(&child->ptraced);
	child->parent = child->real_parent;
	child->ptrace = 0;
	if (unlikely(ptrace) && (current->ptrace & PT_PTRACED)) {
		child->ptrace = current->ptrace;
		__ptrace_link(child, current->parent);
	}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	atomic_set(&child->ptrace_bp_refcnt, 1);
#endif
}

/**
 * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
 * @task:	task in %EXIT_DEAD state
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static inline void ptrace_release_task(struct task_struct *task)
{
	BUG_ON(!list_empty(&task->ptraced));
	ptrace_unlink(task);
	BUG_ON(!list_empty(&task->ptrace_entry));
}

#ifndef force_successful_syscall_return
/*
 * System call handlers that, upon successful completion, need to return a
 * negative value should call force_successful_syscall_return() right before
 * returning.  On architectures where the syscall convention provides for a
 * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
 * others), this macro can be used to ensure that the error flag will not get
 * set.  On architectures which do not support a separate error flag, the
 * macro is a no-op and the spurious error condition needs to be filtered
 * out by some other means (e.g., in user-level, by passing an extra
 * argument to the syscall handler, or something along those lines).
 */
#define force_successful_syscall_return() do { } while (0)
#endif
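
/*
 * Illustrative sketch (not part of the original header): a handler whose
 * successful result may have the sign bit set calls the macro just before
 * returning, so arches with a separate error flag do not misreport success
 * as failure.  read_hw_counter() is a made-up helper used only here.
 *
 *	long example_get_raw_value(void)
 *	{
 *		long val = read_hw_counter();
 *
 *		force_successful_syscall_return();
 *		return val;
 *	}
 */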

/*
 * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
 *
 * These do-nothing inlines are used when the arch does not
 * implement single-step.  The kerneldoc comments are here
 * to document the interface for all arch definitions.
 */

#ifndef arch_has_single_step
/**
 * arch_has_single_step - does this CPU support user-mode single-step?
 *
 * If this is defined, then there must be function declarations or
 * inlines for user_enable_single_step() and user_disable_single_step().
 * arch_has_single_step() should evaluate to nonzero iff the machine
 * supports instruction single-step for user mode.
 * It can be a constant or it can test a CPU feature bit.
 */
#define arch_has_single_step()		(0)

/**
 * user_enable_single_step - single-step in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_single_step() has returned nonzero.
 * Set @task so that when it returns to user mode, it will trap after the
 * next single instruction executes.  If arch_has_block_step() is defined,
 * this must clear the effects of user_enable_block_step() too.
 */
static inline void user_enable_single_step(struct task_struct *task)
{
	BUG();			/* This can never be called. */
}

/**
 * user_disable_single_step - cancel user-mode single-step
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * Clear @task of the effects of user_enable_single_step() and
 * user_enable_block_step().  This can be called whether or not either
 * of those was ever called on @task, and even if arch_has_single_step()
 * returned zero.
 */
static inline void user_disable_single_step(struct task_struct *task)
{
}
#endif	/* arch_has_single_step */
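
/*
 * Illustrative sketch (not part of the original header): an architecture
 * that does support single-step overrides the stubs above from its own
 * <asm/ptrace.h>, roughly along these lines, with the two functions
 * setting/clearing whatever per-task trap state the hardware needs:
 *
 *	#define arch_has_single_step()	(1)
 *	extern void user_enable_single_step(struct task_struct *task);
 *	extern void user_disable_single_step(struct task_struct *task);
 */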

#ifndef arch_has_block_step
/**
 * arch_has_block_step - does this CPU support user-mode block-step?
 *
 * If this is defined, then there must be a function declaration or inline
 * for user_enable_block_step(), and arch_has_single_step() must be defined
 * too.  arch_has_block_step() should evaluate to nonzero iff the machine
 * supports step-until-branch for user mode.  It can be a constant or it
 * can test a CPU feature bit.
 */
#define arch_has_block_step()		(0)

/**
 * user_enable_block_step - step until branch in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_block_step() has returned nonzero,
 * and will never be called when single-instruction stepping is being used.
 * Set @task so that when it returns to user mode, it will trap after the
 * next branch or trap taken.
 */
static inline void user_enable_block_step(struct task_struct *task)
{
	BUG();			/* This can never be called. */
}
#endif	/* arch_has_block_step */

#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
extern void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info);
#else
static inline void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
}
#endif

#ifndef arch_ptrace_stop_needed
/**
 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
 * @code:	current->exit_code value ptrace will stop with
 * @info:	siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with the siglock held, to decide whether or not it's
 * necessary to release the siglock and call arch_ptrace_stop() with the
 * same @code and @info arguments.  It can be defined to a constant if
 * arch_ptrace_stop() is never required, or always is.  On machines where
 * this makes sense, it should be defined to a quick test to optimize out
 * calling arch_ptrace_stop() when it would be superfluous.  For example,
 * if the thread has not been back to user mode since the last stop, the
 * thread state might indicate that nothing needs to be done.
 */
#define arch_ptrace_stop_needed(code, info)	(0)
#endif

#ifndef arch_ptrace_stop
/**
 * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
 * @code:	current->exit_code value ptrace will stop with
 * @info:	siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with no locks held when arch_ptrace_stop_needed() has
 * just returned nonzero.  It is allowed to block, e.g. for user memory
 * access.  The arch can have machine-specific work to be done before
 * ptrace stops.  On ia64, register backing store gets written back to user
 * memory here.  Since this can be costly (requires dropping the siglock),
 * we only do it when the arch requires it for this particular stop, as
 * indicated by arch_ptrace_stop_needed().
 */
#define arch_ptrace_stop(code, info)		do { } while (0)
#endif
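
/*
 * Illustrative sketch (not part of the original header): an architecture
 * that needs pre-stop work overrides both macros from its <asm/ptrace.h>,
 * with a cheap test usable under the siglock and the potentially blocking
 * work done once it has been dropped.  The TIF flag and helper below are
 * made up for illustration only.
 *
 *	#define arch_ptrace_stop_needed(code, info) \
 *		(test_thread_flag(TIF_EXAMPLE_DIRTY_STATE))
 *	#define arch_ptrace_stop(code, info) \
 *		example_flush_user_state(current)
 */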

#ifndef arch_ptrace_untrace
/*
 * Do machine-specific work before untracing child.
 *
 * This is called for a normal detach as well as from ptrace_exit()
 * when the tracing task dies.
 *
 * Called with write_lock(&tasklist_lock) held.
 */
#define arch_ptrace_untrace(task)		do { } while (0)
#endif

extern int task_current_syscall(struct task_struct *target, long *callno,
				unsigned long args[6], unsigned int maxargs,
				unsigned long *sp, unsigned long *pc);

#ifdef CONFIG_HAVE_HW_BREAKPOINT
extern int ptrace_get_breakpoints(struct task_struct *tsk);
extern void ptrace_put_breakpoints(struct task_struct *tsk);
#else
static inline void ptrace_put_breakpoints(struct task_struct *tsk) { }
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

#endif /* __KERNEL__ */

#endif