include/linux/tracehook.h

/*
 * Tracing hooks
 *
 * Copyright (C) 2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 *
 * This file defines hook entry points called by core code where
 * user tracing/debugging support might need to do something.  These
 * entry points are called tracehook_*().  Each hook declared below
 * has a detailed kerneldoc comment giving the context (locking et
 * al) from which it is called, and the meaning of its return value.
 *
 * Each function here typically has only one call site, so it is ok
 * to have some nontrivial tracehook_*() inlines.  In all cases, the
 * fast path when no tracing is enabled should be very short.
 *
 * The purpose of this file and the tracehook_* layer is to consolidate
 * the interface that the kernel core and arch code uses to enable any
 * user debugging or tracing facility (such as ptrace).  The interfaces
 * here are carefully documented so that maintainers of core and arch
 * code do not need to think about the implementation details of the
 * tracing facilities.  Likewise, maintainers of the tracing code do not
 * need to understand all the calling core or arch code in detail, just
 * the documented circumstances of each call, such as locking conditions.
 *
 * If the calling core code changes so that locking is different, then
 * it is ok to change the interface documented here.  The maintainer of
 * the core code being changed should notify the maintainers of the
 * tracing code that they need to work out the change.
 *
 * Some tracehook_*() inlines take arguments that the current tracing
 * implementations might not necessarily use.  These function signatures
 * are chosen to pass in all the information that is on hand in the
 * caller and might conceivably be relevant to a tracer, so that the
 * core code won't have to be updated when tracing adds more features.
 * If a call site changes so that some of those parameters are no longer
 * already on hand without extra work, then the tracehook_* interface
 * can change so there is no make-work burden on the core code.  The
 * maintainer of the core code being changed should notify the maintainers
 * of the tracing code that they need to work out the change.
 */

#ifndef _LINUX_TRACEHOOK_H
#define _LINUX_TRACEHOOK_H	1

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/security.h>
struct linux_binprm;

/**
 * tracehook_expect_breakpoints - guess if task memory might be touched
 * @task:	current task, making a new mapping
 *
 * Return nonzero if @task is expected to want breakpoint insertion in
 * its memory at some point.  A zero return is no guarantee it won't
 * be done, but this is a hint that it's known to be likely.
 *
 * May be called with @task->mm->mmap_sem held for writing.
 */
static inline int tracehook_expect_breakpoints(struct task_struct *task)
{
	return (task_ptrace(task) & PT_PTRACED) != 0;
}

/*
 * ptrace report for syscall entry and exit looks identical.
 */
static inline void ptrace_report_syscall(struct pt_regs *regs)
{
	int ptrace = task_ptrace(current);

	if (!(ptrace & PT_PTRACED))
		return;

	ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));

	/*
	 * This isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}

/**
 * tracehook_report_syscall_entry - task is about to attempt a system call
 * @regs:	user register state of current task
 *
 * This will be called if %TIF_SYSCALL_TRACE has been set, when the
 * current task has just entered the kernel for a system call.
 * Full user register state is available here.  Changing the values
 * in @regs can affect the system call number and arguments to be tried.
 * It is safe to block here, preventing the system call from beginning.
 *
 * Returns zero normally, or nonzero if the calling arch code should abort
 * the system call.  That must prevent normal entry so no system call is
 * made.  If the task ever returns to user mode after this, its register
 * state is unspecified, but should be something harmless like an %ENOSYS
 * error return.  It should preserve enough information so that
 * syscall_rollback() can work (see asm-generic/syscall.h).
 *
 * Called without locks, just after entering kernel mode.
 */
static inline __must_check int tracehook_report_syscall_entry(
	struct pt_regs *regs)
{
	ptrace_report_syscall(regs);
	return 0;
}

/**
 * tracehook_report_syscall_exit - task has just finished a system call
 * @regs:	user register state of current task
 * @step:	nonzero if simulating single-step or block-step
 *
 * This will be called if %TIF_SYSCALL_TRACE has been set, when the
 * current task has just finished an attempted system call.  Full
 * user register state is available here.  It is safe to block here,
 * preventing signals from being processed.
 *
 * If @step is nonzero, this report is also in lieu of the normal
 * trap that would follow the system call instruction because
 * user_enable_block_step() or user_enable_single_step() was used.
 * In this case, %TIF_SYSCALL_TRACE might not be set.
 *
 * Called without locks, just before checking for pending signals.
 */
static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
{
	ptrace_report_syscall(regs);
}
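
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * an architecture's syscall tracing path might drive the two hooks above
 * roughly as follows.  The names do_syscall_trace_enter(),
 * do_syscall_trace_leave() and the TIF_SINGLESTEP test are stand-ins;
 * real ports differ in how they abort a syscall and compute @step.
 *
 *	asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
 *	{
 *		if (test_thread_flag(TIF_SYSCALL_TRACE) &&
 *		    tracehook_report_syscall_entry(regs))
 *			return -1;
 *		return 0;
 *	}
 *
 *	asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
 *	{
 *		int step = test_thread_flag(TIF_SINGLESTEP);
 *		if (step || test_thread_flag(TIF_SYSCALL_TRACE))
 *			tracehook_report_syscall_exit(regs, step);
 *	}
 */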

/**
 * tracehook_unsafe_exec - check for exec declared unsafe due to tracing
 * @task:	current task doing exec
 *
 * Return %LSM_UNSAFE_* bits applied to an exec because of tracing.
 *
 * Called with task_lock() held on @task.
 */
static inline int tracehook_unsafe_exec(struct task_struct *task)
{
	int unsafe = 0;
	int ptrace = task_ptrace(task);
	if (ptrace & PT_PTRACED) {
		if (ptrace & PT_PTRACE_CAP)
			unsafe |= LSM_UNSAFE_PTRACE_CAP;
		else
			unsafe |= LSM_UNSAFE_PTRACE;
	}
	return unsafe;
}
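
/*
 * Illustrative sketch (editor's addition): the exec path folds these bits
 * into the LSM "unsafe" state while it still holds task_lock() on the task.
 * The exact caller in fs/exec.c varies by kernel version; the bprm->unsafe
 * field shown is the one used after the credential rework, so treat this as
 * an approximation rather than the exact code:
 *
 *	task_lock(current);
 *	bprm->unsafe = tracehook_unsafe_exec(current);
 *	task_unlock(current);
 */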

/**
 * tracehook_tracer_task - return the task that is tracing the given task
 * @tsk:	task to consider
 *
 * Returns NULL if no one is tracing @tsk, or the &struct task_struct
 * pointer to its tracer.
 *
 * Must be called under rcu_read_lock().  The pointer returned might be kept
 * live only by RCU.  During exec, this may be called with task_lock()
 * held on @tsk, still held from when tracehook_unsafe_exec() was called.
 */
static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk)
{
	if (task_ptrace(tsk) & PT_PTRACED)
		return rcu_dereference(tsk->parent);
	return NULL;
}
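
/*
 * Illustrative sketch (editor's addition): because the returned pointer may
 * be kept alive only by RCU, callers outside the exec path are expected to
 * bracket the call with rcu_read_lock()/rcu_read_unlock() and use the result
 * only inside that critical section.  The use of task_pid_nr() below is just
 * an example consumer of the result:
 *
 *	rcu_read_lock();
 *	tracer = tracehook_tracer_task(task);
 *	if (tracer)
 *		tracer_pid = task_pid_nr(tracer);
 *	rcu_read_unlock();
 */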

/**
 * tracehook_report_exec - a successful exec was completed
 * @fmt:	&struct linux_binfmt that performed the exec
 * @bprm:	&struct linux_binprm containing exec details
 * @regs:	user-mode register state
 *
 * An exec just completed, we are shortly going to return to user mode.
 * The freshly initialized register state can be seen and changed in @regs.
 * The name, file and other pointers in @bprm are still on hand to be
 * inspected, but will be freed as soon as this returns.
 *
 * Called with no locks, but with some kernel resources held live
 * and a reference on @fmt->module.
 */
static inline void tracehook_report_exec(struct linux_binfmt *fmt,
					 struct linux_binprm *bprm,
					 struct pt_regs *regs)
{
	if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) &&
	    unlikely(task_ptrace(current) & PT_PTRACED))
		send_sig(SIGTRAP, current, 0);
}

/**
 * tracehook_report_exit - task has begun to exit
 * @exit_code:	pointer to value destined for @current->exit_code
 *
 * @exit_code points to the value passed to do_exit(), which tracing
 * might change here.  This is almost the first thing in do_exit(),
 * before freeing any resources or setting the %PF_EXITING flag.
 *
 * Called with no locks held.
 */
static inline void tracehook_report_exit(long *exit_code)
{
	ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code);
}

/**
 * tracehook_prepare_clone - prepare for new child to be cloned
 * @clone_flags:	%CLONE_* flags from clone/fork/vfork system call
 *
 * This is called before a new user task is to be cloned.
 * Its return value will be passed to tracehook_finish_clone().
 *
 * Called with no locks held.
 */
static inline int tracehook_prepare_clone(unsigned clone_flags)
{
	if (clone_flags & CLONE_UNTRACED)
		return 0;

	if (clone_flags & CLONE_VFORK) {
		if (current->ptrace & PT_TRACE_VFORK)
			return PTRACE_EVENT_VFORK;
	} else if ((clone_flags & CSIGNAL) != SIGCHLD) {
		if (current->ptrace & PT_TRACE_CLONE)
			return PTRACE_EVENT_CLONE;
	} else if (current->ptrace & PT_TRACE_FORK)
		return PTRACE_EVENT_FORK;

	return 0;
}

/**
 * tracehook_finish_clone - new child created and being attached
 * @child:	new child task
 * @clone_flags:	%CLONE_* flags from clone/fork/vfork system call
 * @trace:	return value from tracehook_prepare_clone()
 *
 * This is called immediately after adding @child to its parent's
 * children list.  The @trace value is that returned by
 * tracehook_prepare_clone().
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void tracehook_finish_clone(struct task_struct *child,
					  unsigned long clone_flags, int trace)
{
	ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace);
}

/**
 * tracehook_report_clone - in parent, new child is about to start running
 * @trace:	return value from tracehook_prepare_clone()
 * @regs:	parent's user register state
 * @clone_flags:	flags from parent's system call
 * @pid:	new child's PID in the parent's namespace
 * @child:	new child task
 *
 * Called after a child is set up, but before it has been started
 * running.  @trace is the value returned by tracehook_prepare_clone().
 * This is not a good place to block, because the child has not started
 * yet.  Suspend the child here if desired, and then block in
 * tracehook_report_clone_complete().  This must prevent the child from
 * self-reaping if tracehook_report_clone_complete() uses the @child
 * pointer; otherwise it might have died and been released by the time
 * tracehook_report_clone_complete() is called.
 *
 * Called with no locks held, but the child cannot run until this returns.
 */
static inline void tracehook_report_clone(int trace, struct pt_regs *regs,
					  unsigned long clone_flags,
					  pid_t pid, struct task_struct *child)
{
	if (unlikely(trace) || unlikely(clone_flags & CLONE_PTRACE)) {
		/*
		 * The child starts up with an immediate SIGSTOP.
		 */
		sigaddset(&child->pending.signal, SIGSTOP);
		set_tsk_thread_flag(child, TIF_SIGPENDING);
	}
}

/**
 * tracehook_report_clone_complete - new child is running
 * @trace:	return value from tracehook_prepare_clone()
 * @regs:	parent's user register state
 * @clone_flags:	flags from parent's system call
 * @pid:	new child's PID in the parent's namespace
 * @child:	child task, already running
 *
 * This is called just after the child has started running.  This is
 * just before the clone/fork syscall returns, or blocks for vfork
 * child completion if @clone_flags has the %CLONE_VFORK bit set.
 * The @child pointer may be invalid if a self-reaping child died and
 * tracehook_report_clone() took no action to prevent it from self-reaping.
 *
 * Called with no locks held.
 */
static inline void tracehook_report_clone_complete(int trace,
						   struct pt_regs *regs,
						   unsigned long clone_flags,
						   pid_t pid,
						   struct task_struct *child)
{
	if (unlikely(trace))
		ptrace_event(0, trace, pid);
}

/**
 * tracehook_report_vfork_done - vfork parent's child has exited or exec'd
 * @child:	child task, already running
 * @pid:	new child's PID in the parent's namespace
 *
 * Called after a %CLONE_VFORK parent has waited for the child to complete.
 * The clone/vfork system call will return immediately after this.
 * The @child pointer may be invalid if a self-reaping child died and
 * tracehook_report_clone() took no action to prevent it from self-reaping.
 *
 * Called with no locks held.
 */
static inline void tracehook_report_vfork_done(struct task_struct *child,
					       pid_t pid)
{
	ptrace_event(PT_TRACE_VFORK_DONE, PTRACE_EVENT_VFORK_DONE, pid);
}
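
/*
 * Illustrative sketch (editor's addition): the order in which the core fork
 * path (kernel/fork.c) is expected to invoke the clone-related hooks above,
 * heavily simplified, with error handling and the CLONE_STOPPED case omitted:
 *
 *	trace = tracehook_prepare_clone(clone_flags);
 *	p = copy_process(clone_flags, ..., trace);
 *		(copy_process() calls tracehook_finish_clone() under the locks)
 *	tracehook_report_clone(trace, regs, clone_flags, nr, p);
 *	wake_up_new_task(p, clone_flags);
 *	tracehook_report_clone_complete(trace, regs, clone_flags, nr, p);
 *	if (clone_flags & CLONE_VFORK) {
 *		wait_for_completion(&vfork);
 *		tracehook_report_vfork_done(p, nr);
 *	}
 */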

/**
 * tracehook_prepare_release_task - task is being reaped, clean up tracing
 * @task:	task in %EXIT_DEAD state
 *
 * This is called in release_task() just before @task gets finally reaped
 * and freed.  This would be the ideal place to remove and clean up any
 * tracing-related state for @task.
 *
 * Called with no locks held.
 */
static inline void tracehook_prepare_release_task(struct task_struct *task)
{
}

/**
 * tracehook_finish_release_task - final tracing clean-up
 * @task:	task in %EXIT_DEAD state
 *
 * This is called in release_task() while @task is in the middle of
 * being reaped.  After this, there must be no tracing entanglements.
 *
 * Called with write_lock_irq(&tasklist_lock) held.
 */
static inline void tracehook_finish_release_task(struct task_struct *task)
{
	ptrace_release_task(task);
}

/**
 * tracehook_signal_handler - signal handler setup is complete
 * @sig:	number of signal being delivered
 * @info:	siginfo_t of signal being delivered
 * @ka:		sigaction setting that chose the handler
 * @regs:	user register state
 * @stepping:	nonzero if debugger single-step or block-step in use
 *
 * Called by the arch code after a signal handler has been set up.
 * Register and stack state reflects the user handler about to run.
 * Signal mask changes have already been made.
 *
 * Called without locks, shortly before returning to user mode
 * (or handling more signals).
 */
static inline void tracehook_signal_handler(int sig, siginfo_t *info,
					    const struct k_sigaction *ka,
					    struct pt_regs *regs, int stepping)
{
	if (stepping)
		ptrace_notify(SIGTRAP);
}
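
/*
 * Illustrative sketch (editor's addition): arch signal-delivery code is
 * expected to call the hook above right after building the user-mode frame
 * and updating the signal mask.  The setup_rt_frame() name and the
 * TIF_SINGLESTEP test are stand-ins for whatever the port really uses:
 *
 *	if (setup_rt_frame(sig, ka, info, oldset, regs) == 0) {
 *		spin_lock_irq(&current->sighand->siglock);
 *		sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
 *		sigaddset(&current->blocked, sig);
 *		recalc_sigpending();
 *		spin_unlock_irq(&current->sighand->siglock);
 *		tracehook_signal_handler(sig, info, ka, regs,
 *					 test_thread_flag(TIF_SINGLESTEP));
 *	}
 */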

/**
 * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal
 * @task:	task receiving the signal
 * @sig:	signal number being sent
 * @handler:	%SIG_IGN or %SIG_DFL
 *
 * Return zero iff tracing doesn't care to examine this ignored signal,
 * so it can short-circuit normal delivery and never even get queued.
 * Either @handler is %SIG_DFL and @sig's default is ignore, or it's %SIG_IGN.
 *
 * Called with @task->sighand->siglock held.
 */
static inline int tracehook_consider_ignored_signal(struct task_struct *task,
						    int sig,
						    void __user *handler)
{
	return (task_ptrace(task) & PT_PTRACED) != 0;
}

/**
 * tracehook_consider_fatal_signal - suppress special handling of fatal signal
 * @task:	task receiving the signal
 * @sig:	signal number being sent
 * @handler:	%SIG_DFL or %SIG_IGN
 *
 * Return nonzero to prevent special handling of this termination signal.
 * Normally @handler is %SIG_DFL.  It can be %SIG_IGN if @sig is ignored,
 * in which case force_sig() is about to reset it to %SIG_DFL.
 * When this returns zero, this signal might cause a quick termination
 * that does not give the debugger a chance to intercept the signal.
 *
 * Called with or without @task->sighand->siglock held.
 */
static inline int tracehook_consider_fatal_signal(struct task_struct *task,
						  int sig,
						  void __user *handler)
{
	return (task_ptrace(task) & PT_PTRACED) != 0;
}

/**
 * tracehook_force_sigpending - let tracing force signal_pending(current) on
 *
 * Called when recomputing our signal_pending() flag.  Return nonzero
 * to force the signal_pending() flag on, so that tracehook_get_signal()
 * will be called before the next return to user mode.
 *
 * Called with @current->sighand->siglock held.
 */
static inline int tracehook_force_sigpending(void)
{
	return 0;
}

/**
 * tracehook_get_signal - deliver synthetic signal to traced task
 * @task:	@current
 * @regs:	task_pt_regs(@current)
 * @info:	details of synthetic signal
 * @return_ka:	sigaction for synthetic signal
 *
 * Return zero to check for a real pending signal normally.
 * Return -1 after releasing the siglock to repeat the check.
 * Return a signal number to induce an artificial signal delivery,
 * setting *@info and *@return_ka to specify its details and behavior.
 *
 * The @return_ka->sa_handler value controls the disposition of the
 * signal, no matter the signal number.  For %SIG_DFL, the return value
 * is a representative signal to indicate the behavior (e.g. %SIGTERM
 * for death, %SIGQUIT for core dump, %SIGSTOP for job control stop,
 * %SIGTSTP for stop unless in an orphaned pgrp), but the signal number
 * reported will be @info->si_signo instead.
 *
 * Called with @task->sighand->siglock held, before dequeuing pending signals.
 */
static inline int tracehook_get_signal(struct task_struct *task,
				       struct pt_regs *regs,
				       siginfo_t *info,
				       struct k_sigaction *return_ka)
{
	return 0;
}

/**
 * tracehook_notify_jctl - report about job control stop/continue
 * @notify:	nonzero if this is the last thread in the group to stop
 * @why:	%CLD_STOPPED or %CLD_CONTINUED
 *
 * This is called when we might call do_notify_parent_cldstop().
 * It's called when about to stop for job control; we are already in
 * %TASK_STOPPED state, about to call schedule().  It's also called when
 * a delayed %CLD_STOPPED or %CLD_CONTINUED report is ready to be made.
 *
 * Return nonzero to generate a %SIGCHLD with @why, which is
 * normal if @notify is nonzero.
 *
 * Called with no locks held.
 */
static inline int tracehook_notify_jctl(int notify, int why)
{
	return notify || (current->ptrace & PT_PTRACED);
}

#define DEATH_REAP			-1
#define DEATH_DELAYED_GROUP_LEADER	-2

/**
 * tracehook_notify_death - task is dead, ready to notify parent
 * @task:	@current task now exiting
 * @death_cookie:	value to pass to tracehook_report_death()
 * @group_dead:	nonzero if this was the last thread in the group to die
 *
 * A return value >= 0 means call do_notify_parent() with that signal
 * number.  A negative return value can be %DEATH_REAP to self-reap right
 * now, or %DEATH_DELAYED_GROUP_LEADER to become a zombie without notifying
 * our parent.  Note that a return value of 0 means a do_notify_parent()
 * call that sends no signal, but still wakes up a parent blocked in wait*().
 *
 * Called with write_lock_irq(&tasklist_lock) held.
 */
static inline int tracehook_notify_death(struct task_struct *task,
					 void **death_cookie, int group_dead)
{
	if (task->exit_signal == -1)
		return task->ptrace ? SIGCHLD : DEATH_REAP;

	/*
	 * If something other than our normal parent is ptracing us, then
	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
	 * only has special meaning to our real parent.
	 */
	if (thread_group_empty(task) && !ptrace_reparented(task))
		return task->exit_signal;

	return task->ptrace ? SIGCHLD : DEATH_DELAYED_GROUP_LEADER;
}
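
/*
 * Illustrative sketch (editor's addition): exit_notify() in kernel/exit.c is
 * expected to consume the return value roughly like this, simplified and with
 * the bookkeeping between the two hook calls elided:
 *
 *	signal = tracehook_notify_death(tsk, &cookie, group_dead);
 *	if (signal >= 0)
 *		signal = do_notify_parent(tsk, signal);
 *	tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;
 *	...
 *	tracehook_report_death(tsk, signal, cookie, group_dead);
 *	if (signal == DEATH_REAP)
 *		release_task(tsk);
 */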

/**
 * tracehook_report_death - task is dead and ready to be reaped
 * @task:	@current task now exiting
 * @signal:	return value from tracehook_notify_death()
 * @death_cookie:	value passed back from tracehook_notify_death()
 * @group_dead:	nonzero if this was the last thread in the group to die
 *
 * Thread has just become a zombie or is about to self-reap.  If positive,
 * @signal is the signal number just sent to the parent (usually %SIGCHLD).
 * If @signal is %DEATH_REAP, this thread will self-reap.  If @signal is
 * %DEATH_DELAYED_GROUP_LEADER, this is a delayed_group_leader() zombie.
 * The @death_cookie was passed back by tracehook_notify_death().
 *
 * If normal reaping is not inhibited, @task->exit_state might be changing
 * in parallel.
 *
 * Called without locks.
 */
static inline void tracehook_report_death(struct task_struct *task,
					  int signal, void *death_cookie,
					  int group_dead)
{
}

#ifdef TIF_NOTIFY_RESUME
/**
 * set_notify_resume - cause tracehook_notify_resume() to be called
 * @task:	task that will call tracehook_notify_resume()
 *
 * Calling this arranges that @task will call tracehook_notify_resume()
 * before returning to user mode.  If it's already running in user mode,
 * it will enter the kernel and call tracehook_notify_resume() soon.
 * If it's blocked, it will not be woken.
 */
static inline void set_notify_resume(struct task_struct *task)
{
	if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME))
		kick_process(task);
}

/**
 * tracehook_notify_resume - report when about to return to user mode
 * @regs:	user-mode registers of @current task
 *
 * This is called when %TIF_NOTIFY_RESUME has been set.  Now we are
 * about to return to user mode, and the user state in @regs can be
 * inspected or adjusted.  The caller in arch code has cleared
 * %TIF_NOTIFY_RESUME before the call.  If the flag gets set again
 * asynchronously, this will be called again before we return to
 * user mode.
 *
 * Called without locks.
 */
static inline void tracehook_notify_resume(struct pt_regs *regs)
{
}
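
/*
 * Illustrative sketch (editor's addition): an arch's return-to-user work
 * function (commonly named do_notify_resume()) is expected to clear the flag
 * and then call the hook; the exact flag plumbing is arch-specific:
 *
 *	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 *		clear_thread_flag(TIF_NOTIFY_RESUME);
 *		tracehook_notify_resume(regs);
 *	}
 */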
#endif	/* TIF_NOTIFY_RESUME */

#endif	/* <linux/tracehook.h> */