ftrace: add quick function trace stop
include/linux/ftrace.h
#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/linkage.h>
#include <linux/fs.h>
#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *filp, void __user *buffer, size_t *lenp,
		     loff_t *ppos);
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

struct ftrace_ops {
	ftrace_func_t		func;
	struct ftrace_ops	*next;
};

extern int function_trace_stop;
/**
 * ftrace_stop - stop function tracer.
 *
 * A quick way to stop the function tracer. Note this is an on/off switch,
 * it is not something that is recursive like preempt_disable.
 * This does not disable the calling of mcount, it only stops the
 * calling of functions from mcount.
 */
static inline void ftrace_stop(void)
{
	function_trace_stop = 1;
}
/**
 * ftrace_start - start the function tracer.
 *
 * This function is the inverse of ftrace_stop. This does not enable
 * the function tracing if the function tracer is disabled. This only
 * sets the function tracer flag to continue calling the functions
 * from mcount.
 */
static inline void ftrace_start(void)
{
	function_trace_stop = 0;
}
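
/*
 * Illustrative sketch (not part of this header): a caller that must not
 * have tracer callbacks run while it executes can bracket the region with
 * ftrace_stop()/ftrace_start().  The function name below is hypothetical.
 */
#if 0
static void example_no_trace_region(void)
{
	ftrace_stop();		/* mcount is still called, callbacks are not */
	/* ... code that must not hit the tracer callbacks ... */
	ftrace_start();		/* resume calling callbacks from mcount */
}
#endif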
/*
 * The ftrace_ops must be static and should also
 * be read_mostly. These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);
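
/*
 * Illustrative sketch (not part of this header): registering a callback
 * through a static, read-mostly ftrace_ops as the comment above asks for.
 * The callback, ops and init/exit names are hypothetical.
 */
#if 0
static void example_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* called from mcount for every traced function */
}

static struct ftrace_ops example_ops __read_mostly = {
	.func = example_trace_func,
};

static int __init example_init(void)
{
	return register_ftrace_function(&example_ops);
}

static void __exit example_exit(void)
{
	/* never free example_ops or touch its next pointer after this */
	unregister_ftrace_function(&example_ops);
}
#endif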
extern void ftrace_stub(unsigned long a0, unsigned long a1);

#else /* !CONFIG_FUNCTION_TRACER */
# define register_ftrace_function(ops) do { } while (0)
# define unregister_ftrace_function(ops) do { } while (0)
# define clear_ftrace_function(ops) do { } while (0)
static inline void ftrace_kill(void) { }
static inline void ftrace_stop(void) { }
static inline void ftrace_start(void) { }
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_DYNAMIC_FTRACE

enum {
	FTRACE_FL_FREE		= (1 << 0),
	FTRACE_FL_FAILED	= (1 << 1),
	FTRACE_FL_FILTER	= (1 << 2),
	FTRACE_FL_ENABLED	= (1 << 3),
	FTRACE_FL_NOTRACE	= (1 << 4),
	FTRACE_FL_CONVERTED	= (1 << 5),
	FTRACE_FL_FROZEN	= (1 << 6),
};

struct dyn_ftrace {
	struct list_head	list;
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
};
int ftrace_force_update(void);
void ftrace_set_filter(unsigned char *buf, int len, int reset);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern unsigned char *ftrace_nop_replace(void);
extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr);
extern int ftrace_dyn_arch_init(void *data);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_call(void);
extern void mcount_call(void);

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);
/**
 * ftrace_modify_code - modify code segment
 * @ip: the address of the code segment
 * @old_code: the contents of what is expected to be there
 * @new_code: the code to patch in
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
			      unsigned char *new_code);
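
/*
 * Illustrative sketch (not part of this header): one way an architecture
 * could implement the read/compare/write contract above, assuming the
 * generic probe_kernel_read()/probe_kernel_write() helpers and an
 * arch-defined MCOUNT_INSN_SIZE.  This is not the implementation of any
 * particular arch.
 */
#if 0
int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		       unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/* read the text we are about to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the old instruction with the new one */
	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}
#endif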
extern int skip_trace(unsigned long ip);

extern void ftrace_release(void *start, unsigned long size);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);

#else
# define skip_trace(ip) ({ 0; })
# define ftrace_force_update() ({ 0; })
# define ftrace_set_filter(buf, len, reset) do { } while (0)
# define ftrace_disable_daemon() do { } while (0)
# define ftrace_enable_daemon() do { } while (0)
static inline void ftrace_release(void *start, unsigned long size) { }
#endif /* CONFIG_DYNAMIC_FTRACE */
/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}
/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
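
/*
 * Illustrative sketch (not part of this header): the save/restore pair is
 * meant to bracket a region, with the caller providing its own
 * serialization as noted above.  The function name is hypothetical.
 */
#if 0
static void example_without_tracing(void)
{
	int saved = __ftrace_enabled_save();	/* clears ftrace_enabled */

	/* ... work that should not be traced ... */

	__ftrace_enabled_restore(saved);	/* put the old value back */
}
#endif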
#ifdef CONFIG_FRAME_POINTER
/* TODO: need to fix this for ARM */
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
#else
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 0UL
# define CALLER_ADDR2 0UL
# define CALLER_ADDR3 0UL
# define CALLER_ADDR4 0UL
# define CALLER_ADDR5 0UL
# define CALLER_ADDR6 0UL
#endif
#ifdef CONFIG_IRQSOFF_TRACER
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
# define time_hardirqs_on(a0, a1)	do { } while (0)
# define time_hardirqs_off(a0, a1)	do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
# define trace_preempt_on(a0, a1)	do { } while (0)
# define trace_preempt_off(a0, a1)	do { } while (0)
#endif
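
/*
 * Illustrative sketch (not part of this header): the CALLER_ADDRn macros
 * above expand to return addresses up the call chain (or 0 when frame
 * pointers are unavailable), e.g. for handing a call site to one of the
 * latency tracer hooks.  The helper name below is hypothetical.
 */
#if 0
static void example_mark_preempt_off(void)
{
	/* a0: who called this helper, a1: that caller's caller */
	trace_preempt_off(CALLER_ADDR0, CALLER_ADDR1);
}
#endif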
#ifdef CONFIG_TRACING
extern int ftrace_dump_on_oops;

extern void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);

/**
 * ftrace_printk - printf formatting in the ftrace buffer
 * @fmt: the printf format for printing
 *
 * Note: __ftrace_printk is an internal function for ftrace_printk and
 *       the @ip is passed in via the ftrace_printk macro.
 *
 * This function allows a kernel developer to debug fast path sections
 * that printk is not appropriate for. By scattering in various
 * printk-like tracing in the code, a developer can quickly see
 * where problems are occurring.
 *
 * This is intended as a debugging tool for the developer only.
 * Please refrain from leaving ftrace_printks scattered around in
 * your code.
 */
# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
extern int
__ftrace_printk(unsigned long ip, const char *fmt, ...)
	__attribute__ ((format (printf, 2, 3)));
extern void ftrace_dump(void);
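
/*
 * Illustrative sketch (not part of this header): ftrace_printk() is used
 * like printk(), but the output lands in the ftrace ring buffer instead of
 * the console.  The function and parameter names are hypothetical.
 */
#if 0
static void example_fast_path(int cpu, unsigned long flags)
{
	/* shows up in the trace output rather than the kernel log */
	ftrace_printk("cpu %d entered fast path, flags=%lx\n", cpu, flags);
}
#endif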
#else
static inline void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
static inline int
ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0)));

static inline int
ftrace_printk(const char *fmt, ...)
{
	return 0;
}
static inline void ftrace_dump(void) { }
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
extern void ftrace_init_module(unsigned long *start, unsigned long *end);
#else
static inline void ftrace_init(void) { }
static inline void
ftrace_init_module(unsigned long *start, unsigned long *end) { }
#endif
/*
 * Structure which defines the trace of an initcall.
 * You don't have to fill the func field since it is
 * only used internally by the tracer.
 */
struct boot_trace {
	pid_t			caller;
	char			func[KSYM_NAME_LEN];
	int			result;
	unsigned long long	duration;		/* usecs */
	ktime_t			calltime;
	ktime_t			rettime;
};
#ifdef CONFIG_BOOT_TRACER
/* Append the trace to the ring buffer */
extern void trace_boot(struct boot_trace *it, initcall_t fn);

/* Tells the tracer that smp_pre_initcall is finished,
 * so we can start the tracing.
 */
extern void start_boot_trace(void);

/* Resume the tracing of other necessary events
 * such as sched switches.
 */
extern void enable_boot_trace(void);

/* Suspend this tracing. Actually, only the sched_switches tracing has
 * to be suspended; initcalls don't need it.
 */
extern void disable_boot_trace(void);
#else
static inline void trace_boot(struct boot_trace *it, initcall_t fn) { }
static inline void start_boot_trace(void) { }
static inline void enable_boot_trace(void) { }
static inline void disable_boot_trace(void) { }
#endif
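
/*
 * Illustrative sketch (not part of this header): roughly how an initcall
 * wrapper could fill a boot_trace record around an initcall and hand it to
 * trace_boot().  The wrapper name is hypothetical and the duration
 * conversion shown (ns >> 10) is only an approximation of microseconds.
 */
#if 0
static int example_do_one_initcall(initcall_t fn)
{
	struct boot_trace it;

	it.caller = task_pid_nr(current);
	it.calltime = ktime_get();
	it.result = fn();
	it.rettime = ktime_get();
	/* nanoseconds >> 10 is roughly microseconds */
	it.duration = (unsigned long long)
		ktime_to_ns(ktime_sub(it.rettime, it.calltime)) >> 10;

	trace_boot(&it, fn);		/* append to the ring buffer */
	return it.result;
}
#endif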
#endif /* _LINUX_FTRACE_H */