/*
 * Memory allocator tracing
 *
 * Copyright (C) 2008 Eduard - Gabriel Munteanu
 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */
#include <linux/dcache.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/tracepoint.h>
#include <trace/kmemtrace.h>

#include "trace.h"
#include "trace_output.h"
/* Select an alternative, minimalistic output rather than the original one */
#define TRACE_KMEM_OPT_MINIMAL	0x1
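
/*
 * Presumably togglable at runtime through the ftrace options interface
 * while this tracer is active, e.g. (path assumes debugfs mounted at
 * /sys/kernel/debug; older kernels expose tracer flags through the
 * trace_options file rather than the options/ directory):
 *
 *	echo 1 > /sys/kernel/debug/tracing/options/kmem_minimalistic
 */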
static struct tracer_opt kmem_opts[] = {
	/* The minimalistic output is disabled by default */
	{ TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
	{ }
};

static struct tracer_flags kmem_tracer_flags = {
	.val = 0,
	.opts = kmem_opts
};
static struct trace_array *kmemtrace_array;
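
/*
 * Event flow: the probe callbacks below attach to the allocator
 * tracepoints (kmalloc, kmem_cache_alloc, kfree, ...), record each hit
 * as a kmemtrace_alloc_entry or kmemtrace_free_entry in the ftrace ring
 * buffer, and the print_line callbacks further down re-emit the entries
 * either as the original kmemtrace binary stream or as minimalistic
 * text, depending on TRACE_KMEM_OPT_MINIMAL.
 */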
/* Trace allocations */
static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
				   unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	struct ring_buffer_event *event;
	struct kmemtrace_alloc_entry *entry;
	struct trace_array *tr = kmemtrace_array;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);

	entry->ent.type = TRACE_KMEM_ALLOC;
	entry->type_id = type_id;
	entry->call_site = call_site;
	entry->ptr = ptr;
	entry->bytes_req = bytes_req;
	entry->bytes_alloc = bytes_alloc;
	entry->gfp_flags = gfp_flags;
	entry->node = node;

	ring_buffer_unlock_commit(tr->buffer, event);

	trace_wake_up();
}
static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
				  unsigned long call_site,
				  const void *ptr)
{
	struct ring_buffer_event *event;
	struct kmemtrace_free_entry *entry;
	struct trace_array *tr = kmemtrace_array;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);

	entry->ent.type = TRACE_KMEM_FREE;
	entry->type_id = type_id;
	entry->call_site = call_site;
	entry->ptr = ptr;

	ring_buffer_unlock_commit(tr->buffer, event);

	trace_wake_up();
}
static void kmemtrace_kmalloc(unsigned long call_site,
			      const void *ptr,
			      size_t bytes_req,
			      size_t bytes_alloc,
			      gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
				       const void *ptr,
				       size_t bytes_req,
				       size_t bytes_alloc,
				       gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmalloc_node(unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
					    const void *ptr,
					    size_t bytes_req,
					    size_t bytes_alloc,
					    gfp_t gfp_flags,
					    int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
}

static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
}
static int kmemtrace_start_probes(void)
{
	int err;

	err = register_trace_kmalloc(kmemtrace_kmalloc);
	if (err)
		return err;
	err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
	if (err)
		return err;
	err = register_trace_kmalloc_node(kmemtrace_kmalloc_node);
	if (err)
		return err;
	err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
	if (err)
		return err;
	err = register_trace_kfree(kmemtrace_kfree);
	if (err)
		return err;
	err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free);

	return err;
}
static void kmemtrace_stop_probes(void)
{
	unregister_trace_kmalloc(kmemtrace_kmalloc);
	unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
	unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
	unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
	unregister_trace_kfree(kmemtrace_kfree);
	unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
}
static int kmem_trace_init(struct trace_array *tr)
{
	int cpu;
	kmemtrace_array = tr;

	for_each_cpu_mask(cpu, cpu_possible_map)
		tracing_reset(tr, cpu);

	kmemtrace_start_probes();

	return 0;
}
static void kmem_trace_reset(struct trace_array *tr)
{
	kmemtrace_stop_probes();
}
static void kmemtrace_headers(struct seq_file *s)
{
	/* Don't need headers for the original kmemtrace output */
	if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
		return;

	seq_printf(s, "#\n");
	seq_printf(s, "# ALLOC  TYPE  REQ   GIVEN  FLAGS     "
			"      POINTER         NODE    CALLER\n");
	seq_printf(s, "# FREE   |      |     |       |       "
			"       |   |            |        |\n");
	seq_printf(s, "# |\n\n");
}
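
/*
 * An illustrative minimalistic line matching the header above (all
 * values made up):
 *
 *   +      K      32     64   000080d0   0xffff88003c4b2a40    -1   __kmalloc+0x3a/0xa0
 *
 * '+'/'-' marks alloc/free, K/C/P the allocator type, then the requested
 * and allocated sizes, gfp flags, object pointer, NUMA node and call site.
 */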
/*
 * The following functions give the original output from kmemtrace,
 * plus the origin CPU, since reordering occurs in-kernel now.
 */

#define KMEMTRACE_USER_ALLOC	0
#define KMEMTRACE_USER_FREE	1
struct kmemtrace_user_event {
	u8		event_id;
	u8		type_id;
	u16		event_size;
	u32		cpu;
	u64		timestamp;
	unsigned long	call_site;
	unsigned long	ptr;
};

struct kmemtrace_user_event_alloc {
	size_t		bytes_req;
	size_t		bytes_alloc;
	unsigned	gfp_flags;
	int		node;
};
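
/*
 * A minimal sketch of how a userspace reader might walk the resulting
 * binary stream (hypothetical helper, not shipped with the kernel).
 * It relies only on the layout above: every record starts with a
 * struct kmemtrace_user_event whose event_size spans the whole record,
 * and KMEMTRACE_USER_ALLOC records carry a trailing
 * struct kmemtrace_user_event_alloc payload. The cpu and timestamp
 * fields let userspace re-sort events gathered from multiple CPUs.
 *
 *	char *pos = buf;
 *
 *	while (pos + sizeof(struct kmemtrace_user_event) <= buf + len) {
 *		struct kmemtrace_user_event *ev = (void *)pos;
 *
 *		if (ev->event_id == KMEMTRACE_USER_ALLOC) {
 *			struct kmemtrace_user_event_alloc *payload =
 *						(void *)(ev + 1);
 *			... account payload->bytes_req/bytes_alloc ...
 *		} else {
 *			... KMEMTRACE_USER_FREE: ev->ptr was freed ...
 *		}
 *		pos += ev->event_size;
 *	}
 */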
static enum print_line_t
kmemtrace_print_alloc_user(struct trace_iterator *iter,
			   struct kmemtrace_alloc_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_user_event *ev;
	struct kmemtrace_user_event_alloc *ev_alloc;

	ev = trace_seq_reserve(s, sizeof(*ev));
	if (!ev)
		return TRACE_TYPE_PARTIAL_LINE;
	ev->event_id = KMEMTRACE_USER_ALLOC;
	ev->type_id = entry->type_id;
	ev->event_size = sizeof(*ev) + sizeof(*ev_alloc);
	ev->cpu = iter->cpu;
	ev->timestamp = iter->ts;
	ev->call_site = entry->call_site;
	ev->ptr = (unsigned long) entry->ptr;

	ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc));
	if (!ev_alloc)
		return TRACE_TYPE_PARTIAL_LINE;
	ev_alloc->bytes_req = entry->bytes_req;
	ev_alloc->bytes_alloc = entry->bytes_alloc;
	ev_alloc->gfp_flags = entry->gfp_flags;
	ev_alloc->node = entry->node;

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
kmemtrace_print_free_user(struct trace_iterator *iter,
			  struct kmemtrace_free_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_user_event *ev;

	ev = trace_seq_reserve(s, sizeof(*ev));
	if (!ev)
		return TRACE_TYPE_PARTIAL_LINE;
	ev->event_id = KMEMTRACE_USER_FREE;
	ev->type_id = entry->type_id;
	ev->event_size = sizeof(*ev);
	ev->cpu = iter->cpu;
	ev->timestamp = iter->ts;
	ev->call_site = entry->call_site;
	ev->ptr = (unsigned long) entry->ptr;

	return TRACE_TYPE_HANDLED;
}
/* The following two functions provide a more minimalistic output */
static enum print_line_t
kmemtrace_print_alloc_compress(struct trace_iterator *iter,
			       struct kmemtrace_alloc_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	int ret;

	/* Alloc entry */
	ret = trace_seq_printf(s, " + ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Type */
	switch (entry->type_id) {
	case KMEMTRACE_TYPE_KMALLOC:
		ret = trace_seq_printf(s, "K ");
		break;
	case KMEMTRACE_TYPE_CACHE:
		ret = trace_seq_printf(s, "C ");
		break;
	case KMEMTRACE_TYPE_PAGES:
		ret = trace_seq_printf(s, "P ");
		break;
	default:
		ret = trace_seq_printf(s, "? ");
	}

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Requested */
	ret = trace_seq_printf(s, "%4zu ", entry->bytes_req);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Allocated */
	ret = trace_seq_printf(s, "%4zu ", entry->bytes_alloc);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	/*
	 * Flags
	 * TODO: it would be better to print the names of the GFP flags
	 */
	ret = trace_seq_printf(s, "%08x ", entry->gfp_flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	/* Pointer to allocated */
	ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Node */
	ret = trace_seq_printf(s, "%4d ", entry->node);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Call site */
	ret = seq_print_ip_sym(s, entry->call_site, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (!trace_seq_printf(s, "\n"))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
kmemtrace_print_free_compress(struct trace_iterator *iter,
			      struct kmemtrace_free_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	int ret;

	/* Free entry */
	ret = trace_seq_printf(s, " - ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Type */
	switch (entry->type_id) {
	case KMEMTRACE_TYPE_KMALLOC:
		ret = trace_seq_printf(s, "K ");
		break;
	case KMEMTRACE_TYPE_CACHE:
		ret = trace_seq_printf(s, "C ");
		break;
	case KMEMTRACE_TYPE_PAGES:
		ret = trace_seq_printf(s, "P ");
		break;
	default:
		ret = trace_seq_printf(s, "? ");
	}

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Skip requested/allocated/flags */
	ret = trace_seq_printf(s, "                   ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Pointer to allocated */
	ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Skip node */
	ret = trace_seq_printf(s, "     ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Call site */
	ret = seq_print_ip_sym(s, entry->call_site, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (!trace_seq_printf(s, "\n"))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;

	switch (entry->type) {
	case TRACE_KMEM_ALLOC: {
		struct kmemtrace_alloc_entry *field;

		trace_assign_type(field, entry);
		if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
			return kmemtrace_print_alloc_compress(iter, field);
		else
			return kmemtrace_print_alloc_user(iter, field);
	}

	case TRACE_KMEM_FREE: {
		struct kmemtrace_free_entry *field;

		trace_assign_type(field, entry);
		if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
			return kmemtrace_print_free_compress(iter, field);
		else
			return kmemtrace_print_free_user(iter, field);
	}

	default:
		return TRACE_TYPE_UNHANDLED;
	}
}
static struct tracer kmem_tracer __read_mostly = {
	.name		= "kmemtrace",
	.init		= kmem_trace_init,
	.reset		= kmem_trace_reset,
	.print_line	= kmemtrace_print_line,
	.print_header	= kmemtrace_headers,
	.flags		= &kmem_tracer_flags
};
void kmemtrace_init(void)
{
	/* earliest opportunity to start kmem tracing */
}

static int __init init_kmem_tracer(void)
{
	return register_tracer(&kmem_tracer);
}

device_initcall(init_kmem_tracer);
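
/*
 * Usage sketch (paths assume debugfs mounted at /sys/kernel/debug):
 *
 *	echo kmemtrace > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/trace > kmemtrace.bin
 *
 * With the kmem_minimalistic option set, the same file carries the
 * human-readable form instead of the binary stream.
 */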