2 * Memory allocator tracing
4 * Copyright (C) 2008 Eduard - Gabriel Munteanu
5 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
6 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
9 #include <linux/dcache.h>
10 #include <linux/debugfs.h>
12 #include <linux/seq_file.h>
13 #include <linux/tracepoint.h>
14 #include <trace/kmemtrace.h>
17 #include "trace_output.h"
19 /* Select an alternative, minimalistic output than the original one */
20 #define TRACE_KMEM_OPT_MINIMAL 0x1
22 static struct tracer_opt kmem_opts
[] = {
23 /* Default disable the minimalistic output */
24 { TRACER_OPT(kmem_minimalistic
, TRACE_KMEM_OPT_MINIMAL
) },
28 static struct tracer_flags kmem_tracer_flags
= {
33 static struct trace_array
*kmemtrace_array
;
35 /* Trace allocations */
36 static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id
,
37 unsigned long call_site
,
44 struct ring_buffer_event
*event
;
45 struct kmemtrace_alloc_entry
*entry
;
46 struct trace_array
*tr
= kmemtrace_array
;
48 event
= ring_buffer_lock_reserve(tr
->buffer
, sizeof(*entry
));
51 entry
= ring_buffer_event_data(event
);
52 tracing_generic_entry_update(&entry
->ent
, 0, 0);
54 entry
->ent
.type
= TRACE_KMEM_ALLOC
;
55 entry
->type_id
= type_id
;
56 entry
->call_site
= call_site
;
58 entry
->bytes_req
= bytes_req
;
59 entry
->bytes_alloc
= bytes_alloc
;
60 entry
->gfp_flags
= gfp_flags
;
63 ring_buffer_unlock_commit(tr
->buffer
, event
);
68 static inline void kmemtrace_free(enum kmemtrace_type_id type_id
,
69 unsigned long call_site
,
72 struct ring_buffer_event
*event
;
73 struct kmemtrace_free_entry
*entry
;
74 struct trace_array
*tr
= kmemtrace_array
;
76 event
= ring_buffer_lock_reserve(tr
->buffer
, sizeof(*entry
));
79 entry
= ring_buffer_event_data(event
);
80 tracing_generic_entry_update(&entry
->ent
, 0, 0);
82 entry
->ent
.type
= TRACE_KMEM_FREE
;
83 entry
->type_id
= type_id
;
84 entry
->call_site
= call_site
;
87 ring_buffer_unlock_commit(tr
->buffer
, event
);
92 static void kmemtrace_kmalloc(unsigned long call_site
,
98 kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC
, call_site
, ptr
,
99 bytes_req
, bytes_alloc
, gfp_flags
, -1);
102 static void kmemtrace_kmem_cache_alloc(unsigned long call_site
,
108 kmemtrace_alloc(KMEMTRACE_TYPE_CACHE
, call_site
, ptr
,
109 bytes_req
, bytes_alloc
, gfp_flags
, -1);
112 static void kmemtrace_kmalloc_node(unsigned long call_site
,
119 kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC
, call_site
, ptr
,
120 bytes_req
, bytes_alloc
, gfp_flags
, node
);
123 static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site
,
130 kmemtrace_alloc(KMEMTRACE_TYPE_CACHE
, call_site
, ptr
,
131 bytes_req
, bytes_alloc
, gfp_flags
, node
);
134 static void kmemtrace_kfree(unsigned long call_site
, const void *ptr
)
136 kmemtrace_free(KMEMTRACE_TYPE_KMALLOC
, call_site
, ptr
);
139 static void kmemtrace_kmem_cache_free(unsigned long call_site
, const void *ptr
)
141 kmemtrace_free(KMEMTRACE_TYPE_CACHE
, call_site
, ptr
);
144 static int kmemtrace_start_probes(void)
148 err
= register_trace_kmalloc(kmemtrace_kmalloc
);
151 err
= register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc
);
154 err
= register_trace_kmalloc_node(kmemtrace_kmalloc_node
);
157 err
= register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node
);
160 err
= register_trace_kfree(kmemtrace_kfree
);
163 err
= register_trace_kmem_cache_free(kmemtrace_kmem_cache_free
);
168 static void kmemtrace_stop_probes(void)
170 unregister_trace_kmalloc(kmemtrace_kmalloc
);
171 unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc
);
172 unregister_trace_kmalloc_node(kmemtrace_kmalloc_node
);
173 unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node
);
174 unregister_trace_kfree(kmemtrace_kfree
);
175 unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free
);
178 static int kmem_trace_init(struct trace_array
*tr
)
181 kmemtrace_array
= tr
;
183 for_each_cpu_mask(cpu
, cpu_possible_map
)
184 tracing_reset(tr
, cpu
);
186 kmemtrace_start_probes();
/* Tracer reset callback: detach all allocator probes. */
static void kmem_trace_reset(struct trace_array *tr)
{
	kmemtrace_stop_probes();
}
196 static void kmemtrace_headers(struct seq_file
*s
)
198 /* Don't need headers for the original kmemtrace output */
199 if (!(kmem_tracer_flags
.val
& TRACE_KMEM_OPT_MINIMAL
))
202 seq_printf(s
, "#\n");
203 seq_printf(s
, "# ALLOC TYPE REQ GIVEN FLAGS "
204 " POINTER NODE CALLER\n");
205 seq_printf(s
, "# FREE | | | | "
207 seq_printf(s
, "# |\n\n");
/*
 * The following functions give the original output from kmemtrace,
 * plus the origin CPU, since reordering occurs in-kernel now.
 */

/* Event ids written by kmemtrace_print_{alloc,free}_user() below */
#define KMEMTRACE_USER_ALLOC 0
#define KMEMTRACE_USER_FREE 1
218 struct kmemtrace_user_event
{
224 unsigned long call_site
;
228 struct kmemtrace_user_event_alloc
{
235 static enum print_line_t
236 kmemtrace_print_alloc_user(struct trace_iterator
*iter
,
237 struct kmemtrace_alloc_entry
*entry
)
239 struct trace_seq
*s
= &iter
->seq
;
240 struct kmemtrace_user_event
*ev
;
241 struct kmemtrace_user_event_alloc
*ev_alloc
;
243 ev
= trace_seq_reserve(s
, sizeof(*ev
));
245 return TRACE_TYPE_PARTIAL_LINE
;
246 ev
->event_id
= KMEMTRACE_USER_ALLOC
;
247 ev
->type_id
= entry
->type_id
;
248 ev
->event_size
= sizeof(*ev
) + sizeof(*ev_alloc
);
250 ev
->timestamp
= iter
->ts
;
251 ev
->call_site
= entry
->call_site
;
252 ev
->ptr
= (unsigned long) entry
->ptr
;
254 ev_alloc
= trace_seq_reserve(s
, sizeof(*ev_alloc
));
256 return TRACE_TYPE_PARTIAL_LINE
;
257 ev_alloc
->bytes_req
= entry
->bytes_req
;
258 ev_alloc
->bytes_alloc
= entry
->bytes_alloc
;
259 ev_alloc
->gfp_flags
= entry
->gfp_flags
;
260 ev_alloc
->node
= entry
->node
;
262 return TRACE_TYPE_HANDLED
;
265 static enum print_line_t
266 kmemtrace_print_free_user(struct trace_iterator
*iter
,
267 struct kmemtrace_free_entry
*entry
)
269 struct trace_seq
*s
= &iter
->seq
;
270 struct kmemtrace_user_event
*ev
;
272 ev
= trace_seq_reserve(s
, sizeof(*ev
));
274 return TRACE_TYPE_PARTIAL_LINE
;
275 ev
->event_id
= KMEMTRACE_USER_FREE
;
276 ev
->type_id
= entry
->type_id
;
277 ev
->event_size
= sizeof(*ev
);
279 ev
->timestamp
= iter
->ts
;
280 ev
->call_site
= entry
->call_site
;
281 ev
->ptr
= (unsigned long) entry
->ptr
;
283 return TRACE_TYPE_HANDLED
;
286 /* The two other following provide a more minimalistic output */
287 static enum print_line_t
288 kmemtrace_print_alloc_compress(struct trace_iterator
*iter
,
289 struct kmemtrace_alloc_entry
*entry
)
291 struct trace_seq
*s
= &iter
->seq
;
295 ret
= trace_seq_printf(s
, " + ");
297 return TRACE_TYPE_PARTIAL_LINE
;
300 switch (entry
->type_id
) {
301 case KMEMTRACE_TYPE_KMALLOC
:
302 ret
= trace_seq_printf(s
, "K ");
304 case KMEMTRACE_TYPE_CACHE
:
305 ret
= trace_seq_printf(s
, "C ");
307 case KMEMTRACE_TYPE_PAGES
:
308 ret
= trace_seq_printf(s
, "P ");
311 ret
= trace_seq_printf(s
, "? ");
315 return TRACE_TYPE_PARTIAL_LINE
;
318 ret
= trace_seq_printf(s
, "%4zu ", entry
->bytes_req
);
320 return TRACE_TYPE_PARTIAL_LINE
;
323 ret
= trace_seq_printf(s
, "%4zu ", entry
->bytes_alloc
);
325 return TRACE_TYPE_PARTIAL_LINE
;
328 * TODO: would be better to see the name of the GFP flag names
330 ret
= trace_seq_printf(s
, "%08x ", entry
->gfp_flags
);
332 return TRACE_TYPE_PARTIAL_LINE
;
334 /* Pointer to allocated */
335 ret
= trace_seq_printf(s
, "0x%tx ", (ptrdiff_t)entry
->ptr
);
337 return TRACE_TYPE_PARTIAL_LINE
;
340 ret
= trace_seq_printf(s
, "%4d ", entry
->node
);
342 return TRACE_TYPE_PARTIAL_LINE
;
345 ret
= seq_print_ip_sym(s
, entry
->call_site
, 0);
347 return TRACE_TYPE_PARTIAL_LINE
;
349 if (!trace_seq_printf(s
, "\n"))
350 return TRACE_TYPE_PARTIAL_LINE
;
352 return TRACE_TYPE_HANDLED
;
355 static enum print_line_t
356 kmemtrace_print_free_compress(struct trace_iterator
*iter
,
357 struct kmemtrace_free_entry
*entry
)
359 struct trace_seq
*s
= &iter
->seq
;
363 ret
= trace_seq_printf(s
, " - ");
365 return TRACE_TYPE_PARTIAL_LINE
;
368 switch (entry
->type_id
) {
369 case KMEMTRACE_TYPE_KMALLOC
:
370 ret
= trace_seq_printf(s
, "K ");
372 case KMEMTRACE_TYPE_CACHE
:
373 ret
= trace_seq_printf(s
, "C ");
375 case KMEMTRACE_TYPE_PAGES
:
376 ret
= trace_seq_printf(s
, "P ");
379 ret
= trace_seq_printf(s
, "? ");
383 return TRACE_TYPE_PARTIAL_LINE
;
385 /* Skip requested/allocated/flags */
386 ret
= trace_seq_printf(s
, " ");
388 return TRACE_TYPE_PARTIAL_LINE
;
390 /* Pointer to allocated */
391 ret
= trace_seq_printf(s
, "0x%tx ", (ptrdiff_t)entry
->ptr
);
393 return TRACE_TYPE_PARTIAL_LINE
;
396 ret
= trace_seq_printf(s
, " ");
398 return TRACE_TYPE_PARTIAL_LINE
;
401 ret
= seq_print_ip_sym(s
, entry
->call_site
, 0);
403 return TRACE_TYPE_PARTIAL_LINE
;
405 if (!trace_seq_printf(s
, "\n"))
406 return TRACE_TYPE_PARTIAL_LINE
;
408 return TRACE_TYPE_HANDLED
;
411 static enum print_line_t
kmemtrace_print_line(struct trace_iterator
*iter
)
413 struct trace_entry
*entry
= iter
->ent
;
415 switch (entry
->type
) {
416 case TRACE_KMEM_ALLOC
: {
417 struct kmemtrace_alloc_entry
*field
;
418 trace_assign_type(field
, entry
);
419 if (kmem_tracer_flags
.val
& TRACE_KMEM_OPT_MINIMAL
)
420 return kmemtrace_print_alloc_compress(iter
, field
);
422 return kmemtrace_print_alloc_user(iter
, field
);
425 case TRACE_KMEM_FREE
: {
426 struct kmemtrace_free_entry
*field
;
427 trace_assign_type(field
, entry
);
428 if (kmem_tracer_flags
.val
& TRACE_KMEM_OPT_MINIMAL
)
429 return kmemtrace_print_free_compress(iter
, field
);
431 return kmemtrace_print_free_user(iter
, field
);
435 return TRACE_TYPE_UNHANDLED
;
439 static struct tracer kmem_tracer __read_mostly
= {
441 .init
= kmem_trace_init
,
442 .reset
= kmem_trace_reset
,
443 .print_line
= kmemtrace_print_line
,
444 .print_header
= kmemtrace_headers
,
445 .flags
= &kmem_tracer_flags
/* Called from early boot code; currently a no-op placeholder. */
void kmemtrace_init(void)
{
	/* earliest opportunity to start kmem tracing */
}
453 static int __init
init_kmem_tracer(void)
455 return register_tracer(&kmem_tracer
);
458 device_initcall(init_kmem_tracer
);