/*
 * Memory allocator tracing
 *
 * Copyright (C) 2008 Eduard - Gabriel Munteanu
 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */
#include <linux/tracepoint.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/dcache.h>
#include <linux/fs.h>

#include <trace/kmemtrace.h>

#include "trace_output.h"
#include "trace.h"
/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_KMEM_OPT_MINIMAL	0x1

static struct tracer_opt kmem_opts[] = {
        /* The minimalistic output is disabled by default */
        { TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
        { }
};
static struct tracer_flags kmem_tracer_flags = {
        .val    = 0,
        .opts   = kmem_opts
};
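/*
 * The trace_array that kmemtrace records into; it is set when the
 * tracer is initialized in kmem_trace_init() below.
 */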
static struct trace_array *kmemtrace_array;
/* Trace allocations */
static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
                                   unsigned long call_site,
                                   const void *ptr,
                                   size_t bytes_req,
                                   size_t bytes_alloc,
                                   gfp_t gfp_flags,
                                   int node)
{
        struct trace_array *tr = kmemtrace_array;
        struct kmemtrace_alloc_entry *entry;
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);

        entry->ent.type         = TRACE_KMEM_ALLOC;
        entry->type_id          = type_id;
        entry->call_site        = call_site;
        entry->ptr              = ptr;
        entry->bytes_req        = bytes_req;
        entry->bytes_alloc      = bytes_alloc;
        entry->gfp_flags        = gfp_flags;
        entry->node             = node;

        ring_buffer_unlock_commit(tr->buffer, event);
}
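/* Trace frees */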
static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
                                  unsigned long call_site,
                                  const void *ptr)
{
        struct trace_array *tr = kmemtrace_array;
        struct kmemtrace_free_entry *entry;
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);

        entry->ent.type         = TRACE_KMEM_FREE;
        entry->type_id          = type_id;
        entry->call_site        = call_site;
        entry->ptr              = ptr;

        ring_buffer_unlock_commit(tr->buffer, event);
}
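/*
 * Tracepoint probes: thin wrappers that forward the allocator tracepoint
 * arguments to the generic kmemtrace_alloc()/kmemtrace_free() helpers
 * above, tagging each event with its allocator type.
 */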
static void kmemtrace_kmalloc(unsigned long call_site,
                              const void *ptr,
                              size_t bytes_req,
                              size_t bytes_alloc,
                              gfp_t gfp_flags)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, -1);
}
static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
                                       const void *ptr,
                                       size_t bytes_req,
                                       size_t bytes_alloc,
                                       gfp_t gfp_flags)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, -1);
}
static void kmemtrace_kmalloc_node(unsigned long call_site,
                                   const void *ptr,
                                   size_t bytes_req,
                                   size_t bytes_alloc,
                                   gfp_t gfp_flags,
                                   int node)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, node);
}
static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
                                            const void *ptr,
                                            size_t bytes_req,
                                            size_t bytes_alloc,
                                            gfp_t gfp_flags,
                                            int node)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, node);
}
static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
{
        kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
}
static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr)
{
        kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
}
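/*
 * Hook/unhook the probes above onto the slab allocator tracepoints.
 */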
static int kmemtrace_start_probes(void)
{
        int err;

        err = register_trace_kmalloc(kmemtrace_kmalloc);
        if (err)
                return err;
        err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
        if (err)
                return err;
        err = register_trace_kmalloc_node(kmemtrace_kmalloc_node);
        if (err)
                return err;
        err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
        if (err)
                return err;
        err = register_trace_kfree(kmemtrace_kfree);
        if (err)
                return err;
        err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free);

        return err;
}
static void kmemtrace_stop_probes(void)
{
        unregister_trace_kmalloc(kmemtrace_kmalloc);
        unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
        unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
        unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
        unregister_trace_kfree(kmemtrace_kfree);
        unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
}
static int kmem_trace_init(struct trace_array *tr)
{
        int cpu;

        kmemtrace_array = tr;

        for_each_cpu_mask(cpu, cpu_possible_map)
                tracing_reset(tr, cpu);

        kmemtrace_start_probes();

        return 0;
}
static void kmem_trace_reset(struct trace_array *tr)
{
        kmemtrace_stop_probes();
}
static void kmemtrace_headers(struct seq_file *s)
{
        /* Don't need headers for the original kmemtrace output */
        if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
                return;

        seq_printf(s, "#\n");
        seq_printf(s, "# ALLOC  TYPE  REQ   GIVEN  FLAGS     "
                      "      POINTER         NODE    CALLER\n");
        seq_printf(s, "#  FREE   |      |     |       |       "
                      "       |   |            |        |\n");
        seq_printf(s, "# |\n\n");
}
/*
 * The following functions give the original output from kmemtrace,
 * plus the origin CPU, since reordering occurs in-kernel now.
 */
#define KMEMTRACE_USER_ALLOC	0
#define KMEMTRACE_USER_FREE	1

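/*
 * Binary record layout handed to userspace through the trace_seq buffer:
 * kmemtrace_user_event is the common header, kmemtrace_user_event_alloc
 * carries the extra fields of an allocation event.
 */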
struct kmemtrace_user_event {
        u8              event_id;
        u8              type_id;
        u16             event_size;
        u32             cpu;
        u64             timestamp;
        unsigned long   call_site;
        unsigned long   ptr;
};
struct kmemtrace_user_event_alloc {
        size_t          bytes_req;
        size_t          bytes_alloc;
        unsigned        gfp_flags;
        int             node;
};
static enum print_line_t
kmemtrace_print_alloc_user(struct trace_iterator *iter,
                           struct kmemtrace_alloc_entry *entry)
{
        struct kmemtrace_user_event_alloc *ev_alloc;
        struct trace_seq *s = &iter->seq;
        struct kmemtrace_user_event *ev;

        ev = trace_seq_reserve(s, sizeof(*ev));
        if (!ev)
                return TRACE_TYPE_PARTIAL_LINE;

        ev->event_id    = KMEMTRACE_USER_ALLOC;
        ev->type_id     = entry->type_id;
        ev->event_size  = sizeof(*ev) + sizeof(*ev_alloc);
        ev->cpu         = iter->cpu;
        ev->timestamp   = iter->ts;
        ev->call_site   = entry->call_site;
        ev->ptr         = (unsigned long)entry->ptr;

        ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc));
        if (!ev_alloc)
                return TRACE_TYPE_PARTIAL_LINE;

        ev_alloc->bytes_req     = entry->bytes_req;
        ev_alloc->bytes_alloc   = entry->bytes_alloc;
        ev_alloc->gfp_flags     = entry->gfp_flags;
        ev_alloc->node          = entry->node;

        return TRACE_TYPE_HANDLED;
}
static enum print_line_t
kmemtrace_print_free_user(struct trace_iterator *iter,
                          struct kmemtrace_free_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        struct kmemtrace_user_event *ev;

        ev = trace_seq_reserve(s, sizeof(*ev));
        if (!ev)
                return TRACE_TYPE_PARTIAL_LINE;

        ev->event_id    = KMEMTRACE_USER_FREE;
        ev->type_id     = entry->type_id;
        ev->event_size  = sizeof(*ev);
        ev->cpu         = iter->cpu;
        ev->timestamp   = iter->ts;
        ev->call_site   = entry->call_site;
        ev->ptr         = (unsigned long)entry->ptr;

        return TRACE_TYPE_HANDLED;
}
/* The two following functions provide a more minimalistic output */
static enum print_line_t
kmemtrace_print_alloc_compress(struct trace_iterator *iter,
                               struct kmemtrace_alloc_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        int ret;

        /* Alloc entry */
        ret = trace_seq_printf(s, "  +      ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Type */
        switch (entry->type_id) {
        case KMEMTRACE_TYPE_KMALLOC:
                ret = trace_seq_printf(s, "K   ");
                break;
        case KMEMTRACE_TYPE_CACHE:
                ret = trace_seq_printf(s, "C   ");
                break;
        case KMEMTRACE_TYPE_PAGES:
                ret = trace_seq_printf(s, "P   ");
                break;
        default:
                ret = trace_seq_printf(s, "?   ");
        }

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Requested */
        ret = trace_seq_printf(s, "%4zu   ", entry->bytes_req);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Allocated */
        ret = trace_seq_printf(s, "%4zu   ", entry->bytes_alloc);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * Flags
         * TODO: it would be better to print the GFP flag names
         */
        ret = trace_seq_printf(s, "%08x   ", entry->gfp_flags);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Pointer to allocated */
        ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Node */
        ret = trace_seq_printf(s, "%4d   ", entry->node);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Call site */
        ret = seq_print_ip_sym(s, entry->call_site, 0);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (!trace_seq_printf(s, "\n"))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
static enum print_line_t
kmemtrace_print_free_compress(struct trace_iterator *iter,
                              struct kmemtrace_free_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        int ret;

        /* Free entry */
        ret = trace_seq_printf(s, "  -      ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Type */
        switch (entry->type_id) {
        case KMEMTRACE_TYPE_KMALLOC:
                ret = trace_seq_printf(s, "K   ");
                break;
        case KMEMTRACE_TYPE_CACHE:
                ret = trace_seq_printf(s, "C   ");
                break;
        case KMEMTRACE_TYPE_PAGES:
                ret = trace_seq_printf(s, "P   ");
                break;
        default:
                ret = trace_seq_printf(s, "?   ");
        }

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Skip requested/allocated/flags */
        ret = trace_seq_printf(s, "                       ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Pointer to allocated */
        ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Skip node */
        ret = trace_seq_printf(s, "       ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Call site */
        ret = seq_print_ip_sym(s, entry->call_site, 0);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (!trace_seq_printf(s, "\n"))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;

        switch (entry->type) {
        case TRACE_KMEM_ALLOC: {
                struct kmemtrace_alloc_entry *field;

                trace_assign_type(field, entry);
                if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
                        return kmemtrace_print_alloc_compress(iter, field);
                else
                        return kmemtrace_print_alloc_user(iter, field);
        }

        case TRACE_KMEM_FREE: {
                struct kmemtrace_free_entry *field;

                trace_assign_type(field, entry);
                if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
                        return kmemtrace_print_free_compress(iter, field);
                else
                        return kmemtrace_print_free_user(iter, field);
        }

        default:
                return TRACE_TYPE_UNHANDLED;
        }
}
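/* The kmemtrace tracer, registered with the ftrace framework below */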
static struct tracer kmem_tracer __read_mostly = {
        .name           = "kmemtrace",
        .init           = kmem_trace_init,
        .reset          = kmem_trace_reset,
        .print_line     = kmemtrace_print_line,
        .print_header   = kmemtrace_headers,
        .flags          = &kmem_tracer_flags
};
void kmemtrace_init(void)
{
        /* earliest opportunity to start kmem tracing */
}
static int __init init_kmem_tracer(void)
{
        return register_tracer(&kmem_tracer);
}

device_initcall(init_kmem_tracer);