/*
 * Memory allocator tracing
 *
 * Copyright (C) 2008 Eduard - Gabriel Munteanu
 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/tracepoint.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/dcache.h>

#include <linux/kmemtrace.h>

#include "trace_output.h"
#include "trace.h"
/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_KMEM_OPT_MINIMAL	0x1

static struct tracer_opt kmem_opts[] = {
	/* The minimalistic output is disabled by default */
	{ TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
	{ }
};

static struct tracer_flags kmem_tracer_flags = {
	.val	= 0,
	.opts	= kmem_opts
};

static struct trace_array *kmemtrace_array;
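/*
 * A usage sketch (assuming debugfs is mounted at /sys/kernel/debug): the
 * tracer is selected through the usual ftrace interface, and the
 * kmem_minimalistic option above is toggled through trace_options.
 *
 *	echo kmemtrace > /sys/kernel/debug/tracing/current_tracer
 *	echo kmem_minimalistic > /sys/kernel/debug/tracing/trace_options
 *	cat /sys/kernel/debug/tracing/trace
 */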
/* Trace allocations */
static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
				   unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	struct ftrace_event_call *call = &event_kmem_alloc;
	struct trace_array *tr = kmemtrace_array;
	struct kmemtrace_alloc_entry *entry;
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);

	entry->ent.type		= TRACE_KMEM_ALLOC;
	entry->type_id		= type_id;
	entry->call_site	= call_site;
	entry->ptr		= ptr;
	entry->bytes_req	= bytes_req;
	entry->bytes_alloc	= bytes_alloc;
	entry->gfp_flags	= gfp_flags;
	entry->node		= node;

	if (!filter_check_discard(call, entry, tr->buffer, event))
		ring_buffer_unlock_commit(tr->buffer, event);
}
/* Trace frees */
static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
				  unsigned long call_site,
				  const void *ptr)
{
	struct ftrace_event_call *call = &event_kmem_free;
	struct trace_array *tr = kmemtrace_array;
	struct kmemtrace_free_entry *entry;
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);

	entry->ent.type		= TRACE_KMEM_FREE;
	entry->type_id		= type_id;
	entry->call_site	= call_site;
	entry->ptr		= ptr;

	if (!filter_check_discard(call, entry, tr->buffer, event))
		ring_buffer_unlock_commit(tr->buffer, event);
}
static void kmemtrace_kmalloc(unsigned long call_site,
			      const void *ptr,
			      size_t bytes_req,
			      size_t bytes_alloc,
			      gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
				       const void *ptr,
				       size_t bytes_req,
				       size_t bytes_alloc,
				       gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmalloc_node(unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
					    const void *ptr,
					    size_t bytes_req,
					    size_t bytes_alloc,
					    gfp_t gfp_flags,
					    int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
}

static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
}
static int kmemtrace_start_probes(void)
{
	int err;

	err = register_trace_kmalloc(kmemtrace_kmalloc);
	if (err)
		return err;
	err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
	if (err)
		return err;
	err = register_trace_kmalloc_node(kmemtrace_kmalloc_node);
	if (err)
		return err;
	err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
	if (err)
		return err;
	err = register_trace_kfree(kmemtrace_kfree);
	if (err)
		return err;
	err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free);

	return err;
}
static void kmemtrace_stop_probes(void)
{
	unregister_trace_kmalloc(kmemtrace_kmalloc);
	unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
	unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
	unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
	unregister_trace_kfree(kmemtrace_kfree);
	unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
}
static int kmem_trace_init(struct trace_array *tr)
{
	int cpu;

	kmemtrace_array = tr;

	for_each_cpu(cpu, cpu_possible_mask)
		tracing_reset(tr, cpu);

	kmemtrace_start_probes();

	return 0;
}
static void kmem_trace_reset(struct trace_array *tr)
{
	kmemtrace_stop_probes();
}
static void kmemtrace_headers(struct seq_file *s)
{
	/* Don't need headers for the original kmemtrace output */
	if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
		return;

	seq_printf(s, "#\n");
	seq_printf(s, "# ALLOC  TYPE  REQ   GIVEN  FLAGS     "
			"      POINTER         NODE    CALLER\n");
	seq_printf(s, "# FREE   |      |     |       |       "
			"       |   |            |        |\n");
	seq_printf(s, "# |\n\n");
}
/*
 * The following functions give the original output from kmemtrace,
 * plus the origin CPU, since reordering occurs in-kernel now.
 */

#define KMEMTRACE_USER_ALLOC	0
#define KMEMTRACE_USER_FREE	1

struct kmemtrace_user_event {
	u8		event_id;
	u8		type_id;
	u16		event_size;
	u32		cpu;
	u64		timestamp;
	unsigned long	call_site;
	unsigned long	ptr;
};

struct kmemtrace_user_event_alloc {
	size_t		bytes_req;
	size_t		bytes_alloc;
	unsigned	gfp_flags;
	int		node;
};
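/*
 * A minimal userspace consumer sketch for the binary stream emitted below.
 * Illustrative, not part of this file; it assumes the reader runs on the
 * same architecture as the kernel (so struct layout, word size and
 * endianness match) and that `fp` reads from the tracing "trace_pipe" file:
 *
 *	struct kmemtrace_user_event ev;
 *
 *	while (fread(&ev, sizeof(ev), 1, fp) == 1) {
 *		if (ev.event_id == KMEMTRACE_USER_ALLOC) {
 *			struct kmemtrace_user_event_alloc alloc;
 *
 *			if (fread(&alloc, sizeof(alloc), 1, fp) != 1)
 *				break;
 *			printf("alloc 0x%lx req=%zu alloc=%zu\n",
 *			       ev.ptr, alloc.bytes_req, alloc.bytes_alloc);
 *		} else {
 *			printf("free  0x%lx\n", ev.ptr);
 *		}
 *	}
 *
 * ev.event_size could instead be used to skip over records generically.
 */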
static enum print_line_t
kmemtrace_print_alloc_user(struct trace_iterator *iter,
			   struct kmemtrace_alloc_entry *entry)
{
	struct kmemtrace_user_event_alloc *ev_alloc;
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_user_event *ev;

	ev = trace_seq_reserve(s, sizeof(*ev));
	if (!ev)
		return TRACE_TYPE_PARTIAL_LINE;

	ev->event_id	= KMEMTRACE_USER_ALLOC;
	ev->type_id	= entry->type_id;
	ev->event_size	= sizeof(*ev) + sizeof(*ev_alloc);
	ev->cpu		= iter->cpu;
	ev->timestamp	= iter->ts;
	ev->call_site	= entry->call_site;
	ev->ptr		= (unsigned long)entry->ptr;

	ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc));
	if (!ev_alloc)
		return TRACE_TYPE_PARTIAL_LINE;

	ev_alloc->bytes_req	= entry->bytes_req;
	ev_alloc->bytes_alloc	= entry->bytes_alloc;
	ev_alloc->gfp_flags	= entry->gfp_flags;
	ev_alloc->node		= entry->node;

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
kmemtrace_print_free_user(struct trace_iterator *iter,
			  struct kmemtrace_free_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_user_event *ev;

	ev = trace_seq_reserve(s, sizeof(*ev));
	if (!ev)
		return TRACE_TYPE_PARTIAL_LINE;

	ev->event_id	= KMEMTRACE_USER_FREE;
	ev->type_id	= entry->type_id;
	ev->event_size	= sizeof(*ev);
	ev->cpu		= iter->cpu;
	ev->timestamp	= iter->ts;
	ev->call_site	= entry->call_site;
	ev->ptr		= (unsigned long)entry->ptr;

	return TRACE_TYPE_HANDLED;
}
/* The two following functions provide the more minimalistic output */
static enum print_line_t
kmemtrace_print_alloc_compress(struct trace_iterator *iter,
			       struct kmemtrace_alloc_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	int ret;

	/* Alloc entry */
	ret = trace_seq_printf(s, "  +      ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Type */
	switch (entry->type_id) {
	case KMEMTRACE_TYPE_KMALLOC:
		ret = trace_seq_printf(s, "K   ");
		break;
	case KMEMTRACE_TYPE_CACHE:
		ret = trace_seq_printf(s, "C   ");
		break;
	case KMEMTRACE_TYPE_PAGES:
		ret = trace_seq_printf(s, "P   ");
		break;
	default:
		ret = trace_seq_printf(s, "?   ");
	}

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Requested */
	ret = trace_seq_printf(s, "%4zu   ", entry->bytes_req);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Allocated */
	ret = trace_seq_printf(s, "%4zu   ", entry->bytes_alloc);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * Flags
	 * TODO: would be better to print the names of the GFP flags
	 */
	ret = trace_seq_printf(s, "%08x   ", entry->gfp_flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Pointer to allocated */
	ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Node */
	ret = trace_seq_printf(s, "%4d   ", entry->node);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Call site */
	ret = seq_print_ip_sym(s, entry->call_site, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (!trace_seq_printf(s, "\n"))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
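/*
 * One possible way to address the TODO above: map a few exact, well-known
 * GFP masks to symbolic names and let the caller fall back to the raw hex
 * mask otherwise. This is only a sketch of the idea and is not wired into
 * the output path.
 */
static const char * __maybe_unused kmemtrace_gfp_name(unsigned int gfp_flags)
{
	switch (gfp_flags) {
	case GFP_ATOMIC:
		return "GFP_ATOMIC";
	case GFP_NOIO:
		return "GFP_NOIO";
	case GFP_NOFS:
		return "GFP_NOFS";
	case GFP_KERNEL:
		return "GFP_KERNEL";
	default:
		return NULL;	/* unknown mix: caller prints %08x */
	}
}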
static enum print_line_t
kmemtrace_print_free_compress(struct trace_iterator *iter,
			      struct kmemtrace_free_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	int ret;

	/* Free entry */
	ret = trace_seq_printf(s, "  -      ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Type */
	switch (entry->type_id) {
	case KMEMTRACE_TYPE_KMALLOC:
		ret = trace_seq_printf(s, "K     ");
		break;
	case KMEMTRACE_TYPE_CACHE:
		ret = trace_seq_printf(s, "C     ");
		break;
	case KMEMTRACE_TYPE_PAGES:
		ret = trace_seq_printf(s, "P     ");
		break;
	default:
		ret = trace_seq_printf(s, "?     ");
	}

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Skip requested/allocated/flags */
	ret = trace_seq_printf(s, "                       ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Pointer to allocated */
	ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Skip node */
	ret = trace_seq_printf(s, "       ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Call site */
	ret = seq_print_ip_sym(s, entry->call_site, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (!trace_seq_printf(s, "\n"))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;

	switch (entry->type) {
	case TRACE_KMEM_ALLOC: {
		struct kmemtrace_alloc_entry *field;

		trace_assign_type(field, entry);
		if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
			return kmemtrace_print_alloc_compress(iter, field);
		else
			return kmemtrace_print_alloc_user(iter, field);
	}

	case TRACE_KMEM_FREE: {
		struct kmemtrace_free_entry *field;

		trace_assign_type(field, entry);
		if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
			return kmemtrace_print_free_compress(iter, field);
		else
			return kmemtrace_print_free_user(iter, field);
	}

	default:
		return TRACE_TYPE_UNHANDLED;
	}
}
static struct tracer kmem_tracer __read_mostly = {
	.name		= "kmemtrace",
	.init		= kmem_trace_init,
	.reset		= kmem_trace_reset,
	.print_line	= kmemtrace_print_line,
	.print_header	= kmemtrace_headers,
	.flags		= &kmem_tracer_flags
};
void kmemtrace_init(void)
{
	/* earliest opportunity to start kmem tracing */
}

static int __init init_kmem_tracer(void)
{
	return register_tracer(&kmem_tracer);
}
device_initcall(init_kmem_tracer);