/*
 * Memory allocator tracing
 *
 * Copyright (C) 2008 Eduard - Gabriel Munteanu
 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */
#include <linux/dcache.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <trace/kmemtrace.h>

#include "trace.h"
#include "trace_output.h"
18 /* Select an alternative, minimalistic output than the original one */
19 #define TRACE_KMEM_OPT_MINIMAL 0x1
21 static struct tracer_opt kmem_opts
[] = {
22 /* Default disable the minimalistic output */
23 { TRACER_OPT(kmem_minimalistic
, TRACE_KMEM_OPT_MINIMAL
) },
27 static struct tracer_flags kmem_tracer_flags
= {
33 static bool kmem_tracing_enabled __read_mostly
;
34 static struct trace_array
*kmemtrace_array
;
36 static int kmem_trace_init(struct trace_array
*tr
)
41 for_each_cpu_mask(cpu
, cpu_possible_map
)
42 tracing_reset(tr
, cpu
);
44 kmem_tracing_enabled
= true;
49 static void kmem_trace_reset(struct trace_array
*tr
)
51 kmem_tracing_enabled
= false;
54 static void kmemtrace_headers(struct seq_file
*s
)
56 /* Don't need headers for the original kmemtrace output */
57 if (!(kmem_tracer_flags
.val
& TRACE_KMEM_OPT_MINIMAL
))
61 seq_printf(s
, "# ALLOC TYPE REQ GIVEN FLAGS "
62 " POINTER NODE CALLER\n");
63 seq_printf(s
, "# FREE | | | | "
65 seq_printf(s
, "# |\n\n");
/*
 * The two following functions give the original output from kmemtrace,
 * or something close to....perhaps they need some missing things
 */
72 static enum print_line_t
73 kmemtrace_print_alloc_original(struct trace_iterator
*iter
,
74 struct kmemtrace_alloc_entry
*entry
)
76 struct trace_seq
*s
= &iter
->seq
;
79 /* Taken from the old linux/kmemtrace.h */
80 ret
= trace_seq_printf(s
, "type_id %d call_site %lu ptr %lu "
81 "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
82 entry
->type_id
, entry
->call_site
, (unsigned long) entry
->ptr
,
83 (unsigned long) entry
->bytes_req
, (unsigned long) entry
->bytes_alloc
,
84 (unsigned long) entry
->gfp_flags
, entry
->node
);
87 return TRACE_TYPE_PARTIAL_LINE
;
89 return TRACE_TYPE_HANDLED
;
92 static enum print_line_t
93 kmemtrace_print_free_original(struct trace_iterator
*iter
,
94 struct kmemtrace_free_entry
*entry
)
96 struct trace_seq
*s
= &iter
->seq
;
99 /* Taken from the old linux/kmemtrace.h */
100 ret
= trace_seq_printf(s
, "type_id %d call_site %lu ptr %lu\n",
101 entry
->type_id
, entry
->call_site
, (unsigned long) entry
->ptr
);
104 return TRACE_TYPE_PARTIAL_LINE
;
106 return TRACE_TYPE_HANDLED
;
110 /* The two other following provide a more minimalistic output */
111 static enum print_line_t
112 kmemtrace_print_alloc_compress(struct trace_iterator
*iter
,
113 struct kmemtrace_alloc_entry
*entry
)
115 struct trace_seq
*s
= &iter
->seq
;
119 ret
= trace_seq_printf(s
, " + ");
121 return TRACE_TYPE_PARTIAL_LINE
;
124 switch (entry
->type_id
) {
125 case KMEMTRACE_TYPE_KMALLOC
:
126 ret
= trace_seq_printf(s
, "K ");
128 case KMEMTRACE_TYPE_CACHE
:
129 ret
= trace_seq_printf(s
, "C ");
131 case KMEMTRACE_TYPE_PAGES
:
132 ret
= trace_seq_printf(s
, "P ");
135 ret
= trace_seq_printf(s
, "? ");
139 return TRACE_TYPE_PARTIAL_LINE
;
142 ret
= trace_seq_printf(s
, "%4zu ", entry
->bytes_req
);
144 return TRACE_TYPE_PARTIAL_LINE
;
147 ret
= trace_seq_printf(s
, "%4zu ", entry
->bytes_alloc
);
149 return TRACE_TYPE_PARTIAL_LINE
;
152 * TODO: would be better to see the name of the GFP flag names
154 ret
= trace_seq_printf(s
, "%08x ", entry
->gfp_flags
);
156 return TRACE_TYPE_PARTIAL_LINE
;
158 /* Pointer to allocated */
159 ret
= trace_seq_printf(s
, "0x%tx ", (ptrdiff_t)entry
->ptr
);
161 return TRACE_TYPE_PARTIAL_LINE
;
164 ret
= trace_seq_printf(s
, "%4d ", entry
->node
);
166 return TRACE_TYPE_PARTIAL_LINE
;
169 ret
= seq_print_ip_sym(s
, entry
->call_site
, 0);
171 return TRACE_TYPE_PARTIAL_LINE
;
173 if (!trace_seq_printf(s
, "\n"))
174 return TRACE_TYPE_PARTIAL_LINE
;
176 return TRACE_TYPE_HANDLED
;
179 static enum print_line_t
180 kmemtrace_print_free_compress(struct trace_iterator
*iter
,
181 struct kmemtrace_free_entry
*entry
)
183 struct trace_seq
*s
= &iter
->seq
;
187 ret
= trace_seq_printf(s
, " - ");
189 return TRACE_TYPE_PARTIAL_LINE
;
192 switch (entry
->type_id
) {
193 case KMEMTRACE_TYPE_KMALLOC
:
194 ret
= trace_seq_printf(s
, "K ");
196 case KMEMTRACE_TYPE_CACHE
:
197 ret
= trace_seq_printf(s
, "C ");
199 case KMEMTRACE_TYPE_PAGES
:
200 ret
= trace_seq_printf(s
, "P ");
203 ret
= trace_seq_printf(s
, "? ");
207 return TRACE_TYPE_PARTIAL_LINE
;
209 /* Skip requested/allocated/flags */
210 ret
= trace_seq_printf(s
, " ");
212 return TRACE_TYPE_PARTIAL_LINE
;
214 /* Pointer to allocated */
215 ret
= trace_seq_printf(s
, "0x%tx ", (ptrdiff_t)entry
->ptr
);
217 return TRACE_TYPE_PARTIAL_LINE
;
220 ret
= trace_seq_printf(s
, " ");
222 return TRACE_TYPE_PARTIAL_LINE
;
225 ret
= seq_print_ip_sym(s
, entry
->call_site
, 0);
227 return TRACE_TYPE_PARTIAL_LINE
;
229 if (!trace_seq_printf(s
, "\n"))
230 return TRACE_TYPE_PARTIAL_LINE
;
232 return TRACE_TYPE_HANDLED
;
235 static enum print_line_t
kmemtrace_print_line(struct trace_iterator
*iter
)
237 struct trace_entry
*entry
= iter
->ent
;
239 switch (entry
->type
) {
240 case TRACE_KMEM_ALLOC
: {
241 struct kmemtrace_alloc_entry
*field
;
242 trace_assign_type(field
, entry
);
243 if (kmem_tracer_flags
.val
& TRACE_KMEM_OPT_MINIMAL
)
244 return kmemtrace_print_alloc_compress(iter
, field
);
246 return kmemtrace_print_alloc_original(iter
, field
);
249 case TRACE_KMEM_FREE
: {
250 struct kmemtrace_free_entry
*field
;
251 trace_assign_type(field
, entry
);
252 if (kmem_tracer_flags
.val
& TRACE_KMEM_OPT_MINIMAL
)
253 return kmemtrace_print_free_compress(iter
, field
);
255 return kmemtrace_print_free_original(iter
, field
);
259 return TRACE_TYPE_UNHANDLED
;
263 /* Trace allocations */
264 void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id
,
265 unsigned long call_site
,
272 struct ring_buffer_event
*event
;
273 struct kmemtrace_alloc_entry
*entry
;
274 struct trace_array
*tr
= kmemtrace_array
;
276 if (!kmem_tracing_enabled
)
279 event
= trace_buffer_lock_reserve(tr
, TRACE_KMEM_ALLOC
,
280 sizeof(*entry
), 0, 0);
283 entry
= ring_buffer_event_data(event
);
285 entry
->call_site
= call_site
;
287 entry
->bytes_req
= bytes_req
;
288 entry
->bytes_alloc
= bytes_alloc
;
289 entry
->gfp_flags
= gfp_flags
;
292 trace_buffer_unlock_commit(tr
, event
, 0, 0);
294 EXPORT_SYMBOL(kmemtrace_mark_alloc_node
);
296 void kmemtrace_mark_free(enum kmemtrace_type_id type_id
,
297 unsigned long call_site
,
300 struct ring_buffer_event
*event
;
301 struct kmemtrace_free_entry
*entry
;
302 struct trace_array
*tr
= kmemtrace_array
;
304 if (!kmem_tracing_enabled
)
307 event
= trace_buffer_lock_reserve(tr
, TRACE_KMEM_FREE
,
308 sizeof(*entry
), 0, 0);
311 entry
= ring_buffer_event_data(event
);
312 entry
->type_id
= type_id
;
313 entry
->call_site
= call_site
;
316 trace_buffer_unlock_commit(tr
, event
, 0, 0);
318 EXPORT_SYMBOL(kmemtrace_mark_free
);
320 static struct tracer kmem_tracer __read_mostly
= {
322 .init
= kmem_trace_init
,
323 .reset
= kmem_trace_reset
,
324 .print_line
= kmemtrace_print_line
,
325 .print_header
= kmemtrace_headers
,
326 .flags
= &kmem_tracer_flags
/* Hook called early at boot; intentionally empty for now. */
void kmemtrace_init(void)
{
	/* earliest opportunity to start kmem tracing */
}
334 static int __init
init_kmem_tracer(void)
336 return register_tracer(&kmem_tracer
);
339 device_initcall(init_kmem_tracer
);