/*
 * Memory mapped I/O tracing
 *
 * Copyright (C) 2008 Pekka Paalanen <pq@iki.fi>
 */

#define DEBUG 1

#include <linux/kernel.h>
#include <linux/mmiotrace.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/time.h>

#include <linux/atomic.h>

#include "trace.h"
#include "trace_output.h"

struct header_iter {
        struct pci_dev *dev;
};

static struct trace_array *mmio_trace_array;
static bool overrun_detected;
static unsigned long prev_overruns;
static atomic_t dropped_count;
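
/*
 * dropped_count counts events that failed ring buffer reservation;
 * prev_overruns remembers the overrun total that has already been
 * reported to the reader. count_overruns() folds both into a single
 * "lost events" figure.
 */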

static void mmio_reset_data(struct trace_array *tr)
{
        overrun_detected = false;
        prev_overruns = 0;

        tracing_reset_online_cpus(&tr->trace_buffer);
}

static int mmio_trace_init(struct trace_array *tr)
{
        pr_debug("in %s\n", __func__);
        mmio_trace_array = tr;

        mmio_reset_data(tr);
        enable_mmiotrace();
        return 0;
}

static void mmio_trace_reset(struct trace_array *tr)
{
        pr_debug("in %s\n", __func__);

        disable_mmiotrace();
        mmio_reset_data(tr);
        mmio_trace_array = NULL;
}

static void mmio_trace_start(struct trace_array *tr)
{
        pr_debug("in %s\n", __func__);
        mmio_reset_data(tr);
}

static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
{
        int ret = 0;
        int i;
        resource_size_t start, end;
        const struct pci_driver *drv = pci_dev_driver(dev);

        /* XXX: incomplete checks for trace_seq_printf() return value */
        ret += trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x",
                                dev->bus->number, dev->devfn,
                                dev->vendor, dev->device, dev->irq);
        /*
         * XXX: is pci_resource_to_user() appropriate, since we are
         * supposed to interpret the __ioremap() phys_addr argument based on
         * these printed values?
         */
        for (i = 0; i < 7; i++) {
                pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
                ret += trace_seq_printf(s, " %llx",
                        (unsigned long long)(start |
                        (dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
        }
        for (i = 0; i < 7; i++) {
                pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
                ret += trace_seq_printf(s, " %llx",
                        dev->resource[i].start < dev->resource[i].end ?
                        (unsigned long long)(end - start) + 1 : 0);
        }
        if (drv)
                ret += trace_seq_printf(s, " %s\n", drv->name);
        else
                ret += trace_seq_puts(s, " \n");
        return ret;
}

static void destroy_header_iter(struct header_iter *hiter)
{
        if (!hiter)
                return;
        pci_dev_put(hiter->dev);
        kfree(hiter);
}

static void mmio_pipe_open(struct trace_iterator *iter)
{
        struct header_iter *hiter;
        struct trace_seq *s = &iter->seq;

        trace_seq_puts(s, "VERSION 20070824\n");

        hiter = kzalloc(sizeof(*hiter), GFP_KERNEL);
        if (!hiter)
                return;

        hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
        iter->private = hiter;
}
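
/*
 * The PCI listing is streamed one device per read: mmio_pipe_open()
 * primes hiter->dev with the first device, and each mmio_read() call
 * below prints one device and advances the iterator with
 * pci_get_device() until no devices remain.
 */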

/* XXX: This is not called when the pipe is closed! */
static void mmio_close(struct trace_iterator *iter)
{
        struct header_iter *hiter = iter->private;
        destroy_header_iter(hiter);
        iter->private = NULL;
}

static unsigned long count_overruns(struct trace_iterator *iter)
{
        unsigned long cnt = atomic_xchg(&dropped_count, 0);
        unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);

        if (over > prev_overruns)
                cnt += over - prev_overruns;
        prev_overruns = over;
        return cnt;
}
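
/*
 * Lost events reach the reader as a "MARK 0.000000 Lost N events."
 * line. N combines reservation failures (dropped_count, fetched and
 * cleared with atomic_xchg()) with ring buffer overwrites that
 * happened since the previous report (over - prev_overruns).
 */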

static ssize_t
mmio_read(struct trace_iterator *iter, struct file *filp,
          char __user *ubuf, size_t cnt, loff_t *ppos)
{
        ssize_t ret;
        struct header_iter *hiter = iter->private;
        struct trace_seq *s = &iter->seq;
        unsigned long n;

        n = count_overruns(iter);
        if (n) {
                /* XXX: This is later than where events were lost. */
                trace_seq_printf(s, "MARK 0.000000 Lost %lu events.\n", n);
                if (!overrun_detected)
                        pr_warning("mmiotrace has lost events.\n");
                overrun_detected = true;
                goto print_out;
        }

        if (!hiter)
                return 0;

        mmio_print_pcidev(s, hiter->dev);
        hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, hiter->dev);

        if (!hiter->dev) {
                destroy_header_iter(hiter);
                iter->private = NULL;
        }

print_out:
        ret = trace_seq_to_user(s, ubuf, cnt);
        return (ret == -EBUSY) ? 0 : ret;
}

static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_mmiotrace_rw *field;
        struct mmiotrace_rw *rw;
        struct trace_seq *s = &iter->seq;
        unsigned long long t = ns2usecs(iter->ts);
        unsigned long usec_rem = do_div(t, USEC_PER_SEC);
        unsigned secs = (unsigned long)t;
        int ret;

        trace_assign_type(field, entry);
        rw = &field->rw;

        switch (rw->opcode) {
        case MMIO_READ:
                ret = trace_seq_printf(s,
                        "R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
                        rw->width, secs, usec_rem, rw->map_id,
                        (unsigned long long)rw->phys,
                        rw->value, rw->pc, 0);
                break;
        case MMIO_WRITE:
                ret = trace_seq_printf(s,
                        "W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
                        rw->width, secs, usec_rem, rw->map_id,
                        (unsigned long long)rw->phys,
                        rw->value, rw->pc, 0);
                break;
        case MMIO_UNKNOWN_OP:
                ret = trace_seq_printf(s,
                        "UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx,"
                        "%02lx 0x%lx %d\n",
                        secs, usec_rem, rw->map_id,
                        (unsigned long long)rw->phys,
                        (rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff,
                        (rw->value >> 0) & 0xff, rw->pc, 0);
                break;
        default:
                ret = trace_seq_puts(s, "rw what?\n");
                break;
        }
        if (ret)
                return TRACE_TYPE_HANDLED;
        return TRACE_TYPE_PARTIAL_LINE;
}
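
/*
 * Illustrative output (values made up): a one-byte read of 0x4e from
 * physical address 0xfd000014 in mapping 1 would be printed as
 *
 *   R 1 12.345678 1 0xfd000014 0x4e 0xffffffff81234567 0
 *
 * where the trailing 0 is the constant placeholder passed as the last
 * argument above.
 */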

static enum print_line_t mmio_print_map(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_mmiotrace_map *field;
        struct mmiotrace_map *m;
        struct trace_seq *s = &iter->seq;
        unsigned long long t = ns2usecs(iter->ts);
        unsigned long usec_rem = do_div(t, USEC_PER_SEC);
        unsigned secs = (unsigned long)t;
        int ret;

        trace_assign_type(field, entry);
        m = &field->map;

        switch (m->opcode) {
        case MMIO_PROBE:
                ret = trace_seq_printf(s,
                        "MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
                        secs, usec_rem, m->map_id,
                        (unsigned long long)m->phys, m->virt, m->len,
                        0UL, 0);
                break;
        case MMIO_UNPROBE:
                ret = trace_seq_printf(s,
                        "UNMAP %u.%06lu %d 0x%lx %d\n",
                        secs, usec_rem, m->map_id, 0UL, 0);
                break;
        default:
                ret = trace_seq_puts(s, "map what?\n");
                break;
        }
        if (ret)
                return TRACE_TYPE_HANDLED;
        return TRACE_TYPE_PARTIAL_LINE;
}
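
/*
 * Illustrative output (values made up): an ioremap() of 64 KiB at
 * physical 0xfd000000 could appear as
 *
 *   MAP 12.345678 1 0xfd000000 0xffffc90000100000 0x10000 0x0 0
 *
 * and the matching iounmap() as
 *
 *   UNMAP 12.355678 1 0x0 0
 */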

static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct print_entry *print = (struct print_entry *)entry;
        const char *msg = print->buf;
        struct trace_seq *s = &iter->seq;
        unsigned long long t = ns2usecs(iter->ts);
        unsigned long usec_rem = do_div(t, USEC_PER_SEC);
        unsigned secs = (unsigned long)t;
        int ret;

        /* The trailing newline must be in the message. */
        ret = trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t mmio_print_line(struct trace_iterator *iter)
{
        switch (iter->ent->type) {
        case TRACE_MMIO_RW:
                return mmio_print_rw(iter);
        case TRACE_MMIO_MAP:
                return mmio_print_map(iter);
        case TRACE_PRINT:
                return mmio_print_mark(iter);
        default:
                return TRACE_TYPE_HANDLED; /* ignore unknown entries */
        }
}

static struct tracer mmio_tracer __read_mostly =
{
        .name           = "mmiotrace",
        .init           = mmio_trace_init,
        .reset          = mmio_trace_reset,
        .start          = mmio_trace_start,
        .pipe_open      = mmio_pipe_open,
        .close          = mmio_close,
        .read           = mmio_read,
        .print_line     = mmio_print_line,
};

__init static int init_mmio_trace(void)
{
        return register_tracer(&mmio_tracer);
}
device_initcall(init_mmio_trace);
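
/*
 * Typical usage sketch from userspace (see
 * Documentation/trace/mmiotrace.txt for the authoritative steps):
 *
 *   mount -t debugfs debugfs /sys/kernel/debug
 *   echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
 *   cat /sys/kernel/debug/tracing/trace_pipe > mydump.txt &
 *   ...interact with the device...
 *   echo nop > /sys/kernel/debug/tracing/current_tracer
 */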

static void __trace_mmiotrace_rw(struct trace_array *tr,
                                 struct trace_array_cpu *data,
                                 struct mmiotrace_rw *rw)
{
        struct ftrace_event_call *call = &event_mmiotrace_rw;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ring_buffer_event *event;
        struct trace_mmiotrace_rw *entry;
        int pc = preempt_count();

        event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
                                          sizeof(*entry), 0, pc);
        if (!event) {
                atomic_inc(&dropped_count);
                return;
        }
        entry = ring_buffer_event_data(event);
        entry->rw = *rw;

        if (!filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, 0, pc);
}
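
/*
 * __trace_mmiotrace_rw() above and __trace_mmiotrace_map() below
 * follow the usual ftrace producer pattern: reserve an event in the
 * ring buffer, fill in the payload, then commit it unless the event
 * filter discards it. A failed reservation is tallied in
 * dropped_count and surfaces later through count_overruns().
 */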

void mmio_trace_rw(struct mmiotrace_rw *rw)
{
        struct trace_array *tr = mmio_trace_array;
        struct trace_array_cpu *data =
                per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
        __trace_mmiotrace_rw(tr, data, rw);
}

static void __trace_mmiotrace_map(struct trace_array *tr,
                                  struct trace_array_cpu *data,
                                  struct mmiotrace_map *map)
{
        struct ftrace_event_call *call = &event_mmiotrace_map;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ring_buffer_event *event;
        struct trace_mmiotrace_map *entry;
        int pc = preempt_count();

        event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
                                          sizeof(*entry), 0, pc);
        if (!event) {
                atomic_inc(&dropped_count);
                return;
        }
        entry = ring_buffer_event_data(event);
        entry->map = *map;

        if (!filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, 0, pc);
}

void mmio_trace_mapping(struct mmiotrace_map *map)
{
        struct trace_array *tr = mmio_trace_array;
        struct trace_array_cpu *data;

        preempt_disable();
        data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
        __trace_mmiotrace_map(tr, data, map);
        preempt_enable();
}
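
/*
 * mmio_trace_printk() below is the entry point for user MARK
 * annotations; trace_vprintk() stores them as print entries in the
 * trace buffer, and mmio_print_line() later routes them to
 * mmio_print_mark() for output.
 */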

int mmio_trace_printk(const char *fmt, va_list args)
{
        return trace_vprintk(0, fmt, args);
}