# Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-2.6-20160225' into staging
# [qemu.git] / scripts / simpletrace.py
# blob 3916c6d14ae80532cbc5e67d5cc29e9d115fa7c5
1 #!/usr/bin/env python
3 # Pretty-printer for simple trace backend binary trace files
5 # Copyright IBM, Corp. 2010
7 # This work is licensed under the terms of the GNU GPL, version 2. See
8 # the COPYING file in the top-level directory.
10 # For help see docs/tracing.txt
12 import struct
13 import re
14 import inspect
15 from tracetool import _read_events, Event
16 from tracetool.backend.simple import is_string
# Sentinel values defined by the simple trace backend's binary format:
# the pseudo event id that marks the log header record, the magic number
# identifying a valid trace file, and the pseudo event id used for
# "events were dropped" records.
header_event_id = 0xffffffffffffffff
header_magic = 0xf2b177cb0aa429b4
dropped_event_id = 0xfffffffffffffffe
# struct format of the log file header: (event id, magic, version).
log_header_fmt = '=QQQ'
# struct format of each per-record header: rechdr[0] is the event id,
# rechdr[1] the timestamp and rechdr[3] the pid (see get_record);
# rechdr[2] is unused here -- presumably the record length. TODO confirm.
rec_header_fmt = '=QQII'
def read_header(fobj, hfmt):
    '''Read a trace record header.

    Returns the unpacked header tuple, or None on short read (EOF).'''
    size = struct.calcsize(hfmt)
    data = fobj.read(size)
    return struct.unpack(hfmt, data) if len(data) == size else None
def get_record(edict, rechdr, fobj):
    """Deserialize a trace record from a file into a tuple (event_num, timestamp, pid, arg1, ..., arg6).

    edict maps event id -> Event; rechdr is the unpacked record header
    (event id, timestamp, length, pid) or None at end-of-file, in which
    case None is returned.
    """
    if rechdr is None:
        return None
    # rechdr[2] (presumably the record length) is not needed here.
    rec = (rechdr[0], rechdr[1], rechdr[3])

    if rechdr[0] == dropped_event_id:
        # A dropped-events record carries a single uint64 payload:
        # the number of events that were dropped.
        (value,) = struct.unpack('=Q', fobj.read(8))
        return rec + (value,)

    event = edict[rechdr[0]]
    for arg_type, arg_name in event.args:  # avoid shadowing builtins type/name
        if is_string(arg_type):
            # Strings are encoded as a 32-bit length followed by the bytes.
            (slen,) = struct.unpack('=L', fobj.read(4))
            rec = rec + (fobj.read(slen),)
        else:
            # All other argument types are stored as 64-bit values.
            (value,) = struct.unpack('=Q', fobj.read(8))
            rec = rec + (value,)
    return rec
def read_record(edict, fobj):
    """Deserialize a trace record from a file into a tuple (event_num, timestamp, pid, arg1, ..., arg6)."""
    hdr = read_header(fobj, rec_header_fmt)
    # get_record() handles hdr is None (EOF) by returning None.
    return get_record(edict, hdr, fobj)
def read_trace_header(fobj):
    """Read and verify trace file header"""
    header = read_header(fobj, log_header_fmt)
    # Reject truncated files and files without the expected sentinel/magic.
    if header is None:
        raise ValueError('Not a valid trace file!')
    if header[0] != header_event_id or header[1] != header_magic:
        raise ValueError('Not a valid trace file!')

    log_version = header[2]
    if log_version not in [0, 2, 3]:
        raise ValueError('Unknown version of tracelog format!')
    # Only the current format (version 3) can actually be parsed here.
    if log_version != 3:
        raise ValueError('Log format %d not supported with this QEMU release!'
                         % log_version)
def read_trace_records(edict, fobj):
    """Deserialize trace records from a file, yielding record tuples (event_num, timestamp, pid, arg1, ..., arg6)."""
    # iter() with a sentinel stops as soon as read_record() returns None (EOF).
    for rec in iter(lambda: read_record(edict, fobj), None):
        yield rec
class Analyzer(object):
    """Base class for trace file analyzers.

    Pass an instance to run() or process(): begin() is called first, then
    one handler call per trace record, then end().

    A record is dispatched to a method named after its trace event if such
    a method exists; otherwise it goes to catchall()."""

    def begin(self):
        """Called at the start of the trace."""
        pass

    def catchall(self, event, rec):
        """Called if no specific method for processing a trace event has been found."""
        pass

    def end(self):
        """Called at the end of the trace."""
        pass
def process(events, log, analyzer, read_header=True):
    """Invoke an analyzer on each event in a log.

    events may be a filename of a trace-events file or an already-parsed
    event list; log may be a filename of a binary trace or an open binary
    file object.  When read_header is True the trace file header is read
    and validated first.
    """
    if isinstance(events, str):
        events = _read_events(open(events, 'r'))
    if isinstance(log, str):
        log = open(log, 'rb')

    if read_header:
        read_trace_header(log)

    # Synthesize an event describing dropped-events records so they can be
    # dispatched like any other record.
    dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)")
    edict = {dropped_event_id: dropped_event}

    for num, event in enumerate(events):
        edict[num] = event

    def build_fn(analyzer, event):
        # Select the handler and adapt the record tuple to its arity.
        if isinstance(event, str):
            return analyzer.catchall

        fn = getattr(analyzer, event.name, None)
        if fn is None:
            return analyzer.catchall

        event_argcount = len(event.args)
        fn_argcount = len(inspect.getargspec(fn)[0]) - 1
        if fn_argcount == event_argcount + 1:
            # Include timestamp as first argument.  Concatenate the slices
            # directly -- the old code wrapped rec[1:2] in another tuple,
            # which passed (timestamp,) instead of the timestamp itself.
            return lambda _, rec: fn(*(rec[1:2] + rec[3:3 + event_argcount]))
        elif fn_argcount == event_argcount + 2:
            # Include timestamp and pid
            return lambda _, rec: fn(*rec[1:3 + event_argcount])
        else:
            # Just arguments, no timestamp or pid
            return lambda _, rec: fn(*rec[3:3 + event_argcount])

    analyzer.begin()
    fn_cache = {}  # event id -> dispatch closure, built lazily per event
    for rec in read_trace_records(edict, log):
        event_num = rec[0]
        event = edict[event_num]
        if event_num not in fn_cache:
            fn_cache[event_num] = build_fn(analyzer, event)
        fn_cache[event_num](event, rec)
    analyzer.end()
def run(analyzer):
    """Execute an analyzer on a trace file given on the command-line.

    This function is useful as a driver for simple analysis scripts. More
    advanced scripts will want to call process() instead."""
    import sys

    # An optional leading --no-header flag suppresses trace header parsing.
    read_header = True
    if len(sys.argv) == 4 and sys.argv[1] == '--no-header':
        read_header = False
        del sys.argv[1]
    elif len(sys.argv) != 3:
        usage = 'usage: %s [--no-header] <trace-events> <trace-file>\n'
        sys.stderr.write(usage % sys.argv[0])
        sys.exit(1)

    events = _read_events(open(sys.argv[1], 'r'))
    process(events, sys.argv[2], analyzer, read_header=read_header)
172 if __name__ == '__main__':
173 class Formatter(Analyzer):
174 def __init__(self):
175 self.last_timestamp = None
177 def catchall(self, event, rec):
178 timestamp = rec[1]
179 if self.last_timestamp is None:
180 self.last_timestamp = timestamp
181 delta_ns = timestamp - self.last_timestamp
182 self.last_timestamp = timestamp
184 fields = [event.name, '%0.3f' % (delta_ns / 1000.0),
185 'pid=%d' % rec[2]]
186 i = 3
187 for type, name in event.args:
188 if is_string(type):
189 fields.append('%s=%s' % (name, rec[i]))
190 else:
191 fields.append('%s=0x%x' % (name, rec[i]))
192 i += 1
193 print ' '.join(fields)
195 run(Formatter())