#!/usr/bin/env python
#
# Pretty-printer for simple trace backend binary trace files
#
# Copyright IBM, Corp. 2010
#
# This work is licensed under the terms of the GNU GPL, version 2.  See
# the COPYING file in the top-level directory.
#
# For help see docs/tracing.txt

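# Example invocation (a sketch; the file names are placeholders -- see
# docs/tracing.txt and the usage message in run() below):
#
#   ./simpletrace.py trace-events trace-file
#   ./simpletrace.py --no-header trace-events trace-file
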
import struct
import re
import inspect
from tracetool import read_events, Event
from tracetool.backend.simple import is_string

header_event_id = 0xffffffffffffffff
header_magic = 0xf2b177cb0aa429b4
dropped_event_id = 0xfffffffffffffffe

record_type_mapping = 0
record_type_event = 1

log_header_fmt = '=QQQ'
rec_header_fmt = '=QQII'

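# Layout implied by the format strings above ('=' is native byte order with
# standard sizes): the log header is three u64 values (event id, magic,
# version); each record header is a u64 event id, a u64 timestamp in
# nanoseconds, a u32 record length (not used by this script) and a u32 pid.
# The field meanings are inferred from how the header tuples are indexed
# below, not from a separate format specification.
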
def read_header(fobj, hfmt):
    '''Read a trace record header'''
    hlen = struct.calcsize(hfmt)
    hdr = fobj.read(hlen)
    if len(hdr) != hlen:
        return None
    return struct.unpack(hfmt, hdr)

def get_record(edict, idtoname, rechdr, fobj):
    """Deserialize a trace record from a file into a tuple
       (name, timestamp, pid, arg1, ..., arg6)."""
    if rechdr is None:
        return None
    if rechdr[0] != dropped_event_id:
        event_id = rechdr[0]
        name = idtoname[event_id]
        rec = (name, rechdr[1], rechdr[3])
        event = edict[name]
        for type, name in event.args:
            if is_string(type):
                l = fobj.read(4)
                (length,) = struct.unpack('=L', l)
                s = fobj.read(length)
                rec = rec + (s,)
            else:
                (value,) = struct.unpack('=Q', fobj.read(8))
                rec = rec + (value,)
    else:
        rec = ("dropped", rechdr[1], rechdr[3])
        (value,) = struct.unpack('=Q', fobj.read(8))
        rec = rec + (value,)
    return rec

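# A decoded record tuple therefore looks like, for example (values made up):
#   ("runstate_set", 1323011234567, 4321, 0x2)
# i.e. (name, timestamp in ns, pid, arg1, ...); "dropped" pseudo-records carry
# the number of dropped events as their only argument.
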
def get_mapping(fobj):
    (event_id, ) = struct.unpack('=Q', fobj.read(8))
    (length, ) = struct.unpack('=L', fobj.read(4))
    name = fobj.read(length)

    return (event_id, name)

def read_record(edict, idtoname, fobj):
    """Deserialize a trace record from a file into a tuple (event_num, timestamp, pid, arg1, ..., arg6)."""
    rechdr = read_header(fobj, rec_header_fmt)
    return get_record(edict, idtoname, rechdr, fobj)

def read_trace_header(fobj):
    """Read and verify trace file header"""
    header = read_header(fobj, log_header_fmt)
    if header is None:
        raise ValueError('Not a valid trace file!')
    if header[0] != header_event_id:
        raise ValueError('Not a valid trace file, header id %d != %d' %
                         (header[0], header_event_id))
    if header[1] != header_magic:
        raise ValueError('Not a valid trace file, header magic %d != %d' %
                         (header[1], header_magic))

    log_version = header[2]
    if log_version not in [0, 2, 3, 4]:
        raise ValueError('Unknown version of tracelog format!')
    if log_version != 4:
        raise ValueError('Log format %d not supported with this QEMU release!'
                         % log_version)

def read_trace_records(edict, fobj):
    """Deserialize trace records from a file, yielding record tuples (event_num, timestamp, pid, arg1, ..., arg6)."""
    idtoname = {
        dropped_event_id: "dropped"
    }
    while True:
        t = fobj.read(8)
        if len(t) == 0:
            break

        (rectype, ) = struct.unpack('=Q', t)
        if rectype == record_type_mapping:
            event_id, name = get_mapping(fobj)
            idtoname[event_id] = name
        else:
            rec = read_record(edict, idtoname, fobj)

            yield rec

class Analyzer(object):
    """A trace file analyzer which processes trace records.

    An analyzer can be passed to run() or process().  The begin() method is
    invoked, then each trace record is processed, and finally the end() method
    is invoked.

    If a method matching a trace event name exists, it is invoked to process
    that trace record.  Otherwise the catchall() method is invoked.

    Example:
    The following method handles the runstate_set(int new_state) trace event::

      def runstate_set(self, new_state):
          ...

    The method can also take a timestamp argument before the trace event
    arguments::

      def runstate_set(self, timestamp, new_state):
          ...

    Timestamps have the uint64_t type and are in nanoseconds.

    The pid can be included in addition to the timestamp and is useful when
    dealing with traces from multiple processes::

      def runstate_set(self, timestamp, pid, new_state):
          ...
    """

    def begin(self):
        """Called at the start of the trace."""
        pass

    def catchall(self, event, rec):
        """Called if no specific method for processing a trace event has been found."""
        pass

    def end(self):
        """Called at the end of the trace."""
        pass

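# A minimal sketch of an Analyzer subclass.  It handles the
# runstate_set(int new_state) event used as the example in the docstring
# above and ignores everything else; whether that event is present depends on
# the trace-events file passed to process()/run().
class ExampleAnalyzer(Analyzer):
    def runstate_set(self, timestamp, new_state):
        # Timestamp variant: the timestamp (ns) precedes the event arguments.
        print 'runstate_set %d ns new_state=0x%x' % (timestamp, new_state)

    def catchall(self, event, rec):
        # Any event without a matching method ends up here.
        pass
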
def process(events, log, analyzer, read_header=True):
    """Invoke an analyzer on each event in a log."""
    if isinstance(events, str):
        events = read_events(open(events, 'r'))
    if isinstance(log, str):
        log = open(log, 'rb')

    if read_header:
        read_trace_header(log)

    dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)")
    edict = {"dropped": dropped_event}

    for event in events:
        edict[event.name] = event

    def build_fn(analyzer, event):
        if isinstance(event, str):
            return analyzer.catchall

        fn = getattr(analyzer, event.name, None)
        if fn is None:
            return analyzer.catchall

        event_argcount = len(event.args)
        fn_argcount = len(inspect.getargspec(fn)[0]) - 1
        if fn_argcount == event_argcount + 1:
            # Include timestamp as first argument
            return lambda _, rec: fn(*(rec[1:2] + rec[3:3 + event_argcount]))
        elif fn_argcount == event_argcount + 2:
            # Include timestamp and pid
            return lambda _, rec: fn(*rec[1:3 + event_argcount])
        else:
            # Just arguments, no timestamp or pid
            return lambda _, rec: fn(*rec[3:3 + event_argcount])

    analyzer.begin()
    fn_cache = {}
    for rec in read_trace_records(edict, log):
        event_num = rec[0]
        event = edict[event_num]
        if event_num not in fn_cache:
            fn_cache[event_num] = build_fn(analyzer, event)
        fn_cache[event_num](event, rec)
    analyzer.end()

def run(analyzer):
    """Execute an analyzer on a trace file given on the command-line.

    This function is useful as a driver for simple analysis scripts.  More
    advanced scripts will want to call process() instead."""
    import sys

    read_header = True
    if len(sys.argv) == 4 and sys.argv[1] == '--no-header':
        read_header = False
        del sys.argv[1]
    elif len(sys.argv) != 3:
        sys.stderr.write('usage: %s [--no-header] <trace-events> ' \
                         '<trace-file>\n' % sys.argv[0])
        sys.exit(1)

    events = read_events(open(sys.argv[1], 'r'))
    process(events, sys.argv[2], analyzer, read_header=read_header)

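# A standalone analysis script would typically import this module and call
# run() with its own Analyzer subclass (a sketch; MyAnalyzer is hypothetical
# and simpletrace must be importable, e.g. by running from this directory):
#
#   import simpletrace
#
#   class MyAnalyzer(simpletrace.Analyzer):
#       ...
#
#   simpletrace.run(MyAnalyzer())
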
if __name__ == '__main__':
    class Formatter(Analyzer):
        def __init__(self):
            self.last_timestamp = None

        def catchall(self, event, rec):
            timestamp = rec[1]
            if self.last_timestamp is None:
                self.last_timestamp = timestamp
            delta_ns = timestamp - self.last_timestamp
            self.last_timestamp = timestamp

            fields = [event.name, '%0.3f' % (delta_ns / 1000.0),
                      'pid=%d' % rec[2]]
            i = 3
            for type, name in event.args:
                if is_string(type):
                    fields.append('%s=%s' % (name, rec[i]))
                else:
                    fields.append('%s=0x%x' % (name, rec[i]))
                i += 1
            print ' '.join(fields)

    run(Formatter())