# block: add qemu-iotest for resize base during live commit
# [qemu/ar7.git] / scripts / simpletrace.py
# blob 1aa9460e49aee5158a0cc4b43c6892e7cb8a41ff
1 #!/usr/bin/env python
3 # Pretty-printer for simple trace backend binary trace files
5 # Copyright IBM, Corp. 2010
7 # This work is licensed under the terms of the GNU GPL, version 2. See
8 # the COPYING file in the top-level directory.
10 # For help see docs/tracing.txt
12 import struct
13 import re
14 import inspect
15 from tracetool import _read_events, Event
16 from tracetool.backend.simple import is_string
# Reserved pseudo event IDs and the file magic used by the simple trace
# backend's binary format (see docs/tracing.txt).
header_event_id = 0xffffffffffffffff  # event ID of the log file header record
header_magic = 0xf2b177cb0aa429b4     # magic number identifying a trace file
dropped_event_id = 0xfffffffffffffffe # event ID of a dropped-events record

# struct format strings: the log file header and each record's header
log_header_fmt = '=QQQ'
rec_header_fmt = '=QQII'
def read_header(fobj, hfmt):
    '''Read one header from fobj, unpacked with struct format hfmt.

    Returns the unpacked tuple, or None on a short read (end of file).'''
    expected = struct.calcsize(hfmt)
    raw = fobj.read(expected)
    if len(raw) == expected:
        return struct.unpack(hfmt, raw)
    return None
def get_record(edict, rechdr, fobj):
    """Deserialize a trace record from a file into a tuple (event_num, timestamp, pid, arg1, ..., arg6).

    edict -- mapping from event number to Event
    rechdr -- record header tuple from read_header(), or None at EOF
    fobj -- binary trace file positioned just after the record header
    Returns None when rechdr is None.
    """
    if rechdr is None:
        return None
    # (event_num, timestamp, pid); rechdr[2] is the record length, skipped.
    rec = (rechdr[0], rechdr[1], rechdr[3])
    if rechdr[0] == dropped_event_id:
        # A dropped-events record carries a single uint64 drop count.
        (value,) = struct.unpack('=Q', fobj.read(8))
        return rec + (value,)
    event = edict[rechdr[0]]
    # Renamed from "type"/"len" to avoid shadowing the builtins.
    for arg_type, _arg_name in event.args:
        if is_string(arg_type):
            # Strings are encoded as a uint32 length followed by the bytes.
            (length,) = struct.unpack('=L', fobj.read(4))
            rec = rec + (fobj.read(length),)
        else:
            # Every other argument type is stored as a 64-bit value.
            (value,) = struct.unpack('=Q', fobj.read(8))
            rec = rec + (value,)
    return rec
def read_record(edict, fobj):
    """Deserialize a trace record from a file into a tuple (event_num, timestamp, pid, arg1, ..., arg6)."""
    # Returns None once the record header can no longer be read (EOF).
    return get_record(edict, read_header(fobj, rec_header_fmt), fobj)
def read_trace_file(edict, fobj):
    """Deserialize trace records from a file, yielding record tuples (event_num, timestamp, pid, arg1, ..., arg6).

    Raises ValueError if the file header or log format version is not
    recognized.
    """
    header = read_header(fobj, log_header_fmt)
    ok = (header is not None and
          header[0] == header_event_id and
          header[1] == header_magic)
    if not ok:
        raise ValueError('Not a valid trace file!')

    log_version = header[2]
    if log_version not in [0, 2, 3]:
        raise ValueError('Unknown version of tracelog format!')
    if log_version != 3:
        raise ValueError('Log format %d not supported with this QEMU release!'
                         % log_version)

    # Yield records until read_record() hits end of file.
    rec = read_record(edict, fobj)
    while rec is not None:
        yield rec
        rec = read_record(edict, fobj)
class Analyzer(object):
    """Base class for trace file analyzers.

    Instances are handed to run() or process().  Processing starts with a
    call to begin(), continues with one callback per trace record, and
    finishes with end().

    A record whose event name matches a method on the analyzer is dispatched
    to that method; all other records go to catchall()."""

    def begin(self):
        """Hook invoked once, before any trace record is processed."""
        pass

    def catchall(self, event, rec):
        """Fallback hook for trace events without a dedicated handler method."""
        pass

    def end(self):
        """Hook invoked once, after every trace record has been processed."""
        pass
def process(events, log, analyzer):
    """Invoke an analyzer on each event in a log.

    events -- path to the trace-events file, or an already-parsed event list
    log -- path to the binary trace file, or an open binary file object
    analyzer -- Analyzer instance whose hooks receive the records
    """
    # Accept either filenames or already-opened/parsed objects; only close
    # file objects that were opened here (fixes a file handle leak).
    if isinstance(events, str):
        events_fobj = open(events, 'r')
        try:
            events = _read_events(events_fobj)
        finally:
            events_fobj.close()
    close_log = False
    if isinstance(log, str):
        log = open(log, 'rb')
        close_log = True

    dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)")
    edict = {dropped_event_id: dropped_event}

    for num, event in enumerate(events):
        edict[num] = event

    def build_fn(analyzer, event):
        # Select the handler and adapt the record tuple to its arity.
        if isinstance(event, str):
            return analyzer.catchall

        fn = getattr(analyzer, event.name, None)
        if fn is None:
            return analyzer.catchall

        event_argcount = len(event.args)
        fn_argcount = len(inspect.getargspec(fn)[0]) - 1
        if fn_argcount == event_argcount + 1:
            # Include timestamp as first argument.  rec[1:2] is already a
            # tuple, so concatenate it directly: the old ((rec[1:2],) + ...)
            # form passed (timestamp,) instead of timestamp.
            return lambda _, rec: fn(*(rec[1:2] + rec[3:3 + event_argcount]))
        elif fn_argcount == event_argcount + 2:
            # Include timestamp and pid
            return lambda _, rec: fn(*rec[1:3 + event_argcount])
        else:
            # Just arguments, no timestamp or pid
            return lambda _, rec: fn(*rec[3:3 + event_argcount])

    try:
        analyzer.begin()
        fn_cache = {}
        for rec in read_trace_file(edict, log):
            event_num = rec[0]
            event = edict[event_num]
            if event_num not in fn_cache:
                fn_cache[event_num] = build_fn(analyzer, event)
            fn_cache[event_num](event, rec)
        analyzer.end()
    finally:
        if close_log:
            log.close()
def run(analyzer):
    """Execute an analyzer on a trace file given on the command-line.

    This function is useful as a driver for simple analysis scripts.  More
    advanced scripts will want to call process() instead."""
    import sys

    argv = sys.argv
    if len(argv) != 3:
        sys.stderr.write('usage: %s <trace-events> <trace-file>\n' % argv[0])
        sys.exit(1)

    trace_events_path, trace_file_path = argv[1], argv[2]
    process(_read_events(open(trace_events_path, 'r')),
            trace_file_path, analyzer)
162 if __name__ == '__main__':
163 class Formatter(Analyzer):
164 def __init__(self):
165 self.last_timestamp = None
167 def catchall(self, event, rec):
168 timestamp = rec[1]
169 if self.last_timestamp is None:
170 self.last_timestamp = timestamp
171 delta_ns = timestamp - self.last_timestamp
172 self.last_timestamp = timestamp
174 fields = [event.name, '%0.3f' % (delta_ns / 1000.0),
175 'pid=%d' % rec[2]]
176 i = 3
177 for type, name in event.args:
178 if is_string(type):
179 fields.append('%s=%s' % (name, rec[i]))
180 else:
181 fields.append('%s=0x%x' % (name, rec[i]))
182 i += 1
183 print ' '.join(fields)
185 run(Formatter())