3 # Pretty-printer for simple trace backend binary trace files
5 # Copyright IBM, Corp. 2010
7 # This work is licensed under the terms of the GNU GPL, version 2. See
8 # the COPYING file in the top-level directory.
10 # For help see docs/tracing.txt
15 from tracetool
import read_events
, Event
16 from tracetool
.backend
.simple
import is_string
# Sentinel event ids used by the simple trace backend's binary format.
header_event_id = 0xffffffffffffffff   # pseudo event id marking the file header record
header_magic = 0xf2b177cb0aa429b4      # magic number verified by read_trace_header()
dropped_event_id = 0xfffffffffffffffe  # pseudo event id for "events were dropped" records

# Record type discriminator read before each record body.
record_type_mapping = 0                # record maps an event id to its event name

# struct format strings (native byte order, standard sizes).
log_header_fmt = '=QQQ'                # file header: event id, magic, log version
rec_header_fmt = '=QQII'               # record header: event id, timestamp (ns),
                                       # record length (presumably — TODO confirm), pid
def read_header(fobj, hfmt):
    '''Read a trace record header.

    fobj -- binary file object positioned at the start of a header
    hfmt -- struct format string describing the header layout

    Returns the unpacked header tuple, or None if fobj does not contain
    enough bytes for a complete header (e.g. end of file).
    '''
    hlen = struct.calcsize(hfmt)
    # The visible original unpacked `hdr` without ever reading it from
    # fobj (NameError); read exactly one header's worth of bytes here.
    hdr = fobj.read(hlen)
    if len(hdr) != hlen:
        return None
    return struct.unpack(hfmt, hdr)
def get_record(edict, idtoname, rechdr, fobj):
    """Deserialize a trace record from a file into a tuple
       (name, timestamp, pid, arg1, ..., arg6).

    edict    -- mapping of event name -> Event declaration
    idtoname -- mapping of event id -> event name (built from mapping records)
    rechdr   -- unpacked record header tuple, or None at end of file
    fobj     -- binary file object positioned at the record payload

    Returns None when rechdr is None (end of file).
    """
    if rechdr is None:
        return None
    if rechdr[0] != dropped_event_id:
        event_id = rechdr[0]
        name = idtoname[event_id]
        # rechdr[1] is the timestamp in ns, rechdr[3] the pid.
        rec = (name, rechdr[1], rechdr[3])
        try:
            event = edict[name]
        except KeyError as e:
            import sys
            sys.stderr.write('%s event is logged but is not declared ' \
                             'in the trace events file, try using ' \
                             'trace-events-all instead.\n' % str(e))
            sys.exit(1)

        # Decode each declared argument: strings are length-prefixed,
        # everything else is a raw uint64.
        for arg_type, arg_name in event.args:
            if is_string(arg_type):
                # Renamed from `len` to avoid shadowing the builtin.
                (length,) = struct.unpack('=L', fobj.read(4))
                rec = rec + (fobj.read(length),)
            else:
                (value,) = struct.unpack('=Q', fobj.read(8))
                rec = rec + (value,)
    else:
        # Dropped-event pseudo record: single uint64 payload (drop count).
        rec = ("dropped", rechdr[1], rechdr[3])
        (value,) = struct.unpack('=Q', fobj.read(8))
        rec = rec + (value,)
    return rec
def get_mapping(fobj):
    """Read an event id -> event name mapping record from fobj.

    Layout: uint64 event id, uint32 name length, then the name bytes.
    Returns the (event_id, name) pair.
    """
    (event_id, ) = struct.unpack('=Q', fobj.read(8))
    # Renamed from `len` (shadowed the builtin); the visible original also
    # returned `name` without ever reading it from fobj.
    (namelen, ) = struct.unpack('=L', fobj.read(4))
    name = fobj.read(namelen)

    return (event_id, name)
def read_record(edict, idtoname, fobj):
    """Deserialize a trace record from a file into a tuple (event_num, timestamp, pid, arg1, ..., arg6)."""
    # Pull the fixed-size record header first, then let get_record()
    # consume the per-event payload that follows it.
    return get_record(edict, idtoname, read_header(fobj, rec_header_fmt), fobj)
def read_trace_header(fobj):
    """Read and verify trace file header.

    Raises ValueError if fobj does not start with a well-formed simple
    trace file header of a supported log version.
    """
    header = read_header(fobj, log_header_fmt)
    # The visible original raised unconditionally here; guard on the
    # short-read/EOF case that read_header() signals with None.
    if header is None:
        raise ValueError('Not a valid trace file!')
    if header[0] != header_event_id:
        raise ValueError('Not a valid trace file, header id %d != %d' %
                         (header[0], header_event_id))
    if header[1] != header_magic:
        raise ValueError('Not a valid trace file, header magic %d != %d' %
                         (header[1], header_magic))

    log_version = header[2]
    if log_version not in [0, 2, 3, 4]:
        raise ValueError('Unknown version of tracelog format!')
    # Older known versions are recognized but not parseable by this
    # reader; only the current format (version 4) is supported.
    if log_version != 4:
        raise ValueError('Log format %d not supported with this QEMU release!'
                         % log_version)
def read_trace_records(edict, fobj):
    """Deserialize trace records from a file, yielding record tuples (event_num, timestamp, pid, arg1, .., arg6).

    edict -- mapping of event name -> Event declaration
    fobj  -- binary file object positioned just past the trace file header
    """
    # Seed with the built-in pseudo event; real ids are learned from
    # mapping records as they appear in the stream.
    idtoname = {
        dropped_event_id: "dropped"
    }
    while True:
        # Each record starts with a uint64 record type; EOF ends the stream.
        t = fobj.read(8)
        if len(t) == 0:
            break

        (rectype, ) = struct.unpack('=Q', t)
        if rectype == record_type_mapping:
            event_id, name = get_mapping(fobj)
            idtoname[event_id] = name
        else:
            rec = read_record(edict, idtoname, fobj)

            yield rec
class Analyzer(object):
    """A trace file analyzer which processes trace records.

    An analyzer can be passed to run() or process().  The begin() method is
    invoked, then each trace record is processed, and finally the end() method
    is invoked.

    If a method matching a trace event name exists, it is invoked to process
    that trace record.  Otherwise the catchall() method is invoked.

    Example:
    The following method handles the runstate_set(int new_state) trace event::

      def runstate_set(self, new_state):
          ...

    The method can also take a timestamp argument before the trace event
    arguments::

      def runstate_set(self, timestamp, new_state):
          ...

    Timestamps have the uint64_t type and are in nanoseconds.

    The pid can be included in addition to the timestamp and is useful when
    dealing with traces from multiple processes::

      def runstate_set(self, timestamp, pid, new_state):
          ...
    """

    def begin(self):
        """Called at the start of the trace."""
        pass

    def catchall(self, event, rec):
        """Called if no specific method for processing a trace event has been found."""
        pass

    def end(self):
        """Called at the end of the trace."""
        pass
def process(events, log, analyzer, read_header=True):
    """Invoke an analyzer on each event in a log.

    events      -- path to a trace-events file, or an iterable of Event objects
    log         -- path to a binary trace file, or an open binary file object
    analyzer    -- Analyzer instance receiving the records
    read_header -- set False for trace files written without a file header
    """
    if isinstance(events, str):
        events = read_events(open(events, 'r'))
    if isinstance(log, str):
        log = open(log, 'rb')

    if read_header:
        read_trace_header(log)

    # Dropped-event records carry a single drop-count argument.
    dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)")
    edict = {"dropped": dropped_event}

    for event in events:
        edict[event.name] = event

    def build_fn(analyzer, event):
        # Select the analyzer method for this event and adapt its arity:
        # handlers may take (args), (timestamp, args) or (timestamp, pid, args).
        if isinstance(event, str):
            return analyzer.catchall

        fn = getattr(analyzer, event.name, None)
        if fn is None:
            return analyzer.catchall

        event_argcount = len(event.args)
        fn_argcount = len(inspect.getargspec(fn)[0]) - 1
        if fn_argcount == event_argcount + 1:
            # Include timestamp as first argument.  rec[1:2] is already a
            # tuple; the previous code wrapped it in another tuple, which
            # passed (timestamp,) to the handler instead of timestamp.
            return lambda _, rec: fn(*(rec[1:2] + rec[3:3 + event_argcount]))
        elif fn_argcount == event_argcount + 2:
            # Include timestamp and pid
            return lambda _, rec: fn(*rec[1:3 + event_argcount])
        else:
            # Just arguments, no timestamp or pid
            return lambda _, rec: fn(*rec[3:3 + event_argcount])

    analyzer.begin()
    fn_cache = {}
    for rec in read_trace_records(edict, log):
        event_num = rec[0]
        event = edict[event_num]
        # Arity inspection is relatively costly; do it once per event name.
        if event_num not in fn_cache:
            fn_cache[event_num] = build_fn(analyzer, event)
        fn_cache[event_num](event, rec)
    analyzer.end()
def run(analyzer):
    """Execute an analyzer on a trace file given on the command-line.

    This function is useful as a driver for simple analysis scripts.  More
    advanced scripts will want to call process() instead."""
    import sys

    read_header = True
    if len(sys.argv) == 4 and sys.argv[1] == '--no-header':
        read_header = False
        # Shift the flag out so the positional arguments below line up.
        del sys.argv[1]
    elif len(sys.argv) != 3:
        sys.stderr.write('usage: %s [--no-header] <trace-events> ' \
                         '<trace-file>\n' % sys.argv[0])
        sys.exit(1)

    events = read_events(open(sys.argv[1], 'r'))
    process(events, sys.argv[2], analyzer, read_header=read_header)
227 if __name__
== '__main__':
228 class Formatter(Analyzer
):
230 self
.last_timestamp
= None
232 def catchall(self
, event
, rec
):
234 if self
.last_timestamp
is None:
235 self
.last_timestamp
= timestamp
236 delta_ns
= timestamp
- self
.last_timestamp
237 self
.last_timestamp
= timestamp
239 fields
= [event
.name
, '%0.3f' % (delta_ns
/ 1000.0),
242 for type, name
in event
.args
:
244 fields
.append('%s=%s' % (name
, rec
[i
]))
246 fields
.append('%s=0x%x' % (name
, rec
[i
]))
248 print ' '.join(fields
)