#!/usr/bin/env python3
#
# Pretty-printer for simple trace backend binary trace files
#
# Copyright IBM, Corp. 2010
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
# For help see docs/devel/tracing.rst

import sys
import struct
import inspect
import warnings
from tracetool import read_events, Event
from tracetool.backend.simple import is_string

__all__ = ['Analyzer', 'Analyzer2', 'process', 'run']

# This is the binary format that the QEMU "simple" trace backend
# emits. There is no specification documentation because the format is
# not guaranteed to be stable. Trace files must be parsed with the
# same trace-events-all file and the same simpletrace.py file that
# QEMU was built with.
header_event_id = 0xffffffffffffffff
header_magic = 0xf2b177cb0aa429b4
dropped_event_id = 0xfffffffffffffffe

record_type_mapping = 0

log_header_fmt = '=QQQ'   # event ID, magic, log version
rec_header_fmt = '=QQII'  # event ID, timestamp (ns), record length, pid
rec_header_fmt_len = struct.calcsize(rec_header_fmt)
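
# Illustrative sketch, not part of the original script: how the struct formats
# above map onto the very start of a trace file. The version number 4 is an
# assumption borrowed from read_trace_header() below, and _example_log_header
# is a hypothetical helper that exists only for demonstration.
def _example_log_header(version=4):
    """Return the 24-byte header a simple-backend trace file begins with."""
    # '=QQQ': reserved header event ID, magic number, log format version.
    return struct.pack(log_header_fmt, header_event_id, header_magic, version)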

class SimpleException(Exception):
    pass

def read_header(fobj, hfmt):
    '''Read a trace record header'''
    hlen = struct.calcsize(hfmt)
    hdr = fobj.read(hlen)
    if len(hdr) != hlen:
        raise SimpleException('Error reading header. Wrong filetype provided?')
    return struct.unpack(hfmt, hdr)

def get_mapping(fobj):
    (event_id, ) = struct.unpack('=Q', fobj.read(8))
    (length, ) = struct.unpack('=L', fobj.read(4))
    name = fobj.read(length).decode()

    return (event_id, name)

def read_record(fobj):
    """Deserialize a trace record from a file into a tuple (event_num, timestamp, pid, args)."""
    event_id, timestamp_ns, record_length, record_pid = read_header(fobj, rec_header_fmt)
    args_payload = fobj.read(record_length - rec_header_fmt_len)
    return (event_id, timestamp_ns, record_pid, args_payload)

def read_trace_header(fobj):
    """Read and verify trace file header"""
    _header_event_id, _header_magic, log_version = read_header(fobj, log_header_fmt)
    if _header_event_id != header_event_id:
        raise ValueError(f'Not a valid trace file, header id {_header_event_id} != {header_event_id}')
    if _header_magic != header_magic:
        raise ValueError(f'Not a valid trace file, header magic {_header_magic} != {header_magic}')

    if log_version not in [0, 2, 3, 4]:
        raise ValueError(f'Unknown version {log_version} of tracelog format!')
    if log_version != 4:
        raise ValueError(f'Log format {log_version} not supported with this QEMU release!')

def read_trace_records(events, fobj, read_header):
    """Deserialize trace records from a file, yielding record tuples (event, event_name, timestamp_ns, pid, arg1, ..., arg6).

    Args:
        events (list): list of Event objects, as produced by tracetool.read_events
        fobj (file): input file
        read_header (bool): whether headers were read from fobj

    """
    frameinfo = inspect.getframeinfo(inspect.currentframe())
    dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)",
                                frameinfo.lineno + 1, frameinfo.filename)

    event_mapping = {e.name: e for e in events}
    event_mapping["dropped"] = dropped_event
    event_id_to_name = {dropped_event_id: "dropped"}

    # If there is no header assume event ID mapping matches events list
    if not read_header:
        for event_id, event in enumerate(events):
            event_id_to_name[event_id] = event.name

    while True:
        t = fobj.read(8)
        if len(t) == 0:
            break

        (rectype, ) = struct.unpack('=Q', t)
        if rectype == record_type_mapping:
            event_id, event_name = get_mapping(fobj)
            event_id_to_name[event_id] = event_name
        else:
            event_id, timestamp_ns, pid, args_payload = read_record(fobj)
            event_name = event_id_to_name[event_id]

            try:
                event = event_mapping[event_name]
            except KeyError as e:
                raise SimpleException(
                    f'{e} event is logged but is not declared in the trace events '
                    'file, try using trace-events-all instead.'
                )

            offset = 0
            args = []
            for type, _ in event.args:
                if is_string(type):
                    # String argument: 4-byte length prefix followed by the bytes
                    (length,) = struct.unpack_from('=L', args_payload, offset=offset)
                    offset += 4
                    s = args_payload[offset:offset + length]
                    offset += length
                    args.append(s)
                else:
                    # All other arguments are stored as 64-bit values
                    (value,) = struct.unpack_from('=Q', args_payload, offset=offset)
                    offset += 8
                    args.append(value)

            yield (event_mapping[event_name], event_name, timestamp_ns, pid) + tuple(args)
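
# Illustrative sketch, not part of the original script: consuming
# read_trace_records() directly rather than through process()/run(). The
# file names are placeholders and _example_dump is a hypothetical helper.
def _example_dump(trace_events_path, trace_file_path):
    with open(trace_events_path, 'r') as events_fobj:
        events = read_events(events_fobj, trace_events_path)
    with open(trace_file_path, 'rb') as log_fobj:
        read_trace_header(log_fobj)  # consume and validate the file header
        records = read_trace_records(events, log_fobj, read_header=True)
        for event, event_name, timestamp_ns, pid, *args in records:
            print(timestamp_ns, pid, event_name, args)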

class Analyzer:
    """[Deprecated. Refer to Analyzer2 instead.]

    A trace file analyzer which processes trace records.

    An analyzer can be passed to run() or process(). The begin() method is
    invoked, then each trace record is processed, and finally the end() method
    is invoked. When Analyzer is used as a context-manager (using the `with`
    statement), begin() and end() are called automatically.

    If a method matching a trace event name exists, it is invoked to process
    that trace record. Otherwise the catchall() method is invoked.

    Example:
    The following method handles the runstate_set(int new_state) trace event::

      def runstate_set(self, new_state):
          ...

    The method can also take a timestamp argument before the trace event
    arguments::

      def runstate_set(self, timestamp, new_state):
          ...

    Timestamps have the uint64_t type and are in nanoseconds.

    The pid can be included in addition to the timestamp and is useful when
    dealing with traces from multiple processes::

      def runstate_set(self, timestamp, pid, new_state):
          ...
    """

    def begin(self):
        """Called at the start of the trace."""
        pass

    def catchall(self, event, rec):
        """Called if no specific method for processing a trace event has been found."""
        pass

    def _build_fn(self, event):
        fn = getattr(self, event.name, None)
        if fn is None:
            # Return early to avoid costly call to inspect.getfullargspec
            return self.catchall

        event_argcount = len(event.args)
        fn_argcount = len(inspect.getfullargspec(fn)[0]) - 1
        if fn_argcount == event_argcount + 1:
            # Include timestamp as first argument
            return lambda _, rec: fn(*(rec[1:2] + rec[3:3 + event_argcount]))
        elif fn_argcount == event_argcount + 2:
            # Include timestamp and pid
            return lambda _, rec: fn(*rec[1:3 + event_argcount])
        else:
            # Just arguments, no timestamp or pid
            return lambda _, rec: fn(*rec[3:3 + event_argcount])

    def _process_event(self, rec_args, *, event, event_id, timestamp_ns, pid, **kwargs):
        warnings.warn(
            "Use of deprecated Analyzer class. Refer to Analyzer2 instead.",
            DeprecationWarning)

        if not hasattr(self, '_fn_cache'):
            # NOTE: Cannot depend on downstream subclasses to have
            # super().__init__() because of legacy.
            self._fn_cache = {}

        rec = (event_id, timestamp_ns, pid, *rec_args)
        if event_id not in self._fn_cache:
            self._fn_cache[event_id] = self._build_fn(event)
        self._fn_cache[event_id](event, rec)

    def end(self):
        """Called at the end of the trace."""
        pass

    def __enter__(self):
        self.begin()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is None:
            self.end()
        return False
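
# Illustrative sketch, not part of the original script: a legacy Analyzer
# subclass in the style of the runstate_set() examples from the class
# docstring. _ExampleStateChangeCounter is a hypothetical name and it assumes
# the trace-events file declares runstate_set(int new_state).
class _ExampleStateChangeCounter(Analyzer):
    def begin(self):
        self.changes = 0

    def runstate_set(self, timestamp, new_state):
        # One extra leading parameter, so _build_fn() passes the timestamp.
        self.changes += 1

    def end(self):
        print(f'{self.changes} runstate changes')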

class Analyzer2(Analyzer):
    """A trace file analyzer which processes trace records.

    An analyzer can be passed to run() or process(). The begin() method is
    invoked, then each trace record is processed, and finally the end() method
    is invoked. When Analyzer is used as a context-manager (using the `with`
    statement), begin() and end() are called automatically.

    If a method matching a trace event name exists, it is invoked to process
    that trace record. Otherwise the catchall() method is invoked.

    The methods are called with a set of keyword-arguments. These can be ignored
    using `**kwargs` or defined like any keyword-argument.

    The following keyword-arguments are available, but make sure to have a
    **kwargs to allow for unmatched arguments in the future:
        event: Event object of current trace
        event_id: The id of the event in the current trace file
        timestamp_ns: The timestamp in nanoseconds of the trace
        pid: The process id recorded for the given trace

    Example:
    The following method handles the runstate_set(int new_state) trace event::

      def runstate_set(self, new_state, **kwargs):
          ...

    The method can also explicitly take a timestamp keyword-argument with the
    trace event arguments::

      def runstate_set(self, new_state, *, timestamp_ns, **kwargs):
          ...

    Timestamps have the uint64_t type and are in nanoseconds.

    The pid can be included in addition to the timestamp and is useful when
    dealing with traces from multiple processes::

      def runstate_set(self, new_state, *, timestamp_ns, pid, **kwargs):
          ...
    """

    def catchall(self, *rec_args, event, timestamp_ns, pid, event_id, **kwargs):
        """Called if no specific method for processing a trace event has been found."""
        pass

    def _process_event(self, rec_args, *, event, **kwargs):
        fn = getattr(self, event.name, self.catchall)
        fn(*rec_args, event=event, **kwargs)
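
# Illustrative sketch, not part of the original script: an Analyzer2 subclass
# using the keyword-argument style described above. _ExampleLatencyPrinter is
# a hypothetical name; it relies only on the keyword-arguments that
# _process_event() passes to every handler.
class _ExampleLatencyPrinter(Analyzer2):
    def begin(self):
        self.prev_ns = None

    def catchall(self, *rec_args, event, timestamp_ns, pid, **kwargs):
        delta_ns = 0 if self.prev_ns is None else timestamp_ns - self.prev_ns
        self.prev_ns = timestamp_ns
        print(f'{event.name} +{delta_ns}ns pid={pid} args={rec_args}')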

def process(events, log, analyzer, read_header=True):
    """Invoke an analyzer on each event in a log.

    Args:
        events (file-object or list or str): events list or file-like object or file path as str to read event data from
        log (file-object or str): file-like object or file path as str to read log data from
        analyzer (Analyzer): Instance of Analyzer to interpret the event data
        read_header (bool, optional): Whether to read header data from the log data. Defaults to True.
    """

    if isinstance(events, str):
        with open(events, 'r') as f:
            events_list = read_events(f, events)
    elif isinstance(events, list):
        # Treat as a list of events already produced by tracetool.read_events
        events_list = events
    else:
        # Treat as an already opened file-object
        events_list = read_events(events, events.name)

    if isinstance(log, str):
        with open(log, 'rb') as log_fobj:
            _process(events_list, log_fobj, analyzer, read_header)
    else:
        # Treat `log` as an already opened file-object. We will not close it,
        # as we do not own it.
        _process(events_list, log, analyzer, read_header)

def _process(events, log_fobj, analyzer, read_header=True):
    """Internal function for processing

    Args:
        events (list): list of events already produced by tracetool.read_events
        log_fobj (file): file-object to read log data from
        analyzer (Analyzer): the Analyzer to interpret the event data
        read_header (bool, optional): Whether to read header data from the log data. Defaults to True.
    """

    if read_header:
        read_trace_header(log_fobj)

    with analyzer:
        for event, event_id, timestamp_ns, record_pid, *rec_args in read_trace_records(events, log_fobj, read_header):
            analyzer._process_event(
                rec_args,
                event=event,
                event_id=event_id,
                timestamp_ns=timestamp_ns,
                pid=record_pid,
            )

def run(analyzer):
    """Execute an analyzer on a trace file given on the command-line.

    This function is useful as a driver for simple analysis scripts. More
    advanced scripts will want to call process() instead."""

    try:
        # NOTE: See built-in `argparse` module for a more robust cli interface
        *no_header, trace_event_path, trace_file_path = sys.argv[1:]
        assert no_header == [] or no_header == ['--no-header'], 'Invalid no-header argument'
    except (AssertionError, ValueError):
        raise SimpleException(f'usage: {sys.argv[0]} [--no-header] <trace-events> <trace-file>\n')

    with open(trace_event_path, 'r') as events_fobj, open(trace_file_path, 'rb') as log_fobj:
        process(events_fobj, log_fobj, analyzer, read_header=not no_header)

if __name__ == '__main__':
    class Formatter2(Analyzer2):
        def __init__(self):
            self.last_timestamp_ns = None

        def catchall(self, *rec_args, event, timestamp_ns, pid, event_id):
            if self.last_timestamp_ns is None:
                self.last_timestamp_ns = timestamp_ns
            delta_ns = timestamp_ns - self.last_timestamp_ns
            self.last_timestamp_ns = timestamp_ns

            fields = [
                f'{name}={r}' if is_string(type) else f'{name}=0x{r:x}'
                for r, (type, name) in zip(rec_args, event.args)
            ]
            print(f'{event.name} {delta_ns / 1000:0.3f} {pid=} ' + ' '.join(fields))

    try:
        run(Formatter2())
    except SimpleException as e:
        sys.stderr.write(str(e) + "\n")
        sys.exit(1)