#!/usr/bin/env python3
#
# Migration Stream Analyzer
#
# Copyright (c) 2015 Alexander Graf <agraf@suse.de>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.

import json
import os
import argparse
import collections
import struct
import sys


def mkdir_p(path):
    try:
        os.makedirs(path)
    except OSError:
        pass
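

# Thin wrapper around the migration stream.  All fixed-width integers in the
# stream are read big-endian; note that read8() interprets its single byte as
# signed, while the wider helpers are unsigned.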
class MigrationFile(object):
    def __init__(self, filename):
        self.filename = filename
        self.file = open(self.filename, "rb")

    def read64(self):
        return int.from_bytes(self.file.read(8), byteorder='big', signed=False)

    def read32(self):
        return int.from_bytes(self.file.read(4), byteorder='big', signed=False)

    def read16(self):
        return int.from_bytes(self.file.read(2), byteorder='big', signed=False)

    def read8(self):
        return int.from_bytes(self.file.read(1), byteorder='big', signed=True)

    def readstr(self, len = None):
        return self.readvar(len).decode('utf-8')

    def readvar(self, size = None):
        if size is None:
            size = self.read8()
        if size == 0:
            return ""
        value = self.file.read(size)
        if len(value) != size:
            raise Exception("Unexpected end of %s at 0x%x" % (self.filename, self.file.tell()))
        return value

    def tell(self):
        return self.file.tell()

    # The VMSD description is at the end of the file, after EOF. Look for
    # the last NULL byte, then for the beginning brace of JSON.
    def read_migration_debug_json(self):
        QEMU_VM_VMDESCRIPTION = 0x06

        # Remember the offset in the file when we started
        entrypos = self.file.tell()

        # Read the last 10MB
        self.file.seek(0, os.SEEK_END)
        endpos = self.file.tell()
        self.file.seek(max(-endpos, -10 * 1024 * 1024), os.SEEK_END)
        datapos = self.file.tell()
        data = self.file.read()
        # The full file read closed the file as well, reopen it
        self.file = open(self.filename, "rb")

        # Find the last NULL byte, then the first brace after that. This should
        # be the beginning of our JSON data.
        nulpos = data.rfind(b'\0')
        jsonpos = data.find(b'{', nulpos)
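
        # In the stream, the JSON blob is preceded by a one-byte section marker
        # (QEMU_VM_VMDESCRIPTION) and a 32-bit length, hence the five-byte
        # back-seek below to verify that the '{' we found really starts the
        # VM description.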
        # Check backwards from there and see whether we guessed right
        self.file.seek(datapos + jsonpos - 5, 0)
        if self.read8() != QEMU_VM_VMDESCRIPTION:
            raise Exception("No Debug Migration device found")

        jsonlen = self.read32()

        # Seek back to where we were at the beginning
        self.file.seek(entrypos, 0)

        # explicit decode() needed for Python 3.5 compatibility
        return data[jsonpos:jsonpos + jsonlen].decode("utf-8")

    def close(self):
        self.file.close()


class RamSection(object):
    RAM_SAVE_FLAG_COMPRESS = 0x02
    RAM_SAVE_FLAG_MEM_SIZE = 0x04
    RAM_SAVE_FLAG_PAGE = 0x08
    RAM_SAVE_FLAG_EOS = 0x10
    RAM_SAVE_FLAG_CONTINUE = 0x20
    RAM_SAVE_FLAG_XBZRLE = 0x40
    RAM_SAVE_FLAG_HOOK = 0x80
    RAM_SAVE_FLAG_COMPRESS_PAGE = 0x100
    RAM_SAVE_FLAG_MULTIFD_FLUSH = 0x200

    def __init__(self, file, version_id, ramargs, section_key):
        if version_id != 4:
            raise Exception("Unknown RAM version %d" % version_id)

        self.file = file
        self.section_key = section_key
        self.TARGET_PAGE_SIZE = ramargs['page_size']
        self.dump_memory = ramargs['dump_memory']
        self.write_memory = ramargs['write_memory']
        self.ignore_shared = ramargs['ignore_shared']
        self.sizeinfo = collections.OrderedDict()
        self.data = collections.OrderedDict()
        self.data['section sizes'] = self.sizeinfo
        self.name = ''
        if self.write_memory:
            self.files = { }
        if self.dump_memory:
            self.memory = collections.OrderedDict()
            self.data['memory'] = self.memory

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()

    def getDict(self):
        return self.data
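
    # Each page record starts with a 64-bit word: the page address, aligned to
    # TARGET_PAGE_SIZE, with the RAM_SAVE_FLAG_* bits packed into the low,
    # sub-page-size bits.  The loop below splits the two apart and dispatches
    # on the flags.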
    def read(self):
        # Read all RAM sections
        while True:
            addr = self.file.read64()
            flags = addr & (self.TARGET_PAGE_SIZE - 1)
            addr &= ~(self.TARGET_PAGE_SIZE - 1)

            if flags & self.RAM_SAVE_FLAG_MEM_SIZE:
                total_length = addr
                while total_length > 0:
                    namelen = self.file.read8()
                    self.name = self.file.readstr(len = namelen)
                    len = self.file.read64()
                    total_length -= len
                    self.sizeinfo[self.name] = '0x%016x' % len
                    if self.write_memory:
                        print(self.name)
                        mkdir_p('./' + os.path.dirname(self.name))
                        f = open('./' + self.name, "wb")
                        f.truncate(0)
                        f.truncate(len)
                        self.files[self.name] = f
                    if self.ignore_shared:
                        mr_addr = self.file.read64()
                flags &= ~self.RAM_SAVE_FLAG_MEM_SIZE

            if flags & self.RAM_SAVE_FLAG_COMPRESS:
                if flags & self.RAM_SAVE_FLAG_CONTINUE:
                    flags &= ~self.RAM_SAVE_FLAG_CONTINUE
                else:
                    self.name = self.file.readstr()
                fill_char = self.file.read8()
                # The page in question is filled with fill_char now
                if self.write_memory and fill_char != 0:
                    self.files[self.name].seek(addr, os.SEEK_SET)
                    # Write bytes, not str, to the binary output file (mask the
                    # signed byte returned by read8())
                    self.files[self.name].write(bytes([fill_char & 0xff]) * self.TARGET_PAGE_SIZE)
                if self.dump_memory:
                    self.memory['%s (0x%016x)' % (self.name, addr)] = 'Filled with 0x%02x' % fill_char
                flags &= ~self.RAM_SAVE_FLAG_COMPRESS
            elif flags & self.RAM_SAVE_FLAG_PAGE:
                if flags & self.RAM_SAVE_FLAG_CONTINUE:
                    flags &= ~self.RAM_SAVE_FLAG_CONTINUE
                else:
                    self.name = self.file.readstr()

                if self.write_memory or self.dump_memory:
                    data = self.file.readvar(size = self.TARGET_PAGE_SIZE)
                else: # Just skip RAM data
                    self.file.file.seek(self.TARGET_PAGE_SIZE, 1)

                if self.write_memory:
                    self.files[self.name].seek(addr, os.SEEK_SET)
                    self.files[self.name].write(data)
                if self.dump_memory:
                    # data is bytes, so iterating already yields integers
                    hexdata = " ".join("{0:02x}".format(c) for c in data)
                    self.memory['%s (0x%016x)' % (self.name, addr)] = hexdata

                flags &= ~self.RAM_SAVE_FLAG_PAGE
            elif flags & self.RAM_SAVE_FLAG_XBZRLE:
                raise Exception("XBZRLE RAM compression is not supported yet")
            elif flags & self.RAM_SAVE_FLAG_HOOK:
                raise Exception("RAM hooks don't make sense with files")
            if flags & self.RAM_SAVE_FLAG_MULTIFD_FLUSH:
                continue

            # End of RAM section
            if flags & self.RAM_SAVE_FLAG_EOS:
                break

            if flags != 0:
                raise Exception("Unknown RAM flags: %x" % flags)

    def __del__(self):
        if self.write_memory:
            for key in self.files:
                self.files[key].close()


class HTABSection(object):
    HASH_PTE_SIZE_64 = 16

    def __init__(self, file, version_id, device, section_key):
        if version_id != 1:
            raise Exception("Unknown HTAB version %d" % version_id)

        self.file = file
        self.section_key = section_key
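
    # The HTAB stream starts with a 32-bit header (the hash shift, or an
    # all-ones "no HPT" marker), followed by chunks of (index, n_valid,
    # n_invalid) with the valid PTEs appended; a chunk of all zeroes marks
    # the end.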
    def read(self):

        header = self.file.read32()

        if (header == 0xffffffff):
            # "no HPT" encoding: -1 on the wire, read back here as unsigned
            return

        if (header > 0):
            # First section, just the hash shift
            return

        # Read until end marker
        while True:
            index = self.file.read32()
            n_valid = self.file.read16()
            n_invalid = self.file.read16()

            if index == 0 and n_valid == 0 and n_invalid == 0:
                break

            self.file.readvar(n_valid * self.HASH_PTE_SIZE_64)

    def getDict(self):
        return ""


class S390StorageAttributes(object):
    STATTR_FLAG_EOS = 0x01
    STATTR_FLAG_MORE = 0x02
    STATTR_FLAG_ERROR = 0x04
    STATTR_FLAG_DONE = 0x08

    def __init__(self, file, version_id, device, section_key):
        if version_id != 0:
            raise Exception("Unknown storage_attributes version %d" % version_id)

        self.file = file
        self.section_key = section_key
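
    # Each record is a 64-bit word whose low 12 bits carry the STATTR_FLAG_*
    # bits; unless DONE or EOS is set, a 64-bit count follows and that many
    # attribute bytes are skipped.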
    def read(self):
        while True:
            addr_flags = self.file.read64()
            flags = addr_flags & 0xfff
            if (flags & (self.STATTR_FLAG_DONE | self.STATTR_FLAG_EOS)):
                return
            if (flags & self.STATTR_FLAG_ERROR):
                raise Exception("Error in migration stream")
            count = self.file.read64()
            self.file.readvar(count)

    def getDict(self):
        return ""


class ConfigurationSection(object):
    def __init__(self, file, desc):
        self.file = file
        self.desc = desc
        self.caps = []

    def parse_capabilities(self, vmsd_caps):
        if not vmsd_caps:
            return

        ncaps = vmsd_caps.data['caps_count'].data
        self.caps = vmsd_caps.data['capabilities']

        if type(self.caps) != list:
            self.caps = [self.caps]

        if len(self.caps) != ncaps:
            raise Exception("Number of capabilities doesn't match "
                            "caps_count field")

    def has_capability(self, cap):
        return any([str(c) == cap for c in self.caps])
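
    # Newer streams describe the configuration section in the JSON VMSD
    # description, so it can be parsed like any other VMSD section and the
    # migration capabilities (e.g. x-ignore-shared) extracted from it; older
    # streams only carry a name string, which is read and skipped.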
    def read(self):
        if self.desc:
            version_id = self.desc['version']
            section = VMSDSection(self.file, version_id, self.desc,
                                  'configuration')
            section.read()
            self.parse_capabilities(
                section.data.get("configuration/capabilities"))
        else:
            # backward compatibility for older streams that don't have
            # the configuration section in the json
            name_len = self.file.read32()
            name = self.file.readstr(len = name_len)


class VMSDFieldGeneric(object):
    def __init__(self, desc, file):
        self.file = file
        self.desc = desc
        self.data = ""

    def __repr__(self):
        return str(self.__str__())

    def __str__(self):
        return " ".join("{0:02x}".format(c) for c in self.data)

    def getDict(self):
        return self.__str__()

    def read(self):
        size = int(self.desc['size'])
        self.data = self.file.readvar(size)
        return self.data


class VMSDFieldCap(object):
    def __init__(self, desc, file):
        self.file = file
        self.desc = desc
        self.data = ""

    def __repr__(self):
        return self.data

    def __str__(self):
        return self.data

    def read(self):
        len = self.file.read8()
        self.data = self.file.readstr(len)


class VMSDFieldInt(VMSDFieldGeneric):
    def __init__(self, desc, file):
        super(VMSDFieldInt, self).__init__(desc, file)
        self.size = int(desc['size'])
        self.format = '0x%%0%dx' % (self.size * 2)
        self.sdtype = '>i%d' % self.size
        self.udtype = '>u%d' % self.size

    def __repr__(self):
        if self.data < 0:
            return ('%s (%d)' % ((self.format % self.udata), self.data))
        else:
            return self.format % self.data

    def __str__(self):
        return self.__repr__()

    def getDict(self):
        return self.__str__()

    def read(self):
        super(VMSDFieldInt, self).read()
        self.sdata = int.from_bytes(self.data, byteorder='big', signed=True)
        self.udata = int.from_bytes(self.data, byteorder='big', signed=False)
        self.data = self.sdata
        return self.data


class VMSDFieldUInt(VMSDFieldInt):
    def __init__(self, desc, file):
        super(VMSDFieldUInt, self).__init__(desc, file)

    def read(self):
        super(VMSDFieldUInt, self).read()
        self.data = self.udata
        return self.data


class VMSDFieldIntLE(VMSDFieldInt):
    def __init__(self, desc, file):
        super(VMSDFieldIntLE, self).__init__(desc, file)
        self.dtype = '<i%d' % self.size


class VMSDFieldBool(VMSDFieldGeneric):
    def __init__(self, desc, file):
        super(VMSDFieldBool, self).__init__(desc, file)

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()

    def getDict(self):
        return self.data

    def read(self):
        super(VMSDFieldBool, self).read()
        if self.data[0] == 0:
            self.data = False
        else:
            self.data = True
        return self.data


class VMSDFieldStruct(VMSDFieldGeneric):
    QEMU_VM_SUBSECTION = 0x05

    def __init__(self, desc, file):
        super(VMSDFieldStruct, self).__init__(desc, file)
        self.data = collections.OrderedDict()

        # When we see compressed array elements, unfold them here
        new_fields = []
        for field in self.desc['struct']['fields']:
            if not 'array_len' in field:
                new_fields.append(field)
                continue
            array_len = field.pop('array_len')
            field['index'] = 0
            new_fields.append(field)
            for i in range(1, array_len):
                c = field.copy()
                c['index'] = i
                new_fields.append(c)

        self.desc['struct']['fields'] = new_fields

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()

    def read(self):
        for field in self.desc['struct']['fields']:
            try:
                reader = vmsd_field_readers[field['type']]
            except:
                reader = VMSDFieldGeneric

            field['data'] = reader(field, self.file)
            field['data'].read()

            if 'index' in field:
                if field['name'] not in self.data:
                    self.data[field['name']] = []
                a = self.data[field['name']]
                if len(a) != int(field['index']):
                    raise Exception("internal index of data field unmatched (%d/%d)" % (len(a), int(field['index'])))
                a.append(field['data'])
            else:
                self.data[field['name']] = field['data']

        if 'subsections' in self.desc['struct']:
            for subsection in self.desc['struct']['subsections']:
                if self.file.read8() != self.QEMU_VM_SUBSECTION:
                    raise Exception("Subsection %s not found at offset %x" % ( subsection['vmsd_name'], self.file.tell()))
                name = self.file.readstr()
                version_id = self.file.read32()
                self.data[name] = VMSDSection(self.file, version_id, subsection, (name, 0))
                self.data[name].read()
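
    # Convert the parsed values into plain dicts/lists/strings for JSON
    # output: try an OrderedDict first, then a sequence, then a getDict()
    # method, and fall back to the raw value.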
    def getDictItem(self, value):
        # Strings would fall into the array category, treat
        # them specially
        if value.__class__ is ''.__class__:
            return value

        try:
            return self.getDictOrderedDict(value)
        except:
            try:
                return self.getDictArray(value)
            except:
                try:
                    return value.getDict()
                except:
                    return value

    def getDictArray(self, array):
        r = []
        for value in array:
            r.append(self.getDictItem(value))
        return r

    def getDictOrderedDict(self, dict):
        r = collections.OrderedDict()
        for (key, value) in dict.items():
            r[key] = self.getDictItem(value)
        return r

    def getDict(self):
        return self.getDictOrderedDict(self.data)
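

# Maps the field type names that appear in the VMSD JSON description to the
# reader classes above; VMSDFieldStruct.read() falls back to VMSDFieldGeneric
# for any type not listed here.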
vmsd_field_readers = {
    "bool" : VMSDFieldBool,
    "int8" : VMSDFieldInt,
    "int16" : VMSDFieldInt,
    "int32" : VMSDFieldInt,
    "int32 equal" : VMSDFieldInt,
    "int32 le" : VMSDFieldIntLE,
    "int64" : VMSDFieldInt,
    "uint8" : VMSDFieldUInt,
    "uint16" : VMSDFieldUInt,
    "uint32" : VMSDFieldUInt,
    "uint32 equal" : VMSDFieldUInt,
    "uint64" : VMSDFieldUInt,
    "int64 equal" : VMSDFieldInt,
    "uint8 equal" : VMSDFieldInt,
    "uint16 equal" : VMSDFieldInt,
    "float64" : VMSDFieldGeneric,
    "timer" : VMSDFieldGeneric,
    "buffer" : VMSDFieldGeneric,
    "unused_buffer" : VMSDFieldGeneric,
    "bitmap" : VMSDFieldGeneric,
    "struct" : VMSDFieldStruct,
    "capability": VMSDFieldCap,
    "unknown" : VMSDFieldGeneric,
}


class VMSDSection(VMSDFieldStruct):
    def __init__(self, file, version_id, device, section_key):
        self.file = file
        self.data = ""
        self.vmsd_name = ""
        self.section_key = section_key
        desc = device
        if 'vmsd_name' in device:
            self.vmsd_name = device['vmsd_name']

        # A section really is nothing but a FieldStruct :)
        super(VMSDSection, self).__init__({ 'struct' : desc }, file)

###############################################################################

class MigrationDump(object):
    QEMU_VM_FILE_MAGIC    = 0x5145564d
    QEMU_VM_FILE_VERSION  = 0x00000003
    QEMU_VM_EOF           = 0x00
    QEMU_VM_SECTION_START = 0x01
    QEMU_VM_SECTION_PART  = 0x02
    QEMU_VM_SECTION_END   = 0x03
    QEMU_VM_SECTION_FULL  = 0x04
    QEMU_VM_SUBSECTION    = 0x05
    QEMU_VM_VMDESCRIPTION = 0x06
    QEMU_VM_CONFIGURATION = 0x07
    QEMU_VM_SECTION_FOOTER= 0x7e

    def __init__(self, filename):
        self.section_classes = {
            ( 'ram', 0 ) : [ RamSection, None ],
            ( 's390-storage_attributes', 0 ) : [ S390StorageAttributes, None],
            ( 'spapr/htab', 0) : ( HTABSection, None )
        }
        self.filename = filename
        self.vmsd_desc = None

    def read(self, desc_only = False, dump_memory = False, write_memory = False):
        # Read in the whole file
        file = MigrationFile(self.filename)

        # File magic
        data = file.read32()
        if data != self.QEMU_VM_FILE_MAGIC:
            raise Exception("Invalid file magic %x" % data)

        # Version (has to be v3)
        data = file.read32()
        if data != self.QEMU_VM_FILE_VERSION:
            raise Exception("Invalid version number %d" % data)

        self.load_vmsd_json(file)

        # Read sections
        self.sections = collections.OrderedDict()

        if desc_only:
            return

        ramargs = {}
        ramargs['page_size'] = self.vmsd_desc['page_size']
        ramargs['dump_memory'] = dump_memory
        ramargs['write_memory'] = write_memory
        ramargs['ignore_shared'] = False
        self.section_classes[('ram',0)][1] = ramargs
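
        # Stream body framing: SECTION_START/FULL records carry a section id,
        # name, instance id and version id and register a new section reader;
        # SECTION_PART/END records refer back to an already-registered id;
        # a SECTION_FOOTER repeats the id of the section just read as a
        # consistency check.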
        while True:
            section_type = file.read8()
            if section_type == self.QEMU_VM_EOF:
                break
            elif section_type == self.QEMU_VM_CONFIGURATION:
                config_desc = self.vmsd_desc.get('configuration')
                section = ConfigurationSection(file, config_desc)
                section.read()
                ramargs['ignore_shared'] = section.has_capability('x-ignore-shared')
            elif section_type == self.QEMU_VM_SECTION_START or section_type == self.QEMU_VM_SECTION_FULL:
                section_id = file.read32()
                name = file.readstr()
                instance_id = file.read32()
                version_id = file.read32()
                section_key = (name, instance_id)
                classdesc = self.section_classes[section_key]
                section = classdesc[0](file, version_id, classdesc[1], section_key)
                self.sections[section_id] = section
                section.read()
            elif section_type == self.QEMU_VM_SECTION_PART or section_type == self.QEMU_VM_SECTION_END:
                section_id = file.read32()
                self.sections[section_id].read()
            elif section_type == self.QEMU_VM_SECTION_FOOTER:
                read_section_id = file.read32()
                if read_section_id != section_id:
                    raise Exception("Mismatched section footer: %x vs %x" % (read_section_id, section_id))
            else:
                raise Exception("Unknown section type: %d" % section_type)
        file.close()
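
    # The JSON VM description read from the tail of the file registers a
    # VMSDSection reader for every device it lists, keyed by
    # (name, instance_id), alongside the hard-coded section classes above.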
    def load_vmsd_json(self, file):
        vmsd_json = file.read_migration_debug_json()
        self.vmsd_desc = json.loads(vmsd_json, object_pairs_hook=collections.OrderedDict)
        for device in self.vmsd_desc['devices']:
            key = (device['name'], device['instance_id'])
            value = ( VMSDSection, device )
            self.section_classes[key] = value

    def getDict(self):
        r = collections.OrderedDict()
        for (key, value) in self.sections.items():
            key = "%s (%d)" % ( value.section_key[0], key )
            r[key] = value.getDict()
        return r

###############################################################################

class JSONEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, VMSDFieldGeneric):
            return str(o)
        return json.JSONEncoder.default(self, o)

parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help='migration dump to read from', required=True)
parser.add_argument("-m", "--memory", help='dump RAM contents as well', action='store_true')
parser.add_argument("-d", "--dump", help='what to dump ("state" or "desc")', default='state')
parser.add_argument("-x", "--extract", help='extract contents into individual files', action='store_true')
args = parser.parse_args()

jsonenc = JSONEncoder(indent=4, separators=(',', ': '))
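
# -x/--extract writes desc.json, state.json and one file per RAM region (named
# after the region) into the current directory; -d state / -d desc print the
# corresponding JSON to stdout instead.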
if args.extract:
    dump = MigrationDump(args.file)

    dump.read(desc_only = True)
    print("desc.json")
    f = open("desc.json", "w")
    f.truncate()
    f.write(jsonenc.encode(dump.vmsd_desc))
    f.close()

    dump.read(write_memory = True)
    dict = dump.getDict()
    print("state.json")
    f = open("state.json", "w")
    f.truncate()
    f.write(jsonenc.encode(dict))
    f.close()
elif args.dump == "state":
    dump = MigrationDump(args.file)
    dump.read(dump_memory = args.memory)
    dict = dump.getDict()
    print(jsonenc.encode(dict))
elif args.dump == "desc":
    dump = MigrationDump(args.file)
    dump.read(desc_only = True)
    print(jsonenc.encode(dump.vmsd_desc))
else:
    raise Exception("Please specify either -x, -d state or -d desc")