#!/usr/bin/env python3
#
# Migration Stream Analyzer
#
# Copyright (c) 2015 Alexander Graf <agraf@suse.de>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.

import json
import os
import argparse
import collections
import struct
import sys
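
# Stand-alone analyzer for a saved QEMU migration stream: it decodes the
# binary stream (configuration, device sections, RAM pages and the trailing
# JSON VMSD description) and prints the result as JSON.  Example invocations
# are listed above the argument parser at the bottom of the file.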

def mkdir_p(path):
    try:
        os.makedirs(path)
    except OSError:
        pass
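
# Thin wrapper around the migration stream file.  All fixed-width integers in
# the stream are big-endian; the readN() helpers below decode them accordingly.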
class MigrationFile(object):
    def __init__(self, filename):
        self.filename = filename
        self.file = open(self.filename, "rb")

    def read64(self):
        return int.from_bytes(self.file.read(8), byteorder='big', signed=True)

    def read32(self):
        return int.from_bytes(self.file.read(4), byteorder='big', signed=True)

    def read16(self):
        return int.from_bytes(self.file.read(2), byteorder='big', signed=True)

    def read8(self):
        return int.from_bytes(self.file.read(1), byteorder='big', signed=True)

    def readstr(self, len = None):
        return self.readvar(len).decode('utf-8')

    def readvar(self, size = None):
        if size is None:
            size = self.read8()
        if size == 0:
            return ""
        value = self.file.read(size)
        if len(value) != size:
            raise Exception("Unexpected end of %s at 0x%x" % (self.filename, self.file.tell()))
        return value

    def tell(self):
        return self.file.tell()
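
    # In the stream, the JSON blob is preceded by a one-byte
    # QEMU_VM_VMDESCRIPTION marker and a 32-bit length, which is why the code
    # below seeks back 5 bytes to verify its guess.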
    # The VMSD description is at the end of the file, after EOF. Look for
    # the last NULL byte, then for the beginning brace of JSON.
    def read_migration_debug_json(self):
        QEMU_VM_VMDESCRIPTION = 0x06

        # Remember the offset in the file when we started
        entrypos = self.file.tell()

        # Read the last 10MB
        self.file.seek(0, os.SEEK_END)
        endpos = self.file.tell()
        self.file.seek(max(-endpos, -10 * 1024 * 1024), os.SEEK_END)
        datapos = self.file.tell()
        data = self.file.read()
        # The full file read closed the file as well, reopen it
        self.file = open(self.filename, "rb")

        # Find the last NULL byte, then the first brace after that. This should
        # be the beginning of our JSON data.
        nulpos = data.rfind(b'\0')
        jsonpos = data.find(b'{', nulpos)

        # Check backwards from there and see whether we guessed right
        self.file.seek(datapos + jsonpos - 5, 0)
        if self.read8() != QEMU_VM_VMDESCRIPTION:
            raise Exception("No Debug Migration device found")

        jsonlen = self.read32()

        # Seek back to where we were at the beginning
        self.file.seek(entrypos, 0)

        # explicit decode() needed for Python 3.5 compatibility
        return data[jsonpos:jsonpos + jsonlen].decode("utf-8")

    def close(self):
        self.file.close()
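
# RAM is streamed page by page.  Each record starts with the guest address of
# the page; since pages are aligned, the low bits of that address double as
# the RAM_SAVE_FLAG_* bits defined below.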
class RamSection(object):
    RAM_SAVE_FLAG_COMPRESS = 0x02
    RAM_SAVE_FLAG_MEM_SIZE = 0x04
    RAM_SAVE_FLAG_PAGE     = 0x08
    RAM_SAVE_FLAG_EOS      = 0x10
    RAM_SAVE_FLAG_CONTINUE = 0x20
    RAM_SAVE_FLAG_XBZRLE   = 0x40
    RAM_SAVE_FLAG_HOOK     = 0x80

    def __init__(self, file, version_id, ramargs, section_key):
        if version_id != 4:
            raise Exception("Unknown RAM version %d" % version_id)

        self.file = file
        self.section_key = section_key
        self.TARGET_PAGE_SIZE = ramargs['page_size']
        self.dump_memory = ramargs['dump_memory']
        self.write_memory = ramargs['write_memory']
        self.sizeinfo = collections.OrderedDict()
        self.data = collections.OrderedDict()
        self.data['section sizes'] = self.sizeinfo
        self.name = ''
        if self.write_memory:
            self.files = { }
        if self.dump_memory:
            self.memory = collections.OrderedDict()
            self.data['memory'] = self.memory

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()

    def getDict(self):
        return self.data

    def read(self):
        # Read all RAM sections
        while True:
            addr = self.file.read64()
            flags = addr & (self.TARGET_PAGE_SIZE - 1)
            addr &= ~(self.TARGET_PAGE_SIZE - 1)

            if flags & self.RAM_SAVE_FLAG_MEM_SIZE:
                while True:
                    namelen = self.file.read8()
                    # We assume that no RAM chunk is big enough to ever
                    # hit the first byte of the address, so when we see
                    # a zero here we know it has to be an address, not the
                    # length of the next block.
                    if namelen == 0:
                        self.file.file.seek(-1, 1)
                        break
                    self.name = self.file.readstr(len = namelen)
                    len = self.file.read64()
                    self.sizeinfo[self.name] = '0x%016x' % len
                    if self.write_memory:
                        print(self.name)
                        mkdir_p('./' + os.path.dirname(self.name))
                        f = open('./' + self.name, "wb")
                        f.truncate(0)
                        f.truncate(len)
                        self.files[self.name] = f
                flags &= ~self.RAM_SAVE_FLAG_MEM_SIZE

            if flags & self.RAM_SAVE_FLAG_COMPRESS:
                if flags & self.RAM_SAVE_FLAG_CONTINUE:
                    flags &= ~self.RAM_SAVE_FLAG_CONTINUE
                else:
                    self.name = self.file.readstr()
                fill_char = self.file.read8()
                # The page in question is filled with fill_char now
                if self.write_memory and fill_char != 0:
                    self.files[self.name].seek(addr, os.SEEK_SET)
                    # the backing file is binary, so write bytes, not str
                    self.files[self.name].write(bytes([fill_char & 0xff]) * self.TARGET_PAGE_SIZE)
                if self.dump_memory:
                    self.memory['%s (0x%016x)' % (self.name, addr)] = 'Filled with 0x%02x' % fill_char
                flags &= ~self.RAM_SAVE_FLAG_COMPRESS
            elif flags & self.RAM_SAVE_FLAG_PAGE:
                if flags & self.RAM_SAVE_FLAG_CONTINUE:
                    flags &= ~self.RAM_SAVE_FLAG_CONTINUE
                else:
                    self.name = self.file.readstr()

                if self.write_memory or self.dump_memory:
                    data = self.file.readvar(size = self.TARGET_PAGE_SIZE)
                else: # Just skip RAM data
                    self.file.file.seek(self.TARGET_PAGE_SIZE, 1)

                if self.write_memory:
                    self.files[self.name].seek(addr, os.SEEK_SET)
                    self.files[self.name].write(data)
                if self.dump_memory:
                    # iterating over bytes yields ints in Python 3
                    hexdata = " ".join("{0:02x}".format(c) for c in data)
                    self.memory['%s (0x%016x)' % (self.name, addr)] = hexdata

                flags &= ~self.RAM_SAVE_FLAG_PAGE
            elif flags & self.RAM_SAVE_FLAG_XBZRLE:
                raise Exception("XBZRLE RAM compression is not supported yet")
            elif flags & self.RAM_SAVE_FLAG_HOOK:
                raise Exception("RAM hooks don't make sense with files")

            # End of RAM section
            if flags & self.RAM_SAVE_FLAG_EOS:
                break

            if flags != 0:
                raise Exception("Unknown RAM flags: %x" % flags)

    def __del__(self):
        if self.write_memory:
            for key in self.files:
                self.files[key].close()
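
# The sPAPR hash page table is streamed in chunks; this reader only walks the
# chunks to keep the stream position in sync and discards the PTE data.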
class HTABSection(object):
    HASH_PTE_SIZE_64 = 16

    def __init__(self, file, version_id, device, section_key):
        if version_id != 1:
            raise Exception("Unknown HTAB version %d" % version_id)

        self.file = file
        self.section_key = section_key

    def read(self):

        header = self.file.read32()

        if (header == -1):
            # "no HPT" encoding
            return

        if (header > 0):
            # First section, just the hash shift
            return

        # Read until end marker
        while True:
            index = self.file.read32()
            n_valid = self.file.read16()
            n_invalid = self.file.read16()

            if index == 0 and n_valid == 0 and n_invalid == 0:
                break

            self.file.readvar(n_valid * self.HASH_PTE_SIZE_64)

    def getDict(self):
        return ""
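
# The configuration section carries a length-prefixed string (the machine
# type name); it is read here only to skip past it.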
class ConfigurationSection(object):
    def __init__(self, file):
        self.file = file

    def read(self):
        name_len = self.file.read32()
        name = self.file.readstr(len = name_len)
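
# One reader class per VMSD field type (see vmsd_field_readers below).  Each
# reader consumes the field's bytes from the stream and exposes a
# JSON-friendly value via getDict().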
class VMSDFieldGeneric(object):
    def __init__(self, desc, file):
        self.file = file
        self.desc = desc
        self.data = ""

    def __repr__(self):
        return str(self.__str__())

    def __str__(self):
        return " ".join("{0:02x}".format(c) for c in self.data)

    def getDict(self):
        return self.__str__()

    def read(self):
        size = int(self.desc['size'])
        self.data = self.file.readvar(size)
        return self.data

class VMSDFieldInt(VMSDFieldGeneric):
    def __init__(self, desc, file):
        super(VMSDFieldInt, self).__init__(desc, file)
        self.size = int(desc['size'])
        self.format = '0x%%0%dx' % (self.size * 2)
        self.sdtype = '>i%d' % self.size
        self.udtype = '>u%d' % self.size

    def __repr__(self):
        if self.data < 0:
            return ('%s (%d)' % ((self.format % self.udata), self.data))
        else:
            return self.format % self.data

    def __str__(self):
        return self.__repr__()

    def getDict(self):
        return self.__str__()

    def read(self):
        super(VMSDFieldInt, self).read()
        self.sdata = int.from_bytes(self.data, byteorder='big', signed=True)
        self.udata = int.from_bytes(self.data, byteorder='big', signed=False)
        self.data = self.sdata
        return self.data

class VMSDFieldUInt(VMSDFieldInt):
    def __init__(self, desc, file):
        super(VMSDFieldUInt, self).__init__(desc, file)

    def read(self):
        super(VMSDFieldUInt, self).read()
        self.data = self.udata
        return self.data

class VMSDFieldIntLE(VMSDFieldInt):
    def __init__(self, desc, file):
        super(VMSDFieldIntLE, self).__init__(desc, file)
        self.dtype = '<i%d' % self.size

class VMSDFieldBool(VMSDFieldGeneric):
    def __init__(self, desc, file):
        super(VMSDFieldBool, self).__init__(desc, file)

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()

    def getDict(self):
        return self.data

    def read(self):
        super(VMSDFieldBool, self).read()
        if self.data[0] == 0:
            self.data = False
        else:
            self.data = True
        return self.data
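
# A "struct" field is a nested VMSD: compressed array descriptions are
# unfolded into one field entry per index, each field is read with its
# matching reader, and any subsections listed in the description are parsed
# afterwards.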
class VMSDFieldStruct(VMSDFieldGeneric):
    QEMU_VM_SUBSECTION = 0x05

    def __init__(self, desc, file):
        super(VMSDFieldStruct, self).__init__(desc, file)
        self.data = collections.OrderedDict()

        # When we see compressed array elements, unfold them here
        new_fields = []
        for field in self.desc['struct']['fields']:
            if not 'array_len' in field:
                new_fields.append(field)
                continue
            array_len = field.pop('array_len')
            field['index'] = 0
            new_fields.append(field)
            for i in range(1, array_len):
                c = field.copy()
                c['index'] = i
                new_fields.append(c)

        self.desc['struct']['fields'] = new_fields

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()

    def read(self):
        for field in self.desc['struct']['fields']:
            try:
                reader = vmsd_field_readers[field['type']]
            except:
                reader = VMSDFieldGeneric

            field['data'] = reader(field, self.file)
            field['data'].read()

            if 'index' in field:
                if field['name'] not in self.data:
                    self.data[field['name']] = []
                a = self.data[field['name']]
                if len(a) != int(field['index']):
                    raise Exception("internal index of data field unmatched (%d/%d)" % (len(a), int(field['index'])))
                a.append(field['data'])
            else:
                self.data[field['name']] = field['data']

        if 'subsections' in self.desc['struct']:
            for subsection in self.desc['struct']['subsections']:
                if self.file.read8() != self.QEMU_VM_SUBSECTION:
                    raise Exception("Subsection %s not found at offset %x" % ( subsection['vmsd_name'], self.file.tell()))
                name = self.file.readstr()
                version_id = self.file.read32()
                self.data[name] = VMSDSection(self.file, version_id, subsection, (name, 0))
                self.data[name].read()

    def getDictItem(self, value):
        # Strings would fall into the array category, treat
        # them specially
        if value.__class__ is ''.__class__:
            return value

        try:
            return self.getDictOrderedDict(value)
        except:
            try:
                return self.getDictArray(value)
            except:
                try:
                    return value.getDict()
                except:
                    return value

    def getDictArray(self, array):
        r = []
        for value in array:
            r.append(self.getDictItem(value))
        return r

    def getDictOrderedDict(self, dict):
        r = collections.OrderedDict()
        for (key, value) in dict.items():
            r[key] = self.getDictItem(value)
        return r

    def getDict(self):
        return self.getDictOrderedDict(self.data)
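
# Maps the "type" strings found in the VMSD JSON description to reader
# classes; types without a specific reader fall back to VMSDFieldGeneric.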
vmsd_field_readers = {
    "bool" : VMSDFieldBool,
    "int8" : VMSDFieldInt,
    "int16" : VMSDFieldInt,
    "int32" : VMSDFieldInt,
    "int32 equal" : VMSDFieldInt,
    "int32 le" : VMSDFieldIntLE,
    "int64" : VMSDFieldInt,
    "uint8" : VMSDFieldUInt,
    "uint16" : VMSDFieldUInt,
    "uint32" : VMSDFieldUInt,
    "uint32 equal" : VMSDFieldUInt,
    "uint64" : VMSDFieldUInt,
    "int64 equal" : VMSDFieldInt,
    "uint8 equal" : VMSDFieldInt,
    "uint16 equal" : VMSDFieldInt,
    "float64" : VMSDFieldGeneric,
    "timer" : VMSDFieldGeneric,
    "buffer" : VMSDFieldGeneric,
    "unused_buffer" : VMSDFieldGeneric,
    "bitmap" : VMSDFieldGeneric,
    "struct" : VMSDFieldStruct,
    "unknown" : VMSDFieldGeneric,
}

class VMSDSection(VMSDFieldStruct):
    def __init__(self, file, version_id, device, section_key):
        self.file = file
        self.data = ""
        self.vmsd_name = ""
        self.section_key = section_key
        desc = device
        if 'vmsd_name' in device:
            self.vmsd_name = device['vmsd_name']

        # A section really is nothing but a FieldStruct :)
        super(VMSDSection, self).__init__({ 'struct' : desc }, file)

###############################################################################
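
# Top-level driver: a migration stream starts with the QEMU_VM_FILE_MAGIC and
# version words, followed by sections introduced by the QEMU_VM_* marker bytes
# below and terminated by QEMU_VM_EOF; the JSON VMSD description used to
# decode device sections sits at the very end of the file.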
class MigrationDump(object):
    QEMU_VM_FILE_MAGIC     = 0x5145564d
    QEMU_VM_FILE_VERSION   = 0x00000003
    QEMU_VM_EOF            = 0x00
    QEMU_VM_SECTION_START  = 0x01
    QEMU_VM_SECTION_PART   = 0x02
    QEMU_VM_SECTION_END    = 0x03
    QEMU_VM_SECTION_FULL   = 0x04
    QEMU_VM_SUBSECTION     = 0x05
    QEMU_VM_VMDESCRIPTION  = 0x06
    QEMU_VM_CONFIGURATION  = 0x07
    QEMU_VM_SECTION_FOOTER = 0x7e

    def __init__(self, filename):
        self.section_classes = { ( 'ram', 0 ) : [ RamSection, None ],
                                 ( 'spapr/htab', 0) : ( HTABSection, None ) }
        self.filename = filename
        self.vmsd_desc = None
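
    # Walk the stream: verify magic and version, load the JSON VMSD
    # description, then dispatch each section to the reader registered for
    # its (name, instance_id) key.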
    def read(self, desc_only = False, dump_memory = False, write_memory = False):
        # Read in the whole file
        file = MigrationFile(self.filename)

        # File magic
        data = file.read32()
        if data != self.QEMU_VM_FILE_MAGIC:
            raise Exception("Invalid file magic %x" % data)

        # Version (has to be v3)
        data = file.read32()
        if data != self.QEMU_VM_FILE_VERSION:
            raise Exception("Invalid version number %d" % data)

        self.load_vmsd_json(file)

        # Read sections
        self.sections = collections.OrderedDict()

        if desc_only:
            return

        ramargs = {}
        ramargs['page_size'] = self.vmsd_desc['page_size']
        ramargs['dump_memory'] = dump_memory
        ramargs['write_memory'] = write_memory
        self.section_classes[('ram',0)][1] = ramargs

        while True:
            section_type = file.read8()
            if section_type == self.QEMU_VM_EOF:
                break
            elif section_type == self.QEMU_VM_CONFIGURATION:
                section = ConfigurationSection(file)
                section.read()
            elif section_type == self.QEMU_VM_SECTION_START or section_type == self.QEMU_VM_SECTION_FULL:
                section_id = file.read32()
                name = file.readstr()
                instance_id = file.read32()
                version_id = file.read32()
                section_key = (name, instance_id)
                classdesc = self.section_classes[section_key]
                section = classdesc[0](file, version_id, classdesc[1], section_key)
                self.sections[section_id] = section
                section.read()
            elif section_type == self.QEMU_VM_SECTION_PART or section_type == self.QEMU_VM_SECTION_END:
                section_id = file.read32()
                self.sections[section_id].read()
            elif section_type == self.QEMU_VM_SECTION_FOOTER:
                read_section_id = file.read32()
                if read_section_id != section_id:
                    raise Exception("Mismatched section footer: %x vs %x" % (read_section_id, section_id))
            else:
                raise Exception("Unknown section type: %d" % section_type)
        file.close()

    def load_vmsd_json(self, file):
        vmsd_json = file.read_migration_debug_json()
        self.vmsd_desc = json.loads(vmsd_json, object_pairs_hook=collections.OrderedDict)
        for device in self.vmsd_desc['devices']:
            key = (device['name'], device['instance_id'])
            value = ( VMSDSection, device )
            self.section_classes[key] = value

    def getDict(self):
        r = collections.OrderedDict()
        for (key, value) in self.sections.items():
            key = "%s (%d)" % ( value.section_key[0], key )
            r[key] = value.getDict()
        return r

###############################################################################

class JSONEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, VMSDFieldGeneric):
            return str(o)
        return json.JSONEncoder.default(self, o)
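
# Example invocations (illustrative; "migfile" stands for a saved migration
# stream):
#   analyze-migration.py -f migfile              print the device state as JSON
#   analyze-migration.py -f migfile -m           also dump RAM page contents
#   analyze-migration.py -f migfile -d desc      print only the VMSD description
#   analyze-migration.py -f migfile -x           extract desc.json, state.json and the RAM blocks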
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help='migration dump to read from', required=True)
parser.add_argument("-m", "--memory", help='dump RAM contents as well', action='store_true')
parser.add_argument("-d", "--dump", help='what to dump ("state" or "desc")', default='state')
parser.add_argument("-x", "--extract", help='extract contents into individual files', action='store_true')
args = parser.parse_args()

jsonenc = JSONEncoder(indent=4, separators=(',', ': '))

if args.extract:
    dump = MigrationDump(args.file)

    dump.read(desc_only = True)
    print("desc.json")
    # the encoder produces str, so the JSON files are opened in text mode
    f = open("desc.json", "w")
    f.truncate()
    f.write(jsonenc.encode(dump.vmsd_desc))
    f.close()

    dump.read(write_memory = True)
    dict = dump.getDict()
    print("state.json")
    f = open("state.json", "w")
    f.truncate()
    f.write(jsonenc.encode(dict))
    f.close()
elif args.dump == "state":
    dump = MigrationDump(args.file)
    dump.read(dump_memory = args.memory)
    dict = dump.getDict()
    print(jsonenc.encode(dict))
elif args.dump == "desc":
    dump = MigrationDump(args.file)
    dump.read(desc_only = True)
    print(jsonenc.encode(dump.vmsd_desc))
else:
    raise Exception("Please specify either -x, -d state or -d desc")