#!/usr/bin/env python
#
# Migration Stream Analyzer
#
# Copyright (c) 2015 Alexander Graf <agraf@suse.de>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.

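# Typical invocations (see the argument parser at the bottom of this file):
#   analyze-migration.py -f <dump>            dump the device state as JSON
#   analyze-migration.py -f <dump> -d desc    dump the VMSD description as JSON
#   analyze-migration.py -f <dump> -x         extract desc.json, state.json and the RAM contents
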
from __future__ import print_function
import numpy as np
import json
import os
import argparse
import collections

def mkdir_p(path):
    try:
        os.makedirs(path)
    except OSError:
        pass

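# Thin wrapper around the migration stream: big-endian fixed-width reads
# plus helpers for length-prefixed strings and raw byte runs.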
class MigrationFile(object):
    def __init__(self, filename):
        self.filename = filename
        self.file = open(self.filename, "rb")

    def read64(self):
        return np.asscalar(np.fromfile(self.file, count=1, dtype='>i8')[0])

    def read32(self):
        return np.asscalar(np.fromfile(self.file, count=1, dtype='>i4')[0])

    def read16(self):
        return np.asscalar(np.fromfile(self.file, count=1, dtype='>i2')[0])

    def read8(self):
        return np.asscalar(np.fromfile(self.file, count=1, dtype='>i1')[0])

    def readstr(self, len = None):
        if len is None:
            len = self.read8()
        if len == 0:
            return ""
        return np.fromfile(self.file, count=1, dtype=('S%d' % len))[0]

    def readvar(self, size = None):
        if size is None:
            size = self.read8()
        if size == 0:
            return ""
        value = self.file.read(size)
        if len(value) != size:
            raise Exception("Unexpected end of %s at 0x%x" % (self.filename, self.file.tell()))
        return value

    def tell(self):
        return self.file.tell()

    # The VMSD description is at the end of the file, after EOF. Look for
    # the last NULL byte, then for the beginning brace of JSON.
    def read_migration_debug_json(self):
        QEMU_VM_VMDESCRIPTION = 0x06

        # Remember the offset in the file when we started
        entrypos = self.file.tell()

        # Read the last 10MB
        self.file.seek(0, os.SEEK_END)
        endpos = self.file.tell()
        self.file.seek(max(-endpos, -10 * 1024 * 1024), os.SEEK_END)
        datapos = self.file.tell()
        data = self.file.read()
        # The full file read closed the file as well, reopen it
        self.file = open(self.filename, "rb")

        # Find the last NULL byte, then the first brace after that. This should
        # be the beginning of our JSON data.
        nulpos = data.rfind("\0")
        jsonpos = data.find("{", nulpos)

        # Check backwards from there and see whether we guessed right
        self.file.seek(datapos + jsonpos - 5, 0)
        if self.read8() != QEMU_VM_VMDESCRIPTION:
            raise Exception("No Debug Migration device found")

        jsonlen = self.read32()

        # Seek back to where we were at the beginning
        self.file.seek(entrypos, 0)

        return data[jsonpos:jsonpos + jsonlen]

    def close(self):
        self.file.close()

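# Parser for the "ram" section. Each record is a 64-bit value whose low bits
# (below TARGET_PAGE_SIZE) carry flags and whose upper bits carry the page
# address; depending on the flags the record describes block sizes, a
# fill-pattern page or a full page of data.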
class RamSection(object):
    RAM_SAVE_FLAG_COMPRESS = 0x02
    RAM_SAVE_FLAG_MEM_SIZE = 0x04
    RAM_SAVE_FLAG_PAGE     = 0x08
    RAM_SAVE_FLAG_EOS      = 0x10
    RAM_SAVE_FLAG_CONTINUE = 0x20
    RAM_SAVE_FLAG_XBZRLE   = 0x40
    RAM_SAVE_FLAG_HOOK     = 0x80

    def __init__(self, file, version_id, ramargs, section_key):
        if version_id != 4:
            raise Exception("Unknown RAM version %d" % version_id)

        self.file = file
        self.section_key = section_key
        self.TARGET_PAGE_SIZE = ramargs['page_size']
        self.dump_memory = ramargs['dump_memory']
        self.write_memory = ramargs['write_memory']
        self.sizeinfo = collections.OrderedDict()
        self.data = collections.OrderedDict()
        self.data['section sizes'] = self.sizeinfo
        self.name = ''
        if self.write_memory:
            self.files = { }
        if self.dump_memory:
            self.memory = collections.OrderedDict()
            self.data['memory'] = self.memory

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()

    def getDict(self):
        return self.data

    def read(self):
        # Read all RAM sections
        while True:
            addr = self.file.read64()
            flags = addr & (self.TARGET_PAGE_SIZE - 1)
            addr &= ~(self.TARGET_PAGE_SIZE - 1)

            if flags & self.RAM_SAVE_FLAG_MEM_SIZE:
                while True:
                    namelen = self.file.read8()
                    # We assume that no RAM chunk is big enough to ever
                    # hit the first byte of the address, so when we see
                    # a zero here we know it has to be an address, not the
                    # length of the next block.
                    if namelen == 0:
                        self.file.file.seek(-1, 1)
                        break
                    self.name = self.file.readstr(len = namelen)
                    len = self.file.read64()
                    self.sizeinfo[self.name] = '0x%016x' % len
                    if self.write_memory:
                        print(self.name)
                        mkdir_p('./' + os.path.dirname(self.name))
                        f = open('./' + self.name, "wb")
                        f.truncate(0)
                        f.truncate(len)
                        self.files[self.name] = f
                flags &= ~self.RAM_SAVE_FLAG_MEM_SIZE

            if flags & self.RAM_SAVE_FLAG_COMPRESS:
                if flags & self.RAM_SAVE_FLAG_CONTINUE:
                    flags &= ~self.RAM_SAVE_FLAG_CONTINUE
                else:
                    self.name = self.file.readstr()
                fill_char = self.file.read8()
                # The page in question is filled with fill_char now
                if self.write_memory and fill_char != 0:
                    self.files[self.name].seek(addr, os.SEEK_SET)
                    self.files[self.name].write(chr(fill_char) * self.TARGET_PAGE_SIZE)
                if self.dump_memory:
                    self.memory['%s (0x%016x)' % (self.name, addr)] = 'Filled with 0x%02x' % fill_char
                flags &= ~self.RAM_SAVE_FLAG_COMPRESS
            elif flags & self.RAM_SAVE_FLAG_PAGE:
                if flags & self.RAM_SAVE_FLAG_CONTINUE:
                    flags &= ~self.RAM_SAVE_FLAG_CONTINUE
                else:
                    self.name = self.file.readstr()

                if self.write_memory or self.dump_memory:
                    data = self.file.readvar(size = self.TARGET_PAGE_SIZE)
                else: # Just skip RAM data
                    self.file.file.seek(self.TARGET_PAGE_SIZE, 1)

                if self.write_memory:
                    self.files[self.name].seek(addr, os.SEEK_SET)
                    self.files[self.name].write(data)
                if self.dump_memory:
                    hexdata = " ".join("{0:02x}".format(ord(c)) for c in data)
                    self.memory['%s (0x%016x)' % (self.name, addr)] = hexdata

                flags &= ~self.RAM_SAVE_FLAG_PAGE
            elif flags & self.RAM_SAVE_FLAG_XBZRLE:
                raise Exception("XBZRLE RAM compression is not supported yet")
            elif flags & self.RAM_SAVE_FLAG_HOOK:
                raise Exception("RAM hooks don't make sense with files")

            # End of RAM section
            if flags & self.RAM_SAVE_FLAG_EOS:
                break

            if flags != 0:
                raise Exception("Unknown RAM flags: %x" % flags)

    def __del__(self):
        if self.write_memory:
            for key in self.files:
                self.files[key].close()

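# Parser for the sPAPR hashed page table ("spapr/htab") section; the PTE
# payload is read and discarded, only the framing is consumed.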
class HTABSection(object):
    HASH_PTE_SIZE_64 = 16

    def __init__(self, file, version_id, device, section_key):
        if version_id != 1:
            raise Exception("Unknown HTAB version %d" % version_id)

        self.file = file
        self.section_key = section_key

    def read(self):

        header = self.file.read32()

        if (header == -1):
            # "no HPT" encoding
            return

        if (header > 0):
            # First section, just the hash shift
            return

        # Read until end marker
        while True:
            index = self.file.read32()
            n_valid = self.file.read16()
            n_invalid = self.file.read16()

            if index == 0 and n_valid == 0 and n_invalid == 0:
                break

            self.file.readvar(n_valid * self.HASH_PTE_SIZE_64)

    def getDict(self):
        return ""

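# The QEMU_VM_CONFIGURATION section carries just a length-prefixed name
# string; it is read and ignored.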
class ConfigurationSection(object):
    def __init__(self, file):
        self.file = file

    def read(self):
        name_len = self.file.read32()
        name = self.file.readstr(len = name_len)

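# Fallback reader for VMSD fields: keeps the raw bytes and renders them
# as a hex string.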
class VMSDFieldGeneric(object):
    def __init__(self, desc, file):
        self.file = file
        self.desc = desc
        self.data = ""

    def __repr__(self):
        return str(self.__str__())

    def __str__(self):
        return " ".join("{0:02x}".format(ord(c)) for c in self.data)

    def getDict(self):
        return self.__str__()

    def read(self):
        size = int(self.desc['size'])
        self.data = self.file.readvar(size)
        return self.data

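# Integer fields are stored big-endian; both the signed and the unsigned
# interpretation are kept so negative values can be shown either way.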
class VMSDFieldInt(VMSDFieldGeneric):
    def __init__(self, desc, file):
        super(VMSDFieldInt, self).__init__(desc, file)
        self.size = int(desc['size'])
        self.format = '0x%%0%dx' % (self.size * 2)
        self.sdtype = '>i%d' % self.size
        self.udtype = '>u%d' % self.size

    def __repr__(self):
        if self.data < 0:
            return ('%s (%d)' % ((self.format % self.udata), self.data))
        else:
            return self.format % self.data

    def __str__(self):
        return self.__repr__()

    def getDict(self):
        return self.__str__()

    def read(self):
        super(VMSDFieldInt, self).read()
        self.sdata = np.fromstring(self.data, count=1, dtype=(self.sdtype))[0]
        self.udata = np.fromstring(self.data, count=1, dtype=(self.udtype))[0]
        self.data = self.sdata
        return self.data

class VMSDFieldUInt(VMSDFieldInt):
    def __init__(self, desc, file):
        super(VMSDFieldUInt, self).__init__(desc, file)

    def read(self):
        super(VMSDFieldUInt, self).read()
        self.data = self.udata
        return self.data

class VMSDFieldIntLE(VMSDFieldInt):
    def __init__(self, desc, file):
        super(VMSDFieldIntLE, self).__init__(desc, file)
        self.dtype = '<i%d' % self.size

class VMSDFieldBool(VMSDFieldGeneric):
    def __init__(self, desc, file):
        super(VMSDFieldBool, self).__init__(desc, file)

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()

    def getDict(self):
        return self.data

    def read(self):
        super(VMSDFieldBool, self).read()
        if self.data[0] == 0:
            self.data = False
        else:
            self.data = True
        return self.data

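# Struct fields are driven by the JSON description: each sub-field is read
# with the matching reader, compressed array descriptions are unfolded into
# one entry per element, and any subsections are parsed recursively.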
class VMSDFieldStruct(VMSDFieldGeneric):
    QEMU_VM_SUBSECTION = 0x05

    def __init__(self, desc, file):
        super(VMSDFieldStruct, self).__init__(desc, file)
        self.data = collections.OrderedDict()

        # When we see compressed array elements, unfold them here
        new_fields = []
        for field in self.desc['struct']['fields']:
            if not 'array_len' in field:
                new_fields.append(field)
                continue
            array_len = field.pop('array_len')
            field['index'] = 0
            new_fields.append(field)
            for i in xrange(1, array_len):
                c = field.copy()
                c['index'] = i
                new_fields.append(c)

        self.desc['struct']['fields'] = new_fields

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()

    def read(self):
        for field in self.desc['struct']['fields']:
            try:
                reader = vmsd_field_readers[field['type']]
            except:
                reader = VMSDFieldGeneric

            field['data'] = reader(field, self.file)
            field['data'].read()

            if 'index' in field:
                if field['name'] not in self.data:
                    self.data[field['name']] = []
                a = self.data[field['name']]
                if len(a) != int(field['index']):
                    raise Exception("internal index of data field unmatched (%d/%d)" % (len(a), int(field['index'])))
                a.append(field['data'])
            else:
                self.data[field['name']] = field['data']

        if 'subsections' in self.desc['struct']:
            for subsection in self.desc['struct']['subsections']:
                if self.file.read8() != self.QEMU_VM_SUBSECTION:
                    raise Exception("Subsection %s not found at offset %x" % ( subsection['vmsd_name'], self.file.tell()))
                name = self.file.readstr()
                version_id = self.file.read32()
                self.data[name] = VMSDSection(self.file, version_id, subsection, (name, 0))
                self.data[name].read()

    def getDictItem(self, value):
        # Strings would fall into the array category, treat
        # them specially
        if value.__class__ is ''.__class__:
            return value

        try:
            return self.getDictOrderedDict(value)
        except:
            try:
                return self.getDictArray(value)
            except:
                try:
                    return value.getDict()
                except:
                    return value

    def getDictArray(self, array):
        r = []
        for value in array:
            r.append(self.getDictItem(value))
        return r

    def getDictOrderedDict(self, dict):
        r = collections.OrderedDict()
        for (key, value) in dict.items():
            r[key] = self.getDictItem(value)
        return r

    def getDict(self):
        return self.getDictOrderedDict(self.data)

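# Map from the field type names used in the VMSD JSON description to the
# reader classes above; unknown types fall back to VMSDFieldGeneric.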
vmsd_field_readers = {
    "bool" : VMSDFieldBool,
    "int8" : VMSDFieldInt,
    "int16" : VMSDFieldInt,
    "int32" : VMSDFieldInt,
    "int32 equal" : VMSDFieldInt,
    "int32 le" : VMSDFieldIntLE,
    "int64" : VMSDFieldInt,
    "uint8" : VMSDFieldUInt,
    "uint16" : VMSDFieldUInt,
    "uint32" : VMSDFieldUInt,
    "uint32 equal" : VMSDFieldUInt,
    "uint64" : VMSDFieldUInt,
    "int64 equal" : VMSDFieldInt,
    "uint8 equal" : VMSDFieldInt,
    "uint16 equal" : VMSDFieldInt,
    "float64" : VMSDFieldGeneric,
    "timer" : VMSDFieldGeneric,
    "buffer" : VMSDFieldGeneric,
    "unused_buffer" : VMSDFieldGeneric,
    "bitmap" : VMSDFieldGeneric,
    "struct" : VMSDFieldStruct,
    "unknown" : VMSDFieldGeneric,
}

class VMSDSection(VMSDFieldStruct):
    def __init__(self, file, version_id, device, section_key):
        self.file = file
        self.data = ""
        self.vmsd_name = ""
        self.section_key = section_key
        desc = device
        if 'vmsd_name' in device:
            self.vmsd_name = device['vmsd_name']

        # A section really is nothing but a FieldStruct :)
        super(VMSDSection, self).__init__({ 'struct' : desc }, file)

###############################################################################

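# Top-level parser: checks the file magic and version, loads the VMSD JSON
# description embedded after QEMU_VM_VMDESCRIPTION, then walks the section
# stream and dispatches each section to the appropriate parser class.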
class MigrationDump(object):
    QEMU_VM_FILE_MAGIC    = 0x5145564d
    QEMU_VM_FILE_VERSION  = 0x00000003
    QEMU_VM_EOF           = 0x00
    QEMU_VM_SECTION_START = 0x01
    QEMU_VM_SECTION_PART  = 0x02
    QEMU_VM_SECTION_END   = 0x03
    QEMU_VM_SECTION_FULL  = 0x04
    QEMU_VM_SUBSECTION    = 0x05
    QEMU_VM_VMDESCRIPTION = 0x06
    QEMU_VM_CONFIGURATION = 0x07
    QEMU_VM_SECTION_FOOTER = 0x7e

    def __init__(self, filename):
        self.section_classes = { ( 'ram', 0 ) : [ RamSection, None ],
                                 ( 'spapr/htab', 0) : ( HTABSection, None ) }
        self.filename = filename
        self.vmsd_desc = None

    def read(self, desc_only = False, dump_memory = False, write_memory = False):
        # Read in the whole file
        file = MigrationFile(self.filename)

        # File magic
        data = file.read32()
        if data != self.QEMU_VM_FILE_MAGIC:
            raise Exception("Invalid file magic %x" % data)

        # Version (has to be v3)
        data = file.read32()
        if data != self.QEMU_VM_FILE_VERSION:
            raise Exception("Invalid version number %d" % data)

        self.load_vmsd_json(file)

        # Read sections
        self.sections = collections.OrderedDict()

        if desc_only:
            return

        ramargs = {}
        ramargs['page_size'] = self.vmsd_desc['page_size']
        ramargs['dump_memory'] = dump_memory
        ramargs['write_memory'] = write_memory
        self.section_classes[('ram',0)][1] = ramargs

        while True:
            section_type = file.read8()
            if section_type == self.QEMU_VM_EOF:
                break
            elif section_type == self.QEMU_VM_CONFIGURATION:
                section = ConfigurationSection(file)
                section.read()
            elif section_type == self.QEMU_VM_SECTION_START or section_type == self.QEMU_VM_SECTION_FULL:
                section_id = file.read32()
                name = file.readstr()
                instance_id = file.read32()
                version_id = file.read32()
                section_key = (name, instance_id)
                classdesc = self.section_classes[section_key]
                section = classdesc[0](file, version_id, classdesc[1], section_key)
                self.sections[section_id] = section
                section.read()
            elif section_type == self.QEMU_VM_SECTION_PART or section_type == self.QEMU_VM_SECTION_END:
                section_id = file.read32()
                self.sections[section_id].read()
            elif section_type == self.QEMU_VM_SECTION_FOOTER:
                read_section_id = file.read32()
                if read_section_id != section_id:
                    raise Exception("Mismatched section footer: %x vs %x" % (read_section_id, section_id))
            else:
                raise Exception("Unknown section type: %d" % section_type)
        file.close()

    def load_vmsd_json(self, file):
        vmsd_json = file.read_migration_debug_json()
        self.vmsd_desc = json.loads(vmsd_json, object_pairs_hook=collections.OrderedDict)
        for device in self.vmsd_desc['devices']:
            key = (device['name'], device['instance_id'])
            value = ( VMSDSection, device )
            self.section_classes[key] = value

    def getDict(self):
        r = collections.OrderedDict()
        for (key, value) in self.sections.items():
            key = "%s (%d)" % ( value.section_key[0], key )
            r[key] = value.getDict()
        return r

###############################################################################

class JSONEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, VMSDFieldGeneric):
            return str(o)
        return json.JSONEncoder.default(self, o)

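# Command line handling: -d state (the default) and -d desc print JSON to
# stdout, -x extracts desc.json, state.json and the RAM contents into the
# current directory.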
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help='migration dump to read from', required=True)
parser.add_argument("-m", "--memory", help='dump RAM contents as well', action='store_true')
parser.add_argument("-d", "--dump", help='what to dump ("state" or "desc")', default='state')
parser.add_argument("-x", "--extract", help='extract contents into individual files', action='store_true')
args = parser.parse_args()

jsonenc = JSONEncoder(indent=4, separators=(',', ': '))

if args.extract:
    dump = MigrationDump(args.file)

    dump.read(desc_only = True)
    print("desc.json")
    f = open("desc.json", "wb")
    f.truncate()
    f.write(jsonenc.encode(dump.vmsd_desc))
    f.close()

    dump.read(write_memory = True)
    dict = dump.getDict()
    print("state.json")
    f = open("state.json", "wb")
    f.truncate()
    f.write(jsonenc.encode(dict))
    f.close()
elif args.dump == "state":
    dump = MigrationDump(args.file)
    dump.read(dump_memory = args.memory)
    dict = dump.getDict()
    print(jsonenc.encode(dict))
elif args.dump == "desc":
    dump = MigrationDump(args.file)
    dump.read(desc_only = True)
    print(jsonenc.encode(dump.vmsd_desc))
else:
    raise Exception("Please specify either -x, -d state or -d desc")