#!/usr/bin/env python
#
# Migration Stream Analyzer
#
# Copyright (c) 2015 Alexander Graf <agraf@suse.de>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
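#
# Usage overview (the argument parser lives at the bottom of this file):
#
#   -f FILE    migration stream to analyze, e.g. one saved to a file via the
#              "exec:" migration transport
#   -d state   print the device state carried in the stream as JSON (default)
#   -d desc    print the JSON VM description that trails the stream
#   -m         also dump RAM page contents
#   -x         extract desc.json, state.json and the RAM contents (one file
#              per RAM block) into the current directory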
import numpy as np
import json
import os
import argparse
import collections

def mkdir_p(path):
    # Create the directory tree for extracted RAM blocks; ignore failures
    # such as the path already existing.
    try:
        os.makedirs(path)
    except OSError:
        pass

class MigrationFile(object):
    def __init__(self, filename):
        self.filename = filename
        self.file = open(self.filename, "rb")

    def read64(self):
        return np.asscalar(np.fromfile(self.file, count=1, dtype='>i8')[0])

    def read32(self):
        return np.asscalar(np.fromfile(self.file, count=1, dtype='>i4')[0])

    def read16(self):
        return np.asscalar(np.fromfile(self.file, count=1, dtype='>i2')[0])

    def read8(self):
        return np.asscalar(np.fromfile(self.file, count=1, dtype='>i1')[0])

    def readstr(self, len = None):
        if len is None:
            len = self.read8()
        if len == 0:
            return ""
        return np.fromfile(self.file, count=1, dtype=('S%d' % len))[0]

    def readvar(self, size = None):
        if size is None:
            size = self.read8()
        if size == 0:
            return ""
        value = self.file.read(size)
        if len(value) != size:
            raise Exception("Unexpected end of %s at 0x%x" % (self.filename, self.file.tell()))
        return value

    def tell(self):
        return self.file.tell()

    # The VMSD description is at the end of the file, after EOF. Look for
    # the last NULL byte, then for the beginning brace of JSON.
    def read_migration_debug_json(self):
        QEMU_VM_VMDESCRIPTION = 0x06

        # Remember the offset in the file when we started
        entrypos = self.file.tell()

        # Read the last 10MB
        self.file.seek(0, os.SEEK_END)
        endpos = self.file.tell()
        self.file.seek(max(-endpos, -10 * 1024 * 1024), os.SEEK_END)
        datapos = self.file.tell()
        data = self.file.read()
        # The full file read closed the file as well, reopen it
        self.file = open(self.filename, "rb")

        # Find the last NULL byte, then the first brace after that. This should
        # be the beginning of our JSON data.
        nulpos = data.rfind("\0")
        jsonpos = data.find("{", nulpos)

        # Check backwards from there and see whether we guessed right
        self.file.seek(datapos + jsonpos - 5, 0)
        if self.read8() != QEMU_VM_VMDESCRIPTION:
            raise Exception("No Debug Migration device found")

        jsonlen = self.read32()

        # Seek back to where we were at the beginning
        self.file.seek(entrypos, 0)

        return data[jsonpos:jsonpos + jsonlen]

    def close(self):
        self.file.close()
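
# Overall layout of a migration stream, as consumed by MigrationDump.read()
# further down:
#
#   QEMU_VM_FILE_MAGIC and QEMU_VM_FILE_VERSION (32 bit each)
#   a sequence of sections, each introduced by a one-byte section type
#   QEMU_VM_EOF
#   QEMU_VM_VMDESCRIPTION, a 32-bit length and the JSON VM description that
#   MigrationFile.read_migration_debug_json() above digs back out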
class RamSection(object):
    RAM_SAVE_FLAG_COMPRESS = 0x02
    RAM_SAVE_FLAG_MEM_SIZE = 0x04
    RAM_SAVE_FLAG_PAGE     = 0x08
    RAM_SAVE_FLAG_EOS      = 0x10
    RAM_SAVE_FLAG_CONTINUE = 0x20
    RAM_SAVE_FLAG_XBZRLE   = 0x40
    RAM_SAVE_FLAG_HOOK     = 0x80
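
    # Each RAM record starts with a 64-bit word that combines the page address
    # and the flags above: the bits below TARGET_PAGE_SIZE hold the flags, the
    # rest is the page-aligned address (see read() below).
    # RAM_SAVE_FLAG_CONTINUE means "same RAM block as the previous record";
    # otherwise the record carries the block name.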
    def __init__(self, file, version_id, ramargs, section_key):
        if version_id != 4:
            raise Exception("Unknown RAM version %d" % version_id)

        self.file = file
        self.section_key = section_key
        self.TARGET_PAGE_SIZE = ramargs['page_size']
        self.dump_memory = ramargs['dump_memory']
        self.write_memory = ramargs['write_memory']
        self.sizeinfo = collections.OrderedDict()
        self.data = collections.OrderedDict()
        self.data['section sizes'] = self.sizeinfo
        self.name = ''
        if self.write_memory:
            self.files = { }
        if self.dump_memory:
            self.memory = collections.OrderedDict()
            self.data['memory'] = self.memory

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()

    def getDict(self):
        return self.data

    def read(self):
        # Read all RAM sections
        while True:
            addr = self.file.read64()
            flags = addr & (self.TARGET_PAGE_SIZE - 1)
            addr &= ~(self.TARGET_PAGE_SIZE - 1)

            if flags & self.RAM_SAVE_FLAG_MEM_SIZE:
                while True:
                    namelen = self.file.read8()
                    # We assume that no RAM chunk is big enough to ever
                    # hit the first byte of the address, so when we see
                    # a zero here we know it has to be an address, not the
                    # length of the next block.
                    if namelen == 0:
                        self.file.file.seek(-1, 1)
                        break
                    self.name = self.file.readstr(len = namelen)
                    len = self.file.read64()
                    self.sizeinfo[self.name] = '0x%016x' % len
                    if self.write_memory:
                        print self.name
                        mkdir_p('./' + os.path.dirname(self.name))
                        f = open('./' + self.name, "wb")
                        f.truncate(0)
                        f.truncate(len)
                        self.files[self.name] = f
                flags &= ~self.RAM_SAVE_FLAG_MEM_SIZE

            if flags & self.RAM_SAVE_FLAG_COMPRESS:
                if flags & self.RAM_SAVE_FLAG_CONTINUE:
                    flags &= ~self.RAM_SAVE_FLAG_CONTINUE
                else:
                    self.name = self.file.readstr()
                fill_char = self.file.read8()
                # The page in question is filled with fill_char now
                if self.write_memory and fill_char != 0:
                    self.files[self.name].seek(addr, os.SEEK_SET)
                    self.files[self.name].write(chr(fill_char) * self.TARGET_PAGE_SIZE)
                if self.dump_memory:
                    self.memory['%s (0x%016x)' % (self.name, addr)] = 'Filled with 0x%02x' % fill_char
                flags &= ~self.RAM_SAVE_FLAG_COMPRESS
            elif flags & self.RAM_SAVE_FLAG_PAGE:
                if flags & self.RAM_SAVE_FLAG_CONTINUE:
                    flags &= ~self.RAM_SAVE_FLAG_CONTINUE
                else:
                    self.name = self.file.readstr()

                if self.write_memory or self.dump_memory:
                    data = self.file.readvar(size = self.TARGET_PAGE_SIZE)
                else: # Just skip RAM data
                    self.file.file.seek(self.TARGET_PAGE_SIZE, 1)

                if self.write_memory:
                    self.files[self.name].seek(addr, os.SEEK_SET)
                    self.files[self.name].write(data)
                if self.dump_memory:
                    hexdata = " ".join("{0:02x}".format(ord(c)) for c in data)
                    self.memory['%s (0x%016x)' % (self.name, addr)] = hexdata

                flags &= ~self.RAM_SAVE_FLAG_PAGE
            elif flags & self.RAM_SAVE_FLAG_XBZRLE:
                raise Exception("XBZRLE RAM compression is not supported yet")
            elif flags & self.RAM_SAVE_FLAG_HOOK:
                raise Exception("RAM hooks don't make sense with files")

            # End of RAM section
            if flags & self.RAM_SAVE_FLAG_EOS:
                break

            if flags != 0:
                raise Exception("Unknown RAM flags: %x" % flags)

    def __del__(self):
        if self.write_memory:
            for key in self.files:
                self.files[key].close()
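
# The 'spapr/htab' section carries the guest hash page table of sPAPR
# (PowerPC pseries) machines.  HTABSection below only walks the chunk headers
# and skips over the HPTE data itself, so none of it shows up in the JSON
# output.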
class HTABSection(object):
    HASH_PTE_SIZE_64 = 16

    def __init__(self, file, version_id, device, section_key):
        if version_id != 1:
            raise Exception("Unknown HTAB version %d" % version_id)

        self.file = file
        self.section_key = section_key

    def read(self):
        header = self.file.read32()

        if (header > 0):
            # First section, just the hash shift
            return

        # Read until end marker
        while True:
            index = self.file.read32()
            n_valid = self.file.read16()
            n_invalid = self.file.read16()

            if index == 0 and n_valid == 0 and n_invalid == 0:
                break

            self.file.readvar(n_valid * self.HASH_PTE_SIZE_64)

    def getDict(self):
        return ""
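
# Device sections other than RAM and HTAB are decoded with the help of the
# JSON VM description: every field there has a 'type' and a 'size'.  The
# classes below each know how to read and pretty-print one field type, with
# VMSDFieldGeneric as the fallback that simply hex-dumps the raw bytes.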
class VMSDFieldGeneric(object):
    def __init__(self, desc, file):
        self.file = file
        self.desc = desc
        self.data = ""

    def __repr__(self):
        return str(self.__str__())

    def __str__(self):
        return " ".join("{0:02x}".format(ord(c)) for c in self.data)

    def getDict(self):
        return self.__str__()

    def read(self):
        size = int(self.desc['size'])
        self.data = self.file.readvar(size)
        return self.data
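
# Integer fields are stored big-endian on the wire.  VMSDFieldInt keeps both a
# signed and an unsigned view of the bytes so that negative values can be
# printed as '0x... (-n)'.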
class VMSDFieldInt(VMSDFieldGeneric):
    def __init__(self, desc, file):
        super(VMSDFieldInt, self).__init__(desc, file)
        self.size = int(desc['size'])
        self.format = '0x%%0%dx' % (self.size * 2)
        self.sdtype = '>i%d' % self.size
        self.udtype = '>u%d' % self.size

    def __repr__(self):
        if self.data < 0:
            return ('%s (%d)' % ((self.format % self.udata), self.data))
        else:
            return self.format % self.data

    def __str__(self):
        return self.__repr__()

    def getDict(self):
        return self.__str__()

    def read(self):
        super(VMSDFieldInt, self).read()
        self.sdata = np.fromstring(self.data, count=1, dtype=(self.sdtype))[0]
        self.udata = np.fromstring(self.data, count=1, dtype=(self.udtype))[0]
        self.data = self.sdata
        return self.data

class VMSDFieldUInt(VMSDFieldInt):
    def __init__(self, desc, file):
        super(VMSDFieldUInt, self).__init__(desc, file)

    def read(self):
        super(VMSDFieldUInt, self).read()
        self.data = self.udata
        return self.data

class VMSDFieldIntLE(VMSDFieldInt):
    def __init__(self, desc, file):
        super(VMSDFieldIntLE, self).__init__(desc, file)
        self.dtype = '<i%d' % self.size

class VMSDFieldBool(VMSDFieldGeneric):
    def __init__(self, desc, file):
        super(VMSDFieldBool, self).__init__(desc, file)

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()

    def getDict(self):
        return self.data

    def read(self):
        super(VMSDFieldBool, self).read()
        if self.data[0] == 0:
            self.data = False
        else:
            self.data = True
        return self.data
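
# A 'struct' field nests a full list of sub-fields and possibly subsections.
# Compressed array elements in the description carry an 'array_len' instead of
# one entry per element; __init__ below unfolds them into individual fields
# with an explicit 'index' so that read() can collect them into a plain list.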
class VMSDFieldStruct(VMSDFieldGeneric):
    QEMU_VM_SUBSECTION = 0x05

    def __init__(self, desc, file):
        super(VMSDFieldStruct, self).__init__(desc, file)
        self.data = collections.OrderedDict()

        # When we see compressed array elements, unfold them here
        new_fields = []
        for field in self.desc['struct']['fields']:
            if not 'array_len' in field:
                new_fields.append(field)
                continue
            array_len = field.pop('array_len')
            field['index'] = 0
            new_fields.append(field)
            for i in xrange(1, array_len):
                c = field.copy()
                c['index'] = i
                new_fields.append(c)

        self.desc['struct']['fields'] = new_fields

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()

    def read(self):
        for field in self.desc['struct']['fields']:
            try:
                reader = vmsd_field_readers[field['type']]
            except:
                reader = VMSDFieldGeneric

            field['data'] = reader(field, self.file)
            field['data'].read()

            if 'index' in field:
                if field['name'] not in self.data:
                    self.data[field['name']] = []
                a = self.data[field['name']]
                if len(a) != int(field['index']):
                    raise Exception("internal index of data field unmatched (%d/%d)" % (len(a), int(field['index'])))
                a.append(field['data'])
            else:
                self.data[field['name']] = field['data']

        if 'subsections' in self.desc['struct']:
            for subsection in self.desc['struct']['subsections']:
                if self.file.read8() != self.QEMU_VM_SUBSECTION:
                    raise Exception("Subsection %s not found at offset %x" % ( subsection['vmsd_name'], self.file.tell()))
                name = self.file.readstr()
                version_id = self.file.read32()
                self.data[name] = VMSDSection(self.file, version_id, subsection, (name, 0))
                self.data[name].read()

    def getDictItem(self, value):
        # Strings would fall into the array category, treat
        # them specially
        if value.__class__ is ''.__class__:
            return value

        try:
            return self.getDictOrderedDict(value)
        except:
            try:
                return self.getDictArray(value)
            except:
                try:
                    return value.getDict()
                except:
                    return value

    def getDictArray(self, array):
        r = []
        for value in array:
            r.append(self.getDictItem(value))
        return r

    def getDictOrderedDict(self, dict):
        r = collections.OrderedDict()
        for (key, value) in dict.items():
            r[key] = self.getDictItem(value)
        return r

    def getDict(self):
        return self.getDictOrderedDict(self.data)
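
# Map from the 'type' string used in the JSON VM description to the reader
# class that decodes it; types that are missing here fall back to
# VMSDFieldGeneric in VMSDFieldStruct.read().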
vmsd_field_readers = {
    "bool" : VMSDFieldBool,
    "int8" : VMSDFieldInt,
    "int16" : VMSDFieldInt,
    "int32" : VMSDFieldInt,
    "int32 equal" : VMSDFieldInt,
    "int32 le" : VMSDFieldIntLE,
    "int64" : VMSDFieldInt,
    "uint8" : VMSDFieldUInt,
    "uint16" : VMSDFieldUInt,
    "uint32" : VMSDFieldUInt,
    "uint32 equal" : VMSDFieldUInt,
    "uint64" : VMSDFieldUInt,
    "int64 equal" : VMSDFieldInt,
    "uint8 equal" : VMSDFieldInt,
    "uint16 equal" : VMSDFieldInt,
    "float64" : VMSDFieldGeneric,
    "timer" : VMSDFieldGeneric,
    "buffer" : VMSDFieldGeneric,
    "unused_buffer" : VMSDFieldGeneric,
    "bitmap" : VMSDFieldGeneric,
    "struct" : VMSDFieldStruct,
    "unknown" : VMSDFieldGeneric,
}
class VMSDSection(VMSDFieldStruct):
    def __init__(self, file, version_id, device, section_key):
        self.file = file
        self.data = ""
        self.vmsd_name = ""
        self.section_key = section_key
        desc = device
        if 'vmsd_name' in device:
            self.vmsd_name = device['vmsd_name']

        # A section really is nothing but a FieldStruct :)
        super(VMSDSection, self).__init__({ 'struct' : desc }, file)

###############################################################################
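
# MigrationDump ties everything together.  section_classes maps a
# (section name, instance id) key to the parser for that section: 'ram' and
# 'spapr/htab' are hardcoded because they are not self-describing; every other
# device gets a VMSDSection built from the JSON VM description loaded by
# load_vmsd_json().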
class MigrationDump(object):
    QEMU_VM_FILE_MAGIC    = 0x5145564d
    QEMU_VM_FILE_VERSION  = 0x00000003
    QEMU_VM_EOF           = 0x00
    QEMU_VM_SECTION_START = 0x01
    QEMU_VM_SECTION_PART  = 0x02
    QEMU_VM_SECTION_END   = 0x03
    QEMU_VM_SECTION_FULL  = 0x04
    QEMU_VM_SUBSECTION    = 0x05
    QEMU_VM_VMDESCRIPTION = 0x06

    def __init__(self, filename):
        self.section_classes = { ( 'ram', 0 ) : [ RamSection, None ],
                                 ( 'spapr/htab', 0) : ( HTABSection, None ) }
        self.filename = filename
        self.vmsd_desc = None

    def read(self, desc_only = False, dump_memory = False, write_memory = False):
        # Read in the whole file
        file = MigrationFile(self.filename)

        # File magic
        data = file.read32()
        if data != self.QEMU_VM_FILE_MAGIC:
            raise Exception("Invalid file magic %x" % data)

        # Version (has to be v3)
        data = file.read32()
        if data != self.QEMU_VM_FILE_VERSION:
            raise Exception("Invalid version number %d" % data)

        self.load_vmsd_json(file)

        # Read sections
        self.sections = collections.OrderedDict()

        if desc_only:
            return

        ramargs = {}
        ramargs['page_size'] = self.vmsd_desc['page_size']
        ramargs['dump_memory'] = dump_memory
        ramargs['write_memory'] = write_memory
        self.section_classes[('ram',0)][1] = ramargs

        while True:
            section_type = file.read8()
            if section_type == self.QEMU_VM_EOF:
                break
            elif section_type == self.QEMU_VM_SECTION_START or section_type == self.QEMU_VM_SECTION_FULL:
                section_id = file.read32()
                name = file.readstr()
                instance_id = file.read32()
                version_id = file.read32()
                section_key = (name, instance_id)
                classdesc = self.section_classes[section_key]
                section = classdesc[0](file, version_id, classdesc[1], section_key)
                self.sections[section_id] = section
                section.read()
            elif section_type == self.QEMU_VM_SECTION_PART or section_type == self.QEMU_VM_SECTION_END:
                section_id = file.read32()
                self.sections[section_id].read()
            else:
                raise Exception("Unknown section type: %d" % section_type)
        file.close()
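
    # Parse the JSON VM description that trails the stream and register a
    # VMSDSection parser for every device it lists.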
    def load_vmsd_json(self, file):
        vmsd_json = file.read_migration_debug_json()
        self.vmsd_desc = json.loads(vmsd_json, object_pairs_hook=collections.OrderedDict)
        for device in self.vmsd_desc['devices']:
            key = (device['name'], device['instance_id'])
            value = ( VMSDSection, device )
            self.section_classes[key] = value

    def getDict(self):
        r = collections.OrderedDict()
        for (key, value) in self.sections.items():
            key = "%s (%d)" % ( value.section_key[0], key)
            r[key] = value.getDict()
        return r

###############################################################################
class JSONEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, VMSDFieldGeneric):
            return str(o)
        return json.JSONEncoder.default(self, o)

parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help='migration dump to read from', required=True)
parser.add_argument("-m", "--memory", help='dump RAM contents as well', action='store_true')
parser.add_argument("-d", "--dump", help='what to dump ("state" or "desc")', default='state')
parser.add_argument("-x", "--extract", help='extract contents into individual files', action='store_true')
args = parser.parse_args()

jsonenc = JSONEncoder(indent=4, separators=(',', ': '))

if args.extract:
    dump = MigrationDump(args.file)

    dump.read(desc_only = True)
    print "desc.json"
    f = open("desc.json", "wb")
    f.write(jsonenc.encode(dump.vmsd_desc))
    f.close()

    dump.read(write_memory = True)
    dict = dump.getDict()
    print "state.json"
    f = open("state.json", "wb")
    f.write(jsonenc.encode(dict))
    f.close()
elif args.dump == "state":
    dump = MigrationDump(args.file)
    dump.read(dump_memory = args.memory)
    dict = dump.getDict()
    print jsonenc.encode(dict)
elif args.dump == "desc":
    dump = MigrationDump(args.file)
    dump.read(desc_only = True)
    print jsonenc.encode(dump.vmsd_desc)
else:
    raise Exception("Please specify either -x, -d state or -d desc")