#!/usr/bin/env python3
#
# Tests for incremental drive-backup
#
# Copyright (C) 2015 John Snow for Red Hat, Inc.
#
# Based on 056.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import os
import iotests


def io_write_patterns(img, patterns):
    for pattern in patterns:
        iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img)


def try_remove(img):
    try:
        os.remove(img)
    except OSError:
        pass


def transaction_action(action, **kwargs):
    return {
        'type': action,
        'data': dict((k.replace('_', '-'), v) for k, v in kwargs.items())
    }


def transaction_bitmap_clear(node, name, **kwargs):
    return transaction_action('block-dirty-bitmap-clear',
                              node=node, name=name, **kwargs)


def transaction_drive_backup(device, target, **kwargs):
    return transaction_action('drive-backup', job_id=device, device=device,
                              target=target, **kwargs)
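
# For illustration only (hypothetical target path): a call such as
# transaction_drive_backup('drive0', '/tmp/backup.img', sync='full') should
# yield a QMP transaction action roughly of the form
#   {'type': 'drive-backup',
#    'data': {'job-id': 'drive0', 'device': 'drive0',
#             'target': '/tmp/backup.img', 'sync': 'full'}}
# since transaction_action() rewrites underscores in keyword names into the
# dashes QMP expects (job_id -> job-id).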


class Bitmap:
    def __init__(self, name, drive):
        self.name = name
        self.drive = drive
        self.num = 0
        self.backups = list()

    def base_target(self):
        return (self.drive['backup'], None)

    def new_target(self, num=None):
        if num is None:
            num = self.num
        self.num = num + 1
        base = os.path.join(iotests.test_dir,
                            "%s.%s." % (self.drive['id'], self.name))
        suff = "%i.%s" % (num, self.drive['fmt'])
        target = base + "inc" + suff
        reference = base + "ref" + suff
        self.backups.append((target, reference))
        return (target, reference)
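
    # Illustrative naming sketch (hypothetical IDs): for drive 'drive0',
    # bitmap 'bitmap0' and qcow2 format, the first new_target() call returns
    # a pair roughly like
    #   (<test_dir>/drive0.bitmap0.inc0.qcow2,    # incremental target
    #    <test_dir>/drive0.bitmap0.ref0.qcow2)    # full reference backup
    # and the trailing counter advances with every call.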

    def last_target(self):
        if self.backups:
            return self.backups[-1]
        return self.base_target()

    def del_target(self):
        for image in self.backups.pop():
            try_remove(image)
        self.num -= 1

    def cleanup(self):
        for backup in self.backups:
            for image in backup:
                try_remove(image)


class TestIncrementalBackupBase(iotests.QMPTestCase):
    def __init__(self, *args):
        super(TestIncrementalBackupBase, self).__init__(*args)
        self.bitmaps = list()
        self.files = list()
        self.drives = list()
        self.vm = iotests.VM()
        self.err_img = os.path.join(iotests.test_dir, 'err.%s' % iotests.imgfmt)


    def setUp(self):
        # Create a base image with a distinctive patterning
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.vm.add_drive(drive0['file'], opts='node-name=node0')
        self.write_default_pattern(drive0['file'])
        self.vm.launch()


    def write_default_pattern(self, target):
        io_write_patterns(target, (('0x41', 0, 512),
                                   ('0xd5', '1M', '32k'),
                                   ('0xdc', '32M', '124k')))


    def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None):
        if path is None:
            path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt))
        if backup is None:
            backup = os.path.join(iotests.test_dir,
                                  '%s.full.backup.%s' % (node_id, fmt))

        self.drives.append({
            'id': node_id,
            'file': path,
            'backup': backup,
            'fmt': fmt})
        return self.drives[-1]


    def img_create(self, img, fmt=iotests.imgfmt, size='64M',
                   parent=None, parentFormat=None, **kwargs):
        optargs = []
        for k, v in kwargs.items():
            optargs = optargs + ['-o', '%s=%s' % (k, v)]
        args = ['create', '-f', fmt] + optargs + [img, size]
        if parent:
            if parentFormat is None:
                parentFormat = fmt
            args = args + ['-b', parent, '-F', parentFormat]
        iotests.qemu_img(*args)
        self.files.append(img)
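
        # Illustrative only (hypothetical image names): a call such as
        # img_create('scratch.qcow2', 'qcow2', parent='base.qcow2',
        # cluster_size='128k') ends up invoking roughly
        #   iotests.qemu_img('create', '-f', 'qcow2', '-o', 'cluster_size=128k',
        #                    'scratch.qcow2', '64M', '-b', 'base.qcow2',
        #                    '-F', 'qcow2')
        # i.e. extra keyword arguments become -o options and a parent image
        # becomes a -b/-F backing file specification.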


    def do_qmp_backup(self, error='Input/output error', **kwargs):
        res = self.vm.qmp('drive-backup', **kwargs)
        self.assert_qmp(res, 'return', {})
        return self.wait_qmp_backup(kwargs['device'], error)


    def ignore_job_status_change_events(self):
        while True:
            e = self.vm.event_wait(name="JOB_STATUS_CHANGE")
            if e['data']['status'] == 'null':
                break

    def wait_qmp_backup(self, device, error='Input/output error'):
        event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        self.ignore_job_status_change_events()

        try:
            failure = self.dictpath(event, 'data/error')
        except AssertionError:
            # Backup succeeded.
            self.assert_qmp(event, 'data/offset', event['data']['len'])
            return True
        else:
            # Backup failed.
            self.assert_qmp(event, 'data/error', error)
            return False
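
    # For reference, a hedged sketch of the event wait_qmp_backup() consumes:
    # a finished drive-backup job emits a BLOCK_JOB_COMPLETED event roughly
    # shaped like
    #   {'event': 'BLOCK_JOB_COMPLETED',
    #    'data': {'device': 'drive0', 'type': 'backup',
    #             'len': 67108864, 'offset': 67108864, 'speed': 0}}
    # plus an 'error' member (e.g. 'Input/output error') when the job fails,
    # which is what the try/except above keys off of.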


    def wait_qmp_backup_cancelled(self, device):
        event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED',
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        self.ignore_job_status_change_events()


    def create_anchor_backup(self, drive=None):
        if drive is None:
            drive = self.drives[-1]
        res = self.do_qmp_backup(job_id=drive['id'],
                                 device=drive['id'], sync='full',
                                 format=drive['fmt'], target=drive['backup'])
        self.assertTrue(res)
        self.files.append(drive['backup'])
        return drive['backup']


    def make_reference_backup(self, bitmap=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        _, reference = bitmap.last_target()
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'], sync='full',
                                 format=bitmap.drive['fmt'], target=reference)
        self.assertTrue(res)


    def add_bitmap(self, name, drive, **kwargs):
        bitmap = Bitmap(name, drive)
        self.bitmaps.append(bitmap)
        result = self.vm.qmp('block-dirty-bitmap-add', node=drive['id'],
                             name=bitmap.name, **kwargs)
        self.assert_qmp(result, 'return', {})
        return bitmap


    def prepare_backup(self, bitmap=None, parent=None, **kwargs):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target, _ = bitmap.new_target()
        self.img_create(target, bitmap.drive['fmt'], parent=parent,
                        **kwargs)
        return target


    def create_incremental(self, bitmap=None, parent=None,
                           parentFormat=None, validate=True,
                           target=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        if target is None:
            target = self.prepare_backup(bitmap, parent)
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'],
                                 sync='incremental', bitmap=bitmap.name,
                                 format=bitmap.drive['fmt'], target=target,
                                 mode='existing')
        if not res:
            bitmap.del_target()
            self.assertFalse(validate)
        else:
            self.make_reference_backup(bitmap)
        return res


    def check_backups(self):
        for bitmap in self.bitmaps:
            for incremental, reference in bitmap.backups:
                self.assertTrue(iotests.compare_images(incremental, reference))
            last = bitmap.last_target()[0]
            self.assertTrue(iotests.compare_images(last, bitmap.drive['file']))


    def hmp_io_writes(self, drive, patterns):
        for pattern in patterns:
            self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
        self.vm.hmp_qemu_io(drive, 'flush')


    def do_incremental_simple(self, **kwargs):
        self.create_anchor_backup()
        self.add_bitmap('bitmap0', self.drives[0], **kwargs)

        # Sanity: Create a "hollow" incremental backup
        self.create_incremental()
        # Three writes: One complete overwrite, one new segment,
        # and one partial overlap.
        self.hmp_io_writes(self.drives[0]['id'], (('0xab', 0, 512),
                                                  ('0xfe', '16M', '256k'),
                                                  ('0x64', '32736k', '64k')))
        self.create_incremental()
        # Three more writes, one of each kind, like above
        self.hmp_io_writes(self.drives[0]['id'], (('0x9a', 0, 512),
                                                  ('0x55', '8M', '352k'),
                                                  ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()


    def tearDown(self):
        self.vm.shutdown()
        for bitmap in self.bitmaps:
            bitmap.cleanup()
        for filename in self.files:
            try_remove(filename)


class TestIncrementalBackup(TestIncrementalBackupBase):
    def test_incremental_simple(self):
        '''
        Test: Create and verify three incremental backups.

        Create a bitmap and a full backup before VM execution begins,
        then create a series of three incremental backups "during execution,"
        i.e., after IO requests begin modifying the drive.
        '''
        return self.do_incremental_simple()


    def test_small_granularity(self):
        '''
        Test: Create and verify backups made with a small granularity bitmap.

        Perform the same test as test_incremental_simple, but with a granularity
        of only 32KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=32768)


    def test_large_granularity(self):
        '''
        Test: Create and verify backups made with a large granularity bitmap.

        Perform the same test as test_incremental_simple, but with a granularity
        of 128KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=131072)


    def test_larger_cluster_target(self):
        '''
        Test: Create and verify backups made to a larger cluster size target.

        With a default granularity of 64KiB, verify that backups made to a
        target with a larger cluster size of 128KiB and no backing file work
        correctly.
        '''
        drive0 = self.drives[0]

        # Create a cluster_size=128k full backup / "anchor" backup
        self.img_create(drive0['backup'], cluster_size='128k')
        self.assertTrue(self.do_qmp_backup(device=drive0['id'], sync='full',
                                           format=drive0['fmt'],
                                           target=drive0['backup'],
                                           mode='existing'))

        # Create a bitmap and dirty it with some new writes. Overwriting
        # [32736k, 32799k] dirties the bitmap clusters at 32M-64K and 32M;
        # 32M+64K is left undirtied.
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'],
                           (('0xab', 0, 512),
                            ('0xfe', '16M', '256k'),
                            ('0x64', '32736k', '64k')))
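        # Rough arithmetic for the dirty count checked below: at the 64 KiB
        # granularity, the 512-byte write at 0 dirties one cluster, the
        # 256 KiB write at 16M dirties four, and the unaligned 64 KiB write
        # at 32736k straddles two, so 7 clusters * 65536 = 458752 bytes.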
        # Check the dirty bitmap stats
        self.assertTrue(self.vm.check_bitmap_status(
            'node0', bitmap0.name, {
                'name': 'bitmap0',
                'count': 458752,
                'granularity': 65536,
                'status': 'active',
                'persistent': False
            }))

        # Prepare a cluster_size=128k backup target without a backing file.
        (target, _) = bitmap0.new_target()
        self.img_create(target, bitmap0.drive['fmt'], cluster_size='128k')

        # Perform Incremental Backup
        self.assertTrue(self.do_qmp_backup(device=bitmap0.drive['id'],
                                           sync='incremental',
                                           bitmap=bitmap0.name,
                                           format=bitmap0.drive['fmt'],
                                           target=target,
                                           mode='existing'))
        self.make_reference_backup(bitmap0)

        # Add the backing file, then compare and exit.
        iotests.qemu_img('rebase', '-f', drive0['fmt'], '-u', '-b',
                         drive0['backup'], '-F', drive0['fmt'], target)
        self.vm.shutdown()
        self.check_backups()


    def test_incremental_transaction(self):
        '''Test: Verify backups made from transactionally created bitmaps.

        Create a bitmap "before" VM execution begins, then create a second
        bitmap AFTER writes have already occurred. Use transactions to create
        a full backup and synchronize both bitmaps to this backup.
        Create an incremental backup through both bitmaps and verify that
        both backups match the current drive0 image.
        '''

        drive0 = self.drives[0]
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        bitmap1 = self.add_bitmap('bitmap1', drive0)

        result = self.vm.qmp('transaction', actions=[
            transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name),
            transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name),
            transaction_drive_backup(drive0['id'], drive0['backup'],
                                     sync='full', format=drive0['fmt'])
        ])
        self.assert_qmp(result, 'return', {})
        self.wait_until_completed(drive0['id'])
        self.files.append(drive0['backup'])

        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        # Both bitmaps should be correctly in sync.
        self.create_incremental(bitmap0)
        self.create_incremental(bitmap1)
        self.vm.shutdown()
        self.check_backups()


    def do_transaction_failure_test(self, race=False):
        # Create a second drive, with pattern:
        drive1 = self.add_node('drive1')
        self.img_create(drive1['file'], drive1['fmt'])
        io_write_patterns(drive1['file'], (('0x14', 0, 512),
                                           ('0x5d', '1M', '32k'),
                                           ('0xcd', '32M', '124k')))

        # Create a blkdebug interface to this img as 'drive1'
        result = self.vm.qmp('blockdev-add',
            node_name=drive1['id'],
            driver=drive1['fmt'],
            file={
                'driver': 'blkdebug',
                'image': {
                    'driver': 'file',
                    'filename': drive1['file']
                },
                'set-state': [{
                    'event': 'flush_to_disk',
                    'state': 1,
                    'new_state': 2
                }],
                'inject-error': [{
                    'event': 'read_aio',
                    'errno': 5,
                    'state': 2,
                    'immediately': False,
                    'once': True
                }],
            }
        )
        self.assert_qmp(result, 'return', {})
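
        # A hedged summary of the blkdebug rules configured above: the node
        # starts in state 1; the first flush_to_disk event (the explicit
        # 'flush' issued by hmp_io_writes) moves it to state 2, where a single
        # read_aio request then fails once with errno 5 (EIO). That one
        # injected read error is what makes drive1's backup fail on its first
        # attempt only.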

        # Create bitmaps and full backups for both drives
        drive0 = self.drives[0]
        dr0bm0 = self.add_bitmap('bitmap0', drive0)
        dr1bm0 = self.add_bitmap('bitmap0', drive1)
        self.create_anchor_backup(drive0)
        self.create_anchor_backup(drive1)
        self.assert_no_active_block_jobs()
        self.assertFalse(self.vm.get_qmp_events(wait=False))

        # Emulate some writes
        if not race:
            self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                              ('0xfe', '16M', '256k'),
                                              ('0x64', '32736k', '64k')))
        self.hmp_io_writes(drive1['id'], (('0xba', 0, 512),
                                          ('0xef', '16M', '256k'),
                                          ('0x46', '32736k', '64k')))

        # Create incremental backup targets
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Ask for a new incremental backup for each drive, expecting
        # drive1's backup to fail. In the 'race' test, we expect drive1's
        # failure to cancel the still-empty drive0 job.
        transaction = [
            transaction_drive_backup(drive0['id'], target0, sync='incremental',
                                     format=drive0['fmt'], mode='existing',
                                     bitmap=dr0bm0.name),
            transaction_drive_backup(drive1['id'], target1, sync='incremental',
                                     format=drive1['fmt'], mode='existing',
                                     bitmap=dr1bm0.name)
        ]
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})

        # Observe that drive0's backup is cancelled and drive1 completes with
        # an error.
        self.wait_qmp_backup_cancelled(drive0['id'])
        self.assertFalse(self.wait_qmp_backup(drive1['id']))
        error = self.vm.event_wait('BLOCK_JOB_ERROR')
        self.assert_qmp(error, 'data', {'device': drive1['id'],
                                        'action': 'report',
                                        'operation': 'read'})
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # Delete the stale target images and drop our records of both
        # failed attempts.
        dr0bm0.del_target()
        dr1bm0.del_target()
        if race:
            # Don't re-run the transaction, we only wanted to test the race.
            self.vm.shutdown()
            return

        # Prepare fresh targets for a second attempt:
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Re-run the exact same transaction.
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})

        # Both should complete successfully this time.
        self.assertTrue(self.wait_qmp_backup(drive0['id']))
        self.assertTrue(self.wait_qmp_backup(drive1['id']))
        self.make_reference_backup(dr0bm0)
        self.make_reference_backup(dr1bm0)
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # And the images should of course validate.
        self.vm.shutdown()
        self.check_backups()

    def test_transaction_failure(self):
        '''Test: Verify backups made from a transaction that partially fails.

        Add a second drive with its own unique pattern, and add a bitmap to each
        drive. Use blkdebug to interfere with the backup on just one drive and
        attempt to create a coherent incremental backup across both drives.

        Verify a failure in one but not both, then delete the failed stubs and
        re-run the same transaction.

        Verify that both incrementals are created successfully.
        '''
        self.do_transaction_failure_test()

    def test_transaction_failure_race(self):
        '''Test: Verify that transactions with jobs that have no data to
        transfer do not cause race conditions in the cancellation of the entire
        transaction job group.
        '''
        self.do_transaction_failure_test(race=True)


    def test_sync_dirty_bitmap_missing(self):
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', format=self.drives[0]['fmt'],
                             target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')


    def test_sync_dirty_bitmap_not_found(self):
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', bitmap='unknown',
                             format=self.drives[0]['fmt'], target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')


    def test_sync_dirty_bitmap_bad_granularity(self):
        '''
        Test: Test what happens if we provide an improper granularity.

        The granularity must always be a power of 2.
        '''
        self.assert_no_active_block_jobs()
        self.assertRaises(AssertionError, self.add_bitmap,
                          'bitmap0', self.drives[0],
                          granularity=64000)

    def test_growing_before_backup(self):
        '''
        Test: Add a bitmap, truncate the image, write past the old
              end, do a backup.

        Incremental backup should not ignore dirty bits past the old
        image end.
        '''
        self.assert_no_active_block_jobs()

        self.create_anchor_backup()

        self.add_bitmap('bitmap0', self.drives[0])

        res = self.vm.qmp('block_resize', device=self.drives[0]['id'],
                          size=(65 * 1048576))
        self.assert_qmp(res, 'return', {})

        # Dirty the image past the old end
        self.vm.hmp_qemu_io(self.drives[0]['id'], 'write 64M 64k')

        target = self.prepare_backup(size='65M')
        self.create_incremental(target=target)

        self.vm.shutdown()
        self.check_backups()


class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
    '''Incremental backup tests that utilize a BlkDebug filter on drive0.'''

    def setUp(self):
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()

    def test_incremental_failure(self):
        '''Test: Verify backups made after a failure are correct.

        Simulate a failure during an incremental backup block job,
        emulate additional writes, then create another incremental backup
        afterwards and verify that the backup created is correct.
        '''

        drive0 = self.drives[0]
        result = self.vm.qmp('blockdev-add',
            node_name=drive0['id'],
            driver=drive0['fmt'],
            file={
                'driver': 'blkdebug',
                'image': {
                    'driver': 'file',
                    'filename': drive0['file']
                },
                'set-state': [{
                    'event': 'flush_to_disk',
                    'state': 1,
                    'new_state': 2
                }],
                'inject-error': [{
                    'event': 'read_aio',
                    'errno': 5,
                    'state': 2,
                    'immediately': False,
                    'once': True
                }],
            }
        )
        self.assert_qmp(result, 'return', {})

        self.create_anchor_backup(drive0)
        self.add_bitmap('bitmap0', drive0)
        # Note: at this point in a normal execution, the VM would resume and
        # begin issuing IO requests; the writes below emulate that activity.

        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        result = self.create_incremental(validate=False)
        self.assertFalse(result)
        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()

    def test_incremental_pause(self):
        """
        Test an incremental backup that errors into a pause and is resumed.
        """

        drive0 = self.drives[0]
        # NB: The blkdebug script here looks for a "flush, read" pattern.
        # The flush occurs in hmp_io_writes, and the read during the block job.
        result = self.vm.qmp('blockdev-add',
                             node_name=drive0['id'],
                             driver=drive0['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {
                                     'driver': 'file',
                                     'filename': drive0['file']
                                 },
                                 'set-state': [{
                                     'event': 'flush_to_disk',
                                     'state': 1,
                                     'new_state': 2
                                 }],
                                 'inject-error': [{
                                     'event': 'read_aio',
                                     'errno': 5,
                                     'state': 2,
                                     'immediately': False,
                                     'once': True
                                 }],
                             })
        self.assert_qmp(result, 'return', {})
        self.create_anchor_backup(drive0)
        bitmap = self.add_bitmap('bitmap0', drive0)

        # Emulate guest activity
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'count': 458752,
                'granularity': 65536,
                'status': 'active',
                'busy': False,
                'recording': True
            }))

        # Start backup
        parent, _ = bitmap.last_target()
        target = self.prepare_backup(bitmap, parent)
        res = self.vm.qmp('drive-backup',
                          job_id=bitmap.drive['id'],
                          device=bitmap.drive['id'],
                          sync='incremental',
                          bitmap=bitmap.name,
                          format=bitmap.drive['fmt'],
                          target=target,
                          mode='existing',
                          on_source_error='stop')
        self.assert_qmp(res, 'return', {})

        # Wait for the error
        event = self.vm.event_wait(name="BLOCK_JOB_ERROR",
                                   match={"data": {"device": bitmap.drive['id']}})
        self.assert_qmp(event, 'data', {'device': bitmap.drive['id'],
                                        'action': 'stop',
                                        'operation': 'read'})

        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'count': 458752,
                'granularity': 65536,
                'status': 'frozen',
                'busy': True,
                'recording': True
            }))

        # Resume and check incremental backup for consistency
        res = self.vm.qmp('block-job-resume', device=bitmap.drive['id'])
        self.assert_qmp(res, 'return', {})
        self.wait_qmp_backup(bitmap.drive['id'])

        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'count': 0,
                'granularity': 65536,
                'status': 'active',
                'busy': False,
                'recording': True
            }))
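
        # Explanatory note (hedged): the count returning to 0 reflects that a
        # successfully completed incremental backup clears the bits it copied
        # out, so only writes made after this point would dirty the bitmap
        # again.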

        # Finalize / Cleanup
        self.make_reference_backup(bitmap)
        self.vm.shutdown()
        self.check_backups()


if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2'],
                 supported_protocols=['file'])