#!/usr/bin/env python3
# group: rw backing
#
# Tests for incremental drive-backup
#
# Copyright (C) 2015 John Snow for Red Hat, Inc.
#
# Based on 056.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import os
import iotests
from iotests import try_remove


def io_write_patterns(img, patterns):
    for pattern in patterns:
        iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img)
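# Each pattern is a (pattern byte, offset, length) tuple; for example,
# ('0xd5', '1M', '32k') expands to the qemu-io command 'write -P0xd5 1M 32k'.

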
def transaction_action(action, **kwargs):
    return {
        'type': action,
        'data': dict((k.replace('_', '-'), v) for k, v in kwargs.items())
    }


def transaction_bitmap_clear(node, name, **kwargs):
    return transaction_action('block-dirty-bitmap-clear',
                              node=node, name=name, **kwargs)


def transaction_drive_backup(device, target, **kwargs):
    return transaction_action('drive-backup', job_id=device, device=device,
                              target=target, **kwargs)
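# Traced from the helpers above, a call such as
#   transaction_drive_backup('drive0', '/tmp/backup.img', sync='full')
# (the target path here is purely illustrative) builds the transaction action:
#   {'type': 'drive-backup',
#    'data': {'job-id': 'drive0', 'device': 'drive0',
#             'target': '/tmp/backup.img', 'sync': 'full'}}
# Note that transaction_action() rewrites underscores in keyword names into
# the hyphens that QMP expects.

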
class Bitmap:
    def __init__(self, name, drive):
        self.name = name
        self.drive = drive
        self.num = 0
        self.backups = list()

    def base_target(self):
        return (self.drive['backup'], None)

    def new_target(self, num=None):
        if num is None:
            num = self.num
        self.num = num + 1
        base = os.path.join(iotests.test_dir,
                            "%s.%s." % (self.drive['id'], self.name))
        suff = "%i.%s" % (num, self.drive['fmt'])
        target = base + "inc" + suff
        reference = base + "ref" + suff
        self.backups.append((target, reference))
        return (target, reference)
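    # new_target() names each (incremental, reference) pair after the drive,
    # bitmap and sequence number; e.g. the first call for bitmap 'bitmap0' on
    # 'drive0' with a qcow2 image yields 'drive0.bitmap0.inc0.qcow2' and
    # 'drive0.bitmap0.ref0.qcow2' in iotests.test_dir.
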
    def last_target(self):
        if self.backups:
            return self.backups[-1]
        return self.base_target()

    def del_target(self):
        for image in self.backups.pop():
            try_remove(image)
        self.num -= 1

    def cleanup(self):
        for backup in self.backups:
            for image in backup:
                try_remove(image)


class TestIncrementalBackupBase(iotests.QMPTestCase):
    def __init__(self, *args):
        super(TestIncrementalBackupBase, self).__init__(*args)
        self.bitmaps = list()
        self.files = list()
        self.drives = list()
        self.vm = iotests.VM()
        self.err_img = os.path.join(iotests.test_dir, 'err.%s' % iotests.imgfmt)


    def setUp(self):
        # Create a base image with a distinctive patterning
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.vm.add_drive(drive0['file'], opts='node-name=node0')
        self.write_default_pattern(drive0['file'])
        self.vm.launch()


    def write_default_pattern(self, target):
        io_write_patterns(target, (('0x41', 0, 512),
                                   ('0xd5', '1M', '32k'),
                                   ('0xdc', '32M', '124k')))


    def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None):
        if path is None:
            path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt))
        if backup is None:
            backup = os.path.join(iotests.test_dir,
                                  '%s.full.backup.%s' % (node_id, fmt))

        self.drives.append({
            'id': node_id,
            'file': path,
            'backup': backup,
            'fmt': fmt })
        return self.drives[-1]


    def img_create(self, img, fmt=iotests.imgfmt, size='64M',
                   parent=None, parentFormat=None, **kwargs):
        optargs = []
        for k,v in kwargs.items():
            optargs = optargs + ['-o', '%s=%s' % (k,v)]
        args = ['create', '-f', fmt] + optargs + [img, size]
        if parent:
            if parentFormat is None:
                parentFormat = fmt
            args = args + ['-b', parent, '-F', parentFormat]
        iotests.qemu_img(*args)
        self.files.append(img)
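    # As a rough sketch (filenames purely illustrative, and assuming qcow2,
    # the only format this test supports), a call such as
    #   self.img_create('inc0.qcow2', parent='anchor.qcow2',
    #                   cluster_size='128k')
    # runs qemu-img with the argument list it builds above:
    #   create -f qcow2 -o cluster_size=128k inc0.qcow2 64M -b anchor.qcow2 -F qcow2
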
    def do_qmp_backup(self, error='Input/output error', **kwargs):
        res = self.vm.qmp('drive-backup', **kwargs)
        self.assert_qmp(res, 'return', {})
        return self.wait_qmp_backup(kwargs['device'], error)


    def ignore_job_status_change_events(self):
        while True:
            e = self.vm.event_wait(name="JOB_STATUS_CHANGE")
            if e['data']['status'] == 'null':
                break

    def wait_qmp_backup(self, device, error='Input/output error'):
        event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        self.ignore_job_status_change_events()

        try:
            failure = self.dictpath(event, 'data/error')
        except AssertionError:
            # Backup succeeded.
            self.assert_qmp(event, 'data/offset', event['data']['len'])
            return True
        else:
            # Backup failed.
            self.assert_qmp(event, 'data/error', error)
            return False


    def wait_qmp_backup_cancelled(self, device):
        event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED',
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        self.ignore_job_status_change_events()


    def create_anchor_backup(self, drive=None):
        if drive is None:
            drive = self.drives[-1]
        res = self.do_qmp_backup(job_id=drive['id'],
                                 device=drive['id'], sync='full',
                                 format=drive['fmt'], target=drive['backup'])
        self.assertTrue(res)
        self.files.append(drive['backup'])
        return drive['backup']


    def make_reference_backup(self, bitmap=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        _, reference = bitmap.last_target()
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'], sync='full',
                                 format=bitmap.drive['fmt'], target=reference)
        self.assertTrue(res)


    def add_bitmap(self, name, drive, **kwargs):
        bitmap = Bitmap(name, drive)
        self.bitmaps.append(bitmap)
        result = self.vm.qmp('block-dirty-bitmap-add', node=drive['id'],
                             name=bitmap.name, **kwargs)
        self.assert_qmp(result, 'return', {})
        return bitmap


    def prepare_backup(self, bitmap=None, parent=None, **kwargs):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target, _ = bitmap.new_target()
        self.img_create(target, bitmap.drive['fmt'], parent=parent,
                        **kwargs)
        return target


    def create_incremental(self, bitmap=None, parent=None,
                           parentFormat=None, validate=True,
                           target=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        if target is None:
            target = self.prepare_backup(bitmap, parent)
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'],
                                 sync='incremental', bitmap=bitmap.name,
                                 format=bitmap.drive['fmt'], target=target,
                                 mode='existing')
        if not res:
            bitmap.del_target()
            self.assertFalse(validate)
        else:
            self.make_reference_backup(bitmap)
        return res


    def check_backups(self):
        for bitmap in self.bitmaps:
            for incremental, reference in bitmap.backups:
                self.assertTrue(iotests.compare_images(incremental, reference))
            last = bitmap.last_target()[0]
            self.assertTrue(iotests.compare_images(last, bitmap.drive['file']))


    def hmp_io_writes(self, drive, patterns):
        for pattern in patterns:
            self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
        self.vm.hmp_qemu_io(drive, 'flush')


    def do_incremental_simple(self, **kwargs):
        self.create_anchor_backup()
        self.add_bitmap('bitmap0', self.drives[0], **kwargs)

        # Sanity: Create a "hollow" incremental backup
        self.create_incremental()
        # Three writes: One complete overwrite, one new segment,
        # and one partial overlap.
        self.hmp_io_writes(self.drives[0]['id'], (('0xab', 0, 512),
                                                  ('0xfe', '16M', '256k'),
                                                  ('0x64', '32736k', '64k')))
        self.create_incremental()
        # Three more writes, one of each kind, like above
        self.hmp_io_writes(self.drives[0]['id'], (('0x9a', 0, 512),
                                                  ('0x55', '8M', '352k'),
                                                  ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()


    def tearDown(self):
        self.vm.shutdown()
        for bitmap in self.bitmaps:
            bitmap.cleanup()
        for filename in self.files:
            try_remove(filename)


class TestIncrementalBackup(TestIncrementalBackupBase):
    def test_incremental_simple(self):
        '''
        Test: Create and verify three incremental backups.

        Create a bitmap and a full backup before VM execution begins,
        then create a series of three incremental backups "during execution,"
        i.e., after IO requests begin modifying the drive.
        '''
        return self.do_incremental_simple()


    def test_small_granularity(self):
        '''
        Test: Create and verify backups made with a small granularity bitmap.

        Perform the same test as test_incremental_simple, but with a granularity
        of only 32KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=32768)


    def test_large_granularity(self):
        '''
        Test: Create and verify backups made with a large granularity bitmap.

        Perform the same test as test_incremental_simple, but with a granularity
        of 128KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=131072)


    def test_larger_cluster_target(self):
        '''
        Test: Create and verify backups made to a larger cluster size target.

        With a default granularity of 64KiB, verify that backups made to a
        target with a larger cluster size of 128KiB and no backing file work
        correctly.
        '''
        drive0 = self.drives[0]

        # Create a cluster_size=128k full backup / "anchor" backup
        self.img_create(drive0['backup'], cluster_size='128k')
        self.assertTrue(self.do_qmp_backup(device=drive0['id'], sync='full',
                                           format=drive0['fmt'],
                                           target=drive0['backup'],
                                           mode='existing'))

        # Create bitmap and dirty it with some new writes.
        # overwrite [32736, 32799] which will dirty bitmap clusters at
        # 32M-64K and 32M. 32M+64K will be left undirtied.
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'],
                           (('0xab', 0, 512),
                            ('0xfe', '16M', '256k'),
                            ('0x64', '32736k', '64k')))
        # Check the dirty bitmap stats
        self.assertTrue(self.vm.check_bitmap_status(
            'node0', bitmap0.name, {
                'name': 'bitmap0',
                'count': 458752,
                'granularity': 65536,
                'status': 'active',
                'persistent': False
            }))
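        # The 'count' checked above is the number of dirty bytes: at 64KiB
        # granularity the three writes dirty 1 + 4 + 2 = 7 bitmap clusters,
        # i.e. 7 * 65536 = 458752 bytes.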

        # Prepare a cluster_size=128k backup target without a backing file.
        (target, _) = bitmap0.new_target()
        self.img_create(target, bitmap0.drive['fmt'], cluster_size='128k')

        # Perform Incremental Backup
        self.assertTrue(self.do_qmp_backup(device=bitmap0.drive['id'],
                                           sync='incremental',
                                           bitmap=bitmap0.name,
                                           format=bitmap0.drive['fmt'],
                                           target=target,
                                           mode='existing'))
        self.make_reference_backup(bitmap0)

        # Add the backing file, then compare and exit.
        iotests.qemu_img('rebase', '-f', drive0['fmt'], '-u', '-b',
                         drive0['backup'], '-F', drive0['fmt'], target)
        self.vm.shutdown()
        self.check_backups()


    def test_incremental_transaction(self):
        '''Test: Verify backups made from transactionally created bitmaps.

        Create a bitmap "before" VM execution begins, then create a second
        bitmap AFTER writes have already occurred. Use transactions to create
        a full backup and synchronize both bitmaps to this backup.
        Create an incremental backup through both bitmaps and verify that
        both backups match the current drive0 image.
        '''

        drive0 = self.drives[0]
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        bitmap1 = self.add_bitmap('bitmap1', drive0)

        result = self.vm.qmp('transaction', actions=[
            transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name),
            transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name),
            transaction_drive_backup(drive0['id'], drive0['backup'],
                                     sync='full', format=drive0['fmt'])
        ])
        self.assert_qmp(result, 'return', {})
        self.wait_until_completed(drive0['id'])
        self.files.append(drive0['backup'])

        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        # Both bitmaps should be correctly in sync.
        self.create_incremental(bitmap0)
        self.create_incremental(bitmap1)
        self.vm.shutdown()
        self.check_backups()


    def do_transaction_failure_test(self, race=False):
        # Create a second drive, with pattern:
        drive1 = self.add_node('drive1')
        self.img_create(drive1['file'], drive1['fmt'])
        io_write_patterns(drive1['file'], (('0x14', 0, 512),
                                           ('0x5d', '1M', '32k'),
                                           ('0xcd', '32M', '124k')))

        # Create a blkdebug interface to this img as 'drive1'
        result = self.vm.qmp('blockdev-add',
            node_name=drive1['id'],
            driver=drive1['fmt'],
            file={
                'driver': 'blkdebug',
                'image': {
                    'driver': 'file',
                    'filename': drive1['file']
                },
                'set-state': [{
                    'event': 'flush_to_disk',
                    'state': 1,
                    'new_state': 2
                }],
                'inject-error': [{
                    'event': 'read_aio',
                    'errno': 5,
                    'state': 2,
                    'immediately': False,
                    'once': True
                }],
            }
        )
        self.assert_qmp(result, 'return', {})
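        # With the blkdebug rules above, the 'flush_to_disk' event moves the
        # node from state 1 to state 2, and the first 'read_aio' issued in
        # state 2 then fails once with EIO (errno 5). hmp_io_writes() ends
        # with a flush, so it is the read issued by the subsequent backup job
        # that fails.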

        # Create bitmaps and full backups for both drives
        drive0 = self.drives[0]
        dr0bm0 = self.add_bitmap('bitmap0', drive0)
        dr1bm0 = self.add_bitmap('bitmap0', drive1)
        self.create_anchor_backup(drive0)
        self.create_anchor_backup(drive1)
        self.assert_no_active_block_jobs()
        self.assertFalse(self.vm.get_qmp_events(wait=False))

        # Emulate some writes
        if not race:
            self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                              ('0xfe', '16M', '256k'),
                                              ('0x64', '32736k', '64k')))
        self.hmp_io_writes(drive1['id'], (('0xba', 0, 512),
                                          ('0xef', '16M', '256k'),
                                          ('0x46', '32736k', '64k')))

        # Create incremental backup targets
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Ask for a new incremental backup for each drive,
        # expecting drive1's backup to fail. In the 'race' test,
        # we expect drive1 to attempt to cancel the empty drive0 job.
        transaction = [
            transaction_drive_backup(drive0['id'], target0, sync='incremental',
                                     format=drive0['fmt'], mode='existing',
                                     bitmap=dr0bm0.name),
            transaction_drive_backup(drive1['id'], target1, sync='incremental',
                                     format=drive1['fmt'], mode='existing',
                                     bitmap=dr1bm0.name)
        ]
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})

        # Observe that drive0's backup is cancelled and drive1 completes with
        # an error.
        self.wait_qmp_backup_cancelled(drive0['id'])
        self.assertFalse(self.wait_qmp_backup(drive1['id']))
        error = self.vm.event_wait('BLOCK_JOB_ERROR')
        self.assert_qmp(error, 'data', {'device': drive1['id'],
                                        'action': 'report',
                                        'operation': 'read'})
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # Delete drive0's successful target and eliminate our record of the
        # unsuccessful drive1 target.
        dr0bm0.del_target()
        dr1bm0.del_target()
        if race:
            # Don't re-run the transaction, we only wanted to test the race.
            self.vm.shutdown()
            return

        # Re-run the same transaction:
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Re-run the exact same transaction.
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode':'grouped'})
        self.assert_qmp(result, 'return', {})

        # Both should complete successfully this time.
        self.assertTrue(self.wait_qmp_backup(drive0['id']))
        self.assertTrue(self.wait_qmp_backup(drive1['id']))
        self.make_reference_backup(dr0bm0)
        self.make_reference_backup(dr1bm0)
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # And the images should of course validate.
        self.vm.shutdown()
        self.check_backups()

    def test_transaction_failure(self):
        '''Test: Verify backups made from a transaction that partially fails.

        Add a second drive with its own unique pattern, and add a bitmap to each
        drive. Use blkdebug to interfere with the backup on just one drive and
        attempt to create a coherent incremental backup across both drives.

        Verify a failure in one but not both, then delete the failed stubs and
        re-run the same transaction.

        Verify that both incrementals are created successfully.
        '''
        self.do_transaction_failure_test()

    def test_transaction_failure_race(self):
        '''Test: Verify that transactions with jobs that have no data to
        transfer do not cause race conditions in the cancellation of the entire
        transaction job group.
        '''
        self.do_transaction_failure_test(race=True)


    def test_sync_dirty_bitmap_missing(self):
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', format=self.drives[0]['fmt'],
                             target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')


    def test_sync_dirty_bitmap_not_found(self):
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', bitmap='unknown',
                             format=self.drives[0]['fmt'], target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')


    def test_sync_dirty_bitmap_bad_granularity(self):
        '''
        Test: Test what happens if we provide an improper granularity.

        The granularity must always be a power of 2.
        '''
        self.assert_no_active_block_jobs()
        # 64000 is not a power of two, so add_bitmap() is expected to fail
        # here.
        self.assertRaises(AssertionError, self.add_bitmap,
                          'bitmap0', self.drives[0],
                          granularity=64000)

    def test_growing_before_backup(self):
        '''
        Test: Add a bitmap, truncate the image, write past the old
              end, do a backup.

        Incremental backup should not ignore dirty bits past the old
        image end.
        '''
        self.assert_no_active_block_jobs()

        self.create_anchor_backup()

        self.add_bitmap('bitmap0', self.drives[0])

        res = self.vm.qmp('block_resize', device=self.drives[0]['id'],
                          size=(65 * 1048576))
        self.assert_qmp(res, 'return', {})

        # Dirty the image past the old end
        self.vm.hmp_qemu_io(self.drives[0]['id'], 'write 64M 64k')

        target = self.prepare_backup(size='65M')
        self.create_incremental(target=target)

        self.vm.shutdown()
        self.check_backups()


class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
    '''Incremental backup tests that utilize a BlkDebug filter on drive0.'''

    def setUp(self):
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()

    def test_incremental_failure(self):
        '''Test: Verify backups made after a failure are correct.

        Simulate a failure during an incremental backup block job,
        emulate additional writes, then create another incremental backup
        afterwards and verify that the backup created is correct.
        '''

        drive0 = self.drives[0]
        result = self.vm.qmp('blockdev-add',
            node_name=drive0['id'],
            driver=drive0['fmt'],
            file={
                'driver': 'blkdebug',
                'image': {
                    'driver': 'file',
                    'filename': drive0['file']
                },
                'set-state': [{
                    'event': 'flush_to_disk',
                    'state': 1,
                    'new_state': 2
                }],
                'inject-error': [{
                    'event': 'read_aio',
                    'errno': 5,
                    'state': 2,
                    'immediately': False,
                    'once': True
                }],
            }
        )
        self.assert_qmp(result, 'return', {})

        self.create_anchor_backup(drive0)
        self.add_bitmap('bitmap0', drive0)
        # Note: at this point during a normal execution, the VM would resume
        # and begin issuing IO requests here.

        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        result = self.create_incremental(validate=False)
        self.assertFalse(result)
        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()

    def test_incremental_pause(self):
        """
        Test an incremental backup that errors into a pause and is resumed.
        """

        drive0 = self.drives[0]
        # NB: The blkdebug script here looks for a "flush, read" pattern.
        # The flush occurs in hmp_io_writes, and the read during the block job.
        result = self.vm.qmp('blockdev-add',
                             node_name=drive0['id'],
                             driver=drive0['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {
                                     'driver': 'file',
                                     'filename': drive0['file']
                                 },
                                 'set-state': [{
                                     'event': 'flush_to_disk',
                                     'state': 1,
                                     'new_state': 2
                                 }],
                                 'inject-error': [{
                                     'event': 'read_aio',
                                     'errno': 5,
                                     'state': 2,
                                     'immediately': False,
                                     'once': True
                                 }],
                             })
        self.assert_qmp(result, 'return', {})
        self.create_anchor_backup(drive0)
        bitmap = self.add_bitmap('bitmap0', drive0)

        # Emulate guest activity
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'count': 458752,
                'granularity': 65536,
                'status': 'active',
                'busy': False,
                'recording': True
            }))

        # Start backup
        parent, _ = bitmap.last_target()
        target = self.prepare_backup(bitmap, parent)
        res = self.vm.qmp('drive-backup',
                          job_id=bitmap.drive['id'],
                          device=bitmap.drive['id'],
                          sync='incremental',
                          bitmap=bitmap.name,
                          format=bitmap.drive['fmt'],
                          target=target,
                          mode='existing',
                          on_source_error='stop')
        self.assert_qmp(res, 'return', {})

        # Wait for the error
        event = self.vm.event_wait(name="BLOCK_JOB_ERROR",
                                   match={"data":{"device":bitmap.drive['id']}})
        self.assert_qmp(event, 'data', {'device': bitmap.drive['id'],
                                        'action': 'stop',
                                        'operation': 'read'})

        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'count': 458752,
                'granularity': 65536,
                'status': 'frozen',
                'busy': True,
                'recording': True
            }))

        # Resume and check incremental backup for consistency
        res = self.vm.qmp('block-job-resume', device=bitmap.drive['id'])
        self.assert_qmp(res, 'return', {})
        self.wait_qmp_backup(bitmap.drive['id'])

        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'count': 0,
                'granularity': 65536,
                'status': 'active',
                'busy': False,
                'recording': True
            }))

        # Finalize / Cleanup
        self.make_reference_backup(bitmap)
        self.vm.shutdown()
        self.check_backups()


if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2'],
                 supported_protocols=['file'])