# Tests for incremental drive-backup
#
# Copyright (C) 2015 John Snow for Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
import os

import iotests


def try_remove(img):
    # Best-effort removal of test artifacts; a missing file is not an error.
    try:
        os.remove(img)
    except OSError:
        pass


def io_write_patterns(img, patterns):
    for pattern in patterns:
        iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img)
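# io_write_patterns() takes (byte value, offset, length) tuples; for example,
# ('0x41', 0, 512) expands to the qemu-io command "write -P0x41 0 512".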


def transaction_action(action, **kwargs):
    return {
        'type': action,
        'data': dict((k.replace('_', '-'), v) for k, v in kwargs.items())
    }


def transaction_bitmap_clear(node, name, **kwargs):
    return transaction_action('block-dirty-bitmap-clear',
                              node=node, name=name, **kwargs)


def transaction_drive_backup(device, target, **kwargs):
    return transaction_action('drive-backup', job_id=device, device=device,
                              target=target, **kwargs)
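# For illustration: transaction_drive_backup('drive0', 'img', sync='full')
# builds the QMP transaction action
#   {'type': 'drive-backup',
#    'data': {'job-id': 'drive0', 'device': 'drive0',
#             'target': 'img', 'sync': 'full'}}
# (underscores in keyword names become the dashes QMP expects).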


class Bitmap:
    def __init__(self, name, drive):
        self.name = name
        self.drive = drive
        self.num = 0
        self.backups = list()

    def base_target(self):
        return (self.drive['backup'], None)

    def new_target(self, num=None):
        if num is None:
            num = self.num
        self.num = num + 1
        base = os.path.join(iotests.test_dir,
                            "%s.%s." % (self.drive['id'], self.name))
        suff = "%i.%s" % (num, self.drive['fmt'])
        target = base + "inc" + suff
        reference = base + "ref" + suff
        self.backups.append((target, reference))
        return (target, reference)

    def last_target(self):
        if self.backups:
            return self.backups[-1]
        return self.base_target()

    def del_target(self):
        for image in self.backups.pop():
            try_remove(image)
        self.num -= 1

    def cleanup(self):
        for backup in self.backups:
            for image in backup:
                try_remove(image)


class TestIncrementalBackupBase(iotests.QMPTestCase):
    def __init__(self, *args):
        super(TestIncrementalBackupBase, self).__init__(*args)
        self.bitmaps = list()
        self.files = list()
        self.drives = list()
        self.vm = iotests.VM()
        self.err_img = os.path.join(iotests.test_dir, 'err.%s' % iotests.imgfmt)

    def setUp(self):
        # Create a base image with a distinctive patterning
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.vm.add_drive(drive0['file'], opts='node-name=node0')
        self.write_default_pattern(drive0['file'])
        self.vm.launch()

    def write_default_pattern(self, target):
        io_write_patterns(target, (('0x41', 0, 512),
                                   ('0xd5', '1M', '32k'),
                                   ('0xdc', '32M', '124k')))
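    # The default pattern marks three regions (512 bytes at 0, 32k at 1M and
    # 124k at 32M); later writes in these tests deliberately overwrite,
    # extend past, or partially overlap these regions.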

    def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None):
        if path is None:
            path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt))
        if backup is None:
            backup = os.path.join(iotests.test_dir,
                                  '%s.full.backup.%s' % (node_id, fmt))

        self.drives.append({
            'id': node_id,
            'file': path,
            'backup': backup,
            'fmt': fmt})
        return self.drives[-1]

    def img_create(self, img, fmt=iotests.imgfmt, size='64M',
                   parent=None, parentFormat=None, **kwargs):
        optargs = []
        for k, v in kwargs.items():
            optargs = optargs + ['-o', '%s=%s' % (k, v)]
        args = ['create', '-f', fmt] + optargs + [img, size]
        if parent:
            if parentFormat is None:
                parentFormat = fmt
            args = args + ['-b', parent, '-F', parentFormat]
        iotests.qemu_img(*args)
        self.files.append(img)

    def do_qmp_backup(self, error='Input/output error', **kwargs):
        res = self.vm.qmp('drive-backup', **kwargs)
        self.assert_qmp(res, 'return', {})
        return self.wait_qmp_backup(kwargs['device'], error)

    def ignore_job_status_change_events(self):
        while True:
            e = self.vm.event_wait(name="JOB_STATUS_CHANGE")
            if e['data']['status'] == 'null':
                break

    def wait_qmp_backup(self, device, error='Input/output error'):
        event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        self.ignore_job_status_change_events()

        try:
            failure = self.dictpath(event, 'data/error')
        except AssertionError:
            # Backup succeeded.
            self.assert_qmp(event, 'data/offset', event['data']['len'])
            return True
        else:
            # Backup failed.
            self.assert_qmp(event, 'data/error', error)
            return False

    def wait_qmp_backup_cancelled(self, device):
        event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED',
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        self.ignore_job_status_change_events()

    def create_anchor_backup(self, drive=None):
        if drive is None:
            drive = self.drives[-1]
        res = self.do_qmp_backup(job_id=drive['id'],
                                 device=drive['id'], sync='full',
                                 format=drive['fmt'], target=drive['backup'])
        self.assertTrue(res)
        self.files.append(drive['backup'])
        return drive['backup']

    def make_reference_backup(self, bitmap=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        _, reference = bitmap.last_target()
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'], sync='full',
                                 format=bitmap.drive['fmt'], target=reference)
        self.assertTrue(res)

    def add_bitmap(self, name, drive, **kwargs):
        bitmap = Bitmap(name, drive)
        self.bitmaps.append(bitmap)
        result = self.vm.qmp('block-dirty-bitmap-add', node=drive['id'],
                             name=bitmap.name, **kwargs)
        self.assert_qmp(result, 'return', {})
        return bitmap

    def prepare_backup(self, bitmap=None, parent=None, **kwargs):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target, _ = bitmap.new_target()
        self.img_create(target, bitmap.drive['fmt'], parent=parent,
                        **kwargs)
        return target

    def create_incremental(self, bitmap=None, parent=None,
                           parentFormat=None, validate=True,
                           target=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        if target is None:
            target = self.prepare_backup(bitmap, parent)
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'],
                                 sync='incremental', bitmap=bitmap.name,
                                 format=bitmap.drive['fmt'], target=target,
                                 mode='existing')
        if not res:
            self.assertFalse(validate)
        else:
            self.make_reference_backup(bitmap)
        return res

    def check_backups(self):
        for bitmap in self.bitmaps:
            for incremental, reference in bitmap.backups:
                self.assertTrue(iotests.compare_images(incremental, reference))
            last = bitmap.last_target()[0]
            self.assertTrue(iotests.compare_images(last, bitmap.drive['file']))

    def hmp_io_writes(self, drive, patterns):
        for pattern in patterns:
            self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
        self.vm.hmp_qemu_io(drive, 'flush')
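    # hmp_io_writes() differs from io_write_patterns() in that the writes are
    # issued through the running VM's monitor rather than against the image
    # file, so attached dirty bitmaps observe them; the trailing flush is also
    # what trips the blkdebug "flush_to_disk" triggers used further below.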

    def do_incremental_simple(self, **kwargs):
        self.create_anchor_backup()
        self.add_bitmap('bitmap0', self.drives[0], **kwargs)

        # Sanity: Create a "hollow" incremental backup
        self.create_incremental()
        # Three writes: One complete overwrite, one new segment,
        # and one partial overlap.
        self.hmp_io_writes(self.drives[0]['id'], (('0xab', 0, 512),
                                                  ('0xfe', '16M', '256k'),
                                                  ('0x64', '32736k', '64k')))
        self.create_incremental()
        # Three more writes, one of each kind, like above
        self.hmp_io_writes(self.drives[0]['id'], (('0x9a', 0, 512),
                                                  ('0x55', '8M', '352k'),
                                                  ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.check_backups()

    def tearDown(self):
        self.vm.shutdown()
        for bitmap in self.bitmaps:
            bitmap.cleanup()
        for filename in self.files:
            try_remove(filename)


class TestIncrementalBackup(TestIncrementalBackupBase):
    def test_incremental_simple(self):
        '''
        Test: Create and verify three incremental backups.

        Create a bitmap and a full backup before VM execution begins,
        then create a series of three incremental backups "during execution,"
        i.e., after IO requests begin modifying the drive.
        '''
        return self.do_incremental_simple()

    def test_small_granularity(self):
        '''
        Test: Create and verify backups made with a small granularity bitmap.

        Perform the same test as test_incremental_simple, but with a granularity
        of only 32KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=32768)

    def test_large_granularity(self):
        '''
        Test: Create and verify backups made with a large granularity bitmap.

        Perform the same test as test_incremental_simple, but with a granularity
        of 128KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=131072)

    def test_larger_cluster_target(self):
        '''
        Test: Create and verify backups made to a larger cluster size target.

        With a default granularity of 64KiB, verify that backups made to a
        larger cluster size target of 128KiB without a backing file work.
        '''
        drive0 = self.drives[0]

        # Create a cluster_size=128k full backup / "anchor" backup
        self.img_create(drive0['backup'], cluster_size='128k')
        self.assertTrue(self.do_qmp_backup(device=drive0['id'], sync='full',
                                           format=drive0['fmt'],
                                           target=drive0['backup'],
                                           mode='existing'))

        # Create bitmap and dirty it with some new writes.
        # overwrite [32736, 32799] which will dirty bitmap clusters at
        # 32M-64K and 32M. 32M+64K will be left undirtied.
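        # (The offsets above are in KiB: the 64k write at 32736k covers bytes
        # [32M-32K, 32M+32K), which touches the 64KiB clusters starting at
        # 32M-64K and 32M, but not the one at 32M+64K.)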
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'],
                           (('0xab', 0, 512),
                            ('0xfe', '16M', '256k'),
                            ('0x64', '32736k', '64k')))
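        # With the default 64KiB granularity, these three writes dirty
        # 1 + 4 + 2 = 7 bitmap clusters, i.e. 7 * 65536 = 458752 bytes.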
        # Check the dirty bitmap stats
        self.assertTrue(self.vm.check_bitmap_status(
            'node0', bitmap0.name, {
                'granularity': 65536,
            }))

        # Prepare a cluster_size=128k backup target without a backing file.
        (target, _) = bitmap0.new_target()
        self.img_create(target, bitmap0.drive['fmt'], cluster_size='128k')

        # Perform Incremental Backup
        self.assertTrue(self.do_qmp_backup(device=bitmap0.drive['id'],
                                           sync='incremental',
                                           bitmap=bitmap0.name,
                                           format=bitmap0.drive['fmt'],
                                           target=target,
                                           mode='existing'))
        self.make_reference_backup(bitmap0)

        # Add the backing file, then compare and exit.
        iotests.qemu_img('rebase', '-f', drive0['fmt'], '-u', '-b',
                         drive0['backup'], '-F', drive0['fmt'], target)
        self.vm.shutdown()
        self.check_backups()

    def test_incremental_transaction(self):
        '''Test: Verify backups made from transactionally created bitmaps.

        Create a bitmap "before" VM execution begins, then create a second
        bitmap AFTER writes have already occurred. Use transactions to create
        a full backup and synchronize both bitmaps to this backup.
        Create an incremental backup through both bitmaps and verify that
        both backups match the current drive0 image.
        '''
        drive0 = self.drives[0]
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        bitmap1 = self.add_bitmap('bitmap1', drive0)

        result = self.vm.qmp('transaction', actions=[
            transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name),
            transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name),
            transaction_drive_backup(drive0['id'], drive0['backup'],
                                     sync='full', format=drive0['fmt'])
        ])
        self.assert_qmp(result, 'return', {})
        self.wait_until_completed(drive0['id'])
        self.files.append(drive0['backup'])

        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        # Both bitmaps should be correctly in sync.
        self.create_incremental(bitmap0)
        self.create_incremental(bitmap1)
        self.vm.shutdown()
        self.check_backups()

    def do_transaction_failure_test(self, race=False):
        # Create a second drive, with pattern:
        drive1 = self.add_node('drive1')
        self.img_create(drive1['file'], drive1['fmt'])
        io_write_patterns(drive1['file'], (('0x14', 0, 512),
                                           ('0x5d', '1M', '32k'),
                                           ('0xcd', '32M', '124k')))

        # Create a blkdebug interface to this img as 'drive1'
        result = self.vm.qmp('blockdev-add',
                             node_name=drive1['id'],
                             driver=drive1['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {'driver': 'file',
                                           'filename': drive1['file']},
                                 # Arm the error after the first flush ...
                                 'set-state': [{'event': 'flush_to_disk',
                                                'state': 1, 'new_state': 2}],
                                 # ... then fail the following read with EIO.
                                 'inject-error': [{'event': 'read_aio',
                                                   'errno': 5, 'state': 2,
                                                   'immediately': False,
                                                   'once': True}],
                             })
        self.assert_qmp(result, 'return', {})

        # Create bitmaps and full backups for both drives
        drive0 = self.drives[0]
        dr0bm0 = self.add_bitmap('bitmap0', drive0)
        dr1bm0 = self.add_bitmap('bitmap0', drive1)
        self.create_anchor_backup(drive0)
        self.create_anchor_backup(drive1)
        self.assert_no_active_block_jobs()
        self.assertFalse(self.vm.get_qmp_events(wait=False))

        # Emulate some writes
        if not race:
            self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                              ('0xfe', '16M', '256k'),
                                              ('0x64', '32736k', '64k')))
        self.hmp_io_writes(drive1['id'], (('0xba', 0, 512),
                                          ('0xef', '16M', '256k'),
                                          ('0x46', '32736k', '64k')))

        # Create incremental backup targets
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Ask for a new incremental backup per-each drive,
        # expecting drive1's backup to fail. In the 'race' test,
        # we expect drive1 to attempt to cancel the empty drive0 job.
        transaction = [
            transaction_drive_backup(drive0['id'], target0, sync='incremental',
                                     format=drive0['fmt'], mode='existing',
                                     bitmap=dr0bm0.name),
            transaction_drive_backup(drive1['id'], target1, sync='incremental',
                                     format=drive1['fmt'], mode='existing',
                                     bitmap=dr1bm0.name)
        ]
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})
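        # 'completion-mode': 'grouped' ties the jobs together: if either
        # backup fails, the other is cancelled instead of completing on its
        # own, so both bitmaps keep their dirty data for the retry below.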

        # Observe that drive0's backup is cancelled and drive1 completes with
        # an error.
        self.wait_qmp_backup_cancelled(drive0['id'])
        self.assertFalse(self.wait_qmp_backup(drive1['id']))
        error = self.vm.event_wait('BLOCK_JOB_ERROR')
        self.assert_qmp(error, 'data', {'device': drive1['id'],
                                        'action': 'report',
                                        'operation': 'read'})
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # Delete drive0's successful target and eliminate our record of the
        # unsuccessful drive1 target.
        dr0bm0.del_target()
        dr1bm0.del_target()
        if race:
            # Don't re-run the transaction, we only wanted to test the race.
            self.vm.shutdown()
            return

        # Recreate both target images:
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Re-run the exact same transaction.
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})

        # Both should complete successfully this time.
        self.assertTrue(self.wait_qmp_backup(drive0['id']))
        self.assertTrue(self.wait_qmp_backup(drive1['id']))
        self.make_reference_backup(dr0bm0)
        self.make_reference_backup(dr1bm0)
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # And the images should of course validate.
        self.vm.shutdown()
        self.check_backups()

    def test_transaction_failure(self):
        '''Test: Verify backups made from a transaction that partially fails.

        Add a second drive with its own unique pattern, and add a bitmap to each
        drive. Use blkdebug to interfere with the backup on just one drive and
        attempt to create a coherent incremental backup across both drives.

        Verify a failure in one but not both, then delete the failed stubs and
        re-run the same transaction.

        Verify that both incrementals are created successfully.
        '''
        self.do_transaction_failure_test()

    def test_transaction_failure_race(self):
        '''Test: Verify that transactions with jobs that have no data to
        transfer do not cause race conditions in the cancellation of the entire
        transaction job group.
        '''
        self.do_transaction_failure_test(race=True)

    def test_sync_dirty_bitmap_missing(self):
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', format=self.drives[0]['fmt'],
                             target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')

    def test_sync_dirty_bitmap_not_found(self):
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', bitmap='unknown',
                             format=self.drives[0]['fmt'], target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')

    def test_sync_dirty_bitmap_bad_granularity(self):
        '''
        Test: Check what happens if we provide an improper granularity.

        The granularity must always be a power of 2.
        '''
        self.assert_no_active_block_jobs()
        self.assertRaises(AssertionError, self.add_bitmap,
                          'bitmap0', self.drives[0],
                          granularity=64000)
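        # The non-power-of-two granularity makes block-dirty-bitmap-add fail;
        # add_bitmap's assert_qmp on the error reply is what raises the
        # AssertionError that assertRaises expects here.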

    def test_growing_before_backup(self):
        '''
        Test: Add a bitmap, truncate the image, write past the old
              end, and do an incremental backup.

        Incremental backup should not ignore dirty bits past the old
        image end.
        '''
        self.assert_no_active_block_jobs()

        self.create_anchor_backup()

        self.add_bitmap('bitmap0', self.drives[0])

        res = self.vm.qmp('block_resize', device=self.drives[0]['id'],
                          size=(65 * 1048576))
        self.assert_qmp(res, 'return', {})

        # Dirty the image past the old end
        self.vm.hmp_qemu_io(self.drives[0]['id'], 'write 64M 64k')

        target = self.prepare_backup(size='65M')
        self.create_incremental(target=target)

        self.vm.shutdown()
        self.check_backups()


class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
    '''Incremental backup tests that utilize a BlkDebug filter on drive0.'''

    def setUp(self):
        # Unlike the base class, do not attach the drive here; each test
        # attaches drive0 itself through a blkdebug node.
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()

    def test_incremental_failure(self):
        '''Test: Verify backups made after a failure are correct.

        Simulate a failure during an incremental backup block job,
        emulate additional writes, then create another incremental backup
        afterwards and verify that the backup created is correct.
        '''
        drive0 = self.drives[0]
        result = self.vm.qmp('blockdev-add',
                             node_name=drive0['id'],
                             driver=drive0['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {'driver': 'file',
                                           'filename': drive0['file']},
                                 'set-state': [{'event': 'flush_to_disk',
                                                'state': 1, 'new_state': 2}],
                                 'inject-error': [{'event': 'read_aio',
                                                   'errno': 5, 'state': 2,
                                                   'immediately': False,
                                                   'once': True}],
                             })
        self.assert_qmp(result, 'return', {})

        self.create_anchor_backup(drive0)
        self.add_bitmap('bitmap0', drive0)
        # Note: at this point in a normal execution, assume that the VM
        # resumes and begins issuing IO requests here.

        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        result = self.create_incremental(validate=False)
        self.assertFalse(result)
        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()

    def test_incremental_pause(self):
        '''
        Test an incremental backup that errors into a pause and is resumed.
        '''
        drive0 = self.drives[0]
        # NB: The blkdebug script here looks for a "flush, read" pattern.
        # The flush occurs in hmp_io_writes, and the read during the block job.
        result = self.vm.qmp('blockdev-add',
                             node_name=drive0['id'],
                             driver=drive0['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {'driver': 'file',
                                           'filename': drive0['file']},
                                 'set-state': [{'event': 'flush_to_disk',
                                                'state': 1, 'new_state': 2}],
                                 'inject-error': [{'event': 'read_aio',
                                                   'errno': 5, 'state': 2,
                                                   'immediately': False,
                                                   'once': True}],
                             })
        self.assert_qmp(result, 'return', {})
        self.create_anchor_backup(drive0)
        bitmap = self.add_bitmap('bitmap0', drive0)

        # Emulate guest activity
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'granularity': 65536,
            }))

        # Start the incremental backup, stopping on the injected source error.
        parent, _ = bitmap.last_target()
        target = self.prepare_backup(bitmap, parent)
        res = self.vm.qmp('drive-backup',
                          job_id=bitmap.drive['id'],
                          device=bitmap.drive['id'],
                          sync='incremental',
                          bitmap=bitmap.name,
                          format=bitmap.drive['fmt'],
                          target=target,
                          mode='existing',
                          on_source_error='stop')
        self.assert_qmp(res, 'return', {})

        # Wait for the error to pause the job.
        event = self.vm.event_wait(name="BLOCK_JOB_ERROR",
                                   match={"data": {"device": bitmap.drive['id']}})
        self.assert_qmp(event, 'data', {'device': bitmap.drive['id'],
                                        'action': 'stop',
                                        'operation': 'read'})

        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'granularity': 65536,
            }))

        # Resume and check incremental backup for consistency
        res = self.vm.qmp('block-job-resume', device=bitmap.drive['id'])
        self.assert_qmp(res, 'return', {})
        self.wait_qmp_backup(bitmap.drive['id'])
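        # A successful sync=incremental job clears the bits it has copied, so
        # the bitmap should now report no outstanding dirty data.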

        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'granularity': 65536,
            }))

        self.make_reference_backup(bitmap)
        self.vm.shutdown()
        self.check_backups()


if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2'],
                 supported_protocols=['file'])