4 # Tests for incremental drive-backup
6 # Copyright (C) 2015 John Snow for Red Hat, Inc.
10 # This program is free software; you can redistribute it and/or modify
11 # it under the terms of the GNU General Public License as published by
12 # the Free Software Foundation; either version 2 of the License, or
13 # (at your option) any later version.
15 # This program is distributed in the hope that it will be useful,
16 # but WITHOUT ANY WARRANTY; without even the implied warranty of
17 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 # GNU General Public License for more details.
20 # You should have received a copy of the GNU General Public License
21 # along with this program. If not, see <http://www.gnu.org/licenses/>.
def io_write_patterns(img, patterns):
    """Write each (pattern, offset, size) triple into *img* with qemu-io."""
    for (byte, offset, size) in patterns:
        cmd = 'write -P%s %s %s' % (byte, offset, size)
        iotests.qemu_io('-c', cmd, img)
def transaction_action(action, **kwargs):  # Build one QMP 'transaction' action dict of the given type.
        'data': dict((k.replace('_', '-'), v) for k, v in kwargs.items())  # QMP arguments use dashes, not underscores
def transaction_bitmap_clear(node, name, **kwargs):
    """Return a transaction action that clears dirty bitmap *name* on *node*."""
    args = dict(node=node, name=name, **kwargs)
    return transaction_action('block-dirty-bitmap-clear', **args)
def transaction_drive_backup(device, target, **kwargs):
    """Return a drive-backup transaction action for *device* into *target*.

    The job is given the device's own name as its job-id.
    """
    args = dict(job_id=device, device=device, target=target, **kwargs)
    return transaction_action('drive-backup', **args)
    def __init__(self, name, drive):  # Track one dirty bitmap (*name*) attached to drive record *drive*.
64 def base_target(self):
65 return (self.drive['backup'], None)
    def new_target(self, num=None):
        base = os.path.join(iotests.test_dir,
                            "%s.%s." % (self.drive['id'], self.name))  # "<drive-id>.<bitmap-name>." prefix
        suff = "%i.%s" % (num, self.drive['fmt'])
        # 'inc' is the incremental backup image itself; 'ref' is the full
        # reference copy it is later validated against in check_backups().
        target = base + "inc" + suff
        reference = base + "ref" + suff
        self.backups.append((target, reference))  # remember the pair for later comparison/cleanup
        return (target, reference)
    def last_target(self):  # Newest (target, reference) pair, else the anchor pair.
            return self.backups[-1]  # most recently created pair
        return self.base_target()  # fallback: no incrementals recorded yet
        for image in self.backups.pop():  # drop the newest (target, reference) pair; visit both filenames
        for backup in self.backups:  # walk every recorded (target, reference) pair
class TestIncrementalBackupBase(iotests.QMPTestCase):
    # Shared machinery for the incremental-backup test classes below.
    def __init__(self, *args):
        super(TestIncrementalBackupBase, self).__init__(*args)
        self.vm = iotests.VM()  # the VM under test
        self.err_img = os.path.join(iotests.test_dir, 'err.%s' % iotests.imgfmt)  # target used by tests that expect failure
        # Create a base image with a distinctive patterning
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.vm.add_drive(drive0['file'], opts='node-name=node0')  # 'node0' is referenced by bitmap status checks
        self.write_default_pattern(drive0['file'])
114 def write_default_pattern(self, target):
115 io_write_patterns(target, (('0x41', 0, 512),
116 ('0xd5', '1M', '32k'),
117 ('0xdc', '32M', '124k')))
    def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None):
            path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt))  # default image path
            backup = os.path.join(iotests.test_dir,
                                  '%s.full.backup.%s' % (node_id, fmt))  # default full-backup path
        return self.drives[-1]  # the drive record just registered
    def img_create(self, img, fmt=iotests.imgfmt, size='64M',
                   parent=None, parentFormat=None, **kwargs):
        for k,v in kwargs.items():
            optargs = optargs + ['-o', '%s=%s' % (k,v)]  # forward extra options via qemu-img -o
        args = ['create', '-f', fmt] + optargs + [img, size]
            if parentFormat is None:
            args = args + ['-b', parent, '-F', parentFormat]  # attach the backing file
        iotests.qemu_img(*args)
        self.files.append(img)  # schedule the image for cleanup
149 def do_qmp_backup(self, error='Input/output error', **kwargs):
150 res = self.vm.qmp('drive-backup', **kwargs)
151 self.assert_qmp(res, 'return', {})
152 return self.wait_qmp_backup(kwargs['device'], error)
    def ignore_job_status_change_events(self):  # Consume JOB_STATUS_CHANGE events; stops once 'null' is seen.
            e = self.vm.event_wait(name="JOB_STATUS_CHANGE")
            if e['data']['status'] == 'null':  # 'null' is the job's terminal state
    def wait_qmp_backup(self, device, error='Input/output error'):
        event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)  # the job must actually have completed
        self.ignore_job_status_change_events()
            failure = self.dictpath(event, 'data/error')  # dictpath raises AssertionError when no error is present
        except AssertionError:
            self.assert_qmp(event, 'data/offset', event['data']['len'])  # success path: whole length copied
            self.assert_qmp(event, 'data/error', error)  # failure path: error text must match *error*
179 def wait_qmp_backup_cancelled(self, device):
180 event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED',
181 match={'data': {'device': device}})
182 self.assertNotEqual(event, None)
183 self.ignore_job_status_change_events()
    def create_anchor_backup(self, drive=None):
            drive = self.drives[-1]  # default to the most recently added drive
        res = self.do_qmp_backup(job_id=drive['id'],
                                 device=drive['id'], sync='full',
                                 format=drive['fmt'], target=drive['backup'])
        self.files.append(drive['backup'])  # schedule the anchor image for cleanup
        return drive['backup']
    def make_reference_backup(self, bitmap=None):
            bitmap = self.bitmaps[-1]  # default to the newest bitmap
        _, reference = bitmap.last_target()  # 'ref' image of the newest pair
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'], sync='full',
                                 format=bitmap.drive['fmt'], target=reference)
    def add_bitmap(self, name, drive, **kwargs):
        # Create a local Bitmap tracker and mirror it in the VM via QMP;
        # kwargs (e.g. granularity) are forwarded to block-dirty-bitmap-add.
        bitmap = Bitmap(name, drive)
        self.bitmaps.append(bitmap)
        result = self.vm.qmp('block-dirty-bitmap-add', node=drive['id'],
                             name=bitmap.name, **kwargs)
        self.assert_qmp(result, 'return', {})
    def prepare_backup(self, bitmap=None, parent=None, **kwargs):
            bitmap = self.bitmaps[-1]  # default: newest bitmap
            parent, _ = bitmap.last_target()  # default: chain onto the previous backup
        target, _ = bitmap.new_target()
        self.img_create(target, bitmap.drive['fmt'], parent=parent,
    def create_incremental(self, bitmap=None, parent=None,
                           parentFormat=None, validate=True,
            bitmap = self.bitmaps[-1]
            parent, _ = bitmap.last_target()  # chain onto the previous backup by default
            target = self.prepare_backup(bitmap, parent)
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'],
                                 sync='incremental', bitmap=bitmap.name,
                                 format=bitmap.drive['fmt'], target=target,
            self.assertFalse(validate)  # a failed job is acceptable only with validate=False
            self.make_reference_backup(bitmap)  # mirror the new data for later comparison
251 def check_backups(self):
252 for bitmap in self.bitmaps:
253 for incremental, reference in bitmap.backups:
254 self.assertTrue(iotests.compare_images(incremental, reference))
255 last = bitmap.last_target()[0]
256 self.assertTrue(iotests.compare_images(last, bitmap.drive['file']))
259 def hmp_io_writes(self, drive, patterns):
260 for pattern in patterns:
261 self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
262 self.vm.hmp_qemu_io(drive, 'flush')
    def do_incremental_simple(self, **kwargs):  # kwargs are forwarded to block-dirty-bitmap-add
        self.create_anchor_backup()
        self.add_bitmap('bitmap0', self.drives[0], **kwargs)
        # Sanity: Create a "hollow" incremental backup
        self.create_incremental()
        # Three writes: One complete overwrite, one new segment,
        # and one partial overlap.
        self.hmp_io_writes(self.drives[0]['id'], (('0xab', 0, 512),
                                                  ('0xfe', '16M', '256k'),
                                                  ('0x64', '32736k', '64k')))
        self.create_incremental()
        # Three more writes, one of each kind, like above
        self.hmp_io_writes(self.drives[0]['id'], (('0x9a', 0, 512),
                                                  ('0x55', '8M', '352k'),
                                                  ('0x78', '15872k', '1M')))
        self.create_incremental()
        for bitmap in self.bitmaps:  # release per-bitmap backup state
        for filename in self.files:  # delete every image created during the test
class TestIncrementalBackup(TestIncrementalBackupBase):
    # Main incremental-backup test scenarios.
    def test_incremental_simple(self):
        Test: Create and verify three incremental backups.
        Create a bitmap and a full backup before VM execution begins,
        then create a series of three incremental backups "during execution,"
        i.e.; after IO requests begin modifying the drive.
        return self.do_incremental_simple()  # default bitmap granularity
    def test_small_granularity(self):
        Test: Create and verify backups made with a small granularity bitmap.
        Perform the same test as test_incremental_simple, but with a granularity
        of only 32KiB instead of the present default of 64KiB.
        return self.do_incremental_simple(granularity=32768)  # 32768 bytes == 32KiB
    def test_large_granularity(self):
        Test: Create and verify backups made with a large granularity bitmap.
        Perform the same test as test_incremental_simple, but with a granularity
        of 128KiB instead of the present default of 64KiB.
        return self.do_incremental_simple(granularity=131072)  # 131072 bytes == 128KiB
    def test_larger_cluster_target(self):
        Test: Create and verify backups made to a larger cluster size target.
        With a default granularity of 64KiB, verify that backups made to a
        larger cluster size target of 128KiB without a backing file works.
        drive0 = self.drives[0]
        # Create a cluster_size=128k full backup / "anchor" backup
        self.img_create(drive0['backup'], cluster_size='128k')
        self.assertTrue(self.do_qmp_backup(device=drive0['id'], sync='full',
                                           format=drive0['fmt'],
                                           target=drive0['backup'],
        # Create bitmap and dirty it with some new writes.
        # overwrite [32736, 32799] which will dirty bitmap clusters at
        # 32M-64K and 32M. 32M+64K will be left undirtied.
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'],
                           ('0xfe', '16M', '256k'),
                           ('0x64', '32736k', '64k')))
        # Check the dirty bitmap stats
        self.assertTrue(self.vm.check_bitmap_status(
            'node0', bitmap0.name, {
            'granularity': 65536,
        # Prepare a cluster_size=128k backup target without a backing file.
        (target, _) = bitmap0.new_target()
        self.img_create(target, bitmap0.drive['fmt'], cluster_size='128k')  # same 128k clusters, no backing file yet
        # Perform Incremental Backup
        self.assertTrue(self.do_qmp_backup(device=bitmap0.drive['id'],
                                           format=bitmap0.drive['fmt'],
        self.make_reference_backup(bitmap0)
        # Add the backing file, then compare and exit.
        iotests.qemu_img('rebase', '-f', drive0['fmt'], '-u', '-b',
                         drive0['backup'], '-F', drive0['fmt'], target)  # '-u': rewrite backing metadata only, no data copy
    def test_incremental_transaction(self):
        '''Test: Verify backups made from transactionally created bitmaps.
        Create a bitmap "before" VM execution begins, then create a second
        bitmap AFTER writes have already occurred. Use transactions to create
        a full backup and synchronize both bitmaps to this backup.
        Create an incremental backup through both bitmaps and verify that
        both backups match the current drive0 image.
        drive0 = self.drives[0]
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        bitmap1 = self.add_bitmap('bitmap1', drive0)  # created only after the writes above
        result = self.vm.qmp('transaction', actions=[
            transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name),
            transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name),
            transaction_drive_backup(drive0['id'], drive0['backup'],
                                     sync='full', format=drive0['fmt'])
        self.assert_qmp(result, 'return', {})
        self.wait_until_completed(drive0['id'])
        self.files.append(drive0['backup'])  # schedule the full backup for cleanup
        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        # Both bitmaps should be correctly in sync.
        self.create_incremental(bitmap0)
        self.create_incremental(bitmap1)
    def do_transaction_failure_test(self, race=False):
        # Create a second drive, with pattern:
        drive1 = self.add_node('drive1')
        self.img_create(drive1['file'], drive1['fmt'])
        io_write_patterns(drive1['file'], (('0x14', 0, 512),
                                           ('0x5d', '1M', '32k'),
                                           ('0xcd', '32M', '124k')))
        # Create a blkdebug interface to this img as 'drive1'
        result = self.vm.qmp('blockdev-add',
            node_name=drive1['id'],
            driver=drive1['fmt'],
                'driver': 'blkdebug',
                    'filename': drive1['file']
                    'event': 'flush_to_disk',
                    'immediately': False,
        self.assert_qmp(result, 'return', {})  # blkdebug-wrapped node added successfully
        # Create bitmaps and full backups for both drives
        drive0 = self.drives[0]
        dr0bm0 = self.add_bitmap('bitmap0', drive0)
        dr1bm0 = self.add_bitmap('bitmap0', drive1)
        self.create_anchor_backup(drive0)
        self.create_anchor_backup(drive1)
        self.assert_no_active_block_jobs()  # sanity: nothing running before the transaction
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        # Emulate some writes
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        self.hmp_io_writes(drive1['id'], (('0xba', 0, 512),
                                          ('0xef', '16M', '256k'),
                                          ('0x46', '32736k', '64k')))
        # Create incremental backup targets
        target0 = self.prepare_backup(dr0bm0)  # pre-created images, used with mode='existing'
        target1 = self.prepare_backup(dr1bm0)
        # Ask for a new incremental backup per-each drive,
        # expecting drive1's backup to fail. In the 'race' test,
        # we expect drive1 to attempt to cancel the empty drive0 job.
            transaction_drive_backup(drive0['id'], target0, sync='incremental',
                                     format=drive0['fmt'], mode='existing',
            transaction_drive_backup(drive1['id'], target1, sync='incremental',
                                     format=drive1['fmt'], mode='existing',
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'} )
        self.assert_qmp(result, 'return', {})
        # Observe that drive0's backup is cancelled and drive1 completes with
        self.wait_qmp_backup_cancelled(drive0['id'])
        self.assertFalse(self.wait_qmp_backup(drive1['id']))  # drive1's job fails (I/O error by default)
        error = self.vm.event_wait('BLOCK_JOB_ERROR')
        self.assert_qmp(error, 'data', {'device': drive1['id'],
                                        'operation': 'read'})
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()
        # Delete drive0's successful target and eliminate our record of the
        # unsuccessful drive1 target.
            # Don't re-run the transaction, we only wanted to test the race.
        # Re-run the same transaction:
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)
        # Re-run the exact same transaction.
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode':'grouped'})
        self.assert_qmp(result, 'return', {})
        # Both should complete successfully this time.
        self.assertTrue(self.wait_qmp_backup(drive0['id']))
        self.assertTrue(self.wait_qmp_backup(drive1['id']))
        self.make_reference_backup(dr0bm0)
        self.make_reference_backup(dr1bm0)
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()
        # And the images should of course validate.
    def test_transaction_failure(self):  # two-phase variant (race=False): fail, then retry successfully
        '''Test: Verify backups made from a transaction that partially fails.
        Add a second drive with its own unique pattern, and add a bitmap to each
        drive. Use blkdebug to interfere with the backup on just one drive and
        attempt to create a coherent incremental backup across both drives.
        verify a failure in one but not both, then delete the failed stubs and
        re-run the same transaction.
        verify that both incrementals are created successfully.
        self.do_transaction_failure_test()
    def test_transaction_failure_race(self):  # single-pass variant: stop right after the failed transaction
        '''Test: Verify that transactions with jobs that have no data to
        transfer do not cause race conditions in the cancellation of the entire
        transaction job group.
        self.do_transaction_failure_test(race=True)
    def test_sync_dirty_bitmap_missing(self):
        # sync=incremental without a usable bitmap must be rejected.
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', format=self.drives[0]['fmt'],
        self.assert_qmp(result, 'error/class', 'GenericError')
561 def test_sync_dirty_bitmap_not_found(self):
562 self.assert_no_active_block_jobs()
563 self.files.append(self.err_img)
564 result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
565 sync='incremental', bitmap='unknown',
566 format=self.drives[0]['fmt'], target=self.err_img)
567 self.assert_qmp(result, 'error/class', 'GenericError')
    def test_sync_dirty_bitmap_bad_granularity(self):
        Test: Test what happens if we provide an improper granularity.
        The granularity must always be a power of 2.
        self.assert_no_active_block_jobs()
        self.assertRaises(AssertionError, self.add_bitmap,  # add_bitmap asserts the QMP reply, so a rejection raises
                          'bitmap0', self.drives[0],
    def test_growing_before_backup(self):
        Test: Add a bitmap, truncate the image, write past the old
        Incremental backup should not ignore dirty bits past the old
        self.assert_no_active_block_jobs()
        self.create_anchor_backup()
        self.add_bitmap('bitmap0', self.drives[0])
        res = self.vm.qmp('block_resize', device=self.drives[0]['id'],
        self.assert_qmp(res, 'return', {})
        # Dirty the image past the old end
        self.vm.hmp_qemu_io(self.drives[0]['id'], 'write 64M 64k')
        target = self.prepare_backup(size='65M')  # target must match the grown size
        self.create_incremental(target=target)
class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
    '''Incremental backup tests that utilize a BlkDebug filter on drive0.'''
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.write_default_pattern(drive0['file'])  # drive attachment happens later via blockdev-add in the tests
    def test_incremental_failure(self):
        '''Test: Verify backups made after a failure are correct.
        Simulate a failure during an incremental backup block job,
        emulate additional writes, then create another incremental backup
        afterwards and verify that the backup created is correct.
        drive0 = self.drives[0]
        result = self.vm.qmp('blockdev-add',
            node_name=drive0['id'],
            driver=drive0['fmt'],
                'driver': 'blkdebug',
                    'filename': drive0['file']
                    'event': 'flush_to_disk',
                    'immediately': False,
        self.assert_qmp(result, 'return', {})
        self.create_anchor_backup(drive0)
        self.add_bitmap('bitmap0', drive0)
        # Note: at this point, during a normal execution,
        # Assume that the VM resumes and begins issuing IO requests here.
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        result = self.create_incremental(validate=False)  # expected to fail via the blkdebug error injection
        self.assertFalse(result)
        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        self.create_incremental()  # retry the incremental backup
    def test_incremental_pause(self):
        Test an incremental backup that errors into a pause and is resumed.
        drive0 = self.drives[0]
        # NB: The blkdebug script here looks for a "flush, read" pattern.
        # The flush occurs in hmp_io_writes, and the read during the block job.
        result = self.vm.qmp('blockdev-add',
            node_name=drive0['id'],
            driver=drive0['fmt'],
                'driver': 'blkdebug',
                    'filename': drive0['file']
                    'event': 'flush_to_disk',
                    'immediately': False,
        self.assert_qmp(result, 'return', {})
        self.create_anchor_backup(drive0)
        bitmap = self.add_bitmap('bitmap0', drive0)
        # Emulate guest activity
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
            'granularity': 65536,
        parent, _ = bitmap.last_target()
        target = self.prepare_backup(bitmap, parent)
        res = self.vm.qmp('drive-backup',
                          job_id=bitmap.drive['id'],
                          device=bitmap.drive['id'],
                          format=bitmap.drive['fmt'],
                          on_source_error='stop')
        self.assert_qmp(res, 'return', {})  # job started; it will pause on the injected error
        event = self.vm.event_wait(name="BLOCK_JOB_ERROR",
                                   match={"data":{"device":bitmap.drive['id']}})
        self.assert_qmp(event, 'data', {'device': bitmap.drive['id'],
                                        'operation': 'read'})
        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
            'granularity': 65536,
        # Resume and check incremental backup for consistency
        res = self.vm.qmp('block-job-resume', device=bitmap.drive['id'])
        self.assert_qmp(res, 'return', {})
        self.wait_qmp_backup(bitmap.drive['id'])  # wait for the resumed job to complete
        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
            'granularity': 65536,
        self.make_reference_backup(bitmap)
if __name__ == '__main__':
    # This test is only meaningful for qcow2 images over the file protocol.
    iotests.main(supported_protocols=['file'],
                 supported_fmts=['qcow2'])