4 # Tests for incremental drive-backup
6 # Copyright (C) 2015 John Snow for Red Hat, Inc.
10 # This program is free software; you can redistribute it and/or modify
11 # it under the terms of the GNU General Public License as published by
12 # the Free Software Foundation; either version 2 of the License, or
13 # (at your option) any later version.
15 # This program is distributed in the hope that it will be useful,
16 # but WITHOUT ANY WARRANTY; without even the implied warranty of
17 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 # GNU General Public License for more details.
20 # You should have received a copy of the GNU General Public License
21 # along with this program. If not, see <http://www.gnu.org/licenses/>.
26 from iotests import try_remove
27 from qemu.qmp.qmp_client import ExecuteError
def io_write_patterns(img, patterns):
    """Write each (pattern, offset, size) triple to img via qemu-io."""
    for pat, off, size in patterns:
        iotests.qemu_io('-c', 'write -P%s %s %s' % (pat, off, size), img)
def transaction_action(action, **kwargs):
    # Build one element of a QMP 'transaction' actions list.  Keyword
    # names use '_' in Python and are translated to the '-' QMP spelling.
    # NOTE(review): the surrounding 'return {' / "'type': action" lines
    # are outside this view.
        'data': dict((k.replace('_', '-'), v) for k, v in kwargs.items())
def transaction_bitmap_clear(node, name, **kwargs):
    """Return a 'block-dirty-bitmap-clear' transaction action dict."""
    return transaction_action(
        'block-dirty-bitmap-clear', node=node, name=name, **kwargs)
def transaction_drive_backup(device, target, **kwargs):
    """Return a 'drive-backup' transaction action for `device` -> `target`.

    The job id is set to the device name.
    """
    return transaction_action(
        'drive-backup', job_id=device, device=device, target=target, **kwargs)
    def __init__(self, name, drive):
        """Track one dirty bitmap (`name`) attached to a drive dict."""
59 def base_target(self):
60 return (self.drive['backup'], None)
    def new_target(self, num=None):
        """Generate, record and return a new (incremental-target,
        reference-image) filename pair for this bitmap.

        NOTE(review): the branch supplying a default `num` is outside
        this view.
        """
        base = os.path.join(iotests.test_dir,
                            "%s.%s." % (self.drive['id'], self.name))
        suff = "%i.%s" % (num, self.drive['fmt'])
        target = base + "inc" + suff
        reference = base + "ref" + suff
        self.backups.append((target, reference))
        return (target, reference)
    def last_target(self):
        """Return the most recent (target, reference) pair, falling back
        to the anchor pair (the guarding if-line is outside this view)."""
            return self.backups[-1]
        return self.base_target()
        # Drop the newest (target, reference) pair and iterate its files
        # (surrounding method lines are outside this view).
        for image in self.backups.pop():
        # Walk every recorded (target, reference) pair for cleanup
        # (surrounding method lines are outside this view).
        for backup in self.backups:
class TestIncrementalBackupBase(iotests.QMPTestCase):
    '''Shared fixtures and helpers for the incremental drive-backup tests.'''
    def __init__(self, *args):
        super(TestIncrementalBackupBase, self).__init__(*args)
        # Per-test bookkeeping; the list initializations (bitmaps, files,
        # drives) are outside this view.
        self.vm = iotests.VM()
        self.err_img = os.path.join(iotests.test_dir, 'err.%s' % iotests.imgfmt)
        # setUp body fragment (its def line is outside this view):
        # create drive0, attach it to the VM and stamp the base pattern.
        # Create a base image with a distinctive patterning
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.vm.add_drive(drive0['file'], opts='node-name=node0')
        self.write_default_pattern(drive0['file'])
109 def write_default_pattern(self, target):
110 io_write_patterns(target, (('0x41', 0, 512),
111 ('0xd5', '1M', '32k'),
112 ('0xdc', '32M', '124k')))
    def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None):
        """Register a drive record and return it.

        NOTE(review): the guarding if-statements for the path/backup
        defaults and the dict append are outside this view.
        """
        path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt))
        backup = os.path.join(iotests.test_dir,
                              '%s.full.backup.%s' % (node_id, fmt))
        return self.drives[-1]
    def img_create(self, img, fmt=iotests.imgfmt, size='64M',
                   parent=None, parentFormat=None, **kwargs):
        """Create a test image with qemu-img and record it for cleanup.

        Extra kwargs are forwarded as '-o key=value' options.
        NOTE(review): the optargs initialization and the 'if parent:'
        guard are outside this view.
        """
        for k,v in kwargs.items():
            optargs = optargs + ['-o', '%s=%s' % (k,v)]
        args = ['create', '-f', fmt] + optargs + [img, size]
        # Backing-file handling: default the backing format, pass -b/-F.
        if parentFormat is None:
            args = args + ['-b', parent, '-F', parentFormat]
        iotests.qemu_img(*args)
        self.files.append(img)
144 def do_qmp_backup(self, error='Input/output error', **kwargs):
145 self.vm.cmd('drive-backup', **kwargs)
146 return self.wait_qmp_backup(kwargs['device'], error)
    def ignore_job_status_change_events(self):
        """Drain JOB_STATUS_CHANGE events until the job reaches 'null'.

        NOTE(review): the enclosing loop/break lines are outside this view.
        """
        e = self.vm.event_wait(name="JOB_STATUS_CHANGE")
        if e['data']['status'] == 'null':
    def wait_qmp_backup(self, device, error='Input/output error'):
        """Wait for BLOCK_JOB_COMPLETED on `device` and inspect the result.

        If the event carries no 'error' member, the job must have run to
        completion (offset == len); otherwise the error text must match
        `error`.  NOTE(review): the try/else plumbing and the return
        statements are outside this view.
        """
        event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        self.ignore_job_status_change_events()

        failure = self.dictpath(event, 'data/error')
        except AssertionError:
            # dictpath raised: no error key, so the job succeeded fully.
            self.assert_qmp(event, 'data/offset', event['data']['len'])
        self.assert_qmp(event, 'data/error', error)
173 def wait_qmp_backup_cancelled(self, device):
174 event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED',
175 match={'data': {'device': device}})
176 self.assertNotEqual(event, None)
177 self.ignore_job_status_change_events()
    def create_anchor_backup(self, drive=None):
        """Take the full ('anchor') backup of `drive` and return its path.

        NOTE(review): the drive-defaulting guard and the assertion on
        `res` are outside this view.
        """
            drive = self.drives[-1]
        res = self.do_qmp_backup(job_id=drive['id'],
                                 device=drive['id'], sync='full',
                                 format=drive['fmt'], target=drive['backup'])
        self.files.append(drive['backup'])
        return drive['backup']
    def make_reference_backup(self, bitmap=None):
        """Full backup into the current reference image of `bitmap`, to be
        compared later against the incremental backup.

        NOTE(review): the bitmap-defaulting guard and the assertion on
        `res` are outside this view.
        """
            bitmap = self.bitmaps[-1]
        _, reference = bitmap.last_target()
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'], sync='full',
                                 format=bitmap.drive['fmt'], target=reference)
    def add_bitmap(self, name, drive, **kwargs):
        """Create a dirty bitmap on `drive` via QMP and track it locally.

        kwargs are forwarded to block-dirty-bitmap-add (e.g. granularity).
        NOTE(review): the 'return bitmap' line is outside this view.
        """
        bitmap = Bitmap(name, drive)
        self.bitmaps.append(bitmap)
        self.vm.cmd('block-dirty-bitmap-add', node=drive['id'],
                    name=bitmap.name, **kwargs)
    def prepare_backup(self, bitmap=None, parent=None, **kwargs):
        """Create the next incremental target image, backed by `parent`.

        NOTE(review): the defaulting guards, the kwargs forwarding and the
        return of `target` are partly outside this view.
        """
            bitmap = self.bitmaps[-1]
            parent, _ = bitmap.last_target()
        target, _ = bitmap.new_target()
        self.img_create(target, bitmap.drive['fmt'], parent=parent,
    def create_incremental(self, bitmap=None, parent=None,
                           parentFormat=None, validate=True,
        # Run one incremental backup through `bitmap`.  With validate=True
        # a failure is fatal and a matching reference backup is produced;
        # with validate=False a failure is the expected outcome.
        # NOTE(review): the closing of the signature, the defaulting
        # guards and the branch structure around `res` are outside this
        # view.
            bitmap = self.bitmaps[-1]
            parent, _ = bitmap.last_target()
            target = self.prepare_backup(bitmap, parent)
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'],
                                 sync='incremental', bitmap=bitmap.name,
                                 format=bitmap.drive['fmt'], target=target,
            self.assertFalse(validate)
            self.make_reference_backup(bitmap)
244 def check_backups(self):
245 for bitmap in self.bitmaps:
246 for incremental, reference in bitmap.backups:
247 self.assertTrue(iotests.compare_images(incremental, reference))
248 last = bitmap.last_target()[0]
249 self.assertTrue(iotests.compare_images(last, bitmap.drive['file']))
252 def hmp_io_writes(self, drive, patterns):
253 for pattern in patterns:
254 self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
255 self.vm.hmp_qemu_io(drive, 'flush')
    def do_incremental_simple(self, **kwargs):
        """Shared body: anchor backup, one bitmap, three incrementals.

        kwargs are forwarded to block-dirty-bitmap-add (e.g. granularity).
        NOTE(review): the final check_backups() call is outside this view.
        """
        self.create_anchor_backup()
        self.add_bitmap('bitmap0', self.drives[0], **kwargs)
        # Sanity: Create a "hollow" incremental backup
        self.create_incremental()
        # Three writes: One complete overwrite, one new segment,
        # and one partial overlap.
        self.hmp_io_writes(self.drives[0]['id'], (('0xab', 0, 512),
                                                  ('0xfe', '16M', '256k'),
                                                  ('0x64', '32736k', '64k')))
        self.create_incremental()
        # Three more writes, one of each kind, like above
        self.hmp_io_writes(self.drives[0]['id'], (('0x9a', 0, 512),
                                                  ('0x55', '8M', '352k'),
                                                  ('0x78', '15872k', '1M')))
        self.create_incremental()
        # tearDown body fragment (its def line is outside this view):
        # remove each bitmap's backup targets and every created file.
        for bitmap in self.bitmaps:
        for filename in self.files:
class TestIncrementalBackup(TestIncrementalBackupBase):
    '''Basic incremental drive-backup tests.'''
    def test_incremental_simple(self):
        '''
        Test: Create and verify three incremental backups.

        Create a bitmap and a full backup before VM execution begins,
        then create a series of three incremental backups "during execution,"
        i.e.; after IO requests begin modifying the drive.
        '''
        return self.do_incremental_simple()
    def test_small_granularity(self):
        '''
        Test: Create and verify backups made with a small granularity bitmap.

        Perform the same test as test_incremental_simple, but with a granularity
        of only 32KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=32768)
    def test_large_granularity(self):
        '''
        Test: Create and verify backups made with a large granularity bitmap.

        Perform the same test as test_incremental_simple, but with a granularity
        of 128KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=131072)
    def test_larger_cluster_target(self):
        '''
        Test: Create and verify backups made to a larger cluster size target.

        With a default granularity of 64KiB, verify that backups made to a
        larger cluster size target of 128KiB without a backing file works.
        '''
        drive0 = self.drives[0]
        # Create a cluster_size=128k full backup / "anchor" backup
        self.img_create(drive0['backup'], cluster_size='128k')
        # NOTE(review): trailing drive-backup arguments are outside this view.
        self.assertTrue(self.do_qmp_backup(device=drive0['id'], sync='full',
                                           format=drive0['fmt'],
                                           target=drive0['backup'],
        # Create bitmap and dirty it with some new writes.
        # overwrite [32736, 32799] which will dirty bitmap clusters at
        # 32M-64K and 32M. 32M+64K will be left undirtied.
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        # NOTE(review): the first pattern tuple of this write list is
        # outside this view.
        self.hmp_io_writes(drive0['id'],
                           ('0xfe', '16M', '256k'),
                           ('0x64', '32736k', '64k')))
        # Check the dirty bitmap stats
        self.assertTrue(self.vm.check_bitmap_status(
            'node0', bitmap0.name, {
                'granularity': 65536,
        # Prepare a cluster_size=128k backup target without a backing file.
        (target, _) = bitmap0.new_target()
        self.img_create(target, bitmap0.drive['fmt'], cluster_size='128k')
        # Perform Incremental Backup
        # NOTE(review): sync/bitmap/target arguments are outside this view.
        self.assertTrue(self.do_qmp_backup(device=bitmap0.drive['id'],
                                           format=bitmap0.drive['fmt'],
        self.make_reference_backup(bitmap0)
        # Add the backing file, then compare and exit.
        iotests.qemu_img('rebase', '-f', drive0['fmt'], '-u', '-b',
                         drive0['backup'], '-F', drive0['fmt'], target)
    def test_incremental_transaction(self):
        '''Test: Verify backups made from transactionally created bitmaps.

        Create a bitmap "before" VM execution begins, then create a second
        bitmap AFTER writes have already occurred. Use transactions to create
        a full backup and synchronize both bitmaps to this backup.
        Create an incremental backup through both bitmaps and verify that
        both backups match the current drive0 image.
        '''
        drive0 = self.drives[0]
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        bitmap1 = self.add_bitmap('bitmap1', drive0)
        # Clear both bitmaps and take the full backup in ONE transaction,
        # so the bitmaps start exactly in sync with the backup contents.
        # NOTE(review): the closing of this actions list is outside this view.
        self.vm.cmd('transaction', actions=[
            transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name),
            transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name),
            transaction_drive_backup(drive0['id'], drive0['backup'],
                                     sync='full', format=drive0['fmt'])
        self.wait_until_completed(drive0['id'])
        self.files.append(drive0['backup'])
        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        # Both bitmaps should be correctly in sync.
        self.create_incremental(bitmap0)
        self.create_incremental(bitmap1)
    def do_transaction_failure_test(self, race=False):
        """Shared body for the transaction-failure tests.

        Uses a blkdebug wrapper to make drive1's incremental backup fail
        inside a grouped transaction, then (unless race=True) re-runs the
        same transaction successfully.  Several lines of this method are
        outside this view.
        """
        # Create a second drive, with pattern:
        drive1 = self.add_node('drive1')
        self.img_create(drive1['file'], drive1['fmt'])
        io_write_patterns(drive1['file'], (('0x14', 0, 512),
                                           ('0x5d', '1M', '32k'),
                                           ('0xcd', '32M', '124k')))
        # Create a blkdebug interface to this img as 'drive1'
        # NOTE(review): the nested dict syntax of this blockdev-add call
        # is partly outside this view.
        self.vm.cmd('blockdev-add',
                    node_name=drive1['id'],
                    driver=drive1['fmt'],
                        'driver': 'blkdebug',
                            'filename': drive1['file']
                        'event': 'flush_to_disk',
                        'immediately': False,
        # Create bitmaps and full backups for both drives
        drive0 = self.drives[0]
        dr0bm0 = self.add_bitmap('bitmap0', drive0)
        dr1bm0 = self.add_bitmap('bitmap0', drive1)
        self.create_anchor_backup(drive0)
        self.create_anchor_backup(drive1)
        self.assert_no_active_block_jobs()
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        # Emulate some writes
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        self.hmp_io_writes(drive1['id'], (('0xba', 0, 512),
                                          ('0xef', '16M', '256k'),
                                          ('0x46', '32736k', '64k')))
        # Create incremental backup targets
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)
        # Ask for a new incremental backup per-each drive,
        # expecting drive1's backup to fail. In the 'race' test,
        # we expect drive1 to attempt to cancel the empty drive0 job.
        # NOTE(review): the list assignment wrapping these two actions is
        # outside this view.
            transaction_drive_backup(drive0['id'], target0, sync='incremental',
                                     format=drive0['fmt'], mode='existing',
            transaction_drive_backup(drive1['id'], target1, sync='incremental',
                                     format=drive1['fmt'], mode='existing',
        self.vm.cmd('transaction', actions=transaction,
                    properties={'completion-mode': 'grouped'} )
        # Observe that drive0's backup is cancelled and drive1 completes with
        self.wait_qmp_backup_cancelled(drive0['id'])
        self.assertFalse(self.wait_qmp_backup(drive1['id']))
        error = self.vm.event_wait('BLOCK_JOB_ERROR')
        self.assert_qmp(error, 'data', {'device': drive1['id'],
                                        'operation': 'read'})
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()
        # Delete drive0's successful target and eliminate our record of the
        # unsuccessful drive1 target.
        # NOTE(review): the deletion calls and the early return for the
        # race case are outside this view.
        # Don't re-run the transaction, we only wanted to test the race.
        # Re-run the same transaction:
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)
        # Re-run the exact same transaction.
        self.vm.cmd('transaction', actions=transaction,
                    properties={'completion-mode':'grouped'})
        # Both should complete successfully this time.
        self.assertTrue(self.wait_qmp_backup(drive0['id']))
        self.assertTrue(self.wait_qmp_backup(drive1['id']))
        self.make_reference_backup(dr0bm0)
        self.make_reference_backup(dr1bm0)
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()
        # And the images should of course validate.
    def test_transaction_failure(self):
        '''Test: Verify backups made from a transaction that partially fails.

        Add a second drive with its own unique pattern, and add a bitmap to each
        drive. Use blkdebug to interfere with the backup on just one drive and
        attempt to create a coherent incremental backup across both drives.
        verify a failure in one but not both, then delete the failed stubs and
        re-run the same transaction.
        verify that both incrementals are created successfully.
        '''
        self.do_transaction_failure_test()
    def test_transaction_failure_race(self):
        '''Test: Verify that transactions with jobs that have no data to
        transfer do not cause race conditions in the cancellation of the entire
        transaction job group.
        '''
        self.do_transaction_failure_test(race=True)
    def test_sync_dirty_bitmap_missing(self):
        # sync=incremental without naming a bitmap must be rejected with
        # GenericError.  NOTE(review): the target= argument line of this
        # qmp call is outside this view.
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', format=self.drives[0]['fmt'],
        self.assert_qmp(result, 'error/class', 'GenericError')
549 def test_sync_dirty_bitmap_not_found(self):
550 self.assert_no_active_block_jobs()
551 self.files.append(self.err_img)
552 result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
553 sync='incremental', bitmap='unknown',
554 format=self.drives[0]['fmt'], target=self.err_img)
555 self.assert_qmp(result, 'error/class', 'GenericError')
    def test_sync_dirty_bitmap_bad_granularity(self):
        '''
        Test: Test what happens if we provide an improper granularity.

        The granularity must always be a power of 2.
        '''
        self.assert_no_active_block_jobs()
        # NOTE(review): the granularity= argument line is outside this view.
        self.assertRaises(ExecuteError, self.add_bitmap,
                          'bitmap0', self.drives[0],
    def test_growing_before_backup(self):
        '''
        Test: Add a bitmap, truncate the image, write past the old
        Incremental backup should not ignore dirty bits past the old
        '''
        self.assert_no_active_block_jobs()
        self.create_anchor_backup()
        self.add_bitmap('bitmap0', self.drives[0])
        # Grow the drive beyond its original size.
        # NOTE(review): the size= argument line is outside this view.
        self.vm.cmd('block_resize', device=self.drives[0]['id'],
        # Dirty the image past the old end
        self.vm.hmp_qemu_io(self.drives[0]['id'], 'write 64M 64k')
        target = self.prepare_backup(size='65M')
        self.create_incremental(target=target)
class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
    '''Incremental backup tests that utilize a BlkDebug filter on drive0.'''
    # setUp body fragment (its def line is outside this view): create
    # drive0 and stamp the base pattern; the drive is attached later via
    # blockdev-add in the individual tests.
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.write_default_pattern(drive0['file'])
    def test_incremental_failure(self):
        '''Test: Verify backups made after a failure are correct.

        Simulate a failure during an incremental backup block job,
        emulate additional writes, then create another incremental backup
        afterwards and verify that the backup created is correct.
        '''
        drive0 = self.drives[0]
        # NOTE(review): the nested dict syntax of this blockdev-add call
        # (blkdebug error-injection configuration) is partly outside this
        # view.
        self.vm.cmd('blockdev-add',
                    node_name=drive0['id'],
                    driver=drive0['fmt'],
                        'driver': 'blkdebug',
                            'filename': drive0['file']
                        'event': 'flush_to_disk',
                        'immediately': False,
        self.create_anchor_backup(drive0)
        self.add_bitmap('bitmap0', drive0)
        # Note: at this point, during a normal execution,
        # Assume that the VM resumes and begins issuing IO requests here.
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        # The first incremental is expected to fail (validate=False).
        result = self.create_incremental(validate=False)
        self.assertFalse(result)
        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        self.create_incremental()
    def test_incremental_pause(self):
        '''
        Test an incremental backup that errors into a pause and is resumed.
        '''
        drive0 = self.drives[0]
        # NB: The blkdebug script here looks for a "flush, read" pattern.
        # The flush occurs in hmp_io_writes, and the read during the block job.
        # NOTE(review): the nested dict syntax of this blockdev-add call
        # is partly outside this view.
        self.vm.cmd('blockdev-add',
                    node_name=drive0['id'],
                    driver=drive0['fmt'],
                        'driver': 'blkdebug',
                            'filename': drive0['file']
                        'event': 'flush_to_disk',
                        'immediately': False,
        self.create_anchor_backup(drive0)
        bitmap = self.add_bitmap('bitmap0', drive0)
        # Emulate guest activity
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'granularity': 65536,
        # Start the backup job; on_source_error='stop' pauses the job on
        # the injected error instead of failing it outright.
        # NOTE(review): some drive-backup arguments are outside this view.
        parent, _ = bitmap.last_target()
        target = self.prepare_backup(bitmap, parent)
        self.vm.cmd('drive-backup',
                    job_id=bitmap.drive['id'],
                    device=bitmap.drive['id'],
                    format=bitmap.drive['fmt'],
                    on_source_error='stop')
        event = self.vm.event_wait(name="BLOCK_JOB_ERROR",
                                   match={"data":{"device":bitmap.drive['id']}})
        self.assert_qmp(event, 'data', {'device': bitmap.drive['id'],
                                        'operation': 'read'})
        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'granularity': 65536,
        # Resume and check incremental backup for consistency
        self.vm.cmd('block-job-resume', device=bitmap.drive['id'])
        self.wait_qmp_backup(bitmap.drive['id'])
        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'granularity': 65536,
        self.make_reference_backup(bitmap)
if __name__ == '__main__':
    # This test only runs with qcow2 images over the file protocol.
    iotests.main(supported_protocols=['file'],
                 supported_fmts=['qcow2'])