4 # Tests for image streaming.
6 # Copyright (C) 2012 IBM Corp.
8 # This program is free software; you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation; either version 2 of the License, or
11 # (at your option) any later version.
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
18 # You should have received a copy of the GNU General Public License
19 # along with this program. If not, see <http://www.gnu.org/licenses/>.
26 from iotests import qemu_img, qemu_io
# Scratch images used by the single-drive test cases: a backing file, an
# intermediate overlay and the top (active) image, all in the iotests
# scratch directory.
backing_img = os.path.join(iotests.test_dir, 'backing.img')
mid_img = os.path.join(iotests.test_dir, 'mid.img')
test_img = os.path.join(iotests.test_dir, 'test.img')
class TestSingleDrive(iotests.QMPTestCase):
    # Streams a simple three-image chain (backing -> mid -> top) attached
    # as a single drive.
    image_len = 1 * 1024 * 1024  # 1 MB

    # setUp fragment -- NOTE(review): the 'def setUp(self):' header and a
    # few argument lines fall outside this excerpt.
        iotests.create_image(backing_img, TestSingleDrive.image_len)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % mid_img,
                 '-F', iotests.imgfmt, test_img)
        # One sector of data in the base image (offset 0) and one in the
        # intermediate image (offset 512 KiB), so streaming visibly
        # changes the top image's allocation map.
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 512', backing_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 524288 512', mid_img)
        # Attach through blkdebug and give the backing nodes explicit
        # node names ('mid', 'base') so tests can address them directly.
        self.vm = iotests.VM().add_drive("blkdebug::" + test_img,
                                         "backing.node-name=mid," +
                                         "backing.backing.node-name=base")

    # tearDown fragment: remove the scratch images
        os.remove(backing_img)
    def test_stream(self):
        """Stream the whole backing chain into the top image."""
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0')

        self.wait_until_completed()

        self.assert_no_active_block_jobs()

        # After streaming, the top image's allocation map must match the
        # backing file's.
        # NOTE(review): the call opening this comparison is not visible
        # in this excerpt.
            qemu_io('-f', 'raw', '-c', 'map', backing_img).stdout,
            qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout,
            'image file map does not match backing file after streaming')
    def test_stream_intermediate(self):
        """Stream into the intermediate node ('mid') only."""
        self.assert_no_active_block_jobs()

        # Base and mid must have different maps before streaming.
        # NOTE(review): the comparison call opener is not visible in this
        # excerpt.
            qemu_io('-f', 'raw', '-rU', '-c', 'map', backing_img).stdout,
            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', mid_img).stdout,
            'image file map matches backing file before streaming')

        # Address the intermediate image by its node name
        self.vm.cmd('block-stream', device='mid', job_id='stream-mid')

        self.wait_until_completed(drive='stream-mid')

        self.assert_no_active_block_jobs()

        # Afterwards 'mid' must contain the base image's data.
        # NOTE(review): call opener not visible in this excerpt.
            qemu_io('-f', 'raw', '-c', 'map', backing_img).stdout,
            qemu_io('-f', iotests.imgfmt, '-c', 'map', mid_img).stdout,
            'image file map does not match backing file after streaming')
    def test_stream_pause(self):
        """Pause a running stream job and check it makes no progress."""
        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        self.vm.cmd('block-stream', device='drive0')

        # Issue the pause while drive I/O is still blocked, then release
        # the I/O and wait for the job to actually reach the paused state
        self.pause_job('drive0', wait=False)
        self.vm.resume_drive('drive0')
        self.pause_wait('drive0')

        # Sample the job offset; it must not move while paused
        result = self.vm.qmp('query-block-jobs')
        offset = self.dictpath(result, 'return[0]/offset')

        # NOTE(review): a delay between the two offset samples is
        # presumably taken here but is not visible in this excerpt.
        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/offset', offset)

        self.vm.cmd('block-job-resume', device='drive0')

        self.wait_until_completed()

        self.assert_no_active_block_jobs()

        # After streaming, the top image map must match the backing file.
        # NOTE(review): call opener not visible in this excerpt.
            qemu_io('-f', 'raw', '-c', 'map', backing_img).stdout,
            qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout,
            'image file map does not match backing file after streaming')
    def test_stream_no_op(self):
        """Streaming with base == the immediate backing file copies nothing."""
        self.assert_no_active_block_jobs()

        # The image map is empty before the operation
        # NOTE(review): the 'empty_map = qemu_io(' opener is not visible
        # in this excerpt.
            '-f', iotests.imgfmt, '-rU', '-c', 'map', test_img).stdout

        # This is a no-op: no data should ever be copied from the base image
        self.vm.cmd('block-stream', device='drive0', base=mid_img)

        self.wait_until_completed()

        self.assert_no_active_block_jobs()

        # The top image's map must be unchanged.
        # NOTE(review): call opener not visible in this excerpt.
            qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout,
            empty_map, 'image file map changed after a no-op')
    def test_stream_partial(self):
        """Stream only above the base image (base stays in the chain)."""
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0', base=backing_img)

        self.wait_until_completed()

        self.assert_no_active_block_jobs()

        # The top image must now match the intermediate image.
        # NOTE(review): call opener not visible in this excerpt.
            qemu_io('-f', iotests.imgfmt, '-c', 'map', mid_img).stdout,
            qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout,
            'image file map does not match backing file after streaming')
154 def test_device_not_found(self):
155 result = self.vm.qmp('block-stream', device='nonexistent')
156 self.assert_qmp(result, 'error/desc',
157 'Cannot find device=\'nonexistent\' nor node-name=\'nonexistent\'')
159 def test_job_id_missing(self):
160 result = self.vm.qmp('block-stream', device='mid')
161 self.assert_qmp(result, 'error/desc', "Invalid job ID ''")
    def test_read_only(self):
        """Streaming into a read-only top node must be rejected."""
        # Create a new file that we can attach (we need a read-only top)
        with iotests.FilePath('ro-top.img') as ro_top_path:
            qemu_img('create', '-f', iotests.imgfmt, ro_top_path,
            # NOTE(review): parts of this blockdev-add call (the options
            # dict and presumably a read-only flag) are not visible in
            # this excerpt.
            self.vm.cmd('blockdev-add',
                        driver=iotests.imgfmt,
                            'filename': ro_top_path,

            result = self.vm.qmp('block-stream', job_id='stream',
                                 device='ro-top', base_node='base')
            self.assert_qmp(result, 'error/desc', 'Block node is read-only')

            self.vm.cmd('blockdev-del', node_name='ro-top')
class TestParallelOps(iotests.QMPTestCase):
    # Runs several block jobs against one long backing chain.
    num_ops = 4     # Number of parallel block-stream operations
    num_imgs = num_ops * 2 + 1              # images in the backing chain
    image_len = num_ops * 4 * 1024 * 1024   # 4 MiB of data per operation
    # setUp fragment -- NOTE(review): the 'def setUp(self):' header and
    # the imgs/opts list initialisation are not visible in this excerpt.

        # Initialize file names and command-line options
        for i in range(self.num_imgs):
            # The deepest image gets the longest "backing." prefix, so
            # every image in the chain receives an explicit node name
            img_depth = self.num_imgs - i - 1
            opts.append("backing." * img_depth + "node-name=node%d" % i)
            self.imgs.append(os.path.join(iotests.test_dir, 'img-%d.img' % i))

        # Create the chain: raw base image, format overlays above it
        iotests.create_image(self.imgs[0], self.image_len)
        for i in range(1, self.num_imgs):
            qemu_img('create', '-f', iotests.imgfmt,
                     '-o', 'backing_file=%s' % self.imgs[i-1],
                     '-F', 'raw' if i == 1 else iotests.imgfmt, self.imgs[i])

        # Put data into the images we are copying data from
        odd_img_indexes = [x for x in reversed(range(self.num_imgs)) if x % 2 == 1]
        for i in range(len(odd_img_indexes)):
            # Alternate between 2MB and 4MB.
            # This way jobs will not finish in the same order they were created
            num_mb = 2 + 2 * (i % 2)
            qemu_io('-f', iotests.imgfmt,
                    '-c', 'write -P 0xFF %dM %dM' % (i * 4, num_mb),
                    self.imgs[odd_img_indexes[i]])

        # Attach the drive to the VM
        self.vm = iotests.VM()
        self.vm.add_drive(self.imgs[-1], ','.join(opts))

        # tearDown fragment: delete all scratch images
        for img in self.imgs:
    # Test that it's possible to run several block-stream operations
    # in parallel in the same snapshot chain
    @unittest.skipIf(os.environ.get('QEMU_CHECK_BLOCK_AUTO'), 'disabled in CI')
    def test_stream_parallel(self):
        self.assert_no_active_block_jobs()

        # Check that the maps don't match before the streaming operations
        for i in range(2, self.num_imgs, 2):
            # NOTE(review): comparison call opener not visible in this
            # excerpt.
                qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[i]).stdout,
                qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[i-1]).stdout,
                'image file map matches backing file before streaming')

        # Create all streaming jobs
        # NOTE(review): 'pending_jobs' initialisation and the trailing
        # throttle argument of the block-stream call are not visible in
        # this excerpt.
        for i in range(2, self.num_imgs, 2):
            node_name = 'node%d' % i
            job_id = 'stream-%s' % node_name
            pending_jobs.append(job_id)
            self.vm.cmd('block-stream', device=node_name,
                        job_id=job_id, bottom=f'node{i-1}',

        # Do this in reverse: After unthrottling them, some jobs may finish
        # before we have unthrottled all of them. This will drain their
        # subgraph, and this will make jobs above them advance (despite those
        # jobs on top being throttled). In the worst case, all jobs below the
        # top one are finished before we can unthrottle it, and this makes it
        # advance so far that it completes before we can unthrottle it - which
        # results in an error.
        # Starting from the top (i.e. in reverse) does not have this problem:
        # When a job finishes, the ones below it are not advanced.
        for job in reversed(pending_jobs):
            self.vm.cmd('block-job-set-speed', device=job, speed=0)

        # Wait for all jobs to be finished.
        while len(pending_jobs) > 0:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_COMPLETED':
                    job_id = self.dictpath(event, 'data/device')
                    self.assertTrue(job_id in pending_jobs)
                    self.assert_qmp_absent(event, 'data/error')
                    pending_jobs.remove(job_id)

        self.assert_no_active_block_jobs()

        # Check that all maps match now
        for i in range(2, self.num_imgs, 2):
            # NOTE(review): comparison call opener not visible in this
            # excerpt.
                qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i]).stdout,
                qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i-1]).stdout,
                'image file map does not match backing file after streaming')
284 # Test that it's not possible to perform two block-stream
285 # operations if there are nodes involved in both.
286 def test_overlapping_1(self):
287 self.assert_no_active_block_jobs()
289 # Set a speed limit to make sure that this job blocks the rest
290 self.vm.cmd('block-stream', device='node4',
291 job_id='stream-node4', base=self.imgs[1],
292 filter_node_name='stream-filter', speed=1024*1024)
294 result = self.vm.qmp('block-stream', device='node5', job_id='stream-node5', base=self.imgs[2])
295 self.assert_qmp(result, 'error/desc',
296 "Node 'stream-filter' is busy: block device is in use by block job: stream")
298 result = self.vm.qmp('block-stream', device='node3', job_id='stream-node3', base=self.imgs[2])
299 self.assert_qmp(result, 'error/desc',
300 "Node 'node3' is busy: block device is in use by block job: stream")
302 result = self.vm.qmp('block-stream', device='node4', job_id='stream-node4-v2')
303 self.assert_qmp(result, 'error/desc',
304 "Node 'node4' is busy: block device is in use by block job: stream")
306 # block-commit should also fail if it touches nodes used by the stream job
307 result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[4], job_id='commit-node4')
308 self.assert_qmp(result, 'error/desc',
309 "Node 'stream-filter' is busy: block device is in use by block job: stream")
311 result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[1], top=self.imgs[3], job_id='commit-node1')
312 self.assert_qmp(result, 'error/desc',
313 "Node 'node3' is busy: block device is in use by block job: stream")
315 # This fails because it needs to modify the backing string in node2, which is blocked
316 result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[0], top=self.imgs[1], job_id='commit-node0')
317 self.assert_qmp(result, 'error/desc',
318 "Node 'node2' is busy: block device is in use by block job: stream")
320 self.vm.cmd('block-job-set-speed', device='stream-node4', speed=0)
322 self.wait_until_completed(drive='stream-node4')
323 self.assert_no_active_block_jobs()
325 # Similar to test_overlapping_1, but with block-commit
326 # blocking the other jobs
327 def test_overlapping_2(self):
328 self.assertLessEqual(9, self.num_imgs)
329 self.assert_no_active_block_jobs()
331 # Set a speed limit to make sure that this job blocks the rest
332 self.vm.cmd('block-commit', device='drive0', top=self.imgs[5], base=self.imgs[3], job_id='commit-node3', speed=1024*1024)
334 result = self.vm.qmp('block-stream', device='node3', job_id='stream-node3')
335 self.assert_qmp(result, 'error/desc',
336 "Node 'node3' is busy: block device is in use by block job: commit")
338 result = self.vm.qmp('block-stream', device='node6', base=self.imgs[2], job_id='stream-node6')
339 self.assert_qmp(result, 'error/desc',
340 "Node 'node5' is busy: block device is in use by block job: commit")
342 result = self.vm.qmp('block-stream', device='node4', base=self.imgs[2], job_id='stream-node4')
343 self.assert_qmp(result, 'error/desc',
344 "Node 'node4' is busy: block device is in use by block job: commit")
346 result = self.vm.qmp('block-stream', device='node6', base=self.imgs[4], job_id='stream-node6-v2')
347 self.assert_qmp(result, 'error/desc',
348 "Node 'node5' is busy: block device is in use by block job: commit")
350 # This fails because block-commit currently blocks the active layer even if it's not used
351 result = self.vm.qmp('block-stream', device='drive0', base=self.imgs[5], job_id='stream-drive0')
352 self.assert_qmp(result, 'error/desc',
353 "Node 'drive0' is busy: block device is in use by block job: commit")
355 self.vm.cmd('block-job-set-speed', device='commit-node3', speed=0)
357 self.wait_until_completed(drive='commit-node3')
359 # Similar to test_overlapping_2, but here block-commit doesn't use the 'top' parameter.
360 # Internally this uses a mirror block job, hence the separate test case.
361 def test_overlapping_3(self):
362 self.assertLessEqual(8, self.num_imgs)
363 self.assert_no_active_block_jobs()
365 # Set a speed limit to make sure that this job blocks the rest
366 self.vm.cmd('block-commit', device='drive0', base=self.imgs[3], job_id='commit-drive0', speed=1024*1024)
368 result = self.vm.qmp('block-stream', device='node5', base=self.imgs[3], job_id='stream-node6')
369 self.assert_qmp(result, 'error/desc',
370 "Node 'node5' is busy: block device is in use by block job: commit")
372 self.vm.cmd('block-job-set-speed', device='commit-drive0', speed=0)
374 event = self.vm.event_wait(name='BLOCK_JOB_READY')
375 self.assert_qmp(event, 'data/device', 'commit-drive0')
376 self.assert_qmp(event, 'data/type', 'commit')
377 self.assert_qmp_absent(event, 'data/error')
379 self.vm.cmd('block-job-complete', device='commit-drive0')
381 self.wait_until_completed(drive='commit-drive0')
383 # In this case the base node of the stream job is the same as the
384 # top node of commit job. Since this results in the commit filter
385 # node being part of the stream chain, this is not allowed.
386 def test_overlapping_4(self):
387 self.assert_no_active_block_jobs()
389 # Commit from node2 into node0
390 self.vm.cmd('block-commit', device='drive0',
391 top=self.imgs[2], base=self.imgs[0],
392 filter_node_name='commit-filter', speed=1024*1024)
394 # Stream from node2 into node4
395 result = self.vm.qmp('block-stream', device='node4', base_node='node2', job_id='node4')
396 self.assert_qmp(result, 'error/desc',
397 "Cannot freeze 'backing' link to 'commit-filter'")
399 self.vm.cmd('block-job-set-speed', device='drive0', speed=0)
401 self.wait_until_completed()
402 self.assert_no_active_block_jobs()
404 # In this case the base node of the stream job is the commit job's
405 # filter node. stream does not have a real dependency on its base
406 # node, so even though commit removes it when it is done, there is
408 def test_overlapping_5(self):
409 self.assert_no_active_block_jobs()
411 # Commit from node2 into node0
412 self.vm.cmd('block-commit', device='drive0',
413 top_node='node2', base_node='node0',
414 filter_node_name='commit-filter', speed=1024*1024)
416 # Stream from node2 into node4
417 self.vm.cmd('block-stream', device='node4',
418 base_node='commit-filter', job_id='node4')
420 self.vm.cmd('block-job-set-speed', device='drive0', speed=0)
422 self.vm.run_job(job='drive0', auto_dismiss=True)
423 self.vm.run_job(job='node4', auto_dismiss=True)
424 self.assert_no_active_block_jobs()
426 # Assert that node0 is now the backing node of node4
427 result = self.vm.qmp('query-named-block-nodes')
428 node4 = next(node for node in result['return'] if node['node-name'] == 'node4')
429 self.assertEqual(node4['image']['backing-image']['filename'], self.imgs[0])
431 # Test a block-stream and a block-commit job in parallel
432 # Here the stream job is supposed to finish quickly in order to reproduce
433 # the scenario that triggers the bug fixed in 3d5d319e1221 and 1a63a907507
434 def test_stream_commit_1(self):
435 self.assertLessEqual(8, self.num_imgs)
436 self.assert_no_active_block_jobs()
438 # Stream from node0 into node2
439 self.vm.cmd('block-stream', device='node2', base_node='node0', job_id='node2')
441 # Commit from the active layer into node3
442 self.vm.cmd('block-commit', device='drive0', base=self.imgs[3])
444 # Wait for all jobs to be finished.
445 pending_jobs = ['node2', 'drive0']
446 while len(pending_jobs) > 0:
447 for event in self.vm.get_qmp_events(wait=True):
448 if event['event'] == 'BLOCK_JOB_COMPLETED':
449 node_name = self.dictpath(event, 'data/device')
450 self.assertTrue(node_name in pending_jobs)
451 self.assert_qmp_absent(event, 'data/error')
452 pending_jobs.remove(node_name)
453 if event['event'] == 'BLOCK_JOB_READY':
454 self.assert_qmp(event, 'data/device', 'drive0')
455 self.assert_qmp(event, 'data/type', 'commit')
456 self.assert_qmp_absent(event, 'data/error')
457 self.assertTrue('drive0' in pending_jobs)
458 self.vm.qmp('block-job-complete', device='drive0')
460 self.assert_no_active_block_jobs()
462 # This is similar to test_stream_commit_1 but both jobs are slowed
463 # down so they can run in parallel for a little while.
464 def test_stream_commit_2(self):
465 self.assertLessEqual(8, self.num_imgs)
466 self.assert_no_active_block_jobs()
468 # Stream from node0 into node4
469 self.vm.cmd('block-stream', device='node4', base_node='node0', job_id='node4', speed=1024*1024)
471 # Commit from the active layer into node5
472 self.vm.cmd('block-commit', device='drive0', base=self.imgs[5], speed=1024*1024)
474 for job in ['drive0', 'node4']:
475 self.vm.cmd('block-job-set-speed', device=job, speed=0)
477 # Wait for all jobs to be finished.
478 pending_jobs = ['node4', 'drive0']
479 while len(pending_jobs) > 0:
480 for event in self.vm.get_qmp_events(wait=True):
481 if event['event'] == 'BLOCK_JOB_COMPLETED':
482 node_name = self.dictpath(event, 'data/device')
483 self.assertTrue(node_name in pending_jobs)
484 self.assert_qmp_absent(event, 'data/error')
485 pending_jobs.remove(node_name)
486 if event['event'] == 'BLOCK_JOB_READY':
487 self.assert_qmp(event, 'data/device', 'drive0')
488 self.assert_qmp(event, 'data/type', 'commit')
489 self.assert_qmp_absent(event, 'data/error')
490 self.assertTrue('drive0' in pending_jobs)
491 self.vm.qmp('block-job-complete', device='drive0')
493 self.assert_no_active_block_jobs()
    # Test the base_node parameter
    def test_stream_base_node_name(self):
        self.assert_no_active_block_jobs()

        # Maps of imgs[4] and imgs[3] must differ before streaming.
        # NOTE(review): comparison call opener not visible in this
        # excerpt.
            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[4]).stdout,
            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[3]).stdout,
            'image file map matches backing file before streaming')

        # Error: the base node does not exist
        result = self.vm.qmp('block-stream', device='node4', base_node='none', job_id='stream')
        self.assert_qmp(result, 'error/desc',
            'Cannot find device=\'\' nor node-name=\'none\'')

        # Error: the base node is not a backing file of the top node
        result = self.vm.qmp('block-stream', device='node4', base_node='node6', job_id='stream')
        self.assert_qmp(result, 'error/desc',
            "Node 'node6' is not a backing image of 'node4'")

        # Error: the base node is the same as the top node
        result = self.vm.qmp('block-stream', device='node4', base_node='node4', job_id='stream')
        self.assert_qmp(result, 'error/desc',
            "Node 'node4' is not a backing image of 'node4'")

        # Error: cannot specify 'base' and 'base-node' at the same time
        result = self.vm.qmp('block-stream', device='node4', base=self.imgs[2], base_node='node2', job_id='stream')
        self.assert_qmp(result, 'error/desc',
            "'base' and 'base-node' cannot be specified at the same time")

        # Success: the base node is a backing file of the top node
        self.vm.cmd('block-stream', device='node4', base_node='node2', job_id='stream')

        self.wait_until_completed(drive='stream')

        self.assert_no_active_block_jobs()

        # After streaming, imgs[4] must differ from imgs[3] again.
        # NOTE(review): comparison call opener not visible in this
        # excerpt.
            qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[4]).stdout,
            qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[3]).stdout,
            'image file map matches backing file after streaming')
class TestQuorum(iotests.QMPTestCase):
    # Streams one child of a quorum node.
    # NOTE(review): the num_children/children/backing attribute
    # definitions are not visible in this excerpt.

    @iotests.skip_if_unsupported(['quorum'])
    # setUp fragment -- NOTE(review): the 'def setUp(self):' header is
    # not visible in this excerpt.
        opts = ['driver=quorum', 'vote-threshold=2']

        # Initialize file names and command-line options
        for i in range(self.num_children):
            child_img = os.path.join(iotests.test_dir, 'img-%d.img' % i)
            backing_img = os.path.join(iotests.test_dir, 'backing-%d.img' % i)
            self.children.append(child_img)
            self.backing.append(backing_img)
            # Each child gets its own 1M backing file with some data and
            # an overlay on top, plus an explicit node name 'node%d'
            qemu_img('create', '-f', iotests.imgfmt, backing_img, '1M')
            qemu_io('-f', iotests.imgfmt,
                    '-c', 'write -P 0x55 0 1024', backing_img)
            qemu_img('create', '-f', iotests.imgfmt,
                     '-o', 'backing_file=%s' % backing_img,
                     '-F', iotests.imgfmt, child_img)
            opts.append("children.%d.file.filename=%s" % (i, child_img))
            opts.append("children.%d.node-name=node%d" % (i, i))

        # Attach the drive to the VM
        self.vm = iotests.VM()
        self.vm.add_drive(path = None, opts = ','.join(opts))

        # tearDown fragment: delete the child and backing images
        for img in self.children:
        for img in self.backing:
    def test_stream_quorum(self):
        """Stream the backing file of quorum child 0 into that child."""
        # Maps must differ before streaming.
        # NOTE(review): comparison call opener not visible in this
        # excerpt.
            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.children[0]).stdout,
            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.backing[0]).stdout,
            'image file map matches backing file before streaming')

        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='node0', job_id='stream-node0')

        self.wait_until_completed(drive='stream-node0')

        self.assert_no_active_block_jobs()

        # Child 0 must now contain its backing file's data.
        # NOTE(review): comparison call opener not visible in this
        # excerpt.
            qemu_io('-f', iotests.imgfmt, '-c', 'map', self.children[0]).stdout,
            qemu_io('-f', iotests.imgfmt, '-c', 'map', self.backing[0]).stdout,
            'image file map does not match backing file after streaming')
class TestSmallerBackingFile(iotests.QMPTestCase):
    # The backing file is half the size of the top image.
    backing_len = 1 * 1024 * 1024  # 1 MB
    image_len = 2 * backing_len

    # setUp fragment -- NOTE(review): the 'def setUp(self):' header is
    # not visible in this excerpt.
        iotests.create_image(backing_img, self.backing_len)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', test_img, str(self.image_len))
        self.vm = iotests.VM().add_drive(test_img)
605 # If this hangs, then you are missing a fix to complete streaming when the
606 # end of the backing file is reached.
607 def test_stream(self):
608 self.assert_no_active_block_jobs()
610 self.vm.cmd('block-stream', device='drive0')
612 self.wait_until_completed()
614 self.assert_no_active_block_jobs()
class TestErrors(iotests.QMPTestCase):
    # Shared base for the error-injection tests (TestEIO, TestENOSPC).
    image_len = 2 * 1024 * 1024  # 2 MB

    # this should match STREAM_BUFFER_SIZE/512 in block/stream.c
    STREAM_BUFFER_SIZE = 512 * 1024

    def create_blkdebug_file(self, name, event, errno):
        """Write a blkdebug configuration file to 'name'.

        NOTE(review): the configuration template written to the file is
        not visible in this excerpt; only its format arguments (event,
        errno, STREAM_BUFFER_SIZE // 512 sectors) are.  The matching
        file.write(...) opener and file.close() also fall outside this
        excerpt -- consider a 'with open(...)' block when touching this.
        """
        file = open(name, 'w')
        ''' % (event, errno, self.STREAM_BUFFER_SIZE // 512, event, event))
class TestEIO(TestErrors):
    # Streams with errno 5 (EIO) injected on reads from the backing file.

    # setUp fragment -- NOTE(review): the 'def setUp(self):' header and
    # the final argument line of the qemu_img call are not visible in
    # this excerpt.
        self.blkdebug_file = backing_img + ".blkdebug"
        iotests.create_image(backing_img, TestErrors.image_len)
        self.create_blkdebug_file(self.blkdebug_file, "read_aio", 5)
        # The backing file is accessed through the blkdebug protocol so
        # the configured error is injected on reads
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=blkdebug:%s:%s,backing_fmt=raw'
                       % (self.blkdebug_file, backing_img),
        self.vm = iotests.VM().add_drive(test_img)

        # tearDown fragment: remove the scratch files
        os.remove(backing_img)
        os.remove(self.blkdebug_file)
    def test_report(self):
        """The stream job reports the injected read error and fails."""
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0')

        # NOTE(review): the error/completed bookkeeping and the event
        # loop header are not visible in this excerpt.
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/error', 'Input/output error')
                    # The job stops at the first failing buffer
                    self.assert_qmp(event, 'data/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(event, 'data/len', self.image_len)
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
    def test_ignore(self):
        """With on-error=ignore the job reports errors but still reaches
        the end of the image (offset == len on completion)."""
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0', on_error='ignore')

        # NOTE(review): the error/completed bookkeeping, the event loop
        # header, and a 'break' inside the empty-result branch are not
        # visible in this excerpt.
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Job finished too quickly
                    self.assertIn(result['return'][0]['status'],
                                  ['running', 'pending', 'aborting', 'concluded'])
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/error', 'Input/output error')
                    # Despite the errors, the job ran to the end
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
    # test_stop fragment -- NOTE(review): the method's 'def' header is
    # not visible in this excerpt.  With on-error=stop the job pauses on
    # the injected error ('failed' io-status) and resumes to completion.
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0', on_error='stop')

        # NOTE(review): bookkeeping and event loop header not visible in
        # this excerpt.
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')

                    # Wait until the job has actually reached 'paused'
                    if self.vm.qmp('query-block-jobs')['return'][0]['status'] != 'paused':
                        self.vm.events_wait([(
                            {'data': {'id': 'drive0', 'status': 'paused'}}

                    result = self.vm.qmp('query-block-jobs')
                    self.assert_qmp(result, 'return[0]/status', 'paused')
                    self.assert_qmp(result, 'return[0]/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(result, 'return[0]/io-status', 'failed')

                    self.vm.cmd('block-job-resume', device='drive0')

                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Race; likely already finished. Check.
                    self.assertIn(result['return'][0]['status'],
                                  ['running', 'pending', 'aborting', 'concluded'])
                    self.assert_qmp(result, 'return[0]/io-status', 'ok')
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
    def test_enospc(self):
        """on-error=enospc does not cover the injected EIO (this class
        injects errno 5); the job still fails with an I/O error."""
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0', on_error='enospc')

        # NOTE(review): bookkeeping and event loop header not visible in
        # this excerpt.
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/error', 'Input/output error')
                    # The job stops at the first failing buffer
                    self.assert_qmp(event, 'data/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(event, 'data/len', self.image_len)
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
class TestENOSPC(TestErrors):
    # Streams with errno 28 (ENOSPC) injected on reads from the backing
    # file.

    # setUp fragment -- NOTE(review): the 'def setUp(self):' header and
    # the final argument line of the qemu_img call are not visible in
    # this excerpt.
        self.blkdebug_file = backing_img + ".blkdebug"
        iotests.create_image(backing_img, TestErrors.image_len)
        self.create_blkdebug_file(self.blkdebug_file, "read_aio", 28)
        # The backing file is accessed through the blkdebug protocol so
        # the configured error is injected on reads
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=blkdebug:%s:%s,backing_fmt=raw'
                       % (self.blkdebug_file, backing_img),
        self.vm = iotests.VM().add_drive(test_img)

        # tearDown fragment: remove the scratch files
        os.remove(backing_img)
        os.remove(self.blkdebug_file)
    def test_enospc(self):
        """With on-error=enospc the injected ENOSPC pauses the job
        (io-status 'nospace'); after resuming it runs to completion."""
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0', on_error='enospc')

        # NOTE(review): bookkeeping and event loop header not visible in
        # this excerpt.
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')

                    # Wait until the job has actually reached 'paused'
                    if self.vm.qmp('query-block-jobs')['return'][0]['status'] != 'paused':
                        self.vm.events_wait([(
                            {'data': {'id': 'drive0', 'status': 'paused'}}

                    result = self.vm.qmp('query-block-jobs')
                    self.assert_qmp(result, 'return[0]/status', 'paused')
                    self.assert_qmp(result, 'return[0]/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(result, 'return[0]/io-status', 'nospace')

                    self.vm.cmd('block-job-resume', device='drive0')

                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Race; likely already finished. Check.
                    self.assertIn(result['return'][0]['status'],
                                  ['running', 'pending', 'aborting', 'concluded'])
                    self.assert_qmp(result, 'return[0]/io-status', 'ok')
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
class TestStreamStop(iotests.QMPTestCase):
    # Cancels a stream job shortly after it has started.
    image_len = 8 * 1024 * 1024 * 1024  # 8 GB

    # setUp fragment -- NOTE(review): the 'def setUp(self):' header is
    # not visible in this excerpt.
        qemu_img('create', backing_img, str(TestStreamStop.image_len))
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 32M', backing_img)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', test_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 32M 32M', test_img)
        # blkdebug allows pausing/resuming the drive's I/O
        self.vm = iotests.VM().add_drive("blkdebug::" + test_img)

        # tearDown fragment: remove the scratch backing image
        os.remove(backing_img)
    def test_stream_stop(self):
        """Start a stream job on a paused drive and cancel it before it
        can make progress."""
        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        self.vm.cmd('block-stream', device='drive0')

        # While the drive's I/O is paused, only JOB_STATUS_CHANGE events
        # may have arrived.
        # NOTE(review): the loop header iterating 'events' is not
        # visible in this excerpt.
        events = self.vm.get_qmp_events(wait=False)
            self.assert_qmp(e, 'event', 'JOB_STATUS_CHANGE')
            self.assert_qmp(e, 'data/id', 'drive0')

        self.cancel_and_wait(resume=True)
class TestSetSpeed(iotests.QMPTestCase):
    # Checks speed throttling of a stream job.
    image_len = 80 * 1024 * 1024  # 80 MB

    # setUp fragment -- NOTE(review): the 'def setUp(self):' header is
    # not visible in this excerpt.
        qemu_img('create', backing_img, str(TestSetSpeed.image_len))
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 32M', backing_img)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', test_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 32M 32M', test_img)
        # blkdebug allows pausing/resuming the drive's I/O
        self.vm = iotests.VM().add_drive('blkdebug::' + test_img)

        # tearDown fragment: remove the scratch backing image
        os.remove(backing_img)
915 # This is a short performance test which is not run by default.
916 # Invoke "IMGFMT=qed ./030 TestSetSpeed.perf_test_throughput"
917 def perf_test_throughput(self):
918 self.assert_no_active_block_jobs()
920 self.vm.cmd('block-stream', device='drive0')
922 self.vm.cmd('block-job-set-speed', device='drive0', speed=8 * 1024 * 1024)
924 self.wait_until_completed()
926 self.assert_no_active_block_jobs()
928 def test_set_speed(self):
929 self.assert_no_active_block_jobs()
931 self.vm.pause_drive('drive0')
932 self.vm.cmd('block-stream', device='drive0')
935 result = self.vm.qmp('query-block-jobs')
936 self.assert_qmp(result, 'return[0]/device', 'drive0')
937 self.assert_qmp(result, 'return[0]/speed', 0)
939 self.vm.cmd('block-job-set-speed', device='drive0', speed=8 * 1024 * 1024)
941 # Ensure the speed we set was accepted
942 result = self.vm.qmp('query-block-jobs')
943 self.assert_qmp(result, 'return[0]/device', 'drive0')
944 self.assert_qmp(result, 'return[0]/speed', 8 * 1024 * 1024)
946 self.cancel_and_wait(resume=True)
947 self.vm.pause_drive('drive0')
949 # Check setting speed in block-stream works
950 self.vm.cmd('block-stream', device='drive0', speed=4 * 1024 * 1024)
952 result = self.vm.qmp('query-block-jobs')
953 self.assert_qmp(result, 'return[0]/device', 'drive0')
954 self.assert_qmp(result, 'return[0]/speed', 4 * 1024 * 1024)
956 self.cancel_and_wait(resume=True)
958 def test_set_speed_invalid(self):
959 self.assert_no_active_block_jobs()
961 result = self.vm.qmp('block-stream', device='drive0', speed=-1)
962 self.assert_qmp(result, 'error/desc', "Parameter 'speed' expects a non-negative value")
964 self.assert_no_active_block_jobs()
966 self.vm.pause_drive('drive0')
967 self.vm.cmd('block-stream', device='drive0')
969 result = self.vm.qmp('block-job-set-speed', device='drive0', speed=-1)
970 self.assert_qmp(result, 'error/desc', "Parameter 'speed' expects a non-negative value")
972 self.cancel_and_wait(resume=True)
if __name__ == '__main__':
    # Run the suite for the image formats and protocols these tests are
    # written against.
    iotests.main(supported_protocols=['file'],
                 supported_fmts=['qcow2', 'qed'])