#!/usr/bin/env python3
# group: throttle
#
# Tests for IO throttling
#
# Copyright (C) 2015 Red Hat, Inc.
# Copyright (C) 2015-2016 Igalia, S.L.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import iotests

nsec_per_sec = 1000000000

class ThrottleTestCase(iotests.QMPTestCase):
    test_driver = "null-aio"
    max_drives = 3
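    # test_driver selects the block driver used for the scratch drives;
    # the ThrottleTestCoroutine subclass below overrides it with "null-co"
    # so the whole test set also runs against the coroutine-based driver.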

    def blockstats(self, device):
        result = self.vm.qmp("query-blockstats")
        for r in result['return']:
            if r['device'] == device:
                stat = r['stats']
                return stat['rd_bytes'], stat['rd_operations'], stat['wr_bytes'], stat['wr_operations']
        raise Exception("Device not found for blockstats: %s" % device)

    def required_drivers(self):
        return [self.test_driver]

    @iotests.skip_if_unsupported(required_drivers)
    def setUp(self):
        self.vm = iotests.VM()
        for i in range(0, self.max_drives):
            self.vm.add_drive(self.test_driver + "://", "file.read-zeroes=on")
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()

    def configure_throttle(self, ndrives, params):
        params['group'] = 'test'
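        # All drives are placed in the same 'test' throttle group, so the
        # configured limits are shared between them.  The callers account
        # for this by scaling the limits with the number of drives, and
        # do_test_throttle() divides the expected totals by ndrives.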

        # Set the I/O throttling parameters for all drives
        for i in range(0, ndrives):
            params['device'] = 'drive%d' % i
            result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **params)
            self.assert_qmp(result, 'return', {})

    def do_test_throttle(self, ndrives, seconds, params, first_drive=0):
        def check_limit(limit, num):
            # The I/O throttling algorithm is discrete, so allow a 10% error
            # margin to make the test more robust.
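            # For example, with bps_rd=4096, 5 virtual seconds and a single
            # drive, roughly 5 * 4096 = 20480 bytes should be read, so any
            # value strictly between 18432 and 22528 is accepted.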
            return limit == 0 or \
                   (num < seconds * limit * 1.1 / ndrives
                   and num > seconds * limit * 0.9 / ndrives)

        # Set vm clock to a known value
        ns = seconds * nsec_per_sec
        self.vm.qtest("clock_step %d" % ns)

        # Submit enough requests so the throttling mechanism kicks
        # in. The throttled requests won't be executed until we
        # advance the virtual clock.
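        # For instance, with only iops_rd=10 set, a single drive and 5
        # seconds, rd_nr works out to 10 * 5 * 2 = 100 read requests, i.e.
        # twice as many as the limit allows to complete in that time.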
        rq_size = 512
        rd_nr = max(params['bps'] // rq_size // 2,
                    params['bps_rd'] // rq_size,
                    params['iops'] // 2,
                    params['iops_rd'])
        rd_nr *= seconds * 2
        rd_nr //= ndrives
        wr_nr = max(params['bps'] // rq_size // 2,
                    params['bps_wr'] // rq_size,
                    params['iops'] // 2,
                    params['iops_wr'])
        wr_nr *= seconds * 2
        wr_nr //= ndrives

        # Send I/O requests to all drives
        for i in range(rd_nr):
            for drive in range(0, ndrives):
                idx = first_drive + drive
                self.vm.hmp_qemu_io("drive%d" % idx, "aio_read %d %d" %
                                    (i * rq_size, rq_size))

        for i in range(wr_nr):
            for drive in range(0, ndrives):
                idx = first_drive + drive
                self.vm.hmp_qemu_io("drive%d" % idx, "aio_write %d %d" %
                                    (i * rq_size, rq_size))

        # We'll store the I/O stats for each drive in these arrays
        start_rd_bytes = [0] * ndrives
        start_rd_iops  = [0] * ndrives
        start_wr_bytes = [0] * ndrives
        start_wr_iops  = [0] * ndrives
        end_rd_bytes   = [0] * ndrives
        end_rd_iops    = [0] * ndrives
        end_wr_bytes   = [0] * ndrives
        end_wr_iops    = [0] * ndrives

        # Read the stats before advancing the clock
        for i in range(0, ndrives):
            idx = first_drive + i
            start_rd_bytes[i], start_rd_iops[i], start_wr_bytes[i], \
                start_wr_iops[i] = self.blockstats('drive%d' % idx)

        self.vm.qtest("clock_step %d" % ns)

        # Read the stats after advancing the clock
        for i in range(0, ndrives):
            idx = first_drive + i
            end_rd_bytes[i], end_rd_iops[i], end_wr_bytes[i], \
                end_wr_iops[i] = self.blockstats('drive%d' % idx)

        # Check that the I/O is within the limits and evenly distributed
        for i in range(0, ndrives):
            rd_bytes = end_rd_bytes[i] - start_rd_bytes[i]
            rd_iops = end_rd_iops[i] - start_rd_iops[i]
            wr_bytes = end_wr_bytes[i] - start_wr_bytes[i]
            wr_iops = end_wr_iops[i] - start_wr_iops[i]

            self.assertTrue(check_limit(params['bps'], rd_bytes + wr_bytes))
            self.assertTrue(check_limit(params['bps_rd'], rd_bytes))
            self.assertTrue(check_limit(params['bps_wr'], wr_bytes))
            self.assertTrue(check_limit(params['iops'], rd_iops + wr_iops))
            self.assertTrue(check_limit(params['iops_rd'], rd_iops))
            self.assertTrue(check_limit(params['iops_wr'], wr_iops))

        # Allow remaining requests to finish.  We submitted twice as many to
        # ensure the throttle limit is reached.
        self.vm.qtest("clock_step %d" % ns)

    # Connect N drives to a VM and test I/O on all of them
    def test_all(self):
        params = {"bps": 4096,
                  "bps_rd": 4096,
                  "bps_wr": 4096,
                  "iops": 10,
                  "iops_rd": 10,
                  "iops_wr": 10,
                 }
        # Repeat the test with different numbers of drives
        for ndrives in range(1, self.max_drives + 1):
            # Test each throttling parameter on its own
            for tk in params:
                limits = dict([(k, 0) for k in params])
                limits[tk] = params[tk] * ndrives
                self.configure_throttle(ndrives, limits)
                self.do_test_throttle(ndrives, 5, limits)

    # Connect N drives to a VM and test I/O on just one of them at a time
    def test_one(self):
        params = {"bps": 4096,
                  "bps_rd": 4096,
                  "bps_wr": 4096,
                  "iops": 10,
                  "iops_rd": 10,
                  "iops_wr": 10,
                 }
        # Repeat the test for each one of the drives
        for drive in range(0, self.max_drives):
            # Test each throttling parameter on its own
            for tk in params:
                limits = dict([(k, 0) for k in params])
                limits[tk] = params[tk] * self.max_drives
                self.configure_throttle(self.max_drives, limits)
                self.do_test_throttle(1, 5, limits, drive)

    def test_burst(self):
        params = {"bps": 4096,
                  "bps_rd": 4096,
                  "bps_wr": 4096,
                  "iops": 10,
                  "iops_rd": 10,
                  "iops_wr": 10,
                 }
        ndrives = 1
        # Test each throttling parameter on its own
        for tk in params:
            rate = params[tk] * ndrives
            burst_rate = rate * 7
            burst_length = 4
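            # E.g. for the iops limit this means a sustained rate of
            # 10 ops/s, with bursts of up to 70 ops/s allowed for up to
            # 4 seconds.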

            # Configure the throttling settings
            settings = dict([(k, 0) for k in params])
            settings[tk] = rate
            settings['%s_max' % tk] = burst_rate
            settings['%s_max_length' % tk] = burst_length
            self.configure_throttle(ndrives, settings)

            # Wait for the bucket to empty so we can do bursts
            wait_ns = nsec_per_sec * burst_length * burst_rate // rate
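            # (with the values above that is 4 * 7 = 28 virtual seconds,
            # enough time for a full burst bucket to drain at the base rate)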
            self.vm.qtest("clock_step %d" % wait_ns)

            # Test I/O at the max burst rate
            limits = dict([(k, 0) for k in params])
            limits[tk] = burst_rate
            self.do_test_throttle(ndrives, burst_length, limits)

            # Now test I/O at the normal rate
            limits[tk] = rate
            self.do_test_throttle(ndrives, 5, limits)

    # Test that removing a drive from a throttle group does not affect
    # the remaining members of the group.
    # https://bugzilla.redhat.com/show_bug.cgi?id=1535914
    def test_remove_group_member(self):
        # Create a throttle group with two drives
        # and set a 4 KB/s read limit.
        params = {"bps": 0,
                  "bps_rd": 4096,
                  "bps_wr": 0,
                  "iops": 0,
                  "iops_rd": 0,
                  "iops_wr": 0 }
        self.configure_throttle(2, params)

        # Read 4KB from drive0. This is performed immediately.
        self.vm.hmp_qemu_io("drive0", "aio_read 0 4096")

        # Read 2KB. The I/O limit has been exceeded so this
        # request is throttled and a timer is set to wake it up.
        self.vm.hmp_qemu_io("drive0", "aio_read 0 2048")

        # Read 2KB again. We're still over the I/O limit so this
        # request is also throttled, but no new timer is set since
        # there's already one.
        self.vm.hmp_qemu_io("drive0", "aio_read 0 2048")

        # Read from drive1. This request is also throttled, and no
        # timer is set in drive1 because there's already one in
        # drive0.
        self.vm.hmp_qemu_io("drive1", "aio_read 0 4096")

        # At this point only the first 4KB have been read from drive0.
        # The other requests are throttled.
        self.assertEqual(self.blockstats('drive0')[0], 4096)
        self.assertEqual(self.blockstats('drive1')[0], 0)

        # Remove drive0 from the throttle group and disable its I/O limits.
        # drive1 remains in the group with a throttled request.
        params['bps_rd'] = 0
        params['device'] = 'drive0'
        result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **params)
        self.assert_qmp(result, 'return', {})

        # Removing the I/O limits from drive0 drains its two pending requests.
        # The read request in drive1 is still throttled.
        self.assertEqual(self.blockstats('drive0')[0], 8192)
        self.assertEqual(self.blockstats('drive1')[0], 0)

        # Advance the clock 5 seconds. This completes the request in drive1
        self.vm.qtest("clock_step %d" % (5 * nsec_per_sec))

        # Now all requests have been processed.
        self.assertEqual(self.blockstats('drive0')[0], 8192)
        self.assertEqual(self.blockstats('drive1')[0], 4096)

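# Repeat all of the tests above with the coroutine-based null-co driver
# instead of null-aio.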
class ThrottleTestCoroutine(ThrottleTestCase):
    test_driver = "null-co"

class ThrottleTestGroupNames(iotests.QMPTestCase):
    max_drives = 3

    def setUp(self):
        self.vm = iotests.VM()
        for i in range(0, self.max_drives):
            self.vm.add_drive("null-co://",
                              "throttling.iops-total=100,file.read-zeroes=on")
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()

    def set_io_throttle(self, device, params):
        params["device"] = device
        result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **params)
        self.assert_qmp(result, 'return', {})

    def verify_name(self, device, name):
        result = self.vm.qmp("query-block")
        for r in result["return"]:
            if r["device"] == device:
                info = r["inserted"]
                if name:
                    self.assertEqual(info["group"], name)
                else:
                    self.assertFalse('group' in info)
                return

        raise Exception("No group information found for '%s'" % device)

    def test_group_naming(self):
        params = {"bps": 0,
                  "bps_rd": 0,
                  "bps_wr": 0,
                  "iops": 0,
                  "iops_rd": 0,
                  "iops_wr": 0}

        # Check the drives added using the command line.
        # The default throttling group name is the device name.
        for i in range(self.max_drives):
            devname = "drive%d" % i
            self.verify_name(devname, devname)

        # Clear throttling settings => the group name is gone.
        for i in range(self.max_drives):
            devname = "drive%d" % i
            self.set_io_throttle(devname, params)
            self.verify_name(devname, None)

        # Set throttling settings using block_set_io_throttle and
        # check the default group names.
        params["iops"] = 10
        for i in range(self.max_drives):
            devname = "drive%d" % i
            self.set_io_throttle(devname, params)
            self.verify_name(devname, devname)

        # Set a custom group name for each device
        for i in range(3):
            devname = "drive%d" % i
            groupname = "group%d" % i
            params['group'] = groupname
            self.set_io_throttle(devname, params)
            self.verify_name(devname, groupname)

        # Put drive0 in group1 and check that all other devices remain
        # unchanged
        params['group'] = 'group1'
        self.set_io_throttle('drive0', params)
        self.verify_name('drive0', 'group1')
        for i in range(1, self.max_drives):
            devname = "drive%d" % i
            groupname = "group%d" % i
            self.verify_name(devname, groupname)

        # Put drive0 in group2 and check that all other devices remain
        # unchanged
        params['group'] = 'group2'
        self.set_io_throttle('drive0', params)
        self.verify_name('drive0', 'group2')
        for i in range(1, self.max_drives):
            devname = "drive%d" % i
            groupname = "group%d" % i
            self.verify_name(devname, groupname)

        # Clear throttling settings from drive0 and check that all other
        # devices remain unchanged
        params["iops"] = 0
        self.set_io_throttle('drive0', params)
        self.verify_name('drive0', None)
        for i in range(1, self.max_drives):
            devname = "drive%d" % i
            groupname = "group%d" % i
            self.verify_name(devname, groupname)

class ThrottleTestRemovableMedia(iotests.QMPTestCase):
    def setUp(self):
        self.vm = iotests.VM()
        self.vm.add_device("{},id=virtio-scsi".format(
            iotests.get_virtio_scsi_device()))
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()

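    # Test that I/O limits set on a scsi-cd device are kept when the
    # inserted medium changes, and that they cannot be set while the
    # drive has no medium.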
    def test_removable_media(self):
        # Add a couple of dummy nodes named cd0 and cd1
        result = self.vm.qmp("blockdev-add", driver="null-co",
                             read_zeroes=True, node_name="cd0")
        self.assert_qmp(result, 'return', {})
        result = self.vm.qmp("blockdev-add", driver="null-co",
                             read_zeroes=True, node_name="cd1")
        self.assert_qmp(result, 'return', {})

        # Attach a CD drive with cd0 inserted
        result = self.vm.qmp("device_add", driver="scsi-cd",
                             id="dev0", drive="cd0")
        self.assert_qmp(result, 'return', {})

        # Set I/O limits
        args = { "id": "dev0", "iops": 100, "iops_rd": 0, "iops_wr": 0,
                               "bps":  50,  "bps_rd": 0,  "bps_wr": 0 }
        result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **args)
        self.assert_qmp(result, 'return', {})

        # Check that the I/O limits have been set
        result = self.vm.qmp("query-block")
        self.assert_qmp(result, 'return[0]/inserted/iops', 100)
        self.assert_qmp(result, 'return[0]/inserted/bps',   50)

        # Now eject cd0 and insert cd1
        result = self.vm.qmp("blockdev-open-tray", id='dev0')
        self.assert_qmp(result, 'return', {})
        result = self.vm.qmp("blockdev-remove-medium", id='dev0')
        self.assert_qmp(result, 'return', {})
        result = self.vm.qmp("blockdev-insert-medium", id='dev0', node_name='cd1')
        self.assert_qmp(result, 'return', {})

        # Check that the I/O limits are still the same
        result = self.vm.qmp("query-block")
        self.assert_qmp(result, 'return[0]/inserted/iops', 100)
        self.assert_qmp(result, 'return[0]/inserted/bps',   50)

        # Eject cd1
        result = self.vm.qmp("blockdev-remove-medium", id='dev0')
        self.assert_qmp(result, 'return', {})

        # Check that we can't set limits if the device has no medium
        result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **args)
        self.assert_qmp(result, 'error/class', 'GenericError')

        # Remove the CD drive
        result = self.vm.qmp("device_del", id='dev0')
        self.assert_qmp(result, 'return', {})


if __name__ == '__main__':
    if 'null-co' not in iotests.supported_formats():
        iotests.notrun('null-co driver support missing')
    iotests.main(supported_fmts=["raw"])