# Tests for block device statistics
#
# Copyright (C) 2015 Igalia, S.L.
# Author: Alberto Garcia <berto@igalia.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import iotests
import os

nsec_per_sec = 1000000000
op_latency = nsec_per_sec // 1000  # See qtest_latency_ns in accounting.c
bad_offset = bad_sector * 512
blkdebug_file = os.path.join(iotests.test_dir, 'blkdebug.conf')

class BlockDeviceStatsTestCase(iotests.QMPTestCase):
    test_img = "null-aio://"
    total_rd_bytes = 0
    total_rd_ops = 0
    total_wr_bytes = 0
    total_wr_ops = 0
    total_wr_merged = 0
    total_flush_ops = 0
    failed_rd_ops = 0
    failed_wr_ops = 0
    invalid_rd_ops = 0
    invalid_wr_ops = 0
    wr_highest_offset = 0
    account_invalid = False
    account_failed = False
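
    # Return the 'stats' dict that query-blockstats reports for the
    # given device name.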
    def blockstats(self, device):
        result = self.vm.qmp("query-blockstats")
        for r in result['return']:
            if r['device'] == device:
                return r['stats']
        raise Exception("Device not found for blockstats: %s" % device)
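
    # Write a blkdebug config that injects an I/O error for reads and
    # writes at bad_sector, so that failed operations can be triggered
    # deterministically.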
    def create_blkdebug_file(self):
        file = open(blkdebug_file, 'w')
        file.write('''
[inject-error]
event = "read_aio"
errno = "5"
sector = "%d"

[inject-error]
event = "write_aio"
errno = "5"
sector = "%d"
''' % (bad_sector, bad_sector))
        file.close()

    def setUp(self):
        drive_args = []
        drive_args.append("stats-intervals.0=%d" % interval_length)
        drive_args.append("stats-account-invalid=%s" %
                          (self.account_invalid and "on" or "off"))
        drive_args.append("stats-account-failed=%s" %
                          (self.account_failed and "on" or "off"))
        self.create_blkdebug_file()
        self.vm = iotests.VM().add_drive('blkdebug:%s:%s ' %
                                         (blkdebug_file, self.test_img),
                                         ','.join(drive_args))
        self.vm.launch()
        # Set an initial value for the clock
        self.vm.qtest("clock_step %d" % nsec_per_sec)

    def tearDown(self):
        self.vm.shutdown()
        os.remove(blkdebug_file)
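
    # Number of operations that the accounting code is expected to
    # record, depending on the account_invalid / account_failed
    # settings of this test case.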
    def accounted_ops(self, read=False, write=False, flush=False):
        ops = 0
        if write:
            ops += self.total_wr_ops
            if self.account_failed:
                ops += self.failed_wr_ops
            if self.account_invalid:
                ops += self.invalid_wr_ops
        if read:
            ops += self.total_rd_ops
            if self.account_failed:
                ops += self.failed_rd_ops
            if self.account_invalid:
                ops += self.invalid_rd_ops
        if flush:
            ops += self.total_flush_ops
        return ops
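
    # Total latency that the accounting code is expected to record:
    # each accounted operation takes op_latency nanoseconds under
    # qtest, and invalid operations are not timed at all.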
    def accounted_latency(self, read=False, write=False, flush=False):
        latency = 0
        if write:
            latency += self.total_wr_ops * op_latency
            if self.account_failed:
                latency += self.failed_wr_ops * op_latency
        if read:
            latency += self.total_rd_ops * op_latency
            if self.account_failed:
                latency += self.failed_rd_ops * op_latency
        if flush:
            latency += self.total_flush_ops * op_latency
        return latency
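
    # Check the expected values kept by this test against what
    # query-blockstats actually reports for drive0.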
    def check_values(self):
        stats = self.blockstats('drive0')

        # Check that the totals match with what we have calculated
        self.assertEqual(self.total_rd_bytes, stats['rd_bytes'])
        self.assertEqual(self.total_wr_bytes, stats['wr_bytes'])
        self.assertEqual(self.total_rd_ops, stats['rd_operations'])
        self.assertEqual(self.total_wr_ops, stats['wr_operations'])
        self.assertEqual(self.total_flush_ops, stats['flush_operations'])
        self.assertEqual(self.wr_highest_offset, stats['wr_highest_offset'])
        self.assertEqual(self.failed_rd_ops, stats['failed_rd_operations'])
        self.assertEqual(self.failed_wr_ops, stats['failed_wr_operations'])
        self.assertEqual(self.invalid_rd_ops, stats['invalid_rd_operations'])
        self.assertEqual(self.invalid_wr_ops, stats['invalid_wr_operations'])
        self.assertEqual(self.account_invalid, stats['account_invalid'])
        self.assertEqual(self.account_failed, stats['account_failed'])
        self.assertEqual(self.total_wr_merged, stats['wr_merged'])

        # Check that there's exactly one interval with the length we defined
        self.assertEqual(1, len(stats['timed_stats']))
        timed_stats = stats['timed_stats'][0]
        self.assertEqual(interval_length, timed_stats['interval_length'])

        total_rd_latency = self.accounted_latency(read=True)
        if total_rd_latency != 0:
            self.assertEqual(total_rd_latency, stats['rd_total_time_ns'])
            self.assertEqual(op_latency, timed_stats['min_rd_latency_ns'])
            self.assertEqual(op_latency, timed_stats['max_rd_latency_ns'])
            self.assertEqual(op_latency, timed_stats['avg_rd_latency_ns'])
            self.assertLess(0, timed_stats['avg_rd_queue_depth'])
        else:
            self.assertEqual(0, stats['rd_total_time_ns'])
            self.assertEqual(0, timed_stats['min_rd_latency_ns'])
            self.assertEqual(0, timed_stats['max_rd_latency_ns'])
            self.assertEqual(0, timed_stats['avg_rd_latency_ns'])
            self.assertEqual(0, timed_stats['avg_rd_queue_depth'])

        # min read latency <= avg read latency <= max read latency
        self.assertLessEqual(timed_stats['min_rd_latency_ns'],
                             timed_stats['avg_rd_latency_ns'])
        self.assertLessEqual(timed_stats['avg_rd_latency_ns'],
                             timed_stats['max_rd_latency_ns'])

        total_wr_latency = self.accounted_latency(write=True)
        if total_wr_latency != 0:
            self.assertEqual(total_wr_latency, stats['wr_total_time_ns'])
            self.assertEqual(op_latency, timed_stats['min_wr_latency_ns'])
            self.assertEqual(op_latency, timed_stats['max_wr_latency_ns'])
            self.assertEqual(op_latency, timed_stats['avg_wr_latency_ns'])
            self.assertLess(0, timed_stats['avg_wr_queue_depth'])
        else:
            self.assertEqual(0, stats['wr_total_time_ns'])
            self.assertEqual(0, timed_stats['min_wr_latency_ns'])
            self.assertEqual(0, timed_stats['max_wr_latency_ns'])
            self.assertEqual(0, timed_stats['avg_wr_latency_ns'])
            self.assertEqual(0, timed_stats['avg_wr_queue_depth'])

        # min write latency <= avg write latency <= max write latency
        self.assertLessEqual(timed_stats['min_wr_latency_ns'],
                             timed_stats['avg_wr_latency_ns'])
        self.assertLessEqual(timed_stats['avg_wr_latency_ns'],
                             timed_stats['max_wr_latency_ns'])

        total_flush_latency = self.accounted_latency(flush=True)
        if total_flush_latency != 0:
            self.assertEqual(total_flush_latency, stats['flush_total_time_ns'])
            self.assertEqual(op_latency, timed_stats['min_flush_latency_ns'])
            self.assertEqual(op_latency, timed_stats['max_flush_latency_ns'])
            self.assertEqual(op_latency, timed_stats['avg_flush_latency_ns'])
        else:
            self.assertEqual(0, stats['flush_total_time_ns'])
            self.assertEqual(0, timed_stats['min_flush_latency_ns'])
            self.assertEqual(0, timed_stats['max_flush_latency_ns'])
            self.assertEqual(0, timed_stats['avg_flush_latency_ns'])

        # min flush latency <= avg flush latency <= max flush latency
        self.assertLessEqual(timed_stats['min_flush_latency_ns'],
                             timed_stats['avg_flush_latency_ns'])
        self.assertLessEqual(timed_stats['avg_flush_latency_ns'],
                             timed_stats['max_flush_latency_ns'])

        # idle_time_ns must be > 0 if we have performed any operation
        if self.accounted_ops(read=True, write=True, flush=True) != 0:
            self.assertLess(0, stats['idle_time_ns'])
        else:
            self.assertFalse('idle_time_ns' in stats)

        # This test does not alter these, so they must be all 0
        self.assertEqual(0, stats['rd_merged'])
        self.assertEqual(0, stats['failed_flush_operations'])
        self.assertEqual(0, stats['invalid_flush_operations'])
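
    # Perform the requested mix of I/O operations on drive0 (via
    # qemu-io over HMP), update the expected counters accordingly, and
    # then verify the reported statistics.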
    def do_test_stats(self, rd_size=0, rd_ops=0, wr_size=0, wr_ops=0,
                      flush_ops=0, invalid_rd_ops=0, invalid_wr_ops=0,
                      failed_rd_ops=0, failed_wr_ops=0, wr_merged=0):
        # The 'ops' list will contain all the requested I/O operations
        ops = []
        for i in range(rd_ops):
            ops.append("aio_read %d %d" % (i * rd_size, rd_size))

        for i in range(wr_ops):
            ops.append("aio_write %d %d" % (i * wr_size, wr_size))

        for i in range(flush_ops):
            ops.append("aio_flush")

        highest_offset = wr_ops * wr_size

        # Two types of invalid operations: unaligned length and unaligned offset
        for i in range(invalid_rd_ops // 2):
            ops.append("aio_read 0 511")

        for i in range(invalid_rd_ops // 2, invalid_rd_ops):
            ops.append("aio_read 13 512")

        for i in range(invalid_wr_ops // 2):
            ops.append("aio_write 0 511")

        for i in range(invalid_wr_ops // 2, invalid_wr_ops):
            ops.append("aio_write 13 512")

        for i in range(failed_rd_ops):
            ops.append("aio_read %d 512" % bad_offset)

        for i in range(failed_wr_ops):
            ops.append("aio_write %d 512" % bad_offset)

        if failed_wr_ops > 0:
            highest_offset = max(highest_offset, bad_offset + 512)

        for i in range(wr_merged):
            first = i * wr_size * 2
            second = first + wr_size
            ops.append("multiwrite %d %d ; %d %d" %
                       (first, wr_size, second, wr_size))

        highest_offset = max(highest_offset, wr_merged * wr_size * 2)

        # Now perform all operations
        for op in ops:
            self.vm.hmp_qemu_io("drive0", op)

        # Update the expected totals
        self.total_rd_bytes += rd_ops * rd_size
        self.total_rd_ops += rd_ops
        self.total_wr_bytes += wr_ops * wr_size
        self.total_wr_ops += wr_ops
        self.total_wr_merged += wr_merged
        self.total_flush_ops += flush_ops
        self.invalid_rd_ops += invalid_rd_ops
        self.invalid_wr_ops += invalid_wr_ops
        self.failed_rd_ops += failed_rd_ops
        self.failed_wr_ops += failed_wr_ops

        self.wr_highest_offset = max(self.wr_highest_offset, highest_offset)

        # Advance the clock so idle_time_ns has a meaningful value
        self.vm.qtest("clock_step %d" % nsec_per_sec)

        # And check that the actual statistics match the expected ones
        self.check_values()

    def test_read_only(self):
        test_values = [[512, 1],
                       [65536, 1],
                       [512, 12],
                       [65536, 12]]
        for i in test_values:
            self.do_test_stats(rd_size=i[0], rd_ops=i[1])

    def test_write_only(self):
        test_values = [[512, 1],
                       [65536, 1],
                       [512, 12],
                       [65536, 12]]
        for i in test_values:
            self.do_test_stats(wr_size=i[0], wr_ops=i[1])

    def test_invalid(self):
        self.do_test_stats(invalid_rd_ops=7)
        self.do_test_stats(invalid_wr_ops=3)
        self.do_test_stats(invalid_rd_ops=4, invalid_wr_ops=5)

    def test_failed(self):
        self.do_test_stats(failed_rd_ops=8)
        self.do_test_stats(failed_wr_ops=6)
        self.do_test_stats(failed_rd_ops=5, failed_wr_ops=12)

    def test_flush(self):
        self.do_test_stats(flush_ops=8)

    def test_merged(self):
        for i in range(5):
            self.do_test_stats(wr_merged=i * 3)

    def test_all(self):
        # rd_size, rd_ops, wr_size, wr_ops, flush_ops
        # invalid_rd_ops, invalid_wr_ops,
        # failed_rd_ops, failed_wr_ops
        # wr_merged
        test_values = [[512, 1, 512, 1, 1, 4, 7, 5, 2, 1],
                       [65536, 1, 2048, 12, 7, 7, 5, 2, 5, 5],
                       [32768, 9, 8192, 1, 4, 3, 2, 4, 6, 4],
                       [16384, 11, 3584, 16, 9, 8, 6, 7, 3, 4]]
        for i in test_values:
            self.do_test_stats(*i)

    def test_no_op(self):
        # All values must be sane before doing any I/O
        self.check_values()
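

# Repeat all of the above tests with the other combinations of
# stats-account-invalid / stats-account-failed, and with the
# null-co:// driver instead of null-aio://.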
class BlockDeviceStatsTestAccountInvalid(BlockDeviceStatsTestCase):
    account_invalid = True
    account_failed = False

class BlockDeviceStatsTestAccountFailed(BlockDeviceStatsTestCase):
    account_invalid = False
    account_failed = True

class BlockDeviceStatsTestAccountBoth(BlockDeviceStatsTestCase):
    account_invalid = True
    account_failed = True

class BlockDeviceStatsTestCoroutine(BlockDeviceStatsTestCase):
    test_img = "null-co://"

if __name__ == '__main__':
    iotests.main(supported_fmts=["raw"])