qemu/ar7.git: scripts/simplebench/simplebench.py
#!/usr/bin/env python
#
# Simple benchmarking framework
#
# Copyright (c) 2019 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import statistics


def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
    """Benchmark one test case.

    test_func   -- benchmarking function with the prototype
                   test_func(env, case); it takes test_env and test_case as
                   arguments and on success returns a dict with a 'seconds'
                   or 'iops' field (or both) specifying the benchmark result.
                   If both 'iops' and 'seconds' are provided, 'iops' is
                   considered the main result and 'seconds' is just
                   additional information. On failure test_func should
                   return {'error': str}. The returned dict may contain any
                   other additional fields.
    test_env    -- test environment - opaque first argument for test_func
    test_case   -- test case - opaque second argument for test_func
    count       -- how many times to call test_func, to calculate the average
    initial_run -- do an initial run of test_func, which does not get into
                   the result

    Returns a dict with the following fields:
        'runs':      list of test_func results
        'dimension': dimension of the results, either 'seconds' or 'iops'
        'average':   average value (iops or seconds) per run (exists only if
                     at least one run succeeded)
        'stdev':     standard deviation of the results
                     (exists only if at least one run succeeded)
        'n-failed':  number of failed runs (exists only if at least one run
                     failed)
    """
    if initial_run:
        print('  #initial run:')
        print('   ', test_func(test_env, test_case))

    runs = []
    for i in range(count):
        print('  #run {}'.format(i + 1))
        res = test_func(test_env, test_case)
        print('   ', res)
        runs.append(res)

    result = {'runs': runs}

    succeeded = [r for r in runs if ('seconds' in r or 'iops' in r)]
    if succeeded:
        if 'iops' in succeeded[0]:
            assert all('iops' in r for r in succeeded)
            dim = 'iops'
        else:
            assert all('seconds' in r for r in succeeded)
            assert all('iops' not in r for r in succeeded)
            dim = 'seconds'
        result['dimension'] = dim
        result['average'] = statistics.mean(r[dim] for r in succeeded)
        result['stdev'] = statistics.stdev(r[dim] for r in succeeded)

    if len(succeeded) < count:
        result['n-failed'] = count - len(succeeded)

    return result
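

# Illustrative sketch, not part of the original module: a test_func that
# follows the protocol documented in bench_one() above. The time.sleep()
# workload and the 'delay' key of test_case are assumptions made only for
# this example.
def example_sleep_test(test_env, test_case):
    """Time a sleep of test_case['delay'] seconds and report 'seconds'."""
    import time  # local import, so the example stays self-contained

    try:
        start = time.time()
        time.sleep(test_case['delay'])
        return {'seconds': time.time() - start}
    except Exception as e:
        return {'error': str(e)}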


def bench(test_func, test_envs, test_cases, *args, **vargs):
    """Fill the benchmark table.

    test_func  -- benchmarking function, see bench_one for a description
    test_envs  -- list of test environments, see bench_one
    test_cases -- list of test cases, see bench_one
    args, vargs -- additional arguments passed through to bench_one

    Returns a dict with the following fields:
        'envs':  test_envs
        'cases': test_cases
        'tab':   filled two-level table, where tab[case['id']][env['id']] is
                 the bench_one result for that test case and test environment
                 (i.e., rows are test cases and columns are test environments)
    """
    tab = {}
    results = {
        'envs': test_envs,
        'cases': test_cases,
        'tab': tab
    }

    n = 1
    n_tests = len(test_envs) * len(test_cases)
    for env in test_envs:
        for case in test_cases:
            print('Testing {}/{}: {} :: {}'.format(n, n_tests,
                                                    env['id'], case['id']))
            if case['id'] not in tab:
                tab[case['id']] = {}
            tab[case['id']][env['id']] = bench_one(test_func, env, case,
                                                   *args, **vargs)
            n += 1

    print('Done')
    return results
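

# Minimal usage sketch (an assumption for illustration, not part of the
# original module): run the example test_func above over two hypothetical
# environments and cases, then print the raw result table.
if __name__ == '__main__':
    envs = [{'id': 'env-A'}, {'id': 'env-B'}]
    cases = [{'id': 'short', 'delay': 0.01}, {'id': 'long', 'delay': 0.05}]
    print(bench(example_sleep_test, envs, cases, count=3, initial_run=False))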