# Copyright (C) 2017 Free Software Foundation, Inc.
# This file is part of the GNU C Library.

# The GNU C Library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.

# The GNU C Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.

# You should have received a copy of the GNU Lesser General Public
# License along with the GNU C Library; if not, see
# <http://www.gnu.org/licenses/>.
"""Compare results of string functions

Given a string benchmark result file, print a table with comparisons with a
baseline.  The baseline is the first function, which typically is the builtin
function.
"""
import matplotlib as mpl
# Select a non-interactive backend before pylab is imported, so the script
# works without a display (presumably the original intent of importing mpl
# first — confirm against upstream).
mpl.use('Agg')

import json
import os
import sys

import pylab

try:
    import jsonschema as validator
except ImportError:
    print('Could not find jsonschema module.')
    raise
def parse_file(filename, schema_filename):
    """Parse and validate a benchmark result file.

    Args:
        filename: Path to the JSON benchmark result file.
        schema_filename: Path to the JSON schema describing result files.

    Returns:
        The parsed benchmark results as a dictionary.

    Raises:
        jsonschema.exceptions.ValidationError: If the benchmark file does
            not conform to the schema.
    """
    with open(schema_filename, 'r') as schemafile:
        schema = json.load(schemafile)
    with open(filename, 'r') as benchfile:
        bench = json.load(benchfile)
    validator.validate(bench, schema)
    # The caller (main) assigns the result of this call, so the parsed
    # benchmark must be returned.
    return bench
def draw_graph(f, v, ifuncs, results):
    """Plot graphs for functions.

    Plot line graphs for each of the ifuncs and save them to a PNG file
    named '<function>-<variant>.png'.

    Args:
        f: Function name.
        v: Benchmark variant for the function.
        ifuncs: List of ifunc names.
        results: Dictionary of results for each test criterion; each value
            is a list of timings, one per ifunc.
    """
    xkeys = list(results.keys())

    fig = pylab.figure(frameon=False)
    fig.set_size_inches(32, 18)
    pylab.ylabel('Performance improvement from base')
    # X positions are the indices of the test criteria, labelled by key.
    X = range(len(xkeys))
    pylab.xticks(X, xkeys)

    # Draw one line per ifunc: the i-th timing of every key belongs to
    # ifuncs[i].  The garbled original never initialized or advanced i.
    i = 0
    while i < len(ifuncs):
        Y = [results[k][i] for k in xkeys]
        lines = pylab.plot(X, Y, label=':' + ifuncs[i])
        i = i + 1

    pylab.legend()
    pylab.savefig('%s-%s.png' % (f, v), bbox_inches='tight')
    # Close the figure so repeated calls do not accumulate open figures.
    pylab.close(fig)
def process_results(results, attrs, base_func):
    """Process results and print them, then plot a graph per function.

    Args:
        results: JSON dictionary of results.
        attrs: Attributes that form the test criteria.
        base_func: Name of the baseline ifunc, or None to use the first
            ifunc as the baseline.
    """
    for f in results['functions'].keys():
        print('Function: %s' % f)

        # Default to the first ifunc as baseline; index() on None would
        # raise when no -base= argument was given.
        base_index = 0
        if base_func:
            base_index = results['functions'][f]['ifuncs'].index(base_func)

        print('\t'.join(results['functions'][f]['ifuncs']))
        v = results['functions'][f]['bench-variant']
        print('Variant: %s' % v)

        # Collect timings keyed by test criteria for the graph below; must
        # be reset for every function.
        graph_res = {}
        for res in results['functions'][f]['results']:
            attr_list = ['%s=%s' % (a, res[a]) for a in attrs]
            key = ','.join(attr_list)
            sys.stdout.write('%s: \t' % key)
            graph_res[key] = res['timings']
            for t in res['timings']:
                sys.stdout.write('%.2f' % t)
                # Improvement over the baseline ifunc, as a percentage.
                diff = (res['timings'][base_index] - t) * 100 / res['timings'][base_index]
                sys.stdout.write(' (%.2f%%)' % diff)
                sys.stdout.write('\t')
            # Terminate the row started with sys.stdout.write above.
            print('')

        draw_graph(f, v, results['functions'][f]['ifuncs'], graph_res)
def main(args):
    """Program Entry Point.

    Take a string benchmark output file and compare timings.

    Args:
        args: Command-line arguments (sys.argv without the program name):
            input file, schema file, optional -base=ifunc_name, and the
            attribute names that form the test criteria.
    """
    if len(args) < 3:
        print('Usage: %s <input file> <schema file> [-base=ifunc_name] attr1 [attr2 ...]' % sys.argv[0])
        sys.exit(os.EX_USAGE)

    base_func = None
    filename = args[0]
    schema_filename = args[1]
    # The optional -base= argument names the baseline ifunc; everything
    # after it forms the test criteria.
    if args[2].find('-base=') == 0:
        base_func = args[2][6:]
        attrs = args[3:]
    else:
        attrs = args[2:]

    results = parse_file(filename, schema_filename)
    process_results(results, attrs, base_func)
if __name__ == '__main__':
    # Strip the program name; main validates the remaining arguments.
    main(sys.argv[1:])