#!/usr/bin/python
# Copyright (C) 2017 Free Software Foundation, Inc.
# This file is part of the GNU C Library.
#
# The GNU C Library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# The GNU C Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with the GNU C Library; if not, see
# <http://www.gnu.org/licenses/>.
"""Compare results of string functions

Given a string benchmark result file, print a table with comparisons with a
baseline.  The baseline is the first function, which typically is the builtin
function.
"""
import matplotlib as mpl
mpl.use('Agg')  # Non-interactive backend, so graphs can be saved to file
                # without a display.

import sys
import os
import json
import pylab
import argparse

try:
    import jsonschema as validator
except ImportError:
    print('Could not find jsonschema module.')
    raise
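
# jsonschema is a third-party module; if the import above fails it can
# typically be installed with 'pip install jsonschema' (assumes pip is
# available in the environment).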


def parse_file(filename, schema_filename):
    """Parse the benchmark result file and validate it against the schema."""
    with open(schema_filename, 'r') as schemafile:
        schema = json.load(schemafile)
    with open(filename, 'r') as benchfile:
        bench = json.load(benchfile)
    validator.validate(bench, schema)
    return bench
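
# For reference, a minimal sketch of the result-file shape this script
# consumes (field names are taken from the accesses in process_results below;
# the ifunc names and timings are invented):
#
#   {"functions": {"memcpy": {"bench-variant": "default",
#                             "ifuncs": ["generic_memcpy", "__memcpy_sse2"],
#                             "results": [{"length": 16, "align1": 0,
#                                          "align2": 0,
#                                          "timings": [15.20, 12.80]}]}}}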


def draw_graph(f, v, ifuncs, results):
    """Plot graphs for functions

    Plot line graphs for each of the ifuncs.

    Args:
        f: Function name
        v: Benchmark variant for the function.
        ifuncs: List of ifunc names
        results: Dictionary of results for each test criterion
    """
    print('Generating graph for %s, variant \'%s\'' % (f, v))
    xkeys = list(results.keys())

    pylab.clf()
    fig = pylab.figure(frameon=False)
    fig.set_size_inches(32, 18)
    pylab.ylabel('Performance improvement from base')
    X = range(len(xkeys))
    pylab.xticks(X, xkeys)

    # Draw one line per ifunc; each x tick is one combination of benchmark
    # attributes.
    for i, ifunc in enumerate(ifuncs):
        Y = [results[k][i] for k in xkeys]
        pylab.plot(X, Y, label=':' + ifunc)

    pylab.legend()
    pylab.grid()
    pylab.savefig('%s-%s.png' % (f, v), bbox_inches='tight')


def process_results(results, attrs, base_func, graph):
    """Process results and print them

    Args:
        results: JSON dictionary of results
        attrs: Attributes that form the test criteria
        base_func: Name of the ifunc to use as the baseline; the first
            ifunc is used if this is None
        graph: Whether to plot the results with draw_graph
    """
    for f in results['functions'].keys():
        print('Function: %s' % f)
        v = results['functions'][f]['bench-variant']
        print('Variant: %s' % v)

        base_index = 0
        if base_func:
            base_index = results['functions'][f]['ifuncs'].index(base_func)

        print('%36s%s' % (' ', '\t'.join(results['functions'][f]['ifuncs'])))
        print('=' * 120)
        graph_res = {}
        for res in results['functions'][f]['results']:
            attr_list = ['%s=%s' % (a, res[a]) for a in attrs]
            key = ', '.join(attr_list)
            sys.stdout.write('%36s: ' % key)
            graph_res[key] = res['timings']
            for i, t in enumerate(res['timings']):
                sys.stdout.write('%12.2f' % t)
                if i != base_index:
                    # Improvement over the baseline as a percentage of the
                    # baseline timing; positive means faster than the base.
                    base = res['timings'][base_index]
                    diff = (base - t) * 100 / base
                    sys.stdout.write(' (%6.2f%%)' % diff)
                sys.stdout.write('\t')
            print('')

        if graph:
            draw_graph(f, v, results['functions'][f]['ifuncs'], graph_res)
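
# Illustrative table output for the sketch above (spacing condensed, numbers
# invented; the baseline column prints no percentage):
#
#   Function: memcpy
#   Variant: default
#                        generic_memcpy  __memcpy_sse2
#   ===================================================
#   length=16, align1=0, align2=0: 15.20  12.80 ( 15.79%)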


def main(args):
    """Program Entry Point

    Take a string benchmark output file and compare timings.
    """
    base_func = args.base
    attrs = args.attributes.split(',')

    results = parse_file(args.input, args.schema)
    process_results(results, attrs, base_func, args.graph)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    # The required arguments.
    req = parser.add_argument_group(title='required arguments')
    req.add_argument('-a', '--attributes', required=True,
                     help='Comma separated list of benchmark attributes.')
    req.add_argument('-i', '--input', required=True,
                     help='Input JSON benchmark result file.')
    req.add_argument('-s', '--schema', required=True,
                     help='Schema file to validate the result file.')

    # Optional arguments.
    parser.add_argument('-b', '--base',
                        help='IFUNC variant to set as baseline.')
    parser.add_argument('-g', '--graph', action='store_true',
                        help='Generate a graph from results.')

    args = parser.parse_args()
    main(args)