# Copyright (C) 2017-2023 Free Software Foundation, Inc.
# This file is part of the GNU C Library.
#
# The GNU C Library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# The GNU C Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with the GNU C Library; if not, see
# <https://www.gnu.org/licenses/>.
"""Compare results of string functions

Given a string benchmark result file, print a table with comparisons with a
baseline.  The baseline is the first function, which typically is the builtin
function.
"""
import matplotlib as mpl
# Select a non-interactive backend before pylab is imported so that graph
# generation works without a display.
mpl.use('Agg')

import argparse
import json
import math
import os
import sys
import traceback

import pylab

try:
    import jsonschema as validator
except ImportError:
    print('Could not find jsonschema module.')
    raise
def parse_file(filename, schema_filename):
    """Parse and validate a benchmark result file.

    Args:
        filename: Name of the JSON benchmark result file to read.
        schema_filename: Name of the JSON schema file to validate against.

    Returns:
        The benchmark results as a dictionary parsed from FILENAME.

    Exits with os.EX_NOINPUT if either file cannot be read, is not valid
    JSON, or the results do not conform to the schema.
    """
    try:
        with open(schema_filename, 'r') as schemafile:
            schema = json.load(schemafile)
            with open(filename, 'r') as benchfile:
                bench = json.load(benchfile)
                validator.validate(bench, schema)
        return bench
    # Catch Exception (not a bare except) so that SystemExit and
    # KeyboardInterrupt still propagate normally.
    except Exception:
        # Only the innermost frame is useful for a bad input file.
        print(traceback.format_exc(limit=1))
        sys.exit(os.EX_NOINPUT)
def draw_graph(f, v, ifuncs, results):
    """Plot graphs for functions

    Plot line graphs for each of the ifuncs and save the figure as a PNG
    named '<function>-<variant>.png' in the current directory.

    Args:
        f: Function name
        v: Benchmark variant for the function.
        ifuncs: List of ifunc names
        results: Dictionary of results for each test criterion
    """
    print('Generating graph for %s, variant \'%s\'' % (f, v))
    # Materialize the keys so the X axis and per-line Y values iterate
    # the criteria in the same order.
    xkeys = list(results.keys())

    fig = pylab.figure(frameon=False)
    fig.set_size_inches(32, 18)
    pylab.ylabel('Performance improvement from base')
    X = range(len(xkeys))
    pylab.xticks(X, xkeys)

    # One line per ifunc, sampled at every test criterion.
    for i, ifunc in enumerate(ifuncs):
        Y = [results[k][i] for k in xkeys]
        pylab.plot(X, Y, label=':' + ifunc)

    pylab.legend()
    pylab.savefig('%s-%s.png' % (f, v), bbox_inches='tight')
def process_results(results, attrs, funcs, base_func, graph, no_diff,
                    no_header, gmean):
    """ Process results and print them

    Args:
        results: JSON dictionary of results
        attrs: Attributes that form the test criteria
        funcs: Functions that are selected (None selects all ifuncs)
        base_func: Baseline ifunc name (None uses the first selected ifunc)
        graph: When True, draw a graph of the results
        no_diff: When True, omit the percent-difference column
        no_header: When True, omit the per-function header
        gmean: When True, print a geometric-mean row at the end
    """
    for f in results['functions'].keys():
        v = results['functions'][f]['bench-variant']

        # Decide which ifunc columns are selected and which column is the
        # baseline.  SELECTED maps the column index in the timings list to
        # 1 (print) or 0 (skip).
        selected = {}
        index = 0
        base_index = 0
        if funcs:
            ifuncs = []
            first_func = True
            for i in results['functions'][f]['ifuncs']:
                if i in funcs:
                    if first_func:
                        # The first selected ifunc is the default baseline.
                        base_index = index
                        first_func = False
                    selected[index] = 1
                    ifuncs.append(i)
                else:
                    selected[index] = 0
                index += 1
        else:
            ifuncs = results['functions'][f]['ifuncs']
            for i in range(len(ifuncs)):
                selected[i] = 1

        if base_func:
            try:
                base_index = \
                    results['functions'][f]['ifuncs'].index(base_func)
            except ValueError:
                sys.stderr.write('Invalid -b "%s" parameter. Options: %s.\n' %
                                 (base_func,
                                  ', '.join(results['functions'][f]['ifuncs'])))
                sys.exit(os.EX_DATAERR)

        if not no_header:
            print('Function: %s' % f)
            print('Variant: %s' % v)
            print("%36s%s" % (' ', '\t'.join(ifuncs)))
            print("=" * 120)

        graph_res = {}
        if gmean:
            # Accumulate log(timing) per column; the geometric mean is
            # exp(sum(log(t)) / n).
            mean_row = [0 for i in range(len(ifuncs))]

        for res in results['functions'][f]['results']:
            try:
                attr_list = ['%s=%s' % (a, res[a]) for a in attrs]
            except KeyError as ke:
                sys.stderr.write('Invalid -a %s parameter. Options: %s.\n'
                                 % (ke, ', '.join([a for a in res.keys()
                                                   if a != 'timings'])))
                sys.exit(os.EX_DATAERR)

            key = ', '.join(attr_list)
            sys.stdout.write('%36s: ' % key)
            graph_res[key] = res['timings']

            i = 0
            for t in res['timings']:
                if selected[i]:
                    if gmean:
                        mean_row[i] = mean_row[i] + math.log(t)
                    sys.stdout.write('%12.2f' % t)
                    if not no_diff:
                        # Percent improvement over the baseline column.
                        base = res['timings'][base_index]
                        diff = (base - t) * 100 / base
                        sys.stdout.write(' (%6.2f%%)' % diff)
                    sys.stdout.write('\t')
                i += 1
            print('')

        if graph:
            draw_graph(f, v, results['functions'][f]['ifuncs'], graph_res)

        if gmean:
            total = len(results['functions'][f]['results'])
            print('=' * 120)
            sys.stdout.write('Geo-mean (for %s inputs)' % total)
            for m in mean_row:
                sys.stdout.write('%12.2f' % (math.exp(m / total)))
            print('')
def main(args):
    """Program Entry Point

    Take a string benchmark output file and compare timings.

    Args:
        args: Parsed command line arguments (see the argparse setup).

    Exits with os.EX_DATAERR if the requested baseline function is not in
    the selected function list.
    """
    filename = args.input
    schema_filename = args.schema
    base_func = args.base
    attrs = args.attributes.split(',')
    if args.functions:
        funcs = args.functions.split(',')
        # The baseline must be one of the selected functions.
        if base_func and base_func not in funcs:
            print('Baseline function (%s) not found.' % base_func)
            sys.exit(os.EX_DATAERR)
    else:
        # No -f filter: compare every ifunc.
        funcs = None

    results = parse_file(filename, schema_filename)
    process_results(results, attrs, funcs, base_func, args.graph, args.no_diff,
                    args.no_header, args.gmean)
    return os.EX_OK
if __name__ == '__main__':
    # Build the command line interface; -a, -i and -s are mandatory, the
    # rest tune the output.
    parser = argparse.ArgumentParser()

    # The required arguments.
    req = parser.add_argument_group(title='required arguments')
    req.add_argument('-a', '--attributes', required=True,
                     help='Comma separated list of benchmark attributes.')
    req.add_argument('-i', '--input', required=True,
                     help='Input JSON benchmark result file.')
    req.add_argument('-s', '--schema', required=True,
                     help='Schema file to validate the result file.')

    # Optional arguments.
    parser.add_argument('-f', '--functions',
                        help='Comma separated list of functions.')
    parser.add_argument('-b', '--base',
                        help='IFUNC variant to set as baseline.')
    parser.add_argument('-g', '--graph', action='store_true',
                        help='Generate a graph from results.')
    parser.add_argument('--no-diff', action='store_true',
                        help='Do not print the difference from baseline.')
    parser.add_argument('--no-header', action='store_true',
                        help='Do not print the header.')
    parser.add_argument('--gmean', action='store_true',
                        help='Print the geometric mean at the end of the output.')

    args = parser.parse_args()