From 05d9053ff446f888b3b497937e92e2337ec5c9c6 Mon Sep 17 00:00:00 2001
From: Kirill Smelkov
Date: Mon, 1 Sep 2008 23:36:30 +0400
Subject: [PATCH] py.bench -- py.test based benchmarking

The idea is to reuse py.test infrastructure to

 o collect tests
 o run collected tests
 o present test run results

and substitute 'test' -> 'benchmark'.

What we do is

 1. collect benchmark files that start with the 'bench_' prefix

 2. execute two kinds of benchmarks

    a. 'bench_', and
    b. 'timeit_'

    For bench_XXX we simply measure the time it takes to execute, and for
    timeit_XXX we in essence do IPython's %timeit on it.

For actually running benchmarks, a small py.test-like utility is provided
and installed for the benefit of all.

Signed-off-by: Kirill Smelkov
Signed-off-by: Ondrej Certik
---
 bin/py.bench                    |  18 ++++
 setup.py                        |   2 +-
 sympy/utilities/benchmarking.py | 233 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 252 insertions(+), 1 deletion(-)
 create mode 100755 bin/py.bench
 create mode 100644 sympy/utilities/benchmarking.py

diff --git a/bin/py.bench b/bin/py.bench
new file mode 100755
index 0000000..65092db
--- /dev/null
+++ b/bin/py.bench
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# hook in-tree SymPy into Python path, if possible
+# TODO this should be shared with isympy
+import os, sys
+
+isympy_dir = os.path.dirname(__file__)         # bin/isympy
+sympy_top  = os.path.split(isympy_dir)[0]      # ../
+sympy_dir  = os.path.join(sympy_top, 'sympy')  # ../sympy/
+
+if os.path.isdir(sympy_dir):
+    sys.path.insert(0, sympy_top)
+
+
+from sympy.utilities import benchmarking
+
+if __name__ == '__main__':
+    benchmarking.main()
diff --git a/setup.py b/setup.py
index 11500f2..5471538 100755
--- a/setup.py
+++ b/setup.py
@@ -310,7 +310,7 @@ setup(
     url = 'http://code.google.com/p/sympy',
     packages = ['sympy'] + [ m[1] for m in modules ] + tests + \
         pyglet_packages,
-    scripts = ['bin/isympy'],
+    scripts = ['bin/isympy', 'bin/py.bench'],
     ext_modules = [],
     package_data = { 'sympy.utilities.mathml' : ['data/*.xsl'] },
     data_files = [('share/man/man1', ['doc/man/isympy.1'])],
diff --git a/sympy/utilities/benchmarking.py b/sympy/utilities/benchmarking.py
new file mode 100644
index 0000000..eb906f7
--- /dev/null
+++ b/sympy/utilities/benchmarking.py
@@ -0,0 +1,233 @@
+"""benchmarking through py.test"""
+
+import py
+from py.__.test.item import Item
+from py.__.test.terminal.terminal import TerminalSession
+
+from math import ceil, floor, log10
+from time import time
+import timeit
+
+from inspect import getsource
+
+
+# from IPython.Magic.magic_timeit
+#units = ["s", "ms", "\xc2\xb5s", "ns"]
+units = ["s", "ms", "us", "ns"]
+scaling = [1, 1e3, 1e6, 1e9]
+
+unitn = dict((s,i) for i,s in enumerate(units))
+
+precision = 3
+
+
+# like py.test Directory but scan for 'bench_*.py'
+class Directory(py.test.collect.Directory):
+
+    def filefilter(self, path):
+        b = path.purebasename
+        ext = path.ext
+        return b.startswith('bench_') and ext == '.py'
+
+
+# like py.test Module but scan for 'bench_' and 'timeit_'
+class Module(py.test.collect.Module):
+
+    def funcnamefilter(self, name):
+        return name.startswith('bench_') or name.startswith('timeit_')
+
+
+# Function level benchmarking driver
+class Timer(timeit.Timer):
+
+    def __init__(self, stmt, setup='pass', timer=timeit.default_timer, globals=globals()):
+        # copy of timeit.Timer.__init__
+        # similarity index 95%
+        self.timer = timer
+        stmt = timeit.reindent(stmt, 8)
+        setup = timeit.reindent(setup, 4)
+        src = timeit.template % {'stmt': stmt, 'setup': setup}
+        self.src = src  # Save for traceback display
+        code = compile(src, timeit.dummy_src_name, "exec")
+        ns = {}
+        #exec code in globals(), ns    -- original timeit code
+        exec code in globals, ns       # -- we use caller-provided globals instead
+        self.inner = ns["inner"]
+
+
+
+class Function(py.__.test.item.Function):
+
+    def __init__(self, *args, **kw):
+        super(Function, self).__init__(*args, **kw)
+        self.benchtime = None
+        self.benchtitle = None
+
+
+    def execute(self, target, *args):
+        # get func source without the first 'def func(...):' line
+        src = getsource(target)
+        src = '\n'.join( src.splitlines()[1:] )
+
+        # extract benchmark title
+        if target.func_doc is not None:
+            self.benchtitle = target.func_doc
+        else:
+            self.benchtitle = src.splitlines()[0].strip()
+
+
+        # XXX we ignore args
+        timer = Timer(src, globals=target.func_globals)
+
+        if self.name.startswith('timeit_'):
+            # from IPython.Magic.magic_timeit
+            repeat = 3
+            number = 1
+            for i in range(1, 10):
+                t = timer.timeit(number)
+
+                if t >= 0.2:
+                    number *= (0.2 / t)
+                    number = int(ceil(number))
+                    break
+
+                if t <= 0.02:
+                    # we are not close enough to that 0.2s
+                    number *= 10
+
+                else:
+                    # since we are very close to being > 0.2s we'd better adjust number
+                    # so that the timing time is not too high
+                    number *= (0.2 / t)
+                    number = int(ceil(number))
+                    break
+
+
+            self.benchtime = min(timer.repeat(repeat, number)) / number
+
+        # 'bench_'
+        else:
+            self.benchtime = timer.timeit(1)
+
+
+class BenchSession(TerminalSession):
+
+    def header(self, colitems):
+        #self.out.sep("-", "benchmarking starts")
+        super(BenchSession, self).header(colitems)
+
+    def footer(self, colitems):
+        super(BenchSession, self).footer(colitems)
+        #self.out.sep("-", "benchmarking ends")
+
+        self.out.write('\n')
+        self.print_bench_results()
+
+
+    def print_bench_results(self):
+        self.out.write('==============================\n')
+        self.out.write(' *** BENCHMARKING RESULTS *** \n')
+        self.out.write('==============================\n')
+        self.out.write('\n')
+
+        # benchname, time, benchtitle
+        results = []
+
+        for item, outcome in self._memo:
+            if isinstance(item, Item):
+
+                best = item.benchtime
+
+                if best is None:
+                    # skipped or failed benchmarks
+                    tstr = '---'
+
+                else:
+                    # from IPython.Magic.magic_timeit
+                    if best > 0.0:
+                        order = min(-int(floor(log10(best)) // 3), 3)
+                    else:
+                        order = 3
+
+                    tstr = "%.*g %s" % (precision, best * scaling[order], units[order])
+
+                results.append( [item.name, tstr, item.benchtitle] )
+
+        # dot/unit align second column
+        # FIXME simpler? this is crappy -- shame on me...
+        wm = [0]*len(units)
+        we = [0]*len(units)
+
+        for s in results:
+            tstr = s[1]
+            n,u = tstr.split()
+
+            # unit n
+            un = unitn[u]
+
+            try:
+                m,e = n.split('.')
+            except ValueError:
+                m,e = n,''
+
+            wm[un] = max(len(m), wm[un])
+            we[un] = max(len(e), we[un])
+
+        for s in results:
+            tstr = s[1]
+            n,u = tstr.split()
+
+            un = unitn[u]
+
+            try:
+                m,e = n.split('.')
+            except ValueError:
+                m,e = n,''
+
+            m = m.rjust(wm[un])
+            e = e.ljust(we[un])
+
+            if e.strip():
+                n = '.'.join((m,e))
+            else:
+                n = ' '.join((m,e))
+
+
+            # let's put the number into the right place
+            txt = ''
+            for i in range(len(units)):
+                if i == un:
+                    txt += n
+                else:
+                    txt += ' '*(wm[i]+we[i]+1)
+
+            s[1] = '%s %s' % (txt, u)
+
+
+        # align all columns besides the last one
+        for i in range(2):
+            w = max(len(s[i]) for s in results)
+
+            for s in results:
+                s[i] = s[i].ljust(w)
+
+        # show results
+        for s in results:
+            self.out.write('%s | %s | %s\n' % tuple(s))
+
+
+def main(args=None):
+    # hook our Directory/Module/Function as defaults
+    from py.__.test import defaultconftest
+
+    defaultconftest.Directory = Directory
+    defaultconftest.Module = Module
+    defaultconftest.Function = Function
+
+    # hook BenchSession as py.test session
+    config = py.test.config
+    config._getsessionclass = lambda : BenchSession
+
+    py.test.cmdline.main(args)
+
+
--
2.11.4.GIT
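
Editor's note: for reference, a benchmark module that this collector would pick
up might look like the sketch below. It is not part of the patch; the file name
bench_sample.py, the expressions being timed, and the docstrings are purely
illustrative. bench_* functions are timed once via Timer.timeit(1), while
timeit_* functions go through the %timeit-style calibration loop shown above;
the docstring (or, failing that, the first body line) becomes the benchmark
title in the results table.

    # bench_sample.py -- hypothetical example, not included in this patch
    from sympy import Symbol, expand

    x = Symbol('x')

    def bench_expand_100():
        """expand((x + 1)**100)"""
        expand((x + 1)**100)

    def timeit_expand_10():
        """expand((x + 1)**10)"""
        expand((x + 1)**10)

Running bin/py.bench from the source tree (or py.bench once installed) would
then collect such files, time the functions, and print the aligned results
table produced by BenchSession.print_bench_results().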