# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import absolute_import, print_function

import json
import os
import re
import shutil
import sys
from abc import ABCMeta, abstractmethod, abstractproperty
from argparse import ArgumentParser
from collections import defaultdict

from mozbuild.base import MozbuildObject, BuildEnvironmentNotFoundException
from mozprocess import ProcessHandler

here = os.path.abspath(os.path.dirname(__file__))
build = MozbuildObject.from_environment(cwd=here)

JSSHELL_NOT_FOUND = """
Could not detect a JS shell. Either make sure you have a non-artifact build
with `ac_add_options --enable-js-shell` or specify it with `--binary`.
""".strip()


class Benchmark(object):
    __metaclass__ = ABCMeta
    lower_is_better = True
    should_alert = False

    def __init__(self, shell, args=None, shell_name=None):
        self.shell = shell
        self.args = args
        self.shell_name = shell_name
        # Cache for the lazily-read VERSION file (see the `version` property).
        self._version = None

    @abstractproperty
    def units(self):
        """Returns the unit of measurement of the benchmark."""

    @abstractproperty
    def name(self):
        """Returns the string name of the benchmark."""

    @abstractproperty
    def path(self):
        """Return the path to the benchmark relative to topsrcdir."""

    @abstractmethod
    def process_line(self, line):
        """Process a line of stdout from the benchmark."""

    @abstractmethod
    def collect_results(self):
        """Build the result after the process has finished."""

    @property
    def command(self):
        """Returns the command to run as a list."""
        cmd = [self.shell]
        if self.args:
            cmd += self.args
        return cmd

    @property
    def version(self):
        if self._version:
            return self._version

        with open(os.path.join(self.path, 'VERSION'), 'r') as fh:
            self._version = fh.read().strip("\r\n \t")
        return self._version

    def reset(self):
        """Resets state between runs."""
        name = self.name
        if self.shell_name:
            name = '{}-{}'.format(name, self.shell_name)

        self.perfherder_data = {
            'framework': {
                'name': 'js-bench',
            },
            'suites': [
                {
                    'lowerIsBetter': self.lower_is_better,
                    'name': name,
                    'shouldAlert': self.should_alert,
                    'subtests': [],
                    'units': self.units,
                    'value': None,
                },
            ],
        }
        self.suite = self.perfherder_data['suites'][0]
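
    # For reference, after `collect_results()` has filled in the subtests, the
    # emitted perfherder_data ends up shaped roughly like the payload below.
    # The subtest names and numbers are hypothetical, shown only to illustrate
    # the structure built above:
    #
    #   {
    #       "framework": {"name": "js-bench"},
    #       "suites": [{
    #           "lowerIsBetter": True,
    #           "name": "ares6",
    #           "shouldAlert": False,
    #           "subtests": [{"name": "ares6-firstIteration", "value": 21.4}],
    #           "units": "ms",
    #           "value": 24.8,
    #       }],
    #   }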

    def _provision_benchmark_script(self):
        if os.path.isdir(self.path):
            return

        # Some benchmarks may have been downloaded from a fetch task; make
        # sure they get copied over.
        fetches_dir = os.environ.get('MOZ_FETCHES_DIR')
        if fetches_dir and os.path.isdir(fetches_dir):
            fetchdir = os.path.join(fetches_dir, self.name)
            if os.path.isdir(fetchdir):
                shutil.copytree(fetchdir, self.path)

    def run(self):
        self.reset()

        # Update the environment variables
        env = os.environ.copy()

        # Disable GC poisoning (bug 1499043).
        env['JSGC_DISABLE_POISONING'] = '1'

        process_args = {
            'cmd': self.command,
            'cwd': self.path,
            'onFinish': self.collect_results,
            'processOutputLine': self.process_line,
            'stream': sys.stdout,
            'env': env,
        }
        proc = ProcessHandler(**process_args)
        proc.run()
        return proc.wait()


class RunOnceBenchmark(Benchmark):
    def collect_results(self):
        bench_total = 0
        # NOTE: for this benchmark we run the test once, so we have a single value array
        for bench, scores in self.scores.items():
            for score, values in scores.items():
                test_name = "{}-{}".format(self.name, score)
                mean = sum(values) / len(values)
                self.suite['subtests'].append({'name': test_name, 'value': mean})
                bench_total += int(sum(values))
        self.suite['value'] = bench_total


class Ares6(Benchmark):
    name = 'ares6'
    path = os.path.join('third_party', 'webkit', 'PerformanceTests', 'ARES-6')
    units = 'ms'

    @property
    def command(self):
        cmd = super(Ares6, self).command
        return cmd + ['cli.js']

    def reset(self):
        super(Ares6, self).reset()

        self.bench_name = None
        self.last_summary = None
        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def _try_find_score(self, score_name, line):
        m = re.search(score_name + r':\s*(\d+\.?\d*?) (\+-)?.+', line)
        if not m:
            return False

        score = m.group(1)
        self.scores[self.bench_name][score_name].append(float(score))
        return True

    def process_line(self, line):
        m = re.search(r"Running... (.+) \(.+\)", line)
        if m:
            self.bench_name = m.group(1)
            return

        if self._try_find_score('firstIteration', line):
            return

        if self._try_find_score('averageWorstCase', line):
            return

        if self._try_find_score('steadyState', line):
            return

        m = re.search(r'summary:\s*(\d+\.?\d*?) (\+-)?.+', line)
        if m:
            self.last_summary = float(m.group(1))
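
    # As a sketch of what the regexes above are matching, the ARES-6 CLI
    # prints per-benchmark blocks along these lines (names and values are
    # illustrative, not actual output):
    #
    #   Running... Air (1 iteration)
    #   firstIteration:     52.31 +- 1.20 ms
    #   averageWorstCase:   30.05 +- 0.80 ms
    #   steadyState:        24.91 +- 0.50 ms
    #   summary:            34.12 +- 0.70 ms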

    def collect_results(self):
        for bench, scores in self.scores.items():
            for score, values in scores.items():
                mean = sum(values) / len(values)
                test_name = "{}-{}".format(bench, score)
                self.suite['subtests'].append({'name': test_name, 'value': mean})

        if self.last_summary:
            self.suite['value'] = self.last_summary


class SixSpeed(RunOnceBenchmark):
    name = 'six-speed'
    path = os.path.join('third_party', 'webkit', 'PerformanceTests', 'six-speed')
    units = 'ms'

    @property
    def command(self):
        cmd = super(SixSpeed, self).command
        return cmd + ['test.js']

    def reset(self):
        super(SixSpeed, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, output):
        m = re.search(r"(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))
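
    # The six-speed driver prints one "<subtest>: <milliseconds>" line per
    # test, which is what the regex above captures. For example (subtest names
    # and values are hypothetical):
    #
    #   templatestring: 354
    #   spread-literal: 127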


class SunSpider(RunOnceBenchmark):
    name = 'sunspider'
    path = os.path.join('third_party', 'webkit', 'PerformanceTests', 'SunSpider', 'sunspider-0.9.1')
    units = 'ms'

    @property
    def command(self):
        cmd = super(SunSpider, self).command
        return cmd + ['sunspider-standalone-driver.js']

    def reset(self):
        super(SunSpider, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, output):
        m = re.search(r"(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))


class WebToolingBenchmark(Benchmark):
    name = 'web-tooling-benchmark'
    path = os.path.join('third_party', 'webkit', 'PerformanceTests', 'web-tooling-benchmark')
    main_js = 'cli.js'
    units = 'score'
    lower_is_better = False
    subtests_lower_is_better = False

    @property
    def command(self):
        cmd = super(WebToolingBenchmark, self).command
        return cmd + [self.main_js]

    def reset(self):
        super(WebToolingBenchmark, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, output):
        m = re.search(r" +([a-zA-Z].+): +([.0-9]+) +runs/sec", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(float(score))
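
    # The regex above expects indented per-tool throughput lines of roughly
    # this shape; the tool names and numbers below are illustrative, not
    # actual benchmark output:
    #
    #    acorn:  5.42 runs/sec
    #    babel:  6.11 runs/sec
    #    mean:   5.75 runs/sec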

    def collect_results(self):
        # NOTE: for this benchmark we run the test once, so we have a single value array
        for bench, scores in self.scores.items():
            for score_name, values in scores.items():
                test_name = "{}-{}".format(self.name, score_name)
                mean = sum(values) / len(values)
                self.suite['subtests'].append({
                    'lowerIsBetter': self.subtests_lower_is_better,
                    'name': test_name,
                    'value': mean,
                })
                if score_name == 'mean':
                    bench_mean = mean
        self.suite['value'] = bench_mean

    def run(self):
        self._provision_benchmark_script()
        return super(WebToolingBenchmark, self).run()


class Octane(RunOnceBenchmark):
    name = 'octane'
    path = os.path.join('third_party', 'webkit', 'PerformanceTests', 'octane')
    units = 'score'
    lower_is_better = False

    @property
    def command(self):
        cmd = super(Octane, self).command
        return cmd + ['run.js']

    def reset(self):
        super(Octane, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, output):
        m = re.search(r"(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest.startswith('Score'):
            subtest = 'score'
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))
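
    # Octane's run.js prints one line per sub-benchmark plus a final aggregate
    # "Score" line, which the startswith('Score') check above folds into the
    # 'score' subtest. Roughly (numbers are illustrative):
    #
    #   Richards: 35736
    #   DeltaBlue: 48329
    #   Score (version 9): 41230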

    def collect_results(self):
        # NOTE: for this benchmark we run the test once, so we have a single value array
        for bench, scores in self.scores.items():
            for score_name, values in scores.items():
                test_name = "{}-{}".format(self.name, score_name)
                mean = sum(values) / len(values)
                self.suite['subtests'].append({'name': test_name, 'value': mean})
                if score_name == 'score':
                    bench_score = mean
        self.suite['value'] = bench_score

    def run(self):
        self._provision_benchmark_script()
        return super(Octane, self).run()


all_benchmarks = {
    'ares6': Ares6,
    'six-speed': SixSpeed,
    'sunspider': SunSpider,
    'web-tooling-benchmark': WebToolingBenchmark,
    'octane': Octane,
}


def run(benchmark, binary=None, extra_args=None, perfherder=None):
    if not binary:
        try:
            binary = os.path.join(build.bindir, 'js' + build.substs['BIN_SUFFIX'])
        except BuildEnvironmentNotFoundException:
            binary = None

    if not binary or not os.path.isfile(binary):
        print(JSSHELL_NOT_FOUND)
        return 1

    bench = all_benchmarks.get(benchmark)(binary, args=extra_args, shell_name=perfherder)
    res = bench.run()

    if perfherder:
        print("PERFHERDER_DATA: {}".format(json.dumps(bench.perfherder_data)))
    return res


def get_parser():
    parser = ArgumentParser()
    parser.add_argument('benchmark', choices=all_benchmarks.keys(),
                        help="The name of the benchmark to run.")
    parser.add_argument('-b', '--binary', default=None,
                        help="Path to the JS shell binary to use.")
    parser.add_argument('--arg', dest='extra_args', action='append', default=None,
                        help="Extra arguments to pass to the JS shell.")
    parser.add_argument('--perfherder', default=None,
                        help="Log PERFHERDER_DATA to stdout using the given suite name.")
    return parser


def cli(args=sys.argv[1:]):
    parser = get_parser()
    args = parser.parse_args(args)
    return run(**vars(args))


if __name__ == '__main__':
    sys.exit(cli())
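
# For reference, an invocation matching the parser above might look like the
# following; the shell path and suite name are hypothetical:
#
#   python benchmark.py octane --binary /path/to/objdir/dist/bin/js --perfherder spidermonkey
#
# When --perfherder is given, the collected results are also printed as a
# PERFHERDER_DATA line so they can be picked up by the performance dashboard.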