# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
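
"""Run SpiderMonkey JS shell benchmarks and report their results.

Supported suites are ares6, six-speed, sunspider, web-tooling-benchmark and
octane; results can be emitted as PERFHERDER_DATA when --perfherder is given.
"""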

from __future__ import absolute_import, division, print_function

import json
import os
import re
import shutil
import sys
from abc import ABCMeta, abstractmethod, abstractproperty
from argparse import ArgumentParser
from collections import defaultdict

import six
from mozbuild.base import MozbuildObject, BuildEnvironmentNotFoundException
from mozprocess import ProcessHandler

here = os.path.abspath(os.path.dirname(__file__))
build = MozbuildObject.from_environment(cwd=here)

JSSHELL_NOT_FOUND = """
Could not detect a JS shell. Either make sure you have a non-artifact build
with `ac_add_options --enable-js-shell` or specify it with `--binary`.
""".strip()


@six.add_metaclass(ABCMeta)
class Benchmark(object):
    lower_is_better = True
    should_alert = True
    # Cached contents of the benchmark's VERSION file (see `version` below).
    _version = None

    def __init__(self, shell, args=None, shell_name=None):
        self.shell = shell
        self.args = args
        self.shell_name = shell_name

    @abstractproperty
    def unit(self):
        """Returns the unit of measurement of the benchmark."""

    @abstractproperty
    def name(self):
        """Returns the string name of the benchmark."""

    @abstractproperty
    def path(self):
        """Returns the path to the benchmark relative to topsrcdir."""

    @abstractmethod
    def process_line(self, line):
        """Process a line of stdout from the benchmark."""

    @abstractmethod
    def collect_results(self):
        """Build the result after the process has finished."""

    @property
    def command(self):
        """Returns the command to run as a list."""
        cmd = [self.shell]
        if self.args:
            cmd += self.args
        return cmd

    @property
    def version(self):
        if self._version:
            return self._version

        with open(os.path.join(self.path, "VERSION"), "r") as fh:
            self._version = fh.read().strip("\r\n\r\n \t")
        return self._version

    def reset(self):
        """Resets state between runs."""
        name = self.name
        if self.shell_name:
            name = "{}-{}".format(name, self.shell_name)

        # Perfherder data holds a single suite whose "value" is filled in by
        # collect_results() and whose "subtests" are appended per score.
        self.perfherder_data = {
            "framework": {
                "name": "js-bench",
            },
            "suites": [
                {
                    "lowerIsBetter": self.lower_is_better,
                    "name": name,
                    "shouldAlert": self.should_alert,
                    "subtests": [],
                    "unit": self.unit,
                    "value": None,
                }
            ],
        }
        self.suite = self.perfherder_data["suites"][0]

    def _provision_benchmark_script(self):
        if os.path.isdir(self.path):
            return

        # Some benchmarks may have been downloaded from a fetch task, make
        # sure they get copied over.
        fetches_dir = os.environ.get("MOZ_FETCHES_DIR")
        if fetches_dir and os.path.isdir(fetches_dir):
            fetchdir = os.path.join(fetches_dir, self.name)
            if os.path.isdir(fetchdir):
                shutil.copytree(fetchdir, self.path)

    def run(self):
        self.reset()

        # Update the environment variables
        env = os.environ.copy()

        process_args = {
            "cmd": self.command,
            "cwd": self.path,
            "onFinish": self.collect_results,
            "processOutputLine": self.process_line,
            "stream": sys.stdout,
            "env": env,
            "universal_newlines": True,
        }
        proc = ProcessHandler(**process_args)
        proc.run()
        return proc.wait()


class RunOnceBenchmark(Benchmark):
    def collect_results(self):
        bench_total = 0
        # NOTE: for this benchmark we run the test once, so we have a single value array
        for bench, scores in self.scores.items():
            for score, values in scores.items():
                test_name = "{}-{}".format(self.name, score)
                # pylint --py3k W1619
                mean = sum(values) / len(values)
                self.suite["subtests"].append({"name": test_name, "value": mean})
                bench_total += int(sum(values))
        self.suite["value"] = bench_total
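

# A minimal sketch (hypothetical "MyBench" suite, not part of this harness) of
# how a new benchmark would plug into the classes above: subclass
# RunOnceBenchmark, point `path` at the benchmark sources, and parse one score
# per line of shell output. The real suites are defined below.
#
#   class MyBench(RunOnceBenchmark):
#       name = "my-bench"
#       path = os.path.join("third_party", "my-bench")
#       unit = "ms"
#
#       def reset(self):
#           super(MyBench, self).reset()
#           self.scores = defaultdict(lambda: defaultdict(list))
#
#       def process_line(self, line):
#           m = re.search(r"(.+): (\d+)", line)
#           if m:
#               self.scores[self.name][m.group(1)].append(int(m.group(2)))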


class Ares6(Benchmark):
    name = "ares6"
    path = os.path.join("third_party", "webkit", "PerformanceTests", "ARES-6")
    unit = "ms"

    @property
    def command(self):
        cmd = super(Ares6, self).command
        return cmd + ["cli.js"]

    def reset(self):
        super(Ares6, self).reset()

        self.bench_name = None
        self.last_summary = None
        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def _try_find_score(self, score_name, line):
        m = re.search(score_name + r":\s*(\d+\.?\d*?) (\+-)?.+", line)
        if not m:
            return False

        score = m.group(1)
        self.scores[self.bench_name][score_name].append(float(score))
        return True

    def process_line(self, line):
        m = re.search(r"Running... (.+) \(.+\)", line)
        if m:
            self.bench_name = m.group(1)
            return

        if self._try_find_score("firstIteration", line):
            return

        if self._try_find_score("averageWorstCase", line):
            return

        if self._try_find_score("steadyState", line):
            return

        m = re.search(r"summary:\s*(\d+\.?\d*?) (\+-)?.+", line)
        if m:
            self.last_summary = float(m.group(1))

    def collect_results(self):
        for bench, scores in self.scores.items():
            for score, values in scores.items():
                # pylint --py3k W1619
                mean = sum(values) / len(values)
                test_name = "{}-{}".format(bench, score)
                self.suite["subtests"].append({"name": test_name, "value": mean})

        if self.last_summary:
            self.suite["value"] = self.last_summary


class SixSpeed(RunOnceBenchmark):
    name = "six-speed"
    path = os.path.join("third_party", "webkit", "PerformanceTests", "six-speed")
    unit = "ms"

    @property
    def command(self):
        cmd = super(SixSpeed, self).command
        return cmd + ["test.js"]

    def reset(self):
        super(SixSpeed, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, output):
        m = re.search(r"(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))


class SunSpider(RunOnceBenchmark):
    name = "sunspider"
    path = os.path.join(
        "third_party", "webkit", "PerformanceTests", "SunSpider", "sunspider-0.9.1"
    )
    unit = "ms"

    @property
    def command(self):
        cmd = super(SunSpider, self).command
        return cmd + ["sunspider-standalone-driver.js"]

    def reset(self):
        super(SunSpider, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, output):
        m = re.search(r"(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))


class WebToolingBenchmark(Benchmark):
    name = "web-tooling-benchmark"
    path = os.path.join(
        "third_party", "webkit", "PerformanceTests", "web-tooling-benchmark"
    )
    main_js = "cli.js"
    unit = "score"
    lower_is_better = False
    subtests_lower_is_better = False

    @property
    def command(self):
        cmd = super(WebToolingBenchmark, self).command
        return cmd + [self.main_js]

    def reset(self):
        super(WebToolingBenchmark, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, output):
        m = re.search(r" +([a-zA-Z].+): +([.0-9]+) +runs/sec", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(float(score))

    def collect_results(self):
        # NOTE: for this benchmark we run the test once, so we have a single value array
        bench_mean = None
        for bench, scores in self.scores.items():
            for score_name, values in scores.items():
                test_name = "{}-{}".format(self.name, score_name)
                # pylint --py3k W1619
                mean = sum(values) / len(values)
                self.suite["subtests"].append(
                    {
                        "lowerIsBetter": self.subtests_lower_is_better,
                        "name": test_name,
                        "value": mean,
                    }
                )
                if score_name == "mean":
                    bench_mean = mean
        self.suite["value"] = bench_mean

    def run(self):
        self._provision_benchmark_script()
        return super(WebToolingBenchmark, self).run()


class Octane(RunOnceBenchmark):
    name = "octane"
    path = os.path.join("third_party", "webkit", "PerformanceTests", "octane")
    unit = "score"
    lower_is_better = False

    @property
    def command(self):
        cmd = super(Octane, self).command
        return cmd + ["run.js"]

    def reset(self):
        super(Octane, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, output):
        m = re.search(r"(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest.startswith("Score"):
            subtest = "score"
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))

    def collect_results(self):
        bench_score = None
        # NOTE: for this benchmark we run the test once, so we have a single value array
        for bench, scores in self.scores.items():
            for score_name, values in scores.items():
                test_name = "{}-{}".format(self.name, score_name)
                # pylint --py3k W1619
                mean = sum(values) / len(values)
                self.suite["subtests"].append({"name": test_name, "value": mean})
                if score_name == "score":
                    bench_score = mean
        self.suite["value"] = bench_score

    def run(self):
        self._provision_benchmark_script()
        return super(Octane, self).run()


all_benchmarks = {
    "ares6": Ares6,
    "six-speed": SixSpeed,
    "sunspider": SunSpider,
    "web-tooling-benchmark": WebToolingBenchmark,
    "octane": Octane,
}


def run(benchmark, binary=None, extra_args=None, perfherder=None):
    if not binary:
        try:
            binary = os.path.join(build.bindir, "js" + build.substs["BIN_SUFFIX"])
        except BuildEnvironmentNotFoundException:
            binary = None

        if not binary or not os.path.isfile(binary):
            print(JSSHELL_NOT_FOUND)
            return 1

    bench = all_benchmarks.get(benchmark)(
        binary, args=extra_args, shell_name=perfherder
    )
    res = bench.run()

    if perfherder:
        print("PERFHERDER_DATA: {}".format(json.dumps(bench.perfherder_data)))
    return res


def get_parser():
    parser = ArgumentParser()
    parser.add_argument(
        "benchmark",
        choices=list(all_benchmarks),
        help="The name of the benchmark to run.",
    )
    parser.add_argument(
        "-b", "--binary", default=None, help="Path to the JS shell binary to use."
    )
    parser.add_argument(
        "--arg",
        dest="extra_args",
        action="append",
        default=None,
        help="Extra arguments to pass to the JS shell.",
    )
    parser.add_argument(
        "--perfherder",
        default=None,
        help="Log PERFHERDER_DATA to stdout using the given suite name.",
    )
    return parser


def cli(args=sys.argv[1:]):
    parser = get_parser()
    args = parser.parse_args(args)
    return run(**vars(args))
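

# Example invocation, assuming a locally built JS shell (the --binary path and
# the --perfherder suite name below are purely illustrative):
#
#   python testing/jsshell/benchmark.py octane \
#       --binary obj-dir/dist/bin/js --perfherder octane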


if __name__ == "__main__":
    sys.exit(cli())