# testing/jsshell/benchmark.py (from gecko.git)
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import json
import os
import re
import shutil
import sys
from abc import ABCMeta, abstractmethod, abstractproperty
from argparse import ArgumentParser
from collections import defaultdict

import six
from mozbuild.base import BuildEnvironmentNotFoundException, MozbuildObject
from mozprocess import run_and_wait

here = os.path.abspath(os.path.dirname(__file__))
build = MozbuildObject.from_environment(cwd=here)

JSSHELL_NOT_FOUND = """
Could not detect a JS shell. Either make sure you have a non-artifact build
with `ac_add_options --enable-js-shell` or specify it with `--binary`.
""".strip()


@six.add_metaclass(ABCMeta)
class Benchmark(object):
    lower_is_better = True
    should_alert = True

    def __init__(self, shell, args=None, shell_name=None):
        self.shell = shell
        self.args = args
        self.shell_name = shell_name
        # Cache for the `version` property below; initialize it so the first
        # access does not raise AttributeError.
        self._version = None

    @abstractproperty
    def unit(self):
        """Returns the unit of measurement of the benchmark."""

    @abstractproperty
    def name(self):
        """Returns the string name of the benchmark."""

    @abstractproperty
    def path(self):
        """Return the path to the benchmark relative to topsrcdir."""

    @abstractmethod
    def process_line(self, proc, line):
        """Process a line of stdout from the benchmark."""

    @abstractmethod
    def collect_results(self):
        """Build the result after the process has finished."""

    @property
    def command(self):
        """Returns the command to run as a list."""
        cmd = [self.shell]
        if self.args:
            cmd += self.args
        return cmd

    @property
    def version(self):
        if self._version:
            return self._version

        with open(os.path.join(self.path, "VERSION"), "r") as fh:
            self._version = fh.read().strip("\r\n\r\n \t")
        return self._version

    def reset(self):
        """Resets state between runs."""
        name = self.name
        if self.shell_name:
            name = "{}-{}".format(name, self.shell_name)

        self.perfherder_data = {
            "framework": {
                "name": "js-bench",
            },
            "suites": [
                {
                    "lowerIsBetter": self.lower_is_better,
                    "name": name,
                    "shouldAlert": self.should_alert,
                    "subtests": [],
                    "unit": self.unit,
                    "value": None,
                }
            ],
        }

        self.suite = self.perfherder_data["suites"][0]

    def _provision_benchmark_script(self):
        if os.path.isdir(self.path):
            return

        # Some benchmarks may have been downloaded from a fetch task, make
        # sure they get copied over.
        fetches_dir = os.environ.get("MOZ_FETCHES_DIR")
        if fetches_dir and os.path.isdir(fetches_dir):
            fetchdir = os.path.join(fetches_dir, self.name)
            if os.path.isdir(fetchdir):
                shutil.copytree(fetchdir, self.path)
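
    # Illustrative CI layout assumed by _provision_benchmark_script (directory
    # names are examples, not guaranteed): a fetch task leaves the benchmark
    # sources in $MOZ_FETCHES_DIR/<benchmark name>/, which is then copied to
    # self.path before the shell is launched.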

    def run(self):
        self.reset()

        # Update the environment variables
        env = os.environ.copy()

        process_args = {
            "args": self.command,
            "cwd": self.path,
            "env": env,
            "output_line_handler": self.process_line,
        }
        proc = run_and_wait(**process_args)
        self.collect_results()
        return proc.returncode


class RunOnceBenchmark(Benchmark):
    def collect_results(self):
        bench_total = 0
        # NOTE: for this benchmark we run the test once, so we have a single value array
        for bench, scores in self.scores.items():
            for score, values in scores.items():
                test_name = "{}-{}".format(self.name, score)
                # pylint --py3k W1619
                mean = sum(values) / len(values)
                self.suite["subtests"].append({"name": test_name, "value": mean})
                bench_total += int(sum(values))
        self.suite["value"] = bench_total


class Ares6(Benchmark):
    name = "ares6"
    path = os.path.join("third_party", "webkit", "PerformanceTests", "ARES-6")
    unit = "ms"

    @property
    def command(self):
        cmd = super(Ares6, self).command
        return cmd + ["cli.js"]

    def reset(self):
        super(Ares6, self).reset()

        self.bench_name = None
        self.last_summary = None
        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def _try_find_score(self, score_name, line):
        m = re.search(score_name + r":\s*(\d+\.?\d*?) (\+-)?.+", line)
        if not m:
            return False

        score = m.group(1)
        self.scores[self.bench_name][score_name].append(float(score))
        return True

    def process_line(self, proc, line):
        line = line.strip("\n")
        print(line)
        m = re.search(r"Running... (.+) \(.+\)", line)
        if m:
            self.bench_name = m.group(1)
            return

        if self._try_find_score("firstIteration", line):
            return

        if self._try_find_score("averageWorstCase", line):
            return

        if self._try_find_score("steadyState", line):
            return

        m = re.search(r"summary:\s*(\d+\.?\d*?) (\+-)?.+", line)
        if m:
            self.last_summary = float(m.group(1))
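
    # Sketch of the ARES-6 cli.js output lines the regexes above are written
    # against (the benchmark name and the numbers are invented for
    # illustration):
    #
    #   Running... Air (...)
    #   firstIteration:     45.67 +- 1.23 ms
    #   averageWorstCase:   30.12 +- 0.98 ms
    #   steadyState:        25.43 +- 0.45 ms
    #   summary:            33.00 +- 0.50 ms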

    def collect_results(self):
        for bench, scores in self.scores.items():
            for score, values in scores.items():
                # pylint --py3k W1619
                mean = sum(values) / len(values)
                test_name = "{}-{}".format(bench, score)
                self.suite["subtests"].append({"name": test_name, "value": mean})

        if self.last_summary:
            self.suite["value"] = self.last_summary


class SixSpeed(RunOnceBenchmark):
    name = "six-speed"
    path = os.path.join("third_party", "webkit", "PerformanceTests", "six-speed")
    unit = "ms"

    @property
    def command(self):
        cmd = super(SixSpeed, self).command
        return cmd + ["test.js"]

    def reset(self):
        super(SixSpeed, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, proc, output):
        output = output.strip("\n")
        print(output)
        m = re.search(r"(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))
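
    # Sketch of the test.js output this parser expects: one "<subtest>: <ms>"
    # line per test (the subtest name and value below are invented):
    #
    #   defaults-es6: 112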


class SunSpider(RunOnceBenchmark):
    name = "sunspider"
    path = os.path.join(
        "third_party", "webkit", "PerformanceTests", "SunSpider", "sunspider-0.9.1"
    )
    unit = "ms"

    @property
    def command(self):
        cmd = super(SunSpider, self).command
        return cmd + ["sunspider-standalone-driver.js"]

    def reset(self):
        super(SunSpider, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, proc, output):
        output = output.strip("\n")
        print(output)
        m = re.search(r"(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))


class WebToolingBenchmark(Benchmark):
    name = "web-tooling-benchmark"
    path = os.path.join(
        "third_party", "webkit", "PerformanceTests", "web-tooling-benchmark"
    )
    main_js = "cli.js"
    unit = "score"
    lower_is_better = False
    subtests_lower_is_better = False

    @property
    def command(self):
        cmd = super(WebToolingBenchmark, self).command
        return cmd + [self.main_js]

    def reset(self):
        super(WebToolingBenchmark, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, proc, output):
        output = output.strip("\n")
        print(output)
        m = re.search(" +([a-zA-Z].+): +([.0-9]+) +runs/sec", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(float(score))
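
    # Sketch of the cli.js output this parser expects, including the "mean"
    # line that collect_results() promotes to the suite value (names and
    # numbers are invented):
    #
    #      acorn:  5.42 runs/sec
    #       mean:  7.11 runs/sec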

    def collect_results(self):
        # NOTE: for this benchmark we run the test once, so we have a single value array
        bench_mean = None
        for bench, scores in self.scores.items():
            for score_name, values in scores.items():
                test_name = "{}-{}".format(self.name, score_name)
                # pylint --py3k W1619
                mean = sum(values) / len(values)
                self.suite["subtests"].append(
                    {
                        "lowerIsBetter": self.subtests_lower_is_better,
                        "name": test_name,
                        "value": mean,
                    }
                )
                if score_name == "mean":
                    bench_mean = mean
        self.suite["value"] = bench_mean

    def run(self):
        self._provision_benchmark_script()
        return super(WebToolingBenchmark, self).run()


class Octane(RunOnceBenchmark):
    name = "octane"
    path = os.path.join("third_party", "webkit", "PerformanceTests", "octane")
    unit = "score"
    lower_is_better = False

    @property
    def command(self):
        cmd = super(Octane, self).command
        return cmd + ["run.js"]

    def reset(self):
        super(Octane, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, proc, output):
        output = output.strip("\n")
        print(output)
        m = re.search(r"(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest.startswith("Score"):
            subtest = "score"
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))
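
    # Sketch of the run.js output this parser expects: per-test scores plus a
    # final aggregate line starting with "Score", which is folded into the
    # "score" subtest above (names and numbers are invented):
    #
    #   Richards: 26288
    #   Score (version 9): 33644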

    def collect_results(self):
        bench_score = None
        # NOTE: for this benchmark we run the test once, so we have a single value array
        for bench, scores in self.scores.items():
            for score_name, values in scores.items():
                test_name = "{}-{}".format(self.name, score_name)
                # pylint --py3k W1619
                mean = sum(values) / len(values)
                self.suite["subtests"].append({"name": test_name, "value": mean})
                if score_name == "score":
                    bench_score = mean
        self.suite["value"] = bench_score

    def run(self):
        self._provision_benchmark_script()
        return super(Octane, self).run()


all_benchmarks = {
    "ares6": Ares6,
    "six-speed": SixSpeed,
    "sunspider": SunSpider,
    "web-tooling-benchmark": WebToolingBenchmark,
    "octane": Octane,
}


def run(benchmark, binary=None, extra_args=None, perfherder=None):
    if not binary:
        try:
            binary = os.path.join(build.bindir, "js" + build.substs["BIN_SUFFIX"])
        except BuildEnvironmentNotFoundException:
            binary = None

        if not binary or not os.path.isfile(binary):
            print(JSSHELL_NOT_FOUND)
            return 1

    bench = all_benchmarks.get(benchmark)(
        binary, args=extra_args, shell_name=perfherder
    )
    res = bench.run()

    if perfherder:
        print("PERFHERDER_DATA: {}".format(json.dumps(bench.perfherder_data)))
    return res
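
# Sketch of what the PERFHERDER_DATA line printed above looks like, based on
# the structure built in Benchmark.reset() (abridged, values illustrative):
#
#   PERFHERDER_DATA: {"framework": {"name": "js-bench"},
#                     "suites": [{"name": "octane-myshell", "value": 33644,
#                                 "unit": "score", "subtests": [...]}]}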


def get_parser():
    parser = ArgumentParser()
    parser.add_argument(
        "benchmark",
        choices=list(all_benchmarks),
        help="The name of the benchmark to run.",
    )
    parser.add_argument(
        "-b", "--binary", default=None, help="Path to the JS shell binary to use."
    )
    parser.add_argument(
        "--arg",
        dest="extra_args",
        action="append",
        default=None,
        help="Extra arguments to pass to the JS shell.",
    )
    parser.add_argument(
        "--perfherder",
        default=None,
        help="Log PERFHERDER_DATA to stdout using the given suite name.",
    )
    return parser


def cli(args=sys.argv[1:]):
    parser = get_parser()
    args = parser.parse_args(args)
    return run(**vars(args))
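
# Example invocation (the shell path and suite name are illustrative):
#   python testing/jsshell/benchmark.py octane --binary objdir/dist/bin/js --perfherder octane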


if __name__ == "__main__":
    sys.exit(cli())