# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import absolute_import, division, print_function

import json
import os
import re
import shutil
import sys

from abc import ABCMeta, abstractmethod, abstractproperty
from argparse import ArgumentParser
from collections import defaultdict

import six
from mozbuild.base import MozbuildObject, BuildEnvironmentNotFoundException
from mozprocess import ProcessHandler

here = os.path.abspath(os.path.dirname(__file__))
build = MozbuildObject.from_environment(cwd=here)

JSSHELL_NOT_FOUND = """
Could not detect a JS shell. Either make sure you have a non-artifact build
with `ac_add_options --enable-js-shell` or specify it with `--binary`.
"""


@six.add_metaclass(ABCMeta)
class Benchmark(object):
    lower_is_better = True
    should_alert = False

    def __init__(self, shell, args=None, shell_name=None):
        self.shell = shell
        self.args = args
        self.shell_name = shell_name

    @abstractproperty
    def unit(self):
        """Returns the unit of measurement of the benchmark."""

    @abstractproperty
    def name(self):
        """Returns the string name of the benchmark."""

    @abstractproperty
    def path(self):
        """Return the path to the benchmark relative to topsrcdir."""

    @abstractmethod
    def process_line(self, line):
        """Process a line of stdout from the benchmark."""

    @abstractmethod
    def collect_results(self):
        """Build the result after the process has finished."""

    @property
    def command(self):
        """Returns the command to run as a list."""
        cmd = [self.shell]
        if self.args:
            cmd += self.args
        return cmd

    @property
    def version(self):
        with open(os.path.join(self.path, "VERSION"), "r") as fh:
            self._version = fh.read().strip("\r\n\r\n \t")
        return self._version
77 """Resets state between runs."""
80 name
= "{}-{}".format(name
, self
.shell_name
)
82 self
.perfherder_data
= {
88 "lowerIsBetter": self
.lower_is_better
,
90 "shouldAlert": self
.should_alert
,
97 self
.suite
= self
.perfherder_data
["suites"][0]

    def _provision_benchmark_script(self):
        if os.path.isdir(self.path):
            return

        # Some benchmarks may have been downloaded from a fetch task, make
        # sure they get copied over.
        fetches_dir = os.environ.get("MOZ_FETCHES_DIR")
        if fetches_dir and os.path.isdir(fetches_dir):
            fetchdir = os.path.join(fetches_dir, self.name)
            if os.path.isdir(fetchdir):
                shutil.copytree(fetchdir, self.path)

    def run(self):
        self.reset()

        # Update the environment variables
        env = os.environ.copy()

        process_args = {
            "cmd": self.command,
            "cwd": self.path,
            "onFinish": self.collect_results,
            "processOutputLine": self.process_line,
            "stream": sys.stdout,
            "env": env,
            "universal_newlines": True,
        }
        proc = ProcessHandler(**process_args)
        proc.run()
        return proc.wait()
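

# A concrete benchmark only needs the metadata attributes plus process_line()
# and collect_results(). Hypothetical sketch (not a benchmark in this module):
#
#   class MyBench(Benchmark):
#       name = "my-bench"
#       unit = "ms"
#       path = os.path.join("third_party", "my-bench")
#
#       def process_line(self, line):
#           ...  # parse one line of shell output into self.scores
#
#       def collect_results(self):
#           ...  # fill self.suite["subtests"] and self.suite["value"]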


class RunOnceBenchmark(Benchmark):
    def collect_results(self):
        bench_total = 0
        # NOTE: for this benchmark we run the test once, so we have a single value array
        for bench, scores in self.scores.items():
            for score, values in scores.items():
                test_name = "{}-{}".format(self.name, score)
                # pylint --py3k W1619
                mean = sum(values) / len(values)
                self.suite["subtests"].append({"name": test_name, "value": mean})
                bench_total += int(sum(values))
        self.suite["value"] = bench_total


class Ares6(Benchmark):
    name = "ares6"
    path = os.path.join("third_party", "webkit", "PerformanceTests", "ARES-6")
    unit = "ms"

    @property
    def command(self):
        cmd = super(Ares6, self).command
        return cmd + ["cli.js"]

    def reset(self):
        super(Ares6, self).reset()

        self.bench_name = None
        self.last_summary = None
        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def _try_find_score(self, score_name, line):
        m = re.search(score_name + ":\s*(\d+\.?\d*?) (\+-)?.+", line)
        if not m:
            return False

        score = m.group(1)
        self.scores[self.bench_name][score_name].append(float(score))
        return True

    def process_line(self, line):
        m = re.search("Running... (.+) \(.+\)", line)
        if m:
            self.bench_name = m.group(1)
            return

        if self._try_find_score("firstIteration", line):
            return

        if self._try_find_score("averageWorstCase", line):
            return

        if self._try_find_score("steadyState", line):
            return

        m = re.search("summary:\s*(\d+\.?\d*?) (\+-)?.+", line)
        if m:
            self.last_summary = float(m.group(1))
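
    # For reference, the regexes above expect ARES-6 output roughly of the
    # form "Running... <bench> (<n>/<total>)" followed by lines such as
    # "firstIteration: 123.45 +- 6.78 ms" (illustrative values only).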

    def collect_results(self):
        for bench, scores in self.scores.items():
            for score, values in scores.items():
                # pylint --py3k W1619
                mean = sum(values) / len(values)
                test_name = "{}-{}".format(bench, score)
                self.suite["subtests"].append({"name": test_name, "value": mean})

        if self.last_summary:
            self.suite["value"] = self.last_summary


class SixSpeed(RunOnceBenchmark):
    name = "six-speed"
    path = os.path.join("third_party", "webkit", "PerformanceTests", "six-speed")
    unit = "ms"

    @property
    def command(self):
        cmd = super(SixSpeed, self).command
        return cmd + ["test.js"]

    def reset(self):
        super(SixSpeed, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, output):
        m = re.search("(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))
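
    # The harness prints one "<subtest>: <integer>" line per test, e.g.
    # "map-set: 120" (illustrative value), which process_line records.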


class SunSpider(RunOnceBenchmark):
    name = "sunspider"
    path = os.path.join(
        "third_party", "webkit", "PerformanceTests", "SunSpider", "sunspider-0.9.1"
    )
    unit = "ms"

    @property
    def command(self):
        cmd = super(SunSpider, self).command
        return cmd + ["sunspider-standalone-driver.js"]

    def reset(self):
        super(SunSpider, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, output):
        m = re.search("(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))


class WebToolingBenchmark(Benchmark):
    name = "web-tooling-benchmark"
    path = os.path.join(
        "third_party", "webkit", "PerformanceTests", "web-tooling-benchmark"
    )
    main_js = "cli.js"
    unit = "score"
    lower_is_better = False
    subtests_lower_is_better = False

    @property
    def command(self):
        cmd = super(WebToolingBenchmark, self).command
        return cmd + [self.main_js]

    def reset(self):
        super(WebToolingBenchmark, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, output):
        m = re.search(" +([a-zA-Z].+): +([.0-9]+) +runs/sec", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(float(score))
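
    # The benchmark prints one " <subtest>: <value> runs/sec" line per tool,
    # e.g. "  acorn:  5.52 runs/sec" (illustrative value), captured above.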

    def collect_results(self):
        # NOTE: for this benchmark we run the test once, so we have a single value array
        bench_mean = None
        for bench, scores in self.scores.items():
            for score_name, values in scores.items():
                test_name = "{}-{}".format(self.name, score_name)
                # pylint --py3k W1619
                mean = sum(values) / len(values)
                self.suite["subtests"].append(
                    {
                        "name": test_name,
                        "value": mean,
                        "lowerIsBetter": self.subtests_lower_is_better,
                    }
                )
                if score_name == "mean":
                    bench_mean = mean
        self.suite["value"] = bench_mean

    def run(self):
        self._provision_benchmark_script()
        return super(WebToolingBenchmark, self).run()


class Octane(RunOnceBenchmark):
    name = "octane"
    path = os.path.join("third_party", "webkit", "PerformanceTests", "octane")
    unit = "score"
    lower_is_better = False

    @property
    def command(self):
        cmd = super(Octane, self).command
        return cmd + ["run.js"]

    def reset(self):
        super(Octane, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, output):
        m = re.search("(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest.startswith("Score"):
            subtest = "score"
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))
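
    # Octane's run.js prints "<benchmark>: <score>" lines and ends with a
    # summary line starting with "Score", which is normalized to the "score"
    # subtest above.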

    def collect_results(self):
        # NOTE: for this benchmark we run the test once, so we have a single value array
        bench_score = None
        for bench, scores in self.scores.items():
            for score_name, values in scores.items():
                test_name = "{}-{}".format(self.name, score_name)
                # pylint --py3k W1619
                mean = sum(values) / len(values)
                self.suite["subtests"].append({"name": test_name, "value": mean})
                if score_name == "score":
                    bench_score = mean
        self.suite["value"] = bench_score

    def run(self):
        self._provision_benchmark_script()
        return super(Octane, self).run()
368 "six-speed": SixSpeed
,
369 "sunspider": SunSpider
,
370 "web-tooling-benchmark": WebToolingBenchmark
,


def run(benchmark, binary=None, extra_args=None, perfherder=None):
    if not binary:
        try:
            binary = os.path.join(build.bindir, "js" + build.substs["BIN_SUFFIX"])
        except BuildEnvironmentNotFoundException:
            binary = None

    if not binary or not os.path.isfile(binary):
        print(JSSHELL_NOT_FOUND)
        return 1

    bench = all_benchmarks.get(benchmark)(
        binary, args=extra_args, shell_name=perfherder
    )
    res = bench.run()

    if perfherder:
        print("PERFHERDER_DATA: {}".format(json.dumps(bench.perfherder_data)))
    return res
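

# Illustrative usage from Python (the shell path below is hypothetical):
#   run("octane", binary="/path/to/objdir/dist/bin/js", perfherder="js-bench")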


def get_parser():
    parser = ArgumentParser()
    parser.add_argument(
        "benchmark", choices=list(all_benchmarks),
        help="The name of the benchmark to run.",
    )
    parser.add_argument(
        "-b", "--binary", default=None, help="Path to the JS shell binary to use."
    )
    parser.add_argument(
        "--arg", dest="extra_args", action="append", default=None,
        help="Extra arguments to pass to the JS shell.",
    )
    parser.add_argument(
        "--perfherder", default=None,
        help="Log PERFHERDER_DATA to stdout using the given suite name.",
    )
    return parser


def cli(args=sys.argv[1:]):
    parser = get_parser()
    args = parser.parse_args(args)
    return run(**vars(args))


if __name__ == "__main__":
    sys.exit(cli())