# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import absolute_import, print_function

import json
import os
import re
import shutil
import sys
from abc import ABCMeta, abstractmethod, abstractproperty
from argparse import ArgumentParser
from collections import defaultdict

import six
from mozbuild.base import MozbuildObject, BuildEnvironmentNotFoundException
from mozprocess import run_and_wait

here = os.path.abspath(os.path.dirname(__file__))
build = MozbuildObject.from_environment(cwd=here)
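# Note: `build.bindir` and `build.substs["BIN_SUFFIX"]` are used by run()
# below to locate a compiled `js` shell when no --binary path is given.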

JSSHELL_NOT_FOUND = """
Could not detect a JS shell. Either make sure you have a non-artifact build
with `ac_add_options --enable-js-shell` or specify it with `--binary`.
"""


@six.add_metaclass(ABCMeta)
class Benchmark(object):
    lower_is_better = True
    should_alert = False
    _version = None

    def __init__(self, shell, args=None, shell_name=None):
        self.shell = shell
        self.args = args
        self.shell_name = shell_name

    @abstractproperty
    def units(self):
        """Returns the unit of measurement of the benchmark."""

    @abstractproperty
    def name(self):
        """Returns the string name of the benchmark."""

    @abstractproperty
    def path(self):
        """Return the path to the benchmark relative to topsrcdir."""

    @abstractmethod
    def process_line(self, proc, line):
        """Process a line of stdout from the benchmark."""

    @abstractmethod
    def collect_results(self):
        """Build the result after the process has finished."""

    @property
    def command(self):
        """Returns the command to run as a list."""
        cmd = [self.shell]
        if self.args:
            cmd += self.args
        return cmd

    @property
    def version(self):
        if self._version:
            return self._version

        with open(os.path.join(self.path, "VERSION"), "r") as fh:
            self._version = fh.read().strip("\r\n\r\n \t")
        return self._version
75 """Resets state between runs."""
78 name
= "{}-{}".format(name
, self
.shell_name
)
80 self
.perfherder_data
= {
86 "lowerIsBetter": self
.lower_is_better
,
88 "shouldAlert": self
.should_alert
,
95 self
.suite
= self
.perfherder_data
["suites"][0]

    def _provision_benchmark_script(self):
        if os.path.isdir(self.path):
            return

        # Some benchmarks may have been downloaded from a fetch task, make
        # sure they get copied over.
        fetches_dir = os.environ.get("MOZ_FETCHES_DIR")
        if fetches_dir and os.path.isdir(fetches_dir):
            fetchdir = os.path.join(fetches_dir, self.name)
            if os.path.isdir(fetchdir):
                shutil.copytree(fetchdir, self.path)

    def run(self):
        self.reset()

        # Update the environment variables
        env = os.environ.copy()

        process_args = {
            "args": self.command,
            "cwd": self.path,
            "env": env,
            # run_and_wait calls this back with (proc, line) for each line
            # of benchmark stdout.
            "output_line_handler": self.process_line,
        }
        proc = run_and_wait(**process_args)
        self.collect_results()
        return proc.returncode


class RunOnceBenchmark(Benchmark):
    def collect_results(self):
        bench_total = 0
        # NOTE: for this benchmark we run the test once, so we have a single value array
        for bench, scores in self.scores.items():
            for score, values in scores.items():
                test_name = "{}-{}".format(self.name, score)
                # pylint --py3k W1619
                mean = sum(values) / len(values)
                self.suite["subtests"].append({"name": test_name, "value": mean})
                bench_total += int(sum(values))
        self.suite["value"] = bench_total


class Ares6(Benchmark):
    name = "ares6"
    path = os.path.join("third_party", "webkit", "PerformanceTests", "ARES-6")
    units = "ms"

    @property
    def command(self):
        cmd = super(Ares6, self).command
        return cmd + ["cli.js"]

    def reset(self):
        super(Ares6, self).reset()

        self.bench_name = None
        self.last_summary = None
        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def _try_find_score(self, score_name, line):
        m = re.search(score_name + r":\s*(\d+\.?\d*?) (\+-)?.+", line)
        if not m:
            return False

        score = m.group(1)
        self.scores[self.bench_name][score_name].append(float(score))
        return True
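
    # For reference, ARES-6's cli.js prints lines roughly like the following
    # (numbers invented for illustration), which the regexes here and in
    # process_line below pick apart:
    #
    #   Running... Air (1/1)
    #   firstIteration:     51.19 +- 14.90 ms
    #   averageWorstCase:   29.51 +- 4.22 ms
    #   steadyState:        25.77 +- 1.21 ms
    #   summary:            33.36 +- 3.61 ms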

    def process_line(self, proc, line):
        line = line.strip("\n")

        m = re.search(r"Running... (.+) \(.+\)", line)
        if m:
            self.bench_name = m.group(1)
            return

        if self._try_find_score("firstIteration", line):
            return

        if self._try_find_score("averageWorstCase", line):
            return

        if self._try_find_score("steadyState", line):
            return

        m = re.search(r"summary:\s*(\d+\.?\d*?) (\+-)?.+", line)
        if m:
            self.last_summary = float(m.group(1))

    def collect_results(self):
        for bench, scores in self.scores.items():
            for score, values in scores.items():
                # pylint --py3k W1619
                mean = sum(values) / len(values)
                test_name = "{}-{}".format(bench, score)
                self.suite["subtests"].append({"name": test_name, "value": mean})

        if self.last_summary:
            self.suite["value"] = self.last_summary


class SixSpeed(RunOnceBenchmark):
    name = "six-speed"
    path = os.path.join("third_party", "webkit", "PerformanceTests", "six-speed")
    units = "ms"

    @property
    def command(self):
        cmd = super(SixSpeed, self).command
        return cmd + ["test.js"]

    def reset(self):
        super(SixSpeed, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, proc, output):
        output = output.strip("\n")

        m = re.search(r"(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))
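
    # Six-speed's test.js reports one integer per subtest, e.g. (illustrative)
    # "templateString: 120", which the "(.+): (\d+)" pattern above captures.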


class SunSpider(RunOnceBenchmark):
    name = "sunspider"
    path = os.path.join(
        "third_party", "webkit", "PerformanceTests", "SunSpider", "sunspider-0.9.1"
    )
    units = "ms"

    @property
    def command(self):
        cmd = super(SunSpider, self).command
        return cmd + ["sunspider-standalone-driver.js"]

    def reset(self):
        super(SunSpider, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, proc, output):
        output = output.strip("\n")

        m = re.search(r"(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))
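
    # SunSpider's standalone driver prints "<test>: <ms>" lines, e.g.
    # "3d-cube: 28" (value invented), collected by the same pattern as above.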


class WebToolingBenchmark(Benchmark):
    name = "web-tooling-benchmark"
    path = os.path.join(
        "third_party", "webkit", "PerformanceTests", "web-tooling-benchmark"
    )
    main_js = "cli.js"
    units = "score"
    lower_is_better = False
    subtests_lower_is_better = False

    @property
    def command(self):
        cmd = super(WebToolingBenchmark, self).command
        return cmd + [self.main_js]

    def reset(self):
        super(WebToolingBenchmark, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, proc, output):
        output = output.strip("\n")

        m = re.search(" +([a-zA-Z].+): +([.0-9]+) +runs/sec", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(float(score))
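
    # The web-tooling-benchmark CLI prints throughput lines such as
    # "  acorn:  5.42 runs/sec" (value invented); higher is better here,
    # hence lower_is_better = False above.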

    def collect_results(self):
        # NOTE: for this benchmark we run the test once, so we have a single value array
        bench_mean = None
        for bench, scores in self.scores.items():
            for score_name, values in scores.items():
                test_name = "{}-{}".format(self.name, score_name)
                # pylint --py3k W1619
                mean = sum(values) / len(values)
                self.suite["subtests"].append(
                    {
                        "lowerIsBetter": self.subtests_lower_is_better,
                        "name": test_name,
                        "value": mean,
                    }
                )
                if score_name == "mean":
                    bench_mean = mean

        self.suite["value"] = bench_mean

    def run(self):
        self._provision_benchmark_script()
        return super(WebToolingBenchmark, self).run()


class Octane(RunOnceBenchmark):
    name = "octane"
    path = os.path.join("third_party", "webkit", "PerformanceTests", "octane")
    units = "score"
    lower_is_better = False

    @property
    def command(self):
        cmd = super(Octane, self).command
        return cmd + ["run.js"]

    def reset(self):
        super(Octane, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, proc, output):
        output = output.strip("\n")

        m = re.search(r"(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest.startswith("Score"):
            subtest = "score"
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))
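
    # Octane's run.js prints "<test>: <score>" lines plus a final aggregate
    # like "Score (version 9): 31200" (number invented); the
    # startswith("Score") branch above folds that aggregate into the
    # "score" subtest.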

    def collect_results(self):
        bench_score = None
        # NOTE: for this benchmark we run the test once, so we have a single value array
        for bench, scores in self.scores.items():
            for score_name, values in scores.items():
                test_name = "{}-{}".format(self.name, score_name)
                # pylint --py3k W1619
                mean = sum(values) / len(values)
                self.suite["subtests"].append({"name": test_name, "value": mean})
                if score_name == "score":
                    bench_score = mean
        self.suite["value"] = bench_score

    def run(self):
        self._provision_benchmark_script()
        return super(Octane, self).run()
373 "six-speed": SixSpeed
,
374 "sunspider": SunSpider
,
375 "web-tooling-benchmark": WebToolingBenchmark
,


def run(benchmark, binary=None, extra_args=None, perfherder=None):
    if not binary:
        try:
            binary = os.path.join(build.bindir, "js" + build.substs["BIN_SUFFIX"])
        except BuildEnvironmentNotFoundException:
            binary = None

        if not binary or not os.path.isfile(binary):
            print(JSSHELL_NOT_FOUND)
            return 1

    bench = all_benchmarks.get(benchmark)(
        binary, args=extra_args, shell_name=perfherder
    )
    res = bench.run()

    if perfherder:
        print("PERFHERDER_DATA: {}".format(json.dumps(bench.perfherder_data)))
    return res


def get_parser():
    parser = ArgumentParser()
    parser.add_argument(
        "benchmark",
        choices=list(all_benchmarks),
        help="The name of the benchmark to run.",
    )
    parser.add_argument(
        "-b", "--binary", default=None, help="Path to the JS shell binary to use."
    )
    parser.add_argument(
        "--arg",
        dest="extra_args",
        action="append",
        default=None,
        help="Extra arguments to pass to the JS shell.",
    )
    parser.add_argument(
        "--perfherder",
        default=None,
        help="Log PERFHERDER_DATA to stdout using the given suite name.",
    )
    return parser


def cli(args=sys.argv[1:]):
    parser = get_parser()
    args = parser.parse_args(args)
    return run(**vars(args))


if __name__ == "__main__":
    sys.exit(cli())