# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
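"""Run SpiderMonkey JS shell benchmarks (ARES-6, six-speed, SunSpider,
web-tooling-benchmark, Octane) against a JS shell binary and optionally
report the results as PERFHERDER_DATA."""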

from __future__ import absolute_import, print_function

import json
import os
import re
import shutil
import sys

from abc import ABCMeta, abstractmethod, abstractproperty
from argparse import ArgumentParser
from collections import defaultdict

from mozbuild.base import MozbuildObject, BuildEnvironmentNotFoundException
from mozprocess import ProcessHandler

here = os.path.abspath(os.path.dirname(__file__))
build = MozbuildObject.from_environment(cwd=here)

JSSHELL_NOT_FOUND = """
Could not detect a JS shell. Either make sure you have a non-artifact build
with `ac_add_options --enable-js-shell` or specify it with `--binary`.
"""


class Benchmark(object):
    __metaclass__ = ABCMeta
    lower_is_better = True
    # Whether Perfherder should alert on regressions for this suite.
    should_alert = False

    def __init__(self, shell, args=None, shell_name=None):
        self.shell = shell
        self.args = args
        self.shell_name = shell_name

    @abstractproperty
    def units(self):
        """Returns the unit of measurement of the benchmark."""

    @abstractproperty
    def name(self):
        """Returns the string name of the benchmark."""

    @abstractproperty
    def path(self):
        """Return the path to the benchmark relative to topsrcdir."""

    @abstractmethod
    def process_line(self, line):
        """Process a line of stdout from the benchmark."""

    @abstractmethod
    def collect_results(self):
        """Build the result after the process has finished."""

    @property
    def command(self):
        """Returns the command to run as a list."""
        cmd = [self.shell]
        if self.args:
            cmd += self.args
        return cmd

    @property
    def version(self):
        with open(os.path.join(self.path, 'VERSION'), 'r') as fh:
            self._version = fh.read().strip("\r\n \t")
        return self._version
76 """Resets state between runs."""
79 name
= '{}-{}'.format(name
, self
.shell_name
)
81 self
.perfherder_data
= {
87 'lowerIsBetter': self
.lower_is_better
,
89 'shouldAlert': self
.should_alert
,
96 self
.suite
= self
.perfherder_data
['suites'][0]

    def _provision_benchmark_script(self):
        if os.path.isdir(self.path):
            return

        # Some benchmarks may have been downloaded from a fetch task, make
        # sure they get copied over.
        fetches_dir = os.environ.get('MOZ_FETCHES_DIR')
        if fetches_dir and os.path.isdir(fetches_dir):
            fetchdir = os.path.join(fetches_dir, self.name)
            if os.path.isdir(fetchdir):
                shutil.copytree(fetchdir, self.path)

    def run(self):
        self.reset()

        # Update the environment variables
        env = os.environ.copy()

        # Disable GC poisoning (bug 1499043)
        env['JSGC_DISABLE_POISONING'] = '1'

        # Stream benchmark output through process_line() and build the
        # results via collect_results() once the shell exits.
        process_args = {
            'cmd': self.command,
            'cwd': self.path,
            'env': env,
            'onFinish': self.collect_results,
            'processOutputLine': self.process_line,
            'stream': sys.stdout,
        }
        proc = ProcessHandler(**process_args)
        proc.run()
        return proc.wait()


class RunOnceBenchmark(Benchmark):
    def collect_results(self):
        bench_total = 0
        # NOTE: for this benchmark we run the test once, so we have a single value array
        for bench, scores in self.scores.items():
            for score, values in scores.items():
                test_name = "{}-{}".format(self.name, score)
                mean = sum(values) / len(values)
                self.suite['subtests'].append({'name': test_name, 'value': mean})
                bench_total += int(sum(values))
        self.suite['value'] = bench_total


class Ares6(Benchmark):
    name = 'ares6'
    path = os.path.join('third_party', 'webkit', 'PerformanceTests', 'ARES-6')
    units = 'ms'

    @property
    def command(self):
        cmd = super(Ares6, self).command
        return cmd + ['cli.js']

    def reset(self):
        super(Ares6, self).reset()

        self.bench_name = None
        self.last_summary = None
        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def _try_find_score(self, score_name, line):
        m = re.search(score_name + r':\s*(\d+\.?\d*?) (\+-)?.+', line)
        if not m:
            return False

        score = m.group(1)
        self.scores[self.bench_name][score_name].append(float(score))
        return True

    def process_line(self, line):
        m = re.search(r"Running... (.+) \(.+\)", line)
        if m:
            self.bench_name = m.group(1)
            return

        if self._try_find_score('firstIteration', line):
            return

        if self._try_find_score('averageWorstCase', line):
            return

        if self._try_find_score('steadyState', line):
            return

        m = re.search(r'summary:\s*(\d+\.?\d*?) (\+-)?.+', line)
        if m:
            self.last_summary = float(m.group(1))

    def collect_results(self):
        for bench, scores in self.scores.items():
            for score, values in scores.items():
                mean = sum(values) / len(values)
                test_name = "{}-{}".format(bench, score)
                self.suite['subtests'].append({'name': test_name, 'value': mean})

        if self.last_summary:
            self.suite['value'] = self.last_summary


class SixSpeed(RunOnceBenchmark):
    name = 'six-speed'
    path = os.path.join('third_party', 'webkit', 'PerformanceTests', 'six-speed')
    units = 'ms'

    @property
    def command(self):
        cmd = super(SixSpeed, self).command
        return cmd + ['test.js']

    def reset(self):
        super(SixSpeed, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, output):
        m = re.search(r"(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))


class SunSpider(RunOnceBenchmark):
    name = 'sunspider'
    path = os.path.join('third_party', 'webkit', 'PerformanceTests', 'SunSpider', 'sunspider-0.9.1')
    units = 'ms'

    @property
    def command(self):
        cmd = super(SunSpider, self).command
        return cmd + ['sunspider-standalone-driver.js']

    def reset(self):
        super(SunSpider, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, output):
        m = re.search(r"(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))


class WebToolingBenchmark(Benchmark):
    name = 'web-tooling-benchmark'
    path = os.path.join('third_party', 'webkit', 'PerformanceTests', 'web-tooling-benchmark')
    main_js = 'cli.js'  # entry-point script passed to the shell
    units = 'score'
    lower_is_better = False
    subtests_lower_is_better = False

    @property
    def command(self):
        cmd = super(WebToolingBenchmark, self).command
        return cmd + [self.main_js]

    def reset(self):
        super(WebToolingBenchmark, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, output):
        m = re.search(r" +([a-zA-Z].+): +([.0-9]+) +runs/sec", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(float(score))

    def collect_results(self):
        bench_mean = None
        # NOTE: for this benchmark we run the test once, so we have a single value array
        for bench, scores in self.scores.items():
            for score_name, values in scores.items():
                test_name = "{}-{}".format(self.name, score_name)
                mean = sum(values) / len(values)
                self.suite['subtests'].append({
                    'lowerIsBetter': self.subtests_lower_is_better,
                    'name': test_name,
                    'value': mean,
                })
                if score_name == 'mean':
                    bench_mean = mean
        self.suite['value'] = bench_mean

    def run(self):
        self._provision_benchmark_script()
        return super(WebToolingBenchmark, self).run()


class Octane(RunOnceBenchmark):
    name = 'octane'
    path = os.path.join('third_party', 'webkit', 'PerformanceTests', 'octane')
    units = 'score'
    lower_is_better = False

    @property
    def command(self):
        cmd = super(Octane, self).command
        return cmd + ['run.js']

    def reset(self):
        super(Octane, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

    def process_line(self, output):
        m = re.search(r"(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest.startswith('Score'):
            subtest = 'score'
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))

    def collect_results(self):
        bench_score = None
        # NOTE: for this benchmark we run the test once, so we have a single value array
        for bench, scores in self.scores.items():
            for score_name, values in scores.items():
                test_name = "{}-{}".format(self.name, score_name)
                mean = sum(values) / len(values)
                self.suite['subtests'].append({'name': test_name, 'value': mean})
                if score_name == 'score':
                    bench_score = mean
        self.suite['value'] = bench_score

    def run(self):
        self._provision_benchmark_script()
        return super(Octane, self).run()
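

# All benchmarks selectable from the command line, keyed by name.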
all_benchmarks = {
    'ares6': Ares6,
    'six-speed': SixSpeed,
    'sunspider': SunSpider,
    'web-tooling-benchmark': WebToolingBenchmark,
    'octane': Octane,
}


def run(benchmark, binary=None, extra_args=None, perfherder=None):
    if not binary:
        try:
            binary = os.path.join(build.bindir, 'js' + build.substs['BIN_SUFFIX'])
        except BuildEnvironmentNotFoundException:
            binary = None

        if not binary or not os.path.isfile(binary):
            print(JSSHELL_NOT_FOUND)
            return 1

    bench = all_benchmarks.get(benchmark)(binary, args=extra_args, shell_name=perfherder)
    res = bench.run()

    if perfherder:
        print("PERFHERDER_DATA: {}".format(json.dumps(bench.perfherder_data)))
    return res


def get_parser():
    parser = ArgumentParser()
    parser.add_argument('benchmark', choices=all_benchmarks.keys(),
                        help="The name of the benchmark to run.")
    parser.add_argument('-b', '--binary', default=None,
                        help="Path to the JS shell binary to use.")
    parser.add_argument('--arg', dest='extra_args', action='append', default=None,
                        help="Extra arguments to pass to the JS shell.")
    parser.add_argument('--perfherder', default=None,
                        help="Log PERFHERDER_DATA to stdout using the given suite name.")
    return parser


def cli(args=sys.argv[1:]):
    parser = get_parser()
    args = parser.parse_args(args)
    return run(**vars(args))
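

# Example invocation (binary path and suite name are illustrative):
#   python benchmark.py sunspider --binary objdir/dist/bin/js --perfherder sunspider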
if __name__ == '__main__':
    sys.exit(cli())