# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import absolute_import, print_function, unicode_literals

import argparse
import json
import logging
import os
import sys
import tempfile
import subprocess
import shutil

from mach.decorators import (
    CommandArgument,
    CommandProvider,
    Command,
    SettingsProvider,
    SubCommand,
)

from mozbuild.base import (
    BuildEnvironmentNotFoundException,
    MachCommandBase,
    MachCommandConditions as conditions,
)
from moztest.resolve import TEST_SUITES
from argparse import ArgumentParser

UNKNOWN_TEST = '''
I was unable to find tests from the given argument(s).

You should specify a test directory, filename, test suite name, or
abbreviation. If no arguments are given, there must be local file
changes and corresponding IMPACTED_TESTS annotations in moz.build
files relevant to those files.

It's possible my little brain doesn't know about the type of test you are
trying to execute. If you suspect this, please request support by filing
a bug at
https://bugzilla.mozilla.org/enter_bug.cgi?product=Testing&component=General.
'''.strip()

UNKNOWN_FLAVOR = '''
I know you are trying to run a %s%s test. Unfortunately, I can't run those
tests yet. Sorry!
'''.strip()

TEST_HELP = '''
Test or tests to run. Tests can be specified by filename, directory, suite
name or suite alias.

The following test suites and aliases are supported: %s
''' % ', '.join(sorted(TEST_SUITES))
TEST_HELP = TEST_HELP.strip()


@SettingsProvider
class TestConfig(object):

    @classmethod
    def config_settings(cls):
        from mozlog.commandline import log_formatters
        from mozlog.structuredlog import log_levels
        format_desc = "The default format to use when running tests with `mach test`."
        format_choices = log_formatters.keys()
        level_desc = "The default log level to use when running tests with `mach test`."
        level_choices = [l.lower() for l in log_levels]
        return [
            ('test.format', 'string', format_desc, 'mach', {'choices': format_choices}),
            ('test.level', 'string', level_desc, 'info', {'choices': level_choices}),
        ]


def get_test_parser():
    from mozlog.commandline import add_logging_group
    parser = argparse.ArgumentParser()
    parser.add_argument('what', default=None, nargs='*', help=TEST_HELP)
    parser.add_argument('extra_args', default=None, nargs=argparse.REMAINDER,
                        help="Extra arguments to pass to the underlying test command(s). "
                             "If an underlying command doesn't recognize the argument, it "
                             "will fail.")
    add_logging_group(parser)
    return parser


@CommandProvider
class Test(MachCommandBase):
    @Command('test', category='testing',
             description='Run tests (detects the kind of test and runs it).',
             parser=get_test_parser)
    def test(self, what, extra_args, **log_args):
        """Run tests from names or paths.

        mach test accepts arguments specifying which tests to run. Each argument
        can be:

        * The path to a test file
        * A directory containing tests
        * A test suite name
        * An alias to a test suite name (codes used on TreeHerder)

        If no input is provided, tests will be run based on files changed in
        the local tree. Relevant tests, tags, or flavors are determined by
        IMPACTED_TESTS annotations in moz.build files relevant to the
        changed files.

        When paths or directories are given, they are first resolved to test
        files known to the build system.

        If resolved tests belong to more than one test type/flavor/harness,
        the harness for each relevant type/flavor will be invoked. e.g. if
        you specify a directory with xpcshell and browser chrome mochitests,
        both harnesses will be invoked.
        """
        from mozlog.commandline import setup_logging
        from mozlog.handlers import StreamHandler
        from moztest.resolve import get_suite_definition, TestResolver, TEST_SUITES

        resolver = self._spawn(TestResolver)
        run_suites, run_tests = resolver.resolve_metadata(what)

        if not run_suites and not run_tests:
            print(UNKNOWN_TEST)
            return 1

        # Create shared logger
        format_args = {'level': self._mach_context.settings['test']['level']}
        if not run_suites and len(run_tests) == 1:
            format_args['verbose'] = True
            format_args['compact'] = False

        default_format = self._mach_context.settings['test']['format']
        log = setup_logging('mach-test', log_args, {default_format: sys.stdout}, format_args)
        for handler in log.handlers:
            if isinstance(handler, StreamHandler):
                handler.formatter.inner.summary_on_shutdown = True

        status = None
        for suite_name in run_suites:
            suite = TEST_SUITES[suite_name]
            kwargs = suite['kwargs']
            kwargs['log'] = log

            if 'mach_command' in suite:
                res = self._mach_context.commands.dispatch(
                    suite['mach_command'], self._mach_context,
                    argv=extra_args, **kwargs)
                if res:
                    status = res

        buckets = {}
        for test in run_tests:
            key = (test['flavor'], test.get('subsuite', ''))
            buckets.setdefault(key, []).append(test)

        for (flavor, subsuite), tests in sorted(buckets.items()):
            m = get_suite_definition(flavor, subsuite)
            if 'mach_command' not in m:
                substr = '-{}'.format(subsuite) if subsuite else ''
                print(UNKNOWN_FLAVOR % (flavor, substr))
                status = 1
                continue

            kwargs = dict(m['kwargs'])
            kwargs['log'] = log

            res = self._mach_context.commands.dispatch(
                m['mach_command'], self._mach_context,
                argv=extra_args, test_objects=tests, **kwargs)
            if res:
                status = res

        log.shutdown()
        return status
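
# Illustrative invocations (examples added for clarity, not from the original
# file); any path, directory, suite name, or TreeHerder alias known to the
# test resolver should work, e.g.:
#   ./mach test dom/indexedDB
#   ./mach test mochitest-browser-chrome
# The directory above is only a placeholder; substitute a real test path.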


@CommandProvider
class MachCommands(MachCommandBase):
    @Command('cppunittest', category='testing',
             description='Run cpp unit tests (C++ tests).')
    @CommandArgument('test_files', nargs='*', metavar='N',
                     help='Test to run. Can be specified as one or more files or '
                          'directories, or omitted. If omitted, the entire test suite is '
                          'executed.')
    def run_cppunit_test(self, **params):
        from mozlog import commandline

        log = params.get('log')
        if not log:
            log = commandline.setup_logging("cppunittest",
                                            {},
                                            {"tbpl": sys.stdout})

        # See if we have crash symbols
        symbols_path = os.path.join(self.distdir, 'crashreporter-symbols')
        if not os.path.isdir(symbols_path):
            symbols_path = None

        # If no tests specified, run all tests in main manifest
        tests = params['test_files']
        if len(tests) == 0:
            tests = [os.path.join(self.distdir, 'cppunittests')]
            manifest_path = os.path.join(
                self.topsrcdir, 'testing', 'cppunittest.ini')
        else:
            manifest_path = None

        if conditions.is_android(self):
            from mozrunner.devices.android_device import verify_android_device
            verify_android_device(self, install=False)
            return self.run_android_test(tests, symbols_path, manifest_path, log)

        return self.run_desktop_test(tests, symbols_path, manifest_path, log)
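
    # Illustrative usage (examples added for clarity, not from the original file):
    #   ./mach cppunittest                    # run the full suite from the manifest
    #   ./mach cppunittest TestSomething      # run specific files or directories
    # "TestSomething" above is just a placeholder test binary name.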

    def run_desktop_test(self, tests, symbols_path, manifest_path, log):
        import runcppunittests as cppunittests
        from mozlog import commandline

        parser = cppunittests.CPPUnittestOptions()
        commandline.add_logging_group(parser)
        options, args = parser.parse_args()

        options.symbols_path = symbols_path
        options.manifest_path = manifest_path
        options.xre_path = self.bindir

        try:
            result = cppunittests.run_test_harness(options, tests)
        except Exception as e:
            log.error("Caught exception running cpp unit tests: %s" % str(e))
            result = False
            raise

        return 0 if result else 1

    def run_android_test(self, tests, symbols_path, manifest_path, log):
        import remotecppunittests as remotecppunittests
        from mozlog import commandline

        parser = remotecppunittests.RemoteCPPUnittestOptions()
        commandline.add_logging_group(parser)
        options, args = parser.parse_args()

        if not options.adb_path:
            from mozrunner.devices.android_device import get_adb_path
            options.adb_path = get_adb_path(self)
        options.symbols_path = symbols_path
        options.manifest_path = manifest_path
        options.xre_path = self.bindir
        options.local_lib = self.bindir.replace('bin', 'fennec')
        for file in os.listdir(os.path.join(self.topobjdir, "dist")):
            if file.endswith(".apk") and file.startswith("fennec"):
                options.local_apk = os.path.join(self.topobjdir, "dist", file)
                log.info("using APK: " + options.local_apk)
                break

        try:
            result = remotecppunittests.run_test_harness(options, tests)
        except Exception as e:
            log.error("Caught exception running cpp unit tests: %s" % str(e))
            result = False
            raise

        return 0 if result else 1


def executable_name(name):
    return name + '.exe' if sys.platform.startswith('win') else name


@CommandProvider
class CheckSpiderMonkeyCommand(MachCommandBase):
    @Command('jstests', category='testing',
             description='Run SpiderMonkey JS tests in the JavaScript shell.')
    @CommandArgument('--shell', help='The shell to be used')
    @CommandArgument('params', nargs=argparse.REMAINDER,
                     help="Extra arguments to pass down to the test harness.")
    def run_jstests(self, shell, params):
        import subprocess

        self.virtualenv_manager.ensure()
        python = self.virtualenv_manager.python_path

        js = shell or os.path.join(self.bindir, executable_name('js'))
        jstest_cmd = [
            python,
            os.path.join(self.topsrcdir, 'js', 'src', 'tests', 'jstests.py'),
            js,
            '--jitflags=all',
        ] + params
        return subprocess.call(jstest_cmd)
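
    # Illustrative usage (examples added for clarity, not from the original file):
    #   ./mach jstests                       # use the locally built JS shell
    #   ./mach jstests --shell /path/to/js   # point at a different shell binary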

    @Command('check-spidermonkey', category='testing',
             description='Run SpiderMonkey tests (JavaScript engine).')
    @CommandArgument('--valgrind', action='store_true',
                     help='Run jit-test suite with valgrind flag')
    def run_checkspidermonkey(self, **params):
        import subprocess

        self.virtualenv_manager.ensure()
        python = self.virtualenv_manager.python_path

        js = os.path.join(self.bindir, executable_name('js'))

        print('Running jit-tests')
        jittest_cmd = [
            python,
            os.path.join(self.topsrcdir, 'js', 'src',
                         'jit-test', 'jit_test.py'),
            js,
            '--no-slow',
            '--jitflags=all',
        ]
        if params['valgrind']:
            jittest_cmd.append('--valgrind')

        jittest_result = subprocess.call(jittest_cmd)

        print('running jstests')
        jstest_result = self.run_jstests(js, [])

        print('running jsapi-tests')
        jsapi_tests_cmd = [os.path.join(
            self.bindir, executable_name('jsapi-tests'))]
        jsapi_tests_result = subprocess.call(jsapi_tests_cmd)

        print('running check-js-msg-encoding')
        check_js_msg_cmd = [python, os.path.join(
            self.topsrcdir, 'config', 'check_js_msg_encoding.py')]
        check_js_msg_result = subprocess.call(
            check_js_msg_cmd, cwd=self.topsrcdir)

        all_passed = jittest_result and jstest_result and jsapi_tests_result and \
            check_js_msg_result

        return all_passed


def has_js_binary(binary):
    def has_binary(cls):
        try:
            name = binary + cls.substs['BIN_SUFFIX']
        except BuildEnvironmentNotFoundException:
            return False

        path = os.path.join(cls.topobjdir, 'dist', 'bin', name)

        has_binary.__doc__ = """
`{}` not found in <objdir>/dist/bin. Make sure you aren't using an artifact build
and try rebuilding with `ac_add_options --enable-js-shell`.
""".format(name).lstrip()

        return os.path.isfile(path)
    return has_binary
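
# Note (added for clarity): mach surfaces a condition function's __doc__ when a
# command's condition evaluates to False, which is why has_binary.__doc__ above
# is set to a human-readable "not found" message. It is used below as, e.g.,
#   conditions=[has_js_binary('jsapi-tests')]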


@CommandProvider
class JsapiTestsCommand(MachCommandBase):
    @Command('jsapi-tests', category='testing',
             conditions=[has_js_binary('jsapi-tests')],
             description='Run jsapi tests (JavaScript engine).')
    @CommandArgument('test_name', nargs='?', metavar='N',
                     help='Test to run. Can be a prefix or omitted. If omitted, the entire '
                          'test suite is executed.')
    def run_jsapitests(self, **params):
        import subprocess

        print('running jsapi-tests')
        jsapi_tests_cmd = [os.path.join(
            self.bindir, executable_name('jsapi-tests'))]
        if params['test_name']:
            jsapi_tests_cmd.append(params['test_name'])

        jsapi_tests_result = subprocess.call(jsapi_tests_cmd)

        return jsapi_tests_result
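
# Illustrative usage (examples added for clarity, not from the original file):
#   ./mach jsapi-tests             # run the full jsapi-tests binary
#   ./mach jsapi-tests testParse   # run tests whose names start with the prefix
# "testParse" above is only an example prefix.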


def get_jsshell_parser():
    from jsshell.benchmark import get_parser
    return get_parser()


@CommandProvider
class JsShellTests(MachCommandBase):
    @Command('jsshell-bench', category='testing',
             parser=get_jsshell_parser,
             description="Run benchmarks in the SpiderMonkey JS shell.")
    def run_jsshelltests(self, **kwargs):
        self._activate_virtualenv()
        from jsshell import benchmark
        return benchmark.run(**kwargs)


@CommandProvider
class CramTest(MachCommandBase):
    @Command('cramtest', category='testing',
             description="Mercurial style .t tests for command line applications.")
    @CommandArgument('test_paths', nargs='*', metavar='N',
                     help="Test paths to run. Each path can be a test file or directory. "
                          "If omitted, the entire suite will be run.")
    @CommandArgument('cram_args', nargs=argparse.REMAINDER,
                     help="Extra arguments to pass down to the cram binary. See "
                          "'./mach python -m cram -- -h' for a list of available options.")
    def cramtest(self, cram_args=None, test_paths=None, test_objects=None):
        self._activate_virtualenv()
        import mozinfo
        from manifestparser import TestManifest

        if test_objects is None:
            from moztest.resolve import TestResolver
            resolver = self._spawn(TestResolver)
            if test_paths:
                # If we were given test paths, try to find tests matching them.
                test_objects = resolver.resolve_tests(paths=test_paths, flavor='cram')
            else:
                # Otherwise just run everything in CRAMTEST_MANIFESTS
                test_objects = resolver.resolve_tests(flavor='cram')

        if not test_objects:
            message = 'No tests were collected, check spelling of the test paths.'
            self.log(logging.WARN, 'cramtest', {}, message)
            return 1

        mp = TestManifest()
        mp.tests.extend(test_objects)
        tests = mp.active_tests(disabled=False, **mozinfo.info)

        python = self.virtualenv_manager.python_path
        cmd = [python, '-m', 'cram'] + cram_args + [t['relpath'] for t in tests]
        return subprocess.call(cmd, cwd=self.topsrcdir)
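
# Illustrative usage (example added for clarity, not from the original file);
# the path below is a placeholder for any directory with a CRAMTEST_MANIFESTS
# entry, and extra cram options can follow as described in the cram_args help:
#   ./mach cramtest path/to/cram/tests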


def get_parser(argv=None):
    parser = ArgumentParser()
    parser.add_argument(dest="suite_name",
                        nargs=1,
                        choices=['mochitest'],
                        type=str,
                        help="The test for which chunk should be found. It corresponds "
                             "to the mach test invoked (only 'mochitest' currently).")

    parser.add_argument(dest="test_path",
                        nargs=1,
                        type=str,
                        help="The test (any mochitest) for which chunk should be found.")

    parser.add_argument('--total-chunks',
                        type=int,
                        dest='total_chunks',
                        required=True,
                        help='Total number of chunks to split tests into.',
                        default=None)

    parser.add_argument('--chunk-by-runtime',
                        action='store_true',
                        dest='chunk_by_runtime',
                        help='Group tests such that each chunk has roughly the same runtime.',
                        default=False)

    parser.add_argument('--chunk-by-dir',
                        type=int,
                        dest='chunk_by_dir',
                        help='Group tests together in the same chunk that are in the same top '
                             'chunkByDir directories.',
                        default=None)

    parser.add_argument('--disable-e10s',
                        action='store_false',
                        dest='e10s',
                        help='Find test on chunk with electrolysis preferences disabled.',
                        default=True)

    parser.add_argument('-p', '--platform',
                        choices=['linux', 'linux64', 'mac',
                                 'macosx64', 'win32', 'win64'],
                        dest='platform',
                        help="Platform for the chunk to find the test.",
                        default=None)

    parser.add_argument('--debug',
                        action='store_true',
                        dest='debug',
                        help="Find the test on chunk in a debug build.",
                        default=False)

    return parser


def download_mozinfo(platform=None, debug_build=False):
    temp_dir = tempfile.mkdtemp()
    temp_path = os.path.join(temp_dir, "mozinfo.json")
    args = [
        'mozdownload',
        '-t', 'tinderbox',
        '--ext', 'mozinfo.json',
        '-d', temp_path,
    ]
    if platform:
        if platform == 'macosx64':
            platform = 'mac64'
        args.extend(['-p', platform])
    if debug_build:
        args.extend(['--debug-build'])
    subprocess.call(args)
    return temp_dir, temp_path


@CommandProvider
class ChunkFinder(MachCommandBase):
    @Command('find-test-chunk', category='testing',
             description='Find which chunk a test belongs to (works for mochitest).',
             parser=get_parser)
    def chunk_finder(self, **kwargs):
        total_chunks = kwargs['total_chunks']
        test_path = kwargs['test_path'][0]
        suite_name = kwargs['suite_name'][0]
        _, dump_tests = tempfile.mkstemp()

        from moztest.resolve import TestResolver
        resolver = self._spawn(TestResolver)
        relpath = self._wrap_path_argument(test_path).relpath()
        tests = list(resolver.resolve_tests(paths=[relpath]))
        if len(tests) != 1:
            print('No test found for test_path: %s' % test_path)
            sys.exit(1)

        flavor = tests[0]['flavor']
        subsuite = tests[0]['subsuite']
        args = {
            'totalChunks': total_chunks,
            'dump_tests': dump_tests,
            'chunkByDir': kwargs['chunk_by_dir'],
            'chunkByRuntime': kwargs['chunk_by_runtime'],
            'e10s': kwargs['e10s'],
            'subsuite': subsuite,
        }

        temp_dir = None
        if kwargs['platform'] or kwargs['debug']:
            self._activate_virtualenv()
            self.virtualenv_manager.install_pip_package('mozdownload==1.17')
            temp_dir, temp_path = download_mozinfo(
                kwargs['platform'], kwargs['debug'])
            args['extra_mozinfo_json'] = temp_path

        found = False
        for this_chunk in range(1, total_chunks + 1):
            args['thisChunk'] = this_chunk
            try:
                self._mach_context.commands.dispatch(
                    suite_name, self._mach_context, flavor=flavor, resolve_tests=False, **args)
            except SystemExit:
                pass
            except KeyboardInterrupt:
                break

            fp = open(os.path.expanduser(args['dump_tests']), 'r')
            tests = json.loads(fp.read())['active_tests']
            for test in tests:
                if test_path == test['path']:
                    if 'disabled' in test:
                        print('The test %s for flavor %s is disabled on the given platform' % (
                            test_path, flavor))
                    else:
                        print('The test %s for flavor %s is present in chunk number: %d' % (
                            test_path, flavor, this_chunk))
                    found = True
                    break

            if found:
                break

        if not found:
            raise Exception("Test %s not found." % test_path)
        # Clean up the file
        os.remove(dump_tests)
        if temp_dir:
            shutil.rmtree(temp_dir)
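
# Illustrative usage (example added for clarity, not from the original file);
# the test path below is a placeholder for any mochitest in the tree:
#   ./mach find-test-chunk mochitest dom/base/test/test_example.html --total-chunks 10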


@CommandProvider
class TestInfoCommand(MachCommandBase):
    from datetime import date, timedelta

    @Command('test-info', category='testing',
             description='Display historical test result summary.')
    @CommandArgument('test_names', nargs=argparse.REMAINDER,
                     help='Test(s) of interest.')
    @CommandArgument('--branches',
                     default='mozilla-central,mozilla-inbound,autoland',
                     help='Report for named branches '
                          '(default: mozilla-central,mozilla-inbound,autoland)')
    @CommandArgument('--start',
                     default=(date.today() - timedelta(7)
                              ).strftime("%Y-%m-%d"),
                     help='Start date (YYYY-MM-DD)')
    @CommandArgument('--end',
                     default=date.today().strftime("%Y-%m-%d"),
                     help='End date (YYYY-MM-DD)')
    @CommandArgument('--show-info', action='store_true',
                     help='Retrieve and display general test information.')
    @CommandArgument('--show-results', action='store_true',
                     help='Retrieve and display ActiveData test result summary.')
    @CommandArgument('--show-durations', action='store_true',
                     help='Retrieve and display ActiveData test duration summary.')
    @CommandArgument('--show-tasks', action='store_true',
                     help='Retrieve and display ActiveData test task names.')
    @CommandArgument('--show-bugs', action='store_true',
                     help='Retrieve and display related Bugzilla bugs.')
    @CommandArgument('--verbose', action='store_true',
                     help='Enable debug logging.')
    def test_info(self, **params):

        import which
        from mozbuild.base import MozbuildObject

        self.branches = params['branches']
        self.start = params['start']
        self.end = params['end']
        self.show_info = params['show_info']
        self.show_results = params['show_results']
        self.show_durations = params['show_durations']
        self.show_tasks = params['show_tasks']
        self.show_bugs = params['show_bugs']
        self.verbose = params['verbose']

        if (not self.show_info and
                not self.show_results and
                not self.show_durations and
                not self.show_tasks and
                not self.show_bugs):
            # by default, show everything
            self.show_info = True
            self.show_results = True
            self.show_durations = True
            self.show_tasks = True
            self.show_bugs = True

        here = os.path.abspath(os.path.dirname(__file__))
        build_obj = MozbuildObject.from_environment(cwd=here)

        self._hg = None
        if conditions.is_hg(build_obj):
            if self._is_windows():
                self._hg = which.which('hg.exe')
            else:
                self._hg = which.which('hg')

        self._git = None
        if conditions.is_git(build_obj):
            if self._is_windows():
                self._git = which.which('git.exe')
            else:
                self._git = which.which('git')

        for test_name in params['test_names']:
            print("===== %s =====" % test_name)
            self.test_name = test_name
            if len(self.test_name) < 6:
                print("'%s' is too short for a test name!" % self.test_name)
                continue
            self.set_test_name()
            if self.show_results:
                self.report_test_results()
            if self.show_durations:
                self.report_test_durations()
            if self.show_tasks:
                self.report_test_tasks()
            if self.show_bugs:
                self.report_bugs()
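
    # Illustrative usage (examples added for clarity, not from the original
    # file); the test names below are placeholders:
    #   ./mach test-info browser_example_test.js --start 2018-08-01 --end 2018-08-08
    #   ./mach test-info dom/base/test/test_example.html --show-results --show-bugs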

    def find_in_hg_or_git(self, test_name):
        if self._hg:
            cmd = [self._hg, 'files', '-I', test_name]
        elif self._git:
            cmd = [self._git, 'ls-files', test_name]
        else:
            return None
        try:
            out = subprocess.check_output(cmd).splitlines()
        except subprocess.CalledProcessError:
            out = None
        return out

    def set_test_name(self):
        # Generating a unified report for a specific test is complicated
        # by differences in the test name used in various data sources.
        # Consider:
        # - It is often convenient to request a report based only on
        #   a short file name, rather than the full path;
        # - Bugs may be filed in bugzilla against a simple, short test
        #   name or the full path to the test;
        # - In ActiveData, the full path is usually used, but sometimes
        #   also includes additional path components outside of the
        #   mercurial repo (common for reftests).
        # This function attempts to find appropriate names for different
        # queries based on the specified test name.

        import posixpath
        import re

        # full_test_name is full path to file in hg (or git)
        self.full_test_name = None
        out = self.find_in_hg_or_git(self.test_name)
        if out and len(out) == 1:
            self.full_test_name = out[0]
        elif out and len(out) > 1:
            print("Ambiguous test name specified. Found:")
            for line in out:
                print(line)
        else:
            out = self.find_in_hg_or_git('**/%s*' % self.test_name)
            if out and len(out) == 1:
                self.full_test_name = out[0]
            elif out and len(out) > 1:
                print("Ambiguous test name. Found:")
                for line in out:
                    print(line)
        if self.full_test_name:
            self.full_test_name = self.full_test_name.replace(os.sep, posixpath.sep)
            print("Found %s in source control." % self.full_test_name)
        else:
            print("Unable to validate test name '%s'!" % self.test_name)
            self.full_test_name = self.test_name

        # search for full_test_name in test manifests
        from moztest.resolve import TestResolver
        resolver = self._spawn(TestResolver)
        relpath = self._wrap_path_argument(self.full_test_name).relpath()
        tests = list(resolver.resolve_tests(paths=[relpath]))
        if len(tests) == 1:
            relpath = self._wrap_path_argument(tests[0]['manifest']).relpath()
            print("%s found in manifest %s" % (self.full_test_name, relpath))
            if tests[0].get('flavor'):
                print(" flavor: %s" % tests[0]['flavor'])
            if tests[0].get('skip-if'):
                print(" skip-if: %s" % tests[0]['skip-if'])
            if tests[0].get('fail-if'):
                print(" fail-if: %s" % tests[0]['fail-if'])
        elif len(tests) == 0:
            print("%s not found in any test manifest!" % self.full_test_name)
        else:
            print("%s found in more than one manifest!" % self.full_test_name)

        # short_name is full_test_name without path
        self.short_name = None
        name_idx = self.full_test_name.rfind('/')
        if name_idx > 0:
            self.short_name = self.full_test_name[name_idx + 1:]

        # robo_name is short_name without ".java" - for robocop
        self.robo_name = None
        if self.short_name:
            robo_idx = self.short_name.rfind('.java')
            if robo_idx > 0:
                self.robo_name = self.short_name[:robo_idx]
            if self.short_name == self.test_name:
                self.short_name = None

        if not (self.show_results or self.show_durations or self.show_tasks):
            # no need to determine ActiveData name if not querying
            return

        # activedata_test_name is name in ActiveData
        self.activedata_test_name = None
        simple_names = [
            self.full_test_name,
            self.test_name,
            self.short_name,
            self.robo_name
        ]
        simple_names = [x for x in simple_names if x]
        searches = [
            {"in": {"result.test": simple_names}},
        ]
        regex_names = [".*%s.*" % re.escape(x) for x in simple_names if x]
        for r in regex_names:
            searches.append({"regexp": {"result.test": r}})
        query = {
            "from": "unittest",
            "format": "list",
            "limit": 10,
            "groupby": ["result.test"],
            "where": {"and": [
                {"or": searches},
                {"in": {"build.branch": self.branches.split(',')}},
                {"gt": {"run.timestamp": {"date": self.start}}},
                {"lt": {"run.timestamp": {"date": self.end}}}
            ]}
        }
        print("Querying ActiveData...")  # Following query can take a long time
        data = self.submit(query)
        if data and len(data) > 0:
            self.activedata_test_name = [
                d['result']['test']
                for p in simple_names + regex_names
                for d in data
                if re.match(p + "$", d['result']['test'])
            ][0]  # first match is best match
        if self.activedata_test_name:
            print("Found records matching '%s' in ActiveData." %
                  self.activedata_test_name)
        else:
            print("Unable to find matching records in ActiveData; using %s!" %
                  self.test_name)
            self.activedata_test_name = self.test_name

    def get_platform(self, record):
        platform = record['build']['platform']
        type = record['build']['type']
        if 'run' in record and 'type' in record['run'] and 'e10s' in record['run']['type']:
            e10s = "-e10s"
        else:
            e10s = ""
        return "%s/%s%s:" % (platform, type, e10s)

    def submit(self, query):
        import requests
        import datetime
        if self.verbose:
            print(datetime.datetime.now())
            print(json.dumps(query))
        response = requests.post("http://activedata.allizom.org/query",
                                 data=json.dumps(query),
                                 stream=True)
        if self.verbose:
            print(datetime.datetime.now())
            print(response)
        response.raise_for_status()
        data = response.json()["data"]
        return data

    def report_test_results(self):
        # Report test pass/fail summary from ActiveData
        query = {
            "from": "unittest",
            "format": "list",
            "limit": 100,
            "groupby": ["build.platform", "build.type", "run.type"],
            "select": [
                {"aggregate": "count"},
                {
                    "name": "failures",
                    "value": {"case": [
                        {"when": {"eq": {"result.ok": "F"}}, "then": 1}
                    ]},
                    "aggregate": "sum",
                    "default": 0
                }
            ],
            "where": {"and": [
                {"eq": {"result.test": self.activedata_test_name}},
                {"in": {"build.branch": self.branches.split(',')}},
                {"gt": {"run.timestamp": {"date": self.start}}},
                {"lt": {"run.timestamp": {"date": self.end}}}
            ]}
        }
        print("\nTest results for %s on %s between %s and %s" %
              (self.activedata_test_name, self.branches, self.start, self.end))
        data = self.submit(query)
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            worst_rate = 0.0
            worst_platform = None
            total_runs = 0
            total_failures = 0
            for record in data:
                platform = self.get_platform(record)
                runs = record['count']
                total_runs = total_runs + runs
                failures = record['failures']
                total_failures = total_failures + failures
                rate = (float)(failures) / runs
                if rate >= worst_rate:
                    worst_rate = rate
                    worst_platform = platform
                    worst_failures = failures
                    worst_runs = runs
                print("%-40s %6d failures in %6d runs" % (
                    platform, failures, runs))
            print("\nTotal: %d failures in %d runs or %.3f failures/run" %
                  (total_failures, total_runs, (float)(total_failures) / total_runs))
            if worst_failures > 0:
                print("Worst rate on %s %d failures in %d runs or %.3f failures/run" %
                      (worst_platform, worst_failures, worst_runs, worst_rate))
        else:
            print("No test result data found.")

    def report_test_durations(self):
        # Report test durations summary from ActiveData
        query = {
            "from": "unittest",
            "format": "list",
            "limit": 100,
            "groupby": ["build.platform", "build.type", "run.type"],
            "select": [
                {"value": "result.duration",
                 "aggregate": "average", "name": "average"},
                {"value": "result.duration", "aggregate": "min", "name": "min"},
                {"value": "result.duration", "aggregate": "max", "name": "max"},
                {"aggregate": "count"}
            ],
            "where": {"and": [
                {"eq": {"result.ok": "T"}},
                {"eq": {"result.test": self.activedata_test_name}},
                {"in": {"build.branch": self.branches.split(',')}},
                {"gt": {"run.timestamp": {"date": self.start}}},
                {"lt": {"run.timestamp": {"date": self.end}}}
            ]}
        }
        data = self.submit(query)
        print("\nTest durations for %s on %s between %s and %s" %
              (self.activedata_test_name, self.branches, self.start, self.end))
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            for record in data:
                platform = self.get_platform(record)
                print("%-40s %6.2f s (%.2f s - %.2f s over %d runs)" % (
                    platform, record['average'], record['min'],
                    record['max'], record['count']))
        else:
            print("No test durations found.")

    def report_test_tasks(self):
        # Report test tasks summary from ActiveData
        query = {
            "from": "unittest",
            "format": "list",
            "limit": 1000,
            "select": ["build.platform", "build.type", "run.type", "run.name"],
            "where": {"and": [
                {"eq": {"result.test": self.activedata_test_name}},
                {"in": {"build.branch": self.branches.split(',')}},
                {"gt": {"run.timestamp": {"date": self.start}}},
                {"lt": {"run.timestamp": {"date": self.end}}}
            ]}
        }
        data = self.submit(query)
        print("\nTest tasks for %s on %s between %s and %s" %
              (self.activedata_test_name, self.branches, self.start, self.end))
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            consolidated = {}
            for record in data:
                platform = self.get_platform(record)
                if platform not in consolidated:
                    consolidated[platform] = {}
                if record['run']['name'] in consolidated[platform]:
                    consolidated[platform][record['run']['name']] += 1
                else:
                    consolidated[platform][record['run']['name']] = 1
            for key in sorted(consolidated.keys()):
                tasks = ""
                for task in consolidated[key].keys():
                    if tasks:
                        tasks += "\n%-40s " % ""
                    tasks += task
                    tasks += " in %d runs" % consolidated[key][task]
                print("%-40s %s" % (key, tasks))
        else:
            print("No test tasks found.")

    def report_bugs(self):
        # Report open bugs matching test name
        import requests
        search = self.full_test_name
        if self.test_name:
            search = '%s,%s' % (search, self.test_name)
        if self.short_name:
            search = '%s,%s' % (search, self.short_name)
        if self.robo_name:
            search = '%s,%s' % (search, self.robo_name)
        payload = {'quicksearch': search,
                   'include_fields': 'id,summary'}
        response = requests.get('https://bugzilla.mozilla.org/rest/bug',
                                payload)
        response.raise_for_status()
        json_response = response.json()
        print("\nBugzilla quick search for '%s':" % search)
        if 'bugs' in json_response:
            for bug in json_response['bugs']:
                print("Bug %s: %s" % (bug['id'], bug['summary']))
        else:
            print("No bugs found.")

    @SubCommand('test-info', 'long-tasks',
                description='Find tasks approaching their taskcluster max-run-time.')
    @CommandArgument('--branches',
                     default='mozilla-central,mozilla-inbound,autoland',
                     help='Report for named branches '
                          '(default: mozilla-central,mozilla-inbound,autoland)')
    @CommandArgument('--start',
                     default=(date.today() - timedelta(7)
                              ).strftime("%Y-%m-%d"),
                     help='Start date (YYYY-MM-DD)')
    @CommandArgument('--end',
                     default=date.today().strftime("%Y-%m-%d"),
                     help='End date (YYYY-MM-DD)')
    @CommandArgument('--max-threshold-pct',
                     default=90.0,
                     help='Count tasks exceeding this percentage of max-run-time.')
    @CommandArgument('--filter-threshold-pct',
                     default=0.5,
                     help='Report tasks exceeding this percentage of long tasks.')
    @CommandArgument('--verbose', action='store_true',
                     help='Enable debug logging.')
    def report_long_running_tasks(self, **params):
        def get_long_running_ratio(record):
            count = record['count']
            tasks_gt_pct = record['tasks_gt_pct']
            return count / tasks_gt_pct

        branches = params['branches']
        start = params['start']
        end = params['end']
        self.verbose = params['verbose']
        threshold_pct = float(params['max_threshold_pct'])
        filter_threshold_pct = float(params['filter_threshold_pct'])

        # Search test durations in ActiveData for long-running tests
        query = {
            "from": "task",
            "format": "list",
            "groupby": ["run.name"],
            "limit": 1000,
            "select": [
                {
                    "value": "task.maxRunTime",
                    "aggregate": "median",
                    "name": "max_run_time"
                },
                {
                    "aggregate": "count"
                },
                {
                    "value": {
                        "when": {
                            "gt": [
                                {
                                    "div": ["action.duration", "task.maxRunTime"]
                                }, threshold_pct/100.0
                            ]
                        },
                        "then": 1
                    },
                    "aggregate": "sum",
                    "name": "tasks_gt_pct"
                },
            ],
            "where": {"and": [
                {"in": {"build.branch": branches.split(',')}},
                {"gt": {"task.run.start_time": {"date": start}}},
                {"lte": {"task.run.start_time": {"date": end}}},
                {"eq": {"state": "completed"}},
            ]}
        }
        data = self.submit(query)
        print("\nTasks nearing their max-run-time on %s between %s and %s" %
              (branches, start, end))
        if data and len(data) > 0:
            filtered = []
            for record in data:
                if 'tasks_gt_pct' in record:
                    count = record['count']
                    tasks_gt_pct = record['tasks_gt_pct']
                    if tasks_gt_pct / count > filter_threshold_pct / 100.0:
                        filtered.append(record)
            filtered.sort(key=get_long_running_ratio)
            if not filtered:
                print("No long running tasks found.")
            for record in filtered:
                name = record['run']['name']
                count = record['count']
                max_run_time = record['max_run_time']
                tasks_gt_pct = record['tasks_gt_pct']
                print("%-55s: %d of %d runs (%.1f%%) exceeded %d%% of max-run-time (%d s)" %
                      (name, tasks_gt_pct, count, tasks_gt_pct * 100 / count,
                       threshold_pct, max_run_time))
        else:
            print("No tasks found.")
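
# Illustrative usage (example added for clarity, not from the original file):
#   ./mach test-info long-tasks --branches autoland --max-threshold-pct 90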