# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import absolute_import, print_function, unicode_literals

import argparse
import errno
import json
import logging
import os
import sys
import subprocess

from mach.decorators import (
    CommandArgument,
    CommandProvider,
    Command,
    SettingsProvider,
    SubCommand,
)

from mozbuild.base import (
    BuildEnvironmentNotFoundException,
    MachCommandBase,
    MachCommandConditions as conditions,
)


UNKNOWN_TEST = '''
I was unable to find tests from the given argument(s).

You should specify a test directory, filename, test suite name, or
abbreviation. If no arguments are given, there must be local file
changes and corresponding IMPACTED_TESTS annotations in moz.build
files relevant to those files.

It's possible my little brain doesn't know about the type of test you are
trying to execute. If you suspect this, please request support by filing
a bug at
https://bugzilla.mozilla.org/enter_bug.cgi?product=Testing&component=General.
'''.strip()

UNKNOWN_FLAVOR = '''
I know you are trying to run a %s%s test. Unfortunately, I can't run those
tests yet. Sorry!
'''.strip()

TEST_HELP = '''
Test or tests to run. Tests can be specified by filename, directory, suite
name or suite alias.

The following test suites and aliases are supported: {}
'''.strip()


@SettingsProvider
class TestConfig(object):

    @classmethod
    def config_settings(cls):
        from mozlog.commandline import log_formatters
        from mozlog.structuredlog import log_levels
        format_desc = "The default format to use when running tests with `mach test`."
        format_choices = log_formatters.keys()
        level_desc = "The default log level to use when running tests with `mach test`."
        level_choices = [l.lower() for l in log_levels]
        return [
            ('test.format', 'string', format_desc, 'mach', {'choices': format_choices}),
            ('test.level', 'string', level_desc, 'info', {'choices': level_choices}),
        ]


def get_test_parser():
    from mozlog.commandline import add_logging_group
    from moztest.resolve import TEST_SUITES
    parser = argparse.ArgumentParser()
    parser.add_argument('what', default=None, nargs='+',
                        help=TEST_HELP.format(', '.join(sorted(TEST_SUITES))))
    parser.add_argument('extra_args', default=None, nargs=argparse.REMAINDER,
                        help="Extra arguments to pass to the underlying test command(s). "
                             "If an underlying command doesn't recognize the argument, it "
                             "will fail.")
    parser.add_argument('--debugger', default=None, action='store',
                        nargs='?', help="Specify a debugger to use.")
    add_logging_group(parser)
    return parser
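

# Illustrative sketch (not part of the mach command set in this file): the parser
# built by get_test_parser() combines a positional `what` with an
# argparse.REMAINDER argument, so test selectors are consumed first and anything
# after them is handed through to the underlying harness. A minimal standalone
# equivalent, with hypothetical names:
def _example_selector_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument('what', nargs='+',
                        help='Test selectors (files, directories, suite names).')
    parser.add_argument('harness_args', nargs=argparse.REMAINDER,
                        help='Everything after the selectors, forwarded untouched.')
    return parser


# For example, _example_selector_parser().parse_args(['dom/indexedDB']) puts the
# selector in `what` and leaves `harness_args` empty.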


ADD_TEST_SUPPORTED_SUITES = ['mochitest-chrome', 'mochitest-plain', 'mochitest-browser-chrome',
                             'web-platform-tests-testharness', 'web-platform-tests-reftest',
                             'xpcshell']
ADD_TEST_SUPPORTED_DOCS = ['js', 'html', 'xhtml', 'xul']

SUITE_SYNONYMS = {
    "wpt": "web-platform-tests-testharness",
    "wpt-testharness": "web-platform-tests-testharness",
    "wpt-reftest": "web-platform-tests-reftest"
}

MISSING_ARG = object()
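

# Illustrative note: MISSING_ARG is a sentinel object used to tell "--editor was
# not passed at all" apart from "--editor was passed without a value" (argparse
# stores None in the latter case). The same pattern in isolation, with
# hypothetical names:
_EXAMPLE_MISSING = object()


def _example_sentinel_default(flag=_EXAMPLE_MISSING):
    if flag is _EXAMPLE_MISSING:
        return "flag not supplied"
    if flag is None:
        return "flag supplied without a value"
    return "flag supplied with value: %s" % flag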


def create_parser_addtest():
    import addtest
    parser = argparse.ArgumentParser()
    parser.add_argument('--suite',
                        choices=sorted(ADD_TEST_SUPPORTED_SUITES + list(SUITE_SYNONYMS.keys())),
                        help='Suite for the test. '
                             'If you pass a `test` argument this will be determined '
                             'based on the filename and the folder it is in.')
    parser.add_argument('-o', '--overwrite',
                        action='store_true',
                        help='Overwrite an existing file if it exists.')
    parser.add_argument('--doc',
                        choices=ADD_TEST_SUPPORTED_DOCS,
                        help='Document type for the test (if applicable). '
                             'If you pass a `test` argument this will be determined '
                             'based on the filename.')
    parser.add_argument("-e", "--editor", action="store", nargs="?",
                        default=MISSING_ARG, help="Open the created file(s) in an editor; if a "
                        "binary is supplied it will be used otherwise the default editor for "
                        "your environment will be opened")

    for base_suite in addtest.TEST_CREATORS:
        cls = addtest.TEST_CREATORS[base_suite]
        if hasattr(cls, "get_parser"):
            group = parser.add_argument_group(base_suite)
            cls.get_parser(group)

    parser.add_argument('test',
                        nargs='?',
                        help=('Test to create.'))
    return parser
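

# Illustrative sketch: create_parser_addtest() lets each test creator class
# contribute its own options through parser.add_argument_group(). The same shape,
# reduced to a self-contained example with a hypothetical creator class:
def _example_grouped_parser():
    class _ExampleCreator(object):
        @classmethod
        def get_parser(cls, group):
            group.add_argument('--long-timeout', action='store_true',
                               help='Mark the generated test as long-running.')

    creators = {'example-suite': _ExampleCreator}
    parser = argparse.ArgumentParser()
    for name, cls in creators.items():
        if hasattr(cls, 'get_parser'):
            cls.get_parser(parser.add_argument_group(name))
    return parser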


@CommandProvider
class AddTest(MachCommandBase):
    @Command('addtest', category='testing',
             description='Generate tests based on templates',
             parser=create_parser_addtest)
    def addtest(self, suite=None, test=None, doc=None, overwrite=False,
                editor=MISSING_ARG, **kwargs):
        import addtest
        from moztest.resolve import TEST_SUITES

        if not suite and not test:
            return create_parser_addtest().parse_args(["--help"])

        if suite in SUITE_SYNONYMS:
            suite = SUITE_SYNONYMS[suite]

        if test:
            if not overwrite and os.path.isfile(os.path.abspath(test)):
                print("Error: can't generate a test that already exists:", test)
                return 1

            abs_test = os.path.abspath(test)
            if doc is None:
                doc = self.guess_doc(abs_test)
            if suite is None:
                guessed_suite, err = self.guess_suite(abs_test)
                if err:
                    print(err)
                    return 1
                suite = guessed_suite

        else:
            test = None
            if doc is None:
                doc = "html"

        if not suite:
            print("We couldn't automatically determine a suite. "
                  "Please specify `--suite` with one of the following options:\n{}\n"
                  "If you'd like to add support to a new suite, please file a bug "
                  "blocking https://bugzilla.mozilla.org/show_bug.cgi?id=1540285."
                  .format(ADD_TEST_SUPPORTED_SUITES))
            return 1

        if doc not in ADD_TEST_SUPPORTED_DOCS:
            print("Error: invalid `doc`. Either pass in a test with a valid extension "
                  "({}) or pass in the `doc` argument".format(ADD_TEST_SUPPORTED_DOCS))
            return 1

        creator_cls = addtest.creator_for_suite(suite)

        if creator_cls is None:
            print("Sorry, `addtest` doesn't currently know how to add {}".format(suite))
            return 1

        creator = creator_cls(self.topsrcdir, test, suite, doc, **kwargs)

        creator.check_args()

        paths = []
        added_tests = False
        for path, template in creator:
            if not template:
                continue
            added_tests = True
            if path:
                paths.append(path)
                print("Adding a test file at {} (suite `{}`)".format(path, suite))

                try:
                    os.makedirs(os.path.dirname(path))
                except OSError:
                    pass

                with open(path, "w") as f:
                    f.write(template)
            else:
                # write to stdout if you passed only suite and doc and not a file path
                print(template)

        if not added_tests:
            return 1

        if test:
            creator.update_manifest()

            # Small hack, should really do this better
            if suite.startswith("wpt-"):
                suite = "web-platform-tests"

            mach_command = TEST_SUITES[suite]["mach_command"]
            print('Please make sure to add the new test to your commit. '
                  'You can now run the test with:\n ./mach {} {}'.format(mach_command, test))

        if editor is not MISSING_ARG:
            if editor is None:
                if "VISUAL" in os.environ:
                    editor = os.environ["VISUAL"]
                elif "EDITOR" in os.environ:
                    editor = os.environ["EDITOR"]
                else:
                    print('Unable to determine editor; please specify a binary')

            proc = None
            if editor:
                proc = subprocess.Popen("%s %s" % (editor, " ".join(paths)), shell=True)

            if proc:
                proc.wait()

        return 0

    def guess_doc(self, abs_test):
        filename = os.path.basename(abs_test)
        return os.path.splitext(filename)[1].strip(".")

    def guess_suite(self, abs_test):
        # Given a path to a test file, try to detect the suite based on the
        # filename and the surrounding folder. This detection can be skipped
        # by passing `--suite` explicitly.
        err = None
        guessed_suite = None
        parent = os.path.dirname(abs_test)
        filename = os.path.basename(abs_test)

        has_browser_ini = os.path.isfile(os.path.join(parent, "browser.ini"))
        has_chrome_ini = os.path.isfile(os.path.join(parent, "chrome.ini"))
        has_plain_ini = os.path.isfile(os.path.join(parent, "mochitest.ini"))
        has_xpcshell_ini = os.path.isfile(os.path.join(parent, "xpcshell.ini"))

        in_wpt_folder = abs_test.startswith(
            os.path.abspath(os.path.join("testing", "web-platform")))

        if in_wpt_folder:
            guessed_suite = "web-platform-tests-testharness"
            if "/css/" in abs_test:
                guessed_suite = "web-platform-tests-reftest"
        elif (filename.startswith("test_") and
              has_xpcshell_ini and
              self.guess_doc(abs_test) == "js"):
            guessed_suite = "xpcshell"
        else:
            if filename.startswith("browser_") and has_browser_ini:
                guessed_suite = "mochitest-browser-chrome"
            elif filename.startswith("test_"):
                if has_chrome_ini and has_plain_ini:
                    err = ("Error: directory contains both a chrome.ini and mochitest.ini. "
                           "Please set --suite=mochitest-chrome or --suite=mochitest-plain.")
                elif has_chrome_ini:
                    guessed_suite = "mochitest-chrome"
                elif has_plain_ini:
                    guessed_suite = "mochitest-plain"
        return guessed_suite, err
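

# Illustrative sketch of the detection idea used by AddTest.guess_suite() above:
# the suite is inferred from the manifest files that live next to the new test.
# This standalone helper mirrors only the mochitest/xpcshell part of that logic
# (the manifest names are the real ones; the helper itself is hypothetical):
def _example_guess_mochitest_suite(test_dir, filename):
    def has(manifest):
        return os.path.isfile(os.path.join(test_dir, manifest))

    if filename.startswith("browser_") and has("browser.ini"):
        return "mochitest-browser-chrome"
    if filename.startswith("test_"):
        if has("xpcshell.ini") and filename.endswith(".js"):
            return "xpcshell"
        if has("chrome.ini"):
            return "mochitest-chrome"
        if has("mochitest.ini"):
            return "mochitest-plain"
    return None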


@CommandProvider
class Test(MachCommandBase):
    @Command('test', category='testing',
             description='Run tests (detects the kind of test and runs it).',
             parser=get_test_parser)
    def test(self, what, extra_args, **log_args):
        """Run tests from names or paths.

        mach test accepts arguments specifying which tests to run. Each argument
        can be:

        * The path to a test file
        * A directory containing tests
        * A test suite name
        * An alias to a test suite name (codes used on TreeHerder)

        If no input is provided, tests will be run based on files changed in
        the local tree. Relevant tests, tags, or flavors are determined by
        IMPACTED_TESTS annotations in moz.build files relevant to the
        changed files.

        When paths or directories are given, they are first resolved to test
        files known to the build system.

        If resolved tests belong to more than one test type/flavor/harness,
        the harness for each relevant type/flavor will be invoked. e.g. if
        you specify a directory with xpcshell and browser chrome mochitests,
        both harnesses will be invoked.
        """
        from mozlog.commandline import setup_logging
        from mozlog.handlers import StreamHandler
        from moztest.resolve import get_suite_definition, TestResolver, TEST_SUITES

        resolver = self._spawn(TestResolver)
        run_suites, run_tests = resolver.resolve_metadata(what)

        if not run_suites and not run_tests:
            print(UNKNOWN_TEST)
            return 1

        if log_args.get('debugger', None):
            import mozdebug
            if not mozdebug.get_debugger_info(log_args.get('debugger')):
                sys.exit(1)
            extra_args_debugger_notation = '='.join([
                '--debugger',
                log_args.get('debugger')
            ]).encode('ascii')
            if extra_args:
                extra_args.append(extra_args_debugger_notation)
            else:
                extra_args = [extra_args_debugger_notation]

        # Create shared logger
        format_args = {'level': self._mach_context.settings['test']['level']}
        if not run_suites and len(run_tests) == 1:
            format_args['verbose'] = True
            format_args['compact'] = False

        default_format = self._mach_context.settings['test']['format']
        log = setup_logging('mach-test', log_args, {default_format: sys.stdout}, format_args)
        for handler in log.handlers:
            if isinstance(handler, StreamHandler):
                handler.formatter.inner.summary_on_shutdown = True

        status = None
        for suite_name in run_suites:
            suite = TEST_SUITES[suite_name]
            kwargs = suite['kwargs']
            kwargs['log'] = log

            if 'mach_command' in suite:
                res = self._mach_context.commands.dispatch(
                    suite['mach_command'], self._mach_context,
                    argv=extra_args, **kwargs)
                if res:
                    status = res

        buckets = {}
        for test in run_tests:
            key = (test['flavor'], test.get('subsuite', ''))
            buckets.setdefault(key, []).append(test)

        for (flavor, subsuite), tests in sorted(buckets.items()):
            _, m = get_suite_definition(flavor, subsuite)
            if 'mach_command' not in m:
                substr = '-{}'.format(subsuite) if subsuite else ''
                print(UNKNOWN_FLAVOR % (flavor, substr))
                status = 1
                continue

            kwargs = dict(m['kwargs'])
            kwargs['log'] = log

            res = self._mach_context.commands.dispatch(
                m['mach_command'], self._mach_context,
                argv=extra_args, test_objects=tests, **kwargs)
            if res:
                status = res

        log.shutdown()
        return status
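

# Illustrative sketch: Test.test() above groups resolved tests into buckets keyed
# by (flavor, subsuite) so that each relevant harness is invoked once with all of
# the tests it can run. The grouping itself is just dict.setdefault(); a
# standalone equivalent:
def _example_bucket_by_flavor(test_objects):
    buckets = {}
    for test in test_objects:
        key = (test['flavor'], test.get('subsuite', ''))
        buckets.setdefault(key, []).append(test)
    return buckets


# e.g. _example_bucket_by_flavor([{'flavor': 'xpcshell'},
#                                 {'flavor': 'mochitest', 'subsuite': 'devtools'}])
# returns {('xpcshell', ''): [...], ('mochitest', 'devtools'): [...]}.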


@CommandProvider
class MachCommands(MachCommandBase):
    @Command('cppunittest', category='testing',
             description='Run cpp unit tests (C++ tests).')
    @CommandArgument('--enable-webrender', action='store_true', default=False,
                     dest='enable_webrender',
                     help='Enable the WebRender compositor in Gecko.')
    @CommandArgument('test_files', nargs='*', metavar='N',
                     help='Test to run. Can be specified as one or more files or '
                          'directories, or omitted. If omitted, the entire test suite is '
                          'executed.')
    def run_cppunit_test(self, **params):
        from mozlog import commandline

        log = params.get('log')
        if not log:
            log = commandline.setup_logging("cppunittest",
                                            {},
                                            {"tbpl": sys.stdout})

        # See if we have crash symbols
        symbols_path = os.path.join(self.distdir, 'crashreporter-symbols')
        if not os.path.isdir(symbols_path):
            symbols_path = None

        # If no tests specified, run all tests in main manifest
        tests = params['test_files']
        if len(tests) == 0:
            tests = [os.path.join(self.distdir, 'cppunittests')]
            manifest_path = os.path.join(
                self.topsrcdir, 'testing', 'cppunittest.ini')
        else:
            manifest_path = None

        utility_path = self.bindir

        if conditions.is_android(self):
            from mozrunner.devices.android_device import verify_android_device
            verify_android_device(self, install=False)
            return self.run_android_test(tests, symbols_path, manifest_path, log)

        return self.run_desktop_test(tests, symbols_path, manifest_path,
                                     utility_path, log)

    def run_desktop_test(self, tests, symbols_path, manifest_path,
                         utility_path, log):
        import runcppunittests as cppunittests
        from mozlog import commandline

        parser = cppunittests.CPPUnittestOptions()
        commandline.add_logging_group(parser)
        options, args = parser.parse_args()

        options.symbols_path = symbols_path
        options.manifest_path = manifest_path
        options.utility_path = utility_path
        options.xre_path = self.bindir

        try:
            result = cppunittests.run_test_harness(options, tests)
        except Exception as e:
            log.error("Caught exception running cpp unit tests: %s" % str(e))
            result = False
            raise

        return 0 if result else 1

    def run_android_test(self, tests, symbols_path, manifest_path, log):
        import remotecppunittests as remotecppunittests
        from mozlog import commandline

        parser = remotecppunittests.RemoteCPPUnittestOptions()
        commandline.add_logging_group(parser)
        options, args = parser.parse_args()

        if not options.adb_path:
            from mozrunner.devices.android_device import get_adb_path
            options.adb_path = get_adb_path(self)
        options.symbols_path = symbols_path
        options.manifest_path = manifest_path
        options.xre_path = self.bindir
        options.local_lib = self.bindir.replace('bin', 'fennec')
        for file in os.listdir(os.path.join(self.topobjdir, "dist")):
            if file.endswith(".apk") and file.startswith("fennec"):
                options.local_apk = os.path.join(self.topobjdir, "dist", file)
                log.info("using APK: " + options.local_apk)
                break

        try:
            result = remotecppunittests.run_test_harness(options, tests)
        except Exception as e:
            log.error("Caught exception running cpp unit tests: %s" % str(e))
            result = False
            raise

        return 0 if result else 1


def executable_name(name):
    return name + '.exe' if sys.platform.startswith('win') else name


@CommandProvider
class CheckSpiderMonkeyCommand(MachCommandBase):
    @Command('jstests', category='testing',
             description='Run SpiderMonkey JS tests in the JavaScript shell.')
    @CommandArgument('--shell', help='The shell to be used')
    @CommandArgument('params', nargs=argparse.REMAINDER,
                     help="Extra arguments to pass down to the test harness.")
    def run_jstests(self, shell, params):
        import subprocess

        self.virtualenv_manager.ensure()
        python = self.virtualenv_manager.python_path

        js = shell or os.path.join(self.bindir, executable_name('js'))
        jstest_cmd = [
            python,
            os.path.join(self.topsrcdir, 'js', 'src', 'tests', 'jstests.py'),
            js,
            '--jitflags=jstests',
        ] + params
        return subprocess.call(jstest_cmd)

    @Command('check-spidermonkey', category='testing',
             description='Run SpiderMonkey tests (JavaScript engine).')
    @CommandArgument('--valgrind', action='store_true',
                     help='Run jit-test suite with valgrind flag')
    def run_checkspidermonkey(self, **params):
        import subprocess

        self.virtualenv_manager.ensure()
        python = self.virtualenv_manager.python_path

        js = os.path.join(self.bindir, executable_name('js'))

        print('Running jit-tests')
        jittest_cmd = [
            python,
            os.path.join(self.topsrcdir, 'js', 'src',
                         'jit-test', 'jit_test.py'),
            js,
            '--no-slow',
            '--jitflags=all',
        ]
        if params['valgrind']:
            jittest_cmd.append('--valgrind')

        jittest_result = subprocess.call(jittest_cmd)

        print('running jstests')
        jstest_result = self.run_jstests(js, [])

        print('running jsapi-tests')
        jsapi_tests_cmd = [os.path.join(
            self.bindir, executable_name('jsapi-tests'))]
        jsapi_tests_result = subprocess.call(jsapi_tests_cmd)

        print('running check-js-msg-encoding')
        check_js_msg_cmd = [python, os.path.join(
            self.topsrcdir, 'config', 'check_js_msg_encoding.py')]
        check_js_msg_result = subprocess.call(
            check_js_msg_cmd, cwd=self.topsrcdir)

        all_passed = jittest_result and jstest_result and jsapi_tests_result and \
            check_js_msg_result

        return all_passed
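

# Side note (illustrative, not a change to the command above): subprocess.call()
# returns the process exit code, so 0 means success and non-zero means failure.
# When several such results are aggregated, it is usually clearer to reduce them
# to a single exit code explicitly, e.g. with a small helper like this
# hypothetical one:
def _example_combine_exit_codes(*codes):
    # Any non-zero code makes the combined result non-zero.
    return 0 if all(code == 0 for code in codes) else 1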


def has_js_binary(binary):
    def has_binary(cls):
        try:
            name = binary + cls.substs['BIN_SUFFIX']
        except BuildEnvironmentNotFoundException:
            return False

        path = os.path.join(cls.topobjdir, 'dist', 'bin', name)

        has_binary.__doc__ = """
`{}` not found in <objdir>/dist/bin. Make sure you aren't using an artifact build
and try rebuilding with `ac_add_options --enable-js-shell`.
""".format(name).lstrip()

        return os.path.isfile(path)
    return has_binary
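

# Illustrative sketch: has_js_binary() returns a closure that mach evaluates as a
# command condition; the closure's __doc__ is set so it can serve as the
# explanation when the condition fails. The same shape, stripped of the
# build-system details (the path argument below is hypothetical):
def _example_requires_file(path):
    def condition(_command_class):
        condition.__doc__ = "`{}` not found; generate it before running this command.".format(
            path)
        return os.path.isfile(path)
    return condition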


@CommandProvider
class JsapiTestsCommand(MachCommandBase):
    @Command('jsapi-tests', category='testing',
             conditions=[has_js_binary('jsapi-tests')],
             description='Run jsapi tests (JavaScript engine).')
    @CommandArgument('test_name', nargs='?', metavar='N',
                     help='Test to run. Can be a prefix or omitted. If omitted, the entire '
                          'test suite is executed.')
    def run_jsapitests(self, **params):
        import subprocess

        print('running jsapi-tests')
        jsapi_tests_cmd = [os.path.join(
            self.bindir, executable_name('jsapi-tests'))]
        if params['test_name']:
            jsapi_tests_cmd.append(params['test_name'])

        jsapi_tests_result = subprocess.call(jsapi_tests_cmd)

        return jsapi_tests_result


def get_jsshell_parser():
    from jsshell.benchmark import get_parser
    return get_parser()


@CommandProvider
class JsShellTests(MachCommandBase):
    @Command('jsshell-bench', category='testing',
             parser=get_jsshell_parser,
             description="Run benchmarks in the SpiderMonkey JS shell.")
    def run_jsshelltests(self, **kwargs):
        self._activate_virtualenv()
        from jsshell import benchmark
        return benchmark.run(**kwargs)


@CommandProvider
class CramTest(MachCommandBase):
    @Command('cramtest', category='testing',
             description="Mercurial style .t tests for command line applications.")
    @CommandArgument('test_paths', nargs='*', metavar='N',
                     help="Test paths to run. Each path can be a test file or directory. "
                          "If omitted, the entire suite will be run.")
    @CommandArgument('cram_args', nargs=argparse.REMAINDER,
                     help="Extra arguments to pass down to the cram binary. See "
                          "'./mach python -m cram -- -h' for a list of available options.")
    def cramtest(self, cram_args=None, test_paths=None, test_objects=None):
        self._activate_virtualenv()
        import mozinfo
        from manifestparser import TestManifest

        if test_objects is None:
            from moztest.resolve import TestResolver
            resolver = self._spawn(TestResolver)
            if test_paths:
                # If we were given test paths, try to find tests matching them.
                test_objects = resolver.resolve_tests(paths=test_paths, flavor='cram')
            else:
                # Otherwise just run everything in CRAMTEST_MANIFESTS
                test_objects = resolver.resolve_tests(flavor='cram')

        if not test_objects:
            message = 'No tests were collected, check spelling of the test paths.'
            self.log(logging.WARN, 'cramtest', {}, message)
            return 1

        mp = TestManifest()
        mp.tests.extend(test_objects)
        tests = mp.active_tests(disabled=False, **mozinfo.info)

        python = self.virtualenv_manager.python_path
        cmd = [python, '-m', 'cram'] + cram_args + [t['relpath'] for t in tests]
        return subprocess.call(cmd, cwd=self.topsrcdir)
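

# Illustrative sketch: cramtest() above feeds the resolved test objects through
# manifestparser's TestManifest so that skip-if/run-if annotations are evaluated
# against the current mozinfo values. Reduced to the essential calls (the import
# is deferred, matching the style of the commands above):
def _example_active_tests(test_objects, info):
    from manifestparser import TestManifest
    manifest = TestManifest()
    manifest.tests.extend(test_objects)
    # `info` is a dict of platform keys, such as mozinfo.info provides.
    return manifest.active_tests(disabled=False, **info)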


@CommandProvider
class TestInfoCommand(MachCommandBase):
    from datetime import date, timedelta

    @Command('test-info', category='testing',
             description='Display historical test results.')
    def test_info(self):
        """
        All functions implemented as subcommands.
        """

    @SubCommand('test-info', 'tests',
                description='Display historical test result summary for named tests.')
    @CommandArgument('test_names', nargs=argparse.REMAINDER,
                     help='Test(s) of interest.')
    @CommandArgument('--branches',
                     default='mozilla-central,mozilla-inbound,autoland',
                     help='Report for named branches '
                          '(default: mozilla-central,mozilla-inbound,autoland)')
    @CommandArgument('--start',
                     default=(date.today() - timedelta(7)
                              ).strftime("%Y-%m-%d"),
                     help='Start date (YYYY-MM-DD)')
    @CommandArgument('--end',
                     default=date.today().strftime("%Y-%m-%d"),
                     help='End date (YYYY-MM-DD)')
    @CommandArgument('--show-info', action='store_true',
                     help='Retrieve and display general test information.')
    @CommandArgument('--show-results', action='store_true',
                     help='Retrieve and display ActiveData test result summary.')
    @CommandArgument('--show-durations', action='store_true',
                     help='Retrieve and display ActiveData test duration summary.')
    @CommandArgument('--show-tasks', action='store_true',
                     help='Retrieve and display ActiveData test task names.')
    @CommandArgument('--show-bugs', action='store_true',
                     help='Retrieve and display related Bugzilla bugs.')
    @CommandArgument('--verbose', action='store_true',
                     help='Enable debug logging.')
    def test_info_tests(self, **params):
        from mozbuild.base import MozbuildObject
        from mozfile import which

        self.branches = params['branches']
        self.start = params['start']
        self.end = params['end']
        self.show_info = params['show_info']
        self.show_results = params['show_results']
        self.show_durations = params['show_durations']
        self.show_tasks = params['show_tasks']
        self.show_bugs = params['show_bugs']
        self.verbose = params['verbose']

        if (not self.show_info and
                not self.show_results and
                not self.show_durations and
                not self.show_tasks and
                not self.show_bugs):
            # by default, show everything
            self.show_info = True
            self.show_results = True
            self.show_durations = True
            self.show_tasks = True
            self.show_bugs = True

        here = os.path.abspath(os.path.dirname(__file__))
        build_obj = MozbuildObject.from_environment(cwd=here)

        self._hg = None
        if conditions.is_hg(build_obj):
            self._hg = which('hg')
            if not self._hg:
                raise OSError(errno.ENOENT, "Could not find 'hg' on PATH.")

        self._git = None
        if conditions.is_git(build_obj):
            self._git = which('git')
            if not self._git:
                raise OSError(errno.ENOENT, "Could not find 'git' on PATH.")

        for test_name in params['test_names']:
            print("===== %s =====" % test_name)
            self.test_name = test_name
            if len(self.test_name) < 6:
                print("'%s' is too short for a test name!" % self.test_name)
                continue
            self.set_test_name()
            if self.show_results:
                self.report_test_results()
            if self.show_durations:
                self.report_test_durations()
            if self.show_tasks:
                self.report_test_tasks()
            if self.show_bugs:
                self.report_bugs()

    def find_in_hg_or_git(self, test_name):
        if self._hg:
            cmd = [self._hg, 'files', '-I', test_name]
        elif self._git:
            cmd = [self._git, 'ls-files', test_name]
        else:
            return None
        try:
            out = subprocess.check_output(cmd).splitlines()
        except subprocess.CalledProcessError:
            out = None
        return out

    def set_test_name(self):
        # Generating a unified report for a specific test is complicated
        # by differences in the test name used in various data sources.
        # Consider:
        #   - It is often convenient to request a report based only on
        #     a short file name, rather than the full path;
        #   - Bugs may be filed in bugzilla against a simple, short test
        #     name or the full path to the test;
        #   - In ActiveData, the full path is usually used, but sometimes
        #     also includes additional path components outside of the
        #     mercurial repo (common for reftests).
        # This function attempts to find appropriate names for different
        # queries based on the specified test name.

        import posixpath
        import re

        # full_test_name is full path to file in hg (or git)
        self.full_test_name = None
        out = self.find_in_hg_or_git(self.test_name)
        if out and len(out) == 1:
            self.full_test_name = out[0]
        elif out and len(out) > 1:
            print("Ambiguous test name specified. Found:")
            for line in out:
                print(line)
        else:
            out = self.find_in_hg_or_git('**/%s*' % self.test_name)
            if out and len(out) == 1:
                self.full_test_name = out[0]
            elif out and len(out) > 1:
                print("Ambiguous test name. Found:")
                for line in out:
                    print(line)
        if self.full_test_name:
            # Normalize to posix-style separators for use in queries.
            self.full_test_name = self.full_test_name.replace(os.sep, posixpath.sep)
            print("Found %s in source control." % self.full_test_name)
        else:
            print("Unable to validate test name '%s'!" % self.test_name)
            self.full_test_name = self.test_name

        # search for full_test_name in test manifests
        from moztest.resolve import TestResolver
        resolver = self._spawn(TestResolver)
        relpath = self._wrap_path_argument(self.full_test_name).relpath()
        tests = list(resolver.resolve_tests(paths=[relpath]))
        if len(tests) == 1:
            relpath = self._wrap_path_argument(tests[0]['manifest']).relpath()
            print("%s found in manifest %s" % (self.full_test_name, relpath))
            if tests[0].get('flavor'):
                print("  flavor: %s" % tests[0]['flavor'])
            if tests[0].get('skip-if'):
                print("  skip-if: %s" % tests[0]['skip-if'])
            if tests[0].get('fail-if'):
                print("  fail-if: %s" % tests[0]['fail-if'])
        elif len(tests) == 0:
            print("%s not found in any test manifest!" % self.full_test_name)
        else:
            print("%s found in more than one manifest!" % self.full_test_name)

        # short_name is full_test_name without path
        self.short_name = None
        name_idx = self.full_test_name.rfind('/')
        if name_idx > 0:
            self.short_name = self.full_test_name[name_idx + 1:]

        # robo_name is short_name without ".java" - for robocop
        self.robo_name = None
        if self.short_name:
            robo_idx = self.short_name.rfind('.java')
            if robo_idx > 0:
                self.robo_name = self.short_name[:robo_idx]
            if self.short_name == self.test_name:
                self.short_name = None

        if not (self.show_results or self.show_durations or self.show_tasks):
            # no need to determine ActiveData name if not querying
            return

        # activedata_test_name is name in ActiveData
        self.activedata_test_name = None
        simple_names = [
            self.full_test_name,
            self.test_name,
            self.short_name,
            self.robo_name
        ]
        simple_names = [x for x in simple_names if x]
        searches = [
            {"in": {"result.test": simple_names}},
        ]
        regex_names = [".*%s.*" % re.escape(x) for x in simple_names if x]
        for r in regex_names:
            searches.append({"regexp": {"result.test": r}})
        query = {
            "from": "unittest",
            "format": "list",
            "limit": 10,
            "groupby": ["result.test"],
            "where": {"and": [
                {"or": searches},
                {"in": {"build.branch": self.branches.split(',')}},
                {"gt": {"run.timestamp": {"date": self.start}}},
                {"lt": {"run.timestamp": {"date": self.end}}}
            ]}
        }
        print("Querying ActiveData...")  # Following query can take a long time
        data = self.submit(query)
        if data and len(data) > 0:
            self.activedata_test_name = [
                d['result']['test']
                for p in simple_names + regex_names
                for d in data
                if re.match(p + "$", d['result']['test'])
            ][0]  # first match is best match
        if self.activedata_test_name:
            print("Found records matching '%s' in ActiveData." %
                  self.activedata_test_name)
        else:
            print("Unable to find matching records in ActiveData; using %s!" %
                  self.test_name)
            self.activedata_test_name = self.test_name

    def get_run_types(self, record):
        types_label = ""
        if 'run' in record and 'type' in record['run']:
            run_types = record['run']['type']
            run_types = run_types if isinstance(run_types, list) else [run_types]
            fission = True if 'fis' in run_types else False
            for run_type in run_types:
                # chunked is not interesting
                if run_type == 'chunked':
                    continue
                # fission implies e10s
                if fission and run_type == 'e10s':
                    continue
                types_label += "-" + run_type
        return types_label

    def get_platform(self, record):
        if 'platform' in record['build']:
            platform = record['build']['platform']
        else:
            platform = "-"
        tp = record['build']['type']
        if type(tp) is list:
            tp = "-".join(tp)
        return "%s/%s%s:" % (platform, tp, self.get_run_types(record))

    def submit(self, query):
        import requests
        import datetime
        if self.verbose:
            print(datetime.datetime.now())
            print(json.dumps(query))
        response = requests.post("http://activedata.allizom.org/query",
                                 data=json.dumps(query),
                                 stream=True)
        if self.verbose:
            print(datetime.datetime.now())
            print(response)
        response.raise_for_status()
        data = response.json()["data"]
        return data

    def report_test_results(self):
        # Report test pass/fail summary from ActiveData
        query = {
            "from": "unittest",
            "format": "list",
            "limit": 100,
            "groupby": ["build.platform", "build.type", "run.type"],
            "select": [
                {"aggregate": "count"},
                {
                    "name": "failures",
                    "value": {"case": [
                        {"when": {"eq": {"result.ok": "F"}}, "then": 1}
                    ]},
                    "aggregate": "sum",
                    "default": 0
                },
                {
                    "name": "skips",
                    "value": {"case": [
                        {"when": {"eq": {"result.status": "SKIP"}}, "then": 1}
                    ]},
                    "aggregate": "sum",
                    "default": 0
                }
            ],
            "where": {"and": [
                {"eq": {"result.test": self.activedata_test_name}},
                {"in": {"build.branch": self.branches.split(',')}},
                {"gt": {"run.timestamp": {"date": self.start}}},
                {"lt": {"run.timestamp": {"date": self.end}}}
            ]}
        }
        print("\nTest results for %s on %s between %s and %s" %
              (self.activedata_test_name, self.branches, self.start, self.end))
        data = self.submit(query)
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            worst_rate = 0.0
            worst_platform = None
            total_runs = 0
            total_failures = 0
            for record in data:
                platform = self.get_platform(record)
                if platform.startswith("-"):
                    continue
                runs = record['count']
                total_runs = total_runs + runs
                failures = record.get('failures', 0)
                skips = record.get('skips', 0)
                total_failures = total_failures + failures
                rate = float(failures) / runs
                if rate >= worst_rate:
                    worst_rate = rate
                    worst_platform = platform
                    worst_failures = failures
                    worst_runs = runs
                print("%-40s %6d failures (%6d skipped) in %6d runs" % (
                    platform, failures, skips, runs))
            print("\nTotal: %d failures in %d runs or %.3f failures/run" %
                  (total_failures, total_runs, float(total_failures) / total_runs))
            if worst_failures > 0:
                print("Worst rate on %s %d failures in %d runs or %.3f failures/run" %
                      (worst_platform, worst_failures, worst_runs, worst_rate))
        else:
            print("No test result data found.")

    def report_test_durations(self):
        # Report test durations summary from ActiveData
        query = {
            "from": "unittest",
            "format": "list",
            "limit": 100,
            "groupby": ["build.platform", "build.type", "run.type"],
            "select": [
                {"value": "result.duration",
                 "aggregate": "average", "name": "average"},
                {"value": "result.duration", "aggregate": "min", "name": "min"},
                {"value": "result.duration", "aggregate": "max", "name": "max"},
                {"aggregate": "count"}
            ],
            "where": {"and": [
                {"eq": {"result.ok": "T"}},
                {"eq": {"result.test": self.activedata_test_name}},
                {"in": {"build.branch": self.branches.split(',')}},
                {"gt": {"run.timestamp": {"date": self.start}}},
                {"lt": {"run.timestamp": {"date": self.end}}}
            ]}
        }
        data = self.submit(query)
        print("\nTest durations for %s on %s between %s and %s" %
              (self.activedata_test_name, self.branches, self.start, self.end))
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            for record in data:
                platform = self.get_platform(record)
                if platform.startswith("-"):
                    continue
                print("%-40s %6.2f s (%.2f s - %.2f s over %d runs)" % (
                    platform, record['average'], record['min'],
                    record['max'], record['count']))
        else:
            print("No test durations found.")

    def report_test_tasks(self):
        # Report test tasks summary from ActiveData
        query = {
            "from": "unittest",
            "format": "list",
            "limit": 1000,
            "select": ["build.platform", "build.type", "run.type", "run.name"],
            "where": {"and": [
                {"eq": {"result.test": self.activedata_test_name}},
                {"in": {"build.branch": self.branches.split(',')}},
                {"gt": {"run.timestamp": {"date": self.start}}},
                {"lt": {"run.timestamp": {"date": self.end}}}
            ]}
        }
        data = self.submit(query)
        print("\nTest tasks for %s on %s between %s and %s" %
              (self.activedata_test_name, self.branches, self.start, self.end))
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            consolidated = {}
            for record in data:
                platform = self.get_platform(record)
                if platform not in consolidated:
                    consolidated[platform] = {}
                if record['run']['name'] in consolidated[platform]:
                    consolidated[platform][record['run']['name']] += 1
                else:
                    consolidated[platform][record['run']['name']] = 1
            for key in sorted(consolidated.keys()):
                tasks = ""
                for task in consolidated[key].keys():
                    if tasks:
                        tasks += "\n%-40s " % ""
                    tasks += task
                    tasks += " in %d runs" % consolidated[key][task]
                print("%-40s %s" % (key, tasks))
        else:
            print("No test tasks found.")

    def report_bugs(self):
        # Report open bugs matching test name
        import requests
        search = self.full_test_name
        if self.test_name:
            search = '%s,%s' % (search, self.test_name)
        if self.short_name:
            search = '%s,%s' % (search, self.short_name)
        if self.robo_name:
            search = '%s,%s' % (search, self.robo_name)
        payload = {'quicksearch': search,
                   'include_fields': 'id,summary'}
        response = requests.get('https://bugzilla.mozilla.org/rest/bug',
                                payload)
        response.raise_for_status()
        json_response = response.json()
        print("\nBugzilla quick search for '%s':" % search)
        if 'bugs' in json_response:
            for bug in json_response['bugs']:
                print("Bug %s: %s" % (bug['id'], bug['summary']))
        else:
            print("No bugs found.")

    @SubCommand('test-info', 'long-tasks',
                description='Find tasks approaching their taskcluster max-run-time.')
    @CommandArgument('--branches',
                     default='mozilla-central,mozilla-inbound,autoland',
                     help='Report for named branches '
                          '(default: mozilla-central,mozilla-inbound,autoland)')
    @CommandArgument('--start',
                     default=(date.today() - timedelta(7)
                              ).strftime("%Y-%m-%d"),
                     help='Start date (YYYY-MM-DD)')
    @CommandArgument('--end',
                     default=date.today().strftime("%Y-%m-%d"),
                     help='End date (YYYY-MM-DD)')
    @CommandArgument('--max-threshold-pct',
                     default=90.0,
                     help='Count tasks exceeding this percentage of max-run-time.')
    @CommandArgument('--filter-threshold-pct',
                     default=0.5,
                     help='Report tasks exceeding this percentage of long tasks.')
    @CommandArgument('--verbose', action='store_true',
                     help='Enable debug logging.')
    def report_long_running_tasks(self, **params):
        def get_long_running_ratio(record):
            count = record['count']
            tasks_gt_pct = record['tasks_gt_pct']
            return count / tasks_gt_pct

        branches = params['branches']
        start = params['start']
        end = params['end']
        self.verbose = params['verbose']
        threshold_pct = float(params['max_threshold_pct'])
        filter_threshold_pct = float(params['filter_threshold_pct'])

        # Search test durations in ActiveData for long-running tests
        query = {
            "from": "task",
            "format": "list",
            "groupby": ["run.name"],
            "limit": 1000,
            "select": [
                {
                    "value": "task.maxRunTime",
                    "aggregate": "median",
                    "name": "max_run_time"
                },
                {
                    "aggregate": "count"
                },
                {
                    "value": {
                        "when": {
                            "gt": [
                                {
                                    "div": ["action.duration", "task.maxRunTime"]
                                }, threshold_pct/100.0
                            ]
                        },
                        "then": 1
                    },
                    "aggregate": "sum",
                    "name": "tasks_gt_pct"
                },
            ],
            "where": {"and": [
                {"in": {"build.branch": branches.split(',')}},
                {"gt": {"task.run.start_time": {"date": start}}},
                {"lte": {"task.run.start_time": {"date": end}}},
                {"eq": {"task.state": "completed"}},
            ]}
        }
        data = self.submit(query)
        print("\nTasks nearing their max-run-time on %s between %s and %s" %
              (branches, start, end))
        if data and len(data) > 0:
            filtered = []
            for record in data:
                if 'tasks_gt_pct' in record:
                    count = record['count']
                    tasks_gt_pct = record['tasks_gt_pct']
                    if float(tasks_gt_pct) / count > filter_threshold_pct / 100.0:
                        filtered.append(record)
            filtered.sort(key=get_long_running_ratio)
            if not filtered:
                print("No long running tasks found.")
            for record in filtered:
                name = record['run']['name']
                count = record['count']
                max_run_time = record['max_run_time']
                tasks_gt_pct = record['tasks_gt_pct']
                print("%-55s: %d of %d runs (%.1f%%) exceeded %d%% of max-run-time (%d s)" %
                      (name, tasks_gt_pct, count, tasks_gt_pct * 100 / count,
                       threshold_pct, max_run_time))
        else:
            print("No tasks found.")

    @SubCommand('test-info', 'report',
                description='Generate a json report of test manifests and/or tests '
                            'categorized by Bugzilla component and optionally filtered '
                            'by path, component, and/or manifest annotations.')
    @CommandArgument('--components', default=None,
                     help='Comma-separated list of Bugzilla components.'
                          ' eg. Testing::General,Core::WebVR')
    @CommandArgument('--flavor',
                     help='Limit results to tests of the specified flavor (eg. "xpcshell").')
    @CommandArgument('--subsuite',
                     help='Limit results to tests of the specified subsuite (eg. "devtools").')
    @CommandArgument('paths', nargs=argparse.REMAINDER,
                     help='File system paths of interest.')
    @CommandArgument('--show-manifests', action='store_true',
                     help='Include test manifests in report.')
    @CommandArgument('--show-tests', action='store_true',
                     help='Include individual tests in report.')
    @CommandArgument('--show-summary', action='store_true',
                     help='Include summary in report.')
    @CommandArgument('--filter-values',
                     help='Comma-separated list of value regular expressions to filter on; '
                          'displayed tests contain all specified values.')
    @CommandArgument('--filter-keys',
                     help='Comma-separated list of test keys to filter on, '
                          'like "skip-if"; only these fields will be searched '
                          'for filter-values.')
    @CommandArgument('--no-component-report', action='store_false',
                     dest="show_components", default=True,
                     help='Do not categorize by bugzilla component.')
    @CommandArgument('--output-file',
                     help='Path to report file.')
    def test_report(self, components, flavor, subsuite, paths,
                    show_manifests, show_tests, show_summary,
                    filter_values, filter_keys, show_components, output_file):
        import mozpack.path as mozpath
        import re
        from mozbuild.build_commands import Build
        from moztest.resolve import TestResolver

        def matches_filters(test):
            """
            Return True if all of the requested filter_values are found in this test;
            if filter_keys are specified, restrict search to those test keys.
            """
            for value in filter_values:
                value_found = False
                for key in test:
                    if not filter_keys or key in filter_keys:
                        if re.search(value, test[key]):
                            value_found = True
                            break
                if not value_found:
                    return False
            return True

        # Ensure useful report by default
        if not show_manifests and not show_tests and not show_summary:
            show_manifests = True
            show_summary = True

        by_component = {}
        if components:
            components = components.split(',')
        if filter_keys:
            filter_keys = filter_keys.split(',')
        if filter_values:
            filter_values = filter_values.split(',')
        else:
            filter_values = []

        try:
            self.config_environment
        except BuildEnvironmentNotFoundException:
            print("Looks like configure has not run yet, running it now...")
            builder = Build(self._mach_context)
            builder.configure()

        print("Finding tests...")
        resolver = self._spawn(TestResolver)
        tests = list(resolver.resolve_tests(paths=paths, flavor=flavor,
                                            subsuite=subsuite))

        manifest_paths = set()
        for t in tests:
            manifest_paths.add(t['manifest'])
        manifest_count = len(manifest_paths)
        print("Resolver found {} tests, {} manifests".format(len(tests), manifest_count))

        if show_manifests:
            by_component['manifests'] = {}
            manifest_paths = list(manifest_paths)
            manifest_paths.sort()
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, self.topsrcdir)
                print("  {}".format(relpath))
                if mozpath.commonprefix((manifest_path, self.topsrcdir)) != self.topsrcdir:
                    continue
                reader = self.mozbuild_reader(config_mode='empty')
                manifest_info = None
                for info_path, info in reader.files_info([manifest_path]).items():
                    bug_component = info.get('BUG_COMPONENT')
                    key = "{}::{}".format(bug_component.product, bug_component.component)
                    if (info_path == relpath) and ((not components) or (key in components)):
                        manifest_info = {
                            'manifest': relpath,
                            'tests': 0,
                            'skipped': 0
                        }
                        rkey = key if show_components else 'all'
                        if rkey in by_component['manifests']:
                            by_component['manifests'][rkey].append(manifest_info)
                        else:
                            by_component['manifests'][rkey] = [manifest_info]
                        break
                if manifest_info:
                    for t in tests:
                        if t['manifest'] == manifest_path:
                            manifest_info['tests'] += 1
                            if t.get('skip-if'):
                                manifest_info['skipped'] += 1
            for key in by_component['manifests']:
                by_component['manifests'][key].sort()

        if show_tests:
            by_component['tests'] = {}

        if show_tests or show_summary:
            test_count = 0
            failed_count = 0
            skipped_count = 0
            component_set = set()
            for t in tests:
                reader = self.mozbuild_reader(config_mode='empty')
                if not matches_filters(t):
                    continue
                test_count += 1
                relpath = t.get('srcdir_relpath')
                for info_path, info in reader.files_info([relpath]).items():
                    bug_component = info.get('BUG_COMPONENT')
                    key = "{}::{}".format(bug_component.product, bug_component.component)
                    if (info_path == relpath) and ((not components) or (key in components)):
                        component_set.add(key)
                        test_info = {'test': relpath}
                        for test_key in ['skip-if', 'fail-if']:
                            value = t.get(test_key)
                            if value:
                                test_info[test_key] = value
                        if t.get('fail-if'):
                            failed_count += 1
                        if t.get('skip-if'):
                            skipped_count += 1
                        if show_tests:
                            rkey = key if show_components else 'all'
                            if rkey in by_component['tests']:
                                by_component['tests'][rkey].append(test_info)
                            else:
                                by_component['tests'][rkey] = [test_info]
                        break
            if show_tests:
                for key in by_component['tests']:
                    by_component['tests'][key].sort(key=lambda k: k['test'])

        if show_summary:
            by_component['summary'] = {}
            by_component['summary']['components'] = len(component_set)
            by_component['summary']['manifests'] = manifest_count
            by_component['summary']['tests'] = test_count
            by_component['summary']['failed tests'] = failed_count
            by_component['summary']['skipped tests'] = skipped_count

        json_report = json.dumps(by_component, indent=2, sort_keys=True)
        if output_file:
            output_file = os.path.abspath(output_file)
            output_dir = os.path.dirname(output_file)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)

            with open(output_file, 'w') as f:
                f.write(json_report)
        else:
            print(json_report)
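

# Illustrative sketch: every report produced by TestInfoCommand above is an
# ActiveData query, i.e. a JSON document POSTed to the ActiveData endpoint, with
# the result rows returned under the "data" key. A minimal standalone version of
# the submit() pattern used above, with the query left to the caller (requests is
# assumed to be available, as it is in the methods above):
def _example_submit_activedata_query(query):
    import requests
    response = requests.post("http://activedata.allizom.org/query",
                             data=json.dumps(query),
                             stream=True)
    response.raise_for_status()
    return response.json()["data"]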


@CommandProvider
class RustTests(MachCommandBase):
    @Command('rusttests', category='testing',
             conditions=[conditions.is_non_artifact_build],
             description="Run rust unit tests (via cargo test).")
    def run_rusttests(self, **kwargs):
        return self._mach_context.commands.dispatch('build', self._mach_context,
                                                     what=['pre-export',
                                                           'export',
                                                           'recurse_rusttests'])


@CommandProvider
class TestFluentMigration(MachCommandBase):
    @Command('fluent-migration-test', category='testing',
             description="Test Fluent migration recipes.")
    @CommandArgument('test_paths', nargs='*', metavar='N',
                     help="Recipe paths to test.")
    def run_migration_tests(self, test_paths=None, **kwargs):
        if not test_paths:
            test_paths = []
        self._activate_virtualenv()
        from test_fluent_migrations import fmt
        rv = 0
        with_context = []
        for to_test in test_paths:
            try:
                context = fmt.inspect_migration(to_test)
                for issue in context['issues']:
                    self.log(logging.ERROR, 'fluent-migration-test', {
                        'error': issue['msg'],
                        'file': to_test,
                    }, 'ERROR in {file}: {error}')
                if context['issues']:
                    continue
                with_context.append({
                    'to_test': to_test,
                    'references': context['references'],
                })
            except Exception as e:
                self.log(logging.ERROR, 'fluent-migration-test', {
                    'error': str(e),
                    'file': to_test
                }, 'ERROR in {file}: {error}')
                rv |= 1
        obj_dir = fmt.prepare_object_dir(self)
        for context in with_context:
            rv |= fmt.test_migration(self, obj_dir, **context)
        return rv
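

# Illustrative sketch: run_migration_tests() above keeps going after a bad recipe
# and folds per-recipe failures into a single exit status with `rv |= 1`. The same
# accumulation pattern in isolation (check_one is a hypothetical callable that
# raises on failure):
def _example_accumulate_failures(items, check_one):
    rv = 0
    for item in items:
        try:
            check_one(item)
        except Exception:
            rv |= 1
    return rv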