Bug 1719035 [wpt PR 29572] - Address flakiness in import-css-module-basic.html, a...
[gecko.git] / testing / mach_commands.py
blob034c0e2fe575d3ab30fe050bc459cf04c4a36083
1 # This Source Code Form is subject to the terms of the Mozilla Public
2 # License, v. 2.0. If a copy of the MPL was not distributed with this
3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
5 from __future__ import absolute_import, print_function, unicode_literals
7 import argparse
8 import logging
9 import os
10 import sys
11 import subprocess
13 from mach.decorators import (
14 CommandArgument,
15 CommandProvider,
16 Command,
17 SettingsProvider,
18 SubCommand,
21 from mozbuild.base import (
22 BuildEnvironmentNotFoundException,
23 MachCommandBase,
24 MachCommandConditions as conditions,
# Shown when `mach test` cannot map the given argument(s) to any known
# test file, directory, suite name or alias.
UNKNOWN_TEST = """
I was unable to find tests from the given argument(s).

You should specify a test directory, filename, test suite name, or
abbreviation.

It's possible my little brain doesn't know about the type of test you are
trying to execute. If you suspect this, please request support by filing
a bug at
https://bugzilla.mozilla.org/enter_bug.cgi?product=Testing&component=General.
""".strip()

# Shown when resolved tests map to a flavor/subsuite with no mach command.
# Interpolated with (flavor, "-subsuite" or "").
UNKNOWN_FLAVOR = """
I know you are trying to run a %s%s test. Unfortunately, I can't run those
tests yet. Sorry!
""".strip()

# Help text for the positional `what` argument of `mach test`; formatted
# with the comma-separated, sorted list of supported suite names.
TEST_HELP = """
Test or tests to run. Tests can be specified by filename, directory, suite
name or suite alias.

The following test suites and aliases are supported: {}
""".strip()
@SettingsProvider
class TestConfig(object):
    """Declares the `test.format` and `test.level` mach settings."""

    @classmethod
    def config_settings(cls):
        from mozlog.commandline import log_formatters
        from mozlog.structuredlog import log_levels

        # Valid choices come straight from mozlog's registries.
        format_choices = list(log_formatters)
        level_choices = [level_name.lower() for level_name in log_levels]

        format_desc = "The default format to use when running tests with `mach test`."
        level_desc = "The default log level to use when running tests with `mach test`."

        format_setting = (
            "test.format",
            "string",
            format_desc,
            "mach",
            {"choices": format_choices},
        )
        level_setting = (
            "test.level",
            "string",
            level_desc,
            "info",
            {"choices": level_choices},
        )
        return [format_setting, level_setting]
def get_test_parser():
    """Build the argument parser for `mach test`."""
    from mozlog.commandline import add_logging_group
    from moztest.resolve import TEST_SUITES

    parser = argparse.ArgumentParser()

    suite_names = ", ".join(sorted(TEST_SUITES))
    parser.add_argument(
        "what",
        nargs="+",
        default=None,
        help=TEST_HELP.format(suite_names),
    )
    parser.add_argument(
        "extra_args",
        nargs=argparse.REMAINDER,
        default=None,
        help=(
            "Extra arguments to pass to the underlying test command(s). "
            "If an underlying command doesn't recognize the argument, it "
            "will fail."
        ),
    )
    parser.add_argument(
        "--debugger",
        action="store",
        nargs="?",
        default=None,
        help="Specify a debugger to use.",
    )
    # Standard mozlog --log-* options.
    add_logging_group(parser)
    return parser
# Suites `mach addtest` knows how to generate tests for.
ADD_TEST_SUPPORTED_SUITES = [
    "mochitest-chrome",
    "mochitest-plain",
    "mochitest-browser-chrome",
    "web-platform-tests-testharness",
    "web-platform-tests-reftest",
    "xpcshell",
]

# Document types (derived from the test file extension) with templates.
ADD_TEST_SUPPORTED_DOCS = ["js", "html", "xhtml", "xul"]

# Short aliases accepted for --suite, mapped to their canonical suite names.
SUITE_SYNONYMS = {
    "wpt": "web-platform-tests-testharness",
    "wpt-testharness": "web-platform-tests-testharness",
    "wpt-reftest": "web-platform-tests-reftest",
}

# Sentinel distinguishing "--editor passed with no value" from "not passed";
# None is a meaningful value for --editor, so a unique object is used.
MISSING_ARG = object()
def create_parser_addtest():
    """Build the argument parser for `mach addtest`.

    Suite-specific options are appended as argument groups by each test
    creator class that exposes a `get_parser` hook.
    """
    import addtest

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--suite",
        choices=sorted(ADD_TEST_SUPPORTED_SUITES + list(SUITE_SYNONYMS.keys())),
        help="suite for the test. "
        "If you pass a `test` argument this will be determined "
        "based on the filename and the folder it is in",
    )
    parser.add_argument(
        "-o",
        "--overwrite",
        action="store_true",
        help="Overwrite an existing file if it exists.",
    )
    parser.add_argument(
        "--doc",
        choices=ADD_TEST_SUPPORTED_DOCS,
        help="Document type for the test (if applicable)."
        "If you pass a `test` argument this will be determined "
        "based on the filename.",
    )
    parser.add_argument(
        "-e",
        "--editor",
        action="store",
        nargs="?",
        default=MISSING_ARG,
        help="Open the created file(s) in an editor; if a "
        "binary is supplied it will be used otherwise the default editor for "
        "your environment will be opened",
    )

    # Let every registered creator contribute its own option group.
    for base_suite, creator in addtest.TEST_CREATORS.items():
        if hasattr(creator, "get_parser"):
            group = parser.add_argument_group(base_suite)
            creator.get_parser(group)

    parser.add_argument("test", nargs="?", help="Test to create.")
    return parser
@CommandProvider
class AddTest(MachCommandBase):
    @Command(
        "addtest",
        category="testing",
        description="Generate tests based on templates",
        parser=create_parser_addtest,
    )
    def addtest(
        self,
        command_context,
        suite=None,
        test=None,
        doc=None,
        overwrite=False,
        editor=MISSING_ARG,
        **kwargs
    ):
        """Generate new test file(s) (and manifest entries) from templates.

        Returns 0 on success, 1 on any user error (existing file, unknown
        suite/doc, nothing generated).
        """
        import addtest
        import io
        from moztest.resolve import TEST_SUITES

        # With neither a suite nor a test path there is nothing to guess
        # from, so just show the help.
        if not suite and not test:
            return create_parser_addtest().parse_args(["--help"])

        if suite in SUITE_SYNONYMS:
            suite = SUITE_SYNONYMS[suite]

        if test:
            if not overwrite and os.path.isfile(os.path.abspath(test)):
                print("Error: can't generate a test that already exists:", test)
                return 1

            abs_test = os.path.abspath(test)
            if doc is None:
                doc = self.guess_doc(abs_test)
            if suite is None:
                guessed_suite, err = self.guess_suite(abs_test)
                if err:
                    print(err)
                    return 1
                suite = guessed_suite

        else:
            test = None
            if doc is None:
                doc = "html"

        if not suite:
            print(
                "We couldn't automatically determine a suite. "
                "Please specify `--suite` with one of the following options:\n{}\n"
                "If you'd like to add support to a new suite, please file a bug "
                "blocking https://bugzilla.mozilla.org/show_bug.cgi?id=1540285.".format(
                    ADD_TEST_SUPPORTED_SUITES
                )
            )
            return 1

        if doc not in ADD_TEST_SUPPORTED_DOCS:
            print(
                "Error: invalid `doc`. Either pass in a test with a valid extension"
                "({}) or pass in the `doc` argument".format(ADD_TEST_SUPPORTED_DOCS)
            )
            return 1

        creator_cls = addtest.creator_for_suite(suite)

        if creator_cls is None:
            print("Sorry, `addtest` doesn't currently know how to add {}".format(suite))
            return 1

        creator = creator_cls(self.topsrcdir, test, suite, doc, **kwargs)

        creator.check_args()

        paths = []
        added_tests = False
        for path, template in creator:
            if not template:
                continue
            added_tests = True
            if path:
                paths.append(path)
                print("Adding a test file at {} (suite `{}`)".format(path, suite))

                # Ensure the parent directory exists; a pre-existing directory
                # is fine. (Previously any OSError was silently swallowed;
                # genuine failures now surface instead of failing later at
                # open() with a less obvious error.)
                dirname = os.path.dirname(path)
                if dirname:
                    os.makedirs(dirname, exist_ok=True)

                with io.open(path, "w", newline="\n") as f:
                    f.write(template)
            else:
                # write to stdout if you passed only suite and doc and not a file path
                print(template)

        if not added_tests:
            return 1

        if test:
            creator.update_manifest()

            # Small hack, should really do this better
            if suite.startswith("wpt-"):
                suite = "web-platform-tests"

            mach_command = TEST_SUITES[suite]["mach_command"]
            print(
                "Please make sure to add the new test to your commit. "
                "You can now run the test with:\n    ./mach {} {}".format(
                    mach_command, test
                )
            )

        if editor is not MISSING_ARG:
            # --editor was given. Use the supplied binary if any, otherwise
            # fall back to the environment's preferred editor.
            if editor is None:
                if "VISUAL" in os.environ:
                    editor = os.environ["VISUAL"]
                elif "EDITOR" in os.environ:
                    editor = os.environ["EDITOR"]
                else:
                    print("Unable to determine editor; please specify a binary")

            proc = None
            if editor:
                # NOTE: shell=True so `editor` may itself contain arguments;
                # input comes from the local environment, not untrusted data.
                proc = subprocess.Popen("%s %s" % (editor, " ".join(paths)), shell=True)

            if proc:
                proc.wait()

        return 0

    def guess_doc(self, abs_test):
        """Return the document type implied by the test file's extension."""
        filename = os.path.basename(abs_test)
        return os.path.splitext(filename)[1].strip(".")

    def guess_suite(self, abs_test):
        # If you pass a abs_test, try to detect the type based on the name
        # and folder. This detection can be skipped if you pass the `type` arg.
        # Returns (guessed_suite_or_None, error_message_or_None).
        err = None
        guessed_suite = None
        parent = os.path.dirname(abs_test)
        filename = os.path.basename(abs_test)

        has_browser_ini = os.path.isfile(os.path.join(parent, "browser.ini"))
        has_chrome_ini = os.path.isfile(os.path.join(parent, "chrome.ini"))
        has_plain_ini = os.path.isfile(os.path.join(parent, "mochitest.ini"))
        has_xpcshell_ini = os.path.isfile(os.path.join(parent, "xpcshell.ini"))

        in_wpt_folder = abs_test.startswith(
            os.path.abspath(os.path.join("testing", "web-platform"))
        )

        if in_wpt_folder:
            guessed_suite = "web-platform-tests-testharness"
            # NOTE(review): "/css/" assumes POSIX separators; on Windows the
            # absolute path uses "\\" so this never matches — confirm intent.
            if "/css/" in abs_test:
                guessed_suite = "web-platform-tests-reftest"
        elif (
            filename.startswith("test_")
            and has_xpcshell_ini
            and self.guess_doc(abs_test) == "js"
        ):
            guessed_suite = "xpcshell"
        else:
            if filename.startswith("browser_") and has_browser_ini:
                guessed_suite = "mochitest-browser-chrome"
            elif filename.startswith("test_"):
                if has_chrome_ini and has_plain_ini:
                    err = (
                        "Error: directory contains both a chrome.ini and mochitest.ini. "
                        "Please set --suite=mochitest-chrome or --suite=mochitest-plain."
                    )
                elif has_chrome_ini:
                    guessed_suite = "mochitest-chrome"
                elif has_plain_ini:
                    guessed_suite = "mochitest-plain"
        return guessed_suite, err
@CommandProvider
class Test(MachCommandBase):
    @Command(
        "test",
        category="testing",
        description="Run tests (detects the kind of test and runs it).",
        parser=get_test_parser,
    )
    def test(self, command_context, what, extra_args, **log_args):
        """Run tests from names or paths.

        mach test accepts arguments specifying which tests to run. Each argument
        can be:

        * The path to a test file
        * A directory containing tests
        * A test suite name
        * An alias to a test suite name (codes used on TreeHerder)

        When paths or directories are given, they are first resolved to test
        files known to the build system.

        If resolved tests belong to more than one test type/flavor/harness,
        the harness for each relevant type/flavor will be invoked. e.g. if
        you specify a directory with xpcshell and browser chrome mochitests,
        both harnesses will be invoked.

        Warning: `mach test` does not automatically re-build.
        Please remember to run `mach build` when necessary.

        EXAMPLES

        Run all test files in the devtools/client/shared/redux/middleware/xpcshell/
        directory:

        `./mach test devtools/client/shared/redux/middleware/xpcshell/`

        The below command prints a short summary of results instead of
        the default more verbose output.
        Do not forget the - (minus sign) after --log-grouped!

        `./mach test --log-grouped - devtools/client/shared/redux/middleware/xpcshell/`
        """
        from mozlog.commandline import setup_logging
        from mozlog.handlers import StreamHandler
        from moztest.resolve import get_suite_definition, TestResolver, TEST_SUITES

        # Map the user-supplied names/paths to whole suites and/or
        # individual test objects.
        resolver = self._spawn(TestResolver)
        run_suites, run_tests = resolver.resolve_metadata(what)

        if not run_suites and not run_tests:
            print(UNKNOWN_TEST)
            return 1

        if log_args.get("debugger", None):
            import mozdebug

            # Bail out early if the requested debugger is not available.
            if not mozdebug.get_debugger_info(log_args.get("debugger")):
                sys.exit(1)
            # Forward the debugger selection to each underlying harness.
            extra_args_debugger_notation = "=".join(
                ["--debugger", log_args.get("debugger")]
            )
            if extra_args:
                extra_args.append(extra_args_debugger_notation)
            else:
                extra_args = [extra_args_debugger_notation]

        # Create shared logger
        format_args = {"level": self._mach_context.settings["test"]["level"]}
        if not run_suites and len(run_tests) == 1:
            # A single test gets verbose, non-compact output.
            format_args["verbose"] = True
            format_args["compact"] = False

        default_format = self._mach_context.settings["test"]["format"]
        log = setup_logging(
            "mach-test", log_args, {default_format: sys.stdout}, format_args
        )
        for handler in log.handlers:
            if isinstance(handler, StreamHandler):
                handler.formatter.inner.summary_on_shutdown = True

        # `status` stays None on full success; the last non-zero harness
        # result (or 1 for unknown flavors) wins otherwise.
        status = None
        for suite_name in run_suites:
            suite = TEST_SUITES[suite_name]
            kwargs = suite["kwargs"]
            kwargs["log"] = log
            kwargs.setdefault("subsuite", None)

            if "mach_command" in suite:
                res = self._mach_context.commands.dispatch(
                    suite["mach_command"], self._mach_context, argv=extra_args, **kwargs
                )
                if res:
                    status = res

        # Group individual tests by (flavor, subsuite) and dispatch each
        # bucket to its harness in one invocation.
        buckets = {}
        for test in run_tests:
            key = (test["flavor"], test.get("subsuite", ""))
            buckets.setdefault(key, []).append(test)

        for (flavor, subsuite), tests in sorted(buckets.items()):
            _, m = get_suite_definition(flavor, subsuite)
            if "mach_command" not in m:
                substr = "-{}".format(subsuite) if subsuite else ""
                print(UNKNOWN_FLAVOR % (flavor, substr))
                status = 1
                continue

            kwargs = dict(m["kwargs"])
            kwargs["log"] = log
            kwargs.setdefault("subsuite", None)

            res = self._mach_context.commands.dispatch(
                m["mach_command"],
                self._mach_context,
                argv=extra_args,
                test_objects=tests,
                **kwargs
            )
            if res:
                status = res

        # Flush the shared logger so the summary-on-shutdown formatters emit.
        log.shutdown()
        return status
@CommandProvider
class MachCommands(MachCommandBase):
    @Command(
        "cppunittest", category="testing", description="Run cpp unit tests (C++ tests)."
    )
    @CommandArgument(
        "--enable-webrender",
        action="store_true",
        default=False,
        dest="enable_webrender",
        help="Enable the WebRender compositor in Gecko.",
    )
    @CommandArgument(
        "test_files",
        nargs="*",
        metavar="N",
        help="Test to run. Can be specified as one or more files or "
        "directories, or omitted. If omitted, the entire test suite is "
        "executed.",
    )
    def run_cppunit_test(self, command_context, **params):
        """Entry point for `mach cppunittest`; dispatches to the desktop or
        Android runner depending on the build configuration."""
        from mozlog import commandline

        log = params.get("log")
        if not log:
            log = commandline.setup_logging("cppunittest", {}, {"tbpl": sys.stdout})

        # See if we have crash symbols
        symbols_path = os.path.join(self.distdir, "crashreporter-symbols")
        if not os.path.isdir(symbols_path):
            symbols_path = None

        # If no tests specified, run all tests in main manifest
        tests = params["test_files"]
        if not tests:
            tests = [os.path.join(self.distdir, "cppunittests")]
            manifest_path = os.path.join(self.topsrcdir, "testing", "cppunittest.ini")
        else:
            manifest_path = None

        utility_path = self.bindir

        if conditions.is_android(self):
            from mozrunner.devices.android_device import (
                verify_android_device,
                InstallIntent,
            )

            # Check the device is usable but do not (re)install the APK.
            verify_android_device(self, install=InstallIntent.NO)
            return self.run_android_test(tests, symbols_path, manifest_path, log)

        return self.run_desktop_test(
            tests, symbols_path, manifest_path, utility_path, log
        )

    def run_desktop_test(self, tests, symbols_path, manifest_path, utility_path, log):
        # Run the C++ unit tests against the local desktop build.
        import runcppunittests as cppunittests
        from mozlog import commandline

        parser = cppunittests.CPPUnittestOptions()
        commandline.add_logging_group(parser)
        options, args = parser.parse_args()

        options.symbols_path = symbols_path
        options.manifest_path = manifest_path
        options.utility_path = utility_path
        options.xre_path = self.bindir

        try:
            result = cppunittests.run_test_harness(options, tests)
        except Exception as e:
            # Log for the harness consumers, then re-raise so mach reports
            # the traceback too.
            log.error("Caught exception running cpp unit tests: %s" % str(e))
            result = False
            raise

        return 0 if result else 1

    def run_android_test(self, tests, symbols_path, manifest_path, log):
        # Run the C++ unit tests remotely on an Android device.
        import remotecppunittests as remotecppunittests
        from mozlog import commandline

        parser = remotecppunittests.RemoteCPPUnittestOptions()
        commandline.add_logging_group(parser)
        options, args = parser.parse_args()

        if not options.adb_path:
            from mozrunner.devices.android_device import get_adb_path

            options.adb_path = get_adb_path(self)
        options.symbols_path = symbols_path
        options.manifest_path = manifest_path
        options.xre_path = self.bindir
        options.local_lib = self.bindir.replace("bin", "fennec")
        # Pick the first fennec*.apk found in the dist directory.
        for file in os.listdir(os.path.join(self.topobjdir, "dist")):
            if file.endswith(".apk") and file.startswith("fennec"):
                options.local_apk = os.path.join(self.topobjdir, "dist", file)
                log.info("using APK: " + options.local_apk)
                break

        try:
            result = remotecppunittests.run_test_harness(options, tests)
        except Exception as e:
            log.error("Caught exception running cpp unit tests: %s" % str(e))
            result = False
            raise

        return 0 if result else 1
def executable_name(name):
    """Return *name* with the platform's executable suffix (".exe" on Windows)."""
    suffix = ".exe" if sys.platform.startswith("win") else ""
    return name + suffix
@CommandProvider
class SpiderMonkeyTests(MachCommandBase):
    @Command(
        "jstests",
        category="testing",
        description="Run SpiderMonkey JS tests in the JS shell.",
    )
    @CommandArgument("--shell", help="The shell to be used")
    @CommandArgument(
        "params",
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass down to the test harness.",
    )
    def run_jstests(self, command_context, shell, params):
        import subprocess

        self.virtualenv_manager.ensure()
        python = self.virtualenv_manager.python_path

        # Use the built shell unless one was passed explicitly.
        js = shell or os.path.join(self.bindir, executable_name("js"))
        jstest_cmd = [
            python,
            os.path.join(self.topsrcdir, "js", "src", "tests", "jstests.py"),
            # NOTE(review): jstests.py takes the shell as a positional
            # argument; this line was reconstructed — confirm against the
            # harness's CLI.
            js,
        ] + params

        return subprocess.call(jstest_cmd)

    @Command(
        "jit-test",
        category="testing",
        description="Run SpiderMonkey jit-tests in the JS shell.",
        ok_if_tests_disabled=True,
    )
    @CommandArgument("--shell", help="The shell to be used")
    @CommandArgument(
        "--cgc",
        action="store_true",
        default=False,
        help="Run with the SM(cgc) job's env vars",
    )
    @CommandArgument(
        "params",
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass down to the test harness.",
    )
    def run_jittests(self, command_context, shell, cgc, params):
        import subprocess

        self.virtualenv_manager.ensure()
        python = self.virtualenv_manager.python_path

        js = shell or os.path.join(self.bindir, executable_name("js"))
        jittest_cmd = [
            python,
            os.path.join(self.topsrcdir, "js", "src", "jit-test", "jit_test.py"),
            # NOTE(review): reconstructed positional shell argument — confirm.
            js,
        ] + params

        env = os.environ.copy()
        if cgc:
            # Mirror the SM(cgc) CI job's incremental-GC zeal setting.
            env["JS_GC_ZEAL"] = "IncrementalMultipleSlices"

        return subprocess.call(jittest_cmd, env=env)

    @Command(
        "jsapi-tests", category="testing", description="Run SpiderMonkey JSAPI tests."
    )
    @CommandArgument(
        "test_name",
        nargs="?",
        metavar="N",
        help="Test to run. Can be a prefix or omitted. If "
        "omitted, the entire test suite is executed.",
    )
    def run_jsapitests(self, command_context, test_name=None):
        import subprocess

        jsapi_tests_cmd = [os.path.join(self.bindir, executable_name("jsapi-tests"))]
        if test_name:
            jsapi_tests_cmd.append(test_name)

        test_env = os.environ.copy()
        test_env["TOPSRCDIR"] = self.topsrcdir

        return subprocess.call(jsapi_tests_cmd, env=test_env)

    def run_check_js_msg(self):
        # Verify JS message files use the expected encoding.
        import subprocess

        self.virtualenv_manager.ensure()
        python = self.virtualenv_manager.python_path

        check_cmd = [
            python,
            os.path.join(self.topsrcdir, "config", "check_js_msg_encoding.py"),
        ]

        return subprocess.call(check_cmd)
def get_jsshell_parser():
    """Return the CLI parser defined by the jsshell benchmark harness."""
    from jsshell.benchmark import get_parser

    return get_parser()
@CommandProvider
class JsShellTests(MachCommandBase):
    @Command(
        "jsshell-bench",
        category="testing",
        parser=get_jsshell_parser,
        description="Run benchmarks in the SpiderMonkey JS shell.",
    )
    def run_jsshelltests(self, command_context, **kwargs):
        # Benchmarks run inside the build's virtualenv via the jsshell package.
        self.activate_virtualenv()
        from jsshell import benchmark

        return benchmark.run(**kwargs)
@CommandProvider
class CramTest(MachCommandBase):
    @Command(
        "cramtest",
        category="testing",
        description="Mercurial style .t tests for command line applications.",
    )
    @CommandArgument(
        "test_paths",
        nargs="*",
        metavar="N",
        help="Test paths to run. Each path can be a test file or directory. "
        "If omitted, the entire suite will be run.",
    )
    @CommandArgument(
        "cram_args",
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass down to the cram binary. See "
        "'./mach python -m cram -- -h' for a list of available options.",
    )
    def cramtest(
        self, command_context, cram_args=None, test_paths=None, test_objects=None
    ):
        """Resolve cram-flavored tests and run them via `python -m cram`.

        Returns the cram exit code, or 1 if no tests were collected.
        """
        self.activate_virtualenv()
        import mozinfo
        from manifestparser import TestManifest

        if test_objects is None:
            from moztest.resolve import TestResolver

            resolver = self._spawn(TestResolver)
            if test_paths:
                # If we were given test paths, try to find tests matching them.
                test_objects = resolver.resolve_tests(paths=test_paths, flavor="cram")
            else:
                # Otherwise just run everything in CRAMTEST_MANIFESTS
                test_objects = resolver.resolve_tests(flavor="cram")

        if not test_objects:
            message = "No tests were collected, check spelling of the test paths."
            self.log(logging.WARN, "cramtest", {}, message)
            return 1

        mp = TestManifest()
        mp.tests.extend(test_objects)
        tests = mp.active_tests(disabled=False, **mozinfo.info)

        python = self.virtualenv_manager.python_path
        # Bug fix: cram_args defaults to None, which previously raised a
        # TypeError on list concatenation when no extra args were given.
        cmd = [python, "-m", "cram"] + (cram_args or []) + [t["relpath"] for t in tests]
        return subprocess.call(cmd, cwd=self.topsrcdir)
@CommandProvider
class TestInfoCommand(MachCommandBase):
    # Imported at class level so date/timedelta are usable in the decorator
    # default expressions below.
    from datetime import date, timedelta

    @Command(
        "test-info", category="testing", description="Display historical test results."
    )
    def test_info(self, command_context):
        """
        All functions implemented as subcommands.
        """

    @SubCommand(
        "test-info",
        "tests",
        description="Display historical test result summary for named tests.",
    )
    @CommandArgument(
        "test_names", nargs=argparse.REMAINDER, help="Test(s) of interest."
    )
    @CommandArgument(
        "--start",
        # Default window: the last 7 days.
        default=(date.today() - timedelta(7)).strftime("%Y-%m-%d"),
        help="Start date (YYYY-MM-DD)",
    )
    @CommandArgument(
        "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
    )
    @CommandArgument(
        "--show-info",
        action="store_true",
        help="Retrieve and display general test information.",
    )
    @CommandArgument(
        "--show-bugs",
        action="store_true",
        help="Retrieve and display related Bugzilla bugs.",
    )
    @CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
    def test_info_tests(
        self,
        command_context,
        test_names,
        start,
        end,
        show_info,
        show_bugs,
        verbose,
    ):
        import testinfo

        ti = testinfo.TestInfoTests(verbose)
        ti.report(
            test_names,
            start,
            end,
            show_info,
            show_bugs,
        )

    @SubCommand(
        "test-info",
        "report",
        description="Generate a json report of test manifests and/or tests "
        "categorized by Bugzilla component and optionally filtered "
        "by path, component, and/or manifest annotations.",
    )
    @CommandArgument(
        "--components",
        default=None,
        help="Comma-separated list of Bugzilla components."
        " eg. Testing::General,Core::WebVR",
    )
    @CommandArgument(
        "--flavor",
        help='Limit results to tests of the specified flavor (eg. "xpcshell").',
    )
    @CommandArgument(
        "--subsuite",
        help='Limit results to tests of the specified subsuite (eg. "devtools").',
    )
    @CommandArgument(
        "paths", nargs=argparse.REMAINDER, help="File system paths of interest."
    )
    @CommandArgument(
        "--show-manifests",
        action="store_true",
        help="Include test manifests in report.",
    )
    @CommandArgument(
        "--show-tests", action="store_true", help="Include individual tests in report."
    )
    @CommandArgument(
        "--show-summary", action="store_true", help="Include summary in report."
    )
    @CommandArgument(
        "--show-annotations",
        action="store_true",
        help="Include list of manifest annotation conditions in report.",
    )
    @CommandArgument(
        "--filter-values",
        help="Comma-separated list of value regular expressions to filter on; "
        "displayed tests contain all specified values.",
    )
    @CommandArgument(
        "--filter-keys",
        help="Comma-separated list of test keys to filter on, "
        'like "skip-if"; only these fields will be searched '
        "for filter-values.",
    )
    @CommandArgument(
        "--no-component-report",
        action="store_false",
        dest="show_components",
        default=True,
        help="Do not categorize by bugzilla component.",
    )
    @CommandArgument("--output-file", help="Path to report file.")
    @CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
    def test_report(
        self,
        command_context,
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        filter_values,
        filter_keys,
        show_components,
        output_file,
        verbose,
    ):
        import testinfo
        from mozbuild.build_commands import Build

        # The report needs a configured build environment; configure on
        # demand if it has never run.
        try:
            self.config_environment
        except BuildEnvironmentNotFoundException:
            print("Looks like configure has not run yet, running it now...")
            builder = Build(self._mach_context, None)
            builder.configure(command_context)

        ti = testinfo.TestInfoReport(verbose)
        ti.report(
            components,
            flavor,
            subsuite,
            paths,
            show_manifests,
            show_tests,
            show_summary,
            show_annotations,
            filter_values,
            filter_keys,
            show_components,
            output_file,
        )

    @SubCommand(
        "test-info",
        "report-diff",
        description='Compare two reports generated by "test-info reports".',
    )
    @CommandArgument(
        "--before",
        default=None,
        help="The first (earlier) report file; path to local file or url.",
    )
    @CommandArgument(
        "--after", help="The second (later) report file; path to local file or url."
    )
    @CommandArgument(
        "--output-file",
        help="Path to report file to be written. If not specified, report"
        "will be written to standard output.",
    )
    @CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
    def test_report_diff(self, command_context, before, after, output_file, verbose):
        import testinfo

        ti = testinfo.TestInfoReport(verbose)
        ti.report_diff(before, after, output_file)
@CommandProvider
class RustTests(MachCommandBase):
    @Command(
        "rusttests",
        category="testing",
        conditions=[conditions.is_non_artifact_build],
        description="Run rust unit tests (via cargo test).",
    )
    def run_rusttests(self, command_context, **kwargs):
        # Delegate to the build system: run only the tiers needed to reach
        # the recurse_rusttests target.
        return self._mach_context.commands.dispatch(
            "build",
            self._mach_context,
            what=["pre-export", "export", "recurse_rusttests"],
        )
@CommandProvider
class TestFluentMigration(MachCommandBase):
    @Command(
        "fluent-migration-test",
        category="testing",
        description="Test Fluent migration recipes.",
    )
    @CommandArgument("test_paths", nargs="*", metavar="N", help="Recipe paths to test.")
    def run_migration_tests(self, command_context, test_paths=None, **kwargs):
        """Validate each migration recipe, then run the valid ones.

        Returns 0 if every recipe validated and migrated cleanly, 1 otherwise.
        """
        if not test_paths:
            test_paths = []
        self.activate_virtualenv()
        from test_fluent_migrations import fmt

        rv = 0
        with_context = []
        for to_test in test_paths:
            try:
                # First pass: static inspection; collect issues per recipe.
                context = fmt.inspect_migration(to_test)
                for issue in context["issues"]:
                    self.log(
                        logging.ERROR,
                        "fluent-migration-test",
                        {
                            "error": issue["msg"],
                            "file": to_test,
                        },
                        "ERROR in {file}: {error}",
                    )
                if context["issues"]:
                    # Recipes with issues are skipped, but note rv is not
                    # set here — only inspection *exceptions* flip it below.
                    continue
                with_context.append(
                    {
                        "to_test": to_test,
                        "references": context["references"],
                    }
                )
            except Exception as e:
                self.log(
                    logging.ERROR,
                    "fluent-migration-test",
                    {"error": str(e), "file": to_test},
                    "ERROR in {file}: {error}",
                )
                rv |= 1
        # Second pass: actually run the migrations that inspected cleanly.
        obj_dir = fmt.prepare_object_dir(self)
        for context in with_context:
            rv |= fmt.test_migration(self, obj_dir, **context)
        return rv