Bug 1700051: part 46) Const-qualify `mozInlineSpellStatus::mAnchorRange`. r=smaug
[gecko.git] / testing / mach_commands.py
blobda226e41e3ff3d4d5ba9a592d19e84cc0fd18596
1 # This Source Code Form is subject to the terms of the Mozilla Public
2 # License, v. 2.0. If a copy of the MPL was not distributed with this
3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
5 from __future__ import absolute_import, print_function, unicode_literals
7 import argparse
8 import logging
9 import os
10 import sys
11 import subprocess
from mach.decorators import (
    CommandArgument,
    CommandProvider,
    Command,
    SettingsProvider,
    SubCommand,
)

from mozbuild.base import (
    BuildEnvironmentNotFoundException,
    MachCommandBase,
    MachCommandConditions as conditions,
)
# Message printed when `mach test` cannot resolve any tests from the
# arguments the user supplied.
UNKNOWN_TEST = """
I was unable to find tests from the given argument(s).

You should specify a test directory, filename, test suite name, or
abbreviation.

It's possible my little brain doesn't know about the type of test you are
trying to execute. If you suspect this, please request support by filing
a bug at
https://bugzilla.mozilla.org/enter_bug.cgi?product=Testing&component=General.
""".strip()

# %-format template (flavor, subsuite-suffix) printed when resolved tests
# belong to a flavor that has no registered mach command.
UNKNOWN_FLAVOR = """
I know you are trying to run a %s%s test. Unfortunately, I can't run those
tests yet. Sorry!
""".strip()

# Help text for the positional `what` argument of `mach test`; the {} slot
# is filled with the sorted list of known suite names.
TEST_HELP = """
Test or tests to run. Tests can be specified by filename, directory, suite
name or suite alias.

The following test suites and aliases are supported: {}
""".strip()
@SettingsProvider
class TestConfig(object):
    """Declares the `mach` settings used by `mach test` (output format and
    log level)."""

    @classmethod
    def config_settings(cls):
        # Imported lazily so plain mach startup does not pay for mozlog.
        from mozlog.commandline import log_formatters
        from mozlog.structuredlog import log_levels

        format_desc = "The default format to use when running tests with `mach test`."
        format_choices = list(log_formatters)
        level_desc = "The default log level to use when running tests with `mach test`."
        level_choices = [l.lower() for l in log_levels]
        # Each entry: (setting name, type, description, default, extra options).
        return [
            ("test.format", "string", format_desc, "mach", {"choices": format_choices}),
            ("test.level", "string", level_desc, "info", {"choices": level_choices}),
        ]
def get_test_parser():
    """Build and return the argparse parser for the `mach test` command.

    The parser accepts one or more test selectors (`what`), passthrough
    arguments for the underlying harness, an optional debugger, and the
    standard mozlog logging options.
    """
    from mozlog.commandline import add_logging_group
    from moztest.resolve import TEST_SUITES

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "what",
        default=None,
        nargs="+",
        help=TEST_HELP.format(", ".join(sorted(TEST_SUITES))),
    )
    parser.add_argument(
        "extra_args",
        default=None,
        # REMAINDER: everything after `what` is forwarded untouched.
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass to the underlying test command(s). "
        "If an underlying command doesn't recognize the argument, it "
        "will fail.",
    )
    parser.add_argument(
        "--debugger",
        default=None,
        action="store",
        nargs="?",
        help="Specify a debugger to use.",
    )
    # Adds the shared --log-* options (e.g. --log-grouped).
    add_logging_group(parser)
    return parser
# Suites that `mach addtest` knows how to generate test skeletons for.
ADD_TEST_SUPPORTED_SUITES = [
    "mochitest-chrome",
    "mochitest-plain",
    "mochitest-browser-chrome",
    "web-platform-tests-testharness",
    "web-platform-tests-reftest",
    "xpcshell",
]

# Document types (file extensions) a generated test may use.
ADD_TEST_SUPPORTED_DOCS = ["js", "html", "xhtml", "xul"]

# Short aliases accepted for --suite, mapped to canonical suite names.
SUITE_SYNONYMS = {
    "wpt": "web-platform-tests-testharness",
    "wpt-testharness": "web-platform-tests-testharness",
    "wpt-reftest": "web-platform-tests-reftest",
}

# Sentinel meaning "argument not supplied at all"; needed because None is a
# meaningful value for --editor (flag given with no binary).
MISSING_ARG = object()
def create_parser_addtest():
    """Build and return the argparse parser for `mach addtest`.

    Besides the common options, each registered test-creator class may
    contribute its own argument group via a `get_parser` hook.
    """
    import addtest

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--suite",
        choices=sorted(ADD_TEST_SUPPORTED_SUITES + list(SUITE_SYNONYMS.keys())),
        help="suite for the test. "
        "If you pass a `test` argument this will be determined "
        "based on the filename and the folder it is in",
    )
    parser.add_argument(
        "-o",
        "--overwrite",
        action="store_true",
        help="Overwrite an existing file if it exists.",
    )
    parser.add_argument(
        "--doc",
        choices=ADD_TEST_SUPPORTED_DOCS,
        help="Document type for the test (if applicable)."
        "If you pass a `test` argument this will be determined "
        "based on the filename.",
    )
    parser.add_argument(
        "-e",
        "--editor",
        action="store",
        nargs="?",
        # MISSING_ARG distinguishes "flag absent" from "--editor with no value".
        default=MISSING_ARG,
        help="Open the created file(s) in an editor; if a "
        "binary is supplied it will be used otherwise the default editor for "
        "your environment will be opened",
    )

    # Let each suite-specific creator contribute its own options.
    for base_suite in addtest.TEST_CREATORS:
        cls = addtest.TEST_CREATORS[base_suite]
        if hasattr(cls, "get_parser"):
            group = parser.add_argument_group(base_suite)
            cls.get_parser(group)

    parser.add_argument("test", nargs="?", help=("Test to create."))
    return parser
@CommandProvider
class AddTest(MachCommandBase):
    """Implements `mach addtest`: scaffold new test files from templates."""

    @Command(
        "addtest",
        category="testing",
        description="Generate tests based on templates",
        parser=create_parser_addtest,
    )
    def addtest(
        self,
        suite=None,
        test=None,
        doc=None,
        overwrite=False,
        editor=MISSING_ARG,
        **kwargs
    ):
        """Create one or more test files for `suite` (guessed from `test`'s
        path when omitted), update the relevant manifest, and optionally open
        the new files in an editor.

        Returns 0 on success, 1 on any error.
        """
        import addtest
        import io
        from moztest.resolve import TEST_SUITES

        # With no arguments at all, just show the help text.
        if not suite and not test:
            return create_parser_addtest().parse_args(["--help"])

        if suite in SUITE_SYNONYMS:
            suite = SUITE_SYNONYMS[suite]

        if test:
            if not overwrite and os.path.isfile(os.path.abspath(test)):
                print("Error: can't generate a test that already exists:", test)
                return 1

            abs_test = os.path.abspath(test)
            if doc is None:
                doc = self.guess_doc(abs_test)
            if suite is None:
                guessed_suite, err = self.guess_suite(abs_test)
                if err:
                    print(err)
                    return 1
                suite = guessed_suite

        else:
            test = None
            if doc is None:
                doc = "html"

        if not suite:
            print(
                "We couldn't automatically determine a suite. "
                "Please specify `--suite` with one of the following options:\n{}\n"
                "If you'd like to add support to a new suite, please file a bug "
                "blocking https://bugzilla.mozilla.org/show_bug.cgi?id=1540285.".format(
                    ADD_TEST_SUPPORTED_SUITES
                )
            )
            return 1

        if doc not in ADD_TEST_SUPPORTED_DOCS:
            print(
                "Error: invalid `doc`. Either pass in a test with a valid extension"
                "({}) or pass in the `doc` argument".format(ADD_TEST_SUPPORTED_DOCS)
            )
            return 1

        creator_cls = addtest.creator_for_suite(suite)

        if creator_cls is None:
            print("Sorry, `addtest` doesn't currently know how to add {}".format(suite))
            return 1

        creator = creator_cls(self.topsrcdir, test, suite, doc, **kwargs)

        creator.check_args()

        paths = []
        added_tests = False
        # The creator yields (path, template) pairs; path may be falsy, in
        # which case the template is printed instead of written to disk.
        for path, template in creator:
            if not template:
                continue
            added_tests = True
            if path:
                paths.append(path)
                print("Adding a test file at {} (suite `{}`)".format(path, suite))

                try:
                    os.makedirs(os.path.dirname(path))
                except OSError:
                    # Directory already exists (or cannot be created; the
                    # open() below will surface a real failure).
                    pass

                with io.open(path, "w", newline="\n") as f:
                    f.write(template)
            else:
                # write to stdout if you passed only suite and doc and not a file path
                print(template)

        if not added_tests:
            return 1

        if test:
            creator.update_manifest()

            # Small hack, should really do this better
            if suite.startswith("wpt-"):
                suite = "web-platform-tests"

            mach_command = TEST_SUITES[suite]["mach_command"]
            print(
                "Please make sure to add the new test to your commit. "
                "You can now run the test with:\n ./mach {} {}".format(
                    mach_command, test
                )
            )

        if editor is not MISSING_ARG:
            # --editor was given. If no binary was supplied, fall back to the
            # conventional environment variables.
            if editor is None:
                if "VISUAL" in os.environ:
                    editor = os.environ["VISUAL"]
                elif "EDITOR" in os.environ:
                    editor = os.environ["EDITOR"]
                else:
                    print("Unable to determine editor; please specify a binary")

            proc = None
            if editor:
                # NOTE: shell=True with a string built from $VISUAL/$EDITOR.
                # This is local, user-controlled input, but it is still a
                # shell-injection surface; paths with spaces also break here.
                proc = subprocess.Popen("%s %s" % (editor, " ".join(paths)), shell=True)

            if proc:
                proc.wait()

        return 0

    def guess_doc(self, abs_test):
        """Return the document type implied by *abs_test*'s extension
        (e.g. "html", "js"); empty string if there is no extension."""
        filename = os.path.basename(abs_test)
        return os.path.splitext(filename)[1].strip(".")

    def guess_suite(self, abs_test):
        """Guess the suite for *abs_test* from its filename and the manifest
        files present in its directory.

        Returns (guessed_suite, err): exactly one of the two is non-None,
        except when nothing matches, in which case both are None.
        """
        # If you pass a abs_test, try to detect the type based on the name
        # and folder. This detection can be skipped if you pass the `type` arg.
        err = None
        guessed_suite = None
        parent = os.path.dirname(abs_test)
        filename = os.path.basename(abs_test)

        has_browser_ini = os.path.isfile(os.path.join(parent, "browser.ini"))
        has_chrome_ini = os.path.isfile(os.path.join(parent, "chrome.ini"))
        has_plain_ini = os.path.isfile(os.path.join(parent, "mochitest.ini"))
        has_xpcshell_ini = os.path.isfile(os.path.join(parent, "xpcshell.ini"))

        in_wpt_folder = abs_test.startswith(
            os.path.abspath(os.path.join("testing", "web-platform"))
        )

        if in_wpt_folder:
            guessed_suite = "web-platform-tests-testharness"
            if "/css/" in abs_test:
                guessed_suite = "web-platform-tests-reftest"
        elif (
            filename.startswith("test_")
            and has_xpcshell_ini
            and self.guess_doc(abs_test) == "js"
        ):
            guessed_suite = "xpcshell"
        else:
            if filename.startswith("browser_") and has_browser_ini:
                guessed_suite = "mochitest-browser-chrome"
            elif filename.startswith("test_"):
                if has_chrome_ini and has_plain_ini:
                    err = (
                        "Error: directory contains both a chrome.ini and mochitest.ini. "
                        "Please set --suite=mochitest-chrome or --suite=mochitest-plain."
                    )
                elif has_chrome_ini:
                    guessed_suite = "mochitest-chrome"
                elif has_plain_ini:
                    guessed_suite = "mochitest-plain"
        return guessed_suite, err
@CommandProvider
class Test(MachCommandBase):
    @Command(
        "test",
        category="testing",
        description="Run tests (detects the kind of test and runs it).",
        parser=get_test_parser,
    )
    def test(self, what, extra_args, **log_args):
        """Run tests from names or paths.

        mach test accepts arguments specifying which tests to run. Each argument
        can be:

        * The path to a test file
        * A directory containing tests
        * A test suite name
        * An alias to a test suite name (codes used on TreeHerder)

        When paths or directories are given, they are first resolved to test
        files known to the build system.

        If resolved tests belong to more than one test type/flavor/harness,
        the harness for each relevant type/flavor will be invoked. e.g. if
        you specify a directory with xpcshell and browser chrome mochitests,
        both harnesses will be invoked.

        Warning: `mach test` does not automatically re-build.
        Please remember to run `mach build` when necessary.

        EXAMPLES

        Run all test files in the devtools/client/shared/redux/middleware/xpcshell/
        directory:

        `./mach test devtools/client/shared/redux/middleware/xpcshell/`

        The below command prints a short summary of results instead of
        the default more verbose output.
        Do not forget the - (minus sign) after --log-grouped!

        `./mach test --log-grouped - devtools/client/shared/redux/middleware/xpcshell/`
        """
        from mozlog.commandline import setup_logging
        from mozlog.handlers import StreamHandler
        from moztest.resolve import get_suite_definition, TestResolver, TEST_SUITES

        # Resolve the user's selectors into whole suites and individual tests.
        resolver = self._spawn(TestResolver)
        run_suites, run_tests = resolver.resolve_metadata(what)

        if not run_suites and not run_tests:
            print(UNKNOWN_TEST)
            return 1

        if log_args.get("debugger", None):
            import mozdebug

            if not mozdebug.get_debugger_info(log_args.get("debugger")):
                sys.exit(1)
            # Forward the debugger to the sub-command as `--debugger=<name>`.
            extra_args_debugger_notation = "=".join(
                ["--debugger", log_args.get("debugger")]
            ).encode("ascii")
            if extra_args:
                extra_args.append(extra_args_debugger_notation)
            else:
                extra_args = [extra_args_debugger_notation]

        # Create shared logger
        format_args = {"level": self._mach_context.settings["test"]["level"]}
        if not run_suites and len(run_tests) == 1:
            # A single test gets verbose, non-compact output.
            format_args["verbose"] = True
            format_args["compact"] = False

        default_format = self._mach_context.settings["test"]["format"]
        log = setup_logging(
            "mach-test", log_args, {default_format: sys.stdout}, format_args
        )
        for handler in log.handlers:
            if isinstance(handler, StreamHandler):
                handler.formatter.inner.summary_on_shutdown = True

        # Track the last non-zero result so the overall run reports failure.
        status = None
        for suite_name in run_suites:
            suite = TEST_SUITES[suite_name]
            kwargs = suite["kwargs"]
            kwargs["log"] = log
            kwargs.setdefault("subsuite", None)

            if "mach_command" in suite:
                res = self._mach_context.commands.dispatch(
                    suite["mach_command"], self._mach_context, argv=extra_args, **kwargs
                )
                if res:
                    status = res

        # Group individual tests by (flavor, subsuite) and dispatch each
        # bucket to its harness's mach command.
        buckets = {}
        for test in run_tests:
            key = (test["flavor"], test.get("subsuite", ""))
            buckets.setdefault(key, []).append(test)

        for (flavor, subsuite), tests in sorted(buckets.items()):
            _, m = get_suite_definition(flavor, subsuite)
            if "mach_command" not in m:
                substr = "-{}".format(subsuite) if subsuite else ""
                print(UNKNOWN_FLAVOR % (flavor, substr))
                status = 1
                continue

            kwargs = dict(m["kwargs"])
            kwargs["log"] = log
            kwargs.setdefault("subsuite", None)

            res = self._mach_context.commands.dispatch(
                m["mach_command"],
                self._mach_context,
                argv=extra_args,
                test_objects=tests,
                **kwargs
            )
            if res:
                status = res

        log.shutdown()
        return status
@CommandProvider
class MachCommands(MachCommandBase):
    @Command(
        "cppunittest", category="testing", description="Run cpp unit tests (C++ tests)."
    )
    @CommandArgument(
        "--enable-webrender",
        action="store_true",
        default=False,
        dest="enable_webrender",
        help="Enable the WebRender compositor in Gecko.",
    )
    @CommandArgument(
        "test_files",
        nargs="*",
        metavar="N",
        help="Test to run. Can be specified as one or more files or "
        "directories, or omitted. If omitted, the entire test suite is "
        "executed.",
    )
    def run_cppunit_test(self, **params):
        """Entry point for `mach cppunittest`; dispatches to the desktop or
        Android runner depending on the build."""
        from mozlog import commandline

        log = params.get("log")
        if not log:
            log = commandline.setup_logging("cppunittest", {}, {"tbpl": sys.stdout})

        # See if we have crash symbols
        symbols_path = os.path.join(self.distdir, "crashreporter-symbols")
        if not os.path.isdir(symbols_path):
            symbols_path = None

        # If no tests specified, run all tests in main manifest
        tests = params["test_files"]
        if not tests:
            tests = [os.path.join(self.distdir, "cppunittests")]
            manifest_path = os.path.join(self.topsrcdir, "testing", "cppunittest.ini")
        else:
            manifest_path = None

        utility_path = self.bindir

        if conditions.is_android(self):
            from mozrunner.devices.android_device import (
                verify_android_device,
                InstallIntent,
            )

            verify_android_device(self, install=InstallIntent.NO)
            return self.run_android_test(tests, symbols_path, manifest_path, log)

        return self.run_desktop_test(
            tests, symbols_path, manifest_path, utility_path, log
        )

    def run_desktop_test(self, tests, symbols_path, manifest_path, utility_path, log):
        """Run the cppunittest harness locally; returns 0 on success, 1 on
        failure."""
        import runcppunittests as cppunittests
        from mozlog import commandline

        parser = cppunittests.CPPUnittestOptions()
        commandline.add_logging_group(parser)
        options, args = parser.parse_args()

        options.symbols_path = symbols_path
        options.manifest_path = manifest_path
        options.utility_path = utility_path
        options.xre_path = self.bindir

        try:
            result = cppunittests.run_test_harness(options, tests)
        except Exception as e:
            # Log, then re-raise so mach reports the traceback; the
            # `result = False` assignment is never reached by the return.
            log.error("Caught exception running cpp unit tests: %s" % str(e))
            result = False
            raise

        return 0 if result else 1

    def run_android_test(self, tests, symbols_path, manifest_path, log):
        """Run the cppunittest harness against a connected Android device."""
        import remotecppunittests as remotecppunittests
        from mozlog import commandline

        parser = remotecppunittests.RemoteCPPUnittestOptions()
        commandline.add_logging_group(parser)
        options, args = parser.parse_args()

        if not options.adb_path:
            from mozrunner.devices.android_device import get_adb_path

            options.adb_path = get_adb_path(self)
        options.symbols_path = symbols_path
        options.manifest_path = manifest_path
        options.xre_path = self.bindir
        options.local_lib = self.bindir.replace("bin", "fennec")
        # Pick the first fennec*.apk in dist/ as the app under test.
        for file in os.listdir(os.path.join(self.topobjdir, "dist")):
            if file.endswith(".apk") and file.startswith("fennec"):
                options.local_apk = os.path.join(self.topobjdir, "dist", file)
                log.info("using APK: " + options.local_apk)
                break

        try:
            result = remotecppunittests.run_test_harness(options, tests)
        except Exception as e:
            # Same log-and-reraise pattern as the desktop runner.
            log.error("Caught exception running cpp unit tests: %s" % str(e))
            result = False
            raise

        return 0 if result else 1
def executable_name(name):
    """Return *name* with the platform's executable suffix appended.

    On Windows (sys.platform starting with "win") this appends ".exe";
    on every other platform the name is returned unchanged.
    """
    suffix = ".exe" if sys.platform.startswith("win") else ""
    return name + suffix
@CommandProvider
class SpiderMonkeyTests(MachCommandBase):
    @Command(
        "jstests",
        category="testing",
        description="Run SpiderMonkey JS tests in the JS shell.",
    )
    @CommandArgument("--shell", help="The shell to be used")
    @CommandArgument(
        "params",
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass down to the test harness.",
    )
    def run_jstests(self, shell, params):
        """Run the jstests harness against the given (or built) JS shell."""
        import subprocess

        self.virtualenv_manager.ensure()
        python = self.virtualenv_manager.python_path

        # Use the explicitly supplied shell, else the one from the build.
        js = shell or os.path.join(self.bindir, executable_name("js"))
        jstest_cmd = [
            python,
            os.path.join(self.topsrcdir, "js", "src", "tests", "jstests.py"),
            # NOTE(review): reconstructed line — jstests.py takes the shell
            # binary as its first positional argument; confirm against the
            # harness's usage.
            js,
        ] + params

        return subprocess.call(jstest_cmd)

    @Command(
        "jit-test",
        category="testing",
        description="Run SpiderMonkey jit-tests in the JS shell.",
        ok_if_tests_disabled=True,
    )
    @CommandArgument("--shell", help="The shell to be used")
    @CommandArgument(
        "--cgc",
        action="store_true",
        default=False,
        help="Run with the SM(cgc) job's env vars",
    )
    @CommandArgument(
        "params",
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass down to the test harness.",
    )
    def run_jittests(self, shell, cgc, params):
        """Run the jit_test harness, optionally with compacting-GC zeal."""
        import subprocess

        self.virtualenv_manager.ensure()
        python = self.virtualenv_manager.python_path

        js = shell or os.path.join(self.bindir, executable_name("js"))
        jittest_cmd = [
            python,
            os.path.join(self.topsrcdir, "js", "src", "jit-test", "jit_test.py"),
            # NOTE(review): reconstructed line — jit_test.py takes the shell
            # binary as its first positional argument; confirm against the
            # harness's usage.
            js,
        ] + params

        env = os.environ.copy()
        if cgc:
            # Mirrors the SM(cgc) CI job's environment.
            env["JS_GC_ZEAL"] = "IncrementalMultipleSlices"

        return subprocess.call(jittest_cmd, env=env)

    @Command(
        "jsapi-tests", category="testing", description="Run SpiderMonkey JSAPI tests."
    )
    @CommandArgument(
        "test_name",
        nargs="?",
        metavar="N",
        help="Test to run. Can be a prefix or omitted. If "
        "omitted, the entire test suite is executed.",
    )
    def run_jsapitests(self, test_name=None):
        """Run the compiled jsapi-tests binary, optionally filtered by
        test-name prefix."""
        import subprocess

        jsapi_tests_cmd = [os.path.join(self.bindir, executable_name("jsapi-tests"))]
        if test_name:
            jsapi_tests_cmd.append(test_name)

        test_env = os.environ.copy()
        # The binary locates its fixtures relative to the source tree.
        test_env["TOPSRCDIR"] = self.topsrcdir

        return subprocess.call(jsapi_tests_cmd, env=test_env)

    def run_check_js_msg(self):
        """Run the check_js_msg_encoding.py lint over the tree; returns the
        script's exit code."""
        import subprocess

        self.virtualenv_manager.ensure()
        python = self.virtualenv_manager.python_path

        check_cmd = [
            python,
            os.path.join(self.topsrcdir, "config", "check_js_msg_encoding.py"),
        ]

        return subprocess.call(check_cmd)
def get_jsshell_parser():
    """Return the argument parser used by `mach jsshell-bench`."""
    from jsshell import benchmark

    return benchmark.get_parser()
@CommandProvider
class JsShellTests(MachCommandBase):
    @Command(
        "jsshell-bench",
        category="testing",
        parser=get_jsshell_parser,
        description="Run benchmarks in the SpiderMonkey JS shell.",
    )
    def run_jsshelltests(self, **kwargs):
        """Forward all parsed arguments to the jsshell benchmark runner."""
        self.activate_virtualenv()
        from jsshell import benchmark

        return benchmark.run(**kwargs)
@CommandProvider
class CramTest(MachCommandBase):
    @Command(
        "cramtest",
        category="testing",
        description="Mercurial style .t tests for command line applications.",
    )
    @CommandArgument(
        "test_paths",
        nargs="*",
        metavar="N",
        help="Test paths to run. Each path can be a test file or directory. "
        "If omitted, the entire suite will be run.",
    )
    @CommandArgument(
        "cram_args",
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass down to the cram binary. See "
        "'./mach python -m cram -- -h' for a list of available options.",
    )
    def cramtest(self, cram_args=None, test_paths=None, test_objects=None):
        """Resolve cram-flavored tests and run them via `python -m cram`.

        Returns the cram process's exit code, or 1 if no tests matched.
        """
        self.activate_virtualenv()
        import mozinfo
        from manifestparser import TestManifest

        if test_objects is None:
            from moztest.resolve import TestResolver

            resolver = self._spawn(TestResolver)
            if test_paths:
                # If we were given test paths, try to find tests matching them.
                test_objects = resolver.resolve_tests(paths=test_paths, flavor="cram")
            else:
                # Otherwise just run everything in CRAMTEST_MANIFESTS
                test_objects = resolver.resolve_tests(flavor="cram")

        if not test_objects:
            message = "No tests were collected, check spelling of the test paths."
            self.log(logging.WARN, "cramtest", {}, message)
            return 1

        # Filter to tests active for the current platform configuration.
        mp = TestManifest()
        mp.tests.extend(test_objects)
        tests = mp.active_tests(disabled=False, **mozinfo.info)

        python = self.virtualenv_manager.python_path
        cmd = [python, "-m", "cram"] + cram_args + [t["relpath"] for t in tests]
        return subprocess.call(cmd, cwd=self.topsrcdir)
@CommandProvider
class TestInfoCommand(MachCommandBase):
    # Imported at class scope because the decorators below evaluate
    # date/timedelta when the class body runs.
    from datetime import date, timedelta

    @Command(
        "test-info", category="testing", description="Display historical test results."
    )
    def test_info(self):
        """
        All functions implemented as subcommands.
        """

    @SubCommand(
        "test-info",
        "tests",
        description="Display historical test result summary for named tests.",
    )
    @CommandArgument(
        "test_names", nargs=argparse.REMAINDER, help="Test(s) of interest."
    )
    @CommandArgument(
        "--start",
        # Default window: the last 7 days.
        default=(date.today() - timedelta(7)).strftime("%Y-%m-%d"),
        help="Start date (YYYY-MM-DD)",
    )
    @CommandArgument(
        "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
    )
    @CommandArgument(
        "--show-info",
        action="store_true",
        help="Retrieve and display general test information.",
    )
    @CommandArgument(
        "--show-bugs",
        action="store_true",
        help="Retrieve and display related Bugzilla bugs.",
    )
    @CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
    def test_info_tests(
        self,
        test_names,
        start,
        end,
        show_info,
        show_bugs,
        verbose,
    ):
        """Summarize historical results for the named tests."""
        import testinfo

        ti = testinfo.TestInfoTests(verbose)
        ti.report(
            test_names,
            start,
            end,
            show_info,
            show_bugs,
        )

    @SubCommand(
        "test-info",
        "report",
        description="Generate a json report of test manifests and/or tests "
        "categorized by Bugzilla component and optionally filtered "
        "by path, component, and/or manifest annotations.",
    )
    @CommandArgument(
        "--components",
        default=None,
        help="Comma-separated list of Bugzilla components."
        " eg. Testing::General,Core::WebVR",
    )
    @CommandArgument(
        "--flavor",
        help='Limit results to tests of the specified flavor (eg. "xpcshell").',
    )
    @CommandArgument(
        "--subsuite",
        help='Limit results to tests of the specified subsuite (eg. "devtools").',
    )
    @CommandArgument(
        "paths", nargs=argparse.REMAINDER, help="File system paths of interest."
    )
    @CommandArgument(
        "--show-manifests",
        action="store_true",
        help="Include test manifests in report.",
    )
    @CommandArgument(
        "--show-tests", action="store_true", help="Include individual tests in report."
    )
    @CommandArgument(
        "--show-summary", action="store_true", help="Include summary in report."
    )
    @CommandArgument(
        "--show-annotations",
        action="store_true",
        help="Include list of manifest annotation conditions in report.",
    )
    @CommandArgument(
        "--filter-values",
        help="Comma-separated list of value regular expressions to filter on; "
        "displayed tests contain all specified values.",
    )
    @CommandArgument(
        "--filter-keys",
        help="Comma-separated list of test keys to filter on, "
        'like "skip-if"; only these fields will be searched '
        "for filter-values.",
    )
    @CommandArgument(
        "--no-component-report",
        action="store_false",
        dest="show_components",
        default=True,
        help="Do not categorize by bugzilla component.",
    )
    @CommandArgument("--output-file", help="Path to report file.")
    @CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
    def test_report(
        self,
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        filter_values,
        filter_keys,
        show_components,
        output_file,
        verbose,
    ):
        """Generate the json test-manifest/test report."""
        import testinfo
        from mozbuild.build_commands import Build

        # The report needs a configured build environment; configure on
        # demand when one is missing.
        try:
            self.config_environment
        except BuildEnvironmentNotFoundException:
            print("Looks like configure has not run yet, running it now...")
            builder = Build(self._mach_context, None)
            builder.configure()

        ti = testinfo.TestInfoReport(verbose)
        ti.report(
            components,
            flavor,
            subsuite,
            paths,
            show_manifests,
            show_tests,
            show_summary,
            show_annotations,
            filter_values,
            filter_keys,
            show_components,
            output_file,
        )

    @SubCommand(
        "test-info",
        "report-diff",
        description='Compare two reports generated by "test-info reports".',
    )
    @CommandArgument(
        "--before",
        default=None,
        help="The first (earlier) report file; path to local file or url.",
    )
    @CommandArgument(
        "--after", help="The second (later) report file; path to local file or url."
    )
    @CommandArgument(
        "--output-file",
        help="Path to report file to be written. If not specified, report"
        "will be written to standard output.",
    )
    @CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
    def test_report_diff(self, before, after, output_file, verbose):
        """Diff two previously generated test-info reports."""
        import testinfo

        ti = testinfo.TestInfoReport(verbose)
        ti.report_diff(before, after, output_file)
@CommandProvider
class RustTests(MachCommandBase):
    @Command(
        "rusttests",
        category="testing",
        conditions=[conditions.is_non_artifact_build],
        description="Run rust unit tests (via cargo test).",
    )
    def run_rusttests(self, **kwargs):
        """Run Rust unit tests by dispatching to the build system's
        `recurse_rusttests` tier."""
        return self._mach_context.commands.dispatch(
            "build",
            self._mach_context,
            what=["pre-export", "export", "recurse_rusttests"],
        )
@CommandProvider
class TestFluentMigration(MachCommandBase):
    @Command(
        "fluent-migration-test",
        category="testing",
        description="Test Fluent migration recipes.",
    )
    @CommandArgument("test_paths", nargs="*", metavar="N", help="Recipe paths to test.")
    def run_migration_tests(self, test_paths=None, **kwargs):
        """Validate each migration recipe, then run migration tests for the
        recipes that passed inspection.

        Returns 0 when everything passed, non-zero otherwise.
        """
        if not test_paths:
            test_paths = []
        self.activate_virtualenv()
        from test_fluent_migrations import fmt

        rv = 0
        with_context = []
        for to_test in test_paths:
            try:
                # Static inspection: collect issues and reference files
                # before attempting to run the migration.
                context = fmt.inspect_migration(to_test)
                for issue in context["issues"]:
                    self.log(
                        logging.ERROR,
                        "fluent-migration-test",
                        {
                            "error": issue["msg"],
                            "file": to_test,
                        },
                        "ERROR in {file}: {error}",
                    )
                if context["issues"]:
                    # Skip recipes with inspection issues; note that rv is
                    # not set here, only for exceptions below.
                    continue
                with_context.append(
                    {
                        "to_test": to_test,
                        "references": context["references"],
                    }
                )
            except Exception as e:
                self.log(
                    logging.ERROR,
                    "fluent-migration-test",
                    {"error": str(e), "file": to_test},
                    "ERROR in {file}: {error}",
                )
                rv |= 1
        obj_dir = fmt.prepare_object_dir(self)
        for context in with_context:
            rv |= fmt.test_migration(self, obj_dir, **context)
        return rv