# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import absolute_import, print_function, unicode_literals

import argparse
import logging
import os
import subprocess
import sys

from mach.decorators import (
    CommandArgument,
    CommandProvider,
    Command,
    SettingsProvider,
    SubCommand,
)

from mozbuild.base import (
    BuildEnvironmentNotFoundException,
    MachCommandBase,
    MachCommandConditions as conditions,
)

UNKNOWN_TEST = """
I was unable to find tests from the given argument(s).

You should specify a test directory, filename, test suite name, or
abbreviation.

It's possible my little brain doesn't know about the type of test you are
trying to execute. If you suspect this, please request support by filing
a bug at
https://bugzilla.mozilla.org/enter_bug.cgi?product=Testing&component=General.
""".strip()

UNKNOWN_FLAVOR = """
I know you are trying to run a %s%s test. Unfortunately, I can't run those
tests yet. Sorry!
""".strip()

TEST_HELP = """
Test or tests to run. Tests can be specified by filename, directory, suite
name or suite alias.

The following test suites and aliases are supported: {}
""".strip()


@SettingsProvider
class TestConfig(object):
    @classmethod
    def config_settings(cls):
        from mozlog.commandline import log_formatters
        from mozlog.structuredlog import log_levels

        format_desc = "The default format to use when running tests with `mach test`."
        format_choices = list(log_formatters)
        level_desc = "The default log level to use when running tests with `mach test`."
        level_choices = [level.lower() for level in log_levels]
        return [
            ("test.format", "string", format_desc, "mach", {"choices": format_choices}),
            ("test.level", "string", level_desc, "info", {"choices": level_choices}),
        ]


def get_test_parser():
    from mozlog.commandline import add_logging_group
    from moztest.resolve import TEST_SUITES

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "what",
        default=None,
        nargs="+",
        help=TEST_HELP.format(", ".join(sorted(TEST_SUITES))),
    )
    parser.add_argument(
        "extra_args",
        default=None,
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass to the underlying test command(s). "
        "If an underlying command doesn't recognize the argument, it "
        "will fail.",
    )
    parser.add_argument(
        "--debugger",
        default=None,
        action="store",
        nargs="?",
        help="Specify a debugger to use.",
    )
    add_logging_group(parser)
    return parser
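
# Illustrative invocations this parser accepts (suite names come from
# moztest.resolve.TEST_SUITES; the paths are hypothetical):
#   ./mach test xpcshell                 # a whole suite by name
#   ./mach test dom/indexedDB/           # every test under a directory
#   ./mach test --debugger=gdb dom/foo   # run under a debugger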


ADD_TEST_SUPPORTED_SUITES = [
    "mochitest-chrome",
    "mochitest-plain",
    "mochitest-browser-chrome",
    "web-platform-tests-testharness",
    "web-platform-tests-reftest",
    "xpcshell",
]

ADD_TEST_SUPPORTED_DOCS = ["js", "html", "xhtml", "xul"]

SUITE_SYNONYMS = {
    "wpt": "web-platform-tests-testharness",
    "wpt-testharness": "web-platform-tests-testharness",
    "wpt-reftest": "web-platform-tests-reftest",
}

# Sentinel default for --editor: distinguishes "flag never passed" from
# "flag passed without a value" (argparse stores None for nargs="?").
MISSING_ARG = object()


def create_parser_addtest():
    import addtest

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--suite",
        choices=sorted(ADD_TEST_SUPPORTED_SUITES + list(SUITE_SYNONYMS.keys())),
        help="Suite for the test. "
        "If you pass a `test` argument, this will be determined "
        "based on the filename and the folder it is in.",
    )
    parser.add_argument(
        "-o",
        "--overwrite",
        action="store_true",
        help="Overwrite an existing file if it exists.",
    )
    parser.add_argument(
        "--doc",
        choices=ADD_TEST_SUPPORTED_DOCS,
        help="Document type for the test (if applicable). "
        "If you pass a `test` argument, this will be determined "
        "based on the filename.",
    )
    parser.add_argument(
        "-e",
        "--editor",
        action="store",
        nargs="?",
        default=MISSING_ARG,
        help="Open the created file(s) in an editor; if a "
        "binary is supplied it will be used, otherwise the default editor for "
        "your environment will be opened.",
    )

    for base_suite in addtest.TEST_CREATORS:
        cls = addtest.TEST_CREATORS[base_suite]
        if hasattr(cls, "get_parser"):
            group = parser.add_argument_group(base_suite)
            cls.get_parser(group)

    parser.add_argument("test", nargs="?", help="Test to create.")
    return parser
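
# Hedged usage sketch for the parser above (the path is illustrative):
#   ./mach addtest dom/foo/test_bar.html --suite mochitest-plain
# With no --suite, addtest guesses one from the filename and the manifests
# in the enclosing directory (see AddTest.guess_suite below).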


@CommandProvider
class AddTest(MachCommandBase):
    @Command(
        "addtest",
        category="testing",
        description="Generate tests based on templates.",
        parser=create_parser_addtest,
    )
    def addtest(
        self,
        command_context,
        suite=None,
        test=None,
        doc=None,
        overwrite=False,
        editor=MISSING_ARG,
        **kwargs,
    ):
        import addtest
        import io
        from moztest.resolve import TEST_SUITES

        if not suite and not test:
            return create_parser_addtest().parse_args(["--help"])

        if suite in SUITE_SYNONYMS:
            suite = SUITE_SYNONYMS[suite]

        if test:
            if not overwrite and os.path.isfile(os.path.abspath(test)):
                print("Error: can't generate a test that already exists:", test)
                return 1

            abs_test = os.path.abspath(test)
            if doc is None:
                doc = self.guess_doc(abs_test)
            if suite is None:
                guessed_suite, err = self.guess_suite(abs_test)
                if err:
                    print(err)
                    return 1
                suite = guessed_suite

        else:
            test = None
            if doc is None:
                doc = "html"

        if not suite:
            print(
                "We couldn't automatically determine a suite. "
                "Please specify `--suite` with one of the following options:\n{}\n"
                "If you'd like to add support for a new suite, please file a bug "
                "blocking https://bugzilla.mozilla.org/show_bug.cgi?id=1540285.".format(
                    ADD_TEST_SUPPORTED_SUITES
                )
            )
            return 1

        if doc not in ADD_TEST_SUPPORTED_DOCS:
            print(
                "Error: invalid `doc`. Either pass in a test with a valid extension "
                "({}) or pass in the `doc` argument.".format(ADD_TEST_SUPPORTED_DOCS)
            )
            return 1

        creator_cls = addtest.creator_for_suite(suite)

        if creator_cls is None:
            print(
                "Sorry, `addtest` doesn't currently know how to add {}.".format(suite)
            )
            return 1

        creator = creator_cls(command_context.topsrcdir, test, suite, doc, **kwargs)

        creator.check_args()

        paths = []
        added_tests = False
        for path, template in creator:
            if not template:
                continue
            added_tests = True
            if path:
                paths.append(path)
                print("Adding a test file at {} (suite `{}`)".format(path, suite))

                try:
                    os.makedirs(os.path.dirname(path))
                except OSError:
                    pass

                with io.open(path, "w", newline="\n") as f:
                    f.write(template)
            else:
                # Write to stdout if only a suite and doc were passed, not a file path.
                print(template)

        if not added_tests:
            return 1

        if test:
            creator.update_manifest()

            # Small hack; should really do this better.
            if suite.startswith("wpt-"):
                suite = "web-platform-tests"

            mach_command = TEST_SUITES[suite]["mach_command"]
            print(
                "Please make sure to add the new test to your commit. "
                "You can now run the test with:\n    ./mach {} {}".format(
                    mach_command, test
                )
            )

        if editor is not MISSING_ARG:
            # `editor` is None when --editor was passed without a value;
            # fall back to the environment in that case.
            if editor is None:
                if "VISUAL" in os.environ:
                    editor = os.environ["VISUAL"]
                elif "EDITOR" in os.environ:
                    editor = os.environ["EDITOR"]
                else:
                    print("Unable to determine editor; please specify a binary")

            proc = None
            if editor:
                proc = subprocess.Popen("%s %s" % (editor, " ".join(paths)), shell=True)

            if proc:
                proc.wait()

        return 0

    def guess_doc(self, abs_test):
        filename = os.path.basename(abs_test)
        return os.path.splitext(filename)[1].strip(".")
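
    # Behavior sketch for guess_doc (the paths are illustrative):
    #   guess_doc("/src/dom/test_foo.html") -> "html"
    #   guess_doc("/src/js/test_bar.js") -> "js"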

    def guess_suite(self, abs_test):
        # If an abs_test is passed, try to detect the suite based on the name
        # and folder. This detection can be skipped by passing `--suite`.
        err = None
        guessed_suite = None
        parent = os.path.dirname(abs_test)
        filename = os.path.basename(abs_test)

        has_browser_ini = os.path.isfile(os.path.join(parent, "browser.ini"))
        has_chrome_ini = os.path.isfile(os.path.join(parent, "chrome.ini"))
        has_plain_ini = os.path.isfile(os.path.join(parent, "mochitest.ini"))
        has_xpcshell_ini = os.path.isfile(os.path.join(parent, "xpcshell.ini"))

        in_wpt_folder = abs_test.startswith(
            os.path.abspath(os.path.join("testing", "web-platform"))
        )

        if in_wpt_folder:
            guessed_suite = "web-platform-tests-testharness"
            if "/css/" in abs_test:
                guessed_suite = "web-platform-tests-reftest"
        elif (
            filename.startswith("test_")
            and has_xpcshell_ini
            and self.guess_doc(abs_test) == "js"
        ):
            guessed_suite = "xpcshell"
        else:
            if filename.startswith("browser_") and has_browser_ini:
                guessed_suite = "mochitest-browser-chrome"
            elif filename.startswith("test_"):
                if has_chrome_ini and has_plain_ini:
                    err = (
                        "Error: directory contains both a chrome.ini and mochitest.ini. "
                        "Please set --suite=mochitest-chrome or --suite=mochitest-plain."
                    )
                elif has_chrome_ini:
                    guessed_suite = "mochitest-chrome"
                elif has_plain_ini:
                    guessed_suite = "mochitest-plain"
        return guessed_suite, err
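
    # Examples of the heuristics above (illustrative layouts):
    #   browser_foo.js next to a browser.ini  -> mochitest-browser-chrome
    #   test_foo.js next to an xpcshell.ini   -> xpcshell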


@CommandProvider
class Test(MachCommandBase):
    @Command(
        "test",
        category="testing",
        description="Run tests (detects the kind of test and runs it).",
        parser=get_test_parser,
    )
    def test(self, command_context, what, extra_args, **log_args):
        """Run tests from names or paths.

        mach test accepts arguments specifying which tests to run. Each argument
        can be:

        * The path to a test file
        * A directory containing tests
        * A test suite name
        * An alias to a test suite name (codes used on TreeHerder)

        When paths or directories are given, they are first resolved to test
        files known to the build system.

        If resolved tests belong to more than one test type/flavor/harness,
        the harness for each relevant type/flavor will be invoked. For example,
        if you specify a directory with xpcshell and browser chrome mochitests,
        both harnesses will be invoked.

        Warning: `mach test` does not automatically re-build.
        Please remember to run `mach build` when necessary.

        EXAMPLES

        Run all test files in the devtools/client/shared/redux/middleware/xpcshell/
        directory:

        `./mach test devtools/client/shared/redux/middleware/xpcshell/`

        The command below prints a short summary of results instead of
        the default, more verbose output.
        Do not forget the - (minus sign) after --log-grouped!

        `./mach test --log-grouped - devtools/client/shared/redux/middleware/xpcshell/`
        """
        from mozlog.commandline import setup_logging
        from mozlog.handlers import StreamHandler
        from moztest.resolve import get_suite_definition, TestResolver, TEST_SUITES

        resolver = command_context._spawn(TestResolver)
        run_suites, run_tests = resolver.resolve_metadata(what)

        if not run_suites and not run_tests:
            print(UNKNOWN_TEST)
            return 1

        if log_args.get("debugger", None):
            import mozdebug

            if not mozdebug.get_debugger_info(log_args.get("debugger")):
                sys.exit(1)
            extra_args_debugger_notation = "=".join(
                ["--debugger", log_args.get("debugger")]
            )
            if extra_args:
                extra_args.append(extra_args_debugger_notation)
            else:
                extra_args = [extra_args_debugger_notation]

        # Create a shared logger.
        format_args = {"level": command_context._mach_context.settings["test"]["level"]}
        if not run_suites and len(run_tests) == 1:
            format_args["verbose"] = True
            format_args["compact"] = False

        default_format = command_context._mach_context.settings["test"]["format"]
        log = setup_logging(
            "mach-test", log_args, {default_format: sys.stdout}, format_args
        )
        for handler in log.handlers:
            if isinstance(handler, StreamHandler):
                handler.formatter.inner.summary_on_shutdown = True

        status = None
        for suite_name in run_suites:
            suite = TEST_SUITES[suite_name]
            kwargs = suite["kwargs"]
            kwargs["log"] = log
            kwargs.setdefault("subsuite", None)

            if "mach_command" in suite:
                res = command_context._mach_context.commands.dispatch(
                    suite["mach_command"],
                    command_context._mach_context,
                    argv=extra_args,
                    **kwargs,
                )
                if res:
                    status = res

        buckets = {}
        for test in run_tests:
            key = (test["flavor"], test.get("subsuite", ""))
            buckets.setdefault(key, []).append(test)

        for (flavor, subsuite), tests in sorted(buckets.items()):
            _, m = get_suite_definition(flavor, subsuite)
            if "mach_command" not in m:
                substr = "-{}".format(subsuite) if subsuite else ""
                print(UNKNOWN_FLAVOR % (flavor, substr))
                status = 1
                continue

            kwargs = dict(m["kwargs"])
            kwargs["log"] = log
            kwargs.setdefault("subsuite", None)

            res = command_context._mach_context.commands.dispatch(
                m["mach_command"],
                command_context._mach_context,
                argv=extra_args,
                test_objects=tests,
                **kwargs,
            )
            if res:
                status = res

        log.shutdown()
        return status


@CommandProvider
class MachCommands(MachCommandBase):
    @Command(
        "cppunittest", category="testing", description="Run cpp unit tests (C++ tests)."
    )
    @CommandArgument(
        "--enable-webrender",
        action="store_true",
        default=False,
        dest="enable_webrender",
        help="Enable the WebRender compositor in Gecko.",
    )
    @CommandArgument(
        "test_files",
        nargs="*",
        metavar="N",
        help="Test to run. Can be specified as one or more files or "
        "directories, or omitted. If omitted, the entire test suite is "
        "executed.",
    )
    def run_cppunit_test(self, command_context, **params):
        from mozlog import commandline

        log = params.get("log")
        if not log:
            log = commandline.setup_logging("cppunittest", {}, {"tbpl": sys.stdout})

        # See if we have crash symbols.
        symbols_path = os.path.join(command_context.distdir, "crashreporter-symbols")
        if not os.path.isdir(symbols_path):
            symbols_path = None

        # If no tests were specified, run all tests in the main manifest.
        tests = params["test_files"]
        if not tests:
            tests = [os.path.join(command_context.distdir, "cppunittests")]
            manifest_path = os.path.join(
                command_context.topsrcdir, "testing", "cppunittest.ini"
            )
        else:
            manifest_path = None

        utility_path = command_context.bindir

        if conditions.is_android(command_context):
            from mozrunner.devices.android_device import (
                verify_android_device,
                InstallIntent,
            )

            verify_android_device(command_context, install=InstallIntent.NO)
            # Pass command_context through; run_android_test expects it as
            # its first argument.
            return self.run_android_test(
                command_context, tests, symbols_path, manifest_path, log
            )

        return self.run_desktop_test(
            command_context, tests, symbols_path, manifest_path, utility_path, log
        )

    def run_desktop_test(
        self, command_context, tests, symbols_path, manifest_path, utility_path, log
    ):
        import runcppunittests as cppunittests
        from mozlog import commandline

        parser = cppunittests.CPPUnittestOptions()
        commandline.add_logging_group(parser)
        options, args = parser.parse_args()

        options.symbols_path = symbols_path
        options.manifest_path = manifest_path
        options.utility_path = utility_path
        options.xre_path = command_context.bindir

        try:
            result = cppunittests.run_test_harness(options, tests)
        except Exception as e:
            log.error("Caught exception running cpp unit tests: %s" % str(e))
            result = False
            raise

        return 0 if result else 1

    def run_android_test(
        self, command_context, tests, symbols_path, manifest_path, log
    ):
        import remotecppunittests
        from mozlog import commandline

        parser = remotecppunittests.RemoteCPPUnittestOptions()
        commandline.add_logging_group(parser)
        options, args = parser.parse_args()

        if not options.adb_path:
            from mozrunner.devices.android_device import get_adb_path

            options.adb_path = get_adb_path(command_context)
        options.symbols_path = symbols_path
        options.manifest_path = manifest_path
        options.xre_path = command_context.bindir
        options.local_lib = command_context.bindir.replace("bin", "fennec")
        for file in os.listdir(os.path.join(command_context.topobjdir, "dist")):
            if file.endswith(".apk") and file.startswith("fennec"):
                options.local_apk = os.path.join(
                    command_context.topobjdir, "dist", file
                )
                log.info("using APK: " + options.local_apk)
                break

        try:
            result = remotecppunittests.run_test_harness(options, tests)
        except Exception as e:
            log.error("Caught exception running cpp unit tests: %s" % str(e))
            result = False
            raise

        return 0 if result else 1


def executable_name(name):
    return name + ".exe" if sys.platform.startswith("win") else name
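
# e.g. executable_name("js") -> "js.exe" on Windows, "js" elsewhere.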


@CommandProvider
class SpiderMonkeyTests(MachCommandBase):
    @Command(
        "jstests",
        category="testing",
        description="Run SpiderMonkey JS tests in the JS shell.",
    )
    @CommandArgument("--shell", help="The shell to be used.")
    @CommandArgument(
        "params",
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass down to the test harness.",
    )
    def run_jstests(self, command_context, shell, params):
        command_context.virtualenv_manager.ensure()
        python = command_context.virtualenv_manager.python_path

        js = shell or os.path.join(command_context.bindir, executable_name("js"))
        jstest_cmd = [
            python,
            os.path.join(command_context.topsrcdir, "js", "src", "tests", "jstests.py"),
            js,  # the shell under test; jstests.py takes it as its first argument
        ] + params

        return subprocess.call(jstest_cmd)

    @Command(
        "jit-test",
        category="testing",
        description="Run SpiderMonkey jit-tests in the JS shell.",
        ok_if_tests_disabled=True,
    )
    @CommandArgument("--shell", help="The shell to be used.")
    @CommandArgument(
        "--cgc",
        action="store_true",
        default=False,
        help="Run with the SM(cgc) job's env vars.",
    )
    @CommandArgument(
        "params",
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass down to the test harness.",
    )
    def run_jittests(self, command_context, shell, cgc, params):
        command_context.virtualenv_manager.ensure()
        python = command_context.virtualenv_manager.python_path

        js = shell or os.path.join(command_context.bindir, executable_name("js"))
        jittest_cmd = [
            python,
            os.path.join(
                command_context.topsrcdir, "js", "src", "jit-test", "jit_test.py"
            ),
            js,  # the shell under test; jit_test.py takes it as its first argument
        ] + params

        env = os.environ.copy()
        if cgc:
            env["JS_GC_ZEAL"] = "IncrementalMultipleSlices"

        return subprocess.call(jittest_cmd, env=env)
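
    # Hedged example (the test path is hypothetical): run one jit-test the way
    # the SM(cgc) job does, i.e. with JS_GC_ZEAL=IncrementalMultipleSlices:
    #   ./mach jit-test --cgc path/to/test.js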

    @Command(
        "jsapi-tests", category="testing", description="Run SpiderMonkey JSAPI tests."
    )
    @CommandArgument(
        "test_name",
        nargs="?",
        metavar="N",
        help="Test to run. Can be a prefix or omitted. If "
        "omitted, the entire test suite is executed.",
    )
    def run_jsapitests(self, command_context, test_name=None):
        jsapi_tests_cmd = [
            os.path.join(command_context.bindir, executable_name("jsapi-tests"))
        ]
        if test_name:
            jsapi_tests_cmd.append(test_name)

        test_env = os.environ.copy()
        test_env["TOPSRCDIR"] = command_context.topsrcdir

        result = subprocess.call(jsapi_tests_cmd, env=test_env)
        if result != 0:
            print(f"jsapi-tests failed, exit code {result}")
        return result

    def run_check_js_msg(self, command_context):
        command_context.virtualenv_manager.ensure()
        python = command_context.virtualenv_manager.python_path

        check_cmd = [
            python,
            os.path.join(
                command_context.topsrcdir, "config", "check_js_msg_encoding.py"
            ),
        ]

        return subprocess.call(check_cmd)


def get_jsshell_parser():
    from jsshell.benchmark import get_parser

    return get_parser()


@CommandProvider
class JsShellTests(MachCommandBase):
    @Command(
        "jsshell-bench",
        category="testing",
        parser=get_jsshell_parser,
        description="Run benchmarks in the SpiderMonkey JS shell.",
    )
    def run_jsshelltests(self, command_context, **kwargs):
        command_context.activate_virtualenv()
        from jsshell import benchmark

        return benchmark.run(**kwargs)


@CommandProvider
class CramTest(MachCommandBase):
    @Command(
        "cramtest",
        category="testing",
        description="Mercurial-style .t tests for command line applications.",
    )
    @CommandArgument(
        "test_paths",
        nargs="*",
        metavar="N",
        help="Test paths to run. Each path can be a test file or directory. "
        "If omitted, the entire suite will be run.",
    )
    @CommandArgument(
        "cram_args",
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass down to the cram binary. See "
        "'./mach python -m cram -- -h' for a list of available options.",
    )
    def cramtest(
        self, command_context, cram_args=None, test_paths=None, test_objects=None
    ):
        command_context.activate_virtualenv()
        import mozinfo
        from manifestparser import TestManifest

        if test_objects is None:
            from moztest.resolve import TestResolver

            resolver = command_context._spawn(TestResolver)
            if test_paths:
                # If we were given test paths, try to find tests matching them.
                test_objects = resolver.resolve_tests(paths=test_paths, flavor="cram")
            else:
                # Otherwise just run everything in CRAMTEST_MANIFESTS.
                test_objects = resolver.resolve_tests(flavor="cram")

        if not test_objects:
            message = "No tests were collected; check the spelling of the test paths."
            command_context.log(logging.WARN, "cramtest", {}, message)
            return 1

        mp = TestManifest()
        mp.tests.extend(test_objects)
        tests = mp.active_tests(disabled=False, **mozinfo.info)

        python = command_context.virtualenv_manager.python_path
        # Guard against a direct call with cram_args=None; via argparse,
        # REMAINDER yields [] when no extra arguments are given.
        cmd = [python, "-m", "cram"] + (cram_args or []) + [t["relpath"] for t in tests]
        return subprocess.call(cmd, cwd=command_context.topsrcdir)


@CommandProvider
class TestInfoCommand(MachCommandBase):
    # Imported at class level so the decorator defaults below can use
    # date/timedelta while the class body executes.
    from datetime import date, timedelta

    @Command(
        "test-info", category="testing", description="Display historical test results."
    )
    def test_info(self, command_context):
        """All functions implemented as subcommands."""

    @SubCommand(
        "test-info",
        "tests",
        description="Display historical test result summary for named tests.",
    )
    @CommandArgument(
        "test_names", nargs=argparse.REMAINDER, help="Test(s) of interest."
    )
    @CommandArgument(
        "--start",
        default=(date.today() - timedelta(7)).strftime("%Y-%m-%d"),
        help="Start date (YYYY-MM-DD)",
    )
    @CommandArgument(
        "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
    )
    @CommandArgument(
        "--show-info",
        action="store_true",
        help="Retrieve and display general test information.",
    )
    @CommandArgument(
        "--show-bugs",
        action="store_true",
        help="Retrieve and display related Bugzilla bugs.",
    )
    @CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
    def test_info_tests(
        self,
        command_context,
        test_names,
        start,
        end,
        show_info,
        show_bugs,
        verbose,
    ):
        import testinfo

        ti = testinfo.TestInfoTests(verbose)
        ti.report(
            test_names,
            start,
            end,
            show_info,
            show_bugs,
        )
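
    # Hedged example (the test name is hypothetical):
    #   ./mach test-info tests dom/foo/test_bar.html --start 2021-08-01 --show-bugs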

    @SubCommand(
        "test-info",
        "report",
        description="Generate a json report of test manifests and/or tests "
        "categorized by Bugzilla component and optionally filtered "
        "by path, component, and/or manifest annotations.",
    )
    @CommandArgument(
        "--components",
        default=None,
        help="Comma-separated list of Bugzilla components, "
        "e.g. Testing::General,Core::WebVR.",
    )
    @CommandArgument(
        "--flavor",
        help='Limit results to tests of the specified flavor (e.g. "xpcshell").',
    )
    @CommandArgument(
        "--subsuite",
        help='Limit results to tests of the specified subsuite (e.g. "devtools").',
    )
    @CommandArgument(
        "paths", nargs=argparse.REMAINDER, help="File system paths of interest."
    )
    @CommandArgument(
        "--show-manifests",
        action="store_true",
        help="Include test manifests in report.",
    )
    @CommandArgument(
        "--show-tests", action="store_true", help="Include individual tests in report."
    )
    @CommandArgument(
        "--show-summary", action="store_true", help="Include summary in report."
    )
    @CommandArgument(
        "--show-annotations",
        action="store_true",
        help="Include list of manifest annotation conditions in report.",
    )
    @CommandArgument(
        "--filter-values",
        help="Comma-separated list of value regular expressions to filter on; "
        "displayed tests contain all specified values.",
    )
    @CommandArgument(
        "--filter-keys",
        help="Comma-separated list of test keys to filter on, "
        'like "skip-if"; only these fields will be searched '
        "for filter-values.",
    )
    @CommandArgument(
        "--no-component-report",
        action="store_false",
        dest="show_components",
        default=True,
        help="Do not categorize by Bugzilla component.",
    )
    @CommandArgument("--output-file", help="Path to report file.")
    @CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
    def test_report(
        self,
        command_context,
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        filter_values,
        filter_keys,
        show_components,
        output_file,
        verbose,
    ):
        import testinfo
        from mozbuild.build_commands import Build

        try:
            command_context.config_environment
        except BuildEnvironmentNotFoundException:
            print("Looks like configure has not run yet; running it now...")
            builder = Build(command_context._mach_context, None)
            builder.configure(command_context)

        ti = testinfo.TestInfoReport(verbose)
        ti.report(
            components,
            flavor,
            subsuite,
            paths,
            show_manifests,
            show_tests,
            show_summary,
            show_annotations,
            filter_values,
            filter_keys,
            show_components,
            output_file,
        )

    @SubCommand(
        "test-info",
        "report-diff",
        description='Compare two reports generated by "test-info reports".',
    )
    @CommandArgument(
        "--before",
        default=None,
        help="The first (earlier) report file; path to local file or url.",
    )
    @CommandArgument(
        "--after", help="The second (later) report file; path to local file or url."
    )
    @CommandArgument(
        "--output-file",
        help="Path to report file to be written. If not specified, the report "
        "will be written to standard output.",
    )
    @CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
    def test_report_diff(self, command_context, before, after, output_file, verbose):
        import testinfo

        ti = testinfo.TestInfoReport(verbose)
        ti.report_diff(before, after, output_file)


@CommandProvider
class RustTests(MachCommandBase):
    @Command(
        "rusttests",
        category="testing",
        conditions=[conditions.is_non_artifact_build],
        description="Run rust unit tests (via cargo test).",
    )
    def run_rusttests(self, command_context, **kwargs):
        return command_context._mach_context.commands.dispatch(
            "build",
            command_context._mach_context,
            what=["pre-export", "export", "recurse_rusttests"],
        )


@CommandProvider
class TestFluentMigration(MachCommandBase):
    @Command(
        "fluent-migration-test",
        category="testing",
        description="Test Fluent migration recipes.",
    )
    @CommandArgument("test_paths", nargs="*", metavar="N", help="Recipe paths to test.")
    def run_migration_tests(self, command_context, test_paths=None, **kwargs):
        if not test_paths:
            test_paths = []
        command_context.activate_virtualenv()
        from test_fluent_migrations import fmt

        rv = 0
        with_context = []
        for to_test in test_paths:
            try:
                context = fmt.inspect_migration(to_test)
                for issue in context["issues"]:
                    command_context.log(
                        logging.ERROR,
                        "fluent-migration-test",
                        {
                            "error": issue["msg"],
                            "file": to_test,
                        },
                        "ERROR in {file}: {error}",
                    )
                if context["issues"]:
                    continue
                with_context.append(
                    {
                        "to_test": to_test,
                        "references": context["references"],
                    }
                )
            except Exception as e:
                command_context.log(
                    logging.ERROR,
                    "fluent-migration-test",
                    {"error": str(e), "file": to_test},
                    "ERROR in {file}: {error}",
                )
                rv |= 1
        obj_dir = fmt.prepare_object_dir(command_context)
        for context in with_context:
            rv |= fmt.test_migration(command_context, obj_dir, **context)
        return rv