1 # This Source Code Form is subject to the terms of the Mozilla Public
2 # License, v. 2.0. If a copy of the MPL was not distributed with this
3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
5 from __future__
import absolute_import
, print_function
, unicode_literals
13 from mach
.decorators
import (
21 from mozbuild
.base
import (
22 BuildEnvironmentNotFoundException
,
24 MachCommandConditions
as conditions
,
# User-facing help/error message templates for `mach test`.
# NOTE(review): reconstructed from a line-mangled extract; the wording of
# lines missing from the extract is inferred — verify against upstream.

UNKNOWN_TEST = """
I was unable to find tests from the given argument(s).

You should specify a test directory, filename, test suite name, or
abbreviation.

It's possible my little brain doesn't know about the type of test you are
trying to execute. If you suspect this, please request support by filing
a bug at
https://bugzilla.mozilla.org/enter_bug.cgi?product=Testing&component=General.
""".strip()

# Formatted with `% (flavor, subsuite_suffix)` when a resolved test has no
# known mach command (see the `Test.test` dispatch loop).
UNKNOWN_FLAVOR = """
I know you are trying to run a %s%s test. Unfortunately, I can't run those
tests yet. Sorry!
""".strip()

# `.format()`ed with the sorted, comma-joined suite names in get_test_parser.
TEST_HELP = """
Test or tests to run. Tests can be specified by filename, directory, suite
name or suite alias.

The following test suites and aliases are supported: {}
""".strip()
class TestConfig(object):
    """Declares the mach settings consumed by `mach test` (log format/level)."""

    @classmethod
    def config_settings(cls):
        # Imported lazily so merely loading this module doesn't require mozlog.
        from mozlog.commandline import log_formatters
        from mozlog.structuredlog import log_levels

        format_desc = "The default format to use when running tests with `mach test`."
        format_choices = list(log_formatters)
        level_desc = "The default log level to use when running tests with `mach test`."
        level_choices = [l.lower() for l in log_levels]
        # Each entry: (setting name, type, description, default, extra options).
        return [
            ("test.format", "string", format_desc, "mach", {"choices": format_choices}),
            ("test.level", "string", level_desc, "info", {"choices": level_choices}),
        ]
def get_test_parser():
    """Build the argparse parser for the top-level `mach test` command.

    NOTE(review): the positional argument names are reconstructed from the
    `Test.test(self, command_context, what, extra_args, **log_args)`
    signature — confirm against upstream before changing.
    """
    from mozlog.commandline import add_logging_group
    from moztest.resolve import TEST_SUITES

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "what",
        default=None,
        nargs="+",
        help=TEST_HELP.format(", ".join(sorted(TEST_SUITES))),
    )
    parser.add_argument(
        "extra_args",
        default=None,
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass to the underlying test command(s). "
        "If an underlying command doesn't recognize the argument, it "
        "will fail.",
    )
    parser.add_argument(
        "--debugger",
        default=None,
        action="store",
        nargs="?",
        help="Specify a debugger to use.",
    )
    add_logging_group(parser)
    return parser
# Suites `mach addtest` can scaffold. NOTE(review): the mochitest-chrome,
# mochitest-plain and xpcshell entries are inferred (their source lines are
# missing from this extract) from the suites guess_suite() can return.
ADD_TEST_SUPPORTED_SUITES = [
    "mochitest-chrome",
    "mochitest-plain",
    "mochitest-browser-chrome",
    "web-platform-tests-testharness",
    "web-platform-tests-reftest",
    "xpcshell",
]

# Document types a generated test file may use.
ADD_TEST_SUPPORTED_DOCS = ["js", "html", "xhtml", "xul"]

# Short aliases users may type, mapped to canonical suite names.
SUITE_SYNONYMS = {
    "wpt": "web-platform-tests-testharness",
    "wpt-testharness": "web-platform-tests-testharness",
    "wpt-reftest": "web-platform-tests-reftest",
}

# Sentinel distinguishing "--editor not passed" from "--editor with no value";
# None is meaningful for that flag, so a plain None default won't do.
MISSING_ARG = object()
def create_parser_addtest():
    """Build the argparse parser for `mach addtest`.

    NOTE(review): option flag spellings (-o/--overwrite, -e/--editor, --doc,
    --suite) are reconstructed; the decorator/flag lines are missing from
    this extract — confirm against upstream.
    """
    import addtest

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--suite",
        choices=sorted(ADD_TEST_SUPPORTED_SUITES + list(SUITE_SYNONYMS.keys())),
        help="suite for the test. "
        "If you pass a `test` argument this will be determined "
        "based on the filename and the folder it is in",
    )
    parser.add_argument(
        "-o",
        "--overwrite",
        action="store_true",
        help="Overwrite an existing file if it exists.",
    )
    parser.add_argument(
        "--doc",
        choices=ADD_TEST_SUPPORTED_DOCS,
        help="Document type for the test (if applicable)."
        "If you pass a `test` argument this will be determined "
        "based on the filename.",
    )
    parser.add_argument(
        "-e",
        "--editor",
        action="store",
        nargs="?",
        default=MISSING_ARG,
        help="Open the created file(s) in an editor; if a "
        "binary is supplied it will be used otherwise the default editor for "
        "your environment will be opened",
    )

    # Let each suite-specific test creator contribute its own options.
    for base_suite in addtest.TEST_CREATORS:
        cls = addtest.TEST_CREATORS[base_suite]
        if hasattr(cls, "get_parser"):
            group = parser.add_argument_group(base_suite)
            cls.get_parser(group)

    parser.add_argument("test", nargs="?", help=("Test to create."))

    return parser
# NOTE(review): `mach addtest` command — generates new test files from
# templates, guessing suite/doc type from the target path, updates the
# manifest, and optionally opens the result in an editor.
# This extract is line-mangled and MISSING many original lines (the embedded
# original line numbers jump: decorators, the method signature, `paths`
# accumulation, return codes and the try/else structure are absent).
# Left byte-identical rather than reconstructed — restore from upstream
# mozilla-central testing/mach_commands.py before editing.
164 class AddTest(MachCommandBase
):
168 description
="Generate tests based on templates",
169 parser
=create_parser_addtest
,
183 from moztest
.resolve
import TEST_SUITES
185 if not suite
and not test
:
186 return create_parser_addtest().parse_args(["--help"])
188 if suite
in SUITE_SYNONYMS
:
189 suite
= SUITE_SYNONYMS
[suite
]
192 if not overwrite
and os
.path
.isfile(os
.path
.abspath(test
)):
193 print("Error: can't generate a test that already exists:", test
)
196 abs_test
= os
.path
.abspath(test
)
198 doc
= self
.guess_doc(abs_test
)
200 guessed_suite
, err
= self
.guess_suite(abs_test
)
204 suite
= guessed_suite
213 "We couldn't automatically determine a suite. "
214 "Please specify `--suite` with one of the following options:\n{}\n"
215 "If you'd like to add support to a new suite, please file a bug "
216 "blocking https://bugzilla.mozilla.org/show_bug.cgi?id=1540285.".format(
217 ADD_TEST_SUPPORTED_SUITES
222 if doc
not in ADD_TEST_SUPPORTED_DOCS
:
224 "Error: invalid `doc`. Either pass in a test with a valid extension"
225 "({}) or pass in the `doc` argument".format(ADD_TEST_SUPPORTED_DOCS
)
229 creator_cls
= addtest
.creator_for_suite(suite
)
231 if creator_cls
is None:
232 print("Sorry, `addtest` doesn't currently know how to add {}".format(suite
))
235 creator
= creator_cls(command_context
.topsrcdir
, test
, suite
, doc
, **kwargs
)
# NOTE(review): iterates (path, template) pairs produced by the creator,
# writing each template to disk (or stdout when no path was given).
241 for path
, template
in creator
:
247 print("Adding a test file at {} (suite `{}`)".format(path
, suite
))
250 os
.makedirs(os
.path
.dirname(path
))
254 with io
.open(path
, "w", newline
="\n") as f
:
257 # write to stdout if you passed only suite and doc and not a file path
264 creator
.update_manifest()
266 # Small hack, should really do this better
267 if suite
.startswith("wpt-"):
268 suite
= "web-platform-tests"
270 mach_command
= TEST_SUITES
[suite
]["mach_command"]
272 "Please make sure to add the new test to your commit. "
273 "You can now run the test with:\n ./mach {} {}".format(
# NOTE(review): editor resolution order appears to be: explicit --editor
# value, then $VISUAL, then $EDITOR — but the branch bodies between these
# fragments are missing; confirm before relying on this.
278 if editor
is not MISSING_ARG
:
279 if editor
is not None:
281 elif "VISUAL" in os
.environ
:
282 editor
= os
.environ
["VISUAL"]
283 elif "EDITOR" in os
.environ
:
284 editor
= os
.environ
["EDITOR"]
286 print("Unable to determine editor; please specify a binary")
# NOTE(review): shell=True with interpolated paths — acceptable for a local
# dev convenience, but worth revisiting upstream.
293 proc
= subprocess
.Popen("%s %s" % (editor
, " ".join(paths
)), shell
=True)
300 def guess_doc(self
, abs_test
):
301 filename
= os
.path
.basename(abs_test
)
302 return os
.path
.splitext(filename
)[1].strip(".")
304 def guess_suite(self
, abs_test
):
305 # If you pass a abs_test, try to detect the type based on the name
306 # and folder. This detection can be skipped if you pass the `type` arg.
309 parent
= os
.path
.dirname(abs_test
)
310 filename
= os
.path
.basename(abs_test
)
312 has_browser_ini
= os
.path
.isfile(os
.path
.join(parent
, "browser.ini"))
313 has_chrome_ini
= os
.path
.isfile(os
.path
.join(parent
, "chrome.ini"))
314 has_plain_ini
= os
.path
.isfile(os
.path
.join(parent
, "mochitest.ini"))
315 has_xpcshell_ini
= os
.path
.isfile(os
.path
.join(parent
, "xpcshell.ini"))
317 in_wpt_folder
= abs_test
.startswith(
318 os
.path
.abspath(os
.path
.join("testing", "web-platform"))
322 guessed_suite
= "web-platform-tests-testharness"
323 if "/css/" in abs_test
:
324 guessed_suite
= "web-platform-tests-reftest"
326 filename
.startswith("test_")
328 and self
.guess_doc(abs_test
) == "js"
330 guessed_suite
= "xpcshell"
332 if filename
.startswith("browser_") and has_browser_ini
:
333 guessed_suite
= "mochitest-browser-chrome"
334 elif filename
.startswith("test_"):
335 if has_chrome_ini
and has_plain_ini
:
337 "Error: directory contains both a chrome.ini and mochitest.ini. "
338 "Please set --suite=mochitest-chrome or --suite=mochitest-plain."
341 guessed_suite
= "mochitest-chrome"
343 guessed_suite
= "mochitest-plain"
344 return guessed_suite
, err
# NOTE(review): `mach test` command — resolves the `what` arguments to known
# suites/tests via moztest's TestResolver, sets up a shared mozlog logger,
# then dispatches each resolved suite and each (flavor, subsuite) bucket to
# its underlying mach command.
# This extract is line-mangled and MISSING many original lines (decorators,
# parts of the docstring, the setup_logging call head, dispatch kwargs and
# return handling). Left byte-identical rather than reconstructed — restore
# from upstream mozilla-central testing/mach_commands.py before editing.
348 class Test(MachCommandBase
):
352 description
="Run tests (detects the kind of test and runs it).",
353 parser
=get_test_parser
,
355 def test(self
, command_context
, what
, extra_args
, **log_args
):
356 """Run tests from names or paths.
358 mach test accepts arguments specifying which tests to run. Each argument
361 * The path to a test file
362 * A directory containing tests
364 * An alias to a test suite name (codes used on TreeHerder)
366 When paths or directories are given, they are first resolved to test
367 files known to the build system.
369 If resolved tests belong to more than one test type/flavor/harness,
370 the harness for each relevant type/flavor will be invoked. e.g. if
371 you specify a directory with xpcshell and browser chrome mochitests,
372 both harnesses will be invoked.
374 Warning: `mach test` does not automatically re-build.
375 Please remember to run `mach build` when necessary.
379 Run all test files in the devtools/client/shared/redux/middleware/xpcshell/
382 `./mach test devtools/client/shared/redux/middleware/xpcshell/`
384 The below command prints a short summary of results instead of
385 the default more verbose output.
386 Do not forget the - (minus sign) after --log-grouped!
388 `./mach test --log-grouped - devtools/client/shared/redux/middleware/xpcshell/`
390 from mozlog
.commandline
import setup_logging
391 from mozlog
.handlers
import StreamHandler
392 from moztest
.resolve
import get_suite_definition
, TestResolver
, TEST_SUITES
394 resolver
= command_context
._spawn
(TestResolver
)
395 run_suites
, run_tests
= resolver
.resolve_metadata(what
)
397 if not run_suites
and not run_tests
:
# NOTE(review): when a debugger was requested it is validated via mozdebug
# and forwarded to the underlying command as `--debugger=<name>`.
401 if log_args
.get("debugger", None):
404 if not mozdebug
.get_debugger_info(log_args
.get("debugger")):
406 extra_args_debugger_notation
= "=".join(
407 ["--debugger", log_args
.get("debugger")]
410 extra_args
.append(extra_args_debugger_notation
)
412 extra_args
= [extra_args_debugger_notation
]
414 # Create shared logger
415 format_args
= {"level": command_context
._mach
_context
.settings
["test"]["level"]}
416 if not run_suites
and len(run_tests
) == 1:
417 format_args
["verbose"] = True
418 format_args
["compact"] = False
420 default_format
= command_context
._mach
_context
.settings
["test"]["format"]
422 "mach-test", log_args
, {default_format
: sys
.stdout
}, format_args
424 for handler
in log
.handlers
:
425 if isinstance(handler
, StreamHandler
):
426 handler
.formatter
.inner
.summary_on_shutdown
= True
429 for suite_name
in run_suites
:
430 suite
= TEST_SUITES
[suite_name
]
431 kwargs
= suite
["kwargs"]
433 kwargs
.setdefault("subsuite", None)
435 if "mach_command" in suite
:
436 res
= command_context
._mach
_context
.commands
.dispatch(
437 suite
["mach_command"],
438 command_context
._mach
_context
,
# NOTE(review): individual tests are grouped into (flavor, subsuite)
# buckets and each bucket dispatched to its suite's mach command.
446 for test
in run_tests
:
447 key
= (test
["flavor"], test
.get("subsuite", ""))
448 buckets
.setdefault(key
, []).append(test
)
450 for (flavor
, subsuite
), tests
in sorted(buckets
.items()):
451 _
, m
= get_suite_definition(flavor
, subsuite
)
452 if "mach_command" not in m
:
453 substr
= "-{}".format(subsuite
) if subsuite
else ""
454 print(UNKNOWN_FLAVOR
% (flavor
, substr
))
458 kwargs
= dict(m
["kwargs"])
460 kwargs
.setdefault("subsuite", None)
462 res
= command_context
._mach
_context
.commands
.dispatch(
464 command_context
._mach
_context
,
# NOTE(review): `mach cppunittest` command — runs compiled C++ unit tests,
# locally via runcppunittests or on a device via remotecppunittests when
# targeting Android. This extract is line-mangled and MISSING many original
# lines (decorators, try/else bodies, error-path returns). Left
# byte-identical rather than reconstructed — restore from upstream
# mozilla-central testing/mach_commands.py before editing.
477 class MachCommands(MachCommandBase
):
479 "cppunittest", category
="testing", description
="Run cpp unit tests (C++ tests)."
482 "--enable-webrender",
485 dest
="enable_webrender",
486 help="Enable the WebRender compositor in Gecko.",
492 help="Test to run. Can be specified as one or more files or "
493 "directories, or omitted. If omitted, the entire test suite is "
496 def run_cppunit_test(self
, command_context
, **params
):
497 from mozlog
import commandline
499 log
= params
.get("log")
501 log
= commandline
.setup_logging("cppunittest", {}, {"tbpl": sys
.stdout
})
503 # See if we have crash symbols
504 symbols_path
= os
.path
.join(command_context
.distdir
, "crashreporter-symbols")
505 if not os
.path
.isdir(symbols_path
):
508 # If no tests specified, run all tests in main manifest
509 tests
= params
["test_files"]
511 tests
= [os
.path
.join(command_context
.distdir
, "cppunittests")]
512 manifest_path
= os
.path
.join(
513 command_context
.topsrcdir
, "testing", "cppunittest.ini"
518 utility_path
= command_context
.bindir
# NOTE(review): Android devices are verified (without installing) before
# delegating to run_android_test; desktop delegates to run_desktop_test.
520 if conditions
.is_android(command_context
):
521 from mozrunner
.devices
.android_device
import (
522 verify_android_device
,
526 verify_android_device(command_context
, install
=InstallIntent
.NO
)
527 return self
.run_android_test(tests
, symbols_path
, manifest_path
, log
)
529 return self
.run_desktop_test(
530 command_context
, tests
, symbols_path
, manifest_path
, utility_path
, log
# Runs the harness locally; returns 0 on success, 1 on failure.
533 def run_desktop_test(
534 self
, command_context
, tests
, symbols_path
, manifest_path
, utility_path
, log
536 import runcppunittests
as cppunittests
537 from mozlog
import commandline
539 parser
= cppunittests
.CPPUnittestOptions()
540 commandline
.add_logging_group(parser
)
541 options
, args
= parser
.parse_args()
543 options
.symbols_path
= symbols_path
544 options
.manifest_path
= manifest_path
545 options
.utility_path
= utility_path
546 options
.xre_path
= command_context
.bindir
549 result
= cppunittests
.run_test_harness(options
, tests
)
550 except Exception as e
:
551 log
.error("Caught exception running cpp unit tests: %s" % str(e
))
555 return 0 if result
else 1
# Runs the harness against an attached Android device (adb); locates the
# fennec APK under $OBJDIR/dist. Returns 0 on success, 1 on failure.
557 def run_android_test(
558 self
, command_context
, tests
, symbols_path
, manifest_path
, log
560 import remotecppunittests
as remotecppunittests
561 from mozlog
import commandline
563 parser
= remotecppunittests
.RemoteCPPUnittestOptions()
564 commandline
.add_logging_group(parser
)
565 options
, args
= parser
.parse_args()
567 if not options
.adb_path
:
568 from mozrunner
.devices
.android_device
import get_adb_path
570 options
.adb_path
= get_adb_path(command_context
)
571 options
.symbols_path
= symbols_path
572 options
.manifest_path
= manifest_path
573 options
.xre_path
= command_context
.bindir
574 options
.local_lib
= command_context
.bindir
.replace("bin", "fennec")
575 for file in os
.listdir(os
.path
.join(command_context
.topobjdir
, "dist")):
576 if file.endswith(".apk") and file.startswith("fennec"):
577 options
.local_apk
= os
.path
.join(
578 command_context
.topobjdir
, "dist", file
580 log
.info("using APK: " + options
.local_apk
)
584 result
= remotecppunittests
.run_test_harness(options
, tests
)
585 except Exception as e
:
586 log
.error("Caught exception running cpp unit tests: %s" % str(e
))
590 return 0 if result
else 1
def executable_name(name):
    """Return *name* with ".exe" appended on Windows, unchanged elsewhere."""
    suffix = ".exe" if sys.platform.startswith("win") else ""
    return name + suffix
# NOTE(review): SpiderMonkey test commands — jstests (JS shell conformance
# tests), jit-test, jsapi-tests (compiled binary) and check-js-msg. This
# extract is line-mangled and MISSING many original lines (the @Command
# decorators, the construction of jstest_cmd/jittest_cmd/check_cmd argument
# lists, and several returns). Left byte-identical rather than
# reconstructed — restore from upstream before editing.
# The f-string at original line 688 implies this file targets Python 3.6+.
598 class SpiderMonkeyTests(MachCommandBase
):
602 description
="Run SpiderMonkey JS tests in the JS shell.",
604 @CommandArgument("--shell", help="The shell to be used")
607 nargs
=argparse
.REMAINDER
,
608 help="Extra arguments to pass down to the test harness.",
610 def run_jstests(self
, command_context
, shell
, params
):
613 command_context
.virtualenv_manager
.ensure()
614 python
= command_context
.virtualenv_manager
.python_path
# Uses an explicit --shell if given, else the js binary from the build dir.
616 js
= shell
or os
.path
.join(command_context
.bindir
, executable_name("js"))
619 os
.path
.join(command_context
.topsrcdir
, "js", "src", "tests", "jstests.py"),
623 return subprocess
.call(jstest_cmd
)
628 description
="Run SpiderMonkey jit-tests in the JS shell.",
629 ok_if_tests_disabled
=True,
631 @CommandArgument("--shell", help="The shell to be used")
636 help="Run with the SM(cgc) job's env vars",
640 nargs
=argparse
.REMAINDER
,
641 help="Extra arguments to pass down to the test harness.",
643 def run_jittests(self
, command_context
, shell
, cgc
, params
):
646 command_context
.virtualenv_manager
.ensure()
647 python
= command_context
.virtualenv_manager
.python_path
649 js
= shell
or os
.path
.join(command_context
.bindir
, executable_name("js"))
653 command_context
.topsrcdir
, "js", "src", "jit-test", "jit_test.py"
# With --cgc, force incremental GC zeal to mimic the SM(cgc) CI job.
658 env
= os
.environ
.copy()
660 env
["JS_GC_ZEAL"] = "IncrementalMultipleSlices"
662 return subprocess
.call(jittest_cmd
, env
=env
)
665 "jsapi-tests", category
="testing", description
="Run SpiderMonkey JSAPI tests."
671 help="Test to run. Can be a prefix or omitted. If "
672 "omitted, the entire test suite is executed.",
674 def run_jsapitests(self
, command_context
, test_name
=None):
678 os
.path
.join(command_context
.bindir
, executable_name("jsapi-tests"))
681 jsapi_tests_cmd
.append(test_name
)
683 test_env
= os
.environ
.copy()
684 test_env
["TOPSRCDIR"] = command_context
.topsrcdir
686 result
= subprocess
.call(jsapi_tests_cmd
, env
=test_env
)
688 print(f
"jsapi-tests failed, exit code {result}")
691 def run_check_js_msg(self
, command_context
):
694 command_context
.virtualenv_manager
.ensure()
695 python
= command_context
.virtualenv_manager
.python_path
700 command_context
.topsrcdir
, "config", "check_js_msg_encoding.py"
704 return subprocess
.call(check_cmd
)
def get_jsshell_parser():
    """Return the argument parser defined by jsshell.benchmark."""
    from jsshell.benchmark import get_parser

    return get_parser()
# NOTE(review): the command decorator lines are missing from this extract;
# the @Command name/arguments below are reconstructed — verify upstream.
@CommandProvider
class JsShellTests(MachCommandBase):
    """`mach jsshell-bench`: run benchmarks in the SpiderMonkey JS shell."""

    @Command(
        "jsshell-bench",
        category="testing",
        parser=get_jsshell_parser,
        description="Run benchmarks in the SpiderMonkey JS shell.",
    )
    def run_jsshelltests(self, command_context, **kwargs):
        command_context.activate_virtualenv()
        # jsshell lives in the in-tree virtualenv, hence the lazy import.
        from jsshell import benchmark

        return benchmark.run(**kwargs)
# NOTE(review): `mach cramtest` — resolves cram-flavored tests (optionally
# filtered by paths), filters them through a TestManifest with mozinfo, and
# invokes `python -m cram` on the active tests. This extract is line-mangled
# and MISSING several original lines (decorators, the method name line, the
# `mp = TestManifest()` construction, early returns). Left byte-identical
# rather than reconstructed — restore from upstream before editing.
729 class CramTest(MachCommandBase
):
733 description
="Mercurial style .t tests for command line applications.",
739 help="Test paths to run. Each path can be a test file or directory. "
740 "If omitted, the entire suite will be run.",
744 nargs
=argparse
.REMAINDER
,
745 help="Extra arguments to pass down to the cram binary. See "
746 "'./mach python -m cram -- -h' for a list of available options.",
749 self
, command_context
, cram_args
=None, test_paths
=None, test_objects
=None
751 command_context
.activate_virtualenv()
753 from manifestparser
import TestManifest
755 if test_objects
is None:
756 from moztest
.resolve
import TestResolver
758 resolver
= command_context
._spawn
(TestResolver
)
760 # If we were given test paths, try to find tests matching them.
761 test_objects
= resolver
.resolve_tests(paths
=test_paths
, flavor
="cram")
763 # Otherwise just run everything in CRAMTEST_MANIFESTS
764 test_objects
= resolver
.resolve_tests(flavor
="cram")
767 message
= "No tests were collected, check spelling of the test paths."
768 command_context
.log(logging
.WARN
, "cramtest", {}, message
)
772 mp
.tests
.extend(test_objects
)
773 tests
= mp
.active_tests(disabled
=False, **mozinfo
.info
)
775 python
= command_context
.virtualenv_manager
.python_path
776 cmd
= [python
, "-m", "cram"] + cram_args
+ [t
["relpath"] for t
in tests
]
777 return subprocess
.call(cmd
, cwd
=command_context
.topsrcdir
)
# NOTE(review): `mach test-info` and its subcommands (tests / report /
# report-diff) — historical test results, manifest/bug reports, and report
# diffing, all backed by the in-tree `testinfo` module. This extract is the
# most heavily gutted span in the file: mostly @SubCommand/@CommandArgument
# fragments with whole function signatures and bodies MISSING. Left
# byte-identical rather than reconstructed — restore from upstream
# mozilla-central testing/mach_commands.py before editing.
781 class TestInfoCommand(MachCommandBase
):
# Class-level import: date/timedelta are used in argument defaults below.
782 from datetime
import date
, timedelta
785 "test-info", category
="testing", description
="Display historical test results."
787 def test_info(self
, command_context
):
789 All functions implemented as subcommands.
795 description
="Display historical test result summary for named tests.",
798 "test_names", nargs
=argparse
.REMAINDER
, help="Test(s) of interest."
# Default window: the last 7 days, ending today.
802 default
=(date
.today() - timedelta(7)).strftime("%Y-%m-%d"),
803 help="Start date (YYYY-MM-DD)",
806 "--end", default
=date
.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
811 help="Retrieve and display general test information.",
816 help="Retrieve and display related Bugzilla bugs.",
818 @CommandArgument("--verbose", action
="store_true", help="Enable debug logging.")
831 ti
= testinfo
.TestInfoTests(verbose
)
843 description
="Generate a json report of test manifests and/or tests "
844 "categorized by Bugzilla component and optionally filtered "
845 "by path, component, and/or manifest annotations.",
850 help="Comma-separated list of Bugzilla components."
851 " eg. Testing::General,Core::WebVR",
855 help='Limit results to tests of the specified flavor (eg. "xpcshell").',
859 help='Limit results to tests of the specified subsuite (eg. "devtools").',
862 "paths", nargs
=argparse
.REMAINDER
, help="File system paths of interest."
867 help="Include test manifests in report.",
870 "--show-tests", action
="store_true", help="Include individual tests in report."
873 "--show-summary", action
="store_true", help="Include summary in report."
876 "--show-annotations",
878 help="Include list of manifest annotation conditions in report.",
882 help="Comma-separated list of value regular expressions to filter on; "
883 "displayed tests contain all specified values.",
887 help="Comma-separated list of test keys to filter on, "
888 'like "skip-if"; only these fields will be searched '
889 "for filter-values.",
892 "--no-component-report",
893 action
="store_false",
894 dest
="show_components",
896 help="Do not categorize by bugzilla component.",
898 @CommandArgument("--output-file", help="Path to report file.")
899 @CommandArgument("--verbose", action
="store_true", help="Enable debug logging.")
# NOTE(review): the report needs a configured build; if configure has not
# run, it is invoked here before generating the report.
918 from mozbuild
.build_commands
import Build
921 command_context
.config_environment
922 except BuildEnvironmentNotFoundException
:
923 print("Looks like configure has not run yet, running it now...")
924 builder
= Build(command_context
._mach
_context
, None)
925 builder
.configure(command_context
)
927 ti
= testinfo
.TestInfoReport(verbose
)
946 description
='Compare two reports generated by "test-info reports".',
951 help="The first (earlier) report file; path to local file or url.",
954 "--after", help="The second (later) report file; path to local file or url."
958 help="Path to report file to be written. If not specified, report"
959 "will be written to standard output.",
961 @CommandArgument("--verbose", action
="store_true", help="Enable debug logging.")
962 def test_report_diff(self
, command_context
, before
, after
, output_file
, verbose
):
965 ti
= testinfo
.TestInfoReport(verbose
)
966 ti
.report_diff(before
, after
, output_file
)
# NOTE(review): the command decorator lines are missing from this extract;
# the @Command name and the "build" dispatch target are reconstructed —
# verify against upstream.
@CommandProvider
class RustTests(MachCommandBase):
    """`mach rusttests`: run Rust unit tests (cargo test) via the build system."""

    @Command(
        "rusttests",
        category="testing",
        conditions=[conditions.is_non_artifact_build],
        description="Run rust unit tests (via cargo test).",
    )
    def run_rusttests(self, command_context, **kwargs):
        # Rust tests are driven by the build system: run a build restricted
        # to the tiers that execute cargo test.
        return command_context._mach_context.commands.dispatch(
            "build",
            command_context._mach_context,
            what=["pre-export", "export", "recurse_rusttests"],
        )
986 class TestFluentMigration(MachCommandBase
):
988 "fluent-migration-test",
990 description
="Test Fluent migration recipes.",
992 @CommandArgument("test_paths", nargs
="*", metavar
="N", help="Recipe paths to test.")
993 def run_migration_tests(self
, command_context
, test_paths
=None, **kwargs
):
996 command_context
.activate_virtualenv()
997 from test_fluent_migrations
import fmt
1001 for to_test
in test_paths
:
1003 context
= fmt
.inspect_migration(to_test
)
1004 for issue
in context
["issues"]:
1005 command_context
.log(
1007 "fluent-migration-test",
1009 "error": issue
["msg"],
1012 "ERROR in {file}: {error}",
1014 if context
["issues"]:
1016 with_context
.append(
1019 "references": context
["references"],
1022 except Exception as e
:
1023 command_context
.log(
1025 "fluent-migration-test",
1026 {"error": str(e
), "file": to_test
},
1027 "ERROR in {file}: {error}",
1030 obj_dir
= fmt
.prepare_object_dir(command_context
)
1031 for context
in with_context
:
1032 rv |
= fmt
.test_migration(command_context
, obj_dir
, **context
)