# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import argparse
import logging
import os
import subprocess
import sys
from datetime import date, timedelta

import requests
from mach.decorators import Command, CommandArgument, SubCommand
from mozbuild.base import BuildEnvironmentNotFoundException
from mozbuild.base import MachCommandConditions as conditions

UNKNOWN_TEST = """
I was unable to find tests from the given argument(s).

You should specify a test directory, filename, test suite name, or
abbreviation.

It's possible my little brain doesn't know about the type of test you are
trying to execute. If you suspect this, please request support by filing
a bug at
https://bugzilla.mozilla.org/enter_bug.cgi?product=Testing&component=General.
""".strip()

UNKNOWN_FLAVOR = """
I know you are trying to run a %s%s test. Unfortunately, I can't run those
tests yet. Sorry!
""".strip()

TEST_HELP = """
Test or tests to run. Tests can be specified by filename, directory, suite
name or suite alias.

The following test suites and aliases are supported: {}
""".strip()


def get_test_parser():
    from mozlog.commandline import add_logging_group
    from moztest.resolve import TEST_SUITES

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "what",
        default=None,
        nargs="+",
        help=TEST_HELP.format(", ".join(sorted(TEST_SUITES))),
    )
    parser.add_argument(
        "extra_args",
        default=None,
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass to the underlying test command(s). "
        "If an underlying command doesn't recognize the argument, it "
        "will fail.",
    )
    parser.add_argument(
        "--debugger",
        default=None,
        action="store",
        nargs="?",
        help="Specify a debugger to use.",
    )
    add_logging_group(parser)
    return parser


ADD_TEST_SUPPORTED_SUITES = [
    "mochitest-chrome",
    "mochitest-plain",
    "mochitest-browser-chrome",
    "web-platform-tests-privatebrowsing",
    "web-platform-tests-testharness",
    "web-platform-tests-reftest",
    "xpcshell",
]
ADD_TEST_SUPPORTED_DOCS = ["js", "html", "xhtml", "xul"]

SUITE_SYNONYMS = {
    "wpt": "web-platform-tests-testharness",
    "wpt-privatebrowsing": "web-platform-tests-privatebrowsing",
    "wpt-testharness": "web-platform-tests-testharness",
    "wpt-reftest": "web-platform-tests-reftest",
}

MISSING_ARG = object()


def create_parser_addtest():
    import addtest

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--suite",
        choices=sorted(ADD_TEST_SUPPORTED_SUITES + list(SUITE_SYNONYMS.keys())),
        help="Suite for the test. "
        "If you pass a `test` argument this will be determined "
        "based on the filename and the folder it is in.",
    )
    parser.add_argument(
        "-o",
        "--overwrite",
        action="store_true",
        help="Overwrite an existing file if it exists.",
    )
    parser.add_argument(
        "--doc",
        choices=ADD_TEST_SUPPORTED_DOCS,
        help="Document type for the test (if applicable). "
        "If you pass a `test` argument this will be determined "
        "based on the filename.",
    )
    parser.add_argument(
        "-e",
        "--editor",
        action="store",
        nargs="?",
        default=MISSING_ARG,
        help="Open the created file(s) in an editor; if a "
        "binary is supplied it will be used, otherwise the default editor for "
        "your environment will be opened.",
    )

    for base_suite in addtest.TEST_CREATORS:
        cls = addtest.TEST_CREATORS[base_suite]
        if hasattr(cls, "get_parser"):
            group = parser.add_argument_group(base_suite)
            cls.get_parser(group)

    parser.add_argument("test", nargs="?", help="Test to create.")
    return parser


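# Illustrative invocations (the paths are hypothetical examples):
#   ./mach addtest dom/base/test/test_example.html
#   ./mach addtest --suite mochitest-browser-chrome browser/base/test/browser_example.js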
@Command(
    "addtest",
    category="testing",
    description="Generate tests based on templates",
    parser=create_parser_addtest,
)
def addtest(
    command_context,
    suite=None,
    test=None,
    doc=None,
    overwrite=False,
    editor=MISSING_ARG,
    **kwargs,
):
    import io

    import addtest
    from moztest.resolve import TEST_SUITES

    if not suite and not test:
        return create_parser_addtest().parse_args(["--help"])

    if suite in SUITE_SYNONYMS:
        suite = SUITE_SYNONYMS[suite]

    if test:
        if not overwrite and os.path.isfile(os.path.abspath(test)):
            print("Error: can't generate a test that already exists:", test)
            return 1

        abs_test = os.path.abspath(test)
        if doc is None:
            doc = guess_doc(abs_test)
        if suite is None:
            guessed_suite, err = guess_suite(abs_test)
            if err:
                print(err)
                return 1
            suite = guessed_suite

    else:
        test = None
        if doc is None:
            doc = "html"

    if not suite:
        print(
            "We couldn't automatically determine a suite. "
            "Please specify `--suite` with one of the following options:\n{}\n"
            "If you'd like to add support to a new suite, please file a bug "
            "blocking https://bugzilla.mozilla.org/show_bug.cgi?id=1540285.".format(
                ADD_TEST_SUPPORTED_SUITES
            )
        )
        return 1

    if doc not in ADD_TEST_SUPPORTED_DOCS:
        print(
            "Error: invalid `doc`. Either pass in a test with a valid extension "
            "({}) or pass in the `doc` argument.".format(ADD_TEST_SUPPORTED_DOCS)
        )
        return 1

    creator_cls = addtest.creator_for_suite(suite)

    if creator_cls is None:
        print("Sorry, `addtest` doesn't currently know how to add {}".format(suite))
        return 1

    creator = creator_cls(command_context.topsrcdir, test, suite, doc, **kwargs)

    creator.check_args()

    paths = []
    added_tests = False
    for path, template in creator:
        if not template:
            continue
        added_tests = True
        if path:
            paths.append(path)
            print("Adding a test file at {} (suite `{}`)".format(path, suite))

            try:
                os.makedirs(os.path.dirname(path))
            except OSError:
                pass

            with io.open(path, "w", newline="\n") as f:
                f.write(template)
        else:
            # write to stdout if you passed only suite and doc and not a file path
            print(template)

    if not added_tests:
        return 1

    if test:
        creator.update_manifest()

        # Small hack, should really do this better
        if suite.startswith("wpt-"):
            suite = "web-platform-tests"

        mach_command = TEST_SUITES[suite]["mach_command"]
        print(
            "Please make sure to add the new test to your commit. "
            "You can now run the test with:\n    ./mach {} {}".format(
                mach_command, test
            )
        )

    if editor is not MISSING_ARG:
        if editor is None:
            # `--editor` was passed without a binary; fall back to the environment.
            if "VISUAL" in os.environ:
                editor = os.environ["VISUAL"]
            elif "EDITOR" in os.environ:
                editor = os.environ["EDITOR"]
            else:
                print("Unable to determine editor; please specify a binary")

        proc = None
        if editor:
            proc = subprocess.Popen("%s %s" % (editor, " ".join(paths)), shell=True)

        if proc:
            proc.wait()

    return 0


def guess_doc(abs_test):
    filename = os.path.basename(abs_test)
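    # e.g. "test_example.html" -> "html" (the extension with its dot stripped)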
    return os.path.splitext(filename)[1].strip(".")


def guess_suite(abs_test):
    # Given an absolute test path, try to detect the suite based on the
    # filename and the surrounding folder. This detection is skipped when
    # the `--suite` argument is passed explicitly.
    err = None
    guessed_suite = None
    parent = os.path.dirname(abs_test)
    filename = os.path.basename(abs_test)

    has_browser_ini = os.path.isfile(os.path.join(parent, "browser.ini"))
    has_browser_toml = os.path.isfile(os.path.join(parent, "browser.toml"))
    has_chrome_ini = os.path.isfile(os.path.join(parent, "chrome.ini"))
    has_chrome_toml = os.path.isfile(os.path.join(parent, "chrome.toml"))
    has_plain_ini = os.path.isfile(os.path.join(parent, "mochitest.ini"))
    has_plain_toml = os.path.isfile(os.path.join(parent, "mochitest.toml"))
    has_xpcshell_ini = os.path.isfile(os.path.join(parent, "xpcshell.ini"))
    has_xpcshell_toml = os.path.isfile(os.path.join(parent, "xpcshell.toml"))

    in_wpt_folder = abs_test.startswith(
        os.path.abspath(os.path.join("testing", "web-platform"))
    )

    if in_wpt_folder:
        guessed_suite = "web-platform-tests-testharness"
        if "/css/" in abs_test:
            guessed_suite = "web-platform-tests-reftest"
    elif (
        filename.startswith("test_")
        and (has_xpcshell_ini or has_xpcshell_toml)
        and guess_doc(abs_test) == "js"
    ):
        guessed_suite = "xpcshell"
    else:
        if filename.startswith("browser_") and (has_browser_ini or has_browser_toml):
            guessed_suite = "mochitest-browser-chrome"
        elif filename.startswith("test_"):
            if (has_chrome_ini or has_chrome_toml) and (
                has_plain_ini or has_plain_toml
            ):
                err = (
                    "Error: directory contains both a chrome.{ini|toml} and mochitest.{ini|toml}. "
                    "Please set --suite=mochitest-chrome or --suite=mochitest-plain."
                )
            elif has_chrome_ini or has_chrome_toml:
                guessed_suite = "mochitest-chrome"
            elif has_plain_ini or has_plain_toml:
                guessed_suite = "mochitest-plain"
    return guessed_suite, err


class MachTestRunner:
    """Adapter for `mach test` to simplify its external import."""

    def test(command_context, what, extra_args, **log_args):
        return test(command_context, what, extra_args, **log_args)


@Command(
    "test",
    category="testing",
    description="Run tests (detects the kind of test and runs it).",
    parser=get_test_parser,
)
def test(command_context, what, extra_args, **log_args):
    """Run tests from names or paths.

    mach test accepts arguments specifying which tests to run. Each argument
    can be:

    * The path to a test file
    * A directory containing tests
    * A test suite name
    * An alias to a test suite name (codes used on Treeherder)
    * The path to a test manifest

    When paths or directories are given, they are first resolved to test
    files known to the build system.

    If resolved tests belong to more than one test type/flavor/harness,
    the harness for each relevant type/flavor will be invoked. E.g. if
    you specify a directory with xpcshell and browser chrome mochitests,
    both harnesses will be invoked.

    Warning: `mach test` does not automatically re-build.
    Please remember to run `mach build` when necessary.

    EXAMPLES

    Run all test files in the devtools/client/shared/redux/middleware/xpcshell/
    directory:

    `./mach test devtools/client/shared/redux/middleware/xpcshell/`

    The below command prints a short summary of results instead of
    the default more verbose output.
    Do not forget the - (minus sign) after --log-grouped!

    `./mach test --log-grouped - devtools/client/shared/redux/middleware/xpcshell/`

    To learn more about arguments for each test type/flavor/harness, please run
    `./mach <test-harness> --help`. For example, `./mach mochitest --help`.
    """
    from mozlog.commandline import setup_logging
    from mozlog.handlers import StreamHandler
    from moztest.resolve import TEST_SUITES, TestResolver, get_suite_definition

    resolver = command_context._spawn(TestResolver)
    run_suites, run_tests = resolver.resolve_metadata(what)

    if not run_suites and not run_tests:
        print(UNKNOWN_TEST)
        return 1

    if log_args.get("debugger", None):
        import mozdebug

        if not mozdebug.get_debugger_info(log_args.get("debugger")):
            sys.exit(1)
        extra_args_debugger_notation = "=".join(
            ["--debugger", log_args.get("debugger")]
        )
        if extra_args:
            extra_args.append(extra_args_debugger_notation)
        else:
            extra_args = [extra_args_debugger_notation]

    # Create shared logger
    format_args = {"level": command_context._mach_context.settings["test"]["level"]}
    if not run_suites and len(run_tests) == 1:
        format_args["verbose"] = True
        format_args["compact"] = False

    default_format = command_context._mach_context.settings["test"]["format"]
    log = setup_logging(
        "mach-test", log_args, {default_format: sys.stdout}, format_args
    )
    for handler in log.handlers:
        if isinstance(handler, StreamHandler):
            handler.formatter.inner.summary_on_shutdown = True

    if log_args.get("custom_handler", None) is not None:
        log.add_handler(log_args.get("custom_handler"))

    status = None
    for suite_name in run_suites:
        suite = TEST_SUITES[suite_name]
        kwargs = suite["kwargs"]
        kwargs["log"] = log
        kwargs.setdefault("subsuite", None)

        if "mach_command" in suite:
            res = command_context._mach_context.commands.dispatch(
                suite["mach_command"],
                command_context._mach_context,
                argv=extra_args,
                **kwargs,
            )
            if res:
                status = res

    buckets = {}
    for test in run_tests:
        key = (test["flavor"], test.get("subsuite", ""))
        buckets.setdefault(key, []).append(test)

    for (flavor, subsuite), tests in sorted(buckets.items()):
        _, m = get_suite_definition(flavor, subsuite)
        if "mach_command" not in m:
            substr = "-{}".format(subsuite) if subsuite else ""
            print(UNKNOWN_FLAVOR % (flavor, substr))
            status = 1
            continue

        kwargs = dict(m["kwargs"])
        kwargs["log"] = log
        kwargs.setdefault("subsuite", None)

        res = command_context._mach_context.commands.dispatch(
            m["mach_command"],
            command_context._mach_context,
            argv=extra_args,
            test_objects=tests,
            **kwargs,
        )
        if res:
            status = res

    if not log.has_shutdown:
        log.shutdown()
    return status


@Command(
    "cppunittest", category="testing", description="Run cpp unit tests (C++ tests)."
)
@CommandArgument(
    "test_files",
    nargs="*",
    metavar="N",
    help="Test to run. Can be specified as one or more files or "
    "directories, or omitted. If omitted, the entire test suite is "
    "executed.",
)
def run_cppunit_test(command_context, **params):
    from mozlog import commandline

    log = params.get("log")
    if not log:
        log = commandline.setup_logging("cppunittest", {}, {"tbpl": sys.stdout})

    # See if we have crash symbols
    symbols_path = os.path.join(command_context.distdir, "crashreporter-symbols")
    if not os.path.isdir(symbols_path):
        symbols_path = None

    # If no tests specified, run all tests in main manifest
    tests = params["test_files"]
    if not tests:
        tests = [os.path.join(command_context.distdir, "cppunittests")]
        manifest_path = os.path.join(
            command_context.topsrcdir, "testing", "cppunittest.toml"
        )
    else:
        manifest_path = None

    utility_path = command_context.bindir

    if conditions.is_android(command_context):
        from mozrunner.devices.android_device import (
            InstallIntent,
            verify_android_device,
        )

        verify_android_device(command_context, install=InstallIntent.NO)
        return run_android_test(
            command_context, tests, symbols_path, manifest_path, log
        )

    return run_desktop_test(
        command_context, tests, symbols_path, manifest_path, utility_path, log
    )


def run_desktop_test(
    command_context, tests, symbols_path, manifest_path, utility_path, log
):
    import runcppunittests as cppunittests
    from mozlog import commandline

    parser = cppunittests.CPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.utility_path = utility_path
    options.xre_path = command_context.bindir

    try:
        result = cppunittests.run_test_harness(options, tests)
    except Exception as e:
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        result = False
        raise

    return 0 if result else 1


def run_android_test(command_context, tests, symbols_path, manifest_path, log):
    import remotecppunittests
    from mozlog import commandline

    parser = remotecppunittests.RemoteCPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    if not options.adb_path:
        from mozrunner.devices.android_device import get_adb_path

        options.adb_path = get_adb_path(command_context)
    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.xre_path = command_context.bindir
    options.local_lib = command_context.bindir.replace("bin", "fennec")
    for file in os.listdir(os.path.join(command_context.topobjdir, "dist")):
        if file.endswith(".apk") and file.startswith("fennec"):
            options.local_apk = os.path.join(command_context.topobjdir, "dist", file)
            log.info("using APK: " + options.local_apk)
            break

    try:
        result = remotecppunittests.run_test_harness(options, tests)
    except Exception as e:
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        result = False
        raise

    return 0 if result else 1


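# e.g. executable_name("js") -> "js.exe" on Windows and "js" elsewhere.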
def executable_name(name):
    return name + ".exe" if sys.platform.startswith("win") else name


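# Illustrative usage (the test selector is a hypothetical example):
#   ./mach jstests non262/example.js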
@Command(
    "jstests",
    category="testing",
    description="Run SpiderMonkey JS tests in the JS shell.",
    ok_if_tests_disabled=True,
)
@CommandArgument("--shell", help="The shell to be used")
@CommandArgument(
    "params",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
)
def run_jstests(command_context, shell, params):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    jstest_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "js", "src", "tests", "jstests.py"),
        js,
    ] + params

    return subprocess.call(jstest_cmd)


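# Illustrative usage (the test path is a hypothetical example):
#   ./mach jit-test --cgc path/to/test.js
# --cgc mirrors the SM(cgc) CI job by setting JS_GC_ZEAL=IncrementalMultipleSlices.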
@Command(
    "jit-test",
    category="testing",
    description="Run SpiderMonkey jit-tests in the JS shell.",
    ok_if_tests_disabled=True,
)
@CommandArgument("--shell", help="The shell to be used")
@CommandArgument(
    "--cgc",
    action="store_true",
    default=False,
    help="Run with the SM(cgc) job's env vars",
)
@CommandArgument(
    "params",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
)
def run_jittests(command_context, shell, cgc, params):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    jittest_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "js", "src", "jit-test", "jit_test.py"),
        js,
    ] + params

    env = os.environ.copy()
    if cgc:
        env["JS_GC_ZEAL"] = "IncrementalMultipleSlices"

    return subprocess.call(jittest_cmd, env=env)


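# Illustrative usage (the prefix is a hypothetical example):
#   ./mach jsapi-tests testExample
# test_name may be any test-name prefix; omit it to run the entire suite.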
@Command("jsapi-tests", category="testing", description="Run SpiderMonkey JSAPI tests.")
@CommandArgument(
    "--list",
    action="store_true",
    default=False,
    help="List all tests",
)
@CommandArgument(
    "--frontend-only",
    action="store_true",
    default=False,
    help="Run tests for frontend-only APIs, with light-weight entry point",
)
@CommandArgument(
    "test_name",
    nargs="?",
    metavar="N",
    help="Test to run. Can be a prefix or omitted. If "
    "omitted, the entire test suite is executed.",
)
def run_jsapitests(command_context, list=False, frontend_only=False, test_name=None):
    jsapi_tests_cmd = [
        os.path.join(command_context.bindir, executable_name("jsapi-tests"))
    ]
    if list:
        jsapi_tests_cmd.append("--list")
    if frontend_only:
        jsapi_tests_cmd.append("--frontend-only")
    if test_name:
        jsapi_tests_cmd.append(test_name)

    test_env = os.environ.copy()
    test_env["TOPSRCDIR"] = command_context.topsrcdir

    result = subprocess.call(jsapi_tests_cmd, env=test_env)
    if result != 0:
        print(f"jsapi-tests failed, exit code {result}")
    return result


def run_check_js_msg(command_context):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    check_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "config", "check_js_msg_encoding.py"),
    ]

    return subprocess.call(check_cmd)


def get_jsshell_parser():
    from jsshell.benchmark import get_parser

    return get_parser()


@Command(
    "jsshell-bench",
    category="testing",
    parser=get_jsshell_parser,
    description="Run benchmarks in the SpiderMonkey JS shell.",
)
def run_jsshelltests(command_context, **kwargs):
    from jsshell import benchmark

    return benchmark.run(**kwargs)


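# Illustrative usage (the path is a hypothetical example):
#   ./mach cramtest testing/example/tests/example.t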
@Command(
    "cramtest",
    category="testing",
    description="Mercurial style .t tests for command line applications.",
)
@CommandArgument(
    "test_paths",
    nargs="*",
    metavar="N",
    help="Test paths to run. Each path can be a test file or directory. "
    "If omitted, the entire suite will be run.",
)
@CommandArgument(
    "cram_args",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the cram binary. See "
    "'./mach python -m cram -- -h' for a list of available options.",
)
def cramtest(command_context, cram_args=None, test_paths=None, test_objects=None):
    command_context.activate_virtualenv()
    import mozinfo
    from manifestparser import TestManifest

    if test_objects is None:
        from moztest.resolve import TestResolver

        resolver = command_context._spawn(TestResolver)
        if test_paths:
            # If we were given test paths, try to find tests matching them.
            test_objects = resolver.resolve_tests(paths=test_paths, flavor="cram")
        else:
            # Otherwise just run everything in CRAMTEST_MANIFESTS
            test_objects = resolver.resolve_tests(flavor="cram")

    if not test_objects:
        message = "No tests were collected, check spelling of the test paths."
        command_context.log(logging.WARN, "cramtest", {}, message)
        return 1

    mp = TestManifest()
    mp.tests.extend(test_objects)
    tests = mp.active_tests(disabled=False, **mozinfo.info)

    python = command_context.virtualenv_manager.python_path
    cmd = [python, "-m", "cram"] + (cram_args or []) + [t["relpath"] for t in tests]
    return subprocess.call(cmd, cwd=command_context.topsrcdir)


@Command(
    "test-info", category="testing", description="Display historical test results."
)
def test_info(command_context):
    """
    All functions implemented as subcommands.
    """


@SubCommand(
    "test-info",
    "tests",
    description="Display historical test result summary for named tests.",
)
@CommandArgument("test_names", nargs=argparse.REMAINDER, help="Test(s) of interest.")
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(7)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
@CommandArgument(
    "--show-info",
    action="store_true",
    help="Retrieve and display general test information.",
)
@CommandArgument(
    "--show-bugs",
    action="store_true",
    help="Retrieve and display related Bugzilla bugs.",
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_info_tests(
    command_context,
    test_names,
    start,
    end,
    show_info,
    show_bugs,
    verbose,
):
    import testinfo

    ti = testinfo.TestInfoTests(verbose)
    ti.report(
        test_names,
        start,
        end,
        show_info,
        show_bugs,
    )


@SubCommand(
    "test-info",
    "report",
    description="Generate a json report of test manifests and/or tests "
    "categorized by Bugzilla component and optionally filtered "
    "by path, component, and/or manifest annotations.",
)
@CommandArgument(
    "--components",
    default=None,
    help="Comma-separated list of Bugzilla components. "
    "e.g. Testing::General,Core::WebVR",
)
@CommandArgument(
    "--flavor",
    help='Limit results to tests of the specified flavor (e.g. "xpcshell").',
)
@CommandArgument(
    "--subsuite",
    help='Limit results to tests of the specified subsuite (e.g. "devtools").',
)
@CommandArgument(
    "paths", nargs=argparse.REMAINDER, help="File system paths of interest."
)
@CommandArgument(
    "--show-manifests",
    action="store_true",
    help="Include test manifests in report.",
)
@CommandArgument(
    "--show-tests", action="store_true", help="Include individual tests in report."
)
@CommandArgument(
    "--show-summary", action="store_true", help="Include summary in report."
)
@CommandArgument(
    "--show-annotations",
    action="store_true",
    help="Include list of manifest annotation conditions in report.",
)
@CommandArgument(
    "--show-testruns",
    action="store_true",
    help="Include total number of runs the test has if there are failures.",
)
@CommandArgument(
    "--filter-values",
    help="Comma-separated list of value regular expressions to filter on; "
    "displayed tests contain all specified values.",
)
@CommandArgument(
    "--filter-keys",
    help="Comma-separated list of test keys to filter on, "
    'like "skip-if"; only these fields will be searched '
    "for filter-values.",
)
@CommandArgument(
    "--no-component-report",
    action="store_false",
    dest="show_components",
    default=True,
    help="Do not categorize by bugzilla component.",
)
@CommandArgument("--output-file", help="Path to report file.")
@CommandArgument(
    "--runcounts-input-file", help="Optional path to a runcounts report file to use as input."
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
def test_report(
    command_context,
    components,
    flavor,
    subsuite,
    paths,
    show_manifests,
    show_tests,
    show_summary,
    show_annotations,
    filter_values,
    filter_keys,
    show_components,
    output_file,
    verbose,
    start,
    end,
    show_testruns,
    runcounts_input_file,
):
    import testinfo
    from mozbuild import build_commands

    try:
        command_context.config_environment
    except BuildEnvironmentNotFoundException:
        print("Looks like configure has not run yet, running it now...")
        build_commands.configure(command_context)

    ti = testinfo.TestInfoReport(verbose)
    ti.report(
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        filter_values,
        filter_keys,
        show_components,
        output_file,
        start,
        end,
        show_testruns,
        runcounts_input_file,
    )


@SubCommand(
    "test-info",
    "report-diff",
    description='Compare two reports generated by "test-info reports".',
)
@CommandArgument(
    "--before",
    default=None,
    help="The first (earlier) report file; path to local file or url.",
)
@CommandArgument(
    "--after", help="The second (later) report file; path to local file or url."
)
@CommandArgument(
    "--output-file",
    help="Path to report file to be written. If not specified, report "
    "will be written to standard output.",
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_report_diff(command_context, before, after, output_file, verbose):
    import testinfo

    ti = testinfo.TestInfoReport(verbose)
    ti.report_diff(before, after, output_file)


@SubCommand(
    "test-info",
    "testrun-report",
    description="Generate report of number of runs for each test group (manifest)",
)
@CommandArgument("--output-file", help="Path to report file.")
def test_info_testrun_report(command_context, output_file):
    import json

    import testinfo

    ti = testinfo.TestInfoReport(verbose=True)
    if os.environ.get("GECKO_HEAD_REPOSITORY", "") in [
        "https://hg.mozilla.org/mozilla-central",
        "https://hg.mozilla.org/try",
    ]:
        runcounts = ti.get_runcounts()
        if output_file:
            output_file = os.path.abspath(output_file)
            output_dir = os.path.dirname(output_file)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)
            with open(output_file, "w") as f:
                json.dump(runcounts, f)
        else:
            print(runcounts)


@SubCommand(
    "test-info",
    "failure-report",
    description="Display failure line groupings and frequencies for "
    "single tracking intermittent bugs.",
)
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
@CommandArgument(
    "--bugid",
    default=None,
    help="bugid for treeherder intermittent failures data query.",
)
def test_info_failures(
    command_context,
    start,
    end,
    bugid,
):
    # bugid comes in as a string (or None); we need an int:
    try:
        bugid = int(bugid)
    except (TypeError, ValueError):
        bugid = None
    if not bugid:
        print("Please enter a valid bugid (e.g. '1760132')")
        return

    # get bug info
    url = (
        "https://bugzilla.mozilla.org/rest/bug?include_fields=summary,depends_on&id=%s"
        % bugid
    )
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    if r.status_code != 200:
        print("%s error retrieving url: %s" % (r.status_code, url))
        return

    data = r.json()
    if not data:
        print("unable to get bugzilla information for %s" % bugid)
        return

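    # Single-tracking-bug summaries are expected to look like
    # "<failure message> | single tracking bug"; anything else is rejected below.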
    summary = data["bugs"][0]["summary"]
    parts = summary.split("|")
    if not summary.endswith("single tracking bug") or len(parts) != 2:
        print("this query only works with single tracking bugs")
        return

    # get depends_on bugs:
    buglist = [bugid]
    if "depends_on" in data["bugs"][0]:
        buglist.extend(data["bugs"][0]["depends_on"])

    testname = parts[0].strip().split(" ")[-1]

    # now query treeherder to get details about annotations
    data = []
    for b in buglist:
        url = "https://treeherder.mozilla.org/api/failuresbybug/"
        url += "?startday=%s&endday=%s&tree=trunk&bug=%s" % (start, end, b)
        r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
        r.raise_for_status()

        bdata = r.json()
        data.extend(bdata)

    if len(data) == 0:
        print("no failures were found for given bugid, please ensure bug is")
        print("accessible via: https://treeherder.mozilla.org/intermittent-failures")
        return

    # query VCS to get current list of variants:
    import yaml

    url = "https://hg.mozilla.org/mozilla-central/raw-file/tip/taskcluster/ci/test/variants.yml"
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    variants = yaml.safe_load(r.text)

    print(
        "\nQuerying data for bug %s annotated from %s to %s on trunk.\n\n"
        % (buglist, start, end)
    )

    jobs = {}
    lines = {}
    for failure in data:
        # config = platform/buildtype
        # testsuite (<suite>[-variant][-<chunk>])
        # lines - group by patterns that contain test name
        config = "%s/%s" % (failure["platform"], failure["build_type"])

        # find the earliest variant suffix that occurs in the suite name
        variant = ""
        suite = ""
        varpos = len(failure["test_suite"])
        for v in variants.keys():
            var = "-%s" % variants[v]["suffix"]
            if var in failure["test_suite"]:
                if failure["test_suite"].find(var) < varpos:
                    variant = var
                    varpos = failure["test_suite"].find(var)

        if variant:
            suite = failure["test_suite"].split(variant)[0]
        else:
            parts = failure["test_suite"].split("-")
            try:
                int(parts[-1])
                suite = "-".join(parts[:-1])
            except ValueError:
                pass  # if this works, then the last '-X' is a number :)

        if suite == "":
            print("Error: failure to find variant in %s" % failure["test_suite"])

        job = "%s-%s%s" % (config, suite, variant)
        if job not in jobs.keys():
            jobs[job] = 0
        jobs[job] += 1

        # lines - sum(hash) of all lines where we match testname
        hvalue = 0
        for line in failure["lines"]:
            if len(line.split(testname)) <= 1:
                continue

            # strip off timestamp and mozharness status
            parts = line.split("TEST-UNEXPECTED")
            l = "TEST-UNEXPECTED%s" % parts[-1]

            # only keep 25 characters of the failure; longer is often random numbers
            parts = l.split(testname)
            l = "%s%s%s" % (parts[0], testname, parts[1][:25])

            hvalue += hash(l)

        if not failure["lines"]:
            hvalue = 1

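        # hvalue == 1 marks failures with no recorded log lines so they still
        # share a bucket; hvalue == 0 means no line matched testname and the
        # failure is skipped below.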
        if not hvalue:
            continue

        if hvalue not in lines.keys():
            lines[hvalue] = {"lines": failure["lines"], "config": []}
        lines[hvalue]["config"].append(job)

    for h in lines.keys():
        print("%s errors with:" % (len(lines[h]["config"])))
        if lines[h]["lines"]:
            for l in lines[h]["lines"]:
                print(l)
        else:
            print(
                "... no failure lines recorded in"
                " https://treeherder.mozilla.org/intermittent-failures ..."
            )

        for job in jobs:
            count = len([x for x in lines[h]["config"] if x == job])
            if count > 0:
                print("  %s: %s" % (job, count))
        print("")


@Command(
    "rusttests",
    category="testing",
    conditions=[conditions.is_non_artifact_build],
    description="Run rust unit tests (via cargo test).",
)
def run_rusttests(command_context, **kwargs):
    return command_context._mach_context.commands.dispatch(
        "build",
        command_context._mach_context,
        what=["pre-export", "export", "recurse_rusttests"],
    )


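# Illustrative usage (the recipe path is a hypothetical example):
#   ./mach fluent-migration-test python/l10n/fluent_migrations/bug_12345_example.py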
@Command(
    "fluent-migration-test",
    category="testing",
    description="Test Fluent migration recipes.",
)
@CommandArgument(
    "--l10n-git",
    action="store_true",
    dest="l10n_git",
    help="Use git rather than hg source repository",
)
@CommandArgument("test_paths", nargs="*", metavar="N", help="Recipe paths to test.")
def run_migration_tests(command_context, l10n_git=False, test_paths=None, **kwargs):
    if not test_paths:
        test_paths = []
    command_context.activate_virtualenv()
    from test_fluent_migrations import fmt

    rv = 0
    with_context = []
    for to_test in test_paths:
        try:
            context = fmt.inspect_migration(to_test)
            for issue in context["issues"]:
                command_context.log(
                    logging.ERROR,
                    "fluent-migration-test",
                    {
                        "error": issue["msg"],
                        "file": to_test,
                    },
                    "ERROR in {file}: {error}",
                )
            if context["issues"]:
                continue
            with_context.append(
                {
                    "to_test": to_test,
                    "references": context["references"],
                }
            )
        except Exception as e:
            command_context.log(
                logging.ERROR,
                "fluent-migration-test",
                {"error": str(e), "file": to_test},
                "ERROR in {file}: {error}",
            )
            rv |= 1
    obj_dir, repo_dir = fmt.prepare_directories(command_context, l10n_git)
    for context in with_context:
        rv |= fmt.test_migration(
            command_context, obj_dir, repo_dir, l10n_git, **context
        )
    return rv


@Command(
    "manifest",
    category="testing",
    description="Manifest operations",
    virtualenv_name="manifest",
)
def manifest(_command_context):
    """
    All functions implemented as subcommands.
    """


@SubCommand(
    "manifest",
    "skip-fails",
    description="Update manifests to skip failing tests",
)
@CommandArgument("try_url", nargs=1, help="Treeherder URL for try (please use quotes)")
@CommandArgument(
    "-b", "--bugzilla", default=None, dest="bugzilla", help="Bugzilla instance"
)
@CommandArgument(
    "-m", "--meta-bug-id", default=None, dest="meta_bug_id", help="Meta Bug id"
)
@CommandArgument(
    "-s",
    "--turbo",
    action="store_true",
    dest="turbo",
    help="Skip all secondary failures",
)
@CommandArgument(
    "-t", "--save-tasks", default=None, dest="save_tasks", help="Save tasks to file"
)
@CommandArgument(
    "-T", "--use-tasks", default=None, dest="use_tasks", help="Use tasks from file"
)
@CommandArgument(
    "-f",
    "--save-failures",
    default=None,
    dest="save_failures",
    help="Save failures to file",
)
@CommandArgument(
    "-F",
    "--use-failures",
    default=None,
    dest="use_failures",
    help="Use failures from file",
)
@CommandArgument(
    "-M",
    "--max-failures",
    default=-1,
    dest="max_failures",
    help="Maximum number of failures to skip (-1 == no limit)",
)
@CommandArgument("-v", "--verbose", action="store_true", help="Verbose mode")
@CommandArgument(
    "-d",
    "--dry-run",
    action="store_true",
    help="Determine manifest changes, but do not write them",
)
def skipfails(
    command_context,
    try_url,
    bugzilla=None,
    meta_bug_id=None,
    turbo=False,
    save_tasks=None,
    use_tasks=None,
    save_failures=None,
    use_failures=None,
    max_failures=-1,
    verbose=False,
    dry_run=False,
):
    from skipfails import Skipfails

    if meta_bug_id is not None:
        try:
            meta_bug_id = int(meta_bug_id)
        except ValueError:
            meta_bug_id = None

    if max_failures is not None:
        try:
            max_failures = int(max_failures)
        except ValueError:
            max_failures = -1
    else:
        max_failures = -1

    Skipfails(command_context, try_url, verbose, bugzilla, dry_run, turbo).run(
        meta_bug_id,
        save_tasks,
        use_tasks,
        save_failures,
        use_failures,
        max_failures,
    )