# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import argparse
import logging
import os
import subprocess
import sys

import requests
from mach.decorators import Command, CommandArgument, SettingsProvider, SubCommand
from mozbuild.base import BuildEnvironmentNotFoundException
from mozbuild.base import MachCommandConditions as conditions

UNKNOWN_TEST = """
I was unable to find tests from the given argument(s).

You should specify a test directory, filename, test suite name, or
abbreviation.

It's possible my little brain doesn't know about the type of test you are
trying to execute. If you suspect this, please request support by filing
a bug at
https://bugzilla.mozilla.org/enter_bug.cgi?product=Testing&component=General.
""".strip()

UNKNOWN_FLAVOR = """
I know you are trying to run a %s%s test. Unfortunately, I can't run those
tests yet. Sorry!
""".strip()

TEST_HELP = """
Test or tests to run. Tests can be specified by filename, directory, suite
name or suite alias.

The following test suites and aliases are supported: {}
""".strip()


@SettingsProvider
class TestConfig(object):
    @classmethod
    def config_settings(cls):
        from mozlog.commandline import log_formatters
        from mozlog.structuredlog import log_levels

        format_desc = "The default format to use when running tests with `mach test`."
        format_choices = list(log_formatters)
        level_desc = "The default log level to use when running tests with `mach test`."
        level_choices = [l.lower() for l in log_levels]
        return [
            ("test.format", "string", format_desc, "mach", {"choices": format_choices}),
            ("test.level", "string", level_desc, "info", {"choices": level_choices}),
        ]


def get_test_parser():
    from mozlog.commandline import add_logging_group
    from moztest.resolve import TEST_SUITES

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "what",
        default=None,
        nargs="+",
        help=TEST_HELP.format(", ".join(sorted(TEST_SUITES))),
    )
    parser.add_argument(
        "extra_args",
        default=None,
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass to the underlying test command(s). "
        "If an underlying command doesn't recognize the argument, it "
        "will fail.",
    )
    parser.add_argument(
        "--debugger",
        default=None,
        action="store",
        nargs="?",
        help="Specify a debugger to use.",
    )
    add_logging_group(parser)
    return parser
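
# Example `mach test` invocations accepted by this parser (illustrative):
#   ./mach test xpcshell                         # a suite by name
#   ./mach test dom/indexedDB/test/unit          # a directory of tests
#   ./mach test --debugger=gdb <path-to-test>    # run under a debugger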


ADD_TEST_SUPPORTED_SUITES = [
    "mochitest-chrome",
    "mochitest-plain",
    "mochitest-browser-chrome",
    "web-platform-tests-testharness",
    "web-platform-tests-reftest",
    "xpcshell",
]

ADD_TEST_SUPPORTED_DOCS = ["js", "html", "xhtml", "xul"]

SUITE_SYNONYMS = {
    "wpt": "web-platform-tests-testharness",
    "wpt-testharness": "web-platform-tests-testharness",
    "wpt-reftest": "web-platform-tests-reftest",
}

MISSING_ARG = object()


def create_parser_addtest():
    import addtest

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--suite",
        choices=sorted(ADD_TEST_SUPPORTED_SUITES + list(SUITE_SYNONYMS.keys())),
        help="Suite for the test. "
        "If you pass a `test` argument this will be determined "
        "based on the filename and the folder it is in.",
    )
    parser.add_argument(
        "-o",
        "--overwrite",
        action="store_true",
        help="Overwrite an existing file if it exists.",
    )
    parser.add_argument(
        "--doc",
        choices=ADD_TEST_SUPPORTED_DOCS,
        help="Document type for the test (if applicable). "
        "If you pass a `test` argument this will be determined "
        "based on the filename.",
    )
    parser.add_argument(
        "-e",
        "--editor",
        action="store",
        nargs="?",
        default=MISSING_ARG,
        help="Open the created file(s) in an editor; if a "
        "binary is supplied it will be used, otherwise the default editor for "
        "your environment will be opened.",
    )

    for base_suite in addtest.TEST_CREATORS:
        cls = addtest.TEST_CREATORS[base_suite]
        if hasattr(cls, "get_parser"):
            group = parser.add_argument_group(base_suite)
            cls.get_parser(group)

    parser.add_argument("test", nargs="?", help="Test to create.")
    return parser


@Command(
    "addtest",
    category="testing",
    description="Generate tests based on templates.",
    parser=create_parser_addtest,
)
def addtest(
    command_context,
    suite=None,
    test=None,
    doc=None,
    overwrite=False,
    editor=MISSING_ARG,
    **kwargs,
):
    import io

    import addtest
    from moztest.resolve import TEST_SUITES

    if not suite and not test:
        return create_parser_addtest().parse_args(["--help"])

    if suite in SUITE_SYNONYMS:
        suite = SUITE_SYNONYMS[suite]

    if test:
        if not overwrite and os.path.isfile(os.path.abspath(test)):
            print("Error: can't generate a test that already exists:", test)
            return 1

        abs_test = os.path.abspath(test)
        if doc is None:
            doc = guess_doc(abs_test)
        if suite is None:
            guessed_suite, err = guess_suite(abs_test)
            if err:
                print(err)
                return 1
            suite = guessed_suite

    else:
        test = None
        if doc is None:
            doc = "html"

    if not suite:
        print(
            "We couldn't automatically determine a suite. "
            "Please specify `--suite` with one of the following options:\n{}\n"
            "If you'd like to add support to a new suite, please file a bug "
            "blocking https://bugzilla.mozilla.org/show_bug.cgi?id=1540285.".format(
                ADD_TEST_SUPPORTED_SUITES
            )
        )
        return 1

    if doc not in ADD_TEST_SUPPORTED_DOCS:
        print(
            "Error: invalid `doc`. Either pass in a test with a valid extension "
            "({}) or pass in the `doc` argument.".format(ADD_TEST_SUPPORTED_DOCS)
        )
        return 1

    creator_cls = addtest.creator_for_suite(suite)

    if creator_cls is None:
        print("Sorry, `addtest` doesn't currently know how to add {}".format(suite))
        return 1

    creator = creator_cls(command_context.topsrcdir, test, suite, doc, **kwargs)

    creator.check_args()

    paths = []
    added_tests = False
    for path, template in creator:
        if not template:
            continue
        added_tests = True
        if path:
            paths.append(path)
            print("Adding a test file at {} (suite `{}`)".format(path, suite))

            try:
                os.makedirs(os.path.dirname(path))
            except OSError:
                pass

            with io.open(path, "w", newline="\n") as f:
                f.write(template)
        else:
            # Write to stdout if only a suite and doc were passed, not a file path.
            print(template)

    if not added_tests:
        return 1

    if test:
        creator.update_manifest()

        # Small hack; should really do this better.
        if suite.startswith("wpt-"):
            suite = "web-platform-tests"

        mach_command = TEST_SUITES[suite]["mach_command"]
        print(
            "Please make sure to add the new test to your commit. "
            "You can now run the test with:\n    ./mach {} {}".format(
                mach_command, test
            )
        )

    if editor is not MISSING_ARG:
        if editor is None:
            # No binary was passed to --editor; fall back to the environment.
            if "VISUAL" in os.environ:
                editor = os.environ["VISUAL"]
            elif "EDITOR" in os.environ:
                editor = os.environ["EDITOR"]
            else:
                print("Unable to determine editor; please specify a binary")

        proc = None
        if editor:
            proc = subprocess.Popen("%s %s" % (editor, " ".join(paths)), shell=True)

        if proc:
            proc.wait()

    return 0
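
# Example invocations (paths are illustrative):
#   ./mach addtest dom/base/test/test_example.html
#   ./mach addtest --suite mochitest-browser-chrome \
#       browser/base/content/test/general/browser_example.js
#   ./mach addtest --suite xpcshell --doc js    # prints the template to stdout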


def guess_doc(abs_test):
    filename = os.path.basename(abs_test)
    return os.path.splitext(filename)[1].strip(".")
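
# For example, guess_doc("/src/dom/test/test_foo.html") returns "html"; a file
# with no extension yields "", which later fails the ADD_TEST_SUPPORTED_DOCS
# check in addtest().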


def guess_suite(abs_test):
    # If we have an abs_test, try to detect the type based on the name
    # and folder. This detection can be skipped by passing the `--suite` arg.
    err = None
    guessed_suite = None
    parent = os.path.dirname(abs_test)
    filename = os.path.basename(abs_test)

    has_browser_ini = os.path.isfile(os.path.join(parent, "browser.ini"))
    has_browser_toml = os.path.isfile(os.path.join(parent, "browser.toml"))
    has_chrome_ini = os.path.isfile(os.path.join(parent, "chrome.ini"))
    has_chrome_toml = os.path.isfile(os.path.join(parent, "chrome.toml"))
    has_plain_ini = os.path.isfile(os.path.join(parent, "mochitest.ini"))
    has_plain_toml = os.path.isfile(os.path.join(parent, "mochitest.toml"))
    has_xpcshell_ini = os.path.isfile(os.path.join(parent, "xpcshell.ini"))
    has_xpcshell_toml = os.path.isfile(os.path.join(parent, "xpcshell.toml"))

    in_wpt_folder = abs_test.startswith(
        os.path.abspath(os.path.join("testing", "web-platform"))
    )

    if in_wpt_folder:
        guessed_suite = "web-platform-tests-testharness"
        if "/css/" in abs_test:
            guessed_suite = "web-platform-tests-reftest"
    elif (
        filename.startswith("test_")
        and (has_xpcshell_ini or has_xpcshell_toml)
        and guess_doc(abs_test) == "js"
    ):
        guessed_suite = "xpcshell"
    else:
        if filename.startswith("browser_") and (has_browser_ini or has_browser_toml):
            guessed_suite = "mochitest-browser-chrome"
        elif filename.startswith("test_"):
            if (has_chrome_ini or has_chrome_toml) and (
                has_plain_ini or has_plain_toml
            ):
                err = (
                    "Error: directory contains both a chrome.{ini|toml} and mochitest.{ini|toml}. "
                    "Please set --suite=mochitest-chrome or --suite=mochitest-plain."
                )
            elif has_chrome_ini or has_chrome_toml:
                guessed_suite = "mochitest-chrome"
            elif has_plain_ini or has_plain_toml:
                guessed_suite = "mochitest-plain"
    return guessed_suite, err
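
# Rough sketch of the heuristics above (assuming the listed manifest sits next
# to the test file):
#   testing/web-platform/.../css/foo.html      -> web-platform-tests-reftest
#   test_foo.js    + xpcshell.{ini,toml}       -> xpcshell
#   browser_foo.js + browser.{ini,toml}        -> mochitest-browser-chrome
#   test_foo.html  + mochitest.{ini,toml}      -> mochitest-plain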


@Command(
    "test",
    category="testing",
    description="Run tests (detects the kind of test and runs it).",
    parser=get_test_parser,
)
def test(command_context, what, extra_args, **log_args):
    """Run tests from names or paths.

    mach test accepts arguments specifying which tests to run. Each argument
    can be:

    * The path to a test file
    * A directory containing tests
    * A test suite name
    * An alias to a test suite name (codes used on TreeHerder)
    * The path to a test manifest

    When paths or directories are given, they are first resolved to test
    files known to the build system.

    If resolved tests belong to more than one test type/flavor/harness,
    the harness for each relevant type/flavor will be invoked. E.g. if
    you specify a directory with xpcshell and browser chrome mochitests,
    both harnesses will be invoked.

    Warning: `mach test` does not automatically re-build.
    Please remember to run `mach build` when necessary.

    EXAMPLES

    Run all test files in the devtools/client/shared/redux/middleware/xpcshell/
    directory:

    `./mach test devtools/client/shared/redux/middleware/xpcshell/`

    The below command prints a short summary of results instead of
    the default more verbose output.
    Do not forget the - (minus sign) after --log-grouped!

    `./mach test --log-grouped - devtools/client/shared/redux/middleware/xpcshell/`

    To learn more about arguments for each test type/flavor/harness, please run
    `./mach <test-harness> --help`. For example, `./mach mochitest --help`.
    """
    from mozlog.commandline import setup_logging
    from mozlog.handlers import StreamHandler
    from moztest.resolve import TEST_SUITES, TestResolver, get_suite_definition

    resolver = command_context._spawn(TestResolver)
    run_suites, run_tests = resolver.resolve_metadata(what)

    if not run_suites and not run_tests:
        print(UNKNOWN_TEST)
        return 1

    if log_args.get("debugger", None):
        import mozdebug

        if not mozdebug.get_debugger_info(log_args.get("debugger")):
            sys.exit(1)
        extra_args_debugger_notation = "=".join(
            ["--debugger", log_args.get("debugger")]
        )
        if extra_args:
            extra_args.append(extra_args_debugger_notation)
        else:
            extra_args = [extra_args_debugger_notation]

    # Create a shared logger.
    format_args = {"level": command_context._mach_context.settings["test"]["level"]}
    if not run_suites and len(run_tests) == 1:
        format_args["verbose"] = True
        format_args["compact"] = False

    default_format = command_context._mach_context.settings["test"]["format"]
    log = setup_logging(
        "mach-test", log_args, {default_format: sys.stdout}, format_args
    )
    for handler in log.handlers:
        if isinstance(handler, StreamHandler):
            handler.formatter.inner.summary_on_shutdown = True

    status = None
    for suite_name in run_suites:
        suite = TEST_SUITES[suite_name]
        kwargs = suite["kwargs"]
        kwargs["log"] = log
        kwargs.setdefault("subsuite", None)

        if "mach_command" in suite:
            res = command_context._mach_context.commands.dispatch(
                suite["mach_command"],
                command_context._mach_context,
                argv=extra_args,
                **kwargs,
            )
            if res:
                status = res

    buckets = {}
    for test in run_tests:
        key = (test["flavor"], test.get("subsuite", ""))
        buckets.setdefault(key, []).append(test)

    for (flavor, subsuite), tests in sorted(buckets.items()):
        _, m = get_suite_definition(flavor, subsuite)
        if "mach_command" not in m:
            substr = "-{}".format(subsuite) if subsuite else ""
            print(UNKNOWN_FLAVOR % (flavor, substr))
            status = 1
            continue

        kwargs = dict(m["kwargs"])
        kwargs["log"] = log
        kwargs.setdefault("subsuite", None)

        res = command_context._mach_context.commands.dispatch(
            m["mach_command"],
            command_context._mach_context,
            argv=extra_args,
            test_objects=tests,
            **kwargs,
        )
        if res:
            status = res

    log.shutdown()
    return status


@Command(
    "cppunittest", category="testing", description="Run cpp unit tests (C++ tests)."
)
@CommandArgument(
    "test_files",
    nargs="*",
    metavar="N",
    help="Test to run. Can be specified as one or more files or "
    "directories, or omitted. If omitted, the entire test suite is "
    "executed.",
)
def run_cppunit_test(command_context, **params):
    from mozlog import commandline

    log = params.get("log")
    if not log:
        log = commandline.setup_logging("cppunittest", {}, {"tbpl": sys.stdout})

    # See if we have crash symbols.
    symbols_path = os.path.join(command_context.distdir, "crashreporter-symbols")
    if not os.path.isdir(symbols_path):
        symbols_path = None

    # If no tests were specified, run all tests in the main manifest.
    tests = params["test_files"]
    if not tests:
        tests = [os.path.join(command_context.distdir, "cppunittests")]
        manifest_path = os.path.join(
            command_context.topsrcdir, "testing", "cppunittest.ini"
        )
    else:
        manifest_path = None

    utility_path = command_context.bindir

    if conditions.is_android(command_context):
        from mozrunner.devices.android_device import (
            InstallIntent,
            verify_android_device,
        )

        verify_android_device(command_context, install=InstallIntent.NO)
        # run_android_test takes the command context as its first argument.
        return run_android_test(command_context, tests, symbols_path, manifest_path, log)

    return run_desktop_test(
        command_context, tests, symbols_path, manifest_path, utility_path, log
    )
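
# Example invocations (paths are illustrative):
#   ./mach cppunittest                 # every test in testing/cppunittest.ini
#   ./mach cppunittest <objdir>/dist/cppunittests/TestStrings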


def run_desktop_test(
    command_context, tests, symbols_path, manifest_path, utility_path, log
):
    import runcppunittests as cppunittests
    from mozlog import commandline

    parser = cppunittests.CPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.utility_path = utility_path
    options.xre_path = command_context.bindir

    try:
        result = cppunittests.run_test_harness(options, tests)
    except Exception as e:
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        result = False
        raise

    return 0 if result else 1


def run_android_test(command_context, tests, symbols_path, manifest_path, log):
    import remotecppunittests
    from mozlog import commandline

    parser = remotecppunittests.RemoteCPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    if not options.adb_path:
        from mozrunner.devices.android_device import get_adb_path

        options.adb_path = get_adb_path(command_context)
    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.xre_path = command_context.bindir
    options.local_lib = command_context.bindir.replace("bin", "fennec")
    for file in os.listdir(os.path.join(command_context.topobjdir, "dist")):
        if file.endswith(".apk") and file.startswith("fennec"):
            options.local_apk = os.path.join(command_context.topobjdir, "dist", file)
            log.info("using APK: " + options.local_apk)
            break

    try:
        result = remotecppunittests.run_test_harness(options, tests)
    except Exception as e:
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        result = False
        raise

    return 0 if result else 1


def executable_name(name):
    return name + ".exe" if sys.platform.startswith("win") else name
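
# e.g. executable_name("js") -> "js.exe" on Windows and "js" elsewhere.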


@Command(
    "jstests",
    category="testing",
    description="Run SpiderMonkey JS tests in the JS shell.",
    ok_if_tests_disabled=True,
)
@CommandArgument("--shell", help="The shell to be used")
@CommandArgument(
    "params",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
)
def run_jstests(command_context, shell, params):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    jstest_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "js", "src", "tests", "jstests.py"),
        js,
    ] + params

    return subprocess.call(jstest_cmd)


@Command(
    "jit-test",
    category="testing",
    description="Run SpiderMonkey jit-tests in the JS shell.",
    ok_if_tests_disabled=True,
)
@CommandArgument("--shell", help="The shell to be used")
@CommandArgument(
    "--cgc",
    action="store_true",
    default=False,
    help="Run with the SM(cgc) job's env vars",
)
@CommandArgument(
    "params",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
)
def run_jittests(command_context, shell, cgc, params):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    jittest_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "js", "src", "jit-test", "jit_test.py"),
        js,
    ] + params

    env = os.environ.copy()
    if cgc:
        env["JS_GC_ZEAL"] = "IncrementalMultipleSlices"

    return subprocess.call(jittest_cmd, env=env)
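
# Example invocations (illustrative):
#   ./mach jstests                        # full jstests suite in the built shell
#   ./mach jit-test --cgc                 # jit-tests with the SM(cgc) GC-zeal env
#   ./mach jit-test --shell /path/to/js   # use a specific shell binary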
649 @Command("jsapi-tests", category="testing", description="Run SpiderMonkey JSAPI tests.")
650 @CommandArgument(
651 "--list",
652 action="store_true",
653 default=False,
654 help="List all tests",
656 @CommandArgument(
657 "--frontend-only",
658 action="store_true",
659 default=False,
660 help="Run tests for frontend-only APIs, with light-weight entry point",
662 @CommandArgument(
663 "test_name",
664 nargs="?",
665 metavar="N",
666 help="Test to run. Can be a prefix or omitted. If "
667 "omitted, the entire test suite is executed.",
669 def run_jsapitests(command_context, list=False, frontend_only=False, test_name=None):
670 import subprocess
672 jsapi_tests_cmd = [
673 os.path.join(command_context.bindir, executable_name("jsapi-tests"))
675 if list:
676 jsapi_tests_cmd.append("--list")
677 if frontend_only:
678 jsapi_tests_cmd.append("--frontend-only")
679 if test_name:
680 jsapi_tests_cmd.append(test_name)
682 test_env = os.environ.copy()
683 test_env["TOPSRCDIR"] = command_context.topsrcdir
685 result = subprocess.call(jsapi_tests_cmd, env=test_env)
686 if result != 0:
687 print(f"jsapi-tests failed, exit code {result}")
688 return result
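
# Example invocations (the test-name prefix is illustrative):
#   ./mach jsapi-tests --list             # enumerate available tests
#   ./mach jsapi-tests testHashTable      # run all tests matching the prefix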


def run_check_js_msg(command_context):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    check_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "config", "check_js_msg_encoding.py"),
    ]

    return subprocess.call(check_cmd)


def get_jsshell_parser():
    from jsshell.benchmark import get_parser

    return get_parser()


@Command(
    "jsshell-bench",
    category="testing",
    parser=get_jsshell_parser,
    description="Run benchmarks in the SpiderMonkey JS shell.",
)
def run_jsshelltests(command_context, **kwargs):
    from jsshell import benchmark

    return benchmark.run(**kwargs)


@Command(
    "cramtest",
    category="testing",
    description="Mercurial-style .t tests for command line applications.",
)
@CommandArgument(
    "test_paths",
    nargs="*",
    metavar="N",
    help="Test paths to run. Each path can be a test file or directory. "
    "If omitted, the entire suite will be run.",
)
@CommandArgument(
    "cram_args",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the cram binary. See "
    "'./mach python -m cram -- -h' for a list of available options.",
)
def cramtest(command_context, cram_args=None, test_paths=None, test_objects=None):
    command_context.activate_virtualenv()
    import mozinfo
    from manifestparser import TestManifest

    if test_objects is None:
        from moztest.resolve import TestResolver

        resolver = command_context._spawn(TestResolver)
        if test_paths:
            # If we were given test paths, try to find tests matching them.
            test_objects = resolver.resolve_tests(paths=test_paths, flavor="cram")
        else:
            # Otherwise just run everything in CRAMTEST_MANIFESTS.
            test_objects = resolver.resolve_tests(flavor="cram")

    if not test_objects:
        message = "No tests were collected; check the spelling of the test paths."
        command_context.log(logging.WARN, "cramtest", {}, message)
        return 1

    mp = TestManifest()
    mp.tests.extend(test_objects)
    tests = mp.active_tests(disabled=False, **mozinfo.info)

    python = command_context.virtualenv_manager.python_path
    # cram_args defaults to None when called directly, so guard the concatenation.
    cmd = [python, "-m", "cram"] + (cram_args or []) + [t["relpath"] for t in tests]
    return subprocess.call(cmd, cwd=command_context.topsrcdir)
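
# Example invocations (paths are illustrative):
#   ./mach cramtest                       # run every cram test
#   ./mach cramtest python/mach/          # run cram tests under a directory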


from datetime import date, timedelta


@Command(
    "test-info", category="testing", description="Display historical test results."
)
def test_info(command_context):
    """
    All functions implemented as subcommands.
    """


@SubCommand(
    "test-info",
    "tests",
    description="Display historical test result summary for named tests.",
)
@CommandArgument("test_names", nargs=argparse.REMAINDER, help="Test(s) of interest.")
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(7)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
@CommandArgument(
    "--show-info",
    action="store_true",
    help="Retrieve and display general test information.",
)
@CommandArgument(
    "--show-bugs",
    action="store_true",
    help="Retrieve and display related Bugzilla bugs.",
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_info_tests(
    command_context,
    test_names,
    start,
    end,
    show_info,
    show_bugs,
    verbose,
):
    import testinfo

    ti = testinfo.TestInfoTests(verbose)
    ti.report(
        test_names,
        start,
        end,
        show_info,
        show_bugs,
    )
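
# Example invocation (the test name and dates are illustrative):
#   ./mach test-info tests --show-bugs --start 2024-01-01 --end 2024-01-31 \
#       dom/media/test/test_eme_playback.html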


@SubCommand(
    "test-info",
    "report",
    description="Generate a json report of test manifests and/or tests "
    "categorized by Bugzilla component and optionally filtered "
    "by path, component, and/or manifest annotations.",
)
@CommandArgument(
    "--components",
    default=None,
    help="Comma-separated list of Bugzilla components, "
    "e.g. Testing::General,Core::WebVR.",
)
@CommandArgument(
    "--flavor",
    help='Limit results to tests of the specified flavor (e.g. "xpcshell").',
)
@CommandArgument(
    "--subsuite",
    help='Limit results to tests of the specified subsuite (e.g. "devtools").',
)
@CommandArgument(
    "paths", nargs=argparse.REMAINDER, help="File system paths of interest."
)
@CommandArgument(
    "--show-manifests",
    action="store_true",
    help="Include test manifests in report.",
)
@CommandArgument(
    "--show-tests", action="store_true", help="Include individual tests in report."
)
@CommandArgument(
    "--show-summary", action="store_true", help="Include summary in report."
)
@CommandArgument(
    "--show-annotations",
    action="store_true",
    help="Include list of manifest annotation conditions in report.",
)
@CommandArgument(
    "--show-testruns",
    action="store_true",
    help="Include the total number of runs for the test if there are failures.",
)
@CommandArgument(
    "--filter-values",
    help="Comma-separated list of value regular expressions to filter on; "
    "displayed tests contain all specified values.",
)
@CommandArgument(
    "--filter-keys",
    help="Comma-separated list of test keys to filter on, "
    'like "skip-if"; only these fields will be searched '
    "for filter-values.",
)
@CommandArgument(
    "--no-component-report",
    action="store_false",
    dest="show_components",
    default=True,
    help="Do not categorize by Bugzilla component.",
)
@CommandArgument("--output-file", help="Path to report file.")
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
def test_report(
    command_context,
    components,
    flavor,
    subsuite,
    paths,
    show_manifests,
    show_tests,
    show_summary,
    show_annotations,
    filter_values,
    filter_keys,
    show_components,
    output_file,
    verbose,
    start,
    end,
    show_testruns,
):
    import testinfo
    from mozbuild import build_commands

    try:
        command_context.config_environment
    except BuildEnvironmentNotFoundException:
        print("Looks like configure has not run yet; running it now...")
        build_commands.configure(command_context)

    ti = testinfo.TestInfoReport(verbose)
    ti.report(
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        filter_values,
        filter_keys,
        show_components,
        output_file,
        start,
        end,
        show_testruns,
    )
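
# Example invocation (reusing the component named in the --components help):
#   ./mach test-info report --components Testing::General --show-tests \
#       --output-file /tmp/test-report.json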


@SubCommand(
    "test-info",
    "report-diff",
    description='Compare two reports generated by "test-info report".',
)
@CommandArgument(
    "--before",
    default=None,
    help="The first (earlier) report file; path to local file or url.",
)
@CommandArgument(
    "--after", help="The second (later) report file; path to local file or url."
)
@CommandArgument(
    "--output-file",
    help="Path to report file to be written. If not specified, the report "
    "will be written to standard output.",
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_report_diff(command_context, before, after, output_file, verbose):
    import testinfo

    ti = testinfo.TestInfoReport(verbose)
    ti.report_diff(before, after, output_file)


@SubCommand(
    "test-info",
    "testrun-report",
    description="Generate a report of the number of runs for each test group (manifest).",
)
@CommandArgument("--output-file", help="Path to report file.")
def test_info_testrun_report(command_context, output_file):
    import json

    import testinfo

    ti = testinfo.TestInfoReport(verbose=True)
    runcounts = ti.get_runcounts()
    if output_file:
        output_file = os.path.abspath(output_file)
        output_dir = os.path.dirname(output_file)
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)
        with open(output_file, "w") as f:
            json.dump(runcounts, f)
    else:
        print(runcounts)


@SubCommand(
    "test-info",
    "failure-report",
    description="Display failure line groupings and frequencies for "
    "single tracking intermittent bugs.",
)
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
@CommandArgument(
    "--bugid",
    default=None,
    help="bugid for treeherder intermittent failures data query.",
)
def test_info_failures(
    command_context,
    start,
    end,
    bugid,
):
    # bugid comes in as a string (or None); we need an int:
    try:
        bugid = int(bugid)
    except (TypeError, ValueError):
        bugid = None
    if not bugid:
        print("Please enter a valid bugid (e.g. '1760132')")
        return

    # Get the bug info.
    url = (
        "https://bugzilla.mozilla.org/rest/bug?include_fields=summary,depends_on&id=%s"
        % bugid
    )
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    if r.status_code != 200:
        print("%s error retrieving url: %s" % (r.status_code, url))
        return

    data = r.json()
    if not data:
        print("unable to get bugzilla information for %s" % bugid)
        return

    summary = data["bugs"][0]["summary"]
    parts = summary.split("|")
    if not summary.endswith("single tracking bug") or len(parts) != 2:
        print("this query only works with single tracking bugs")
        return

    # Get the depends_on bugs:
    buglist = [bugid]
    if "depends_on" in data["bugs"][0]:
        buglist.extend(data["bugs"][0]["depends_on"])

    testname = parts[0].strip().split(" ")[-1]

    # Now query Treeherder to get details about annotations.
    data = []
    for b in buglist:
        url = "https://treeherder.mozilla.org/api/failuresbybug/"
        url += "?startday=%s&endday=%s&tree=trunk&bug=%s" % (start, end, b)
        r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
        r.raise_for_status()

        bdata = r.json()
        data.extend(bdata)

    if len(data) == 0:
        print("no failures were found for the given bugid; please ensure the bug is")
        print("accessible via: https://treeherder.mozilla.org/intermittent-failures")
        return

    # Query VCS to get the current list of variants:
    import yaml

    url = "https://hg.mozilla.org/mozilla-central/raw-file/tip/taskcluster/ci/test/variants.yml"
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    variants = yaml.safe_load(r.text)

    print(
        "\nQuerying data for bug %s annotated from %s to %s on trunk.\n\n"
        % (buglist, start, end)
    )
    jobs = {}
    lines = {}
    for failure in data:
        # config = platform/buildtype
        # testsuite (<suite>[-variant][-<chunk>])
        # lines - group by patterns that contain the test name
        config = "%s/%s" % (failure["platform"], failure["build_type"])

        variant = ""
        suite = ""
        varpos = len(failure["test_suite"])
        for v in variants.keys():
            var = "-%s" % variants[v]["suffix"]
            if var in failure["test_suite"]:
                if failure["test_suite"].find(var) < varpos:
                    # Keep the earliest-occurring variant suffix.
                    variant = var
                    varpos = failure["test_suite"].find(var)

        if variant:
            suite = failure["test_suite"].split(variant)[0]
        else:
            parts = failure["test_suite"].split("-")
            try:
                int(parts[-1])
                suite = "-".join(parts[:-1])
            except ValueError:
                pass  # if this works, then the last '-X' is a number :)

        if suite == "":
            print("Error: failure to find variant in %s" % failure["test_suite"])

        job = "%s-%s%s" % (config, suite, variant)
        if job not in jobs.keys():
            jobs[job] = 0
        jobs[job] += 1

        # lines - sum(hash) of all lines where we match testname
        hvalue = 0
        for line in failure["lines"]:
            if len(line.split(testname)) <= 1:
                continue
            # Strip off the timestamp and mozharness status.
            parts = line.split("TEST-UNEXPECTED")
            l = "TEST-UNEXPECTED%s" % parts[-1]

            # Only keep 25 characters of the failure; anything longer is often
            # random numbers.
            parts = l.split(testname)
            l = "%s%s%s" % (parts[0], testname, parts[1][:25])

            hvalue += hash(l)

        if not failure["lines"]:
            hvalue = 1

        if not hvalue:
            continue

        if hvalue not in lines.keys():
            lines[hvalue] = {"lines": failure["lines"], "config": []}
        lines[hvalue]["config"].append(job)

    for h in lines.keys():
        print("%s errors with:" % (len(lines[h]["config"])))
        if lines[h]["lines"]:
            for l in lines[h]["lines"]:
                print(l)
        else:
            print(
                "... no failure lines recorded in"
                " https://treeherder.mozilla.org/intermittent-failures ..."
            )

        for job in jobs:
            count = len([x for x in lines[h]["config"] if x == job])
            if count > 0:
                print("  %s: %s" % (job, count))
        print("")


@Command(
    "rusttests",
    category="testing",
    conditions=[conditions.is_non_artifact_build],
    description="Run rust unit tests (via cargo test).",
)
def run_rusttests(command_context, **kwargs):
    return command_context._mach_context.commands.dispatch(
        "build",
        command_context._mach_context,
        what=["pre-export", "export", "recurse_rusttests"],
    )


@Command(
    "fluent-migration-test",
    category="testing",
    description="Test Fluent migration recipes.",
)
@CommandArgument("test_paths", nargs="*", metavar="N", help="Recipe paths to test.")
def run_migration_tests(command_context, test_paths=None, **kwargs):
    if not test_paths:
        test_paths = []
    command_context.activate_virtualenv()
    from test_fluent_migrations import fmt

    rv = 0
    with_context = []
    for to_test in test_paths:
        try:
            context = fmt.inspect_migration(to_test)
            for issue in context["issues"]:
                command_context.log(
                    logging.ERROR,
                    "fluent-migration-test",
                    {
                        "error": issue["msg"],
                        "file": to_test,
                    },
                    "ERROR in {file}: {error}",
                )
            if context["issues"]:
                continue
            with_context.append(
                {
                    "to_test": to_test,
                    "references": context["references"],
                }
            )
        except Exception as e:
            command_context.log(
                logging.ERROR,
                "fluent-migration-test",
                {"error": str(e), "file": to_test},
                "ERROR in {file}: {error}",
            )
            rv |= 1
    obj_dir = fmt.prepare_object_dir(command_context)
    for context in with_context:
        rv |= fmt.test_migration(command_context, obj_dir, **context)
    return rv
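
# Example invocation (the recipe path is illustrative):
#   ./mach fluent-migration-test python/l10n/fluent_migrations/bug_1234567_example.py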