# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import argparse
import logging
import os
import subprocess
import sys

import requests
from mach.decorators import Command, CommandArgument, SettingsProvider, SubCommand
from mozbuild.base import BuildEnvironmentNotFoundException
from mozbuild.base import MachCommandConditions as conditions

UNKNOWN_TEST = """
I was unable to find tests from the given argument(s).

You should specify a test directory, filename, test suite name, or
abbreviation.

It's possible my little brain doesn't know about the type of test you are
trying to execute. If you suspect this, please request support by filing
a bug at
https://bugzilla.mozilla.org/enter_bug.cgi?product=Testing&component=General.
""".strip()

UNKNOWN_FLAVOR = """
I know you are trying to run a %s%s test. Unfortunately, I can't run those
tests yet. Sorry!
""".strip()

TEST_HELP = """
Test or tests to run. Tests can be specified by filename, directory, suite
name or suite alias.

The following test suites and aliases are supported: {}
""".strip()


@SettingsProvider
class TestConfig(object):
    @classmethod
    def config_settings(cls):
        from mozlog.commandline import log_formatters
        from mozlog.structuredlog import log_levels

        format_desc = "The default format to use when running tests with `mach test`."
        format_choices = list(log_formatters)
        level_desc = "The default log level to use when running tests with `mach test`."
        level_choices = [level.lower() for level in log_levels]
        return [
            ("test.format", "string", format_desc, "mach", {"choices": format_choices}),
            ("test.level", "string", level_desc, "info", {"choices": level_choices}),
        ]


def get_test_parser():
    from mozlog.commandline import add_logging_group
    from moztest.resolve import TEST_SUITES

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "what",
        default=None,
        nargs="+",
        help=TEST_HELP.format(", ".join(sorted(TEST_SUITES))),
    )
    parser.add_argument(
        "extra_args",
        default=None,
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass to the underlying test command(s). "
        "If an underlying command doesn't recognize the argument, it "
        "will fail.",
    )
    parser.add_argument(
        "--debugger",
        default=None,
        action="store",
        nargs="?",
        help="Specify a debugger to use.",
    )
    add_logging_group(parser)
    return parser


ADD_TEST_SUPPORTED_SUITES = [
    "mochitest-chrome",
    "mochitest-plain",
    "mochitest-browser-chrome",
    "web-platform-tests-testharness",
    "web-platform-tests-reftest",
    "xpcshell",
]
ADD_TEST_SUPPORTED_DOCS = ["js", "html", "xhtml", "xul"]

SUITE_SYNONYMS = {
    "wpt": "web-platform-tests-testharness",
    "wpt-testharness": "web-platform-tests-testharness",
    "wpt-reftest": "web-platform-tests-reftest",
}
MISSING_ARG = object()


def create_parser_addtest():
    import addtest

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--suite",
        choices=sorted(ADD_TEST_SUPPORTED_SUITES + list(SUITE_SYNONYMS.keys())),
        help="Suite for the test. "
        "If you pass a `test` argument this will be determined "
        "based on the filename and the folder it is in.",
    )
    parser.add_argument(
        "-o",
        "--overwrite",
        action="store_true",
        help="Overwrite an existing file if it exists.",
    )
    parser.add_argument(
        "--doc",
        choices=ADD_TEST_SUPPORTED_DOCS,
        help="Document type for the test (if applicable). "
        "If you pass a `test` argument this will be determined "
        "based on the filename.",
    )
    parser.add_argument(
        "-e",
        "--editor",
        action="store",
        nargs="?",
        default=MISSING_ARG,
        help="Open the created file(s) in an editor; if a "
        "binary is supplied it will be used, otherwise the default editor for "
        "your environment will be opened.",
    )

    for base_suite in addtest.TEST_CREATORS:
        cls = addtest.TEST_CREATORS[base_suite]
        if hasattr(cls, "get_parser"):
            group = parser.add_argument_group(base_suite)
            cls.get_parser(group)

    parser.add_argument("test", nargs="?", help="Test to create.")
    return parser


@Command(
    "addtest",
    category="testing",
    description="Generate tests based on templates.",
    parser=create_parser_addtest,
)
def addtest(
    command_context,
    suite=None,
    test=None,
    doc=None,
    overwrite=False,
    editor=MISSING_ARG,
    **kwargs,
):
    import io

    import addtest
    from moztest.resolve import TEST_SUITES

    if not suite and not test:
        return create_parser_addtest().parse_args(["--help"])

    if suite in SUITE_SYNONYMS:
        suite = SUITE_SYNONYMS[suite]

    if test:
        if not overwrite and os.path.isfile(os.path.abspath(test)):
            print("Error: can't generate a test that already exists:", test)
            return 1

        abs_test = os.path.abspath(test)
        if doc is None:
            doc = guess_doc(abs_test)
        if suite is None:
            guessed_suite, err = guess_suite(abs_test)
            if err:
                print(err)
                return 1
            suite = guessed_suite

    else:
        test = None
        if doc is None:
            doc = "html"

    if not suite:
        print(
            "We couldn't automatically determine a suite. "
            "Please specify `--suite` with one of the following options:\n{}\n"
            "If you'd like to add support for a new suite, please file a bug "
            "blocking https://bugzilla.mozilla.org/show_bug.cgi?id=1540285.".format(
                ADD_TEST_SUPPORTED_SUITES
            )
        )
        return 1

    if doc not in ADD_TEST_SUPPORTED_DOCS:
        print(
            "Error: invalid `doc`. Either pass in a test with a valid extension "
            "({}) or pass in the `doc` argument.".format(ADD_TEST_SUPPORTED_DOCS)
        )
        return 1

    creator_cls = addtest.creator_for_suite(suite)

    if creator_cls is None:
        print("Sorry, `addtest` doesn't currently know how to add {}".format(suite))
        return 1

    creator = creator_cls(command_context.topsrcdir, test, suite, doc, **kwargs)

    creator.check_args()
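
    # Each creator yields (path, template) pairs; when a pair has no path the
    # rendered template is written to stdout instead of a file.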
    paths = []
    added_tests = False
    for path, template in creator:
        if not template:
            continue
        added_tests = True
        if path:
            paths.append(path)
            print("Adding a test file at {} (suite `{}`)".format(path, suite))

            try:
                os.makedirs(os.path.dirname(path))
            except OSError:
                pass

            with io.open(path, "w", newline="\n") as f:
                f.write(template)
        else:
            # Only a suite and doc were passed, not a file path; write the
            # template to stdout.
            print(template)

    if not added_tests:
        return 1

    if test:
        creator.update_manifest()

        # Small hack; this mapping should really be done in a better way.
        if suite.startswith("wpt-"):
            suite = "web-platform-tests"

        mach_command = TEST_SUITES[suite]["mach_command"]
        print(
            "Please make sure to add the new test to your commit. "
            "You can now run the test with:\n    ./mach {} {}".format(
                mach_command, test
            )
        )

    if editor is not MISSING_ARG:
        if editor is None:
            # --editor was passed without a value; fall back to the environment.
            if "VISUAL" in os.environ:
                editor = os.environ["VISUAL"]
            elif "EDITOR" in os.environ:
                editor = os.environ["EDITOR"]
            else:
                print("Unable to determine editor; please specify a binary")

        proc = None
        if editor:
            proc = subprocess.Popen("%s %s" % (editor, " ".join(paths)), shell=True)

        if proc:
            proc.wait()

    return 0
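

# Infer the document type from the test's file extension;
# e.g. guess_doc("/a/b/test_foo.html") returns "html".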
def guess_doc(abs_test):
    filename = os.path.basename(abs_test)
    return os.path.splitext(filename)[1].strip(".")


def guess_suite(abs_test):
    # If we have an abs_test path, try to detect the suite based on the
    # filename and the manifests in its folder. This detection can be
    # skipped by passing the `--suite` argument explicitly.
    err = None
    guessed_suite = None
    parent = os.path.dirname(abs_test)
    filename = os.path.basename(abs_test)

    has_browser_ini = os.path.isfile(os.path.join(parent, "browser.ini"))
    has_chrome_ini = os.path.isfile(os.path.join(parent, "chrome.ini"))
    has_plain_ini = os.path.isfile(os.path.join(parent, "mochitest.ini"))
    has_xpcshell_ini = os.path.isfile(os.path.join(parent, "xpcshell.ini"))

    in_wpt_folder = abs_test.startswith(
        os.path.abspath(os.path.join("testing", "web-platform"))
    )

    if in_wpt_folder:
        guessed_suite = "web-platform-tests-testharness"
        if "/css/" in abs_test:
            guessed_suite = "web-platform-tests-reftest"
    elif (
        filename.startswith("test_")
        and has_xpcshell_ini
        and guess_doc(abs_test) == "js"
    ):
        guessed_suite = "xpcshell"
    else:
        if filename.startswith("browser_") and has_browser_ini:
            guessed_suite = "mochitest-browser-chrome"
        elif filename.startswith("test_"):
            if has_chrome_ini and has_plain_ini:
                err = (
                    "Error: directory contains both a chrome.ini and mochitest.ini. "
                    "Please set --suite=mochitest-chrome or --suite=mochitest-plain."
                )
            elif has_chrome_ini:
                guessed_suite = "mochitest-chrome"
            elif has_plain_ini:
                guessed_suite = "mochitest-plain"

    return guessed_suite, err


@Command(
    "test",
    category="testing",
    description="Run tests (detects the kind of test and runs it).",
    parser=get_test_parser,
)
def test(command_context, what, extra_args, **log_args):
    """Run tests from names or paths.

    mach test accepts arguments specifying which tests to run. Each argument
    can be:

    * The path to a test file
    * A directory containing tests
    * A test suite name
    * An alias to a test suite name (codes used on TreeHerder)

    When paths or directories are given, they are first resolved to test
    files known to the build system.

    If resolved tests belong to more than one test type/flavor/harness,
    the harness for each relevant type/flavor will be invoked. E.g. if
    you specify a directory with xpcshell and browser chrome mochitests,
    both harnesses will be invoked.

    Warning: `mach test` does not automatically re-build.
    Please remember to run `mach build` when necessary.

    EXAMPLES

    Run all test files in the devtools/client/shared/redux/middleware/xpcshell/
    directory:

    `./mach test devtools/client/shared/redux/middleware/xpcshell/`

    The command below prints a short summary of results instead of the
    default, more verbose output.
    Do not forget the - (minus sign) after --log-grouped!

    `./mach test --log-grouped - devtools/client/shared/redux/middleware/xpcshell/`

    To learn more about arguments for each test type/flavor/harness, please run
    `./mach <test-harness> --help`. For example, `./mach mochitest --help`.
    """
    from mozlog.commandline import setup_logging
    from mozlog.handlers import StreamHandler
    from moztest.resolve import TEST_SUITES, TestResolver, get_suite_definition

    resolver = command_context._spawn(TestResolver)
    run_suites, run_tests = resolver.resolve_metadata(what)
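
    # The requested `what` resolves into whole suites (dispatched by name
    # below) and individual test objects (grouped by flavor/subsuite).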
    if not run_suites and not run_tests:
        print(UNKNOWN_TEST)
        return 1

    if log_args.get("debugger", None):
        import mozdebug

        if not mozdebug.get_debugger_info(log_args.get("debugger")):
            sys.exit(1)
        extra_args_debugger_notation = "=".join(
            ["--debugger", log_args.get("debugger")]
        )
        if extra_args:
            extra_args.append(extra_args_debugger_notation)
        else:
            extra_args = [extra_args_debugger_notation]

    # Create shared logger
    format_args = {"level": command_context._mach_context.settings["test"]["level"]}
    if not run_suites and len(run_tests) == 1:
        format_args["verbose"] = True
        format_args["compact"] = False

    default_format = command_context._mach_context.settings["test"]["format"]
    log = setup_logging(
        "mach-test", log_args, {default_format: sys.stdout}, format_args
    )
    for handler in log.handlers:
        if isinstance(handler, StreamHandler):
            handler.formatter.inner.summary_on_shutdown = True

    status = None
    for suite_name in run_suites:
        suite = TEST_SUITES[suite_name]
        kwargs = suite["kwargs"]
        kwargs["log"] = log
        kwargs.setdefault("subsuite", None)

        if "mach_command" in suite:
            res = command_context._mach_context.commands.dispatch(
                suite["mach_command"],
                command_context._mach_context,
                argv=extra_args,
                **kwargs,
            )
            if res:
                status = res
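
    # Group the individually-resolved tests by (flavor, subsuite) so that each
    # harness is dispatched once with the full list of tests it owns.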
    buckets = {}
    for test in run_tests:
        key = (test["flavor"], test.get("subsuite", ""))
        buckets.setdefault(key, []).append(test)

    for (flavor, subsuite), tests in sorted(buckets.items()):
        _, m = get_suite_definition(flavor, subsuite)
        if "mach_command" not in m:
            substr = "-{}".format(subsuite) if subsuite else ""
            print(UNKNOWN_FLAVOR % (flavor, substr))
            status = 1
            continue

        kwargs = dict(m["kwargs"])
        kwargs["log"] = log
        kwargs.setdefault("subsuite", None)

        res = command_context._mach_context.commands.dispatch(
            m["mach_command"],
            command_context._mach_context,
            argv=extra_args,
            test_objects=tests,
            **kwargs,
        )
        if res:
            status = res

    log.shutdown()
    return status


@Command(
    "cppunittest", category="testing", description="Run cpp unit tests (C++ tests)."
)
@CommandArgument(
    "test_files",
    nargs="*",
    metavar="N",
    help="Test to run. Can be specified as one or more files or "
    "directories, or omitted. If omitted, the entire test suite is "
    "executed.",
)
def run_cppunit_test(command_context, **params):
    from mozlog import commandline

    log = params.get("log")
    if not log:
        log = commandline.setup_logging("cppunittest", {}, {"tbpl": sys.stdout})

    # See if we have crash symbols
    symbols_path = os.path.join(command_context.distdir, "crashreporter-symbols")
    if not os.path.isdir(symbols_path):
        symbols_path = None

    # If no tests specified, run all tests in main manifest
    tests = params["test_files"]
    if not tests:
        tests = [os.path.join(command_context.distdir, "cppunittests")]
        manifest_path = os.path.join(
            command_context.topsrcdir, "testing", "cppunittest.ini"
        )
    else:
        manifest_path = None

    utility_path = command_context.bindir

    if conditions.is_android(command_context):
        from mozrunner.devices.android_device import (
            InstallIntent,
            verify_android_device,
        )

        verify_android_device(command_context, install=InstallIntent.NO)
        return run_android_test(command_context, tests, symbols_path, manifest_path, log)

    return run_desktop_test(
        command_context, tests, symbols_path, manifest_path, utility_path, log
    )


def run_desktop_test(
    command_context, tests, symbols_path, manifest_path, utility_path, log
):
    import runcppunittests as cppunittests
    from mozlog import commandline

    parser = cppunittests.CPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.utility_path = utility_path
    options.xre_path = command_context.bindir

    try:
        result = cppunittests.run_test_harness(options, tests)
    except Exception as e:
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        result = False
        raise

    return 0 if result else 1
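

# Android flavor of the cppunittest runner: drives the same tests on a device
# through remotecppunittests, locating adb and the fennec APK from the build.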
def run_android_test(command_context, tests, symbols_path, manifest_path, log):
    import remotecppunittests
    from mozlog import commandline

    parser = remotecppunittests.RemoteCPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    if not options.adb_path:
        from mozrunner.devices.android_device import get_adb_path

        options.adb_path = get_adb_path(command_context)
    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.xre_path = command_context.bindir
    options.local_lib = command_context.bindir.replace("bin", "fennec")
    for file in os.listdir(os.path.join(command_context.topobjdir, "dist")):
        if file.endswith(".apk") and file.startswith("fennec"):
            options.local_apk = os.path.join(command_context.topobjdir, "dist", file)
            log.info("using APK: " + options.local_apk)
            break

    try:
        result = remotecppunittests.run_test_harness(options, tests)
    except Exception as e:
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        result = False
        raise

    return 0 if result else 1
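

# Test binaries are invoked directly, so on Windows we need the ".exe" suffix.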
def executable_name(name):
    return name + ".exe" if sys.platform.startswith("win") else name


@Command(
    "jstests",
    category="testing",
    description="Run SpiderMonkey JS tests in the JS shell.",
    ok_if_tests_disabled=True,
)
@CommandArgument("--shell", help="The shell to be used")
@CommandArgument(
    "params",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
)
def run_jstests(command_context, shell, params):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    jstest_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "js", "src", "tests", "jstests.py"),
        js,
    ] + params

    return subprocess.call(jstest_cmd)
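

# jit-test mirrors jstests but drives js/src/jit-test/jit_test.py; --cgc
# reproduces the SM(cgc) CI job by forcing incremental GC zeal in the env.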
@Command(
    "jit-test",
    category="testing",
    description="Run SpiderMonkey jit-tests in the JS shell.",
    ok_if_tests_disabled=True,
)
@CommandArgument("--shell", help="The shell to be used")
@CommandArgument(
    "--cgc",
    action="store_true",
    default=False,
    help="Run with the SM(cgc) job's env vars",
)
@CommandArgument(
    "params",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
)
def run_jittests(command_context, shell, cgc, params):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    jittest_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "js", "src", "jit-test", "jit_test.py"),
        js,
    ] + params

    env = os.environ.copy()
    if cgc:
        env["JS_GC_ZEAL"] = "IncrementalMultipleSlices"

    return subprocess.call(jittest_cmd, env=env)


@Command("jsapi-tests", category="testing", description="Run SpiderMonkey JSAPI tests.")
@CommandArgument(
    "--list",
    action="store_true",
    default=False,
    help="List all tests",
)
@CommandArgument(
    "--frontend-only",
    action="store_true",
    default=False,
    help="Run tests for frontend-only APIs, with light-weight entry point",
)
@CommandArgument(
    "test_name",
    nargs="?",
    metavar="N",
    help="Test to run. Can be a prefix or omitted. If "
    "omitted, the entire test suite is executed.",
)
def run_jsapitests(command_context, list=False, frontend_only=False, test_name=None):
    jsapi_tests_cmd = [
        os.path.join(command_context.bindir, executable_name("jsapi-tests"))
    ]
    if list:
        jsapi_tests_cmd.append("--list")
    if frontend_only:
        jsapi_tests_cmd.append("--frontend-only")
    if test_name:
        jsapi_tests_cmd.append(test_name)

    test_env = os.environ.copy()
    test_env["TOPSRCDIR"] = command_context.topsrcdir

    result = subprocess.call(jsapi_tests_cmd, env=test_env)
    if result != 0:
        print(f"jsapi-tests failed, exit code {result}")
    return result


def run_check_js_msg(command_context):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    check_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "config", "check_js_msg_encoding.py"),
    ]

    return subprocess.call(check_cmd)


def get_jsshell_parser():
    from jsshell.benchmark import get_parser

    return get_parser()


@Command(
    "jsshell-bench",
    category="testing",
    parser=get_jsshell_parser,
    description="Run benchmarks in the SpiderMonkey JS shell.",
)
def run_jsshelltests(command_context, **kwargs):
    from jsshell import benchmark

    return benchmark.run(**kwargs)


@Command(
    "cramtest",
    category="testing",
    description="Mercurial-style .t tests for command line applications.",
)
@CommandArgument(
    "test_paths",
    nargs="*",
    metavar="N",
    help="Test paths to run. Each path can be a test file or directory. "
    "If omitted, the entire suite will be run.",
)
@CommandArgument(
    "cram_args",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the cram binary. See "
    "'./mach python -m cram -- -h' for a list of available options.",
)
def cramtest(command_context, cram_args=None, test_paths=None, test_objects=None):
    command_context.activate_virtualenv()
    import mozinfo
    from manifestparser import TestManifest

    if test_objects is None:
        from moztest.resolve import TestResolver

        resolver = command_context._spawn(TestResolver)
        if test_paths:
            # If we were given test paths, try to find tests matching them.
            test_objects = resolver.resolve_tests(paths=test_paths, flavor="cram")
        else:
            # Otherwise just run everything in CRAMTEST_MANIFESTS.
            test_objects = resolver.resolve_tests(flavor="cram")

    if not test_objects:
        message = "No tests were collected; check the spelling of the test paths."
        command_context.log(logging.WARN, "cramtest", {}, message)
        return 1

    mp = TestManifest()
    mp.tests.extend(test_objects)
    tests = mp.active_tests(disabled=False, **mozinfo.info)

    python = command_context.virtualenv_manager.python_path
    # Guard against a None remainder when the command is invoked directly.
    cmd = [python, "-m", "cram"] + (cram_args or []) + [t["relpath"] for t in tests]
    return subprocess.call(cmd, cwd=command_context.topsrcdir)
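

# date/timedelta are used by the test-info subcommands below to build the
# default --start/--end windows (7 or 30 days back from today).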
from datetime import date, timedelta


@Command(
    "test-info", category="testing", description="Display historical test results."
)
def test_info(command_context):
    """
    All functions implemented as subcommands.
    """


@SubCommand(
    "test-info",
    "tests",
    description="Display historical test result summary for named tests.",
)
@CommandArgument("test_names", nargs=argparse.REMAINDER, help="Test(s) of interest.")
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(7)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
@CommandArgument(
    "--show-info",
    action="store_true",
    help="Retrieve and display general test information.",
)
@CommandArgument(
    "--show-bugs",
    action="store_true",
    help="Retrieve and display related Bugzilla bugs.",
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_info_tests(
    command_context,
    test_names,
    start,
    end,
    show_info,
    show_bugs,
    verbose,
):
    import testinfo

    ti = testinfo.TestInfoTests(verbose)
    ti.report(
        test_names,
        start,
        end,
        show_info,
        show_bugs,
    )


@SubCommand(
    "test-info",
    "report",
    description="Generate a json report of test manifests and/or tests "
    "categorized by Bugzilla component and optionally filtered "
    "by path, component, and/or manifest annotations.",
)
@CommandArgument(
    "--components",
    default=None,
    help="Comma-separated list of Bugzilla components."
    " eg. Testing::General,Core::WebVR",
)
@CommandArgument(
    "--flavor",
    help='Limit results to tests of the specified flavor (eg. "xpcshell").',
)
@CommandArgument(
    "--subsuite",
    help='Limit results to tests of the specified subsuite (eg. "devtools").',
)
@CommandArgument(
    "paths", nargs=argparse.REMAINDER, help="File system paths of interest."
)
@CommandArgument(
    "--show-manifests",
    action="store_true",
    help="Include test manifests in report.",
)
@CommandArgument(
    "--show-tests", action="store_true", help="Include individual tests in report."
)
@CommandArgument(
    "--show-summary", action="store_true", help="Include summary in report."
)
@CommandArgument(
    "--show-annotations",
    action="store_true",
    help="Include list of manifest annotation conditions in report.",
)
@CommandArgument(
    "--show-testruns",
    action="store_true",
    help="Include total number of runs the test has if there are failures.",
)
@CommandArgument(
    "--filter-values",
    help="Comma-separated list of value regular expressions to filter on; "
    "displayed tests contain all specified values.",
)
@CommandArgument(
    "--filter-keys",
    help="Comma-separated list of test keys to filter on, "
    'like "skip-if"; only these fields will be searched '
    "for filter-values.",
)
@CommandArgument(
    "--no-component-report",
    action="store_false",
    dest="show_components",
    default=True,
    help="Do not categorize by bugzilla component.",
)
@CommandArgument("--output-file", help="Path to report file.")
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
def test_report(
    command_context,
    components,
    flavor,
    subsuite,
    paths,
    show_manifests,
    show_tests,
    show_summary,
    show_annotations,
    filter_values,
    filter_keys,
    show_components,
    output_file,
    verbose,
    start,
    end,
    show_testruns,
):
    import testinfo
    from mozbuild import build_commands

    try:
        command_context.config_environment
    except BuildEnvironmentNotFoundException:
        print("Looks like configure has not run yet; running it now...")
        build_commands.configure(command_context)

    ti = testinfo.TestInfoReport(verbose)
    ti.report(
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        filter_values,
        filter_keys,
        show_components,
        output_file,
        start,
        end,
        show_testruns,
    )


@SubCommand(
    "test-info",
    "report-diff",
    description='Compare two reports generated by "test-info reports".',
)
@CommandArgument(
    "--before",
    default=None,
    help="The first (earlier) report file; path to local file or url.",
)
@CommandArgument(
    "--after", help="The second (later) report file; path to local file or url."
)
@CommandArgument(
    "--output-file",
    help="Path to report file to be written. If not specified, the report "
    "will be written to standard output.",
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_report_diff(command_context, before, after, output_file, verbose):
    import testinfo

    ti = testinfo.TestInfoReport(verbose)
    ti.report_diff(before, after, output_file)


@SubCommand(
    "test-info",
    "testrun-report",
    description="Generate a report of the number of runs for each test group "
    "(manifest).",
)
@CommandArgument("--output-file", help="Path to report file.")
def test_info_testrun_report(command_context, output_file):
    import json

    import testinfo

    ti = testinfo.TestInfoReport(verbose=True)
    runcounts = ti.get_runcounts()
    if output_file:
        output_file = os.path.abspath(output_file)
        output_dir = os.path.dirname(output_file)
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)
        with open(output_file, "w") as f:
            json.dump(runcounts, f)
    else:
        print(runcounts)


@SubCommand(
    "test-info",
    "failure-report",
    description="Display failure line groupings and frequencies for "
    "single tracking intermittent bugs.",
)
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
@CommandArgument(
    "--bugid",
    default=None,
    help="bugid for treeherder intermittent failures data query.",
)
def test_info_failures(
    command_context,
    start,
    end,
    bugid,
):
    # bugid comes in as a string (or None); we need an int:
    try:
        bugid = int(bugid)
    except (TypeError, ValueError):
        bugid = None
    if not bugid:
        print("Please enter a valid bugid (e.g. '1760132')")
        return

    # get bug info
    url = (
        "https://bugzilla.mozilla.org/rest/bug?include_fields=summary,depends_on&id=%s"
        % bugid
    )
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    if r.status_code != 200:
        print("%s error retrieving url: %s" % (r.status_code, url))

    data = r.json()
    if not data:
        print("unable to get bugzilla information for %s" % bugid)
        return

    summary = data["bugs"][0]["summary"]
    parts = summary.split("|")
    if not summary.endswith("single tracking bug") or len(parts) != 2:
        print("this query only works with single tracking bugs")
        return

    # get depends_on bugs:
    buglist = [bugid]
    if "depends_on" in data["bugs"][0]:
        buglist.extend(data["bugs"][0]["depends_on"])
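
    # Single-tracking-bug summaries look like
    # "Intermittent <path/to/test> | single tracking bug" (verified above),
    # so the test name is the last whitespace-separated token before the "|".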
    testname = parts[0].strip().split(" ")[-1]

    # now query treeherder to get details about annotations
    data = []
    for b in buglist:
        url = "https://treeherder.mozilla.org/api/failuresbybug/"
        url += "?startday=%s&endday=%s&tree=trunk&bug=%s" % (start, end, b)
        r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
        r.raise_for_status()

        bdata = r.json()
        data.extend(bdata)

    if len(data) == 0:
        print("no failures were found for given bugid, please ensure bug is")
        print("accessible via: https://treeherder.mozilla.org/intermittent-failures")
        return

    # query VCS to get current list of variants:
    import yaml

    url = "https://hg.mozilla.org/mozilla-central/raw-file/tip/taskcluster/ci/test/variants.yml"
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    variants = yaml.safe_load(r.text)

    print(
        "\nQuerying data for bug %s annotated from %s to %s on trunk.\n\n"
        % (buglist, start, end)
    )
    jobs = {}
    lines = {}
    for failure in data:
        # config = platform/buildtype
        # testsuite (<suite>[-variant][-<chunk>])
        # lines - group by patterns that contain test name
        config = "%s/%s" % (failure["platform"], failure["build_type"])

        variant = ""
        suite = ""
        varpos = len(failure["test_suite"])
        for v in variants.keys():
            var = "-%s" % variants[v]["suffix"]
            if var in failure["test_suite"]:
                if failure["test_suite"].find(var) < varpos:
                    variant = var
                    varpos = failure["test_suite"].find(var)

        if variant:
            suite = failure["test_suite"].split(variant)[0]
        else:
            parts = failure["test_suite"].split("-")
            try:
                int(parts[-1])
                suite = "-".join(parts[:-1])
            except ValueError:
                pass  # if int() worked, the last '-X' was a chunk number

        if suite == "":
            print("Error: failure to find variant in %s" % failure["test_suite"])

        job = "%s-%s%s" % (config, suite, variant)
        if job not in jobs.keys():
            jobs[job] = 0
        jobs[job] += 1

        # lines - sum(hash) of all lines where we match testname
        hvalue = 0
        for line in failure["lines"]:
            if len(line.split(testname)) <= 1:
                continue
            # strip off timestamp and mozharness status
            parts = line.split("TEST-UNEXPECTED")
            failure_line = "TEST-UNEXPECTED%s" % parts[-1]

            # only keep 25 characters of the failure; longer text is often
            # random numbers
            parts = failure_line.split(testname)
            failure_line = "%s%s%s" % (parts[0], testname, parts[1][:25])

            hvalue += hash(failure_line)

        if not failure["lines"]:
            hvalue = 1

        if not hvalue:
            continue

        if hvalue not in lines.keys():
            lines[hvalue] = {"lines": failure["lines"], "config": []}
        lines[hvalue]["config"].append(job)
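
    # Report each distinct failure signature, the failure lines behind it, and
    # a per-job breakdown of how often it was annotated.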
    for h in lines.keys():
        print("%s errors with:" % (len(lines[h]["config"])))
        if lines[h]["lines"]:
            for failure_line in lines[h]["lines"]:
                print(failure_line)
        else:
            print(
                "... no failure lines recorded in"
                " https://treeherder.mozilla.org/intermittent-failures ..."
            )

        for job in jobs:
            count = len([x for x in lines[h]["config"] if x == job])
            if count > 0:
                print("    %s: %s" % (job, count))
        print("")


@Command(
    "rusttests",
    category="testing",
    conditions=[conditions.is_non_artifact_build],
    description="Run rust unit tests (via cargo test).",
)
def run_rusttests(command_context, **kwargs):
    return command_context._mach_context.commands.dispatch(
        "build",
        command_context._mach_context,
        what=["pre-export", "export", "recurse_rusttests"],
    )


@Command(
    "fluent-migration-test",
    category="testing",
    description="Test Fluent migration recipes.",
)
@CommandArgument("test_paths", nargs="*", metavar="N", help="Recipe paths to test.")
def run_migration_tests(command_context, test_paths=None, **kwargs):
    if not test_paths:
        test_paths = []
    command_context.activate_virtualenv()
    from test_fluent_migrations import fmt
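
    # First pass: inspect each recipe and keep the ones that parse cleanly;
    # second pass: run the kept migrations against a prepared object dir.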
    rv = 0
    with_context = []
    for to_test in test_paths:
        try:
            context = fmt.inspect_migration(to_test)
            for issue in context["issues"]:
                command_context.log(
                    logging.ERROR,
                    "fluent-migration-test",
                    {
                        "error": issue["msg"],
                        "file": to_test,
                    },
                    "ERROR in {file}: {error}",
                )
            if context["issues"]:
                continue
            with_context.append(
                {
                    "to_test": to_test,
                    "references": context["references"],
                }
            )
        except Exception as e:
            command_context.log(
                logging.ERROR,
                "fluent-migration-test",
                {"error": str(e), "file": to_test},
                "ERROR in {file}: {error}",
            )
            rv |= 1
    obj_dir = fmt.prepare_object_dir(command_context)
    for context in with_context:
        rv |= fmt.test_migration(command_context, obj_dir, **context)
    return rv