# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import argparse
import logging
import os
import subprocess
import sys

import requests
from mach.decorators import Command, CommandArgument, SettingsProvider, SubCommand
from mozbuild.base import BuildEnvironmentNotFoundException
from mozbuild.base import MachCommandConditions as conditions

UNKNOWN_TEST = """
I was unable to find tests from the given argument(s).

You should specify a test directory, filename, test suite name, or
abbreviation.

It's possible my little brain doesn't know about the type of test you are
trying to execute. If you suspect this, please request support by filing
a bug at
https://bugzilla.mozilla.org/enter_bug.cgi?product=Testing&component=General.
""".strip()

UNKNOWN_FLAVOR = """
I know you are trying to run a %s%s test. Unfortunately, I can't run those
tests yet. Sorry!
""".strip()

TEST_HELP = """
Test or tests to run. Tests can be specified by filename, directory, suite
name or suite alias.

The following test suites and aliases are supported: {}
""".strip()


@SettingsProvider
class TestConfig(object):
    @classmethod
    def config_settings(cls):
        from mozlog.commandline import log_formatters
        from mozlog.structuredlog import log_levels

        format_desc = "The default format to use when running tests with `mach test`."
        format_choices = list(log_formatters)
        level_desc = "The default log level to use when running tests with `mach test`."
        level_choices = [l.lower() for l in log_levels]
        return [
            ("test.format", "string", format_desc, "mach", {"choices": format_choices}),
            ("test.level", "string", level_desc, "info", {"choices": level_choices}),
        ]


def get_test_parser():
    from mozlog.commandline import add_logging_group
    from moztest.resolve import TEST_SUITES

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "what",
        default=None,
        nargs="+",
        help=TEST_HELP.format(", ".join(sorted(TEST_SUITES))),
    )
    parser.add_argument(
        "extra_args",
        default=None,
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass to the underlying test command(s). "
        "If an underlying command doesn't recognize the argument, it "
        "will fail.",
    )
    parser.add_argument(
        "--debugger",
        default=None,
        action="store",
        nargs="?",
        help="Specify a debugger to use.",
    )
    add_logging_group(parser)
    return parser


ADD_TEST_SUPPORTED_SUITES = [
    "mochitest-chrome",
    "mochitest-plain",
    "mochitest-browser-chrome",
    "web-platform-tests-testharness",
    "web-platform-tests-reftest",
    "xpcshell",
]
ADD_TEST_SUPPORTED_DOCS = ["js", "html", "xhtml", "xul"]

SUITE_SYNONYMS = {
    "wpt": "web-platform-tests-testharness",
    "wpt-testharness": "web-platform-tests-testharness",
    "wpt-reftest": "web-platform-tests-reftest",
}
MISSING_ARG = object()


def create_parser_addtest():
    import addtest

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--suite",
        choices=sorted(ADD_TEST_SUPPORTED_SUITES + list(SUITE_SYNONYMS.keys())),
        help="Suite for the test. "
        "If you pass a `test` argument this will be determined "
        "based on the filename and the folder it is in.",
    )
    parser.add_argument(
        "-o",
        "--overwrite",
        action="store_true",
        help="Overwrite an existing file if it exists.",
    )
    parser.add_argument(
        "--doc",
        choices=ADD_TEST_SUPPORTED_DOCS,
        help="Document type for the test (if applicable). "
        "If you pass a `test` argument this will be determined "
        "based on the filename.",
    )
    parser.add_argument(
        "-e",
        "--editor",
        action="store",
        nargs="?",
        default=MISSING_ARG,
        help="Open the created file(s) in an editor; if a "
        "binary is supplied it will be used, otherwise the default editor for "
        "your environment will be opened.",
    )

    for base_suite in addtest.TEST_CREATORS:
        cls = addtest.TEST_CREATORS[base_suite]
        if hasattr(cls, "get_parser"):
            group = parser.add_argument_group(base_suite)
            cls.get_parser(group)

    parser.add_argument("test", nargs="?", help="Test to create.")
    return parser
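

# Example (illustrative; the path is hypothetical):
#   ./mach addtest dom/base/test/test_example.html
# infers the suite from the manifest files next to the new test and the doc
# type from the file extension, then writes a templated test file there.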


@Command(
    "addtest",
    category="testing",
    description="Generate tests based on templates",
    parser=create_parser_addtest,
)
def addtest(
    command_context,
    suite=None,
    test=None,
    doc=None,
    overwrite=False,
    editor=MISSING_ARG,
    **kwargs,
):
    import io

    import addtest
    from moztest.resolve import TEST_SUITES

    if not suite and not test:
        return create_parser_addtest().parse_args(["--help"])

    if suite in SUITE_SYNONYMS:
        suite = SUITE_SYNONYMS[suite]

    if test:
        if not overwrite and os.path.isfile(os.path.abspath(test)):
            print("Error: can't generate a test that already exists:", test)
            return 1

        abs_test = os.path.abspath(test)
        if doc is None:
            doc = guess_doc(abs_test)
        if suite is None:
            guessed_suite, err = guess_suite(abs_test)
            if err:
                print(err)
                return 1
            suite = guessed_suite

    else:
        test = None
        if doc is None:
            doc = "html"

    if not suite:
        print(
            "We couldn't automatically determine a suite. "
            "Please specify `--suite` with one of the following options:\n{}\n"
            "If you'd like to add support to a new suite, please file a bug "
            "blocking https://bugzilla.mozilla.org/show_bug.cgi?id=1540285.".format(
                ADD_TEST_SUPPORTED_SUITES
            )
        )
        return 1

    if doc not in ADD_TEST_SUPPORTED_DOCS:
        print(
            "Error: invalid `doc`. Either pass in a test with a valid extension "
            "({}) or pass in the `doc` argument.".format(ADD_TEST_SUPPORTED_DOCS)
        )
        return 1

    creator_cls = addtest.creator_for_suite(suite)

    if creator_cls is None:
        print("Sorry, `addtest` doesn't currently know how to add {}".format(suite))
        return 1

    creator = creator_cls(command_context.topsrcdir, test, suite, doc, **kwargs)

    creator.check_args()

    paths = []
    added_tests = False
    for path, template in creator:
        if not template:
            continue
        added_tests = True
        if path:
            paths.append(path)
            print("Adding a test file at {} (suite `{}`)".format(path, suite))

            try:
                os.makedirs(os.path.dirname(path))
            except OSError:
                pass

            with io.open(path, "w", newline="\n") as f:
                f.write(template)
        else:
            # write to stdout if you passed only suite and doc and not a file path
            print(template)

    if not added_tests:
        return 1

    if test:
        creator.update_manifest()

        # Small hack, should really do this better
        if suite.startswith("wpt-"):
            suite = "web-platform-tests"

        mach_command = TEST_SUITES[suite]["mach_command"]
        print(
            "Please make sure to add the new test to your commit. "
            "You can now run the test with:\n    ./mach {} {}".format(
                mach_command, test
            )
        )

    if editor is not MISSING_ARG:
        if editor is not None:
            pass  # use the binary supplied with --editor
        elif "VISUAL" in os.environ:
            editor = os.environ["VISUAL"]
        elif "EDITOR" in os.environ:
            editor = os.environ["EDITOR"]
        else:
            print("Unable to determine editor; please specify a binary")
            editor = None

        proc = None
        if editor:
            import subprocess

            proc = subprocess.Popen("%s %s" % (editor, " ".join(paths)), shell=True)

        if proc:
            proc.wait()

    return 0


def guess_doc(abs_test):
    filename = os.path.basename(abs_test)
    return os.path.splitext(filename)[1].strip(".")


def guess_suite(abs_test):
    # If we were given an abs_test, try to detect the type based on the name
    # and folder. This detection can be skipped if you pass the `type` arg.
    err = None
    guessed_suite = None
    parent = os.path.dirname(abs_test)
    filename = os.path.basename(abs_test)

    has_browser_ini = os.path.isfile(os.path.join(parent, "browser.ini"))
    has_chrome_ini = os.path.isfile(os.path.join(parent, "chrome.ini"))
    has_plain_ini = os.path.isfile(os.path.join(parent, "mochitest.ini"))
    has_xpcshell_ini = os.path.isfile(os.path.join(parent, "xpcshell.ini"))

    in_wpt_folder = abs_test.startswith(
        os.path.abspath(os.path.join("testing", "web-platform"))
    )

    if in_wpt_folder:
        guessed_suite = "web-platform-tests-testharness"
        if "/css/" in abs_test:
            guessed_suite = "web-platform-tests-reftest"
    elif (
        filename.startswith("test_")
        and has_xpcshell_ini
        and guess_doc(abs_test) == "js"
    ):
        guessed_suite = "xpcshell"
    else:
        if filename.startswith("browser_") and has_browser_ini:
            guessed_suite = "mochitest-browser-chrome"
        elif filename.startswith("test_"):
            if has_chrome_ini and has_plain_ini:
                err = (
                    "Error: directory contains both a chrome.ini and mochitest.ini. "
                    "Please set --suite=mochitest-chrome or --suite=mochitest-plain."
                )
            elif has_chrome_ini:
                guessed_suite = "mochitest-chrome"
            elif has_plain_ini:
                guessed_suite = "mochitest-plain"
    return guessed_suite, err
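

# Illustrative behaviour of the helpers above (paths hypothetical): for
# "browser_foo.js" sitting next to a browser.ini manifest, guess_suite()
# returns ("mochitest-browser-chrome", None), while guess_doc("test_bar.html")
# returns "html".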


@Command(
    "test",
    category="testing",
    description="Run tests (detects the kind of test and runs it).",
    parser=get_test_parser,
)
def test(command_context, what, extra_args, **log_args):
    """Run tests from names or paths.

    mach test accepts arguments specifying which tests to run. Each argument
    can be:

    * The path to a test file
    * A directory containing tests
    * A test suite name
    * An alias to a test suite name (codes used on TreeHerder)

    When paths or directories are given, they are first resolved to test
    files known to the build system.

    If resolved tests belong to more than one test type/flavor/harness,
    the harness for each relevant type/flavor will be invoked. E.g. if
    you specify a directory with xpcshell and browser chrome mochitests,
    both harnesses will be invoked.

    Warning: `mach test` does not automatically re-build.
    Please remember to run `mach build` when necessary.

    EXAMPLES

    Run all test files in the devtools/client/shared/redux/middleware/xpcshell/
    directory:

    `./mach test devtools/client/shared/redux/middleware/xpcshell/`

    The command below prints a short summary of results instead of
    the default, more verbose output.
    Do not forget the - (minus sign) after --log-grouped!

    `./mach test --log-grouped - devtools/client/shared/redux/middleware/xpcshell/`

    To learn more about arguments for each test type/flavor/harness, please run
    `./mach <test-harness> --help`. For example, `./mach mochitest --help`.
    """
    from mozlog.commandline import setup_logging
    from mozlog.handlers import StreamHandler
    from moztest.resolve import TEST_SUITES, TestResolver, get_suite_definition

    resolver = command_context._spawn(TestResolver)
    run_suites, run_tests = resolver.resolve_metadata(what)

    if not run_suites and not run_tests:
        print(UNKNOWN_TEST)
        return 1

    if log_args.get("debugger", None):
        import mozdebug

        if not mozdebug.get_debugger_info(log_args.get("debugger")):
            sys.exit(1)
        extra_args_debugger_notation = "=".join(
            ["--debugger", log_args.get("debugger")]
        )
        if extra_args:
            extra_args.append(extra_args_debugger_notation)
        else:
            extra_args = [extra_args_debugger_notation]

    # Create shared logger
    format_args = {"level": command_context._mach_context.settings["test"]["level"]}
    if not run_suites and len(run_tests) == 1:
        format_args["verbose"] = True
        format_args["compact"] = False

    default_format = command_context._mach_context.settings["test"]["format"]
    log = setup_logging(
        "mach-test", log_args, {default_format: sys.stdout}, format_args
    )
    for handler in log.handlers:
        if isinstance(handler, StreamHandler):
            handler.formatter.inner.summary_on_shutdown = True

    status = None
    for suite_name in run_suites:
        suite = TEST_SUITES[suite_name]
        kwargs = suite["kwargs"]
        kwargs["log"] = log
        kwargs.setdefault("subsuite", None)

        if "mach_command" in suite:
            res = command_context._mach_context.commands.dispatch(
                suite["mach_command"],
                command_context._mach_context,
                argv=extra_args,
                **kwargs,
            )
            if res:
                status = res
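
    # Group the individually resolved tests by (flavor, subsuite) so that each
    # harness gets dispatched once with the full list of tests it owns.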
    buckets = {}
    for test in run_tests:
        key = (test["flavor"], test.get("subsuite", ""))
        buckets.setdefault(key, []).append(test)

    for (flavor, subsuite), tests in sorted(buckets.items()):
        _, m = get_suite_definition(flavor, subsuite)
        if "mach_command" not in m:
            substr = "-{}".format(subsuite) if subsuite else ""
            print(UNKNOWN_FLAVOR % (flavor, substr))
            status = 1
            continue

        kwargs = dict(m["kwargs"])
        kwargs["log"] = log
        kwargs.setdefault("subsuite", None)

        res = command_context._mach_context.commands.dispatch(
            m["mach_command"],
            command_context._mach_context,
            argv=extra_args,
            test_objects=tests,
            **kwargs,
        )
        if res:
            status = res

    log.shutdown()
    return status


@Command(
    "cppunittest", category="testing", description="Run cpp unit tests (C++ tests)."
)
@CommandArgument(
    "test_files",
    nargs="*",
    metavar="N",
    help="Test to run. Can be specified as one or more files or "
    "directories, or omitted. If omitted, the entire test suite is "
    "executed.",
)
def run_cppunit_test(command_context, **params):
    from mozlog import commandline

    log = params.get("log")
    if not log:
        log = commandline.setup_logging("cppunittest", {}, {"tbpl": sys.stdout})

    # See if we have crash symbols
    symbols_path = os.path.join(command_context.distdir, "crashreporter-symbols")
    if not os.path.isdir(symbols_path):
        symbols_path = None

    # If no tests specified, run all tests in main manifest
    tests = params["test_files"]
    if not tests:
        tests = [os.path.join(command_context.distdir, "cppunittests")]
        manifest_path = os.path.join(
            command_context.topsrcdir, "testing", "cppunittest.ini"
        )
    else:
        manifest_path = None

    utility_path = command_context.bindir

    if conditions.is_android(command_context):
        from mozrunner.devices.android_device import (
            InstallIntent,
            verify_android_device,
        )

        verify_android_device(command_context, install=InstallIntent.NO)
        return run_android_test(
            command_context, tests, symbols_path, manifest_path, log
        )

    return run_desktop_test(
        command_context, tests, symbols_path, manifest_path, utility_path, log
    )


def run_desktop_test(
    command_context, tests, symbols_path, manifest_path, utility_path, log
):
    import runcppunittests as cppunittests
    from mozlog import commandline

    parser = cppunittests.CPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.utility_path = utility_path
    options.xre_path = command_context.bindir

    try:
        result = cppunittests.run_test_harness(options, tests)
    except Exception as e:
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        result = False
        raise

    return 0 if result else 1


def run_android_test(command_context, tests, symbols_path, manifest_path, log):
    import remotecppunittests
    from mozlog import commandline

    parser = remotecppunittests.RemoteCPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    if not options.adb_path:
        from mozrunner.devices.android_device import get_adb_path

        options.adb_path = get_adb_path(command_context)
    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.xre_path = command_context.bindir
    options.local_lib = command_context.bindir.replace("bin", "fennec")
    for file in os.listdir(os.path.join(command_context.topobjdir, "dist")):
        if file.endswith(".apk") and file.startswith("fennec"):
            options.local_apk = os.path.join(command_context.topobjdir, "dist", file)
            log.info("using APK: " + options.local_apk)
            break

    try:
        result = remotecppunittests.run_test_harness(options, tests)
    except Exception as e:
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        result = False
        raise

    return 0 if result else 1


def executable_name(name):
    return name + ".exe" if sys.platform.startswith("win") else name
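

# For example, executable_name("js") is "js.exe" on Windows and "js" elsewhere.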


@Command(
    "jstests",
    category="testing",
    description="Run SpiderMonkey JS tests in the JS shell.",
    ok_if_tests_disabled=True,
)
@CommandArgument("--shell", help="The shell to be used")
@CommandArgument(
    "params",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
)
def run_jstests(command_context, shell, params):
    import subprocess

    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    jstest_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "js", "src", "tests", "jstests.py"),
        js,
    ] + params

    return subprocess.call(jstest_cmd)


@Command(
    "jit-test",
    category="testing",
    description="Run SpiderMonkey jit-tests in the JS shell.",
    ok_if_tests_disabled=True,
)
@CommandArgument("--shell", help="The shell to be used")
@CommandArgument(
    "--cgc",
    action="store_true",
    default=False,
    help="Run with the SM(cgc) job's env vars",
)
@CommandArgument(
    "params",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
)
def run_jittests(command_context, shell, cgc, params):
    import subprocess

    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    jittest_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "js", "src", "jit-test", "jit_test.py"),
        js,
    ] + params

    env = os.environ.copy()
    if cgc:
        env["JS_GC_ZEAL"] = "IncrementalMultipleSlices"

    return subprocess.call(jittest_cmd, env=env)
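

# Note: "IncrementalMultipleSlices" above matches the GC zeal mode the SM(cgc)
# CI job runs with; the full list of zeal modes is documented in the JS engine
# sources (js/src/gc/GC.cpp at the time of writing).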
642 @Command("jsapi-tests", category="testing", description="Run SpiderMonkey JSAPI tests.")
643 @CommandArgument(
644 "test_name",
645 nargs="?",
646 metavar="N",
647 help="Test to run. Can be a prefix or omitted. If "
648 "omitted, the entire test suite is executed.",
650 def run_jsapitests(command_context, test_name=None):
651 import subprocess
653 jsapi_tests_cmd = [
654 os.path.join(command_context.bindir, executable_name("jsapi-tests"))
656 if test_name:
657 jsapi_tests_cmd.append(test_name)
659 test_env = os.environ.copy()
660 test_env["TOPSRCDIR"] = command_context.topsrcdir
662 result = subprocess.call(jsapi_tests_cmd, env=test_env)
663 if result != 0:
664 print(f"jsapi-tests failed, exit code {result}")
665 return result
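

# The positional argument is a prefix filter, e.g. `./mach jsapi-tests Parser`
# would run only tests whose names start with "Parser" (hypothetical prefix).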


def run_check_js_msg(command_context):
    import subprocess

    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    check_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "config", "check_js_msg_encoding.py"),
    ]

    return subprocess.call(check_cmd)


def get_jsshell_parser():
    from jsshell.benchmark import get_parser

    return get_parser()


@Command(
    "jsshell-bench",
    category="testing",
    parser=get_jsshell_parser,
    description="Run benchmarks in the SpiderMonkey JS shell.",
)
def run_jsshelltests(command_context, **kwargs):
    from jsshell import benchmark

    return benchmark.run(**kwargs)


@Command(
    "cramtest",
    category="testing",
    description="Mercurial style .t tests for command line applications.",
)
@CommandArgument(
    "test_paths",
    nargs="*",
    metavar="N",
    help="Test paths to run. Each path can be a test file or directory. "
    "If omitted, the entire suite will be run.",
)
@CommandArgument(
    "cram_args",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the cram binary. See "
    "'./mach python -m cram -- -h' for a list of available options.",
)
def cramtest(command_context, cram_args=None, test_paths=None, test_objects=None):
    command_context.activate_virtualenv()
    import mozinfo
    from manifestparser import TestManifest

    if test_objects is None:
        from moztest.resolve import TestResolver

        resolver = command_context._spawn(TestResolver)
        if test_paths:
            # If we were given test paths, try to find tests matching them.
            test_objects = resolver.resolve_tests(paths=test_paths, flavor="cram")
        else:
            # Otherwise just run everything in CRAMTEST_MANIFESTS
            test_objects = resolver.resolve_tests(flavor="cram")

    if not test_objects:
        message = "No tests were collected, check spelling of the test paths."
        command_context.log(logging.WARN, "cramtest", {}, message)
        return 1

    mp = TestManifest()
    mp.tests.extend(test_objects)
    tests = mp.active_tests(disabled=False, **mozinfo.info)

    python = command_context.virtualenv_manager.python_path
    # Guard against cram_args being None when this function is called directly.
    cmd = [python, "-m", "cram"] + (cram_args or []) + [t["relpath"] for t in tests]
    return subprocess.call(cmd, cwd=command_context.topsrcdir)
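

# Example (hypothetical path): `./mach cramtest testing/example/example.t`
# resolves the given path to tests with the "cram" flavor and runs them via
# `python -m cram` from the mach virtualenv.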


from datetime import date, timedelta


@Command(
    "test-info", category="testing", description="Display historical test results."
)
def test_info(command_context):
    """
    All functions implemented as subcommands.
    """


@SubCommand(
    "test-info",
    "tests",
    description="Display historical test result summary for named tests.",
)
@CommandArgument("test_names", nargs=argparse.REMAINDER, help="Test(s) of interest.")
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(7)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
@CommandArgument(
    "--show-info",
    action="store_true",
    help="Retrieve and display general test information.",
)
@CommandArgument(
    "--show-bugs",
    action="store_true",
    help="Retrieve and display related Bugzilla bugs.",
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_info_tests(
    command_context,
    test_names,
    start,
    end,
    show_info,
    show_bugs,
    verbose,
):
    import testinfo

    ti = testinfo.TestInfoTests(verbose)
    ti.report(
        test_names,
        start,
        end,
        show_info,
        show_bugs,
    )


@SubCommand(
    "test-info",
    "report",
    description="Generate a json report of test manifests and/or tests "
    "categorized by Bugzilla component and optionally filtered "
    "by path, component, and/or manifest annotations.",
)
@CommandArgument(
    "--components",
    default=None,
    help="Comma-separated list of Bugzilla components."
    " eg. Testing::General,Core::WebVR",
)
@CommandArgument(
    "--flavor",
    help='Limit results to tests of the specified flavor (eg. "xpcshell").',
)
@CommandArgument(
    "--subsuite",
    help='Limit results to tests of the specified subsuite (eg. "devtools").',
)
@CommandArgument(
    "paths", nargs=argparse.REMAINDER, help="File system paths of interest."
)
@CommandArgument(
    "--show-manifests",
    action="store_true",
    help="Include test manifests in report.",
)
@CommandArgument(
    "--show-tests", action="store_true", help="Include individual tests in report."
)
@CommandArgument(
    "--show-summary", action="store_true", help="Include summary in report."
)
@CommandArgument(
    "--show-annotations",
    action="store_true",
    help="Include list of manifest annotation conditions in report.",
)
@CommandArgument(
    "--show-testruns",
    action="store_true",
    help="Include total number of runs the test has if there are failures.",
)
@CommandArgument(
    "--filter-values",
    help="Comma-separated list of value regular expressions to filter on; "
    "displayed tests contain all specified values.",
)
@CommandArgument(
    "--filter-keys",
    help="Comma-separated list of test keys to filter on, "
    'like "skip-if"; only these fields will be searched '
    "for filter-values.",
)
@CommandArgument(
    "--no-component-report",
    action="store_false",
    dest="show_components",
    default=True,
    help="Do not categorize by bugzilla component.",
)
@CommandArgument("--output-file", help="Path to report file.")
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
def test_report(
    command_context,
    components,
    flavor,
    subsuite,
    paths,
    show_manifests,
    show_tests,
    show_summary,
    show_annotations,
    filter_values,
    filter_keys,
    show_components,
    output_file,
    verbose,
    start,
    end,
    show_testruns,
):
    import testinfo
    from mozbuild import build_commands

    try:
        command_context.config_environment
    except BuildEnvironmentNotFoundException:
        print("Looks like configure has not run yet, running it now...")
        build_commands.configure(command_context)

    ti = testinfo.TestInfoReport(verbose)
    ti.report(
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        filter_values,
        filter_keys,
        show_components,
        output_file,
        start,
        end,
        show_testruns,
    )


@SubCommand(
    "test-info",
    "report-diff",
    description='Compare two reports generated by "test-info reports".',
)
@CommandArgument(
    "--before",
    default=None,
    help="The first (earlier) report file; path to local file or url.",
)
@CommandArgument(
    "--after", help="The second (later) report file; path to local file or url."
)
@CommandArgument(
    "--output-file",
    help="Path to report file to be written. If not specified, report "
    "will be written to standard output.",
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_report_diff(command_context, before, after, output_file, verbose):
    import testinfo

    ti = testinfo.TestInfoReport(verbose)
    ti.report_diff(before, after, output_file)


@SubCommand(
    "test-info",
    "testrun-report",
    description="Generate report of number of runs for each test group (manifest)",
)
@CommandArgument("--output-file", help="Path to report file.")
def test_info_testrun_report(command_context, output_file):
    import json

    import testinfo

    ti = testinfo.TestInfoReport(verbose=True)
    runcounts = ti.get_runcounts()
    if output_file:
        output_file = os.path.abspath(output_file)
        output_dir = os.path.dirname(output_file)
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)
        with open(output_file, "w") as f:
            json.dump(runcounts, f)
    else:
        print(runcounts)


@SubCommand(
    "test-info",
    "failure-report",
    description="Display failure line groupings and frequencies for "
    "single tracking intermittent bugs.",
)
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
@CommandArgument(
    "--bugid",
    default=None,
    help="bugid for treeherder intermittent failures data query.",
)
def test_info_failures(
    command_context,
    start,
    end,
    bugid,
):
    # bugid comes in as a string (or None); we need an int:
    try:
        bugid = int(bugid)
    except (TypeError, ValueError):
        bugid = None
    if not bugid:
        print("Please enter a valid bugid (e.g. '1760132')")
        return

    # get bug info
    url = (
        "https://bugzilla.mozilla.org/rest/bug?include_fields=summary,depends_on&id=%s"
        % bugid
    )
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    if r.status_code != 200:
        print("%s error retrieving url: %s" % (r.status_code, url))

    data = r.json()
    if not data:
        print("unable to get bugzilla information for %s" % bugid)
        return

    summary = data["bugs"][0]["summary"]
    parts = summary.split("|")
    if not summary.endswith("single tracking bug") or len(parts) != 2:
        print("this query only works with single tracking bugs")
        return

    # get depends_on bugs:
    buglist = [bugid]
    if "depends_on" in data["bugs"][0]:
        buglist.extend(data["bugs"][0]["depends_on"])

    testname = parts[0].strip().split(" ")[-1]

    # now query treeherder to get details about annotations
    data = []
    for b in buglist:
        url = "https://treeherder.mozilla.org/api/failuresbybug/"
        url += "?startday=%s&endday=%s&tree=trunk&bug=%s" % (start, end, b)
        r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
        r.raise_for_status()

        bdata = r.json()
        data.extend(bdata)

    if len(data) == 0:
        print("no failures were found for given bugid, please ensure bug is")
        print("accessible via: https://treeherder.mozilla.org/intermittent-failures")
        return

    # query VCS to get current list of variants:
    import yaml

    url = "https://hg.mozilla.org/mozilla-central/raw-file/tip/taskcluster/ci/test/variants.yml"
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    variants = yaml.safe_load(r.text)

    print(
        "\nQuerying data for bug %s annotated from %s to %s on trunk.\n\n"
        % (buglist, start, end)
    )
    jobs = {}
    lines = {}
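
    # jobs: failure counts keyed by "config-suite-variant"; lines: failure-line
    # groupings keyed by a hash of the normalized failure lines.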
    for failure in data:
        # config = platform/buildtype
        # testsuite (<suite>[-variant][-<chunk>])
        # lines - group by patterns that contain test name
        config = "%s/%s" % (failure["platform"], failure["build_type"])

        variant = ""
        suite = ""
        varpos = len(failure["test_suite"])
        for v in variants.keys():
            var = "-%s" % variants[v]["suffix"]
            if var in failure["test_suite"]:
                if failure["test_suite"].find(var) < varpos:
                    variant = var

        if variant:
            suite = failure["test_suite"].split(variant)[0]
        else:
            # No variant; strip a trailing chunk number, if any.
            parts = failure["test_suite"].split("-")
            try:
                int(parts[-1])
                suite = "-".join(parts[:-1])
            except ValueError:
                pass  # the suite name doesn't end in a chunk number

        if suite == "":
            print("Error: failure to find variant in %s" % failure["test_suite"])

        job = "%s-%s%s" % (config, suite, variant)
        if job not in jobs.keys():
            jobs[job] = 0
        jobs[job] += 1

        # lines - sum(hash) of all lines where we match testname
        hvalue = 0
        for line in failure["lines"]:
            if len(line.split(testname)) <= 1:
                continue
            # strip off timestamp and mozharness status
            parts = line.split("TEST-UNEXPECTED")
            l = "TEST-UNEXPECTED%s" % parts[-1]

            # only keep 25 characters of the failure, often longer is random numbers
            parts = l.split(testname)
            l = "%s%s%s" % (parts[0], testname, parts[1][:25])

            hvalue += hash(l)

        # No failure lines at all: group these failures under the sentinel
        # hash value 1 rather than skipping them.
        if not failure["lines"]:
            hvalue = 1

        if not hvalue:
            continue

        if hvalue not in lines.keys():
            lines[hvalue] = {"lines": failure["lines"], "config": []}
        lines[hvalue]["config"].append(job)

    for h in lines.keys():
        print("%s errors with:" % (len(lines[h]["config"])))
        if lines[h]["lines"]:
            for l in lines[h]["lines"]:
                print(l)
        else:
            print(
                "... no failure lines recorded in"
                " https://treeherder.mozilla.org/intermittent-failures ..."
            )

        for job in jobs:
            count = len([x for x in lines[h]["config"] if x == job])
            if count > 0:
                print("  %s: %s" % (job, count))
        print("")


@Command(
    "rusttests",
    category="testing",
    conditions=[conditions.is_non_artifact_build],
    description="Run rust unit tests (via cargo test).",
)
def run_rusttests(command_context, **kwargs):
    return command_context._mach_context.commands.dispatch(
        "build",
        command_context._mach_context,
        what=["pre-export", "export", "recurse_rusttests"],
    )


@Command(
    "fluent-migration-test",
    category="testing",
    description="Test Fluent migration recipes.",
)
@CommandArgument("test_paths", nargs="*", metavar="N", help="Recipe paths to test.")
def run_migration_tests(command_context, test_paths=None, **kwargs):
    if not test_paths:
        test_paths = []
    command_context.activate_virtualenv()
    from test_fluent_migrations import fmt

    rv = 0
    with_context = []
    for to_test in test_paths:
        try:
            context = fmt.inspect_migration(to_test)
            for issue in context["issues"]:
                command_context.log(
                    logging.ERROR,
                    "fluent-migration-test",
                    {
                        "error": issue["msg"],
                        "file": to_test,
                    },
                    "ERROR in {file}: {error}",
                )
            if context["issues"]:
                continue
            with_context.append(
                {
                    "to_test": to_test,
                    "references": context["references"],
                }
            )
        except Exception as e:
            command_context.log(
                logging.ERROR,
                "fluent-migration-test",
                {"error": str(e), "file": to_test},
                "ERROR in {file}: {error}",
            )
            rv |= 1
    obj_dir = fmt.prepare_object_dir(command_context)
    for context in with_context:
        rv |= fmt.test_migration(command_context, obj_dir, **context)
    return rv