# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import argparse
import logging
import os
import subprocess
import sys

import requests
from mach.decorators import Command, CommandArgument, SettingsProvider, SubCommand
from mozbuild.base import BuildEnvironmentNotFoundException
from mozbuild.base import MachCommandConditions as conditions

UNKNOWN_TEST = """
I was unable to find tests from the given argument(s).

You should specify a test directory, filename, test suite name, or
abbreviation.

It's possible my little brain doesn't know about the type of test you are
trying to execute. If you suspect this, please request support by filing
a bug at
https://bugzilla.mozilla.org/enter_bug.cgi?product=Testing&component=General.
""".strip()

UNKNOWN_FLAVOR = """
I know you are trying to run a %s%s test. Unfortunately, I can't run those
tests yet. Sorry!
""".strip()

TEST_HELP = """
Test or tests to run. Tests can be specified by filename, directory, suite
name or suite alias.

The following test suites and aliases are supported: {}
""".strip()


@SettingsProvider
class TestConfig(object):
    @classmethod
    def config_settings(cls):
        from mozlog.commandline import log_formatters
        from mozlog.structuredlog import log_levels

        format_desc = "The default format to use when running tests with `mach test`."
        format_choices = list(log_formatters)
        level_desc = "The default log level to use when running tests with `mach test`."
        level_choices = [l.lower() for l in log_levels]
        return [
            ("test.format", "string", format_desc, "mach", {"choices": format_choices}),
            ("test.level", "string", level_desc, "info", {"choices": level_choices}),
        ]


def get_test_parser():
    from mozlog.commandline import add_logging_group
    from moztest.resolve import TEST_SUITES

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "what",
        default=None,
        nargs="+",
        help=TEST_HELP.format(", ".join(sorted(TEST_SUITES))),
    )
    parser.add_argument(
        "extra_args",
        default=None,
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass to the underlying test command(s). "
        "If an underlying command doesn't recognize the argument, it "
        "will fail.",
    )
    parser.add_argument(
        "--debugger",
        default=None,
        action="store",
        nargs="?",
        help="Specify a debugger to use.",
    )
    add_logging_group(parser)
    return parser


ADD_TEST_SUPPORTED_SUITES = [
    "mochitest-chrome",
    "mochitest-plain",
    "mochitest-browser-chrome",
    "web-platform-tests-testharness",
    "web-platform-tests-reftest",
    "xpcshell",
]

ADD_TEST_SUPPORTED_DOCS = ["js", "html", "xhtml", "xul"]

SUITE_SYNONYMS = {
    "wpt": "web-platform-tests-testharness",
    "wpt-testharness": "web-platform-tests-testharness",
    "wpt-reftest": "web-platform-tests-reftest",
}

MISSING_ARG = object()
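# A unique sentinel: identity checks against MISSING_ARG let `addtest` tell
# "--editor passed with no value" (argparse stores None) apart from "--editor
# not passed at all", which a None default could not distinguish.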


def create_parser_addtest():
    import addtest

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--suite",
        choices=sorted(ADD_TEST_SUPPORTED_SUITES + list(SUITE_SYNONYMS.keys())),
        help="suite for the test. "
        "If you pass a `test` argument this will be determined "
        "based on the filename and the folder it is in",
    )
    parser.add_argument(
        "-o",
        "--overwrite",
        action="store_true",
        help="Overwrite an existing file if it exists.",
    )
    parser.add_argument(
        "--doc",
        choices=ADD_TEST_SUPPORTED_DOCS,
        help="Document type for the test (if applicable). "
        "If you pass a `test` argument this will be determined "
        "based on the filename.",
    )
    parser.add_argument(
        "-e",
        "--editor",
        action="store",
        nargs="?",
        default=MISSING_ARG,
        help="Open the created file(s) in an editor; if a "
        "binary is supplied it will be used, otherwise the default editor for "
        "your environment will be opened",
    )

    for base_suite in addtest.TEST_CREATORS:
        cls = addtest.TEST_CREATORS[base_suite]
        if hasattr(cls, "get_parser"):
            group = parser.add_argument_group(base_suite)
            cls.get_parser(group)

    parser.add_argument("test", nargs="?", help="Test to create.")
    return parser


@Command(
    "addtest",
    category="testing",
    description="Generate tests based on templates",
    parser=create_parser_addtest,
)
def addtest(
    command_context,
    suite=None,
    test=None,
    doc=None,
    overwrite=False,
    editor=MISSING_ARG,
    **kwargs,
):
    import io

    import addtest
    from moztest.resolve import TEST_SUITES

    if not suite and not test:
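        # Nothing to go on; print usage and exit via argparse.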
        return create_parser_addtest().parse_args(["--help"])

    if suite in SUITE_SYNONYMS:
        suite = SUITE_SYNONYMS[suite]

    if test:
        if not overwrite and os.path.isfile(os.path.abspath(test)):
            print("Error: can't generate a test that already exists:", test)
            return 1

        abs_test = os.path.abspath(test)
        if doc is None:
            doc = guess_doc(abs_test)
        if suite is None:
            guessed_suite, err = guess_suite(abs_test)
            if err:
                print(err)
                return 1
            suite = guessed_suite

    else:
        test = None
        if doc is None:
            doc = "html"

    if not suite:
        print(
            "We couldn't automatically determine a suite. "
            "Please specify `--suite` with one of the following options:\n{}\n"
            "If you'd like to add support to a new suite, please file a bug "
            "blocking https://bugzilla.mozilla.org/show_bug.cgi?id=1540285.".format(
                ADD_TEST_SUPPORTED_SUITES
            )
        )
        return 1
    if doc not in ADD_TEST_SUPPORTED_DOCS:
        print(
            "Error: invalid `doc`. Either pass in a test with a valid extension "
            "({}) or pass in the `doc` argument".format(ADD_TEST_SUPPORTED_DOCS)
        )
        return 1
    creator_cls = addtest.creator_for_suite(suite)

    if creator_cls is None:
        print("Sorry, `addtest` doesn't currently know how to add {}".format(suite))
        return 1

    creator = creator_cls(command_context.topsrcdir, test, suite, doc, **kwargs)

    creator.check_args()

    paths = []
    added_tests = False
    for path, template in creator:
        if not template:
            continue
        added_tests = True
        if path:
            paths.append(path)
            print("Adding a test file at {} (suite `{}`)".format(path, suite))

            try:
                os.makedirs(os.path.dirname(path))
            except OSError:
                # The directory already exists.
                pass

            with io.open(path, "w", newline="\n") as f:
                f.write(template)
        else:
            # write to stdout if you passed only suite and doc and not a file path
            print(template)

    if not added_tests:
        return 1

    if test:
        creator.update_manifest()

        # Small hack, should really do this better
        if suite.startswith("wpt-"):
            suite = "web-platform-tests"

        mach_command = TEST_SUITES[suite]["mach_command"]
        print(
            "Please make sure to add the new test to your commit. "
            "You can now run the test with:\n    ./mach {} {}".format(
                mach_command, test
            )
        )

    if editor is not MISSING_ARG:
        if editor is None:
            # -e was given without a binary; fall back to the environment.
            if "VISUAL" in os.environ:
                editor = os.environ["VISUAL"]
            elif "EDITOR" in os.environ:
                editor = os.environ["EDITOR"]
            else:
                print("Unable to determine editor; please specify a binary")

        proc = None
        if editor:
            import subprocess

            proc = subprocess.Popen("%s %s" % (editor, " ".join(paths)), shell=True)

        if proc:
            proc.wait()

    return 0


def guess_doc(abs_test):
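    # The "doc" type is just the file extension, e.g. "test_foo.html" -> "html".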
    filename = os.path.basename(abs_test)
    return os.path.splitext(filename)[1].strip(".")


def guess_suite(abs_test):
    # If you pass an abs_test, try to detect the type based on the name
    # and folder. This detection can be skipped if you pass the `type` arg.
    err = None
    guessed_suite = None
    parent = os.path.dirname(abs_test)
    filename = os.path.basename(abs_test)
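
    # Sniff for harness manifests next to the test: their presence (in either
    # the legacy .ini or the newer .toml form) identifies the directory's suite.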
    has_browser_ini = os.path.isfile(os.path.join(parent, "browser.ini"))
    has_browser_toml = os.path.isfile(os.path.join(parent, "browser.toml"))
    has_chrome_ini = os.path.isfile(os.path.join(parent, "chrome.ini"))
    has_chrome_toml = os.path.isfile(os.path.join(parent, "chrome.toml"))
    has_plain_ini = os.path.isfile(os.path.join(parent, "mochitest.ini"))
    has_plain_toml = os.path.isfile(os.path.join(parent, "mochitest.toml"))
    has_xpcshell_ini = os.path.isfile(os.path.join(parent, "xpcshell.ini"))
    has_xpcshell_toml = os.path.isfile(os.path.join(parent, "xpcshell.toml"))

    in_wpt_folder = abs_test.startswith(
        os.path.abspath(os.path.join("testing", "web-platform"))
    )

    if in_wpt_folder:
        guessed_suite = "web-platform-tests-testharness"
        if "/css/" in abs_test:
            guessed_suite = "web-platform-tests-reftest"
    elif (
        filename.startswith("test_")
        and (has_xpcshell_ini or has_xpcshell_toml)
        and guess_doc(abs_test) == "js"
    ):
        guessed_suite = "xpcshell"
    else:
        if filename.startswith("browser_") and (has_browser_ini or has_browser_toml):
            guessed_suite = "mochitest-browser-chrome"
        elif filename.startswith("test_"):
            if (has_chrome_ini or has_chrome_toml) and (
                has_plain_ini or has_plain_toml
            ):
                err = (
                    "Error: directory contains both a chrome.{ini|toml} and a "
                    "mochitest.{ini|toml}. Please set --suite=mochitest-chrome "
                    "or --suite=mochitest-plain."
                )
            elif has_chrome_ini or has_chrome_toml:
                guessed_suite = "mochitest-chrome"
            elif has_plain_ini or has_plain_toml:
                guessed_suite = "mochitest-plain"
    return guessed_suite, err


@Command(
    "test",
    category="testing",
    description="Run tests (detects the kind of test and runs it).",
    parser=get_test_parser,
)
def test(command_context, what, extra_args, **log_args):
    """Run tests from names or paths.

    mach test accepts arguments specifying which tests to run. Each argument
    can be:

    * The path to a test file
    * A directory containing tests
    * A test suite name
    * An alias to a test suite name (codes used on TreeHerder)
    * A path to a test manifest

    When paths or directories are given, they are first resolved to test
    files known to the build system.

    If resolved tests belong to more than one test type/flavor/harness,
    the harness for each relevant type/flavor will be invoked. E.g. if
    you specify a directory with xpcshell and browser chrome mochitests,
    both harnesses will be invoked.

    Warning: `mach test` does not automatically re-build.
    Please remember to run `mach build` when necessary.

    EXAMPLES

    Run all test files in the devtools/client/shared/redux/middleware/xpcshell/
    directory:

    `./mach test devtools/client/shared/redux/middleware/xpcshell/`

    The command below prints a short summary of results instead of
    the default, more verbose output.
    Do not forget the - (minus sign) after --log-grouped!

    `./mach test --log-grouped - devtools/client/shared/redux/middleware/xpcshell/`

    To learn more about arguments for each test type/flavor/harness, please run
    `./mach <test-harness> --help`. For example, `./mach mochitest --help`.
    """
    from mozlog.commandline import setup_logging
    from mozlog.handlers import StreamHandler
    from moztest.resolve import TEST_SUITES, TestResolver, get_suite_definition
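
    # resolve_metadata() splits the requested names into whole suites and
    # individually resolved test objects; the two are dispatched differently.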
    resolver = command_context._spawn(TestResolver)
    run_suites, run_tests = resolver.resolve_metadata(what)

    if not run_suites and not run_tests:
        print(UNKNOWN_TEST)
        return 1

    if log_args.get("debugger", None):
        import mozdebug

        if not mozdebug.get_debugger_info(log_args.get("debugger")):
            sys.exit(1)
        extra_args_debugger_notation = "=".join(
            ["--debugger", log_args.get("debugger")]
        )
        if extra_args:
            extra_args.append(extra_args_debugger_notation)
        else:
            extra_args = [extra_args_debugger_notation]

    # Create shared logger
    format_args = {"level": command_context._mach_context.settings["test"]["level"]}
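    # A single resolved test gets verbose, uncompacted output; larger runs
    # keep the terser default.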
    if not run_suites and len(run_tests) == 1:
        format_args["verbose"] = True
        format_args["compact"] = False

    default_format = command_context._mach_context.settings["test"]["format"]
    log = setup_logging(
        "mach-test", log_args, {default_format: sys.stdout}, format_args
    )
    for handler in log.handlers:
        if isinstance(handler, StreamHandler):
            handler.formatter.inner.summary_on_shutdown = True

    status = None
    for suite_name in run_suites:
        suite = TEST_SUITES[suite_name]
        kwargs = suite["kwargs"]
        kwargs["log"] = log
        kwargs.setdefault("subsuite", None)

        if "mach_command" in suite:
            res = command_context._mach_context.commands.dispatch(
                suite["mach_command"],
                command_context._mach_context,
                argv=extra_args,
                **kwargs,
            )
            if res:
                status = res
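
    # Group remaining tests by (flavor, subsuite) so each harness is invoked
    # once with its full list of test objects.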
    buckets = {}
    for test in run_tests:
        key = (test["flavor"], test.get("subsuite", ""))
        buckets.setdefault(key, []).append(test)

    for (flavor, subsuite), tests in sorted(buckets.items()):
        _, m = get_suite_definition(flavor, subsuite)
        if "mach_command" not in m:
            substr = "-{}".format(subsuite) if subsuite else ""
            print(UNKNOWN_FLAVOR % (flavor, substr))
            status = 1
            continue

        kwargs = dict(m["kwargs"])
        kwargs["log"] = log
        kwargs.setdefault("subsuite", None)

        res = command_context._mach_context.commands.dispatch(
            m["mach_command"],
            command_context._mach_context,
            argv=extra_args,
            test_objects=tests,
            **kwargs,
        )
        if res:
            status = res

    if not log.has_shutdown:
        log.shutdown()
    return status


@Command(
    "cppunittest", category="testing", description="Run cpp unit tests (C++ tests)."
)
@CommandArgument(
    "test_files",
    nargs="*",
    metavar="N",
    help="Test to run. Can be specified as one or more files or "
    "directories, or omitted. If omitted, the entire test suite is "
    "executed.",
)
def run_cppunit_test(command_context, **params):
    from mozlog import commandline

    log = params.get("log")
    if not log:
        log = commandline.setup_logging("cppunittest", {}, {"tbpl": sys.stdout})

    # See if we have crash symbols
    symbols_path = os.path.join(command_context.distdir, "crashreporter-symbols")
    if not os.path.isdir(symbols_path):
        symbols_path = None

    # If no tests specified, run all tests in main manifest
    tests = params["test_files"]
    if not tests:
        tests = [os.path.join(command_context.distdir, "cppunittests")]
        manifest_path = os.path.join(
            command_context.topsrcdir, "testing", "cppunittest.ini"
        )
    else:
        manifest_path = None

    utility_path = command_context.bindir

    if conditions.is_android(command_context):
        from mozrunner.devices.android_device import (
            InstallIntent,
            verify_android_device,
        )

        verify_android_device(command_context, install=InstallIntent.NO)
        return run_android_test(
            command_context, tests, symbols_path, manifest_path, log
        )

    return run_desktop_test(
        command_context, tests, symbols_path, manifest_path, utility_path, log
    )


def run_desktop_test(
    command_context, tests, symbols_path, manifest_path, utility_path, log
):
    import runcppunittests as cppunittests
    from mozlog import commandline

    parser = cppunittests.CPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.utility_path = utility_path
    options.xre_path = command_context.bindir

    try:
        result = cppunittests.run_test_harness(options, tests)
    except Exception as e:
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        result = False
        raise

    return 0 if result else 1


def run_android_test(command_context, tests, symbols_path, manifest_path, log):
    import remotecppunittests
    from mozlog import commandline

    parser = remotecppunittests.RemoteCPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    if not options.adb_path:
        from mozrunner.devices.android_device import get_adb_path

        options.adb_path = get_adb_path(command_context)
    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.xre_path = command_context.bindir
    options.local_lib = command_context.bindir.replace("bin", "fennec")
    for file in os.listdir(os.path.join(command_context.topobjdir, "dist")):
        if file.endswith(".apk") and file.startswith("fennec"):
            options.local_apk = os.path.join(command_context.topobjdir, "dist", file)
            log.info("using APK: " + options.local_apk)
            break

    try:
        result = remotecppunittests.run_test_harness(options, tests)
    except Exception as e:
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        result = False
        raise

    return 0 if result else 1


def executable_name(name):
    return name + ".exe" if sys.platform.startswith("win") else name


@Command(
    "jstests",
    category="testing",
    description="Run SpiderMonkey JS tests in the JS shell.",
    ok_if_tests_disabled=True,
)
@CommandArgument("--shell", help="The shell to be used")
@CommandArgument(
    "params",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
)
def run_jstests(command_context, shell, params):
    import subprocess

    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    jstest_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "js", "src", "tests", "jstests.py"),
        js,
    ] + params

    return subprocess.call(jstest_cmd)


@Command(
    "jit-test",
    category="testing",
    description="Run SpiderMonkey jit-tests in the JS shell.",
    ok_if_tests_disabled=True,
)
@CommandArgument("--shell", help="The shell to be used")
@CommandArgument(
    "--cgc",
    action="store_true",
    default=False,
    help="Run with the SM(cgc) job's env vars",
)
@CommandArgument(
    "params",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
)
def run_jittests(command_context, shell, cgc, params):
    import subprocess

    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    jittest_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "js", "src", "jit-test", "jit_test.py"),
        js,
    ] + params

    env = os.environ.copy()
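    # --cgc mirrors the SM(cgc) CI job by forcing incremental GC zeal.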
    if cgc:
        env["JS_GC_ZEAL"] = "IncrementalMultipleSlices"

    return subprocess.call(jittest_cmd, env=env)


@Command("jsapi-tests", category="testing", description="Run SpiderMonkey JSAPI tests.")
@CommandArgument(
    "--list",
    action="store_true",
    default=False,
    help="List all tests",
)
@CommandArgument(
    "--frontend-only",
    action="store_true",
    default=False,
    help="Run tests for frontend-only APIs, with light-weight entry point",
)
@CommandArgument(
    "test_name",
    nargs="?",
    metavar="N",
    help="Test to run. Can be a prefix or omitted. If "
    "omitted, the entire test suite is executed.",
)
def run_jsapitests(command_context, list=False, frontend_only=False, test_name=None):
    import subprocess

    jsapi_tests_cmd = [
        os.path.join(command_context.bindir, executable_name("jsapi-tests"))
    ]
    if list:
        jsapi_tests_cmd.append("--list")
    if frontend_only:
        jsapi_tests_cmd.append("--frontend-only")
    if test_name:
        jsapi_tests_cmd.append(test_name)

    test_env = os.environ.copy()
    test_env["TOPSRCDIR"] = command_context.topsrcdir
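    # The harness reads TOPSRCDIR from its environment (presumably to locate
    # in-tree resources).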

    result = subprocess.call(jsapi_tests_cmd, env=test_env)
    if result != 0:
        print(f"jsapi-tests failed, exit code {result}")
    return result


def run_check_js_msg(command_context):
    import subprocess

    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    check_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "config", "check_js_msg_encoding.py"),
    ]

    return subprocess.call(check_cmd)


def get_jsshell_parser():
    from jsshell.benchmark import get_parser

    return get_parser()


@Command(
    "jsshell-bench",
    category="testing",
    parser=get_jsshell_parser,
    description="Run benchmarks in the SpiderMonkey JS shell.",
)
def run_jsshelltests(command_context, **kwargs):
    from jsshell import benchmark

    return benchmark.run(**kwargs)


@Command(
    "cramtest",
    category="testing",
    description="Mercurial style .t tests for command line applications.",
)
@CommandArgument(
    "test_paths",
    nargs="*",
    metavar="N",
    help="Test paths to run. Each path can be a test file or directory. "
    "If omitted, the entire suite will be run.",
)
@CommandArgument(
    "cram_args",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the cram binary. See "
    "'./mach python -m cram -- -h' for a list of available options.",
)
def cramtest(command_context, cram_args=None, test_paths=None, test_objects=None):
    command_context.activate_virtualenv()
    import mozinfo
    from manifestparser import TestManifest

    if test_objects is None:
        from moztest.resolve import TestResolver

        resolver = command_context._spawn(TestResolver)
        if test_paths:
            # If we were given test paths, try to find tests matching them.
            test_objects = resolver.resolve_tests(paths=test_paths, flavor="cram")
        else:
            # Otherwise just run everything in CRAMTEST_MANIFESTS
            test_objects = resolver.resolve_tests(flavor="cram")

    if not test_objects:
        message = "No tests were collected, check spelling of the test paths."
        command_context.log(logging.WARN, "cramtest", {}, message)
        return 1

    mp = TestManifest()
    mp.tests.extend(test_objects)
    tests = mp.active_tests(disabled=False, **mozinfo.info)

    python = command_context.virtualenv_manager.python_path
    # cram_args defaults to None; guard against that before concatenating.
    cmd = [python, "-m", "cram"] + (cram_args or []) + [t["relpath"] for t in tests]
    return subprocess.call(cmd, cwd=command_context.topsrcdir)


from datetime import date, timedelta


@Command(
    "test-info", category="testing", description="Display historical test results."
)
def test_info(command_context):
    """
    All functions implemented as subcommands.
    """


@SubCommand(
    "test-info",
    "tests",
    description="Display historical test result summary for named tests.",
)
@CommandArgument("test_names", nargs=argparse.REMAINDER, help="Test(s) of interest.")
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(7)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
@CommandArgument(
    "--show-info",
    action="store_true",
    help="Retrieve and display general test information.",
)
@CommandArgument(
    "--show-bugs",
    action="store_true",
    help="Retrieve and display related Bugzilla bugs.",
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_info_tests(
    command_context,
    test_names,
    start,
    end,
    show_info,
    show_bugs,
    verbose,
):
    import testinfo

    ti = testinfo.TestInfoTests(verbose)
    ti.report(
        test_names,
        start,
        end,
        show_info,
        show_bugs,
    )


@SubCommand(
    "test-info",
    "report",
    description="Generate a json report of test manifests and/or tests "
    "categorized by Bugzilla component and optionally filtered "
    "by path, component, and/or manifest annotations.",
)
@CommandArgument(
    "--components",
    default=None,
    help="Comma-separated list of Bugzilla components."
    " eg. Testing::General,Core::WebVR",
)
@CommandArgument(
    "--flavor",
    help='Limit results to tests of the specified flavor (eg. "xpcshell").',
)
@CommandArgument(
    "--subsuite",
    help='Limit results to tests of the specified subsuite (eg. "devtools").',
)
@CommandArgument(
    "paths", nargs=argparse.REMAINDER, help="File system paths of interest."
)
@CommandArgument(
    "--show-manifests",
    action="store_true",
    help="Include test manifests in report.",
)
@CommandArgument(
    "--show-tests", action="store_true", help="Include individual tests in report."
)
@CommandArgument(
    "--show-summary", action="store_true", help="Include summary in report."
)
@CommandArgument(
    "--show-annotations",
    action="store_true",
    help="Include list of manifest annotation conditions in report.",
)
@CommandArgument(
    "--show-testruns",
    action="store_true",
    help="Include total number of runs the test has if there are failures.",
)
@CommandArgument(
    "--filter-values",
    help="Comma-separated list of value regular expressions to filter on; "
    "displayed tests contain all specified values.",
)
@CommandArgument(
    "--filter-keys",
    help="Comma-separated list of test keys to filter on, "
    'like "skip-if"; only these fields will be searched '
    "for filter-values.",
)
@CommandArgument(
    "--no-component-report",
    action="store_false",
    dest="show_components",
    default=True,
    help="Do not categorize by bugzilla component.",
)
@CommandArgument("--output-file", help="Path to report file.")
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
def test_report(
    command_context,
    components,
    flavor,
    subsuite,
    paths,
    show_manifests,
    show_tests,
    show_summary,
    show_annotations,
    filter_values,
    filter_keys,
    show_components,
    output_file,
    verbose,
    start,
    end,
    show_testruns,
):
    import testinfo
    from mozbuild import build_commands

    try:
        command_context.config_environment
    except BuildEnvironmentNotFoundException:
        print("Looks like configure has not run yet, running it now...")
        build_commands.configure(command_context)

    ti = testinfo.TestInfoReport(verbose)
    ti.report(
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        filter_values,
        filter_keys,
        show_components,
        output_file,
        start,
        end,
        show_testruns,
    )


@SubCommand(
    "test-info",
    "report-diff",
    description='Compare two reports generated by "test-info reports".',
)
@CommandArgument(
    "--before",
    default=None,
    help="The first (earlier) report file; path to local file or url.",
)
@CommandArgument(
    "--after", help="The second (later) report file; path to local file or url."
)
@CommandArgument(
    "--output-file",
    help="Path to report file to be written. If not specified, the report "
    "will be written to standard output.",
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_report_diff(command_context, before, after, output_file, verbose):
    import testinfo

    ti = testinfo.TestInfoReport(verbose)
    ti.report_diff(before, after, output_file)


@SubCommand(
    "test-info",
    "testrun-report",
    description="Generate report of number of runs for each test group (manifest)",
)
@CommandArgument("--output-file", help="Path to report file.")
def test_info_testrun_report(command_context, output_file):
    import json

    import testinfo

    ti = testinfo.TestInfoReport(verbose=True)
    runcounts = ti.get_runcounts()
    if output_file:
        output_file = os.path.abspath(output_file)
        output_dir = os.path.dirname(output_file)
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)
        with open(output_file, "w") as f:
            json.dump(runcounts, f)
    else:
        print(runcounts)


@SubCommand(
    "test-info",
    "failure-report",
    description="Display failure line groupings and frequencies for "
    "single tracking intermittent bugs.",
)
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
@CommandArgument(
    "--bugid",
    default=None,
    help="bugid for treeherder intermittent failures data query.",
)
def test_info_failures(
    command_context,
    start,
    end,
    bugid,
):
    # bugid comes in as a string (or None if not passed); we need an int:
    try:
        bugid = int(bugid)
    except (TypeError, ValueError):
        bugid = None
    if not bugid:
        print("Please enter a valid bugid (e.g. '1760132')")
        return

    # get bug info
    url = (
        "https://bugzilla.mozilla.org/rest/bug?include_fields=summary,depends_on&id=%s"
        % bugid
    )
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    if r.status_code != 200:
        print("%s error retrieving url: %s" % (r.status_code, url))
        return

    data = r.json()
    if not data:
        print("unable to get bugzilla information for %s" % bugid)
        return

    summary = data["bugs"][0]["summary"]
    parts = summary.split("|")
    if not summary.endswith("single tracking bug") or len(parts) != 2:
        print("this query only works with single tracking bugs")
        return

    # get depends_on bugs:
    buglist = [bugid]
    if "depends_on" in data["bugs"][0]:
        buglist.extend(data["bugs"][0]["depends_on"])

    testname = parts[0].strip().split(" ")[-1]

    # now query treeherder to get details about annotations
    data = []
    for b in buglist:
        url = "https://treeherder.mozilla.org/api/failuresbybug/"
        url += "?startday=%s&endday=%s&tree=trunk&bug=%s" % (start, end, b)
        r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
        r.raise_for_status()

        bdata = r.json()
        data.extend(bdata)

    if len(data) == 0:
        print("no failures were found for given bugid, please ensure bug is")
        print("accessible via: https://treeherder.mozilla.org/intermittent-failures")
        return

    # query VCS to get current list of variants:
    import yaml

    url = "https://hg.mozilla.org/mozilla-central/raw-file/tip/taskcluster/ci/test/variants.yml"
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    variants = yaml.safe_load(r.text)

    print(
        "\nQuerying data for bug %s annotated from %s to %s on trunk.\n\n"
        % (buglist, start, end)
    )
    jobs = {}
    lines = {}
    for failure in data:
        # config = platform/buildtype
        # testsuite (<suite>[-variant][-<chunk>])
        # lines - group by patterns that contain test name
        config = "%s/%s" % (failure["platform"], failure["build_type"])

        variant = ""
        suite = ""
        varpos = len(failure["test_suite"])
        for v in variants.keys():
            var = "-%s" % variants[v]["suffix"]
            if var in failure["test_suite"]:
                # keep the earliest-occurring variant suffix
                if failure["test_suite"].find(var) < varpos:
                    varpos = failure["test_suite"].find(var)
                    variant = var

        if variant:
            suite = failure["test_suite"].split(variant)[0]

        parts = failure["test_suite"].split("-")
        try:
            int(parts[-1])
            suite = "-".join(parts[:-1])
        except ValueError:
            pass  # if this works, then the last '-X' is a number :)

        if suite == "":
            print("Error: failure to find variant in %s" % failure["test_suite"])

        job = "%s-%s%s" % (config, suite, variant)
        if job not in jobs.keys():
            jobs[job] = 0
        jobs[job] += 1

        # lines - sum(hash) of all lines where we match testname
        hvalue = 0
        for line in failure["lines"]:
            if len(line.split(testname)) <= 1:
                continue
            # strip off timestamp and mozharness status
            parts = line.split("TEST-UNEXPECTED")
            l = "TEST-UNEXPECTED%s" % parts[-1]

            # only keep 25 characters of the failure, often longer is random numbers
            parts = l.split(testname)
            l = "%s%s%s" % (parts[0], testname, parts[1][:25])

            hvalue += hash(l)
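
        # A failure with no log lines still gets a nonzero sentinel hash so it
        # is bucketed rather than skipped.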
        if not failure["lines"]:
            hvalue = 1

        if not hvalue:
            continue

        if hvalue not in lines.keys():
            lines[hvalue] = {"lines": failure["lines"], "config": []}
        lines[hvalue]["config"].append(job)

    for h in lines.keys():
        print("%s errors with:" % (len(lines[h]["config"])))
        if lines[h]["lines"]:
            for l in lines[h]["lines"]:
                print(l)
        else:
            print(
                "... no failure lines recorded in"
                " https://treeherder.mozilla.org/intermittent-failures ..."
            )

        for job in jobs:
            count = len([x for x in lines[h]["config"] if x == job])
            if count > 0:
                print(" %s: %s" % (job, count))
        print("")


@Command(
    "rusttests",
    category="testing",
    conditions=[conditions.is_non_artifact_build],
    description="Run rust unit tests (via cargo test).",
)
def run_rusttests(command_context, **kwargs):
    return command_context._mach_context.commands.dispatch(
        "build",
        command_context._mach_context,
        what=["pre-export", "export", "recurse_rusttests"],
    )


@Command(
    "fluent-migration-test",
    category="testing",
    description="Test Fluent migration recipes.",
)
@CommandArgument("test_paths", nargs="*", metavar="N", help="Recipe paths to test.")
def run_migration_tests(command_context, test_paths=None, **kwargs):
    if not test_paths:
        test_paths = []
    command_context.activate_virtualenv()
    from test_fluent_migrations import fmt

    rv = 0
    with_context = []
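    # First pass: lint each recipe with inspect_migration(); only recipes with
    # no reported issues go on to the actual migration run below.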
    for to_test in test_paths:
        try:
            context = fmt.inspect_migration(to_test)
            for issue in context["issues"]:
                command_context.log(
                    logging.ERROR,
                    "fluent-migration-test",
                    {
                        "error": issue["msg"],
                        "file": to_test,
                    },
                    "ERROR in {file}: {error}",
                )
            if context["issues"]:
                continue
            with_context.append(
                {
                    "to_test": to_test,
                    "references": context["references"],
                }
            )
        except Exception as e:
            command_context.log(
                logging.ERROR,
                "fluent-migration-test",
                {"error": str(e), "file": to_test},
                "ERROR in {file}: {error}",
            )
            rv |= 1
    obj_dir = fmt.prepare_object_dir(command_context)
    for context in with_context:
        rv |= fmt.test_migration(command_context, obj_dir, **context)
    return rv