# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import argparse
import logging
import os
import subprocess
import sys

import requests
from mach.decorators import Command, CommandArgument, SettingsProvider, SubCommand
from mozbuild.base import BuildEnvironmentNotFoundException
from mozbuild.base import MachCommandConditions as conditions

UNKNOWN_TEST = """
I was unable to find tests from the given argument(s).

You should specify a test directory, filename, test suite name, or
abbreviation.

It's possible my little brain doesn't know about the type of test you are
trying to execute. If you suspect this, please request support by filing
a bug at
https://bugzilla.mozilla.org/enter_bug.cgi?product=Testing&component=General.
""".strip()

UNKNOWN_FLAVOR = """
I know you are trying to run a %s%s test. Unfortunately, I can't run those
tests yet. Sorry!
""".strip()

TEST_HELP = """
Test or tests to run. Tests can be specified by filename, directory, suite
name or suite alias.

The following test suites and aliases are supported: {}
""".strip()


@SettingsProvider
class TestConfig(object):
    @classmethod
    def config_settings(cls):
        from mozlog.commandline import log_formatters
        from mozlog.structuredlog import log_levels

        format_desc = "The default format to use when running tests with `mach test`."
        format_choices = list(log_formatters)
        level_desc = "The default log level to use when running tests with `mach test`."
        level_choices = [l.lower() for l in log_levels]
        return [
            ("test.format", "string", format_desc, "mach", {"choices": format_choices}),
            ("test.level", "string", level_desc, "info", {"choices": level_choices}),
        ]


def get_test_parser():
    from mozlog.commandline import add_logging_group
    from moztest.resolve import TEST_SUITES

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "what",
        default=None,
        nargs="+",
        help=TEST_HELP.format(", ".join(sorted(TEST_SUITES))),
    )
    parser.add_argument(
        "extra_args",
        default=None,
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass to the underlying test command(s). "
        "If an underlying command doesn't recognize the argument, it "
        "will fail.",
    )
    parser.add_argument(
        "--debugger",
        default=None,
        action="store",
        nargs="?",
        help="Specify a debugger to use.",
    )
    add_logging_group(parser)
    return parser


ADD_TEST_SUPPORTED_SUITES = [
    "mochitest-chrome",
    "mochitest-plain",
    "mochitest-browser-chrome",
    "web-platform-tests-testharness",
    "web-platform-tests-reftest",
    "xpcshell",
]

ADD_TEST_SUPPORTED_DOCS = ["js", "html", "xhtml", "xul"]

SUITE_SYNONYMS = {
    "wpt": "web-platform-tests-testharness",
    "wpt-testharness": "web-platform-tests-testharness",
    "wpt-reftest": "web-platform-tests-reftest",
}

MISSING_ARG = object()
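# Sentinel: distinguishes "--editor not passed at all" (the default stays
# MISSING_ARG) from "--editor passed without a value" (argparse stores None
# for nargs="?", and addtest() then falls back to $VISUAL/$EDITOR).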


def create_parser_addtest():
    import addtest

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--suite",
        choices=sorted(ADD_TEST_SUPPORTED_SUITES + list(SUITE_SYNONYMS.keys())),
        help="Suite for the test. "
        "If you pass a `test` argument this will be determined "
        "based on the filename and the folder it is in.",
    )
    parser.add_argument(
        "-o",
        "--overwrite",
        action="store_true",
        help="Overwrite an existing file if it exists.",
    )
    parser.add_argument(
        "--doc",
        choices=ADD_TEST_SUPPORTED_DOCS,
        help="Document type for the test (if applicable). "
        "If you pass a `test` argument this will be determined "
        "based on the filename.",
    )
    parser.add_argument(
        "-e",
        "--editor",
        action="store",
        nargs="?",
        default=MISSING_ARG,
        help="Open the created file(s) in an editor; if a "
        "binary is supplied it will be used, otherwise the default editor for "
        "your environment will be opened.",
    )

    for base_suite in addtest.TEST_CREATORS:
        cls = addtest.TEST_CREATORS[base_suite]
        if hasattr(cls, "get_parser"):
            group = parser.add_argument_group(base_suite)
            cls.get_parser(group)

    parser.add_argument("test", nargs="?", help="Test to create.")
    return parser


@Command(
    "addtest",
    category="testing",
    description="Generate tests based on templates",
    parser=create_parser_addtest,
)
def addtest(
    command_context,
    suite=None,
    test=None,
    doc=None,
    overwrite=False,
    editor=MISSING_ARG,
    **kwargs,
):
    import io

    import addtest
    from moztest.resolve import TEST_SUITES

    if not suite and not test:
        return create_parser_addtest().parse_args(["--help"])

    if suite in SUITE_SYNONYMS:
        suite = SUITE_SYNONYMS[suite]

    if test:
        if not overwrite and os.path.isfile(os.path.abspath(test)):
            print("Error: can't generate a test that already exists:", test)
            return 1

        abs_test = os.path.abspath(test)
        if doc is None:
            doc = guess_doc(abs_test)
        if suite is None:
            guessed_suite, err = guess_suite(abs_test)
            if err:
                print(err)
                return 1
            suite = guessed_suite

    else:
        test = None
        if doc is None:
            doc = "html"

    if not suite:
        print(
            "We couldn't automatically determine a suite. "
            "Please specify `--suite` with one of the following options:\n{}\n"
            "If you'd like to add support to a new suite, please file a bug "
            "blocking https://bugzilla.mozilla.org/show_bug.cgi?id=1540285.".format(
                ADD_TEST_SUPPORTED_SUITES
            )
        )
        return 1

    if doc not in ADD_TEST_SUPPORTED_DOCS:
        print(
            "Error: invalid `doc`. Either pass in a test with a valid extension "
            "({}) or pass in the `doc` argument".format(ADD_TEST_SUPPORTED_DOCS)
        )
        return 1

    creator_cls = addtest.creator_for_suite(suite)

    if creator_cls is None:
        print("Sorry, `addtest` doesn't currently know how to add {}".format(suite))
        return 1

    creator = creator_cls(command_context.topsrcdir, test, suite, doc, **kwargs)

    creator.check_args()

    paths = []
    added_tests = False
    for path, template in creator:
        if not template:
            continue
        added_tests = True
        if path:
            paths.append(path)
            print("Adding a test file at {} (suite `{}`)".format(path, suite))

            try:
                os.makedirs(os.path.dirname(path))
            except OSError:
                pass

            with io.open(path, "w", newline="\n") as f:
                f.write(template)
        else:
            # write to stdout if only a suite and doc were passed, not a file path
            print(template)

    if not added_tests:
        return 1

    if test:
        creator.update_manifest()

        # Small hack; we should really do this better.
        if suite.startswith("wpt-"):
            suite = "web-platform-tests"

        mach_command = TEST_SUITES[suite]["mach_command"]
        print(
            "Please make sure to add the new test to your commit. "
            "You can now run the test with:\n ./mach {} {}".format(
                mach_command, test
            )
        )

    if editor is not MISSING_ARG:
        # --editor without a value: fall back to the environment's editor.
        if editor is None:
            if "VISUAL" in os.environ:
                editor = os.environ["VISUAL"]
            elif "EDITOR" in os.environ:
                editor = os.environ["EDITOR"]
            else:
                print("Unable to determine editor; please specify a binary")

        proc = None
        if editor:
            import subprocess

            proc = subprocess.Popen("%s %s" % (editor, " ".join(paths)), shell=True)

        if proc:
            proc.wait()

    return 0


def guess_doc(abs_test):
    filename = os.path.basename(abs_test)
    return os.path.splitext(filename)[1].strip(".")


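# guess_doc() maps a file's extension to a doc type (e.g. "test_foo.html" ->
# "html"); guess_suite() below then picks a harness from the test's filename
# prefix and the manifest files (browser/chrome/mochitest/xpcshell .ini|.toml)
# sitting next to it.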
def guess_suite(abs_test):
    # If passed an abs_test, try to detect the type based on the name
    # and folder. This detection can be skipped by passing the `type` arg.
    err = None
    guessed_suite = None
    parent = os.path.dirname(abs_test)
    filename = os.path.basename(abs_test)

    has_browser_ini = os.path.isfile(os.path.join(parent, "browser.ini"))
    has_browser_toml = os.path.isfile(os.path.join(parent, "browser.toml"))
    has_chrome_ini = os.path.isfile(os.path.join(parent, "chrome.ini"))
    has_chrome_toml = os.path.isfile(os.path.join(parent, "chrome.toml"))
    has_plain_ini = os.path.isfile(os.path.join(parent, "mochitest.ini"))
    has_plain_toml = os.path.isfile(os.path.join(parent, "mochitest.toml"))
    has_xpcshell_ini = os.path.isfile(os.path.join(parent, "xpcshell.ini"))
    has_xpcshell_toml = os.path.isfile(os.path.join(parent, "xpcshell.toml"))

    in_wpt_folder = abs_test.startswith(
        os.path.abspath(os.path.join("testing", "web-platform"))
    )

    if in_wpt_folder:
        guessed_suite = "web-platform-tests-testharness"
        if "/css/" in abs_test:
            guessed_suite = "web-platform-tests-reftest"
    elif (
        filename.startswith("test_")
        and (has_xpcshell_ini or has_xpcshell_toml)
        and guess_doc(abs_test) == "js"
    ):
        guessed_suite = "xpcshell"
    else:
        if filename.startswith("browser_") and (has_browser_ini or has_browser_toml):
            guessed_suite = "mochitest-browser-chrome"
        elif filename.startswith("test_"):
            if (has_chrome_ini or has_chrome_toml) and (
                has_plain_ini or has_plain_toml
            ):
                err = (
                    "Error: directory contains both a chrome.{ini|toml} and mochitest.{ini|toml}. "
                    "Please set --suite=mochitest-chrome or --suite=mochitest-plain."
                )
            elif has_chrome_ini or has_chrome_toml:
                guessed_suite = "mochitest-chrome"
            elif has_plain_ini or has_plain_toml:
                guessed_suite = "mochitest-plain"
    return guessed_suite, err


class MachTestRunner:
    """Adapter for `mach test` to simplify its external import."""

    def test(command_context, what, extra_args, **log_args):
        return test(command_context, what, extra_args, **log_args)


@Command(
    "test",
    category="testing",
    description="Run tests (detects the kind of test and runs it).",
    parser=get_test_parser,
)
def test(command_context, what, extra_args, **log_args):
    """Run tests from names or paths.

    `mach test` accepts arguments specifying which tests to run. Each argument
    can be:

    * The path to a test file
    * A directory containing tests
    * A test suite name
    * An alias to a test suite name (codes used on Treeherder)
    * The path to a test manifest

    When paths or directories are given, they are first resolved to test
    files known to the build system.

    If resolved tests belong to more than one test type/flavor/harness,
    the harness for each relevant type/flavor will be invoked. E.g. if
    you specify a directory with xpcshell and browser chrome mochitests,
    both harnesses will be invoked.

    Warning: `mach test` does not automatically re-build.
    Please remember to run `mach build` when necessary.

    EXAMPLES

    Run all test files in the devtools/client/shared/redux/middleware/xpcshell/
    directory:

    `./mach test devtools/client/shared/redux/middleware/xpcshell/`

    The command below prints a short summary of results instead of
    the default, more verbose output.
    Do not forget the - (minus sign) after --log-grouped!

    `./mach test --log-grouped - devtools/client/shared/redux/middleware/xpcshell/`

    To learn more about arguments for each test type/flavor/harness, please run
    `./mach <test-harness> --help`. For example, `./mach mochitest --help`.
    """
    from mozlog.commandline import setup_logging
    from mozlog.handlers import StreamHandler
    from moztest.resolve import TEST_SUITES, TestResolver, get_suite_definition

    resolver = command_context._spawn(TestResolver)
    run_suites, run_tests = resolver.resolve_metadata(what)

    if not run_suites and not run_tests:
        print(UNKNOWN_TEST)
        return 1

    if log_args.get("debugger", None):
        import mozdebug

        if not mozdebug.get_debugger_info(log_args.get("debugger")):
            sys.exit(1)
        extra_args_debugger_notation = "=".join(
            ["--debugger", log_args.get("debugger")]
        )
        if extra_args:
            extra_args.append(extra_args_debugger_notation)
        else:
            extra_args = [extra_args_debugger_notation]

    # Create shared logger
    format_args = {"level": command_context._mach_context.settings["test"]["level"]}
    if not run_suites and len(run_tests) == 1:
        format_args["verbose"] = True
        format_args["compact"] = False

    default_format = command_context._mach_context.settings["test"]["format"]
    log = setup_logging(
        "mach-test", log_args, {default_format: sys.stdout}, format_args
    )
    for handler in log.handlers:
        if isinstance(handler, StreamHandler):
            handler.formatter.inner.summary_on_shutdown = True

    if log_args.get("custom_handler", None) is not None:
        log.add_handler(log_args.get("custom_handler"))

    status = None
    for suite_name in run_suites:
        suite = TEST_SUITES[suite_name]
        kwargs = suite["kwargs"]
        kwargs["log"] = log
        kwargs.setdefault("subsuite", None)

        if "mach_command" in suite:
            res = command_context._mach_context.commands.dispatch(
                suite["mach_command"],
                command_context._mach_context,
                argv=extra_args,
                **kwargs,
            )
            if res:
                status = res

    buckets = {}
    for test in run_tests:
        key = (test["flavor"], test.get("subsuite", ""))
        buckets.setdefault(key, []).append(test)

    for (flavor, subsuite), tests in sorted(buckets.items()):
        _, m = get_suite_definition(flavor, subsuite)
        if "mach_command" not in m:
            substr = "-{}".format(subsuite) if subsuite else ""
            print(UNKNOWN_FLAVOR % (flavor, substr))
            status = 1
            continue

        kwargs = dict(m["kwargs"])
        kwargs["log"] = log
        kwargs.setdefault("subsuite", None)

        res = command_context._mach_context.commands.dispatch(
            m["mach_command"],
            command_context._mach_context,
            argv=extra_args,
            test_objects=tests,
            **kwargs,
        )
        if res:
            status = res

    if not log.has_shutdown:
        log.shutdown()
    return status


@Command(
    "cppunittest", category="testing", description="Run cpp unit tests (C++ tests)."
)
@CommandArgument(
    "test_files",
    nargs="*",
    metavar="N",
    help="Test to run. Can be specified as one or more files or "
    "directories, or omitted. If omitted, the entire test suite is "
    "executed.",
)
def run_cppunit_test(command_context, **params):
    from mozlog import commandline

    log = params.get("log")
    if not log:
        log = commandline.setup_logging("cppunittest", {}, {"tbpl": sys.stdout})

    # See if we have crash symbols
    symbols_path = os.path.join(command_context.distdir, "crashreporter-symbols")
    if not os.path.isdir(symbols_path):
        symbols_path = None

    # If no tests specified, run all tests in main manifest
    tests = params["test_files"]
    if not tests:
        tests = [os.path.join(command_context.distdir, "cppunittests")]
        manifest_path = os.path.join(
            command_context.topsrcdir, "testing", "cppunittest.ini"
        )
    else:
        manifest_path = None

    utility_path = command_context.bindir

    if conditions.is_android(command_context):
        from mozrunner.devices.android_device import (
            InstallIntent,
            verify_android_device,
        )

        verify_android_device(command_context, install=InstallIntent.NO)
        return run_android_test(
            command_context, tests, symbols_path, manifest_path, log
        )

    return run_desktop_test(
        command_context, tests, symbols_path, manifest_path, utility_path, log
    )


def run_desktop_test(
    command_context, tests, symbols_path, manifest_path, utility_path, log
):
    import runcppunittests as cppunittests
    from mozlog import commandline

    parser = cppunittests.CPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.utility_path = utility_path
    options.xre_path = command_context.bindir

    try:
        result = cppunittests.run_test_harness(options, tests)
    except Exception as e:
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        result = False
        raise

    return 0 if result else 1


def run_android_test(command_context, tests, symbols_path, manifest_path, log):
    import remotecppunittests
    from mozlog import commandline

    parser = remotecppunittests.RemoteCPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    if not options.adb_path:
        from mozrunner.devices.android_device import get_adb_path

        options.adb_path = get_adb_path(command_context)
    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.xre_path = command_context.bindir
    options.local_lib = command_context.bindir.replace("bin", "fennec")
    for file in os.listdir(os.path.join(command_context.topobjdir, "dist")):
        if file.endswith(".apk") and file.startswith("fennec"):
            options.local_apk = os.path.join(command_context.topobjdir, "dist", file)
            log.info("using APK: " + options.local_apk)
            break

    try:
        result = remotecppunittests.run_test_harness(options, tests)
    except Exception as e:
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        result = False
        raise

    return 0 if result else 1


def executable_name(name):
    return name + ".exe" if sys.platform.startswith("win") else name


@Command(
    "jstests",
    category="testing",
    description="Run SpiderMonkey JS tests in the JS shell.",
    ok_if_tests_disabled=True,
)
@CommandArgument("--shell", help="The shell to be used")
@CommandArgument(
    "params",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
)
def run_jstests(command_context, shell, params):
    import subprocess

    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    jstest_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "js", "src", "tests", "jstests.py"),
        js,
    ] + params

    return subprocess.call(jstest_cmd)


@Command(
    "jit-test",
    category="testing",
    description="Run SpiderMonkey jit-tests in the JS shell.",
    ok_if_tests_disabled=True,
)
@CommandArgument("--shell", help="The shell to be used")
@CommandArgument(
    "--cgc",
    action="store_true",
    default=False,
    help="Run with the SM(cgc) job's env vars",
)
@CommandArgument(
    "params",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
)
def run_jittests(command_context, shell, cgc, params):
    import subprocess

    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    jittest_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "js", "src", "jit-test", "jit_test.py"),
        js,
    ] + params

    env = os.environ.copy()
    if cgc:
        env["JS_GC_ZEAL"] = "IncrementalMultipleSlices"
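        # Zeal mode "IncrementalMultipleSlices" makes the collector run
        # incrementally in many small slices, approximating the SM(cgc) CI
        # job's environment (see the --cgc help text above).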

    return subprocess.call(jittest_cmd, env=env)


@Command("jsapi-tests", category="testing", description="Run SpiderMonkey JSAPI tests.")
@CommandArgument(
    "--list",
    action="store_true",
    default=False,
    help="List all tests",
)
@CommandArgument(
    "--frontend-only",
    action="store_true",
    default=False,
    help="Run tests for frontend-only APIs, with light-weight entry point",
)
@CommandArgument(
    "test_name",
    nargs="?",
    metavar="N",
    help="Test to run. Can be a prefix or omitted. If "
    "omitted, the entire test suite is executed.",
)
def run_jsapitests(command_context, list=False, frontend_only=False, test_name=None):
    import subprocess

    jsapi_tests_cmd = [
        os.path.join(command_context.bindir, executable_name("jsapi-tests"))
    ]
    if list:
        jsapi_tests_cmd.append("--list")
    if frontend_only:
        jsapi_tests_cmd.append("--frontend-only")
    if test_name:
        jsapi_tests_cmd.append(test_name)

    test_env = os.environ.copy()
    test_env["TOPSRCDIR"] = command_context.topsrcdir

    result = subprocess.call(jsapi_tests_cmd, env=test_env)
    if result != 0:
        print(f"jsapi-tests failed, exit code {result}")
    return result


def run_check_js_msg(command_context):
    import subprocess

    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    check_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "config", "check_js_msg_encoding.py"),
    ]

    return subprocess.call(check_cmd)


def get_jsshell_parser():
    from jsshell.benchmark import get_parser

    return get_parser()


@Command(
    "jsshell-bench",
    category="testing",
    parser=get_jsshell_parser,
    description="Run benchmarks in the SpiderMonkey JS shell.",
)
def run_jsshelltests(command_context, **kwargs):
    from jsshell import benchmark

    return benchmark.run(**kwargs)


@Command(
    "cramtest",
    category="testing",
    description="Mercurial style .t tests for command line applications.",
)
@CommandArgument(
    "test_paths",
    nargs="*",
    metavar="N",
    help="Test paths to run. Each path can be a test file or directory. "
    "If omitted, the entire suite will be run.",
)
@CommandArgument(
    "cram_args",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the cram binary. See "
    "'./mach python -m cram -- -h' for a list of available options.",
)
def cramtest(command_context, cram_args=None, test_paths=None, test_objects=None):
    command_context.activate_virtualenv()
    import mozinfo
    from manifestparser import TestManifest

    if test_objects is None:
        from moztest.resolve import TestResolver

        resolver = command_context._spawn(TestResolver)
        if test_paths:
            # If we were given test paths, try to find tests matching them.
            test_objects = resolver.resolve_tests(paths=test_paths, flavor="cram")
        else:
            # Otherwise just run everything in CRAMTEST_MANIFESTS.
            test_objects = resolver.resolve_tests(flavor="cram")

    if not test_objects:
        message = "No tests were collected; check the spelling of the test paths."
        command_context.log(logging.WARN, "cramtest", {}, message)
        return 1

    mp = TestManifest()
    mp.tests.extend(test_objects)
    tests = mp.active_tests(disabled=False, **mozinfo.info)

    python = command_context.virtualenv_manager.python_path
    # cram_args defaults to None when this function is called directly.
    cmd = [python, "-m", "cram"] + (cram_args or []) + [t["relpath"] for t in tests]
    return subprocess.call(cmd, cwd=command_context.topsrcdir)


from datetime import date, timedelta


@Command(
    "test-info", category="testing", description="Display historical test results."
)
def test_info(command_context):
    """
    All functions are implemented as subcommands.
    """


@SubCommand(
    "test-info",
    "tests",
    description="Display historical test result summary for named tests.",
)
@CommandArgument("test_names", nargs=argparse.REMAINDER, help="Test(s) of interest.")
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(7)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
@CommandArgument(
    "--show-info",
    action="store_true",
    help="Retrieve and display general test information.",
)
@CommandArgument(
    "--show-bugs",
    action="store_true",
    help="Retrieve and display related Bugzilla bugs.",
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_info_tests(
    command_context,
    test_names,
    start,
    end,
    show_info,
    show_bugs,
    verbose,
):
    import testinfo

    ti = testinfo.TestInfoTests(verbose)
    ti.report(
        test_names,
        start,
        end,
        show_info,
        show_bugs,
    )


@SubCommand(
    "test-info",
    "report",
    description="Generate a json report of test manifests and/or tests "
    "categorized by Bugzilla component and optionally filtered "
    "by path, component, and/or manifest annotations.",
)
@CommandArgument(
    "--components",
    default=None,
    help="Comma-separated list of Bugzilla components."
    " eg. Testing::General,Core::WebVR",
)
@CommandArgument(
    "--flavor",
    help='Limit results to tests of the specified flavor (eg. "xpcshell").',
)
@CommandArgument(
    "--subsuite",
    help='Limit results to tests of the specified subsuite (eg. "devtools").',
)
@CommandArgument(
    "paths", nargs=argparse.REMAINDER, help="File system paths of interest."
)
@CommandArgument(
    "--show-manifests",
    action="store_true",
    help="Include test manifests in report.",
)
@CommandArgument(
    "--show-tests", action="store_true", help="Include individual tests in report."
)
@CommandArgument(
    "--show-summary", action="store_true", help="Include summary in report."
)
@CommandArgument(
    "--show-annotations",
    action="store_true",
    help="Include list of manifest annotation conditions in report.",
)
@CommandArgument(
    "--show-testruns",
    action="store_true",
    help="Include total number of runs the test has if there are failures.",
)
@CommandArgument(
    "--filter-values",
    help="Comma-separated list of value regular expressions to filter on; "
    "displayed tests contain all specified values.",
)
@CommandArgument(
    "--filter-keys",
    help="Comma-separated list of test keys to filter on, "
    'like "skip-if"; only these fields will be searched '
    "for filter-values.",
)
@CommandArgument(
    "--no-component-report",
    action="store_false",
    dest="show_components",
    default=True,
    help="Do not categorize by bugzilla component.",
)
@CommandArgument("--output-file", help="Path to report file.")
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
def test_report(
    command_context,
    components,
    flavor,
    subsuite,
    paths,
    show_manifests,
    show_tests,
    show_summary,
    show_annotations,
    filter_values,
    filter_keys,
    show_components,
    output_file,
    verbose,
    start,
    end,
    show_testruns,
):
    import testinfo
    from mozbuild import build_commands

    try:
        command_context.config_environment
    except BuildEnvironmentNotFoundException:
        print("Looks like configure has not run yet, running it now...")
        build_commands.configure(command_context)

    ti = testinfo.TestInfoReport(verbose)
    ti.report(
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        filter_values,
        filter_keys,
        show_components,
        output_file,
        start,
        end,
        show_testruns,
    )


@SubCommand(
    "test-info",
    "report-diff",
    description='Compare two reports generated by "test-info report".',
)
@CommandArgument(
    "--before",
    default=None,
    help="The first (earlier) report file; path to local file or url.",
)
@CommandArgument(
    "--after", help="The second (later) report file; path to local file or url."
)
@CommandArgument(
    "--output-file",
    help="Path to report file to be written. If not specified, the report "
    "will be written to standard output.",
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_report_diff(command_context, before, after, output_file, verbose):
    import testinfo

    ti = testinfo.TestInfoReport(verbose)
    ti.report_diff(before, after, output_file)


@SubCommand(
    "test-info",
    "testrun-report",
    description="Generate a report of the number of runs for each test group (manifest).",
)
@CommandArgument("--output-file", help="Path to report file.")
def test_info_testrun_report(command_context, output_file):
    import json

    import testinfo

    ti = testinfo.TestInfoReport(verbose=True)
    if os.environ.get("GECKO_HEAD_REPOSITORY", "") in [
        "https://hg.mozilla.org/mozilla-central",
        "https://hg.mozilla.org/try",
    ]:
        runcounts = ti.get_runcounts()
        if output_file:
            output_file = os.path.abspath(output_file)
            output_dir = os.path.dirname(output_file)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)
            with open(output_file, "w") as f:
                json.dump(runcounts, f)
        else:
            print(runcounts)


@SubCommand(
    "test-info",
    "failure-report",
    description="Display failure line groupings and frequencies for "
    "single tracking intermittent bugs.",
)
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
@CommandArgument(
    "--bugid",
    default=None,
    help="bugid for treeherder intermittent failures data query.",
)
def test_info_failures(
    command_context,
    start,
    end,
    bugid,
):
    # bugid comes in as a string; we need an int:
    try:
        bugid = int(bugid)
    except (TypeError, ValueError):
        # int(None) raises TypeError when --bugid was not passed at all
        bugid = None
    if not bugid:
        print("Please enter a valid bugid (i.e. '1760132')")
        return

    # get bug info
    url = (
        "https://bugzilla.mozilla.org/rest/bug?include_fields=summary,depends_on&id=%s"
        % bugid
    )
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    if r.status_code != 200:
        print("%s error retrieving url: %s" % (r.status_code, url))
        return

    data = r.json()
    if not data:
        print("unable to get bugzilla information for %s" % bugid)
        return

    summary = data["bugs"][0]["summary"]
    parts = summary.split("|")
    if not summary.endswith("single tracking bug") or len(parts) != 2:
        print("this query only works with single tracking bugs")
        return

    # get depends_on bugs:
    buglist = [bugid]
    if "depends_on" in data["bugs"][0]:
        buglist.extend(data["bugs"][0]["depends_on"])

    testname = parts[0].strip().split(" ")[-1]

    # now query treeherder to get details about annotations
    data = []
    for b in buglist:
        url = "https://treeherder.mozilla.org/api/failuresbybug/"
        url += "?startday=%s&endday=%s&tree=trunk&bug=%s" % (start, end, b)
        r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
        r.raise_for_status()

        bdata = r.json()
        data.extend(bdata)

    if len(data) == 0:
        print("no failures were found for given bugid, please ensure bug is")
        print("accessible via: https://treeherder.mozilla.org/intermittent-failures")
        return

    # query VCS to get current list of variants:
    import yaml

    url = "https://hg.mozilla.org/mozilla-central/raw-file/tip/taskcluster/ci/test/variants.yml"
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    variants = yaml.safe_load(r.text)

    print(
        "\nQuerying data for bug %s annotated from %s to %s on trunk.\n\n"
        % (buglist, start, end)
    )

    jobs = {}
    lines = {}
    for failure in data:
        # config = platform/buildtype
        # testsuite (<suite>[-variant][-<chunk>])
        # lines - group by patterns that contain test name
        config = "%s/%s" % (failure["platform"], failure["build_type"])

        variant = ""
        suite = ""
        varpos = len(failure["test_suite"])
        for v in variants.keys():
            var = "-%s" % variants[v]["suffix"]
            if var in failure["test_suite"]:
                if failure["test_suite"].find(var) < varpos:
                    variant = var

        if variant:
            suite = failure["test_suite"].split(variant)[0]

        parts = failure["test_suite"].split("-")
        try:
            int(parts[-1])
            suite = "-".join(parts[:-1])
        except ValueError:
            pass  # if this works, then the last '-X' is a number :)

        if suite == "":
            print("Error: failure to find variant in %s" % failure["test_suite"])

        job = "%s-%s%s" % (config, suite, variant)
        if job not in jobs.keys():
            jobs[job] = 0
        jobs[job] += 1
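
        # Group identical failure messages across jobs: each log line that
        # mentions the test is normalized below (timestamp/mozharness prefix
        # stripped, only 25 chars kept after the test name) and the hashes are
        # summed to form the bucket key used in `lines`.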

        # lines - sum(hash) of all lines where we match testname
        hvalue = 0
        for line in failure["lines"]:
            if len(line.split(testname)) <= 1:
                continue
            # strip off the timestamp and mozharness status
            parts = line.split("TEST-UNEXPECTED")
            l = "TEST-UNEXPECTED%s" % parts[-1]

            # only keep 25 characters of the failure; longer is often random numbers
            parts = l.split(testname)
            l = "%s%s%s" % (parts[0], testname, parts[1][:25])

            hvalue += hash(l)

        # a failure with no log lines still gets a (shared) bucket
        if not failure["lines"]:
            hvalue = 1

        if not hvalue:
            continue

        if hvalue not in lines.keys():
            lines[hvalue] = {"lines": failure["lines"], "config": []}
        lines[hvalue]["config"].append(job)

    for h in lines.keys():
        print("%s errors with:" % (len(lines[h]["config"])))
        if lines[h]["lines"]:
            for l in lines[h]["lines"]:
                print(l)
        else:
            print(
                "... no failure lines recorded in"
                " https://treeherder.mozilla.org/intermittent-failures ..."
            )

        for job in jobs:
            count = len([x for x in lines[h]["config"] if x == job])
            if count > 0:
                print(" %s: %s" % (job, count))
        print("")
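
# Example: `./mach test-info failure-report --bugid 1760132` (the bugid used
# in the validation hint above) prints, for each failure-line bucket, which
# jobs Treeherder annotated and how often, over the --start/--end range.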


@Command(
    "rusttests",
    category="testing",
    conditions=[conditions.is_non_artifact_build],
    description="Run rust unit tests (via cargo test).",
)
def run_rusttests(command_context, **kwargs):
    return command_context._mach_context.commands.dispatch(
        "build",
        command_context._mach_context,
        what=["pre-export", "export", "recurse_rusttests"],
    )


@Command(
    "fluent-migration-test",
    category="testing",
    description="Test Fluent migration recipes.",
)
@CommandArgument("test_paths", nargs="*", metavar="N", help="Recipe paths to test.")
def run_migration_tests(command_context, test_paths=None, **kwargs):
    if not test_paths:
        test_paths = []
    command_context.activate_virtualenv()
    from test_fluent_migrations import fmt

    rv = 0
    with_context = []
    for to_test in test_paths:
        try:
            context = fmt.inspect_migration(to_test)
            for issue in context["issues"]:
                command_context.log(
                    logging.ERROR,
                    "fluent-migration-test",
                    {
                        "error": issue["msg"],
                        "file": to_test,
                    },
                    "ERROR in {file}: {error}",
                )
            if context["issues"]:
                continue
            with_context.append(
                {
                    "to_test": to_test,
                    "references": context["references"],
                }
            )
        except Exception as e:
            command_context.log(
                logging.ERROR,
                "fluent-migration-test",
                {"error": str(e), "file": to_test},
                "ERROR in {file}: {error}",
            )
            rv |= 1
    obj_dir = fmt.prepare_object_dir(command_context)
    for context in with_context:
        rv |= fmt.test_migration(command_context, obj_dir, **context)
    return rv