Bug 1787947 - pref on CSS named pages in Nightly r=dholbert
[gecko.git] / testing / mach_commands.py
blobfeb7a4440771133e2e9da74d2a1ca92f752dfc37
1 # This Source Code Form is subject to the terms of the Mozilla Public
2 # License, v. 2.0. If a copy of the MPL was not distributed with this
3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
5 from __future__ import absolute_import, print_function, unicode_literals
7 import argparse
8 import logging
9 import os
10 import sys
11 import subprocess
13 from mach.decorators import (
14 CommandArgument,
15 Command,
16 SettingsProvider,
17 SubCommand,
20 from mozbuild.base import (
21 BuildEnvironmentNotFoundException,
22 MachCommandConditions as conditions,
25 UNKNOWN_TEST = """
26 I was unable to find tests from the given argument(s).
28 You should specify a test directory, filename, test suite name, or
29 abbreviation.
31 It's possible my little brain doesn't know about the type of test you are
32 trying to execute. If you suspect this, please request support by filing
33 a bug at
34 https://bugzilla.mozilla.org/enter_bug.cgi?product=Testing&component=General.
35 """.strip()
37 UNKNOWN_FLAVOR = """
38 I know you are trying to run a %s%s test. Unfortunately, I can't run those
39 tests yet. Sorry!
40 """.strip()
42 TEST_HELP = """
43 Test or tests to run. Tests can be specified by filename, directory, suite
44 name or suite alias.
46 The following test suites and aliases are supported: {}
47 """.strip()
@SettingsProvider
class TestConfig(object):
    """Declares the `test.*` settings that mach's settings system recognizes."""

    @classmethod
    def config_settings(cls):
        # Imported lazily so merely loading this module does not pull in mozlog.
        from mozlog.commandline import log_formatters
        from mozlog.structuredlog import log_levels

        format_choices = list(log_formatters)
        level_choices = [level.lower() for level in log_levels]
        format_desc = "The default format to use when running tests with `mach test`."
        level_desc = "The default log level to use when running tests with `mach test`."
        return [
            ("test.format", "string", format_desc, "mach", {"choices": format_choices}),
            ("test.level", "string", level_desc, "info", {"choices": level_choices}),
        ]
def get_test_parser():
    """Build the argument parser used by the `mach test` command."""
    # Imported lazily; these pull in the full mozlog/moztest machinery.
    from mozlog.commandline import add_logging_group
    from moztest.resolve import TEST_SUITES

    suite_summary = ", ".join(sorted(TEST_SUITES))
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "what",
        default=None,
        nargs="+",
        help=TEST_HELP.format(suite_summary),
    )
    parser.add_argument(
        "extra_args",
        default=None,
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass to the underlying test command(s). "
        "If an underlying command doesn't recognize the argument, it "
        "will fail.",
    )
    parser.add_argument(
        "--debugger",
        default=None,
        action="store",
        nargs="?",
        help="Specify a debugger to use.",
    )
    add_logging_group(parser)
    return parser
# Suites `mach addtest` knows how to generate new tests for.
ADD_TEST_SUPPORTED_SUITES = [
    "mochitest-chrome",
    "mochitest-plain",
    "mochitest-browser-chrome",
    "web-platform-tests-testharness",
    "web-platform-tests-reftest",
    "xpcshell",
]

# Document types (file extensions, without the dot) `mach addtest` can generate.
ADD_TEST_SUPPORTED_DOCS = ["js", "html", "xhtml", "xul"]

# Short command-line aliases mapped to canonical suite names.
SUITE_SYNONYMS = {
    "wpt": "web-platform-tests-testharness",
    "wpt-testharness": "web-platform-tests-testharness",
    "wpt-reftest": "web-platform-tests-reftest",
}

# Sentinel distinguishing "--editor given with no value" from "--editor not
# given at all"; None is a meaningful value for that option, so a plain
# None default would be ambiguous.
MISSING_ARG = object()
def create_parser_addtest():
    """Build the argument parser used by the `mach addtest` command."""
    import addtest

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--suite",
        choices=sorted(ADD_TEST_SUPPORTED_SUITES + list(SUITE_SYNONYMS.keys())),
        help="suite for the test. "
        "If you pass a `test` argument this will be determined "
        "based on the filename and the folder it is in",
    )
    parser.add_argument(
        "-o",
        "--overwrite",
        action="store_true",
        help="Overwrite an existing file if it exists.",
    )
    parser.add_argument(
        "--doc",
        choices=ADD_TEST_SUPPORTED_DOCS,
        help="Document type for the test (if applicable)."
        "If you pass a `test` argument this will be determined "
        "based on the filename.",
    )
    parser.add_argument(
        "-e",
        "--editor",
        action="store",
        nargs="?",
        default=MISSING_ARG,
        help="Open the created file(s) in an editor; if a "
        "binary is supplied it will be used otherwise the default editor for "
        "your environment will be opened",
    )

    # Let each registered test creator contribute its own option group.
    for base_suite, creator_cls in addtest.TEST_CREATORS.items():
        if hasattr(creator_cls, "get_parser"):
            creator_cls.get_parser(parser.add_argument_group(base_suite))

    parser.add_argument("test", nargs="?", help=("Test to create."))
    return parser
@Command(
    "addtest",
    category="testing",
    description="Generate tests based on templates",
    parser=create_parser_addtest,
)
def addtest(
    command_context,
    suite=None,
    test=None,
    doc=None,
    overwrite=False,
    editor=MISSING_ARG,
    **kwargs,
):
    """Create one or more new test files from suite-specific templates.

    Guesses `doc` (document type) and `suite` from the target filename and
    its directory when they are not given, writes each generated template to
    disk (or stdout when no path was supplied), updates the relevant test
    manifest, and optionally opens the new files in an editor.

    Returns 0 on success, 1 on any user-facing error.
    """
    # NOTE: the local module `addtest` intentionally shares this function's
    # name; after this import the name refers to the module inside the body.
    import addtest
    import io
    from moztest.resolve import TEST_SUITES

    # No arguments at all: show the command's help instead of failing.
    if not suite and not test:
        return create_parser_addtest().parse_args(["--help"])

    # Expand short aliases (e.g. "wpt") to canonical suite names.
    if suite in SUITE_SYNONYMS:
        suite = SUITE_SYNONYMS[suite]

    if test:
        if not overwrite and os.path.isfile(os.path.abspath(test)):
            print("Error: can't generate a test that already exists:", test)
            return 1

        abs_test = os.path.abspath(test)
        if doc is None:
            doc = guess_doc(abs_test)
        if suite is None:
            guessed_suite, err = guess_suite(abs_test)
            if err:
                print(err)
                return 1
            suite = guessed_suite

    else:
        test = None
        if doc is None:
            doc = "html"

    if not suite:
        print(
            "We couldn't automatically determine a suite. "
            "Please specify `--suite` with one of the following options:\n{}\n"
            "If you'd like to add support to a new suite, please file a bug "
            "blocking https://bugzilla.mozilla.org/show_bug.cgi?id=1540285.".format(
                ADD_TEST_SUPPORTED_SUITES
            )
        )
        return 1

    if doc not in ADD_TEST_SUPPORTED_DOCS:
        print(
            "Error: invalid `doc`. Either pass in a test with a valid extension"
            "({}) or pass in the `doc` argument".format(ADD_TEST_SUPPORTED_DOCS)
        )
        return 1

    creator_cls = addtest.creator_for_suite(suite)

    if creator_cls is None:
        print("Sorry, `addtest` doesn't currently know how to add {}".format(suite))
        return 1

    creator = creator_cls(command_context.topsrcdir, test, suite, doc, **kwargs)

    creator.check_args()

    paths = []
    added_tests = False
    # The creator yields (path, template) pairs; an empty template is skipped.
    for path, template in creator:
        if not template:
            continue
        added_tests = True
        if path:
            paths.append(path)
            print("Adding a test file at {} (suite `{}`)".format(path, suite))

            # Best-effort directory creation; an existing directory raises
            # OSError, which is deliberately ignored here.
            try:
                os.makedirs(os.path.dirname(path))
            except OSError:
                pass

            with io.open(path, "w", newline="\n") as f:
                f.write(template)
        else:
            # write to stdout if you passed only suite and doc and not a file path
            print(template)

    if not added_tests:
        return 1

    if test:
        creator.update_manifest()

        # Small hack, should really do this better
        if suite.startswith("wpt-"):
            suite = "web-platform-tests"

        mach_command = TEST_SUITES[suite]["mach_command"]
        print(
            "Please make sure to add the new test to your commit. "
            "You can now run the test with:\n ./mach {} {}".format(
                mach_command, test
            )
        )

    # MISSING_ARG means --editor was not passed at all; None means it was
    # passed without a value, in which case fall back to $VISUAL/$EDITOR.
    if editor is not MISSING_ARG:
        if editor is not None:
            editor = editor
        elif "VISUAL" in os.environ:
            editor = os.environ["VISUAL"]
        elif "EDITOR" in os.environ:
            editor = os.environ["EDITOR"]
        else:
            print("Unable to determine editor; please specify a binary")
            editor = None

        proc = None
        if editor:
            import subprocess

            proc = subprocess.Popen("%s %s" % (editor, " ".join(paths)), shell=True)

        if proc:
            proc.wait()

    return 0
def guess_doc(abs_test):
    """Return the document type implied by *abs_test*'s extension (no dot)."""
    _, extension = os.path.splitext(os.path.basename(abs_test))
    return extension.strip(".")
def guess_suite(abs_test):
    """Infer the test suite for *abs_test* from its name and nearby manifests.

    Returns a ``(suite_name, error_message)`` pair; ``suite_name`` is None
    when no confident guess could be made, and ``error_message`` is set only
    for the ambiguous chrome-vs-plain mochitest case.
    """
    err = None
    guessed_suite = None
    parent, filename = os.path.split(abs_test)

    def has_manifest(manifest_name):
        return os.path.isfile(os.path.join(parent, manifest_name))

    has_browser_ini = has_manifest("browser.ini")
    has_chrome_ini = has_manifest("chrome.ini")
    has_plain_ini = has_manifest("mochitest.ini")
    has_xpcshell_ini = has_manifest("xpcshell.ini")

    wpt_root = os.path.abspath(os.path.join("testing", "web-platform"))
    if abs_test.startswith(wpt_root):
        # Inside web-platform tree: css/ holds reftests, otherwise testharness.
        if "/css/" in abs_test:
            guessed_suite = "web-platform-tests-reftest"
        else:
            guessed_suite = "web-platform-tests-testharness"
    elif (
        filename.startswith("test_")
        and has_xpcshell_ini
        and guess_doc(abs_test) == "js"
    ):
        guessed_suite = "xpcshell"
    elif filename.startswith("browser_") and has_browser_ini:
        guessed_suite = "mochitest-browser-chrome"
    elif filename.startswith("test_"):
        if has_chrome_ini and has_plain_ini:
            err = (
                "Error: directory contains both a chrome.ini and mochitest.ini. "
                "Please set --suite=mochitest-chrome or --suite=mochitest-plain."
            )
        elif has_chrome_ini:
            guessed_suite = "mochitest-chrome"
        elif has_plain_ini:
            guessed_suite = "mochitest-plain"
    return guessed_suite, err
@Command(
    "test",
    category="testing",
    description="Run tests (detects the kind of test and runs it).",
    parser=get_test_parser,
)
def test(command_context, what, extra_args, **log_args):
    """Run tests from names or paths.

    mach test accepts arguments specifying which tests to run. Each argument
    can be:

    * The path to a test file
    * A directory containing tests
    * A test suite name
    * An alias to a test suite name (codes used on TreeHerder)

    When paths or directories are given, they are first resolved to test
    files known to the build system.

    If resolved tests belong to more than one test type/flavor/harness,
    the harness for each relevant type/flavor will be invoked. e.g. if
    you specify a directory with xpcshell and browser chrome mochitests,
    both harnesses will be invoked.

    Warning: `mach test` does not automatically re-build.
    Please remember to run `mach build` when necessary.

    EXAMPLES

    Run all test files in the devtools/client/shared/redux/middleware/xpcshell/
    directory:

    `./mach test devtools/client/shared/redux/middleware/xpcshell/`

    The below command prints a short summary of results instead of
    the default more verbose output.
    Do not forget the - (minus sign) after --log-grouped!

    `./mach test --log-grouped - devtools/client/shared/redux/middleware/xpcshell/`

    To learn more about arguments for each test type/flavor/harness, please run
    `./mach <test-harness> --help`. For example, `./mach mochitest --help`.
    """
    from mozlog.commandline import setup_logging
    from mozlog.handlers import StreamHandler
    from moztest.resolve import get_suite_definition, TestResolver, TEST_SUITES

    # Resolve the positional arguments into whole suites and individual tests.
    resolver = command_context._spawn(TestResolver)
    run_suites, run_tests = resolver.resolve_metadata(what)

    if not run_suites and not run_tests:
        print(UNKNOWN_TEST)
        return 1

    # When a debugger is requested, validate it and forward it to the
    # underlying harnesses as `--debugger=<name>`.
    if log_args.get("debugger", None):
        import mozdebug

        if not mozdebug.get_debugger_info(log_args.get("debugger")):
            sys.exit(1)
        extra_args_debugger_notation = "=".join(
            ["--debugger", log_args.get("debugger")]
        )
        if extra_args:
            extra_args.append(extra_args_debugger_notation)
        else:
            extra_args = [extra_args_debugger_notation]

    # Create shared logger
    format_args = {"level": command_context._mach_context.settings["test"]["level"]}
    if not run_suites and len(run_tests) == 1:
        # A single test gets verbose, non-compact output by default.
        format_args["verbose"] = True
        format_args["compact"] = False

    default_format = command_context._mach_context.settings["test"]["format"]
    log = setup_logging(
        "mach-test", log_args, {default_format: sys.stdout}, format_args
    )
    for handler in log.handlers:
        if isinstance(handler, StreamHandler):
            handler.formatter.inner.summary_on_shutdown = True

    # `status` stays None unless some harness reports a failure; the last
    # non-zero result wins.
    status = None
    for suite_name in run_suites:
        suite = TEST_SUITES[suite_name]
        kwargs = suite["kwargs"]
        kwargs["log"] = log
        kwargs.setdefault("subsuite", None)

        if "mach_command" in suite:
            res = command_context._mach_context.commands.dispatch(
                suite["mach_command"],
                command_context._mach_context,
                argv=extra_args,
                **kwargs,
            )
            if res:
                status = res

    # Group the individually-resolved tests by (flavor, subsuite) so each
    # harness is invoked once per bucket.
    buckets = {}
    for test in run_tests:
        key = (test["flavor"], test.get("subsuite", ""))
        buckets.setdefault(key, []).append(test)

    for (flavor, subsuite), tests in sorted(buckets.items()):
        _, m = get_suite_definition(flavor, subsuite)
        if "mach_command" not in m:
            substr = "-{}".format(subsuite) if subsuite else ""
            print(UNKNOWN_FLAVOR % (flavor, substr))
            status = 1
            continue

        kwargs = dict(m["kwargs"])
        kwargs["log"] = log
        kwargs.setdefault("subsuite", None)

        res = command_context._mach_context.commands.dispatch(
            m["mach_command"],
            command_context._mach_context,
            argv=extra_args,
            test_objects=tests,
            **kwargs,
        )
        if res:
            status = res

    log.shutdown()
    return status
@Command(
    "cppunittest", category="testing", description="Run cpp unit tests (C++ tests)."
)
@CommandArgument(
    "test_files",
    nargs="*",
    metavar="N",
    help="Test to run. Can be specified as one or more files or "
    "directories, or omitted. If omitted, the entire test suite is "
    "executed.",
)
def run_cppunit_test(command_context, **params):
    """Run C++ unit tests, on the desktop build or (for Android) on a device.

    With no test files given, runs everything listed in the main
    testing/cppunittest.ini manifest. Returns the harness exit status
    (0 on success).
    """
    from mozlog import commandline

    log = params.get("log")
    if not log:
        log = commandline.setup_logging("cppunittest", {}, {"tbpl": sys.stdout})

    # See if we have crash symbols
    symbols_path = os.path.join(command_context.distdir, "crashreporter-symbols")
    if not os.path.isdir(symbols_path):
        symbols_path = None

    # If no tests specified, run all tests in main manifest
    tests = params["test_files"]
    if not tests:
        tests = [os.path.join(command_context.distdir, "cppunittests")]
        manifest_path = os.path.join(
            command_context.topsrcdir, "testing", "cppunittest.ini"
        )
    else:
        manifest_path = None

    utility_path = command_context.bindir

    if conditions.is_android(command_context):
        from mozrunner.devices.android_device import (
            verify_android_device,
            InstallIntent,
        )

        verify_android_device(command_context, install=InstallIntent.NO)
        # BUG FIX: run_android_test takes command_context as its first
        # parameter; it was previously called without it, shifting every
        # argument by one position.
        return run_android_test(
            command_context, tests, symbols_path, manifest_path, log
        )

    return run_desktop_test(
        command_context, tests, symbols_path, manifest_path, utility_path, log
    )
def run_desktop_test(
    command_context, tests, symbols_path, manifest_path, utility_path, log
):
    """Run cppunittests against the local desktop build.

    Returns 0 if the harness reports success, 1 otherwise; unexpected
    exceptions are logged and re-raised so mach can surface the traceback.
    """
    import runcppunittests as cppunittests
    from mozlog import commandline

    parser = cppunittests.CPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.utility_path = utility_path
    options.xre_path = command_context.bindir

    try:
        result = cppunittests.run_test_harness(options, tests)
    except Exception as e:
        # Log for visibility, then propagate. (Previously a dead
        # `result = False` assignment sat before the re-raise; it could
        # never be observed and has been removed.)
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        raise

    return 0 if result else 1
def run_android_test(command_context, tests, symbols_path, manifest_path, log):
    """Run cppunittests remotely on an Android device.

    Locates the adb binary and a local fennec APK under the object
    directory, configures the remote harness, and returns 0 on success,
    1 on failure. Unexpected exceptions are logged and re-raised.
    """
    import remotecppunittests as remotecppunittests
    from mozlog import commandline

    parser = remotecppunittests.RemoteCPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    if not options.adb_path:
        from mozrunner.devices.android_device import get_adb_path

        options.adb_path = get_adb_path(command_context)
    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.xre_path = command_context.bindir
    # presumably the libs live next to a "fennec" sibling of bindir —
    # TODO confirm this path substitution against current build layout.
    options.local_lib = command_context.bindir.replace("bin", "fennec")
    # Use the first fennec*.apk found in dist/ as the app to test against.
    for file in os.listdir(os.path.join(command_context.topobjdir, "dist")):
        if file.endswith(".apk") and file.startswith("fennec"):
            options.local_apk = os.path.join(command_context.topobjdir, "dist", file)
            log.info("using APK: " + options.local_apk)
            break

    try:
        result = remotecppunittests.run_test_harness(options, tests)
    except Exception as e:
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        # NOTE(review): this assignment is dead — the `raise` below always
        # propagates before `result` can be read.
        result = False
        raise

    return 0 if result else 1
def executable_name(name):
    """Return *name* with the platform executable suffix (".exe" on Windows)."""
    suffix = ".exe" if sys.platform.startswith("win") else ""
    return name + suffix
@Command(
    "jstests",
    category="testing",
    description="Run SpiderMonkey JS tests in the JS shell.",
    ok_if_tests_disabled=True,
)
@CommandArgument("--shell", help="The shell to be used")
@CommandArgument(
    "params",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
)
def run_jstests(command_context, shell, params):
    """Run the SpiderMonkey jstests suite via jstests.py.

    Uses the given --shell binary, falling back to the built `js` in
    bindir. Returns the harness's exit status.
    """
    import subprocess

    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    jstest_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "js", "src", "tests", "jstests.py"),
        # BUG FIX: the shell path was computed but never passed to
        # jstests.py, which requires it as its first positional argument.
        js,
    ] + params

    return subprocess.call(jstest_cmd)
@Command(
    "jit-test",
    category="testing",
    description="Run SpiderMonkey jit-tests in the JS shell.",
    ok_if_tests_disabled=True,
)
@CommandArgument("--shell", help="The shell to be used")
@CommandArgument(
    "--cgc",
    action="store_true",
    default=False,
    help="Run with the SM(cgc) job's env vars",
)
@CommandArgument(
    "params",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
)
def run_jittests(command_context, shell, cgc, params):
    """Run SpiderMonkey jit-tests via jit_test.py.

    Uses the given --shell binary (default: the built `js` in bindir).
    With --cgc, sets JS_GC_ZEAL to mirror the SM(cgc) CI job. Returns the
    harness's exit status.
    """
    import subprocess

    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    jittest_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "js", "src", "jit-test", "jit_test.py"),
        # BUG FIX: the shell path was computed but never added to the
        # command, so jit_test.py was invoked without its required shell
        # argument.
        js,
    ] + params

    env = os.environ.copy()
    if cgc:
        env["JS_GC_ZEAL"] = "IncrementalMultipleSlices"

    return subprocess.call(jittest_cmd, env=env)
@Command("jsapi-tests", category="testing", description="Run SpiderMonkey JSAPI tests.")
@CommandArgument(
    "test_name",
    nargs="?",
    metavar="N",
    help="Test to run. Can be a prefix or omitted. If "
    "omitted, the entire test suite is executed.",
)
def run_jsapitests(command_context, test_name=None):
    """Run the jsapi-tests binary, optionally restricted to *test_name*."""
    import subprocess

    cmd = [os.path.join(command_context.bindir, executable_name("jsapi-tests"))]
    if test_name:
        cmd.append(test_name)

    env = os.environ.copy()
    env["TOPSRCDIR"] = command_context.topsrcdir

    result = subprocess.call(cmd, env=env)
    if result != 0:
        print(f"jsapi-tests failed, exit code {result}")
    return result
def run_check_js_msg(command_context):
    """Run config/check_js_msg_encoding.py with the build virtualenv's python."""
    import subprocess

    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    script = os.path.join(
        command_context.topsrcdir, "config", "check_js_msg_encoding.py"
    )
    return subprocess.call([python, script])
def get_jsshell_parser():
    """Return the argument parser provided by the jsshell benchmark harness."""
    from jsshell.benchmark import get_parser

    parser = get_parser()
    return parser
@Command(
    "jsshell-bench",
    category="testing",
    parser=get_jsshell_parser,
    description="Run benchmarks in the SpiderMonkey JS shell.",
)
def run_jsshelltests(command_context, **kwargs):
    """Forward all parsed options straight to the jsshell benchmark runner."""
    from jsshell import benchmark

    return benchmark.run(**kwargs)
@Command(
    "cramtest",
    category="testing",
    description="Mercurial style .t tests for command line applications.",
)
@CommandArgument(
    "test_paths",
    nargs="*",
    metavar="N",
    help="Test paths to run. Each path can be a test file or directory. "
    "If omitted, the entire suite will be run.",
)
@CommandArgument(
    "cram_args",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the cram binary. See "
    "'./mach python -m cram -- -h' for a list of available options.",
)
def cramtest(command_context, cram_args=None, test_paths=None, test_objects=None):
    """Run cram (.t) tests resolved from *test_paths*, or the whole suite.

    Returns 1 when no tests are collected, otherwise cram's exit status.
    """
    command_context.activate_virtualenv()
    import mozinfo
    from manifestparser import TestManifest

    # BUG FIX: cram_args defaults to None but is concatenated to a list
    # below, which raised TypeError when the command was invoked
    # programmatically without extra cram arguments.
    cram_args = cram_args or []

    if test_objects is None:
        from moztest.resolve import TestResolver

        resolver = command_context._spawn(TestResolver)
        if test_paths:
            # If we were given test paths, try to find tests matching them.
            test_objects = resolver.resolve_tests(paths=test_paths, flavor="cram")
        else:
            # Otherwise just run everything in CRAMTEST_MANIFESTS
            test_objects = resolver.resolve_tests(flavor="cram")

    if not test_objects:
        message = "No tests were collected, check spelling of the test paths."
        command_context.log(logging.WARN, "cramtest", {}, message)
        return 1

    mp = TestManifest()
    mp.tests.extend(test_objects)
    tests = mp.active_tests(disabled=False, **mozinfo.info)

    python = command_context.virtualenv_manager.python_path
    cmd = [python, "-m", "cram"] + cram_args + [t["relpath"] for t in tests]
    return subprocess.call(cmd, cwd=command_context.topsrcdir)
756 from datetime import date, timedelta
@Command(
    "test-info", category="testing", description="Display historical test results."
)
def test_info(command_context):
    """Entry point for `mach test-info`; all behavior lives in subcommands."""
@SubCommand(
    "test-info",
    "tests",
    description="Display historical test result summary for named tests.",
)
@CommandArgument("test_names", nargs=argparse.REMAINDER, help="Test(s) of interest.")
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(7)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
@CommandArgument(
    "--show-info",
    action="store_true",
    help="Retrieve and display general test information.",
)
@CommandArgument(
    "--show-bugs",
    action="store_true",
    help="Retrieve and display related Bugzilla bugs.",
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_info_tests(
    command_context,
    test_names,
    start,
    end,
    show_info,
    show_bugs,
    verbose,
):
    """Print a historical result summary for the named tests."""
    import testinfo

    reporter = testinfo.TestInfoTests(verbose)
    reporter.report(test_names, start, end, show_info, show_bugs)
@SubCommand(
    "test-info",
    "report",
    description="Generate a json report of test manifests and/or tests "
    "categorized by Bugzilla component and optionally filtered "
    "by path, component, and/or manifest annotations.",
)
@CommandArgument(
    "--components",
    default=None,
    help="Comma-separated list of Bugzilla components."
    " eg. Testing::General,Core::WebVR",
)
@CommandArgument(
    "--flavor",
    help='Limit results to tests of the specified flavor (eg. "xpcshell").',
)
@CommandArgument(
    "--subsuite",
    help='Limit results to tests of the specified subsuite (eg. "devtools").',
)
@CommandArgument(
    "paths", nargs=argparse.REMAINDER, help="File system paths of interest."
)
@CommandArgument(
    "--show-manifests",
    action="store_true",
    help="Include test manifests in report.",
)
@CommandArgument(
    "--show-tests", action="store_true", help="Include individual tests in report."
)
@CommandArgument(
    "--show-summary", action="store_true", help="Include summary in report."
)
@CommandArgument(
    "--show-annotations",
    action="store_true",
    help="Include list of manifest annotation conditions in report.",
)
@CommandArgument(
    "--filter-values",
    help="Comma-separated list of value regular expressions to filter on; "
    "displayed tests contain all specified values.",
)
@CommandArgument(
    "--filter-keys",
    help="Comma-separated list of test keys to filter on, "
    'like "skip-if"; only these fields will be searched '
    "for filter-values.",
)
@CommandArgument(
    "--no-component-report",
    action="store_false",
    dest="show_components",
    default=True,
    help="Do not categorize by bugzilla component.",
)
@CommandArgument("--output-file", help="Path to report file.")
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
def test_report(
    command_context,
    components,
    flavor,
    subsuite,
    paths,
    show_manifests,
    show_tests,
    show_summary,
    show_annotations,
    filter_values,
    filter_keys,
    show_components,
    output_file,
    verbose,
    start,
    end,
):
    """Generate the `test-info report` JSON; all real work happens in
    testinfo.TestInfoReport, this function only ensures a configured build
    and forwards every option verbatim.
    """
    import testinfo
    from mozbuild import build_commands

    # testinfo needs a configured build environment; configure on demand.
    try:
        command_context.config_environment
    except BuildEnvironmentNotFoundException:
        print("Looks like configure has not run yet, running it now...")
        build_commands.configure(command_context)

    ti = testinfo.TestInfoReport(verbose)
    ti.report(
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        filter_values,
        filter_keys,
        show_components,
        output_file,
        start,
        end,
    )
@SubCommand(
    "test-info",
    "report-diff",
    description='Compare two reports generated by "test-info reports".',
)
@CommandArgument(
    "--before",
    default=None,
    help="The first (earlier) report file; path to local file or url.",
)
@CommandArgument(
    "--after", help="The second (later) report file; path to local file or url."
)
@CommandArgument(
    "--output-file",
    help="Path to report file to be written. If not specified, report"
    "will be written to standard output.",
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_report_diff(command_context, before, after, output_file, verbose):
    """Diff two test-info reports and write the result to *output_file*."""
    import testinfo

    reporter = testinfo.TestInfoReport(verbose)
    reporter.report_diff(before, after, output_file)
@SubCommand(
    "test-info",
    "failure-report",
    description="Display failure line groupings and frequencies for "
    "single tracking intermittent bugs.",
)
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
@CommandArgument(
    "--bugid",
    default=None,
    help="bugid for treeherder intermittent failures data query.",
)
def test_info_failures(
    command_context,
    start,
    end,
    bugid,
):
    """Summarize intermittent-failure annotations for a single tracking bug.

    Queries Bugzilla for the bug (and its depends_on bugs), Treeherder for
    per-job failure annotations in [start, end], and the in-tree variants.yml
    to split job names into suite/variant, then prints failure-line groupings
    with per-job frequencies.
    """
    import requests

    # bugid comes in as a string, we need an int:
    try:
        bugid = int(bugid)
    except ValueError:
        bugid = None
    if not bugid:
        print("Please enter a valid bugid (i.e. '1760132')")
        return

    # get bug info
    url = (
        "https://bugzilla.mozilla.org/rest/bug?include_fields=summary,depends_on&id=%s"
        % bugid
    )
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    if r.status_code != 200:
        print("%s error retrieving url: %s" % (r.status_code, url))

    data = r.json()
    if not data:
        print("unable to get bugzilla information for %s" % bugid)
        return

    # Only "<testname> | single tracking bug" style summaries are supported.
    summary = data["bugs"][0]["summary"]
    parts = summary.split("|")
    if not summary.endswith("single tracking bug") or len(parts) != 2:
        print("this query only works with single tracking bugs")
        return

    # get depends_on bugs:
    buglist = [bugid]
    if "depends_on" in data["bugs"][0]:
        buglist.extend(data["bugs"][0]["depends_on"])

    # The test name is the last space-separated token before the "|".
    testname = parts[0].strip().split(" ")[-1]

    # now query treeherder to get details about annotations
    data = []
    for b in buglist:
        url = "https://treeherder.mozilla.org/api/failuresbybug/"
        url += "?startday=%s&endday=%s&tree=trunk&bug=%s" % (start, end, b)
        r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
        r.raise_for_status()

        bdata = r.json()
        data.extend(bdata)

    if len(data) == 0:
        print("no failures were found for given bugid, please ensure bug is")
        print("accessible via: https://treeherder.mozilla.org/intermittent-failures")
        return

    # query VCS to get current list of variants:
    import yaml

    url = "https://hg.mozilla.org/mozilla-central/raw-file/tip/taskcluster/ci/test/variants.yml"
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    variants = yaml.safe_load(r.text)

    print(
        "\nQuerying data for bug %s annotated from %s to %s on trunk.\n\n"
        % (buglist, start, end)
    )
    jobs = {}
    lines = {}
    for failure in data:
        # config = platform/buildtype
        # testsuite (<suite>[-variant][-<chunk>])
        # lines - group by patterns that contain test name
        config = "%s/%s" % (failure["platform"], failure["build_type"])

        # Find the variant whose suffix occurs earliest in the suite name.
        variant = ""
        suite = ""
        varpos = len(failure["test_suite"])
        for v in variants.keys():
            var = "-%s" % variants[v]["suffix"]
            if var in failure["test_suite"]:
                if failure["test_suite"].find(var) < varpos:
                    variant = var

        if variant:
            suite = failure["test_suite"].split(variant)[0]
        else:
            # No variant: strip a trailing numeric chunk ("-3") if present.
            parts = failure["test_suite"].split("-")
            try:
                int(parts[-1])
                suite = "-".join(parts[:-1])
            except ValueError:
                pass  # if this works, then the last '-X' is a number :)

        if suite == "":
            print("Error: failure to find variant in %s" % failure["test_suite"])

        job = "%s-%s%s" % (config, suite, variant)
        if job not in jobs.keys():
            jobs[job] = 0
        jobs[job] += 1

        # lines - sum(hash) of all lines where we match testname
        hvalue = 0
        for line in failure["lines"]:
            if len(line.split(testname)) <= 1:
                continue
            # strip off timestamp and mozharness status
            parts = line.split("TEST-UNEXPECTED")
            l = "TEST-UNEXPECTED%s" % parts[-1]

            # only keep 25 characters of the failure, often longer is random numbers
            parts = l.split(testname)
            l = "%s%s%s" % (parts[0], testname, parts[1][:25])

            hvalue += hash(l)

        # A failure with no log lines at all still gets bucketed (hvalue=1);
        # a failure whose lines never mention the test name is skipped.
        if not failure["lines"]:
            hvalue = 1

        if not hvalue:
            continue

        if hvalue not in lines.keys():
            lines[hvalue] = {"lines": failure["lines"], "config": []}
        lines[hvalue]["config"].append(job)

    for h in lines.keys():
        print("%s errors with:" % (len(lines[h]["config"])))
        for l in lines[h]["lines"]:
            print(l)
        else:
            # NOTE(review): this is a for/else — with no `break` in the loop
            # the else clause runs unconditionally, so this message prints
            # even when failure lines WERE shown above. Likely intended as
            # `if lines[h]["lines"]: ... else: ...` — confirm before changing.
            print(
                "... no failure lines recorded in"
                " https://treeherder.mozilla.org/intermittent-failures ..."
            )

        for job in jobs:
            count = len([x for x in lines[h]["config"] if x == job])
            if count > 0:
                print(" %s: %s" % (job, count))
        print("")
@Command(
    "rusttests",
    category="testing",
    conditions=[conditions.is_non_artifact_build],
    description="Run rust unit tests (via cargo test).",
)
def run_rusttests(command_context, **kwargs):
    """Run Rust unit tests by driving the build system's rusttests tier."""
    ctx = command_context._mach_context
    return ctx.commands.dispatch(
        "build", ctx, what=["pre-export", "export", "recurse_rusttests"]
    )
@Command(
    "fluent-migration-test",
    category="testing",
    description="Test Fluent migration recipes.",
)
@CommandArgument("test_paths", nargs="*", metavar="N", help="Recipe paths to test.")
def run_migration_tests(command_context, test_paths=None, **kwargs):
    """Validate and exercise Fluent migration recipes.

    Each recipe is first statically inspected; recipes with issues are
    logged and skipped. The remaining recipes are run against a prepared
    object directory. Returns 0 when everything passed, non-zero otherwise.
    """
    if not test_paths:
        test_paths = []
    command_context.activate_virtualenv()
    from test_fluent_migrations import fmt

    rv = 0
    with_context = []
    for to_test in test_paths:
        try:
            # Static inspection: collect recipe issues and referenced files.
            context = fmt.inspect_migration(to_test)
            for issue in context["issues"]:
                command_context.log(
                    logging.ERROR,
                    "fluent-migration-test",
                    {
                        "error": issue["msg"],
                        "file": to_test,
                    },
                    "ERROR in {file}: {error}",
                )
            if context["issues"]:
                continue
            with_context.append(
                {
                    "to_test": to_test,
                    "references": context["references"],
                }
            )
        except Exception as e:
            # A recipe that cannot even be inspected counts as a failure.
            command_context.log(
                logging.ERROR,
                "fluent-migration-test",
                {"error": str(e), "file": to_test},
                "ERROR in {file}: {error}",
            )
            rv |= 1
    obj_dir = fmt.prepare_object_dir(command_context)
    for context in with_context:
        rv |= fmt.test_migration(command_context, obj_dir, **context)
    return rv