# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import absolute_import, print_function, unicode_literals

import argparse
import logging
import os
import subprocess
import sys

from mach.decorators import (
    Command,
    CommandArgument,
    SettingsProvider,
    SubCommand,
)

from mozbuild.base import (
    BuildEnvironmentNotFoundException,
    MachCommandConditions as conditions,
)

UNKNOWN_TEST = """
I was unable to find tests from the given argument(s).

You should specify a test directory, filename, test suite name, or
abbreviation.

It's possible my little brain doesn't know about the type of test you are
trying to execute. If you suspect this, please request support by filing
a bug at
https://bugzilla.mozilla.org/enter_bug.cgi?product=Testing&component=General.
""".strip()

UNKNOWN_FLAVOR = """
I know you are trying to run a %s%s test. Unfortunately, I can't run those
tests yet. Sorry!
""".strip()

TEST_HELP = """
Test or tests to run. Tests can be specified by filename, directory, suite
name or suite alias.

The following test suites and aliases are supported: {}
""".strip()


@SettingsProvider
class TestConfig(object):
    @classmethod
    def config_settings(cls):
        from mozlog.commandline import log_formatters
        from mozlog.structuredlog import log_levels

        format_desc = "The default format to use when running tests with `mach test`."
        format_choices = list(log_formatters)
        level_desc = "The default log level to use when running tests with `mach test`."
        level_choices = [l.lower() for l in log_levels]
        return [
            ("test.format", "string", format_desc, "mach", {"choices": format_choices}),
            ("test.level", "string", level_desc, "info", {"choices": level_choices}),
        ]
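
# Example (illustrative): the settings registered above can be overridden in
# your machrc (typically ~/.mozbuild/machrc) to change `mach test` defaults:
#
#   [test]
#   format = tbpl
#   level = debug
#
# The values must be a registered mozlog formatter and a mozlog log level;
# anything else is rejected via the "choices" constraints above.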


def get_test_parser():
    from mozlog.commandline import add_logging_group
    from moztest.resolve import TEST_SUITES

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "what",
        default=None,
        nargs="+",
        help=TEST_HELP.format(", ".join(sorted(TEST_SUITES))),
    )
    parser.add_argument(
        "extra_args",
        default=None,
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass to the underlying test command(s). "
        "If an underlying command doesn't recognize the argument, it "
        "will fail.",
    )
    parser.add_argument(
        "--debugger",
        default=None,
        action="store",
        nargs="?",
        help="Specify a debugger to use.",
    )
    add_logging_group(parser)
    return parser


ADD_TEST_SUPPORTED_SUITES = [
    "mochitest-chrome",
    "mochitest-plain",
    "mochitest-browser-chrome",
    "web-platform-tests-testharness",
    "web-platform-tests-reftest",
    "xpcshell",
]

ADD_TEST_SUPPORTED_DOCS = ["js", "html", "xhtml", "xul"]

SUITE_SYNONYMS = {
    "wpt": "web-platform-tests-testharness",
    "wpt-testharness": "web-platform-tests-testharness",
    "wpt-reftest": "web-platform-tests-reftest",
}

MISSING_ARG = object()
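
# MISSING_ARG is a sentinel: because `--editor` below is declared with
# nargs="?" and default=MISSING_ARG, three cases stay distinguishable
# (illustrative sketch using the parser defined next):
#
#   parser = create_parser_addtest()
#   parser.parse_args([]).editor             # MISSING_ARG: don't open an editor
#   parser.parse_args(["-e"]).editor         # None: fall back to $VISUAL/$EDITOR
#   parser.parse_args(["-e", "vim"]).editor  # "vim": use the given binary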


def create_parser_addtest():
    import addtest

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--suite",
        choices=sorted(ADD_TEST_SUPPORTED_SUITES + list(SUITE_SYNONYMS.keys())),
        help="Suite for the test. "
        "If you pass a `test` argument this will be determined "
        "based on the filename and the folder it is in.",
    )
    parser.add_argument(
        "-o",
        "--overwrite",
        action="store_true",
        help="Overwrite an existing file if it exists.",
    )
    parser.add_argument(
        "--doc",
        choices=ADD_TEST_SUPPORTED_DOCS,
        help="Document type for the test (if applicable). "
        "If you pass a `test` argument this will be determined "
        "based on the filename.",
    )
    parser.add_argument(
        "-e",
        "--editor",
        action="store",
        nargs="?",
        default=MISSING_ARG,
        help="Open the created file(s) in an editor; if a "
        "binary is supplied it will be used, otherwise the default editor for "
        "your environment will be opened.",
    )

    for base_suite in addtest.TEST_CREATORS:
        cls = addtest.TEST_CREATORS[base_suite]
        if hasattr(cls, "get_parser"):
            group = parser.add_argument_group(base_suite)
            cls.get_parser(group)

    parser.add_argument("test", nargs="?", help="Test to create.")
    return parser
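
# Example (illustrative) invocations of the `addtest` command defined below;
# the test paths are hypothetical:
#
#   ./mach addtest dom/tests/mochitest/general/test_example.html
#   ./mach addtest --suite wpt testing/web-platform/tests/example.html
#   ./mach addtest --suite xpcshell -e browser/components/tests/test_example.js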


@Command(
    "addtest",
    category="testing",
    description="Generate tests based on templates.",
    parser=create_parser_addtest,
)
def addtest(
    command_context,
    suite=None,
    test=None,
    doc=None,
    overwrite=False,
    editor=MISSING_ARG,
    **kwargs,
):
    import addtest
    import io

    from moztest.resolve import TEST_SUITES

    if not suite and not test:
        return create_parser_addtest().parse_args(["--help"])

    if suite in SUITE_SYNONYMS:
        suite = SUITE_SYNONYMS[suite]

    if test:
        abs_test = os.path.abspath(test)
        if not overwrite and os.path.isfile(abs_test):
            print("Error: can't generate a test that already exists:", test)
            return 1

        if doc is None:
            doc = guess_doc(abs_test)
        if suite is None:
            guessed_suite, err = guess_suite(abs_test)
            if err:
                print(err)
                return 1
            suite = guessed_suite

    else:
        test = None
        if doc is None:
            doc = "html"

    if not suite:
        print(
            "We couldn't automatically determine a suite. "
            "Please specify `--suite` with one of the following options:\n{}\n"
            "If you'd like to add support to a new suite, please file a bug "
            "blocking https://bugzilla.mozilla.org/show_bug.cgi?id=1540285.".format(
                ADD_TEST_SUPPORTED_SUITES
            )
        )
        return 1

    if doc not in ADD_TEST_SUPPORTED_DOCS:
        print(
            "Error: invalid `doc`. Either pass in a test with a valid extension "
            "({}) or pass in the `doc` argument.".format(ADD_TEST_SUPPORTED_DOCS)
        )
        return 1

    creator_cls = addtest.creator_for_suite(suite)

    if creator_cls is None:
        print("Sorry, `addtest` doesn't currently know how to add {}".format(suite))
        return 1

    creator = creator_cls(command_context.topsrcdir, test, suite, doc, **kwargs)

    creator.check_args()

    paths = []
    added_tests = False
    for path, template in creator:
        if not template:
            continue
        added_tests = True
        if path:
            paths.append(path)
            print("Adding a test file at {} (suite `{}`)".format(path, suite))

            try:
                os.makedirs(os.path.dirname(path))
            except OSError:
                pass

            with io.open(path, "w", newline="\n") as f:
                f.write(template)
        else:
            # write to stdout if only a suite and doc were passed, not a file path
            print(template)

    if not added_tests:
        return 1

    if test:
        creator.update_manifest()

        # Small hack; should really do this better.
        if suite.startswith("wpt-"):
            suite = "web-platform-tests"

        mach_command = TEST_SUITES[suite]["mach_command"]
        print(
            "Please make sure to add the new test to your commit. "
            "You can now run the test with:\n    ./mach {} {}".format(
                mach_command, test
            )
        )

    if editor is not MISSING_ARG:
        # `--editor` was passed; resolve which editor binary to launch.
        if editor is None:
            if "VISUAL" in os.environ:
                editor = os.environ["VISUAL"]
            elif "EDITOR" in os.environ:
                editor = os.environ["EDITOR"]
            else:
                print("Unable to determine editor; please specify a binary")

        proc = None
        if editor:
            proc = subprocess.Popen("%s %s" % (editor, " ".join(paths)), shell=True)

        if proc:
            proc.wait()

    return 0


def guess_doc(abs_test):
    filename = os.path.basename(abs_test)
    return os.path.splitext(filename)[1].strip(".")
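
# For example, guess_doc("/repo/dom/tests/test_example.html") returns "html"
# (hypothetical path; only the file extension matters).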


def guess_suite(abs_test):
    # If you pass an abs_test, try to detect the type based on the name
    # and folder. This detection can be skipped if you pass the `type` arg.
    err = None
    guessed_suite = None
    parent = os.path.dirname(abs_test)
    filename = os.path.basename(abs_test)

    has_browser_ini = os.path.isfile(os.path.join(parent, "browser.ini"))
    has_chrome_ini = os.path.isfile(os.path.join(parent, "chrome.ini"))
    has_plain_ini = os.path.isfile(os.path.join(parent, "mochitest.ini"))
    has_xpcshell_ini = os.path.isfile(os.path.join(parent, "xpcshell.ini"))

    in_wpt_folder = abs_test.startswith(
        os.path.abspath(os.path.join("testing", "web-platform"))
    )

    if in_wpt_folder:
        guessed_suite = "web-platform-tests-testharness"
        if "/css/" in abs_test:
            guessed_suite = "web-platform-tests-reftest"
    elif (
        filename.startswith("test_")
        and has_xpcshell_ini
        and guess_doc(abs_test) == "js"
    ):
        guessed_suite = "xpcshell"
    else:
        if filename.startswith("browser_") and has_browser_ini:
            guessed_suite = "mochitest-browser-chrome"
        elif filename.startswith("test_"):
            if has_chrome_ini and has_plain_ini:
                err = (
                    "Error: directory contains both a chrome.ini and mochitest.ini. "
                    "Please set --suite=mochitest-chrome or --suite=mochitest-plain."
                )
            elif has_chrome_ini:
                guessed_suite = "mochitest-chrome"
            elif has_plain_ini:
                guessed_suite = "mochitest-plain"
    return guessed_suite, err
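
# Illustrative examples (hypothetical paths), assuming the named manifest
# sits next to the test file:
#
#   browser_example.js + browser.ini   -> ("mochitest-browser-chrome", None)
#   test_example.js    + xpcshell.ini  -> ("xpcshell", None)
#   testing/web-platform/tests/css/x.html -> ("web-platform-tests-reftest", None)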


@Command(
    "test",
    category="testing",
    description="Run tests (detects the kind of test and runs it).",
    parser=get_test_parser,
)
def test(command_context, what, extra_args, **log_args):
    """Run tests from names or paths.

    mach test accepts arguments specifying which tests to run. Each argument
    can be:

    * The path to a test file
    * A directory containing tests
    * A test suite name
    * An alias to a test suite name (codes used on TreeHerder)

    When paths or directories are given, they are first resolved to test
    files known to the build system.

    If resolved tests belong to more than one test type/flavor/harness,
    the harness for each relevant type/flavor will be invoked. e.g. if
    you specify a directory with xpcshell and browser chrome mochitests,
    both harnesses will be invoked.

    Warning: `mach test` does not automatically re-build.
    Please remember to run `mach build` when necessary.

    EXAMPLES

    Run all test files in the devtools/client/shared/redux/middleware/xpcshell/
    directory:

    `./mach test devtools/client/shared/redux/middleware/xpcshell/`

    The command below prints a short summary of results instead of
    the default, more verbose output.
    Do not forget the - (minus sign) after --log-grouped!

    `./mach test --log-grouped - devtools/client/shared/redux/middleware/xpcshell/`
    """
    from mozlog.commandline import setup_logging
    from mozlog.handlers import StreamHandler
    from moztest.resolve import get_suite_definition, TestResolver, TEST_SUITES

    resolver = command_context._spawn(TestResolver)
    run_suites, run_tests = resolver.resolve_metadata(what)

    if not run_suites and not run_tests:
        print(UNKNOWN_TEST)
        return 1

    if log_args.get("debugger", None):
        import mozdebug

        if not mozdebug.get_debugger_info(log_args.get("debugger")):
            sys.exit(1)
        extra_args_debugger_notation = "=".join(
            ["--debugger", log_args.get("debugger")]
        )
        if extra_args:
            extra_args.append(extra_args_debugger_notation)
        else:
            extra_args = [extra_args_debugger_notation]

    # Create shared logger
    format_args = {"level": command_context._mach_context.settings["test"]["level"]}
    if not run_suites and len(run_tests) == 1:
        format_args["verbose"] = True
        format_args["compact"] = False

    default_format = command_context._mach_context.settings["test"]["format"]
    log = setup_logging(
        "mach-test", log_args, {default_format: sys.stdout}, format_args
    )
    for handler in log.handlers:
        if isinstance(handler, StreamHandler):
            handler.formatter.inner.summary_on_shutdown = True

    status = None
    for suite_name in run_suites:
        suite = TEST_SUITES[suite_name]
        kwargs = suite["kwargs"]
        kwargs["log"] = log
        kwargs.setdefault("subsuite", None)

        if "mach_command" in suite:
            res = command_context._mach_context.commands.dispatch(
                suite["mach_command"],
                command_context._mach_context,
                argv=extra_args,
                **kwargs,
            )
            if res:
                status = res

    buckets = {}
    for test in run_tests:
        key = (test["flavor"], test.get("subsuite", ""))
        buckets.setdefault(key, []).append(test)

    for (flavor, subsuite), tests in sorted(buckets.items()):
        _, m = get_suite_definition(flavor, subsuite)
        if "mach_command" not in m:
            substr = "-{}".format(subsuite) if subsuite else ""
            print(UNKNOWN_FLAVOR % (flavor, substr))
            status = 1
            continue

        kwargs = dict(m["kwargs"])
        kwargs["log"] = log
        kwargs.setdefault("subsuite", None)

        res = command_context._mach_context.commands.dispatch(
            m["mach_command"],
            command_context._mach_context,
            argv=extra_args,
            test_objects=tests,
            **kwargs,
        )
        if res:
            status = res

    log.shutdown()
    return status


@Command(
    "cppunittest", category="testing", description="Run cpp unit tests (C++ tests)."
)
@CommandArgument(
    "test_files",
    nargs="*",
    metavar="N",
    help="Test to run. Can be specified as one or more files or "
    "directories, or omitted. If omitted, the entire test suite is "
    "executed.",
)
def run_cppunit_test(command_context, **params):
    from mozlog import commandline

    log = params.get("log")
    if not log:
        log = commandline.setup_logging("cppunittest", {}, {"tbpl": sys.stdout})

    # See if we have crash symbols
    symbols_path = os.path.join(command_context.distdir, "crashreporter-symbols")
    if not os.path.isdir(symbols_path):
        symbols_path = None

    # If no tests specified, run all tests in main manifest
    tests = params["test_files"]
    if not tests:
        tests = [os.path.join(command_context.distdir, "cppunittests")]
        manifest_path = os.path.join(
            command_context.topsrcdir, "testing", "cppunittest.ini"
        )
    else:
        manifest_path = None

    utility_path = command_context.bindir

    if conditions.is_android(command_context):
        from mozrunner.devices.android_device import (
            verify_android_device,
            InstallIntent,
        )

        verify_android_device(command_context, install=InstallIntent.NO)
        # run_android_test needs command_context for adb discovery and
        # objdir paths.
        return run_android_test(
            command_context, tests, symbols_path, manifest_path, log
        )

    return run_desktop_test(
        command_context, tests, symbols_path, manifest_path, utility_path, log
    )


def run_desktop_test(
    command_context, tests, symbols_path, manifest_path, utility_path, log
):
    import runcppunittests as cppunittests
    from mozlog import commandline

    parser = cppunittests.CPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.utility_path = utility_path
    options.xre_path = command_context.bindir

    try:
        result = cppunittests.run_test_harness(options, tests)
    except Exception as e:
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        result = False
        raise

    return 0 if result else 1


def run_android_test(command_context, tests, symbols_path, manifest_path, log):
    import remotecppunittests
    from mozlog import commandline

    parser = remotecppunittests.RemoteCPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    if not options.adb_path:
        from mozrunner.devices.android_device import get_adb_path

        options.adb_path = get_adb_path(command_context)
    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.xre_path = command_context.bindir
    options.local_lib = command_context.bindir.replace("bin", "fennec")
    for file in os.listdir(os.path.join(command_context.topobjdir, "dist")):
        if file.endswith(".apk") and file.startswith("fennec"):
            options.local_apk = os.path.join(command_context.topobjdir, "dist", file)
            log.info("using APK: " + options.local_apk)
            break

    try:
        result = remotecppunittests.run_test_harness(options, tests)
    except Exception as e:
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        result = False
        raise

    return 0 if result else 1


def executable_name(name):
    return name + ".exe" if sys.platform.startswith("win") else name
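
# e.g. executable_name("js") is "js.exe" on Windows and "js" elsewhere.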


@Command(
    "jstests",
    category="testing",
    description="Run SpiderMonkey JS tests in the JS shell.",
    ok_if_tests_disabled=True,
)
@CommandArgument("--shell", help="The shell to be used")
@CommandArgument(
    "params",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
)
def run_jstests(command_context, shell, params):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    jstest_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "js", "src", "tests", "jstests.py"),
        js,
    ] + params

    return subprocess.call(jstest_cmd)


@Command(
    "jit-test",
    category="testing",
    description="Run SpiderMonkey jit-tests in the JS shell.",
    ok_if_tests_disabled=True,
)
@CommandArgument("--shell", help="The shell to be used")
@CommandArgument(
    "--cgc",
    action="store_true",
    default=False,
    help="Run with the SM(cgc) job's env vars",
)
@CommandArgument(
    "params",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
)
def run_jittests(command_context, shell, cgc, params):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    jittest_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "js", "src", "jit-test", "jit_test.py"),
        js,
    ] + params

    env = os.environ.copy()
    if cgc:
        # Mirror the GC zeal mode used by the SM(cgc) CI job.
        env["JS_GC_ZEAL"] = "IncrementalMultipleSlices"

    return subprocess.call(jittest_cmd, env=env)
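
# Example (illustrative): run the jit-tests against the freshly built shell
# with the same env vars as the SM(cgc) job:
#
#   ./mach jit-test --cgc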


@Command("jsapi-tests", category="testing", description="Run SpiderMonkey JSAPI tests.")
@CommandArgument(
    "test_name",
    nargs="?",
    metavar="N",
    help="Test to run. Can be a prefix or omitted. If "
    "omitted, the entire test suite is executed.",
)
def run_jsapitests(command_context, test_name=None):
    jsapi_tests_cmd = [
        os.path.join(command_context.bindir, executable_name("jsapi-tests"))
    ]
    if test_name:
        jsapi_tests_cmd.append(test_name)

    test_env = os.environ.copy()
    test_env["TOPSRCDIR"] = command_context.topsrcdir

    result = subprocess.call(jsapi_tests_cmd, env=test_env)
    if result != 0:
        print(f"jsapi-tests failed, exit code {result}")
    return result


def run_check_js_msg(command_context):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    check_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "config", "check_js_msg_encoding.py"),
    ]

    return subprocess.call(check_cmd)


def get_jsshell_parser():
    from jsshell.benchmark import get_parser

    return get_parser()


@Command(
    "jsshell-bench",
    category="testing",
    parser=get_jsshell_parser,
    description="Run benchmarks in the SpiderMonkey JS shell.",
)
def run_jsshelltests(command_context, **kwargs):
    from jsshell import benchmark

    return benchmark.run(**kwargs)


@Command(
    "cramtest",
    category="testing",
    description="Mercurial style .t tests for command line applications.",
)
@CommandArgument(
    "test_paths",
    nargs="*",
    metavar="N",
    help="Test paths to run. Each path can be a test file or directory. "
    "If omitted, the entire suite will be run.",
)
@CommandArgument(
    "cram_args",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the cram binary. See "
    "'./mach python -m cram -- -h' for a list of available options.",
)
def cramtest(command_context, cram_args=None, test_paths=None, test_objects=None):
    command_context.activate_virtualenv()
    import mozinfo
    from manifestparser import TestManifest

    if test_objects is None:
        from moztest.resolve import TestResolver

        resolver = command_context._spawn(TestResolver)
        if test_paths:
            # If we were given test paths, try to find tests matching them.
            test_objects = resolver.resolve_tests(paths=test_paths, flavor="cram")
        else:
            # Otherwise, just run everything in CRAMTEST_MANIFESTS.
            test_objects = resolver.resolve_tests(flavor="cram")

    if not test_objects:
        message = "No tests were collected, check spelling of the test paths."
        command_context.log(logging.WARN, "cramtest", {}, message)
        return 1

    mp = TestManifest()
    mp.tests.extend(test_objects)
    tests = mp.active_tests(disabled=False, **mozinfo.info)

    python = command_context.virtualenv_manager.python_path
    # cram_args may be None when this function is called directly.
    cmd = [python, "-m", "cram"] + (cram_args or []) + [t["relpath"] for t in tests]
    return subprocess.call(cmd, cwd=command_context.topsrcdir)


from datetime import date, timedelta


@Command(
    "test-info", category="testing", description="Display historical test results."
)
def test_info(command_context):
    """
    All functions implemented as subcommands.
    """


@SubCommand(
    "test-info",
    "tests",
    description="Display historical test result summary for named tests.",
)
@CommandArgument("test_names", nargs=argparse.REMAINDER, help="Test(s) of interest.")
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(7)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
@CommandArgument(
    "--show-info",
    action="store_true",
    help="Retrieve and display general test information.",
)
@CommandArgument(
    "--show-bugs",
    action="store_true",
    help="Retrieve and display related Bugzilla bugs.",
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_info_tests(
    command_context,
    test_names,
    start,
    end,
    show_info,
    show_bugs,
    verbose,
):
    import testinfo

    ti = testinfo.TestInfoTests(verbose)
    ti.report(
        test_names,
        start,
        end,
        show_info,
        show_bugs,
    )


@SubCommand(
    "test-info",
    "report",
    description="Generate a JSON report of test manifests and/or tests "
    "categorized by Bugzilla component and optionally filtered "
    "by path, component, and/or manifest annotations.",
)
@CommandArgument(
    "--components",
    default=None,
    help="Comma-separated list of Bugzilla components."
    " eg. Testing::General,Core::WebVR",
)
@CommandArgument(
    "--flavor",
    help='Limit results to tests of the specified flavor (eg. "xpcshell").',
)
@CommandArgument(
    "--subsuite",
    help='Limit results to tests of the specified subsuite (eg. "devtools").',
)
@CommandArgument(
    "paths", nargs=argparse.REMAINDER, help="File system paths of interest."
)
@CommandArgument(
    "--show-manifests",
    action="store_true",
    help="Include test manifests in report.",
)
@CommandArgument(
    "--show-tests", action="store_true", help="Include individual tests in report."
)
@CommandArgument(
    "--show-summary", action="store_true", help="Include summary in report."
)
@CommandArgument(
    "--show-annotations",
    action="store_true",
    help="Include list of manifest annotation conditions in report.",
)
@CommandArgument(
    "--filter-values",
    help="Comma-separated list of value regular expressions to filter on; "
    "displayed tests contain all specified values.",
)
@CommandArgument(
    "--filter-keys",
    help="Comma-separated list of test keys to filter on, "
    'like "skip-if"; only these fields will be searched '
    "for filter-values.",
)
@CommandArgument(
    "--no-component-report",
    action="store_false",
    dest="show_components",
    default=True,
    help="Do not categorize by Bugzilla component.",
)
@CommandArgument("--output-file", help="Path to report file.")
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_report(
    command_context,
    components,
    flavor,
    subsuite,
    paths,
    show_manifests,
    show_tests,
    show_summary,
    show_annotations,
    filter_values,
    filter_keys,
    show_components,
    output_file,
    verbose,
):
    import testinfo
    from mozbuild import build_commands

    try:
        command_context.config_environment
    except BuildEnvironmentNotFoundException:
        print("Looks like configure has not run yet, running it now...")
        build_commands.configure(command_context)

    ti = testinfo.TestInfoReport(verbose)
    ti.report(
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        filter_values,
        filter_keys,
        show_components,
        output_file,
    )


@SubCommand(
    "test-info",
    "report-diff",
    description='Compare two reports generated by "test-info reports".',
)
@CommandArgument(
    "--before",
    default=None,
    help="The first (earlier) report file; path to local file or url.",
)
@CommandArgument(
    "--after", help="The second (later) report file; path to local file or url."
)
@CommandArgument(
    "--output-file",
    help="Path to report file to be written. If not specified, report "
    "will be written to standard output.",
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_report_diff(command_context, before, after, output_file, verbose):
    import testinfo

    ti = testinfo.TestInfoReport(verbose)
    ti.report_diff(before, after, output_file)


@SubCommand(
    "test-info",
    "failure-report",
    description="Display failure line groupings and frequencies for "
    "single tracking intermittent bugs.",
)
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
@CommandArgument(
    "--bugid",
    default=None,
    help="bugid for treeherder intermittent failures data query.",
)
def test_info_failures(
    command_context,
    start,
    end,
    bugid,
):
    import requests

    # bugid comes in as a string (or None); we need a valid int:
    try:
        bugid = int(bugid)
    except (TypeError, ValueError):
        bugid = None
    if not bugid:
        print("Please enter a valid bugid (i.e. '1760132')")
        return

    # get bug info
    url = "https://bugzilla.mozilla.org/rest/bug?include_fields=summary&id=%s" % bugid
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    if r.status_code != 200:
        print("%s error retrieving url: %s" % (r.status_code, url))
        return

    data = r.json()
    if not data:
        print("unable to get bugzilla information for %s" % bugid)
        return

    summary = data["bugs"][0]["summary"]
    parts = summary.split("|")
    if not summary.endswith("single tracking bug") or len(parts) != 2:
        print("this query only works with single tracking bugs")
        return

    testname = parts[0].strip().split(" ")[-1]
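    # e.g. a summary of the form (illustrative)
    #   "Intermittent dom/media/test/test_example.html | single tracking bug"
    # yields testname "dom/media/test/test_example.html".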

    # now query treeherder to get details about annotations
    url = "https://treeherder.mozilla.org/api/failuresbybug/"
    url += "?startday=%s&endday=%s&tree=trunk&bug=%s" % (start, end, bugid)
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    r.raise_for_status()

    data = r.json()
    if len(data) == 0:
        print("no failures were found for given bugid, please ensure bug is")
        print("accessible via: https://treeherder.mozilla.org/intermittent-failures")
        return

    jobs = {}
    lines = {}
    for failure in data:
        # config = platform/buildtype
        # testsuite (<suite>[-variant][-fis][-e10s|1proc][-<chunk>])
        # lines - group by patterns that contain test name
        config = "%s/%s" % (failure["platform"], failure["build_type"])

        if "-e10s" in failure["test_suite"]:
            parts = failure["test_suite"].split("-e10s")
            sv = parts[0].split("-")
            suite = sv[0]
            variant = ""
            if len(sv) > 1:
                variant = "-%s" % "-".join(sv[1:])
        elif "-1proc" in failure["test_suite"]:
            parts = failure["test_suite"].split("-1proc")
            sv = parts[0].split("-")
            suite = sv[0]
            variant = ""
            if len(sv) > 1:
                variant = "-%s" % "-".join(sv[1:])
        else:
            print("unable to parse test suite: %s" % failure["test_suite"])
            print("no `-e10s` or `-1proc` found")
            # skip this failure rather than reusing suite/variant from a
            # previous iteration
            continue

        job = "%s-%s%s" % (config, suite, variant)
        if job not in jobs.keys():
            jobs[job] = 0
        jobs[job] += 1

        # lines - sum(hash) of all lines where we match testname
        hvalue = 0
        for line in failure["lines"]:
            if len(line.split(testname)) <= 1:
                continue
            # strip off timestamp and mozharness status
            parts = line.split("TEST-UNEXPECTED")
            l = "TEST-UNEXPECTED%s" % parts[-1]

            # only keep 25 characters of the failure; longer is often random numbers
            parts = l.split(testname)
            l = "%s%s%s" % (parts[0], testname, parts[1][:25])

            hvalue += hash(l)
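
        # Two failures land in the same group below exactly when their
        # matching lines, after the trimming above, hash to the same sum.
        # Note (assumption worth flagging): str hash() is randomized per
        # process, so groups are only stable within a single run.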
        if not hvalue:
            continue

        if hvalue not in lines.keys():
            lines[hvalue] = {"lines": failure["lines"], "config": []}
        lines[hvalue]["config"].append(job)

    for h in lines.keys():
        print("%s errors with:" % (len(lines[h]["config"])))
        for l in lines[h]["lines"]:
            print(l)
        print("")

        for job in jobs:
            count = len([x for x in lines[h]["config"] if x == job])
            if count > 0:
                print(" %s: %s" % (job, count))


@Command(
    "rusttests",
    category="testing",
    conditions=[conditions.is_non_artifact_build],
    description="Run rust unit tests (via cargo test).",
)
def run_rusttests(command_context, **kwargs):
    return command_context._mach_context.commands.dispatch(
        "build",
        command_context._mach_context,
        what=["pre-export", "export", "recurse_rusttests"],
    )


@Command(
    "fluent-migration-test",
    category="testing",
    description="Test Fluent migration recipes.",
)
@CommandArgument("test_paths", nargs="*", metavar="N", help="Recipe paths to test.")
def run_migration_tests(command_context, test_paths=None, **kwargs):
    if not test_paths:
        test_paths = []
    command_context.activate_virtualenv()
    from test_fluent_migrations import fmt

    rv = 0
    with_context = []
    for to_test in test_paths:
        try:
            context = fmt.inspect_migration(to_test)
            for issue in context["issues"]:
                command_context.log(
                    logging.ERROR,
                    "fluent-migration-test",
                    {
                        "error": issue["msg"],
                        "file": to_test,
                    },
                    "ERROR in {file}: {error}",
                )
            if context["issues"]:
                continue
            with_context.append(
                {
                    "to_test": to_test,
                    "references": context["references"],
                }
            )
        except Exception as e:
            command_context.log(
                logging.ERROR,
                "fluent-migration-test",
                {"error": str(e), "file": to_test},
                "ERROR in {file}: {error}",
            )
            rv |= 1
    obj_dir = fmt.prepare_object_dir(command_context)
    for context in with_context:
        rv |= fmt.test_migration(command_context, obj_dir, **context)
    return rv