# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import argparse
import logging
import os
import subprocess
import sys

from mach.decorators import Command, CommandArgument, SettingsProvider, SubCommand
from mozbuild.base import BuildEnvironmentNotFoundException
from mozbuild.base import MachCommandConditions as conditions
UNKNOWN_TEST = """
I was unable to find tests from the given argument(s).

You should specify a test directory, filename, test suite name, or
abbreviation.

It's possible my little brain doesn't know about the type of test you are
trying to execute. If you suspect this, please request support by filing
a bug at
https://bugzilla.mozilla.org/enter_bug.cgi?product=Testing&component=General.
""".strip()

UNKNOWN_FLAVOR = """
I know you are trying to run a %s%s test. Unfortunately, I can't run those
tests yet. Sorry!
""".strip()

TEST_HELP = """
Test or tests to run. Tests can be specified by filename, directory, suite
name or suite alias.

The following test suites and aliases are supported: {}
""".strip()
@SettingsProvider
class TestConfig(object):
    @classmethod
    def config_settings(cls):
        from mozlog.commandline import log_formatters
        from mozlog.structuredlog import log_levels

        format_desc = "The default format to use when running tests with `mach test`."
        format_choices = list(log_formatters)
        level_desc = "The default log level to use when running tests with `mach test`."
        level_choices = [l.lower() for l in log_levels]
        return [
            ("test.format", "string", format_desc, "mach", {"choices": format_choices}),
            ("test.level", "string", level_desc, "info", {"choices": level_choices}),
        ]
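

# A minimal sketch of overriding these settings from a mach config file
# (assumes the standard machrc location for your setup, e.g. ~/.mozbuild/machrc):
#
#   [test]
#   format = tbpl
#   level = debug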


def get_test_parser():
    from mozlog.commandline import add_logging_group
    from moztest.resolve import TEST_SUITES

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "what",
        nargs="+",
        help=TEST_HELP.format(", ".join(sorted(TEST_SUITES))),
    )
    parser.add_argument(
        "extra_args",
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass to the underlying test command(s). "
        "If an underlying command doesn't recognize the argument, it "
        "will fail.",
    )
    parser.add_argument(
        "--debugger",
        nargs="?",
        help="Specify a debugger to use.",
    )
    add_logging_group(parser)
    return parser
ADD_TEST_SUPPORTED_SUITES = [
    "mochitest-chrome",
    "mochitest-plain",
    "mochitest-browser-chrome",
    "web-platform-tests-testharness",
    "web-platform-tests-reftest",
    "xpcshell",
]
ADD_TEST_SUPPORTED_DOCS = ["js", "html", "xhtml", "xul"]

SUITE_SYNONYMS = {
    "wpt": "web-platform-tests-testharness",
    "wpt-testharness": "web-platform-tests-testharness",
    "wpt-reftest": "web-platform-tests-reftest",
}

MISSING_ARG = object()
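# MISSING_ARG is a sentinel default: it lets `addtest` tell "--editor was not
# passed at all" apart from "--editor passed without a value" (which argparse
# stores as None).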


def create_parser_addtest():
    import addtest

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--suite",
        choices=sorted(ADD_TEST_SUPPORTED_SUITES + list(SUITE_SYNONYMS.keys())),
        help="suite for the test. "
        "If you pass a `test` argument this will be determined "
        "based on the filename and the folder it is in",
    )
    parser.add_argument(
        "--overwrite",
        action="store_true",
        help="Overwrite an existing file if it exists.",
    )
    parser.add_argument(
        "--doc",
        choices=ADD_TEST_SUPPORTED_DOCS,
        help="Document type for the test (if applicable). "
        "If you pass a `test` argument this will be determined "
        "based on the filename.",
    )
    parser.add_argument(
        "--editor",
        action="store",
        nargs="?",
        default=MISSING_ARG,
        help="Open the created file(s) in an editor; if a "
        "binary is supplied it will be used otherwise the default editor for "
        "your environment will be opened",
    )

    for base_suite in addtest.TEST_CREATORS:
        cls = addtest.TEST_CREATORS[base_suite]
        if hasattr(cls, "get_parser"):
            group = parser.add_argument_group(base_suite)
            cls.get_parser(group)

    parser.add_argument("test", nargs="?", help=("Test to create."))
    return parser


@Command(
    "addtest",
    category="testing",
    description="Generate tests based on templates",
    parser=create_parser_addtest,
)
def addtest(
    command_context,
    suite=None,
    test=None,
    doc=None,
    overwrite=False,
    editor=MISSING_ARG,
    **kwargs,
):
    import io

    from moztest.resolve import TEST_SUITES

    if not suite and not test:
        return create_parser_addtest().parse_args(["--help"])

    if suite in SUITE_SYNONYMS:
        suite = SUITE_SYNONYMS[suite]

    if test:
        if not overwrite and os.path.isfile(os.path.abspath(test)):
            print("Error: can't generate a test that already exists:", test)
            return 1

        abs_test = os.path.abspath(test)
        if doc is None:
            doc = guess_doc(abs_test)
        if suite is None:
            guessed_suite, err = guess_suite(abs_test)
            if err:
                print(err)
                return 1
            suite = guessed_suite
200 "We couldn't automatically determine a suite. "
201 "Please specify `--suite` with one of the following options:\n{}\n"
202 "If you'd like to add support to a new suite, please file a bug "
203 "blocking https://bugzilla.mozilla.org/show_bug.cgi?id=1540285.".format(
204 ADD_TEST_SUPPORTED_SUITES
209 if doc
not in ADD_TEST_SUPPORTED_DOCS
:
211 "Error: invalid `doc`. Either pass in a test with a valid extension"
212 "({}) or pass in the `doc` argument".format(ADD_TEST_SUPPORTED_DOCS
)
216 creator_cls
= addtest
.creator_for_suite(suite
)
218 if creator_cls
is None:
219 print("Sorry, `addtest` doesn't currently know how to add {}".format(suite
))
    creator = creator_cls(command_context.topsrcdir, test, suite, doc, **kwargs)

    paths = []
    for path, template in creator:
        if path:
            paths.append(path)
            print("Adding a test file at {} (suite `{}`)".format(path, suite))

            if not os.path.isdir(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))

            with io.open(path, "w", newline="\n") as f:
                f.write(template)
        else:
            # write to stdout if you passed only suite and doc and not a file path
            print(template)

    if test:
        creator.update_manifest()

        # Small hack, should really do this better
        if suite.startswith("wpt-"):
            suite = "web-platform-tests"

        mach_command = TEST_SUITES[suite]["mach_command"]
        print(
            "Please make sure to add the new test to your commit. "
            "You can now run the test with:\n    ./mach {} {}".format(
                mach_command, test
            )
        )

    if editor is not MISSING_ARG:
        if editor is not None:
            # an explicit --editor value wins
            pass
        elif "VISUAL" in os.environ:
            editor = os.environ["VISUAL"]
        elif "EDITOR" in os.environ:
            editor = os.environ["EDITOR"]
        else:
            print("Unable to determine editor; please specify a binary")
            return 1

        proc = subprocess.Popen("%s %s" % (editor, " ".join(paths)), shell=True)
        proc.wait()
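
        # Illustrative example (hypothetical values): with editor "vim" and
        # paths ["test_foo.html"], the lines above run `vim test_foo.html` in
        # a shell and wait for the editor to exit.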


def guess_doc(abs_test):
    filename = os.path.basename(abs_test)
    return os.path.splitext(filename)[1].strip(".")
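

# Illustrative guess_doc examples (hypothetical paths):
#   guess_doc("/src/dom/tests/test_foo.html") -> "html"
#   guess_doc("/src/js/tests/test_bar.js") -> "js"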


def guess_suite(abs_test):
    # If you pass an abs_test, try to detect the type based on the name
    # and folder. This detection can be skipped if you pass the `type` arg.
    err = None
    guessed_suite = None

    parent = os.path.dirname(abs_test)
    filename = os.path.basename(abs_test)

    has_browser_ini = os.path.isfile(os.path.join(parent, "browser.ini"))
    has_browser_toml = os.path.isfile(os.path.join(parent, "browser.toml"))
    has_chrome_ini = os.path.isfile(os.path.join(parent, "chrome.ini"))
    has_chrome_toml = os.path.isfile(os.path.join(parent, "chrome.toml"))
    has_plain_ini = os.path.isfile(os.path.join(parent, "mochitest.ini"))
    has_plain_toml = os.path.isfile(os.path.join(parent, "mochitest.toml"))
    has_xpcshell_ini = os.path.isfile(os.path.join(parent, "xpcshell.ini"))
    has_xpcshell_toml = os.path.isfile(os.path.join(parent, "xpcshell.toml"))

    in_wpt_folder = abs_test.startswith(
        os.path.abspath(os.path.join("testing", "web-platform"))
    )

    if in_wpt_folder:
        guessed_suite = "web-platform-tests-testharness"
        if "/css/" in abs_test:
            guessed_suite = "web-platform-tests-reftest"
    elif (
        filename.startswith("test_")
        and (has_xpcshell_ini or has_xpcshell_toml)
        and guess_doc(abs_test) == "js"
    ):
        guessed_suite = "xpcshell"
    else:
        if filename.startswith("browser_") and (has_browser_ini or has_browser_toml):
            guessed_suite = "mochitest-browser-chrome"
        elif filename.startswith("test_"):
            if (has_chrome_ini or has_chrome_toml) and (
                has_plain_ini or has_plain_toml
            ):
                err = (
                    "Error: directory contains both a chrome.{ini|toml} and mochitest.{ini|toml}. "
                    "Please set --suite=mochitest-chrome or --suite=mochitest-plain."
                )
            elif has_chrome_ini or has_chrome_toml:
                guessed_suite = "mochitest-chrome"
            elif has_plain_ini or has_plain_toml:
                guessed_suite = "mochitest-plain"
    return guessed_suite, err
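

# Illustrative guess_suite example (hypothetical path): a file named
# browser_foo.js next to a browser.toml manifest yields
# ("mochitest-browser-chrome", None); err is only set for the ambiguous
# chrome/plain case above.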


@Command(
    "test",
    category="testing",
    description="Run tests (detects the kind of test and runs it).",
    parser=get_test_parser,
)
def test(command_context, what, extra_args, **log_args):
    """Run tests from names or paths.

    mach test accepts arguments specifying which tests to run. Each argument
    can be:

    * The path to a test file
    * A directory containing tests
    * A test suite name
    * An alias to a test suite name (codes used on TreeHerder)
    * A path to a test manifest

    When paths or directories are given, they are first resolved to test
    files known to the build system.

    If resolved tests belong to more than one test type/flavor/harness,
    the harness for each relevant type/flavor will be invoked. e.g. if
    you specify a directory with xpcshell and browser chrome mochitests,
    both harnesses will be invoked.

    Warning: `mach test` does not automatically re-build.
    Please remember to run `mach build` when necessary.

    EXAMPLES

    Run all test files in the devtools/client/shared/redux/middleware/xpcshell/
    directory:

    `./mach test devtools/client/shared/redux/middleware/xpcshell/`

    The command below prints a short summary of results instead of
    the default, more verbose output.
    Do not forget the - (minus sign) after --log-grouped!

    `./mach test --log-grouped - devtools/client/shared/redux/middleware/xpcshell/`

    To learn more about arguments for each test type/flavor/harness, please run
    `./mach <test-harness> --help`. For example, `./mach mochitest --help`.
    """
    from mozlog.commandline import setup_logging
    from mozlog.handlers import StreamHandler
    from moztest.resolve import TEST_SUITES, TestResolver, get_suite_definition

    resolver = command_context._spawn(TestResolver)
    run_suites, run_tests = resolver.resolve_metadata(what)

    if not run_suites and not run_tests:
        print(UNKNOWN_TEST)
        return 1

    if log_args.get("debugger", None):
        import mozdebug

        if not mozdebug.get_debugger_info(log_args.get("debugger")):
            sys.exit(1)
        extra_args_debugger_notation = "=".join(
            ["--debugger", log_args.get("debugger")]
        )
        if extra_args:
            extra_args.append(extra_args_debugger_notation)
        else:
            extra_args = [extra_args_debugger_notation]
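
        # e.g. passing `--debugger gdb` yields extra_args ending in
        # "--debugger=gdb", which the underlying harness parses like a
        # normal command-line flag.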

    # Create shared logger
    format_args = {"level": command_context._mach_context.settings["test"]["level"]}
    if not run_suites and len(run_tests) == 1:
        format_args["verbose"] = True
        format_args["compact"] = False

    default_format = command_context._mach_context.settings["test"]["format"]
    log = setup_logging(
        "mach-test", log_args, {default_format: sys.stdout}, format_args
    )
    for handler in log.handlers:
        if isinstance(handler, StreamHandler):
            handler.formatter.inner.summary_on_shutdown = True

    status = None
    for suite_name in run_suites:
        suite = TEST_SUITES[suite_name]
        kwargs = suite["kwargs"]
        kwargs["log"] = log
        kwargs.setdefault("subsuite", None)

        if "mach_command" in suite:
            res = command_context._mach_context.commands.dispatch(
                suite["mach_command"],
                command_context._mach_context,
                **kwargs,
            )
            if res:
                status = res

    buckets = {}
    for test in run_tests:
        key = (test["flavor"], test.get("subsuite", ""))
        buckets.setdefault(key, []).append(test)
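
    # buckets now maps (flavor, subsuite) -> [tests], e.g. (hypothetical shape):
    #   {("xpcshell", ""): [...], ("mochitest", "devtools"): [...]}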
    for (flavor, subsuite), tests in sorted(buckets.items()):
        _, m = get_suite_definition(flavor, subsuite)
        if "mach_command" not in m:
            substr = "-{}".format(subsuite) if subsuite else ""
            print(UNKNOWN_FLAVOR % (flavor, substr))
            status = 1
            continue

        kwargs = dict(m["kwargs"])
        kwargs["log"] = log
        kwargs.setdefault("subsuite", None)

        res = command_context._mach_context.commands.dispatch(
            m["mach_command"],
            command_context._mach_context,
            test_objects=tests,
            **kwargs,
        )
        if res:
            status = res

    log.shutdown()
    return status
474 "cppunittest", category
="testing", description
="Run cpp unit tests (C++ tests)."
480 help="Test to run. Can be specified as one or more files or "
481 "directories, or omitted. If omitted, the entire test suite is "
484 def run_cppunit_test(command_context
, **params
):
485 from mozlog
import commandline
487 log
= params
.get("log")
489 log
= commandline
.setup_logging("cppunittest", {}, {"tbpl": sys
.stdout
})
491 # See if we have crash symbols
492 symbols_path
= os
.path
.join(command_context
.distdir
, "crashreporter-symbols")
493 if not os
.path
.isdir(symbols_path
):
496 # If no tests specified, run all tests in main manifest
497 tests
= params
["test_files"]
499 tests
= [os
.path
.join(command_context
.distdir
, "cppunittests")]
500 manifest_path
= os
.path
.join(
501 command_context
.topsrcdir
, "testing", "cppunittest.ini"
506 utility_path
= command_context
.bindir
508 if conditions
.is_android(command_context
):
509 from mozrunner
.devices
.android_device
import (
511 verify_android_device
,
514 verify_android_device(command_context
, install
=InstallIntent
.NO
)
515 return run_android_test(tests
, symbols_path
, manifest_path
, log
)
517 return run_desktop_test(
518 command_context
, tests
, symbols_path
, manifest_path
, utility_path
, log


def run_desktop_test(
    command_context, tests, symbols_path, manifest_path, utility_path, log
):
    import runcppunittests as cppunittests
    from mozlog import commandline

    parser = cppunittests.CPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.utility_path = utility_path
    options.xre_path = command_context.bindir

    try:
        result = cppunittests.run_test_harness(options, tests)
    except Exception as e:
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        result = False
        raise

    return 0 if result else 1


def run_android_test(command_context, tests, symbols_path, manifest_path, log):
    import remotecppunittests
    from mozlog import commandline

    parser = remotecppunittests.RemoteCPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    if not options.adb_path:
        from mozrunner.devices.android_device import get_adb_path

        options.adb_path = get_adb_path(command_context)
    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.xre_path = command_context.bindir
    options.local_lib = command_context.bindir.replace("bin", "fennec")
    for file in os.listdir(os.path.join(command_context.topobjdir, "dist")):
        if file.endswith(".apk") and file.startswith("fennec"):
            options.local_apk = os.path.join(command_context.topobjdir, "dist", file)
            log.info("using APK: " + options.local_apk)
            break

    try:
        result = remotecppunittests.run_test_harness(options, tests)
    except Exception as e:
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        result = False
        raise

    return 0 if result else 1


def executable_name(name):
    return name + ".exe" if sys.platform.startswith("win") else name
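

# e.g. executable_name("js") -> "js.exe" on Windows and "js" elsewhere.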


@Command(
    "jstests",
    category="testing",
    description="Run SpiderMonkey JS tests in the JS shell.",
    ok_if_tests_disabled=True,
)
@CommandArgument("--shell", help="The shell to be used")
@CommandArgument(
    "params",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
)
def run_jstests(command_context, shell, params):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    jstest_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "js", "src", "tests", "jstests.py"),
        js,
    ] + params

    return subprocess.call(jstest_cmd)


@Command(
    "jit-test",
    category="testing",
    description="Run SpiderMonkey jit-tests in the JS shell.",
    ok_if_tests_disabled=True,
)
@CommandArgument("--shell", help="The shell to be used")
@CommandArgument(
    "--cgc",
    action="store_true",
    default=False,
    help="Run with the SM(cgc) job's env vars",
)
@CommandArgument(
    "params",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
)
def run_jittests(command_context, shell, cgc, params):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    jittest_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "js", "src", "jit-test", "jit_test.py"),
        js,
    ] + params

    env = os.environ.copy()
    if cgc:
        env["JS_GC_ZEAL"] = "IncrementalMultipleSlices"

    return subprocess.call(jittest_cmd, env=env)
649 @Command("jsapi-tests", category
="testing", description
="Run SpiderMonkey JSAPI tests.")
654 help="List all tests",
660 help="Run tests for frontend-only APIs, with light-weight entry point",
666 help="Test to run. Can be a prefix or omitted. If "
667 "omitted, the entire test suite is executed.",
669 def run_jsapitests(command_context
, list=False, frontend_only
=False, test_name
=None):
673 os
.path
.join(command_context
.bindir
, executable_name("jsapi-tests"))
676 jsapi_tests_cmd
.append("--list")
678 jsapi_tests_cmd
.append("--frontend-only")
680 jsapi_tests_cmd
.append(test_name
)
682 test_env
= os
.environ
.copy()
683 test_env
["TOPSRCDIR"] = command_context
.topsrcdir
685 result
= subprocess
.call(jsapi_tests_cmd
, env
=test_env
)
687 print(f
"jsapi-tests failed, exit code {result}")


def run_check_js_msg(command_context):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    check_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "config", "check_js_msg_encoding.py"),
    ]

    return subprocess.call(check_cmd)


def get_jsshell_parser():
    from jsshell.benchmark import get_parser

    return get_parser()


@Command(
    "jsshell-bench",
    category="testing",
    parser=get_jsshell_parser,
    description="Run benchmarks in the SpiderMonkey JS shell.",
)
def run_jsshelltests(command_context, **kwargs):
    from jsshell import benchmark

    return benchmark.run(**kwargs)


@Command(
    "cramtest",
    category="testing",
    description="Mercurial-style .t tests for command line applications.",
)
@CommandArgument(
    "test_paths",
    nargs="*",
    metavar="N",
    help="Test paths to run. Each path can be a test file or directory. "
    "If omitted, the entire suite will be run.",
)
@CommandArgument(
    "cram_args",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the cram binary. See "
    "'./mach python -m cram -- -h' for a list of available options.",
)
def cramtest(command_context, cram_args=None, test_paths=None, test_objects=None):
    command_context.activate_virtualenv()
    import mozinfo
    from manifestparser import TestManifest

    if test_objects is None:
        from moztest.resolve import TestResolver

        resolver = command_context._spawn(TestResolver)
        if test_paths:
            # If we were given test paths, try to find tests matching them.
            test_objects = resolver.resolve_tests(paths=test_paths, flavor="cram")
        else:
            # Otherwise just run everything in CRAMTEST_MANIFESTS
            test_objects = resolver.resolve_tests(flavor="cram")

    if not test_objects:
        message = "No tests were collected, check spelling of the test paths."
        command_context.log(logging.WARN, "cramtest", {}, message)
        return 1

    mp = TestManifest()
    mp.tests.extend(test_objects)
    tests = mp.active_tests(disabled=False, **mozinfo.info)

    python = command_context.virtualenv_manager.python_path
    cmd = [python, "-m", "cram"] + cram_args + [t["relpath"] for t in tests]
    return subprocess.call(cmd, cwd=command_context.topsrcdir)


from datetime import date, timedelta


@Command(
    "test-info", category="testing", description="Display historical test results."
)
def test_info(command_context):
    """
    All functions implemented as subcommands.
    """


@SubCommand(
    "test-info",
    "tests",
    description="Display historical test result summary for named tests.",
)
@CommandArgument("test_names", nargs=argparse.REMAINDER, help="Test(s) of interest.")
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(7)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
@CommandArgument(
    "--show-info",
    action="store_true",
    help="Retrieve and display general test information.",
)
@CommandArgument(
    "--show-bugs",
    action="store_true",
    help="Retrieve and display related Bugzilla bugs.",
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_info_tests(command_context, test_names, start, end, show_info, show_bugs, verbose):
    import testinfo

    ti = testinfo.TestInfoTests(verbose)


@SubCommand(
    "test-info",
    "report",
    description="Generate a json report of test manifests and/or tests "
    "categorized by Bugzilla component and optionally filtered "
    "by path, component, and/or manifest annotations.",
)
@CommandArgument(
    "--components",
    default=None,
    help="Comma-separated list of Bugzilla components."
    " eg. Testing::General,Core::WebVR",
)
@CommandArgument(
    "--flavor",
    help='Limit results to tests of the specified flavor (eg. "xpcshell").',
)
@CommandArgument(
    "--subsuite",
    help='Limit results to tests of the specified subsuite (eg. "devtools").',
)
@CommandArgument(
    "paths", nargs=argparse.REMAINDER, help="File system paths of interest."
)
@CommandArgument(
    "--show-manifests",
    action="store_true",
    help="Include test manifests in report.",
)
@CommandArgument(
    "--show-tests", action="store_true", help="Include individual tests in report."
)
@CommandArgument(
    "--show-summary", action="store_true", help="Include summary in report."
)
@CommandArgument(
    "--show-annotations",
    action="store_true",
    help="Include list of manifest annotation conditions in report.",
)
@CommandArgument(
    "--show-testruns",
    action="store_true",
    help="Include total number of runs the test has if there are failures.",
)
@CommandArgument(
    "--filter-values",
    help="Comma-separated list of value regular expressions to filter on; "
    "displayed tests contain all specified values.",
)
@CommandArgument(
    "--filter-keys",
    help="Comma-separated list of test keys to filter on, "
    'like "skip-if"; only these fields will be searched '
    "for filter-values.",
)
@CommandArgument(
    "--no-component-report",
    action="store_false",
    dest="show_components",
    default=True,
    help="Do not categorize by bugzilla component.",
)
@CommandArgument("--output-file", help="Path to report file.")
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
def test_report(
    command_context, components, flavor, subsuite, paths, show_manifests,
    show_tests, show_summary, show_annotations, show_testruns, filter_values,
    filter_keys, show_components, output_file, verbose, start, end,
):
    import testinfo
    from mozbuild import build_commands

    try:
        command_context.config_environment
    except BuildEnvironmentNotFoundException:
        print("Looks like configure has not run yet, running it now...")
        build_commands.configure(command_context)

    ti = testinfo.TestInfoReport(verbose)


@SubCommand(
    "test-info",
    "report-diff",
    description='Compare two reports generated by "test-info reports".',
)
@CommandArgument(
    "--before",
    default=None,
    help="The first (earlier) report file; path to local file or url.",
)
@CommandArgument(
    "--after", help="The second (later) report file; path to local file or url."
)
@CommandArgument(
    "--output-file",
    help="Path to report file to be written. If not specified, report "
    "will be written to standard output.",
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_report_diff(command_context, before, after, output_file, verbose):
    import testinfo

    ti = testinfo.TestInfoReport(verbose)
    ti.report_diff(before, after, output_file)


@SubCommand(
    "test-info",
    "testrun-report",
    description="Generate report of number of runs for each test group (manifest)",
)
@CommandArgument("--output-file", help="Path to report file.")
def test_info_testrun_report(command_context, output_file):
    import json

    import testinfo

    ti = testinfo.TestInfoReport(verbose=True)
    runcounts = ti.get_runcounts()
    if output_file:
        output_file = os.path.abspath(output_file)
        output_dir = os.path.dirname(output_file)
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)
        with open(output_file, "w") as f:
            json.dump(runcounts, f)
    else:
        print(runcounts)


@SubCommand(
    "test-info",
    "failure-report",
    description="Display failure line groupings and frequencies for "
    "single tracking intermittent bugs.",
)
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
@CommandArgument(
    "--bugid",
    default=None,
    help="bugid for treeherder intermittent failures data query.",
)
def test_info_failures(command_context, start, end, bugid):
    import requests

    # bugid comes in as a string, we need an int:
    try:
        bugid = int(bugid)
    except (TypeError, ValueError):
        bugid = None
    if not bugid:
        print("Please enter a valid bugid (e.g. '1760132')")
        return 1

    # get the bug info
    url = (
        "https://bugzilla.mozilla.org/rest/bug?include_fields=summary,depends_on&id=%s"
        % bugid
    )
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    if r.status_code != 200:
        print("%s error retrieving url: %s" % (r.status_code, url))
        return 1

    data = r.json()
    if "bugs" not in data or not data["bugs"]:
        print("unable to get bugzilla information for %s" % bugid)
        return 1

    summary = data["bugs"][0]["summary"]
    parts = summary.split("|")
    if not summary.endswith("single tracking bug") or len(parts) != 2:
        print("this query only works with single tracking bugs")
        return 1

    # get depends_on bugs:
    buglist = [bugid]
    if "depends_on" in data["bugs"][0]:
        buglist.extend(data["bugs"][0]["depends_on"])

    testname = parts[0].strip().split(" ")[-1]

    # now query treeherder to get details about annotations
    data = []
    for b in buglist:
        url = "https://treeherder.mozilla.org/api/failuresbybug/"
        url += "?startday=%s&endday=%s&tree=trunk&bug=%s" % (start, end, b)
        r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
        r.raise_for_status()
        data.extend(r.json())

    if not data:
        print("no failures were found for given bugid, please ensure bug is")
        print("accessible via: https://treeherder.mozilla.org/intermittent-failures")
        return 1

    # query VCS to get current list of variants:
    import yaml

    url = "https://hg.mozilla.org/mozilla-central/raw-file/tip/taskcluster/ci/test/variants.yml"
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    variants = yaml.safe_load(r.text)
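
    # variants.yml maps variant names to definitions that include a "suffix"
    # appearing in test_suite names, e.g. (hypothetical shape):
    #   {"webrender-sw": {"suffix": "swr"}, ...}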
1085 "\nQuerying data for bug %s annotated from %s to %s on trunk.\n\n"
1086 % (buglist
, start
, end
)
1090 for failure
in data
:
1091 # config = platform/buildtype
1092 # testsuite (<suite>[-variant][-<chunk>])
1093 # lines - group by patterns that contain test name
1094 config
= "%s/%s" % (failure
["platform"], failure
["build_type"])
1098 varpos
= len(failure
["test_suite"])
1099 for v
in variants
.keys():
1100 var
= "-%s" % variants
[v
]["suffix"]
1101 if var
in failure
["test_suite"]:
1102 if failure
["test_suite"].find(var
) < varpos
:
1106 suite
= failure
["test_suite"].split(variant
)[0]
1108 parts
= failure
["test_suite"].split("-")
1111 suite
= "-".join(parts
[:-1])
1113 pass # if this works, then the last '-X' is a number :)
1116 print("Error: failure to find variant in %s" % failure
["test_suite"])
1118 job
= "%s-%s%s" % (config
, suite
, variant
)
1119 if job
not in jobs
.keys():
1123 # lines - sum(hash) of all lines where we match testname
1125 for line
in failure
["lines"]:
1126 if len(line
.split(testname
)) <= 1:
1128 # strip off timestamp and mozharness status
1129 parts
= line
.split("TEST-UNEXPECTED")
1130 l
= "TEST-UNEXPECTED%s" % parts
[-1]
1132 # only keep 25 characters of the failure, often longer is random numbers
1133 parts
= l
.split(testname
)
1134 l
= "%s%s%s" % (parts
[0], testname
, parts
[1][:25])
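
            # Illustrative normalization (hypothetical log line): from
            #   "12:34:56 INFO - TEST-UNEXPECTED-FAIL | dom/test_foo.js | Test timed out at 0x7fe..."
            # only "TEST-UNEXPECTED-FAIL | dom/test_foo.js" plus the next 25
            # characters survive, so near-identical failures hash together.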

        if not failure["lines"]:
            # no failure lines at all; group such failures under one key
            hvalue = "none"

        if hvalue not in lines.keys():
            lines[hvalue] = {"lines": failure["lines"], "config": []}
        lines[hvalue]["config"].append(job)

    for h in lines.keys():
        print("%s errors with:" % (len(lines[h]["config"])))
        for l in lines[h]["lines"]:
            print(l)
        if not lines[h]["lines"]:
            print(
                "... no failure lines recorded in"
                " https://treeherder.mozilla.org/intermittent-failures ..."
            )

        for job in jobs:
            count = len([x for x in lines[h]["config"] if x == job])
            if count > 0:
                print("  %s: %s" % (job, count))


@Command(
    "rusttests",
    category="testing",
    conditions=[conditions.is_non_artifact_build],
    description="Run rust unit tests (via cargo test).",
)
def run_rusttests(command_context, **kwargs):
    return command_context._mach_context.commands.dispatch(
        "build",
        command_context._mach_context,
        what=["pre-export", "export", "recurse_rusttests"],
    )
1180 "fluent-migration-test",
1182 description
="Test Fluent migration recipes.",
1184 @CommandArgument("test_paths", nargs
="*", metavar
="N", help="Recipe paths to test.")
1185 def run_migration_tests(command_context
, test_paths
=None, **kwargs
):
1188 command_context
.activate_virtualenv()
1189 from test_fluent_migrations
import fmt
1193 for to_test
in test_paths
:
1195 context
= fmt
.inspect_migration(to_test
)
1196 for issue
in context
["issues"]:
1197 command_context
.log(
1199 "fluent-migration-test",
1201 "error": issue
["msg"],
1204 "ERROR in {file}: {error}",
1206 if context
["issues"]:
1208 with_context
.append(
1211 "references": context
["references"],
1214 except Exception as e
:
1215 command_context
.log(
1217 "fluent-migration-test",
1218 {"error": str(e
), "file": to_test
},
1219 "ERROR in {file}: {error}",
1222 obj_dir
= fmt
.prepare_object_dir(command_context
)
1223 for context
in with_context
:
1224 rv |
= fmt
.test_migration(command_context
, obj_dir
, **context
)