# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import argparse
import io
import json
import logging
import os
import subprocess
import sys
from mach.decorators import Command, CommandArgument, SubCommand
from mozbuild.base import BuildEnvironmentNotFoundException
from mozbuild.base import MachCommandConditions as conditions

UNKNOWN_TEST = """
I was unable to find tests from the given argument(s).

You should specify a test directory, filename, test suite name, or
abbreviation.

It's possible my little brain doesn't know about the type of test you are
trying to execute. If you suspect this, please request support by filing
a bug at
https://bugzilla.mozilla.org/enter_bug.cgi?product=Testing&component=General.
""".strip()

UNKNOWN_FLAVOR = """
I know you are trying to run a %s%s test. Unfortunately, I can't run those
tests yet. Sorry!
""".strip()

TEST_HELP = """
Test or tests to run. Tests can be specified by filename, directory, suite
name or suite alias.

The following test suites and aliases are supported: {}
""".strip()
def get_test_parser():
    from mozlog.commandline import add_logging_group
    from moztest.resolve import TEST_SUITES

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "what",
        default=None,
        nargs="+",
        help=TEST_HELP.format(", ".join(sorted(TEST_SUITES))),
    )
    parser.add_argument(
        "extra_args",
        default=None,
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass to the underlying test command(s). "
        "If an underlying command doesn't recognize the argument, it "
        "will fail.",
    )
    parser.add_argument(
        "--debugger",
        default=None,
        help="Specify a debugger to use.",
    )
    add_logging_group(parser)
    return parser
ADD_TEST_SUPPORTED_SUITES = [
    "mochitest-chrome",
    "mochitest-plain",
    "mochitest-browser-chrome",
    "web-platform-tests-privatebrowsing",
    "web-platform-tests-testharness",
    "web-platform-tests-reftest",
    "xpcshell",
]
ADD_TEST_SUPPORTED_DOCS = ["js", "html", "xhtml", "xul"]

SUITE_SYNONYMS = {
    "wpt": "web-platform-tests-testharness",
    "wpt-privatebrowsing": "web-platform-tests-privatebrowsing",
    "wpt-testharness": "web-platform-tests-testharness",
    "wpt-reftest": "web-platform-tests-reftest",
}
MISSING_ARG = object()


def create_parser_addtest():
    import addtest

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--suite",
        choices=sorted(ADD_TEST_SUPPORTED_SUITES + list(SUITE_SYNONYMS.keys())),
        help="suite for the test. "
        "If you pass a `test` argument this will be determined "
        "based on the filename and the folder it is in",
    )
    parser.add_argument(
        "-o",
        "--overwrite",
        action="store_true",
        help="Overwrite an existing file if it exists.",
    )
    parser.add_argument(
        "--doc",
        choices=ADD_TEST_SUPPORTED_DOCS,
        help="Document type for the test (if applicable). "
        "If you pass a `test` argument this will be determined "
        "based on the filename.",
    )
    parser.add_argument(
        "-e",
        "--editor",
        action="store",
        nargs="?",
        default=MISSING_ARG,
        help="Open the created file(s) in an editor; if a "
        "binary is supplied it will be used otherwise the default editor for "
        "your environment will be opened",
    )

    for base_suite in addtest.TEST_CREATORS:
        cls = addtest.TEST_CREATORS[base_suite]
        if hasattr(cls, "get_parser"):
            group = parser.add_argument_group(base_suite)
            cls.get_parser(group)

    parser.add_argument("test", nargs="?", help=("Test to create."))

    return parser
@Command(
    "addtest",
    category="testing",
    description="Generate tests based on templates",
    parser=create_parser_addtest,
)
def add_test(
    command_context, suite=None, test=None, doc=None, overwrite=False, editor=MISSING_ARG, **kwargs
):
    import addtest

    from moztest.resolve import TEST_SUITES

    if not suite and not test:
        return create_parser_addtest().parse_args(["--help"])

    if suite in SUITE_SYNONYMS:
        suite = SUITE_SYNONYMS[suite]

    if test:
        if not overwrite and os.path.isfile(os.path.abspath(test)):
            print("Error: can't generate a test that already exists:", test)
            return 1
        abs_test = os.path.abspath(test)
        if not doc:
            doc = guess_doc(abs_test)
        if not suite:
            guessed_suite, err = guess_suite(abs_test)
            if err:
                print(err)
                return 1
            suite = guessed_suite

    if not suite:
        print(
            "We couldn't automatically determine a suite. "
            "Please specify `--suite` with one of the following options:\n{}\n"
            "If you'd like to add support to a new suite, please file a bug "
            "blocking https://bugzilla.mozilla.org/show_bug.cgi?id=1540285.".format(
                ADD_TEST_SUPPORTED_SUITES
            )
        )
        return 1

    if doc not in ADD_TEST_SUPPORTED_DOCS:
        print(
            "Error: invalid `doc`. Either pass in a test with a valid extension "
            "({}) or pass in the `doc` argument".format(ADD_TEST_SUPPORTED_DOCS)
        )
        return 1

    creator_cls = addtest.creator_for_suite(suite)

    if creator_cls is None:
        print("Sorry, `addtest` doesn't currently know how to add {}".format(suite))
        return 1

    creator = creator_cls(command_context.topsrcdir, test, suite, doc, **kwargs)
    paths = []
    for path, template in creator:
        if path:
            paths.append(path)
            print("Adding a test file at {} (suite `{}`)".format(path, suite))

            try:
                os.makedirs(os.path.dirname(path))
            except OSError:
                pass

            with io.open(path, "w", newline="\n") as f:
                f.write(template)
        else:
            # write to stdout if you passed only suite and doc and not a file path
            print(template)

    if test:
        creator.update_manifest()

        # Small hack, should really do this better
        if suite.startswith("wpt-"):
            suite = "web-platform-tests"

        mach_command = TEST_SUITES[suite]["mach_command"]
        print(
            "Please make sure to add the new test to your commit. "
            "You can now run the test with:\n ./mach {} {}".format(
                mach_command, test
            )
        )

    if editor is not MISSING_ARG:
        if editor is not None:
            pass
        elif "VISUAL" in os.environ:
            editor = os.environ["VISUAL"]
        elif "EDITOR" in os.environ:
            editor = os.environ["EDITOR"]
        else:
            print("Unable to determine editor; please specify a binary")
            editor = None

        if editor:
            proc = subprocess.Popen("%s %s" % (editor, " ".join(paths)), shell=True)
            proc.wait()

    return 0
def guess_doc(abs_test):
    filename = os.path.basename(abs_test)
    return os.path.splitext(filename)[1].strip(".")
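# e.g. guess_doc("/srcdir/dom/base/test/test_foo.html") -> "html" (the file
# extension without the dot), which is then validated against ADD_TEST_SUPPORTED_DOCS.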
def guess_suite(abs_test):
    # If you pass an abs_test, try to detect the type based on the name
    # and folder. This detection can be skipped if you pass the `--suite` arg.
    err = None
    guessed_suite = None

    parent = os.path.dirname(abs_test)
    filename = os.path.basename(abs_test)

    has_browser_ini = os.path.isfile(os.path.join(parent, "browser.ini"))
    has_browser_toml = os.path.isfile(os.path.join(parent, "browser.toml"))
    has_chrome_ini = os.path.isfile(os.path.join(parent, "chrome.ini"))
    has_chrome_toml = os.path.isfile(os.path.join(parent, "chrome.toml"))
    has_plain_ini = os.path.isfile(os.path.join(parent, "mochitest.ini"))
    has_plain_toml = os.path.isfile(os.path.join(parent, "mochitest.toml"))
    has_xpcshell_ini = os.path.isfile(os.path.join(parent, "xpcshell.ini"))
    has_xpcshell_toml = os.path.isfile(os.path.join(parent, "xpcshell.toml"))

    in_wpt_folder = abs_test.startswith(
        os.path.abspath(os.path.join("testing", "web-platform"))
    )

    if in_wpt_folder:
        guessed_suite = "web-platform-tests-testharness"
        if "/css/" in abs_test:
            guessed_suite = "web-platform-tests-reftest"
    elif (
        filename.startswith("test_")
        and (has_xpcshell_ini or has_xpcshell_toml)
        and guess_doc(abs_test) == "js"
    ):
        guessed_suite = "xpcshell"
    else:
        if filename.startswith("browser_") and (has_browser_ini or has_browser_toml):
            guessed_suite = "mochitest-browser-chrome"
        elif filename.startswith("test_"):
            if (has_chrome_ini or has_chrome_toml) and (
                has_plain_ini or has_plain_toml
            ):
                err = (
                    "Error: directory contains both a chrome.{ini|toml} and mochitest.{ini|toml}. "
                    "Please set --suite=mochitest-chrome or --suite=mochitest-plain."
                )
            elif has_chrome_ini or has_chrome_toml:
                guessed_suite = "mochitest-chrome"
            elif has_plain_ini or has_plain_toml:
                guessed_suite = "mochitest-plain"
    return guessed_suite, err
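# Illustrative examples of the heuristics above: a browser_foo.js next to a
# browser.toml (or browser.ini) manifest is guessed as mochitest-browser-chrome;
# a test_bar.js next to an xpcshell.toml is guessed as xpcshell; anything under
# testing/web-platform defaults to web-platform-tests-testharness.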
class MachTestRunner:
    """Adapter for mach test to simplify its import externally."""

    def test(command_context, what, extra_args, **log_args):
        return test(command_context, what, extra_args, **log_args)
@Command(
    "test",
    category="testing",
    description="Run tests (detects the kind of test and runs it).",
    parser=get_test_parser,
)
def test(command_context, what, extra_args, **log_args):
    """Run tests from names or paths.

    mach test accepts arguments specifying which tests to run. Each argument
    can be:

    * The path to a test file
    * A directory containing tests
    * A test suite name
    * An alias to a test suite name (codes used on TreeHerder)
    * The path to a test manifest

    When paths or directories are given, they are first resolved to test
    files known to the build system.

    If resolved tests belong to more than one test type/flavor/harness,
    the harness for each relevant type/flavor will be invoked. e.g. if
    you specify a directory with xpcshell and browser chrome mochitests,
    both harnesses will be invoked.

    Warning: `mach test` does not automatically re-build.
    Please remember to run `mach build` when necessary.

    EXAMPLES

    Run all test files in the devtools/client/shared/redux/middleware/xpcshell/
    directory:

    `./mach test devtools/client/shared/redux/middleware/xpcshell/`

    The below command prints a short summary of results instead of
    the default more verbose output.
    Do not forget the - (minus sign) after --log-grouped!

    `./mach test --log-grouped - devtools/client/shared/redux/middleware/xpcshell/`

    To learn more about arguments for each test type/flavor/harness, please run
    `./mach <test-harness> --help`. For example, `./mach mochitest --help`.
    """
    from mozlog.commandline import setup_logging
    from mozlog.handlers import StreamHandler
    from moztest.resolve import TEST_SUITES, TestResolver, get_suite_definition

    resolver = command_context._spawn(TestResolver)
    run_suites, run_tests = resolver.resolve_metadata(what)

    if not run_suites and not run_tests:
        print(UNKNOWN_TEST)
        return 1
    if log_args.get("debugger", None):
        import mozdebug

        if not mozdebug.get_debugger_info(log_args.get("debugger")):
            return 1
        extra_args_debugger_notation = "=".join(
            ["--debugger", log_args.get("debugger")]
        )
        if extra_args:
            extra_args.append(extra_args_debugger_notation)
        else:
            extra_args = [extra_args_debugger_notation]
    # Create shared logger
    format_args = {"level": command_context._mach_context.settings["test"]["level"]}
    if not run_suites and len(run_tests) == 1:
        format_args["verbose"] = True
        format_args["compact"] = False

    default_format = command_context._mach_context.settings["test"]["format"]
    log = setup_logging(
        "mach-test", log_args, {default_format: sys.stdout}, format_args
    )
    for handler in log.handlers:
        if isinstance(handler, StreamHandler):
            handler.formatter.inner.summary_on_shutdown = True

    if log_args.get("custom_handler", None) is not None:
        log.add_handler(log_args.get("custom_handler"))
    status = None
    for suite_name in run_suites:
        suite = TEST_SUITES[suite_name]
        kwargs = suite["kwargs"]
        kwargs["log"] = log
        kwargs.setdefault("subsuite", None)

        if "mach_command" in suite:
            res = command_context._mach_context.commands.dispatch(
                suite["mach_command"],
                command_context._mach_context,
                argv=extra_args,
                **kwargs,
            )
            if res:
                status = res

    buckets = {}
    for test in run_tests:
        key = (test["flavor"], test.get("subsuite", ""))
        buckets.setdefault(key, []).append(test)
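    # buckets maps (flavor, subsuite) -> list of resolved tests; e.g. the keys
    # ("xpcshell", "") and ("browser-chrome", "devtools") would each get their
    # own harness invocation below.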
    for (flavor, subsuite), tests in sorted(buckets.items()):
        _, m = get_suite_definition(flavor, subsuite)
        if "mach_command" not in m:
            substr = "-{}".format(subsuite) if subsuite else ""
            print(UNKNOWN_FLAVOR % (flavor, substr))
            status = 1
            continue

        kwargs = dict(m["kwargs"])
        kwargs["log"] = log
        kwargs.setdefault("subsuite", None)

        res = command_context._mach_context.commands.dispatch(
            m["mach_command"],
            command_context._mach_context,
            argv=extra_args,
            test_objects=tests,
            **kwargs,
        )
        if res:
            status = res

    if not log.has_shutdown:
        log.shutdown()

    return status
470 "cppunittest", category
="testing", description
="Run cpp unit tests (C++ tests)."
476 help="Test to run. Can be specified as one or more files or "
477 "directories, or omitted. If omitted, the entire test suite is "
480 def run_cppunit_test(command_context
, **params
):
481 from mozlog
import commandline
483 log
= params
.get("log")
485 log
= commandline
.setup_logging("cppunittest", {}, {"tbpl": sys
.stdout
})
487 # See if we have crash symbols
488 symbols_path
= os
.path
.join(command_context
.distdir
, "crashreporter-symbols")
489 if not os
.path
.isdir(symbols_path
):
492 # If no tests specified, run all tests in main manifest
493 tests
= params
["test_files"]
495 tests
= [os
.path
.join(command_context
.distdir
, "cppunittests")]
496 manifest_path
= os
.path
.join(
497 command_context
.topsrcdir
, "testing", "cppunittest.toml"
502 utility_path
= command_context
.bindir
504 if conditions
.is_android(command_context
):
505 from mozrunner
.devices
.android_device
import (
507 verify_android_device
,
510 verify_android_device(command_context
, install
=InstallIntent
.NO
)
511 return run_android_test(tests
, symbols_path
, manifest_path
, log
)
513 return run_desktop_test(
514 command_context
, tests
, symbols_path
, manifest_path
, utility_path
, log
518 def run_desktop_test(
519 command_context
, tests
, symbols_path
, manifest_path
, utility_path
, log
521 import runcppunittests
as cppunittests
522 from mozlog
import commandline
524 parser
= cppunittests
.CPPUnittestOptions()
525 commandline
.add_logging_group(parser
)
526 options
, args
= parser
.parse_args()
528 options
.symbols_path
= symbols_path
529 options
.manifest_path
= manifest_path
530 options
.utility_path
= utility_path
531 options
.xre_path
= command_context
.bindir
534 result
= cppunittests
.run_test_harness(options
, tests
)
535 except Exception as e
:
536 log
.error("Caught exception running cpp unit tests: %s" % str(e
))
540 return 0 if result
else 1
def run_android_test(command_context, tests, symbols_path, manifest_path, log):
    import remotecppunittests
    from mozlog import commandline

    parser = remotecppunittests.RemoteCPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    if not options.adb_path:
        from mozrunner.devices.android_device import get_adb_path

        options.adb_path = get_adb_path(command_context)
    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.xre_path = command_context.bindir
    options.local_lib = command_context.bindir.replace("bin", "fennec")
    for file in os.listdir(os.path.join(command_context.topobjdir, "dist")):
        if file.endswith(".apk") and file.startswith("fennec"):
            options.local_apk = os.path.join(command_context.topobjdir, "dist", file)
            log.info("using APK: " + options.local_apk)
            break

    try:
        result = remotecppunittests.run_test_harness(options, tests)
    except Exception as e:
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        result = False
        raise

    return 0 if result else 1
def executable_name(name):
    return name + ".exe" if sys.platform.startswith("win") else name
@Command(
    "jstests",
    category="testing",
    description="Run SpiderMonkey JS tests in the JS shell.",
    ok_if_tests_disabled=True,
)
@CommandArgument("--shell", help="The shell to be used")
@CommandArgument(
    "params",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
)
def run_jstests(command_context, shell, params):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path
    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    jstest_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "js", "src", "tests", "jstests.py"),
        js,
    ] + params

    return subprocess.call(jstest_cmd)
@Command(
    "jit-test",
    category="testing",
    description="Run SpiderMonkey jit-tests in the JS shell.",
    ok_if_tests_disabled=True,
)
@CommandArgument("--shell", help="The shell to be used")
@CommandArgument(
    "--cgc",
    action="store_true",
    help="Run with the SM(cgc) job's env vars",
)
@CommandArgument(
    "params",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
)
def run_jittests(command_context, shell, cgc, params):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    jittest_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "js", "src", "jit-test", "jit_test.py"),
        js,
    ] + params

    env = os.environ.copy()
    if cgc:
        env["JS_GC_ZEAL"] = "IncrementalMultipleSlices"

    return subprocess.call(jittest_cmd, env=env)
645 @Command("jsapi-tests", category
="testing", description
="Run SpiderMonkey JSAPI tests.")
650 help="List all tests",
656 help="Run tests for frontend-only APIs, with light-weight entry point",
662 help="Test to run. Can be a prefix or omitted. If "
663 "omitted, the entire test suite is executed.",
665 def run_jsapitests(command_context
, list=False, frontend_only
=False, test_name
=None):
    jsapi_tests_cmd = [
        os.path.join(command_context.bindir, executable_name("jsapi-tests"))
    ]
    if list:
        jsapi_tests_cmd.append("--list")
    if frontend_only:
        jsapi_tests_cmd.append("--frontend-only")
    if test_name:
        jsapi_tests_cmd.append(test_name)

    test_env = os.environ.copy()
    test_env["TOPSRCDIR"] = command_context.topsrcdir

    result = subprocess.call(jsapi_tests_cmd, env=test_env)
    if result != 0:
        print(f"jsapi-tests failed, exit code {result}")
    return result
def run_check_js_msg(command_context):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    check_cmd = [
        python,
        os.path.join(command_context.topsrcdir, "config", "check_js_msg_encoding.py"),
    ]

    return subprocess.call(check_cmd)
def get_jsshell_parser():
    from jsshell.benchmark import get_parser

    return get_parser()
@Command(
    "jsshell-bench",
    category="testing",
    parser=get_jsshell_parser,
    description="Run benchmarks in the SpiderMonkey JS shell.",
)
def run_jsshelltests(command_context, **kwargs):
    from jsshell import benchmark

    return benchmark.run(**kwargs)
@Command(
    "cramtest",
    category="testing",
    description="Mercurial style .t tests for command line applications.",
)
@CommandArgument(
    "test_paths",
    nargs="*",
    help="Test paths to run. Each path can be a test file or directory. "
    "If omitted, the entire suite will be run.",
)
@CommandArgument(
    "cram_args",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the cram binary. See "
    "'./mach python -m cram -- -h' for a list of available options.",
)
def cramtest(command_context, cram_args=None, test_paths=None, test_objects=None):
    command_context.activate_virtualenv()

    import mozinfo
    from manifestparser import TestManifest
    if test_objects is None:
        from moztest.resolve import TestResolver

        resolver = command_context._spawn(TestResolver)
        if test_paths:
            # If we were given test paths, try to find tests matching them.
            test_objects = resolver.resolve_tests(paths=test_paths, flavor="cram")
        else:
            # Otherwise just run everything in CRAMTEST_MANIFESTS
            test_objects = resolver.resolve_tests(flavor="cram")

    if not test_objects:
        message = "No tests were collected, check spelling of the test paths."
        command_context.log(logging.WARN, "cramtest", {}, message)
        return 1

    mp = TestManifest()
    mp.tests.extend(test_objects)
    tests = mp.active_tests(disabled=False, **mozinfo.info)
    python = command_context.virtualenv_manager.python_path
    cmd = [python, "-m", "cram"] + cram_args + [t["relpath"] for t in tests]
    return subprocess.call(cmd, cwd=command_context.topsrcdir)
from datetime import date, timedelta


@Command(
    "test-info", category="testing", description="Display historical test results."
)
def test_info(command_context):
    """All functions implemented as subcommands."""
@SubCommand(
    "test-info",
    "tests",
    description="Display historical test result summary for named tests.",
)
@CommandArgument("test_names", nargs=argparse.REMAINDER, help="Test(s) of interest.")
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(7)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
@CommandArgument(
    "--show-info",
    action="store_true",
    help="Retrieve and display general test information.",
)
@CommandArgument(
    "--show-bugs",
    action="store_true",
    help="Retrieve and display related Bugzilla bugs.",
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_info_tests(
    command_context, test_names, start, end, show_info, show_bugs, verbose
):
    import testinfo

    ti = testinfo.TestInfoTests(verbose)
@SubCommand(
    "test-info",
    "report",
    description="Generate a json report of test manifests and/or tests "
    "categorized by Bugzilla component and optionally filtered "
    "by path, component, and/or manifest annotations.",
)
@CommandArgument(
    "--components",
    default=None,
    help="Comma-separated list of Bugzilla components."
    " eg. Testing::General,Core::WebVR",
)
@CommandArgument(
    "--flavor",
    help='Limit results to tests of the specified flavor (eg. "xpcshell").',
)
@CommandArgument(
    "--subsuite",
    help='Limit results to tests of the specified subsuite (eg. "devtools").',
)
@CommandArgument(
    "paths", nargs=argparse.REMAINDER, help="File system paths of interest."
)
@CommandArgument(
    "--show-manifests",
    action="store_true",
    help="Include test manifests in report.",
)
@CommandArgument(
    "--show-tests", action="store_true", help="Include individual tests in report."
)
@CommandArgument(
    "--show-summary", action="store_true", help="Include summary in report."
)
@CommandArgument(
    "--show-annotations",
    action="store_true",
    help="Include list of manifest annotation conditions in report.",
)
868 help="Include total number of runs the test has if there are failures.",
872 help="Comma-separated list of value regular expressions to filter on; "
873 "displayed tests contain all specified values.",
877 help="Comma-separated list of test keys to filter on, "
878 'like "skip-if"; only these fields will be searched '
879 "for filter-values.",
882 "--no-component-report",
883 action
="store_false",
884 dest
="show_components",
886 help="Do not categorize by bugzilla component.",
888 @CommandArgument("--output-file", help="Path to report file.")
889 @CommandArgument("--runcounts-input-file", help="Optional path to report file.")
890 @CommandArgument("--verbose", action
="store_true", help="Enable debug logging.")
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
def test_report(
    command_context,
    components,
    flavor,
    subsuite,
    paths,
    show_manifests,
    show_tests,
    show_summary,
    show_annotations,
    filter_values,
    filter_keys,
    show_components,
    output_file,
    verbose,
    start,
    end,
    show_testruns,
    runcounts_input_file,
):
    import testinfo
    from mozbuild import build_commands

    try:
        command_context.config_environment
    except BuildEnvironmentNotFoundException:
        print("Looks like configure has not run yet, running it now...")
        build_commands.configure(command_context)

    ti = testinfo.TestInfoReport(verbose)
    ti.report(
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        filter_values,
        filter_keys,
        show_components,
        output_file,
        start,
        end,
        show_testruns,
        runcounts_input_file,
    )
@SubCommand(
    "test-info",
    "report-diff",
    description='Compare two reports generated by "test-info reports".',
)
@CommandArgument(
    "--before",
    help="The first (earlier) report file; path to local file or url.",
)
@CommandArgument(
    "--after", help="The second (later) report file; path to local file or url."
)
@CommandArgument(
    "--output-file",
    help="Path to report file to be written. If not specified, report "
    "will be written to standard output.",
)
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_report_diff(command_context, before, after, output_file, verbose):
    import testinfo

    ti = testinfo.TestInfoReport(verbose)
    ti.report_diff(before, after, output_file)
@SubCommand(
    "test-info",
    "testrun-report",
    description="Generate report of number of runs for each test group (manifest)",
)
@CommandArgument("--output-file", help="Path to report file.")
def test_info_testrun_report(command_context, output_file):
    import testinfo

    ti = testinfo.TestInfoReport(verbose=True)
    runcounts = {}
    if os.environ.get("GECKO_HEAD_REPOSITORY", "") in [
        "https://hg.mozilla.org/mozilla-central",
        "https://hg.mozilla.org/try",
    ]:
        runcounts = ti.get_runcounts()

    if output_file:
        output_file = os.path.abspath(output_file)
        output_dir = os.path.dirname(output_file)
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)
        with open(output_file, "w") as f:
            json.dump(runcounts, f)
@SubCommand(
    "test-info",
    "failures",
    description="Display failure line groupings and frequencies for "
    "single tracking intermittent bugs.",
)
@CommandArgument(
    "--start",
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
)
@CommandArgument(
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
)
@CommandArgument(
    "--bugid",
    default=None,
    help="bugid for treeherder intermittent failures data query.",
)
def test_info_failures(command_context, start, end, bugid):
    import requests
    import yaml

    # bugid comes in as a string, we need an int:
    try:
        bugid = int(bugid)
    except ValueError:
        print("Please enter a valid bugid (i.e. '1760132')")
        return 1
1039 "https://bugzilla.mozilla.org/rest/bug?include_fields=summary,depends_on&id=%s"
1042 r
= requests
.get(url
, headers
={"User-agent": "mach-test-info/1.0"})
1043 if r
.status_code
!= 200:
1044 print("%s error retrieving url: %s" % (r
.status_code
, url
))
1048 print("unable to get bugzilla information for %s" % bugid
)
1051 summary
= data
["bugs"][0]["summary"]
1052 parts
= summary
.split("|")
1053 if not summary
.endswith("single tracking bug") or len(parts
) != 2:
1054 print("this query only works with single tracking bugs")
1057 # get depends_on bugs:
1059 if "depends_on" in data
["bugs"][0]:
1060 buglist
.extend(data
["bugs"][0]["depends_on"])
1062 testname
= parts
[0].strip().split(" ")[-1]
    # now query treeherder to get details about annotations
    data = []
    for b in buglist:
        url = "https://treeherder.mozilla.org/api/failuresbybug/"
        url += "?startday=%s&endday=%s&tree=trunk&bug=%s" % (start, end, b)
        r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
        r.raise_for_status()
        data.extend(r.json())

    if not data:
        print("no failures were found for given bugid, please ensure bug is")
        print("accessible via: https://treeherder.mozilla.org/intermittent-failures")
        return 1

    # query VCS to get current list of variants:
    url = "https://hg.mozilla.org/mozilla-central/raw-file/tip/taskcluster/ci/test/variants.yml"
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    variants = yaml.safe_load(r.text)
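    # variants.yml maps each test variant to metadata including its "suffix",
    # which is what appears in the Treeherder test_suite name; it is used below
    # to split the suite name from the variant.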
1088 "\nQuerying data for bug %s annotated from %s to %s on trunk.\n\n"
1089 % (buglist
, start
, end
)
1093 for failure
in data
:
1094 # config = platform/buildtype
1095 # testsuite (<suite>[-variant][-<chunk>])
1096 # lines - group by patterns that contain test name
1097 config
= "%s/%s" % (failure
["platform"], failure
["build_type"])
1101 varpos
= len(failure
["test_suite"])
1102 for v
in variants
.keys():
1103 var
= "-%s" % variants
[v
]["suffix"]
1104 if var
in failure
["test_suite"]:
1105 if failure
["test_suite"].find(var
) < varpos
:
        suite = ""
        if variant:
            suite = failure["test_suite"].split(variant)[0]
        else:
            parts = failure["test_suite"].split("-")
            try:
                int(parts[-1])
                suite = "-".join(parts[:-1])
            except ValueError:
                pass  # if this works, then the last '-X' is a number :)

        if not suite:
            print("Error: failure to find variant in %s" % failure["test_suite"])
            continue

        job = "%s-%s%s" % (config, suite, variant)
        if job not in jobs.keys():
            jobs[job] = 0
        jobs[job] += 1
        # lines - sum(hash) of all lines where we match testname
        hvalue = 0
        for line in failure["lines"]:
            if len(line.split(testname)) <= 1:
                continue

            # strip off timestamp and mozharness status
            parts = line.split("TEST-UNEXPECTED")
            l = "TEST-UNEXPECTED%s" % parts[-1]

            # only keep 25 characters of the failure, often longer is random numbers
            parts = l.split(testname)
            l = "%s%s%s" % (parts[0], testname, parts[1][:25])

            hvalue += hash(l)
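            # Illustrative: a raw log line such as
            # "[task ...] INFO - TEST-UNEXPECTED-FAIL | dom/base/test/test_foo.js | assertion failure: random 0x7f3a..."
            # is reduced to the "TEST-UNEXPECTED..." portion up through the test name
            # plus the next 25 characters, so lines differing only in trailing noise
            # hash to the same value.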
        if not failure["lines"]:
            hvalue = 0

        if hvalue not in lines.keys():
            lines[hvalue] = {"lines": failure["lines"], "config": []}
        lines[hvalue]["config"].append(job)
    for h in lines.keys():
        print("%s errors with:" % (len(lines[h]["config"])))
        if lines[h]["lines"]:
            for l in lines[h]["lines"]:
                print(l)
        else:
            print(
                "... no failure lines recorded in"
                " https://treeherder.mozilla.org/intermittent-failures ..."
            )

        for job in jobs:
            count = len([x for x in lines[h]["config"] if x == job])
            if count > 0:
                print(" %s: %s" % (job, count))
@Command(
    "rusttests",
    category="testing",
    conditions=[conditions.is_non_artifact_build],
    description="Run rust unit tests (via cargo test).",
)
def run_rusttests(command_context, **kwargs):
    return command_context._mach_context.commands.dispatch(
        "build",
        command_context._mach_context,
        what=["pre-export", "export", "recurse_rusttests"],
    )
1183 "fluent-migration-test",
1185 description
="Test Fluent migration recipes.",
1189 action
="store_true",
1191 help="Use git rather than hg source repository",
1193 @CommandArgument("test_paths", nargs
="*", metavar
="N", help="Recipe paths to test.")
1194 def run_migration_tests(command_context
, l10n_git
=False, test_paths
=None, **kwargs
):
1197 command_context
.activate_virtualenv()
1198 from test_fluent_migrations
import fmt
    rv = 0
    with_context = []
    for to_test in test_paths:
        try:
            context = fmt.inspect_migration(to_test)
            for issue in context["issues"]:
                command_context.log(
                    logging.ERROR,
                    "fluent-migration-test",
                    {
                        "error": issue["msg"],
                        "file": to_test,
                    },
                    "ERROR in {file}: {error}",
                )
            if context["issues"]:
                continue
            with_context.append(
                {
                    "to_test": to_test,
                    "references": context["references"],
                }
            )
        except Exception as e:
            command_context.log(
                logging.ERROR,
                "fluent-migration-test",
                {"error": str(e), "file": to_test},
                "ERROR in {file}: {error}",
            )
            rv = 1
    obj_dir, repo_dir = fmt.prepare_directories(command_context, l10n_git)
    for context in with_context:
        rv |= fmt.test_migration(
            command_context, obj_dir, repo_dir, l10n_git, **context
        )
    return rv
="Manifest operations",
1243 virtualenv_name
="manifest",
1245 def manifest(_command_context
):
1247 All functions implemented as subcommands.
1254 description
="Update manifests to skip failing tests",
1256 @CommandArgument("try_url", nargs
=1, help="Treeherder URL for try (please use quotes)")
1258 "-b", "--bugzilla", default
=None, dest
="bugzilla", help="Bugzilla instance"
1261 "-m", "--meta-bug-id", default
=None, dest
="meta_bug_id", help="Meta Bug id"
1266 action
="store_true",
1268 help="Skip all secondary failures",
1271 "-t", "--save-tasks", default
=None, dest
="save_tasks", help="Save tasks to file"
1274 "-T", "--use-tasks", default
=None, dest
="use_tasks", help="Use tasks from file"
@CommandArgument(
    "--save-failures",
    default=None,
    dest="save_failures",
    help="Save failures to file",
)
@CommandArgument(
    "--use-failures",
    default=None,
    dest="use_failures",
    help="Use failures from file",
)
@CommandArgument(
    "--max-failures",
    default=None,
    dest="max_failures",
    help="Maximum number of failures to skip (-1 == no limit)",
)
1297 @CommandArgument("-v", "--verbose", action
="store_true", help="Verbose mode")
1301 action
="store_true",
1302 help="Determine manifest changes, but do not write them",
1318 from skipfails
import Skipfails
1320 if meta_bug_id
is not None:
1322 meta_bug_id
= int(meta_bug_id
)
1326 if max_failures
is not None:
1328 max_failures
= int(max_failures
)
1334 Skipfails(command_context
, try_url
, verbose
, bugzilla
, dry_run
, turbo
).run(