# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from mach.decorators import Command, CommandArgument, SettingsProvider, SubCommand
from mozbuild.base import BuildEnvironmentNotFoundException
from mozbuild.base import MachCommandConditions as conditions
I was unable to find tests from the given argument(s).
You should specify a test directory, filename, test suite name, or
It's possible my little brain doesn't know about the type of test you are
trying to execute. If you suspect this, please request support by filing
https://bugzilla.mozilla.org/enter_bug.cgi?product=Testing&component=General.
I know you are trying to run a %s%s test. Unfortunately, I can't run those
Test or tests to run. Tests can be specified by filename, directory, suite
The following test suites and aliases are supported: {}
class TestConfig(object):
    def config_settings(cls):
        from mozlog.commandline import log_formatters
        from mozlog.structuredlog import log_levels

        format_desc = "The default format to use when running tests with `mach test`."
        format_choices = list(log_formatters)
        level_desc = "The default log level to use when running tests with `mach test`."
        level_choices = [l.lower() for l in log_levels]
53 ("test.format", "string", format_desc
, "mach", {"choices": format_choices
}),
54 ("test.level", "string", level_desc
, "info", {"choices": level_choices
}),
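        # These two settings back the `test.format` and `test.level` defaults used
        # by `mach test`; they can typically be overridden in a mach settings
        # (machrc) file.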
def get_test_parser():
    from mozlog.commandline import add_logging_group
    from moztest.resolve import TEST_SUITES

    parser = argparse.ArgumentParser()
        help=TEST_HELP.format(", ".join(sorted(TEST_SUITES))),
        nargs=argparse.REMAINDER,
        help="Extra arguments to pass to the underlying test command(s). "
        "If an underlying command doesn't recognize the argument, it "
        help="Specify a debugger to use.",
    add_logging_group(parser)
ADD_TEST_SUPPORTED_SUITES = [
    "mochitest-browser-chrome",
    "web-platform-tests-testharness",
    "web-platform-tests-reftest",

ADD_TEST_SUPPORTED_DOCS = ["js", "html", "xhtml", "xul"]

    "wpt": "web-platform-tests-testharness",
    "wpt-testharness": "web-platform-tests-testharness",
    "wpt-reftest": "web-platform-tests-reftest",

MISSING_ARG = object()
def create_parser_addtest():
    parser = argparse.ArgumentParser()
        choices=sorted(ADD_TEST_SUPPORTED_SUITES + list(SUITE_SYNONYMS.keys())),
114 help="suite for the test. "
115 "If you pass a `test` argument this will be determined "
116 "based on the filename and the folder it is in",
122 help="Overwrite an existing file if it exists.",
126 choices
=ADD_TEST_SUPPORTED_DOCS
,
127 help="Document type for the test (if applicable)."
128 "If you pass a `test` argument this will be determined "
129 "based on the filename.",
137 help="Open the created file(s) in an editor; if a "
138 "binary is supplied it will be used otherwise the default editor for "
139 "your environment will be opened",
    for base_suite in addtest.TEST_CREATORS:
        cls = addtest.TEST_CREATORS[base_suite]
        if hasattr(cls, "get_parser"):
            group = parser.add_argument_group(base_suite)
            cls.get_parser(group)

    parser.add_argument("test", nargs="?", help=("Test to create."))
    description="Generate tests based on templates",
    parser=create_parser_addtest,
    from moztest.resolve import TEST_SUITES

    if not suite and not test:
        return create_parser_addtest().parse_args(["--help"])
    if suite in SUITE_SYNONYMS:
        suite = SUITE_SYNONYMS[suite]

    if not overwrite and os.path.isfile(os.path.abspath(test)):
        print("Error: can't generate a test that already exists:", test)

    abs_test = os.path.abspath(test)
    doc = guess_doc(abs_test)
    guessed_suite, err = guess_suite(abs_test)
    suite = guessed_suite
200 "We couldn't automatically determine a suite. "
201 "Please specify `--suite` with one of the following options:\n{}\n"
202 "If you'd like to add support to a new suite, please file a bug "
203 "blocking https://bugzilla.mozilla.org/show_bug.cgi?id=1540285.".format(
204 ADD_TEST_SUPPORTED_SUITES
209 if doc
not in ADD_TEST_SUPPORTED_DOCS
:
211 "Error: invalid `doc`. Either pass in a test with a valid extension"
212 "({}) or pass in the `doc` argument".format(ADD_TEST_SUPPORTED_DOCS
)
    creator_cls = addtest.creator_for_suite(suite)
    if creator_cls is None:
        print("Sorry, `addtest` doesn't currently know how to add {}".format(suite))

    creator = creator_cls(command_context.topsrcdir, test, suite, doc, **kwargs)
    for path, template in creator:
        print("Adding a test file at {} (suite `{}`)".format(path, suite))
        os.makedirs(os.path.dirname(path))
        with io.open(path, "w", newline="\n") as f:

    # write to stdout if you passed only suite and doc and not a file path

    creator.update_manifest()
    # Small hack, should really do this better
    if suite.startswith("wpt-"):
        suite = "web-platform-tests"

    mach_command = TEST_SUITES[suite]["mach_command"]
        "Please make sure to add the new test to your commit. "
        "You can now run the test with:\n ./mach {} {}".format(
    if editor is not MISSING_ARG:
        if editor is not None:
        elif "VISUAL" in os.environ:
            editor = os.environ["VISUAL"]
        elif "EDITOR" in os.environ:
            editor = os.environ["EDITOR"]
            print("Unable to determine editor; please specify a binary")

        proc = subprocess.Popen("%s %s" % (editor, " ".join(paths)), shell=True)
def guess_doc(abs_test):
    filename = os.path.basename(abs_test)
    return os.path.splitext(filename)[1].strip(".")
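# For example, guess_doc("/path/to/test_foo.html") returns "html": the file
# extension (minus the dot) is treated as the document type.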
def guess_suite(abs_test):
    # If you pass an abs_test, try to detect the type based on the name
    # and folder. This detection can be skipped if you pass the `type` arg.
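    # For example, "browser_foo.js" next to a browser.toml manifest is guessed as
    # mochitest-browser-chrome, while "test_foo.js" next to an xpcshell.toml is
    # guessed as xpcshell.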
    parent = os.path.dirname(abs_test)
    filename = os.path.basename(abs_test)
    has_browser_ini = os.path.isfile(os.path.join(parent, "browser.ini"))
    has_browser_toml = os.path.isfile(os.path.join(parent, "browser.toml"))
    has_chrome_ini = os.path.isfile(os.path.join(parent, "chrome.ini"))
    has_chrome_toml = os.path.isfile(os.path.join(parent, "chrome.toml"))
    has_plain_ini = os.path.isfile(os.path.join(parent, "mochitest.ini"))
    has_plain_toml = os.path.isfile(os.path.join(parent, "mochitest.toml"))
    has_xpcshell_ini = os.path.isfile(os.path.join(parent, "xpcshell.ini"))
    has_xpcshell_toml = os.path.isfile(os.path.join(parent, "xpcshell.toml"))
    in_wpt_folder = abs_test.startswith(
        os.path.abspath(os.path.join("testing", "web-platform"))

        guessed_suite = "web-platform-tests-testharness"
        if "/css/" in abs_test:
            guessed_suite = "web-platform-tests-reftest"
        filename.startswith("test_")
        and (has_xpcshell_ini or has_xpcshell_toml)
        and guess_doc(abs_test) == "js"

        guessed_suite = "xpcshell"
    if filename.startswith("browser_") and (has_browser_ini or has_browser_toml):
        guessed_suite = "mochitest-browser-chrome"
    elif filename.startswith("test_"):
        if (has_chrome_ini or has_chrome_toml) and (
            has_plain_ini or has_plain_toml

                "Error: directory contains both a chrome.{ini|toml} and mochitest.{ini|toml}. "
                "Please set --suite=mochitest-chrome or --suite=mochitest-plain."
        elif has_chrome_ini or has_chrome_toml:
            guessed_suite = "mochitest-chrome"
        elif has_plain_ini or has_plain_toml:
            guessed_suite = "mochitest-plain"
    return guessed_suite, err
    description="Run tests (detects the kind of test and runs it).",
    parser=get_test_parser,

def test(command_context, what, extra_args, **log_args):
349 """Run tests from names or paths.
351 mach test accepts arguments specifying which tests to run. Each argument
354 * The path to a test file
355 * A directory containing tests
357 * An alias to a test suite name (codes used on TreeHerder)
358 * path to a test manifest
360 When paths or directories are given, they are first resolved to test
361 files known to the build system.
363 If resolved tests belong to more than one test type/flavor/harness,
364 the harness for each relevant type/flavor will be invoked. e.g. if
365 you specify a directory with xpcshell and browser chrome mochitests,
366 both harnesses will be invoked.
368 Warning: `mach test` does not automatically re-build.
369 Please remember to run `mach build` when necessary.
373 Run all test files in the devtools/client/shared/redux/middleware/xpcshell/
376 `./mach test devtools/client/shared/redux/middleware/xpcshell/`
378 The below command prints a short summary of results instead of
379 the default more verbose output.
380 Do not forget the - (minus sign) after --log-grouped!
382 `./mach test --log-grouped - devtools/client/shared/redux/middleware/xpcshell/`
384 To learn more about arguments for each test type/flavor/harness, please run
385 `./mach <test-harness> --help`. For example, `./mach mochitest --help`.
    from mozlog.commandline import setup_logging
    from mozlog.handlers import StreamHandler
    from moztest.resolve import TEST_SUITES, TestResolver, get_suite_definition
    resolver = command_context._spawn(TestResolver)
    run_suites, run_tests = resolver.resolve_metadata(what)

    if not run_suites and not run_tests:

    if log_args.get("debugger", None):
        if not mozdebug.get_debugger_info(log_args.get("debugger")):
        extra_args_debugger_notation = "=".join(
            ["--debugger", log_args.get("debugger")]
            extra_args.append(extra_args_debugger_notation)
            extra_args = [extra_args_debugger_notation]
    # Create shared logger
    format_args = {"level": command_context._mach_context.settings["test"]["level"]}
    if not run_suites and len(run_tests) == 1:
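        # A single explicitly-requested test gets verbose, non-compact output so
        # its log is easier to read.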
        format_args["verbose"] = True
        format_args["compact"] = False

    default_format = command_context._mach_context.settings["test"]["format"]
        "mach-test", log_args, {default_format: sys.stdout}, format_args
    for handler in log.handlers:
        if isinstance(handler, StreamHandler):
            handler.formatter.inner.summary_on_shutdown = True
    for suite_name in run_suites:
        suite = TEST_SUITES[suite_name]
        kwargs = suite["kwargs"]
        kwargs.setdefault("subsuite", None)

        if "mach_command" in suite:
            res = command_context._mach_context.commands.dispatch(
                suite["mach_command"],
                command_context._mach_context,
    for test in run_tests:
        key = (test["flavor"], test.get("subsuite", ""))
        buckets.setdefault(key, []).append(test)
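    # Each (flavor, subsuite) bucket is dispatched once in the loop below, so a
    # mixed selection of tests still invokes every relevant harness.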
    for (flavor, subsuite), tests in sorted(buckets.items()):
        _, m = get_suite_definition(flavor, subsuite)
        if "mach_command" not in m:
            substr = "-{}".format(subsuite) if subsuite else ""
            print(UNKNOWN_FLAVOR % (flavor, substr))

        kwargs = dict(m["kwargs"])
        kwargs.setdefault("subsuite", None)

        res = command_context._mach_context.commands.dispatch(
            command_context._mach_context,
    if not log.has_shutdown:
475 "cppunittest", category
="testing", description
="Run cpp unit tests (C++ tests)."
481 help="Test to run. Can be specified as one or more files or "
482 "directories, or omitted. If omitted, the entire test suite is "
def run_cppunit_test(command_context, **params):
    from mozlog import commandline

    log = params.get("log")
        log = commandline.setup_logging("cppunittest", {}, {"tbpl": sys.stdout})
    # See if we have crash symbols
    symbols_path = os.path.join(command_context.distdir, "crashreporter-symbols")
    if not os.path.isdir(symbols_path):

    # If no tests specified, run all tests in main manifest
    tests = params["test_files"]
        tests = [os.path.join(command_context.distdir, "cppunittests")]
        manifest_path = os.path.join(
            command_context.topsrcdir, "testing", "cppunittest.ini"

    utility_path = command_context.bindir
    if conditions.is_android(command_context):
        from mozrunner.devices.android_device import (
            verify_android_device,

        verify_android_device(command_context, install=InstallIntent.NO)
        return run_android_test(tests, symbols_path, manifest_path, log)

    return run_desktop_test(
        command_context, tests, symbols_path, manifest_path, utility_path, log
def run_desktop_test(
    command_context, tests, symbols_path, manifest_path, utility_path, log
    import runcppunittests as cppunittests
    from mozlog import commandline

    parser = cppunittests.CPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.utility_path = utility_path
    options.xre_path = command_context.bindir

        result = cppunittests.run_test_harness(options, tests)
    except Exception as e:
        log.error("Caught exception running cpp unit tests: %s" % str(e))

    return 0 if result else 1
def run_android_test(command_context, tests, symbols_path, manifest_path, log):
    import remotecppunittests
    from mozlog import commandline

    parser = remotecppunittests.RemoteCPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    if not options.adb_path:
        from mozrunner.devices.android_device import get_adb_path

        options.adb_path = get_adb_path(command_context)
    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.xre_path = command_context.bindir
    options.local_lib = command_context.bindir.replace("bin", "fennec")
    for file in os.listdir(os.path.join(command_context.topobjdir, "dist")):
        if file.endswith(".apk") and file.startswith("fennec"):
            options.local_apk = os.path.join(command_context.topobjdir, "dist", file)
            log.info("using APK: " + options.local_apk)

        result = remotecppunittests.run_test_harness(options, tests)
    except Exception as e:
        log.error("Caught exception running cpp unit tests: %s" % str(e))

    return 0 if result else 1
def executable_name(name):
    return name + ".exe" if sys.platform.startswith("win") else name
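# For example, executable_name("js") is "js.exe" on Windows and "js" elsewhere.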
    description="Run SpiderMonkey JS tests in the JS shell.",
    ok_if_tests_disabled=True,
@CommandArgument("--shell", help="The shell to be used")
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
def run_jstests(command_context, shell, params):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
        os.path.join(command_context.topsrcdir, "js", "src", "tests", "jstests.py"),

    return subprocess.call(jstest_cmd)
    description="Run SpiderMonkey jit-tests in the JS shell.",
    ok_if_tests_disabled=True,
@CommandArgument("--shell", help="The shell to be used")
    help="Run with the SM(cgc) job's env vars",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the test harness.",
def run_jittests(command_context, shell, cgc, params):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

    js = shell or os.path.join(command_context.bindir, executable_name("js"))
        os.path.join(command_context.topsrcdir, "js", "src", "jit-test", "jit_test.py"),

    env = os.environ.copy()
        env["JS_GC_ZEAL"] = "IncrementalMultipleSlices"

    return subprocess.call(jittest_cmd, env=env)
@Command("jsapi-tests", category="testing", description="Run SpiderMonkey JSAPI tests.")
    help="List all tests",
    help="Run tests for frontend-only APIs, with light-weight entry point",
    help="Test to run. Can be a prefix or omitted. If "
    "omitted, the entire test suite is executed.",
def run_jsapitests(command_context, list=False, frontend_only=False, test_name=None):
        os.path.join(command_context.bindir, executable_name("jsapi-tests"))
        jsapi_tests_cmd.append("--list")
        jsapi_tests_cmd.append("--frontend-only")
        jsapi_tests_cmd.append(test_name)

    test_env = os.environ.copy()
    test_env["TOPSRCDIR"] = command_context.topsrcdir

    result = subprocess.call(jsapi_tests_cmd, env=test_env)
        print(f"jsapi-tests failed, exit code {result}")
def run_check_js_msg(command_context):
    command_context.virtualenv_manager.ensure()
    python = command_context.virtualenv_manager.python_path

        os.path.join(command_context.topsrcdir, "config", "check_js_msg_encoding.py"),

    return subprocess.call(check_cmd)
def get_jsshell_parser():
    from jsshell.benchmark import get_parser

    parser=get_jsshell_parser,
    description="Run benchmarks in the SpiderMonkey JS shell.",

def run_jsshelltests(command_context, **kwargs):
    from jsshell import benchmark

    return benchmark.run(**kwargs)
    description="Mercurial-style .t tests for command line applications.",
    help="Test paths to run. Each path can be a test file or directory. "
    "If omitted, the entire suite will be run.",
    nargs=argparse.REMAINDER,
    help="Extra arguments to pass down to the cram binary. See "
    "'./mach python -m cram -- -h' for a list of available options.",
def cramtest(command_context, cram_args=None, test_paths=None, test_objects=None):
    command_context.activate_virtualenv()
    from manifestparser import TestManifest

    if test_objects is None:
        from moztest.resolve import TestResolver

        resolver = command_context._spawn(TestResolver)
            # If we were given test paths, try to find tests matching them.
            test_objects = resolver.resolve_tests(paths=test_paths, flavor="cram")
            # Otherwise just run everything in CRAMTEST_MANIFESTS
            test_objects = resolver.resolve_tests(flavor="cram")
        message = "No tests were collected, check spelling of the test paths."
        command_context.log(logging.WARN, "cramtest", {}, message)

    mp.tests.extend(test_objects)
    tests = mp.active_tests(disabled=False, **mozinfo.info)

    python = command_context.virtualenv_manager.python_path
    cmd = [python, "-m", "cram"] + cram_args + [t["relpath"] for t in tests]
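    # Running from topsrcdir lets the manifest-relative paths (t["relpath"])
    # resolve correctly for cram.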
    return subprocess.call(cmd, cwd=command_context.topsrcdir)
from datetime import date, timedelta

    "test-info", category="testing", description="Display historical test results."

def test_info(command_context):
    All functions are implemented as subcommands.
    description="Display historical test result summary for named tests.",
@CommandArgument("test_names", nargs=argparse.REMAINDER, help="Test(s) of interest.")
    default=(date.today() - timedelta(7)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
    help="Retrieve and display general test information.",
    help="Retrieve and display related Bugzilla bugs.",
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")

    ti = testinfo.TestInfoTests(verbose)
="Generate a json report of test manifests and/or tests "
834 "categorized by Bugzilla component and optionally filtered "
835 "by path, component, and/or manifest annotations.",
840 help="Comma-separated list of Bugzilla components."
841 " eg. Testing::General,Core::WebVR",
845 help='Limit results to tests of the specified flavor (eg. "xpcshell").',
849 help='Limit results to tests of the specified subsuite (eg. "devtools").',
852 "paths", nargs
=argparse
.REMAINDER
, help="File system paths of interest."
857 help="Include test manifests in report.",
860 "--show-tests", action
="store_true", help="Include individual tests in report."
863 "--show-summary", action
="store_true", help="Include summary in report."
866 "--show-annotations",
868 help="Include list of manifest annotation conditions in report.",
873 help="Include total number of runs the test has if there are failures.",
    help="Comma-separated list of value regular expressions to filter on; "
    "displayed tests contain all specified values.",
    help="Comma-separated list of test keys to filter on, "
    'like "skip-if"; only these fields will be searched '
    "for filter-values.",
887 "--no-component-report",
888 action
="store_false",
889 dest
="show_components",
891 help="Do not categorize by bugzilla component.",
@CommandArgument("--output-file", help="Path to report file.")
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
    from mozbuild import build_commands

        command_context.config_environment
    except BuildEnvironmentNotFoundException:
        print("Looks like configure has not run yet, running it now...")
        build_commands.configure(command_context)

    ti = testinfo.TestInfoReport(verbose)
    description='Compare two reports generated by "test-info reports".',
    help="The first (earlier) report file; path to local file or URL.",
    "--after", help="The second (later) report file; path to local file or URL."
    help="Path to report file to be written. If not specified, the report "
    "will be written to standard output.",
@CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
def test_report_diff(command_context, before, after, output_file, verbose):
    ti = testinfo.TestInfoReport(verbose)
    ti.report_diff(before, after, output_file)
    description="Generate report of number of runs for each test group (manifest)",
@CommandArgument("--output-file", help="Path to report file.")
def test_info_testrun_report(command_context, output_file):
    ti = testinfo.TestInfoReport(verbose=True)
    runcounts = ti.get_runcounts()
        output_file = os.path.abspath(output_file)
        output_dir = os.path.dirname(output_file)
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)
        with open(output_file, "w") as f:
            json.dump(runcounts, f)
    description="Display failure line groupings and frequencies for "
    "single tracking intermittent bugs.",
    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
    help="Start date (YYYY-MM-DD)",
    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
    help="Bug ID for the Treeherder intermittent failures data query.",

def test_info_failures(
    # bugid comes in as a string; we need an int:
        print("Please enter a valid bugid (e.g. '1760132')")

    "https://bugzilla.mozilla.org/rest/bug?include_fields=summary,depends_on&id=%s"
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    if r.status_code != 200:
        print("%s error retrieving url: %s" % (r.status_code, url))

        print("unable to get bugzilla information for %s" % bugid)
    summary = data["bugs"][0]["summary"]
    parts = summary.split("|")
    if not summary.endswith("single tracking bug") or len(parts) != 2:
        print("this query only works with single tracking bugs")

    # get depends_on bugs:
    if "depends_on" in data["bugs"][0]:
        buglist.extend(data["bugs"][0]["depends_on"])

    testname = parts[0].strip().split(" ")[-1]
    # now query treeherder to get details about annotations
        url = "https://treeherder.mozilla.org/api/failuresbybug/"
        url += "?startday=%s&endday=%s&tree=trunk&bug=%s" % (start, end, b)
        r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
        r.raise_for_status()
        print("no failures were found for the given bugid; please ensure the bug is")
        print("accessible via: https://treeherder.mozilla.org/intermittent-failures")
    # query VCS to get current list of variants:
    url = "https://hg.mozilla.org/mozilla-central/raw-file/tip/taskcluster/ci/test/variants.yml"
    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
    variants = yaml.safe_load(r.text)

        "\nQuerying data for bug %s annotated from %s to %s on trunk.\n\n"
        % (buglist, start, end)
    for failure in data:
        # config = platform/buildtype
        # testsuite (<suite>[-variant][-<chunk>])
        # lines - group by patterns that contain test name
        config = "%s/%s" % (failure["platform"], failure["build_type"])
        varpos = len(failure["test_suite"])
        for v in variants.keys():
            var = "-%s" % variants[v]["suffix"]
            if var in failure["test_suite"]:
                if failure["test_suite"].find(var) < varpos:

        suite = failure["test_suite"].split(variant)[0]
            parts = failure["test_suite"].split("-")
                suite = "-".join(parts[:-1])
                pass  # if this works, then the last '-X' is a number :)
            print("Error: failure to find variant in %s" % failure["test_suite"])

        job = "%s-%s%s" % (config, suite, variant)
        if job not in jobs.keys():
        # lines - sum(hash) of all lines where we match testname
        for line in failure["lines"]:
            if len(line.split(testname)) <= 1:

            # strip off timestamp and mozharness status
            parts = line.split("TEST-UNEXPECTED")
            l = "TEST-UNEXPECTED%s" % parts[-1]
            # only keep 25 characters of the failure; anything longer is often random numbers
            parts = l.split(testname)
            l = "%s%s%s" % (parts[0], testname, parts[1][:25])
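            # The truncated line acts as a stable key so near-identical failure
            # messages fall into the same group when hashed below.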
        if not failure["lines"]:

        if hvalue not in lines.keys():
            lines[hvalue] = {"lines": failure["lines"], "config": []}
        lines[hvalue]["config"].append(job)
    for h in lines.keys():
        print("%s errors with:" % (len(lines[h]["config"])))
        for l in lines[h]["lines"]:

                "... no failure lines recorded in"
                " https://treeherder.mozilla.org/intermittent-failures ..."

            count = len([x for x in lines[h]["config"] if x == job])
            print(" %s: %s" % (job, count))
    conditions=[conditions.is_non_artifact_build],
    description="Run rust unit tests (via cargo test).",

def run_rusttests(command_context, **kwargs):
    return command_context._mach_context.commands.dispatch(
        command_context._mach_context,
        what=["pre-export", "export", "recurse_rusttests"],
1181 "fluent-migration-test",
1183 description
="Test Fluent migration recipes.",
1185 @CommandArgument("test_paths", nargs
="*", metavar
="N", help="Recipe paths to test.")
1186 def run_migration_tests(command_context
, test_paths
=None, **kwargs
):
    command_context.activate_virtualenv()
    from test_fluent_migrations import fmt

    for to_test in test_paths:
            context = fmt.inspect_migration(to_test)
            for issue in context["issues"]:
                command_context.log(
                    "fluent-migration-test",
                        "error": issue["msg"],
                    "ERROR in {file}: {error}",
            if context["issues"]:
            with_context.append(
                    "references": context["references"],
        except Exception as e:
            command_context.log(
                "fluent-migration-test",
                {"error": str(e), "file": to_test},
                "ERROR in {file}: {error}",

    obj_dir = fmt.prepare_object_dir(command_context)
    for context in with_context:
        rv |= fmt.test_migration(command_context, obj_dir, **context)