1 # This Source Code Form is subject to the terms of the Mozilla Public
2 # License, v. 2.0. If a copy of the MPL was not distributed with this
3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
12 from mach
.decorators
import Command
, CommandArgument
, SettingsProvider
, SubCommand
13 from mozbuild
.base
import BuildEnvironmentNotFoundException
14 from mozbuild
.base
import MachCommandConditions
as conditions
17 I was unable to find tests from the given argument(s).
19 You should specify a test directory, filename, test suite name, or
22 It's possible my little brain doesn't know about the type of test you are
23 trying to execute. If you suspect this, please request support by filing
25 https://bugzilla.mozilla.org/enter_bug.cgi?product=Testing&component=General.
29 I know you are trying to run a %s%s test. Unfortunately, I can't run those
34 Test or tests to run. Tests can be specified by filename, directory, suite
37 The following test suites and aliases are supported: {}
42 class TestConfig(object):
44 def config_settings(cls
):
45 from mozlog
.commandline
import log_formatters
46 from mozlog
.structuredlog
import log_levels
48 format_desc
= "The default format to use when running tests with `mach test`."
49 format_choices
= list(log_formatters
)
50 level_desc
= "The default log level to use when running tests with `mach test`."
51 level_choices
= [l
.lower() for l
in log_levels
]
53 ("test.format", "string", format_desc
, "mach", {"choices": format_choices
}),
54 ("test.level", "string", level_desc
, "info", {"choices": level_choices
}),
58 def get_test_parser():
59 from mozlog
.commandline
import add_logging_group
60 from moztest
.resolve
import TEST_SUITES
62 parser
= argparse
.ArgumentParser()
67 help=TEST_HELP
.format(", ".join(sorted(TEST_SUITES
))),
72 nargs
=argparse
.REMAINDER
,
73 help="Extra arguments to pass to the underlying test command(s). "
74 "If an underlying command doesn't recognize the argument, it "
82 help="Specify a debugger to use.",
84 add_logging_group(parser
)
88 ADD_TEST_SUPPORTED_SUITES
= [
91 "mochitest-browser-chrome",
92 "web-platform-tests-testharness",
93 "web-platform-tests-reftest",
96 ADD_TEST_SUPPORTED_DOCS
= ["js", "html", "xhtml", "xul"]
99 "wpt": "web-platform-tests-testharness",
100 "wpt-testharness": "web-platform-tests-testharness",
101 "wpt-reftest": "web-platform-tests-reftest",
104 MISSING_ARG
= object()
107 def create_parser_addtest():
110 parser
= argparse
.ArgumentParser()
113 choices
=sorted(ADD_TEST_SUPPORTED_SUITES
+ list(SUITE_SYNONYMS
.keys())),
114 help="suite for the test. "
115 "If you pass a `test` argument this will be determined "
116 "based on the filename and the folder it is in",
122 help="Overwrite an existing file if it exists.",
126 choices
=ADD_TEST_SUPPORTED_DOCS
,
127 help="Document type for the test (if applicable)."
128 "If you pass a `test` argument this will be determined "
129 "based on the filename.",
137 help="Open the created file(s) in an editor; if a "
138 "binary is supplied it will be used otherwise the default editor for "
139 "your environment will be opened",
142 for base_suite
in addtest
.TEST_CREATORS
:
143 cls
= addtest
.TEST_CREATORS
[base_suite
]
144 if hasattr(cls
, "get_parser"):
145 group
= parser
.add_argument_group(base_suite
)
146 cls
.get_parser(group
)
148 parser
.add_argument("test", nargs
="?", help=("Test to create."))
155 description
="Generate tests based on templates",
156 parser
=create_parser_addtest
,
170 from moztest
.resolve
import TEST_SUITES
172 if not suite
and not test
:
173 return create_parser_addtest().parse_args(["--help"])
175 if suite
in SUITE_SYNONYMS
:
176 suite
= SUITE_SYNONYMS
[suite
]
179 if not overwrite
and os
.path
.isfile(os
.path
.abspath(test
)):
180 print("Error: can't generate a test that already exists:", test
)
183 abs_test
= os
.path
.abspath(test
)
185 doc
= guess_doc(abs_test
)
187 guessed_suite
, err
= guess_suite(abs_test
)
191 suite
= guessed_suite
200 "We couldn't automatically determine a suite. "
201 "Please specify `--suite` with one of the following options:\n{}\n"
202 "If you'd like to add support to a new suite, please file a bug "
203 "blocking https://bugzilla.mozilla.org/show_bug.cgi?id=1540285.".format(
204 ADD_TEST_SUPPORTED_SUITES
209 if doc
not in ADD_TEST_SUPPORTED_DOCS
:
211 "Error: invalid `doc`. Either pass in a test with a valid extension"
212 "({}) or pass in the `doc` argument".format(ADD_TEST_SUPPORTED_DOCS
)
216 creator_cls
= addtest
.creator_for_suite(suite
)
218 if creator_cls
is None:
219 print("Sorry, `addtest` doesn't currently know how to add {}".format(suite
))
222 creator
= creator_cls(command_context
.topsrcdir
, test
, suite
, doc
, **kwargs
)
228 for path
, template
in creator
:
234 print("Adding a test file at {} (suite `{}`)".format(path
, suite
))
237 os
.makedirs(os
.path
.dirname(path
))
241 with io
.open(path
, "w", newline
="\n") as f
:
244 # write to stdout if you passed only suite and doc and not a file path
251 creator
.update_manifest()
253 # Small hack, should really do this better
254 if suite
.startswith("wpt-"):
255 suite
= "web-platform-tests"
257 mach_command
= TEST_SUITES
[suite
]["mach_command"]
259 "Please make sure to add the new test to your commit. "
260 "You can now run the test with:\n ./mach {} {}".format(
265 if editor
is not MISSING_ARG
:
266 if editor
is not None:
268 elif "VISUAL" in os
.environ
:
269 editor
= os
.environ
["VISUAL"]
270 elif "EDITOR" in os
.environ
:
271 editor
= os
.environ
["EDITOR"]
273 print("Unable to determine editor; please specify a binary")
280 proc
= subprocess
.Popen("%s %s" % (editor
, " ".join(paths
)), shell
=True)
def guess_doc(abs_test):
    """Return the document type for a test path.

    The document type is the file extension of *abs_test* without the
    leading dot (e.g. ``"html"`` for ``test_foo.html``); an empty string
    is returned when the file has no extension.
    """
    base = os.path.basename(abs_test)
    _, extension = os.path.splitext(base)
    return extension.strip(".")
293 def guess_suite(abs_test
):
294 # If you pass a abs_test, try to detect the type based on the name
295 # and folder. This detection can be skipped if you pass the `type` arg.
298 parent
= os
.path
.dirname(abs_test
)
299 filename
= os
.path
.basename(abs_test
)
301 has_browser_ini
= os
.path
.isfile(os
.path
.join(parent
, "browser.ini"))
302 has_chrome_ini
= os
.path
.isfile(os
.path
.join(parent
, "chrome.ini"))
303 has_plain_ini
= os
.path
.isfile(os
.path
.join(parent
, "mochitest.ini"))
304 has_xpcshell_ini
= os
.path
.isfile(os
.path
.join(parent
, "xpcshell.ini"))
306 in_wpt_folder
= abs_test
.startswith(
307 os
.path
.abspath(os
.path
.join("testing", "web-platform"))
311 guessed_suite
= "web-platform-tests-testharness"
312 if "/css/" in abs_test
:
313 guessed_suite
= "web-platform-tests-reftest"
315 filename
.startswith("test_")
317 and guess_doc(abs_test
) == "js"
319 guessed_suite
= "xpcshell"
321 if filename
.startswith("browser_") and has_browser_ini
:
322 guessed_suite
= "mochitest-browser-chrome"
323 elif filename
.startswith("test_"):
324 if has_chrome_ini
and has_plain_ini
:
326 "Error: directory contains both a chrome.ini and mochitest.ini. "
327 "Please set --suite=mochitest-chrome or --suite=mochitest-plain."
330 guessed_suite
= "mochitest-chrome"
332 guessed_suite
= "mochitest-plain"
333 return guessed_suite
, err
339 description
="Run tests (detects the kind of test and runs it).",
340 parser
=get_test_parser
,
342 def test(command_context
, what
, extra_args
, **log_args
):
343 """Run tests from names or paths.
345 mach test accepts arguments specifying which tests to run. Each argument
348 * The path to a test file
349 * A directory containing tests
351 * An alias to a test suite name (codes used on TreeHerder)
353 When paths or directories are given, they are first resolved to test
354 files known to the build system.
356 If resolved tests belong to more than one test type/flavor/harness,
357 the harness for each relevant type/flavor will be invoked. e.g. if
358 you specify a directory with xpcshell and browser chrome mochitests,
359 both harnesses will be invoked.
361 Warning: `mach test` does not automatically re-build.
362 Please remember to run `mach build` when necessary.
366 Run all test files in the devtools/client/shared/redux/middleware/xpcshell/
369 `./mach test devtools/client/shared/redux/middleware/xpcshell/`
371 The below command prints a short summary of results instead of
372 the default more verbose output.
373 Do not forget the - (minus sign) after --log-grouped!
375 `./mach test --log-grouped - devtools/client/shared/redux/middleware/xpcshell/`
377 To learn more about arguments for each test type/flavor/harness, please run
378 `./mach <test-harness> --help`. For example, `./mach mochitest --help`.
380 from mozlog
.commandline
import setup_logging
381 from mozlog
.handlers
import StreamHandler
382 from moztest
.resolve
import TEST_SUITES
, TestResolver
, get_suite_definition
384 resolver
= command_context
._spawn
(TestResolver
)
385 run_suites
, run_tests
= resolver
.resolve_metadata(what
)
387 if not run_suites
and not run_tests
:
391 if log_args
.get("debugger", None):
394 if not mozdebug
.get_debugger_info(log_args
.get("debugger")):
396 extra_args_debugger_notation
= "=".join(
397 ["--debugger", log_args
.get("debugger")]
400 extra_args
.append(extra_args_debugger_notation
)
402 extra_args
= [extra_args_debugger_notation
]
404 # Create shared logger
405 format_args
= {"level": command_context
._mach
_context
.settings
["test"]["level"]}
406 if not run_suites
and len(run_tests
) == 1:
407 format_args
["verbose"] = True
408 format_args
["compact"] = False
410 default_format
= command_context
._mach
_context
.settings
["test"]["format"]
412 "mach-test", log_args
, {default_format
: sys
.stdout
}, format_args
414 for handler
in log
.handlers
:
415 if isinstance(handler
, StreamHandler
):
416 handler
.formatter
.inner
.summary_on_shutdown
= True
419 for suite_name
in run_suites
:
420 suite
= TEST_SUITES
[suite_name
]
421 kwargs
= suite
["kwargs"]
423 kwargs
.setdefault("subsuite", None)
425 if "mach_command" in suite
:
426 res
= command_context
._mach
_context
.commands
.dispatch(
427 suite
["mach_command"],
428 command_context
._mach
_context
,
436 for test
in run_tests
:
437 key
= (test
["flavor"], test
.get("subsuite", ""))
438 buckets
.setdefault(key
, []).append(test
)
440 for (flavor
, subsuite
), tests
in sorted(buckets
.items()):
441 _
, m
= get_suite_definition(flavor
, subsuite
)
442 if "mach_command" not in m
:
443 substr
= "-{}".format(subsuite
) if subsuite
else ""
444 print(UNKNOWN_FLAVOR
% (flavor
, substr
))
448 kwargs
= dict(m
["kwargs"])
450 kwargs
.setdefault("subsuite", None)
452 res
= command_context
._mach
_context
.commands
.dispatch(
454 command_context
._mach
_context
,
467 "cppunittest", category
="testing", description
="Run cpp unit tests (C++ tests)."
473 help="Test to run. Can be specified as one or more files or "
474 "directories, or omitted. If omitted, the entire test suite is "
477 def run_cppunit_test(command_context
, **params
):
478 from mozlog
import commandline
480 log
= params
.get("log")
482 log
= commandline
.setup_logging("cppunittest", {}, {"tbpl": sys
.stdout
})
484 # See if we have crash symbols
485 symbols_path
= os
.path
.join(command_context
.distdir
, "crashreporter-symbols")
486 if not os
.path
.isdir(symbols_path
):
489 # If no tests specified, run all tests in main manifest
490 tests
= params
["test_files"]
492 tests
= [os
.path
.join(command_context
.distdir
, "cppunittests")]
493 manifest_path
= os
.path
.join(
494 command_context
.topsrcdir
, "testing", "cppunittest.ini"
499 utility_path
= command_context
.bindir
501 if conditions
.is_android(command_context
):
502 from mozrunner
.devices
.android_device
import (
504 verify_android_device
,
507 verify_android_device(command_context
, install
=InstallIntent
.NO
)
508 return run_android_test(tests
, symbols_path
, manifest_path
, log
)
510 return run_desktop_test(
511 command_context
, tests
, symbols_path
, manifest_path
, utility_path
, log
515 def run_desktop_test(
516 command_context
, tests
, symbols_path
, manifest_path
, utility_path
, log
518 import runcppunittests
as cppunittests
519 from mozlog
import commandline
521 parser
= cppunittests
.CPPUnittestOptions()
522 commandline
.add_logging_group(parser
)
523 options
, args
= parser
.parse_args()
525 options
.symbols_path
= symbols_path
526 options
.manifest_path
= manifest_path
527 options
.utility_path
= utility_path
528 options
.xre_path
= command_context
.bindir
531 result
= cppunittests
.run_test_harness(options
, tests
)
532 except Exception as e
:
533 log
.error("Caught exception running cpp unit tests: %s" % str(e
))
537 return 0 if result
else 1
540 def run_android_test(command_context
, tests
, symbols_path
, manifest_path
, log
):
541 import remotecppunittests
as remotecppunittests
542 from mozlog
import commandline
544 parser
= remotecppunittests
.RemoteCPPUnittestOptions()
545 commandline
.add_logging_group(parser
)
546 options
, args
= parser
.parse_args()
548 if not options
.adb_path
:
549 from mozrunner
.devices
.android_device
import get_adb_path
551 options
.adb_path
= get_adb_path(command_context
)
552 options
.symbols_path
= symbols_path
553 options
.manifest_path
= manifest_path
554 options
.xre_path
= command_context
.bindir
555 options
.local_lib
= command_context
.bindir
.replace("bin", "fennec")
556 for file in os
.listdir(os
.path
.join(command_context
.topobjdir
, "dist")):
557 if file.endswith(".apk") and file.startswith("fennec"):
558 options
.local_apk
= os
.path
.join(command_context
.topobjdir
, "dist", file)
559 log
.info("using APK: " + options
.local_apk
)
563 result
= remotecppunittests
.run_test_harness(options
, tests
)
564 except Exception as e
:
565 log
.error("Caught exception running cpp unit tests: %s" % str(e
))
569 return 0 if result
else 1
def executable_name(name):
    """Return *name* with the platform executable suffix appended.

    On Windows (any ``sys.platform`` starting with ``"win"``) this adds
    ``".exe"``; on all other platforms *name* is returned unchanged.
    """
    if sys.platform.startswith("win"):
        return name + ".exe"
    return name
579 description
="Run SpiderMonkey JS tests in the JS shell.",
580 ok_if_tests_disabled
=True,
582 @CommandArgument("--shell", help="The shell to be used")
585 nargs
=argparse
.REMAINDER
,
586 help="Extra arguments to pass down to the test harness.",
588 def run_jstests(command_context
, shell
, params
):
591 command_context
.virtualenv_manager
.ensure()
592 python
= command_context
.virtualenv_manager
.python_path
594 js
= shell
or os
.path
.join(command_context
.bindir
, executable_name("js"))
597 os
.path
.join(command_context
.topsrcdir
, "js", "src", "tests", "jstests.py"),
601 return subprocess
.call(jstest_cmd
)
607 description
="Run SpiderMonkey jit-tests in the JS shell.",
608 ok_if_tests_disabled
=True,
610 @CommandArgument("--shell", help="The shell to be used")
615 help="Run with the SM(cgc) job's env vars",
619 nargs
=argparse
.REMAINDER
,
620 help="Extra arguments to pass down to the test harness.",
622 def run_jittests(command_context
, shell
, cgc
, params
):
625 command_context
.virtualenv_manager
.ensure()
626 python
= command_context
.virtualenv_manager
.python_path
628 js
= shell
or os
.path
.join(command_context
.bindir
, executable_name("js"))
631 os
.path
.join(command_context
.topsrcdir
, "js", "src", "jit-test", "jit_test.py"),
635 env
= os
.environ
.copy()
637 env
["JS_GC_ZEAL"] = "IncrementalMultipleSlices"
639 return subprocess
.call(jittest_cmd
, env
=env
)
642 @Command("jsapi-tests", category
="testing", description
="Run SpiderMonkey JSAPI tests.")
647 help="Test to run. Can be a prefix or omitted. If "
648 "omitted, the entire test suite is executed.",
650 def run_jsapitests(command_context
, test_name
=None):
654 os
.path
.join(command_context
.bindir
, executable_name("jsapi-tests"))
657 jsapi_tests_cmd
.append(test_name
)
659 test_env
= os
.environ
.copy()
660 test_env
["TOPSRCDIR"] = command_context
.topsrcdir
662 result
= subprocess
.call(jsapi_tests_cmd
, env
=test_env
)
664 print(f
"jsapi-tests failed, exit code {result}")
668 def run_check_js_msg(command_context
):
671 command_context
.virtualenv_manager
.ensure()
672 python
= command_context
.virtualenv_manager
.python_path
676 os
.path
.join(command_context
.topsrcdir
, "config", "check_js_msg_encoding.py"),
679 return subprocess
.call(check_cmd
)
682 def get_jsshell_parser():
683 from jsshell
.benchmark
import get_parser
691 parser
=get_jsshell_parser
,
692 description
="Run benchmarks in the SpiderMonkey JS shell.",
694 def run_jsshelltests(command_context
, **kwargs
):
695 from jsshell
import benchmark
697 return benchmark
.run(**kwargs
)
703 description
="Mercurial style .t tests for command line applications.",
709 help="Test paths to run. Each path can be a test file or directory. "
710 "If omitted, the entire suite will be run.",
714 nargs
=argparse
.REMAINDER
,
715 help="Extra arguments to pass down to the cram binary. See "
716 "'./mach python -m cram -- -h' for a list of available options.",
718 def cramtest(command_context
, cram_args
=None, test_paths
=None, test_objects
=None):
719 command_context
.activate_virtualenv()
721 from manifestparser
import TestManifest
723 if test_objects
is None:
724 from moztest
.resolve
import TestResolver
726 resolver
= command_context
._spawn
(TestResolver
)
728 # If we were given test paths, try to find tests matching them.
729 test_objects
= resolver
.resolve_tests(paths
=test_paths
, flavor
="cram")
731 # Otherwise just run everything in CRAMTEST_MANIFESTS
732 test_objects
= resolver
.resolve_tests(flavor
="cram")
735 message
= "No tests were collected, check spelling of the test paths."
736 command_context
.log(logging
.WARN
, "cramtest", {}, message
)
740 mp
.tests
.extend(test_objects
)
741 tests
= mp
.active_tests(disabled
=False, **mozinfo
.info
)
743 python
= command_context
.virtualenv_manager
.python_path
744 cmd
= [python
, "-m", "cram"] + cram_args
+ [t
["relpath"] for t
in tests
]
745 return subprocess
.call(cmd
, cwd
=command_context
.topsrcdir
)
748 from datetime
import date
, timedelta
752 "test-info", category
="testing", description
="Display historical test results."
754 def test_info(command_context
):
756 All functions implemented as subcommands.
763 description
="Display historical test result summary for named tests.",
765 @CommandArgument("test_names", nargs
=argparse
.REMAINDER
, help="Test(s) of interest.")
768 default
=(date
.today() - timedelta(7)).strftime("%Y-%m-%d"),
769 help="Start date (YYYY-MM-DD)",
772 "--end", default
=date
.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
777 help="Retrieve and display general test information.",
782 help="Retrieve and display related Bugzilla bugs.",
784 @CommandArgument("--verbose", action
="store_true", help="Enable debug logging.")
796 ti
= testinfo
.TestInfoTests(verbose
)
809 description
="Generate a json report of test manifests and/or tests "
810 "categorized by Bugzilla component and optionally filtered "
811 "by path, component, and/or manifest annotations.",
816 help="Comma-separated list of Bugzilla components."
817 " eg. Testing::General,Core::WebVR",
821 help='Limit results to tests of the specified flavor (eg. "xpcshell").',
825 help='Limit results to tests of the specified subsuite (eg. "devtools").',
828 "paths", nargs
=argparse
.REMAINDER
, help="File system paths of interest."
833 help="Include test manifests in report.",
836 "--show-tests", action
="store_true", help="Include individual tests in report."
839 "--show-summary", action
="store_true", help="Include summary in report."
842 "--show-annotations",
844 help="Include list of manifest annotation conditions in report.",
849 help="Include total number of runs the test has if there are failures.",
853 help="Comma-separated list of value regular expressions to filter on; "
854 "displayed tests contain all specified values.",
858 help="Comma-separated list of test keys to filter on, "
859 'like "skip-if"; only these fields will be searched '
860 "for filter-values.",
863 "--no-component-report",
864 action
="store_false",
865 dest
="show_components",
867 help="Do not categorize by bugzilla component.",
869 @CommandArgument("--output-file", help="Path to report file.")
870 @CommandArgument("--verbose", action
="store_true", help="Enable debug logging.")
873 default
=(date
.today() - timedelta(30)).strftime("%Y-%m-%d"),
874 help="Start date (YYYY-MM-DD)",
877 "--end", default
=date
.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
899 from mozbuild
import build_commands
902 command_context
.config_environment
903 except BuildEnvironmentNotFoundException
:
904 print("Looks like configure has not run yet, running it now...")
905 build_commands
.configure(command_context
)
907 ti
= testinfo
.TestInfoReport(verbose
)
930 description
='Compare two reports generated by "test-info reports".',
935 help="The first (earlier) report file; path to local file or url.",
938 "--after", help="The second (later) report file; path to local file or url."
942 help="Path to report file to be written. If not specified, report"
943 "will be written to standard output.",
945 @CommandArgument("--verbose", action
="store_true", help="Enable debug logging.")
946 def test_report_diff(command_context
, before
, after
, output_file
, verbose
):
949 ti
= testinfo
.TestInfoReport(verbose
)
950 ti
.report_diff(before
, after
, output_file
)
956 description
="Generate report of number of runs for each test group (manifest)",
958 @CommandArgument("--output-file", help="Path to report file.")
959 def test_info_testrun_report(command_context
, output_file
):
964 ti
= testinfo
.TestInfoReport(verbose
=True)
965 runcounts
= ti
.get_runcounts()
967 output_file
= os
.path
.abspath(output_file
)
968 output_dir
= os
.path
.dirname(output_file
)
969 if not os
.path
.isdir(output_dir
):
970 os
.makedirs(output_dir
)
971 with
open(output_file
, "w") as f
:
972 json
.dump(runcounts
, f
)
980 description
="Display failure line groupings and frequencies for "
981 "single tracking intermittent bugs.",
985 default
=(date
.today() - timedelta(30)).strftime("%Y-%m-%d"),
986 help="Start date (YYYY-MM-DD)",
989 "--end", default
=date
.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
994 help="bugid for treeherder intermittent failures data query.",
996 def test_info_failures(
1002 # bugid comes in as a string, we need an int:
1008 print("Please enter a valid bugid (i.e. '1760132')")
1013 "https://bugzilla.mozilla.org/rest/bug?include_fields=summary,depends_on&id=%s"
1016 r
= requests
.get(url
, headers
={"User-agent": "mach-test-info/1.0"})
1017 if r
.status_code
!= 200:
1018 print("%s error retrieving url: %s" % (r
.status_code
, url
))
1022 print("unable to get bugzilla information for %s" % bugid
)
1025 summary
= data
["bugs"][0]["summary"]
1026 parts
= summary
.split("|")
1027 if not summary
.endswith("single tracking bug") or len(parts
) != 2:
1028 print("this query only works with single tracking bugs")
1031 # get depends_on bugs:
1033 if "depends_on" in data
["bugs"][0]:
1034 buglist
.extend(data
["bugs"][0]["depends_on"])
1036 testname
= parts
[0].strip().split(" ")[-1]
1038 # now query treeherder to get details about annotations
1041 url
= "https://treeherder.mozilla.org/api/failuresbybug/"
1042 url
+= "?startday=%s&endday=%s&tree=trunk&bug=%s" % (start
, end
, b
)
1043 r
= requests
.get(url
, headers
={"User-agent": "mach-test-info/1.0"})
1044 r
.raise_for_status()
1050 print("no failures were found for given bugid, please ensure bug is")
1051 print("accessible via: https://treeherder.mozilla.org/intermittent-failures")
1054 # query VCS to get current list of variants:
1057 url
= "https://hg.mozilla.org/mozilla-central/raw-file/tip/taskcluster/ci/test/variants.yml"
1058 r
= requests
.get(url
, headers
={"User-agent": "mach-test-info/1.0"})
1059 variants
= yaml
.safe_load(r
.text
)
1062 "\nQuerying data for bug %s annotated from %s to %s on trunk.\n\n"
1063 % (buglist
, start
, end
)
1067 for failure
in data
:
1068 # config = platform/buildtype
1069 # testsuite (<suite>[-variant][-<chunk>])
1070 # lines - group by patterns that contain test name
1071 config
= "%s/%s" % (failure
["platform"], failure
["build_type"])
1075 varpos
= len(failure
["test_suite"])
1076 for v
in variants
.keys():
1077 var
= "-%s" % variants
[v
]["suffix"]
1078 if var
in failure
["test_suite"]:
1079 if failure
["test_suite"].find(var
) < varpos
:
1083 suite
= failure
["test_suite"].split(variant
)[0]
1085 parts
= failure
["test_suite"].split("-")
1088 suite
= "-".join(parts
[:-1])
1090 pass # if this works, then the last '-X' is a number :)
1093 print("Error: failure to find variant in %s" % failure
["test_suite"])
1095 job
= "%s-%s%s" % (config
, suite
, variant
)
1096 if job
not in jobs
.keys():
1100 # lines - sum(hash) of all lines where we match testname
1102 for line
in failure
["lines"]:
1103 if len(line
.split(testname
)) <= 1:
1105 # strip off timestamp and mozharness status
1106 parts
= line
.split("TEST-UNEXPECTED")
1107 l
= "TEST-UNEXPECTED%s" % parts
[-1]
1109 # only keep 25 characters of the failure, often longer is random numbers
1110 parts
= l
.split(testname
)
1111 l
= "%s%s%s" % (parts
[0], testname
, parts
[1][:25])
1115 if not failure
["lines"]:
1121 if hvalue
not in lines
.keys():
1122 lines
[hvalue
] = {"lines": failure
["lines"], "config": []}
1123 lines
[hvalue
]["config"].append(job
)
1125 for h
in lines
.keys():
1126 print("%s errors with:" % (len(lines
[h
]["config"])))
1127 for l
in lines
[h
]["lines"]:
1131 "... no failure lines recorded in"
1132 " https://treeherder.mozilla.org/intermittent-failures ..."
1136 count
= len([x
for x
in lines
[h
]["config"] if x
== job
])
1138 print(" %s: %s" % (job
, count
))
1145 conditions
=[conditions
.is_non_artifact_build
],
1146 description
="Run rust unit tests (via cargo test).",
1148 def run_rusttests(command_context
, **kwargs
):
1149 return command_context
._mach
_context
.commands
.dispatch(
1151 command_context
._mach
_context
,
1152 what
=["pre-export", "export", "recurse_rusttests"],
1157 "fluent-migration-test",
1159 description
="Test Fluent migration recipes.",
1161 @CommandArgument("test_paths", nargs
="*", metavar
="N", help="Recipe paths to test.")
1162 def run_migration_tests(command_context
, test_paths
=None, **kwargs
):
1165 command_context
.activate_virtualenv()
1166 from test_fluent_migrations
import fmt
1170 for to_test
in test_paths
:
1172 context
= fmt
.inspect_migration(to_test
)
1173 for issue
in context
["issues"]:
1174 command_context
.log(
1176 "fluent-migration-test",
1178 "error": issue
["msg"],
1181 "ERROR in {file}: {error}",
1183 if context
["issues"]:
1185 with_context
.append(
1188 "references": context
["references"],
1191 except Exception as e
:
1192 command_context
.log(
1194 "fluent-migration-test",
1195 {"error": str(e
), "file": to_test
},
1196 "ERROR in {file}: {error}",
1199 obj_dir
= fmt
.prepare_object_dir(command_context
)
1200 for context
in with_context
:
1201 rv |
= fmt
.test_migration(command_context
, obj_dir
, **context
)