1 # This Source Code Form is subject to the terms of the Mozilla Public
2 # License, v. 2.0. If a copy of the MPL was not distributed with this
3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
5 from __future__
import absolute_import
, print_function
, unicode_literals
13 from mach
.decorators
import (
20 from mozbuild
.base
import (
21 BuildEnvironmentNotFoundException
,
22 MachCommandConditions
as conditions
,
26 I was unable to find tests from the given argument(s).
28 You should specify a test directory, filename, test suite name, or
31 It's possible my little brain doesn't know about the type of test you are
32 trying to execute. If you suspect this, please request support by filing
34 https://bugzilla.mozilla.org/enter_bug.cgi?product=Testing&component=General.
38 I know you are trying to run a %s%s test. Unfortunately, I can't run those
43 Test or tests to run. Tests can be specified by filename, directory, suite
46 The following test suites and aliases are supported: {}
51 class TestConfig(object):
53 def config_settings(cls
):
54 from mozlog
.commandline
import log_formatters
55 from mozlog
.structuredlog
import log_levels
57 format_desc
= "The default format to use when running tests with `mach test`."
58 format_choices
= list(log_formatters
)
59 level_desc
= "The default log level to use when running tests with `mach test`."
60 level_choices
= [l
.lower() for l
in log_levels
]
62 ("test.format", "string", format_desc
, "mach", {"choices": format_choices
}),
63 ("test.level", "string", level_desc
, "info", {"choices": level_choices
}),
67 def get_test_parser():
68 from mozlog
.commandline
import add_logging_group
69 from moztest
.resolve
import TEST_SUITES
71 parser
= argparse
.ArgumentParser()
76 help=TEST_HELP
.format(", ".join(sorted(TEST_SUITES
))),
81 nargs
=argparse
.REMAINDER
,
82 help="Extra arguments to pass to the underlying test command(s). "
83 "If an underlying command doesn't recognize the argument, it "
91 help="Specify a debugger to use.",
93 add_logging_group(parser
)
97 ADD_TEST_SUPPORTED_SUITES
= [
100 "mochitest-browser-chrome",
101 "web-platform-tests-testharness",
102 "web-platform-tests-reftest",
105 ADD_TEST_SUPPORTED_DOCS
= ["js", "html", "xhtml", "xul"]
108 "wpt": "web-platform-tests-testharness",
109 "wpt-testharness": "web-platform-tests-testharness",
110 "wpt-reftest": "web-platform-tests-reftest",
# Sentinel default for optional CLI flags: lets the command distinguish
# "flag not passed at all" (is MISSING_ARG) from "flag passed with no value /
# None" — used below when deciding whether to open the created test in an
# editor.
MISSING_ARG = object()
116 def create_parser_addtest():
119 parser
= argparse
.ArgumentParser()
122 choices
=sorted(ADD_TEST_SUPPORTED_SUITES
+ list(SUITE_SYNONYMS
.keys())),
123 help="suite for the test. "
124 "If you pass a `test` argument this will be determined "
125 "based on the filename and the folder it is in",
131 help="Overwrite an existing file if it exists.",
135 choices
=ADD_TEST_SUPPORTED_DOCS
,
136 help="Document type for the test (if applicable)."
137 "If you pass a `test` argument this will be determined "
138 "based on the filename.",
146 help="Open the created file(s) in an editor; if a "
147 "binary is supplied it will be used otherwise the default editor for "
148 "your environment will be opened",
151 for base_suite
in addtest
.TEST_CREATORS
:
152 cls
= addtest
.TEST_CREATORS
[base_suite
]
153 if hasattr(cls
, "get_parser"):
154 group
= parser
.add_argument_group(base_suite
)
155 cls
.get_parser(group
)
157 parser
.add_argument("test", nargs
="?", help=("Test to create."))
164 description
="Generate tests based on templates",
165 parser
=create_parser_addtest
,
178 from moztest
.resolve
import TEST_SUITES
180 if not suite
and not test
:
181 return create_parser_addtest().parse_args(["--help"])
183 if suite
in SUITE_SYNONYMS
:
184 suite
= SUITE_SYNONYMS
[suite
]
187 if not overwrite
and os
.path
.isfile(os
.path
.abspath(test
)):
188 print("Error: can't generate a test that already exists:", test
)
191 abs_test
= os
.path
.abspath(test
)
193 doc
= guess_doc(abs_test
)
195 guessed_suite
, err
= guess_suite(abs_test
)
199 suite
= guessed_suite
208 "We couldn't automatically determine a suite. "
209 "Please specify `--suite` with one of the following options:\n{}\n"
210 "If you'd like to add support to a new suite, please file a bug "
211 "blocking https://bugzilla.mozilla.org/show_bug.cgi?id=1540285.".format(
212 ADD_TEST_SUPPORTED_SUITES
217 if doc
not in ADD_TEST_SUPPORTED_DOCS
:
219 "Error: invalid `doc`. Either pass in a test with a valid extension"
220 "({}) or pass in the `doc` argument".format(ADD_TEST_SUPPORTED_DOCS
)
224 creator_cls
= addtest
.creator_for_suite(suite
)
226 if creator_cls
is None:
227 print("Sorry, `addtest` doesn't currently know how to add {}".format(suite
))
230 creator
= creator_cls(command_context
.topsrcdir
, test
, suite
, doc
, **kwargs
)
236 for path
, template
in creator
:
242 print("Adding a test file at {} (suite `{}`)".format(path
, suite
))
245 os
.makedirs(os
.path
.dirname(path
))
249 with io
.open(path
, "w", newline
="\n") as f
:
252 # write to stdout if you passed only suite and doc and not a file path
259 creator
.update_manifest()
261 # Small hack, should really do this better
262 if suite
.startswith("wpt-"):
263 suite
= "web-platform-tests"
265 mach_command
= TEST_SUITES
[suite
]["mach_command"]
267 "Please make sure to add the new test to your commit. "
268 "You can now run the test with:\n ./mach {} {}".format(
273 if editor
is not MISSING_ARG
:
274 if editor
is not None:
276 elif "VISUAL" in os
.environ
:
277 editor
= os
.environ
["VISUAL"]
278 elif "EDITOR" in os
.environ
:
279 editor
= os
.environ
["EDITOR"]
281 print("Unable to determine editor; please specify a binary")
288 proc
= subprocess
.Popen("%s %s" % (editor
, " ".join(paths
)), shell
=True)
def guess_doc(abs_test):
    """Guess the document type for a test from its file extension.

    Returns the extension of the final path component without the leading
    dot (e.g. "js", "html"); an empty string when there is no extension.
    """
    ext = os.path.splitext(os.path.basename(abs_test))[1]
    return ext.strip(".")
301 def guess_suite(abs_test
):
302 # If you pass a abs_test, try to detect the type based on the name
303 # and folder. This detection can be skipped if you pass the `type` arg.
306 parent
= os
.path
.dirname(abs_test
)
307 filename
= os
.path
.basename(abs_test
)
309 has_browser_ini
= os
.path
.isfile(os
.path
.join(parent
, "browser.ini"))
310 has_chrome_ini
= os
.path
.isfile(os
.path
.join(parent
, "chrome.ini"))
311 has_plain_ini
= os
.path
.isfile(os
.path
.join(parent
, "mochitest.ini"))
312 has_xpcshell_ini
= os
.path
.isfile(os
.path
.join(parent
, "xpcshell.ini"))
314 in_wpt_folder
= abs_test
.startswith(
315 os
.path
.abspath(os
.path
.join("testing", "web-platform"))
319 guessed_suite
= "web-platform-tests-testharness"
320 if "/css/" in abs_test
:
321 guessed_suite
= "web-platform-tests-reftest"
323 filename
.startswith("test_")
325 and guess_doc(abs_test
) == "js"
327 guessed_suite
= "xpcshell"
329 if filename
.startswith("browser_") and has_browser_ini
:
330 guessed_suite
= "mochitest-browser-chrome"
331 elif filename
.startswith("test_"):
332 if has_chrome_ini
and has_plain_ini
:
334 "Error: directory contains both a chrome.ini and mochitest.ini. "
335 "Please set --suite=mochitest-chrome or --suite=mochitest-plain."
338 guessed_suite
= "mochitest-chrome"
340 guessed_suite
= "mochitest-plain"
341 return guessed_suite
, err
347 description
="Run tests (detects the kind of test and runs it).",
348 parser
=get_test_parser
,
350 def test(command_context
, what
, extra_args
, **log_args
):
351 """Run tests from names or paths.
353 mach test accepts arguments specifying which tests to run. Each argument
356 * The path to a test file
357 * A directory containing tests
359 * An alias to a test suite name (codes used on TreeHerder)
361 When paths or directories are given, they are first resolved to test
362 files known to the build system.
364 If resolved tests belong to more than one test type/flavor/harness,
365 the harness for each relevant type/flavor will be invoked. e.g. if
366 you specify a directory with xpcshell and browser chrome mochitests,
367 both harnesses will be invoked.
369 Warning: `mach test` does not automatically re-build.
370 Please remember to run `mach build` when necessary.
374 Run all test files in the devtools/client/shared/redux/middleware/xpcshell/
377 `./mach test devtools/client/shared/redux/middleware/xpcshell/`
379 The below command prints a short summary of results instead of
380 the default more verbose output.
381 Do not forget the - (minus sign) after --log-grouped!
383 `./mach test --log-grouped - devtools/client/shared/redux/middleware/xpcshell/`
385 from mozlog
.commandline
import setup_logging
386 from mozlog
.handlers
import StreamHandler
387 from moztest
.resolve
import get_suite_definition
, TestResolver
, TEST_SUITES
389 resolver
= command_context
._spawn
(TestResolver
)
390 run_suites
, run_tests
= resolver
.resolve_metadata(what
)
392 if not run_suites
and not run_tests
:
396 if log_args
.get("debugger", None):
399 if not mozdebug
.get_debugger_info(log_args
.get("debugger")):
401 extra_args_debugger_notation
= "=".join(
402 ["--debugger", log_args
.get("debugger")]
405 extra_args
.append(extra_args_debugger_notation
)
407 extra_args
= [extra_args_debugger_notation
]
409 # Create shared logger
410 format_args
= {"level": command_context
._mach
_context
.settings
["test"]["level"]}
411 if not run_suites
and len(run_tests
) == 1:
412 format_args
["verbose"] = True
413 format_args
["compact"] = False
415 default_format
= command_context
._mach
_context
.settings
["test"]["format"]
417 "mach-test", log_args
, {default_format
: sys
.stdout
}, format_args
419 for handler
in log
.handlers
:
420 if isinstance(handler
, StreamHandler
):
421 handler
.formatter
.inner
.summary_on_shutdown
= True
424 for suite_name
in run_suites
:
425 suite
= TEST_SUITES
[suite_name
]
426 kwargs
= suite
["kwargs"]
428 kwargs
.setdefault("subsuite", None)
430 if "mach_command" in suite
:
431 res
= command_context
._mach
_context
.commands
.dispatch(
432 suite
["mach_command"],
433 command_context
._mach
_context
,
441 for test
in run_tests
:
442 key
= (test
["flavor"], test
.get("subsuite", ""))
443 buckets
.setdefault(key
, []).append(test
)
445 for (flavor
, subsuite
), tests
in sorted(buckets
.items()):
446 _
, m
= get_suite_definition(flavor
, subsuite
)
447 if "mach_command" not in m
:
448 substr
= "-{}".format(subsuite
) if subsuite
else ""
449 print(UNKNOWN_FLAVOR
% (flavor
, substr
))
453 kwargs
= dict(m
["kwargs"])
455 kwargs
.setdefault("subsuite", None)
457 res
= command_context
._mach
_context
.commands
.dispatch(
459 command_context
._mach
_context
,
472 "cppunittest", category
="testing", description
="Run cpp unit tests (C++ tests)."
478 help="Test to run. Can be specified as one or more files or "
479 "directories, or omitted. If omitted, the entire test suite is "
482 def run_cppunit_test(command_context
, **params
):
483 from mozlog
import commandline
485 log
= params
.get("log")
487 log
= commandline
.setup_logging("cppunittest", {}, {"tbpl": sys
.stdout
})
489 # See if we have crash symbols
490 symbols_path
= os
.path
.join(command_context
.distdir
, "crashreporter-symbols")
491 if not os
.path
.isdir(symbols_path
):
494 # If no tests specified, run all tests in main manifest
495 tests
= params
["test_files"]
497 tests
= [os
.path
.join(command_context
.distdir
, "cppunittests")]
498 manifest_path
= os
.path
.join(
499 command_context
.topsrcdir
, "testing", "cppunittest.ini"
504 utility_path
= command_context
.bindir
506 if conditions
.is_android(command_context
):
507 from mozrunner
.devices
.android_device
import (
508 verify_android_device
,
512 verify_android_device(command_context
, install
=InstallIntent
.NO
)
513 return run_android_test(tests
, symbols_path
, manifest_path
, log
)
515 return run_desktop_test(
516 command_context
, tests
, symbols_path
, manifest_path
, utility_path
, log
520 def run_desktop_test(
521 command_context
, tests
, symbols_path
, manifest_path
, utility_path
, log
523 import runcppunittests
as cppunittests
524 from mozlog
import commandline
526 parser
= cppunittests
.CPPUnittestOptions()
527 commandline
.add_logging_group(parser
)
528 options
, args
= parser
.parse_args()
530 options
.symbols_path
= symbols_path
531 options
.manifest_path
= manifest_path
532 options
.utility_path
= utility_path
533 options
.xre_path
= command_context
.bindir
536 result
= cppunittests
.run_test_harness(options
, tests
)
537 except Exception as e
:
538 log
.error("Caught exception running cpp unit tests: %s" % str(e
))
542 return 0 if result
else 1
545 def run_android_test(command_context
, tests
, symbols_path
, manifest_path
, log
):
546 import remotecppunittests
as remotecppunittests
547 from mozlog
import commandline
549 parser
= remotecppunittests
.RemoteCPPUnittestOptions()
550 commandline
.add_logging_group(parser
)
551 options
, args
= parser
.parse_args()
553 if not options
.adb_path
:
554 from mozrunner
.devices
.android_device
import get_adb_path
556 options
.adb_path
= get_adb_path(command_context
)
557 options
.symbols_path
= symbols_path
558 options
.manifest_path
= manifest_path
559 options
.xre_path
= command_context
.bindir
560 options
.local_lib
= command_context
.bindir
.replace("bin", "fennec")
561 for file in os
.listdir(os
.path
.join(command_context
.topobjdir
, "dist")):
562 if file.endswith(".apk") and file.startswith("fennec"):
563 options
.local_apk
= os
.path
.join(command_context
.topobjdir
, "dist", file)
564 log
.info("using APK: " + options
.local_apk
)
568 result
= remotecppunittests
.run_test_harness(options
, tests
)
569 except Exception as e
:
570 log
.error("Caught exception running cpp unit tests: %s" % str(e
))
574 return 0 if result
else 1
def executable_name(name):
    """Return the platform-appropriate binary name: ".exe" is appended on
    Windows, *name* is returned unchanged everywhere else."""
    suffix = ".exe" if sys.platform.startswith("win") else ""
    return name + suffix
584 description
="Run SpiderMonkey JS tests in the JS shell.",
585 ok_if_tests_disabled
=True,
587 @CommandArgument("--shell", help="The shell to be used")
590 nargs
=argparse
.REMAINDER
,
591 help="Extra arguments to pass down to the test harness.",
593 def run_jstests(command_context
, shell
, params
):
596 command_context
.virtualenv_manager
.ensure()
597 python
= command_context
.virtualenv_manager
.python_path
599 js
= shell
or os
.path
.join(command_context
.bindir
, executable_name("js"))
602 os
.path
.join(command_context
.topsrcdir
, "js", "src", "tests", "jstests.py"),
606 return subprocess
.call(jstest_cmd
)
612 description
="Run SpiderMonkey jit-tests in the JS shell.",
613 ok_if_tests_disabled
=True,
615 @CommandArgument("--shell", help="The shell to be used")
620 help="Run with the SM(cgc) job's env vars",
624 nargs
=argparse
.REMAINDER
,
625 help="Extra arguments to pass down to the test harness.",
627 def run_jittests(command_context
, shell
, cgc
, params
):
630 command_context
.virtualenv_manager
.ensure()
631 python
= command_context
.virtualenv_manager
.python_path
633 js
= shell
or os
.path
.join(command_context
.bindir
, executable_name("js"))
636 os
.path
.join(command_context
.topsrcdir
, "js", "src", "jit-test", "jit_test.py"),
640 env
= os
.environ
.copy()
642 env
["JS_GC_ZEAL"] = "IncrementalMultipleSlices"
644 return subprocess
.call(jittest_cmd
, env
=env
)
647 @Command("jsapi-tests", category
="testing", description
="Run SpiderMonkey JSAPI tests.")
652 help="Test to run. Can be a prefix or omitted. If "
653 "omitted, the entire test suite is executed.",
655 def run_jsapitests(command_context
, test_name
=None):
659 os
.path
.join(command_context
.bindir
, executable_name("jsapi-tests"))
662 jsapi_tests_cmd
.append(test_name
)
664 test_env
= os
.environ
.copy()
665 test_env
["TOPSRCDIR"] = command_context
.topsrcdir
667 result
= subprocess
.call(jsapi_tests_cmd
, env
=test_env
)
669 print(f
"jsapi-tests failed, exit code {result}")
673 def run_check_js_msg(command_context
):
676 command_context
.virtualenv_manager
.ensure()
677 python
= command_context
.virtualenv_manager
.python_path
681 os
.path
.join(command_context
.topsrcdir
, "config", "check_js_msg_encoding.py"),
684 return subprocess
.call(check_cmd
)
687 def get_jsshell_parser():
688 from jsshell
.benchmark
import get_parser
def run_jsshelltests(command_context, **kwargs):
    """Run benchmarks in the SpiderMonkey JS shell.

    All keyword options collected by the jsshell parser are forwarded
    untouched to the benchmark harness; its exit status is returned.
    """
    from jsshell import benchmark as bench

    return bench.run(**kwargs)
708 description
="Mercurial style .t tests for command line applications.",
714 help="Test paths to run. Each path can be a test file or directory. "
715 "If omitted, the entire suite will be run.",
719 nargs
=argparse
.REMAINDER
,
720 help="Extra arguments to pass down to the cram binary. See "
721 "'./mach python -m cram -- -h' for a list of available options.",
723 def cramtest(command_context
, cram_args
=None, test_paths
=None, test_objects
=None):
724 command_context
.activate_virtualenv()
726 from manifestparser
import TestManifest
728 if test_objects
is None:
729 from moztest
.resolve
import TestResolver
731 resolver
= command_context
._spawn
(TestResolver
)
733 # If we were given test paths, try to find tests matching them.
734 test_objects
= resolver
.resolve_tests(paths
=test_paths
, flavor
="cram")
736 # Otherwise just run everything in CRAMTEST_MANIFESTS
737 test_objects
= resolver
.resolve_tests(flavor
="cram")
740 message
= "No tests were collected, check spelling of the test paths."
741 command_context
.log(logging
.WARN
, "cramtest", {}, message
)
745 mp
.tests
.extend(test_objects
)
746 tests
= mp
.active_tests(disabled
=False, **mozinfo
.info
)
748 python
= command_context
.virtualenv_manager
.python_path
749 cmd
= [python
, "-m", "cram"] + cram_args
+ [t
["relpath"] for t
in tests
]
750 return subprocess
.call(cmd
, cwd
=command_context
.topsrcdir
)
753 from datetime
import date
, timedelta
757 "test-info", category
="testing", description
="Display historical test results."
759 def test_info(command_context
):
761 All functions implemented as subcommands.
768 description
="Display historical test result summary for named tests.",
770 @CommandArgument("test_names", nargs
=argparse
.REMAINDER
, help="Test(s) of interest.")
773 default
=(date
.today() - timedelta(7)).strftime("%Y-%m-%d"),
774 help="Start date (YYYY-MM-DD)",
777 "--end", default
=date
.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
782 help="Retrieve and display general test information.",
787 help="Retrieve and display related Bugzilla bugs.",
789 @CommandArgument("--verbose", action
="store_true", help="Enable debug logging.")
801 ti
= testinfo
.TestInfoTests(verbose
)
814 description
="Generate a json report of test manifests and/or tests "
815 "categorized by Bugzilla component and optionally filtered "
816 "by path, component, and/or manifest annotations.",
821 help="Comma-separated list of Bugzilla components."
822 " eg. Testing::General,Core::WebVR",
826 help='Limit results to tests of the specified flavor (eg. "xpcshell").',
830 help='Limit results to tests of the specified subsuite (eg. "devtools").',
833 "paths", nargs
=argparse
.REMAINDER
, help="File system paths of interest."
838 help="Include test manifests in report.",
841 "--show-tests", action
="store_true", help="Include individual tests in report."
844 "--show-summary", action
="store_true", help="Include summary in report."
847 "--show-annotations",
849 help="Include list of manifest annotation conditions in report.",
853 help="Comma-separated list of value regular expressions to filter on; "
854 "displayed tests contain all specified values.",
858 help="Comma-separated list of test keys to filter on, "
859 'like "skip-if"; only these fields will be searched '
860 "for filter-values.",
863 "--no-component-report",
864 action
="store_false",
865 dest
="show_components",
867 help="Do not categorize by bugzilla component.",
869 @CommandArgument("--output-file", help="Path to report file.")
870 @CommandArgument("--verbose", action
="store_true", help="Enable debug logging.")
873 default
=(date
.today() - timedelta(30)).strftime("%Y-%m-%d"),
874 help="Start date (YYYY-MM-DD)",
877 "--end", default
=date
.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
898 from mozbuild
import build_commands
901 command_context
.config_environment
902 except BuildEnvironmentNotFoundException
:
903 print("Looks like configure has not run yet, running it now...")
904 build_commands
.configure(command_context
)
906 ti
= testinfo
.TestInfoReport(verbose
)
928 description
='Compare two reports generated by "test-info reports".',
933 help="The first (earlier) report file; path to local file or url.",
936 "--after", help="The second (later) report file; path to local file or url."
940 help="Path to report file to be written. If not specified, report"
941 "will be written to standard output.",
943 @CommandArgument("--verbose", action
="store_true", help="Enable debug logging.")
944 def test_report_diff(command_context
, before
, after
, output_file
, verbose
):
947 ti
= testinfo
.TestInfoReport(verbose
)
948 ti
.report_diff(before
, after
, output_file
)
954 description
="Display failure line groupings and frequencies for "
955 "single tracking intermittent bugs.",
959 default
=(date
.today() - timedelta(30)).strftime("%Y-%m-%d"),
960 help="Start date (YYYY-MM-DD)",
963 "--end", default
=date
.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
968 help="bugid for treeherder intermittent failures data query.",
970 def test_info_failures(
978 # bugid comes in as a string, we need an int:
984 print("Please enter a valid bugid (i.e. '1760132')")
989 "https://bugzilla.mozilla.org/rest/bug?include_fields=summary,depends_on&id=%s"
992 r
= requests
.get(url
, headers
={"User-agent": "mach-test-info/1.0"})
993 if r
.status_code
!= 200:
994 print("%s error retrieving url: %s" % (r
.status_code
, url
))
998 print("unable to get bugzilla information for %s" % bugid
)
1001 summary
= data
["bugs"][0]["summary"]
1002 parts
= summary
.split("|")
1003 if not summary
.endswith("single tracking bug") or len(parts
) != 2:
1004 print("this query only works with single tracking bugs")
1007 # get depends_on bugs:
1009 if "depends_on" in data
["bugs"][0]:
1010 buglist
.extend(data
["bugs"][0]["depends_on"])
1012 testname
= parts
[0].strip().split(" ")[-1]
1014 # now query treeherder to get details about annotations
1017 url
= "https://treeherder.mozilla.org/api/failuresbybug/"
1018 url
+= "?startday=%s&endday=%s&tree=trunk&bug=%s" % (start
, end
, b
)
1019 r
= requests
.get(url
, headers
={"User-agent": "mach-test-info/1.0"})
1020 r
.raise_for_status()
1026 print("no failures were found for given bugid, please ensure bug is")
1027 print("accessible via: https://treeherder.mozilla.org/intermittent-failures")
1030 # query VCS to get current list of variants:
1033 url
= "https://hg.mozilla.org/mozilla-central/raw-file/tip/taskcluster/ci/test/variants.yml"
1034 r
= requests
.get(url
, headers
={"User-agent": "mach-test-info/1.0"})
1035 variants
= yaml
.safe_load(r
.text
)
1038 "\nQuerying data for bug %s annotated from %s to %s on trunk.\n\n"
1039 % (buglist
, start
, end
)
1043 for failure
in data
:
1044 # config = platform/buildtype
1045 # testsuite (<suite>[-variant][-<chunk>])
1046 # lines - group by patterns that contain test name
1047 config
= "%s/%s" % (failure
["platform"], failure
["build_type"])
1051 varpos
= len(failure
["test_suite"])
1052 for v
in variants
.keys():
1053 var
= "-%s" % variants
[v
]["suffix"]
1054 if var
in failure
["test_suite"]:
1055 if failure
["test_suite"].find(var
) < varpos
:
1059 suite
= failure
["test_suite"].split(variant
)[0]
1061 parts
= failure
["test_suite"].split("-")
1064 suite
= "-".join(parts
[:-1])
1066 pass # if this works, then the last '-X' is a number :)
1069 print("Error: failure to find variant in %s" % failure
["test_suite"])
1071 job
= "%s-%s%s" % (config
, suite
, variant
)
1072 if job
not in jobs
.keys():
1076 # lines - sum(hash) of all lines where we match testname
1078 for line
in failure
["lines"]:
1079 if len(line
.split(testname
)) <= 1:
1081 # strip off timestamp and mozharness status
1082 parts
= line
.split("TEST-UNEXPECTED")
1083 l
= "TEST-UNEXPECTED%s" % parts
[-1]
1085 # only keep 25 characters of the failure, often longer is random numbers
1086 parts
= l
.split(testname
)
1087 l
= "%s%s%s" % (parts
[0], testname
, parts
[1][:25])
1091 if not failure
["lines"]:
1097 if hvalue
not in lines
.keys():
1098 lines
[hvalue
] = {"lines": failure
["lines"], "config": []}
1099 lines
[hvalue
]["config"].append(job
)
1101 for h
in lines
.keys():
1102 print("%s errors with:" % (len(lines
[h
]["config"])))
1103 for l
in lines
[h
]["lines"]:
1107 "... no failure lines recorded in"
1108 " https://treeherder.mozilla.org/intermittent-failures ..."
1112 count
= len([x
for x
in lines
[h
]["config"] if x
== job
])
1114 print(" %s: %s" % (job
, count
))
1121 conditions
=[conditions
.is_non_artifact_build
],
1122 description
="Run rust unit tests (via cargo test).",
1124 def run_rusttests(command_context
, **kwargs
):
1125 return command_context
._mach
_context
.commands
.dispatch(
1127 command_context
._mach
_context
,
1128 what
=["pre-export", "export", "recurse_rusttests"],
1133 "fluent-migration-test",
1135 description
="Test Fluent migration recipes.",
1137 @CommandArgument("test_paths", nargs
="*", metavar
="N", help="Recipe paths to test.")
1138 def run_migration_tests(command_context
, test_paths
=None, **kwargs
):
1141 command_context
.activate_virtualenv()
1142 from test_fluent_migrations
import fmt
1146 for to_test
in test_paths
:
1148 context
= fmt
.inspect_migration(to_test
)
1149 for issue
in context
["issues"]:
1150 command_context
.log(
1152 "fluent-migration-test",
1154 "error": issue
["msg"],
1157 "ERROR in {file}: {error}",
1159 if context
["issues"]:
1161 with_context
.append(
1164 "references": context
["references"],
1167 except Exception as e
:
1168 command_context
.log(
1170 "fluent-migration-test",
1171 {"error": str(e
), "file": to_test
},
1172 "ERROR in {file}: {error}",
1175 obj_dir
= fmt
.prepare_object_dir(command_context
)
1176 for context
in with_context
:
1177 rv |
= fmt
.test_migration(command_context
, obj_dir
, **context
)