1 # This Source Code Form is subject to the terms of the Mozilla Public
2 # License, v. 2.0. If a copy of the MPL was not distributed with this
3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import copy
import logging
import re
import shlex
from collections import defaultdict
13 logger
= logging
.getLogger(__name__
)
15 # The build type aliases are very cryptic and only used in try flags these are
16 # mappings from the single char alias to a longer more recognizable form.
17 BUILD_TYPE_ALIASES
= {"o": "opt", "d": "debug"}
19 # consider anything in this whitelist of kinds to be governed by -b/-p
30 # mapping from shortcut name (usable with -u) to a boolean function identifying
32 def alias_prefix(prefix
):
33 return lambda name
: name
.startswith(prefix
)
36 def alias_contains(infix
):
37 return lambda name
: infix
in name
40 def alias_matches(pattern
):
41 pattern
= re
.compile(pattern
)
42 return lambda name
: pattern
.match(name
)
46 # Aliases specify shorthands that can be used in try syntax. The shorthand
47 # is the dictionary key, with the value representing a pattern for matching
50 # Note that alias expansion is performed in the absence of any chunk
51 # prefixes. For example, the first example above would replace "foo-7"
52 # with "foobar-7". Note that a few aliases allowed chunks to be specified
53 # without a leading `-`, for example 'mochitest-dt1'. That's no longer
55 "cppunit": alias_prefix("cppunit"),
56 "crashtest": alias_prefix("crashtest"),
57 "crashtest-e10s": alias_prefix("crashtest-e10s"),
58 "e10s": alias_contains("e10s"),
59 "firefox-ui-functional": alias_prefix("firefox-ui-functional"),
60 "gaia-js-integration": alias_contains("gaia-js-integration"),
61 "gtest": alias_prefix("gtest"),
62 "jittest": alias_prefix("jittest"),
63 "jittests": alias_prefix("jittest"),
64 "jsreftest": alias_prefix("jsreftest"),
65 "jsreftest-e10s": alias_prefix("jsreftest-e10s"),
66 "marionette": alias_prefix("marionette"),
67 "mochitest": alias_prefix("mochitest"),
68 "mochitests": alias_prefix("mochitest"),
69 "mochitest-e10s": alias_prefix("mochitest-e10s"),
70 "mochitests-e10s": alias_prefix("mochitest-e10s"),
71 "mochitest-debug": alias_prefix("mochitest-debug-"),
72 "mochitest-a11y": alias_contains("mochitest-a11y"),
73 "mochitest-bc": alias_prefix("mochitest-browser-chrome"),
74 "mochitest-e10s-bc": alias_prefix("mochitest-browser-chrome-e10s"),
75 "mochitest-browser-chrome": alias_prefix("mochitest-browser-chrome"),
76 "mochitest-e10s-browser-chrome": alias_prefix("mochitest-browser-chrome-e10s"),
77 "mochitest-chrome": alias_contains("mochitest-chrome"),
78 "mochitest-dt": alias_prefix("mochitest-devtools-chrome"),
79 "mochitest-e10s-dt": alias_prefix("mochitest-devtools-chrome-e10s"),
80 "mochitest-gl": alias_prefix("mochitest-webgl"),
81 "mochitest-gl-e10s": alias_prefix("mochitest-webgl-e10s"),
82 "mochitest-gpu": alias_prefix("mochitest-gpu"),
83 "mochitest-gpu-e10s": alias_prefix("mochitest-gpu-e10s"),
84 "mochitest-media": alias_prefix("mochitest-media"),
85 "mochitest-media-e10s": alias_prefix("mochitest-media-e10s"),
86 "mochitest-vg": alias_prefix("mochitest-valgrind"),
87 "reftest": alias_matches(r
"^(plain-)?reftest.*$"),
88 "reftest-no-accel": alias_matches(r
"^(plain-)?reftest-no-accel.*$"),
89 "reftests": alias_matches(r
"^(plain-)?reftest.*$"),
90 "reftests-e10s": alias_matches(r
"^(plain-)?reftest-e10s.*$"),
91 "robocop": alias_prefix("robocop"),
92 "web-platform-test": alias_prefix("web-platform-tests"),
93 "web-platform-tests": alias_prefix("web-platform-tests"),
94 "web-platform-tests-e10s": alias_prefix("web-platform-tests-e10s"),
95 "web-platform-tests-crashtests": alias_prefix("web-platform-tests-crashtest"),
96 "web-platform-tests-print-reftest": alias_prefix(
97 "web-platform-tests-print-reftest"
99 "web-platform-tests-reftests": alias_prefix("web-platform-tests-reftest"),
100 "web-platform-tests-reftests-e10s": alias_prefix("web-platform-tests-reftest-e10s"),
101 "web-platform-tests-wdspec": alias_prefix("web-platform-tests-wdspec"),
102 "web-platform-tests-wdspec-e10s": alias_prefix("web-platform-tests-wdspec-e10s"),
103 "xpcshell": alias_prefix("xpcshell"),
106 # unittest platforms can be specified by substring of the "pretty name", which
107 # is basically the old Buildbot builder name. This dict has {pretty name,
108 # [test_platforms]} translations, This includes only the most commonly-used
109 # substrings. It is OK to add new test platforms to various shorthands here;
110 # if you add a new Linux64 test platform for instance, people will expect that
111 # their previous methods of requesting "all linux64 tests" will include this
112 # new platform, and they shouldn't have to explicitly spell out the new platform
113 # every time for such cases.
115 # Note that the test platforms here are only the prefix up to the `/`.
116 UNITTEST_PLATFORM_PRETTY_NAMES
= {
124 "x64": ["linux64", "linux64-asan", "linux1804-64", "linux1804-64-asan"],
125 "Android 7.0 Moto G5 32bit": ["android-hw-g5-7.0-arm7"],
126 "Android 7.0 Samsung A51 32bit": ["android-hw-a51-11.0-arm7"],
127 "Android 7.0 Samsung A51 64bit": ["android-hw-a51-11.0-aarch64"],
128 "Android 8.0 Google Pixel 2 32bit": ["android-hw-p2-8.0-arm7"],
129 "Android 8.0 Google Pixel 2 64bit": ["android-hw-p2-8.0-android-aarch64"],
130 "Android 13.0 Google Pixel 5 32bit": ["android-hw-p5-13.0-arm7"],
131 "Android 13.0 Google Pixel 5 64bit": ["android-hw-p5-13.0-android-aarch64"],
132 "Windows 7": ["windows7-32"],
133 "Windows 7 VM": ["windows7-32-vm"],
134 "Windows 10": ["windows10-64"],
137 TEST_CHUNK_SUFFIX
= re
.compile("(.*)-([0-9]+)$")
140 def escape_whitespace_in_brackets(input_str
):
142 In tests you may restrict them by platform [] inside of the brackets
143 whitespace may occur this is typically invalid shell syntax so we escape it
144 with backslash sequences .
148 for char
in input_str
:
159 if char
== " " and in_brackets
:
168 def split_try_msg(message
):
170 try_idx
= message
.index("try:")
173 message
= message
[try_idx
:].split("\n")[0]
174 # shlex used to ensure we split correctly when giving values to argparse.
175 return shlex
.split(escape_whitespace_in_brackets(message
))
178 def parse_message(message
):
179 parts
= split_try_msg(message
)
181 # Argument parser based on try flag flags
182 parser
= argparse
.ArgumentParser()
183 parser
.add_argument("-b", "--build", dest
="build_types")
185 "-p", "--platform", nargs
="?", dest
="platforms", const
="all", default
="all"
188 "-u", "--unittests", nargs
="?", dest
="unittests", const
="all", default
="all"
191 "-t", "--talos", nargs
="?", dest
="talos", const
="all", default
="none"
194 "-r", "--raptor", nargs
="?", dest
="raptor", const
="all", default
="none"
197 "-i", "--interactive", dest
="interactive", action
="store_true", default
=False
200 "-e", "--all-emails", dest
="notifications", action
="store_const", const
="all"
205 dest
="notifications",
206 action
="store_const",
209 parser
.add_argument("-j", "--job", dest
="jobs", action
="append")
212 dest
="talos_trigger_tests",
219 dest
="raptor_trigger_tests",
224 parser
.add_argument("--setenv", dest
="env", action
="append")
225 parser
.add_argument("--gecko-profile", dest
="profile", action
="store_true")
226 parser
.add_argument("--tag", dest
="tag", action
="store", default
=None)
227 parser
.add_argument("--no-retry", dest
="no_retry", action
="store_true")
229 "--include-nightly", dest
="include_nightly", action
="store_true"
231 parser
.add_argument("--artifact", dest
="artifact", action
="store_true")
233 # While we are transitioning from BB to TC, we want to push jobs to tc-worker
234 # machines but not overload machines with every try push. Therefore, we add
235 # this temporary option to be able to push jobs to tc-worker.
238 "--taskcluster-worker",
239 dest
="taskcluster_worker",
244 # In order to run test jobs multiple times
245 parser
.add_argument("--rebuild", dest
="trigger_tests", type=int, default
=1)
246 args
, _
= parser
.parse_known_args(parts
)
248 try_options
= vars(args
)
250 "use-artifact-builds": try_options
.pop("artifact"),
251 "gecko-profile": try_options
.pop("profile"),
252 "env": dict(arg
.split("=") for arg
in try_options
.pop("env") or []),
255 "try_options": try_options
,
256 "try_task_config": try_task_config
,
260 class TryOptionSyntax
:
261 def __init__(self
, parameters
, full_task_graph
, graph_config
):
263 Apply the try options in parameters.
265 The resulting object has attributes:
267 - build_types: a list containing zero or more of 'opt' and 'debug'
268 - platforms: a list of selected platform names, or None for all
269 - unittests: a list of tests, of the form given below, or None for all
270 - jobs: a list of requested job names, or None for all
271 - trigger_tests: the number of times tests should be triggered (--rebuild)
272 - interactive: true if --interactive
273 - notifications: either None if no notifications or one of 'all' or 'failure'
274 - talos_trigger_tests: the number of time talos tests should be triggered (--rebuild-talos)
275 - tag: restrict tests to the specified tag
276 - no_retry: do not retry failed jobs
278 The unittests and talos lists contain dictionaries of the form:
281 'test': '<suite name>',
282 'platforms': [..platform names..], # to limit to only certain platforms
283 'only_chunks': set([..chunk numbers..]), # to limit only to certain chunks
286 self
.full_task_graph
= full_task_graph
287 self
.graph_config
= graph_config
289 self
.build_types
= []
294 self
.trigger_tests
= 0
295 self
.interactive
= False
296 self
.notifications
= None
297 self
.talos_trigger_tests
= 0
298 self
.raptor_trigger_tests
= 0
300 self
.no_retry
= False
302 options
= parameters
["try_options"]
305 self
.jobs
= self
.parse_jobs(options
["jobs"])
306 self
.build_types
= self
.parse_build_types(
307 options
["build_types"], full_task_graph
309 self
.platforms
= self
.parse_platforms(options
, full_task_graph
)
310 self
.unittests
= self
.parse_test_option(
311 "unittest_try_name", options
["unittests"], full_task_graph
313 self
.talos
= self
.parse_test_option(
314 "talos_try_name", options
["talos"], full_task_graph
316 self
.raptor
= self
.parse_test_option(
317 "raptor_try_name", options
["raptor"], full_task_graph
319 self
.trigger_tests
= options
["trigger_tests"]
320 self
.interactive
= options
["interactive"]
321 self
.notifications
= options
["notifications"]
322 self
.talos_trigger_tests
= options
["talos_trigger_tests"]
323 self
.raptor_trigger_tests
= options
["raptor_trigger_tests"]
324 self
.tag
= options
["tag"]
325 self
.no_retry
= options
["no_retry"]
326 self
.include_nightly
= options
["include_nightly"]
328 self
.test_tiers
= self
.generate_test_tiers(full_task_graph
)
330 def generate_test_tiers(self
, full_task_graph
):
331 retval
= defaultdict(set)
332 for t
in full_task_graph
.tasks
.values():
333 if t
.attributes
.get("kind") == "test":
335 tier
= t
.task
["extra"]["treeherder"]["tier"]
336 name
= t
.attributes
.get("unittest_try_name")
337 retval
[name
].add(tier
)
343 def parse_jobs(self
, jobs_arg
):
344 if not jobs_arg
or jobs_arg
== ["none"]:
345 return [] # default is `-j none`
346 if jobs_arg
== ["all"]:
350 expanded
.extend(j
.strip() for j
in job
.split(","))
353 def parse_build_types(self
, build_types_arg
, full_task_graph
):
354 if build_types_arg
is None:
360 BUILD_TYPE_ALIASES
.get(build_type
) for build_type
in build_types_arg
366 t
.attributes
["build_type"]
367 for t
in full_task_graph
.tasks
.values()
368 if "build_type" in t
.attributes
370 bad_types
= set(build_types
) - all_types
373 "Unknown build type(s) [%s] specified for try" % ",".join(bad_types
)
378 def parse_platforms(self
, options
, full_task_graph
):
379 platform_arg
= options
["platforms"]
380 if platform_arg
== "all":
383 RIDEALONG_BUILDS
= self
.graph_config
["try"]["ridealong-builds"]
385 for build
in platform_arg
.split(","):
386 if build
in ("macosx64",):
387 # Regular opt builds are faster than shippable ones, but we don't run
388 # tests against them.
389 # We want to choose them (and only them) if no tests were requested.
391 options
["unittests"] == "none"
392 and options
["talos"] == "none"
393 and options
["raptor"] == "none"
395 results
.append("macosx64")
396 logger
.info("adding macosx64 for try syntax using macosx64.")
397 # Otherwise, use _just_ the shippable builds.
399 results
.append("macosx64-shippable")
401 "adding macosx64-shippable for try syntax using macosx64."
404 results
.append(build
)
405 if build
in RIDEALONG_BUILDS
:
406 results
.extend(RIDEALONG_BUILDS
[build
])
408 "platform %s triggers ridealong builds %s"
409 % (build
, ", ".join(RIDEALONG_BUILDS
[build
]))
413 t
.attributes
["test_platform"]
414 for t
in full_task_graph
.tasks
.values()
415 if "test_platform" in t
.attributes
418 t
.attributes
["build_platform"]
419 for t
in full_task_graph
.tasks
.values()
420 if "build_platform" in t
.attributes
422 all_platforms
= test_platforms | build_platforms
423 bad_platforms
= set(results
) - all_platforms
426 "Unknown platform(s) [%s] specified for try" % ",".join(bad_platforms
)
431 def parse_test_option(self
, attr_name
, test_arg
, full_task_graph
):
434 Parse a unittest (-u) or talos (-t) option, in the context of a full
435 task graph containing available `unittest_try_name` or `talos_try_name`
436 attributes. There are three cases:
438 - test_arg is == 'none' (meaning an empty list)
439 - test_arg is == 'all' (meaning use the list of jobs for that job type)
440 - test_arg is comma string which needs to be parsed
443 # Empty job list case...
444 if test_arg
is None or test_arg
== "none":
448 t
.attributes
["test_platform"].split("/")[0]
449 for t
in full_task_graph
.tasks
.values()
450 if "test_platform" in t
.attributes
453 tests
= self
.parse_test_opts(test_arg
, all_platforms
)
459 t
.attributes
[attr_name
]
460 for t
in full_task_graph
.tasks
.values()
461 if attr_name
in t
.attributes
464 # Special case where tests is 'all' and must be expanded
465 if tests
[0]["test"] == "all":
468 for test
in all_tests
:
469 entry
= {"test": test
}
470 # If there are platform restrictions copy them across the list.
471 if "platforms" in all_entry
:
472 entry
["platforms"] = list(all_entry
["platforms"])
473 results
.append(entry
)
474 return self
.parse_test_chunks(all_tests
, results
)
475 return self
.parse_test_chunks(all_tests
, tests
)
477 def parse_test_opts(self
, input_str
, all_platforms
):
479 Parse `testspec,testspec,..`, where each testspec is a test name
480 optionally followed by a list of test platforms or negated platforms in
483 No brackets indicates that tests should run on all platforms for which
484 builds are available. If testspecs are provided, then each is treated,
485 from left to right, as an instruction to include or (if negated)
486 exclude a set of test platforms. A single spec may expand to multiple
487 test platforms via UNITTEST_PLATFORM_PRETTY_NAMES. If the first test
488 spec is negated, processing begins with the full set of available test
489 platforms; otherwise, processing begins with an empty set of test
493 # Final results which we will return.
500 def normalize_platforms():
501 if "platforms" not in cur_test
:
503 # if the first spec is a negation, start with all platforms
504 if cur_test
["platforms"][0][0] == "-":
505 platforms
= all_platforms
.copy()
508 for platform
in cur_test
["platforms"]:
509 if platform
[0] == "-":
510 platforms
= [p
for p
in platforms
if p
!= platform
[1:]]
512 platforms
.append(platform
)
513 cur_test
["platforms"] = platforms
516 normalize_platforms()
517 cur_test
["test"] = value
.strip()
518 tests
.insert(0, cur_test
)
520 def add_platform(value
):
521 platform
= value
.strip()
522 if platform
[0] == "-":
524 platform
= platform
[1:]
527 platforms
= UNITTEST_PLATFORM_PRETTY_NAMES
.get(platform
, [platform
])
529 platforms
= ["-" + p
for p
in platforms
]
530 cur_test
["platforms"] = platforms
+ cur_test
.get("platforms", [])
532 # This might be somewhat confusing but we parse the string _backwards_ so
533 # there is no ambiguity over what state we are in.
534 for char
in reversed(input_str
):
536 # , indicates exiting a state
539 # Exit a particular platform.
543 # Exit a particular test.
548 # Token must always be reset after we exit a state
551 # Exiting platform state entering test state.
556 # Entering platform state.
562 # Handle any left over tokens.
568 def handle_alias(self
, test
, all_tests
):
570 Expand a test if its name refers to an alias, returning a list of test
571 dictionaries cloned from the first (to maintain any metadata).
573 if test
["test"] not in UNITTEST_ALIASES
:
576 alias
= UNITTEST_ALIASES
[test
["test"]]
579 newtest
= copy
.deepcopy(test
)
580 newtest
["test"] = name
583 def exprmatch(alias
):
584 return [t
for t
in all_tests
if alias(t
)]
586 return [mktest(t
) for t
in exprmatch(alias
)]
588 def parse_test_chunks(self
, all_tests
, tests
):
590 Test flags may include parameters to narrow down the number of chunks in a
591 given push. We don't model 1 chunk = 1 job in taskcluster so we must check
592 each test flag to see if it is actually specifying a chunk.
597 matches
= TEST_CHUNK_SUFFIX
.match(test
["test"])
599 name
= matches
.group(1)
600 chunk
= matches
.group(2)
601 if name
in seen_chunks
:
602 seen_chunks
[name
].add(chunk
)
604 seen_chunks
[name
] = {chunk}
606 test
["only_chunks"] = seen_chunks
[name
]
609 results
.extend(self
.handle_alias(test
, all_tests
))
611 # uniquify the results over the test names
613 {test
["test"]: test
for test
in results
}.values(),
614 key
=lambda test
: test
["test"],
618 def find_all_attribute_suffixes(self
, graph
, prefix
):
620 for t
in graph
.tasks
.values():
621 for a
in t
.attributes
:
622 if a
.startswith(prefix
):
623 rv
.add(a
[len(prefix
) :])
626 def task_matches(self
, task
):
627 attr
= task
.attributes
.get
629 def check_run_on_projects():
630 return {"all"} & set(attr("run_on_projects", []))
632 def match_test(try_spec
, attr_name
):
633 run_by_default
= True
634 if attr("build_type") not in self
.build_types
:
638 self
.platforms
is not None
639 and attr("build_platform") not in self
.platforms
642 if not check_run_on_projects():
643 run_by_default
= False
646 return run_by_default
648 # TODO: optimize this search a bit
649 for test
in try_spec
:
650 if attr(attr_name
) == test
["test"]:
655 if "only_chunks" in test
and attr("test_chunk") not in test
["only_chunks"]:
658 tier
= task
.task
["extra"]["treeherder"]["tier"]
659 if "platforms" in test
:
660 if "all" in test
["platforms"]:
662 platform
= attr("test_platform", "").split("/")[0]
663 # Platforms can be forced by syntax like "-u xpcshell[Windows 8]"
664 return platform
in test
["platforms"]
666 # Run Tier 2/3 tests if their build task is Tier 2/3 OR if there is
667 # no tier 1 test of that name.
668 build_task
= self
.full_task_graph
.tasks
[task
.dependencies
["build"]]
669 build_task_tier
= build_task
.task
["extra"]["treeherder"]["tier"]
671 name
= attr("unittest_try_name")
672 test_tiers
= self
.test_tiers
.get(name
)
674 if tier
<= build_task_tier
:
676 "not skipping tier {} test {} because build task {} "
678 tier
, task
.label
, build_task
.label
, build_task_tier
682 if 1 not in test_tiers
:
684 "not skipping tier {} test {} without explicit inclusion; "
685 "it is configured to run on tiers {}".format(
686 tier
, task
.label
, test_tiers
691 "skipping tier {} test {} because build task {} is "
692 "tier {} and there is a higher-tier test of the same name".format(
693 tier
, task
.label
, build_task
.label
, build_task_tier
698 return check_run_on_projects()
701 if attr("job_try_name"):
702 # Beware the subtle distinction between [] and None for self.jobs and self.platforms.
703 # They will be [] if there was no try syntax, and None if try syntax was detected but
704 # they remained unspecified.
705 if self
.jobs
is not None:
706 return attr("job_try_name") in self
.jobs
708 # User specified `-j all`
710 self
.platforms
is not None
711 and attr("build_platform") not in self
.platforms
713 return False # honor -p for jobs governed by a platform
714 # "all" means "everything with `try` in run_on_projects"
715 return check_run_on_projects()
716 if attr("kind") == "test":
718 match_test(self
.unittests
, "unittest_try_name")
719 or match_test(self
.talos
, "talos_try_name")
720 or match_test(self
.raptor
, "raptor_try_name")
722 if attr("kind") in BUILD_KINDS
:
723 if attr("build_type") not in self
.build_types
:
725 if self
.platforms
is None:
726 # for "-p all", look for try in the 'run_on_projects' attribute
727 return check_run_on_projects()
728 if attr("build_platform") not in self
.platforms
:
734 def none_for_all(list):
737 return ", ".join(str(e
) for e
in list)
741 "build_types: " + ", ".join(self
.build_types
),
742 "platforms: " + none_for_all(self
.platforms
),
743 "unittests: " + none_for_all(self
.unittests
),
744 "talos: " + none_for_all(self
.talos
),
745 "raptor" + none_for_all(self
.raptor
),
746 "jobs: " + none_for_all(self
.jobs
),
747 "trigger_tests: " + str(self
.trigger_tests
),
748 "interactive: " + str(self
.interactive
),
749 "notifications: " + str(self
.notifications
),
750 "talos_trigger_tests: " + str(self
.talos_trigger_tests
),
751 "raptor_trigger_tests: " + str(self
.raptor_trigger_tests
),
752 "tag: " + str(self
.tag
),
753 "no_retry: " + str(self
.no_retry
),