Bug 1838739 - Initialize result of SetAsGPUOutOfMemoryError. r=webgpu-reviewers,nical
[gecko.git] / taskcluster / gecko_taskgraph / try_option_syntax.py
blob3e054d63f98f73f97df76f0160b2bd6e3c09ee37
1 # This Source Code Form is subject to the terms of the Mozilla Public
2 # License, v. 2.0. If a copy of the MPL was not distributed with this
3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
6 import argparse
7 import copy
8 import logging
9 import re
10 import shlex
11 from collections import defaultdict
logger = logging.getLogger(__name__)

# The build type aliases are very cryptic and only used in try flags these are
# mappings from the single char alias to a longer more recognizable form.
BUILD_TYPE_ALIASES = {"o": "opt", "d": "debug"}

# consider anything in this whitelist of kinds to be governed by -b/-p
BUILD_KINDS = {
    "build",
    "artifact-build",
    "hazard",
    "l10n",
    "valgrind",
    "spidermonkey",
}
# mapping from shortcut name (usable with -u) to a boolean function identifying
# matching test names
def alias_prefix(prefix):
    """Return a predicate accepting test names that start with *prefix*."""

    def matcher(name):
        return name.startswith(prefix)

    return matcher
def alias_contains(infix):
    """Return a predicate accepting test names that contain *infix* anywhere."""

    def matcher(name):
        return infix in name

    return matcher
def alias_matches(pattern):
    """Return a predicate matching test names against regex *pattern* (anchored at start)."""
    compiled = re.compile(pattern)

    # NOTE: returns the Match object (or None), which callers use for truthiness.
    def matcher(name):
        return compiled.match(name)

    return matcher
UNITTEST_ALIASES = {
    # Aliases specify shorthands that can be used in try syntax. The shorthand
    # is the dictionary key, with the value representing a pattern for matching
    # unittest_try_names.
    #
    # Note that alias expansion is performed in the absence of any chunk
    # prefixes. For example, the first example above would replace "foo-7"
    # with "foobar-7". Note that a few aliases allowed chunks to be specified
    # without a leading `-`, for example 'mochitest-dt1'. That's no longer
    # supported.
    "cppunit": alias_prefix("cppunit"),
    "crashtest": alias_prefix("crashtest"),
    "crashtest-e10s": alias_prefix("crashtest-e10s"),
    "e10s": alias_contains("e10s"),
    "firefox-ui-functional": alias_prefix("firefox-ui-functional"),
    "gaia-js-integration": alias_contains("gaia-js-integration"),
    "gtest": alias_prefix("gtest"),
    "jittest": alias_prefix("jittest"),
    "jittests": alias_prefix("jittest"),
    "jsreftest": alias_prefix("jsreftest"),
    "jsreftest-e10s": alias_prefix("jsreftest-e10s"),
    "marionette": alias_prefix("marionette"),
    "mochitest": alias_prefix("mochitest"),
    "mochitests": alias_prefix("mochitest"),
    "mochitest-e10s": alias_prefix("mochitest-e10s"),
    "mochitests-e10s": alias_prefix("mochitest-e10s"),
    "mochitest-debug": alias_prefix("mochitest-debug-"),
    "mochitest-a11y": alias_contains("mochitest-a11y"),
    "mochitest-bc": alias_prefix("mochitest-browser-chrome"),
    "mochitest-e10s-bc": alias_prefix("mochitest-browser-chrome-e10s"),
    "mochitest-browser-chrome": alias_prefix("mochitest-browser-chrome"),
    "mochitest-e10s-browser-chrome": alias_prefix("mochitest-browser-chrome-e10s"),
    "mochitest-chrome": alias_contains("mochitest-chrome"),
    "mochitest-dt": alias_prefix("mochitest-devtools-chrome"),
    "mochitest-e10s-dt": alias_prefix("mochitest-devtools-chrome-e10s"),
    "mochitest-gl": alias_prefix("mochitest-webgl"),
    "mochitest-gl-e10s": alias_prefix("mochitest-webgl-e10s"),
    "mochitest-gpu": alias_prefix("mochitest-gpu"),
    "mochitest-gpu-e10s": alias_prefix("mochitest-gpu-e10s"),
    "mochitest-media": alias_prefix("mochitest-media"),
    "mochitest-media-e10s": alias_prefix("mochitest-media-e10s"),
    "mochitest-vg": alias_prefix("mochitest-valgrind"),
    "reftest": alias_matches(r"^(plain-)?reftest.*$"),
    "reftest-no-accel": alias_matches(r"^(plain-)?reftest-no-accel.*$"),
    "reftests": alias_matches(r"^(plain-)?reftest.*$"),
    "reftests-e10s": alias_matches(r"^(plain-)?reftest-e10s.*$"),
    "robocop": alias_prefix("robocop"),
    "web-platform-test": alias_prefix("web-platform-tests"),
    "web-platform-tests": alias_prefix("web-platform-tests"),
    "web-platform-tests-e10s": alias_prefix("web-platform-tests-e10s"),
    "web-platform-tests-crashtests": alias_prefix("web-platform-tests-crashtest"),
    "web-platform-tests-print-reftest": alias_prefix(
        "web-platform-tests-print-reftest"
    ),
    "web-platform-tests-reftests": alias_prefix("web-platform-tests-reftest"),
    "web-platform-tests-reftests-e10s": alias_prefix("web-platform-tests-reftest-e10s"),
    "web-platform-tests-wdspec": alias_prefix("web-platform-tests-wdspec"),
    "web-platform-tests-wdspec-e10s": alias_prefix("web-platform-tests-wdspec-e10s"),
    "xpcshell": alias_prefix("xpcshell"),
}
# unittest platforms can be specified by substring of the "pretty name", which
# is basically the old Buildbot builder name. This dict has {pretty name,
# [test_platforms]} translations, This includes only the most commonly-used
# substrings. It is OK to add new test platforms to various shorthands here;
# if you add a new Linux64 test platform for instance, people will expect that
# their previous methods of requesting "all linux64 tests" will include this
# new platform, and they shouldn't have to explicitly spell out the new platform
# every time for such cases.
#
# Note that the test platforms here are only the prefix up to the `/`.
UNITTEST_PLATFORM_PRETTY_NAMES = {
    "Ubuntu": [
        "linux32",
        "linux64",
        "linux64-asan",
        "linux1804-64",
        "linux1804-64-asan",
    ],
    "x64": ["linux64", "linux64-asan", "linux1804-64", "linux1804-64-asan"],
    "Android 7.0 Moto G5 32bit": ["android-hw-g5-7.0-arm7"],
    "Android 7.0 Samsung A51 32bit": ["android-hw-a51-11.0-arm7"],
    "Android 7.0 Samsung A51 64bit": ["android-hw-a51-11.0-aarch64"],
    "Android 8.0 Google Pixel 2 32bit": ["android-hw-p2-8.0-arm7"],
    "Android 8.0 Google Pixel 2 64bit": ["android-hw-p2-8.0-android-aarch64"],
    "Android 13.0 Google Pixel 5 32bit": ["android-hw-p5-13.0-arm7"],
    "Android 13.0 Google Pixel 5 64bit": ["android-hw-p5-13.0-android-aarch64"],
    "Windows 7": ["windows7-32"],
    "Windows 7 VM": ["windows7-32-vm"],
    "Windows 10": ["windows10-64"],
}
# Matches a trailing chunk number, e.g. "mochitest-3": group(1) is the suite
# name, group(2) the chunk number.
TEST_CHUNK_SUFFIX = re.compile("(.*)-([0-9]+)$")
def escape_whitespace_in_brackets(input_str):
    """Backslash-escape any spaces that appear inside `[...]` spans.

    Try syntax allows platform restrictions in square brackets; the spaces
    inside them would otherwise be split apart by shell-style tokenization,
    so they are escaped with backslash sequences here.
    """
    pieces = []
    inside_brackets = False
    for ch in input_str:
        if ch == "[":
            inside_brackets = True
        elif ch == "]":
            inside_brackets = False
        elif ch == " " and inside_brackets:
            pieces.append(r"\ ")
            continue
        pieces.append(ch)
    return "".join(pieces)
def split_try_msg(message):
    """Tokenize the first "try:" line of a commit message.

    Returns [] when the message contains no try syntax at all.
    """
    if "try:" not in message:
        return []
    # Only the remainder of the line containing "try:" is try syntax.
    syntax_line = message[message.index("try:") :].split("\n")[0]
    # shlex used to ensure we split correctly when giving values to argparse.
    return shlex.split(escape_whitespace_in_brackets(syntax_line))
def parse_message(message):
    """Parse try syntax out of *message* into options and task config.

    Returns a dict with "try_options" (the remaining argparse namespace as a
    dict) and "try_task_config" (artifact/profile/env settings split out).
    """
    parts = split_try_msg(message)

    # Argument parser based on try flag flags
    parser = argparse.ArgumentParser()
    parser.add_argument("-b", "--build", dest="build_types")
    parser.add_argument(
        "-p", "--platform", nargs="?", dest="platforms", const="all", default="all"
    )
    parser.add_argument(
        "-u", "--unittests", nargs="?", dest="unittests", const="all", default="all"
    )
    parser.add_argument(
        "-t", "--talos", nargs="?", dest="talos", const="all", default="none"
    )
    parser.add_argument(
        "-r", "--raptor", nargs="?", dest="raptor", const="all", default="none"
    )
    parser.add_argument(
        "-i", "--interactive", dest="interactive", action="store_true", default=False
    )
    parser.add_argument(
        "-e", "--all-emails", dest="notifications", action="store_const", const="all"
    )
    parser.add_argument(
        "-f",
        "--failure-emails",
        dest="notifications",
        action="store_const",
        const="failure",
    )
    parser.add_argument("-j", "--job", dest="jobs", action="append")
    parser.add_argument(
        "--rebuild-talos",
        dest="talos_trigger_tests",
        action="store",
        type=int,
        default=1,
    )
    parser.add_argument(
        "--rebuild-raptor",
        dest="raptor_trigger_tests",
        action="store",
        type=int,
        default=1,
    )
    parser.add_argument("--setenv", dest="env", action="append")
    parser.add_argument("--gecko-profile", dest="profile", action="store_true")
    parser.add_argument("--tag", dest="tag", action="store", default=None)
    parser.add_argument("--no-retry", dest="no_retry", action="store_true")
    parser.add_argument(
        "--include-nightly", dest="include_nightly", action="store_true"
    )
    parser.add_argument("--artifact", dest="artifact", action="store_true")

    # While we are transitioning from BB to TC, we want to push jobs to tc-worker
    # machines but not overload machines with every try push. Therefore, we add
    # this temporary option to be able to push jobs to tc-worker.
    parser.add_argument(
        "-w",
        "--taskcluster-worker",
        dest="taskcluster_worker",
        action="store_true",
        default=False,
    )

    # In order to run test jobs multiple times
    parser.add_argument("--rebuild", dest="trigger_tests", type=int, default=1)
    # parse_known_args: unknown tokens in the try line are silently ignored.
    args, _ = parser.parse_known_args(parts)

    try_options = vars(args)
    try_task_config = {
        "use-artifact-builds": try_options.pop("artifact"),
        "gecko-profile": try_options.pop("profile"),
        "env": dict(arg.split("=") for arg in try_options.pop("env") or []),
    }
    return {
        "try_options": try_options,
        "try_task_config": try_task_config,
    }
class TryOptionSyntax:
    def __init__(self, parameters, full_task_graph, graph_config):
        """
        Apply the try options in parameters.

        The resulting object has attributes:

        - build_types: a list containing zero or more of 'opt' and 'debug'
        - platforms: a list of selected platform names, or None for all
        - unittests: a list of tests, of the form given below, or None for all
        - jobs: a list of requested job names, or None for all
        - trigger_tests: the number of times tests should be triggered (--rebuild)
        - interactive: true if --interactive
        - notifications: either None if no notifications or one of 'all' or 'failure'
        - talos_trigger_tests: the number of time talos tests should be triggered (--rebuild-talos)
        - tag: restrict tests to the specified tag
        - no_retry: do not retry failed jobs

        The unittests and talos lists contain dictionaries of the form:

        {
            'test': '<suite name>',
            'platforms': [..platform names..], # to limit to only certain platforms
            'only_chunks': set([..chunk numbers..]), # to limit only to certain chunks
        }
        """
        self.full_task_graph = full_task_graph
        self.graph_config = graph_config
        # Defaults used when the push has no try syntax at all.
        self.jobs = []
        self.build_types = []
        self.platforms = []
        self.unittests = []
        self.talos = []
        self.raptor = []
        self.trigger_tests = 0
        self.interactive = False
        self.notifications = None
        self.talos_trigger_tests = 0
        self.raptor_trigger_tests = 0
        self.tag = None
        self.no_retry = False

        options = parameters["try_options"]
        if not options:
            return None
        self.jobs = self.parse_jobs(options["jobs"])
        self.build_types = self.parse_build_types(
            options["build_types"], full_task_graph
        )
        self.platforms = self.parse_platforms(options, full_task_graph)
        self.unittests = self.parse_test_option(
            "unittest_try_name", options["unittests"], full_task_graph
        )
        self.talos = self.parse_test_option(
            "talos_try_name", options["talos"], full_task_graph
        )
        self.raptor = self.parse_test_option(
            "raptor_try_name", options["raptor"], full_task_graph
        )
        self.trigger_tests = options["trigger_tests"]
        self.interactive = options["interactive"]
        self.notifications = options["notifications"]
        self.talos_trigger_tests = options["talos_trigger_tests"]
        self.raptor_trigger_tests = options["raptor_trigger_tests"]
        self.tag = options["tag"]
        self.no_retry = options["no_retry"]
        self.include_nightly = options["include_nightly"]

        self.test_tiers = self.generate_test_tiers(full_task_graph)
330 def generate_test_tiers(self, full_task_graph):
331 retval = defaultdict(set)
332 for t in full_task_graph.tasks.values():
333 if t.attributes.get("kind") == "test":
334 try:
335 tier = t.task["extra"]["treeherder"]["tier"]
336 name = t.attributes.get("unittest_try_name")
337 retval[name].add(tier)
338 except KeyError:
339 pass
341 return retval
343 def parse_jobs(self, jobs_arg):
344 if not jobs_arg or jobs_arg == ["none"]:
345 return [] # default is `-j none`
346 if jobs_arg == ["all"]:
347 return None
348 expanded = []
349 for job in jobs_arg:
350 expanded.extend(j.strip() for j in job.split(","))
351 return expanded
353 def parse_build_types(self, build_types_arg, full_task_graph):
354 if build_types_arg is None:
355 build_types_arg = []
357 build_types = [
359 for _f in (
360 BUILD_TYPE_ALIASES.get(build_type) for build_type in build_types_arg
362 if _f
365 all_types = {
366 t.attributes["build_type"]
367 for t in full_task_graph.tasks.values()
368 if "build_type" in t.attributes
370 bad_types = set(build_types) - all_types
371 if bad_types:
372 raise Exception(
373 "Unknown build type(s) [%s] specified for try" % ",".join(bad_types)
376 return build_types
378 def parse_platforms(self, options, full_task_graph):
379 platform_arg = options["platforms"]
380 if platform_arg == "all":
381 return None
383 RIDEALONG_BUILDS = self.graph_config["try"]["ridealong-builds"]
384 results = []
385 for build in platform_arg.split(","):
386 if build in ("macosx64",):
387 # Regular opt builds are faster than shippable ones, but we don't run
388 # tests against them.
389 # We want to choose them (and only them) if no tests were requested.
390 if (
391 options["unittests"] == "none"
392 and options["talos"] == "none"
393 and options["raptor"] == "none"
395 results.append("macosx64")
396 logger.info("adding macosx64 for try syntax using macosx64.")
397 # Otherwise, use _just_ the shippable builds.
398 else:
399 results.append("macosx64-shippable")
400 logger.info(
401 "adding macosx64-shippable for try syntax using macosx64."
403 else:
404 results.append(build)
405 if build in RIDEALONG_BUILDS:
406 results.extend(RIDEALONG_BUILDS[build])
407 logger.info(
408 "platform %s triggers ridealong builds %s"
409 % (build, ", ".join(RIDEALONG_BUILDS[build]))
412 test_platforms = {
413 t.attributes["test_platform"]
414 for t in full_task_graph.tasks.values()
415 if "test_platform" in t.attributes
417 build_platforms = {
418 t.attributes["build_platform"]
419 for t in full_task_graph.tasks.values()
420 if "build_platform" in t.attributes
422 all_platforms = test_platforms | build_platforms
423 bad_platforms = set(results) - all_platforms
424 if bad_platforms:
425 raise Exception(
426 "Unknown platform(s) [%s] specified for try" % ",".join(bad_platforms)
429 return results
431 def parse_test_option(self, attr_name, test_arg, full_task_graph):
434 Parse a unittest (-u) or talos (-t) option, in the context of a full
435 task graph containing available `unittest_try_name` or `talos_try_name`
436 attributes. There are three cases:
438 - test_arg is == 'none' (meaning an empty list)
439 - test_arg is == 'all' (meaning use the list of jobs for that job type)
440 - test_arg is comma string which needs to be parsed
443 # Empty job list case...
444 if test_arg is None or test_arg == "none":
445 return []
447 all_platforms = {
448 t.attributes["test_platform"].split("/")[0]
449 for t in full_task_graph.tasks.values()
450 if "test_platform" in t.attributes
453 tests = self.parse_test_opts(test_arg, all_platforms)
455 if not tests:
456 return []
458 all_tests = {
459 t.attributes[attr_name]
460 for t in full_task_graph.tasks.values()
461 if attr_name in t.attributes
464 # Special case where tests is 'all' and must be expanded
465 if tests[0]["test"] == "all":
466 results = []
467 all_entry = tests[0]
468 for test in all_tests:
469 entry = {"test": test}
470 # If there are platform restrictions copy them across the list.
471 if "platforms" in all_entry:
472 entry["platforms"] = list(all_entry["platforms"])
473 results.append(entry)
474 return self.parse_test_chunks(all_tests, results)
475 return self.parse_test_chunks(all_tests, tests)
477 def parse_test_opts(self, input_str, all_platforms):
479 Parse `testspec,testspec,..`, where each testspec is a test name
480 optionally followed by a list of test platforms or negated platforms in
481 `[]`.
483 No brackets indicates that tests should run on all platforms for which
484 builds are available. If testspecs are provided, then each is treated,
485 from left to right, as an instruction to include or (if negated)
486 exclude a set of test platforms. A single spec may expand to multiple
487 test platforms via UNITTEST_PLATFORM_PRETTY_NAMES. If the first test
488 spec is negated, processing begins with the full set of available test
489 platforms; otherwise, processing begins with an empty set of test
490 platforms.
493 # Final results which we will return.
494 tests = []
496 cur_test = {}
497 token = ""
498 in_platforms = False
500 def normalize_platforms():
501 if "platforms" not in cur_test:
502 return
503 # if the first spec is a negation, start with all platforms
504 if cur_test["platforms"][0][0] == "-":
505 platforms = all_platforms.copy()
506 else:
507 platforms = []
508 for platform in cur_test["platforms"]:
509 if platform[0] == "-":
510 platforms = [p for p in platforms if p != platform[1:]]
511 else:
512 platforms.append(platform)
513 cur_test["platforms"] = platforms
515 def add_test(value):
516 normalize_platforms()
517 cur_test["test"] = value.strip()
518 tests.insert(0, cur_test)
520 def add_platform(value):
521 platform = value.strip()
522 if platform[0] == "-":
523 negated = True
524 platform = platform[1:]
525 else:
526 negated = False
527 platforms = UNITTEST_PLATFORM_PRETTY_NAMES.get(platform, [platform])
528 if negated:
529 platforms = ["-" + p for p in platforms]
530 cur_test["platforms"] = platforms + cur_test.get("platforms", [])
532 # This might be somewhat confusing but we parse the string _backwards_ so
533 # there is no ambiguity over what state we are in.
534 for char in reversed(input_str):
536 # , indicates exiting a state
537 if char == ",":
539 # Exit a particular platform.
540 if in_platforms:
541 add_platform(token)
543 # Exit a particular test.
544 else:
545 add_test(token)
546 cur_test = {}
548 # Token must always be reset after we exit a state
549 token = ""
550 elif char == "[":
551 # Exiting platform state entering test state.
552 add_platform(token)
553 token = ""
554 in_platforms = False
555 elif char == "]":
556 # Entering platform state.
557 in_platforms = True
558 else:
559 # Accumulator.
560 token = char + token
562 # Handle any left over tokens.
563 if token:
564 add_test(token)
566 return tests
568 def handle_alias(self, test, all_tests):
570 Expand a test if its name refers to an alias, returning a list of test
571 dictionaries cloned from the first (to maintain any metadata).
573 if test["test"] not in UNITTEST_ALIASES:
574 return [test]
576 alias = UNITTEST_ALIASES[test["test"]]
578 def mktest(name):
579 newtest = copy.deepcopy(test)
580 newtest["test"] = name
581 return newtest
583 def exprmatch(alias):
584 return [t for t in all_tests if alias(t)]
586 return [mktest(t) for t in exprmatch(alias)]
588 def parse_test_chunks(self, all_tests, tests):
590 Test flags may include parameters to narrow down the number of chunks in a
591 given push. We don't model 1 chunk = 1 job in taskcluster so we must check
592 each test flag to see if it is actually specifying a chunk.
594 results = []
595 seen_chunks = {}
596 for test in tests:
597 matches = TEST_CHUNK_SUFFIX.match(test["test"])
598 if matches:
599 name = matches.group(1)
600 chunk = matches.group(2)
601 if name in seen_chunks:
602 seen_chunks[name].add(chunk)
603 else:
604 seen_chunks[name] = {chunk}
605 test["test"] = name
606 test["only_chunks"] = seen_chunks[name]
607 results.append(test)
608 else:
609 results.extend(self.handle_alias(test, all_tests))
611 # uniquify the results over the test names
612 results = sorted(
613 {test["test"]: test for test in results}.values(),
614 key=lambda test: test["test"],
616 return results
618 def find_all_attribute_suffixes(self, graph, prefix):
619 rv = set()
620 for t in graph.tasks.values():
621 for a in t.attributes:
622 if a.startswith(prefix):
623 rv.add(a[len(prefix) :])
624 return sorted(rv)
626 def task_matches(self, task):
627 attr = task.attributes.get
629 def check_run_on_projects():
630 return {"all"} & set(attr("run_on_projects", []))
632 def match_test(try_spec, attr_name):
633 run_by_default = True
634 if attr("build_type") not in self.build_types:
635 return False
637 if (
638 self.platforms is not None
639 and attr("build_platform") not in self.platforms
641 return False
642 if not check_run_on_projects():
643 run_by_default = False
645 if try_spec is None:
646 return run_by_default
648 # TODO: optimize this search a bit
649 for test in try_spec:
650 if attr(attr_name) == test["test"]:
651 break
652 else:
653 return False
655 if "only_chunks" in test and attr("test_chunk") not in test["only_chunks"]:
656 return False
658 tier = task.task["extra"]["treeherder"]["tier"]
659 if "platforms" in test:
660 if "all" in test["platforms"]:
661 return True
662 platform = attr("test_platform", "").split("/")[0]
663 # Platforms can be forced by syntax like "-u xpcshell[Windows 8]"
664 return platform in test["platforms"]
665 if tier != 1:
666 # Run Tier 2/3 tests if their build task is Tier 2/3 OR if there is
667 # no tier 1 test of that name.
668 build_task = self.full_task_graph.tasks[task.dependencies["build"]]
669 build_task_tier = build_task.task["extra"]["treeherder"]["tier"]
671 name = attr("unittest_try_name")
672 test_tiers = self.test_tiers.get(name)
674 if tier <= build_task_tier:
675 logger.debug(
676 "not skipping tier {} test {} because build task {} "
677 "is tier {}".format(
678 tier, task.label, build_task.label, build_task_tier
681 return True
682 if 1 not in test_tiers:
683 logger.debug(
684 "not skipping tier {} test {} without explicit inclusion; "
685 "it is configured to run on tiers {}".format(
686 tier, task.label, test_tiers
689 return True
690 logger.debug(
691 "skipping tier {} test {} because build task {} is "
692 "tier {} and there is a higher-tier test of the same name".format(
693 tier, task.label, build_task.label, build_task_tier
696 return False
697 if run_by_default:
698 return check_run_on_projects()
699 return False
701 if attr("job_try_name"):
702 # Beware the subtle distinction between [] and None for self.jobs and self.platforms.
703 # They will be [] if there was no try syntax, and None if try syntax was detected but
704 # they remained unspecified.
705 if self.jobs is not None:
706 return attr("job_try_name") in self.jobs
708 # User specified `-j all`
709 if (
710 self.platforms is not None
711 and attr("build_platform") not in self.platforms
713 return False # honor -p for jobs governed by a platform
714 # "all" means "everything with `try` in run_on_projects"
715 return check_run_on_projects()
716 if attr("kind") == "test":
717 return (
718 match_test(self.unittests, "unittest_try_name")
719 or match_test(self.talos, "talos_try_name")
720 or match_test(self.raptor, "raptor_try_name")
722 if attr("kind") in BUILD_KINDS:
723 if attr("build_type") not in self.build_types:
724 return False
725 if self.platforms is None:
726 # for "-p all", look for try in the 'run_on_projects' attribute
727 return check_run_on_projects()
728 if attr("build_platform") not in self.platforms:
729 return False
730 return True
731 return False
733 def __str__(self):
734 def none_for_all(list):
735 if list is None:
736 return "<all>"
737 return ", ".join(str(e) for e in list)
739 return "\n".join(
741 "build_types: " + ", ".join(self.build_types),
742 "platforms: " + none_for_all(self.platforms),
743 "unittests: " + none_for_all(self.unittests),
744 "talos: " + none_for_all(self.talos),
745 "raptor" + none_for_all(self.raptor),
746 "jobs: " + none_for_all(self.jobs),
747 "trigger_tests: " + str(self.trigger_tests),
748 "interactive: " + str(self.interactive),
749 "notifications: " + str(self.notifications),
750 "talos_trigger_tests: " + str(self.talos_trigger_tests),
751 "raptor_trigger_tests: " + str(self.raptor_trigger_tests),
752 "tag: " + str(self.tag),
753 "no_retry: " + str(self.no_retry),