# Bug 1682766 [wpt PR 26921] - Fix nullptr dereference accessing PolicyContainer in...
# [gecko.git] / taskcluster / taskgraph / try_option_syntax.py
# blob 8558706d3bb1148b9f4c2cfb3632368b7829f846
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import absolute_import, print_function, unicode_literals

import argparse
import copy
import logging
import re
import shlex
from collections import defaultdict

import six

logger = logging.getLogger(__name__)
# The build type aliases are very cryptic and only used in try flags these are
# mappings from the single char alias to a longer more recognizable form.
BUILD_TYPE_ALIASES = {"o": "opt", "d": "debug"}
# consider anything in this whitelist of kinds to be governed by -b/-p
BUILD_KINDS = {
    "build",
    "artifact-build",
    "hazard",
    "l10n",
    "valgrind",
    "spidermonkey",
}
# mapping from shortcut name (usable with -u) to a boolean function identifying
# matching test names
def alias_prefix(prefix):
    """Return a predicate that is true for test names starting with *prefix*."""
    return lambda name: name.startswith(prefix)
def alias_contains(infix):
    """Return a predicate that is true for test names containing *infix*."""
    return lambda name: infix in name
def alias_matches(pattern):
    """Return a predicate matching test names against the regex *pattern*.

    The pattern is compiled once, up front; the predicate anchors at the
    start of the name (``re.match`` semantics).
    """
    pattern = re.compile(pattern)
    return lambda name: pattern.match(name)
UNITTEST_ALIASES = {
    # Aliases specify shorthands that can be used in try syntax. The shorthand
    # is the dictionary key, with the value representing a pattern for matching
    # unittest_try_names.
    #
    # Note that alias expansion is performed in the absence of any chunk
    # prefixes. For example, the first example above would replace "foo-7"
    # with "foobar-7". Note that a few aliases allowed chunks to be specified
    # without a leading `-`, for example 'mochitest-dt1'. That's no longer
    # supported.
    "cppunit": alias_prefix("cppunit"),
    "crashtest": alias_prefix("crashtest"),
    "crashtest-e10s": alias_prefix("crashtest-e10s"),
    "e10s": alias_contains("e10s"),
    "firefox-ui-functional": alias_prefix("firefox-ui-functional"),
    "gaia-js-integration": alias_contains("gaia-js-integration"),
    "gtest": alias_prefix("gtest"),
    "jittest": alias_prefix("jittest"),
    "jittests": alias_prefix("jittest"),
    "jsreftest": alias_prefix("jsreftest"),
    "jsreftest-e10s": alias_prefix("jsreftest-e10s"),
    "marionette": alias_prefix("marionette"),
    "mochitest": alias_prefix("mochitest"),
    "mochitests": alias_prefix("mochitest"),
    "mochitest-e10s": alias_prefix("mochitest-e10s"),
    "mochitests-e10s": alias_prefix("mochitest-e10s"),
    "mochitest-debug": alias_prefix("mochitest-debug-"),
    "mochitest-a11y": alias_contains("mochitest-a11y"),
    "mochitest-bc": alias_prefix("mochitest-browser-chrome"),
    "mochitest-e10s-bc": alias_prefix("mochitest-browser-chrome-e10s"),
    "mochitest-browser-chrome": alias_prefix("mochitest-browser-chrome"),
    "mochitest-e10s-browser-chrome": alias_prefix("mochitest-browser-chrome-e10s"),
    "mochitest-chrome": alias_contains("mochitest-chrome"),
    "mochitest-dt": alias_prefix("mochitest-devtools-chrome"),
    "mochitest-e10s-dt": alias_prefix("mochitest-devtools-chrome-e10s"),
    "mochitest-gl": alias_prefix("mochitest-webgl"),
    "mochitest-gl-e10s": alias_prefix("mochitest-webgl-e10s"),
    "mochitest-gpu": alias_prefix("mochitest-gpu"),
    "mochitest-gpu-e10s": alias_prefix("mochitest-gpu-e10s"),
    "mochitest-media": alias_prefix("mochitest-media"),
    "mochitest-media-e10s": alias_prefix("mochitest-media-e10s"),
    "mochitest-vg": alias_prefix("mochitest-valgrind"),
    "reftest": alias_matches(r"^(plain-)?reftest.*$"),
    "reftest-no-accel": alias_matches(r"^(plain-)?reftest-no-accel.*$"),
    "reftests": alias_matches(r"^(plain-)?reftest.*$"),
    "reftests-e10s": alias_matches(r"^(plain-)?reftest-e10s.*$"),
    "reftest-gpu": alias_matches(r"^(plain-)?reftest-gpu.*$"),
    "robocop": alias_prefix("robocop"),
    "web-platform-test": alias_prefix("web-platform-tests"),
    "web-platform-tests": alias_prefix("web-platform-tests"),
    "web-platform-tests-e10s": alias_prefix("web-platform-tests-e10s"),
    "web-platform-tests-crashtests": alias_prefix("web-platform-tests-crashtest"),
    "web-platform-tests-print-reftest": alias_prefix(
        "web-platform-tests-print-reftest"
    ),
    "web-platform-tests-reftests": alias_prefix("web-platform-tests-reftest"),
    "web-platform-tests-reftests-e10s": alias_prefix("web-platform-tests-reftest-e10s"),
    "web-platform-tests-wdspec": alias_prefix("web-platform-tests-wdspec"),
    "web-platform-tests-wdspec-e10s": alias_prefix("web-platform-tests-wdspec-e10s"),
    "xpcshell": alias_prefix("xpcshell"),
}
# unittest platforms can be specified by substring of the "pretty name", which
# is basically the old Buildbot builder name. This dict has {pretty name,
# [test_platforms]} translations, This includes only the most commonly-used
# substrings. It is OK to add new test platforms to various shorthands here;
# if you add a new Linux64 test platform for instance, people will expect that
# their previous methods of requesting "all linux64 tests" will include this
# new platform, and they shouldn't have to explicitly spell out the new platform
# every time for such cases.
#
# Note that the test platforms here are only the prefix up to the `/`.
UNITTEST_PLATFORM_PRETTY_NAMES = {
    "Ubuntu": [
        "linux32",
        "linux64",
        "linux64-asan",
        "linux1804-64",
        "linux1804-64-asan",
    ],
    "x64": ["linux64", "linux64-asan", "linux1804-64", "linux1804-64-asan"],
    "Android 7.0 Moto G5 32bit": ["android-hw-g5-7.0-arm7-api-16"],
    "Android 8.0 Google Pixel 2 32bit": ["android-hw-p2-8.0-arm7-api-16"],
    "Android 8.0 Google Pixel 2 64bit": ["android-hw-p2-8.0-android-aarch64"],
    "10.14": ["macosx1014-64"],
    "Windows 7": ["windows7-32"],
    "Windows 7 VM": ["windows7-32-vm"],
    "Windows 10": ["windows10-64"],
}
# Matches a trailing numeric chunk suffix, e.g. "mochitest-3" -> ("mochitest", "3").
TEST_CHUNK_SUFFIX = re.compile("(.*)-([0-9]+)$")
def escape_whitespace_in_brackets(input_str):
    """
    In tests you may restrict them by platform [] inside of the brackets
    whitespace may occur this is typically invalid shell syntax so we escape it
    with backslash sequences.
    """
    result = ""
    in_brackets = False
    for char in input_str:
        if char == "[":
            in_brackets = True
            result += char
            continue

        if char == "]":
            in_brackets = False
            result += char
            continue

        if char == " " and in_brackets:
            # Escape with an explicit backslash so shlex.split() later keeps
            # the bracketed platform list as a single token. ("\\ " rather
            # than "\ ": same runtime value, but avoids the invalid-escape
            # SyntaxWarning.)
            result += "\\ "
            continue

        result += char

    return result
def split_try_msg(message):
    """Extract the try syntax from *message* and split it into tokens.

    Returns [] when the message contains no "try:" marker; otherwise only
    the first line starting at "try:" is considered.
    """
    try:
        try_idx = message.index("try:")
    except ValueError:
        return []
    message = message[try_idx:].split("\n")[0]
    # shlex used to ensure we split correctly when giving values to argparse.
    return shlex.split(escape_whitespace_in_brackets(message))
def parse_message(message):
    """Parse a try-syntax commit message.

    Returns a dict with two keys: "try_options" (the remaining parsed flags)
    and "try_task_config" (artifact-build, gecko-profile and env settings
    popped out of the options).
    """
    parts = split_try_msg(message)

    # Argument parser based on try flag flags
    parser = argparse.ArgumentParser()
    parser.add_argument("-b", "--build", dest="build_types")
    parser.add_argument(
        "-p", "--platform", nargs="?", dest="platforms", const="all", default="all"
    )
    parser.add_argument(
        "-u", "--unittests", nargs="?", dest="unittests", const="all", default="all"
    )
    parser.add_argument(
        "-t", "--talos", nargs="?", dest="talos", const="all", default="none"
    )
    parser.add_argument(
        "-r", "--raptor", nargs="?", dest="raptor", const="all", default="none"
    )
    parser.add_argument(
        "-i", "--interactive", dest="interactive", action="store_true", default=False
    )
    parser.add_argument(
        "-e", "--all-emails", dest="notifications", action="store_const", const="all"
    )
    parser.add_argument(
        "-f",
        "--failure-emails",
        dest="notifications",
        action="store_const",
        const="failure",
    )
    parser.add_argument("-j", "--job", dest="jobs", action="append")
    parser.add_argument(
        "--rebuild-talos",
        dest="talos_trigger_tests",
        action="store",
        type=int,
        default=1,
    )
    parser.add_argument(
        "--rebuild-raptor",
        dest="raptor_trigger_tests",
        action="store",
        type=int,
        default=1,
    )
    parser.add_argument("--setenv", dest="env", action="append")
    parser.add_argument("--gecko-profile", dest="profile", action="store_true")
    parser.add_argument("--tag", dest="tag", action="store", default=None)
    parser.add_argument("--no-retry", dest="no_retry", action="store_true")
    parser.add_argument(
        "--include-nightly", dest="include_nightly", action="store_true"
    )
    parser.add_argument("--artifact", dest="artifact", action="store_true")

    # While we are transitioning from BB to TC, we want to push jobs to tc-worker
    # machines but not overload machines with every try push. Therefore, we add
    # this temporary option to be able to push jobs to tc-worker.
    parser.add_argument(
        "-w",
        "--taskcluster-worker",
        dest="taskcluster_worker",
        action="store_true",
        default=False,
    )

    # In order to run test jobs multiple times
    parser.add_argument("--rebuild", dest="trigger_tests", type=int, default=1)
    # parse_known_args: unknown tokens in the try line are ignored rather
    # than aborting the whole push.
    args, _ = parser.parse_known_args(parts)

    try_options = vars(args)
    try_task_config = {
        "use-artifact-builds": try_options.pop("artifact"),
        "gecko-profile": try_options.pop("profile"),
        "env": dict(arg.split("=") for arg in try_options.pop("env") or []),
    }
    return {
        "try_options": try_options,
        "try_task_config": try_task_config,
    }
class TryOptionSyntax(object):
    def __init__(self, parameters, full_task_graph, graph_config):
        """
        Apply the try options in parameters.

        The resulting object has attributes:

        - build_types: a list containing zero or more of 'opt' and 'debug'
        - platforms: a list of selected platform names, or None for all
        - unittests: a list of tests, of the form given below, or None for all
        - jobs: a list of requested job names, or None for all
        - trigger_tests: the number of times tests should be triggered (--rebuild)
        - interactive: true if --interactive
        - notifications: either None if no notifications or one of 'all' or 'failure'
        - talos_trigger_tests: the number of time talos tests should be triggered (--rebuild-talos)
        - tag: restrict tests to the specified tag
        - no_retry: do not retry failed jobs

        The unittests and talos lists contain dictionaries of the form:

        {
            'test': '<suite name>',
            'platforms': [..platform names..], # to limit to only certain platforms
            'only_chunks': set([..chunk numbers..]), # to limit only to certain chunks
        }
        """
        self.full_task_graph = full_task_graph
        self.graph_config = graph_config
        # Defaults used when the push carries no try syntax at all.
        self.jobs = []
        self.build_types = []
        self.platforms = []
        self.unittests = []
        self.talos = []
        self.raptor = []
        self.trigger_tests = 0
        self.interactive = False
        self.notifications = None
        self.talos_trigger_tests = 0
        self.raptor_trigger_tests = 0
        self.tag = None
        self.no_retry = False

        options = parameters["try_options"]
        if not options:
            # No try syntax detected; keep the defaults above.
            return
        self.jobs = self.parse_jobs(options["jobs"])
        self.build_types = self.parse_build_types(
            options["build_types"], full_task_graph
        )
        self.platforms = self.parse_platforms(options, full_task_graph)
        self.unittests = self.parse_test_option(
            "unittest_try_name", options["unittests"], full_task_graph
        )
        self.talos = self.parse_test_option(
            "talos_try_name", options["talos"], full_task_graph
        )
        self.raptor = self.parse_test_option(
            "raptor_try_name", options["raptor"], full_task_graph
        )
        self.trigger_tests = options["trigger_tests"]
        self.interactive = options["interactive"]
        self.notifications = options["notifications"]
        self.talos_trigger_tests = options["talos_trigger_tests"]
        self.raptor_trigger_tests = options["raptor_trigger_tests"]
        self.tag = options["tag"]
        self.no_retry = options["no_retry"]
        self.include_nightly = options["include_nightly"]

        self.test_tiers = self.generate_test_tiers(full_task_graph)
332 def generate_test_tiers(self, full_task_graph):
333 retval = defaultdict(set)
334 for t in six.itervalues(full_task_graph.tasks):
335 if t.attributes.get("kind") == "test":
336 try:
337 tier = t.task["extra"]["treeherder"]["tier"]
338 name = t.attributes.get("unittest_try_name")
339 retval[name].add(tier)
340 except KeyError:
341 pass
343 return retval
345 def parse_jobs(self, jobs_arg):
346 if not jobs_arg or jobs_arg == ["none"]:
347 return [] # default is `-j none`
348 if jobs_arg == ["all"]:
349 return None
350 expanded = []
351 for job in jobs_arg:
352 expanded.extend(j.strip() for j in job.split(","))
353 return expanded
355 def parse_build_types(self, build_types_arg, full_task_graph):
356 if build_types_arg is None:
357 build_types_arg = []
359 build_types = [
361 for _f in (
362 BUILD_TYPE_ALIASES.get(build_type) for build_type in build_types_arg
364 if _f
367 all_types = set(
368 t.attributes["build_type"]
369 for t in six.itervalues(full_task_graph.tasks)
370 if "build_type" in t.attributes
372 bad_types = set(build_types) - all_types
373 if bad_types:
374 raise Exception(
375 "Unknown build type(s) [%s] specified for try" % ",".join(bad_types)
378 return build_types
380 def parse_platforms(self, options, full_task_graph):
381 platform_arg = options["platforms"]
382 if platform_arg == "all":
383 return None
385 RIDEALONG_BUILDS = self.graph_config["try"]["ridealong-builds"]
386 results = []
387 for build in platform_arg.split(","):
388 if build in ("macosx64",):
389 # Regular opt builds are faster than shippable ones, but we don't run
390 # tests against them.
391 # We want to choose them (and only them) if no tests were requested.
392 if (
393 options["unittests"] == "none"
394 and options["talos"] == "none"
395 and options["raptor"] == "none"
397 results.append("macosx64")
398 logger.info("adding macosx64 for try syntax using macosx64.")
399 # Otherwise, use _just_ the shippable builds.
400 else:
401 results.append("macosx64-shippable")
402 logger.info(
403 "adding macosx64-shippable for try syntax using macosx64."
405 else:
406 results.append(build)
407 if build in RIDEALONG_BUILDS:
408 results.extend(RIDEALONG_BUILDS[build])
409 logger.info(
410 "platform %s triggers ridealong builds %s"
411 % (build, ", ".join(RIDEALONG_BUILDS[build]))
414 test_platforms = set(
415 t.attributes["test_platform"]
416 for t in six.itervalues(full_task_graph.tasks)
417 if "test_platform" in t.attributes
419 build_platforms = set(
420 t.attributes["build_platform"]
421 for t in six.itervalues(full_task_graph.tasks)
422 if "build_platform" in t.attributes
424 all_platforms = test_platforms | build_platforms
425 bad_platforms = set(results) - all_platforms
426 if bad_platforms:
427 raise Exception(
428 "Unknown platform(s) [%s] specified for try" % ",".join(bad_platforms)
431 return results
433 def parse_test_option(self, attr_name, test_arg, full_task_graph):
436 Parse a unittest (-u) or talos (-t) option, in the context of a full
437 task graph containing available `unittest_try_name` or `talos_try_name`
438 attributes. There are three cases:
440 - test_arg is == 'none' (meaning an empty list)
441 - test_arg is == 'all' (meaning use the list of jobs for that job type)
442 - test_arg is comma string which needs to be parsed
445 # Empty job list case...
446 if test_arg is None or test_arg == "none":
447 return []
449 all_platforms = set(
450 t.attributes["test_platform"].split("/")[0]
451 for t in six.itervalues(full_task_graph.tasks)
452 if "test_platform" in t.attributes
455 tests = self.parse_test_opts(test_arg, all_platforms)
457 if not tests:
458 return []
460 all_tests = set(
461 t.attributes[attr_name]
462 for t in six.itervalues(full_task_graph.tasks)
463 if attr_name in t.attributes
466 # Special case where tests is 'all' and must be expanded
467 if tests[0]["test"] == "all":
468 results = []
469 all_entry = tests[0]
470 for test in all_tests:
471 entry = {"test": test}
472 # If there are platform restrictions copy them across the list.
473 if "platforms" in all_entry:
474 entry["platforms"] = list(all_entry["platforms"])
475 results.append(entry)
476 return self.parse_test_chunks(all_tests, results)
477 else:
478 return self.parse_test_chunks(all_tests, tests)
480 def parse_test_opts(self, input_str, all_platforms):
482 Parse `testspec,testspec,..`, where each testspec is a test name
483 optionally followed by a list of test platforms or negated platforms in
484 `[]`.
486 No brackets indicates that tests should run on all platforms for which
487 builds are available. If testspecs are provided, then each is treated,
488 from left to right, as an instruction to include or (if negated)
489 exclude a set of test platforms. A single spec may expand to multiple
490 test platforms via UNITTEST_PLATFORM_PRETTY_NAMES. If the first test
491 spec is negated, processing begins with the full set of available test
492 platforms; otherwise, processing begins with an empty set of test
493 platforms.
496 # Final results which we will return.
497 tests = []
499 cur_test = {}
500 token = ""
501 in_platforms = False
503 def normalize_platforms():
504 if "platforms" not in cur_test:
505 return
506 # if the first spec is a negation, start with all platforms
507 if cur_test["platforms"][0][0] == "-":
508 platforms = all_platforms.copy()
509 else:
510 platforms = []
511 for platform in cur_test["platforms"]:
512 if platform[0] == "-":
513 platforms = [p for p in platforms if p != platform[1:]]
514 else:
515 platforms.append(platform)
516 cur_test["platforms"] = platforms
518 def add_test(value):
519 normalize_platforms()
520 cur_test["test"] = value.strip()
521 tests.insert(0, cur_test)
523 def add_platform(value):
524 platform = value.strip()
525 if platform[0] == "-":
526 negated = True
527 platform = platform[1:]
528 else:
529 negated = False
530 platforms = UNITTEST_PLATFORM_PRETTY_NAMES.get(platform, [platform])
531 if negated:
532 platforms = ["-" + p for p in platforms]
533 cur_test["platforms"] = platforms + cur_test.get("platforms", [])
535 # This might be somewhat confusing but we parse the string _backwards_ so
536 # there is no ambiguity over what state we are in.
537 for char in reversed(input_str):
539 # , indicates exiting a state
540 if char == ",":
542 # Exit a particular platform.
543 if in_platforms:
544 add_platform(token)
546 # Exit a particular test.
547 else:
548 add_test(token)
549 cur_test = {}
551 # Token must always be reset after we exit a state
552 token = ""
553 elif char == "[":
554 # Exiting platform state entering test state.
555 add_platform(token)
556 token = ""
557 in_platforms = False
558 elif char == "]":
559 # Entering platform state.
560 in_platforms = True
561 else:
562 # Accumulator.
563 token = char + token
565 # Handle any left over tokens.
566 if token:
567 add_test(token)
569 return tests
571 def handle_alias(self, test, all_tests):
573 Expand a test if its name refers to an alias, returning a list of test
574 dictionaries cloned from the first (to maintain any metadata).
576 if test["test"] not in UNITTEST_ALIASES:
577 return [test]
579 alias = UNITTEST_ALIASES[test["test"]]
581 def mktest(name):
582 newtest = copy.deepcopy(test)
583 newtest["test"] = name
584 return newtest
586 def exprmatch(alias):
587 return [t for t in all_tests if alias(t)]
589 return [mktest(t) for t in exprmatch(alias)]
591 def parse_test_chunks(self, all_tests, tests):
593 Test flags may include parameters to narrow down the number of chunks in a
594 given push. We don't model 1 chunk = 1 job in taskcluster so we must check
595 each test flag to see if it is actually specifying a chunk.
597 results = []
598 seen_chunks = {}
599 for test in tests:
600 matches = TEST_CHUNK_SUFFIX.match(test["test"])
601 if matches:
602 name = matches.group(1)
603 chunk = matches.group(2)
604 if name in seen_chunks:
605 seen_chunks[name].add(chunk)
606 else:
607 seen_chunks[name] = {chunk}
608 test["test"] = name
609 test["only_chunks"] = seen_chunks[name]
610 results.append(test)
611 else:
612 results.extend(self.handle_alias(test, all_tests))
614 # uniquify the results over the test names
615 results = sorted(
616 {test["test"]: test for test in results}.values(),
617 key=lambda test: test["test"],
619 return results
621 def find_all_attribute_suffixes(self, graph, prefix):
622 rv = set()
623 for t in six.itervalues(graph.tasks):
624 for a in t.attributes:
625 if a.startswith(prefix):
626 rv.add(a[len(prefix) :])
627 return sorted(rv)
629 def task_matches(self, task):
630 attr = task.attributes.get
632 def check_run_on_projects():
633 return {"all"} & set(attr("run_on_projects", []))
635 # Don't schedule fission tests when try option syntax is used
636 if attr("unittest_variant") == "fission":
637 return False
639 def match_test(try_spec, attr_name):
640 run_by_default = True
641 if attr("build_type") not in self.build_types:
642 return False
644 if (
645 self.platforms is not None
646 and attr("build_platform") not in self.platforms
648 return False
649 elif not check_run_on_projects():
650 run_by_default = False
652 if try_spec is None:
653 return run_by_default
655 # TODO: optimize this search a bit
656 for test in try_spec:
657 if attr(attr_name) == test["test"]:
658 break
659 else:
660 return False
662 if "only_chunks" in test and attr("test_chunk") not in test["only_chunks"]:
663 return False
665 tier = task.task["extra"]["treeherder"]["tier"]
666 if "platforms" in test:
667 if "all" in test["platforms"]:
668 return True
669 platform = attr("test_platform", "").split("/")[0]
670 # Platforms can be forced by syntax like "-u xpcshell[Windows 8]"
671 return platform in test["platforms"]
672 elif tier != 1:
673 # Run Tier 2/3 tests if their build task is Tier 2/3 OR if there is
674 # no tier 1 test of that name.
675 build_task = self.full_task_graph.tasks[task.dependencies["build"]]
676 build_task_tier = build_task.task["extra"]["treeherder"]["tier"]
678 name = attr("unittest_try_name")
679 test_tiers = self.test_tiers.get(name)
681 if tier <= build_task_tier:
682 logger.debug(
683 "not skipping tier {} test {} because build task {} "
684 "is tier {}".format(
685 tier, task.label, build_task.label, build_task_tier
688 return True
689 elif 1 not in test_tiers:
690 logger.debug(
691 "not skipping tier {} test {} without explicit inclusion; "
692 "it is configured to run on tiers {}".format(
693 tier, task.label, test_tiers
696 return True
697 else:
698 logger.debug(
699 "skipping tier {} test {} because build task {} is "
700 "tier {} and there is a higher-tier test of the same name".format(
701 tier, task.label, build_task.label, build_task_tier
704 return False
705 elif run_by_default:
706 return check_run_on_projects()
707 else:
708 return False
710 if attr("job_try_name"):
711 # Beware the subtle distinction between [] and None for self.jobs and self.platforms.
712 # They will be [] if there was no try syntax, and None if try syntax was detected but
713 # they remained unspecified.
714 if self.jobs is not None:
715 return attr("job_try_name") in self.jobs
717 # User specified `-j all`
718 if (
719 self.platforms is not None
720 and attr("build_platform") not in self.platforms
722 return False # honor -p for jobs governed by a platform
723 # "all" means "everything with `try` in run_on_projects"
724 return check_run_on_projects()
725 elif attr("kind") == "test":
726 return (
727 match_test(self.unittests, "unittest_try_name")
728 or match_test(self.talos, "talos_try_name")
729 or match_test(self.raptor, "raptor_try_name")
731 elif attr("kind") in BUILD_KINDS:
732 if attr("build_type") not in self.build_types:
733 return False
734 elif self.platforms is None:
735 # for "-p all", look for try in the 'run_on_projects' attribute
736 return check_run_on_projects()
737 else:
738 if attr("build_platform") not in self.platforms:
739 return False
740 return True
741 else:
742 return False
744 def __str__(self):
745 def none_for_all(list):
746 if list is None:
747 return "<all>"
748 return ", ".join(str(e) for e in list)
750 return "\n".join(
752 "build_types: " + ", ".join(self.build_types),
753 "platforms: " + none_for_all(self.platforms),
754 "unittests: " + none_for_all(self.unittests),
755 "talos: " + none_for_all(self.talos),
756 "raptor" + none_for_all(self.raptor),
757 "jobs: " + none_for_all(self.jobs),
758 "trigger_tests: " + str(self.trigger_tests),
759 "interactive: " + str(self.interactive),
760 "notifications: " + str(self.notifications),
761 "talos_trigger_tests: " + str(self.talos_trigger_tests),
762 "raptor_trigger_tests: " + str(self.raptor_trigger_tests),
763 "tag: " + str(self.tag),
764 "no_retry: " + str(self.no_retry),