# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import argparse
import copy
import logging
import re
import shlex
from collections import defaultdict

logger = logging.getLogger(__name__)

# The build type aliases are very cryptic and only used in try flags; these are
# mappings from the single-char alias to a longer, more recognizable form.
BUILD_TYPE_ALIASES = {"o": "opt", "d": "debug"}

# consider anything in this whitelist of kinds to be governed by -b/-p
BUILD_KINDS = {
    "build",
    "artifact-build",
    "hazard",
    "l10n",
    "valgrind",
    "spidermonkey",
}


# mapping from shortcut name (usable with -u) to a boolean function identifying
# matching test names
def alias_prefix(prefix):
    return lambda name: name.startswith(prefix)


def alias_contains(infix):
    return lambda name: infix in name


def alias_matches(pattern):
    pattern = re.compile(pattern)
    return lambda name: pattern.match(name)
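
# For illustration (the test names are hypothetical): alias_prefix("gtest")
# accepts "gtest-1", alias_contains("e10s") accepts "mochitest-e10s-2", and
# alias_matches(r"^(plain-)?reftest.*$") uses re.match, so it anchors at the
# start of the name.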


UNITTEST_ALIASES = {
    # Aliases specify shorthands that can be used in try syntax. The shorthand
    # is the dictionary key, with the value representing a pattern for matching
    # unittest_try_names.
    #
    # Note that alias expansion is performed in the absence of any chunk
    # prefixes. For example, an alias mapping "foo" to "foobar" would replace
    # "foo-7" with "foobar-7". Note that a few aliases allowed chunks to be
    # specified without a leading `-`, for example 'mochitest-dt1'. That's no
    # longer supported.
55 "cppunit": alias_prefix("cppunit"),
56 "crashtest": alias_prefix("crashtest"),
57 "crashtest-e10s": alias_prefix("crashtest-e10s"),
58 "e10s": alias_contains("e10s"),
59 "firefox-ui-functional": alias_prefix("firefox-ui-functional"),
60 "gaia-js-integration": alias_contains("gaia-js-integration"),
61 "gtest": alias_prefix("gtest"),
62 "jittest": alias_prefix("jittest"),
63 "jittests": alias_prefix("jittest"),
64 "jsreftest": alias_prefix("jsreftest"),
65 "jsreftest-e10s": alias_prefix("jsreftest-e10s"),
66 "marionette": alias_prefix("marionette"),
67 "mochitest": alias_prefix("mochitest"),
68 "mochitests": alias_prefix("mochitest"),
69 "mochitest-e10s": alias_prefix("mochitest-e10s"),
70 "mochitests-e10s": alias_prefix("mochitest-e10s"),
71 "mochitest-debug": alias_prefix("mochitest-debug-"),
72 "mochitest-a11y": alias_contains("mochitest-a11y"),
73 "mochitest-bc": alias_prefix("mochitest-browser-chrome"),
74 "mochitest-e10s-bc": alias_prefix("mochitest-browser-chrome-e10s"),
75 "mochitest-browser-chrome": alias_prefix("mochitest-browser-chrome"),
76 "mochitest-e10s-browser-chrome": alias_prefix("mochitest-browser-chrome-e10s"),
77 "mochitest-chrome": alias_contains("mochitest-chrome"),
78 "mochitest-dt": alias_prefix("mochitest-devtools-chrome"),
79 "mochitest-e10s-dt": alias_prefix("mochitest-devtools-chrome-e10s"),
80 "mochitest-gl": alias_prefix("mochitest-webgl"),
81 "mochitest-gl-e10s": alias_prefix("mochitest-webgl-e10s"),
82 "mochitest-gpu": alias_prefix("mochitest-gpu"),
83 "mochitest-gpu-e10s": alias_prefix("mochitest-gpu-e10s"),
84 "mochitest-media": alias_prefix("mochitest-media"),
85 "mochitest-media-e10s": alias_prefix("mochitest-media-e10s"),
86 "mochitest-vg": alias_prefix("mochitest-valgrind"),
87 "reftest": alias_matches(r"^(plain-)?reftest.*$"),
88 "reftest-no-accel": alias_matches(r"^(plain-)?reftest-no-accel.*$"),
89 "reftests": alias_matches(r"^(plain-)?reftest.*$"),
90 "reftests-e10s": alias_matches(r"^(plain-)?reftest-e10s.*$"),
91 "robocop": alias_prefix("robocop"),
92 "web-platform-test": alias_prefix("web-platform-tests"),
93 "web-platform-tests": alias_prefix("web-platform-tests"),
94 "web-platform-tests-e10s": alias_prefix("web-platform-tests-e10s"),
95 "web-platform-tests-crashtests": alias_prefix("web-platform-tests-crashtest"),
96 "web-platform-tests-print-reftest": alias_prefix(
97 "web-platform-tests-print-reftest"
99 "web-platform-tests-reftests": alias_prefix("web-platform-tests-reftest"),
100 "web-platform-tests-reftests-e10s": alias_prefix("web-platform-tests-reftest-e10s"),
101 "web-platform-tests-wdspec": alias_prefix("web-platform-tests-wdspec"),
102 "web-platform-tests-wdspec-e10s": alias_prefix("web-platform-tests-wdspec-e10s"),
103 "xpcshell": alias_prefix("xpcshell"),

# unittest platforms can be specified by substring of the "pretty name", which
# is basically the old Buildbot builder name. This dict has {pretty name:
# [test_platforms]} translations. It includes only the most commonly-used
# substrings. It is OK to add new test platforms to various shorthands here;
# if you add a new Linux64 test platform for instance, people will expect that
# their previous methods of requesting "all linux64 tests" will include this
# new platform, and they shouldn't have to explicitly spell out the new
# platform every time for such cases.
#
# Note that the test platforms here are only the prefix up to the `/`.
UNITTEST_PLATFORM_PRETTY_NAMES = {
    "Ubuntu": [
        "linux32",
        "linux64",
        "linux64-asan",
        "linux1804-64",
        "linux1804-64-asan",
    ],
    "x64": ["linux64", "linux64-asan", "linux1804-64", "linux1804-64-asan"],
    "Android 7.0 Samsung A51 32bit": ["android-hw-a51-11.0-arm7"],
    "Android 7.0 Samsung A51 64bit": ["android-hw-a51-11.0-aarch64"],
    "Android 13.0 Google Pixel 5 32bit": ["android-hw-p5-13.0-arm7"],
    "Android 13.0 Google Pixel 5 64bit": ["android-hw-p5-13.0-android-aarch64"],
    "Android 13.0 Google Pixel 6 64bit": ["android-hw-p6-13.0-android-aarch64"],
    "Android 13.0 Samsung S21 64bit": ["android-hw-s21-13.0-android-aarch64"],
    "Windows 10": ["windows10-64"],
}

TEST_CHUNK_SUFFIX = re.compile("(.*)-([0-9]+)$")
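# e.g. TEST_CHUNK_SUFFIX.match("mochitest-3").groups() == ("mochitest", "3")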


def escape_whitespace_in_brackets(input_str):
    """
    Tests may be restricted to certain platforms with a bracketed list, e.g.
    `mochitest[Windows 10]`. Whitespace inside the brackets is typically
    invalid shell syntax, so we escape it with backslash sequences.
    """
    result = ""
    in_brackets = False
    for char in input_str:
        if char == "[":
            in_brackets = True
            result += char
            continue

        if char == "]":
            in_brackets = False
            result += char
            continue

        if char == " " and in_brackets:
            result += r"\ "
            continue

        result += char

    return result
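
# A minimal sketch of the escaping (hypothetical input):
#
#   escape_whitespace_in_brackets("-u mochitest[Windows 10,linux64]")
#   # -> r"-u mochitest[Windows\ 10,linux64]"
#
# so that shlex.split() in split_try_msg() keeps each bracketed filter attached
# to its test token.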


def split_try_msg(message):
    try:
        try_idx = message.index("try:")
    except ValueError:
        return []
    message = message[try_idx:].split("\n")[0]
    # shlex used to ensure we split correctly when giving values to argparse.
    return shlex.split(escape_whitespace_in_brackets(message))
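
# For illustration (hypothetical commit message):
#
#   split_try_msg("Bug 123 - fix thing. try: -b do -p linux64 -u mochitest")
#   # -> ["try:", "-b", "do", "-p", "linux64", "-u", "mochitest"]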


def parse_message(message):
    parts = split_try_msg(message)

    # Argument parser based on try flags
    parser = argparse.ArgumentParser()
    parser.add_argument("-b", "--build", dest="build_types")
    parser.add_argument(
        "-p", "--platform", nargs="?", dest="platforms", const="all", default="all"
    )
    parser.add_argument(
        "-u", "--unittests", nargs="?", dest="unittests", const="all", default="all"
    )
    parser.add_argument(
        "-t", "--talos", nargs="?", dest="talos", const="all", default="none"
    )
    parser.add_argument(
        "-r", "--raptor", nargs="?", dest="raptor", const="all", default="none"
    )
    parser.add_argument(
        "-i", "--interactive", dest="interactive", action="store_true", default=False
    )
    parser.add_argument(
        "-e", "--all-emails", dest="notifications", action="store_const", const="all"
    )
    parser.add_argument(
        "-f",
        "--failure-emails",
        dest="notifications",
        action="store_const",
        const="failure",
    )
    parser.add_argument("-j", "--job", dest="jobs", action="append")
    parser.add_argument(
        "--rebuild-talos",
        dest="talos_trigger_tests",
        action="store",
        type=int,
        default=1,
    )
    parser.add_argument(
        "--rebuild-raptor",
        dest="raptor_trigger_tests",
        action="store",
        type=int,
        default=1,
    )
    parser.add_argument("--setenv", dest="env", action="append")
    parser.add_argument("--gecko-profile", dest="profile", action="store_true")
    parser.add_argument("--tag", dest="tag", action="store", default=None)
    parser.add_argument("--no-retry", dest="no_retry", action="store_true")
    parser.add_argument(
        "--include-nightly", dest="include_nightly", action="store_true"
    )
    parser.add_argument("--artifact", dest="artifact", action="store_true")

    # While we are transitioning from BB to TC, we want to push jobs to tc-worker
    # machines but not overload machines with every try push. Therefore, we add
    # this temporary option to be able to push jobs to tc-worker.
    parser.add_argument(
        "-w",
        "--taskcluster-worker",
        dest="taskcluster_worker",
        action="store_true",
        default=False,
    )

    # In order to run test jobs multiple times
    parser.add_argument("--rebuild", dest="trigger_tests", type=int, default=1)
    args, _ = parser.parse_known_args(parts)

    try_options = vars(args)
    try_task_config = {
        "use-artifact-builds": try_options.pop("artifact"),
        "gecko-profile": try_options.pop("profile"),
        "env": dict(arg.split("=") for arg in try_options.pop("env") or []),
    }
    return {
        "try_options": try_options,
        "try_task_config": try_task_config,
    }


class TryOptionSyntax:
    def __init__(self, parameters, full_task_graph, graph_config):
        """
        Apply the try options in parameters.

        The resulting object has attributes:

        - build_types: a list containing zero or more of 'opt' and 'debug'
        - platforms: a list of selected platform names, or None for all
        - unittests: a list of tests, of the form given below, or None for all
        - jobs: a list of requested job names, or None for all
        - trigger_tests: the number of times tests should be triggered (--rebuild)
        - interactive: true if --interactive
        - notifications: either None if no notifications or one of 'all' or 'failure'
        - talos_trigger_tests: the number of times talos tests should be triggered (--rebuild-talos)
        - tag: restrict tests to the specified tag
        - no_retry: do not retry failed jobs

        The unittests and talos lists contain dictionaries of the form:

        {
            'test': '<suite name>',
            'platforms': [..platform names..], # to limit to only certain platforms
            'only_chunks': set([..chunk numbers..]), # to limit only to certain chunks
        }
        """
        self.full_task_graph = full_task_graph
        self.graph_config = graph_config
        self.jobs = []
        self.build_types = []
        self.platforms = []
        self.unittests = []
        self.talos = []
        self.raptor = []
        self.trigger_tests = 0
        self.interactive = False
        self.notifications = None
        self.talos_trigger_tests = 0
        self.raptor_trigger_tests = 0
        self.tag = None
        self.no_retry = False

        options = parameters["try_options"]
        if not options:
            return None
        self.jobs = self.parse_jobs(options["jobs"])
        self.build_types = self.parse_build_types(
            options["build_types"], full_task_graph
        )
        self.platforms = self.parse_platforms(options, full_task_graph)
        self.unittests = self.parse_test_option(
            "unittest_try_name", options["unittests"], full_task_graph
        )
        self.talos = self.parse_test_option(
            "talos_try_name", options["talos"], full_task_graph
        )
        self.raptor = self.parse_test_option(
            "raptor_try_name", options["raptor"], full_task_graph
        )
        self.trigger_tests = options["trigger_tests"]
        self.interactive = options["interactive"]
        self.notifications = options["notifications"]
        self.talos_trigger_tests = options["talos_trigger_tests"]
        self.raptor_trigger_tests = options["raptor_trigger_tests"]
        self.tag = options["tag"]
        self.no_retry = options["no_retry"]
        self.include_nightly = options["include_nightly"]

        self.test_tiers = self.generate_test_tiers(full_task_graph)

    def generate_test_tiers(self, full_task_graph):
        retval = defaultdict(set)
        for t in full_task_graph.tasks.values():
            if t.attributes.get("kind") == "test":
                try:
                    tier = t.task["extra"]["treeherder"]["tier"]
                    name = t.attributes.get("unittest_try_name")
                    retval[name].add(tier)
                except KeyError:
                    pass

        return retval

    def parse_jobs(self, jobs_arg):
        if not jobs_arg or jobs_arg == ["none"]:
            return []  # default is `-j none`
        if jobs_arg == ["all"]:
            return None
        expanded = []
        for job in jobs_arg:
            expanded.extend(j.strip() for j in job.split(","))
        return expanded
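
    # For illustration: parse_jobs(None) and parse_jobs(["none"]) -> [],
    # parse_jobs(["all"]) -> None (meaning "no restriction"), and
    # parse_jobs(["searchfox,mozlint"]) -> ["searchfox", "mozlint"]
    # (the job names are hypothetical).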

    def parse_build_types(self, build_types_arg, full_task_graph):
        if build_types_arg is None:
            build_types_arg = []

        build_types = [
            _f
            for _f in (
                BUILD_TYPE_ALIASES.get(build_type) for build_type in build_types_arg
            )
            if _f
        ]

        all_types = {
            t.attributes["build_type"]
            for t in full_task_graph.tasks.values()
            if "build_type" in t.attributes
        }
        bad_types = set(build_types) - all_types
        if bad_types:
            raise Exception(
                "Unknown build type(s) [%s] specified for try" % ",".join(bad_types)
            )

        return build_types
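
    # A sketch of the expansion: parse_build_types("do", ...) yields
    # ["debug", "opt"] via BUILD_TYPE_ALIASES; characters with no alias map to
    # None and are silently dropped by the `if _f` filter rather than raising.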

    def parse_platforms(self, options, full_task_graph):
        platform_arg = options["platforms"]
        if platform_arg == "all":
            return None

        RIDEALONG_BUILDS = self.graph_config["try"]["ridealong-builds"]
        results = []
        for build in platform_arg.split(","):
            if build in ("macosx64",):
                # Regular opt builds are faster than shippable ones, but we don't run
                # tests against them.
                # We want to choose them (and only them) if no tests were requested.
                if (
                    options["unittests"] == "none"
                    and options["talos"] == "none"
                    and options["raptor"] == "none"
                ):
                    results.append("macosx64")
                    logger.info("adding macosx64 for try syntax using macosx64.")
                # Otherwise, use _just_ the shippable builds.
                else:
                    results.append("macosx64-shippable")
                    logger.info(
                        "adding macosx64-shippable for try syntax using macosx64."
                    )
            else:
                results.append(build)
            if build in RIDEALONG_BUILDS:
                results.extend(RIDEALONG_BUILDS[build])
                logger.info(
                    "platform %s triggers ridealong builds %s"
                    % (build, ", ".join(RIDEALONG_BUILDS[build]))
                )

        test_platforms = {
            t.attributes["test_platform"]
            for t in full_task_graph.tasks.values()
            if "test_platform" in t.attributes
        }
        build_platforms = {
            t.attributes["build_platform"]
            for t in full_task_graph.tasks.values()
            if "build_platform" in t.attributes
        }
        all_platforms = test_platforms | build_platforms
        bad_platforms = set(results) - all_platforms
        if bad_platforms:
            raise Exception(
                "Unknown platform(s) [%s] specified for try" % ",".join(bad_platforms)
            )

        return results
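
    # For illustration (the ridealong config is hypothetical): with
    # graph_config["try"]["ridealong-builds"] == {"linux64": ["sm-plain"]},
    # "-p linux64" yields ["linux64", "sm-plain"], and every entry is then
    # validated against the platforms present in the full task graph.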

    def parse_test_option(self, attr_name, test_arg, full_task_graph):
        """
        Parse a unittest (-u) or talos (-t) option, in the context of a full
        task graph containing available `unittest_try_name` or `talos_try_name`
        attributes. There are three cases:

        - test_arg is == 'none' (meaning an empty list)
        - test_arg is == 'all' (meaning use the list of jobs for that job type)
        - test_arg is a comma-separated string that needs to be parsed
        """

        # Empty job list case...
        if test_arg is None or test_arg == "none":
            return []

        all_platforms = {
            t.attributes["test_platform"].split("/")[0]
            for t in full_task_graph.tasks.values()
            if "test_platform" in t.attributes
        }

        tests = self.parse_test_opts(test_arg, all_platforms)

        if not tests:
            return []

        all_tests = {
            t.attributes[attr_name]
            for t in full_task_graph.tasks.values()
            if attr_name in t.attributes
        }

        # Special case where tests is 'all' and must be expanded
        if tests[0]["test"] == "all":
            results = []
            all_entry = tests[0]
            for test in all_tests:
                entry = {"test": test}
                # If there are platform restrictions copy them across the list.
                if "platforms" in all_entry:
                    entry["platforms"] = list(all_entry["platforms"])
                results.append(entry)
            return self.parse_test_chunks(all_tests, results)
        return self.parse_test_chunks(all_tests, tests)
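
    # A sketch of the 'all' special case: "-u all[Windows 10]" produces one
    # entry per known unittest_try_name, each carrying the same platform
    # restriction, before chunk handling in parse_test_chunks().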

    def parse_test_opts(self, input_str, all_platforms):
        """
        Parse `testspec,testspec,..`, where each testspec is a test name
        optionally followed by a list of test platforms or negated platforms in
        `[]`.

        No brackets indicates that tests should run on all platforms for which
        builds are available. If testspecs are provided, then each is treated,
        from left to right, as an instruction to include or (if negated)
        exclude a set of test platforms. A single spec may expand to multiple
        test platforms via UNITTEST_PLATFORM_PRETTY_NAMES. If the first test
        spec is negated, processing begins with the full set of available test
        platforms; otherwise, processing begins with an empty set of test
        platforms.
        """

        # Final results which we will return.
        tests = []

        cur_test = {}
        token = ""
        in_platforms = False

        def normalize_platforms():
            if "platforms" not in cur_test:
                return
            # if the first spec is a negation, start with all platforms
            if cur_test["platforms"][0][0] == "-":
                platforms = all_platforms.copy()
            else:
                platforms = []
            for platform in cur_test["platforms"]:
                if platform[0] == "-":
                    platforms = [p for p in platforms if p != platform[1:]]
                else:
                    platforms.append(platform)
            cur_test["platforms"] = platforms

        def add_test(value):
            normalize_platforms()
            cur_test["test"] = value.strip()
            tests.insert(0, cur_test)

        def add_platform(value):
            platform = value.strip()
            if platform[0] == "-":
                negated = True
                platform = platform[1:]
            else:
                negated = False
            platforms = UNITTEST_PLATFORM_PRETTY_NAMES.get(platform, [platform])
            if negated:
                platforms = ["-" + p for p in platforms]
            cur_test["platforms"] = platforms + cur_test.get("platforms", [])

        # This might be somewhat confusing but we parse the string _backwards_ so
        # there is no ambiguity over what state we are in.
        for char in reversed(input_str):
            # , indicates exiting a state
            if char == ",":
                # Exit a particular platform.
                if in_platforms:
                    add_platform(token)

                # Exit a particular test.
                else:
                    add_test(token)
                    cur_test = {}

                # Token must always be reset after we exit a state
                token = ""
            elif char == "[":
                # Exiting platform state entering test state.
                add_platform(token)
                token = ""
                in_platforms = False
            elif char == "]":
                # Entering platform state.
                in_platforms = True
            else:
                # Accumulator.
                token = char + token

        # Handle any left over tokens.
        if token:
            add_test(token)

        return tests
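
    # A minimal sketch of the backwards parse (suite names are illustrative):
    #
    #   parse_test_opts("mochitest[Ubuntu,-linux64],gtest", all_platforms)
    #   # -> [{"test": "mochitest",
    #   #      "platforms": ["linux32", "linux64-asan", ...]},
    #   #     {"test": "gtest"}]
    #
    # "Ubuntu" expands via UNITTEST_PLATFORM_PRETTY_NAMES, then the negated
    # "-linux64" removes linux64 from that expansion.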

    def handle_alias(self, test, all_tests):
        """
        Expand a test if its name refers to an alias, returning a list of test
        dictionaries cloned from the first (to maintain any metadata).
        """
        if test["test"] not in UNITTEST_ALIASES:
            return [test]

        alias = UNITTEST_ALIASES[test["test"]]

        def mktest(name):
            newtest = copy.deepcopy(test)
            newtest["test"] = name
            return newtest

        def exprmatch(alias):
            return [t for t in all_tests if alias(t)]

        return [mktest(t) for t in exprmatch(alias)]

    def parse_test_chunks(self, all_tests, tests):
        """
        Test flags may include parameters to narrow down the number of chunks in a
        given push. We don't model 1 chunk = 1 job in taskcluster so we must check
        each test flag to see if it is actually specifying a chunk.
        """
        results = []
        seen_chunks = {}
        for test in tests:
            matches = TEST_CHUNK_SUFFIX.match(test["test"])
            if matches:
                name = matches.group(1)
                chunk = matches.group(2)
                if name in seen_chunks:
                    seen_chunks[name].add(chunk)
                else:
                    seen_chunks[name] = {chunk}
                    test["test"] = name
                    test["only_chunks"] = seen_chunks[name]
                    results.append(test)
            else:
                results.extend(self.handle_alias(test, all_tests))

        # uniquify the results over the test names
        results = sorted(
            {test["test"]: test for test in results}.values(),
            key=lambda test: test["test"],
        )
        return results
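
    # For illustration: [{"test": "mochitest-1"}, {"test": "mochitest-3"}]
    # collapses into a single {"test": "mochitest", "only_chunks": {"1", "3"}}
    # entry, because chunks of one suite share the same seen_chunks set and
    # only the first occurrence is appended.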

    def find_all_attribute_suffixes(self, graph, prefix):
        rv = set()
        for t in graph.tasks.values():
            for a in t.attributes:
                if a.startswith(prefix):
                    rv.add(a[len(prefix) :])
        return sorted(rv)

    def task_matches(self, task):
        attr = task.attributes.get

        def check_run_on_projects():
            return {"all"} & set(attr("run_on_projects", []))

        def match_test(try_spec, attr_name):
            run_by_default = True
            if attr("build_type") not in self.build_types:
                return False
            if (
                self.platforms is not None
                and attr("build_platform") not in self.platforms
            ):
                return False
            if not check_run_on_projects():
                run_by_default = False
            if try_spec is None:
                return run_by_default

            # TODO: optimize this search a bit
            for test in try_spec:
                if attr(attr_name) == test["test"]:
                    break
            else:
                return False

            if "only_chunks" in test and attr("test_chunk") not in test["only_chunks"]:
                return False

            tier = task.task["extra"]["treeherder"]["tier"]
            if "platforms" in test:
                if "all" in test["platforms"]:
                    return True
                platform = attr("test_platform", "").split("/")[0]
                # Platforms can be forced by syntax like "-u xpcshell[Windows 8]"
                return platform in test["platforms"]
            if tier != 1:
                # Run Tier 2/3 tests if their build task is Tier 2/3 OR if there is
                # no tier 1 test of that name.
                build_task = self.full_task_graph.tasks[task.dependencies["build"]]
                build_task_tier = build_task.task["extra"]["treeherder"]["tier"]

                name = attr("unittest_try_name")
                test_tiers = self.test_tiers.get(name)

                if tier <= build_task_tier:
                    logger.debug(
                        "not skipping tier {} test {} because build task {} "
                        "is tier {}".format(
                            tier, task.label, build_task.label, build_task_tier
                        )
                    )
                    return True
                if 1 not in test_tiers:
                    logger.debug(
                        "not skipping tier {} test {} without explicit inclusion; "
                        "it is configured to run on tiers {}".format(
                            tier, task.label, test_tiers
                        )
                    )
                    return True
                logger.debug(
                    "skipping tier {} test {} because build task {} is "
                    "tier {} and there is a higher-tier test of the same name".format(
                        tier, task.label, build_task.label, build_task_tier
                    )
                )
                return False
            if run_by_default:
                return check_run_on_projects()
            return False

        if attr("job_try_name"):
            # Beware the subtle distinction between [] and None for self.jobs and self.platforms.
            # They will be [] if there was no try syntax, and None if try syntax was detected but
            # they remained unspecified.
            if self.jobs is not None:
                return attr("job_try_name") in self.jobs

            # User specified `-j all`
            if (
                self.platforms is not None
                and attr("build_platform") not in self.platforms
            ):
                return False  # honor -p for jobs governed by a platform
            # "all" means "everything with `try` in run_on_projects"
            return check_run_on_projects()
        if attr("kind") == "test":
            return (
                match_test(self.unittests, "unittest_try_name")
                or match_test(self.talos, "talos_try_name")
                or match_test(self.raptor, "raptor_try_name")
            )
        if attr("kind") in BUILD_KINDS:
            if attr("build_type") not in self.build_types:
                return False
            if self.platforms is None:
                # for "-p all", look for try in the 'run_on_projects' attribute
                return check_run_on_projects()
            if attr("build_platform") not in self.platforms:
                return False
            return True
        return False
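
    # For illustration (hypothetical attributes): given "try: -b d -p linux64
    # -u mochitest-bc", a kind=="test" task matches only if its build_type is
    # "debug", its build_platform is selected by -p, and its unittest_try_name
    # is one of the names the "mochitest-bc" alias expanded to.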

    def __str__(self):
        def none_for_all(lst):
            if lst is None:
                return "<all>"
            return ", ".join(str(e) for e in lst)

        return "\n".join(
            [
                "build_types: " + ", ".join(self.build_types),
                "platforms: " + none_for_all(self.platforms),
                "unittests: " + none_for_all(self.unittests),
                "talos: " + none_for_all(self.talos),
                "raptor: " + none_for_all(self.raptor),
                "jobs: " + none_for_all(self.jobs),
                "trigger_tests: " + str(self.trigger_tests),
                "interactive: " + str(self.interactive),
                "notifications: " + str(self.notifications),
                "talos_trigger_tests: " + str(self.talos_trigger_tests),
                "raptor_trigger_tests: " + str(self.raptor_trigger_tests),
                "tag: " + str(self.tag),
                "no_retry: " + str(self.no_retry),
            ]
        )