# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from copy import deepcopy
from pathlib import Path
from statistics import median
from xmlrpc.client import Fault
try:
    from yaml import CLoader as Loader
except ImportError:
    from yaml import Loader
from manifestparser import ManifestParser
from manifestparser.toml import add_skip_if, alphabetize_toml_str, sort_paths
from mozci.task import TestTask
from mozci.util.taskcluster import get_task
from taskcluster.exceptions import TaskclusterRestFailure
TASK_LOG = "live_backing.log"
TASK_ARTIFACT = "public/logs/" + TASK_LOG
ATTACHMENT_DESCRIPTION = "Compressed " + TASK_ARTIFACT + " for task "
ATTACHMENT_REGEX = (
    r".*Created attachment ([0-9]+)\n.*"
    + ATTACHMENT_DESCRIPTION
    + "([A-Za-z0-9_-]+)\n.*"
)
BUGZILLA_AUTHENTICATION_HELP = "Must create a Bugzilla API key per https://github.com/mozilla/mozci-tools/blob/main/citools/test_triage_bug_filer.py"
MS_PER_MINUTE = 60 * 1000  # ms per minute
DEBUG_THRESHOLD = 40 * MS_PER_MINUTE  # 40 minutes in ms
OPT_THRESHOLD = 20 * MS_PER_MINUTE  # 20 minutes in ms
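# For example, a debug task whose median duration is 45 * MS_PER_MINUTE exceeds
# DEBUG_THRESHOLD, and an opt task with the same median exceeds OPT_THRESHOLD,
# so either would be classified as disable_too_long below.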
DURATIONS = "durations"
FAILED_RUNS = "runs_failed"
FAILURE_RATIO = 0.4  # more than this fraction of failures will disable
MEDIAN_DURATION = "duration_median"
MINIMUM_RUNS = 3  # minimum number of runs to consider success/failure
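# For example, a path failing 2 of its 4 runs has a failure ratio of 0.5, which
# exceeds FAILURE_RATIO and leads to a disable recommendation; with only 2 total
# runs (below MINIMUM_RUNS) the path would stay classified as unknown.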
MOCK_BUG_DEFAULTS = {"blocks": [], "comments": []}
MOCK_TASK_DEFAULTS = {"extra": {}, "failure_types": {}, "results": []}
MOCK_TASK_INITS = ["results"]
SUM_BY_LABEL = "sum_by_label"
TOTAL_DURATION = "duration_total"
TOTAL_RUNS = "runs_total"
WP = "testing/web-platform/"
WPT0 = WP + "tests/infrastructure"
WPT_META0 = WP + "tests/infrastructure/metadata"
WPT_META0_CLASSIC = WP + "meta/infrastructure"
WPT1 = WP + "tests"
WPT_META1 = WPT1.replace("tests", "meta")
WPT2 = WP + "mozilla/tests"
WPT_META2 = WPT2.replace("tests", "meta")
WPT_MOZILLA = "/_mozilla"
class Mock(object):
    def __init__(self, data, defaults={}, inits=[]):
        self._data = data
        self._defaults = defaults
        for name in inits:
            values = self._data.get(name, [])  # assume type is an array
            values = [Mock(value, defaults, inits) for value in values]
            self._data[name] = values
    def __getattr__(self, name):
        if name in self._data:
            return self._data[name]
        if name in self._defaults:
            return self._defaults[name]
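# For example, Mock({"id": "abc123", "results": []}, MOCK_TASK_DEFAULTS, MOCK_TASK_INITS)
# exposes .id and .results as attributes, while a missing key such as .failure_types
# falls back to the value in MOCK_TASK_DEFAULTS.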
class Classification(object):
    "Classification of the failure (not the task result)"

    DISABLE_MANIFEST = "disable_manifest"  # crash found
    DISABLE_RECOMMENDED = "disable_recommended"  # disable first failing path
    DISABLE_TOO_LONG = "disable_too_long"  # runtime threshold exceeded
    INTERMITTENT = "intermittent"
    SECONDARY = "secondary"  # secondary failing path
    SUCCESS = "success"  # path always succeeds
    UNKNOWN = "unknown"
# constant indexes for attributes of a run
class Skipfails(object):
    "mach manifest skip-fails implementation: Update manifests to skip failing tests"

    REVISION = "revision"
    REPO = "repo"
    TREEHERDER = "treeherder.mozilla.org"
    BUGZILLA_SERVER_DEFAULT = "bugzilla.allizom.org"
        command_context=None,
    ):
        self.command_context = command_context
        if self.command_context is not None:
            self.topsrcdir = self.command_context.topsrcdir
        else:
            self.topsrcdir = Path(__file__).parent.parent
        self.topsrcdir = os.path.normpath(self.topsrcdir)
        if isinstance(try_url, list) and len(try_url) == 1:
            self.try_url = try_url[0]
        else:
            self.try_url = try_url
        self.dry_run = dry_run
        self.verbose = verbose
        if bugzilla is not None:
            self.bugzilla = bugzilla
        elif "BUGZILLA" in os.environ:
            self.bugzilla = os.environ["BUGZILLA"]
        else:
            self.bugzilla = Skipfails.BUGZILLA_SERVER_DEFAULT
        if self.bugzilla == "disable":
            self.bugzilla = None  # Bug filing disabled
        self.component = "skip-fails"
        self._attach_rx = None
        self.headers = {}  # for Treeherder requests
        self.headers["Accept"] = "application/json"
        self.headers["User-Agent"] = "treeherder-pyclient"
        self.jobs_url = "https://treeherder.mozilla.org/api/jobs/"
        self.bugs = []  # preloaded bugs, currently not an updated cache
    def _initialize_bzapi(self):
        """Lazily initializes the Bugzilla API (returns True on success)"""
        if self._bzapi is None and self.bugzilla is not None:
            self._bzapi = bugzilla.Bugzilla(self.bugzilla)
            self._attach_rx = re.compile(ATTACHMENT_REGEX, flags=re.M)
        return self._bzapi is not None
    def pprint(self, obj):
        self.pp = pprint.PrettyPrinter(indent=4, stream=sys.stderr)
        if self.command_context is not None:
            self.command_context.log(
                logging.ERROR, self.component, {"error": str(e)}, "ERROR: {error}"
            )
        else:
            print(f"ERROR: {e}", file=sys.stderr, flush=True)
    def warning(self, e):
        if self.command_context is not None:
            self.command_context.log(
                logging.WARNING, self.component, {"error": str(e)}, "WARNING: {error}"
            )
        else:
            print(f"WARNING: {e}", file=sys.stderr, flush=True)
        if self.command_context is not None:
            self.command_context.log(
                logging.INFO, self.component, {"error": str(e)}, "INFO: {error}"
            )
        else:
            print(f"INFO: {e}", file=sys.stderr, flush=True)
    def full_path(self, filename):
        """Returns full path for the relative filename"""
        return os.path.join(self.topsrcdir, os.path.normpath(filename))
    def isdir(self, filename):
        """Returns True if filename is a directory"""
        return os.path.isdir(self.full_path(filename))
    def exists(self, filename):
        """Returns True if filename exists"""
        return os.path.exists(self.full_path(filename))
245 "Run skip-fails on try_url, return True on success"
247 try_url
= self
.try_url
248 revision
, repo
= self
.get_revision(try_url
)
249 if use_tasks
is not None:
250 tasks
= self
.read_tasks(use_tasks
)
251 self
.vinfo(f
"use tasks: {use_tasks}")
253 tasks
= self
.get_tasks(revision
, repo
)
254 if use_failures
is not None:
255 failures
= self
.read_failures(use_failures
)
256 self
.vinfo(f
"use failures: {use_failures}")
258 failures
= self
.get_failures(tasks
)
259 if save_failures
is not None:
260 self
.write_json(save_failures
, failures
)
261 self
.vinfo(f
"save failures: {save_failures}")
262 if save_tasks
is not None:
263 self
.write_tasks(save_tasks
, tasks
)
264 self
.vinfo(f
"save tasks: {save_tasks}")
        for manifest in failures:
            if manifest.endswith(".toml") or manifest.startswith(WP):
                wpt = failures[manifest][WPT]
                for label in failures[manifest][LL]:
                    for path in failures[manifest][LL][label][PP]:
                        classification = failures[manifest][LL][label][PP][path][CC]
                        if classification.startswith("disable_") or (
                            self.turbo and classification == Classification.SECONDARY
                        ):
                            anyjs = {}  # anyjs alternate basename = False
                            for task_id in failures[manifest][LL][label][PP][path][
                                break  # just use the first task_id
                            filename = os.path.basename(path)
                            anyjs[filename] = False
                                in failures[manifest][LL][label][PP][path][RUNS][
                                query = failures[manifest][LL][label][PP][path][
                                anyjs[filename + query] = False
                                in failures[manifest][LL][label][PP][path][RUNS][
                                any_filename = os.path.basename(
                                    failures[manifest][LL][label][PP][path][RUNS][
                                anyjs[any_filename] = False
                                if query is not None:
                                    anyjs[any_filename + query] = False
                            if max_failures >= 0 and num_failures >= max_failures:
                                    f"max_failures={max_failures} threshold reached. stopping."
            elif manifest.endswith(".ini"):
                self.warning(f"cannot process skip-fails on INI manifest: {manifest}")
            elif manifest.endswith(".list"):
                self.warning(f"cannot process skip-fails on LIST manifest: {manifest}")
            else:
                self.warning(
                    f"cannot process skip-fails on unknown manifest: {manifest}"
                )
    def get_revision(self, url):
        parsed = urllib.parse.urlparse(url)
        if parsed.scheme != "https":
            raise ValueError("try_url scheme not https")
        if parsed.netloc != Skipfails.TREEHERDER:
            raise ValueError(f"try_url server not {Skipfails.TREEHERDER}")
        if len(parsed.query) == 0:
            raise ValueError("try_url query missing")
        query = urllib.parse.parse_qs(parsed.query)
        if Skipfails.REVISION not in query:
            raise ValueError("try_url query missing revision")
        revision = query[Skipfails.REVISION][0]
        if Skipfails.REPO in query:
            repo = query[Skipfails.REPO][0]
        self.vinfo(f"considering {repo} revision={revision}")
        return revision, repo
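    # For example, a try URL such as
    # "https://treeherder.mozilla.org/jobs?repo=try&revision=abcdef0123456789"
    # (hypothetical revision) yields revision "abcdef0123456789" and repo "try".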
    def get_tasks(self, revision, repo):
        push = mozci.push.Push(revision, repo)
    def get_failures(self, tasks):
        """
        find failures and create structure comprised of runs by path:
        classification: Classification
          * unknown (default) < 3 runs
          * intermittent (not enough failures)
          * disable_recommended (enough repeated failures) >3 runs >= 4
          * disable_manifest (disable DEFAULT if no other failures)
          * secondary (not first failure in group)
        """

                Classification.DISABLE_MANIFEST: 0,
                Classification.DISABLE_RECOMMENDED: 0,
                Classification.DISABLE_TOO_LONG: 0,
                Classification.INTERMITTENT: 0,
                Classification.SECONDARY: 0,
                Classification.SUCCESS: 0,
                Classification.UNKNOWN: 0,
            CC: Classification.UNKNOWN,
        for task in tasks:  # add explicit failures
            try:
                if len(task.results) == 0:
                    continue  # ignore aborted tasks
                for manifest in task.failure_types:
                    if mm.endswith(".ini"):
                            f"cannot analyze skip-fails on INI manifests: {mm}"
                    elif mm.endswith(".list"):
                            f"cannot analyze skip-fails on LIST manifests: {mm}"
                    elif not mm.endswith(".toml"):
                        path, mm, _query, _anyjs = self.wpt_paths(mm)
                        if path is None:  # not WPT
                                f"cannot analyze skip-fails on unknown manifest type: {manifest}"
                    if mm not in manifest_paths:
                        manifest_paths[mm] = []
                        ff[mm] = deepcopy(manifest_)
                    if ll not in ff[mm][LL]:
                        ff[mm][LL][ll] = deepcopy(label_)
                    for path_type in task.failure_types[manifest]:
                        path, _type = path_type
                            path, mmpath, query, anyjs = self.wpt_paths(path)
                                    f"non-existent failure path: {path_type[0]}"
                            mm = os.path.dirname(mmpath)
                            if mm not in manifest_paths:
                                manifest_paths[mm] = []
                                ff[mm] = deepcopy(manifest_)
                            if ll not in ff[mm][LL]:
                                ff[mm][LL][ll] = deepcopy(label_)
                        if not wpt and path == mm:
                            path = DEF  # refers to the manifest itself
                        if path not in manifest_paths[mm]:
                            manifest_paths[mm].append(path)
                        if path not in ff[mm][LL][ll][PP]:
                            ff[mm][LL][ll][PP][path] = deepcopy(path_)
                        if task.id not in ff[mm][LL][ll][PP][path][RUNS]:
                            ff[mm][LL][ll][PP][path][RUNS][task.id] = deepcopy(run_)
                        ff[mm][LL][ll][PP][path][RUNS][task.id][RR] = False
                        if query is not None:
                            ff[mm][LL][ll][PP][path][RUNS][task.id][QUERY] = query
                        if anyjs is not None:
                            ff[mm][LL][ll][PP][path][RUNS][task.id][ANYJS] = anyjs
                        ff[mm][LL][ll][PP][path][TOTAL_RUNS] += 1
                        ff[mm][LL][ll][PP][path][FAILED_RUNS] += 1
            except AttributeError as ae:
                self.warning(f"unknown attribute in task (#1): {ae}")
        for task in tasks:  # add results
            try:
                if len(task.results) == 0:
                    continue  # ignore aborted tasks
                for result in task.results:
                    if mm.endswith(".ini"):
                            f"cannot analyze skip-fails on INI manifests: {mm}"
                    elif mm.endswith(".list"):
                            f"cannot analyze skip-fails on LIST manifests: {mm}"
                    elif not mm.endswith(".toml"):
                        path, mm, _query, _anyjs = self.wpt_paths(mm)
                        if path is None:  # not WPT
                                f"cannot analyze skip-fails on unknown manifest type: {result.group}"
                    if mm not in manifest_paths:
                        ff[mm] = deepcopy(manifest_)
                    if ll not in ff[mm][LL]:
                        ff[mm][LL][ll] = deepcopy(label_)
                    if task.id not in ff[mm][LL][ll][DURATIONS]:
                        # duration may be None !!!
                        ff[mm][LL][ll][DURATIONS][task.id] = result.duration or 0
                    if ff[mm][LL][ll][OPT] is None:
                        ff[mm][LL][ll][OPT] = self.get_opt_for_task(task.id)
                    for path in manifest_paths[mm]:  # all known paths
                        if path not in ff[mm][LL][ll][PP]:
                            ff[mm][LL][ll][PP][path] = deepcopy(path_)
                        if task.id not in ff[mm][LL][ll][PP][path][RUNS]:
                            ff[mm][LL][ll][PP][path][RUNS][task.id] = deepcopy(run_)
                        ff[mm][LL][ll][PP][path][RUNS][task.id][RR] = result.ok
                        ff[mm][LL][ll][PP][path][TOTAL_RUNS] += 1
                        ff[mm][LL][ll][PP][path][FAILED_RUNS] += 1
            except AttributeError as ae:
                self.warning(f"unknown attribute in task (#2): {ae}")
        for mm in ff:  # determine classifications
            for label in ff[mm][LL]:
                    opt = ff[mm][LL][ll][OPT]
                    durations = []  # summarize durations
                    for task_id in ff[mm][LL][ll][DURATIONS]:
                        duration = ff[mm][LL][ll][DURATIONS][task_id]
                        durations.append(duration)
                    if len(durations) > 0:
                        total_duration = sum(durations)
                        median_duration = median(durations)
                        ff[mm][LL][ll][TOTAL_DURATION] = total_duration
                        ff[mm][LL][ll][MEDIAN_DURATION] = median_duration
                        if (opt and median_duration > OPT_THRESHOLD) or (
                            (not opt) and median_duration > DEBUG_THRESHOLD
                        ):
                            paths = ff[mm][LL][ll][PP].keys()
                                if path not in ff[mm][LL][ll][PP]:
                                    ff[mm][LL][ll][PP][path] = deepcopy(path_)
                                if task_id not in ff[mm][LL][ll][PP][path][RUNS]:
                                    ff[mm][LL][ll][PP][path][RUNS][task.id] = deepcopy(run_)
                                    ff[mm][LL][ll][PP][path][RUNS][task.id][RR] = False
                                ff[mm][LL][ll][PP][path][TOTAL_RUNS] += 1
                                ff[mm][LL][ll][PP][path][FAILED_RUNS] += 1
                                ff[mm][LL][ll][PP][path][CC] = Classification.DISABLE_TOO_LONG
                    primary = True  # we have not seen the first failure
                    for path in sort_paths(ff[mm][LL][ll][PP]):
                        classification = ff[mm][LL][ll][PP][path][CC]
                        if classification == Classification.UNKNOWN:
                            failed_runs = ff[mm][LL][ll][PP][path][FAILED_RUNS]
                            total_runs = ff[mm][LL][ll][PP][path][TOTAL_RUNS]
                            if total_runs >= MINIMUM_RUNS:
                                if failed_runs / total_runs < FAILURE_RATIO:
                                        classification = Classification.SUCCESS
                                        classification = Classification.INTERMITTENT
                                    classification = Classification.DISABLE_MANIFEST
                                    classification = Classification.DISABLE_RECOMMENDED
                                    classification = Classification.SECONDARY
                        ff[mm][LL][ll][PP][path][CC] = classification
                        ff[mm][LL][ll][SUM_BY_LABEL][classification] += 1
    def _get_os_version(self, os, platform):
        """Return the os_version given the label platform string"""
        i = platform.find(os)
        yy = platform[j : j + 2]
        mm = platform[j + 2 : j + 4]
    def get_bug_by_id(self, id):
        """Get bug by bug id"""
        if bug is None and self._initialize_bzapi():
            bug = self._bzapi.getbug(id)
    def get_bugs_by_summary(self, summary):
        """Get bug by bug summary"""
            if b.summary == summary:
        if self._initialize_bzapi():
            query = self._bzapi.build_query(short_desc=summary)
            query["include_fields"] = [
            bugs = self._bzapi.query(query)
        summary="Bug short description",
        description="Bug description",
        version="unspecified",
        if self._initialize_bzapi():
            if not self._bzapi.logged_in:
                    "Must create a Bugzilla API key per https://github.com/mozilla/mozci-tools/blob/main/citools/test_triage_bug_filer.py"
                raise PermissionError(f"Not authenticated for Bugzilla {self.bugzilla}")
            createinfo = self._bzapi.build_createbug(
                description=description,
            createinfo["type"] = bugtype
            bug = self._bzapi.createbug(createinfo)
    def add_bug_comment(self, id, comment, meta_bug_id=None):
        """Add a comment to an existing bug"""
        if self._initialize_bzapi():
            if not self._bzapi.logged_in:
                self.error(BUGZILLA_AUTHENTICATION_HELP)
                raise PermissionError("Not authenticated for Bugzilla")
            if meta_bug_id is not None:
                blocks_add = [meta_bug_id]
            updateinfo = self._bzapi.build_update(
                comment=comment, blocks_add=blocks_add
            )
            self._bzapi.update_bugs([id], updateinfo)
        """
        Skip a failure (for TOML and WPT manifests)
        For wpt anyjs is a dictionary mapping from alternate basename to
        a boolean (indicating if the basename has been handled in the manifest)
        """
        if manifest.endswith(".toml"):
            _path, manifest, _query, _anyjs = self.wpt_paths(path)
            filename = os.path.basename(path)
        self.vinfo(f"===== Skip failure in manifest: {manifest} =====")
        skip_if = self.task_to_skip_if(task_id, wpt)
                f"Unable to calculate skip-if condition from manifest={manifest} from failure label={label}"
        if classification == Classification.DISABLE_MANIFEST:
            comment = "Disabled entire manifest due to crash result"
        elif classification == Classification.DISABLE_TOO_LONG:
            comment = "Disabled entire manifest due to excessive run time"
        else:
            filename = self.get_filename_in_manifest(manifest, path)
            comment = f'Disabled test due to failures in test file: "{filename}"'
            if classification == Classification.SECONDARY:
                comment += " (secondary)"
                bug_reference = " (secondary)"
        if wpt and len(anyjs) > 1:
            comment += "\nAdditional WPT wildcard paths:"
            for p in sorted(anyjs.keys()):
                comment += f'\n "{p}"'
        platform, testname = self.label_to_platform_testname(label)
        if platform is not None:
            comment += "\nCommand line to reproduce (experimental):\n"
            comment += f" \"mach try fuzzy -q '{platform}' {testname}\"\n"
        comment += f"\nTry URL = {try_url}"
        comment += f"\nrevision = {revision}"
        comment += f"\nrepo = {repo}"
        comment += f"\nlabel = {label}"
        if task_id is not None:
            comment += f"\ntask_id = {task_id}"
            push_id = self.get_push_id(revision, repo)
            if push_id is not None:
                comment += f"\npush_id = {push_id}"
                job_id = self.get_job_id(push_id, task_id)
                if job_id is not None:
                    comment += f"\njob_id = {job_id}"
                    ) = self.get_bug_suggestions(repo, job_id, path, anyjs)
                    if log_url is not None:
                        comment += f"\nBug suggestions: {suggestions_url}"
                        comment += f"\nSpecifically see at line {line_number} in the attached log: {log_url}"
                        comment += f'\n\n "{line}"\n'
        bug_summary = f"MANIFEST {manifest}"
        if self.bugzilla is None:
            self.warning("Bugzilla has been disabled: no bugs created or updated")
            bugs = self.get_bugs_by_summary(bug_summary)
                    f"This bug covers excluded failing tests in the MANIFEST {manifest}"
                description += "\n(generated by `mach manifest skip-fails`)"
                product, component = self.get_file_info(path)
                        f'Dry-run NOT creating bug: {product}::{component} "{bug_summary}"'
                    bug = self.create_bug(bug_summary, description, product, component)
                        f'Created Bug {bugid} {product}::{component} : "{bug_summary}"'
                product = bugs[0].product
                component = bugs[0].component
                self.vinfo(f'Found Bug {bugid} {product}::{component} "{bug_summary}"')
                if meta_bug_id is not None:
                    if meta_bug_id in bugs[0].blocks:
                            f" Bug {bugid} already blocks meta bug {meta_bug_id}"
                        meta_bug_id = None  # no need to add again
                comments = bugs[0].getcomments()
                for i in range(len(comments)):
                    text = comments[i]["text"]
                    m = self._attach_rx.findall(text)
                        attachments[a_task_id] = m[0][0]
                        if a_task_id == task_id:
                                f" Bug {bugid} already has the compressed log attached for this task"
            self.error(f'More than one bug found for summary: "{bug_summary}"')
        bug_reference = f"Bug {bugid}" + bug_reference
        comment += f"\nskip-if condition: {skip_if} # {bug_reference}"
            self.warning(f"Dry-run NOT adding comment to Bug {bugid}:\n{comment}")
            self.info(f'Dry-run NOT editing ["{filename}"] manifest: "{manifest}"')
            self.info(f'would add skip-if condition: "{skip_if}" # {bug_reference}')
            if task_id is not None and task_id not in attachments:
                self.info("would add compressed log for this task")
        elif self.bugzilla is None:
            self.warning(f"NOT adding comment to Bug {bugid}:\n{comment}")
            self.add_bug_comment(bugid, comment, meta_bug_id)
            self.info(f"Added comment to Bug {bugid}:\n{comment}")
            if meta_bug_id is not None:
                self.info(f" Bug {bugid} blocks meta Bug: {meta_bug_id}")
            if task_id is not None and task_id not in attachments:
                self.add_attachment_log_for_task(bugid, task_id)
                self.info("Added compressed log for this task")
        manifest_path = self.full_path(manifest)
            if os.path.exists(manifest_path):
                manifest_str = io.open(manifest_path, "r", encoding="utf-8").read()
                # ensure parent directories exist
                os.makedirs(os.path.dirname(manifest_path), exist_ok=True)
            manifest_str = self.wpt_add_skip_if(
                manifest_str, anyjs, skip_if, bug_reference
            )
            mp = ManifestParser(use_toml=True, document=True)
            mp.read(manifest_path)
            document = mp.source_documents[manifest_path]
            add_skip_if(document, filename, skip_if, bug_reference)
            manifest_str = alphabetize_toml_str(document)
        fp = io.open(manifest_path, "w", encoding="utf-8", newline="\n")
        fp.write(manifest_str)
        self.info(f'Edited ["{filename}"] in manifest: "{manifest}"')
        self.info(f'added skip-if condition: "{skip_if}" # {bug_reference}')
    def get_variants(self):
        """Get mozinfo for each test variant"""
        if len(self.variants) == 0:
            variants_file = "taskcluster/ci/test/variants.yml"
            variants_path = self.full_path(variants_file)
            fp = io.open(variants_path, "r", encoding="utf-8")
            raw_variants = load(fp, Loader=Loader)
            for k, v in raw_variants.items():
                    mozinfo = v["mozinfo"]
                    self.variants[k] = mozinfo
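    # For example, a variant entry in variants.yml that declares a "mozinfo" key
    # ends up in self.variants, and get_extra() later appends that mozinfo string
    # to a task's "runtimes" list when the task ran with that variant.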
    def get_task_details(self, task_id):
        """Download details for task task_id"""
        if task_id in self.tasks:  # if cached
            task = self.tasks[task_id]
        else:
            try:
                task = get_task(task_id)
            except TaskclusterRestFailure:
                self.warning(f"Task {task_id} no longer exists.")
            self.tasks[task_id] = task
    def get_extra(self, task_id):
        """Calculate extra for task task_id"""
        if task_id in self.extras:  # if cached
            extra = self.extras[task_id]
            task = self.get_task_details(task_id) or {}
            test_setting = task.get("extra", {}).get("test-setting", {})
            platform = test_setting.get("platform", {})
            platform_os = platform.get("os", {})
            if "name" in platform_os:
                os = platform_os["name"]
            if "version" in platform_os:
                os_version = platform_os["version"]
                if len(os_version) == 4:
                    os_version = os_version[0:2] + "." + os_version[2:4]
            if "build" in platform_os:
                build = platform_os["build"]
            if "arch" in platform:
                arch = platform["arch"]
                if arch == "x86" or arch.find("32") >= 0:
                if arch != "aarch64" and arch != "ppc":
            if "display" in platform:
                display = platform["display"]
            if "runtime" in test_setting:
                for k in test_setting["runtime"]:
                    if k in self.variants:
                        runtimes.append(self.variants[k])  # adds mozinfo
            if "build" in test_setting:
                tbuild = test_setting["build"]
                    if tbuild[k] == "opt":
                    elif tbuild[k] == "debug":
                        build_types.append(tbuild[k])
                        build_types.append(k)
            "arch": arch or unknown,
            "bits": bits or unknown,
            "build": build or unknown,
            "build_types": build_types,
            "display": display or unknown,
            "os_version": os_version or unknown,
            "runtimes": runtimes,
        self.extras[task_id] = extra
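    # The cached extra dict holds simple string/list values, e.g. (hypothetical)
    # {"os": "win", "os_version": "11", "build": "2009", "arch": "x86_64",
    #  "build_types": ["debug"], "runtimes": [], ...}; task_to_skip_if() below
    # turns these fields into a skip-if condition.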
    def get_opt_for_task(self, task_id):
        extra = self.get_extra(task_id)
    def task_to_skip_if(self, task_id, wpt=False):
        """Calculate the skip-if condition for failing task task_id"""
        version = "os_version"
        extra = self.get_extra(task_id)
        if extra["os"] is not None:
            if extra["os_version"] is not None:
                skip_if = "os" + eq + qq + extra["os"] + qq
                    extra["build"] is not None
                    and extra["os"] == "win"
                    and extra["os_version"] == "11"
                    and extra["build"] == "2009"
                    skip_if = "win11_2009"  # mozinfo.py:137
                    skip_if += aa + version + eq + qq + extra["os_version"] + qq
            if extra["arch"] is not None:
                skip_if += aa + arch + eq + qq + extra["arch"] + qq
            # since we always give arch/processor, bits are not required
            # if extra["bits"] is not None:
            #     skip_if += aa + "bits" + eq + extra["bits"]
                skip_if += aa + "debug"
                skip_if += aa + nn + "debug"
            if extra["display"] is not None:
                skip_if += aa + "display" + eq + qq + extra["display"] + qq
            for runtime in extra["runtimes"]:
                skip_if += aa + runtime
            for build_type in extra["build_types"]:
                # note: lite will not evaluate on non-android platforms
                if build_type not in ["debug", "lite", "opt", "shippable"]:
                    skip_if += aa + build_type
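    # For example, a failing Windows 11 2009 task collapses its os/os_version
    # clauses into the single mozinfo key "win11_2009"; the arch, debug/!debug,
    # display, runtime and build-type clauses are then appended using the
    # aa/eq/qq separator strings.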
    def get_file_info(self, path, product="Testing", component="General"):
        """
        Get bugzilla product and component for the path.
        Provide defaults (in case command_context is not defined
        or there isn't file info available).
        """
        if path != DEF and self.command_context is not None:
            reader = self.command_context.mozbuild_reader(config_mode="empty")
            info = reader.files_info([path])
            cp = info[path]["BUG_COMPONENT"]
            product = cp.product
            component = cp.component
        return product, component
    def get_filename_in_manifest(self, manifest, path):
        """return relative filename for path in manifest"""
        filename = os.path.basename(path)
        manifest_dir = os.path.dirname(manifest)
        j = min(len(manifest_dir), len(path))
        while i < j and manifest_dir[i] == path[i]:
        if i < len(manifest_dir):
            for _ in range(manifest_dir.count("/", i) + 1):
                filename = "../" + filename
            filename = path[i + 1 :]
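    # For example, with manifest "dom/tests/mochitest.toml" (hypothetical) and
    # path "dom/tests/subdir/test_foo.html", the shared prefix is "dom/tests/",
    # so the returned filename is the remainder "subdir/test_foo.html".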
    def get_push_id(self, revision, repo):
        """Return the push_id for revision and repo (or None)"""
        self.vinfo(f"Retrieving push_id for {repo} revision: {revision} ...")
        if revision in self.push_ids:  # if cached
            push_id = self.push_ids[revision]
            push_url = f"https://treeherder.mozilla.org/api/project/{repo}/push/"
            params["full"] = "true"
            params["count"] = 10
            params["revision"] = revision
            r = requests.get(push_url, headers=self.headers, params=params)
            if r.status_code != 200:
                self.warning(f"FAILED to query Treeherder = {r} for {r.url}")
                if "results" in response:
                    results = response["results"]
                    if len(results) > 0:
            self.push_ids[revision] = push_id
    def get_job_id(self, push_id, task_id):
        """Return the job_id for push_id, task_id (or None)"""
        self.vinfo(f"Retrieving job_id for push_id: {push_id}, task_id: {task_id} ...")
        k = f"{push_id}:{task_id}"
        if k in self.job_ids:  # if cached
            job_id = self.job_ids[k]
            params["push_id"] = push_id
            r = requests.get(self.jobs_url, headers=self.headers, params=params)
            if r.status_code != 200:
                self.warning(f"FAILED to query Treeherder = {r} for {r.url}")
                if "results" in response:
                    results = response["results"]
                    if len(results) > 0:
                        for result in results:
                            if len(result) > 14:
                                if result[14] == task_id:
            self.job_ids[k] = job_id
    def get_bug_suggestions(self, repo, job_id, path, anyjs=None):
        """
        Return the (suggestions_url, line_number, line, log_url)
        for the given repo and job_id
        """
            f"Retrieving bug_suggestions for {repo} job_id: {job_id}, path: {path} ..."
        suggestions_url = f"https://treeherder.mozilla.org/api/project/{repo}/jobs/{job_id}/bug_suggestions/"
        r = requests.get(suggestions_url, headers=self.headers)
        if r.status_code != 200:
            self.warning(f"FAILED to query Treeherder = {r} for {r.url}")
            if anyjs is not None:
                pathdir = os.path.dirname(path) + "/"
                paths = [pathdir + f for f in anyjs.keys()]
            if len(response) > 0:
                for sugg in response:
                        path_end = sugg.get("path_end", None)
                        # handles WPT short paths
                        if path_end is not None and p.endswith(path_end):
                            line_number = sugg["line_number"] + 1
                            line = sugg["search"]
                            log_url = f"https://treeherder.mozilla.org/logviewer?repo={repo}&job_id={job_id}&lineNumber={line_number}"
                            rv = (suggestions_url, line_number, line, log_url)
    def read_json(self, filename):
        """read data as JSON from filename"""
        fp = io.open(filename, "r", encoding="utf-8")
        data = json.load(fp)
    def read_tasks(self, filename):
        """read tasks as JSON from filename"""
        if not os.path.exists(filename):
            msg = f"use-tasks JSON file does not exist: {filename}"
            raise OSError(2, msg, filename)
        tasks = self.read_json(filename)
        tasks = [Mock(task, MOCK_TASK_DEFAULTS, MOCK_TASK_INITS) for task in tasks]
            if len(task.extra) > 0:  # pre-warm cache for extra information
                self.extras[task.id] = task.extra
    def read_failures(self, filename):
        """read failures as JSON from filename"""
        if not os.path.exists(filename):
            msg = f"use-failures JSON file does not exist: {filename}"
            raise OSError(2, msg, filename)
        failures = self.read_json(filename)
    def read_bugs(self, filename):
        """read bugs as JSON from filename"""
        if not os.path.exists(filename):
            msg = f"bugs JSON file does not exist: {filename}"
            raise OSError(2, msg, filename)
        bugs = self.read_json(filename)
        bugs = [Mock(bug, MOCK_BUG_DEFAULTS) for bug in bugs]
    def write_json(self, filename, data):
        """saves data as JSON to filename"""
        fp = io.open(filename, "w", encoding="utf-8")
        json.dump(data, fp, indent=2, sort_keys=True)
    def write_tasks(self, save_tasks, tasks):
        """saves tasks as JSON to save_tasks"""
            if not isinstance(task, TestTask):
            jtask["id"] = task.id
            jtask["label"] = task.label
            jtask["duration"] = task.duration
            jtask["result"] = task.result
            jtask["state"] = task.state
            jtask["extra"] = self.get_extra(task.id)
            for k, v in task.tags.items():
                if k == "createdForUser":
                    jtags[k] = "ci@mozilla.com"
            jtask["tags"] = jtags
            jtask["tier"] = task.tier
            jtask["results"] = [
                {"group": r.group, "ok": r.ok, "duration": r.duration}
                for r in task.results
            jtask["errors"] = None  # Bug with task.errors property??
            for k in task.failure_types:
                jft[k] = [[f[0], f[1].value] for f in task.failure_types[k]]
            jtask["failure_types"] = jft
            jtasks.append(jtask)
        self.write_json(save_tasks, jtasks)
    def label_to_platform_testname(self, label):
        """convert from label to platform, testname for mach command line"""
        platform_details = label.split("/")
        if len(platform_details) == 2:
            platform, details = platform_details
            words = details.split("-")
            platform += "/" + words.pop(0)  # opt or debug
                _chunk = int(words[-1])
                words.pop()  # remove test suffix
            testname = "-".join(words)
        return platform, testname
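    # For example, a label such as "test-linux1804-64-qr/opt-mochitest-plain-3"
    # (hypothetical) splits into the platform prefix "test-linux1804-64-qr/opt"
    # and a testname built from the remaining words once the trailing chunk
    # number is dropped.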
    def add_attachment_log_for_task(self, bugid, task_id):
        """Adds compressed log for this task to bugid"""
        log_url = f"https://firefox-ci-tc.services.mozilla.com/api/queue/v1/task/{task_id}/artifacts/public/logs/live_backing.log"
        r = requests.get(log_url, headers=self.headers)
        if r.status_code != 200:
            self.error(f"Unable to get log for task: {task_id}")
        attach_fp = tempfile.NamedTemporaryFile()
        fp = gzip.open(attach_fp, "wb")
        fp.write(r.text.encode("utf-8"))
        if self._initialize_bzapi():
            description = ATTACHMENT_DESCRIPTION + task_id
            file_name = TASK_LOG + ".gz"
            comment = "Added compressed log"
            content_type = "application/gzip"
                self._bzapi.attachfile(
                    file_name=file_name,
                    content_type=content_type,
                pass  # Fault expected: Failed to fetch key 9372091 from network storage: The specified key does not exist.
    def get_wpt_path_meta(self, shortpath):
        if shortpath.startswith(WPT0):
            meta = shortpath.replace(WPT0, WPT_META0, 1)
        elif shortpath.startswith(WPT1):
            meta = shortpath.replace(WPT1, WPT_META1, 1)
        elif shortpath.startswith(WPT2):
            meta = shortpath.replace(WPT2, WPT_META2, 1)
        elif shortpath.startswith(WPT_MOZILLA):
            shortpath = shortpath[len(WPT_MOZILLA) :]
            path = WPT2 + shortpath
            meta = WPT_META2 + shortpath
            path = WPT1 + shortpath
            meta = WPT_META1 + shortpath
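    # For example, a short path starting with "/_mozilla" is rewritten under
    # WPT2/WPT_META2 ("testing/web-platform/mozilla/tests" and
    # "testing/web-platform/mozilla/meta"), while other relative short paths
    # fall back to WPT1/WPT_META1.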
    def wpt_paths(self, shortpath):
        """
        Analyzes the WPT short path for a test and returns
        (path, manifest, query, anyjs) where
        path is the relative path to the test file
        manifest is the relative path to the file metadata
        query is the test file query parameters (or None)
        anyjs is the html test file as reported by mozci (or None)
        """
        i = shortpath.find("?")
            query = shortpath[i:]
            shortpath = shortpath[0:i]
        path, manifest = self.get_wpt_path_meta(shortpath)
        failure_type = not self.isdir(path)
            i = path.find(".any.")
                anyjs = path  # orig path
                manifest = manifest.replace(path[i:], ".any.js")
                path = path[0:i] + ".any.js"
                i = path.find(".window.")
                    anyjs = path  # orig path
                    manifest = manifest.replace(path[i:], ".window.js")
                    path = path[0:i] + ".window.js"
                    i = path.find(".worker.")
                        anyjs = path  # orig path
                        manifest = manifest.replace(path[i:], ".worker.js")
                        path = path[0:i] + ".worker.js"
        manifest_classic = ""
        if manifest.startswith(WPT_META0):
            manifest_classic = manifest.replace(WPT_META0, WPT_META0_CLASSIC, 1)
            if self.exists(manifest_classic):
                if self.exists(manifest):
                        f"Both classic {manifest_classic} and metadata {manifest} manifests exist"
                        f"Using the classic {manifest_classic} manifest as the metadata manifest {manifest} does not exist"
                    manifest = manifest_classic
        if not self.exists(path):
            return (None, None, None, None)
        return (path, manifest, query, anyjs)
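    # For example, a short path such as "css/foo.any.html?ws" (hypothetical) has
    # its query "?ws" split off; the ".any.html" ending is rewritten to ".any.js"
    # for both the test path and its metadata manifest, and anyjs records the
    # original path so alternate basenames can be tracked.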
    def wpt_add_skip_if(self, manifest_str, anyjs, skip_if, bug_reference):
        """
        Edits a WPT manifest string to add disabled condition
        anyjs is a dictionary mapping from filename and any alternate basenames to
        a boolean (indicating if the file has been handled in the manifest)
        """
        disabled_key = False
        disabled = "  disabled:"
        condition_start = "    if "
        condition = condition_start + skip_if + ": " + bug_reference
        lines = manifest_str.splitlines()
        section = None  # name of the section
            if line.startswith("["):
                if section is not None and not anyjs[section]:  # not yet handled
                    if not disabled_key:
                        lines.insert(i, disabled)
                    lines.insert(i, condition)
                    lines.insert(i + 1, "")  # blank line after condition
                    anyjs[section] = True
                section = line[1:-1]
                if section in anyjs and not anyjs[section]:
                    disabled_key = False
                    section = None  # ignore section we are not interested in
            elif section is not None:
                if line == disabled:
                elif line.startswith("  ["):
                    if i > 0 and i - 1 < n and lines[i - 1] == "":
                    if not disabled_key:
                        lines.insert(i, disabled)
                    lines.insert(i, condition)
                    lines.insert(i + 1, "")  # blank line after condition
                    anyjs[section] = True
                elif line.startswith("  ") and not line.startswith("    "):
                    if disabled_key:  # insert condition above new key
                        lines.insert(i, condition)
                        anyjs[section] = True
                        disabled_key = False
                elif line.startswith("    "):
                    if disabled_key and line == condition:
                        anyjs[section] = True  # condition already present
        if section is not None and not anyjs[section]:  # not yet handled
            if i > 0 and i - 1 < n and lines[i - 1] == "":
            if not disabled_key:
                lines.append(disabled)
            lines.append(condition)
            lines.append("")  # blank line after condition
            anyjs[section] = True
        for section in anyjs:
            if not anyjs[section]:
                if i > 0 and i - 1 < n and lines[i - 1] != "":
                    lines.append("")  # blank line before condition
                lines.append("[" + section + "]")
                lines.append(disabled)
                lines.append(condition)
                lines.append("")  # blank line after condition
        manifest_str = "\n".join(lines) + "\n"