# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import gzip
import io
import json
import logging
import os
import os.path
import pprint
import re
import sys
import tempfile
import urllib.parse
from copy import deepcopy
from enum import Enum
from pathlib import Path
from statistics import median
from xmlrpc.client import Fault

from yaml import load

try:
    from yaml import CLoader as Loader
except ImportError:
    from yaml import Loader

import bugzilla
import mozci.push
import requests
from manifestparser import ManifestParser
from manifestparser.toml import add_skip_if, alphabetize_toml_str, sort_paths
from mozci.task import TestTask
from mozci.util.taskcluster import get_task

from taskcluster.exceptions import TaskclusterRestFailure

TASK_LOG = "live_backing.log"
TASK_ARTIFACT = "public/logs/" + TASK_LOG
ATTACHMENT_DESCRIPTION = "Compressed " + TASK_ARTIFACT + " for task "
ATTACHMENT_REGEX = (
    r".*Created attachment ([0-9]+)\n.*"
    + ATTACHMENT_DESCRIPTION
    + "([A-Za-z0-9_-]+)\n.*"
)

BUGZILLA_AUTHENTICATION_HELP = "Must create a Bugzilla API key per https://github.com/mozilla/mozci-tools/blob/main/citools/test_triage_bug_filer.py"

MS_PER_MINUTE = 60 * 1000  # ms per minute
DEBUG_THRESHOLD = 40 * MS_PER_MINUTE  # 40 minutes in ms
OPT_THRESHOLD = 20 * MS_PER_MINUTE  # 20 minutes in ms

CC = "classification"
DEF = "DEFAULT"
DURATIONS = "durations"
FAILED_RUNS = "failed_runs"
FAILURE_RATIO = 0.4  # more than this fraction of failures will disable
LL = "label"
MEDIAN_DURATION = "median_duration"
MINIMUM_RUNS = 3  # minimum number of runs to consider success/failure
OPT = "opt"
PP = "path"
RUNS = "runs"
SUM_BY_LABEL = "sum_by_label"
TOTAL_DURATION = "total_duration"
TOTAL_RUNS = "total_runs"
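
# Note: the short keys above (CC, DEF, LL, PP, RUNS, ...) are the dictionary keys
# used in the nested failures structure built by Skipfails.get_failures():
# manifest -> label -> path -> per-run data.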


class MockResult(object):
    def __init__(self, result):
        self.result = result

    @property
    def duration(self):
        return self.result["duration"]

    @property
    def group(self):
        return self.result["group"]

    @property
    def ok(self):
        return self.result["ok"]


class MockTask(object):
    def __init__(self, task):
        self.task = task
        if "results" in self.task:
            self.task["results"] = [
                MockResult(result) for result in self.task["results"]
            ]
        else:
            self.task["results"] = []

    @property
    def failure_types(self):
        if "failure_types" in self.task:
            return self.task["failure_types"]
        else:  # note no failure_types in Task object
            return {}

    @property
    def duration(self):
        return self.task["duration"]

    @property
    def id(self):
        return self.task["id"]

    @property
    def label(self):
        return self.task["label"]

    @property
    def results(self):
        if "results" in self.task:
            return self.task["results"]
        else:
            return []


class Classification(object):
    "Classification of the failure (not the task result)"

    DISABLE_MANIFEST = "disable_manifest"  # crash found
    DISABLE_RECOMMENDED = "disable_recommended"  # disable first failing path
    DISABLE_TOO_LONG = "disable_too_long"  # runtime threshold exceeded
    INTERMITTENT = "intermittent"
    SECONDARY = "secondary"  # secondary failing path
    SUCCESS = "success"  # path always succeeds
    UNKNOWN = "unknown"


class Run(Enum):
    """
    constant indexes for attributes of a run
    """

    MANIFEST = 0
    TASK_ID = 1
    TASK_LABEL = 2
    RESULT = 3
    CLASSIFICATION = 4


class Skipfails(object):
    "mach manifest skip-fails implementation: Update manifests to skip failing tests"

    REPO = "repo"
    REVISION = "revision"
    TREEHERDER = "treeherder.mozilla.org"
    BUGZILLA_SERVER_DEFAULT = "bugzilla.allizom.org"

    def __init__(
        self,
        command_context=None,
        try_url="",
        verbose=False,
        bugzilla=None,
        dry_run=False,
        turbo=False,
    ):
        self.command_context = command_context
        if self.command_context is not None:
            self.topsrcdir = self.command_context.topsrcdir
        else:
            self.topsrcdir = Path(__file__).parent.parent
        self.topsrcdir = os.path.normpath(self.topsrcdir)
        if isinstance(try_url, list) and len(try_url) == 1:
            self.try_url = try_url[0]
        else:
            self.try_url = try_url
        self.dry_run = dry_run
        self.verbose = verbose
        self.turbo = turbo
        if bugzilla is not None:
            self.bugzilla = bugzilla
        elif "BUGZILLA" in os.environ:
            self.bugzilla = os.environ["BUGZILLA"]
        else:
            self.bugzilla = Skipfails.BUGZILLA_SERVER_DEFAULT
        self.component = "skip-fails"
        self._bzapi = None
        self._attach_rx = None
        self.variants = {}
        self.tasks = {}
        self.pp = None
        self.headers = {}  # for Treeherder requests
        self.headers["Accept"] = "application/json"
        self.headers["User-Agent"] = "treeherder-pyclient"
        self.jobs_url = "https://treeherder.mozilla.org/api/jobs/"
        self.push_ids = {}
        self.job_ids = {}
        self.extras = {}

    def _initialize_bzapi(self):
        """Lazily initializes the Bugzilla API"""
        if self._bzapi is None:
            self._bzapi = bugzilla.Bugzilla(self.bugzilla)
            self._attach_rx = re.compile(ATTACHMENT_REGEX, flags=re.M)

    def pprint(self, obj):
        if self.pp is None:
            self.pp = pprint.PrettyPrinter(indent=4, stream=sys.stderr)
        self.pp.pprint(obj)
        sys.stderr.flush()

    def error(self, e):
        if self.command_context is not None:
            self.command_context.log(
                logging.ERROR, self.component, {"error": str(e)}, "ERROR: {error}"
            )
        else:
            print(f"ERROR: {e}", file=sys.stderr, flush=True)

    def warning(self, e):
        if self.command_context is not None:
            self.command_context.log(
                logging.WARNING, self.component, {"error": str(e)}, "WARNING: {error}"
            )
        else:
            print(f"WARNING: {e}", file=sys.stderr, flush=True)

    def info(self, e):
        if self.command_context is not None:
            self.command_context.log(
                logging.INFO, self.component, {"error": str(e)}, "INFO: {error}"
            )
        else:
            print(f"INFO: {e}", file=sys.stderr, flush=True)

    def vinfo(self, e):
        if self.verbose:
            self.info(e)

    def run(
        self,
        meta_bug_id=None,
        save_tasks=None,
        use_tasks=None,
        save_failures=None,
        use_failures=None,
        max_failures=-1,
    ):
        "Run skip-fails on try_url, return True on success"

        try_url = self.try_url
        revision, repo = self.get_revision(try_url)

        if use_tasks is not None:
            if os.path.exists(use_tasks):
                self.vinfo(f"use tasks: {use_tasks}")
                tasks = self.read_json(use_tasks)
                tasks = [MockTask(task) for task in tasks]
            else:
                self.error(f"use tasks JSON file does not exist: {use_tasks}")
                return False
        else:
            tasks = self.get_tasks(revision, repo)

        if use_failures is not None:
            if os.path.exists(use_failures):
                self.vinfo(f"use failures: {use_failures}")
                failures = self.read_json(use_failures)
            else:
                self.error(f"use failures JSON file does not exist: {use_failures}")
                return False
        else:
            failures = self.get_failures(tasks)
            if save_failures is not None:
                self.vinfo(f"save failures: {save_failures}")
                self.write_json(save_failures, failures)

        if save_tasks is not None:
            self.vinfo(f"save tasks: {save_tasks}")
            self.write_tasks(save_tasks, tasks)

        num_failures = 0
        for manifest in failures:
            if not manifest.endswith(".toml"):
                self.warning(f"cannot process skip-fails on INI manifests: {manifest}")
            else:
                for label in failures[manifest][LL]:
                    for path in failures[manifest][LL][label][PP]:
                        classification = failures[manifest][LL][label][PP][path][CC]
                        if classification.startswith("disable_") or (
                            self.turbo and classification == Classification.SECONDARY
                        ):
                            for task_id in failures[manifest][LL][label][PP][path][
                                RUNS
                            ]:
                                break  # just use the first task_id
                            self.skip_failure(
                                manifest,
                                path,
                                label,
                                classification,
                                task_id,
                                try_url,
                                revision,
                                repo,
                                meta_bug_id,
                            )
                            num_failures += 1
                            if max_failures >= 0 and num_failures >= max_failures:
                                self.warning(
                                    f"max_failures={max_failures} threshold reached. stopping."
                                )
                                return True
        return True

    def get_revision(self, url):
        parsed = urllib.parse.urlparse(url)
        if parsed.scheme != "https":
            raise ValueError("try_url scheme not https")
        if parsed.netloc != Skipfails.TREEHERDER:
            raise ValueError(f"try_url server not {Skipfails.TREEHERDER}")
        if len(parsed.query) == 0:
            raise ValueError("try_url query missing")
        query = urllib.parse.parse_qs(parsed.query)
        if Skipfails.REVISION not in query:
            raise ValueError("try_url query missing revision")
        revision = query[Skipfails.REVISION][0]
        if Skipfails.REPO in query:
            repo = query[Skipfails.REPO][0]
        else:
            repo = "try"
        self.vinfo(f"considering {repo} revision={revision}")
        return revision, repo

    def get_tasks(self, revision, repo):
        push = mozci.push.Push(revision, repo)
        return push.tasks

    def get_failures(self, tasks):
        """
        find failures and create structure comprised of runs by path:
            result:
                * False (failed)
                * True (passed)
            classification: Classification
                * unknown (default) < 3 runs
                * intermittent (not enough failures)
                * disable_recommended (enough repeated failures) >3 runs >= 4
                * disable_manifest (disable DEFAULT if no other failures)
                * secondary (not first failure in group)
                * success
        """
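
        # Illustrative sketch of the shape built below (not an exhaustive schema):
        #   ff[manifest][LL][label][PP][path] == {
        #       CC: Classification, FAILED_RUNS: int, RUNS: {task_id: ok}, TOTAL_RUNS: int
        #   }
        # with per-label duration data under DURATIONS / MEDIAN_DURATION / TOTAL_DURATION.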

        ff = {}
        manifest_paths = {}
        manifest_ = {
            LL: {},
        }
        label_ = {
            DURATIONS: {},
            MEDIAN_DURATION: 0,
            OPT: None,
            PP: {},
            SUM_BY_LABEL: {
                Classification.DISABLE_MANIFEST: 0,
                Classification.DISABLE_RECOMMENDED: 0,
                Classification.DISABLE_TOO_LONG: 0,
                Classification.INTERMITTENT: 0,
                Classification.SECONDARY: 0,
                Classification.SUCCESS: 0,
                Classification.UNKNOWN: 0,
            },
            TOTAL_DURATION: 0,
        }
        path_ = {
            CC: Classification.UNKNOWN,
            FAILED_RUNS: 0,
            RUNS: {},
            TOTAL_RUNS: 0,
        }

        for task in tasks:  # add implicit failures
            try:
                if len(task.results) == 0:
                    continue  # ignore aborted tasks
                for mm in task.failure_types:
                    if mm not in manifest_paths:
                        manifest_paths[mm] = []
                    if mm not in ff:
                        ff[mm] = deepcopy(manifest_)
                    ll = task.label
                    if ll not in ff[mm][LL]:
                        ff[mm][LL][ll] = deepcopy(label_)
                    for path_type in task.failure_types[mm]:
                        path, _type = path_type
                        if path == mm:
                            path = DEF  # refers to the manifest itself
                        if path not in manifest_paths[mm]:
                            manifest_paths[mm].append(path)
                        if path not in ff[mm][LL][ll][PP]:
                            ff[mm][LL][ll][PP][path] = deepcopy(path_)
                        if task.id not in ff[mm][LL][ll][PP][path][RUNS]:
                            ff[mm][LL][ll][PP][path][RUNS][task.id] = False
                            ff[mm][LL][ll][PP][path][TOTAL_RUNS] += 1
                            ff[mm][LL][ll][PP][path][FAILED_RUNS] += 1
            except AttributeError as ae:
                self.warning(f"unknown attribute in task (#1): {ae}")

        for task in tasks:  # add results
            try:
                if len(task.results) == 0:
                    continue  # ignore aborted tasks
                for result in task.results:
                    mm = result.group
                    if mm not in ff:
                        ff[mm] = deepcopy(manifest_)
                    ll = task.label
                    if ll not in ff[mm][LL]:
                        ff[mm][LL][ll] = deepcopy(label_)
                    if task.id not in ff[mm][LL][ll][DURATIONS]:
                        # duration may be None !!!
                        ff[mm][LL][ll][DURATIONS][task.id] = result.duration or 0
                        if ff[mm][LL][ll][OPT] is None:
                            ff[mm][LL][ll][OPT] = self.get_opt_for_task(task.id)
                    if mm not in manifest_paths:
                        continue
                    for path in manifest_paths[mm]:  # all known paths
                        if path not in ff[mm][LL][ll][PP]:
                            ff[mm][LL][ll][PP][path] = deepcopy(path_)
                        if task.id not in ff[mm][LL][ll][PP][path][RUNS]:
                            ff[mm][LL][ll][PP][path][RUNS][task.id] = result.ok
                            ff[mm][LL][ll][PP][path][TOTAL_RUNS] += 1
                            if not result.ok:
                                ff[mm][LL][ll][PP][path][FAILED_RUNS] += 1
            except AttributeError as ae:
                self.warning(f"unknown attribute in task (#3): {ae}")

        for mm in ff:  # determine classifications
            for label in ff[mm][LL]:
                opt = ff[mm][LL][label][OPT]
                durations = []  # summarize durations
                for task_id in ff[mm][LL][label][DURATIONS]:
                    duration = ff[mm][LL][label][DURATIONS][task_id]
                    durations.append(duration)
                if len(durations) > 0:
                    total_duration = sum(durations)
                    median_duration = median(durations)
                    ff[mm][LL][label][TOTAL_DURATION] = total_duration
                    ff[mm][LL][label][MEDIAN_DURATION] = median_duration
                    if (opt and median_duration > OPT_THRESHOLD) or (
                        (not opt) and median_duration > DEBUG_THRESHOLD
                    ):
                        if DEF not in ff[mm][LL][label][PP]:
                            ff[mm][LL][label][PP][DEF] = deepcopy(path_)
                        if task_id not in ff[mm][LL][label][PP][DEF][RUNS]:
                            ff[mm][LL][label][PP][DEF][RUNS][task_id] = False
                            ff[mm][LL][label][PP][DEF][TOTAL_RUNS] += 1
                            ff[mm][LL][label][PP][DEF][FAILED_RUNS] += 1
                        ff[mm][LL][label][PP][DEF][CC] = Classification.DISABLE_TOO_LONG
                primary = True  # we have not seen the first failure
                for path in sort_paths(ff[mm][LL][label][PP]):
                    classification = ff[mm][LL][label][PP][path][CC]
                    if classification == Classification.UNKNOWN:
                        failed_runs = ff[mm][LL][label][PP][path][FAILED_RUNS]
                        total_runs = ff[mm][LL][label][PP][path][TOTAL_RUNS]
                        if total_runs >= MINIMUM_RUNS:
                            if failed_runs / total_runs < FAILURE_RATIO:
                                if failed_runs == 0:
                                    classification = Classification.SUCCESS
                                else:
                                    classification = Classification.INTERMITTENT
                            elif primary:
                                if path == DEF:
                                    classification = Classification.DISABLE_MANIFEST
                                else:
                                    classification = Classification.DISABLE_RECOMMENDED
                                primary = False
                            else:
                                classification = Classification.SECONDARY
                        ff[mm][LL][label][PP][path][CC] = classification
                    ff[mm][LL][label][SUM_BY_LABEL][classification] += 1
        return ff

    def _get_os_version(self, os, platform):
        """Return the os_version given the label platform string"""
        i = platform.find(os)
        j = i + len(os)
        yy = platform[j : j + 2]
        mm = platform[j + 2 : j + 4]
        return yy + "." + mm
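        # Illustrative example (assuming a platform string like those in task labels):
        #   _get_os_version("macosx", "macosx1015-64") -> "10.15"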

    def get_bug_by_id(self, id):
        """Get bug by bug id"""

        self._initialize_bzapi()
        bug = self._bzapi.getbug(id)
        return bug

    def get_bugs_by_summary(self, summary):
        """Get bugs by bug summary"""

        self._initialize_bzapi()
        query = self._bzapi.build_query(short_desc=summary)
        query["include_fields"] = [
            "id",
            "product",
            "component",
            "status",
            "resolution",
            "summary",
            "blocks",
        ]
        bugs = self._bzapi.query(query)
        return bugs

    def create_bug(
        self,
        summary="Bug short description",
        description="Bug description",
        product="Testing",
        component="General",
        version="unspecified",
        bugtype="task",
    ):
        """Create a bug"""

        self._initialize_bzapi()
        if not self._bzapi.logged_in:
            self.error(BUGZILLA_AUTHENTICATION_HELP)
            raise PermissionError(f"Not authenticated for Bugzilla {self.bugzilla}")
        createinfo = self._bzapi.build_createbug(
            product=product,
            component=component,
            summary=summary,
            version=version,
            description=description,
        )
        createinfo["type"] = bugtype
        bug = self._bzapi.createbug(createinfo)
        return bug

    def add_bug_comment(self, id, comment, meta_bug_id=None):
        """Add a comment to an existing bug"""

        self._initialize_bzapi()
        if not self._bzapi.logged_in:
            self.error(BUGZILLA_AUTHENTICATION_HELP)
            raise PermissionError("Not authenticated for Bugzilla")
        if meta_bug_id is not None:
            blocks_add = [meta_bug_id]
        else:
            blocks_add = None
        updateinfo = self._bzapi.build_update(comment=comment, blocks_add=blocks_add)
        self._bzapi.update_bugs([id], updateinfo)

    def skip_failure(
        self,
        manifest,
        path,
        label,
        classification,
        task_id,
        try_url,
        revision,
        repo,
        meta_bug_id=None,
    ):
        """Skip a failure"""

        self.vinfo(f"===== Skip failure in manifest: {manifest} =====")
        if task_id is None:
            skip_if = "true"
        else:
            skip_if = self.task_to_skip_if(task_id)
        if skip_if is None:
            self.warning(
                f"Unable to calculate skip-if condition from manifest={manifest} from failure label={label}"
            )
            return
        bug_reference = ""
        if classification == Classification.DISABLE_MANIFEST:
            filename = DEF
            comment = "Disabled entire manifest due to crash result"
        elif classification == Classification.DISABLE_TOO_LONG:
            filename = DEF
            comment = "Disabled entire manifest due to excessive run time"
        else:
            filename = self.get_filename_in_manifest(manifest, path)
            comment = f'Disabled test due to failures: "{filename}"'
            if classification == Classification.SECONDARY:
                comment += " (secondary)"
                bug_reference = " (secondary)"
        comment += f"\nTry URL = {try_url}"
        comment += f"\nrevision = {revision}"
        comment += f"\nrepo = {repo}"
        comment += f"\nlabel = {label}"
        if task_id is not None:
            comment += f"\ntask_id = {task_id}"
            push_id = self.get_push_id(revision, repo)
            if push_id is not None:
                comment += f"\npush_id = {push_id}"
                job_id = self.get_job_id(push_id, task_id)
                if job_id is not None:
                    comment += f"\njob_id = {job_id}"
                    (
                        suggestions_url,
                        line_number,
                        line,
                        log_url,
                    ) = self.get_bug_suggestions(repo, job_id, path)
                    if log_url is not None:
                        comment += f"\n\nBug suggestions: {suggestions_url}"
                        comment += f"\nSpecifically see at line {line_number} in the attached log: {log_url}"
                        comment += f'\n\n "{line}"\n'
        platform, testname = self.label_to_platform_testname(label)
        if platform is not None:
            comment += "\n\nCommand line to reproduce:\n\n"
            comment += f" \"mach try fuzzy -q '{platform}' {testname}\""
        bug_summary = f"MANIFEST {manifest}"
        attachments = {}
        bugs = self.get_bugs_by_summary(bug_summary)
        if len(bugs) == 0:
            description = (
                f"This bug covers excluded failing tests in the MANIFEST {manifest}"
            )
            description += "\n(generated by `mach manifest skip-fails`)"
            product, component = self.get_file_info(path)
            if self.dry_run:
                self.warning(
                    f'Dry-run NOT creating bug: {product}::{component} "{bug_summary}"'
                )
                bugid = "TBD"
            else:
                bug = self.create_bug(bug_summary, description, product, component)
                bugid = bug.id
                self.vinfo(
                    f'Created Bug {bugid} {product}::{component} : "{bug_summary}"'
                )
            bug_reference = f"Bug {bugid}" + bug_reference
        elif len(bugs) == 1:
            bugid = bugs[0].id
            bug_reference = f"Bug {bugid}" + bug_reference
            product = bugs[0].product
            component = bugs[0].component
            self.vinfo(f'Found Bug {bugid} {product}::{component} "{bug_summary}"')
            if meta_bug_id is not None:
                if meta_bug_id in bugs[0].blocks:
                    self.vinfo(f" Bug {bugid} already blocks meta bug {meta_bug_id}")
                    meta_bug_id = None  # no need to add again
            comments = bugs[0].getcomments()
            for i in range(len(comments)):
                text = comments[i]["text"]
                m = self._attach_rx.findall(text)
                if len(m) == 1:
                    a_task_id = m[0][1]
                    attachments[a_task_id] = m[0][0]
                    if a_task_id == task_id:
                        self.vinfo(
                            f" Bug {bugid} already has the compressed log attached for this task"
                        )
        else:
            self.error(f'More than one bug found for summary: "{bug_summary}"')
            return
        if self.dry_run:
            self.warning(f"Dry-run NOT adding comment to Bug {bugid}: {comment}")
            self.info(f'Dry-run NOT editing ["{filename}"] manifest: "{manifest}"')
            self.info(f'would add skip-if condition: "{skip_if}" # {bug_reference}')
            if task_id is not None and task_id not in attachments:
                self.info("would add compressed log for this task")
            return
        self.add_bug_comment(bugid, comment, meta_bug_id)
        self.info(f"Added comment to Bug {bugid}: {comment}")
        if meta_bug_id is not None:
            self.info(f" Bug {bugid} blocks meta Bug: {meta_bug_id}")
        if task_id is not None and task_id not in attachments:
            self.add_attachment_log_for_task(bugid, task_id)
            self.info("Added compressed log for this task")
        mp = ManifestParser(use_toml=True, document=True)
        manifest_path = os.path.join(self.topsrcdir, os.path.normpath(manifest))
        mp.read(manifest_path)
        document = mp.source_documents[manifest_path]
        add_skip_if(document, filename, skip_if, bug_reference)
        manifest_str = alphabetize_toml_str(document)
        fp = io.open(manifest_path, "w", encoding="utf-8", newline="\n")
        fp.write(manifest_str)
        fp.close()
        self.info(f'Edited ["{filename}"] in manifest: "{manifest}"')
        self.info(f'added skip-if condition: "{skip_if}" # {bug_reference}')

    def get_variants(self):
        """Get mozinfo for each test variant"""

        if len(self.variants) == 0:
            variants_file = "taskcluster/ci/test/variants.yml"
            variants_path = os.path.join(
                self.topsrcdir, os.path.normpath(variants_file)
            )
            fp = io.open(variants_path, "r", encoding="utf-8")
            raw_variants = load(fp, Loader=Loader)
            fp.close()
            for k, v in raw_variants.items():
                mozinfo = k
                if "mozinfo" in v:
                    mozinfo = v["mozinfo"]
                self.variants[k] = mozinfo
        return self.variants

    def get_task_details(self, task_id):
        """Download details for task task_id"""

        if task_id in self.tasks:  # if cached
            task = self.tasks[task_id]
        else:
            try:
                task = get_task(task_id)
            except TaskclusterRestFailure:
                self.warning(f"Task {task_id} no longer exists.")
                return None
            self.tasks[task_id] = task
        return task

    def get_extra(self, task_id):
        """Calculate extra for task task_id"""

        if task_id in self.extras:  # if cached
            extra = self.extras[task_id]
        else:
            self.get_variants()
            task = self.get_task_details(task_id) or {}
            os = None
            os_version = None
            arch = None
            bits = None
            display = None
            runtimes = []
            build_types = []
            test_setting = task.get("extra", {}).get("test-setting", {})
            platform = test_setting.get("platform", {})
            platform_os = platform.get("os", {})
            opt = False
            debug = False
            if "name" in platform_os:
                os = platform_os["name"]
                if os == "windows":
                    os = "win"
                if os == "macosx":
                    os = "mac"
            if "version" in platform_os:
                os_version = platform_os["version"]
                if len(os_version) == 4:
                    os_version = os_version[0:2] + "." + os_version[2:4]
            if "arch" in platform:
                arch = platform["arch"]
                if arch == "x86" or arch.find("32") >= 0:
                    bits = "32"
                if arch == "64" or arch.find("64") >= 0:
                    bits = "64"
            if "display" in platform:
                display = platform["display"]
            if "runtime" in test_setting:
                for k in test_setting["runtime"]:
                    if k in self.variants:
                        runtimes.append(self.variants[k])  # adds mozinfo
            if "build" in test_setting:
                tbuild = test_setting["build"]
                for k in tbuild:
                    if k == "type":
                        if tbuild[k] == "opt":
                            opt = True
                        elif tbuild[k] == "debug":
                            debug = True
                        build_types.append(tbuild[k])
                    else:
                        build_types.append(k)
            unknown = None
            extra = {
                "os": os or unknown,
                "os_version": os_version or unknown,
                "arch": arch or unknown,
                "bits": bits or unknown,
                "display": display or unknown,
                "runtimes": runtimes,
                "opt": opt,
                "debug": debug,
                "build_types": build_types,
            }
            self.extras[task_id] = extra
        return extra

    def get_opt_for_task(self, task_id):
        extra = self.get_extra(task_id)
        return extra["opt"]

    def task_to_skip_if(self, task_id):
        """Calculate the skip-if condition for failing task task_id"""

        extra = self.get_extra(task_id)
        skip_if = None
        if extra["os"] is not None:
            skip_if = "os == '" + extra["os"] + "'"
            if extra["os_version"] is not None:
                skip_if += " && "
                skip_if += "os_version == '" + extra["os_version"] + "'"
            if extra["bits"] is not None:
                skip_if += " && "
                skip_if += "bits == '" + extra["bits"] + "'"
            if extra["display"] is not None:
                skip_if += " && "
                skip_if += "display == '" + extra["display"] + "'"
            for runtime in extra["runtimes"]:
                skip_if += " && "
                skip_if += runtime
            for build_type in extra["build_types"]:
                skip_if += " && "
                skip_if += build_type
        return skip_if
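        # Illustrative example of a generated condition (hypothetical values):
        #   "os == 'linux' && os_version == '18.04' && bits == '64' && fission && opt"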

    def get_file_info(self, path, product="Testing", component="General"):
        """
        Get bugzilla product and component for the path.
        Provide defaults (in case command_context is not defined
        or there isn't file info available).
        """
        if path != DEF and self.command_context is not None:
            reader = self.command_context.mozbuild_reader(config_mode="empty")
            info = reader.files_info([path])
            cp = info[path]["BUG_COMPONENT"]
            product = cp.product
            component = cp.component
        return product, component

    def get_filename_in_manifest(self, manifest, path):
        """return relative filename for path in manifest"""

        filename = os.path.basename(path)
        if filename == DEF:
            return filename
        manifest_dir = os.path.dirname(manifest)
        i = 0
        j = min(len(manifest_dir), len(path))
        while i < j and manifest_dir[i] == path[i]:
            i += 1
        if i < len(manifest_dir):
            for _ in range(manifest_dir.count("/", i) + 1):
                filename = "../" + filename
        elif i < len(path):
            filename = path[i + 1 :]
        return filename
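        # Illustrative examples (hypothetical paths):
        #   manifest "a/b/m.toml", path "a/b/c/t.html" -> "c/t.html"
        #   manifest "a/b/m.toml", path "a/c/t.html"   -> "../t.html"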

    def get_push_id(self, revision, repo):
        """Return the push_id for revision and repo (or None)"""

        self.vinfo(f"Retrieving push_id for {repo} revision: {revision} ...")
        if revision in self.push_ids:  # if cached
            push_id = self.push_ids[revision]
        else:
            push_id = None
            push_url = f"https://treeherder.mozilla.org/api/project/{repo}/push/"
            params = {}
            params["full"] = "true"
            params["count"] = 10
            params["revision"] = revision
            r = requests.get(push_url, headers=self.headers, params=params)
            if r.status_code != 200:
                self.warning(f"FAILED to query Treeherder = {r} for {r.url}")
            else:
                response = r.json()
                if "results" in response:
                    results = response["results"]
                    if len(results) > 0:
                        r0 = results[0]
                        if "id" in r0:
                            push_id = r0["id"]
            self.push_ids[revision] = push_id
        return push_id

    def get_job_id(self, push_id, task_id):
        """Return the job_id for push_id, task_id (or None)"""

        self.vinfo(f"Retrieving job_id for push_id: {push_id}, task_id: {task_id} ...")
        if push_id in self.job_ids:  # if cached
            job_id = self.job_ids[push_id]
        else:
            job_id = None
            params = {}
            params["push_id"] = push_id
            r = requests.get(self.jobs_url, headers=self.headers, params=params)
            if r.status_code != 200:
                self.warning(f"FAILED to query Treeherder = {r} for {r.url}")
            else:
                response = r.json()
                if "results" in response:
                    results = response["results"]
                    if len(results) > 0:
                        for result in results:
                            if len(result) > 14:
                                if result[14] == task_id:
                                    job_id = result[1]
                                    break
            self.job_ids[push_id] = job_id
        return job_id

    def get_bug_suggestions(self, repo, job_id, path):
        """
        Return the (suggestions_url, line_number, line, log_url)
        for the given repo and job_id
        """
        self.vinfo(
            f"Retrieving bug_suggestions for {repo} job_id: {job_id}, path: {path} ..."
        )
        suggestions_url = f"https://treeherder.mozilla.org/api/project/{repo}/jobs/{job_id}/bug_suggestions/"
        line_number = None
        line = None
        log_url = None
        r = requests.get(suggestions_url, headers=self.headers)
        if r.status_code != 200:
            self.warning(f"FAILED to query Treeherder = {r} for {r.url}")
        else:
            response = r.json()
            if len(response) > 0:
                for sugg in response:
                    if sugg["path_end"] == path:
                        line_number = sugg["line_number"] + 1
                        line = sugg["search"]
                        log_url = f"https://treeherder.mozilla.org/logviewer?repo={repo}&job_id={job_id}&lineNumber={line_number}"
                        break
        rv = (suggestions_url, line_number, line, log_url)
        return rv

    def read_json(self, filename):
        """read data as JSON from filename"""
        fp = io.open(filename, "r", encoding="utf-8")
        data = json.load(fp)
        fp.close()
        return data

    def write_json(self, filename, data):
        """saves data as JSON to filename"""
        fp = io.open(filename, "w", encoding="utf-8")
        json.dump(data, fp, indent=2, sort_keys=True)
        fp.close()

    def write_tasks(self, save_tasks, tasks):
        """saves tasks as JSON to save_tasks"""
        jtasks = []
        for task in tasks:
            if not isinstance(task, TestTask):
                continue
            jtask = {}
            jtask["id"] = task.id
            jtask["label"] = task.label
            jtask["duration"] = task.duration
            jtask["result"] = task.result
            jtask["state"] = task.state
            jtask["extra"] = self.get_extra(task.id)
            jtags = {}
            for k, v in task.tags.items():
                if k == "createdForUser":
                    jtags[k] = "ci@mozilla.com"
                else:
                    jtags[k] = v
            jtask["tags"] = jtags
            jtask["tier"] = task.tier
            jtask["results"] = [
                {"group": r.group, "ok": r.ok, "duration": r.duration}
                for r in task.results
            ]
            jtask["errors"] = None  # Bug with task.errors property??
            jft = {}
            for k in task.failure_types:
                jft[k] = [[f[0], f[1].value] for f in task.failure_types[k]]
            jtask["failure_types"] = jft
            jtasks.append(jtask)
        self.write_json(save_tasks, jtasks)

    def label_to_platform_testname(self, label):
        """convert from label to platform, testname for mach command line"""
        platform = None
        testname = None
        platform_details = label.split("/")
        if len(platform_details) == 2:
            platform, details = platform_details
            words = details.split("-")
            if len(words) > 2:
                platform += "/" + words.pop(0)  # opt or debug
                try:
                    _chunk = int(words[-1])
                    words.pop()
                except ValueError:
                    pass
                words.pop()  # remove test suffix
                testname = "-".join(words)
            else:
                platform = None
        return platform, testname
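        # Illustrative example (hypothetical label):
        #   "test-linux1804-64-qr/opt-mochitest-plain-3"
        #   -> ("test-linux1804-64-qr/opt", "mochitest")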

    def add_attachment_log_for_task(self, bugid, task_id):
        """Adds compressed log for this task to bugid"""

        log_url = f"https://firefox-ci-tc.services.mozilla.com/api/queue/v1/task/{task_id}/artifacts/public/logs/live_backing.log"
        r = requests.get(log_url, headers=self.headers)
        if r.status_code != 200:
            self.error(f"Unable to get log for task: {task_id}")
            return
        attach_fp = tempfile.NamedTemporaryFile()
        fp = gzip.open(attach_fp, "wb")
        fp.write(r.text.encode("utf-8"))
        fp.close()
        self._initialize_bzapi()
        description = ATTACHMENT_DESCRIPTION + task_id
        file_name = TASK_LOG + ".gz"
        comment = "Added compressed log"
        content_type = "application/gzip"
        try:
            self._bzapi.attachfile(
                [bugid],
                attach_fp.name,
                description,
                file_name=file_name,
                comment=comment,
                content_type=content_type,
                is_private=False,
            )
        except Fault:
            pass  # Fault expected: Failed to fetch key 9372091 from network storage: The specified key does not exist.