# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import gzip
import io
import json
import logging
import os
import os.path
import pprint
import re
import sys
import tempfile
import urllib.parse
from copy import deepcopy
from enum import Enum
from pathlib import Path
from statistics import median
from xmlrpc.client import Fault

from yaml import load

try:
    from yaml import CLoader as Loader
except ImportError:
    from yaml import Loader

import bugzilla
import mozci.push
import requests
from manifestparser import ManifestParser
from manifestparser.toml import add_skip_if, alphabetize_toml_str, sort_paths
from mozci.task import TestTask
from mozci.util.taskcluster import get_task

from taskcluster.exceptions import TaskclusterRestFailure

TASK_LOG = "live_backing.log"
TASK_ARTIFACT = "public/logs/" + TASK_LOG
ATTACHMENT_DESCRIPTION = "Compressed " + TASK_ARTIFACT + " for task "
ATTACHMENT_REGEX = (
    r".*Created attachment ([0-9]+)\n.*"
    + ATTACHMENT_DESCRIPTION
    + "([A-Za-z0-9_-]+)\n.*"
)

BUGZILLA_AUTHENTICATION_HELP = "Must create a Bugzilla API key per https://github.com/mozilla/mozci-tools/blob/main/citools/test_triage_bug_filer.py"

MS_PER_MINUTE = 60 * 1000  # ms per minute
DEBUG_THRESHOLD = 40 * MS_PER_MINUTE  # 40 minutes in ms
OPT_THRESHOLD = 20 * MS_PER_MINUTE  # 20 minutes in ms

CC = "classification"
DEF = "DEFAULT"
DURATIONS = "durations"
FAILED_RUNS = "failed_runs"
FAILURE_RATIO = 0.4  # more than this fraction of failures will disable
LL = "label"
MEDIAN_DURATION = "median_duration"
MINIMUM_RUNS = 3  # minimum number of runs to consider success/failure
MOCK_BUG_DEFAULTS = {"blocks": [], "comments": []}
MOCK_TASK_DEFAULTS = {"failure_types": {}, "results": []}
MOCK_TASK_INITS = ["results"]
OPT = "opt"
PP = "path"
RUNS = "runs"
SUM_BY_LABEL = "sum_by_label"
TOTAL_DURATION = "total_duration"
TOTAL_RUNS = "total_runs"
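
# Worked example of the classification thresholds above (illustrative):
# with MINIMUM_RUNS = 3 and FAILURE_RATIO = 0.4, a path that failed 2 of its
# 3 runs has a failure fraction of 2/3 (~0.67), which is not below 0.4, so
# get_failures() below recommends disabling it; 1 failure in 3 runs (~0.33,
# below 0.4) is classified as intermittent instead.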


class Mock(object):
    def __init__(self, data, defaults={}, inits=[]):
        self._data = data
        self._defaults = defaults
        for name in inits:
            values = self._data.get(name, [])  # assume type is an array
            values = [Mock(value, defaults, inits) for value in values]
            self._data[name] = values

    def __getattr__(self, name):
        if name in self._data:
            return self._data[name]
        if name in self._defaults:
            return self._defaults[name]
        return ""
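
# Usage sketch (illustrative): run() below rehydrates tasks saved by
# write_tasks() by wrapping each JSON dict so attribute access behaves like a
# real mozci TestTask:
#     task = Mock({"id": "abc", "label": "test-..."},
#                 MOCK_TASK_DEFAULTS, MOCK_TASK_INITS)
#     task.label          -> "test-..."
#     task.results        -> [] (initialized via MOCK_TASK_INITS)
#     task.failure_types  -> {} (from MOCK_TASK_DEFAULTS)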


class Classification(object):
    "Classification of the failure (not the task result)"

    DISABLE_MANIFEST = "disable_manifest"  # crash found
    DISABLE_RECOMMENDED = "disable_recommended"  # disable first failing path
    DISABLE_TOO_LONG = "disable_too_long"  # runtime threshold exceeded
    INTERMITTENT = "intermittent"
    SECONDARY = "secondary"  # secondary failing path
    SUCCESS = "success"  # path always succeeds
    UNKNOWN = "unknown"


class Run(Enum):
    """
    constant indexes for attributes of a run
    """

    MANIFEST = 0
    TASK_ID = 1
    TASK_LABEL = 2
    RESULT = 3
    CLASSIFICATION = 4


class Skipfails(object):
    "mach manifest skip-fails implementation: Update manifests to skip failing tests"

    REPO = "repo"
    REVISION = "revision"
    TREEHERDER = "treeherder.mozilla.org"
    BUGZILLA_SERVER_DEFAULT = "bugzilla.allizom.org"

    def __init__(
        self,
        command_context=None,
        try_url="",
        verbose=False,
        bugzilla=None,
        dry_run=False,
        turbo=False,
    ):
        self.command_context = command_context
        if self.command_context is not None:
            self.topsrcdir = self.command_context.topsrcdir
        else:
            self.topsrcdir = Path(__file__).parent.parent
        self.topsrcdir = os.path.normpath(self.topsrcdir)
        if isinstance(try_url, list) and len(try_url) == 1:
            self.try_url = try_url[0]
        else:
            self.try_url = try_url
        self.dry_run = dry_run
        self.verbose = verbose
        self.turbo = turbo
        if bugzilla is not None:
            self.bugzilla = bugzilla
        elif "BUGZILLA" in os.environ:
            self.bugzilla = os.environ["BUGZILLA"]
        else:
            self.bugzilla = Skipfails.BUGZILLA_SERVER_DEFAULT
        self.component = "skip-fails"
        self._bzapi = None
        self._attach_rx = None
        self.variants = {}
        self.tasks = {}
        self.pp = None
        self.headers = {}  # for Treeherder requests
        self.headers["Accept"] = "application/json"
        self.headers["User-Agent"] = "treeherder-pyclient"
        self.jobs_url = "https://treeherder.mozilla.org/api/jobs/"
        self.push_ids = {}
        self.job_ids = {}
        self.extras = {}
        self.bugs = []  # preloaded bugs, currently not an updated cache

    def _initialize_bzapi(self):
        """Lazily initializes the Bugzilla API"""
        if self._bzapi is None:
            self._bzapi = bugzilla.Bugzilla(self.bugzilla)
            self._attach_rx = re.compile(ATTACHMENT_REGEX, flags=re.M)

    def pprint(self, obj):
        if self.pp is None:
            self.pp = pprint.PrettyPrinter(indent=4, stream=sys.stderr)
        self.pp.pprint(obj)
        sys.stderr.flush()

    def error(self, e):
        if self.command_context is not None:
            self.command_context.log(
                logging.ERROR, self.component, {"error": str(e)}, "ERROR: {error}"
            )
        else:
            print(f"ERROR: {e}", file=sys.stderr, flush=True)

    def warning(self, e):
        if self.command_context is not None:
            self.command_context.log(
                logging.WARNING, self.component, {"error": str(e)}, "WARNING: {error}"
            )
        else:
            print(f"WARNING: {e}", file=sys.stderr, flush=True)

    def info(self, e):
        if self.command_context is not None:
            self.command_context.log(
                logging.INFO, self.component, {"error": str(e)}, "INFO: {error}"
            )
        else:
            print(f"INFO: {e}", file=sys.stderr, flush=True)

    def vinfo(self, e):
        if self.verbose:
            self.info(e)

    def run(
        self,
        meta_bug_id=None,
        save_tasks=None,
        use_tasks=None,
        save_failures=None,
        use_failures=None,
        max_failures=-1,
    ):
        "Run skip-fails on try_url, return True on success"

        try_url = self.try_url
        revision, repo = self.get_revision(try_url)

        if use_tasks is not None:
            if os.path.exists(use_tasks):
                self.vinfo(f"use tasks: {use_tasks}")
                tasks = self.read_json(use_tasks)
                tasks = [
                    Mock(task, MOCK_TASK_DEFAULTS, MOCK_TASK_INITS) for task in tasks
                ]
            else:
                self.error(f"use tasks JSON file does not exist: {use_tasks}")
                return False
        else:
            tasks = self.get_tasks(revision, repo)

        if use_failures is not None:
            if os.path.exists(use_failures):
                self.vinfo(f"use failures: {use_failures}")
                failures = self.read_json(use_failures)
            else:
                self.error(f"use failures JSON file does not exist: {use_failures}")
                return False
        else:
            failures = self.get_failures(tasks)
            if save_failures is not None:
                self.vinfo(f"save failures: {save_failures}")
                self.write_json(save_failures, failures)

        if save_tasks is not None:
            self.vinfo(f"save tasks: {save_tasks}")
            self.write_tasks(save_tasks, tasks)

        num_failures = 0
        for manifest in failures:
            if not manifest.endswith(".toml"):
                self.warning(f"cannot process skip-fails on INI manifests: {manifest}")
            else:
                for label in failures[manifest][LL]:
                    for path in failures[manifest][LL][label][PP]:
                        classification = failures[manifest][LL][label][PP][path][CC]
                        if classification.startswith("disable_") or (
                            self.turbo and classification == Classification.SECONDARY
                        ):
                            for task_id in failures[manifest][LL][label][PP][path][
                                RUNS
                            ]:
                                break  # just use the first task_id
                            self.skip_failure(
                                manifest,
                                path,
                                label,
                                classification,
                                task_id,
                                try_url,
                                revision,
                                repo,
                                meta_bug_id,
                            )
                            num_failures += 1
                            if max_failures >= 0 and num_failures >= max_failures:
                                self.warning(
                                    f"max_failures={max_failures} threshold reached. stopping."
                                )
                                return True
        return True

    def get_revision(self, url):
        parsed = urllib.parse.urlparse(url)
        if parsed.scheme != "https":
            raise ValueError("try_url scheme not https")
        if parsed.netloc != Skipfails.TREEHERDER:
            raise ValueError(f"try_url server not {Skipfails.TREEHERDER}")
        if len(parsed.query) == 0:
            raise ValueError("try_url query missing")
        query = urllib.parse.parse_qs(parsed.query)
        if Skipfails.REVISION not in query:
            raise ValueError("try_url query missing revision")
        revision = query[Skipfails.REVISION][0]
        if Skipfails.REPO in query:
            repo = query[Skipfails.REPO][0]
        else:
            repo = "try"
        self.vinfo(f"considering {repo} revision={revision}")
        return revision, repo
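
    # Example (illustrative, made-up revision hash): a URL like
    #     https://treeherder.mozilla.org/jobs?repo=try&revision=0123456789ab
    # parses to ("0123456789ab", "try"); when the repo parameter is absent,
    # the repo defaults to "try".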

    def get_tasks(self, revision, repo):
        push = mozci.push.Push(revision, repo)
        return push.tasks

    def get_failures(self, tasks):
        """
        find failures and create structure comprised of runs by path:
          result:
            * False (failed)
            * True (passed)
          classification: Classification
            * unknown (default) < 3 runs
            * intermittent (not enough failures)
            * disable_recommended (enough repeated failures) >3 runs >= 4
            * disable_manifest (disable DEFAULT if no other failures)
            * secondary (not first failure in group)
            * success
        """

        ff = {}
        manifest_paths = {}
        manifest_ = {
            LL: {},
        }
        label_ = {
            DURATIONS: {},
            MEDIAN_DURATION: 0,
            OPT: None,
            PP: {},
            SUM_BY_LABEL: {
                Classification.DISABLE_MANIFEST: 0,
                Classification.DISABLE_RECOMMENDED: 0,
                Classification.DISABLE_TOO_LONG: 0,
                Classification.INTERMITTENT: 0,
                Classification.SECONDARY: 0,
                Classification.SUCCESS: 0,
                Classification.UNKNOWN: 0,
            },
            TOTAL_DURATION: 0,
        }
        path_ = {
            CC: Classification.UNKNOWN,
            FAILED_RUNS: 0,
            RUNS: {},
            TOTAL_RUNS: 0,
        }

        for task in tasks:  # add implicit failures
            try:
                if len(task.results) == 0:
                    continue  # ignore aborted tasks
                for mm in task.failure_types:
                    if mm not in manifest_paths:
                        manifest_paths[mm] = []
                    if mm not in ff:
                        ff[mm] = deepcopy(manifest_)
                    ll = task.label
                    if ll not in ff[mm][LL]:
                        ff[mm][LL][ll] = deepcopy(label_)
                    for path_type in task.failure_types[mm]:
                        path, _type = path_type
                        if path == mm:
                            path = DEF  # refers to the manifest itself
                        if path not in manifest_paths[mm]:
                            manifest_paths[mm].append(path)
                        if path not in ff[mm][LL][ll][PP]:
                            ff[mm][LL][ll][PP][path] = deepcopy(path_)
                        if task.id not in ff[mm][LL][ll][PP][path][RUNS]:
                            ff[mm][LL][ll][PP][path][RUNS][task.id] = False
                            ff[mm][LL][ll][PP][path][TOTAL_RUNS] += 1
                            ff[mm][LL][ll][PP][path][FAILED_RUNS] += 1
            except AttributeError as ae:
                self.warning(f"unknown attribute in task (#1): {ae}")

        for task in tasks:  # add results
            try:
                if len(task.results) == 0:
                    continue  # ignore aborted tasks
                for result in task.results:
                    mm = result.group
                    if mm not in ff:
                        ff[mm] = deepcopy(manifest_)
                    ll = task.label
                    if ll not in ff[mm][LL]:
                        ff[mm][LL][ll] = deepcopy(label_)
                    if task.id not in ff[mm][LL][ll][DURATIONS]:
                        # duration may be None !!!
                        ff[mm][LL][ll][DURATIONS][task.id] = result.duration or 0
                        if ff[mm][LL][ll][OPT] is None:
                            ff[mm][LL][ll][OPT] = self.get_opt_for_task(task.id)
                    if mm not in manifest_paths:
                        continue
                    for path in manifest_paths[mm]:  # all known paths
                        if path not in ff[mm][LL][ll][PP]:
                            ff[mm][LL][ll][PP][path] = deepcopy(path_)
                        if task.id not in ff[mm][LL][ll][PP][path][RUNS]:
                            ff[mm][LL][ll][PP][path][RUNS][task.id] = result.ok
                            ff[mm][LL][ll][PP][path][TOTAL_RUNS] += 1
                            if not result.ok:
                                ff[mm][LL][ll][PP][path][FAILED_RUNS] += 1
            except AttributeError as ae:
                self.warning(f"unknown attribute in task (#3): {ae}")

        for mm in ff:  # determine classifications
            for label in ff[mm][LL]:
                opt = ff[mm][LL][label][OPT]
                durations = []  # summarize durations
                for task_id in ff[mm][LL][label][DURATIONS]:
                    duration = ff[mm][LL][label][DURATIONS][task_id]
                    durations.append(duration)
                if len(durations) > 0:
                    total_duration = sum(durations)
                    median_duration = median(durations)
                    ff[mm][LL][label][TOTAL_DURATION] = total_duration
                    ff[mm][LL][label][MEDIAN_DURATION] = median_duration
                    if (opt and median_duration > OPT_THRESHOLD) or (
                        (not opt) and median_duration > DEBUG_THRESHOLD
                    ):
                        if DEF not in ff[mm][LL][label][PP]:
                            ff[mm][LL][label][PP][DEF] = deepcopy(path_)
                        if task_id not in ff[mm][LL][label][PP][DEF][RUNS]:
                            ff[mm][LL][label][PP][DEF][RUNS][task_id] = False
                            ff[mm][LL][label][PP][DEF][TOTAL_RUNS] += 1
                            ff[mm][LL][label][PP][DEF][FAILED_RUNS] += 1
                        ff[mm][LL][label][PP][DEF][CC] = Classification.DISABLE_TOO_LONG
                primary = True  # we have not seen the first failure
                for path in sort_paths(ff[mm][LL][label][PP]):
                    classification = ff[mm][LL][label][PP][path][CC]
                    if classification == Classification.UNKNOWN:
                        failed_runs = ff[mm][LL][label][PP][path][FAILED_RUNS]
                        total_runs = ff[mm][LL][label][PP][path][TOTAL_RUNS]
                        if total_runs >= MINIMUM_RUNS:
                            if failed_runs / total_runs < FAILURE_RATIO:
                                if failed_runs == 0:
                                    classification = Classification.SUCCESS
                                else:
                                    classification = Classification.INTERMITTENT
                            elif primary:
                                if path == DEF:
                                    classification = Classification.DISABLE_MANIFEST
                                else:
                                    classification = Classification.DISABLE_RECOMMENDED
                                primary = False
                            else:
                                classification = Classification.SECONDARY
                        ff[mm][LL][label][PP][path][CC] = classification
                    ff[mm][LL][label][SUM_BY_LABEL][classification] += 1
        return ff
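
    # Sketch of the structure returned above (illustrative; keys are the
    # module-level constants):
    #     ff[manifest][LL][label][PP][path] == {
    #         CC: <Classification value>,
    #         FAILED_RUNS: <int>,
    #         TOTAL_RUNS: <int>,
    #         RUNS: {task_id: <bool result>, ...},
    #     }
    # with per-label DURATIONS, MEDIAN_DURATION, TOTAL_DURATION, OPT and
    # SUM_BY_LABEL bookkeeping stored alongside PP.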

    def _get_os_version(self, os, platform):
        """Return the os_version given the label platform string"""
        i = platform.find(os)
        j = i + len(os)
        yy = platform[j : j + 2]
        mm = platform[j + 2 : j + 4]
        return yy + "." + mm

    def get_bug_by_id(self, id):
        """Get bug by bug id"""

        self._initialize_bzapi()
        bug = None
        for b in self.bugs:
            if b.id == id:
                bug = b
                break
        if bug is None:
            bug = self._bzapi.getbug(id)
        return bug

    def get_bugs_by_summary(self, summary):
        """Get bugs by bug summary"""

        self._initialize_bzapi()
        bugs = []
        for b in self.bugs:
            if b.summary == summary:
                bugs.append(b)
        if len(bugs) > 0:
            return bugs
        query = self._bzapi.build_query(short_desc=summary)
        query["include_fields"] = [
            "id",
            "product",
            "component",
            "status",
            "resolution",
            "summary",
            "blocks",
        ]
        bugs = self._bzapi.query(query)
        return bugs

    def create_bug(
        self,
        summary="Bug short description",
        description="Bug description",
        product="Testing",
        component="General",
        version="unspecified",
        bugtype="task",
    ):
        """Create a bug"""

        self._initialize_bzapi()
        if not self._bzapi.logged_in:
            self.error(
                "Must create a Bugzilla API key per https://github.com/mozilla/mozci-tools/blob/main/citools/test_triage_bug_filer.py"
            )
            raise PermissionError(f"Not authenticated for Bugzilla {self.bugzilla}")
        createinfo = self._bzapi.build_createbug(
            product=product,
            component=component,
            summary=summary,
            version=version,
            description=description,
        )
        createinfo["type"] = bugtype
        bug = self._bzapi.createbug(createinfo)
        return bug

    def add_bug_comment(self, id, comment, meta_bug_id=None):
        """Add a comment to an existing bug"""

        self._initialize_bzapi()
        if not self._bzapi.logged_in:
            self.error(BUGZILLA_AUTHENTICATION_HELP)
            raise PermissionError("Not authenticated for Bugzilla")
        if meta_bug_id is not None:
            blocks_add = [meta_bug_id]
        else:
            blocks_add = None
        updateinfo = self._bzapi.build_update(comment=comment, blocks_add=blocks_add)
        self._bzapi.update_bugs([id], updateinfo)

    def skip_failure(
        self,
        manifest,
        path,
        label,
        classification,
        task_id,
        try_url,
        revision,
        repo,
        meta_bug_id=None,
    ):
        """Skip a failure"""

        self.vinfo(f"===== Skip failure in manifest: {manifest} =====")
        if task_id is None:
            skip_if = "true"
        else:
            skip_if = self.task_to_skip_if(task_id)
        if skip_if is None:
            self.warning(
                f"Unable to calculate skip-if condition from manifest={manifest} from failure label={label}"
            )
            return
        bug_reference = ""
        if classification == Classification.DISABLE_MANIFEST:
            filename = DEF
            comment = "Disabled entire manifest due to crash result"
        elif classification == Classification.DISABLE_TOO_LONG:
            filename = DEF
            comment = "Disabled entire manifest due to excessive run time"
        else:
            filename = self.get_filename_in_manifest(manifest, path)
            comment = f'Disabled test due to failures: "{filename}"'
            if classification == Classification.SECONDARY:
                comment += " (secondary)"
                bug_reference = " (secondary)"
        comment += f"\nTry URL = {try_url}"
        comment += f"\nrevision = {revision}"
        comment += f"\nrepo = {repo}"
        comment += f"\nlabel = {label}"
        if task_id is not None:
            comment += f"\ntask_id = {task_id}"
            push_id = self.get_push_id(revision, repo)
            if push_id is not None:
                comment += f"\npush_id = {push_id}"
                job_id = self.get_job_id(push_id, task_id)
                if job_id is not None:
                    comment += f"\njob_id = {job_id}"
                    (
                        suggestions_url,
                        line_number,
                        line,
                        log_url,
                    ) = self.get_bug_suggestions(repo, job_id, path)
                    if log_url is not None:
                        comment += f"\n\nBug suggestions: {suggestions_url}"
                        comment += f"\nSpecifically see at line {line_number} in the attached log: {log_url}"
                        comment += f'\n\n "{line}"\n'
        platform, testname = self.label_to_platform_testname(label)
        if platform is not None:
            comment += "\n\nCommand line to reproduce:\n\n"
            comment += f" \"mach try fuzzy -q '{platform}' {testname}\""
        bug_summary = f"MANIFEST {manifest}"
        attachments = {}
        bugs = self.get_bugs_by_summary(bug_summary)
        if len(bugs) == 0:
            description = (
                f"This bug covers excluded failing tests in the MANIFEST {manifest}"
            )
            description += "\n(generated by `mach manifest skip-fails`)"
            product, component = self.get_file_info(path)
            if self.dry_run:
                self.warning(
                    f'Dry-run NOT creating bug: {product}::{component} "{bug_summary}"'
                )
                bugid = "TBD"
            else:
                bug = self.create_bug(bug_summary, description, product, component)
                bugid = bug.id
                self.vinfo(
                    f'Created Bug {bugid} {product}::{component} : "{bug_summary}"'
                )
            bug_reference = f"Bug {bugid}" + bug_reference
        elif len(bugs) == 1:
            bugid = bugs[0].id
            bug_reference = f"Bug {bugid}" + bug_reference
            product = bugs[0].product
            component = bugs[0].component
            self.vinfo(f'Found Bug {bugid} {product}::{component} "{bug_summary}"')
            if meta_bug_id is not None:
                if meta_bug_id in bugs[0].blocks:
                    self.vinfo(f" Bug {bugid} already blocks meta bug {meta_bug_id}")
                    meta_bug_id = None  # no need to add again
            comments = bugs[0].getcomments()
            for i in range(len(comments)):
                text = comments[i]["text"]
                m = self._attach_rx.findall(text)
                if len(m) == 1:
                    a_task_id = m[0][1]
                    attachments[a_task_id] = m[0][0]
                    if a_task_id == task_id:
                        self.vinfo(
                            f" Bug {bugid} already has the compressed log attached for this task"
                        )
        else:
            self.error(f'More than one bug found for summary: "{bug_summary}"')
            return
        if self.dry_run:
            self.warning(f"Dry-run NOT adding comment to Bug {bugid}: {comment}")
            self.info(f'Dry-run NOT editing ["{filename}"] manifest: "{manifest}"')
            self.info(f'would add skip-if condition: "{skip_if}" # {bug_reference}')
            if task_id is not None and task_id not in attachments:
                self.info("would add compressed log for this task")
            return
        self.add_bug_comment(bugid, comment, meta_bug_id)
        self.info(f"Added comment to Bug {bugid}: {comment}")
        if meta_bug_id is not None:
            self.info(f" Bug {bugid} blocks meta Bug: {meta_bug_id}")
        if task_id is not None and task_id not in attachments:
            self.add_attachment_log_for_task(bugid, task_id)
            self.info("Added compressed log for this task")
        mp = ManifestParser(use_toml=True, document=True)
        manifest_path = os.path.join(self.topsrcdir, os.path.normpath(manifest))
        mp.read(manifest_path)
        document = mp.source_documents[manifest_path]
        add_skip_if(document, filename, skip_if, bug_reference)
        manifest_str = alphabetize_toml_str(document)
        fp = io.open(manifest_path, "w", encoding="utf-8", newline="\n")
        fp.write(manifest_str)
        fp.close()
        self.info(f'Edited ["{filename}"] in manifest: "{manifest}"')
        self.info(f'added skip-if condition: "{skip_if}" # {bug_reference}')

    def get_variants(self):
        """Get mozinfo for each test variant"""

        if len(self.variants) == 0:
            variants_file = "taskcluster/ci/test/variants.yml"
            variants_path = os.path.join(
                self.topsrcdir, os.path.normpath(variants_file)
            )
            fp = io.open(variants_path, "r", encoding="utf-8")
            raw_variants = load(fp, Loader=Loader)
            fp.close()
            for k, v in raw_variants.items():
                mozinfo = k
                if "mozinfo" in v:
                    mozinfo = v["mozinfo"]
                self.variants[k] = mozinfo
        return self.variants

    def get_task_details(self, task_id):
        """Download details for task task_id"""

        if task_id in self.tasks:  # if cached
            task = self.tasks[task_id]
        else:
            try:
                task = get_task(task_id)
            except TaskclusterRestFailure:
                self.warning(f"Task {task_id} no longer exists.")
                return None
            self.tasks[task_id] = task
        return task

    def get_extra(self, task_id):
        """Calculate extra for task task_id"""

        if task_id in self.extras:  # if cached
            extra = self.extras[task_id]
        else:
            self.get_variants()
            task = self.get_task_details(task_id) or {}
            os = None
            os_version = None
            arch = None
            bits = None
            display = None
            runtimes = []
            build_types = []
            test_setting = task.get("extra", {}).get("test-setting", {})
            platform = test_setting.get("platform", {})
            platform_os = platform.get("os", {})
            opt = False
            debug = False
            if "name" in platform_os:
                os = platform_os["name"]
                if os == "windows":
                    os = "win"
                if os == "macosx":
                    os = "mac"
            if "version" in platform_os:
                os_version = platform_os["version"]
                if len(os_version) == 4:
                    os_version = os_version[0:2] + "." + os_version[2:4]
            if "arch" in platform:
                arch = platform["arch"]
                if arch == "x86" or arch.find("32") >= 0:
                    bits = "32"
                if arch == "64" or arch.find("64") >= 0:
                    bits = "64"
            if "display" in platform:
                display = platform["display"]
            if "runtime" in test_setting:
                for k in test_setting["runtime"]:
                    if k in self.variants:
                        runtimes.append(self.variants[k])  # adds mozinfo
            if "build" in test_setting:
                tbuild = test_setting["build"]
                for k in tbuild:
                    if k == "type":
                        if tbuild[k] == "opt":
                            opt = True
                        elif tbuild[k] == "debug":
                            debug = True
                        build_types.append(tbuild[k])
                    else:
                        build_types.append(k)
            unknown = None
            extra = {
                "os": os or unknown,
                "os_version": os_version or unknown,
                "arch": arch or unknown,
                "bits": bits or unknown,
                "display": display or unknown,
                "runtimes": runtimes,
                "opt": opt,
                "debug": debug,
                "build_types": build_types,
            }
            self.extras[task_id] = extra
        return extra
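
    # Illustrative shape of the dict built above (the values shown here are
    # hypothetical):
    #     {"os": "win", "os_version": "11.22", "arch": "x86_64", "bits": "64",
    #      "display": None, "runtimes": [], "opt": True, "debug": False,
    #      "build_types": ["opt"]}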

    def get_opt_for_task(self, task_id):
        extra = self.get_extra(task_id)
        return extra["opt"]

    def task_to_skip_if(self, task_id):
        """Calculate the skip-if condition for failing task task_id"""

        extra = self.get_extra(task_id)
        skip_if = None
        if extra["os"] is not None:
            skip_if = "os == '" + extra["os"] + "'"
            if extra["os_version"] is not None:
                skip_if += " && "
                skip_if += "os_version == '" + extra["os_version"] + "'"
            if extra["bits"] is not None:
                skip_if += " && "
                skip_if += "bits == '" + extra["bits"] + "'"
            if extra["display"] is not None:
                skip_if += " && "
                skip_if += "display == '" + extra["display"] + "'"
            for runtime in extra["runtimes"]:
                skip_if += " && "
                skip_if += runtime
            for build_type in extra["build_types"]:
                skip_if += " && "
                skip_if += build_type
        return skip_if
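
    # Example of a generated condition (hypothetical values): a 64-bit Windows
    # opt task might yield something like
    #     "os == 'win' && os_version == '11.22' && bits == '64' && opt"
    # with any runtime mozinfo names and build types appended using " && ".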

    def get_file_info(self, path, product="Testing", component="General"):
        """
        Get bugzilla product and component for the path.
        Provide defaults (in case command_context is not defined
        or there isn't file info available).
        """
        if path != DEF and self.command_context is not None:
            reader = self.command_context.mozbuild_reader(config_mode="empty")
            info = reader.files_info([path])
            cp = info[path]["BUG_COMPONENT"]
            product = cp.product
            component = cp.component
        return product, component

    def get_filename_in_manifest(self, manifest, path):
        """return relative filename for path in manifest"""

        filename = os.path.basename(path)
        if filename == DEF:
            return filename
        manifest_dir = os.path.dirname(manifest)
        i = 0
        j = min(len(manifest_dir), len(path))
        while i < j and manifest_dir[i] == path[i]:
            i += 1
        if i < len(manifest_dir):
            for _ in range(manifest_dir.count("/", i) + 1):
                filename = "../" + filename
        elif i < len(path):
            filename = path[i + 1 :]
        return filename
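
    # Example (illustrative paths): for manifest "a/b/mochitest.toml" and
    # path "a/b/test_x.html" this returns "test_x.html"; for a failing path
    # in a sibling directory, e.g. "a/c/test_y.html", it returns
    # "../test_y.html" relative to the manifest's directory.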

    def get_push_id(self, revision, repo):
        """Return the push_id for revision and repo (or None)"""

        self.vinfo(f"Retrieving push_id for {repo} revision: {revision} ...")
        if revision in self.push_ids:  # if cached
            push_id = self.push_ids[revision]
        else:
            push_id = None
            push_url = f"https://treeherder.mozilla.org/api/project/{repo}/push/"
            params = {}
            params["full"] = "true"
            params["count"] = 10
            params["revision"] = revision
            r = requests.get(push_url, headers=self.headers, params=params)
            if r.status_code != 200:
                self.warning(f"FAILED to query Treeherder = {r} for {r.url}")
            else:
                response = r.json()
                if "results" in response:
                    results = response["results"]
                    if len(results) > 0:
                        r0 = results[0]
                        if "id" in r0:
                            push_id = r0["id"]
            self.push_ids[revision] = push_id
        return push_id

    def get_job_id(self, push_id, task_id):
        """Return the job_id for push_id, task_id (or None)"""

        self.vinfo(f"Retrieving job_id for push_id: {push_id}, task_id: {task_id} ...")
        if push_id in self.job_ids:  # if cached
            job_id = self.job_ids[push_id]
        else:
            job_id = None
            params = {}
            params["push_id"] = push_id
            r = requests.get(self.jobs_url, headers=self.headers, params=params)
            if r.status_code != 200:
                self.warning(f"FAILED to query Treeherder = {r} for {r.url}")
            else:
                response = r.json()
                if "results" in response:
                    results = response["results"]
                    if len(results) > 0:
                        for result in results:
                            if len(result) > 14:
                                if result[14] == task_id:
                                    job_id = result[1]
                                    break
            self.job_ids[push_id] = job_id
        return job_id

    def get_bug_suggestions(self, repo, job_id, path):
        """
        Return the (suggestions_url, line_number, line, log_url)
        for the given repo and job_id
        """
        self.vinfo(
            f"Retrieving bug_suggestions for {repo} job_id: {job_id}, path: {path} ..."
        )
        suggestions_url = f"https://treeherder.mozilla.org/api/project/{repo}/jobs/{job_id}/bug_suggestions/"
        line_number = None
        line = None
        log_url = None
        r = requests.get(suggestions_url, headers=self.headers)
        if r.status_code != 200:
            self.warning(f"FAILED to query Treeherder = {r} for {r.url}")
        else:
            response = r.json()
            if len(response) > 0:
                for sugg in response:
                    if sugg["path_end"] == path:
                        line_number = sugg["line_number"] + 1
                        line = sugg["search"]
                        log_url = f"https://treeherder.mozilla.org/logviewer?repo={repo}&job_id={job_id}&lineNumber={line_number}"
                        break
        rv = (suggestions_url, line_number, line, log_url)
        return rv

    def read_json(self, filename):
        """read data as JSON from filename"""
        fp = io.open(filename, "r", encoding="utf-8")
        data = json.load(fp)
        fp.close()
        return data

    def write_json(self, filename, data):
        """saves data as JSON to filename"""
        fp = io.open(filename, "w", encoding="utf-8")
        json.dump(data, fp, indent=2, sort_keys=True)
        fp.close()

    def write_tasks(self, save_tasks, tasks):
        """saves tasks as JSON to save_tasks"""
        jtasks = []
        for task in tasks:
            if not isinstance(task, TestTask):
                continue
            jtask = {}
            jtask["id"] = task.id
            jtask["label"] = task.label
            jtask["duration"] = task.duration
            jtask["result"] = task.result
            jtask["state"] = task.state
            jtask["extra"] = self.get_extra(task.id)
            jtags = {}
            for k, v in task.tags.items():
                if k == "createdForUser":
                    jtags[k] = "ci@mozilla.com"
                else:
                    jtags[k] = v
            jtask["tags"] = jtags
            jtask["tier"] = task.tier
            jtask["results"] = [
                {"group": r.group, "ok": r.ok, "duration": r.duration}
                for r in task.results
            ]
            jtask["errors"] = None  # Bug with task.errors property??
            jft = {}
            for k in task.failure_types:
                jft[k] = [[f[0], f[1].value] for f in task.failure_types[k]]
            jtask["failure_types"] = jft
            jtasks.append(jtask)
        self.write_json(save_tasks, jtasks)

    def label_to_platform_testname(self, label):
        """convert from label to platform, testname for mach command line"""
        platform = None
        testname = None
        platform_details = label.split("/")
        if len(platform_details) == 2:
            platform, details = platform_details
            words = details.split("-")
            if len(words) > 2:
                platform += "/" + words.pop(0)  # opt or debug
                try:
                    _chunk = int(words[-1])
                    words.pop()
                except ValueError:
                    pass
                words.pop()  # remove test suffix
                testname = "-".join(words)
            else:
                platform = None
        return platform, testname

    def add_attachment_log_for_task(self, bugid, task_id):
        """Adds compressed log for this task to bugid"""

        log_url = f"https://firefox-ci-tc.services.mozilla.com/api/queue/v1/task/{task_id}/artifacts/public/logs/live_backing.log"
        r = requests.get(log_url, headers=self.headers)
        if r.status_code != 200:
            self.error(f"Unable to get log for task: {task_id}")
            return
        attach_fp = tempfile.NamedTemporaryFile()
        fp = gzip.open(attach_fp, "wb")
        fp.write(r.text.encode("utf-8"))
        fp.close()
        self._initialize_bzapi()
        description = ATTACHMENT_DESCRIPTION + task_id
        file_name = TASK_LOG + ".gz"
        comment = "Added compressed log"
        content_type = "application/gzip"
        try:
            self._bzapi.attachfile(
                [bugid],
                attach_fp.name,
                description,
                file_name=file_name,
                comment=comment,
                content_type=content_type,
                is_private=False,
            )
        except Fault:
            pass  # Fault expected: Failed to fetch key 9372091 from network storage: The specified key does not exist.