no bug - Bumping Firefox l10n changesets r=release a=l10n-bump DONTBUILD CLOSED TREE
[gecko.git] / testing / testinfo.py
blob80846d10d841ed98a78124137daa8aad3840db66
1 # This Source Code Form is subject to the terms of the Mozilla Public
2 # License, v. 2.0. If a copy of the MPL was not distributed with this
3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
5 import datetime
6 import errno
7 import json
8 import os
9 import posixpath
10 import re
11 import subprocess
12 from collections import defaultdict
14 import mozpack.path as mozpath
15 import requests
16 import six.moves.urllib_parse as urlparse
17 from mozbuild.base import MachCommandConditions as conditions
18 from mozbuild.base import MozbuildObject
19 from mozfile import which
20 from moztest.resolve import TestManifestLoader, TestResolver
# Referer value pointing at the test-info documentation page.
# NOTE(review): not referenced anywhere in this file — confirm external use
# (e.g. request headers added elsewhere) before removing.
REFERER = "https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Test-Info"
class TestInfo(object):
    """
    Support 'mach test-info'.

    Base class holding state shared by the sub-commands: the verbosity
    flag and a MozbuildObject for the enclosing source checkout.
    """

    def __init__(self, verbose):
        # When True, log_verbose() prints progress/diagnostic messages.
        self.verbose = verbose
        here = os.path.abspath(os.path.dirname(__file__))
        # Build-system entry point for the checkout containing this file.
        self.build_obj = MozbuildObject.from_environment(cwd=here)

    def log_verbose(self, what):
        # Print `what` only when verbose mode was requested.
        if self.verbose:
            print(what)
class TestInfoTests(TestInfo):
    """
    Support 'mach test-info tests': Detailed report of specified tests.
    """

    def __init__(self, verbose):
        """Locate the version-control tool (hg or git) for this checkout.

        Raises OSError(ENOENT) if the checkout's VCS tool is not on PATH.
        """
        TestInfo.__init__(self, verbose)

        self._hg = None
        if conditions.is_hg(self.build_obj):
            self._hg = which("hg")
            if not self._hg:
                raise OSError(errno.ENOENT, "Could not find 'hg' on PATH.")

        self._git = None
        if conditions.is_git(self.build_obj):
            self._git = which("git")
            if not self._git:
                raise OSError(errno.ENOENT, "Could not find 'git' on PATH.")
60 def find_in_hg_or_git(self, test_name):
61 if self._hg:
62 cmd = [self._hg, "files", "-I", test_name]
63 elif self._git:
64 cmd = [self._git, "ls-files", test_name]
65 else:
66 return None
67 try:
68 out = subprocess.check_output(cmd, universal_newlines=True).splitlines()
69 except subprocess.CalledProcessError:
70 out = None
71 return out
73 def set_test_name(self):
74 # Generating a unified report for a specific test is complicated
75 # by differences in the test name used in various data sources.
76 # Consider:
77 # - It is often convenient to request a report based only on
78 # a short file name, rather than the full path;
79 # - Bugs may be filed in bugzilla against a simple, short test
80 # name or the full path to the test;
81 # This function attempts to find appropriate names for different
82 # queries based on the specified test name.
84 # full_test_name is full path to file in hg (or git)
85 self.full_test_name = None
86 out = self.find_in_hg_or_git(self.test_name)
87 if out and len(out) == 1:
88 self.full_test_name = out[0]
89 elif out and len(out) > 1:
90 print("Ambiguous test name specified. Found:")
91 for line in out:
92 print(line)
93 else:
94 out = self.find_in_hg_or_git("**/%s*" % self.test_name)
95 if out and len(out) == 1:
96 self.full_test_name = out[0]
97 elif out and len(out) > 1:
98 print("Ambiguous test name. Found:")
99 for line in out:
100 print(line)
101 if self.full_test_name:
102 self.full_test_name.replace(os.sep, posixpath.sep)
103 print("Found %s in source control." % self.full_test_name)
104 else:
105 print("Unable to validate test name '%s'!" % self.test_name)
106 self.full_test_name = self.test_name
108 # search for full_test_name in test manifests
109 here = os.path.abspath(os.path.dirname(__file__))
110 resolver = TestResolver.from_environment(
111 cwd=here, loader_cls=TestManifestLoader
113 relpath = self.build_obj._wrap_path_argument(self.full_test_name).relpath()
114 tests = list(resolver.resolve_tests(paths=[relpath]))
115 if len(tests) == 1:
116 relpath = self.build_obj._wrap_path_argument(tests[0]["manifest"]).relpath()
117 print("%s found in manifest %s" % (self.full_test_name, relpath))
118 if tests[0].get("flavor"):
119 print(" flavor: %s" % tests[0]["flavor"])
120 if tests[0].get("skip-if"):
121 print(" skip-if: %s" % tests[0]["skip-if"])
122 if tests[0].get("fail-if"):
123 print(" fail-if: %s" % tests[0]["fail-if"])
124 elif len(tests) == 0:
125 print("%s not found in any test manifest!" % self.full_test_name)
126 else:
127 print("%s found in more than one manifest!" % self.full_test_name)
129 # short_name is full_test_name without path
130 self.short_name = None
131 name_idx = self.full_test_name.rfind("/")
132 if name_idx > 0:
133 self.short_name = self.full_test_name[name_idx + 1 :]
134 if self.short_name and self.short_name == self.test_name:
135 self.short_name = None
137 def get_platform(self, record):
138 if "platform" in record["build"]:
139 platform = record["build"]["platform"]
140 else:
141 platform = "-"
142 platform_words = platform.split("-")
143 types_label = ""
144 # combine run and build types and eliminate duplicates
145 run_types = []
146 if "run" in record and "type" in record["run"]:
147 run_types = record["run"]["type"]
148 run_types = run_types if isinstance(run_types, list) else [run_types]
149 build_types = []
150 if "build" in record and "type" in record["build"]:
151 build_types = record["build"]["type"]
152 build_types = (
153 build_types if isinstance(build_types, list) else [build_types]
155 run_types = list(set(run_types + build_types))
156 # '1proc' is used as a treeherder label but does not appear in run types
157 if "e10s" not in run_types:
158 run_types = run_types + ["1proc"]
159 for run_type in run_types:
160 # chunked is not interesting
161 if run_type == "chunked":
162 continue
163 # e10s is the default: implied
164 if run_type == "e10s":
165 continue
166 # sometimes a build/run type is already present in the build platform
167 if run_type in platform_words:
168 continue
169 if types_label:
170 types_label += "-"
171 types_label += run_type
172 return "%s/%s:" % (platform, types_label)
    def report_bugs(self):
        """Report open bugs matching any known name for this test."""
        # Bugzilla quicksearch treats comma-separated terms as alternatives,
        # so search the full path plus the requested/short names when set.
        search = self.full_test_name
        if self.test_name:
            search = "%s,%s" % (search, self.test_name)
        if self.short_name:
            search = "%s,%s" % (search, self.short_name)
        payload = {"quicksearch": search, "include_fields": "id,summary"}
        response = requests.get("https://bugzilla.mozilla.org/rest/bug", payload)
        response.raise_for_status()
        json_response = response.json()
        print("\nBugzilla quick search for '%s':" % search)
        if "bugs" in json_response:
            for bug in json_response["bugs"]:
                print("Bug %s: %s" % (bug["id"], bug["summary"]))
        else:
            print("No bugs found.")
    def report(
        self,
        test_names,
        start,
        end,
        show_info,
        show_bugs,
    ):
        """
        Print a detailed report for each named test: resolved names,
        manifest info, and (optionally) matching open bugs.
        """
        self.start = start
        self.end = end
        self.show_info = show_info

        if not self.show_info and not show_bugs:
            # by default, show everything
            self.show_info = True
            show_bugs = True

        for test_name in test_names:
            print("===== %s =====" % test_name)
            self.test_name = test_name
            # Very short names would match far too many files/bugs.
            if len(self.test_name) < 6:
                print("'%s' is too short for a test name!" % self.test_name)
                continue
            self.set_test_name()
            if show_bugs:
                self.report_bugs()
class TestInfoReport(TestInfo):
    """
    Support 'mach test-info report': Report of test runs summarized by
    manifest and component.
    """

    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)
        # NOTE(review): self.threads is never used in this file — confirm
        # external use before removing.
        self.threads = []
230 def update_report(self, by_component, result, path_mod):
231 def update_item(item, label, value):
232 # It is important to include any existing item value in case ActiveData
233 # returns multiple records for the same test; that can happen if the report
234 # sometimes maps more than one ActiveData record to the same path.
235 new_value = item.get(label, 0) + value
236 if type(new_value) == int:
237 item[label] = new_value
238 else:
239 item[label] = float(round(new_value, 2)) # pylint: disable=W1633
241 if "test" in result and "tests" in by_component:
242 test = result["test"]
243 if path_mod:
244 test = path_mod(test)
245 for bc in by_component["tests"]:
246 for item in by_component["tests"][bc]:
247 if test == item["test"]:
248 # pylint: disable=W1633
249 seconds = float(round(result.get("duration", 0), 2))
250 update_item(item, "total run time, seconds", seconds)
251 update_item(item, "total runs", result.get("count", 0))
252 update_item(item, "skipped runs", result.get("skips", 0))
253 update_item(item, "failed runs", result.get("failures", 0))
254 return True
255 return False
257 def path_mod_reftest(self, path):
258 # "<path1> == <path2>" -> "<path1>"
259 path = path.split(" ")[0]
260 # "<path>?<params>" -> "<path>"
261 path = path.split("?")[0]
262 # "<path>#<fragment>" -> "<path>"
263 path = path.split("#")[0]
264 return path
266 def path_mod_jsreftest(self, path):
267 # "<path>;assert" -> "<path>"
268 path = path.split(";")[0]
269 return path
271 def path_mod_marionette(self, path):
272 # "<path> <test-name>" -> "<path>"
273 path = path.split(" ")[0]
274 # "part1\part2" -> "part1/part2"
275 path = path.replace("\\", os.path.sep)
276 return path
278 def path_mod_wpt(self, path):
279 if path[0] == os.path.sep:
280 # "/<path>" -> "<path>"
281 path = path[1:]
282 # "<path>" -> "testing/web-platform/tests/<path>"
283 path = os.path.join("testing", "web-platform", "tests", path)
284 # "<path>?<params>" -> "<path>"
285 path = path.split("?")[0]
286 return path
288 def path_mod_jittest(self, path):
289 # "part1\part2" -> "part1/part2"
290 path = path.replace("\\", os.path.sep)
291 # "<path>" -> "js/src/jit-test/tests/<path>"
292 return os.path.join("js", "src", "jit-test", "tests", path)
294 def path_mod_xpcshell(self, path):
295 # <manifest>.ini:<path> -> "<path>"
296 path = path.split(".ini:")[-1]
297 return path
299 def description(
300 self,
301 components,
302 flavor,
303 subsuite,
304 paths,
305 show_manifests,
306 show_tests,
307 show_summary,
308 show_annotations,
309 filter_values,
310 filter_keys,
311 start_date,
312 end_date,
314 # provide a natural language description of the report options
315 what = []
316 if show_manifests:
317 what.append("test manifests")
318 if show_tests:
319 what.append("tests")
320 if show_annotations:
321 what.append("test manifest annotations")
322 if show_summary and len(what) == 0:
323 what.append("summary of tests only")
324 if len(what) > 1:
325 what[-1] = "and " + what[-1]
326 what = ", ".join(what)
327 d = "Test summary report for " + what
328 if components:
329 d += ", in specified components (%s)" % components
330 else:
331 d += ", in all components"
332 if flavor:
333 d += ", in specified flavor (%s)" % flavor
334 if subsuite:
335 d += ", in specified subsuite (%s)" % subsuite
336 if paths:
337 d += ", on specified paths (%s)" % paths
338 if filter_values:
339 d += ", containing '%s'" % filter_values
340 if filter_keys:
341 d += " in manifest keys '%s'" % filter_keys
342 else:
343 d += " in any part of manifest entry"
344 d += ", including historical run-time data for the last "
346 start = datetime.datetime.strptime(start_date, "%Y-%m-%d")
347 end = datetime.datetime.strptime(end_date, "%Y-%m-%d")
348 d += "%s days on trunk (autoland/m-c)" % ((end - start).days)
349 d += " as of %s." % end_date
350 return d
352 # TODO: this is hacked for now and very limited
353 def parse_test(self, summary):
354 if summary.endswith("single tracking bug"):
355 name_part = summary.split("|")[0] # remove 'single tracking bug'
356 name_part.strip()
357 return name_part.split()[-1] # get just the test name, not extra words
358 return None
    def get_runcount_data(self, start, end):
        """Fetch per-day run counts and squash them into one 30-day table."""
        # TODO: use start/end properly
        runcounts = self.get_runcounts()
        runcounts = self.squash_runcounts(runcounts, days=30)
        return runcounts
    def get_testinfoall_index_url(self):
        """
        Return the URL of the latest test-run-info.json artifact produced
        by the mozilla-central test-info-all task ("" if none is found).
        """
        # Imported lazily so the taskcluster dependency is only needed here.
        import taskcluster

        queue = taskcluster.Queue()
        index = taskcluster.Index(
            {
                "rootUrl": "https://firefox-ci-tc.services.mozilla.com",
            }
        )
        route = "gecko.v2.mozilla-central.latest.source.test-info-all"

        task_id = index.findTask(route)["taskId"]
        artifacts = queue.listLatestArtifacts(task_id)["artifacts"]

        url = ""
        for artifact in artifacts:
            if artifact["name"].endswith("test-run-info.json"):
                url = queue.buildUrl("getLatestArtifact", task_id, artifact["name"])
                break
        return url
387 def get_runcounts(self):
388 testrundata = {}
389 # get historical data from test-info job artifact; if missing get fresh
390 try:
391 url = self.get_testinfoall_index_url()
392 r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
393 r.raise_for_status()
394 testrundata = r.json()
395 except Exception:
396 pass
398 # fill in any holes we have
399 endday = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(
400 days=1
402 startday = endday - datetime.timedelta(days=30)
403 while startday < endday:
404 nextday = startday + datetime.timedelta(days=1)
405 retries = 2
406 done = False
407 if (
408 str(nextday) not in testrundata.keys()
409 or testrundata[str(nextday)] == {}
411 while not done:
412 url = "https://treeherder.mozilla.org/api/groupsummary/"
413 url += "?startdate=%s&enddate=%s" % (
414 startday.date(),
415 nextday.date(),
417 try:
418 r = requests.get(
419 url, headers={"User-agent": "mach-test-info/1.0"}
421 done = True
422 except requests.exceptions.HTTPError:
423 retries -= 1
424 if retries <= 0:
425 r.raise_for_status()
426 try:
427 testrundata[str(nextday.date())] = r.json()
428 except json.decoder.JSONDecodeError:
429 print(
430 "Warning unable to retrieve (from treeherder's groupsummary api) testrun data for date: %s, skipping for now"
431 % nextday.date()
433 testrundata[str(nextday.date())] = {}
434 continue
435 startday = nextday
437 return testrundata
439 def squash_runcounts(self, runcounts, days=30):
440 # squash all testrundata together into 1 big happy family for the last X days
441 endday = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(
442 days=1
444 oldest = endday - datetime.timedelta(days=days)
446 testgroup_runinfo = defaultdict(lambda: defaultdict(int))
448 retVal = {}
449 for datekey in runcounts.keys():
450 # strip out older days
451 if datetime.date.fromisoformat(datekey) < oldest.date():
452 continue
454 jtn = runcounts[datekey]["job_type_names"]
455 for m in runcounts[datekey]["manifests"]:
456 man_name = list(m.keys())[0]
458 for job_type_id, result, classification, count in m[man_name]:
459 # format: job_type_name, result, classification, count
460 # find matching jtn, result, classification and increment 'count'
461 job_name = jtn[job_type_id]
462 key = (job_name, result, classification)
463 testgroup_runinfo[man_name][key] += count
465 for m in testgroup_runinfo:
466 retVal[m] = [
467 list(x) + [testgroup_runinfo[m][x]] for x in testgroup_runinfo[m]
469 return retVal
471 def get_intermittent_failure_data(self, start, end):
472 retVal = {}
474 # get IFV bug list
475 # i.e. https://th.m.o/api/failures/?startday=2022-06-22&endday=2022-06-29&tree=all
476 url = (
477 "https://treeherder.mozilla.org/api/failures/?startday=%s&endday=%s&tree=trunk"
478 % (start, end)
480 r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
481 if_data = r.json()
482 buglist = [x["bug_id"] for x in if_data]
484 # get bug data for summary, 800 bugs at a time
485 # i.e. https://b.m.o/rest/bug?include_fields=id,product,component,summary&id=1,2,3...
486 max_bugs = 800
487 bug_data = []
488 fields = ["id", "product", "component", "summary"]
489 for bug_index in range(0, len(buglist), max_bugs):
490 bugs = [str(x) for x in buglist[bug_index:max_bugs]]
491 url = "https://bugzilla.mozilla.org/rest/bug?include_fields=%s&id=%s" % (
492 ",".join(fields),
493 ",".join(bugs),
495 r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
496 data = r.json()
497 if data and "bugs" in data.keys():
498 bug_data.extend(data["bugs"])
500 # for each summary, parse filename, store component
501 # IF we find >1 bug with same testname, for now summarize as one
502 for bug in bug_data:
503 test_name = self.parse_test(bug["summary"])
504 if not test_name:
505 continue
507 c = int([x["bug_count"] for x in if_data if x["bug_id"] == bug["id"]][0])
508 if test_name not in retVal.keys():
509 retVal[test_name] = {
510 "id": bug["id"],
511 "count": 0,
512 "product": bug["product"],
513 "component": bug["component"],
515 retVal[test_name]["count"] += c
517 if bug["product"] != retVal[test_name]["product"]:
518 print(
519 "ERROR | %s | mismatched bugzilla product, bugzilla (%s) != repo (%s)"
520 % (bug["id"], bug["product"], retVal[test_name]["product"])
522 if bug["component"] != retVal[test_name]["component"]:
523 print(
524 "ERROR | %s | mismatched bugzilla component, bugzilla (%s) != repo (%s)"
525 % (bug["id"], bug["component"], retVal[test_name]["component"])
527 return retVal
    def report(
        self,
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        filter_values,
        filter_keys,
        show_components,
        output_file,
        start,
        end,
        show_testruns,
    ):
        """
        Build and emit the 'mach test-info report' summary.

        Resolves tests (optionally restricted by paths/flavor/subsuite),
        groups manifests and tests by bug component, folds in intermittent
        failure counts and (optionally) run counts, then writes the JSON
        report via write_report().
        """

        def matches_filters(test):
            """
            Return True if all of the requested filter_values are found in this test;
            if filter_keys are specified, restrict search to those test keys.
            """
            for value in filter_values:
                value_found = False
                for key in test:
                    if not filter_keys or key in filter_keys:
                        if re.search(value, test[key]):
                            value_found = True
                            break
                if not value_found:
                    return False
            return True

        start_time = datetime.datetime.now()

        # Ensure useful report by default
        if (
            not show_manifests
            and not show_tests
            and not show_summary
            and not show_annotations
        ):
            show_manifests = True
            show_summary = True

        by_component = {}
        # Comma-separated CLI arguments become lists.
        if components:
            components = components.split(",")
        if filter_keys:
            filter_keys = filter_keys.split(",")
        if filter_values:
            filter_values = filter_values.split(",")
        else:
            filter_values = []
        display_keys = (filter_keys or []) + ["skip-if", "fail-if", "fails-if"]
        display_keys = set(display_keys)
        ifd = self.get_intermittent_failure_data(start, end)

        if show_testruns:
            runcount = self.get_runcount_data(start, end)

        print("Finding tests...")
        here = os.path.abspath(os.path.dirname(__file__))
        resolver = TestResolver.from_environment(
            cwd=here, loader_cls=TestManifestLoader
        )
        tests = list(
            resolver.resolve_tests(paths=paths, flavor=flavor, subsuite=subsuite)
        )

        # Collect the set of manifests, naming included ones ancestor:child.
        manifest_paths = set()
        for t in tests:
            if t.get("manifest", None):
                manifest_path = t["manifest"]
                if t.get("ancestor_manifest", None):
                    manifest_path = "%s:%s" % (t["ancestor_manifest"], t["manifest"])
                manifest_paths.add(manifest_path)
        manifest_count = len(manifest_paths)
        print(
            "Resolver found {} tests, {} manifests".format(len(tests), manifest_count)
        )

        if show_manifests:
            topsrcdir = self.build_obj.topsrcdir
            by_component["manifests"] = {}
            manifest_paths = list(manifest_paths)
            manifest_paths.sort()
            relpaths = []
            # First pass: collect srcdir-relative paths for the moz.build
            # Files() metadata query; skip manifests outside the srcdir.
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, topsrcdir)
                if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
                    continue
                relpaths.append(relpath)
            reader = self.build_obj.mozbuild_reader(config_mode="empty")
            files_info = reader.files_info(relpaths)
            # Second pass: group each manifest under its BUG_COMPONENT.
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, topsrcdir)
                if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
                    continue
                manifest_info = None
                if relpath in files_info:
                    bug_component = files_info[relpath].get("BUG_COMPONENT")
                    if bug_component:
                        key = "{}::{}".format(
                            bug_component.product, bug_component.component
                        )
                    else:
                        key = "<unknown bug component>"
                    if (not components) or (key in components):
                        manifest_info = {"manifest": relpath, "tests": 0, "skipped": 0}
                        rkey = key if show_components else "all"
                        if rkey in by_component["manifests"]:
                            by_component["manifests"][rkey].append(manifest_info)
                        else:
                            by_component["manifests"][rkey] = [manifest_info]
                if manifest_info:
                    # Count tests (and skip-if'd tests) in this manifest.
                    for t in tests:
                        if t["manifest"] == manifest_path:
                            manifest_info["tests"] += 1
                            if t.get("skip-if"):
                                manifest_info["skipped"] += 1
            for key in by_component["manifests"]:
                by_component["manifests"][key].sort(key=lambda k: k["manifest"])

        if show_tests:
            by_component["tests"] = {}

        if show_tests or show_summary or show_annotations:
            test_count = 0
            failed_count = 0
            skipped_count = 0
            annotation_count = 0
            condition_count = 0
            component_set = set()
            relpaths = []
            # NOTE(review): this local `conditions` dict shadows the
            # module-level `conditions` (MachCommandConditions) import
            # within this function — confirm intentional.
            conditions = {}
            known_unconditional_annotations = ["skip", "fail", "asserts", "random"]
            known_conditional_annotations = [
                "skip-if",
                "fail-if",
                "run-if",
                "fails-if",
                "fuzzy-if",
                "random-if",
                "asserts-if",
            ]
            for t in tests:
                relpath = t.get("srcdir_relpath")
                relpaths.append(relpath)
            reader = self.build_obj.mozbuild_reader(config_mode="empty")
            files_info = reader.files_info(relpaths)
            for t in tests:
                if not matches_filters(t):
                    continue
                if "referenced-test" in t:
                    # Avoid double-counting reftests: disregard reference file entries
                    continue
                if show_annotations:
                    for key in t:
                        if key in known_unconditional_annotations:
                            annotation_count += 1
                        if key in known_conditional_annotations:
                            annotation_count += 1
                            # Here 'key' is a manifest annotation type like 'skip-if' and t[key]
                            # is the associated condition. For example, the manifestparser
                            # manifest annotation, "skip-if = os == 'win'", is expected to be
                            # encoded as t['skip-if'] = "os == 'win'".
                            # To allow for reftest manifests, t[key] may have multiple entries
                            # separated by ';', each corresponding to a condition for that test
                            # and annotation type. For example,
                            # "skip-if(Android&&webrender) skip-if(OSX)", would be
                            # encoded as t['skip-if'] = "Android&&webrender;OSX".
                            annotation_conditions = t[key].split(";")

                            # if key has \n in it, we need to strip it. for manifestparser format
                            # 1) from the beginning of the line
                            # 2) different conditions if in the middle of the line
                            annotation_conditions = [
                                x.strip("\n") for x in annotation_conditions
                            ]
                            temp = []
                            for condition in annotation_conditions:
                                temp.extend(condition.split("\n"))
                            annotation_conditions = temp

                            for condition in annotation_conditions:
                                condition_count += 1
                                # Trim reftest fuzzy-if ranges: everything after the first comma
                                # eg. "Android,0-2,1-3" -> "Android"
                                condition = condition.split(",")[0]
                                if condition not in conditions:
                                    conditions[condition] = 0
                                conditions[condition] += 1
                test_count += 1
                relpath = t.get("srcdir_relpath")
                if relpath in files_info:
                    bug_component = files_info[relpath].get("BUG_COMPONENT")
                    if bug_component:
                        key = "{}::{}".format(
                            bug_component.product, bug_component.component
                        )
                    else:
                        key = "<unknown bug component>"
                    if (not components) or (key in components):
                        component_set.add(key)
                        test_info = {"test": relpath}
                        for test_key in display_keys:
                            value = t.get(test_key)
                            if value:
                                test_info[test_key] = value
                        if t.get("fail-if"):
                            failed_count += 1
                        if t.get("fails-if"):
                            failed_count += 1
                        if t.get("skip-if"):
                            skipped_count += 1

                        if "manifest_relpath" in t and "manifest" in t:
                            if "web-platform" in t["manifest_relpath"]:
                                test_info["manifest"] = [t["manifest"]]
                            else:
                                test_info["manifest"] = [t["manifest_relpath"]]

                            # handle included manifests as ancestor:child
                            if t.get("ancestor_manifest", None):
                                test_info["manifest"] = [
                                    "%s:%s"
                                    % (t["ancestor_manifest"], test_info["manifest"][0])
                                ]

                        # add in intermittent failure data
                        if ifd.get(relpath):
                            if_data = ifd.get(relpath)
                            test_info["failure_count"] = if_data["count"]
                            if show_testruns:
                                total_runs = 0
                                # NOTE(review): assumes test_info["manifest"]
                                # exists and each entry is a runcount key —
                                # a missing manifest_relpath or unknown
                                # manifest would raise KeyError here; confirm.
                                for m in test_info["manifest"]:
                                    total_runs += sum([x[3] for x in runcount[m]])
                                if total_runs > 0:
                                    test_info["total_runs"] = total_runs

                        if show_tests:
                            rkey = key if show_components else "all"
                            if rkey in by_component["tests"]:
                                # Avoid duplicates: Some test paths have multiple TestResolver
                                # entries, as when a test is included by multiple manifests.
                                found = False
                                for ctest in by_component["tests"][rkey]:
                                    if ctest["test"] == test_info["test"]:
                                        found = True
                                        break
                                if not found:
                                    by_component["tests"][rkey].append(test_info)
                                else:
                                    # Merge the additional manifest into the
                                    # existing entry for this test path.
                                    for ti in by_component["tests"][rkey]:
                                        if ti["test"] == test_info["test"]:
                                            if (
                                                test_info["manifest"][0]
                                                not in ti["manifest"]
                                            ):
                                                ti_manifest = test_info["manifest"]
                                                if test_info.get(
                                                    "ancestor_manifest", None
                                                ):
                                                    ti_manifest = "%s:%s" % (
                                                        test_info["ancestor_manifest"],
                                                        ti_manifest,
                                                    )
                                                ti["manifest"].extend(ti_manifest)
                            else:
                                by_component["tests"][rkey] = [test_info]
            if show_tests:
                for key in by_component["tests"]:
                    by_component["tests"][key].sort(key=lambda k: k["test"])

        by_component["description"] = self.description(
            components,
            flavor,
            subsuite,
            paths,
            show_manifests,
            show_tests,
            show_summary,
            show_annotations,
            filter_values,
            filter_keys,
            start,
            end,
        )

        if show_summary:
            by_component["summary"] = {}
            by_component["summary"]["components"] = len(component_set)
            by_component["summary"]["manifests"] = manifest_count
            by_component["summary"]["tests"] = test_count
            by_component["summary"]["failed tests"] = failed_count
            by_component["summary"]["skipped tests"] = skipped_count

        if show_annotations:
            by_component["annotations"] = {}
            by_component["annotations"]["total annotations"] = annotation_count
            by_component["annotations"]["total conditions"] = condition_count
            by_component["annotations"]["unique conditions"] = len(conditions)
            by_component["annotations"]["conditions"] = conditions

        self.write_report(by_component, output_file)

        end_time = datetime.datetime.now()
        self.log_verbose(
            "%d seconds total to generate report"
            % (end_time - start_time).total_seconds()
        )
843 def write_report(self, by_component, output_file):
844 json_report = json.dumps(by_component, indent=2, sort_keys=True)
845 if output_file:
846 output_file = os.path.abspath(output_file)
847 output_dir = os.path.dirname(output_file)
848 if not os.path.isdir(output_dir):
849 os.makedirs(output_dir)
851 with open(output_file, "w") as f:
852 f.write(json_report)
853 else:
854 print(json_report)
    def report_diff(self, before, after, output_file):
        """
        Support for 'mach test-info report-diff'.

        Compares two previously generated reports (local path or URL) and
        writes a report of summary deltas plus added/deleted tests.
        """

        def get_file(path_or_url):
            # Accept either a URL (anything with a scheme) or a local path.
            if urlparse.urlparse(path_or_url).scheme:
                response = requests.get(path_or_url)
                response.raise_for_status()
                return json.loads(response.text)
            with open(path_or_url) as f:
                return json.load(f)

        report1 = get_file(before)
        report2 = get_file(after)

        by_component = {"tests": {}, "summary": {}}
        self.diff_summaries(by_component, report1["summary"], report2["summary"])
        self.diff_all_components(by_component, report1["tests"], report2["tests"])
        self.write_report(by_component, output_file)
877 def diff_summaries(self, by_component, summary1, summary2):
879 Update by_component with comparison of summaries.
881 all_keys = set(summary1.keys()) | set(summary2.keys())
882 for key in all_keys:
883 delta = summary2.get(key, 0) - summary1.get(key, 0)
884 by_component["summary"]["%s delta" % key] = delta
    def diff_all_components(self, by_component, tests1, tests2):
        """
        Update by_component with any added/deleted tests, for all components.

        Also records total "added tests"/"deleted tests" counts in the
        summary (accumulated by diff_component via self.*_count).
        """
        self.added_count = 0
        self.deleted_count = 0
        # Components present before (possibly also after).
        for component in tests1:
            component1 = tests1[component]
            component2 = [] if component not in tests2 else tests2[component]
            self.diff_component(by_component, component, component1, component2)
        # Components only present after.
        for component in tests2:
            if component not in tests1:
                component2 = tests2[component]
                self.diff_component(by_component, component, [], component2)
        by_component["summary"]["added tests"] = self.added_count
        by_component["summary"]["deleted tests"] = self.deleted_count
903 def diff_component(self, by_component, component, component1, component2):
905 Update by_component[component] with any added/deleted tests for the
906 named component.
907 "added": tests found in component2 but missing from component1.
908 "deleted": tests found in component1 but missing from component2.
910 tests1 = set([t["test"] for t in component1])
911 tests2 = set([t["test"] for t in component2])
912 deleted = tests1 - tests2
913 added = tests2 - tests1
914 if deleted or added:
915 by_component["tests"][component] = {}
916 if deleted:
917 by_component["tests"][component]["deleted"] = sorted(list(deleted))
918 if added:
919 by_component["tests"][component]["added"] = sorted(list(added))
920 self.added_count += len(added)
921 self.deleted_count += len(deleted)
922 common = len(tests1.intersection(tests2))
923 self.log_verbose(
924 "%s: %d deleted, %d added, %d common"
925 % (component, len(deleted), len(added), common)