Bug 1849098 - Disable browser_searchMode_sessionStore.js for frequent failures. r...
[gecko.git] / testing / testinfo.py
blob9f93a3507ea965a8f4341bcf94ef40dfc80b1407
1 # This Source Code Form is subject to the terms of the Mozilla Public
2 # License, v. 2.0. If a copy of the MPL was not distributed with this
3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
5 import datetime
6 import errno
7 import json
8 import os
9 import posixpath
10 import re
11 import subprocess
12 from collections import defaultdict
14 import mozpack.path as mozpath
15 import requests
16 import six.moves.urllib_parse as urlparse
17 from mozbuild.base import MachCommandConditions as conditions
18 from mozbuild.base import MozbuildObject
19 from mozfile import which
20 from moztest.resolve import TestManifestLoader, TestResolver
22 REFERER = "https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Test-Info"
23 MAX_DAYS = 30
class TestInfo(object):
    """
    Support 'mach test-info'.
    """

    def __init__(self, verbose):
        # Build-system context is resolved relative to this file's directory.
        self.verbose = verbose
        src_dir = os.path.abspath(os.path.dirname(__file__))
        self.build_obj = MozbuildObject.from_environment(cwd=src_dir)

    def log_verbose(self, what):
        """Print `what` only when verbose output was requested."""
        if not self.verbose:
            return
        print(what)
class TestInfoTests(TestInfo):
    """
    Support 'mach test-info tests': Detailed report of specified tests.
    """

    def __init__(self, verbose):
        """Resolve the VCS client (hg or git) used to query this checkout."""
        TestInfo.__init__(self, verbose)

        def require_tool(tool):
            # A detected VCS without a usable binary is a hard error.
            binary = which(tool)
            if not binary:
                raise OSError(errno.ENOENT, "Could not find '%s' on PATH." % tool)
            return binary

        self._hg = None
        if conditions.is_hg(self.build_obj):
            self._hg = require_tool("hg")

        self._git = None
        if conditions.is_git(self.build_obj):
            self._git = require_tool("git")
61 def find_in_hg_or_git(self, test_name):
62 if self._hg:
63 cmd = [self._hg, "files", "-I", test_name]
64 elif self._git:
65 cmd = [self._git, "ls-files", test_name]
66 else:
67 return None
68 try:
69 out = subprocess.check_output(cmd, universal_newlines=True).splitlines()
70 except subprocess.CalledProcessError:
71 out = None
72 return out
74 def set_test_name(self):
75 # Generating a unified report for a specific test is complicated
76 # by differences in the test name used in various data sources.
77 # Consider:
78 # - It is often convenient to request a report based only on
79 # a short file name, rather than the full path;
80 # - Bugs may be filed in bugzilla against a simple, short test
81 # name or the full path to the test;
82 # This function attempts to find appropriate names for different
83 # queries based on the specified test name.
85 # full_test_name is full path to file in hg (or git)
86 self.full_test_name = None
87 out = self.find_in_hg_or_git(self.test_name)
88 if out and len(out) == 1:
89 self.full_test_name = out[0]
90 elif out and len(out) > 1:
91 print("Ambiguous test name specified. Found:")
92 for line in out:
93 print(line)
94 else:
95 out = self.find_in_hg_or_git("**/%s*" % self.test_name)
96 if out and len(out) == 1:
97 self.full_test_name = out[0]
98 elif out and len(out) > 1:
99 print("Ambiguous test name. Found:")
100 for line in out:
101 print(line)
102 if self.full_test_name:
103 self.full_test_name.replace(os.sep, posixpath.sep)
104 print("Found %s in source control." % self.full_test_name)
105 else:
106 print("Unable to validate test name '%s'!" % self.test_name)
107 self.full_test_name = self.test_name
109 # search for full_test_name in test manifests
110 here = os.path.abspath(os.path.dirname(__file__))
111 resolver = TestResolver.from_environment(
112 cwd=here, loader_cls=TestManifestLoader
114 relpath = self.build_obj._wrap_path_argument(self.full_test_name).relpath()
115 tests = list(resolver.resolve_tests(paths=[relpath]))
116 if len(tests) == 1:
117 relpath = self.build_obj._wrap_path_argument(tests[0]["manifest"]).relpath()
118 print("%s found in manifest %s" % (self.full_test_name, relpath))
119 if tests[0].get("flavor"):
120 print(" flavor: %s" % tests[0]["flavor"])
121 if tests[0].get("skip-if"):
122 print(" skip-if: %s" % tests[0]["skip-if"])
123 if tests[0].get("fail-if"):
124 print(" fail-if: %s" % tests[0]["fail-if"])
125 elif len(tests) == 0:
126 print("%s not found in any test manifest!" % self.full_test_name)
127 else:
128 print("%s found in more than one manifest!" % self.full_test_name)
130 # short_name is full_test_name without path
131 self.short_name = None
132 name_idx = self.full_test_name.rfind("/")
133 if name_idx > 0:
134 self.short_name = self.full_test_name[name_idx + 1 :]
135 if self.short_name and self.short_name == self.test_name:
136 self.short_name = None
138 def get_platform(self, record):
139 if "platform" in record["build"]:
140 platform = record["build"]["platform"]
141 else:
142 platform = "-"
143 platform_words = platform.split("-")
144 types_label = ""
145 # combine run and build types and eliminate duplicates
146 run_types = []
147 if "run" in record and "type" in record["run"]:
148 run_types = record["run"]["type"]
149 run_types = run_types if isinstance(run_types, list) else [run_types]
150 build_types = []
151 if "build" in record and "type" in record["build"]:
152 build_types = record["build"]["type"]
153 build_types = (
154 build_types if isinstance(build_types, list) else [build_types]
156 run_types = list(set(run_types + build_types))
157 # '1proc' is used as a treeherder label but does not appear in run types
158 if "e10s" not in run_types:
159 run_types = run_types + ["1proc"]
160 for run_type in run_types:
161 # chunked is not interesting
162 if run_type == "chunked":
163 continue
164 # e10s is the default: implied
165 if run_type == "e10s":
166 continue
167 # sometimes a build/run type is already present in the build platform
168 if run_type in platform_words:
169 continue
170 if types_label:
171 types_label += "-"
172 types_label += run_type
173 return "%s/%s:" % (platform, types_label)
175 def report_bugs(self):
176 # Report open bugs matching test name
177 search = self.full_test_name
178 if self.test_name:
179 search = "%s,%s" % (search, self.test_name)
180 if self.short_name:
181 search = "%s,%s" % (search, self.short_name)
182 payload = {"quicksearch": search, "include_fields": "id,summary"}
183 response = requests.get("https://bugzilla.mozilla.org/rest/bug", payload)
184 response.raise_for_status()
185 json_response = response.json()
186 print("\nBugzilla quick search for '%s':" % search)
187 if "bugs" in json_response:
188 for bug in json_response["bugs"]:
189 print("Bug %s: %s" % (bug["id"], bug["summary"]))
190 else:
191 print("No bugs found.")
193 def report(
194 self,
195 test_names,
196 start,
197 end,
198 show_info,
199 show_bugs,
201 self.start = start
202 self.end = end
203 self.show_info = show_info
205 if not self.show_info and not show_bugs:
206 # by default, show everything
207 self.show_info = True
208 show_bugs = True
210 for test_name in test_names:
211 print("===== %s =====" % test_name)
212 self.test_name = test_name
213 if len(self.test_name) < 6:
214 print("'%s' is too short for a test name!" % self.test_name)
215 continue
216 self.set_test_name()
217 if show_bugs:
218 self.report_bugs()
class TestInfoReport(TestInfo):
    """
    Support 'mach test-info report': Report of test runs summarized by
    manifest and component.
    """

    def __init__(self, verbose):
        """Initialize with no outstanding worker threads."""
        super().__init__(verbose)
        self.threads = []
231 def update_report(self, by_component, result, path_mod):
232 def update_item(item, label, value):
233 # It is important to include any existing item value in case ActiveData
234 # returns multiple records for the same test; that can happen if the report
235 # sometimes maps more than one ActiveData record to the same path.
236 new_value = item.get(label, 0) + value
237 if type(new_value) == int:
238 item[label] = new_value
239 else:
240 item[label] = float(round(new_value, 2)) # pylint: disable=W1633
242 if "test" in result and "tests" in by_component:
243 test = result["test"]
244 if path_mod:
245 test = path_mod(test)
246 for bc in by_component["tests"]:
247 for item in by_component["tests"][bc]:
248 if test == item["test"]:
249 # pylint: disable=W1633
250 seconds = float(round(result.get("duration", 0), 2))
251 update_item(item, "total run time, seconds", seconds)
252 update_item(item, "total runs", result.get("count", 0))
253 update_item(item, "skipped runs", result.get("skips", 0))
254 update_item(item, "failed runs", result.get("failures", 0))
255 return True
256 return False
258 def path_mod_reftest(self, path):
259 # "<path1> == <path2>" -> "<path1>"
260 path = path.split(" ")[0]
261 # "<path>?<params>" -> "<path>"
262 path = path.split("?")[0]
263 # "<path>#<fragment>" -> "<path>"
264 path = path.split("#")[0]
265 return path
267 def path_mod_jsreftest(self, path):
268 # "<path>;assert" -> "<path>"
269 path = path.split(";")[0]
270 return path
272 def path_mod_marionette(self, path):
273 # "<path> <test-name>" -> "<path>"
274 path = path.split(" ")[0]
275 # "part1\part2" -> "part1/part2"
276 path = path.replace("\\", os.path.sep)
277 return path
279 def path_mod_wpt(self, path):
280 if path[0] == os.path.sep:
281 # "/<path>" -> "<path>"
282 path = path[1:]
283 # "<path>" -> "testing/web-platform/tests/<path>"
284 path = os.path.join("testing", "web-platform", "tests", path)
285 # "<path>?<params>" -> "<path>"
286 path = path.split("?")[0]
287 return path
289 def path_mod_jittest(self, path):
290 # "part1\part2" -> "part1/part2"
291 path = path.replace("\\", os.path.sep)
292 # "<path>" -> "js/src/jit-test/tests/<path>"
293 return os.path.join("js", "src", "jit-test", "tests", path)
295 def path_mod_xpcshell(self, path):
296 # <manifest>.{ini|toml}:<path> -> "<path>"
297 path = path.split(":")[-1]
298 return path
300 def description(
301 self,
302 components,
303 flavor,
304 subsuite,
305 paths,
306 show_manifests,
307 show_tests,
308 show_summary,
309 show_annotations,
310 filter_values,
311 filter_keys,
312 start_date,
313 end_date,
315 # provide a natural language description of the report options
316 what = []
317 if show_manifests:
318 what.append("test manifests")
319 if show_tests:
320 what.append("tests")
321 if show_annotations:
322 what.append("test manifest annotations")
323 if show_summary and len(what) == 0:
324 what.append("summary of tests only")
325 if len(what) > 1:
326 what[-1] = "and " + what[-1]
327 what = ", ".join(what)
328 d = "Test summary report for " + what
329 if components:
330 d += ", in specified components (%s)" % components
331 else:
332 d += ", in all components"
333 if flavor:
334 d += ", in specified flavor (%s)" % flavor
335 if subsuite:
336 d += ", in specified subsuite (%s)" % subsuite
337 if paths:
338 d += ", on specified paths (%s)" % paths
339 if filter_values:
340 d += ", containing '%s'" % filter_values
341 if filter_keys:
342 d += " in manifest keys '%s'" % filter_keys
343 else:
344 d += " in any part of manifest entry"
345 d += ", including historical run-time data for the last "
347 start = datetime.datetime.strptime(start_date, "%Y-%m-%d")
348 end = datetime.datetime.strptime(end_date, "%Y-%m-%d")
349 d += "%s days on trunk (autoland/m-c)" % ((end - start).days)
350 d += " as of %s." % end_date
351 return d
353 # TODO: this is hacked for now and very limited
354 def parse_test(self, summary):
355 if summary.endswith("single tracking bug"):
356 name_part = summary.split("|")[0] # remove 'single tracking bug'
357 name_part.strip()
358 return name_part.split()[-1] # get just the test name, not extra words
359 return None
361 def get_runcount_data(self, start, end):
362 # TODO: use start/end properly
363 runcounts = self.get_runcounts(days=MAX_DAYS)
364 runcounts = self.squash_runcounts(runcounts, days=MAX_DAYS)
365 return runcounts
367 def get_testinfoall_index_url(self):
368 import taskcluster
370 queue = taskcluster.Queue()
371 index = taskcluster.Index(
373 "rootUrl": "https://firefox-ci-tc.services.mozilla.com",
376 route = "gecko.v2.mozilla-central.latest.source.test-info-all"
378 task_id = index.findTask(route)["taskId"]
379 artifacts = queue.listLatestArtifacts(task_id)["artifacts"]
381 url = ""
382 for artifact in artifacts:
383 if artifact["name"].endswith("test-run-info.json"):
384 url = queue.buildUrl("getLatestArtifact", task_id, artifact["name"])
385 break
386 return url
388 def get_runcounts(self, days=MAX_DAYS):
389 testrundata = {}
390 # get historical data from test-info job artifact; if missing get fresh
391 try:
392 url = self.get_testinfoall_index_url()
393 print("INFO: requesting runcounts url: %s" % url)
394 r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
395 r.raise_for_status()
396 testrundata = r.json()
397 except Exception:
398 pass
400 # fill in any holes we have
401 endday = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(
402 days=1
404 startday = endday - datetime.timedelta(days=days)
405 while startday < endday:
406 nextday = startday + datetime.timedelta(days=1)
407 retries = 2
408 done = False
409 if (
410 str(nextday) not in testrundata.keys()
411 or testrundata[str(nextday)] == {}
413 while not done:
414 url = "https://treeherder.mozilla.org/api/groupsummary/"
415 url += "?startdate=%s&enddate=%s" % (
416 startday.date(),
417 nextday.date(),
419 try:
420 print("INFO: requesting groupsummary url: %s" % url)
421 r = requests.get(
422 url, headers={"User-agent": "mach-test-info/1.0"}
424 done = True
425 except requests.exceptions.HTTPError:
426 retries -= 1
427 if retries <= 0:
428 r.raise_for_status()
429 try:
430 testrundata[str(nextday.date())] = r.json()
431 except json.decoder.JSONDecodeError:
432 print(
433 "Warning unable to retrieve (from treeherder's groupsummary api) testrun data for date: %s, skipping for now"
434 % nextday.date()
436 testrundata[str(nextday.date())] = {}
437 continue
438 startday = nextday
440 return testrundata
442 def squash_runcounts(self, runcounts, days=MAX_DAYS):
443 # squash all testrundata together into 1 big happy family for the last X days
444 endday = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(
445 days=1
447 oldest = endday - datetime.timedelta(days=days)
449 testgroup_runinfo = defaultdict(lambda: defaultdict(int))
451 retVal = {}
452 for datekey in runcounts.keys():
453 # strip out older days
454 if datetime.date.fromisoformat(datekey) < oldest.date():
455 continue
457 jtn = runcounts[datekey].get("job_type_names", {})
458 if not jtn:
459 print("Warning: Missing job type names from date: %s" % datekey)
460 continue
462 for m in runcounts[datekey]["manifests"]:
463 man_name = list(m.keys())[0]
465 for job_type_id, result, classification, count in m[man_name]:
466 # format: job_type_name, result, classification, count
467 # find matching jtn, result, classification and increment 'count'
468 job_name = jtn[job_type_id]
469 key = (job_name, result, classification)
470 testgroup_runinfo[man_name][key] += count
472 for m in testgroup_runinfo:
473 retVal[m] = [
474 list(x) + [testgroup_runinfo[m][x]] for x in testgroup_runinfo[m]
476 return retVal
478 def get_intermittent_failure_data(self, start, end):
479 retVal = {}
481 # get IFV bug list
482 # i.e. https://th.m.o/api/failures/?startday=2022-06-22&endday=2022-06-29&tree=all
483 url = (
484 "https://treeherder.mozilla.org/api/failures/?startday=%s&endday=%s&tree=trunk"
485 % (start, end)
487 r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
488 if_data = r.json()
489 buglist = [x["bug_id"] for x in if_data]
491 # get bug data for summary, 800 bugs at a time
492 # i.e. https://b.m.o/rest/bug?include_fields=id,product,component,summary&id=1,2,3...
493 max_bugs = 800
494 bug_data = []
495 fields = ["id", "product", "component", "summary"]
496 for bug_index in range(0, len(buglist), max_bugs):
497 bugs = [str(x) for x in buglist[bug_index : bug_index + max_bugs]]
498 if not bugs:
499 print(f"warning: found no bugs in range {bug_index}, +{max_bugs}")
500 continue
502 url = "https://bugzilla.mozilla.org/rest/bug?include_fields=%s&id=%s" % (
503 ",".join(fields),
504 ",".join(bugs),
506 r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
507 data = r.json()
508 if data and "bugs" in data.keys():
509 bug_data.extend(data["bugs"])
511 # for each summary, parse filename, store component
512 # IF we find >1 bug with same testname, for now summarize as one
513 for bug in bug_data:
514 test_name = self.parse_test(bug["summary"])
515 if not test_name:
516 continue
518 c = int([x["bug_count"] for x in if_data if x["bug_id"] == bug["id"]][0])
519 if test_name not in retVal.keys():
520 retVal[test_name] = {
521 "id": bug["id"],
522 "count": 0,
523 "product": bug["product"],
524 "component": bug["component"],
526 retVal[test_name]["count"] += c
528 if bug["product"] != retVal[test_name]["product"]:
529 print(
530 "ERROR | %s | mismatched bugzilla product, bugzilla (%s) != repo (%s)"
531 % (bug["id"], bug["product"], retVal[test_name]["product"])
533 if bug["component"] != retVal[test_name]["component"]:
534 print(
535 "ERROR | %s | mismatched bugzilla component, bugzilla (%s) != repo (%s)"
536 % (bug["id"], bug["component"], retVal[test_name]["component"])
538 return retVal
540 def report(
541 self,
542 components,
543 flavor,
544 subsuite,
545 paths,
546 show_manifests,
547 show_tests,
548 show_summary,
549 show_annotations,
550 filter_values,
551 filter_keys,
552 show_components,
553 output_file,
554 start,
555 end,
556 show_testruns,
558 def matches_filters(test):
560 Return True if all of the requested filter_values are found in this test;
561 if filter_keys are specified, restrict search to those test keys.
563 for value in filter_values:
564 value_found = False
565 for key in test:
566 if not filter_keys or key in filter_keys:
567 if re.search(value, test[key]):
568 value_found = True
569 break
570 if not value_found:
571 return False
572 return True
574 start_time = datetime.datetime.now()
576 # Ensure useful report by default
577 if (
578 not show_manifests
579 and not show_tests
580 and not show_summary
581 and not show_annotations
583 show_manifests = True
584 show_summary = True
586 by_component = {}
587 if components:
588 components = components.split(",")
589 if filter_keys:
590 filter_keys = filter_keys.split(",")
591 if filter_values:
592 filter_values = filter_values.split(",")
593 else:
594 filter_values = []
595 display_keys = (filter_keys or []) + ["skip-if", "fail-if", "fails-if"]
596 display_keys = set(display_keys)
597 ifd = self.get_intermittent_failure_data(start, end)
599 if show_testruns and os.environ.get("GECKO_HEAD_REPOSITORY", "") in [
600 "https://hg.mozilla.org/mozilla-central",
601 "https://hg.mozilla.org/try",
603 runcount = self.get_runcount_data(start, end)
605 print("Finding tests...")
606 here = os.path.abspath(os.path.dirname(__file__))
607 resolver = TestResolver.from_environment(
608 cwd=here, loader_cls=TestManifestLoader
610 tests = list(
611 resolver.resolve_tests(paths=paths, flavor=flavor, subsuite=subsuite)
614 manifest_paths = set()
615 for t in tests:
616 if t.get("manifest", None):
617 manifest_path = t["manifest"]
618 if t.get("ancestor_manifest", None):
619 manifest_path = "%s:%s" % (t["ancestor_manifest"], t["manifest"])
620 manifest_paths.add(manifest_path)
621 manifest_count = len(manifest_paths)
622 print(
623 "Resolver found {} tests, {} manifests".format(len(tests), manifest_count)
626 if show_manifests:
627 topsrcdir = self.build_obj.topsrcdir
628 by_component["manifests"] = {}
629 manifest_paths = list(manifest_paths)
630 manifest_paths.sort()
631 relpaths = []
632 for manifest_path in manifest_paths:
633 relpath = mozpath.relpath(manifest_path, topsrcdir)
634 if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
635 continue
636 relpaths.append(relpath)
637 reader = self.build_obj.mozbuild_reader(config_mode="empty")
638 files_info = reader.files_info(relpaths)
639 for manifest_path in manifest_paths:
640 relpath = mozpath.relpath(manifest_path, topsrcdir)
641 if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
642 continue
643 manifest_info = None
644 if relpath in files_info:
645 bug_component = files_info[relpath].get("BUG_COMPONENT")
646 if bug_component:
647 key = "{}::{}".format(
648 bug_component.product, bug_component.component
650 else:
651 key = "<unknown bug component>"
652 if (not components) or (key in components):
653 manifest_info = {"manifest": relpath, "tests": 0, "skipped": 0}
654 rkey = key if show_components else "all"
655 if rkey in by_component["manifests"]:
656 by_component["manifests"][rkey].append(manifest_info)
657 else:
658 by_component["manifests"][rkey] = [manifest_info]
659 if manifest_info:
660 for t in tests:
661 if t["manifest"] == manifest_path:
662 manifest_info["tests"] += 1
663 if t.get("skip-if"):
664 manifest_info["skipped"] += 1
665 for key in by_component["manifests"]:
666 by_component["manifests"][key].sort(key=lambda k: k["manifest"])
668 if show_tests:
669 by_component["tests"] = {}
671 if show_tests or show_summary or show_annotations:
672 test_count = 0
673 failed_count = 0
674 skipped_count = 0
675 annotation_count = 0
676 condition_count = 0
677 component_set = set()
678 relpaths = []
679 conditions = {}
680 known_unconditional_annotations = ["skip", "fail", "asserts", "random"]
681 known_conditional_annotations = [
682 "skip-if",
683 "fail-if",
684 "run-if",
685 "fails-if",
686 "fuzzy-if",
687 "random-if",
688 "asserts-if",
690 for t in tests:
691 relpath = t.get("srcdir_relpath")
692 relpaths.append(relpath)
693 reader = self.build_obj.mozbuild_reader(config_mode="empty")
694 files_info = reader.files_info(relpaths)
695 for t in tests:
696 if not matches_filters(t):
697 continue
698 if "referenced-test" in t:
699 # Avoid double-counting reftests: disregard reference file entries
700 continue
701 if show_annotations:
702 for key in t:
703 if key in known_unconditional_annotations:
704 annotation_count += 1
705 if key in known_conditional_annotations:
706 annotation_count += 1
707 # Here 'key' is a manifest annotation type like 'skip-if' and t[key]
708 # is the associated condition. For example, the manifestparser
709 # manifest annotation, "skip-if = os == 'win'", is expected to be
710 # encoded as t['skip-if'] = "os == 'win'".
711 # To allow for reftest manifests, t[key] may have multiple entries
712 # separated by ';', each corresponding to a condition for that test
713 # and annotation type. For example,
714 # "skip-if(Android&&webrender) skip-if(OSX)", would be
715 # encoded as t['skip-if'] = "Android&&webrender;OSX".
716 annotation_conditions = t[key].split(";")
718 # if key has \n in it, we need to strip it. for manifestparser format
719 # 1) from the beginning of the line
720 # 2) different conditions if in the middle of the line
721 annotation_conditions = [
722 x.strip("\n") for x in annotation_conditions
724 temp = []
725 for condition in annotation_conditions:
726 temp.extend(condition.split("\n"))
727 annotation_conditions = temp
729 for condition in annotation_conditions:
730 condition_count += 1
731 # Trim reftest fuzzy-if ranges: everything after the first comma
732 # eg. "Android,0-2,1-3" -> "Android"
733 condition = condition.split(",")[0]
734 if condition not in conditions:
735 conditions[condition] = 0
736 conditions[condition] += 1
737 test_count += 1
738 relpath = t.get("srcdir_relpath")
739 if relpath in files_info:
740 bug_component = files_info[relpath].get("BUG_COMPONENT")
741 if bug_component:
742 key = "{}::{}".format(
743 bug_component.product, bug_component.component
745 else:
746 key = "<unknown bug component>"
747 if (not components) or (key in components):
748 component_set.add(key)
749 test_info = {"test": relpath}
750 for test_key in display_keys:
751 value = t.get(test_key)
752 if value:
753 test_info[test_key] = value
754 if t.get("fail-if"):
755 failed_count += 1
756 if t.get("fails-if"):
757 failed_count += 1
758 if t.get("skip-if"):
759 skipped_count += 1
761 if "manifest_relpath" in t and "manifest" in t:
762 if "web-platform" in t["manifest_relpath"]:
763 test_info["manifest"] = [t["manifest"]]
764 else:
765 test_info["manifest"] = [t["manifest_relpath"]]
767 # handle included manifests as ancestor:child
768 if t.get("ancestor_manifest", None):
769 test_info["manifest"] = [
770 "%s:%s"
771 % (t["ancestor_manifest"], test_info["manifest"][0])
774 # add in intermittent failure data
775 if ifd.get(relpath):
776 if_data = ifd.get(relpath)
777 test_info["failure_count"] = if_data["count"]
778 if show_testruns:
779 total_runs = 0
780 for m in test_info["manifest"]:
781 if m in runcount:
782 total_runs += sum([x[3] for x in runcount[m]])
783 if total_runs > 0:
784 test_info["total_runs"] = total_runs
786 if show_tests:
787 rkey = key if show_components else "all"
788 if rkey in by_component["tests"]:
789 # Avoid duplicates: Some test paths have multiple TestResolver
790 # entries, as when a test is included by multiple manifests.
791 found = False
792 for ctest in by_component["tests"][rkey]:
793 if ctest["test"] == test_info["test"]:
794 found = True
795 break
796 if not found:
797 by_component["tests"][rkey].append(test_info)
798 else:
799 for ti in by_component["tests"][rkey]:
800 if ti["test"] == test_info["test"]:
801 if (
802 test_info["manifest"][0]
803 not in ti["manifest"]
805 ti_manifest = test_info["manifest"]
806 if test_info.get(
807 "ancestor_manifest", None
809 ti_manifest = "%s:%s" % (
810 test_info["ancestor_manifest"],
811 ti_manifest,
813 ti["manifest"].extend(ti_manifest)
814 else:
815 by_component["tests"][rkey] = [test_info]
816 if show_tests:
817 for key in by_component["tests"]:
818 by_component["tests"][key].sort(key=lambda k: k["test"])
820 by_component["description"] = self.description(
821 components,
822 flavor,
823 subsuite,
824 paths,
825 show_manifests,
826 show_tests,
827 show_summary,
828 show_annotations,
829 filter_values,
830 filter_keys,
831 start,
832 end,
835 if show_summary:
836 by_component["summary"] = {}
837 by_component["summary"]["components"] = len(component_set)
838 by_component["summary"]["manifests"] = manifest_count
839 by_component["summary"]["tests"] = test_count
840 by_component["summary"]["failed tests"] = failed_count
841 by_component["summary"]["skipped tests"] = skipped_count
843 if show_annotations:
844 by_component["annotations"] = {}
845 by_component["annotations"]["total annotations"] = annotation_count
846 by_component["annotations"]["total conditions"] = condition_count
847 by_component["annotations"]["unique conditions"] = len(conditions)
848 by_component["annotations"]["conditions"] = conditions
850 self.write_report(by_component, output_file)
852 end_time = datetime.datetime.now()
853 self.log_verbose(
854 "%d seconds total to generate report"
855 % (end_time - start_time).total_seconds()
858 def write_report(self, by_component, output_file):
859 json_report = json.dumps(by_component, indent=2, sort_keys=True)
860 if output_file:
861 output_file = os.path.abspath(output_file)
862 output_dir = os.path.dirname(output_file)
863 if not os.path.isdir(output_dir):
864 os.makedirs(output_dir)
866 with open(output_file, "w") as f:
867 f.write(json_report)
868 else:
869 print(json_report)
871 def report_diff(self, before, after, output_file):
873 Support for 'mach test-info report-diff'.
876 def get_file(path_or_url):
877 if urlparse.urlparse(path_or_url).scheme:
878 response = requests.get(path_or_url)
879 response.raise_for_status()
880 return json.loads(response.text)
881 with open(path_or_url) as f:
882 return json.load(f)
884 report1 = get_file(before)
885 report2 = get_file(after)
887 by_component = {"tests": {}, "summary": {}}
888 self.diff_summaries(by_component, report1["summary"], report2["summary"])
889 self.diff_all_components(by_component, report1["tests"], report2["tests"])
890 self.write_report(by_component, output_file)
892 def diff_summaries(self, by_component, summary1, summary2):
894 Update by_component with comparison of summaries.
896 all_keys = set(summary1.keys()) | set(summary2.keys())
897 for key in all_keys:
898 delta = summary2.get(key, 0) - summary1.get(key, 0)
899 by_component["summary"]["%s delta" % key] = delta
901 def diff_all_components(self, by_component, tests1, tests2):
903 Update by_component with any added/deleted tests, for all components.
905 self.added_count = 0
906 self.deleted_count = 0
907 for component in tests1:
908 component1 = tests1[component]
909 component2 = [] if component not in tests2 else tests2[component]
910 self.diff_component(by_component, component, component1, component2)
911 for component in tests2:
912 if component not in tests1:
913 component2 = tests2[component]
914 self.diff_component(by_component, component, [], component2)
915 by_component["summary"]["added tests"] = self.added_count
916 by_component["summary"]["deleted tests"] = self.deleted_count
918 def diff_component(self, by_component, component, component1, component2):
920 Update by_component[component] with any added/deleted tests for the
921 named component.
922 "added": tests found in component2 but missing from component1.
923 "deleted": tests found in component1 but missing from component2.
925 tests1 = set([t["test"] for t in component1])
926 tests2 = set([t["test"] for t in component2])
927 deleted = tests1 - tests2
928 added = tests2 - tests1
929 if deleted or added:
930 by_component["tests"][component] = {}
931 if deleted:
932 by_component["tests"][component]["deleted"] = sorted(list(deleted))
933 if added:
934 by_component["tests"][component]["added"] = sorted(list(added))
935 self.added_count += len(added)
936 self.deleted_count += len(deleted)
937 common = len(tests1.intersection(tests2))
938 self.log_verbose(
939 "%s: %d deleted, %d added, %d common"
940 % (component, len(deleted), len(added), common)