# Backed out changeset bcbab342eed8 (bug 1889658) for causing wpt reftest failures.
# gecko.git: testing/testinfo.py
# blob e17926f85ee4cfa92e39a17cc2e9ae3ec01175b9
1 # This Source Code Form is subject to the terms of the Mozilla Public
2 # License, v. 2.0. If a copy of the MPL was not distributed with this
3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
5 import datetime
6 import errno
7 import json
8 import os
9 import posixpath
10 import re
11 import subprocess
12 from collections import defaultdict
14 import mozpack.path as mozpath
15 import requests
16 import six.moves.urllib_parse as urlparse
17 from mozbuild.base import MachCommandConditions as conditions
18 from mozbuild.base import MozbuildObject
19 from mozfile import which
20 from moztest.resolve import TestManifestLoader, TestResolver
21 from redo import retriable
23 REFERER = "https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Test-Info"
24 MAX_DAYS = 30
class TestInfo(object):
    """Shared base for 'mach test-info' commands.

    Holds the verbosity flag and a MozbuildObject resolved from this
    file's directory, which subclasses use to query the build and the
    source tree.
    """

    def __init__(self, verbose):
        self.verbose = verbose
        base_dir = os.path.abspath(os.path.dirname(__file__))
        self.build_obj = MozbuildObject.from_environment(cwd=base_dir)

    def log_verbose(self, what):
        # Emit diagnostic chatter only when verbose output was requested.
        if not self.verbose:
            return
        print(what)
class TestInfoTests(TestInfo):
    """
    Support 'mach test-info tests': Detailed report of specified tests.
    """

    def __init__(self, verbose):
        """Locate the VCS binary backing this checkout.

        Sets self._hg and/or self._git to the tool's path (or None when
        the checkout does not use that VCS).

        :param verbose: enable diagnostic output via log_verbose().
        :raises OSError: if the checkout's VCS tool is not found on PATH.
        """
        TestInfo.__init__(self, verbose)

        self._hg = None
        if conditions.is_hg(self.build_obj):
            self._hg = which("hg")
            if not self._hg:
                raise OSError(errno.ENOENT, "Could not find 'hg' on PATH.")

        self._git = None
        if conditions.is_git(self.build_obj):
            self._git = which("git")
            if not self._git:
                raise OSError(errno.ENOENT, "Could not find 'git' on PATH.")
62 def find_in_hg_or_git(self, test_name):
63 if self._hg:
64 cmd = [self._hg, "files", "-I", test_name]
65 elif self._git:
66 cmd = [self._git, "ls-files", test_name]
67 else:
68 return None
69 try:
70 out = subprocess.check_output(cmd, universal_newlines=True).splitlines()
71 except subprocess.CalledProcessError:
72 out = None
73 return out
75 def set_test_name(self):
76 # Generating a unified report for a specific test is complicated
77 # by differences in the test name used in various data sources.
78 # Consider:
79 # - It is often convenient to request a report based only on
80 # a short file name, rather than the full path;
81 # - Bugs may be filed in bugzilla against a simple, short test
82 # name or the full path to the test;
83 # This function attempts to find appropriate names for different
84 # queries based on the specified test name.
86 # full_test_name is full path to file in hg (or git)
87 self.full_test_name = None
88 out = self.find_in_hg_or_git(self.test_name)
89 if out and len(out) == 1:
90 self.full_test_name = out[0]
91 elif out and len(out) > 1:
92 print("Ambiguous test name specified. Found:")
93 for line in out:
94 print(line)
95 else:
96 out = self.find_in_hg_or_git("**/%s*" % self.test_name)
97 if out and len(out) == 1:
98 self.full_test_name = out[0]
99 elif out and len(out) > 1:
100 print("Ambiguous test name. Found:")
101 for line in out:
102 print(line)
103 if self.full_test_name:
104 self.full_test_name.replace(os.sep, posixpath.sep)
105 print("Found %s in source control." % self.full_test_name)
106 else:
107 print("Unable to validate test name '%s'!" % self.test_name)
108 self.full_test_name = self.test_name
110 # search for full_test_name in test manifests
111 here = os.path.abspath(os.path.dirname(__file__))
112 resolver = TestResolver.from_environment(
113 cwd=here, loader_cls=TestManifestLoader
115 relpath = self.build_obj._wrap_path_argument(self.full_test_name).relpath()
116 tests = list(resolver.resolve_tests(paths=[relpath]))
117 if len(tests) == 1:
118 relpath = self.build_obj._wrap_path_argument(tests[0]["manifest"]).relpath()
119 print("%s found in manifest %s" % (self.full_test_name, relpath))
120 if tests[0].get("flavor"):
121 print(" flavor: %s" % tests[0]["flavor"])
122 if tests[0].get("skip-if"):
123 print(" skip-if: %s" % tests[0]["skip-if"])
124 if tests[0].get("fail-if"):
125 print(" fail-if: %s" % tests[0]["fail-if"])
126 elif len(tests) == 0:
127 print("%s not found in any test manifest!" % self.full_test_name)
128 else:
129 print("%s found in more than one manifest!" % self.full_test_name)
131 # short_name is full_test_name without path
132 self.short_name = None
133 name_idx = self.full_test_name.rfind("/")
134 if name_idx > 0:
135 self.short_name = self.full_test_name[name_idx + 1 :]
136 if self.short_name and self.short_name == self.test_name:
137 self.short_name = None
    def get_platform(self, record):
        """Return a "<platform>/<types>:" label for a test-run record.

        Combines the build platform with de-duplicated run/build type
        labels, omitting labels that are implied ("e10s"), uninteresting
        ("chunked"), or already present in the platform string.

        :param record: dict with a "build" entry and optionally
            record["build"]["platform"], record["build"]["type"] and
            record["run"]["type"] (each type may be a string or a list).
        """
        if "platform" in record["build"]:
            platform = record["build"]["platform"]
        else:
            platform = "-"
        platform_words = platform.split("-")
        types_label = ""
        # combine run and build types and eliminate duplicates
        run_types = []
        if "run" in record and "type" in record["run"]:
            run_types = record["run"]["type"]
            run_types = run_types if isinstance(run_types, list) else [run_types]
        build_types = []
        if "build" in record and "type" in record["build"]:
            build_types = record["build"]["type"]
            build_types = (
                build_types if isinstance(build_types, list) else [build_types]
            )
        run_types = list(set(run_types + build_types))
        # '1proc' is used as a treeherder label but does not appear in run types
        if "e10s" not in run_types:
            run_types = run_types + ["1proc"]
        for run_type in run_types:
            # chunked is not interesting
            if run_type == "chunked":
                continue
            # e10s is the default: implied
            if run_type == "e10s":
                continue
            # sometimes a build/run type is already present in the build platform
            if run_type in platform_words:
                continue
            if types_label:
                types_label += "-"
            types_label += run_type
        return "%s/%s:" % (platform, types_label)
    def report_bugs(self):
        """Query Bugzilla quicksearch for bugs matching any known name for
        this test (full path, user-supplied name, short name) and print the
        id and summary of each hit.

        :raises requests.exceptions.HTTPError: on a failed Bugzilla request.
        """
        # Report open bugs matching test name
        search = self.full_test_name
        if self.test_name:
            search = "%s,%s" % (search, self.test_name)
        if self.short_name:
            search = "%s,%s" % (search, self.short_name)
        payload = {"quicksearch": search, "include_fields": "id,summary"}
        response = requests.get("https://bugzilla.mozilla.org/rest/bug", payload)
        response.raise_for_status()
        json_response = response.json()
        print("\nBugzilla quick search for '%s':" % search)
        if "bugs" in json_response:
            for bug in json_response["bugs"]:
                print("Bug %s: %s" % (bug["id"], bug["summary"]))
        else:
            print("No bugs found.")
    def report(
        self,
        test_names,
        start,
        end,
        show_info,
        show_bugs,
    ):
        """Print a per-test report for each name in test_names.

        When neither show_info nor show_bugs is requested, both are
        enabled. Names shorter than 6 characters are rejected as too
        ambiguous to search for.

        :param test_names: iterable of test names (short or full path).
        :param start, end: date range; stored on self for other reporting
            code — not consulted directly in this method.
        """
        self.start = start
        self.end = end
        self.show_info = show_info

        if not self.show_info and not show_bugs:
            # by default, show everything
            self.show_info = True
            show_bugs = True

        for test_name in test_names:
            print("===== %s =====" % test_name)
            self.test_name = test_name
            if len(self.test_name) < 6:
                print("'%s' is too short for a test name!" % self.test_name)
                continue
            self.set_test_name()
            if show_bugs:
                self.report_bugs()
class TestInfoReport(TestInfo):
    """
    Support 'mach test-info report': Report of test runs summarized by
    manifest and component.
    """

    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)
        # Initialized here but not referenced by any method visible in
        # this class.
        self.threads = []
    @retriable(attempts=3, sleeptime=5, sleepscale=2)
    def get_url(self, target_url):
        """GET target_url and return the decoded JSON payload.

        Retried up to 3 times with backoff via @retriable. Returns {} when
        the endpoint serves malformed JSON; HTTP error statuses propagate
        (raise_for_status), which also triggers the retry decorator.
        """
        # if we fail to get valid json (i.e. end point has malformed data), return {}
        retVal = {}
        try:
            self.log_verbose("getting url: %s" % target_url)
            r = requests.get(target_url, headers={"User-agent": "mach-test-info/1.0"})
            self.log_verbose("got status: %s" % r.status_code)
            r.raise_for_status()
            retVal = r.json()
        except json.decoder.JSONDecodeError:
            self.log_verbose("Error retrieving data from %s" % target_url)

        return retVal
247 def update_report(self, by_component, result, path_mod):
248 def update_item(item, label, value):
249 # It is important to include any existing item value in case ActiveData
250 # returns multiple records for the same test; that can happen if the report
251 # sometimes maps more than one ActiveData record to the same path.
252 new_value = item.get(label, 0) + value
253 if type(new_value) == int:
254 item[label] = new_value
255 else:
256 item[label] = float(round(new_value, 2)) # pylint: disable=W1633
258 if "test" in result and "tests" in by_component:
259 test = result["test"]
260 if path_mod:
261 test = path_mod(test)
262 for bc in by_component["tests"]:
263 for item in by_component["tests"][bc]:
264 if test == item["test"]:
265 # pylint: disable=W1633
266 seconds = float(round(result.get("duration", 0), 2))
267 update_item(item, "total run time, seconds", seconds)
268 update_item(item, "total runs", result.get("count", 0))
269 update_item(item, "skipped runs", result.get("skips", 0))
270 update_item(item, "failed runs", result.get("failures", 0))
271 return True
272 return False
274 def path_mod_reftest(self, path):
275 # "<path1> == <path2>" -> "<path1>"
276 path = path.split(" ")[0]
277 # "<path>?<params>" -> "<path>"
278 path = path.split("?")[0]
279 # "<path>#<fragment>" -> "<path>"
280 path = path.split("#")[0]
281 return path
283 def path_mod_jsreftest(self, path):
284 # "<path>;assert" -> "<path>"
285 path = path.split(";")[0]
286 return path
288 def path_mod_marionette(self, path):
289 # "<path> <test-name>" -> "<path>"
290 path = path.split(" ")[0]
291 # "part1\part2" -> "part1/part2"
292 path = path.replace("\\", os.path.sep)
293 return path
295 def path_mod_wpt(self, path):
296 if path[0] == os.path.sep:
297 # "/<path>" -> "<path>"
298 path = path[1:]
299 # "<path>" -> "testing/web-platform/tests/<path>"
300 path = os.path.join("testing", "web-platform", "tests", path)
301 # "<path>?<params>" -> "<path>"
302 path = path.split("?")[0]
303 return path
305 def path_mod_jittest(self, path):
306 # "part1\part2" -> "part1/part2"
307 path = path.replace("\\", os.path.sep)
308 # "<path>" -> "js/src/jit-test/tests/<path>"
309 return os.path.join("js", "src", "jit-test", "tests", path)
311 def path_mod_xpcshell(self, path):
312 # <manifest>.{ini|toml}:<path> -> "<path>"
313 path = path.split(":")[-1]
314 return path
    def description(
        self,
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        filter_values,
        filter_keys,
        start_date,
        end_date,
    ):
        """Return a natural-language description of the report options.

        :param start_date, end_date: "YYYY-MM-DD" strings delimiting the
            historical data range; used to compute the day span.
        :returns: a single descriptive sentence (str).
        """
        # provide a natural language description of the report options
        what = []
        if show_manifests:
            what.append("test manifests")
        if show_tests:
            what.append("tests")
        if show_annotations:
            what.append("test manifest annotations")
        if show_summary and len(what) == 0:
            what.append("summary of tests only")
        if len(what) > 1:
            what[-1] = "and " + what[-1]
        what = ", ".join(what)
        d = "Test summary report for " + what
        if components:
            d += ", in specified components (%s)" % components
        else:
            d += ", in all components"
        if flavor:
            d += ", in specified flavor (%s)" % flavor
        if subsuite:
            d += ", in specified subsuite (%s)" % subsuite
        if paths:
            d += ", on specified paths (%s)" % paths
        if filter_values:
            d += ", containing '%s'" % filter_values
            if filter_keys:
                d += " in manifest keys '%s'" % filter_keys
            else:
                d += " in any part of manifest entry"
        d += ", including historical run-time data for the last "

        start = datetime.datetime.strptime(start_date, "%Y-%m-%d")
        end = datetime.datetime.strptime(end_date, "%Y-%m-%d")
        d += "%s days on trunk (autoland/m-c)" % ((end - start).days)
        d += " as of %s." % end_date
        return d
369 # TODO: this is hacked for now and very limited
370 def parse_test(self, summary):
371 if summary.endswith("single tracking bug"):
372 name_part = summary.split("|")[0] # remove 'single tracking bug'
373 name_part.strip()
374 return name_part.split()[-1] # get just the test name, not extra words
375 return None
377 def get_runcount_data(self, runcounts_input_file, start, end):
378 # TODO: use start/end properly
379 if runcounts_input_file:
380 try:
381 with open(runcounts_input_file, "r") as f:
382 runcounts = json.load(f)
383 except:
384 print("Unable to load runcounts from path: %s" % runcounts_input_file)
385 raise
386 else:
387 runcounts = self.get_runcounts(days=MAX_DAYS)
388 runcounts = self.squash_runcounts(runcounts, days=MAX_DAYS)
389 return runcounts
    def get_testinfoall_index_url(self):
        """Find the URL of the most recent test-run-info.json artifact
        published by the mozilla-central test-info-all task.

        :returns: artifact URL (str), or "" if no matching artifact exists.
        """
        # Imported lazily so the taskcluster client is only required when
        # fresh data actually has to be located.
        import taskcluster

        index = taskcluster.Index(
            {
                "rootUrl": "https://firefox-ci-tc.services.mozilla.com",
            }
        )
        route = "gecko.v2.mozilla-central.latest.source.test-info-all"
        queue = taskcluster.Queue(
            {
                "rootUrl": "https://firefox-ci-tc.services.mozilla.com",
            }
        )

        task_id = index.findTask(route)["taskId"]
        artifacts = queue.listLatestArtifacts(task_id)["artifacts"]

        url = ""
        for artifact in artifacts:
            if artifact["name"].endswith("test-run-info.json"):
                url = queue.buildUrl("getLatestArtifact", task_id, artifact["name"])
                break
        return url
    def get_runcounts(self, days=MAX_DAYS):
        """Collect per-day group summary data for the last `days` days.

        Starts from the cached test-info artifact and fetches missing days
        (at most the 5 most recent) from treeherder's groupsummary API.

        :returns: dict keyed by ISO date string; each value is that day's
            group summary payload (possibly {} for days with no data).
        """
        testrundata = {}
        # get historical data from test-info job artifact; if missing get fresh
        url = self.get_testinfoall_index_url()
        print("INFO: requesting runcounts url: %s" % url)
        olddata = self.get_url(url)

        # fill in any holes we have
        endday = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(
            days=1
        )
        startday = endday - datetime.timedelta(days=days)
        urls_to_fetch = []
        # build list of dates with missing data
        while startday < endday:
            nextday = startday + datetime.timedelta(days=1)
            if not olddata.get(str(nextday.date()), {}):
                url = "https://treeherder.mozilla.org/api/groupsummary/"
                url += "?startdate=%s&enddate=%s" % (
                    startday.date(),
                    nextday.date(),
                )
                urls_to_fetch.append([str(nextday.date()), url])
            testrundata[str(nextday.date())] = olddata.get(str(nextday.date()), {})

            startday = nextday

        # limit missing data collection to the 5 most recent days to reduce overall runtime
        for date, url in urls_to_fetch[-5:]:
            try:
                testrundata[date] = self.get_url(url)
            except requests.exceptions.HTTPError:
                # We want to see other errors, but can accept HTTPError failures
                print(f"Unable to retrieve results for url: {url}")
                pass

        return testrundata
    def squash_runcounts(self, runcounts, days=MAX_DAYS):
        """Aggregate per-day run counts into totals over the last `days` days.

        :param runcounts: dict from get_runcounts(), keyed by ISO date.
        :returns: {manifest_name: [[job_name, result, classification,
            total_count], ...]}.
        """
        # squash all testrundata together into 1 big happy family for the last X days
        endday = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(
            days=1
        )
        oldest = endday - datetime.timedelta(days=days)

        # keyed by manifest, then by (job_name, result, classification)
        testgroup_runinfo = defaultdict(lambda: defaultdict(int))

        retVal = {}
        for datekey in runcounts.keys():
            # strip out older days
            if datetime.date.fromisoformat(datekey) < oldest.date():
                continue

            jtn = runcounts[datekey].get("job_type_names", {})
            if not jtn:
                print("Warning: Missing job type names from date: %s" % datekey)
                continue

            for m in runcounts[datekey]["manifests"]:
                man_name = list(m.keys())[0]

                for job_type_id, result, classification, count in m[man_name]:
                    # format: job_type_name, result, classification, count
                    # find matching jtn, result, classification and increment 'count'
                    job_name = jtn[job_type_id]
                    key = (job_name, result, classification)
                    testgroup_runinfo[man_name][key] += count

        for m in testgroup_runinfo:
            retVal[m] = [
                list(x) + [testgroup_runinfo[m][x]] for x in testgroup_runinfo[m]
            ]
        return retVal
    def get_intermittent_failure_data(self, start, end):
        """Collect intermittent-failure counts per test from treeherder and
        Bugzilla for the given date range.

        :param start, end: "YYYY-MM-DD" date strings.
        :returns: {test_name: {"id": bug_id, "count": total_failures,
            "product": ..., "component": ...}}.
        """
        retVal = {}

        # get IFV bug list
        # i.e. https://th.m.o/api/failures/?startday=2022-06-22&endday=2022-06-29&tree=all
        url = (
            "https://treeherder.mozilla.org/api/failures/?startday=%s&endday=%s&tree=trunk"
            % (start, end)
        )
        if_data = self.get_url(url)
        buglist = [x["bug_id"] for x in if_data]

        # get bug data for summary, 800 bugs at a time
        # i.e. https://b.m.o/rest/bug?include_fields=id,product,component,summary&id=1,2,3...
        max_bugs = 800
        bug_data = []
        fields = ["id", "product", "component", "summary"]
        for bug_index in range(0, len(buglist), max_bugs):
            bugs = [str(x) for x in buglist[bug_index : bug_index + max_bugs]]
            if not bugs:
                print(f"warning: found no bugs in range {bug_index}, +{max_bugs}")
                continue

            url = "https://bugzilla.mozilla.org/rest/bug?include_fields=%s&id=%s" % (
                ",".join(fields),
                ",".join(bugs),
            )
            data = self.get_url(url)
            if data and "bugs" in data.keys():
                bug_data.extend(data["bugs"])

        # for each summary, parse filename, store component
        # IF we find >1 bug with same testname, for now summarize as one
        for bug in bug_data:
            test_name = self.parse_test(bug["summary"])
            if not test_name:
                continue

            c = int([x["bug_count"] for x in if_data if x["bug_id"] == bug["id"]][0])
            if test_name not in retVal.keys():
                retVal[test_name] = {
                    "id": bug["id"],
                    "count": 0,
                    "product": bug["product"],
                    "component": bug["component"],
                }
            retVal[test_name]["count"] += c

            # Warn (but keep counting) when several bugs for one test name
            # disagree on the owning product/component.
            if bug["product"] != retVal[test_name]["product"]:
                print(
                    "ERROR | %s | mismatched bugzilla product, bugzilla (%s) != repo (%s)"
                    % (bug["id"], bug["product"], retVal[test_name]["product"])
                )
            if bug["component"] != retVal[test_name]["component"]:
                print(
                    "ERROR | %s | mismatched bugzilla component, bugzilla (%s) != repo (%s)"
                    % (bug["id"], bug["component"], retVal[test_name]["component"])
                )
        return retVal
    def report(
        self,
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        filter_values,
        filter_keys,
        show_components,
        output_file,
        start,
        end,
        show_testruns,
        runcounts_input_file,
    ):
        """Generate the 'mach test-info report' JSON.

        Resolves tests and manifests, maps them to bug components, merges
        in intermittent-failure counts (and optionally historical run
        counts), and writes the aggregate report via write_report().

        :param components: comma-separated "Product::Component" filter, or
            None/empty for all components.
        :param filter_values / filter_keys: comma-separated regexes and the
            test keys to search them in.
        :param show_components: key report sections by component instead of
            a single "all" bucket.
        :param output_file: path for the JSON output, or falsy for stdout.
        :param start, end: "YYYY-MM-DD" date strings for historical data.
        """

        def matches_filters(test):
            """
            Return True if all of the requested filter_values are found in this test;
            if filter_keys are specified, restrict search to those test keys.
            """
            for value in filter_values:
                value_found = False
                for key in test:
                    if not filter_keys or key in filter_keys:
                        if re.search(value, test[key]):
                            value_found = True
                            break
                if not value_found:
                    return False
            return True

        start_time = datetime.datetime.now()

        # Ensure useful report by default
        if (
            not show_manifests
            and not show_tests
            and not show_summary
            and not show_annotations
        ):
            show_manifests = True
            show_summary = True

        by_component = {}
        if components:
            components = components.split(",")
        if filter_keys:
            filter_keys = filter_keys.split(",")
        if filter_values:
            filter_values = filter_values.split(",")
        else:
            filter_values = []
        display_keys = (filter_keys or []) + ["skip-if", "fail-if", "fails-if"]
        display_keys = set(display_keys)
        ifd = self.get_intermittent_failure_data(start, end)

        # Run counts are only meaningful (and only fetched) for trunk-like
        # repositories.
        runcount = {}
        if show_testruns and os.environ.get("GECKO_HEAD_REPOSITORY", "") in [
            "https://hg.mozilla.org/mozilla-central",
            "https://hg.mozilla.org/try",
        ]:
            runcount = self.get_runcount_data(runcounts_input_file, start, end)

        print("Finding tests...")
        here = os.path.abspath(os.path.dirname(__file__))
        resolver = TestResolver.from_environment(
            cwd=here, loader_cls=TestManifestLoader
        )
        tests = list(
            resolver.resolve_tests(paths=paths, flavor=flavor, subsuite=subsuite)
        )

        # Collect the unique manifest paths; included manifests are keyed
        # as "ancestor:child".
        manifest_paths = set()
        for t in tests:
            if t.get("manifest", None):
                manifest_path = t["manifest"]
                if t.get("ancestor_manifest", None):
                    manifest_path = "%s:%s" % (t["ancestor_manifest"], t["manifest"])
                manifest_paths.add(manifest_path)
        manifest_count = len(manifest_paths)
        print(
            "Resolver found {} tests, {} manifests".format(len(tests), manifest_count)
        )

        if show_manifests:
            topsrcdir = self.build_obj.topsrcdir
            by_component["manifests"] = {}
            manifest_paths = list(manifest_paths)
            manifest_paths.sort()
            relpaths = []
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, topsrcdir)
                # skip manifests outside the source tree
                if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
                    continue
                relpaths.append(relpath)
            reader = self.build_obj.mozbuild_reader(config_mode="empty")
            files_info = reader.files_info(relpaths)
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, topsrcdir)
                if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
                    continue
                manifest_info = None
                if relpath in files_info:
                    bug_component = files_info[relpath].get("BUG_COMPONENT")
                    if bug_component:
                        key = "{}::{}".format(
                            bug_component.product, bug_component.component
                        )
                    else:
                        key = "<unknown bug component>"
                    if (not components) or (key in components):
                        manifest_info = {"manifest": relpath, "tests": 0, "skipped": 0}
                        rkey = key if show_components else "all"
                        if rkey in by_component["manifests"]:
                            by_component["manifests"][rkey].append(manifest_info)
                        else:
                            by_component["manifests"][rkey] = [manifest_info]
                if manifest_info:
                    # count tests and skips per manifest
                    for t in tests:
                        if t["manifest"] == manifest_path:
                            manifest_info["tests"] += 1
                            if t.get("skip-if"):
                                manifest_info["skipped"] += 1
            for key in by_component["manifests"]:
                by_component["manifests"][key].sort(key=lambda k: k["manifest"])

        if show_tests:
            by_component["tests"] = {}

        if show_tests or show_summary or show_annotations:
            test_count = 0
            failed_count = 0
            skipped_count = 0
            annotation_count = 0
            condition_count = 0
            component_set = set()
            relpaths = []
            conditions = {}
            known_unconditional_annotations = ["skip", "fail", "asserts", "random"]
            known_conditional_annotations = [
                "skip-if",
                "fail-if",
                "run-if",
                "fails-if",
                "fuzzy-if",
                "random-if",
                "asserts-if",
            ]
            for t in tests:
                relpath = t.get("srcdir_relpath")
                relpaths.append(relpath)
            reader = self.build_obj.mozbuild_reader(config_mode="empty")
            files_info = reader.files_info(relpaths)
            for t in tests:
                if not matches_filters(t):
                    continue
                if "referenced-test" in t:
                    # Avoid double-counting reftests: disregard reference file entries
                    continue
                if show_annotations:
                    for key in t:
                        if key in known_unconditional_annotations:
                            annotation_count += 1
                        if key in known_conditional_annotations:
                            annotation_count += 1
                            # Here 'key' is a manifest annotation type like 'skip-if' and t[key]
                            # is the associated condition. For example, the manifestparser
                            # manifest annotation, "skip-if = os == 'win'", is expected to be
                            # encoded as t['skip-if'] = "os == 'win'".
                            # To allow for reftest manifests, t[key] may have multiple entries
                            # separated by ';', each corresponding to a condition for that test
                            # and annotation type. For example,
                            # "skip-if(Android&&webrender) skip-if(OSX)", would be
                            # encoded as t['skip-if'] = "Android&&webrender;OSX".
                            annotation_conditions = t[key].split(";")

                            # if key has \n in it, we need to strip it. for manifestparser format
                            # 1) from the beginning of the line
                            # 2) different conditions if in the middle of the line
                            annotation_conditions = [
                                x.strip("\n") for x in annotation_conditions
                            ]
                            temp = []
                            for condition in annotation_conditions:
                                temp.extend(condition.split("\n"))
                            annotation_conditions = temp

                            for condition in annotation_conditions:
                                condition_count += 1
                                # Trim reftest fuzzy-if ranges: everything after the first comma
                                # eg. "Android,0-2,1-3" -> "Android"
                                condition = condition.split(",")[0]
                                if condition not in conditions:
                                    conditions[condition] = 0
                                conditions[condition] += 1
                test_count += 1
                relpath = t.get("srcdir_relpath")
                if relpath in files_info:
                    bug_component = files_info[relpath].get("BUG_COMPONENT")
                    if bug_component:
                        key = "{}::{}".format(
                            bug_component.product, bug_component.component
                        )
                    else:
                        key = "<unknown bug component>"
                    if (not components) or (key in components):
                        component_set.add(key)
                        test_info = {"test": relpath}
                        for test_key in display_keys:
                            value = t.get(test_key)
                            if value:
                                test_info[test_key] = value
                        if t.get("fail-if"):
                            failed_count += 1
                        if t.get("fails-if"):
                            failed_count += 1
                        if t.get("skip-if"):
                            skipped_count += 1

                        if "manifest_relpath" in t and "manifest" in t:
                            if "web-platform" in t["manifest_relpath"]:
                                test_info["manifest"] = [t["manifest"]]
                            else:
                                test_info["manifest"] = [t["manifest_relpath"]]

                            # handle included manifests as ancestor:child
                            if t.get("ancestor_manifest", None):
                                test_info["manifest"] = [
                                    "%s:%s"
                                    % (t["ancestor_manifest"], test_info["manifest"][0])
                                ]

                        # add in intermittent failure data
                        if ifd.get(relpath):
                            if_data = ifd.get(relpath)
                            test_info["failure_count"] = if_data["count"]
                            if show_testruns:
                                total_runs = 0
                                for m in test_info["manifest"]:
                                    if m in runcount.keys():
                                        for x in runcount.get(m, []):
                                            if not x:
                                                break
                                            # x is [job_name, result,
                                            # classification, count]
                                            total_runs += x[3]
                                if total_runs > 0:
                                    test_info["total_runs"] = total_runs

                        if show_tests:
                            rkey = key if show_components else "all"
                            if rkey in by_component["tests"]:
                                # Avoid duplicates: Some test paths have multiple TestResolver
                                # entries, as when a test is included by multiple manifests.
                                found = False
                                for ctest in by_component["tests"][rkey]:
                                    if ctest["test"] == test_info["test"]:
                                        found = True
                                        break
                                if not found:
                                    by_component["tests"][rkey].append(test_info)
                                else:
                                    # merge manifest lists for duplicate test paths
                                    for ti in by_component["tests"][rkey]:
                                        if ti["test"] == test_info["test"]:
                                            if (
                                                test_info["manifest"][0]
                                                not in ti["manifest"]
                                            ):
                                                ti_manifest = test_info["manifest"]
                                                # NOTE(review): if "ancestor_manifest"
                                                # ever appears in test_info (only possible
                                                # via filter_keys), ti_manifest becomes a
                                                # string here and extend() would append its
                                                # characters — looks like a latent bug;
                                                # confirm intent.
                                                if test_info.get(
                                                    "ancestor_manifest", None
                                                ):
                                                    ti_manifest = "%s:%s" % (
                                                        test_info["ancestor_manifest"],
                                                        ti_manifest,
                                                    )
                                                ti["manifest"].extend(ti_manifest)
                            else:
                                by_component["tests"][rkey] = [test_info]
            if show_tests:
                for key in by_component["tests"]:
                    by_component["tests"][key].sort(key=lambda k: k["test"])

        by_component["description"] = self.description(
            components,
            flavor,
            subsuite,
            paths,
            show_manifests,
            show_tests,
            show_summary,
            show_annotations,
            filter_values,
            filter_keys,
            start,
            end,
        )

        if show_summary:
            by_component["summary"] = {}
            by_component["summary"]["components"] = len(component_set)
            by_component["summary"]["manifests"] = manifest_count
            by_component["summary"]["tests"] = test_count
            by_component["summary"]["failed tests"] = failed_count
            by_component["summary"]["skipped tests"] = skipped_count

        if show_annotations:
            by_component["annotations"] = {}
            by_component["annotations"]["total annotations"] = annotation_count
            by_component["annotations"]["total conditions"] = condition_count
            by_component["annotations"]["unique conditions"] = len(conditions)
            by_component["annotations"]["conditions"] = conditions

        self.write_report(by_component, output_file)

        end_time = datetime.datetime.now()
        self.log_verbose(
            "%d seconds total to generate report"
            % (end_time - start_time).total_seconds()
        )
873 def write_report(self, by_component, output_file):
874 json_report = json.dumps(by_component, indent=2, sort_keys=True)
875 if output_file:
876 output_file = os.path.abspath(output_file)
877 output_dir = os.path.dirname(output_file)
878 if not os.path.isdir(output_dir):
879 os.makedirs(output_dir)
881 with open(output_file, "w") as f:
882 f.write(json_report)
883 else:
884 print(json_report)
    def report_diff(self, before, after, output_file):
        """
        Support for 'mach test-info report-diff'.
        """
        # Compares two previously generated reports and writes a report of
        # summary deltas and added/deleted tests.

        def get_file(path_or_url):
            # Accept either a URL (anything with a scheme) or a local path.
            if urlparse.urlparse(path_or_url).scheme:
                response = requests.get(path_or_url)
                response.raise_for_status()
                return json.loads(response.text)
            with open(path_or_url) as f:
                return json.load(f)

        report1 = get_file(before)
        report2 = get_file(after)

        by_component = {"tests": {}, "summary": {}}
        self.diff_summaries(by_component, report1["summary"], report2["summary"])
        self.diff_all_components(by_component, report1["tests"], report2["tests"])
        self.write_report(by_component, output_file)
907 def diff_summaries(self, by_component, summary1, summary2):
909 Update by_component with comparison of summaries.
911 all_keys = set(summary1.keys()) | set(summary2.keys())
912 for key in all_keys:
913 delta = summary2.get(key, 0) - summary1.get(key, 0)
914 by_component["summary"]["%s delta" % key] = delta
916 def diff_all_components(self, by_component, tests1, tests2):
918 Update by_component with any added/deleted tests, for all components.
920 self.added_count = 0
921 self.deleted_count = 0
922 for component in tests1:
923 component1 = tests1[component]
924 component2 = [] if component not in tests2 else tests2[component]
925 self.diff_component(by_component, component, component1, component2)
926 for component in tests2:
927 if component not in tests1:
928 component2 = tests2[component]
929 self.diff_component(by_component, component, [], component2)
930 by_component["summary"]["added tests"] = self.added_count
931 by_component["summary"]["deleted tests"] = self.deleted_count
    def diff_component(self, by_component, component, component1, component2):
        """
        Update by_component[component] with any added/deleted tests for the
        named component.
        "added": tests found in component2 but missing from component1.
        "deleted": tests found in component1 but missing from component2.
        """
        tests1 = set([t["test"] for t in component1])
        tests2 = set([t["test"] for t in component2])
        deleted = tests1 - tests2
        added = tests2 - tests1
        if deleted or added:
            by_component["tests"][component] = {}
            if deleted:
                by_component["tests"][component]["deleted"] = sorted(list(deleted))
            if added:
                by_component["tests"][component]["added"] = sorted(list(added))
        # running totals consumed by diff_all_components
        self.added_count += len(added)
        self.deleted_count += len(deleted)
        common = len(tests1.intersection(tests2))
        self.log_verbose(
            "%s: %d deleted, %d added, %d common"
            % (component, len(deleted), len(added), common)
        )