# testing/testinfo.py
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import absolute_import, division, print_function
import datetime
import errno
import json
import os
import posixpath
import re
import requests
import six.moves.urllib_parse as urlparse
import subprocess
import threading
import traceback
import mozpack.path as mozpath
from moztest.resolve import TestResolver, TestManifestLoader
from mozfile import which

from mozbuild.base import MozbuildObject, MachCommandConditions as conditions

ACTIVEDATA_RECORD_LIMIT = 10000
MAX_ACTIVEDATA_CONCURRENCY = 5
MAX_ACTIVEDATA_RETRIES = 5
REFERER = "https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Test-Info"


class TestInfo(object):
    """
    Support 'mach test-info'.
    """

    def __init__(self, verbose):
        self.verbose = verbose
        here = os.path.abspath(os.path.dirname(__file__))
        self.build_obj = MozbuildObject.from_environment(cwd=here)
        self.total_activedata_seconds = 0

    def log_verbose(self, what):
        if self.verbose:
            print(what)

    def activedata_query(self, query):
        start_time = datetime.datetime.now()
        self.log_verbose(start_time)
        self.log_verbose(json.dumps(query))
        response = requests.post(
            "http://activedata.allizom.org/query",
            data=json.dumps(query),
            headers={"referer": REFERER},
            stream=True,
        )
        end_time = datetime.datetime.now()
        self.total_activedata_seconds += (end_time - start_time).total_seconds()
        self.log_verbose(end_time)
        self.log_verbose(response)
        response.raise_for_status()
        data = response.json()["data"]
        self.log_verbose("response length: %d" % len(data))
        return data
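
    # A minimal sketch (not executed) of the query shape accepted by activedata_query().
    # The clause names mirror the queries built later in this file; the test path,
    # branch, and date values below are hypothetical examples.
    #
    #     example_query = {
    #         "from": "unittest",
    #         "format": "list",
    #         "limit": 10,
    #         "where": {
    #             "and": [
    #                 {"eq": {"result.test": "dom/tests/mochitest/general/test_example.html"}},
    #                 {"in": {"build.branch": ["mozilla-central"]}},
    #                 {"gt": {"run.timestamp": {"date": "today-7day"}}},
    #             ]
    #         },
    #     }
    #     data = TestInfo(verbose=True).activedata_query(example_query)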


class ActiveDataThread(threading.Thread):
    """
    A thread to query ActiveData and wait for its response.
    """

    def __init__(self, name, ti, query, context):
        threading.Thread.__init__(self, name=name)
        self.ti = ti
        self.query = query
        self.context = context
        self.response = None

    def run(self):
        attempt = 1
        while attempt < MAX_ACTIVEDATA_RETRIES and not self.response:
            try:
                self.response = self.ti.activedata_query(self.query)
                if not self.response:
                    self.ti.log_verbose("%s: no data received for query" % self.name)
                    self.response = []
                    break
            except Exception:
                self.ti.log_verbose(
                    "%s: Exception on attempt #%d:" % (self.name, attempt)
                )
                traceback.print_exc()
                attempt += 1
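
    # Usage sketch (hypothetical): a thread is given a TestInfo instance, a query dict
    # like the ones built in this file, and an arbitrary context object that the caller
    # reads back alongside the response.
    #
    #     t = ActiveDataThread("example-label", ti, example_query, context=None)
    #     t.start()
    #     t.join()
    #     records = t.response  # [] when the query returned no data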


class TestInfoTests(TestInfo):
    """
    Support 'mach test-info tests': Detailed report of specified tests.
    """

    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)

        self._hg = None
        if conditions.is_hg(self.build_obj):
            self._hg = which("hg")
            if not self._hg:
                raise OSError(errno.ENOENT, "Could not find 'hg' on PATH.")

        self._git = None
        if conditions.is_git(self.build_obj):
            self._git = which("git")
            if not self._git:
                raise OSError(errno.ENOENT, "Could not find 'git' on PATH.")

    def find_in_hg_or_git(self, test_name):
        if self._hg:
            cmd = [self._hg, "files", "-I", test_name]
        elif self._git:
            cmd = [self._git, "ls-files", test_name]
        else:
            return None
        try:
            out = subprocess.check_output(cmd, universal_newlines=True).splitlines()
        except subprocess.CalledProcessError:
            out = None
        return out
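
    # Example (hypothetical paths): 'hg files -I' / 'git ls-files' returns the tracked
    # files matching the given pattern, e.g.
    #     find_in_hg_or_git("test_keepalive.html")
    #         -> ["dom/tests/mochitest/fetch/test_keepalive.html"]
    #     find_in_hg_or_git("**/browser_*.js")
    #         -> many entries, reported as ambiguous by set_test_name() below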

    def set_test_name(self):
        # Generating a unified report for a specific test is complicated
        # by differences in the test name used in various data sources.
        # Consider:
        #   - It is often convenient to request a report based only on
        #     a short file name, rather than the full path;
        #   - Bugs may be filed in bugzilla against a simple, short test
        #     name or the full path to the test;
        #   - In ActiveData, the full path is usually used, but sometimes
        #     also includes additional path components outside of the
        #     mercurial repo (common for reftests).
        # This function attempts to find appropriate names for different
        # queries based on the specified test name.

        # full_test_name is the full path to the file in hg (or git)
        self.full_test_name = None
        out = self.find_in_hg_or_git(self.test_name)
        if out and len(out) == 1:
            self.full_test_name = out[0]
        elif out and len(out) > 1:
            print("Ambiguous test name specified. Found:")
            for line in out:
                print(line)
        else:
            out = self.find_in_hg_or_git("**/%s*" % self.test_name)
            if out and len(out) == 1:
                self.full_test_name = out[0]
            elif out and len(out) > 1:
                print("Ambiguous test name. Found:")
                for line in out:
                    print(line)
        if self.full_test_name:
            # normalize to posix-style separators for use in queries and manifests
            self.full_test_name = self.full_test_name.replace(os.sep, posixpath.sep)
            print("Found %s in source control." % self.full_test_name)
        else:
            print("Unable to validate test name '%s'!" % self.test_name)
            self.full_test_name = self.test_name

        # search for full_test_name in test manifests
        here = os.path.abspath(os.path.dirname(__file__))
        resolver = TestResolver.from_environment(
            cwd=here, loader_cls=TestManifestLoader
        )
        relpath = self.build_obj._wrap_path_argument(self.full_test_name).relpath()
        tests = list(resolver.resolve_tests(paths=[relpath]))
        if len(tests) == 1:
            relpath = self.build_obj._wrap_path_argument(tests[0]["manifest"]).relpath()
            print("%s found in manifest %s" % (self.full_test_name, relpath))
            if tests[0].get("flavor"):
                print("  flavor: %s" % tests[0]["flavor"])
            if tests[0].get("skip-if"):
                print("  skip-if: %s" % tests[0]["skip-if"])
            if tests[0].get("fail-if"):
                print("  fail-if: %s" % tests[0]["fail-if"])
        elif len(tests) == 0:
            print("%s not found in any test manifest!" % self.full_test_name)
        else:
            print("%s found in more than one manifest!" % self.full_test_name)

        # short_name is full_test_name without the path
        self.short_name = None
        name_idx = self.full_test_name.rfind("/")
        if name_idx > 0:
            self.short_name = self.full_test_name[name_idx + 1 :]
        if self.short_name and self.short_name == self.test_name:
            self.short_name = None

        if not (self.show_results or self.show_durations or self.show_tasks):
            # no need to determine the ActiveData name if not querying
            return

    def set_activedata_test_name(self):
        # activedata_test_name is the test name as it appears in ActiveData
        self.activedata_test_name = None
        simple_names = [self.full_test_name, self.test_name, self.short_name]
        simple_names = [x for x in simple_names if x]
        searches = [
            {"in": {"result.test": simple_names}},
        ]
        regex_names = [".*%s.*" % re.escape(x) for x in simple_names if x]
        for r in regex_names:
            searches.append({"regexp": {"result.test": r}})
        query = {
            "from": "unittest",
            "format": "list",
            "limit": 10,
            "groupby": ["result.test"],
            "where": {
                "and": [
                    {"or": searches},
                    {"in": {"build.branch": self.branches.split(",")}},
                    {"gt": {"run.timestamp": {"date": self.start}}},
                    {"lt": {"run.timestamp": {"date": self.end}}},
                ]
            },
        }
        print("Querying ActiveData...")  # the following query can take a long time
        data = self.activedata_query(query)
        if data and len(data) > 0:
            self.activedata_test_name = [
                d["result"]["test"]
                for p in simple_names + regex_names
                for d in data
                if re.match(p + "$", d["result"]["test"])
            ][0]  # first match is best match
        if self.activedata_test_name:
            print(
                "Found records matching '%s' in ActiveData."
                % self.activedata_test_name
            )
        else:
            print(
                "Unable to find matching records in ActiveData; using %s!"
                % self.test_name
            )
            self.activedata_test_name = self.test_name

    def get_platform(self, record):
        if "platform" in record["build"]:
            platform = record["build"]["platform"]
        else:
            platform = "-"
        platform_words = platform.split("-")
        types_label = ""
        # combine run and build types and eliminate duplicates
        run_types = []
        if "run" in record and "type" in record["run"]:
            run_types = record["run"]["type"]
            run_types = run_types if isinstance(run_types, list) else [run_types]
        build_types = []
        if "build" in record and "type" in record["build"]:
            build_types = record["build"]["type"]
            build_types = (
                build_types if isinstance(build_types, list) else [build_types]
            )
        run_types = list(set(run_types + build_types))
        # '1proc' is used as a treeherder label but does not appear in run types
        if "e10s" not in run_types:
            run_types = run_types + ["1proc"]
        for run_type in run_types:
            # chunked is not interesting
            if run_type == "chunked":
                continue
            # e10s is the default: implied
            if run_type == "e10s":
                continue
            # sometimes a build/run type is already present in the build platform
            if run_type in platform_words:
                continue
            if types_label:
                types_label += "-"
            types_label += run_type
        return "%s/%s:" % (platform, types_label)
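
    # Illustrative example (hypothetical record): for
    #     record["build"] == {"platform": "linux64", "type": "opt"}
    #     record["run"]["type"] == ["e10s", "chunked"]
    # the result is "linux64/opt:"; if "e10s" were absent, "1proc" would be added to the
    # label (e.g. "linux64/opt-1proc:", with ordering determined by set iteration).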

    def report_test_results(self):
        # Report test pass/fail summary from ActiveData
        query = {
            "from": "unittest",
            "format": "list",
            "limit": 100,
            "groupby": ["build.platform", "build.type"],
            "select": [
                {"aggregate": "count"},
                {
                    "name": "failures",
                    "value": {
                        "case": [{"when": {"eq": {"result.ok": "F"}}, "then": 1}]
                    },
                    "aggregate": "sum",
                    "default": 0,
                },
                {
                    "name": "skips",
                    "value": {
                        "case": [{"when": {"eq": {"result.status": "SKIP"}}, "then": 1}]
                    },
                    "aggregate": "sum",
                    "default": 0,
                },
                {"value": "run.type", "aggregate": "union"},
            ],
            "where": {
                "and": [
                    {"eq": {"result.test": self.activedata_test_name}},
                    {"in": {"build.branch": self.branches.split(",")}},
                    {"gt": {"run.timestamp": {"date": self.start}}},
                    {"lt": {"run.timestamp": {"date": self.end}}},
                ]
            },
        }
        print(
            "\nTest results for %s on %s between %s and %s"
            % (self.activedata_test_name, self.branches, self.start, self.end)
        )
        data = self.activedata_query(query)
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            worst_rate = 0.0
            worst_platform = None
            total_runs = 0
            total_failures = 0
            for record in data:
                platform = self.get_platform(record)
                if platform.startswith("-"):
                    continue
                runs = record["count"]
                total_runs = total_runs + runs
                failures = record.get("failures", 0)
                skips = record.get("skips", 0)
                total_failures = total_failures + failures
                rate = float(failures) / runs
                if rate >= worst_rate:
                    worst_rate = rate
                    worst_platform = platform
                    worst_failures = failures
                    worst_runs = runs
                print(
                    "%-40s %6d failures (%6d skipped) in %6d runs"
                    % (platform, failures, skips, runs)
                )
            print(
                "\nTotal: %d failures in %d runs or %.3f failures/run"
                % (total_failures, total_runs, float(total_failures) / total_runs)
            )
            if worst_failures > 0:
                print(
                    "Worst rate on %s %d failures in %d runs or %.3f failures/run"
                    % (worst_platform, worst_failures, worst_runs, worst_rate)
                )
        else:
            print("No test result data found.")

    def report_test_durations(self):
        # Report test durations summary from ActiveData
        query = {
            "from": "unittest",
            "format": "list",
            "limit": 100,
            "groupby": ["build.platform", "build.type"],
            "select": [
                {"value": "result.duration", "aggregate": "average", "name": "average"},
                {"value": "result.duration", "aggregate": "min", "name": "min"},
                {"value": "result.duration", "aggregate": "max", "name": "max"},
                {"aggregate": "count"},
                {"value": "run.type", "aggregate": "union"},
            ],
            "where": {
                "and": [
                    {"eq": {"result.ok": "T"}},
                    {"eq": {"result.test": self.activedata_test_name}},
                    {"in": {"build.branch": self.branches.split(",")}},
                    {"gt": {"run.timestamp": {"date": self.start}}},
                    {"lt": {"run.timestamp": {"date": self.end}}},
                ]
            },
        }
        data = self.activedata_query(query)
        print(
            "\nTest durations for %s on %s between %s and %s"
            % (self.activedata_test_name, self.branches, self.start, self.end)
        )
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            for record in data:
                platform = self.get_platform(record)
                if platform.startswith("-"):
                    continue
                print(
                    "%-40s %6.2f s (%.2f s - %.2f s over %d runs)"
                    % (
                        platform,
                        record["average"],
                        record["min"],
                        record["max"],
                        record["count"],
                    )
                )
        else:
            print("No test durations found.")

    def report_test_tasks(self):
        # Report test tasks summary from ActiveData
        query = {
            "from": "unittest",
            "format": "list",
            "limit": 1000,
            "select": ["build.platform", "build.type", "run.type", "run.name"],
            "where": {
                "and": [
                    {"eq": {"result.test": self.activedata_test_name}},
                    {"in": {"build.branch": self.branches.split(",")}},
                    {"gt": {"run.timestamp": {"date": self.start}}},
                    {"lt": {"run.timestamp": {"date": self.end}}},
                ]
            },
        }
        data = self.activedata_query(query)
        print(
            "\nTest tasks for %s on %s between %s and %s"
            % (self.activedata_test_name, self.branches, self.start, self.end)
        )
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            consolidated = {}
            for record in data:
                platform = self.get_platform(record)
                if platform not in consolidated:
                    consolidated[platform] = {}
                if record["run"]["name"] in consolidated[platform]:
                    consolidated[platform][record["run"]["name"]] += 1
                else:
                    consolidated[platform][record["run"]["name"]] = 1
            for key in sorted(consolidated.keys()):
                tasks = ""
                for task in consolidated[key].keys():
                    if tasks:
                        tasks += "\n%-40s " % ""
                    tasks += task
                    tasks += " in %d runs" % consolidated[key][task]
                print("%-40s %s" % (key, tasks))
        else:
            print("No test tasks found.")

    def report_bugs(self):
        # Report open bugs matching the test name
        search = self.full_test_name
        if self.test_name:
            search = "%s,%s" % (search, self.test_name)
        if self.short_name:
            search = "%s,%s" % (search, self.short_name)
        payload = {"quicksearch": search, "include_fields": "id,summary"}
        response = requests.get("https://bugzilla.mozilla.org/rest/bug", payload)
        response.raise_for_status()
        json_response = response.json()
        print("\nBugzilla quick search for '%s':" % search)
        if "bugs" in json_response:
            for bug in json_response["bugs"]:
                print("Bug %s: %s" % (bug["id"], bug["summary"]))
        else:
            print("No bugs found.")

    def report(
        self,
        test_names,
        branches,
        start,
        end,
        show_info,
        show_results,
        show_durations,
        show_tasks,
        show_bugs,
    ):
        self.branches = branches
        self.start = start
        self.end = end
        self.show_info = show_info
        self.show_results = show_results
        self.show_durations = show_durations
        self.show_tasks = show_tasks

        if (
            not self.show_info
            and not self.show_results
            and not self.show_durations
            and not self.show_tasks
            and not show_bugs
        ):
            # by default, show everything
            self.show_info = True
            self.show_results = True
            self.show_durations = True
            self.show_tasks = True
            show_bugs = True

        for test_name in test_names:
            print("===== %s =====" % test_name)
            self.test_name = test_name
            if len(self.test_name) < 6:
                print("'%s' is too short for a test name!" % self.test_name)
                continue
            self.set_test_name()
            if show_bugs:
                self.report_bugs()
            self.set_activedata_test_name()
            if self.show_results:
                self.report_test_results()
            if self.show_durations:
                self.report_test_durations()
            if self.show_tasks:
                self.report_test_tasks()
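
    # Driving sketch (normally this is invoked via 'mach test-info tests'; the argument
    # values below are hypothetical):
    #
    #     ti = TestInfoTests(verbose=True)
    #     ti.report(
    #         ["test_keepalive.html"],      # test_names
    #         "mozilla-central,autoland",   # branches
    #         "today-week",                 # start (ActiveData date string)
    #         "now",                        # end
    #         show_info=False,
    #         show_results=True,
    #         show_durations=True,
    #         show_tasks=False,
    #         show_bugs=True,
    #     )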


class TestInfoLongRunningTasks(TestInfo):
    """
    Support 'mach test-info long-tasks': Summary of tasks approaching their max-run-time.
    """

    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)

    def report(self, branches, start, end, threshold_pct, filter_threshold_pct):
        def get_long_running_ratio(record):
            count = record["count"]
            tasks_gt_pct = record["tasks_gt_pct"]
            # pylint --py3k W1619
            return count / tasks_gt_pct

        # Search test durations in ActiveData for long-running tests
        query = {
            "from": "task",
            "format": "list",
            "groupby": ["run.name"],
            "limit": 1000,
            "select": [
                {
                    "value": "task.maxRunTime",
                    "aggregate": "median",
                    "name": "max_run_time",
                },
                {"aggregate": "count"},
                {
                    "value": {
                        "when": {
                            "gt": [
                                {"div": ["action.duration", "task.maxRunTime"]},
                                threshold_pct / 100.0,
                            ]
                        },
                        "then": 1,
                    },
                    "aggregate": "sum",
                    "name": "tasks_gt_pct",
                },
            ],
            "where": {
                "and": [
                    {"in": {"build.branch": branches.split(",")}},
                    {"gt": {"task.run.start_time": {"date": start}}},
                    {"lte": {"task.run.start_time": {"date": end}}},
                    {"eq": {"task.state": "completed"}},
                ]
            },
        }
        data = self.activedata_query(query)
        print(
            "\nTasks nearing their max-run-time on %s between %s and %s"
            % (branches, start, end)
        )
        if data and len(data) > 0:
            filtered = []
            for record in data:
                if "tasks_gt_pct" in record:
                    count = record["count"]
                    tasks_gt_pct = record["tasks_gt_pct"]
                    if float(tasks_gt_pct) / count > filter_threshold_pct / 100.0:
                        filtered.append(record)
            filtered.sort(key=get_long_running_ratio)
            if not filtered:
                print("No long running tasks found.")
            for record in filtered:
                name = record["run"]["name"]
                count = record["count"]
                max_run_time = record["max_run_time"]
                tasks_gt_pct = record["tasks_gt_pct"]
                # pylint --py3k W1619
                print(
                    "%-55s: %d of %d runs (%.1f%%) exceeded %d%% of max-run-time (%d s)"
                    % (
                        name,
                        tasks_gt_pct,
                        count,
                        tasks_gt_pct * 100 / count,
                        threshold_pct,
                        max_run_time,
                    )
                )
        else:
            print("No tasks found.")


class TestInfoReport(TestInfo):
    """
    Support 'mach test-info report': Report of test runs summarized by
    manifest and component.
    """

    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)
        self.total_activedata_matches = 0
        self.threads = []

    def add_activedata_for_suite(
        self, label, branches, days, suite_clause, tests_clause, path_mod
    ):
        dates_clause = {"date": "today-%dday" % days}
        where_conditions = [
            suite_clause,
            {"in": {"repo.branch.name": branches.split(",")}},
            {"gt": {"run.timestamp": dates_clause}},
        ]
        if tests_clause:
            where_conditions.append(tests_clause)
        ad_query = {
            "from": "unittest",
            "limit": ACTIVEDATA_RECORD_LIMIT,
            "format": "list",
            "groupby": ["result.test"],
            "select": [
                {"name": "result.count", "aggregate": "count"},
                {
                    "name": "result.duration",
                    "value": "result.duration",
                    "aggregate": "sum",
                },
                {
                    "name": "result.failures",
                    "value": {
                        "case": [{"when": {"eq": {"result.ok": "F"}}, "then": 1}]
                    },
                    "aggregate": "sum",
                    "default": 0,
                },
                {
                    "name": "result.skips",
                    "value": {
                        "case": [{"when": {"eq": {"result.status": "SKIP"}}, "then": 1}]
                    },
                    "aggregate": "sum",
                    "default": 0,
                },
            ],
            "where": {"and": where_conditions},
        }
        t = ActiveDataThread(label, self, ad_query, path_mod)
        self.threads.append(t)

    def update_report(self, by_component, result, path_mod):
        def update_item(item, label, value):
            # It is important to include any existing item value in case ActiveData
            # returns multiple records for the same test; that can happen if the report
            # sometimes maps more than one ActiveData record to the same path.
            new_value = item.get(label, 0) + value
            if type(new_value) == int:
                item[label] = new_value
            else:
                item[label] = float(round(new_value, 2))  # pylint: disable=W1633

        if "test" in result and "tests" in by_component:
            test = result["test"]
            if path_mod:
                test = path_mod(test)
            for bc in by_component["tests"]:
                for item in by_component["tests"][bc]:
                    if test == item["test"]:
                        # pylint: disable=W1633
                        seconds = float(round(result.get("duration", 0), 2))
                        update_item(item, "total run time, seconds", seconds)
                        update_item(item, "total runs", result.get("count", 0))
                        update_item(item, "skipped runs", result.get("skips", 0))
                        update_item(item, "failed runs", result.get("failures", 0))
                        return True
        return False

    def collect_activedata_results(self, by_component):
        # Start the first MAX_ACTIVEDATA_CONCURRENCY threads. If too many
        # concurrent requests are made to ActiveData, the requests frequently
        # fail (504 is the typical response).
        for i in range(min(MAX_ACTIVEDATA_CONCURRENCY, len(self.threads))):
            t = self.threads[i]
            t.start()
        # Wait for running threads (first N threads in self.threads) to complete.
        # When a thread completes, start the next thread, process the results
        # from the completed thread, and remove the completed thread from
        # the thread list.
        while len(self.threads):
            running_threads = min(MAX_ACTIVEDATA_CONCURRENCY, len(self.threads))
            for i in range(running_threads):
                t = self.threads[i]
                t.join(1)
                if not t.is_alive():
                    ad_response = t.response
                    path_mod = t.context
                    name = t.name
                    del self.threads[i]
                    if len(self.threads) >= MAX_ACTIVEDATA_CONCURRENCY:
                        running_threads = min(
                            MAX_ACTIVEDATA_CONCURRENCY, len(self.threads)
                        )
                        self.threads[running_threads - 1].start()
                    if ad_response:
                        if len(ad_response) >= ACTIVEDATA_RECORD_LIMIT:
                            print(
                                "%s: ActiveData query limit reached; data may be missing"
                                % name
                            )
                        matches = 0
                        for record in ad_response:
                            if "result" in record:
                                result = record["result"]
                                if self.update_report(by_component, result, path_mod):
                                    matches += 1
                        self.log_verbose(
                            "%s: %d results; %d matches"
                            % (name, len(ad_response), matches)
                        )
                        self.total_activedata_matches += matches
                    break

    def path_mod_reftest(self, path):
        # "<path1> == <path2>" -> "<path1>"
        path = path.split(" ")[0]
        # "<path>?<params>" -> "<path>"
        path = path.split("?")[0]
        # "<path>#<fragment>" -> "<path>"
        path = path.split("#")[0]
        return path

    def path_mod_jsreftest(self, path):
        # "<path>;assert" -> "<path>"
        path = path.split(";")[0]
        return path

    def path_mod_marionette(self, path):
        # "<path> <test-name>" -> "<path>"
        path = path.split(" ")[0]
        # "part1\part2" -> "part1/part2"
        path = path.replace("\\", os.path.sep)
        return path

    def path_mod_wpt(self, path):
        if path[0] == os.path.sep:
            # "/<path>" -> "<path>"
            path = path[1:]
        # "<path>" -> "testing/web-platform/tests/<path>"
        path = os.path.join("testing", "web-platform", "tests", path)
        # "<path>?<params>" -> "<path>"
        path = path.split("?")[0]
        return path

    def path_mod_jittest(self, path):
        # "part1\part2" -> "part1/part2"
        path = path.replace("\\", os.path.sep)
        # "<path>" -> "js/src/jit-test/tests/<path>"
        return os.path.join("js", "src", "jit-test", "tests", path)

    def path_mod_xpcshell(self, path):
        # "<manifest>.ini:<path>" -> "<path>"
        path = path.split(".ini:")[-1]
        return path
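
    # Illustrative examples of the path_mod_* normalizations above (paths hypothetical):
    #     path_mod_reftest("layout/reftests/a.html == layout/reftests/a-ref.html")
    #         -> "layout/reftests/a.html"
    #     path_mod_wpt("/fetch/api/basic/keepalive.any.html?param")
    #         -> "testing/web-platform/tests/fetch/api/basic/keepalive.any.html"
    #     path_mod_xpcshell("xpcshell.ini:toolkit/components/test_example.js")
    #         -> "toolkit/components/test_example.js"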

    def add_activedata(self, branches, days, by_component):
        suites = {
            # List of known suites requiring special path handling and/or
            # suites typically containing thousands of test paths.
            # regexes have been selected by trial and error to partition data
            # into queries returning less than ACTIVEDATA_RECORD_LIMIT records.
            "reftest": (
                self.path_mod_reftest,
                [
                    {"regex": {"result.test": "layout/reftests/[a-k].*"}},
                    {"regex": {"result.test": "layout/reftests/[^a-k].*"}},
                    {"not": {"regex": {"result.test": "layout/reftests/.*"}}},
                ],
            ),
            "web-platform-tests": (
                self.path_mod_wpt,
                [
                    {"regex": {"result.test": "/[a-g].*"}},
                    {"regex": {"result.test": "/[h-p].*"}},
                    {"not": {"regex": {"result.test": "/[a-p].*"}}},
                ],
            ),
            "web-platform-tests-reftest": (
                self.path_mod_wpt,
                [
                    {"regex": {"result.test": "/css/css-.*"}},
                    {"not": {"regex": {"result.test": "/css/css-.*"}}},
                ],
            ),
            "crashtest": (
                None,
                [
                    {"regex": {"result.test": "[a-g].*"}},
                    {"not": {"regex": {"result.test": "[a-g].*"}}},
                ],
            ),
            "web-platform-tests-wdspec": (self.path_mod_wpt, [None]),
            "web-platform-tests-crashtest": (self.path_mod_wpt, [None]),
            "web-platform-tests-print-reftest": (self.path_mod_wpt, [None]),
            "xpcshell": (self.path_mod_xpcshell, [None]),
            "mochitest-plain": (None, [None]),
            "mochitest-browser-chrome": (None, [None]),
            "mochitest-media": (None, [None]),
            "mochitest-devtools-chrome": (None, [None]),
            "marionette": (self.path_mod_marionette, [None]),
            "mochitest-chrome": (None, [None]),
        }
        unsupported_suites = [
            # Usually these suites are excluded because currently the test resolver
            # does not provide test paths for them.
            "jsreftest",
            "jittest",
            "geckoview-junit",
            "cppunittest",
        ]
        for suite in suites:
            suite_clause = {"eq": {"run.suite.name": suite}}
            path_mod = suites[suite][0]
            test_clauses = suites[suite][1]
            suite_count = 1
            for test_clause in test_clauses:
                label = "%s-%d" % (suite, suite_count)
                suite_count += 1
                self.add_activedata_for_suite(
                    label, branches, days, suite_clause, test_clause, path_mod
                )
        # Remainder: All supported suites not handled above.
        suite_clause = {
            "not": {"in": {"run.suite.name": unsupported_suites + list(suites)}}
        }
        self.add_activedata_for_suite(
            "remainder", branches, days, suite_clause, None, None
        )
        self.collect_activedata_results(by_component)

    def description(
        self,
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        show_activedata,
        filter_values,
        filter_keys,
        branches,
        days,
    ):
        # provide a natural language description of the report options
        what = []
        if show_manifests:
            what.append("test manifests")
        if show_tests:
            what.append("tests")
        if show_annotations:
            what.append("test manifest annotations")
        if show_summary and len(what) == 0:
            what.append("summary of tests only")
        if len(what) > 1:
            what[-1] = "and " + what[-1]
        what = ", ".join(what)
        d = "Test summary report for " + what
        if components:
            d += ", in specified components (%s)" % components
        else:
            d += ", in all components"
        if flavor:
            d += ", in specified flavor (%s)" % flavor
        if subsuite:
            d += ", in specified subsuite (%s)" % subsuite
        if paths:
            d += ", on specified paths (%s)" % paths
        if filter_values:
            d += ", containing '%s'" % filter_values
            if filter_keys:
                d += " in manifest keys '%s'" % filter_keys
            else:
                d += " in any part of manifest entry"
        if show_activedata:
            d += ", including historical run-time data for the last %d days on %s" % (
                days,
                branches,
            )
        d += " as of %s." % datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
        return d

    def report(
        self,
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        show_activedata,
        filter_values,
        filter_keys,
        show_components,
        output_file,
        branches,
        days,
    ):
        def matches_filters(test):
            """
            Return True if all of the requested filter_values are found in this test;
            if filter_keys are specified, restrict search to those test keys.
            """
            for value in filter_values:
                value_found = False
                for key in test:
                    if not filter_keys or key in filter_keys:
                        if re.search(value, test[key]):
                            value_found = True
                            break
                if not value_found:
                    return False
            return True

        start_time = datetime.datetime.now()

        # Ensure useful report by default
        if (
            not show_manifests
            and not show_tests
            and not show_summary
            and not show_annotations
        ):
            show_manifests = True
            show_summary = True

        by_component = {}
        if components:
            components = components.split(",")
        if filter_keys:
            filter_keys = filter_keys.split(",")
        if filter_values:
            filter_values = filter_values.split(",")
        else:
            filter_values = []
        display_keys = (filter_keys or []) + ["skip-if", "fail-if", "fails-if"]
        display_keys = set(display_keys)

        print("Finding tests...")
        here = os.path.abspath(os.path.dirname(__file__))
        resolver = TestResolver.from_environment(
            cwd=here, loader_cls=TestManifestLoader
        )
        tests = list(
            resolver.resolve_tests(paths=paths, flavor=flavor, subsuite=subsuite)
        )

        manifest_paths = set()
        for t in tests:
            if "manifest" in t and t["manifest"] is not None:
                manifest_paths.add(t["manifest"])
        manifest_count = len(manifest_paths)
        print(
            "Resolver found {} tests, {} manifests".format(len(tests), manifest_count)
        )

        if show_manifests:
            topsrcdir = self.build_obj.topsrcdir
            by_component["manifests"] = {}
            manifest_paths = list(manifest_paths)
            manifest_paths.sort()
            relpaths = []
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, topsrcdir)
                if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
                    continue
                relpaths.append(relpath)
            reader = self.build_obj.mozbuild_reader(config_mode="empty")
            files_info = reader.files_info(relpaths)
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, topsrcdir)
                if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
                    continue
                manifest_info = None
                if relpath in files_info:
                    bug_component = files_info[relpath].get("BUG_COMPONENT")
                    if bug_component:
                        key = "{}::{}".format(
                            bug_component.product, bug_component.component
                        )
                    else:
                        key = "<unknown bug component>"
                    if (not components) or (key in components):
                        manifest_info = {"manifest": relpath, "tests": 0, "skipped": 0}
                        rkey = key if show_components else "all"
                        if rkey in by_component["manifests"]:
                            by_component["manifests"][rkey].append(manifest_info)
                        else:
                            by_component["manifests"][rkey] = [manifest_info]
                if manifest_info:
                    for t in tests:
                        if t["manifest"] == manifest_path:
                            manifest_info["tests"] += 1
                            if t.get("skip-if"):
                                manifest_info["skipped"] += 1
            for key in by_component["manifests"]:
                by_component["manifests"][key].sort(key=lambda k: k["manifest"])

        if show_tests:
            by_component["tests"] = {}

        if show_tests or show_summary or show_annotations:
            test_count = 0
            failed_count = 0
            skipped_count = 0
            annotation_count = 0
            condition_count = 0
            component_set = set()
            relpaths = []
            conditions = {}
            known_unconditional_annotations = ["skip", "fail", "asserts", "random"]
            known_conditional_annotations = [
                "skip-if",
                "fail-if",
                "run-if",
                "fails-if",
                "fuzzy-if",
                "random-if",
                "asserts-if",
            ]
            for t in tests:
                relpath = t.get("srcdir_relpath")
                relpaths.append(relpath)
            reader = self.build_obj.mozbuild_reader(config_mode="empty")
            files_info = reader.files_info(relpaths)
            for t in tests:
                if not matches_filters(t):
                    continue
                if "referenced-test" in t:
                    # Avoid double-counting reftests: disregard reference file entries
                    continue
                if show_annotations:
                    for key in t:
                        if key in known_unconditional_annotations:
                            annotation_count += 1
                        if key in known_conditional_annotations:
                            annotation_count += 1
                            # Here 'key' is a manifest annotation type like 'skip-if' and t[key]
                            # is the associated condition. For example, the manifestparser
                            # manifest annotation, "skip-if = os == 'win'", is expected to be
                            # encoded as t['skip-if'] = "os == 'win'".
                            # To allow for reftest manifests, t[key] may have multiple entries
                            # separated by ';', each corresponding to a condition for that test
                            # and annotation type. For example,
                            # "skip-if(Android&&webrender) skip-if(OSX)", would be
                            # encoded as t['skip-if'] = "Android&&webrender;OSX".
                            annotation_conditions = t[key].split(";")
                            for condition in annotation_conditions:
                                condition_count += 1
                                # Trim reftest fuzzy-if ranges: everything after the first comma
                                # eg. "Android,0-2,1-3" -> "Android"
                                condition = condition.split(",")[0]
                                if condition not in conditions:
                                    conditions[condition] = 0
                                conditions[condition] += 1
                test_count += 1
                relpath = t.get("srcdir_relpath")
                if relpath in files_info:
                    bug_component = files_info[relpath].get("BUG_COMPONENT")
                    if bug_component:
                        key = "{}::{}".format(
                            bug_component.product, bug_component.component
                        )
                    else:
                        key = "<unknown bug component>"
                    if (not components) or (key in components):
                        component_set.add(key)
                        test_info = {"test": relpath}
                        for test_key in display_keys:
                            value = t.get(test_key)
                            if value:
                                test_info[test_key] = value
                        if t.get("fail-if"):
                            failed_count += 1
                        if t.get("fails-if"):
                            failed_count += 1
                        if t.get("skip-if"):
                            skipped_count += 1
                        if show_tests:
                            rkey = key if show_components else "all"
                            if rkey in by_component["tests"]:
                                # Avoid duplicates: Some test paths have multiple TestResolver
                                # entries, as when a test is included by multiple manifests.
                                found = False
                                for ctest in by_component["tests"][rkey]:
                                    if ctest["test"] == test_info["test"]:
                                        found = True
                                        break
                                if not found:
                                    by_component["tests"][rkey].append(test_info)
                            else:
                                by_component["tests"][rkey] = [test_info]
            if show_tests:
                for key in by_component["tests"]:
                    by_component["tests"][key].sort(key=lambda k: k["test"])

        if show_activedata:
            try:
                self.add_activedata(branches, days, by_component)
            except Exception:
                print("Failed to retrieve some ActiveData data.")
                traceback.print_exc()
            self.log_verbose(
                "%d tests updated with matching ActiveData data"
                % self.total_activedata_matches
            )
            self.log_verbose(
                "%d seconds waiting for ActiveData" % self.total_activedata_seconds
            )

        by_component["description"] = self.description(
            components,
            flavor,
            subsuite,
            paths,
            show_manifests,
            show_tests,
            show_summary,
            show_annotations,
            show_activedata,
            filter_values,
            filter_keys,
            branches,
            days,
        )

        if show_summary:
            by_component["summary"] = {}
            by_component["summary"]["components"] = len(component_set)
            by_component["summary"]["manifests"] = manifest_count
            by_component["summary"]["tests"] = test_count
            by_component["summary"]["failed tests"] = failed_count
            by_component["summary"]["skipped tests"] = skipped_count

        if show_annotations:
            by_component["annotations"] = {}
            by_component["annotations"]["total annotations"] = annotation_count
            by_component["annotations"]["total conditions"] = condition_count
            by_component["annotations"]["unique conditions"] = len(conditions)
            by_component["annotations"]["conditions"] = conditions

        self.write_report(by_component, output_file)

        end_time = datetime.datetime.now()
        self.log_verbose(
            "%d seconds total to generate report"
            % (end_time - start_time).total_seconds()
        )

    def write_report(self, by_component, output_file):
        json_report = json.dumps(by_component, indent=2, sort_keys=True)
        if output_file:
            output_file = os.path.abspath(output_file)
            output_dir = os.path.dirname(output_file)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)

            with open(output_file, "w") as f:
                f.write(json_report)
        else:
            print(json_report)
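
    # Sketch of a diff invocation (file names hypothetical): 'before' and 'after' may each
    # be a local path or a URL to a previously generated report.
    #
    #     TestInfoReport(verbose=True).report_diff(
    #         "report-before.json", "report-after.json", "report-diff.json"
    #     )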

    def report_diff(self, before, after, output_file):
        """
        Support for 'mach test-info report-diff'.
        """

        def get_file(path_or_url):
            if urlparse.urlparse(path_or_url).scheme:
                response = requests.get(path_or_url)
                response.raise_for_status()
                return json.loads(response.text)
            with open(path_or_url) as f:
                return json.load(f)

        report1 = get_file(before)
        report2 = get_file(after)

        by_component = {"tests": {}, "summary": {}}
        self.diff_summaries(by_component, report1["summary"], report2["summary"])
        self.diff_all_components(by_component, report1["tests"], report2["tests"])
        self.write_report(by_component, output_file)

    def diff_summaries(self, by_component, summary1, summary2):
        """
        Update by_component with comparison of summaries.
        """
        all_keys = set(summary1.keys()) | set(summary2.keys())
        for key in all_keys:
            delta = summary2.get(key, 0) - summary1.get(key, 0)
            by_component["summary"]["%s delta" % key] = delta

    def diff_all_components(self, by_component, tests1, tests2):
        """
        Update by_component with any added/deleted tests, for all components.
        """
        self.added_count = 0
        self.deleted_count = 0
        for component in tests1:
            component1 = tests1[component]
            component2 = [] if component not in tests2 else tests2[component]
            self.diff_component(by_component, component, component1, component2)
        for component in tests2:
            if component not in tests1:
                component2 = tests2[component]
                self.diff_component(by_component, component, [], component2)
        by_component["summary"]["added tests"] = self.added_count
        by_component["summary"]["deleted tests"] = self.deleted_count

    def diff_component(self, by_component, component, component1, component2):
        """
        Update by_component[component] with any added/deleted tests for the
        named component.
        "added": tests found in component2 but missing from component1.
        "deleted": tests found in component1 but missing from component2.
        """
        tests1 = set([t["test"] for t in component1])
        tests2 = set([t["test"] for t in component2])
        deleted = tests1 - tests2
        added = tests2 - tests1
        if deleted or added:
            by_component["tests"][component] = {}
            if deleted:
                by_component["tests"][component]["deleted"] = sorted(list(deleted))
            if added:
                by_component["tests"][component]["added"] = sorted(list(added))
        self.added_count += len(added)
        self.deleted_count += len(deleted)
        common = len(tests1.intersection(tests2))
        self.log_verbose(
            "%s: %d deleted, %d added, %d common"
            % (component, len(deleted), len(added), common)
        )