# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import absolute_import, division, print_function

import datetime
import errno
import json
import os
import posixpath
import re
import subprocess
import threading
import traceback

import requests
import six.moves.urllib_parse as urlparse

import mozpack.path as mozpath
from moztest.resolve import TestResolver, TestManifestLoader
from mozfile import which
from mozbuild.base import MozbuildObject, MachCommandConditions as conditions

ACTIVEDATA_RECORD_LIMIT = 10000
MAX_ACTIVEDATA_CONCURRENCY = 5
MAX_ACTIVEDATA_RETRIES = 5
REFERER = "https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Test-Info"


class TestInfo(object):
    """
    Support 'mach test-info'.
    """

    def __init__(self, verbose):
        self.verbose = verbose
        here = os.path.abspath(os.path.dirname(__file__))
        self.build_obj = MozbuildObject.from_environment(cwd=here)
        self.total_activedata_seconds = 0

    def log_verbose(self, what):
        if self.verbose:
            print(what)

    def activedata_query(self, query):
        start_time = datetime.datetime.now()
        self.log_verbose(start_time)
        self.log_verbose(json.dumps(query))
        response = requests.post(
            "http://activedata.allizom.org/query",
            data=json.dumps(query),
            headers={"referer": REFERER},
        )
        end_time = datetime.datetime.now()
        self.total_activedata_seconds += (end_time - start_time).total_seconds()
        self.log_verbose(end_time)
        self.log_verbose(response)
        response.raise_for_status()
        data = response.json()["data"]
        self.log_verbose("response length: %d" % len(data))
        return data
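
    # Illustrative sketch (an assumption, not taken verbatim from this module):
    # the query payloads passed to activedata_query() are JSON-able dicts using
    # the same clause style as the queries built in the methods below, and the
    # service replies with a JSON object whose "data" member is the list
    # returned here. A hypothetical minimal query might look like:
    #
    #     query = {
    #         "limit": 10,
    #         "groupby": ["result.test"],
    #         "where": {"in": {"build.branch": ["mozilla-central"]}},
    #     }
    #     data = self.activedata_query(query)  # list of grouped records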


class ActiveDataThread(threading.Thread):
    """
    A thread to query ActiveData and wait for its response.
    """

    def __init__(self, name, ti, query, context):
        threading.Thread.__init__(self, name=name)
        self.ti = ti
        self.query = query
        self.context = context
        self.response = None

    def run(self):
        attempt = 1
        while attempt < MAX_ACTIVEDATA_RETRIES and not self.response:
            try:
                self.response = self.ti.activedata_query(self.query)
                if not self.response:
                    self.ti.log_verbose("%s: no data received for query" % self.name)
            except Exception:
                self.ti.log_verbose(
                    "%s: Exception on attempt #%d:" % (self.name, attempt)
                )
                traceback.print_exc()
            attempt += 1


class TestInfoTests(TestInfo):
    """
    Support 'mach test-info tests': Detailed report of specified tests.
    """

    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)

        self._hg = None
        if conditions.is_hg(self.build_obj):
            self._hg = which("hg")
            if not self._hg:
                raise OSError(errno.ENOENT, "Could not find 'hg' on PATH.")

        self._git = None
        if conditions.is_git(self.build_obj):
            self._git = which("git")
            if not self._git:
                raise OSError(errno.ENOENT, "Could not find 'git' on PATH.")

    def find_in_hg_or_git(self, test_name):
        if self._hg:
            cmd = [self._hg, "files", "-I", test_name]
        elif self._git:
            cmd = [self._git, "ls-files", test_name]
        else:
            return None
        try:
            out = subprocess.check_output(cmd, universal_newlines=True).splitlines()
        except subprocess.CalledProcessError:
            out = None
        return out

    def set_test_name(self):
        # Generating a unified report for a specific test is complicated
        # by differences in the test name used in various data sources.
        # - It is often convenient to request a report based only on
        #   a short file name, rather than the full path;
        # - Bugs may be filed in bugzilla against a simple, short test
        #   name or the full path to the test;
        # - In ActiveData, the full path is usually used, but sometimes
        #   also includes additional path components outside of the
        #   mercurial repo (common for reftests).
        # This function attempts to find appropriate names for different
        # queries based on the specified test name.
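        #
        # Purely illustrative (hypothetical file name, not from this module):
        # the same test might be requested or recorded as
        #     "test_example.html"                        (short name)
        #     "dom/base/test/test_example.html"          (full path in hg/git)
        #     ".../dom/base/test/test_example.html"      (ActiveData, extra prefix)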

        # full_test_name is full path to file in hg (or git)
        self.full_test_name = None
        out = self.find_in_hg_or_git(self.test_name)
        if out and len(out) == 1:
            self.full_test_name = out[0]
        elif out and len(out) > 1:
            print("Ambiguous test name specified. Found:")
            for line in out:
                print("    %s" % line)
        else:
            # Not found directly; retry with a wildcard on the short name.
            out = self.find_in_hg_or_git("**/%s*" % self.test_name)
            if out and len(out) == 1:
                self.full_test_name = out[0]
            elif out and len(out) > 1:
                print("Ambiguous test name. Found:")
                for line in out:
                    print("    %s" % line)
        if self.full_test_name:
            self.full_test_name = self.full_test_name.replace(os.sep, posixpath.sep)
            print("Found %s in source control." % self.full_test_name)
        else:
            print("Unable to validate test name '%s'!" % self.test_name)
            self.full_test_name = self.test_name

        # search for full_test_name in test manifests
        here = os.path.abspath(os.path.dirname(__file__))
        resolver = TestResolver.from_environment(
            cwd=here, loader_cls=TestManifestLoader
        )
        relpath = self.build_obj._wrap_path_argument(self.full_test_name).relpath()
        tests = list(resolver.resolve_tests(paths=[relpath]))
        if len(tests) == 1:
            relpath = self.build_obj._wrap_path_argument(
                tests[0]["manifest"]
            ).relpath()
            print("%s found in manifest %s" % (self.full_test_name, relpath))
            if tests[0].get("flavor"):
                print(" flavor: %s" % tests[0]["flavor"])
            if tests[0].get("skip-if"):
                print(" skip-if: %s" % tests[0]["skip-if"])
            if tests[0].get("fail-if"):
                print(" fail-if: %s" % tests[0]["fail-if"])
        elif len(tests) == 0:
            print("%s not found in any test manifest!" % self.full_test_name)
        else:
            print("%s found in more than one manifest!" % self.full_test_name)

        # short_name is full_test_name without path
        self.short_name = None
        name_idx = self.full_test_name.rfind("/")
        if name_idx > 0:
            self.short_name = self.full_test_name[name_idx + 1 :]
        if self.short_name and self.short_name == self.test_name:
            self.short_name = None

        if not (self.show_results or self.show_durations or self.show_tasks):
            # no need to determine ActiveData name if not querying
            return

    def set_activedata_test_name(self):
        # activedata_test_name is name in ActiveData
        self.activedata_test_name = None
        simple_names = [self.full_test_name, self.test_name, self.short_name]
        simple_names = [x for x in simple_names if x]
        searches = [
            {"in": {"result.test": simple_names}},
        ]
        regex_names = [".*%s.*" % re.escape(x) for x in simple_names if x]
        for r in regex_names:
            searches.append({"regexp": {"result.test": r}})
        query = {
            "groupby": ["result.test"],
            "where": {
                "and": [
                    {"or": searches},
                    {"in": {"build.branch": self.branches.split(",")}},
                    {"gt": {"run.timestamp": {"date": self.start}}},
                    {"lt": {"run.timestamp": {"date": self.end}}},
                ]
            },
        }
        print("Querying ActiveData...")  # Following query can take a long time
        data = self.activedata_query(query)
        if data and len(data) > 0:
            self.activedata_test_name = [
                d["result"]["test"]
                for p in simple_names + regex_names
                for d in data
                if re.match(p + "$", d["result"]["test"])
            ][0]  # first match is best match
        if self.activedata_test_name:
            print(
                "Found records matching '%s' in ActiveData."
                % self.activedata_test_name
            )
        else:
            print(
                "Unable to find matching records in ActiveData; using %s!"
                % self.test_name
            )
            self.activedata_test_name = self.test_name

    def get_platform(self, record):
        if "platform" in record["build"]:
            platform = record["build"]["platform"]
        else:
            platform = "-"
        platform_words = platform.split("-")
        types_label = ""
        # combine run and build types and eliminate duplicates
        run_types = []
        if "run" in record and "type" in record["run"]:
            run_types = record["run"]["type"]
            run_types = run_types if isinstance(run_types, list) else [run_types]
        build_types = []
        if "build" in record and "type" in record["build"]:
            build_types = record["build"]["type"]
            build_types = (
                build_types if isinstance(build_types, list) else [build_types]
            )
        run_types = list(set(run_types + build_types))
        # '1proc' is used as a treeherder label but does not appear in run types
        if "e10s" not in run_types:
            run_types = run_types + ["1proc"]
        for run_type in run_types:
            # chunked is not interesting
            if run_type == "chunked":
                continue
            # e10s is the default: implied
            if run_type == "e10s":
                continue
            # sometimes a build/run type is already present in the build platform
            if run_type in platform_words:
                continue
            if types_label:
                types_label += "-"
            types_label += run_type
        return "%s/%s:" % (platform, types_label)

    def report_test_results(self):
        # Report test pass/fail summary from ActiveData
        query = {
            "groupby": ["build.platform", "build.type"],
            "select": [
                {"aggregate": "count"},
                {
                    "name": "failures",
                    "value": {
                        "case": [{"when": {"eq": {"result.ok": "F"}}, "then": 1}]
                    },
                    "aggregate": "sum",
                },
                {
                    "name": "skips",
                    "value": {
                        "case": [{"when": {"eq": {"result.status": "SKIP"}}, "then": 1}]
                    },
                    "aggregate": "sum",
                },
                {"value": "run.type", "aggregate": "union"},
            ],
            "where": {
                "and": [
                    {"eq": {"result.test": self.activedata_test_name}},
                    {"in": {"build.branch": self.branches.split(",")}},
                    {"gt": {"run.timestamp": {"date": self.start}}},
                    {"lt": {"run.timestamp": {"date": self.end}}},
                ]
            },
        }
        print(
            "\nTest results for %s on %s between %s and %s"
            % (self.activedata_test_name, self.branches, self.start, self.end)
        )
        data = self.activedata_query(query)
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            worst_rate = 0.0
            worst_platform = None
            worst_failures = 0
            worst_runs = 0
            total_runs = 0
            total_failures = 0
            for record in data:
                platform = self.get_platform(record)
                if platform.startswith("-"):
                    continue
                runs = record["count"]
                total_runs = total_runs + runs
                failures = record.get("failures", 0)
                skips = record.get("skips", 0)
                total_failures = total_failures + failures
                rate = float(failures) / runs
                if rate >= worst_rate:
                    worst_rate = rate
                    worst_platform = platform
                    worst_failures = failures
                    worst_runs = runs
                print(
                    "%-40s %6d failures (%6d skipped) in %6d runs"
                    % (platform, failures, skips, runs)
                )
            print(
                "\nTotal: %d failures in %d runs or %.3f failures/run"
                % (total_failures, total_runs, float(total_failures) / total_runs)
            )
            if worst_failures > 0:
                print(
                    "Worst rate on %s %d failures in %d runs or %.3f failures/run"
                    % (worst_platform, worst_failures, worst_runs, worst_rate)
                )
        else:
            print("No test result data found.")

    def report_test_durations(self):
        # Report test durations summary from ActiveData
        query = {
            "groupby": ["build.platform", "build.type"],
            "select": [
                {"value": "result.duration", "aggregate": "average", "name": "average"},
                {"value": "result.duration", "aggregate": "min", "name": "min"},
                {"value": "result.duration", "aggregate": "max", "name": "max"},
                {"aggregate": "count"},
                {"value": "run.type", "aggregate": "union"},
            ],
            "where": {
                "and": [
                    {"eq": {"result.ok": "T"}},
                    {"eq": {"result.test": self.activedata_test_name}},
                    {"in": {"build.branch": self.branches.split(",")}},
                    {"gt": {"run.timestamp": {"date": self.start}}},
                    {"lt": {"run.timestamp": {"date": self.end}}},
                ]
            },
        }
        data = self.activedata_query(query)
        print(
            "\nTest durations for %s on %s between %s and %s"
            % (self.activedata_test_name, self.branches, self.start, self.end)
        )
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            for record in data:
                platform = self.get_platform(record)
                if platform.startswith("-"):
                    continue
                print(
                    "%-40s %6.2f s (%.2f s - %.2f s over %d runs)"
                    % (
                        platform,
                        record["average"],
                        record["min"],
                        record["max"],
                        record["count"],
                    )
                )
        else:
            print("No test durations found.")

    def report_test_tasks(self):
        # Report test tasks summary from ActiveData
        query = {
            "select": ["build.platform", "build.type", "run.type", "run.name"],
            "where": {
                "and": [
                    {"eq": {"result.test": self.activedata_test_name}},
                    {"in": {"build.branch": self.branches.split(",")}},
                    {"gt": {"run.timestamp": {"date": self.start}}},
                    {"lt": {"run.timestamp": {"date": self.end}}},
                ]
            },
        }
        data = self.activedata_query(query)
        print(
            "\nTest tasks for %s on %s between %s and %s"
            % (self.activedata_test_name, self.branches, self.start, self.end)
        )
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            consolidated = {}
            for record in data:
                platform = self.get_platform(record)
                if platform not in consolidated:
                    consolidated[platform] = {}
                if record["run"]["name"] in consolidated[platform]:
                    consolidated[platform][record["run"]["name"]] += 1
                else:
                    consolidated[platform][record["run"]["name"]] = 1
            for key in sorted(consolidated.keys()):
                tasks = ""
                for task in consolidated[key].keys():
                    if tasks:
                        tasks += "\n%-40s " % ""
                    tasks += task
                    tasks += " in %d runs" % consolidated[key][task]
                print("%-40s %s" % (key, tasks))
        else:
            print("No test tasks found.")

    def report_bugs(self):
        # Report open bugs matching test name
        search = self.full_test_name
        if self.test_name:
            search = "%s,%s" % (search, self.test_name)
        if self.short_name:
            search = "%s,%s" % (search, self.short_name)
        payload = {"quicksearch": search, "include_fields": "id,summary"}
        response = requests.get("https://bugzilla.mozilla.org/rest/bug", payload)
        response.raise_for_status()
        json_response = response.json()
        print("\nBugzilla quick search for '%s':" % search)
        if "bugs" in json_response:
            for bug in json_response["bugs"]:
                print("Bug %s: %s" % (bug["id"], bug["summary"]))
        else:
            print("No bugs found.")

    def report(
        self,
        test_names,
        branches,
        start,
        end,
        show_info,
        show_results,
        show_durations,
        show_tasks,
    ):
        self.branches = branches
        self.start = start
        self.end = end
        self.show_info = show_info
        self.show_results = show_results
        self.show_durations = show_durations
        self.show_tasks = show_tasks

        if (
            not self.show_info
            and not self.show_results
            and not self.show_durations
            and not self.show_tasks
        ):
            # by default, show everything
            self.show_info = True
            self.show_results = True
            self.show_durations = True
            self.show_tasks = True

        for test_name in test_names:
            print("===== %s =====" % test_name)
            self.test_name = test_name
            if len(self.test_name) < 6:
                print("'%s' is too short for a test name!" % self.test_name)
                continue
            self.set_test_name()
            if self.show_info:
                self.report_bugs()
            self.set_activedata_test_name()
            if self.show_results:
                self.report_test_results()
            if self.show_durations:
                self.report_test_durations()
            if self.show_tasks:
                self.report_test_tasks()


class TestInfoLongRunningTasks(TestInfo):
    """
    Support 'mach test-info long-tasks': Summary of tasks approaching their
    max-run-time.
    """

    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)

    def report(self, branches, start, end, threshold_pct, filter_threshold_pct):
        def get_long_running_ratio(record):
            count = record["count"]
            tasks_gt_pct = record["tasks_gt_pct"]
            # pylint --py3k W1619
            return count / tasks_gt_pct

        # Search test durations in ActiveData for long-running tests
        query = {
            "groupby": ["run.name"],
            "select": [
                {
                    "value": "task.maxRunTime",
                    "aggregate": "median",
                    "name": "max_run_time",
                },
                {"aggregate": "count"},
                {
                    "value": {
                        "when": {
                            "gte": [
                                {"div": ["action.duration", "task.maxRunTime"]},
                                threshold_pct / 100.0,
                            ]
                        },
                        "then": 1,
                    },
                    "aggregate": "sum",
                    "name": "tasks_gt_pct",
                },
            ],
            "where": {
                "and": [
                    {"in": {"build.branch": branches.split(",")}},
                    {"gt": {"task.run.start_time": {"date": start}}},
                    {"lte": {"task.run.start_time": {"date": end}}},
                    {"eq": {"task.state": "completed"}},
                ]
            },
        }
        data = self.activedata_query(query)
        print(
            "\nTasks nearing their max-run-time on %s between %s and %s"
            % (branches, start, end)
        )
        if data and len(data) > 0:
            filtered = []
            for record in data:
                if "tasks_gt_pct" in record:
                    count = record["count"]
                    tasks_gt_pct = record["tasks_gt_pct"]
                    if float(tasks_gt_pct) / count > filter_threshold_pct / 100.0:
                        filtered.append(record)
            filtered.sort(key=get_long_running_ratio)
            if not filtered:
                print("No long running tasks found.")
            for record in filtered:
                name = record["run"]["name"]
                count = record["count"]
                max_run_time = record["max_run_time"]
                tasks_gt_pct = record["tasks_gt_pct"]
                # pylint --py3k W1619
                print(
                    "%-55s: %d of %d runs (%.1f%%) exceeded %d%% of max-run-time (%d s)"
                    % (
                        name,
                        tasks_gt_pct,
                        count,
                        tasks_gt_pct * 100 / count,
                        threshold_pct,
                        max_run_time,
                    )
                )
        else:
            print("No tasks found.")


class TestInfoReport(TestInfo):
    """
    Support 'mach test-info report': Report of test runs summarized by
    manifest and component.
    """

    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)
        self.total_activedata_matches = 0
        self.threads = []

    def add_activedata_for_suite(
        self, label, branches, days, suite_clause, tests_clause, path_mod
    ):
        dates_clause = {"date": "today-%dday" % days}
        where_conditions = [
            suite_clause,
            {"in": {"repo.branch.name": branches.split(",")}},
            {"gt": {"run.timestamp": dates_clause}},
        ]
        if tests_clause:
            where_conditions.append(tests_clause)
        ad_query = {
            "limit": ACTIVEDATA_RECORD_LIMIT,
            "groupby": ["result.test"],
            "select": [
                {"name": "result.count", "aggregate": "count"},
                {
                    "name": "result.duration",
                    "value": "result.duration",
                    "aggregate": "sum",
                },
                {
                    "name": "result.failures",
                    "value": {
                        "case": [{"when": {"eq": {"result.ok": "F"}}, "then": 1}]
                    },
                    "aggregate": "sum",
                },
                {
                    "name": "result.skips",
                    "value": {
                        "case": [{"when": {"eq": {"result.status": "SKIP"}}, "then": 1}]
                    },
                    "aggregate": "sum",
                },
            ],
            "where": {"and": where_conditions},
        }
        t = ActiveDataThread(label, self, ad_query, path_mod)
        self.threads.append(t)

    def update_report(self, by_component, result, path_mod):
        def update_item(item, label, value):
            # It is important to include any existing item value in case ActiveData
            # returns multiple records for the same test; that can happen if the report
            # sometimes maps more than one ActiveData record to the same path.
            new_value = item.get(label, 0) + value
            if type(new_value) == int:
                item[label] = new_value
            else:
                item[label] = float(round(new_value, 2))  # pylint: disable=W1633

        if "test" in result and "tests" in by_component:
            test = result["test"]
            if path_mod:
                test = path_mod(test)
            for bc in by_component["tests"]:
                for item in by_component["tests"][bc]:
                    if test == item["test"]:
                        # pylint: disable=W1633
                        seconds = float(round(result.get("duration", 0), 2))
                        update_item(item, "total run time, seconds", seconds)
                        update_item(item, "total runs", result.get("count", 0))
                        update_item(item, "skipped runs", result.get("skips", 0))
                        update_item(item, "failed runs", result.get("failures", 0))
                        return True
        return False

    def collect_activedata_results(self, by_component):
        # Start the first MAX_ACTIVEDATA_CONCURRENCY threads. If too many
        # concurrent requests are made to ActiveData, the requests frequently
        # fail (504 is the typical response).
        for i in range(min(MAX_ACTIVEDATA_CONCURRENCY, len(self.threads))):
            self.threads[i].start()
        # Wait for running threads (first N threads in self.threads) to complete.
        # When a thread completes, start the next thread, process the results
        # from the completed thread, and remove the completed thread from
        # the thread list.
        while len(self.threads):
            running_threads = min(MAX_ACTIVEDATA_CONCURRENCY, len(self.threads))
            for i in range(running_threads):
                t = self.threads[i]
                t.join(1)
                if not t.is_alive():
                    ad_response = t.response
                    name = t.name
                    path_mod = t.context
                    del self.threads[i]
                    if len(self.threads) >= MAX_ACTIVEDATA_CONCURRENCY:
                        running_threads = min(
                            MAX_ACTIVEDATA_CONCURRENCY, len(self.threads)
                        )
                        self.threads[running_threads - 1].start()
                    if ad_response:
                        if len(ad_response) >= ACTIVEDATA_RECORD_LIMIT:
                            print(
                                "%s: ActiveData query limit reached; data may be missing"
                                % name
                            )
                        matches = 0
                        for record in ad_response:
                            if "result" in record:
                                result = record["result"]
                                if self.update_report(by_component, result, path_mod):
                                    matches += 1
                        self.log_verbose(
                            "%s: %d results; %d matches"
                            % (name, len(ad_response), matches)
                        )
                        self.total_activedata_matches += matches
                    break

    def path_mod_reftest(self, path):
        # "<path1> == <path2>" -> "<path1>"
        path = path.split(" ")[0]
        # "<path>?<params>" -> "<path>"
        path = path.split("?")[0]
        # "<path>#<fragment>" -> "<path>"
        path = path.split("#")[0]
        return path

    def path_mod_jsreftest(self, path):
        # "<path>;assert" -> "<path>"
        path = path.split(";")[0]
        return path

    def path_mod_marionette(self, path):
        # "<path> <test-name>" -> "<path>"
        path = path.split(" ")[0]
        # "part1\part2" -> "part1/part2"
        path = path.replace("\\", os.path.sep)
        return path

    def path_mod_wpt(self, path):
        if path[0] == os.path.sep:
            # "/<path>" -> "<path>"
            path = path[1:]
        # "<path>" -> "testing/web-platform/tests/<path>"
        path = os.path.join("testing", "web-platform", "tests", path)
        # "<path>?<params>" -> "<path>"
        path = path.split("?")[0]
        return path

    def path_mod_jittest(self, path):
        # "part1\part2" -> "part1/part2"
        path = path.replace("\\", os.path.sep)
        # "<path>" -> "js/src/jit-test/tests/<path>"
        return os.path.join("js", "src", "jit-test", "tests", path)

    def path_mod_xpcshell(self, path):
        # <manifest>.ini:<path> -> "<path>"
        path = path.split(".ini:")[-1]
        return path

    def add_activedata(self, branches, days, by_component):
        suites = {
            # List of known suites requiring special path handling and/or
            # suites typically containing thousands of test paths.
            # regexes have been selected by trial and error to partition data
            # into queries returning less than ACTIVEDATA_RECORD_LIMIT records.
            "reftest": (
                self.path_mod_reftest,
                [
                    {"regex": {"result.test": "layout/reftests/[a-k].*"}},
                    {"regex": {"result.test": "layout/reftests/[^a-k].*"}},
                    {"not": {"regex": {"result.test": "layout/reftests/.*"}}},
                ],
            ),
            "web-platform-tests": (
                self.path_mod_wpt,
                [
                    {"regex": {"result.test": "/[a-g].*"}},
                    {"regex": {"result.test": "/[h-p].*"}},
                    {"not": {"regex": {"result.test": "/[a-p].*"}}},
                ],
            ),
            "web-platform-tests-reftest": (
                self.path_mod_wpt,
                [
                    {"regex": {"result.test": "/css/css-.*"}},
                    {"not": {"regex": {"result.test": "/css/css-.*"}}},
                ],
            ),
            "jsreftest": (
                self.path_mod_jsreftest,
                [
                    {"regex": {"result.test": "[a-g].*"}},
                    {"not": {"regex": {"result.test": "[a-g].*"}}},
                ],
            ),
            "web-platform-tests-wdspec": (self.path_mod_wpt, [None]),
            "web-platform-tests-crashtest": (self.path_mod_wpt, [None]),
            "web-platform-tests-print-reftest": (self.path_mod_wpt, [None]),
            "xpcshell": (self.path_mod_xpcshell, [None]),
            "mochitest-plain": (None, [None]),
            "mochitest-browser-chrome": (None, [None]),
            "mochitest-media": (None, [None]),
            "mochitest-devtools-chrome": (None, [None]),
            "marionette": (self.path_mod_marionette, [None]),
            "mochitest-chrome": (None, [None]),
        }
        unsupported_suites = [
            # Usually these suites are excluded because currently the test resolver
            # does not provide test paths for them.
        ]
        for suite in suites:
            suite_clause = {"eq": {"run.suite.name": suite}}
            path_mod = suites[suite][0]
            test_clauses = suites[suite][1]
            suite_count = 0
            for test_clause in test_clauses:
                label = "%s-%d" % (suite, suite_count)
                suite_count += 1
                self.add_activedata_for_suite(
                    label, branches, days, suite_clause, test_clause, path_mod
                )
        # Remainder: All supported suites not handled above.
        suite_clause = {
            "not": {"in": {"run.suite.name": unsupported_suites + list(suites)}}
        }
        self.add_activedata_for_suite(
            "remainder", branches, days, suite_clause, None, None
        )
        self.collect_activedata_results(by_component)

    def description(
        self,
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        filter_values,
        filter_keys,
        show_activedata,
        branches,
        days,
    ):
        # provide a natural language description of the report options
        what = []
        if show_manifests:
            what.append("test manifests")
        if show_tests:
            what.append("tests")
        if show_annotations:
            what.append("test manifest annotations")
        if show_summary and len(what) == 0:
            what.append("summary of tests only")
        if len(what) > 1:
            what[-1] = "and " + what[-1]
        what = ", ".join(what)
        d = "Test summary report for " + what
        if components:
            d += ", in specified components (%s)" % components
        else:
            d += ", in all components"
        if flavor:
            d += ", in specified flavor (%s)" % flavor
        if subsuite:
            d += ", in specified subsuite (%s)" % subsuite
        if paths:
            d += ", on specified paths (%s)" % paths
        if filter_values:
            d += ", containing '%s'" % filter_values
            if filter_keys:
                d += " in manifest keys '%s'" % filter_keys
            else:
                d += " in any part of manifest entry"
        if show_activedata:
            d += ", including historical run-time data for the last %d days on %s" % (
                days,
                branches,
            )
        d += " as of %s." % datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
        return d

    def report(
        self,
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        filter_values,
        filter_keys,
        show_components,
        output_file,
        show_activedata,
        branches,
        days,
    ):
        def matches_filters(test):
            """
            Return True if all of the requested filter_values are found in this test;
            if filter_keys are specified, restrict search to those test keys.
            """
            for value in filter_values:
                value_found = False
                for key in test:
                    if not filter_keys or key in filter_keys:
                        if re.search(value, test[key]):
                            value_found = True
                            break
                if not value_found:
                    return False
            return True

        start_time = datetime.datetime.now()

        # Ensure useful report by default
        if (
            not show_manifests
            and not show_tests
            and not show_summary
            and not show_annotations
        ):
            show_manifests = True
            show_summary = True

        if components:
            components = components.split(",")
        if filter_keys:
            filter_keys = filter_keys.split(",")
        if filter_values:
            filter_values = filter_values.split(",")
        else:
            filter_values = []
        display_keys = (filter_keys or []) + ["skip-if", "fail-if", "fails-if"]
        display_keys = set(display_keys)

        print("Finding tests...")
        here = os.path.abspath(os.path.dirname(__file__))
        resolver = TestResolver.from_environment(
            cwd=here, loader_cls=TestManifestLoader
        )
        tests = list(
            resolver.resolve_tests(paths=paths, flavor=flavor, subsuite=subsuite)
        )

        manifest_paths = set()
        for t in tests:
            if "manifest" in t and t["manifest"] is not None:
                manifest_paths.add(t["manifest"])
        manifest_count = len(manifest_paths)
        print(
            "Resolver found {} tests, {} manifests".format(len(tests), manifest_count)
        )

        by_component = {}
        if show_manifests:
            topsrcdir = self.build_obj.topsrcdir
            by_component["manifests"] = {}
            manifest_paths = list(manifest_paths)
            manifest_paths.sort()
            relpaths = []
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, topsrcdir)
                if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
                    continue
                relpaths.append(relpath)
            reader = self.build_obj.mozbuild_reader(config_mode="empty")
            files_info = reader.files_info(relpaths)
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, topsrcdir)
                if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
                    continue
                manifest_info = None
                if relpath in files_info:
                    bug_component = files_info[relpath].get("BUG_COMPONENT")
                    if bug_component:
                        key = "{}::{}".format(
                            bug_component.product, bug_component.component
                        )
                    else:
                        key = "<unknown bug component>"
                    if (not components) or (key in components):
                        manifest_info = {"manifest": relpath, "tests": 0, "skipped": 0}
                        rkey = key if show_components else "all"
                        if rkey in by_component["manifests"]:
                            by_component["manifests"][rkey].append(manifest_info)
                        else:
                            by_component["manifests"][rkey] = [manifest_info]
                if manifest_info:
                    for t in tests:
                        if t["manifest"] == manifest_path:
                            manifest_info["tests"] += 1
                            if t.get("skip-if"):
                                manifest_info["skipped"] += 1
            for key in by_component["manifests"]:
                by_component["manifests"][key].sort(key=lambda k: k["manifest"])

        if show_tests:
            by_component["tests"] = {}

        if show_tests or show_summary or show_annotations:
            test_count = 0
            failed_count = 0
            skipped_count = 0
            annotation_count = 0
            condition_count = 0
            component_set = set()
            relpaths = []
            conditions = {}
            known_unconditional_annotations = ["skip", "fail", "asserts", "random"]
            known_conditional_annotations = [
                "skip-if",
                "fail-if",
                "run-if",
                "fails-if",
                "fuzzy-if",
                "random-if",
                "asserts-if",
            ]
            for t in tests:
                relpath = t.get("srcdir_relpath")
                relpaths.append(relpath)
            reader = self.build_obj.mozbuild_reader(config_mode="empty")
            files_info = reader.files_info(relpaths)
            for t in tests:
                if not matches_filters(t):
                    continue
                if "referenced-test" in t:
                    # Avoid double-counting reftests: disregard reference file entries
                    continue
                if show_annotations:
                    for key in t:
                        if key in known_unconditional_annotations:
                            annotation_count += 1
                        if key in known_conditional_annotations:
                            annotation_count += 1
                            # Here 'key' is a manifest annotation type like 'skip-if' and t[key]
                            # is the associated condition. For example, the manifestparser
                            # manifest annotation, "skip-if = os == 'win'", is expected to be
                            # encoded as t['skip-if'] = "os == 'win'".
                            # To allow for reftest manifests, t[key] may have multiple entries
                            # separated by ';', each corresponding to a condition for that test
                            # and annotation type. For example,
                            # "skip-if(Android&&webrender) skip-if(OSX)", would be
                            # encoded as t['skip-if'] = "Android&&webrender;OSX".
                            annotation_conditions = t[key].split(";")
                            for condition in annotation_conditions:
                                condition_count += 1
                                # Trim reftest fuzzy-if ranges: everything after the first comma
                                # eg. "Android,0-2,1-3" -> "Android"
                                condition = condition.split(",")[0]
                                if condition not in conditions:
                                    conditions[condition] = 0
                                conditions[condition] += 1
                test_count += 1
                relpath = t.get("srcdir_relpath")
                if relpath in files_info:
                    bug_component = files_info[relpath].get("BUG_COMPONENT")
                    if bug_component:
                        key = "{}::{}".format(
                            bug_component.product, bug_component.component
                        )
                    else:
                        key = "<unknown bug component>"
                    if (not components) or (key in components):
                        component_set.add(key)
                        test_info = {"test": relpath}
                        for test_key in display_keys:
                            value = t.get(test_key)
                            if value:
                                test_info[test_key] = value
                        if t.get("fail-if"):
                            failed_count += 1
                        if t.get("fails-if"):
                            failed_count += 1
                        if t.get("skip-if"):
                            skipped_count += 1
                        if show_tests:
                            rkey = key if show_components else "all"
                            if rkey in by_component["tests"]:
                                # Avoid duplicates: Some test paths have multiple TestResolver
                                # entries, as when a test is included by multiple manifests.
                                found = False
                                for ctest in by_component["tests"][rkey]:
                                    if ctest["test"] == test_info["test"]:
                                        found = True
                                        break
                                if not found:
                                    by_component["tests"][rkey].append(test_info)
                            else:
                                by_component["tests"][rkey] = [test_info]
            if show_tests:
                for key in by_component["tests"]:
                    by_component["tests"][key].sort(key=lambda k: k["test"])
.add_activedata(branches
, days
, by_component
)
1128 print("Failed to retrieve some ActiveData data.")
1129 traceback
.print_exc()
1131 "%d tests updated with matching ActiveData data"
1132 % self
.total_activedata_matches
1135 "%d seconds waiting for ActiveData" % self
.total_activedata_seconds
1138 by_component
["description"] = self
.description(
1155 by_component
["summary"] = {}
1156 by_component
["summary"]["components"] = len(component_set
)
1157 by_component
["summary"]["manifests"] = manifest_count
1158 by_component
["summary"]["tests"] = test_count
1159 by_component
["summary"]["failed tests"] = failed_count
1160 by_component
["summary"]["skipped tests"] = skipped_count
1162 if show_annotations
:
1163 by_component
["annotations"] = {}
1164 by_component
["annotations"]["total annotations"] = annotation_count
1165 by_component
["annotations"]["total conditions"] = condition_count
1166 by_component
["annotations"]["unique conditions"] = len(conditions
)
1167 by_component
["annotations"]["conditions"] = conditions
1169 self
.write_report(by_component
, output_file
)
1171 end_time
= datetime
.datetime
.now()
1173 "%d seconds total to generate report"
1174 % (end_time
- start_time
).total_seconds()

    def write_report(self, by_component, output_file):
        json_report = json.dumps(by_component, indent=2, sort_keys=True)
        if output_file:
            output_file = os.path.abspath(output_file)
            output_dir = os.path.dirname(output_file)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)

            with open(output_file, "w") as f:
                f.write(json_report)
        else:
            print(json_report)

    def report_diff(self, before, after, output_file):
        """
        Support for 'mach test-info report-diff'.
        """

        def get_file(path_or_url):
            if urlparse.urlparse(path_or_url).scheme:
                response = requests.get(path_or_url)
                response.raise_for_status()
                return json.loads(response.text)
            with open(path_or_url) as f:
                return json.load(f)

        report1 = get_file(before)
        report2 = get_file(after)

        by_component = {"tests": {}, "summary": {}}
        self.diff_summaries(by_component, report1["summary"], report2["summary"])
        self.diff_all_components(by_component, report1["tests"], report2["tests"])
        self.write_report(by_component, output_file)

    def diff_summaries(self, by_component, summary1, summary2):
        """
        Update by_component with comparison of summaries.
        """
        all_keys = set(summary1.keys()) | set(summary2.keys())
        for key in all_keys:
            delta = summary2.get(key, 0) - summary1.get(key, 0)
            by_component["summary"]["%s delta" % key] = delta

    def diff_all_components(self, by_component, tests1, tests2):
        """
        Update by_component with any added/deleted tests, for all components.
        """
        self.added_count = 0
        self.deleted_count = 0
        for component in tests1:
            component1 = tests1[component]
            component2 = [] if component not in tests2 else tests2[component]
            self.diff_component(by_component, component, component1, component2)
        for component in tests2:
            if component not in tests1:
                component2 = tests2[component]
                self.diff_component(by_component, component, [], component2)
        by_component["summary"]["added tests"] = self.added_count
        by_component["summary"]["deleted tests"] = self.deleted_count

    def diff_component(self, by_component, component, component1, component2):
        """
        Update by_component[component] with any added/deleted tests for the
        given component.
        "added": tests found in component2 but missing from component1.
        "deleted": tests found in component1 but missing from component2.
        """
        tests1 = set([t["test"] for t in component1])
        tests2 = set([t["test"] for t in component2])
        deleted = tests1 - tests2
        added = tests2 - tests1
        if deleted or added:
            by_component["tests"][component] = {}
            if deleted:
                by_component["tests"][component]["deleted"] = sorted(list(deleted))
            if added:
                by_component["tests"][component]["added"] = sorted(list(added))
        self.added_count += len(added)
        self.deleted_count += len(deleted)
        common = len(tests1.intersection(tests2))
        self.log_verbose(
            "%s: %d deleted, %d added, %d common"
            % (component, len(deleted), len(added), common)
        )