# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import absolute_import, print_function

import datetime
import errno
import json
import os
import posixpath
import re
import requests
import six.moves.urllib_parse as urlparse
import subprocess
import threading
import traceback
import mozpack.path as mozpath
from moztest.resolve import TestResolver, TestManifestLoader
from mozfile import which
from mozbuild.base import MozbuildObject, MachCommandConditions as conditions

ACTIVEDATA_RECORD_LIMIT = 10000
MAX_ACTIVEDATA_CONCURRENCY = 5
MAX_ACTIVEDATA_RETRIES = 5
REFERER = 'https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Test-Info'
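# ACTIVEDATA_RECORD_LIMIT caps the number of records requested per ActiveData query;
# queries that return that many records are flagged as possibly incomplete.
# MAX_ACTIVEDATA_CONCURRENCY and MAX_ACTIVEDATA_RETRIES bound the number of
# concurrent ActiveData requests and the retry attempts made for each query.
# REFERER is sent as the 'referer' header on every ActiveData request.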
class TestInfo(object):
    """
    Support 'mach test-info'.
    """
    def __init__(self, verbose):
        self.verbose = verbose
        here = os.path.abspath(os.path.dirname(__file__))
        self.build_obj = MozbuildObject.from_environment(cwd=here)
        self.total_activedata_seconds = 0

    def log_verbose(self, what):
        if self.verbose:
            print(what)

    def activedata_query(self, query):
        start_time = datetime.datetime.now()
        self.log_verbose(start_time)
        self.log_verbose(json.dumps(query))
        response = requests.post("http://activedata.allizom.org/query",
                                 data=json.dumps(query),
                                 headers={'referer': REFERER},
                                 stream=True)
        end_time = datetime.datetime.now()
        self.total_activedata_seconds += (end_time - start_time).total_seconds()
        self.log_verbose(end_time)
        self.log_verbose(response)
        response.raise_for_status()
        data = response.json()["data"]
        self.log_verbose("response length: %d" % len(data))
        return data
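# Illustrative sketch (hypothetical values) of how activedata_query is used: the
# caller posts an ActiveData query expressed as JSON and gets back the "data"
# portion of the response, e.g.
#
#   query = {"from": "unittest", "format": "list", "limit": 10,
#            "where": {"eq": {"result.test": "some/test/path.html"}}}
#   data = ti.activedata_query(query)   # ti is a TestInfo instance
#
# With "format": "list", the returned value is a list of records keyed by the
# selected/grouped fields (see the query builders later in this file).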
class ActiveDataThread(threading.Thread):
    """
    A thread to query ActiveData and wait for its response.
    """
    def __init__(self, name, ti, query, context):
        threading.Thread.__init__(self, name=name)
        self.ti = ti
        self.query = query
        self.context = context
        self.response = None

    def run(self):
        attempt = 1
        while attempt < MAX_ACTIVEDATA_RETRIES and not self.response:
            try:
                self.response = self.ti.activedata_query(self.query)
                if not self.response:
                    self.ti.log_verbose("%s: no data received for query" % self.name)
                    self.response = []
                    break
            except Exception:
                self.ti.log_verbose("%s: Exception on attempt #%d:" % (self.name, attempt))
                traceback.print_exc()
                attempt += 1
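# Typical use of ActiveDataThread (as in TestInfoReport.collect_activedata_results):
# construct one thread per query, start a bounded number of them, then join each
# thread and read its .response attribute, which holds the list of records, or
# [] / None when the query returned nothing or kept failing. Illustrative only:
#
#   t = ActiveDataThread("label", ti, query, context)
#   t.start()
#   t.join()
#   records = t.response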
class TestInfoTests(TestInfo):
    """
    Support 'mach test-info tests': Detailed report of specified tests.
    """
    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)

        self._hg = None
        if conditions.is_hg(self.build_obj):
            self._hg = which('hg')
            if not self._hg:
                raise OSError(errno.ENOENT, "Could not find 'hg' on PATH.")

        self._git = None
        if conditions.is_git(self.build_obj):
            self._git = which('git')
            if not self._git:
                raise OSError(errno.ENOENT, "Could not find 'git' on PATH.")

    def find_in_hg_or_git(self, test_name):
        if self._hg:
            cmd = [self._hg, 'files', '-I', test_name]
        elif self._git:
            cmd = [self._git, 'ls-files', test_name]
        else:
            return None
        try:
            out = subprocess.check_output(cmd, universal_newlines=True).splitlines()
        except subprocess.CalledProcessError:
            out = None
        return out
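    # find_in_hg_or_git returns the tracked files matching the given pattern, one
    # path per line of 'hg files' / 'git ls-files' output. For example (hypothetical
    # path), find_in_hg_or_git('**/test_example.html') might return
    # ['dom/tests/mochitest/general/test_example.html'].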
    def set_test_name(self):
        # Generating a unified report for a specific test is complicated
        # by differences in the test name used in various data sources.
        # Consider:
        #   - It is often convenient to request a report based only on
        #     a short file name, rather than the full path;
        #   - Bugs may be filed in bugzilla against a simple, short test
        #     name or the full path to the test;
        #   - In ActiveData, the full path is usually used, but sometimes
        #     also includes additional path components outside of the
        #     mercurial repo (common for reftests).
        # This function attempts to find appropriate names for different
        # queries based on the specified test name.

        # full_test_name is the full path to the file in hg (or git)
        self.full_test_name = None
        out = self.find_in_hg_or_git(self.test_name)
        if out and len(out) == 1:
            self.full_test_name = out[0]
        elif out and len(out) > 1:
            print("Ambiguous test name specified. Found:")
            for line in out:
                print(line)
        else:
            out = self.find_in_hg_or_git('**/%s*' % self.test_name)
            if out and len(out) == 1:
                self.full_test_name = out[0]
            elif out and len(out) > 1:
                print("Ambiguous test name. Found:")
                for line in out:
                    print(line)
        if self.full_test_name:
            self.full_test_name = self.full_test_name.replace(os.sep, posixpath.sep)
            print("Found %s in source control." % self.full_test_name)
        else:
            print("Unable to validate test name '%s'!" % self.test_name)
            self.full_test_name = self.test_name

        # search for full_test_name in test manifests
        here = os.path.abspath(os.path.dirname(__file__))
        resolver = TestResolver.from_environment(cwd=here, loader_cls=TestManifestLoader)
        relpath = self.build_obj._wrap_path_argument(self.full_test_name).relpath()
        tests = list(resolver.resolve_tests(paths=[relpath]))
        if len(tests) == 1:
            relpath = self.build_obj._wrap_path_argument(tests[0]['manifest']).relpath()
            print("%s found in manifest %s" % (self.full_test_name, relpath))
            if tests[0].get('flavor'):
                print(" flavor: %s" % tests[0]['flavor'])
            if tests[0].get('skip-if'):
                print(" skip-if: %s" % tests[0]['skip-if'])
            if tests[0].get('fail-if'):
                print(" fail-if: %s" % tests[0]['fail-if'])
        elif len(tests) == 0:
            print("%s not found in any test manifest!" % self.full_test_name)
        else:
            print("%s found in more than one manifest!" % self.full_test_name)

        # short_name is full_test_name without the path
        self.short_name = None
        name_idx = self.full_test_name.rfind('/')
        if name_idx > 0:
            self.short_name = self.full_test_name[name_idx + 1:]
        if self.short_name and self.short_name == self.test_name:
            self.short_name = None

        if not (self.show_results or self.show_durations or self.show_tasks):
            # no need to determine the ActiveData name if not querying
            return
    def set_activedata_test_name(self):
        # activedata_test_name is the name used in ActiveData
        self.activedata_test_name = None
        simple_names = [
            self.full_test_name,
            self.test_name,
            self.short_name
        ]
        simple_names = [x for x in simple_names if x]
        searches = [
            {"in": {"result.test": simple_names}},
        ]
        regex_names = [".*%s.*" % re.escape(x) for x in simple_names if x]
        for r in regex_names:
            searches.append({"regexp": {"result.test": r}})
        query = {
            "from": "unittest",
            "format": "list",
            "limit": 10,
            "groupby": ["result.test"],
            "where": {"and": [
                {"or": searches},
                {"in": {"build.branch": self.branches.split(',')}},
                {"gt": {"run.timestamp": {"date": self.start}}},
                {"lt": {"run.timestamp": {"date": self.end}}}
            ]}
        }
        print("Querying ActiveData...")  # the following query can take a long time
        data = self.activedata_query(query)
        if data and len(data) > 0:
            self.activedata_test_name = [
                d['result']['test']
                for p in simple_names + regex_names
                for d in data
                if re.match(p + "$", d['result']['test'])
            ][0]  # first match is best match
        if self.activedata_test_name:
            print("Found records matching '%s' in ActiveData." %
                  self.activedata_test_name)
        else:
            print("Unable to find matching records in ActiveData; using %s!" %
                  self.test_name)
            self.activedata_test_name = self.test_name
    def get_platform(self, record):
        if 'platform' in record['build']:
            platform = record['build']['platform']
        else:
            platform = "-"
        platform_words = platform.split('-')
        types_label = ""
        # combine run and build types and eliminate duplicates
        run_types = []
        if 'run' in record and 'type' in record['run']:
            run_types = record['run']['type']
            run_types = run_types if isinstance(run_types, list) else [run_types]
        build_types = []
        if 'build' in record and 'type' in record['build']:
            build_types = record['build']['type']
            build_types = build_types if isinstance(build_types, list) else [build_types]
        run_types = list(set(run_types + build_types))
        # '1proc' is used as a treeherder label but does not appear in run types
        if 'e10s' not in run_types:
            run_types = run_types + ['1proc']
        for run_type in run_types:
            # chunked is not interesting
            if run_type == 'chunked':
                continue
            # e10s is the default: implied
            if run_type == 'e10s':
                continue
            # sometimes a build/run type is already present in the build platform
            if run_type in platform_words:
                continue
            if types_label:
                types_label += "-"
            types_label += run_type
        return "%s/%s:" % (platform, types_label)
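    # Example (hypothetical record): for build.platform "windows10-64",
    # build.type "opt", and run.type "e10s", get_platform returns
    # "windows10-64/opt:" -- 'e10s' is implied and omitted, and '1proc' is only
    # appended when 'e10s' is absent from the run types.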
    def report_test_results(self):
        # Report test pass/fail summary from ActiveData
        query = {
            "from": "unittest",
            "format": "list",
            "limit": 100,
            "groupby": ["build.platform", "build.type"],
            "select": [
                {"aggregate": "count"},
                {
                    "name": "failures",
                    "value": {"case": [
                        {"when": {"eq": {"result.ok": "F"}}, "then": 1}
                    ]},
                    "aggregate": "sum",
                    "default": 0
                },
                {
                    "name": "skips",
                    "value": {"case": [
                        {"when": {"eq": {"result.status": "SKIP"}}, "then": 1}
                    ]},
                    "aggregate": "sum",
                    "default": 0
                },
                {"value": "run.type", "aggregate": "union"}
            ],
            "where": {"and": [
                {"eq": {"result.test": self.activedata_test_name}},
                {"in": {"build.branch": self.branches.split(',')}},
                {"gt": {"run.timestamp": {"date": self.start}}},
                {"lt": {"run.timestamp": {"date": self.end}}}
            ]}
        }
        print("\nTest results for %s on %s between %s and %s" %
              (self.activedata_test_name, self.branches, self.start, self.end))
        data = self.activedata_query(query)
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            worst_rate = 0.0
            worst_platform = None
            total_runs = 0
            total_failures = 0
            for record in data:
                platform = self.get_platform(record)
                if platform.startswith("-"):
                    continue
                runs = record['count']
                total_runs = total_runs + runs
                failures = record.get('failures', 0)
                skips = record.get('skips', 0)
                total_failures = total_failures + failures
                rate = float(failures) / runs
                if rate >= worst_rate:
                    worst_rate = rate
                    worst_platform = platform
                    worst_failures = failures
                    worst_runs = runs
                print("%-40s %6d failures (%6d skipped) in %6d runs" % (
                    platform, failures, skips, runs))
            print("\nTotal: %d failures in %d runs or %.3f failures/run" %
                  (total_failures, total_runs, float(total_failures) / total_runs))
            if worst_failures > 0:
                print("Worst rate on %s %d failures in %d runs or %.3f failures/run" %
                      (worst_platform, worst_failures, worst_runs, worst_rate))
        else:
            print("No test result data found.")
    def report_test_durations(self):
        # Report test durations summary from ActiveData
        query = {
            "from": "unittest",
            "format": "list",
            "limit": 100,
            "groupby": ["build.platform", "build.type"],
            "select": [
                {"value": "result.duration",
                 "aggregate": "average", "name": "average"},
                {"value": "result.duration", "aggregate": "min", "name": "min"},
                {"value": "result.duration", "aggregate": "max", "name": "max"},
                {"aggregate": "count"},
                {"value": "run.type", "aggregate": "union"}
            ],
            "where": {"and": [
                {"eq": {"result.ok": "T"}},
                {"eq": {"result.test": self.activedata_test_name}},
                {"in": {"build.branch": self.branches.split(',')}},
                {"gt": {"run.timestamp": {"date": self.start}}},
                {"lt": {"run.timestamp": {"date": self.end}}}
            ]}
        }
        data = self.activedata_query(query)
        print("\nTest durations for %s on %s between %s and %s" %
              (self.activedata_test_name, self.branches, self.start, self.end))
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            for record in data:
                platform = self.get_platform(record)
                if platform.startswith("-"):
                    continue
                print("%-40s %6.2f s (%.2f s - %.2f s over %d runs)" % (
                    platform, record['average'], record['min'],
                    record['max'], record['count']))
        else:
            print("No test durations found.")
    def report_test_tasks(self):
        # Report test tasks summary from ActiveData
        query = {
            "from": "unittest",
            "format": "list",
            "limit": 1000,
            "select": ["build.platform", "build.type", "run.type", "run.name"],
            "where": {"and": [
                {"eq": {"result.test": self.activedata_test_name}},
                {"in": {"build.branch": self.branches.split(',')}},
                {"gt": {"run.timestamp": {"date": self.start}}},
                {"lt": {"run.timestamp": {"date": self.end}}}
            ]}
        }
        data = self.activedata_query(query)
        print("\nTest tasks for %s on %s between %s and %s" %
              (self.activedata_test_name, self.branches, self.start, self.end))
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            consolidated = {}
            for record in data:
                platform = self.get_platform(record)
                if platform not in consolidated:
                    consolidated[platform] = {}
                if record['run']['name'] in consolidated[platform]:
                    consolidated[platform][record['run']['name']] += 1
                else:
                    consolidated[platform][record['run']['name']] = 1
            for key in sorted(consolidated.keys()):
                tasks = ""
                for task in consolidated[key].keys():
                    if tasks:
                        tasks += "\n%-40s " % ""
                    tasks += task
                    tasks += " in %d runs" % consolidated[key][task]
                print("%-40s %s" % (key, tasks))
        else:
            print("No test tasks found.")
    def report_bugs(self):
        # Report open bugs matching the test name
        search = self.full_test_name
        if self.test_name:
            search = '%s,%s' % (search, self.test_name)
        if self.short_name:
            search = '%s,%s' % (search, self.short_name)
        payload = {'quicksearch': search,
                   'include_fields': 'id,summary'}
        response = requests.get('https://bugzilla.mozilla.org/rest/bug',
                                payload)
        response.raise_for_status()
        json_response = response.json()
        print("\nBugzilla quick search for '%s':" % search)
        if 'bugs' in json_response:
            for bug in json_response['bugs']:
                print("Bug %s: %s" % (bug['id'], bug['summary']))
        else:
            print("No bugs found.")
    def report(self, test_names, branches, start, end,
               show_info, show_results, show_durations, show_tasks, show_bugs):
        self.branches = branches
        self.start = start
        self.end = end
        self.show_info = show_info
        self.show_results = show_results
        self.show_durations = show_durations
        self.show_tasks = show_tasks

        if (not self.show_info and
                not self.show_results and
                not self.show_durations and
                not self.show_tasks and
                not show_bugs):
            # by default, show everything
            self.show_info = True
            self.show_results = True
            self.show_durations = True
            self.show_tasks = True
            show_bugs = True

        for test_name in test_names:
            print("===== %s =====" % test_name)
            self.test_name = test_name
            if len(self.test_name) < 6:
                print("'%s' is too short for a test name!" % self.test_name)
                continue
            self.set_test_name()
            if show_bugs:
                self.report_bugs()
            self.set_activedata_test_name()
            if self.show_results:
                self.report_test_results()
            if self.show_durations:
                self.report_test_durations()
            if self.show_tasks:
                self.report_test_tasks()
class TestInfoLongRunningTasks(TestInfo):
    """
    Support 'mach test-info long-tasks': Summary of tasks approaching their max-run-time.
    """
    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)

    def report(self, branches, start, end, threshold_pct, filter_threshold_pct):

        def get_long_running_ratio(record):
            count = record['count']
            tasks_gt_pct = record['tasks_gt_pct']
            return count / tasks_gt_pct

        # Search task durations in ActiveData for long-running tasks
        query = {
            "from": "task",
            "format": "list",
            "groupby": ["run.name"],
            "limit": 1000,
            "select": [
                {
                    "value": "task.maxRunTime",
                    "aggregate": "median",
                    "name": "max_run_time"
                },
                {
                    "aggregate": "count"
                },
                {
                    "value": {
                        "when": {
                            "gt": [
                                {"div": ["action.duration", "task.maxRunTime"]},
                                threshold_pct / 100.0
                            ]
                        },
                        "then": 1
                    },
                    "aggregate": "sum",
                    "name": "tasks_gt_pct"
                }
            ],
            "where": {"and": [
                {"in": {"build.branch": branches.split(',')}},
                {"gt": {"task.run.start_time": {"date": start}}},
                {"lte": {"task.run.start_time": {"date": end}}},
                {"eq": {"task.state": "completed"}},
            ]}
        }
        data = self.activedata_query(query)
        print("\nTasks nearing their max-run-time on %s between %s and %s" %
              (branches, start, end))
        if data and len(data) > 0:
            filtered = []
            for record in data:
                if 'tasks_gt_pct' in record:
                    count = record['count']
                    tasks_gt_pct = record['tasks_gt_pct']
                    if float(tasks_gt_pct) / count > filter_threshold_pct / 100.0:
                        filtered.append(record)
            filtered.sort(key=get_long_running_ratio)
            if not filtered:
                print("No long running tasks found.")
            for record in filtered:
                name = record['run']['name']
                count = record['count']
                max_run_time = record['max_run_time']
                tasks_gt_pct = record['tasks_gt_pct']
                print("%-55s: %d of %d runs (%.1f%%) exceeded %d%% of max-run-time (%d s)" %
                      (name, tasks_gt_pct, count, tasks_gt_pct * 100.0 / count,
                       threshold_pct, max_run_time))
        else:
            print("No tasks found.")
class TestInfoReport(TestInfo):
    """
    Support 'mach test-info report': Report of test runs summarized by
    manifest and component.
    """
    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)
        self.total_activedata_matches = 0
        self.threads = []

    def add_activedata_for_suite(self, label, branches, days,
                                 suite_clause, tests_clause, path_mod):
        dates_clause = {"date": "today-%dday" % days}
        where_conditions = [
            suite_clause,
            {"in": {"repo.branch.name": branches.split(',')}},
            {"gt": {"run.timestamp": dates_clause}},
        ]
        if tests_clause:
            where_conditions.append(tests_clause)
        ad_query = {
            "from": "unittest",
            "limit": ACTIVEDATA_RECORD_LIMIT,
            "format": "list",
            "groupby": ["result.test"],
            "select": [
                {
                    "name": "result.count",
                    "aggregate": "count"
                },
                {
                    "name": "result.duration",
                    "value": "result.duration",
                    "aggregate": "sum"
                },
                {
                    "name": "result.failures",
                    "value": {"case": [
                        {"when": {"eq": {"result.ok": "F"}}, "then": 1}
                    ]},
                    "aggregate": "sum",
                    "default": 0
                },
                {
                    "name": "result.skips",
                    "value": {"case": [
                        {"when": {"eq": {"result.status": "SKIP"}}, "then": 1}
                    ]},
                    "aggregate": "sum",
                    "default": 0
                }
            ],
            "where": {"and": where_conditions}
        }
        t = ActiveDataThread(label, self, ad_query, path_mod)
        self.threads.append(t)
    def update_report(self, by_component, result, path_mod):
        def update_item(item, label, value):
            # It is important to include any existing item value in case ActiveData
            # returns multiple records for the same test; that can happen if the report
            # sometimes maps more than one ActiveData record to the same path.
            new_value = item.get(label, 0) + value
            if type(new_value) == int:
                item[label] = new_value
            else:
                item[label] = round(new_value, 2)

        if 'test' in result and 'tests' in by_component:
            test = result['test']
            if path_mod:
                test = path_mod(test)
            for bc in by_component['tests']:
                for item in by_component['tests'][bc]:
                    if test == item['test']:
                        seconds = round(result.get('duration', 0), 2)
                        update_item(item, 'total run time, seconds', seconds)
                        update_item(item, 'total runs', result.get('count', 0))
                        update_item(item, 'skipped runs', result.get('skips', 0))
                        update_item(item, 'failed runs', result.get('failures', 0))
                        return True
        return False
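    # The 'result' dict passed to update_report comes from the per-suite query built
    # in add_activedata_for_suite; with that query's groupby/select, a record looks
    # roughly like (hypothetical values):
    #   {"test": "dom/some/test_path.html", "count": 250, "duration": 312.5,
    #    "failures": 2, "skips": 0}
    # and those fields are accumulated into the matching test entry of the report.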
    def collect_activedata_results(self, by_component):
        # Start the first MAX_ACTIVEDATA_CONCURRENCY threads. If too many
        # concurrent requests are made to ActiveData, the requests frequently
        # fail (504 is the typical response).
        for i in range(min(MAX_ACTIVEDATA_CONCURRENCY, len(self.threads))):
            t = self.threads[i]
            t.start()
        # Wait for running threads (first N threads in self.threads) to complete.
        # When a thread completes, start the next thread, process the results
        # from the completed thread, and remove the completed thread from
        # the thread list.
        while len(self.threads):
            running_threads = min(MAX_ACTIVEDATA_CONCURRENCY, len(self.threads))
            for i in range(running_threads):
                t = self.threads[i]
                t.join(1)
                if not t.is_alive():
                    ad_response = t.response
                    path_mod = t.context
                    name = t.name
                    del self.threads[i]
                    if len(self.threads) >= MAX_ACTIVEDATA_CONCURRENCY:
                        running_threads = min(MAX_ACTIVEDATA_CONCURRENCY, len(self.threads))
                        self.threads[running_threads - 1].start()
                    if ad_response:
                        if len(ad_response) >= ACTIVEDATA_RECORD_LIMIT:
                            print("%s: ActiveData query limit reached; data may be missing" % name)
                        matches = 0
                        for record in ad_response:
                            if 'result' in record:
                                result = record['result']
                                if self.update_report(by_component, result, path_mod):
                                    matches += 1
                        self.log_verbose("%s: %d results; %d matches" %
                                         (name, len(ad_response), matches))
                        self.total_activedata_matches += matches
                    break
    def path_mod_reftest(self, path):
        # "<path1> == <path2>" -> "<path1>"
        path = path.split(' ')[0]
        # "<path>?<params>" -> "<path>"
        path = path.split('?')[0]
        # "<path>#<fragment>" -> "<path>"
        path = path.split('#')[0]
        return path

    def path_mod_jsreftest(self, path):
        # "<path>;assert" -> "<path>"
        path = path.split(';')[0]
        return path

    def path_mod_marionette(self, path):
        # "<path> <test-name>" -> "<path>"
        path = path.split(' ')[0]
        # "part1\part2" -> "part1/part2"
        path = path.replace('\\', os.path.sep)
        return path

    def path_mod_wpt(self, path):
        if path[0] == os.path.sep:
            # "/<path>" -> "<path>"
            path = path[1:]
        # "<path>" -> "testing/web-platform/tests/<path>"
        path = os.path.join('testing', 'web-platform', 'tests', path)
        # "<path>?<params>" -> "<path>"
        path = path.split('?')[0]
        return path

    def path_mod_jittest(self, path):
        # "part1\part2" -> "part1/part2"
        path = path.replace('\\', os.path.sep)
        # "<path>" -> "js/src/jit-test/tests/<path>"
        return os.path.join('js', 'src', 'jit-test', 'tests', path)

    def path_mod_xpcshell(self, path):
        # "<manifest>.ini:<path>" -> "<path>"
        path = path.split('.ini:')[-1]
        return path
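    # Illustrative path_mod transformations (hypothetical inputs), matching the
    # comments above, assuming a posix path separator:
    #   path_mod_wpt("/css/css-grid/grid-items-001.html?ref")
    #       -> "testing/web-platform/tests/css/css-grid/grid-items-001.html"
    #   path_mod_xpcshell("unit/xpcshell.ini:test_example.js") -> "test_example.js"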
    def add_activedata(self, branches, days, by_component):
        suites = {
            # List of known suites requiring special path handling and/or
            # suites typically containing thousands of test paths.
            # regexes have been selected by trial and error to partition data
            # into queries returning less than ACTIVEDATA_RECORD_LIMIT records.
            "reftest": (self.path_mod_reftest,
                        [{"regex": {"result.test": "layout/reftests/[a-k].*"}},
                         {"regex": {"result.test": "layout/reftests/[^a-k].*"}},
                         {"not": {"regex": {"result.test": "layout/reftests/.*"}}}]),
            "web-platform-tests": (self.path_mod_wpt,
                                   [{"regex": {"result.test": "/[a-g].*"}},
                                    {"regex": {"result.test": "/[h-p].*"}},
                                    {"not": {"regex": {"result.test": "/[a-p].*"}}}]),
            "web-platform-tests-reftest": (self.path_mod_wpt,
                                           [{"regex": {"result.test": "/css/css-.*"}},
                                            {"not": {"regex": {"result.test": "/css/css-.*"}}}]),
            "crashtest": (None,
                          [{"regex": {"result.test": "[a-g].*"}},
                           {"not": {"regex": {"result.test": "[a-g].*"}}}]),
            "web-platform-tests-wdspec": (self.path_mod_wpt, [None]),
            "web-platform-tests-crashtest": (self.path_mod_wpt, [None]),
            "web-platform-tests-print-reftest": (self.path_mod_wpt, [None]),
            "xpcshell": (self.path_mod_xpcshell, [None]),
            "mochitest-plain": (None, [None]),
            "mochitest-browser-chrome": (None, [None]),
            "mochitest-media": (None, [None]),
            "mochitest-devtools-chrome": (None, [None]),
            "marionette": (self.path_mod_marionette, [None]),
            "mochitest-chrome": (None, [None]),
        }
        unsupported_suites = [
            # Usually these suites are excluded because currently the test resolver
            # does not provide test paths for them.
            "jsreftest",
            "jittest",
            "geckoview-junit",
            "cppunittest",
        ]
        for suite in suites:
            suite_clause = {"eq": {"run.suite.name": suite}}
            path_mod = suites[suite][0]
            test_clauses = suites[suite][1]
            suite_count = 1
            for test_clause in test_clauses:
                label = "%s-%d" % (suite, suite_count)
                suite_count += 1
                self.add_activedata_for_suite(label, branches, days,
                                              suite_clause, test_clause, path_mod)
        # Remainder: All supported suites not handled above.
        suite_clause = {"not": {"in": {"run.suite.name": unsupported_suites + list(suites)}}}
        self.add_activedata_for_suite("remainder", branches, days,
                                      suite_clause, None, None)
        self.collect_activedata_results(by_component)
    def description(self, components, flavor, subsuite, paths,
                    show_manifests, show_tests, show_summary, show_annotations,
                    show_activedata,
                    filter_values, filter_keys,
                    branches, days):
        # provide a natural language description of the report options
        what = []
        if show_manifests:
            what.append("test manifests")
        if show_tests:
            what.append("tests")
        if show_annotations:
            what.append("test manifest annotations")
        if show_summary and len(what) == 0:
            what.append("summary of tests only")
        if len(what) > 1:
            what[-1] = "and " + what[-1]
        what = ", ".join(what)
        d = "Test summary report for " + what
        if components:
            d += ", in specified components (%s)" % components
        else:
            d += ", in all components"
        if flavor:
            d += ", in specified flavor (%s)" % flavor
        if subsuite:
            d += ", in specified subsuite (%s)" % subsuite
        if paths:
            d += ", on specified paths (%s)" % paths
        if filter_values:
            d += ", containing '%s'" % filter_values
            if filter_keys:
                d += " in manifest keys '%s'" % filter_keys
            else:
                d += " in any part of manifest entry"
        if show_activedata:
            d += ", including historical run-time data for the last %d days on %s" % (
                days, branches)
        d += " as of %s." % datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
        return d
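    # A generated description reads, for example (hypothetical options):
    #   "Test summary report for test manifests, and tests, in all components,
    #    including historical run-time data for the last 7 days on mozilla-central
    #    as of 2020-07-15 12:00."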
    def report(self, components, flavor, subsuite, paths,
               show_manifests, show_tests, show_summary, show_annotations,
               show_activedata,
               filter_values, filter_keys, show_components, output_file,
               branches, days):

        def matches_filters(test):
            """
            Return True if all of the requested filter_values are found in this test;
            if filter_keys are specified, restrict search to those test keys.
            """
            for value in filter_values:
                value_found = False
                for key in test:
                    if not filter_keys or key in filter_keys:
                        if re.search(value, test[key]):
                            value_found = True
                            break
                if not value_found:
                    return False
            return True

        start_time = datetime.datetime.now()

        # Ensure useful report by default
        if not show_manifests and not show_tests and not show_summary and not show_annotations:
            show_manifests = True
            show_summary = True

        by_component = {}
        if components:
            components = components.split(',')
        if filter_keys:
            filter_keys = filter_keys.split(',')
        if filter_values:
            filter_values = filter_values.split(',')
        else:
            filter_values = []
        display_keys = (filter_keys or []) + ['skip-if', 'fail-if', 'fails-if']
        display_keys = set(display_keys)

        print("Finding tests...")
        here = os.path.abspath(os.path.dirname(__file__))
        resolver = TestResolver.from_environment(cwd=here, loader_cls=TestManifestLoader)
        tests = list(resolver.resolve_tests(paths=paths, flavor=flavor,
                                            subsuite=subsuite))

        manifest_paths = set()
        for t in tests:
            if 'manifest' in t and t['manifest'] is not None:
                manifest_paths.add(t['manifest'])
        manifest_count = len(manifest_paths)
        print("Resolver found {} tests, {} manifests".format(len(tests), manifest_count))
        if show_manifests:
            topsrcdir = self.build_obj.topsrcdir
            by_component['manifests'] = {}
            manifest_paths = list(manifest_paths)
            manifest_paths.sort()
            relpaths = []
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, topsrcdir)
                if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
                    continue
                relpaths.append(relpath)
            reader = self.build_obj.mozbuild_reader(config_mode='empty')
            files_info = reader.files_info(relpaths)
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, topsrcdir)
                if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
                    continue
                manifest_info = None
                if relpath in files_info:
                    bug_component = files_info[relpath].get('BUG_COMPONENT')
                    if bug_component:
                        key = "{}::{}".format(bug_component.product, bug_component.component)
                    else:
                        key = "<unknown bug component>"
                    if (not components) or (key in components):
                        manifest_info = {
                            'manifest': relpath,
                            'tests': 0,
                            'skipped': 0
                        }
                        rkey = key if show_components else 'all'
                        if rkey in by_component['manifests']:
                            by_component['manifests'][rkey].append(manifest_info)
                        else:
                            by_component['manifests'][rkey] = [manifest_info]
                if manifest_info:
                    for t in tests:
                        if t['manifest'] == manifest_path:
                            manifest_info['tests'] += 1
                            if t.get('skip-if'):
                                manifest_info['skipped'] += 1
            for key in by_component['manifests']:
                by_component['manifests'][key].sort(key=lambda k: k['manifest'])
        if show_tests:
            by_component['tests'] = {}

        if show_tests or show_summary or show_annotations:
            test_count = 0
            failed_count = 0
            skipped_count = 0
            annotation_count = 0
            condition_count = 0
            component_set = set()
            relpaths = []
            conditions = {}
            known_unconditional_annotations = ['skip', 'fail', 'asserts', 'random']
            known_conditional_annotations = ['skip-if', 'fail-if', 'run-if',
                                             'fails-if', 'fuzzy-if', 'random-if', 'asserts-if']
            for t in tests:
                relpath = t.get('srcdir_relpath')
                relpaths.append(relpath)
            reader = self.build_obj.mozbuild_reader(config_mode='empty')
            files_info = reader.files_info(relpaths)
            for t in tests:
                if not matches_filters(t):
                    continue
                if 'referenced-test' in t:
                    # Avoid double-counting reftests: disregard reference file entries
                    continue
                if show_annotations:
                    for key in t:
                        if key in known_unconditional_annotations:
                            annotation_count += 1
                        if key in known_conditional_annotations:
                            annotation_count += 1
                            # Here 'key' is a manifest annotation type like 'skip-if' and t[key]
                            # is the associated condition. For example, the manifestparser
                            # manifest annotation, "skip-if = os == 'win'", is expected to be
                            # encoded as t['skip-if'] = "os == 'win'".
                            # To allow for reftest manifests, t[key] may have multiple entries
                            # separated by ';', each corresponding to a condition for that test
                            # and annotation type. For example,
                            # "skip-if(Android&&webrender) skip-if(OSX)" would be
                            # encoded as t['skip-if'] = "Android&&webrender;OSX".
                            annotation_conditions = t[key].split(';')
                            for condition in annotation_conditions:
                                condition_count += 1
                                # Trim reftest fuzzy-if ranges: everything after the first comma
                                # eg. "Android,0-2,1-3" -> "Android"
                                condition = condition.split(',')[0]
                                if condition not in conditions:
                                    conditions[condition] = 0
                                conditions[condition] += 1
                test_count += 1
                relpath = t.get('srcdir_relpath')
                if relpath in files_info:
                    bug_component = files_info[relpath].get('BUG_COMPONENT')
                    if bug_component:
                        key = "{}::{}".format(bug_component.product, bug_component.component)
                    else:
                        key = "<unknown bug component>"
                    if (not components) or (key in components):
                        component_set.add(key)
                        test_info = {'test': relpath}
                        for test_key in display_keys:
                            value = t.get(test_key)
                            if value:
                                test_info[test_key] = value
                        if t.get('fail-if'):
                            failed_count += 1
                        if t.get('fails-if'):
                            failed_count += 1
                        if t.get('skip-if'):
                            skipped_count += 1
                        if show_tests:
                            rkey = key if show_components else 'all'
                            if rkey in by_component['tests']:
                                # Avoid duplicates: Some test paths have multiple TestResolver
                                # entries, as when a test is included by multiple manifests.
                                found = False
                                for ctest in by_component['tests'][rkey]:
                                    if ctest['test'] == test_info['test']:
                                        found = True
                                        break
                                if not found:
                                    by_component['tests'][rkey].append(test_info)
                            else:
                                by_component['tests'][rkey] = [test_info]
            if show_tests:
                for key in by_component['tests']:
                    by_component['tests'][key].sort(key=lambda k: k['test'])
        if show_activedata:
            try:
                self.add_activedata(branches, days, by_component)
            except Exception:
                print("Failed to retrieve some ActiveData data.")
                traceback.print_exc()
            self.log_verbose("%d tests updated with matching ActiveData data" %
                             self.total_activedata_matches)
            self.log_verbose("%d seconds waiting for ActiveData" %
                             self.total_activedata_seconds)

        by_component['description'] = self.description(
            components, flavor, subsuite, paths,
            show_manifests, show_tests, show_summary, show_annotations,
            show_activedata,
            filter_values, filter_keys,
            branches, days)

        if show_summary:
            by_component['summary'] = {}
            by_component['summary']['components'] = len(component_set)
            by_component['summary']['manifests'] = manifest_count
            by_component['summary']['tests'] = test_count
            by_component['summary']['failed tests'] = failed_count
            by_component['summary']['skipped tests'] = skipped_count

        if show_annotations:
            by_component['annotations'] = {}
            by_component['annotations']['total annotations'] = annotation_count
            by_component['annotations']['total conditions'] = condition_count
            by_component['annotations']['unique conditions'] = len(conditions)
            by_component['annotations']['conditions'] = conditions

        self.write_report(by_component, output_file)

        end_time = datetime.datetime.now()
        self.log_verbose("%d seconds total to generate report" %
                         (end_time - start_time).total_seconds())
    def write_report(self, by_component, output_file):
        json_report = json.dumps(by_component, indent=2, sort_keys=True)
        if output_file:
            output_file = os.path.abspath(output_file)
            output_dir = os.path.dirname(output_file)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)

            with open(output_file, 'w') as f:
                f.write(json_report)
        else:
            print(json_report)
    def report_diff(self, before, after, output_file):
        """
        Support for 'mach test-info report-diff'.
        """

        def get_file(path_or_url):
            if urlparse.urlparse(path_or_url).scheme:
                response = requests.get(path_or_url)
                response.raise_for_status()
                return json.loads(response.text)
            with open(path_or_url) as f:
                return json.load(f)

        report1 = get_file(before)
        report2 = get_file(after)

        by_component = {'tests': {}, 'summary': {}}
        self.diff_summaries(by_component, report1["summary"], report2["summary"])
        self.diff_all_components(by_component, report1["tests"], report2["tests"])
        self.write_report(by_component, output_file)
    def diff_summaries(self, by_component, summary1, summary2):
        """
        Update by_component with a comparison of the summaries.
        """
        all_keys = set(summary1.keys()) | set(summary2.keys())
        for key in all_keys:
            delta = summary2.get(key, 0) - summary1.get(key, 0)
            by_component['summary']['%s delta' % key] = delta

    def diff_all_components(self, by_component, tests1, tests2):
        """
        Update by_component with any added/deleted tests, for all components.
        """
        self.added_count = 0
        self.deleted_count = 0
        for component in tests1:
            component1 = tests1[component]
            component2 = [] if component not in tests2 else tests2[component]
            self.diff_component(by_component, component, component1, component2)
        for component in tests2:
            if component not in tests1:
                component2 = tests2[component]
                self.diff_component(by_component, component, [], component2)
        by_component['summary']['added tests'] = self.added_count
        by_component['summary']['deleted tests'] = self.deleted_count
    def diff_component(self, by_component, component, component1, component2):
        """
        Update by_component[component] with any added/deleted tests for the
        named component.
        "added": tests found in component2 but missing from component1.
        "deleted": tests found in component1 but missing from component2.
        """
        tests1 = set([t['test'] for t in component1])
        tests2 = set([t['test'] for t in component2])
        deleted = tests1 - tests2
        added = tests2 - tests1
        if deleted or added:
            by_component['tests'][component] = {}
            if deleted:
                by_component['tests'][component]['deleted'] = sorted(list(deleted))
            if added:
                by_component['tests'][component]['added'] = sorted(list(added))
        self.added_count += len(added)
        self.deleted_count += len(deleted)
        common = len(tests1.intersection(tests2))
        self.log_verbose("%s: %d deleted, %d added, %d common" % (component, len(deleted),
                                                                  len(added), common))
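    # The report-diff output written by write_report has this shape (hypothetical content):
    #   {"summary": {"tests delta": 12, "added tests": 15, "deleted tests": 3, ...},
    #    "tests": {"Core::DOM": {"added": [...], "deleted": [...]}}}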