#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# drmemory_analyze.py

''' Given a Dr. Memory output file, parses errors and uniques them.'''
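
# Example invocation (illustrative; the file name below is hypothetical, and a
# matching "...suppress..." file is expected to sit next to each
# "...results..." file, see ParseReportFile):
#   drmemory_analyze.py unit_tests.DrMemory.results.txt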

from collections import defaultdict
import common
import hashlib
import logging
import optparse
import os
import re
import subprocess
import sys
import time

class DrMemoryError:
  def __init__(self, report, suppression, testcase):
    self._report = report
    self._testcase = testcase

    # Chromium-specific transformations of the suppressions:
    # Replace 'any_test.exe' and 'chrome.dll' with '*', then remove the
    # Dr.Memory-generated error ids from the name= lines as they don't
    # make sense in a multiprocess report.
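    # For example (the names and frames below are hypothetical, shown only to
    # illustrate the transformations performed by the loop that follows):
    #   name=UNADDRESSABLE ACCESS #42  ->  name=<insert_a_suppression_name_here>
    #   chrome.dll!SomeFunction        ->  *!SomeFunction
    #   unit_tests.exe!main            ->  *!main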
    supp_lines = suppression.split("\n")
    for l in xrange(len(supp_lines)):
      if supp_lines[l].startswith("name="):
        supp_lines[l] = "name=<insert_a_suppression_name_here>"
      if supp_lines[l].startswith("chrome.dll!"):
        supp_lines[l] = supp_lines[l].replace("chrome.dll!", "*!")
      bang_index = supp_lines[l].find("!")
      d_exe_index = supp_lines[l].find(".exe!")
      if bang_index >= 4 and d_exe_index + 4 == bang_index:
        supp_lines[l] = "*" + supp_lines[l][bang_index:]
    self._suppression = "\n".join(supp_lines)

  def __str__(self):
    output = ""
    output += "### BEGIN MEMORY TOOL REPORT (error hash=#%016X#)\n" % \
        self.ErrorHash()
    output += self._report + "\n"
    if self._testcase:
      output += "The report came from the `%s` test.\n" % self._testcase
    output += "Suppression (error hash=#%016X#):\n" % self.ErrorHash()
    output += (" For more info on using suppressions see "
               "http://dev.chromium.org/developers/how-tos/using-drmemory#TOC-Suppressing-error-reports-from-the-\n")
    output += "{\n%s\n}\n" % self._suppression
    output += "### END MEMORY TOOL REPORT (error hash=#%016X#)\n" % \
        self.ErrorHash()
    return output

  # This is a device-independent hash identifying the suppression.
  # By printing out this hash we can find duplicate reports between tests and
  # different shards running on multiple buildbots.
  def ErrorHash(self):
    return int(hashlib.md5(self._suppression).hexdigest()[:16], 16)

  def __hash__(self):
    return hash(self._suppression)

  def __eq__(self, rhs):
    return self._suppression == rhs._suppression


class DrMemoryAnalyzer:
  ''' Given a set of Dr.Memory output files, parse all the errors out of
  them, unique them and output the results.'''

  def __init__(self):
    self.known_errors = set()
    self.error_count = 0

  def ReadLine(self):
    self.line_ = self.cur_fd_.readline()

  def ReadSection(self):
    result = [self.line_]
    self.ReadLine()
    while len(self.line_.strip()) > 0:
      result.append(self.line_)
      self.ReadLine()
    return result

  def ParseReportFile(self, filename, testcase):
    ret = []

    # First, read the generated suppressions file so we can easily look up a
    # suppression for a given error.
    supp_fd = open(filename.replace("results", "suppress"), 'r')
    generated_suppressions = {}  # Key -> Error #, Value -> Suppression text.
    for line in supp_fd:
      # NOTE: this regexp looks fragile. Might break if the generated
      # suppression format slightly changes.
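      # The generated file is expected to look roughly like this (illustrative
      # only; the error type, name and frame below are hypothetical):
      #   # Suppression for Error #1
      #   UNADDRESSABLE ACCESS
      #   name=<some auto-generated name>
      #   unit_tests.exe!SomeFunction
      #   <blank line>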
      m = re.search("# Suppression for Error #([0-9]+)", line.strip())
      if not m:
        continue
      error_id = int(m.groups()[0])
      assert error_id not in generated_suppressions
      # OK, now read the next suppression:
      cur_supp = ""
      for supp_line in supp_fd:
        if supp_line.startswith("#") or supp_line.strip() == "":
          break
        cur_supp += supp_line
      generated_suppressions[error_id] = cur_supp.strip()
    supp_fd.close()

    self.cur_fd_ = open(filename, 'r')
    while True:
      self.ReadLine()
      if (self.line_ == ''): break

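      # Each error report in the results file starts with a header line like
      # the following (illustrative; the error type and details are
      # hypothetical):
      #   Error #1: UNADDRESSABLE ACCESS: reading 4 byte(s)
      # followed by a stack trace and terminated by a blank line
      # (see ReadSection).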
      match = re.search("^Error #([0-9]+): (.*)", self.line_)
      if match:
        error_id = int(match.groups()[0])
        self.line_ = match.groups()[1].strip() + "\n"
        report = "".join(self.ReadSection()).strip()
        suppression = generated_suppressions[error_id]
        ret.append(DrMemoryError(report, suppression, testcase))

      if re.search("SUPPRESSIONS USED:", self.line_):
        self.ReadLine()
        while self.line_.strip() != "":
          line = self.line_.strip()
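          # Each line is expected to look like one of the following
          # (illustrative; the suppression names and the parenthesized note
          # are hypothetical):
          #   10x: some_suppression_name
          #    ?x (leaked memory): some_whole_module_suppression_name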
          (count, name) = re.match(" *([0-9\?]+)x(?: \(.*?\))?: (.*)",
                                   line).groups()
          if (count == "?"):
            # Whole-module suppressions have no count available; assume 1.
            count = 1
          else:
            count = int(count)
          self.used_suppressions[name] += count
          self.ReadLine()

      if self.line_.startswith("ASSERT FAILURE"):
        ret.append(self.line_.strip())

    self.cur_fd_.close()
    return ret

  def Report(self, filenames, testcase, check_sanity):
    sys.stdout.flush()
    # TODO(timurrrr): support positive tests / check_sanity==True
    self.used_suppressions = defaultdict(int)

    to_report = []
    reports_for_this_test = set()
    for f in filenames:
      cur_reports = self.ParseReportFile(f, testcase)

      # Filter out the reports that were there in previous tests.
      for r in cur_reports:
        if r in reports_for_this_test:
          # A similar report is about to be printed for this test.
          pass
        elif r in self.known_errors:
          # A similar report has already been printed in one of the prev tests.
          to_report.append("This error was already printed in some "
                           "other test, see 'hash=#%016X#'" % r.ErrorHash())
          reports_for_this_test.add(r)
        else:
          self.known_errors.add(r)
          reports_for_this_test.add(r)
          to_report.append(r)

    common.PrintUsedSuppressionsList(self.used_suppressions)

    if not to_report:
      logging.info("PASS: No error reports found")
      return 0

    sys.stdout.flush()
    sys.stderr.flush()
    logging.info("Found %i error reports" % len(to_report))
    for report in to_report:
      self.error_count += 1
      logging.info("Report #%d\n%s" % (self.error_count, report))
    logging.info("Total: %i error reports" % len(to_report))
    sys.stdout.flush()
    return -1


def main():
  '''For testing only. The DrMemoryAnalyzer class should be imported instead.'''
  parser = optparse.OptionParser("usage: %prog <files to analyze>")

  (options, args) = parser.parse_args()
  if len(args) == 0:
    parser.error("no filename specified")
  filenames = args

  logging.getLogger().setLevel(logging.INFO)
  return DrMemoryAnalyzer().Report(filenames, None, False)
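

# Example of driving the analyzer from another script, as the main() docstring
# recommends (a minimal sketch; the file name and test name below are
# hypothetical, and a matching "suppress" file is expected next to the
# "results" file):
#
#   analyzer = DrMemoryAnalyzer()
#   retcode = analyzer.Report(["unit_tests.DrMemory.results.txt"],
#                             "unit_tests", False)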


if __name__ == '__main__':
  sys.exit(main())