#!/usr/bin/env python3
#
# Copyright (C) 2016-2024 Free Software Foundation, Inc.
#
# Script to analyze results of our branch prediction heuristics
#
# This file is part of GCC.
#
# GCC is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3, or (at your option) any later
# version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING3.  If not see
# <http://www.gnu.org/licenses/>.
#
# This script computes two basic properties of the branch prediction
# heuristics - coverage and hitrate.  Coverage is the number of executions
# of a given branch matched by the heuristics, and hitrate is the probability
# that once a branch is predicted as taken, it really is taken.
#
# These values are useful for judging the quality of a given heuristic.
# Hitrate may be used directly in predict.def.
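#
# A small worked example (the numbers are made up purely for illustration):
# if a heuristic matches a branch that executes 1000 times and the predicted
# outcome occurs on 700 of those executions, the branch contributes 1000 to
# the heuristic's coverage and its hitrate on that branch is
# 700 / 1000 = 70%.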
#
# Usage:
#  Step 1: Compile and profile your program.  You need to use the
#    -fprofile-generate flag to get the profiles.
#  Step 2: Make a reference run of the instrumented application.
#  Step 3: Compile the program with the collected profile and dump IPA
#    profiles (-fprofile-use -fdump-ipa-profile-details).
#  Step 4: Collect all generated dump files:
#    find . -name '*.profile' | xargs cat > dump_file
#  Step 5: Run the script:
#    ./analyze_brprob.py dump_file
#    and read the results; a complete example command sequence is shown at
#    the end of this comment.  Basically the following table is printed:
#
#  HEURISTICS                          BRANCHES  (REL)   HITRATE               COVERAGE  (REL)
#  early return (on trees)                    3   0.2%  35.83% /  93.64%          66360   0.0%
#  guess loop iv compare                      8   0.6%  53.35% /  53.73%       11183344   0.0%
#  call                                      18   1.4%  31.95% /  69.95%       51880179   0.2%
#  loop guard                                23   1.8%  84.13% /  84.85%    13749065956  42.2%
#  opcode values positive (on trees)         42   3.3%  15.71% /  84.81%     6771097902  20.8%
#  opcode values nonequal (on trees)        226  17.6%  72.48% /  72.84%      844753864   2.6%
#  loop exit                                231  18.0%  86.97% /  86.98%     8952666897  27.5%
#  loop iterations                          239  18.6%  91.10% /  91.10%     3062707264   9.4%
#  DS theory                                281  21.9%  82.08% /  83.39%     7787264075  23.9%
#  no prediction                            293  22.9%  46.92% /  70.70%     2293267840   7.0%
#  guessed loop iterations                  313  24.4%  76.41% /  76.41%    10782750177  33.1%
#  first match                              708  55.2%  82.30% /  82.31%    22489588691  69.0%
#  combined                                1282 100.0%  79.76% /  81.75%    32570120606 100.0%
#
# The heuristic called "first match" is the heuristic used by the GCC branch
# prediction pass; it matches 55.2% of the branches and predicts them with an
# 82.3% hitrate.  As you can see, this heuristic also has very good coverage
# (69.0%).  On the other hand, the "opcode values nonequal (on trees)"
# heuristic has a good hitrate, but poor coverage.
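#
# Putting the steps above together, an end-to-end session might look like the
# following (the source file and binary names are only placeholders):
#
#   gcc -O2 -fprofile-generate program.c -o program
#   ./program                  # reference run collecting the profile
#   gcc -O2 -fprofile-use -fdump-ipa-profile-details program.c -o program
#   find . -name '*.profile' | xargs cat > dump_file
#   ./analyze_brprob.py dump_file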

import sys
import os
import re
import argparse

from math import floor

counter_aggregates = set(['combined', 'first match', 'DS theory',
    'no prediction'])

# An edge is reported as "hot" when it accounts for more than hot_threshold
# percent of the total execution count of its heuristic.
hot_threshold = 10

def percentage(a, b):
    return 100.0 * a / b

def average(values):
    return 1.0 * sum(values) / len(values)

# Average after discarding the most extreme values: the CUT fraction is split
# evenly between the smallest and the largest entries.
def average_cutoff(values, cut):
    l = len(values)
    skip = floor(l * cut / 2)
    if skip > 0:
        values.sort()
        values = values[skip:-skip]
    return average(values)

def median(values):
    values.sort()
    return values[int(len(values) / 2)]

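# Helper for predict.def: parse_and_modify reads the HITRATE () value of each
# DEF_PREDICTOR entry and, when requested, rewrites it with the hitrate
# measured from the dump file.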
class PredictDefFile:
    def __init__(self, path):
        self.path = path
        self.predictors = {}

    def parse_and_modify(self, heuristics, write_def_file):
        lines = [x.rstrip() for x in open(self.path).readlines()]

        p = None
        modified_lines = []
        for i, l in enumerate(lines):
            if l.startswith('DEF_PREDICTOR'):
                next_line = lines[i + 1]
                if l.endswith(','):
                    l += next_line
                m = re.match('.*"(.*)".*', l)
                p = m.group(1)
            elif l == '':
                p = None

            if p != None:
                heuristic = [x for x in heuristics if x.name == p]
                heuristic = heuristic[0] if len(heuristic) == 1 else None

                m = re.match(r'.*HITRATE \(([^)]*)\).*', l)
                if m != None:
                    self.predictors[p] = int(m.group(1))

                    # modify the line
                    if heuristic != None:
                        new_line = (l[:m.start(1)]
                            + str(round(heuristic.get_hitrate()))
                            + l[m.end(1):])
                        l = new_line
                    p = None
                elif 'PROB_VERY_LIKELY' in l:
                    self.predictors[p] = 100
            modified_lines.append(l)

        # save the file
        if write_def_file:
            with open(self.path, 'w+') as f:
                for l in modified_lines:
                    f.write(l + '\n')

class Heuristics:
    def __init__(self, count, hits, fits):
        self.count = count
        self.hits = hits
        self.fits = fits

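# Aggregates all edges attributed to a single heuristic and computes its
# derived statistics (hitrate, branch hitrate and coverage).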
class Summary:
    def __init__(self, name):
        self.name = name
        self.edges = []

    def branches(self):
        return len(self.edges)

    def hits(self):
        return sum([x.hits for x in self.edges])

    def fits(self):
        return sum([x.fits for x in self.edges])

    def count(self):
        return sum([x.count for x in self.edges])

    def successfull_branches(self):
        return len([x for x in self.edges if 2 * x.hits >= x.count])

    def get_hitrate(self):
        return 100.0 * self.hits() / self.count()

    def get_branch_hitrate(self):
        return 100.0 * self.successfull_branches() / self.branches()

    # Format the total count with a metric suffix (k, M, G, ...).
    def count_formatted(self):
        v = self.count()
        for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']:
            if v < 1000:
                return "%3.2f%s" % (v, unit)
            v /= 1000.0
        return "%.1f%s" % (v, 'Y')

    def print(self, branches_max, count_max, predict_def):
        # filter out most hot edges (if requested)
        self.edges = sorted(self.edges, reverse = True, key = lambda x: x.count)
        if args.coverage_threshold != None:
            threshold = args.coverage_threshold * self.count() / 100
            edges = [x for x in self.edges if x.count < threshold]
            if len(edges) != 0:
                self.edges = edges

        predicted_as = None
        if predict_def != None and self.name in predict_def.predictors:
            predicted_as = predict_def.predictors[self.name]

        print('%-40s %8i %5.1f%% %11.2f%% %7.2f%% / %6.2f%% %14i %8s %5.1f%%' %
            (self.name, self.branches(),
                percentage(self.branches(), branches_max),
                self.get_branch_hitrate(),
                self.get_hitrate(),
                percentage(self.fits(), self.count()),
                self.count(), self.count_formatted(),
                percentage(self.count(), count_max)), end = '')

        if predicted_as != None:
            print('%12i%% %5.1f%%' % (predicted_as,
                self.get_hitrate() - predicted_as), end = '')
        else:
            print(' ' * 20, end = '')

        # print details about the most important edges
        if args.coverage_threshold == None:
            edges = [x for x in self.edges[:100] if x.count * hot_threshold > self.count()]
            if args.verbose:
                for c in edges:
                    r = 100.0 * c.count / self.count()
                    print(' %.0f%%:%d' % (r, c.count), end = '')
            elif len(edges) > 0:
                print(' %0.0f%%:%d' % (100.0 * sum([x.count for x in edges]) / self.count(), len(edges)), end = '')

        print()

class Profile:
    def __init__(self, filename):
        self.filename = filename
        self.heuristics = {}
        self.niter_vector = []

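    # Record one edge attributed to heuristic NAME.  PREDICTION is the
    # predicted probability in percent; when it is below 50%, HITS is flipped
    # to COUNT - HITS so that it counts the executions that agree with the
    # prediction, and FITS is the count of the more common direction.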
    def add(self, name, prediction, count, hits):
        if not name in self.heuristics:
            self.heuristics[name] = Summary(name)

        s = self.heuristics[name]

        if prediction < 50:
            hits = count - hits
        remaining = count - hits
        fits = max(hits, remaining)

        s.edges.append(Heuristics(count, hits, fits))

    def add_loop_niter(self, niter):
        if niter > 0:
            self.niter_vector.append(niter)

    def branches_max(self):
        return max([v.branches() for k, v in self.heuristics.items()])

    def count_max(self):
        return max([v.count() for k, v in self.heuristics.items()])

    def print_group(self, sorting, group_name, heuristics, predict_def):
        count_max = self.count_max()
        branches_max = self.branches_max()

        sorter = lambda x: x.branches()
        if sorting == 'branch-hitrate':
            sorter = lambda x: x.get_branch_hitrate()
        elif sorting == 'hitrate':
            sorter = lambda x: x.get_hitrate()
        elif sorting == 'coverage':
            sorter = lambda x: x.count()
        elif sorting == 'name':
            sorter = lambda x: x.name.lower()

        print('%-40s %8s %6s %12s %18s %14s %8s %6s %12s %6s %s' %
            ('HEURISTICS', 'BRANCHES', '(REL)',
            'BR. HITRATE', 'HITRATE', 'COVERAGE', 'COVERAGE', '(REL)',
            'predict.def', '(REL)', 'HOT branches (>%d%%)' % hot_threshold))
        for h in sorted(heuristics, key = sorter):
            h.print(branches_max, count_max, predict_def)

    def dump(self, sorting):
        heuristics = self.heuristics.values()
        if len(heuristics) == 0:
            print('No heuristics available')
            return

        predict_def = None
        if args.def_file != None:
            predict_def = PredictDefFile(args.def_file)
            predict_def.parse_and_modify(heuristics, args.write_def_file)

        special = list(filter(lambda x: x.name in counter_aggregates,
            heuristics))
        normal = list(filter(lambda x: x.name not in counter_aggregates,
            heuristics))

        self.print_group(sorting, 'HEURISTICS', normal, predict_def)
        print()
        self.print_group(sorting, 'HEURISTIC AGGREGATES', special, predict_def)

        if len(self.niter_vector) > 0:
            print('\nLoop count: %d' % len(self.niter_vector))
            print('  avg. # of iter: %.2f' % average(self.niter_vector))
            print('  median # of iter: %.2f' % median(self.niter_vector))
            for v in [1, 5, 10, 20, 30]:
                cut = 0.01 * v
                print('  avg. (%d%% cutoff) # of iter: %.2f'
                    % (v, average_cutoff(self.niter_vector, cut)))

parser = argparse.ArgumentParser()
parser.add_argument('dump_file', metavar = 'dump_file',
    help = 'IPA profile dump file')
parser.add_argument('-s', '--sorting', dest = 'sorting',
    choices = ['branches', 'branch-hitrate', 'hitrate', 'coverage', 'name'],
    default = 'branches')
parser.add_argument('-d', '--def-file', help = 'path to predict.def')
parser.add_argument('-w', '--write-def-file', action = 'store_true',
    help = 'Modify predict.def file in order to set new numbers')
parser.add_argument('-c', '--coverage-threshold', type = int,
    help = 'Ignore edges whose percentage coverage is >= coverage-threshold')
parser.add_argument('-v', '--verbose', action = 'store_true',
    help = 'Print verbose information')

args = parser.parse_args()

profile = Profile(args.dump_file)

loop_niter_str = ';; profile-based iteration count: '

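# Each ';;heuristics;' line in the dump is split on ';'; the fields at
# indices 3-6 (as consumed below) hold the predictor name, the execution
# count, the hit count and the predicted probability.  Remaining fields are
# ignored.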
for l in open(args.dump_file):
    if l.startswith(';;heuristics;'):
        parts = l.strip().split(';')
        assert len(parts) == 8
        name = parts[3]
        prediction = float(parts[6])
        count = int(parts[4])
        hits = int(parts[5])

        profile.add(name, prediction, count, hits)
    elif l.startswith(loop_niter_str):
        v = int(l[len(loop_niter_str):])
        profile.add_loop_niter(v)

profile.dump(args.sorting)