libgcc/hardcfr.c
/* Control flow redundancy hardening
   Copyright (C) 2022-2023 Free Software Foundation, Inc.
   Contributed by Alexandre Oliva <oliva@adacore.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */
/* Avoid infinite recursion.  */
#pragma GCC optimize ("-fno-harden-control-flow-redundancy")

#include <stddef.h>
#include <stdbool.h>

/* This should be kept in sync with gcc/gimple-harden-control-flow.cc.  */
#if __CHAR_BIT__ >= 28
# define VWORDmode __QI__
#elif __CHAR_BIT__ >= 14
# define VWORDmode __HI__
#else
# define VWORDmode __SI__
#endif

typedef unsigned int __attribute__ ((__mode__ (VWORDmode))) vword;
/* This function is optionally called at the end of a function to verify that
   the VISITED array represents a sensible execution path in the CFG.  It is
   always expected to pass; the purpose is to detect attempts to subvert
   execution by taking unexpected paths, or other execution errors.  The
   function, instrumented by pass_harden_control_flow_redundancy at a time in
   which it had BLOCKS basic blocks (not counting ENTER and EXIT, so block 2
   maps to index 0, the first bit of the first VWORD), sets a bit in the bit
   array VISITED as it enters the corresponding basic block.  CFG holds a
   representation of the control flow graph at the time of the
   instrumentation: an array of VWORDs holding, for each block, a sequence of
   predecessors, and a sequence of successors.  Each pred and succ sequence is
   represented as a sequence of pairs (mask, index), terminated by an
   index-less all-zero mask.  If the bit corresponding to the block is set,
   then at least one of the pred masks, and at least one of the succ masks,
   must have a bit set in VISITED[index].  An ENTRY block predecessor and an
   EXIT block successor are represented in a (mask, index) pair that tests the
   block's own bit.  */
extern void __hardcfr_check (size_t blocks,
			     vword const *visited,
			     vword const *cfg);
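
/* Purely illustrative sketch of the encoding described above (not part of
   this file's interface): for a hypothetical straight-line function with
   three instrumented blocks 0 -> 1 -> 2, where block 0 is entered from ENTRY
   and block 2 exits to EXIT, and assuming a vword wide enough that all three
   bits land in VISITED[0], the CFG array could look like this.  Each block
   contributes a pred sequence and a succ sequence, each terminated by an
   index-less zero mask; the ENTRY pred and the EXIT succ test the block's
   own bit.  */
#if 0
static vword const example_cfg[] = {
  /* block 0 */
  0x1, 0,  0,	/* preds: ENTRY, i.e. own bit (mask 0x1, word 0); terminator.  */
  0x2, 0,  0,	/* succs: block 1 (mask 0x2, word 0); terminator.  */
  /* block 1 */
  0x1, 0,  0,	/* preds: block 0; terminator.  */
  0x4, 0,  0,	/* succs: block 2; terminator.  */
  /* block 2 */
  0x2, 0,  0,	/* preds: block 1; terminator.  */
  0x4, 0,  0,	/* succs: EXIT, i.e. own bit; terminator.  */
};
#endif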
/* Compute the MASK for the bit representing BLOCK in WORDIDX's vword in a
   visited blocks bit array.  */
static inline void
block2mask (size_t const block, vword *const mask, size_t *const wordidx)
{
  size_t wbits = __CHAR_BIT__ * sizeof (vword);
  *wordidx = block / wbits;
  *mask = (vword)1 << (block % wbits);
}
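
/* Worked example (illustrative, assuming a 32-bit vword): block 37 yields
   *wordidx == 37 / 32 == 1 and *mask == (vword)1 << (37 % 32) == 0x20;
   block 0 yields word 0, mask 0x1.  */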
/* Check whether the bit corresponding to BLOCK is set in VISITED.  */
static inline bool
visited_p (size_t const block, vword const *const visited)
{
  vword mask;
  size_t wordidx;
  block2mask (block, &mask, &wordidx);
  vword w = visited[wordidx];
  return (w & mask) != 0;
}
/* Check whether any VISITED bits that would correspond to blocks after BLOCKS
   are set.  */
static inline bool
excess_bits_set_p (size_t const blocks, vword const *const visited)
{
  vword mask;
  size_t wordidx;
  block2mask (blocks - 1, &mask, &wordidx);
  mask = -mask - mask;
  vword w = visited[wordidx];
  return (w & mask) != 0;
}
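
/* Worked example (illustrative, assuming a 32-bit vword): with BLOCKS == 5,
   block2mask (4, ...) yields mask 0x10 in word 0; -mask - mask then equals
   0xffffffe0, selecting every bit above the last valid block in that word,
   so any such stray VISITED bit makes this function return true.  */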
/* Read and consume a mask from **CFG_IT (consuming means advancing the
   iterator to the next word).  If the mask is zero, return FALSE.
   Otherwise, also read and consume an index, set *MASK and/or *WORDIDX,
   whichever are non-NULL, to the corresponding values read, and finally
   return TRUE.  */
static inline bool
next_pair (vword const **const cfg_it,
	   vword *const mask,
	   size_t *const wordidx)
{
  vword m = **cfg_it;
  ++*cfg_it;
  if (!m)
    return false;

  if (mask)
    *mask = m;

  size_t word = **cfg_it;
  ++*cfg_it;

  if (wordidx)
    *wordidx = word;

  return true;
}
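
/* Usage sketch (illustrative only; `it' and the example_cfg array above are
   hypothetical): walking one pred or succ sequence reads each (mask, index)
   pair and stops right after consuming the zero terminator, leaving the
   iterator at the start of the following sequence.  */
#if 0
vword const *it = example_cfg;
vword m;
size_t w;
while (next_pair (&it, &m, &w))
  __builtin_printf ("pair: mask 0x%lx word %lu\n",
		    (unsigned long)m, (unsigned long)w);
#endif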
/* Return TRUE iff any of the bits in MASK is set in VISITED[WORDIDX].  */
static inline bool
test_mask (vword const *const visited,
	   vword const mask, size_t const wordidx)
{
  return (visited[wordidx] & mask) != 0;
}
/* Scan a sequence of pairs (mask, index) at **CFG_IT until its terminator is
   reached and consumed.  */
static inline void
consume_seq (vword const **const cfg_it)
{
  while (next_pair (cfg_it, NULL, NULL))
    /* Do nothing.  */;
}
/* Check that at least one of the MASK bits in a sequence of pairs (mask,
   index) at **CFG_IT is set in the corresponding VISITED[INDEX] word.
   Return FALSE if we reach the terminator without finding any match.
   Otherwise, consume the entire sequence, so that *CFG_IT points just past
   the terminator, which may be the beginning of the next sequence.  */
static inline bool
check_seq (vword const *const visited, vword const **const cfg_it)
{
  vword mask;
  size_t wordidx;

  /* If the block was visited, check that at least one of the
     preds/succs was also visited.  */
  do
    /* If we get to the end of the sequence without finding any
       match, something is amiss.  */
    if (!next_pair (cfg_it, &mask, &wordidx))
      return false;
  /* Keep searching until we find a match, at which point the
     condition is satisfied.  */
  while (!test_mask (visited, mask, wordidx));

  /* Consume the remaining entries in the sequence, whether we found a match or
     skipped the block, so as to position the iterator at the beginning of the
     next sequence.  */
  consume_seq (cfg_it);

  return true;
}
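
/* Illustrative trace against the hypothetical example_cfg above: with
   VISITED[0] == 0x7 (all three blocks visited), checking block 1's pred
   sequence reads (mask 0x1, word 0), finds 0x7 & 0x1 != 0, consumes the
   terminator via consume_seq, and returns true; with VISITED[0] == 0x2 the
   same call would hit the terminator without a match and return false.  */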
/* Print out the CFG with BLOCKS blocks, presumed to be associated with CALLER.
   This is expected to be optimized out entirely, unless the verbose part of
   __hardcfr_check_fail is enabled.  */
static inline void
__hardcfr_debug_cfg (size_t const blocks,
		     void const *const caller,
		     vword const *const cfg)
{
  __builtin_printf ("CFG at %p, for %p", cfg, caller);
  vword const *cfg_it = cfg;
  for (size_t i = 0; i < blocks; i++)
    {
      vword mask; size_t wordidx;
      block2mask (i, &mask, &wordidx);
      __builtin_printf ("\nblock %lu (%lu/0x%lx)\npreds: ",
			(unsigned long)i,
			(unsigned long)wordidx, (unsigned long)mask);
      while (next_pair (&cfg_it, &mask, &wordidx))
	__builtin_printf (" (%lu/0x%lx)",
			  (unsigned long)wordidx, (unsigned long)mask);
      __builtin_printf ("\nsuccs: ");
      while (next_pair (&cfg_it, &mask, &wordidx))
	__builtin_printf (" (%lu/0x%lx)",
			  (unsigned long)wordidx, (unsigned long)mask);
    }
  __builtin_printf ("\n");
}
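
/* For the hypothetical example_cfg above with BLOCKS == 3, the output would
   look roughly like this (pointer values elided):

     CFG at 0x..., for 0x...
     block 0 (0/0x1)
     preds:  (0/0x1)
     succs:  (0/0x2)
     block 1 (0/0x2)
     preds:  (0/0x1)
     succs:  (0/0x4)
     block 2 (0/0x4)
     preds:  (0/0x2)
     succs:  (0/0x4)
 */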
#ifndef ATTRIBUTE_UNUSED
# define ATTRIBUTE_UNUSED __attribute__ ((__unused__))
#endif
/* This is called when an out-of-line hardcfr check fails.  All the arguments
   are ignored, and it just traps, unless HARDCFR_VERBOSE_FAIL is enabled.  If
   it is, it prints the PART of the CFG, expected to have BLOCKS blocks, that
   failed at CALLER's BLOCK, and the VISITED bitmap.  When the verbose mode is
   enabled, it also forces __hardcfr_debug_cfg (above) to be compiled into an
   out-of-line function, that could be called from a debugger.  */

#ifdef __BPF__
__attribute__((__always_inline__))
#endif
static inline void
__hardcfr_check_fail (size_t const blocks ATTRIBUTE_UNUSED,
		      vword const *const visited ATTRIBUTE_UNUSED,
		      vword const *const cfg ATTRIBUTE_UNUSED,
		      size_t const block ATTRIBUTE_UNUSED,
		      int const part ATTRIBUTE_UNUSED,
		      void const *const caller ATTRIBUTE_UNUSED)
{
#if HARDCFR_VERBOSE_FAIL
  static const char *parts[] = { "preds", "succs", "no excess" };

  vword mask; size_t wordidx;
  block2mask (block, &mask, &wordidx);
  if (part == 2)
    mask = -mask - mask;

  __builtin_printf ("hardcfr fail at %p block %lu (%lu/0x%lx), expected %s:",
		    caller, (unsigned long)block,
		    (unsigned long)wordidx, (unsigned long)mask,
		    parts[part]);

  if (part != 2)
    {
      /* Skip data for previous blocks.  */
      vword const *cfg_it = cfg;
      for (size_t i = block; i--; )
	{
	  consume_seq (&cfg_it);
	  consume_seq (&cfg_it);
	}
      for (size_t i = part; i--; )
	consume_seq (&cfg_it);

      while (next_pair (&cfg_it, &mask, &wordidx))
	__builtin_printf (" (%lu/0x%lx)",
			  (unsigned long)wordidx, (unsigned long)mask);
    }

  __builtin_printf ("\nvisited:");
  block2mask (blocks - 1, &mask, &wordidx);
  for (size_t i = 0; i <= wordidx; i++)
    __builtin_printf (" (%lu/0x%lx)",
		      (unsigned long)i, (unsigned long)visited[i]);
  __builtin_printf ("\n");

  /* Reference __hardcfr_debug_cfg so that it's output out-of-line, so that it
     can be called from a debugger.  */
  if (!caller || caller == __hardcfr_debug_cfg)
    return;
#endif
  __builtin_trap ();
}
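
/* With HARDCFR_VERBOSE_FAIL defined to a nonzero value, a report for the
   hypothetical example above (say block 1 visited without any visited pred)
   would start roughly like this (pointer value elided):

     hardcfr fail at 0x... block 1 (0/0x2), expected preds: (0/0x1)
     visited: (0/0x2)
 */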
/* Check that, for each of the BLOCKS basic blocks, if its bit is set in
   VISITED, at least one of its predecessors in CFG is also set, and also that
   at least one of its successors in CFG is also set.  */
void
__hardcfr_check (size_t const blocks,
		 vword const *const visited,
		 vword const *const cfg)
{
  vword const *cfg_it = cfg;
  for (size_t i = 0; i < blocks; i++)
    {
      bool v = visited_p (i, visited);

      /* For each block, there are two sequences of pairs (mask, index), each
	 sequence terminated by a single all-zero mask (no index).  The first
	 sequence is for predecessor blocks, the second is for successors; at
	 least one of each must be set for a visited block.  */
      if (!v)
	{
	  /* Consume predecessors.  */
	  consume_seq (&cfg_it);
	  /* Consume successors.  */
	  consume_seq (&cfg_it);
	}
      else
	{
	  /* Check predecessors.  */
	  if (!check_seq (visited, &cfg_it))
	    __hardcfr_check_fail (blocks, visited, cfg, i, 0,
				  __builtin_return_address (0));
	  /* Check successors.  */
	  if (!check_seq (visited, &cfg_it))
	    __hardcfr_check_fail (blocks, visited, cfg, i, 1,
				  __builtin_return_address (0));
	}
    }
  if (excess_bits_set_p (blocks, visited))
    __hardcfr_check_fail (blocks, visited, cfg, blocks - 1, 2,
			  __builtin_return_address (0));
}
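
/* Usage sketch (illustrative only; instrumented_fn and example_cfg are
   hypothetical, and in practice pass_harden_control_flow_redundancy emits
   the equivalent instrumentation automatically): a function with three
   instrumented blocks keeps a visited bitmap on the stack, sets each block's
   bit as the block executes, and verifies the recorded path against the
   static CFG before returning.  */
#if 0
void
instrumented_fn (void)
{
  vword visited[1] = { 0 };
  visited[0] |= (vword)1 << 0;	/* entered block 0  */
  visited[0] |= (vword)1 << 1;	/* entered block 1  */
  visited[0] |= (vword)1 << 2;	/* entered block 2  */
  __hardcfr_check (3, visited, example_cfg);
}
#endif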