PR middle-end/66633
gcc/tree-eh.c
1 /* Exception handling semantics and decomposition for trees.
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "alias.h"
25 #include "symtab.h"
26 #include "tree.h"
27 #include "fold-const.h"
28 #include "hard-reg-set.h"
29 #include "function.h"
30 #include "rtl.h"
31 #include "flags.h"
32 #include "insn-config.h"
33 #include "expmed.h"
34 #include "dojump.h"
35 #include "explow.h"
36 #include "calls.h"
37 #include "emit-rtl.h"
38 #include "varasm.h"
39 #include "stmt.h"
40 #include "expr.h"
41 #include "except.h"
42 #include "predict.h"
43 #include "dominance.h"
44 #include "cfg.h"
45 #include "cfganal.h"
46 #include "cfgcleanup.h"
47 #include "basic-block.h"
48 #include "tree-ssa-alias.h"
49 #include "internal-fn.h"
50 #include "tree-eh.h"
51 #include "gimple-expr.h"
52 #include "gimple.h"
53 #include "gimple-iterator.h"
54 #include "gimple-ssa.h"
55 #include "cgraph.h"
56 #include "tree-cfg.h"
57 #include "tree-phinodes.h"
58 #include "ssa-iterators.h"
59 #include "stringpool.h"
60 #include "tree-ssanames.h"
61 #include "tree-into-ssa.h"
62 #include "tree-ssa.h"
63 #include "tree-inline.h"
64 #include "tree-pass.h"
65 #include "langhooks.h"
66 #include "diagnostic-core.h"
67 #include "target.h"
68 #include "cfgloop.h"
69 #include "gimple-low.h"
71 /* In some instances a tree and a gimple need to be stored in the same table,
72 i.e. in hash tables. This structure makes that possible. */
73 typedef union {tree *tp; tree t; gimple g;} treemple;
75 /* Misc functions used in this file. */
77 /* Remember and lookup EH landing pad data for arbitrary statements.
78 Really this means any statement that could_throw_p. We could
79 stuff this information into the stmt_ann data structure, but:
81 (1) We absolutely rely on this information being kept until
82 we get to rtl. Once we're done with lowering here, if we lose
83 the information there's no way to recover it!
85 (2) There are many more statements that *cannot* throw as
86 compared to those that can. We should be saving some amount
87 of space by only allocating memory for those that can throw. */
89 /* Add statement T in function IFUN to landing pad NUM. */
91 static void
92 add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
94 gcc_assert (num != 0);
96 if (!get_eh_throw_stmt_table (ifun))
97 set_eh_throw_stmt_table (ifun, hash_map<gimple, int>::create_ggc (31));
99 gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
102 /* Add statement T in the current function (cfun) to EH landing pad NUM. */
104 void
105 add_stmt_to_eh_lp (gimple t, int num)
107 add_stmt_to_eh_lp_fn (cfun, t, num);
110 /* Add statement T to the single EH landing pad in REGION. */
112 static void
113 record_stmt_eh_region (eh_region region, gimple t)
115 if (region == NULL)
116 return;
117 if (region->type == ERT_MUST_NOT_THROW)
118 add_stmt_to_eh_lp_fn (cfun, t, -region->index);
119 else
121 eh_landing_pad lp = region->landing_pads;
122 if (lp == NULL)
123 lp = gen_eh_landing_pad (region);
124 else
125 gcc_assert (lp->next_lp == NULL);
126 add_stmt_to_eh_lp_fn (cfun, t, lp->index);
131 /* Remove statement T in function IFUN from its EH landing pad. */
133 bool
134 remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
136 if (!get_eh_throw_stmt_table (ifun))
137 return false;
139 if (!get_eh_throw_stmt_table (ifun)->get (t))
140 return false;
142 get_eh_throw_stmt_table (ifun)->remove (t);
143 return true;
147 /* Remove statement T in the current function (cfun) from its
148 EH landing pad. */
150 bool
151 remove_stmt_from_eh_lp (gimple t)
153 return remove_stmt_from_eh_lp_fn (cfun, t);
156 /* Determine if statement T is inside an EH region in function IFUN.
157 Positive numbers indicate a landing pad index; negative numbers
158 indicate a MUST_NOT_THROW region index; zero indicates that the
159 statement is not recorded in the region table. */
162 lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
164 if (ifun->eh->throw_stmt_table == NULL)
165 return 0;
167 int *lp_nr = ifun->eh->throw_stmt_table->get (t);
168 return lp_nr ? *lp_nr : 0;
171 /* Likewise, but always use the current function. */
174 lookup_stmt_eh_lp (gimple t)
176 /* We can get called from initialized data when -fnon-call-exceptions
177 is on; prevent crash. */
178 if (!cfun)
179 return 0;
180 return lookup_stmt_eh_lp_fn (cfun, t);
183 /* First pass of EH node decomposition. Build up a tree of GIMPLE_TRY_FINALLY
184 nodes and LABEL_DECL nodes. We will use this during the second phase to
185 determine if a goto leaves the body of a TRY_FINALLY_EXPR node. */
187 struct finally_tree_node
189 /* When storing a GIMPLE_TRY, we have to record a gimple. However,
190 when deciding whether a GOTO to a certain LABEL_DECL (which is a
191 tree) leaves the TRY block, it's necessary to record a tree in
192 this field. Thus a treemple is used. */
193 treemple child;
194 gtry *parent;
197 /* Hashtable helpers. */
199 struct finally_tree_hasher : free_ptr_hash <finally_tree_node>
201 static inline hashval_t hash (const finally_tree_node *);
202 static inline bool equal (const finally_tree_node *,
203 const finally_tree_node *);
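/* Hash on the address of the recorded child; the shift discards the
   low-order bits, presumably because they are mostly zero for aligned
   tree/gimple nodes and so carry little information.  */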
206 inline hashval_t
207 finally_tree_hasher::hash (const finally_tree_node *v)
209 return (intptr_t)v->child.t >> 4;
212 inline bool
213 finally_tree_hasher::equal (const finally_tree_node *v,
214 const finally_tree_node *c)
216 return v->child.t == c->child.t;
219 /* Note that this table is *not* marked GTY. It is short-lived. */
220 static hash_table<finally_tree_hasher> *finally_tree;
222 static void
223 record_in_finally_tree (treemple child, gtry *parent)
225 struct finally_tree_node *n;
226 finally_tree_node **slot;
228 n = XNEW (struct finally_tree_node);
229 n->child = child;
230 n->parent = parent;
232 slot = finally_tree->find_slot (n, INSERT);
233 gcc_assert (!*slot);
234 *slot = n;
237 static void
238 collect_finally_tree (gimple stmt, gtry *region);
240 /* Go through the gimple sequence. Works with collect_finally_tree to
241 record all GIMPLE_LABEL and GIMPLE_TRY statements. */
243 static void
244 collect_finally_tree_1 (gimple_seq seq, gtry *region)
246 gimple_stmt_iterator gsi;
248 for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
249 collect_finally_tree (gsi_stmt (gsi), region);
252 static void
253 collect_finally_tree (gimple stmt, gtry *region)
255 treemple temp;
257 switch (gimple_code (stmt))
259 case GIMPLE_LABEL:
260 temp.t = gimple_label_label (as_a <glabel *> (stmt));
261 record_in_finally_tree (temp, region);
262 break;
264 case GIMPLE_TRY:
265 if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
267 temp.g = stmt;
268 record_in_finally_tree (temp, region);
269 collect_finally_tree_1 (gimple_try_eval (stmt),
270 as_a <gtry *> (stmt));
271 collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
273 else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
275 collect_finally_tree_1 (gimple_try_eval (stmt), region);
276 collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
278 break;
280 case GIMPLE_CATCH:
281 collect_finally_tree_1 (gimple_catch_handler (
282 as_a <gcatch *> (stmt)),
283 region);
284 break;
286 case GIMPLE_EH_FILTER:
287 collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
288 break;
290 case GIMPLE_EH_ELSE:
292 geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
293 collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region);
294 collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region);
296 break;
298 default:
299 /* A type, a decl, or some kind of statement that we're not
300 interested in. Don't walk them. */
301 break;
306 /* Use the finally tree to determine if a jump from START to TARGET
307 would leave the try_finally node that START lives in. */
309 static bool
310 outside_finally_tree (treemple start, gimple target)
312 struct finally_tree_node n, *p;
314 do
315 {
316 n.child = start;
317 p = finally_tree->find (&n);
318 if (!p)
319 return true;
320 start.g = p->parent;
321 }
322 while (start.g != target);
324 return false;
327 /* Second pass of EH node decomposition. Actually transform the GIMPLE_TRY
328 nodes into a set of gotos, magic labels, and eh regions.
329 The eh region creation is straightforward, but frobbing all the gotos
330 and such into shape isn't. */
332 /* The sequence into which we record all EH stuff. This will be
333 placed at the end of the function when we're all done. */
334 static gimple_seq eh_seq;
336 /* Record whether an EH region contains something that can throw,
337 indexed by EH region number. */
338 static bitmap eh_region_may_contain_throw_map;
340 /* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
341 statements that are seen to escape this GIMPLE_TRY_FINALLY node.
342 The idea is to record a gimple statement for everything except for
343 the conditionals, which get their labels recorded. Since labels are
344 of type 'tree', we need this node to store both gimple and tree
345 objects. REPL_STMT is the sequence used to replace the goto/return
346 statement. CONT_STMT is used to store the statement that allows
347 the return/goto to jump to the original destination. */
349 struct goto_queue_node
351 treemple stmt;
352 location_t location;
353 gimple_seq repl_stmt;
354 gimple cont_stmt;
355 int index;
356 /* This is used when index >= 0 to indicate that stmt is a label (as
357 opposed to a goto stmt). */
358 int is_label;
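/* A hypothetical sketch of what gets queued, in pseudo-source:

     try {
       if (cond) goto out;    // leaves the try/finally: queued with the
       return x;              // label's dest_array index; returns use -1
     } finally {
       cleanup ();
     }
   out:;

   Each escaping goto/return gets a goto_queue_node; REPL_STMT later holds
   the code that routes control through the finally block, and CONT_STMT
   re-emits the jump to the original destination afterwards.  */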
361 /* State of the world while lowering. */
363 struct leh_state
365 /* What's "current" while constructing the eh region tree. These
366 correspond to variables of the same name in cfun->eh, which we
367 don't have easy access to. */
368 eh_region cur_region;
370 /* What's "current" for the purposes of __builtin_eh_pointer. For
371 a CATCH, this is the associated TRY. For an EH_FILTER, this is
372 the associated ALLOWED_EXCEPTIONS, etc. */
373 eh_region ehp_region;
375 /* Processing of TRY_FINALLY requires a bit more state. This is
376 split out into a separate structure so that we don't have to
377 copy so much when processing other nodes. */
378 struct leh_tf_state *tf;
381 struct leh_tf_state
383 /* Pointer to the GIMPLE_TRY_FINALLY node under discussion. The
384 try_finally_expr is the original GIMPLE_TRY_FINALLY. We need to retain
385 this so that outside_finally_tree can reliably reference the tree used
386 in the collect_finally_tree data structures. */
387 gtry *try_finally_expr;
388 gtry *top_p;
390 /* While lowering, a top_p is usually expanded into multiple statements;
391 the following field stores them. */
392 gimple_seq top_p_seq;
394 /* The state outside this try_finally node. */
395 struct leh_state *outer;
397 /* The exception region created for it. */
398 eh_region region;
400 /* The goto queue. */
401 struct goto_queue_node *goto_queue;
402 size_t goto_queue_size;
403 size_t goto_queue_active;
405 /* Pointer map to help in searching goto_queue when it is large. */
406 hash_map<gimple, goto_queue_node *> *goto_queue_map;
408 /* The set of unique labels seen as entries in the goto queue. */
409 vec<tree> dest_array;
411 /* A label to be added at the end of the completed transformed
412 sequence. It will be set if may_fallthru was true *at one time*,
413 though subsequent transformations may have cleared that flag. */
414 tree fallthru_label;
416 /* True if it is possible to fall out the bottom of the try block.
417 Cleared if the fallthru is converted to a goto. */
418 bool may_fallthru;
420 /* True if any entry in goto_queue is a GIMPLE_RETURN. */
421 bool may_return;
423 /* True if the finally block can receive an exception edge.
424 Cleared if the exception case is handled by code duplication. */
425 bool may_throw;
428 static gimple_seq lower_eh_must_not_throw (struct leh_state *, gtry *);
430 /* Search for STMT in the goto queue. Return the replacement,
431 or null if the statement isn't in the queue. */
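/* Queues with fewer than this many entries are searched linearly; larger
   queues get a hash map keyed by the goto/return statement, built lazily
   in find_goto_replacement below.  */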
433 #define LARGE_GOTO_QUEUE 20
435 static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);
437 static gimple_seq
438 find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
440 unsigned int i;
442 if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
444 for (i = 0; i < tf->goto_queue_active; i++)
445 if ( tf->goto_queue[i].stmt.g == stmt.g)
446 return tf->goto_queue[i].repl_stmt;
447 return NULL;
450 /* If we have a large number of entries in the goto_queue, create a
451 pointer map and use that for searching. */
453 if (!tf->goto_queue_map)
455 tf->goto_queue_map = new hash_map<gimple, goto_queue_node *>;
456 for (i = 0; i < tf->goto_queue_active; i++)
458 bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
459 &tf->goto_queue[i]);
460 gcc_assert (!existed);
464 goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
465 if (slot != NULL)
466 return ((*slot)->repl_stmt);
468 return NULL;
471 /* A subroutine of replace_goto_queue_1. Handles the sub-clauses of a
472 lowered GIMPLE_COND. If, by chance, the replacement is a simple goto,
473 then we can just splat it in, otherwise we add the new stmts immediately
474 after the GIMPLE_COND and redirect. */
476 static void
477 replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
478 gimple_stmt_iterator *gsi)
480 tree label;
481 gimple_seq new_seq;
482 treemple temp;
483 location_t loc = gimple_location (gsi_stmt (*gsi));
485 temp.tp = tp;
486 new_seq = find_goto_replacement (tf, temp);
487 if (!new_seq)
488 return;
490 if (gimple_seq_singleton_p (new_seq)
491 && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
493 *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
494 return;
497 label = create_artificial_label (loc);
498 /* Set the new label for the GIMPLE_COND */
499 *tp = label;
501 gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
502 gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
505 /* The real work of replace_goto_queue. Returns with GSI updated to
506 point to the next statement. */
508 static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);
510 static void
511 replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
512 gimple_stmt_iterator *gsi)
514 gimple_seq seq;
515 treemple temp;
516 temp.g = NULL;
518 switch (gimple_code (stmt))
520 case GIMPLE_GOTO:
521 case GIMPLE_RETURN:
522 temp.g = stmt;
523 seq = find_goto_replacement (tf, temp);
524 if (seq)
526 gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
527 gsi_remove (gsi, false);
528 return;
530 break;
532 case GIMPLE_COND:
533 replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
534 replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
535 break;
537 case GIMPLE_TRY:
538 replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
539 replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
540 break;
541 case GIMPLE_CATCH:
542 replace_goto_queue_stmt_list (gimple_catch_handler_ptr (
543 as_a <gcatch *> (stmt)),
544 tf);
545 break;
546 case GIMPLE_EH_FILTER:
547 replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
548 break;
549 case GIMPLE_EH_ELSE:
551 geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
552 replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt),
553 tf);
554 replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt),
555 tf);
557 break;
559 default:
560 /* These won't have gotos in them. */
561 break;
564 gsi_next (gsi);
567 /* A subroutine of replace_goto_queue. Handles GIMPLE_SEQ. */
569 static void
570 replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
572 gimple_stmt_iterator gsi = gsi_start (*seq);
574 while (!gsi_end_p (gsi))
575 replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
578 /* Replace all goto queue members. */
580 static void
581 replace_goto_queue (struct leh_tf_state *tf)
583 if (tf->goto_queue_active == 0)
584 return;
585 replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
586 replace_goto_queue_stmt_list (&eh_seq, tf);
589 /* Add a new record to the goto queue contained in TF. NEW_STMT is the
590 data to be added, IS_LABEL indicates whether NEW_STMT is a label or
591 a gimple return. */
593 static void
594 record_in_goto_queue (struct leh_tf_state *tf,
595 treemple new_stmt,
596 int index,
597 bool is_label,
598 location_t location)
600 size_t active, size;
601 struct goto_queue_node *q;
603 gcc_assert (!tf->goto_queue_map);
605 active = tf->goto_queue_active;
606 size = tf->goto_queue_size;
607 if (active >= size)
609 size = (size ? size * 2 : 32);
610 tf->goto_queue_size = size;
611 tf->goto_queue
612 = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
615 q = &tf->goto_queue[active];
616 tf->goto_queue_active = active + 1;
618 memset (q, 0, sizeof (*q));
619 q->stmt = new_stmt;
620 q->index = index;
621 q->location = location;
622 q->is_label = is_label;
625 /* Record the LABEL label in the goto queue contained in TF.
626 TF is not null. */
628 static void
629 record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
630 location_t location)
632 int index;
633 treemple temp, new_stmt;
635 if (!label)
636 return;
638 /* Computed and non-local gotos do not get processed. Given
639 their nature we can neither tell whether we've escaped the
640 finally block nor redirect them if we knew. */
641 if (TREE_CODE (label) != LABEL_DECL)
642 return;
644 /* No need to record gotos that don't leave the try block. */
645 temp.t = label;
646 if (!outside_finally_tree (temp, tf->try_finally_expr))
647 return;
649 if (! tf->dest_array.exists ())
651 tf->dest_array.create (10);
652 tf->dest_array.quick_push (label);
653 index = 0;
655 else
657 int n = tf->dest_array.length ();
658 for (index = 0; index < n; ++index)
659 if (tf->dest_array[index] == label)
660 break;
661 if (index == n)
662 tf->dest_array.safe_push (label);
665 /* In the case of a GOTO we want to record the destination label,
666 since with a GIMPLE_COND we have an easy access to the then/else
667 labels. */
668 new_stmt = stmt;
669 record_in_goto_queue (tf, new_stmt, index, true, location);
672 /* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
673 node, and if so record that fact in the goto queue associated with that
674 try_finally node. */
676 static void
677 maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
679 struct leh_tf_state *tf = state->tf;
680 treemple new_stmt;
682 if (!tf)
683 return;
685 switch (gimple_code (stmt))
687 case GIMPLE_COND:
689 gcond *cond_stmt = as_a <gcond *> (stmt);
690 new_stmt.tp = gimple_op_ptr (cond_stmt, 2);
691 record_in_goto_queue_label (tf, new_stmt,
692 gimple_cond_true_label (cond_stmt),
693 EXPR_LOCATION (*new_stmt.tp));
694 new_stmt.tp = gimple_op_ptr (cond_stmt, 3);
695 record_in_goto_queue_label (tf, new_stmt,
696 gimple_cond_false_label (cond_stmt),
697 EXPR_LOCATION (*new_stmt.tp));
699 break;
700 case GIMPLE_GOTO:
701 new_stmt.g = stmt;
702 record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
703 gimple_location (stmt));
704 break;
706 case GIMPLE_RETURN:
707 tf->may_return = true;
708 new_stmt.g = stmt;
709 record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
710 break;
712 default:
713 gcc_unreachable ();
718 #ifdef ENABLE_CHECKING
719 /* We do not process GIMPLE_SWITCHes for now. As long as the original source
720 was in fact structured, and we've not yet done jump threading, then none
721 of the labels will leave outer GIMPLE_TRY_FINALLY nodes. Verify this. */
723 static void
724 verify_norecord_switch_expr (struct leh_state *state,
725 gswitch *switch_expr)
727 struct leh_tf_state *tf = state->tf;
728 size_t i, n;
730 if (!tf)
731 return;
733 n = gimple_switch_num_labels (switch_expr);
735 for (i = 0; i < n; ++i)
737 treemple temp;
738 tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
739 temp.t = lab;
740 gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
743 #else
744 #define verify_norecord_switch_expr(state, switch_expr)
745 #endif
747 /* Redirect a RETURN_EXPR pointed to by Q to FINLAB. If MOD is
748 non-null, insert it before the new branch. */
750 static void
751 do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
753 gimple x;
755 /* In the case of a return, the queue node must be a gimple statement. */
756 gcc_assert (!q->is_label);
758 /* Note that the return value may have already been computed, e.g.,
760 int x;
761 int foo (void)
763 x = 0;
764 try {
765 return x;
766 } finally {
767 x++;
771 should return 0, not 1. We don't have to do anything to make
772 this happen because the return value has been placed in the
773 RESULT_DECL already. */
775 q->cont_stmt = q->stmt.g;
777 if (mod)
778 gimple_seq_add_seq (&q->repl_stmt, mod);
780 x = gimple_build_goto (finlab);
781 gimple_set_location (x, q->location);
782 gimple_seq_add_stmt (&q->repl_stmt, x);
785 /* Similar, but easier, for GIMPLE_GOTO. */
787 static void
788 do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
789 struct leh_tf_state *tf)
791 ggoto *x;
793 gcc_assert (q->is_label);
795 q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);
797 if (mod)
798 gimple_seq_add_seq (&q->repl_stmt, mod);
800 x = gimple_build_goto (finlab);
801 gimple_set_location (x, q->location);
802 gimple_seq_add_stmt (&q->repl_stmt, x);
805 /* Emit a standard landing pad sequence into SEQ for REGION. */
807 static void
808 emit_post_landing_pad (gimple_seq *seq, eh_region region)
810 eh_landing_pad lp = region->landing_pads;
811 glabel *x;
813 if (lp == NULL)
814 lp = gen_eh_landing_pad (region);
816 lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
817 EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;
819 x = gimple_build_label (lp->post_landing_pad);
820 gimple_seq_add_stmt (seq, x);
823 /* Emit a RESX statement into SEQ for REGION. */
825 static void
826 emit_resx (gimple_seq *seq, eh_region region)
828 gresx *x = gimple_build_resx (region->index);
829 gimple_seq_add_stmt (seq, x);
830 if (region->outer)
831 record_stmt_eh_region (region->outer, x);
834 /* Emit an EH_DISPATCH statement into SEQ for REGION. */
836 static void
837 emit_eh_dispatch (gimple_seq *seq, eh_region region)
839 geh_dispatch *x = gimple_build_eh_dispatch (region->index);
840 gimple_seq_add_stmt (seq, x);
843 /* Note that the current EH region may contain a throw, or a
844 call to a function which itself may contain a throw. */
846 static void
847 note_eh_region_may_contain_throw (eh_region region)
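/* bitmap_set_bit returns true only when the bit was previously clear, so
   this walk marks each enclosing region and stops at the first one that is
   already marked, or at a MUST_NOT_THROW region.  */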
849 while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
851 if (region->type == ERT_MUST_NOT_THROW)
852 break;
853 region = region->outer;
854 if (region == NULL)
855 break;
859 /* Check if REGION has been marked as containing a throw. If REGION is
860 NULL, this predicate is false. */
862 static inline bool
863 eh_region_may_contain_throw (eh_region r)
865 return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
868 /* We want to transform
869 try { body; } catch { stuff; }
870 into
871 normal_sequence:
872 body;
873 over:
874 eh_sequence:
875 landing_pad:
876 stuff;
877 goto over;
879 TP is a GIMPLE_TRY node. REGION is the region whose post_landing_pad
880 should be placed before the second operand, or NULL. OVER is
881 an existing label that should be put at the exit, or NULL. */
883 static gimple_seq
884 frob_into_branch_around (gtry *tp, eh_region region, tree over)
886 gimple x;
887 gimple_seq cleanup, result;
888 location_t loc = gimple_location (tp);
890 cleanup = gimple_try_cleanup (tp);
891 result = gimple_try_eval (tp);
893 if (region)
894 emit_post_landing_pad (&eh_seq, region);
896 if (gimple_seq_may_fallthru (cleanup))
898 if (!over)
899 over = create_artificial_label (loc);
900 x = gimple_build_goto (over);
901 gimple_set_location (x, loc);
902 gimple_seq_add_stmt (&cleanup, x);
904 gimple_seq_add_seq (&eh_seq, cleanup);
906 if (over)
908 x = gimple_build_label (over);
909 gimple_seq_add_stmt (&result, x);
911 return result;
914 /* A subroutine of lower_try_finally. Duplicate the sequence SEQ.
915 Make sure to record all new labels found. */
917 static gimple_seq
918 lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
919 location_t loc)
921 gtry *region = NULL;
922 gimple_seq new_seq;
923 gimple_stmt_iterator gsi;
925 new_seq = copy_gimple_seq_and_replace_locals (seq);
927 for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
929 gimple stmt = gsi_stmt (gsi);
930 if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
932 tree block = gimple_block (stmt);
933 gimple_set_location (stmt, loc);
934 gimple_set_block (stmt, block);
938 if (outer_state->tf)
939 region = outer_state->tf->try_finally_expr;
940 collect_finally_tree_1 (new_seq, region);
942 return new_seq;
945 /* A subroutine of lower_try_finally. Create a fallthru label for
946 the given try_finally state. The only tricky bit here is that
947 we have to make sure to record the label in our outer context. */
949 static tree
950 lower_try_finally_fallthru_label (struct leh_tf_state *tf)
952 tree label = tf->fallthru_label;
953 treemple temp;
955 if (!label)
957 label = create_artificial_label (gimple_location (tf->try_finally_expr));
958 tf->fallthru_label = label;
959 if (tf->outer->tf)
961 temp.t = label;
962 record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
965 return label;
968 /* A subroutine of lower_try_finally. If FINALLY consists of a
969 GIMPLE_EH_ELSE node, return it. */
971 static inline geh_else *
972 get_eh_else (gimple_seq finally)
974 gimple x = gimple_seq_first_stmt (finally);
975 if (gimple_code (x) == GIMPLE_EH_ELSE)
977 gcc_assert (gimple_seq_singleton_p (finally));
978 return as_a <geh_else *> (x);
980 return NULL;
983 /* A subroutine of lower_try_finally. If the eh_protect_cleanup_actions
984 langhook returns non-null, then the language requires that the exception
985 path out of a try_finally be treated specially. To wit: the code within
986 the finally block may not itself throw an exception. We have two choices
987 here. First we can duplicate the finally block and wrap it in a
988 must_not_throw region. Second, we can generate code like
990 try {
991 finally_block;
992 } catch {
993 if (fintmp == eh_edge)
994 protect_cleanup_actions;
997 where "fintmp" is the temporary used in the switch statement generation
998 alternative considered below. For the nonce, we always choose the first
999 option.
1001 THIS_STATE may be null if this is a try-cleanup, not a try-finally. */
1003 static void
1004 honor_protect_cleanup_actions (struct leh_state *outer_state,
1005 struct leh_state *this_state,
1006 struct leh_tf_state *tf)
1008 tree protect_cleanup_actions;
1009 gimple_stmt_iterator gsi;
1010 bool finally_may_fallthru;
1011 gimple_seq finally;
1012 gimple x;
1013 geh_mnt *eh_mnt;
1014 gtry *try_stmt;
1015 geh_else *eh_else;
1017 /* First check for nothing to do. */
1018 if (lang_hooks.eh_protect_cleanup_actions == NULL)
1019 return;
1020 protect_cleanup_actions = lang_hooks.eh_protect_cleanup_actions ();
1021 if (protect_cleanup_actions == NULL)
1022 return;
1024 finally = gimple_try_cleanup (tf->top_p);
1025 eh_else = get_eh_else (finally);
1027 /* Duplicate the FINALLY block. Only need to do this for try-finally,
1028 and not for cleanups. If we've got an EH_ELSE, extract it now. */
1029 if (eh_else)
1031 finally = gimple_eh_else_e_body (eh_else);
1032 gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
1034 else if (this_state)
1035 finally = lower_try_finally_dup_block (finally, outer_state,
1036 gimple_location (tf->try_finally_expr));
1037 finally_may_fallthru = gimple_seq_may_fallthru (finally);
1039 /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
1040 set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
1041 to be in an enclosing scope, but needs to be implemented at this level
1042 to avoid a nesting violation (see wrap_temporary_cleanups in
1043 cp/decl.c). Since it's logically at an outer level, we should call
1044 terminate before we get to it, so strip it away before adding the
1045 MUST_NOT_THROW filter. */
1046 gsi = gsi_start (finally);
1047 x = gsi_stmt (gsi);
1048 if (gimple_code (x) == GIMPLE_TRY
1049 && gimple_try_kind (x) == GIMPLE_TRY_CATCH
1050 && gimple_try_catch_is_cleanup (x))
1052 gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
1053 gsi_remove (&gsi, false);
1056 /* Wrap the block with protect_cleanup_actions as the action. */
1057 eh_mnt = gimple_build_eh_must_not_throw (protect_cleanup_actions);
1058 try_stmt = gimple_build_try (finally, gimple_seq_alloc_with_stmt (eh_mnt),
1059 GIMPLE_TRY_CATCH);
1060 finally = lower_eh_must_not_throw (outer_state, try_stmt);
1062 /* Drop all of this into the exception sequence. */
1063 emit_post_landing_pad (&eh_seq, tf->region);
1064 gimple_seq_add_seq (&eh_seq, finally);
1065 if (finally_may_fallthru)
1066 emit_resx (&eh_seq, tf->region);
1068 /* Having now been handled, EH isn't to be considered with
1069 the rest of the outgoing edges. */
1070 tf->may_throw = false;
1073 /* A subroutine of lower_try_finally. We have determined that there is
1074 no fallthru edge out of the finally block. This means that there is
1075 no outgoing edge corresponding to any incoming edge. Restructure the
1076 try_finally node for this special case. */
1078 static void
1079 lower_try_finally_nofallthru (struct leh_state *state,
1080 struct leh_tf_state *tf)
1082 tree lab;
1083 gimple x;
1084 geh_else *eh_else;
1085 gimple_seq finally;
1086 struct goto_queue_node *q, *qe;
1088 lab = create_artificial_label (gimple_location (tf->try_finally_expr));
1090 /* We expect that tf->top_p is a GIMPLE_TRY. */
1091 finally = gimple_try_cleanup (tf->top_p);
1092 tf->top_p_seq = gimple_try_eval (tf->top_p);
1094 x = gimple_build_label (lab);
1095 gimple_seq_add_stmt (&tf->top_p_seq, x);
1097 q = tf->goto_queue;
1098 qe = q + tf->goto_queue_active;
1099 for (; q < qe; ++q)
1100 if (q->index < 0)
1101 do_return_redirection (q, lab, NULL);
1102 else
1103 do_goto_redirection (q, lab, NULL, tf);
1105 replace_goto_queue (tf);
1107 /* Emit the finally block into the stream. Lower EH_ELSE at this time. */
1108 eh_else = get_eh_else (finally);
1109 if (eh_else)
1111 finally = gimple_eh_else_n_body (eh_else);
1112 lower_eh_constructs_1 (state, &finally);
1113 gimple_seq_add_seq (&tf->top_p_seq, finally);
1115 if (tf->may_throw)
1117 finally = gimple_eh_else_e_body (eh_else);
1118 lower_eh_constructs_1 (state, &finally);
1120 emit_post_landing_pad (&eh_seq, tf->region);
1121 gimple_seq_add_seq (&eh_seq, finally);
1124 else
1126 lower_eh_constructs_1 (state, &finally);
1127 gimple_seq_add_seq (&tf->top_p_seq, finally);
1129 if (tf->may_throw)
1131 emit_post_landing_pad (&eh_seq, tf->region);
1133 x = gimple_build_goto (lab);
1134 gimple_set_location (x, gimple_location (tf->try_finally_expr));
1135 gimple_seq_add_stmt (&eh_seq, x);
1140 /* A subroutine of lower_try_finally. We have determined that there is
1141 exactly one destination of the finally block. Restructure the
1142 try_finally node for this special case. */
1144 static void
1145 lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
1147 struct goto_queue_node *q, *qe;
1148 geh_else *eh_else;
1149 glabel *label_stmt;
1150 gimple x;
1151 gimple_seq finally;
1152 gimple_stmt_iterator gsi;
1153 tree finally_label;
1154 location_t loc = gimple_location (tf->try_finally_expr);
1156 finally = gimple_try_cleanup (tf->top_p);
1157 tf->top_p_seq = gimple_try_eval (tf->top_p);
1159 /* Since there's only one destination, and the destination edge can only
1160 either be EH or non-EH, that implies that all of our incoming edges
1161 are of the same type. Therefore we can lower EH_ELSE immediately. */
1162 eh_else = get_eh_else (finally);
1163 if (eh_else)
1165 if (tf->may_throw)
1166 finally = gimple_eh_else_e_body (eh_else);
1167 else
1168 finally = gimple_eh_else_n_body (eh_else);
1171 lower_eh_constructs_1 (state, &finally);
1173 for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1175 gimple stmt = gsi_stmt (gsi);
1176 if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
1178 tree block = gimple_block (stmt);
1179 gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
1180 gimple_set_block (stmt, block);
1184 if (tf->may_throw)
1186 /* Only reachable via the exception edge. Add the given label to
1187 the head of the FINALLY block. Append a RESX at the end. */
1188 emit_post_landing_pad (&eh_seq, tf->region);
1189 gimple_seq_add_seq (&eh_seq, finally);
1190 emit_resx (&eh_seq, tf->region);
1191 return;
1194 if (tf->may_fallthru)
1196 /* Only reachable via the fallthru edge. Do nothing but let
1197 the two blocks run together; we'll fall out the bottom. */
1198 gimple_seq_add_seq (&tf->top_p_seq, finally);
1199 return;
1202 finally_label = create_artificial_label (loc);
1203 label_stmt = gimple_build_label (finally_label);
1204 gimple_seq_add_stmt (&tf->top_p_seq, label_stmt);
1206 gimple_seq_add_seq (&tf->top_p_seq, finally);
1208 q = tf->goto_queue;
1209 qe = q + tf->goto_queue_active;
1211 if (tf->may_return)
1213 /* Reachable by return expressions only. Redirect them. */
1214 for (; q < qe; ++q)
1215 do_return_redirection (q, finally_label, NULL);
1216 replace_goto_queue (tf);
1218 else
1220 /* Reachable by goto expressions only. Redirect them. */
1221 for (; q < qe; ++q)
1222 do_goto_redirection (q, finally_label, NULL, tf);
1223 replace_goto_queue (tf);
1225 if (tf->dest_array[0] == tf->fallthru_label)
1227 /* Reachable by goto to fallthru label only. Redirect it
1228 to the new label (already created, sadly), and do not
1229 emit the final branch out, or the fallthru label. */
1230 tf->fallthru_label = NULL;
1231 return;
1235 /* Place the original return/goto to the original destination
1236 immediately after the finally block. */
1237 x = tf->goto_queue[0].cont_stmt;
1238 gimple_seq_add_stmt (&tf->top_p_seq, x);
1239 maybe_record_in_goto_queue (state, x);
1242 /* A subroutine of lower_try_finally. There are multiple edges incoming
1243 and outgoing from the finally block. Implement this by duplicating the
1244 finally block for every destination. */
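/* E.g. a cleanup reachable both by falling through and by a return is
   emitted twice: one copy ends in a goto past the whole construct, the
   other ends in the original return statement.  */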
1246 static void
1247 lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
1249 gimple_seq finally;
1250 gimple_seq new_stmt;
1251 gimple_seq seq;
1252 gimple x;
1253 geh_else *eh_else;
1254 tree tmp;
1255 location_t tf_loc = gimple_location (tf->try_finally_expr);
1257 finally = gimple_try_cleanup (tf->top_p);
1259 /* Notice EH_ELSE, and simplify some of the remaining code
1260 by considering FINALLY to be the normal return path only. */
1261 eh_else = get_eh_else (finally);
1262 if (eh_else)
1263 finally = gimple_eh_else_n_body (eh_else);
1265 tf->top_p_seq = gimple_try_eval (tf->top_p);
1266 new_stmt = NULL;
1268 if (tf->may_fallthru)
1270 seq = lower_try_finally_dup_block (finally, state, tf_loc);
1271 lower_eh_constructs_1 (state, &seq);
1272 gimple_seq_add_seq (&new_stmt, seq);
1274 tmp = lower_try_finally_fallthru_label (tf);
1275 x = gimple_build_goto (tmp);
1276 gimple_set_location (x, tf_loc);
1277 gimple_seq_add_stmt (&new_stmt, x);
1280 if (tf->may_throw)
1282 /* We don't need to copy the EH path of EH_ELSE,
1283 since it is only emitted once. */
1284 if (eh_else)
1285 seq = gimple_eh_else_e_body (eh_else);
1286 else
1287 seq = lower_try_finally_dup_block (finally, state, tf_loc);
1288 lower_eh_constructs_1 (state, &seq);
1290 emit_post_landing_pad (&eh_seq, tf->region);
1291 gimple_seq_add_seq (&eh_seq, seq);
1292 emit_resx (&eh_seq, tf->region);
1295 if (tf->goto_queue)
1297 struct goto_queue_node *q, *qe;
1298 int return_index, index;
1299 struct labels_s
1301 struct goto_queue_node *q;
1302 tree label;
1303 } *labels;
1305 return_index = tf->dest_array.length ();
1306 labels = XCNEWVEC (struct labels_s, return_index + 1);
1308 q = tf->goto_queue;
1309 qe = q + tf->goto_queue_active;
1310 for (; q < qe; q++)
1312 index = q->index < 0 ? return_index : q->index;
1314 if (!labels[index].q)
1315 labels[index].q = q;
1318 for (index = 0; index < return_index + 1; index++)
1320 tree lab;
1322 q = labels[index].q;
1323 if (! q)
1324 continue;
1326 lab = labels[index].label
1327 = create_artificial_label (tf_loc);
1329 if (index == return_index)
1330 do_return_redirection (q, lab, NULL);
1331 else
1332 do_goto_redirection (q, lab, NULL, tf);
1334 x = gimple_build_label (lab);
1335 gimple_seq_add_stmt (&new_stmt, x);
1337 seq = lower_try_finally_dup_block (finally, state, q->location);
1338 lower_eh_constructs_1 (state, &seq);
1339 gimple_seq_add_seq (&new_stmt, seq);
1341 gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
1342 maybe_record_in_goto_queue (state, q->cont_stmt);
1345 for (q = tf->goto_queue; q < qe; q++)
1347 tree lab;
1349 index = q->index < 0 ? return_index : q->index;
1351 if (labels[index].q == q)
1352 continue;
1354 lab = labels[index].label;
1356 if (index == return_index)
1357 do_return_redirection (q, lab, NULL);
1358 else
1359 do_goto_redirection (q, lab, NULL, tf);
1362 replace_goto_queue (tf);
1363 free (labels);
1366 /* Link the new stmts only after running replace_goto_queue, so that
1367 we do not process the same goto stmts twice. */
1368 gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
1371 /* A subroutine of lower_try_finally. There are multiple edges incoming
1372 and outgoing from the finally block. Implement this by instrumenting
1373 each incoming edge and creating a switch statement at the end of the
1374 finally block that branches to the appropriate destination. */
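/* That is: each incoming edge first stores a distinct integer into a
   temporary (finally_tmp), the finally body runs once, and a trailing
   switch on the temporary jumps to the proper destination.  */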
1376 static void
1377 lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
1379 struct goto_queue_node *q, *qe;
1380 tree finally_tmp, finally_label;
1381 int return_index, eh_index, fallthru_index;
1382 int nlabels, ndests, j, last_case_index;
1383 tree last_case;
1384 vec<tree> case_label_vec;
1385 gimple_seq switch_body = NULL;
1386 gimple x;
1387 geh_else *eh_else;
1388 tree tmp;
1389 gimple switch_stmt;
1390 gimple_seq finally;
1391 hash_map<tree, gimple> *cont_map = NULL;
1392 /* The location of the TRY_FINALLY stmt. */
1393 location_t tf_loc = gimple_location (tf->try_finally_expr);
1394 /* The location of the finally block. */
1395 location_t finally_loc;
1397 finally = gimple_try_cleanup (tf->top_p);
1398 eh_else = get_eh_else (finally);
1400 /* Mash the TRY block to the head of the chain. */
1401 tf->top_p_seq = gimple_try_eval (tf->top_p);
1403 /* The location of the finally is either the last stmt in the finally
1404 block or the location of the TRY_FINALLY itself. */
1405 x = gimple_seq_last_stmt (finally);
1406 finally_loc = x ? gimple_location (x) : tf_loc;
1408 /* Prepare for switch statement generation. */
1409 nlabels = tf->dest_array.length ();
1410 return_index = nlabels;
1411 eh_index = return_index + tf->may_return;
1412 fallthru_index = eh_index + (tf->may_throw && !eh_else);
1413 ndests = fallthru_index + tf->may_fallthru;
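/* The switch selector is laid out as: values 0 .. nlabels-1 for the goto
   destinations in dest_array, then one value for returns (if any), one for
   the exception path (unless an EH_ELSE supplies it separately), and one
   for falling through.  */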
1415 finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
1416 finally_label = create_artificial_label (finally_loc);
1418 /* We use vec::quick_push on case_label_vec throughout this function,
1419 since we know the size in advance and allocate precisely as much
1420 space as needed. */
1421 case_label_vec.create (ndests);
1422 last_case = NULL;
1423 last_case_index = 0;
1425 /* Begin inserting code for getting to the finally block. Things
1426 are done in this order to correspond to the sequence in which the code is
1427 laid out. */
1429 if (tf->may_fallthru)
1431 x = gimple_build_assign (finally_tmp,
1432 build_int_cst (integer_type_node,
1433 fallthru_index));
1434 gimple_seq_add_stmt (&tf->top_p_seq, x);
1436 tmp = build_int_cst (integer_type_node, fallthru_index);
1437 last_case = build_case_label (tmp, NULL,
1438 create_artificial_label (tf_loc));
1439 case_label_vec.quick_push (last_case);
1440 last_case_index++;
1442 x = gimple_build_label (CASE_LABEL (last_case));
1443 gimple_seq_add_stmt (&switch_body, x);
1445 tmp = lower_try_finally_fallthru_label (tf);
1446 x = gimple_build_goto (tmp);
1447 gimple_set_location (x, tf_loc);
1448 gimple_seq_add_stmt (&switch_body, x);
1451 /* For EH_ELSE, emit the exception path (plus resx) now, then
1452 subsequently we only need consider the normal path. */
1453 if (eh_else)
1455 if (tf->may_throw)
1457 finally = gimple_eh_else_e_body (eh_else);
1458 lower_eh_constructs_1 (state, &finally);
1460 emit_post_landing_pad (&eh_seq, tf->region);
1461 gimple_seq_add_seq (&eh_seq, finally);
1462 emit_resx (&eh_seq, tf->region);
1465 finally = gimple_eh_else_n_body (eh_else);
1467 else if (tf->may_throw)
1469 emit_post_landing_pad (&eh_seq, tf->region);
1471 x = gimple_build_assign (finally_tmp,
1472 build_int_cst (integer_type_node, eh_index));
1473 gimple_seq_add_stmt (&eh_seq, x);
1475 x = gimple_build_goto (finally_label);
1476 gimple_set_location (x, tf_loc);
1477 gimple_seq_add_stmt (&eh_seq, x);
1479 tmp = build_int_cst (integer_type_node, eh_index);
1480 last_case = build_case_label (tmp, NULL,
1481 create_artificial_label (tf_loc));
1482 case_label_vec.quick_push (last_case);
1483 last_case_index++;
1485 x = gimple_build_label (CASE_LABEL (last_case));
1486 gimple_seq_add_stmt (&eh_seq, x);
1487 emit_resx (&eh_seq, tf->region);
1490 x = gimple_build_label (finally_label);
1491 gimple_seq_add_stmt (&tf->top_p_seq, x);
1493 lower_eh_constructs_1 (state, &finally);
1494 gimple_seq_add_seq (&tf->top_p_seq, finally);
1496 /* Redirect each incoming goto edge. */
1497 q = tf->goto_queue;
1498 qe = q + tf->goto_queue_active;
1499 j = last_case_index + tf->may_return;
1500 /* Prepare the assignments to finally_tmp that are executed upon the
1501 entrance through a particular edge. */
1502 for (; q < qe; ++q)
1504 gimple_seq mod = NULL;
1505 int switch_id;
1506 unsigned int case_index;
1508 if (q->index < 0)
1510 x = gimple_build_assign (finally_tmp,
1511 build_int_cst (integer_type_node,
1512 return_index));
1513 gimple_seq_add_stmt (&mod, x);
1514 do_return_redirection (q, finally_label, mod);
1515 switch_id = return_index;
1517 else
1519 x = gimple_build_assign (finally_tmp,
1520 build_int_cst (integer_type_node, q->index));
1521 gimple_seq_add_stmt (&mod, x);
1522 do_goto_redirection (q, finally_label, mod, tf);
1523 switch_id = q->index;
1526 case_index = j + q->index;
1527 if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
1529 tree case_lab;
1530 tmp = build_int_cst (integer_type_node, switch_id);
1531 case_lab = build_case_label (tmp, NULL,
1532 create_artificial_label (tf_loc));
1533 /* We store the cont_stmt in the pointer map, so that we can recover
1534 it in the loop below. */
1535 if (!cont_map)
1536 cont_map = new hash_map<tree, gimple>;
1537 cont_map->put (case_lab, q->cont_stmt);
1538 case_label_vec.quick_push (case_lab);
1541 for (j = last_case_index; j < last_case_index + nlabels; j++)
1543 gimple cont_stmt;
1545 last_case = case_label_vec[j];
1547 gcc_assert (last_case);
1548 gcc_assert (cont_map);
1550 cont_stmt = *cont_map->get (last_case);
1552 x = gimple_build_label (CASE_LABEL (last_case));
1553 gimple_seq_add_stmt (&switch_body, x);
1554 gimple_seq_add_stmt (&switch_body, cont_stmt);
1555 maybe_record_in_goto_queue (state, cont_stmt);
1557 if (cont_map)
1558 delete cont_map;
1560 replace_goto_queue (tf);
1562 /* Make sure that the last case is the default label, as one is required.
1563 Then sort the labels, which is also required in GIMPLE. */
1564 CASE_LOW (last_case) = NULL;
1565 tree tem = case_label_vec.pop ();
1566 gcc_assert (tem == last_case);
1567 sort_case_labels (case_label_vec);
1569 /* Build the switch statement, setting last_case to be the default
1570 label. */
1571 switch_stmt = gimple_build_switch (finally_tmp, last_case,
1572 case_label_vec);
1573 gimple_set_location (switch_stmt, finally_loc);
1575 /* Link SWITCH_STMT only after running replace_goto_queue, so that
1576 we do not process the same goto stmts twice. */
1577 gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
1578 gimple_seq_add_seq (&tf->top_p_seq, switch_body);
1581 /* Decide whether or not we are going to duplicate the finally block.
1582 There are several considerations.
1584 First, if this is Java, then the finally block contains code
1585 written by the user. It has line numbers associated with it,
1586 so duplicating the block means it's difficult to set a breakpoint.
1587 Since controlling code generation via -g is verboten, we simply
1588 never duplicate code without optimization.
1590 Second, we'd like to prevent egregious code growth. One way to
1591 do this is to estimate the size of the finally block, multiply
1592 that by the number of copies we'd need to make, and compare against
1593 the estimate of the size of the switch machinery we'd have to add. */
1595 static bool
1596 decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
1598 int f_estimate, sw_estimate;
1599 geh_else *eh_else;
1601 /* If there's an EH_ELSE involved, the exception path is separate
1602 and really doesn't come into play for this computation. */
1603 eh_else = get_eh_else (finally);
1604 if (eh_else)
1606 ndests -= may_throw;
1607 finally = gimple_eh_else_n_body (eh_else);
1610 if (!optimize)
1612 gimple_stmt_iterator gsi;
1614 if (ndests == 1)
1615 return true;
1617 for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1619 gimple stmt = gsi_stmt (gsi);
1620 if (!is_gimple_debug (stmt) && !gimple_clobber_p (stmt))
1621 return false;
1623 return true;
1626 /* Finally estimate N times, plus N gotos. */
1627 f_estimate = count_insns_seq (finally, &eni_size_weights);
1628 f_estimate = (f_estimate + 1) * ndests;
1630 /* Switch statement (cost 10), N variable assignments, N gotos. */
1631 sw_estimate = 10 + 2 * ndests;
1633 /* Optimize for size clearly wants our best guess. */
1634 if (optimize_function_for_size_p (cfun))
1635 return f_estimate < sw_estimate;
1637 /* ??? These numbers are completely made up so far. */
1638 if (optimize > 1)
1639 return f_estimate < 100 || f_estimate < sw_estimate * 2;
1640 else
1641 return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
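/* Illustrative numbers only: a finally block weighing 20 insns with 3
   destinations gives f_estimate = (20 + 1) * 3 = 63 and
   sw_estimate = 10 + 2 * 3 = 16.  Optimizing for size picks the switch
   (63 >= 16); at -O2 the copies are still chosen, since 63 < 100.  */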
1644 /* REG is the enclosing region for a possible cleanup region, or the region
1645 itself. Returns TRUE if such a region would be unreachable.
1647 Cleanup regions within a must-not-throw region aren't actually reachable
1648 even if there are throwing stmts within them, because the personality
1649 routine will call terminate before unwinding. */
1651 static bool
1652 cleanup_is_dead_in (eh_region reg)
1654 while (reg && reg->type == ERT_CLEANUP)
1655 reg = reg->outer;
1656 return (reg && reg->type == ERT_MUST_NOT_THROW);
1659 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_FINALLY nodes
1660 to a sequence of labels and blocks, plus the exception region trees
1661 that record all the magic. This is complicated by the need to
1662 arrange for the FINALLY block to be executed on all exits. */
1664 static gimple_seq
1665 lower_try_finally (struct leh_state *state, gtry *tp)
1667 struct leh_tf_state this_tf;
1668 struct leh_state this_state;
1669 int ndests;
1670 gimple_seq old_eh_seq;
1672 /* Process the try block. */
1674 memset (&this_tf, 0, sizeof (this_tf));
1675 this_tf.try_finally_expr = tp;
1676 this_tf.top_p = tp;
1677 this_tf.outer = state;
1678 if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
1680 this_tf.region = gen_eh_region_cleanup (state->cur_region);
1681 this_state.cur_region = this_tf.region;
1683 else
1685 this_tf.region = NULL;
1686 this_state.cur_region = state->cur_region;
1689 this_state.ehp_region = state->ehp_region;
1690 this_state.tf = &this_tf;
1692 old_eh_seq = eh_seq;
1693 eh_seq = NULL;
1695 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1697 /* Determine if the try block is escaped through the bottom. */
1698 this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1700 /* Determine if any exceptions are possible within the try block. */
1701 if (this_tf.region)
1702 this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
1703 if (this_tf.may_throw)
1704 honor_protect_cleanup_actions (state, &this_state, &this_tf);
1706 /* Determine how many edges (still) reach the finally block. Or rather,
1707 how many destinations are reached by the finally block. Use this to
1708 determine how we process the finally block itself. */
1710 ndests = this_tf.dest_array.length ();
1711 ndests += this_tf.may_fallthru;
1712 ndests += this_tf.may_return;
1713 ndests += this_tf.may_throw;
1715 /* If the FINALLY block is not reachable, dike it out. */
1716 if (ndests == 0)
1718 gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
1719 gimple_try_set_cleanup (tp, NULL);
1721 /* If the finally block doesn't fall through, then any destination
1722 we might try to impose there isn't reached either. There may be
1723 some minor amount of cleanup and redirection still needed. */
1724 else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
1725 lower_try_finally_nofallthru (state, &this_tf);
1727 /* We can easily special-case redirection to a single destination. */
1728 else if (ndests == 1)
1729 lower_try_finally_onedest (state, &this_tf);
1730 else if (decide_copy_try_finally (ndests, this_tf.may_throw,
1731 gimple_try_cleanup (tp)))
1732 lower_try_finally_copy (state, &this_tf);
1733 else
1734 lower_try_finally_switch (state, &this_tf);
1736 /* If someone requested we add a label at the end of the transformed
1737 block, do so. */
1738 if (this_tf.fallthru_label)
1740 /* This must be reached only if ndests == 0. */
1741 gimple x = gimple_build_label (this_tf.fallthru_label);
1742 gimple_seq_add_stmt (&this_tf.top_p_seq, x);
1745 this_tf.dest_array.release ();
1746 free (this_tf.goto_queue);
1747 if (this_tf.goto_queue_map)
1748 delete this_tf.goto_queue_map;
1750 /* If there was an old (aka outer) eh_seq, append the current eh_seq.
1751 If there was no old eh_seq, then the append is trivially already done. */
1752 if (old_eh_seq)
1754 if (eh_seq == NULL)
1755 eh_seq = old_eh_seq;
1756 else
1758 gimple_seq new_eh_seq = eh_seq;
1759 eh_seq = old_eh_seq;
1760 gimple_seq_add_seq (&eh_seq, new_eh_seq);
1764 return this_tf.top_p_seq;
1767 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_CATCH with a
1768 list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
1769 exception region trees that record all the magic. */
1771 static gimple_seq
1772 lower_catch (struct leh_state *state, gtry *tp)
1774 eh_region try_region = NULL;
1775 struct leh_state this_state = *state;
1776 gimple_stmt_iterator gsi;
1777 tree out_label;
1778 gimple_seq new_seq, cleanup;
1779 gimple x;
1780 location_t try_catch_loc = gimple_location (tp);
1782 if (flag_exceptions)
1784 try_region = gen_eh_region_try (state->cur_region);
1785 this_state.cur_region = try_region;
1788 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1790 if (!eh_region_may_contain_throw (try_region))
1791 return gimple_try_eval (tp);
1793 new_seq = NULL;
1794 emit_eh_dispatch (&new_seq, try_region);
1795 emit_resx (&new_seq, try_region);
1797 this_state.cur_region = state->cur_region;
1798 this_state.ehp_region = try_region;
1800 /* Add eh_seq from lowering EH in the cleanup sequence after the cleanup
1801 itself, so that e.g. for coverage purposes the nested cleanups don't
1802 appear before the cleanup body. See PR64634 for details. */
1803 gimple_seq old_eh_seq = eh_seq;
1804 eh_seq = NULL;
1806 out_label = NULL;
1807 cleanup = gimple_try_cleanup (tp);
1808 for (gsi = gsi_start (cleanup);
1809 !gsi_end_p (gsi);
1810 gsi_next (&gsi))
1812 eh_catch c;
1813 gcatch *catch_stmt;
1814 gimple_seq handler;
1816 catch_stmt = as_a <gcatch *> (gsi_stmt (gsi));
1817 c = gen_eh_region_catch (try_region, gimple_catch_types (catch_stmt));
1819 handler = gimple_catch_handler (catch_stmt);
1820 lower_eh_constructs_1 (&this_state, &handler);
1822 c->label = create_artificial_label (UNKNOWN_LOCATION);
1823 x = gimple_build_label (c->label);
1824 gimple_seq_add_stmt (&new_seq, x);
1826 gimple_seq_add_seq (&new_seq, handler);
1828 if (gimple_seq_may_fallthru (new_seq))
1830 if (!out_label)
1831 out_label = create_artificial_label (try_catch_loc);
1833 x = gimple_build_goto (out_label);
1834 gimple_seq_add_stmt (&new_seq, x);
1836 if (!c->type_list)
1837 break;
1840 gimple_try_set_cleanup (tp, new_seq);
1842 gimple_seq new_eh_seq = eh_seq;
1843 eh_seq = old_eh_seq;
1844 gimple_seq ret_seq = frob_into_branch_around (tp, try_region, out_label);
1845 gimple_seq_add_seq (&eh_seq, new_eh_seq);
1846 return ret_seq;
1849 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with a
1850 GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1851 region trees that record all the magic. */
1853 static gimple_seq
1854 lower_eh_filter (struct leh_state *state, gtry *tp)
1856 struct leh_state this_state = *state;
1857 eh_region this_region = NULL;
1858 gimple inner, x;
1859 gimple_seq new_seq;
1861 inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1863 if (flag_exceptions)
1865 this_region = gen_eh_region_allowed (state->cur_region,
1866 gimple_eh_filter_types (inner));
1867 this_state.cur_region = this_region;
1870 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1872 if (!eh_region_may_contain_throw (this_region))
1873 return gimple_try_eval (tp);
1875 new_seq = NULL;
1876 this_state.cur_region = state->cur_region;
1877 this_state.ehp_region = this_region;
1879 emit_eh_dispatch (&new_seq, this_region);
1880 emit_resx (&new_seq, this_region);
1882 this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1883 x = gimple_build_label (this_region->u.allowed.label);
1884 gimple_seq_add_stmt (&new_seq, x);
1886 lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
1887 gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1889 gimple_try_set_cleanup (tp, new_seq);
1891 return frob_into_branch_around (tp, this_region, NULL);
1894 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with
1895 a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1896 plus the exception region trees that record all the magic. */
1898 static gimple_seq
1899 lower_eh_must_not_throw (struct leh_state *state, gtry *tp)
1901 struct leh_state this_state = *state;
1903 if (flag_exceptions)
1905 gimple inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1906 eh_region this_region;
1908 this_region = gen_eh_region_must_not_throw (state->cur_region);
1909 this_region->u.must_not_throw.failure_decl
1910 = gimple_eh_must_not_throw_fndecl (
1911 as_a <geh_mnt *> (inner));
1912 this_region->u.must_not_throw.failure_loc
1913 = LOCATION_LOCUS (gimple_location (tp));
1915 /* In order to get mangling applied to this decl, we must mark it
1916 used now. Otherwise, pass_ipa_free_lang_data won't think it
1917 needs to happen. */
1918 TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1920 this_state.cur_region = this_region;
1923 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1925 return gimple_try_eval (tp);
1928 /* Implement a cleanup expression. This is similar to try-finally,
1929 except that we only execute the cleanup block for exception edges. */
1931 static gimple_seq
1932 lower_cleanup (struct leh_state *state, gtry *tp)
1934 struct leh_state this_state = *state;
1935 eh_region this_region = NULL;
1936 struct leh_tf_state fake_tf;
1937 gimple_seq result;
1938 bool cleanup_dead = cleanup_is_dead_in (state->cur_region);
1940 if (flag_exceptions && !cleanup_dead)
1942 this_region = gen_eh_region_cleanup (state->cur_region);
1943 this_state.cur_region = this_region;
1946 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1948 if (cleanup_dead || !eh_region_may_contain_throw (this_region))
1949 return gimple_try_eval (tp);
1951 /* Build enough of a try-finally state so that we can reuse
1952 honor_protect_cleanup_actions. */
1953 memset (&fake_tf, 0, sizeof (fake_tf));
1954 fake_tf.top_p = fake_tf.try_finally_expr = tp;
1955 fake_tf.outer = state;
1956 fake_tf.region = this_region;
1957 fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1958 fake_tf.may_throw = true;
1960 honor_protect_cleanup_actions (state, NULL, &fake_tf);
1962 if (fake_tf.may_throw)
1964 /* In this case honor_protect_cleanup_actions had nothing to do,
1965 and we should process this normally. */
1966 lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
1967 result = frob_into_branch_around (tp, this_region,
1968 fake_tf.fallthru_label);
1970 else
1972 /* In this case honor_protect_cleanup_actions did nearly all of
1973 the work. All we have left is to append the fallthru_label. */
1975 result = gimple_try_eval (tp);
1976 if (fake_tf.fallthru_label)
1978 gimple x = gimple_build_label (fake_tf.fallthru_label);
1979 gimple_seq_add_stmt (&result, x);
1982 return result;
1985 /* Main loop for lowering eh constructs. Also moves gsi to the next
1986 statement. */
1988 static void
1989 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
1991 gimple_seq replace;
1992 gimple x;
1993 gimple stmt = gsi_stmt (*gsi);
1995 switch (gimple_code (stmt))
1997 case GIMPLE_CALL:
1999 tree fndecl = gimple_call_fndecl (stmt);
2000 tree rhs, lhs;
2002 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
2003 switch (DECL_FUNCTION_CODE (fndecl))
2005 case BUILT_IN_EH_POINTER:
2006 /* The front end may have generated a call to
2007 __builtin_eh_pointer (0) within a catch region. Replace
2008 this zero argument with the current catch region number. */
2009 if (state->ehp_region)
2011 tree nr = build_int_cst (integer_type_node,
2012 state->ehp_region->index);
2013 gimple_call_set_arg (stmt, 0, nr);
2015 else
2017 /* The user has done something silly. Remove it. */
2018 rhs = null_pointer_node;
2019 goto do_replace;
2021 break;
2023 case BUILT_IN_EH_FILTER:
2024 /* ??? This should never appear, but since it's a builtin it
2025 is accessible to abuse by users. Just remove it and
2026 replace the use with the arbitrary value zero. */
2027 rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
2028 do_replace:
2029 lhs = gimple_call_lhs (stmt);
2030 x = gimple_build_assign (lhs, rhs);
2031 gsi_insert_before (gsi, x, GSI_SAME_STMT);
2032 /* FALLTHRU */
2034 case BUILT_IN_EH_COPY_VALUES:
2035 /* Likewise this should not appear. Remove it. */
2036 gsi_remove (gsi, true);
2037 return;
2039 default:
2040 break;
2043 /* FALLTHRU */
2045 case GIMPLE_ASSIGN:
2046 /* If the stmt can throw, use a new temporary for the assignment
2047 to a LHS. This makes sure the old value of the LHS is
2048 available on the EH edge. Only do so for statements that
2049 potentially fall through (e.g. no noreturn calls), otherwise
2050 this new assignment might create fake fallthru regions. */
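/* For example, a throwing "x = foo ();" becomes "tmp = foo (); x = tmp;"
   so that x still has its old value along the EH edge.  */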
2051 if (stmt_could_throw_p (stmt)
2052 && gimple_has_lhs (stmt)
2053 && gimple_stmt_may_fallthru (stmt)
2054 && !tree_could_throw_p (gimple_get_lhs (stmt))
2055 && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
2057 tree lhs = gimple_get_lhs (stmt);
2058 tree tmp = create_tmp_var (TREE_TYPE (lhs));
2059 gimple s = gimple_build_assign (lhs, tmp);
2060 gimple_set_location (s, gimple_location (stmt));
2061 gimple_set_block (s, gimple_block (stmt));
2062 gimple_set_lhs (stmt, tmp);
2063 if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
2064 || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
2065 DECL_GIMPLE_REG_P (tmp) = 1;
2066 gsi_insert_after (gsi, s, GSI_SAME_STMT);
2068 /* Look for things that can throw exceptions, and record them. */
2069 if (state->cur_region && stmt_could_throw_p (stmt))
2071 record_stmt_eh_region (state->cur_region, stmt);
2072 note_eh_region_may_contain_throw (state->cur_region);
2074 break;
2076 case GIMPLE_COND:
2077 case GIMPLE_GOTO:
2078 case GIMPLE_RETURN:
2079 maybe_record_in_goto_queue (state, stmt);
2080 break;
2082 case GIMPLE_SWITCH:
2083 verify_norecord_switch_expr (state, as_a <gswitch *> (stmt));
2084 break;
2086 case GIMPLE_TRY:
2088 gtry *try_stmt = as_a <gtry *> (stmt);
2089 if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
2090 replace = lower_try_finally (state, try_stmt);
2091 else
2093 x = gimple_seq_first_stmt (gimple_try_cleanup (try_stmt));
2094 if (!x)
2096 replace = gimple_try_eval (try_stmt);
2097 lower_eh_constructs_1 (state, &replace);
2099 else
2100 switch (gimple_code (x))
2102 case GIMPLE_CATCH:
2103 replace = lower_catch (state, try_stmt);
2104 break;
2105 case GIMPLE_EH_FILTER:
2106 replace = lower_eh_filter (state, try_stmt);
2107 break;
2108 case GIMPLE_EH_MUST_NOT_THROW:
2109 replace = lower_eh_must_not_throw (state, try_stmt);
2110 break;
2111 case GIMPLE_EH_ELSE:
2112 /* This code is only valid with GIMPLE_TRY_FINALLY. */
2113 gcc_unreachable ();
2114 default:
2115 replace = lower_cleanup (state, try_stmt);
2116 break;
2121 /* Remove the old stmt and insert the transformed sequence
2122 instead. */
2123 gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
2124 gsi_remove (gsi, true);
2126 /* Return since we don't want gsi_next () */
2127 return;
2129 case GIMPLE_EH_ELSE:
2130 /* We should be eliminating this in lower_try_finally et al. */
2131 gcc_unreachable ();
2133 default:
2134 /* A type, a decl, or some kind of statement that we're not
2135 interested in. Don't walk them. */
2136 break;
2139 gsi_next (gsi);
2142 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
2144 static void
2145 lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
2147 gimple_stmt_iterator gsi;
2148 for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
2149 lower_eh_constructs_2 (state, &gsi);
2152 namespace {
2154 const pass_data pass_data_lower_eh =
2156 GIMPLE_PASS, /* type */
2157 "eh", /* name */
2158 OPTGROUP_NONE, /* optinfo_flags */
2159 TV_TREE_EH, /* tv_id */
2160 PROP_gimple_lcf, /* properties_required */
2161 PROP_gimple_leh, /* properties_provided */
2162 0, /* properties_destroyed */
2163 0, /* todo_flags_start */
2164 0, /* todo_flags_finish */
2167 class pass_lower_eh : public gimple_opt_pass
2169 public:
2170 pass_lower_eh (gcc::context *ctxt)
2171 : gimple_opt_pass (pass_data_lower_eh, ctxt)
2174 /* opt_pass methods: */
2175 virtual unsigned int execute (function *);
2177 }; // class pass_lower_eh
2179 unsigned int
2180 pass_lower_eh::execute (function *fun)
2182 struct leh_state null_state;
2183 gimple_seq bodyp;
2185 bodyp = gimple_body (current_function_decl);
2186 if (bodyp == NULL)
2187 return 0;
2189 finally_tree = new hash_table<finally_tree_hasher> (31);
2190 eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2191 memset (&null_state, 0, sizeof (null_state));
2193 collect_finally_tree_1 (bodyp, NULL);
2194 lower_eh_constructs_1 (&null_state, &bodyp);
2195 gimple_set_body (current_function_decl, bodyp);
2197 /* We assume there's a return statement, or something, at the end of
2198 the function, and thus plopping the EH sequence afterward won't
2199 change anything. */
2200 gcc_assert (!gimple_seq_may_fallthru (bodyp));
2201 gimple_seq_add_seq (&bodyp, eh_seq);
2203 /* We assume that since BODYP already existed, adding EH_SEQ to it
2204 didn't change its value, and we don't have to re-set the function. */
2205 gcc_assert (bodyp == gimple_body (current_function_decl));
2207 delete finally_tree;
2208 finally_tree = NULL;
2209 BITMAP_FREE (eh_region_may_contain_throw_map);
2210 eh_seq = NULL;
2212 /* If this function needs a language-specific EH personality routine
2213 and the frontend didn't already set one, do so now. */
2214 if (function_needs_eh_personality (fun) == eh_personality_lang
2215 && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2216 DECL_FUNCTION_PERSONALITY (current_function_decl)
2217 = lang_hooks.eh_personality ();
2219 return 0;
2222 } // anon namespace
2224 gimple_opt_pass *
2225 make_pass_lower_eh (gcc::context *ctxt)
2227 return new pass_lower_eh (ctxt);
2230 /* Create the multiple edges from an EH_DISPATCH statement to all of
2231 the possible handlers for its EH region. Return true if there's
2232 no fallthru edge; false if there is. */
2234 bool
2235 make_eh_dispatch_edges (geh_dispatch *stmt)
2237 eh_region r;
2238 eh_catch c;
2239 basic_block src, dst;
2241 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2242 src = gimple_bb (stmt);
2244 switch (r->type)
2246 case ERT_TRY:
2247 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2249 dst = label_to_block (c->label);
2250 make_edge (src, dst, 0);
2252 /* A catch-all handler doesn't have a fallthru. */
2253 if (c->type_list == NULL)
2254 return false;
2256 break;
2258 case ERT_ALLOWED_EXCEPTIONS:
2259 dst = label_to_block (r->u.allowed.label);
2260 make_edge (src, dst, 0);
2261 break;
2263 default:
2264 gcc_unreachable ();
2267 return true;
2270 /* Create the single EH edge from STMT to its nearest landing pad,
2271 if there is such a landing pad within the current function. */
2273 void
2274 make_eh_edges (gimple stmt)
2276 basic_block src, dst;
2277 eh_landing_pad lp;
2278 int lp_nr;
2280 lp_nr = lookup_stmt_eh_lp (stmt);
2281 if (lp_nr <= 0)
2282 return;
2284 lp = get_eh_landing_pad_from_number (lp_nr);
2285 gcc_assert (lp != NULL);
2287 src = gimple_bb (stmt);
2288 dst = label_to_block (lp->post_landing_pad);
2289 make_edge (src, dst, EDGE_EH);
2292 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2293 do not actually perform the final edge redirection.
2295 CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2296 we intend to change the destination EH region as well; this means
2297 EH_LANDING_PAD_NR must already be set on the destination block label.
2298 If false, we're being called from generic cfg manipulation code and we
2299 should preserve our place within the region tree. */
2301 static void
2302 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2304 eh_landing_pad old_lp, new_lp;
2305 basic_block old_bb;
2306 gimple throw_stmt;
2307 int old_lp_nr, new_lp_nr;
2308 tree old_label, new_label;
2309 edge_iterator ei;
2310 edge e;
2312 old_bb = edge_in->dest;
2313 old_label = gimple_block_label (old_bb);
2314 old_lp_nr = EH_LANDING_PAD_NR (old_label);
2315 gcc_assert (old_lp_nr > 0);
2316 old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2318 throw_stmt = last_stmt (edge_in->src);
2319 gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2321 new_label = gimple_block_label (new_bb);
2323 /* Look for an existing region that might be using NEW_BB already. */
2324 new_lp_nr = EH_LANDING_PAD_NR (new_label);
2325 if (new_lp_nr)
2327 new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2328 gcc_assert (new_lp);
2330 /* Unless CHANGE_REGION is true, the new and old landing pad
2331 had better be associated with the same EH region. */
2332 gcc_assert (change_region || new_lp->region == old_lp->region);
2334 else
2336 new_lp = NULL;
2337 gcc_assert (!change_region);
2340 /* Notice when we redirect the last EH edge away from OLD_BB. */
2341 FOR_EACH_EDGE (e, ei, old_bb->preds)
2342 if (e != edge_in && (e->flags & EDGE_EH))
2343 break;
2345 if (new_lp)
2347 /* NEW_LP already exists. If there are still edges into OLD_LP,
2348 there's nothing to do with the EH tree. If there are no more
2349 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2350 If CHANGE_REGION is true, then our caller is expecting to remove
2351 the landing pad. */
2352 if (e == NULL && !change_region)
2353 remove_eh_landing_pad (old_lp);
2355 else
2357 /* No correct landing pad exists. If there are no more edges
2358 into OLD_LP, then we can simply re-use the existing landing pad.
2359 Otherwise, we have to create a new landing pad. */
2360 if (e == NULL)
2362 EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2363 new_lp = old_lp;
2365 else
2366 new_lp = gen_eh_landing_pad (old_lp->region);
2367 new_lp->post_landing_pad = new_label;
2368 EH_LANDING_PAD_NR (new_label) = new_lp->index;
2371 /* Maybe move the throwing statement to the new region. */
2372 if (old_lp != new_lp)
2374 remove_stmt_from_eh_lp (throw_stmt);
2375 add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2379 /* Redirect EH edge E to NEW_BB. */
2381 edge
2382 redirect_eh_edge (edge edge_in, basic_block new_bb)
2384 redirect_eh_edge_1 (edge_in, new_bb, false);
2385 return ssa_redirect_edge (edge_in, new_bb);
2388 /* This is a subroutine of gimple_redirect_edge_and_branch. Update the
2389 labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2390 The actual edge update will happen in the caller. */
2392 void
2393 redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb)
2395 tree new_lab = gimple_block_label (new_bb);
2396 bool any_changed = false;
2397 basic_block old_bb;
2398 eh_region r;
2399 eh_catch c;
2401 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2402 switch (r->type)
2404 case ERT_TRY:
2405 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2407 old_bb = label_to_block (c->label);
2408 if (old_bb == e->dest)
2410 c->label = new_lab;
2411 any_changed = true;
2414 break;
2416 case ERT_ALLOWED_EXCEPTIONS:
2417 old_bb = label_to_block (r->u.allowed.label);
2418 gcc_assert (old_bb == e->dest);
2419 r->u.allowed.label = new_lab;
2420 any_changed = true;
2421 break;
2423 default:
2424 gcc_unreachable ();
2427 gcc_assert (any_changed);
2430 /* Helper function for operation_could_trap_p and stmt_could_throw_p. */
2432 bool
2433 operation_could_trap_helper_p (enum tree_code op,
2434 bool fp_operation,
2435 bool honor_trapv,
2436 bool honor_nans,
2437 bool honor_snans,
2438 tree divisor,
2439 bool *handled)
2441 *handled = true;
2442 switch (op)
2444 case TRUNC_DIV_EXPR:
2445 case CEIL_DIV_EXPR:
2446 case FLOOR_DIV_EXPR:
2447 case ROUND_DIV_EXPR:
2448 case EXACT_DIV_EXPR:
2449 case CEIL_MOD_EXPR:
2450 case FLOOR_MOD_EXPR:
2451 case ROUND_MOD_EXPR:
2452 case TRUNC_MOD_EXPR:
2453 case RDIV_EXPR:
2454 if (honor_snans || honor_trapv)
2455 return true;
2456 if (fp_operation)
2457 return flag_trapping_math;
2458 if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2459 return true;
2460 return false;
2462 case LT_EXPR:
2463 case LE_EXPR:
2464 case GT_EXPR:
2465 case GE_EXPR:
2466 case LTGT_EXPR:
2467 /* Some floating point comparisons may trap. */
2468 return honor_nans;
2470 case EQ_EXPR:
2471 case NE_EXPR:
2472 case UNORDERED_EXPR:
2473 case ORDERED_EXPR:
2474 case UNLT_EXPR:
2475 case UNLE_EXPR:
2476 case UNGT_EXPR:
2477 case UNGE_EXPR:
2478 case UNEQ_EXPR:
2479 return honor_snans;
2481 case NEGATE_EXPR:
2482 case ABS_EXPR:
2483 case CONJ_EXPR:
2484 /* These operations don't trap with floating point. */
2485 if (honor_trapv)
2486 return true;
2487 return false;
2489 case PLUS_EXPR:
2490 case MINUS_EXPR:
2491 case MULT_EXPR:
2492 /* Any floating arithmetic may trap. */
2493 if (fp_operation && flag_trapping_math)
2494 return true;
2495 if (honor_trapv)
2496 return true;
2497 return false;
2499 case COMPLEX_EXPR:
2500 case CONSTRUCTOR:
2501 /* Constructing an object cannot trap. */
2502 return false;
2504 default:
2505 /* Any floating arithmetic may trap. */
2506 if (fp_operation && flag_trapping_math)
2507 return true;
2509 *handled = false;
2510 return false;
2514 /* Return true if operation OP may trap. FP_OPERATION is true if OP is applied
2515 on floating-point values. HONOR_TRAPV is true if OP is applied on integer
2516 type operands that may trap. If OP is a division operator, DIVISOR contains
2517 the value of the divisor. */
2519 bool
2520 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2521 tree divisor)
2523 bool honor_nans = (fp_operation && flag_trapping_math
2524 && !flag_finite_math_only);
2525 bool honor_snans = fp_operation && flag_signaling_nans != 0;
2526 bool handled;
2528 if (TREE_CODE_CLASS (op) != tcc_comparison
2529 && TREE_CODE_CLASS (op) != tcc_unary
2530 && TREE_CODE_CLASS (op) != tcc_binary)
2531 return false;
2533 return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2534 honor_nans, honor_snans, divisor,
2535 &handled);
2539 /* Returns true if it is possible to prove that the index of
2540 an array access REF (an ARRAY_REF expression) falls into the
2541 array bounds. */
2543 static bool
2544 in_array_bounds_p (tree ref)
2546 tree idx = TREE_OPERAND (ref, 1);
2547 tree min, max;
2549 if (TREE_CODE (idx) != INTEGER_CST)
2550 return false;
2552 min = array_ref_low_bound (ref);
2553 max = array_ref_up_bound (ref);
2554 if (!min
2555 || !max
2556 || TREE_CODE (min) != INTEGER_CST
2557 || TREE_CODE (max) != INTEGER_CST)
2558 return false;
2560 if (tree_int_cst_lt (idx, min)
2561 || tree_int_cst_lt (max, idx))
2562 return false;
2564 return true;
2567 /* Returns true if it is possible to prove that the range of
2568 an array access REF (an ARRAY_RANGE_REF expression) falls
2569 into the array bounds. */
2571 static bool
2572 range_in_array_bounds_p (tree ref)
2574 tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
2575 tree range_min, range_max, min, max;
2577 range_min = TYPE_MIN_VALUE (domain_type);
2578 range_max = TYPE_MAX_VALUE (domain_type);
2579 if (!range_min
2580 || !range_max
2581 || TREE_CODE (range_min) != INTEGER_CST
2582 || TREE_CODE (range_max) != INTEGER_CST)
2583 return false;
2585 min = array_ref_low_bound (ref);
2586 max = array_ref_up_bound (ref);
2587 if (!min
2588 || !max
2589 || TREE_CODE (min) != INTEGER_CST
2590 || TREE_CODE (max) != INTEGER_CST)
2591 return false;
2593 if (tree_int_cst_lt (range_min, min)
2594 || tree_int_cst_lt (max, range_max))
2595 return false;
2597 return true;
2600 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2601 location or floating point arithmetic. Cf. the rtl version, may_trap_p.
2602 This routine expects only GIMPLE lhs or rhs input. */
2604 bool
2605 tree_could_trap_p (tree expr)
2607 enum tree_code code;
2608 bool fp_operation = false;
2609 bool honor_trapv = false;
2610 tree t, base, div = NULL_TREE;
2612 if (!expr)
2613 return false;
2615 code = TREE_CODE (expr);
2616 t = TREE_TYPE (expr);
2618 if (t)
2620 if (COMPARISON_CLASS_P (expr))
2621 fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2622 else
2623 fp_operation = FLOAT_TYPE_P (t);
2624 honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2627 if (TREE_CODE_CLASS (code) == tcc_binary)
2628 div = TREE_OPERAND (expr, 1);
2629 if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2630 return true;
2632 restart:
2633 switch (code)
2635 case COMPONENT_REF:
2636 case REALPART_EXPR:
2637 case IMAGPART_EXPR:
2638 case BIT_FIELD_REF:
2639 case VIEW_CONVERT_EXPR:
2640 case WITH_SIZE_EXPR:
2641 expr = TREE_OPERAND (expr, 0);
2642 code = TREE_CODE (expr);
2643 goto restart;
2645 case ARRAY_RANGE_REF:
2646 base = TREE_OPERAND (expr, 0);
2647 if (tree_could_trap_p (base))
2648 return true;
2649 if (TREE_THIS_NOTRAP (expr))
2650 return false;
2651 return !range_in_array_bounds_p (expr);
2653 case ARRAY_REF:
2654 base = TREE_OPERAND (expr, 0);
2655 if (tree_could_trap_p (base))
2656 return true;
2657 if (TREE_THIS_NOTRAP (expr))
2658 return false;
2659 return !in_array_bounds_p (expr);
2661 case TARGET_MEM_REF:
2662 case MEM_REF:
2663 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
2664 && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
2665 return true;
2666 if (TREE_THIS_NOTRAP (expr))
2667 return false;
2668 /* We cannot prove that the access is in-bounds when we have
2669 variable-index TARGET_MEM_REFs. */
2670 if (code == TARGET_MEM_REF
2671 && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
2672 return true;
2673 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2675 tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
2676 offset_int off = mem_ref_offset (expr);
2677 if (wi::neg_p (off, SIGNED))
2678 return true;
2679 if (TREE_CODE (base) == STRING_CST)
2680 return wi::leu_p (TREE_STRING_LENGTH (base), off);
2681 else if (DECL_SIZE_UNIT (base) == NULL_TREE
2682 || TREE_CODE (DECL_SIZE_UNIT (base)) != INTEGER_CST
2683 || wi::leu_p (wi::to_offset (DECL_SIZE_UNIT (base)), off))
2684 return true;
2685 /* Now we are sure the first byte of the access is inside
2686 the object. */
2687 return false;
2689 return true;
2691 case INDIRECT_REF:
2692 return !TREE_THIS_NOTRAP (expr);
2694 case ASM_EXPR:
2695 return TREE_THIS_VOLATILE (expr);
2697 case CALL_EXPR:
2698 t = get_callee_fndecl (expr);
2699 /* Assume that calls to weak functions may trap. */
2700 if (!t || !DECL_P (t))
2701 return true;
2702 if (DECL_WEAK (t))
2703 return tree_could_trap_p (t);
2704 return false;
2706 case FUNCTION_DECL:
2707 /* Assume that accesses to weak functions may trap, unless we know
2708 they are certainly defined in current TU or in some other
2709 LTO partition. */
2710 if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2712 cgraph_node *node = cgraph_node::get (expr);
2713 if (node)
2714 node = node->function_symbol ();
2715 return !(node && node->in_other_partition);
2717 return false;
2719 case VAR_DECL:
2720 /* Assume that accesses to weak vars may trap, unless we know
2721 they are certainly defined in current TU or in some other
2722 LTO partition. */
2723 if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2725 varpool_node *node = varpool_node::get (expr);
2726 if (node)
2727 node = node->ultimate_alias_target ();
2728 return !(node && node->in_other_partition);
2730 return false;
2732 default:
2733 return false;
2738 /* Helper for stmt_could_throw_p. Return true if STMT (assumed to be
2739 an assignment or a conditional) may throw. */
2741 static bool
2742 stmt_could_throw_1_p (gimple stmt)
2744 enum tree_code code = gimple_expr_code (stmt);
2745 bool honor_nans = false;
2746 bool honor_snans = false;
2747 bool fp_operation = false;
2748 bool honor_trapv = false;
2749 tree t;
2750 size_t i;
2751 bool handled, ret;
2753 if (TREE_CODE_CLASS (code) == tcc_comparison
2754 || TREE_CODE_CLASS (code) == tcc_unary
2755 || TREE_CODE_CLASS (code) == tcc_binary)
2757 if (is_gimple_assign (stmt)
2758 && TREE_CODE_CLASS (code) == tcc_comparison)
2759 t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2760 else if (gimple_code (stmt) == GIMPLE_COND)
2761 t = TREE_TYPE (gimple_cond_lhs (stmt));
2762 else
2763 t = gimple_expr_type (stmt);
2764 fp_operation = FLOAT_TYPE_P (t);
2765 if (fp_operation)
2767 honor_nans = flag_trapping_math && !flag_finite_math_only;
2768 honor_snans = flag_signaling_nans != 0;
2770 else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2771 honor_trapv = true;
2774 /* Check if the main expression may trap. */
2775 t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
2776 ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2777 honor_nans, honor_snans, t,
2778 &handled);
2779 if (handled)
2780 return ret;
2782 /* If the expression does not trap, see if any of the individual operands may
2783 trap. */
2784 for (i = 0; i < gimple_num_ops (stmt); i++)
2785 if (tree_could_trap_p (gimple_op (stmt, i)))
2786 return true;
2788 return false;
2792 /* Return true if statement STMT could throw an exception. */
2794 bool
2795 stmt_could_throw_p (gimple stmt)
2797 if (!flag_exceptions)
2798 return false;
2800 /* The only statements that can throw an exception are assignments,
2801 conditionals, calls, resx, and asms. */
2802 switch (gimple_code (stmt))
2804 case GIMPLE_RESX:
2805 return true;
2807 case GIMPLE_CALL:
2808 return !gimple_call_nothrow_p (as_a <gcall *> (stmt));
2810 case GIMPLE_ASSIGN:
2811 case GIMPLE_COND:
2812 if (!cfun->can_throw_non_call_exceptions)
2813 return false;
2814 return stmt_could_throw_1_p (stmt);
2816 case GIMPLE_ASM:
2817 if (!cfun->can_throw_non_call_exceptions)
2818 return false;
2819 return gimple_asm_volatile_p (as_a <gasm *> (stmt));
2821 default:
2822 return false;
2827 /* Return true if expression T could throw an exception. */
2829 bool
2830 tree_could_throw_p (tree t)
2832 if (!flag_exceptions)
2833 return false;
2834 if (TREE_CODE (t) == MODIFY_EXPR)
2836 if (cfun->can_throw_non_call_exceptions
2837 && tree_could_trap_p (TREE_OPERAND (t, 0)))
2838 return true;
2839 t = TREE_OPERAND (t, 1);
2842 if (TREE_CODE (t) == WITH_SIZE_EXPR)
2843 t = TREE_OPERAND (t, 0);
2844 if (TREE_CODE (t) == CALL_EXPR)
2845 return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2846 if (cfun->can_throw_non_call_exceptions)
2847 return tree_could_trap_p (t);
2848 return false;
2851 /* Return true if STMT can throw an exception that is not caught within
2852 the current function (CFUN). */
2854 bool
2855 stmt_can_throw_external (gimple stmt)
2857 int lp_nr;
2859 if (!stmt_could_throw_p (stmt))
2860 return false;
2862 lp_nr = lookup_stmt_eh_lp (stmt);
2863 return lp_nr == 0;
2866 /* Return true if STMT can throw an exception that is caught within
2867 the current function (CFUN). */
2869 bool
2870 stmt_can_throw_internal (gimple stmt)
2872 int lp_nr;
2874 if (!stmt_could_throw_p (stmt))
2875 return false;
2877 lp_nr = lookup_stmt_eh_lp (stmt);
2878 return lp_nr > 0;
2881 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
2882 remove any entry it might have from the EH table. Return true if
2883 any change was made. */
2885 bool
2886 maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
2888 if (stmt_could_throw_p (stmt))
2889 return false;
2890 return remove_stmt_from_eh_lp_fn (ifun, stmt);
2893 /* Likewise, but always use the current function. */
2895 bool
2896 maybe_clean_eh_stmt (gimple stmt)
2898 return maybe_clean_eh_stmt_fn (cfun, stmt);
2901 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
2902 OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
2903 in the table if it should be in there. Return TRUE if a replacement was
2904 done that may require an EH edge purge. */
2906 bool
2907 maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
2909 int lp_nr = lookup_stmt_eh_lp (old_stmt);
2911 if (lp_nr != 0)
2913 bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);
2915 if (new_stmt == old_stmt && new_stmt_could_throw)
2916 return false;
2918 remove_stmt_from_eh_lp (old_stmt);
2919 if (new_stmt_could_throw)
2921 add_stmt_to_eh_lp (new_stmt, lp_nr);
2922 return false;
2924 else
2925 return true;
2928 return false;
2931 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
2932 in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT. The MAP
2933 operand is the return value of duplicate_eh_regions. */
2935 bool
2936 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
2937 struct function *old_fun, gimple old_stmt,
2938 hash_map<void *, void *> *map,
2939 int default_lp_nr)
2941 int old_lp_nr, new_lp_nr;
2943 if (!stmt_could_throw_p (new_stmt))
2944 return false;
2946 old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
2947 if (old_lp_nr == 0)
2949 if (default_lp_nr == 0)
2950 return false;
2951 new_lp_nr = default_lp_nr;
2953 else if (old_lp_nr > 0)
2955 eh_landing_pad old_lp, new_lp;
2957 old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
2958 new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
2959 new_lp_nr = new_lp->index;
2961 else
2963 eh_region old_r, new_r;
2965 old_r = (*old_fun->eh->region_array)[-old_lp_nr];
2966 new_r = static_cast<eh_region> (*map->get (old_r));
2967 new_lp_nr = -new_r->index;
2970 add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
2971 return true;
2974 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
2975 and thus no remapping is required. */
2977 bool
2978 maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
2980 int lp_nr;
2982 if (!stmt_could_throw_p (new_stmt))
2983 return false;
2985 lp_nr = lookup_stmt_eh_lp (old_stmt);
2986 if (lp_nr == 0)
2987 return false;
2989 add_stmt_to_eh_lp (new_stmt, lp_nr);
2990 return true;
2993 /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
2994 GIMPLE_TRY) that are similar enough to be considered the same. Currently
2995 this only handles handlers consisting of a single call, as that's the
2996 important case for C++: a destructor call for a particular object showing
2997 up in multiple handlers. */
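/* For instance, two cleanups that each consist of the single call
   "A::~A (&a)" for the same object compare equal here.  */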
2999 static bool
3000 same_handler_p (gimple_seq oneh, gimple_seq twoh)
3002 gimple_stmt_iterator gsi;
3003 gimple ones, twos;
3004 unsigned int ai;
3006 gsi = gsi_start (oneh);
3007 if (!gsi_one_before_end_p (gsi))
3008 return false;
3009 ones = gsi_stmt (gsi);
3011 gsi = gsi_start (twoh);
3012 if (!gsi_one_before_end_p (gsi))
3013 return false;
3014 twos = gsi_stmt (gsi);
3016 if (!is_gimple_call (ones)
3017 || !is_gimple_call (twos)
3018 || gimple_call_lhs (ones)
3019 || gimple_call_lhs (twos)
3020 || gimple_call_chain (ones)
3021 || gimple_call_chain (twos)
3022 || !gimple_call_same_target_p (ones, twos)
3023 || gimple_call_num_args (ones) != gimple_call_num_args (twos))
3024 return false;
3026 for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
3027 if (!operand_equal_p (gimple_call_arg (ones, ai),
3028 gimple_call_arg (twos, ai), 0))
3029 return false;
3031 return true;
3034 /* Optimize
3035 try { A() } finally { try { ~B() } catch { ~A() } }
3036 try { ... } finally { ~A() }
3037 into
3038 try { A() } catch { ~B() }
3039 try { ~B() ... } finally { ~A() }
3041 This occurs frequently in C++, where A is a local variable and B is a
3042 temporary used in the initializer for A. */
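/* Roughly, at the source level this comes from something like

     A a = f (B ());

   where B () is a temporary used in the initializer of the local
   variable a: the temporary must be destroyed once a's initialization
   is complete, and a itself at the end of the enclosing scope.  The
   nested try/finally constructs above are approximately what results.  */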
3044 static void
3045 optimize_double_finally (gtry *one, gtry *two)
3047 gimple oneh;
3048 gimple_stmt_iterator gsi;
3049 gimple_seq cleanup;
3051 cleanup = gimple_try_cleanup (one);
3052 gsi = gsi_start (cleanup);
3053 if (!gsi_one_before_end_p (gsi))
3054 return;
3056 oneh = gsi_stmt (gsi);
3057 if (gimple_code (oneh) != GIMPLE_TRY
3058 || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
3059 return;
3061 if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
3063 gimple_seq seq = gimple_try_eval (oneh);
3065 gimple_try_set_cleanup (one, seq);
3066 gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
3067 seq = copy_gimple_seq_and_replace_locals (seq);
3068 gimple_seq_add_seq (&seq, gimple_try_eval (two));
3069 gimple_try_set_eval (two, seq);
3073 /* Perform EH refactoring optimizations that are simpler to do when code
3074 flow has been lowered but EH structures haven't. */
3076 static void
3077 refactor_eh_r (gimple_seq seq)
3079 gimple_stmt_iterator gsi;
3080 gimple one, two;
3082 one = NULL;
3083 two = NULL;
3084 gsi = gsi_start (seq);
3085 while (1)
3087 one = two;
3088 if (gsi_end_p (gsi))
3089 two = NULL;
3090 else
3091 two = gsi_stmt (gsi);
3092 if (one && two)
3093 if (gtry *try_one = dyn_cast <gtry *> (one))
3094 if (gtry *try_two = dyn_cast <gtry *> (two))
3095 if (gimple_try_kind (try_one) == GIMPLE_TRY_FINALLY
3096 && gimple_try_kind (try_two) == GIMPLE_TRY_FINALLY)
3097 optimize_double_finally (try_one, try_two);
3098 if (one)
3099 switch (gimple_code (one))
3101 case GIMPLE_TRY:
3102 refactor_eh_r (gimple_try_eval (one));
3103 refactor_eh_r (gimple_try_cleanup (one));
3104 break;
3105 case GIMPLE_CATCH:
3106 refactor_eh_r (gimple_catch_handler (as_a <gcatch *> (one)));
3107 break;
3108 case GIMPLE_EH_FILTER:
3109 refactor_eh_r (gimple_eh_filter_failure (one));
3110 break;
3111 case GIMPLE_EH_ELSE:
3113 geh_else *eh_else_stmt = as_a <geh_else *> (one);
3114 refactor_eh_r (gimple_eh_else_n_body (eh_else_stmt));
3115 refactor_eh_r (gimple_eh_else_e_body (eh_else_stmt));
3117 break;
3118 default:
3119 break;
3121 if (two)
3122 gsi_next (&gsi);
3123 else
3124 break;
3128 namespace {
3130 const pass_data pass_data_refactor_eh =
3132 GIMPLE_PASS, /* type */
3133 "ehopt", /* name */
3134 OPTGROUP_NONE, /* optinfo_flags */
3135 TV_TREE_EH, /* tv_id */
3136 PROP_gimple_lcf, /* properties_required */
3137 0, /* properties_provided */
3138 0, /* properties_destroyed */
3139 0, /* todo_flags_start */
3140 0, /* todo_flags_finish */
3143 class pass_refactor_eh : public gimple_opt_pass
3145 public:
3146 pass_refactor_eh (gcc::context *ctxt)
3147 : gimple_opt_pass (pass_data_refactor_eh, ctxt)
3150 /* opt_pass methods: */
3151 virtual bool gate (function *) { return flag_exceptions != 0; }
3152 virtual unsigned int execute (function *)
3154 refactor_eh_r (gimple_body (current_function_decl));
3155 return 0;
3158 }; // class pass_refactor_eh
3160 } // anon namespace
3162 gimple_opt_pass *
3163 make_pass_refactor_eh (gcc::context *ctxt)
3165 return new pass_refactor_eh (ctxt);
3168 /* At the end of gimple optimization, we can lower RESX. */
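/* Depending on what is known about the resumed exception, the RESX is
   expanded below in one of three ways:

   - no source region: the statement cannot meaningfully execute, so it
     is replaced by a call to __builtin_trap;

   - a destination region within this function: the exception pointer
     and filter values are copied via __builtin_eh_copy_values (or, for
     a MUST_NOT_THROW destination, the failure decl is called) and the
     EH edge becomes a fallthru edge past the landing pad;

   - no destination region: the exception escapes the function, so we
     call _Unwind_Resume with the value returned by __builtin_eh_pointer
     (or, on ARM EABI, __cxa_end_cleanup with no arguments).  */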
3170 static bool
3171 lower_resx (basic_block bb, gresx *stmt,
3172 hash_map<eh_region, tree> *mnt_map)
3174 int lp_nr;
3175 eh_region src_r, dst_r;
3176 gimple_stmt_iterator gsi;
3177 gimple x;
3178 tree fn, src_nr;
3179 bool ret = false;
3181 lp_nr = lookup_stmt_eh_lp (stmt);
3182 if (lp_nr != 0)
3183 dst_r = get_eh_region_from_lp_number (lp_nr);
3184 else
3185 dst_r = NULL;
3187 src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3188 gsi = gsi_last_bb (bb);
3190 if (src_r == NULL)
3192 /* We can wind up with no source region when pass_cleanup_eh shows
3193 that there are no entries into an eh region and deletes it, but
3194 then the block that contains the resx isn't removed. This can
3195 happen without optimization when the switch statement created by
3196 lower_try_finally_switch isn't simplified to remove the eh case.
3198 Resolve this by expanding the resx node to an abort. */
3200 fn = builtin_decl_implicit (BUILT_IN_TRAP);
3201 x = gimple_build_call (fn, 0);
3202 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3204 while (EDGE_COUNT (bb->succs) > 0)
3205 remove_edge (EDGE_SUCC (bb, 0));
3207 else if (dst_r)
3209 /* When we have a destination region, we resolve this by copying
3210 the exception pointer and filter values into place, and changing the edge
3211 to immediately after the landing pad. */
3212 edge e;
3214 if (lp_nr < 0)
3216 basic_block new_bb;
3217 tree lab;
3219 /* We are resuming into a MUST_NOT_THROW region. Expand a call to
3220 the failure decl into a new block, if needed. */
3221 gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3223 tree *slot = mnt_map->get (dst_r);
3224 if (slot == NULL)
3226 gimple_stmt_iterator gsi2;
3228 new_bb = create_empty_bb (bb);
3229 add_bb_to_loop (new_bb, bb->loop_father);
3230 lab = gimple_block_label (new_bb);
3231 gsi2 = gsi_start_bb (new_bb);
3233 fn = dst_r->u.must_not_throw.failure_decl;
3234 x = gimple_build_call (fn, 0);
3235 gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3236 gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3238 mnt_map->put (dst_r, lab);
3240 else
3242 lab = *slot;
3243 new_bb = label_to_block (lab);
3246 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3247 e = make_edge (bb, new_bb, EDGE_FALLTHRU);
3248 e->count = bb->count;
3249 e->probability = REG_BR_PROB_BASE;
3251 else
3253 edge_iterator ei;
3254 tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3256 fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3257 src_nr = build_int_cst (integer_type_node, src_r->index);
3258 x = gimple_build_call (fn, 2, dst_nr, src_nr);
3259 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3261 /* Update the flags for the outgoing edge. */
3262 e = single_succ_edge (bb);
3263 gcc_assert (e->flags & EDGE_EH);
3264 e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3266 /* If there are no more EH users of the landing pad, delete it. */
3267 FOR_EACH_EDGE (e, ei, e->dest->preds)
3268 if (e->flags & EDGE_EH)
3269 break;
3270 if (e == NULL)
3272 eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3273 remove_eh_landing_pad (lp);
3277 ret = true;
3279 else
3281 tree var;
3283 /* When we don't have a destination region, this exception escapes
3284 up the call chain. We resolve this by generating a call to the
3285 _Unwind_Resume library function. */
3287 /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3288 with no arguments for C++ and Java. Check for that. */
3289 if (src_r->use_cxa_end_cleanup)
3291 fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
3292 x = gimple_build_call (fn, 0);
3293 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3295 else
3297 fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
3298 src_nr = build_int_cst (integer_type_node, src_r->index);
3299 x = gimple_build_call (fn, 1, src_nr);
3300 var = create_tmp_var (ptr_type_node);
3301 var = make_ssa_name (var, x);
3302 gimple_call_set_lhs (x, var);
3303 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3305 fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
3306 x = gimple_build_call (fn, 1, var);
3307 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3310 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3313 gsi_remove (&gsi, true);
3315 return ret;
3318 namespace {
3320 const pass_data pass_data_lower_resx =
3322 GIMPLE_PASS, /* type */
3323 "resx", /* name */
3324 OPTGROUP_NONE, /* optinfo_flags */
3325 TV_TREE_EH, /* tv_id */
3326 PROP_gimple_lcf, /* properties_required */
3327 0, /* properties_provided */
3328 0, /* properties_destroyed */
3329 0, /* todo_flags_start */
3330 0, /* todo_flags_finish */
3333 class pass_lower_resx : public gimple_opt_pass
3335 public:
3336 pass_lower_resx (gcc::context *ctxt)
3337 : gimple_opt_pass (pass_data_lower_resx, ctxt)
3340 /* opt_pass methods: */
3341 virtual bool gate (function *) { return flag_exceptions != 0; }
3342 virtual unsigned int execute (function *);
3344 }; // class pass_lower_resx
3346 unsigned
3347 pass_lower_resx::execute (function *fun)
3349 basic_block bb;
3350 bool dominance_invalidated = false;
3351 bool any_rewritten = false;
3353 hash_map<eh_region, tree> mnt_map;
3355 FOR_EACH_BB_FN (bb, fun)
3357 gimple last = last_stmt (bb);
3358 if (last && is_gimple_resx (last))
3360 dominance_invalidated |=
3361 lower_resx (bb, as_a <gresx *> (last), &mnt_map);
3362 any_rewritten = true;
3366 if (dominance_invalidated)
3368 free_dominance_info (CDI_DOMINATORS);
3369 free_dominance_info (CDI_POST_DOMINATORS);
3372 return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3375 } // anon namespace
3377 gimple_opt_pass *
3378 make_pass_lower_resx (gcc::context *ctxt)
3380 return new pass_lower_resx (ctxt);
3383 /* Try to optimize var = {v} {CLOBBER} stmts followed just by
3384 external throw. */
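/* A clobber "var = {v} {CLOBBER}" only marks the end of var's lifetime;
   when the block does nothing else of substance before a resx that
   throws out of the function, the clobbers have no effect worth keeping
   and are simply deleted.  */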
3386 static void
3387 optimize_clobbers (basic_block bb)
3389 gimple_stmt_iterator gsi = gsi_last_bb (bb);
3390 bool any_clobbers = false;
3391 bool seen_stack_restore = false;
3392 edge_iterator ei;
3393 edge e;
3395 /* Only optimize anything if the bb contains at least one clobber,
3396 ends with a resx (checked by the caller), otherwise contains only
3397 debug stmts, labels, and at most one __builtin_stack_restore call,
3398 and has an incoming EH edge. */
3399 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3401 gimple stmt = gsi_stmt (gsi);
3402 if (is_gimple_debug (stmt))
3403 continue;
3404 if (gimple_clobber_p (stmt))
3406 any_clobbers = true;
3407 continue;
3409 if (!seen_stack_restore
3410 && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
3412 seen_stack_restore = true;
3413 continue;
3415 if (gimple_code (stmt) == GIMPLE_LABEL)
3416 break;
3417 return;
3419 if (!any_clobbers)
3420 return;
3421 FOR_EACH_EDGE (e, ei, bb->preds)
3422 if (e->flags & EDGE_EH)
3423 break;
3424 if (e == NULL)
3425 return;
3426 gsi = gsi_last_bb (bb);
3427 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3429 gimple stmt = gsi_stmt (gsi);
3430 if (!gimple_clobber_p (stmt))
3431 continue;
3432 unlink_stmt_vdef (stmt);
3433 gsi_remove (&gsi, true);
3434 release_defs (stmt);
3438 /* Try to sink var = {v} {CLOBBER} stmts followed just by
3439 internal throw to successor BB. */
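/* Here the exception is handled within the function, so instead of
   deleting the clobbers we move them across the EH edge into the
   successor block, adjusting the virtual operands as needed.  */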
3441 static int
3442 sink_clobbers (basic_block bb)
3444 edge e;
3445 edge_iterator ei;
3446 gimple_stmt_iterator gsi, dgsi;
3447 basic_block succbb;
3448 bool any_clobbers = false;
3449 unsigned todo = 0;
3451 /* Only optimize if BB has a single EH successor and
3452 all predecessor edges are EH too. */
3453 if (!single_succ_p (bb)
3454 || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
3455 return 0;
3457 FOR_EACH_EDGE (e, ei, bb->preds)
3459 if ((e->flags & EDGE_EH) == 0)
3460 return 0;
3463 /* And BB contains only CLOBBER stmts before the final
3464 RESX. */
3465 gsi = gsi_last_bb (bb);
3466 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3468 gimple stmt = gsi_stmt (gsi);
3469 if (is_gimple_debug (stmt))
3470 continue;
3471 if (gimple_code (stmt) == GIMPLE_LABEL)
3472 break;
3473 if (!gimple_clobber_p (stmt))
3474 return 0;
3475 any_clobbers = true;
3477 if (!any_clobbers)
3478 return 0;
3480 edge succe = single_succ_edge (bb);
3481 succbb = succe->dest;
3483 /* See if there is a virtual PHI node to take an updated virtual
3484 operand from. */
3485 gphi *vphi = NULL;
3486 tree vuse = NULL_TREE;
3487 for (gphi_iterator gpi = gsi_start_phis (succbb);
3488 !gsi_end_p (gpi); gsi_next (&gpi))
3490 tree res = gimple_phi_result (gpi.phi ());
3491 if (virtual_operand_p (res))
3493 vphi = gpi.phi ();
3494 vuse = res;
3495 break;
3499 dgsi = gsi_after_labels (succbb);
3500 gsi = gsi_last_bb (bb);
3501 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3503 gimple stmt = gsi_stmt (gsi);
3504 tree lhs;
3505 if (is_gimple_debug (stmt))
3506 continue;
3507 if (gimple_code (stmt) == GIMPLE_LABEL)
3508 break;
3509 lhs = gimple_assign_lhs (stmt);
3510 /* Unfortunately we don't have dominance info updated at this
3511 point, so checking if
3512 dominated_by_p (CDI_DOMINATORS, succbb,
3513 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0)))
3514 would be too costly. Thus, avoid sinking any clobbers that
3515 refer to non-(D) SSA_NAMEs. */
3516 if (TREE_CODE (lhs) == MEM_REF
3517 && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
3518 && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
3520 unlink_stmt_vdef (stmt);
3521 gsi_remove (&gsi, true);
3522 release_defs (stmt);
3523 continue;
3526 /* As we do not change stmt order when sinking across a
3527 forwarder edge we can keep virtual operands in place. */
3528 gsi_remove (&gsi, false);
3529 gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
3531 /* But adjust virtual operands if we sunk across a PHI node. */
3532 if (vuse)
3534 gimple use_stmt;
3535 imm_use_iterator iter;
3536 use_operand_p use_p;
3537 FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
3538 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3539 SET_USE (use_p, gimple_vdef (stmt));
3540 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
3542 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
3543 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
3545 /* Adjust the incoming virtual operand. */
3546 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
3547 SET_USE (gimple_vuse_op (stmt), vuse);
3549 /* If the successor has multiple predecessors but no virtual PHI node,
3550 arrange for virtual operands to be renamed. */
3551 else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
3552 && !single_pred_p (succbb))
3554 /* In this case there will be no use of the VDEF of this stmt.
3555 ??? Unless this is a secondary opportunity and we have not
3556 removed unreachable blocks yet, so we cannot assert this.
3557 Which also means we will end up renaming too many times. */
3558 SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
3559 mark_virtual_operands_for_renaming (cfun);
3560 todo |= TODO_update_ssa_only_virtuals;
3564 return todo;
3567 /* At the end of inlining, we can lower EH_DISPATCH. Return true when
3568 we have found some duplicate labels and removed some edges. */
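/* The dispatch is rewritten below in terms of __builtin_eh_filter:

   - an ERT_TRY region becomes a GIMPLE_SWITCH on the filter value, with
     one case label per distinct handler filter and the catch-all (or
     the fallthru edge) as the default;

   - an ERT_ALLOWED_EXCEPTIONS region becomes a GIMPLE_COND comparing
     the filter value against the region's allowed filter.  */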
3570 static bool
3571 lower_eh_dispatch (basic_block src, geh_dispatch *stmt)
3573 gimple_stmt_iterator gsi;
3574 int region_nr;
3575 eh_region r;
3576 tree filter, fn;
3577 gimple x;
3578 bool redirected = false;
3580 region_nr = gimple_eh_dispatch_region (stmt);
3581 r = get_eh_region_from_number (region_nr);
3583 gsi = gsi_last_bb (src);
3585 switch (r->type)
3587 case ERT_TRY:
3589 auto_vec<tree> labels;
3590 tree default_label = NULL;
3591 eh_catch c;
3592 edge_iterator ei;
3593 edge e;
3594 hash_set<tree> seen_values;
3596 /* Collect the labels for a switch. Zero the post_landing_pad
3597 field because we'll no longer have anything keeping these labels
3598 in existence and the optimizer will be free to merge these
3599 blocks at will. */
3600 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3602 tree tp_node, flt_node, lab = c->label;
3603 bool have_label = false;
3605 c->label = NULL;
3606 tp_node = c->type_list;
3607 flt_node = c->filter_list;
3609 if (tp_node == NULL)
3611 default_label = lab;
3612 break;
3616 /* Filter out duplicate labels that arise when this handler
3617 is shadowed by an earlier one. When no labels are
3618 attached to the handler anymore, we remove
3619 the corresponding edge and then we delete unreachable
3620 blocks at the end of this pass. */
3621 if (! seen_values.contains (TREE_VALUE (flt_node)))
3623 tree t = build_case_label (TREE_VALUE (flt_node),
3624 NULL, lab);
3625 labels.safe_push (t);
3626 seen_values.add (TREE_VALUE (flt_node));
3627 have_label = true;
3630 tp_node = TREE_CHAIN (tp_node);
3631 flt_node = TREE_CHAIN (flt_node);
3633 while (tp_node);
3634 if (! have_label)
3636 remove_edge (find_edge (src, label_to_block (lab)));
3637 redirected = true;
3641 /* Clean up the edge flags. */
3642 FOR_EACH_EDGE (e, ei, src->succs)
3644 if (e->flags & EDGE_FALLTHRU)
3646 /* If there was no catch-all, use the fallthru edge. */
3647 if (default_label == NULL)
3648 default_label = gimple_block_label (e->dest);
3649 e->flags &= ~EDGE_FALLTHRU;
3652 gcc_assert (default_label != NULL);
3654 /* Don't generate a switch if there's only a default case.
3655 This is common in the form of try { A; } catch (...) { B; }. */
3656 if (!labels.exists ())
3658 e = single_succ_edge (src);
3659 e->flags |= EDGE_FALLTHRU;
3661 else
3663 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3664 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3665 region_nr));
3666 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3667 filter = make_ssa_name (filter, x);
3668 gimple_call_set_lhs (x, filter);
3669 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3671 /* Turn the default label into a default case. */
3672 default_label = build_case_label (NULL, NULL, default_label);
3673 sort_case_labels (labels);
3675 x = gimple_build_switch (filter, default_label, labels);
3676 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3679 break;
3681 case ERT_ALLOWED_EXCEPTIONS:
3683 edge b_e = BRANCH_EDGE (src);
3684 edge f_e = FALLTHRU_EDGE (src);
3686 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3687 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3688 region_nr));
3689 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3690 filter = make_ssa_name (filter, x);
3691 gimple_call_set_lhs (x, filter);
3692 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3694 r->u.allowed.label = NULL;
3695 x = gimple_build_cond (EQ_EXPR, filter,
3696 build_int_cst (TREE_TYPE (filter),
3697 r->u.allowed.filter),
3698 NULL_TREE, NULL_TREE);
3699 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3701 b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3702 f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3704 break;
3706 default:
3707 gcc_unreachable ();
3710 /* Replace the EH_DISPATCH with the SWITCH or COND generated above. */
3711 gsi_remove (&gsi, true);
3712 return redirected;
3715 namespace {
3717 const pass_data pass_data_lower_eh_dispatch =
3719 GIMPLE_PASS, /* type */
3720 "ehdisp", /* name */
3721 OPTGROUP_NONE, /* optinfo_flags */
3722 TV_TREE_EH, /* tv_id */
3723 PROP_gimple_lcf, /* properties_required */
3724 0, /* properties_provided */
3725 0, /* properties_destroyed */
3726 0, /* todo_flags_start */
3727 0, /* todo_flags_finish */
3730 class pass_lower_eh_dispatch : public gimple_opt_pass
3732 public:
3733 pass_lower_eh_dispatch (gcc::context *ctxt)
3734 : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
3737 /* opt_pass methods: */
3738 virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
3739 virtual unsigned int execute (function *);
3741 }; // class pass_lower_eh_dispatch
3743 unsigned
3744 pass_lower_eh_dispatch::execute (function *fun)
3746 basic_block bb;
3747 int flags = 0;
3748 bool redirected = false;
3750 assign_filter_values ();
3752 FOR_EACH_BB_FN (bb, fun)
3754 gimple last = last_stmt (bb);
3755 if (last == NULL)
3756 continue;
3757 if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3759 redirected |= lower_eh_dispatch (bb,
3760 as_a <geh_dispatch *> (last));
3761 flags |= TODO_update_ssa_only_virtuals;
3763 else if (gimple_code (last) == GIMPLE_RESX)
3765 if (stmt_can_throw_external (last))
3766 optimize_clobbers (bb);
3767 else
3768 flags |= sink_clobbers (bb);
3772 if (redirected)
3773 delete_unreachable_blocks ();
3774 return flags;
3777 } // anon namespace
3779 gimple_opt_pass *
3780 make_pass_lower_eh_dispatch (gcc::context *ctxt)
3782 return new pass_lower_eh_dispatch (ctxt);
3785 /* Walk statements, see what regions and, optionally, landing pads
3786 are really referenced.
3788 Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
3789 and in LP_REACHABLEP an sbitmap with bits set for reachable landing pads.
3791 Passing NULL for LP_REACHABLE is valid, in this case only reachable
3792 regions are marked.
3794 The caller is responsible for freeing the returned sbitmaps. */
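/* Used by remove_unreachable_handlers and
   remove_unreachable_handlers_no_lp below.  */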
3796 static void
3797 mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
3799 sbitmap r_reachable, lp_reachable;
3800 basic_block bb;
3801 bool mark_landing_pads = (lp_reachablep != NULL);
3802 gcc_checking_assert (r_reachablep != NULL);
3804 r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
3805 bitmap_clear (r_reachable);
3806 *r_reachablep = r_reachable;
3808 if (mark_landing_pads)
3810 lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
3811 bitmap_clear (lp_reachable);
3812 *lp_reachablep = lp_reachable;
3814 else
3815 lp_reachable = NULL;
3817 FOR_EACH_BB_FN (bb, cfun)
3819 gimple_stmt_iterator gsi;
3821 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3823 gimple stmt = gsi_stmt (gsi);
3825 if (mark_landing_pads)
3827 int lp_nr = lookup_stmt_eh_lp (stmt);
3829 /* Negative LP numbers are MUST_NOT_THROW regions which
3830 are not considered BB enders. */
3831 if (lp_nr < 0)
3832 bitmap_set_bit (r_reachable, -lp_nr);
3834 /* Positive LP numbers are real landing pads, and BB enders. */
3835 else if (lp_nr > 0)
3837 gcc_assert (gsi_one_before_end_p (gsi));
3838 eh_region region = get_eh_region_from_lp_number (lp_nr);
3839 bitmap_set_bit (r_reachable, region->index);
3840 bitmap_set_bit (lp_reachable, lp_nr);
3844 /* Avoid removing regions referenced from RESX/EH_DISPATCH. */
3845 switch (gimple_code (stmt))
3847 case GIMPLE_RESX:
3848 bitmap_set_bit (r_reachable,
3849 gimple_resx_region (as_a <gresx *> (stmt)));
3850 break;
3851 case GIMPLE_EH_DISPATCH:
3852 bitmap_set_bit (r_reachable,
3853 gimple_eh_dispatch_region (
3854 as_a <geh_dispatch *> (stmt)));
3855 break;
3856 case GIMPLE_CALL:
3857 if (gimple_call_builtin_p (stmt, BUILT_IN_EH_COPY_VALUES))
3858 for (int i = 0; i < 2; ++i)
3860 tree rt = gimple_call_arg (stmt, i);
3861 HOST_WIDE_INT ri = tree_to_shwi (rt);
3863 gcc_assert (ri == (int) ri);
3864 bitmap_set_bit (r_reachable, ri);
3866 break;
3867 default:
3868 break;
3874 /* Remove unreachable handlers and unreachable landing pads. */
3876 static void
3877 remove_unreachable_handlers (void)
3879 sbitmap r_reachable, lp_reachable;
3880 eh_region region;
3881 eh_landing_pad lp;
3882 unsigned i;
3884 mark_reachable_handlers (&r_reachable, &lp_reachable);
3886 if (dump_file)
3888 fprintf (dump_file, "Before removal of unreachable regions:\n");
3889 dump_eh_tree (dump_file, cfun);
3890 fprintf (dump_file, "Reachable regions: ");
3891 dump_bitmap_file (dump_file, r_reachable);
3892 fprintf (dump_file, "Reachable landing pads: ");
3893 dump_bitmap_file (dump_file, lp_reachable);
3896 if (dump_file)
3898 FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3899 if (region && !bitmap_bit_p (r_reachable, region->index))
3900 fprintf (dump_file,
3901 "Removing unreachable region %d\n",
3902 region->index);
3905 remove_unreachable_eh_regions (r_reachable);
3907 FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3908 if (lp && !bitmap_bit_p (lp_reachable, lp->index))
3910 if (dump_file)
3911 fprintf (dump_file,
3912 "Removing unreachable landing pad %d\n",
3913 lp->index);
3914 remove_eh_landing_pad (lp);
3917 if (dump_file)
3919 fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
3920 dump_eh_tree (dump_file, cfun);
3921 fprintf (dump_file, "\n\n");
3924 sbitmap_free (r_reachable);
3925 sbitmap_free (lp_reachable);
3927 #ifdef ENABLE_CHECKING
3928 verify_eh_tree (cfun);
3929 #endif
3932 /* Remove unreachable handlers if any landing pads have been removed after
3933 the last ehcleanup pass (due to gimple_purge_dead_eh_edges). */
3935 void
3936 maybe_remove_unreachable_handlers (void)
3938 eh_landing_pad lp;
3939 unsigned i;
3941 if (cfun->eh == NULL)
3942 return;
3944 FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3945 if (lp && lp->post_landing_pad)
3947 if (label_to_block (lp->post_landing_pad) == NULL)
3949 remove_unreachable_handlers ();
3950 return;
3955 /* Remove regions that do not have landing pads. This assumes
3956 that remove_unreachable_handlers has already been run, and
3957 that we've just manipulated the landing pads since then.
3959 Preserve regions with landing pads and regions that prevent
3960 exceptions from propagating further, even if these regions
3961 are not reachable. */
3963 static void
3964 remove_unreachable_handlers_no_lp (void)
3966 eh_region region;
3967 sbitmap r_reachable;
3968 unsigned i;
3970 mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
3972 FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3974 if (! region)
3975 continue;
3977 if (region->landing_pads != NULL
3978 || region->type == ERT_MUST_NOT_THROW)
3979 bitmap_set_bit (r_reachable, region->index);
3981 if (dump_file
3982 && !bitmap_bit_p (r_reachable, region->index))
3983 fprintf (dump_file,
3984 "Removing unreachable region %d\n",
3985 region->index);
3988 remove_unreachable_eh_regions (r_reachable);
3990 sbitmap_free (r_reachable);
3993 /* Undo critical edge splitting on an EH landing pad. Earlier, we
3994 optimistically split all sorts of edges, including EH edges. The
3995 optimization passes in between may not have needed them; if not,
3996 we should undo the split.
3998 Recognize this case by having one EH edge incoming to the BB and
3999 one normal edge outgoing; BB should be empty apart from the
4000 post_landing_pad label.
4002 Note that this is slightly different from the empty handler case
4003 handled by cleanup_empty_eh, in that the actual handler may yet
4004 have actual code but the landing pad has been separated from the
4005 handler. As such, cleanup_empty_eh relies on this transformation
4006 having been done first. */
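/* In CFG terms the split looks roughly like

     pred --EH--> lp_bb --fallthru--> handler

   and unsplitting redirects the EH edge so that it becomes

     pred --EH--> handler

   leaving the old landing pad block unreachable.  */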
4008 static bool
4009 unsplit_eh (eh_landing_pad lp)
4011 basic_block bb = label_to_block (lp->post_landing_pad);
4012 gimple_stmt_iterator gsi;
4013 edge e_in, e_out;
4015 /* Quickly check the edge counts on BB for singularity. */
4016 if (!single_pred_p (bb) || !single_succ_p (bb))
4017 return false;
4018 e_in = single_pred_edge (bb);
4019 e_out = single_succ_edge (bb);
4021 /* Input edge must be EH and output edge must be normal. */
4022 if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
4023 return false;
4025 /* The block must be empty except for the labels and debug insns. */
4026 gsi = gsi_after_labels (bb);
4027 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4028 gsi_next_nondebug (&gsi);
4029 if (!gsi_end_p (gsi))
4030 return false;
4032 /* The destination block must not already have a landing pad
4033 for a different region. */
4034 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4036 glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4037 tree lab;
4038 int lp_nr;
4040 if (!label_stmt)
4041 break;
4042 lab = gimple_label_label (label_stmt);
4043 lp_nr = EH_LANDING_PAD_NR (lab);
4044 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4045 return false;
4048 /* The new destination block must not already be a destination of
4049 the source block, lest we merge fallthru and eh edges and get
4050 all sorts of confused. */
4051 if (find_edge (e_in->src, e_out->dest))
4052 return false;
4054 /* ??? We can get degenerate phis due to cfg cleanups. I would have
4055 thought this should have been cleaned up by a phicprop pass, but
4056 that doesn't appear to handle virtuals. Propagate by hand. */
4057 if (!gimple_seq_empty_p (phi_nodes (bb)))
4059 for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); )
4061 gimple use_stmt;
4062 gphi *phi = gpi.phi ();
4063 tree lhs = gimple_phi_result (phi);
4064 tree rhs = gimple_phi_arg_def (phi, 0);
4065 use_operand_p use_p;
4066 imm_use_iterator iter;
4068 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4070 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4071 SET_USE (use_p, rhs);
4074 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
4075 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
4077 remove_phi_node (&gpi, true);
4081 if (dump_file && (dump_flags & TDF_DETAILS))
4082 fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
4083 lp->index, e_out->dest->index);
4085 /* Redirect the edge. Since redirect_eh_edge_1 expects to be moving
4086 a successor edge, humor it. But do the real CFG change with the
4087 predecessor of E_OUT in order to preserve the ordering of arguments
4088 to the PHI nodes in E_OUT->DEST. */
4089 redirect_eh_edge_1 (e_in, e_out->dest, false);
4090 redirect_edge_pred (e_out, e_in->src);
4091 e_out->flags = e_in->flags;
4092 e_out->probability = e_in->probability;
4093 e_out->count = e_in->count;
4094 remove_edge (e_in);
4096 return true;
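/* Illustrative sketch, not part of GCC: the forwarder-removal idea behind
   unsplit_eh, on a toy CFG where each block has at most one successor.  The
   toy_block structure and toy_unsplit are hypothetical; the real pass must
   in addition check EH flags, landing-pad labels and PHIs as above.  */

struct toy_block
{
  struct toy_block *succ;	/* single successor, or NULL */
  int num_preds;
  int num_stmts;		/* statements other than labels/debug */
};

static bool
toy_unsplit (struct toy_block *pred, struct toy_block *mid)
{
  /* MID must be empty and have exactly one predecessor and one successor.  */
  if (mid->num_preds != 1 || mid->succ == NULL || mid->num_stmts != 0)
    return false;

  /* Route PRED straight to MID's successor, leaving MID unreachable.  */
  pred->succ = mid->succ;
  mid->num_preds = 0;
  return true;
}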
4099 /* Examine each landing pad block and see if it matches unsplit_eh. */
4101 static bool
4102 unsplit_all_eh (void)
4104 bool changed = false;
4105 eh_landing_pad lp;
4106 int i;
4108 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4109 if (lp)
4110 changed |= unsplit_eh (lp);
4112 return changed;
4115 /* A subroutine of cleanup_empty_eh. Redirect all EH edges incoming
4116 to OLD_BB to NEW_BB; return true on success, false on failure.
4118 OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
4119 PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
4120 Virtual PHIs may be deleted and marked for renaming. */
4122 static bool
4123 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
4124 edge old_bb_out, bool change_region)
4126 gphi_iterator ngsi, ogsi;
4127 edge_iterator ei;
4128 edge e;
4129 bitmap ophi_handled;
4131 /* The destination block must not be a regular successor for any
4132 of the preds of the landing pad. Thus, avoid turning
4133 <..>
4134 | \ EH
4135 | <..>
4137 <..>
4138 into
4139 <..>
4140 | | EH
4141 <..>
4142 which CFG verification would choke on. See PR45172 and PR51089. */
4143 FOR_EACH_EDGE (e, ei, old_bb->preds)
4144 if (find_edge (e->src, new_bb))
4145 return false;
4147 FOR_EACH_EDGE (e, ei, old_bb->preds)
4148 redirect_edge_var_map_clear (e);
4150 ophi_handled = BITMAP_ALLOC (NULL);
4152 /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
4153 for the edges we're going to move. */
4154 for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
4156 gphi *ophi, *nphi = ngsi.phi ();
4157 tree nresult, nop;
4159 nresult = gimple_phi_result (nphi);
4160 nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
4162 /* Find the corresponding PHI in OLD_BB so we can forward-propagate
4163 the source ssa_name. */
4164 ophi = NULL;
4165 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4167 ophi = ogsi.phi ();
4168 if (gimple_phi_result (ophi) == nop)
4169 break;
4170 ophi = NULL;
4173 /* If we did find the corresponding PHI, copy those inputs. */
4174 if (ophi)
4176 /* If NOP is used somewhere else beyond phis in new_bb, give up. */
4177 if (!has_single_use (nop))
4179 imm_use_iterator imm_iter;
4180 use_operand_p use_p;
4182 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
4184 if (!gimple_debug_bind_p (USE_STMT (use_p))
4185 && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
4186 || gimple_bb (USE_STMT (use_p)) != new_bb))
4187 goto fail;
4190 bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
4191 FOR_EACH_EDGE (e, ei, old_bb->preds)
4193 location_t oloc;
4194 tree oop;
4196 if ((e->flags & EDGE_EH) == 0)
4197 continue;
4198 oop = gimple_phi_arg_def (ophi, e->dest_idx);
4199 oloc = gimple_phi_arg_location (ophi, e->dest_idx);
4200 redirect_edge_var_map_add (e, nresult, oop, oloc);
4203 /* If we didn't find the PHI, then whether it's a real variable or a VOP,
4204 we know from the fact that OLD_BB is tree_empty_eh_handler_p that the
4205 variable is unchanged from input to the block, so we can simply
4206 re-use the input to NEW_BB from the OLD_BB_OUT edge. */
4207 else
4209 location_t nloc
4210 = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
4211 FOR_EACH_EDGE (e, ei, old_bb->preds)
4212 redirect_edge_var_map_add (e, nresult, nop, nloc);
4216 /* Second, verify that all PHIs from OLD_BB have been handled. If not,
4217 we don't know what values from the other edges into NEW_BB to use. */
4218 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4220 gphi *ophi = ogsi.phi ();
4221 tree oresult = gimple_phi_result (ophi);
4222 if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
4223 goto fail;
4226 /* Finally, move the edges and update the PHIs. */
4227 for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
4228 if (e->flags & EDGE_EH)
4230 /* ??? CFG manipulation routines do not try to update loop
4231 form on edge redirection. Do so manually here for now. */
4232 /* If we redirect a loop entry or latch edge, that will either create
4233 a multiple-entry loop or rotate the loop. If the loops merge
4234 we may have created a loop with multiple latches.
4235 None of this is easily fixed, thus cancel the affected loop
4236 and mark the other loop as possibly having multiple latches. */
4237 if (e->dest == e->dest->loop_father->header)
4239 mark_loop_for_removal (e->dest->loop_father);
4240 new_bb->loop_father->latch = NULL;
4241 loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
4243 redirect_eh_edge_1 (e, new_bb, change_region);
4244 redirect_edge_succ (e, new_bb);
4245 flush_pending_stmts (e);
4247 else
4248 ei_next (&ei);
4250 BITMAP_FREE (ophi_handled);
4251 return true;
4253 fail:
4254 FOR_EACH_EDGE (e, ei, old_bb->preds)
4255 redirect_edge_var_map_clear (e);
4256 BITMAP_FREE (ophi_handled);
4257 return false;
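/* Illustrative sketch, not part of GCC: the argument-forwarding rule of
   cleanup_empty_eh_merge_phis, on toy PHIs that record one SSA version per
   incoming edge.  toy_phi and toy_forward_phi_arg are hypothetical; the real
   code additionally guards against uses of NOP outside NEW_BB.  */

struct toy_phi
{
  int result;			/* SSA version defined by this PHI */
  int args[8];			/* argument SSA version per incoming edge */
};

static int
toy_forward_phi_arg (const struct toy_phi *nphi, int old_bb_out_idx,
		     const struct toy_phi *old_phis, int n_old,
		     int old_edge_idx)
{
  int nop = nphi->args[old_bb_out_idx];
  int i;

  /* If NOP is itself defined by a PHI in the old block, forward the value
     that PHI receives on the edge being moved.  */
  for (i = 0; i < n_old; ++i)
    if (old_phis[i].result == nop)
      return old_phis[i].args[old_edge_idx];

  /* Otherwise the value is unchanged through the old block: reuse NOP.  */
  return nop;
}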
4260 /* A subroutine of cleanup_empty_eh. Move a landing pad LP from its
4261 old region to NEW_REGION at BB. */
4263 static void
4264 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
4265 eh_landing_pad lp, eh_region new_region)
4267 gimple_stmt_iterator gsi;
4268 eh_landing_pad *pp;
4270 for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
4271 continue;
4272 *pp = lp->next_lp;
4274 lp->region = new_region;
4275 lp->next_lp = new_region->landing_pads;
4276 new_region->landing_pads = lp;
4278 /* Delete the RESX that was matched within the empty handler block. */
4279 gsi = gsi_last_bb (bb);
4280 unlink_stmt_vdef (gsi_stmt (gsi));
4281 gsi_remove (&gsi, true);
4283 /* Clean up E_OUT for the fallthru. */
4284 e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
4285 e_out->probability = REG_BR_PROB_BASE;
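/* Illustrative sketch, not part of GCC: the pointer-to-pointer unlink idiom
   that cleanup_empty_eh_move_lp uses to move LP between the landing-pad
   lists of two regions, shown on a toy singly linked list.  toy_node and
   toy_move_node are hypothetical; NODE is assumed to be present in
   *FROM_LIST, just as LP is assumed to be on its region's list above.  */

struct toy_node
{
  struct toy_node *next;
};

static void
toy_move_node (struct toy_node **from_list, struct toy_node *node,
	       struct toy_node **to_list)
{
  struct toy_node **pp;

  /* Walk the chain of NEXT pointers until PP points at NODE, then splice it
     out without needing a separate "previous" variable.  */
  for (pp = from_list; *pp != node; pp = &(*pp)->next)
    continue;
  *pp = node->next;

  /* Push NODE at the head of the destination list.  */
  node->next = *to_list;
  *to_list = node;
}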
4288 /* A subroutine of cleanup_empty_eh. Handle more complex cases of
4289 unsplitting than unsplit_eh was prepared to handle, e.g. when
4290 multiple incoming edges and phis are involved. */
4292 static bool
4293 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
4295 gimple_stmt_iterator gsi;
4296 tree lab;
4298 /* We really ought not have totally lost everything following
4299 a landing pad label. Given that BB is empty, there had better
4300 be a successor. */
4301 gcc_assert (e_out != NULL);
4303 /* The destination block must not already have a landing pad
4304 for a different region. */
4305 lab = NULL;
4306 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4308 glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4309 int lp_nr;
4311 if (!stmt)
4312 break;
4313 lab = gimple_label_label (stmt);
4314 lp_nr = EH_LANDING_PAD_NR (lab);
4315 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4316 return false;
4319 /* Attempt to move the PHIs into the successor block. */
4320 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
4322 if (dump_file && (dump_flags & TDF_DETAILS))
4323 fprintf (dump_file,
4324 "Unsplit EH landing pad %d to block %i "
4325 "(via cleanup_empty_eh).\n",
4326 lp->index, e_out->dest->index);
4327 return true;
4330 return false;
4333 /* Return true if edge E_FIRST is part of an empty infinite loop
4334 or leads to such a loop through a series of single successor
4335 empty bbs. */
4337 static bool
4338 infinite_empty_loop_p (edge e_first)
4340 bool inf_loop = false;
4341 edge e;
4343 if (e_first->dest == e_first->src)
4344 return true;
4346 e_first->src->aux = (void *) 1;
4347 for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
4349 gimple_stmt_iterator gsi;
4350 if (e->dest->aux)
4352 inf_loop = true;
4353 break;
4355 e->dest->aux = (void *) 1;
4356 gsi = gsi_after_labels (e->dest);
4357 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4358 gsi_next_nondebug (&gsi);
4359 if (!gsi_end_p (gsi))
4360 break;
4362 e_first->src->aux = NULL;
4363 for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
4364 e->dest->aux = NULL;
4366 return inf_loop;
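/* Illustrative sketch, not part of GCC: the marking scheme of
   infinite_empty_loop_p, on a toy chain of single-successor nodes.  Visited
   nodes are flagged, so revisiting one means the chain closed into a cycle;
   the flags are cleared again before returning, as the real code does with
   bb->aux.  toy_chain_node and toy_chain_loops_p are hypothetical and the
   "block is empty" test is elided.  */

struct toy_chain_node
{
  struct toy_chain_node *succ;
  bool visited;
};

static bool
toy_chain_loops_p (struct toy_chain_node *start)
{
  struct toy_chain_node *n;
  bool loop = false;

  for (n = start; n && n->succ; n = n->succ)
    {
      if (n->visited)
	{
	  loop = true;
	  break;
	}
      n->visited = true;
    }

  /* Clear the marks so the structure is left untouched.  */
  for (n = start; n && n->visited; n = n->succ)
    n->visited = false;

  return loop;
}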
4369 /* Examine the block associated with LP to determine if it's an empty
4370 handler for its EH region. If so, attempt to redirect EH edges to
4371 an outer region. Return true if the CFG was updated in any way. This
4372 is similar to jump forwarding, just across EH edges. */
4374 static bool
4375 cleanup_empty_eh (eh_landing_pad lp)
4377 basic_block bb = label_to_block (lp->post_landing_pad);
4378 gimple_stmt_iterator gsi;
4379 gimple resx;
4380 eh_region new_region;
4381 edge_iterator ei;
4382 edge e, e_out;
4383 bool has_non_eh_pred;
4384 bool ret = false;
4385 int new_lp_nr;
4387 /* There can be zero or one edges out of BB. This is the quickest test. */
4388 switch (EDGE_COUNT (bb->succs))
4390 case 0:
4391 e_out = NULL;
4392 break;
4393 case 1:
4394 e_out = single_succ_edge (bb);
4395 break;
4396 default:
4397 return false;
4400 resx = last_stmt (bb);
4401 if (resx && is_gimple_resx (resx))
4403 if (stmt_can_throw_external (resx))
4404 optimize_clobbers (bb);
4405 else if (sink_clobbers (bb))
4406 ret = true;
4409 gsi = gsi_after_labels (bb);
4411 /* Make sure to skip debug statements. */
4412 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4413 gsi_next_nondebug (&gsi);
4415 /* If the block is totally empty, look for more unsplitting cases. */
4416 if (gsi_end_p (gsi))
4418 /* Bail out for the degenerate case of an infinite loop. Also bail
4419 out if BB has no successors and is totally empty, which can happen
4420 e.g. because of an incorrect noreturn attribute. */
4421 if (e_out == NULL
4422 || infinite_empty_loop_p (e_out))
4423 return ret;
4425 return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
4428 /* The block should consist only of a single RESX statement, modulo a
4429 preceding call to __builtin_stack_restore if there is no outgoing
4430 edge, since the call can be eliminated in this case. */
4431 resx = gsi_stmt (gsi);
4432 if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
4434 gsi_next (&gsi);
4435 resx = gsi_stmt (gsi);
4437 if (!is_gimple_resx (resx))
4438 return ret;
4439 gcc_assert (gsi_one_before_end_p (gsi));
4441 /* Determine if there are non-EH edges, or resx edges into the handler. */
4442 has_non_eh_pred = false;
4443 FOR_EACH_EDGE (e, ei, bb->preds)
4444 if (!(e->flags & EDGE_EH))
4445 has_non_eh_pred = true;
4447 /* Find the handler that's outer of the empty handler by looking at
4448 where the RESX instruction was vectored. */
4449 new_lp_nr = lookup_stmt_eh_lp (resx);
4450 new_region = get_eh_region_from_lp_number (new_lp_nr);
4452 /* If there's no destination region within the current function,
4453 redirection is trivial via removing the throwing statements from
4454 the EH region, removing the EH edges, and allowing the block
4455 to go unreachable. */
4456 if (new_region == NULL)
4458 gcc_assert (e_out == NULL);
4459 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4460 if (e->flags & EDGE_EH)
4462 gimple stmt = last_stmt (e->src);
4463 remove_stmt_from_eh_lp (stmt);
4464 remove_edge (e);
4466 else
4467 ei_next (&ei);
4468 goto succeed;
4471 /* If the destination region is a MUST_NOT_THROW, allow the runtime
4472 to handle the abort and allow the blocks to go unreachable. */
4473 if (new_region->type == ERT_MUST_NOT_THROW)
4475 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4476 if (e->flags & EDGE_EH)
4478 gimple stmt = last_stmt (e->src);
4479 remove_stmt_from_eh_lp (stmt);
4480 add_stmt_to_eh_lp (stmt, new_lp_nr);
4481 remove_edge (e);
4483 else
4484 ei_next (&ei);
4485 goto succeed;
4488 /* Try to redirect the EH edges and merge the PHIs into the destination
4489 landing pad block. If the merge succeeds, we'll already have redirected
4490 all the EH edges. The handler itself will go unreachable if there were
4491 no normal edges. */
4492 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
4493 goto succeed;
4495 /* Finally, if all input edges are EH edges, then we can (potentially)
4496 reduce the number of transfers from the runtime by moving the landing
4497 pad from the original region to the new region. This is a win when
4498 we remove the last CLEANUP region along a particular exception
4499 propagation path. Since nothing changes except for the region with
4500 which the landing pad is associated, the PHI nodes do not need to be
4501 adjusted at all. */
4502 if (!has_non_eh_pred)
4504 cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
4505 if (dump_file && (dump_flags & TDF_DETAILS))
4506 fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
4507 lp->index, new_region->index);
4509 /* ??? The CFG didn't change, but we may have rendered the
4510 old EH region unreachable. Trigger a cleanup there. */
4511 return true;
4514 return ret;
4516 succeed:
4517 if (dump_file && (dump_flags & TDF_DETAILS))
4518 fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
4519 remove_eh_landing_pad (lp);
4520 return true;
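/* Illustrative source-level sketch, not part of GCC: with -fexceptions and
   optimization, an empty cleanup like the one below typically lowers to an
   EH region whose landing pad does nothing but re-raise, which is the shape
   cleanup_empty_eh looks for.  The functions noop_cleanup and may_throw are
   hypothetical stand-ins; the precise GIMPLE depends on inlining and earlier
   EH lowering.

     static void noop_cleanup (int *p) { (void) p; }
     extern void may_throw (void);

     void
     example (void)
     {
       int guard __attribute__ ((cleanup (noop_cleanup))) = 0;
       may_throw ();
     }

   Once noop_cleanup is inlined away, the landing pad of the cleanup region
   is an empty block ending in a RESX, so its EH edges can be redirected to
   the enclosing region, or removed entirely if nothing encloses it.  */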
4523 /* Do a post-order traversal of the EH region tree. Examine each
4524 post_landing_pad block and see if we can eliminate it as empty. */
4526 static bool
4527 cleanup_all_empty_eh (void)
4529 bool changed = false;
4530 eh_landing_pad lp;
4531 int i;
4533 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4534 if (lp)
4535 changed |= cleanup_empty_eh (lp);
4537 return changed;
4540 /* Perform cleanups and lowering of exception handling:
4541 1) cleanup regions whose handlers do nothing are optimized out
4542 2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
4543 3) info about regions containing instructions, and regions
4544 reachable via local EH edges, is collected
4545 4) the EH tree is pruned of regions that are no longer necessary.
4547 TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
4548 Unify those that have the same failure decl and locus.
4551 static unsigned int
4552 execute_cleanup_eh_1 (void)
4554 /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
4555 looking up unreachable landing pads. */
4556 remove_unreachable_handlers ();
4558 /* Watch out for the region tree vanishing because all regions were unreachable. */
4559 if (cfun->eh->region_tree)
4561 bool changed = false;
4563 if (optimize)
4564 changed |= unsplit_all_eh ();
4565 changed |= cleanup_all_empty_eh ();
4567 if (changed)
4569 free_dominance_info (CDI_DOMINATORS);
4570 free_dominance_info (CDI_POST_DOMINATORS);
4572 /* We delayed all basic block deletion, as we may have performed
4573 cleanups on EH edges while non-EH edges were still present. */
4574 delete_unreachable_blocks ();
4576 /* We manipulated the landing pads. Remove any region that no
4577 longer has a landing pad. */
4578 remove_unreachable_handlers_no_lp ();
4580 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4584 return 0;
4587 namespace {
4589 const pass_data pass_data_cleanup_eh =
4591 GIMPLE_PASS, /* type */
4592 "ehcleanup", /* name */
4593 OPTGROUP_NONE, /* optinfo_flags */
4594 TV_TREE_EH, /* tv_id */
4595 PROP_gimple_lcf, /* properties_required */
4596 0, /* properties_provided */
4597 0, /* properties_destroyed */
4598 0, /* todo_flags_start */
4599 0, /* todo_flags_finish */
4602 class pass_cleanup_eh : public gimple_opt_pass
4604 public:
4605 pass_cleanup_eh (gcc::context *ctxt)
4606 : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
4609 /* opt_pass methods: */
4610 opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
4611 virtual bool gate (function *fun)
4613 return fun->eh != NULL && fun->eh->region_tree != NULL;
4616 virtual unsigned int execute (function *);
4618 }; // class pass_cleanup_eh
4620 unsigned int
4621 pass_cleanup_eh::execute (function *fun)
4623 int ret = execute_cleanup_eh_1 ();
4625 /* If the function no longer needs an EH personality routine,
4626 clear it. This exposes cross-language inlining opportunities
4627 and avoids references to a never-defined personality routine. */
4628 if (DECL_FUNCTION_PERSONALITY (current_function_decl)
4629 && function_needs_eh_personality (fun) != eh_personality_lang)
4630 DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
4632 return ret;
4635 } // anon namespace
4637 gimple_opt_pass *
4638 make_pass_cleanup_eh (gcc::context *ctxt)
4640 return new pass_cleanup_eh (ctxt);
4643 /* Verify that BB, which contains STMT as its last statement, has precisely
4644 the edge that make_eh_edges would create. */
4646 DEBUG_FUNCTION bool
4647 verify_eh_edges (gimple stmt)
4649 basic_block bb = gimple_bb (stmt);
4650 eh_landing_pad lp = NULL;
4651 int lp_nr;
4652 edge_iterator ei;
4653 edge e, eh_edge;
4655 lp_nr = lookup_stmt_eh_lp (stmt);
4656 if (lp_nr > 0)
4657 lp = get_eh_landing_pad_from_number (lp_nr);
4659 eh_edge = NULL;
4660 FOR_EACH_EDGE (e, ei, bb->succs)
4662 if (e->flags & EDGE_EH)
4664 if (eh_edge)
4666 error ("BB %i has multiple EH edges", bb->index);
4667 return true;
4669 else
4670 eh_edge = e;
4674 if (lp == NULL)
4676 if (eh_edge)
4678 error ("BB %i can not throw but has an EH edge", bb->index);
4679 return true;
4681 return false;
4684 if (!stmt_could_throw_p (stmt))
4686 error ("BB %i last statement has incorrectly set lp", bb->index);
4687 return true;
4690 if (eh_edge == NULL)
4692 error ("BB %i is missing an EH edge", bb->index);
4693 return true;
4696 if (eh_edge->dest != label_to_block (lp->post_landing_pad))
4698 error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
4699 return true;
4702 return false;
4705 /* Similarly, but handle GIMPLE_EH_DISPATCH specifically. */
4707 DEBUG_FUNCTION bool
4708 verify_eh_dispatch_edge (geh_dispatch *stmt)
4710 eh_region r;
4711 eh_catch c;
4712 basic_block src, dst;
4713 bool want_fallthru = true;
4714 edge_iterator ei;
4715 edge e, fall_edge;
4717 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
4718 src = gimple_bb (stmt);
4720 FOR_EACH_EDGE (e, ei, src->succs)
4721 gcc_assert (e->aux == NULL);
4723 switch (r->type)
4725 case ERT_TRY:
4726 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
4728 dst = label_to_block (c->label);
4729 e = find_edge (src, dst);
4730 if (e == NULL)
4732 error ("BB %i is missing an edge", src->index);
4733 return true;
4735 e->aux = (void *)e;
4737 /* A catch-all handler doesn't have a fallthru. */
4738 if (c->type_list == NULL)
4740 want_fallthru = false;
4741 break;
4744 break;
4746 case ERT_ALLOWED_EXCEPTIONS:
4747 dst = label_to_block (r->u.allowed.label);
4748 e = find_edge (src, dst);
4749 if (e == NULL)
4751 error ("BB %i is missing an edge", src->index);
4752 return true;
4754 e->aux = (void *)e;
4755 break;
4757 default:
4758 gcc_unreachable ();
4761 fall_edge = NULL;
4762 FOR_EACH_EDGE (e, ei, src->succs)
4764 if (e->flags & EDGE_FALLTHRU)
4766 if (fall_edge != NULL)
4768 error ("BB %i too many fallthru edges", src->index);
4769 return true;
4771 fall_edge = e;
4773 else if (e->aux)
4774 e->aux = NULL;
4775 else
4777 error ("BB %i has incorrect edge", src->index);
4778 return true;
4781 if ((fall_edge != NULL) ^ want_fallthru)
4783 error ("BB %i has incorrect fallthru edge", src->index);
4784 return true;
4787 return false;
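/* Illustrative sketch, not part of GCC: the mark-and-sweep edge check used
   by verify_eh_dispatch_edge, on a toy edge array.  Expected edges are
   marked up front; the sweep then insists every edge is either the single
   permitted fallthru or marked, and that the fallthru exists exactly when
   wanted.  toy_edge and toy_verify_edges are hypothetical.  */

struct toy_edge
{
  bool expected;	/* plays the role of e->aux */
  bool fallthru;	/* plays the role of EDGE_FALLTHRU */
};

static bool
toy_verify_edges (const struct toy_edge *edges, int n, bool want_fallthru)
{
  bool saw_fallthru = false;
  int i;

  for (i = 0; i < n; ++i)
    {
      if (edges[i].fallthru)
	{
	  if (saw_fallthru)
	    return false;	/* more than one fallthru edge */
	  saw_fallthru = true;
	}
      else if (!edges[i].expected)
	return false;		/* an edge nobody asked for */
    }

  return saw_fallthru == want_fallthru;
}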