[official-gcc.git] / gcc / tree-eh.c
blob db2818490a97bbbbaa582a8b2197ba75943f213f
1 /* Exception handling semantics and decomposition for trees.
2 Copyright (C) 2003-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "hash-table.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "expr.h"
27 #include "calls.h"
28 #include "flags.h"
29 #include "function.h"
30 #include "except.h"
31 #include "hash-set.h"
32 #include "pointer-set.h"
33 #include "basic-block.h"
34 #include "tree-ssa-alias.h"
35 #include "internal-fn.h"
36 #include "tree-eh.h"
37 #include "gimple-expr.h"
38 #include "is-a.h"
39 #include "gimple.h"
40 #include "gimple-iterator.h"
41 #include "gimple-ssa.h"
42 #include "cgraph.h"
43 #include "tree-cfg.h"
44 #include "tree-phinodes.h"
45 #include "ssa-iterators.h"
46 #include "stringpool.h"
47 #include "tree-ssanames.h"
48 #include "tree-into-ssa.h"
49 #include "tree-ssa.h"
50 #include "tree-inline.h"
51 #include "tree-pass.h"
52 #include "langhooks.h"
53 #include "diagnostic-core.h"
54 #include "target.h"
55 #include "cfgloop.h"
56 #include "gimple-low.h"
58 /* In some instances a tree and a gimple need to be stored in the same table,
59 i.e. in hash tables. This is a structure to do this. */
60 typedef union {tree *tp; tree t; gimple g;} treemple;
62 /* Misc functions used in this file. */
64 /* Remember and lookup EH landing pad data for arbitrary statements.
65 Really this means any statement that could_throw_p. We could
66 stuff this information into the stmt_ann data structure, but:
68 (1) We absolutely rely on this information being kept until
69 we get to rtl. Once we're done with lowering here, if we lose
70 the information there's no way to recover it!
72 (2) There are many more statements that *cannot* throw as
73 compared to those that can. We should be saving some amount
74 of space by only allocating memory for those that can throw. */
76 /* Add statement T in function IFUN to landing pad NUM. */
78 static void
79 add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
81 struct throw_stmt_node *n;
82 void **slot;
84 gcc_assert (num != 0);
86 n = ggc_alloc<throw_stmt_node> ();
87 n->stmt = t;
88 n->lp_nr = num;
90 if (!get_eh_throw_stmt_table (ifun))
91 set_eh_throw_stmt_table (ifun, htab_create_ggc (31, struct_ptr_hash,
92 struct_ptr_eq,
93 ggc_free));
95 slot = htab_find_slot (get_eh_throw_stmt_table (ifun), n, INSERT);
96 gcc_assert (!*slot);
97 *slot = n;
100 /* Add statement T in the current function (cfun) to EH landing pad NUM. */
102 void
103 add_stmt_to_eh_lp (gimple t, int num)
105 add_stmt_to_eh_lp_fn (cfun, t, num);
108 /* Add statement T to the single EH landing pad in REGION. */
110 static void
111 record_stmt_eh_region (eh_region region, gimple t)
113 if (region == NULL)
114 return;
115 if (region->type == ERT_MUST_NOT_THROW)
116 add_stmt_to_eh_lp_fn (cfun, t, -region->index);
117 else
119 eh_landing_pad lp = region->landing_pads;
120 if (lp == NULL)
121 lp = gen_eh_landing_pad (region);
122 else
123 gcc_assert (lp->next_lp == NULL);
124 add_stmt_to_eh_lp_fn (cfun, t, lp->index);
129 /* Remove statement T in function IFUN from its EH landing pad. */
131 bool
132 remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
134 struct throw_stmt_node dummy;
135 void **slot;
137 if (!get_eh_throw_stmt_table (ifun))
138 return false;
140 dummy.stmt = t;
141 slot = htab_find_slot (get_eh_throw_stmt_table (ifun), &dummy,
142 NO_INSERT);
143 if (slot)
145 htab_clear_slot (get_eh_throw_stmt_table (ifun), slot);
146 return true;
148 else
149 return false;
153 /* Remove statement T in the current function (cfun) from its
154 EH landing pad. */
156 bool
157 remove_stmt_from_eh_lp (gimple t)
159 return remove_stmt_from_eh_lp_fn (cfun, t);
162 /* Determine if statement T is inside an EH region in function IFUN.
163 Positive numbers indicate a landing pad index; negative numbers
164 indicate a MUST_NOT_THROW region index; zero indicates that the
165 statement is not recorded in the region table. */
168 lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
170 struct throw_stmt_node *p, n;
172 if (ifun->eh->throw_stmt_table == NULL)
173 return 0;
175 n.stmt = t;
176 p = (struct throw_stmt_node *) htab_find (ifun->eh->throw_stmt_table, &n);
177 return p ? p->lp_nr : 0;
180 /* Likewise, but always use the current function. */
183 lookup_stmt_eh_lp (gimple t)
185 /* We can get called from initialized data when -fnon-call-exceptions
186 is on; prevent crash. */
187 if (!cfun)
188 return 0;
189 return lookup_stmt_eh_lp_fn (cfun, t);
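/* For example, a caller can interpret the result roughly like so
   (schematic sketch only, not code from this file):

     int lp_nr = lookup_stmt_eh_lp (stmt);
     if (lp_nr > 0)
       ...	stmt may throw and unwinds to landing pad LP_NR
     else if (lp_nr < 0)
       ...	stmt sits inside MUST_NOT_THROW region -LP_NR
     else
       ...	stmt is not in the table and is treated as non-throwing.  */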
192 /* First pass of EH node decomposition. Build up a tree of GIMPLE_TRY_FINALLY
193 nodes and LABEL_DECL nodes. We will use this during the second phase to
194 determine if a goto leaves the body of a TRY_FINALLY_EXPR node. */
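/* For instance (schematic example, labels invented):

     try {			outer GIMPLE_TRY_FINALLY
       lab1: ...
       try {			inner GIMPLE_TRY_FINALLY
         lab2: ...
       } finally { ... }
     } finally { ... }

   records lab1 -> outer try, lab2 -> inner try, and inner try -> outer
   try, so a goto's destination label can be walked upward through its
   parents to decide whether the goto escapes a given try-finally.  */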
196 struct finally_tree_node
198 /* When storing a GIMPLE_TRY, we have to record a gimple. However
199 when deciding whether a GOTO to a certain LABEL_DECL (which is a
200 tree) leaves the TRY block, it's necessary to record a tree in
201 this field. Thus a treemple is used. */
202 treemple child;
203 gimple parent;
206 /* Hashtable helpers. */
208 struct finally_tree_hasher : typed_free_remove <finally_tree_node>
210 typedef finally_tree_node value_type;
211 typedef finally_tree_node compare_type;
212 static inline hashval_t hash (const value_type *);
213 static inline bool equal (const value_type *, const compare_type *);
216 inline hashval_t
217 finally_tree_hasher::hash (const value_type *v)
219 return (intptr_t)v->child.t >> 4;
222 inline bool
223 finally_tree_hasher::equal (const value_type *v, const compare_type *c)
225 return v->child.t == c->child.t;
228 /* Note that this table is *not* marked GTY. It is short-lived. */
229 static hash_table<finally_tree_hasher> *finally_tree;
231 static void
232 record_in_finally_tree (treemple child, gimple parent)
234 struct finally_tree_node *n;
235 finally_tree_node **slot;
237 n = XNEW (struct finally_tree_node);
238 n->child = child;
239 n->parent = parent;
241 slot = finally_tree->find_slot (n, INSERT);
242 gcc_assert (!*slot);
243 *slot = n;
246 static void
247 collect_finally_tree (gimple stmt, gimple region);
249 /* Go through the gimple sequence. Works with collect_finally_tree to
250 record all GIMPLE_LABEL and GIMPLE_TRY statements. */
252 static void
253 collect_finally_tree_1 (gimple_seq seq, gimple region)
255 gimple_stmt_iterator gsi;
257 for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
258 collect_finally_tree (gsi_stmt (gsi), region);
261 static void
262 collect_finally_tree (gimple stmt, gimple region)
264 treemple temp;
266 switch (gimple_code (stmt))
268 case GIMPLE_LABEL:
269 temp.t = gimple_label_label (stmt);
270 record_in_finally_tree (temp, region);
271 break;
273 case GIMPLE_TRY:
274 if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
276 temp.g = stmt;
277 record_in_finally_tree (temp, region);
278 collect_finally_tree_1 (gimple_try_eval (stmt), stmt);
279 collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
281 else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
283 collect_finally_tree_1 (gimple_try_eval (stmt), region);
284 collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
286 break;
288 case GIMPLE_CATCH:
289 collect_finally_tree_1 (gimple_catch_handler (stmt), region);
290 break;
292 case GIMPLE_EH_FILTER:
293 collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
294 break;
296 case GIMPLE_EH_ELSE:
297 collect_finally_tree_1 (gimple_eh_else_n_body (stmt), region);
298 collect_finally_tree_1 (gimple_eh_else_e_body (stmt), region);
299 break;
301 default:
302 /* A type, a decl, or some kind of statement that we're not
303 interested in. Don't walk them. */
304 break;
309 /* Use the finally tree to determine if a jump from START to TARGET
310 would leave the try_finally node that START lives in. */
312 static bool
313 outside_finally_tree (treemple start, gimple target)
315 struct finally_tree_node n, *p;
319 n.child = start;
320 p = finally_tree->find (&n);
321 if (!p)
322 return true;
323 start.g = p->parent;
325 while (start.g != target);
327 return false;
330 /* Second pass of EH node decomposition. Actually transform the GIMPLE_TRY
331 nodes into a set of gotos, magic labels, and eh regions.
332 The eh region creation is straight-forward, but frobbing all the gotos
333 and such into shape isn't. */
335 /* The sequence into which we record all EH stuff. This will be
336 placed at the end of the function when we're all done. */
337 static gimple_seq eh_seq;
339 /* Record whether an EH region contains something that can throw,
340 indexed by EH region number. */
341 static bitmap eh_region_may_contain_throw_map;
343 /* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
344 statements that are seen to escape this GIMPLE_TRY_FINALLY node.
345 The idea is to record a gimple statement for everything except for
346 the conditionals, which get their labels recorded. Since labels are
347 of type 'tree', we need this node to store both gimple and tree
348 objects. REPL_STMT is the sequence used to replace the goto/return
349 statement. CONT_STMT is used to store the statement that allows
350 the return/goto to jump to the original destination. */
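/* Schematically (illustrative sketch only):

     try {
       ...
       goto out;		escapes the try, so it is queued
     } finally {
       cleanup ();
     }
     out:;

   The queued goto is replaced by REPL_STMT, a branch into the lowered
   finally code, while CONT_STMT holds the "goto out;" emitted after the
   finally so that control still reaches the original destination.  */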
352 struct goto_queue_node
354 treemple stmt;
355 location_t location;
356 gimple_seq repl_stmt;
357 gimple cont_stmt;
358 int index;
359 /* This is used when index >= 0 to indicate that stmt is a label (as
360 opposed to a goto stmt). */
361 int is_label;
364 /* State of the world while lowering. */
366 struct leh_state
368 /* What's "current" while constructing the eh region tree. These
369 correspond to variables of the same name in cfun->eh, which we
370 don't have easy access to. */
371 eh_region cur_region;
373 /* What's "current" for the purposes of __builtin_eh_pointer. For
374 a CATCH, this is the associated TRY. For an EH_FILTER, this is
375 the associated ALLOWED_EXCEPTIONS, etc. */
376 eh_region ehp_region;
378 /* Processing of TRY_FINALLY requires a bit more state. This is
379 split out into a separate structure so that we don't have to
380 copy so much when processing other nodes. */
381 struct leh_tf_state *tf;
384 struct leh_tf_state
386 /* Pointer to the GIMPLE_TRY_FINALLY node under discussion. The
387 try_finally_expr is the original GIMPLE_TRY_FINALLY. We need to retain
388 this so that outside_finally_tree can reliably reference the tree used
389 in the collect_finally_tree data structures. */
390 gimple try_finally_expr;
391 gimple top_p;
393 /* While lowering, a top_p is usually expanded into multiple statements,
394 thus we need the following field to store them. */
395 gimple_seq top_p_seq;
397 /* The state outside this try_finally node. */
398 struct leh_state *outer;
400 /* The exception region created for it. */
401 eh_region region;
403 /* The goto queue. */
404 struct goto_queue_node *goto_queue;
405 size_t goto_queue_size;
406 size_t goto_queue_active;
408 /* Pointer map to help in searching goto_queue when it is large. */
409 hash_map<gimple, goto_queue_node *> *goto_queue_map;
411 /* The set of unique labels seen as entries in the goto queue. */
412 vec<tree> dest_array;
414 /* A label to be added at the end of the completed transformed
415 sequence. It will be set if may_fallthru was true *at one time*,
416 though subsequent transformations may have cleared that flag. */
417 tree fallthru_label;
419 /* True if it is possible to fall out the bottom of the try block.
420 Cleared if the fallthru is converted to a goto. */
421 bool may_fallthru;
423 /* True if any entry in goto_queue is a GIMPLE_RETURN. */
424 bool may_return;
426 /* True if the finally block can receive an exception edge.
427 Cleared if the exception case is handled by code duplication. */
428 bool may_throw;
431 static gimple_seq lower_eh_must_not_throw (struct leh_state *, gimple);
433 /* Search for STMT in the goto queue. Return the replacement,
434 or null if the statement isn't in the queue. */
436 #define LARGE_GOTO_QUEUE 20
438 static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);
440 static gimple_seq
441 find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
443 unsigned int i;
445 if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
447 for (i = 0; i < tf->goto_queue_active; i++)
448 if ( tf->goto_queue[i].stmt.g == stmt.g)
449 return tf->goto_queue[i].repl_stmt;
450 return NULL;
453 /* If we have a large number of entries in the goto_queue, create a
454 pointer map and use that for searching. */
456 if (!tf->goto_queue_map)
458 tf->goto_queue_map = new hash_map<gimple, goto_queue_node *>;
459 for (i = 0; i < tf->goto_queue_active; i++)
461 bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
462 &tf->goto_queue[i]);
463 gcc_assert (!existed);
467 goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
468 if (slot != NULL)
469 return ((*slot)->repl_stmt);
471 return NULL;
474 /* A subroutine of replace_goto_queue_1. Handles the sub-clauses of a
475 lowered GIMPLE_COND. If, by chance, the replacement is a simple goto,
476 then we can just splat it in, otherwise we add the new stmts immediately
477 after the GIMPLE_COND and redirect. */
479 static void
480 replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
481 gimple_stmt_iterator *gsi)
483 tree label;
484 gimple_seq new_seq;
485 treemple temp;
486 location_t loc = gimple_location (gsi_stmt (*gsi));
488 temp.tp = tp;
489 new_seq = find_goto_replacement (tf, temp);
490 if (!new_seq)
491 return;
493 if (gimple_seq_singleton_p (new_seq)
494 && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
496 *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
497 return;
500 label = create_artificial_label (loc);
501 /* Set the new label for the GIMPLE_COND */
502 *tp = label;
504 gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
505 gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
508 /* The real work of replace_goto_queue. Returns with TSI updated to
509 point to the next statement. */
511 static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);
513 static void
514 replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
515 gimple_stmt_iterator *gsi)
517 gimple_seq seq;
518 treemple temp;
519 temp.g = NULL;
521 switch (gimple_code (stmt))
523 case GIMPLE_GOTO:
524 case GIMPLE_RETURN:
525 temp.g = stmt;
526 seq = find_goto_replacement (tf, temp);
527 if (seq)
529 gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
530 gsi_remove (gsi, false);
531 return;
533 break;
535 case GIMPLE_COND:
536 replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
537 replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
538 break;
540 case GIMPLE_TRY:
541 replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
542 replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
543 break;
544 case GIMPLE_CATCH:
545 replace_goto_queue_stmt_list (gimple_catch_handler_ptr (stmt), tf);
546 break;
547 case GIMPLE_EH_FILTER:
548 replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
549 break;
550 case GIMPLE_EH_ELSE:
551 replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (stmt), tf);
552 replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (stmt), tf);
553 break;
555 default:
556 /* These won't have gotos in them. */
557 break;
560 gsi_next (gsi);
563 /* A subroutine of replace_goto_queue. Handles GIMPLE_SEQ. */
565 static void
566 replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
568 gimple_stmt_iterator gsi = gsi_start (*seq);
570 while (!gsi_end_p (gsi))
571 replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
574 /* Replace all goto queue members. */
576 static void
577 replace_goto_queue (struct leh_tf_state *tf)
579 if (tf->goto_queue_active == 0)
580 return;
581 replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
582 replace_goto_queue_stmt_list (&eh_seq, tf);
585 /* Add a new record to the goto queue contained in TF. NEW_STMT is the
586 data to be added, IS_LABEL indicates whether NEW_STMT is a label or
587 a gimple return. */
589 static void
590 record_in_goto_queue (struct leh_tf_state *tf,
591 treemple new_stmt,
592 int index,
593 bool is_label,
594 location_t location)
596 size_t active, size;
597 struct goto_queue_node *q;
599 gcc_assert (!tf->goto_queue_map);
601 active = tf->goto_queue_active;
602 size = tf->goto_queue_size;
603 if (active >= size)
605 size = (size ? size * 2 : 32);
606 tf->goto_queue_size = size;
607 tf->goto_queue
608 = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
611 q = &tf->goto_queue[active];
612 tf->goto_queue_active = active + 1;
614 memset (q, 0, sizeof (*q));
615 q->stmt = new_stmt;
616 q->index = index;
617 q->location = location;
618 q->is_label = is_label;
621 /* Record the LABEL label in the goto queue contained in TF.
622 TF is not null. */
624 static void
625 record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
626 location_t location)
628 int index;
629 treemple temp, new_stmt;
631 if (!label)
632 return;
634 /* Computed and non-local gotos do not get processed. Given
635 their nature we can neither tell whether we've escaped the
636 finally block nor redirect them if we knew. */
637 if (TREE_CODE (label) != LABEL_DECL)
638 return;
640 /* No need to record gotos that don't leave the try block. */
641 temp.t = label;
642 if (!outside_finally_tree (temp, tf->try_finally_expr))
643 return;
645 if (! tf->dest_array.exists ())
647 tf->dest_array.create (10);
648 tf->dest_array.quick_push (label);
649 index = 0;
651 else
653 int n = tf->dest_array.length ();
654 for (index = 0; index < n; ++index)
655 if (tf->dest_array[index] == label)
656 break;
657 if (index == n)
658 tf->dest_array.safe_push (label);
661 /* In the case of a GOTO we want to record the destination label,
662 since with a GIMPLE_COND we have an easy access to the then/else
663 labels. */
664 new_stmt = stmt;
665 record_in_goto_queue (tf, new_stmt, index, true, location);
668 /* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
669 node, and if so record that fact in the goto queue associated with that
670 try_finally node. */
672 static void
673 maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
675 struct leh_tf_state *tf = state->tf;
676 treemple new_stmt;
678 if (!tf)
679 return;
681 switch (gimple_code (stmt))
683 case GIMPLE_COND:
684 new_stmt.tp = gimple_op_ptr (stmt, 2);
685 record_in_goto_queue_label (tf, new_stmt, gimple_cond_true_label (stmt),
686 EXPR_LOCATION (*new_stmt.tp));
687 new_stmt.tp = gimple_op_ptr (stmt, 3);
688 record_in_goto_queue_label (tf, new_stmt, gimple_cond_false_label (stmt),
689 EXPR_LOCATION (*new_stmt.tp));
690 break;
691 case GIMPLE_GOTO:
692 new_stmt.g = stmt;
693 record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
694 gimple_location (stmt));
695 break;
697 case GIMPLE_RETURN:
698 tf->may_return = true;
699 new_stmt.g = stmt;
700 record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
701 break;
703 default:
704 gcc_unreachable ();
709 #ifdef ENABLE_CHECKING
710 /* We do not process GIMPLE_SWITCHes for now. As long as the original source
711 was in fact structured, and we've not yet done jump threading, then none
712 of the labels will leave outer GIMPLE_TRY_FINALLY nodes. Verify this. */
714 static void
715 verify_norecord_switch_expr (struct leh_state *state, gimple switch_expr)
717 struct leh_tf_state *tf = state->tf;
718 size_t i, n;
720 if (!tf)
721 return;
723 n = gimple_switch_num_labels (switch_expr);
725 for (i = 0; i < n; ++i)
727 treemple temp;
728 tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
729 temp.t = lab;
730 gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
733 #else
734 #define verify_norecord_switch_expr(state, switch_expr)
735 #endif
737 /* Redirect a RETURN_EXPR pointed to by Q to FINLAB. If MOD is
738 non-null, insert it before the new branch. */
740 static void
741 do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
743 gimple x;
745 /* In the case of a return, the queue node must be a gimple statement. */
746 gcc_assert (!q->is_label);
748 /* Note that the return value may have already been computed, e.g.,
750 int x;
751 int foo (void)
753 x = 0;
754 try {
755 return x;
756 } finally {
757 x++;
761 should return 0, not 1. We don't have to do anything to make
762 this happen because the return value has been placed in the
763 RESULT_DECL already. */
765 q->cont_stmt = q->stmt.g;
767 if (mod)
768 gimple_seq_add_seq (&q->repl_stmt, mod);
770 x = gimple_build_goto (finlab);
771 gimple_set_location (x, q->location);
772 gimple_seq_add_stmt (&q->repl_stmt, x);
775 /* Similar, but easier, for GIMPLE_GOTO. */
777 static void
778 do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
779 struct leh_tf_state *tf)
781 gimple x;
783 gcc_assert (q->is_label);
785 q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);
787 if (mod)
788 gimple_seq_add_seq (&q->repl_stmt, mod);
790 x = gimple_build_goto (finlab);
791 gimple_set_location (x, q->location);
792 gimple_seq_add_stmt (&q->repl_stmt, x);
795 /* Emit a standard landing pad sequence into SEQ for REGION. */
797 static void
798 emit_post_landing_pad (gimple_seq *seq, eh_region region)
800 eh_landing_pad lp = region->landing_pads;
801 gimple x;
803 if (lp == NULL)
804 lp = gen_eh_landing_pad (region);
806 lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
807 EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;
809 x = gimple_build_label (lp->post_landing_pad);
810 gimple_seq_add_stmt (seq, x);
813 /* Emit a RESX statement into SEQ for REGION. */
815 static void
816 emit_resx (gimple_seq *seq, eh_region region)
818 gimple x = gimple_build_resx (region->index);
819 gimple_seq_add_stmt (seq, x);
820 if (region->outer)
821 record_stmt_eh_region (region->outer, x);
824 /* Emit an EH_DISPATCH statement into SEQ for REGION. */
826 static void
827 emit_eh_dispatch (gimple_seq *seq, eh_region region)
829 gimple x = gimple_build_eh_dispatch (region->index);
830 gimple_seq_add_stmt (seq, x);
833 /* Note that the current EH region may contain a throw, or a
834 call to a function which itself may contain a throw. */
836 static void
837 note_eh_region_may_contain_throw (eh_region region)
839 while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
841 if (region->type == ERT_MUST_NOT_THROW)
842 break;
843 region = region->outer;
844 if (region == NULL)
845 break;
849 /* Check if REGION has been marked as containing a throw. If REGION is
850 NULL, this predicate is false. */
852 static inline bool
853 eh_region_may_contain_throw (eh_region r)
855 return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
858 /* We want to transform
859 try { body; } catch { stuff; }
860 to
861 normal_sequence:
862 body;
863 over:
864 eh_sequence:
865 landing_pad:
866 stuff;
867 goto over;
869 TP is a GIMPLE_TRY node. REGION is the region whose post_landing_pad
870 should be placed before the second operand, or NULL. OVER is
871 an existing label that should be put at the exit, or NULL. */
873 static gimple_seq
874 frob_into_branch_around (gimple tp, eh_region region, tree over)
876 gimple x;
877 gimple_seq cleanup, result;
878 location_t loc = gimple_location (tp);
880 cleanup = gimple_try_cleanup (tp);
881 result = gimple_try_eval (tp);
883 if (region)
884 emit_post_landing_pad (&eh_seq, region);
886 if (gimple_seq_may_fallthru (cleanup))
888 if (!over)
889 over = create_artificial_label (loc);
890 x = gimple_build_goto (over);
891 gimple_set_location (x, loc);
892 gimple_seq_add_stmt (&cleanup, x);
894 gimple_seq_add_seq (&eh_seq, cleanup);
896 if (over)
898 x = gimple_build_label (over);
899 gimple_seq_add_stmt (&result, x);
901 return result;
904 /* A subroutine of lower_try_finally. Duplicate the tree rooted at T.
905 Make sure to record all new labels found. */
907 static gimple_seq
908 lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
909 location_t loc)
911 gimple region = NULL;
912 gimple_seq new_seq;
913 gimple_stmt_iterator gsi;
915 new_seq = copy_gimple_seq_and_replace_locals (seq);
917 for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
919 gimple stmt = gsi_stmt (gsi);
920 if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
922 tree block = gimple_block (stmt);
923 gimple_set_location (stmt, loc);
924 gimple_set_block (stmt, block);
928 if (outer_state->tf)
929 region = outer_state->tf->try_finally_expr;
930 collect_finally_tree_1 (new_seq, region);
932 return new_seq;
935 /* A subroutine of lower_try_finally. Create a fallthru label for
936 the given try_finally state. The only tricky bit here is that
937 we have to make sure to record the label in our outer context. */
939 static tree
940 lower_try_finally_fallthru_label (struct leh_tf_state *tf)
942 tree label = tf->fallthru_label;
943 treemple temp;
945 if (!label)
947 label = create_artificial_label (gimple_location (tf->try_finally_expr));
948 tf->fallthru_label = label;
949 if (tf->outer->tf)
951 temp.t = label;
952 record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
955 return label;
958 /* A subroutine of lower_try_finally. If FINALLY consists of a
959 GIMPLE_EH_ELSE node, return it. */
961 static inline gimple
962 get_eh_else (gimple_seq finally)
964 gimple x = gimple_seq_first_stmt (finally);
965 if (gimple_code (x) == GIMPLE_EH_ELSE)
967 gcc_assert (gimple_seq_singleton_p (finally));
968 return x;
970 return NULL;
973 /* A subroutine of lower_try_finally. If the eh_protect_cleanup_actions
974 langhook returns non-null, then the language requires that the exception
975 path out of a try_finally be treated specially. To wit: the code within
976 the finally block may not itself throw an exception. We have two choices
977 here. First we can duplicate the finally block and wrap it in a
978 must_not_throw region. Second, we can generate code like
980 try {
981 finally_block;
982 } catch {
983 if (fintmp == eh_edge)
984 protect_cleanup_actions;
987 where "fintmp" is the temporary used in the switch statement generation
988 alternative considered below. For the nonce, we always choose the first
989 option.
991 THIS_STATE may be null if this is a try-cleanup, not a try-finally. */
993 static void
994 honor_protect_cleanup_actions (struct leh_state *outer_state,
995 struct leh_state *this_state,
996 struct leh_tf_state *tf)
998 tree protect_cleanup_actions;
999 gimple_stmt_iterator gsi;
1000 bool finally_may_fallthru;
1001 gimple_seq finally;
1002 gimple x, eh_else;
1004 /* First check for nothing to do. */
1005 if (lang_hooks.eh_protect_cleanup_actions == NULL)
1006 return;
1007 protect_cleanup_actions = lang_hooks.eh_protect_cleanup_actions ();
1008 if (protect_cleanup_actions == NULL)
1009 return;
1011 finally = gimple_try_cleanup (tf->top_p);
1012 eh_else = get_eh_else (finally);
1014 /* Duplicate the FINALLY block. Only need to do this for try-finally,
1015 and not for cleanups. If we've got an EH_ELSE, extract it now. */
1016 if (eh_else)
1018 finally = gimple_eh_else_e_body (eh_else);
1019 gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
1021 else if (this_state)
1022 finally = lower_try_finally_dup_block (finally, outer_state,
1023 gimple_location (tf->try_finally_expr));
1024 finally_may_fallthru = gimple_seq_may_fallthru (finally);
1026 /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
1027 set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
1028 to be in an enclosing scope, but needs to be implemented at this level
1029 to avoid a nesting violation (see wrap_temporary_cleanups in
1030 cp/decl.c). Since it's logically at an outer level, we should call
1031 terminate before we get to it, so strip it away before adding the
1032 MUST_NOT_THROW filter. */
1033 gsi = gsi_start (finally);
1034 x = gsi_stmt (gsi);
1035 if (gimple_code (x) == GIMPLE_TRY
1036 && gimple_try_kind (x) == GIMPLE_TRY_CATCH
1037 && gimple_try_catch_is_cleanup (x))
1039 gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
1040 gsi_remove (&gsi, false);
1043 /* Wrap the block with protect_cleanup_actions as the action. */
1044 x = gimple_build_eh_must_not_throw (protect_cleanup_actions);
1045 x = gimple_build_try (finally, gimple_seq_alloc_with_stmt (x),
1046 GIMPLE_TRY_CATCH);
1047 finally = lower_eh_must_not_throw (outer_state, x);
1049 /* Drop all of this into the exception sequence. */
1050 emit_post_landing_pad (&eh_seq, tf->region);
1051 gimple_seq_add_seq (&eh_seq, finally);
1052 if (finally_may_fallthru)
1053 emit_resx (&eh_seq, tf->region);
1055 /* Having now been handled, EH isn't to be considered with
1056 the rest of the outgoing edges. */
1057 tf->may_throw = false;
1060 /* A subroutine of lower_try_finally. We have determined that there is
1061 no fallthru edge out of the finally block. This means that there is
1062 no outgoing edge corresponding to any incoming edge. Restructure the
1063 try_finally node for this special case. */
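/* For instance (illustrative sketch), if the finally block ends in a
   noreturn call:

     try { ... } finally { cleanup (); abort (); }

   no copy of the finally can fall out the bottom, so every incoming
   edge (fallthru, gotos, returns, and the EH edge) is simply branched
   to a single label placed in front of the finally code.  */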
1065 static void
1066 lower_try_finally_nofallthru (struct leh_state *state,
1067 struct leh_tf_state *tf)
1069 tree lab;
1070 gimple x, eh_else;
1071 gimple_seq finally;
1072 struct goto_queue_node *q, *qe;
1074 lab = create_artificial_label (gimple_location (tf->try_finally_expr));
1076 /* We expect that tf->top_p is a GIMPLE_TRY. */
1077 finally = gimple_try_cleanup (tf->top_p);
1078 tf->top_p_seq = gimple_try_eval (tf->top_p);
1080 x = gimple_build_label (lab);
1081 gimple_seq_add_stmt (&tf->top_p_seq, x);
1083 q = tf->goto_queue;
1084 qe = q + tf->goto_queue_active;
1085 for (; q < qe; ++q)
1086 if (q->index < 0)
1087 do_return_redirection (q, lab, NULL);
1088 else
1089 do_goto_redirection (q, lab, NULL, tf);
1091 replace_goto_queue (tf);
1093 /* Emit the finally block into the stream. Lower EH_ELSE at this time. */
1094 eh_else = get_eh_else (finally);
1095 if (eh_else)
1097 finally = gimple_eh_else_n_body (eh_else);
1098 lower_eh_constructs_1 (state, &finally);
1099 gimple_seq_add_seq (&tf->top_p_seq, finally);
1101 if (tf->may_throw)
1103 finally = gimple_eh_else_e_body (eh_else);
1104 lower_eh_constructs_1 (state, &finally);
1106 emit_post_landing_pad (&eh_seq, tf->region);
1107 gimple_seq_add_seq (&eh_seq, finally);
1110 else
1112 lower_eh_constructs_1 (state, &finally);
1113 gimple_seq_add_seq (&tf->top_p_seq, finally);
1115 if (tf->may_throw)
1117 emit_post_landing_pad (&eh_seq, tf->region);
1119 x = gimple_build_goto (lab);
1120 gimple_set_location (x, gimple_location (tf->try_finally_expr));
1121 gimple_seq_add_stmt (&eh_seq, x);
1126 /* A subroutine of lower_try_finally. We have determined that there is
1127 exactly one destination of the finally block. Restructure the
1128 try_finally node for this special case. */
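/* E.g. (schematic): if the finally can only be reached by falling off
   the end of the try body,

     try { a (); b (); } finally { cleanup (); }

   lowers to straight-line code

     a (); b (); cleanup ();

   and a lone EH destination similarly just brackets the finally code
   with a post-landing-pad label and a trailing RESX.  */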
1130 static void
1131 lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
1133 struct goto_queue_node *q, *qe;
1134 gimple x;
1135 gimple_seq finally;
1136 gimple_stmt_iterator gsi;
1137 tree finally_label;
1138 location_t loc = gimple_location (tf->try_finally_expr);
1140 finally = gimple_try_cleanup (tf->top_p);
1141 tf->top_p_seq = gimple_try_eval (tf->top_p);
1143 /* Since there's only one destination, and the destination edge can only
1144 either be EH or non-EH, that implies that all of our incoming edges
1145 are of the same type. Therefore we can lower EH_ELSE immediately. */
1146 x = get_eh_else (finally);
1147 if (x)
1149 if (tf->may_throw)
1150 finally = gimple_eh_else_e_body (x);
1151 else
1152 finally = gimple_eh_else_n_body (x);
1155 lower_eh_constructs_1 (state, &finally);
1157 for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1159 gimple stmt = gsi_stmt (gsi);
1160 if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
1162 tree block = gimple_block (stmt);
1163 gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
1164 gimple_set_block (stmt, block);
1168 if (tf->may_throw)
1170 /* Only reachable via the exception edge. Add the given label to
1171 the head of the FINALLY block. Append a RESX at the end. */
1172 emit_post_landing_pad (&eh_seq, tf->region);
1173 gimple_seq_add_seq (&eh_seq, finally);
1174 emit_resx (&eh_seq, tf->region);
1175 return;
1178 if (tf->may_fallthru)
1180 /* Only reachable via the fallthru edge. Do nothing but let
1181 the two blocks run together; we'll fall out the bottom. */
1182 gimple_seq_add_seq (&tf->top_p_seq, finally);
1183 return;
1186 finally_label = create_artificial_label (loc);
1187 x = gimple_build_label (finally_label);
1188 gimple_seq_add_stmt (&tf->top_p_seq, x);
1190 gimple_seq_add_seq (&tf->top_p_seq, finally);
1192 q = tf->goto_queue;
1193 qe = q + tf->goto_queue_active;
1195 if (tf->may_return)
1197 /* Reachable by return expressions only. Redirect them. */
1198 for (; q < qe; ++q)
1199 do_return_redirection (q, finally_label, NULL);
1200 replace_goto_queue (tf);
1202 else
1204 /* Reachable by goto expressions only. Redirect them. */
1205 for (; q < qe; ++q)
1206 do_goto_redirection (q, finally_label, NULL, tf);
1207 replace_goto_queue (tf);
1209 if (tf->dest_array[0] == tf->fallthru_label)
1211 /* Reachable by goto to fallthru label only. Redirect it
1212 to the new label (already created, sadly), and do not
1213 emit the final branch out, or the fallthru label. */
1214 tf->fallthru_label = NULL;
1215 return;
1219 /* Place the original return/goto to the original destination
1220 immediately after the finally block. */
1221 x = tf->goto_queue[0].cont_stmt;
1222 gimple_seq_add_stmt (&tf->top_p_seq, x);
1223 maybe_record_in_goto_queue (state, x);
1226 /* A subroutine of lower_try_finally. There are multiple edges incoming
1227 and outgoing from the finally block. Implement this by duplicating the
1228 finally block for every destination. */
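/* Schematically (labels invented for illustration):

     try { ... goto A; ... return; ... } finally { F; }

   becomes, roughly, one copy of F per destination:

     F; goto <fallthru label>;		if the try may fall through
     LA: F; goto A;
     LR: F; return;

   plus one more copy of F on the exception path, terminated by a RESX,
   with the original gotos and returns redirected to LA/LR.  */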
1230 static void
1231 lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
1233 gimple_seq finally;
1234 gimple_seq new_stmt;
1235 gimple_seq seq;
1236 gimple x, eh_else;
1237 tree tmp;
1238 location_t tf_loc = gimple_location (tf->try_finally_expr);
1240 finally = gimple_try_cleanup (tf->top_p);
1242 /* Notice EH_ELSE, and simplify some of the remaining code
1243 by considering FINALLY to be the normal return path only. */
1244 eh_else = get_eh_else (finally);
1245 if (eh_else)
1246 finally = gimple_eh_else_n_body (eh_else);
1248 tf->top_p_seq = gimple_try_eval (tf->top_p);
1249 new_stmt = NULL;
1251 if (tf->may_fallthru)
1253 seq = lower_try_finally_dup_block (finally, state, tf_loc);
1254 lower_eh_constructs_1 (state, &seq);
1255 gimple_seq_add_seq (&new_stmt, seq);
1257 tmp = lower_try_finally_fallthru_label (tf);
1258 x = gimple_build_goto (tmp);
1259 gimple_set_location (x, tf_loc);
1260 gimple_seq_add_stmt (&new_stmt, x);
1263 if (tf->may_throw)
1265 /* We don't need to copy the EH path of EH_ELSE,
1266 since it is only emitted once. */
1267 if (eh_else)
1268 seq = gimple_eh_else_e_body (eh_else);
1269 else
1270 seq = lower_try_finally_dup_block (finally, state, tf_loc);
1271 lower_eh_constructs_1 (state, &seq);
1273 emit_post_landing_pad (&eh_seq, tf->region);
1274 gimple_seq_add_seq (&eh_seq, seq);
1275 emit_resx (&eh_seq, tf->region);
1278 if (tf->goto_queue)
1280 struct goto_queue_node *q, *qe;
1281 int return_index, index;
1282 struct labels_s
1284 struct goto_queue_node *q;
1285 tree label;
1286 } *labels;
1288 return_index = tf->dest_array.length ();
1289 labels = XCNEWVEC (struct labels_s, return_index + 1);
1291 q = tf->goto_queue;
1292 qe = q + tf->goto_queue_active;
1293 for (; q < qe; q++)
1295 index = q->index < 0 ? return_index : q->index;
1297 if (!labels[index].q)
1298 labels[index].q = q;
1301 for (index = 0; index < return_index + 1; index++)
1303 tree lab;
1305 q = labels[index].q;
1306 if (! q)
1307 continue;
1309 lab = labels[index].label
1310 = create_artificial_label (tf_loc);
1312 if (index == return_index)
1313 do_return_redirection (q, lab, NULL);
1314 else
1315 do_goto_redirection (q, lab, NULL, tf);
1317 x = gimple_build_label (lab);
1318 gimple_seq_add_stmt (&new_stmt, x);
1320 seq = lower_try_finally_dup_block (finally, state, q->location);
1321 lower_eh_constructs_1 (state, &seq);
1322 gimple_seq_add_seq (&new_stmt, seq);
1324 gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
1325 maybe_record_in_goto_queue (state, q->cont_stmt);
1328 for (q = tf->goto_queue; q < qe; q++)
1330 tree lab;
1332 index = q->index < 0 ? return_index : q->index;
1334 if (labels[index].q == q)
1335 continue;
1337 lab = labels[index].label;
1339 if (index == return_index)
1340 do_return_redirection (q, lab, NULL);
1341 else
1342 do_goto_redirection (q, lab, NULL, tf);
1345 replace_goto_queue (tf);
1346 free (labels);
1349 /* Need to link new stmts after running replace_goto_queue due
1350 to not wanting to process the same goto stmts twice. */
1351 gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
1354 /* A subroutine of lower_try_finally. There are multiple edges incoming
1355 and outgoing from the finally block. Implement this by instrumenting
1356 each incoming edge and creating a switch statement at the end of the
1357 finally block that branches to the appropriate destination. */
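/* Schematically (names invented for illustration):

     try { ... goto A; ... return; ... } finally { F; }

   becomes roughly

     ... finally_tmp = 1; goto finally_label; ...
     ... finally_tmp = 2; goto finally_label; ...
     finally_label:
       F;
       switch (finally_tmp)
         {
         case 1: goto A;
         default: return;
         }

   so only one copy of F is emitted, at the cost of the dispatch.  */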
1359 static void
1360 lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
1362 struct goto_queue_node *q, *qe;
1363 tree finally_tmp, finally_label;
1364 int return_index, eh_index, fallthru_index;
1365 int nlabels, ndests, j, last_case_index;
1366 tree last_case;
1367 vec<tree> case_label_vec;
1368 gimple_seq switch_body = NULL;
1369 gimple x, eh_else;
1370 tree tmp;
1371 gimple switch_stmt;
1372 gimple_seq finally;
1373 hash_map<tree, gimple> *cont_map = NULL;
1374 /* The location of the TRY_FINALLY stmt. */
1375 location_t tf_loc = gimple_location (tf->try_finally_expr);
1376 /* The location of the finally block. */
1377 location_t finally_loc;
1379 finally = gimple_try_cleanup (tf->top_p);
1380 eh_else = get_eh_else (finally);
1382 /* Mash the TRY block to the head of the chain. */
1383 tf->top_p_seq = gimple_try_eval (tf->top_p);
1385 /* The location of the finally is either the last stmt in the finally
1386 block or the location of the TRY_FINALLY itself. */
1387 x = gimple_seq_last_stmt (finally);
1388 finally_loc = x ? gimple_location (x) : tf_loc;
1390 /* Prepare for switch statement generation. */
1391 nlabels = tf->dest_array.length ();
1392 return_index = nlabels;
1393 eh_index = return_index + tf->may_return;
1394 fallthru_index = eh_index + (tf->may_throw && !eh_else);
1395 ndests = fallthru_index + tf->may_fallthru;
1397 finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
1398 finally_label = create_artificial_label (finally_loc);
1400 /* We use vec::quick_push on case_label_vec throughout this function,
1401 since we know the size in advance and allocate precisely as much
1402 space as needed. */
1403 case_label_vec.create (ndests);
1404 last_case = NULL;
1405 last_case_index = 0;
1407 /* Begin inserting code for getting to the finally block. Things
1408 are done in this order to correspond to the sequence in which the code is
1409 laid out. */
1411 if (tf->may_fallthru)
1413 x = gimple_build_assign (finally_tmp,
1414 build_int_cst (integer_type_node,
1415 fallthru_index));
1416 gimple_seq_add_stmt (&tf->top_p_seq, x);
1418 tmp = build_int_cst (integer_type_node, fallthru_index);
1419 last_case = build_case_label (tmp, NULL,
1420 create_artificial_label (tf_loc));
1421 case_label_vec.quick_push (last_case);
1422 last_case_index++;
1424 x = gimple_build_label (CASE_LABEL (last_case));
1425 gimple_seq_add_stmt (&switch_body, x);
1427 tmp = lower_try_finally_fallthru_label (tf);
1428 x = gimple_build_goto (tmp);
1429 gimple_set_location (x, tf_loc);
1430 gimple_seq_add_stmt (&switch_body, x);
1433 /* For EH_ELSE, emit the exception path (plus resx) now, then
1434 subsequently we only need consider the normal path. */
1435 if (eh_else)
1437 if (tf->may_throw)
1439 finally = gimple_eh_else_e_body (eh_else);
1440 lower_eh_constructs_1 (state, &finally);
1442 emit_post_landing_pad (&eh_seq, tf->region);
1443 gimple_seq_add_seq (&eh_seq, finally);
1444 emit_resx (&eh_seq, tf->region);
1447 finally = gimple_eh_else_n_body (eh_else);
1449 else if (tf->may_throw)
1451 emit_post_landing_pad (&eh_seq, tf->region);
1453 x = gimple_build_assign (finally_tmp,
1454 build_int_cst (integer_type_node, eh_index));
1455 gimple_seq_add_stmt (&eh_seq, x);
1457 x = gimple_build_goto (finally_label);
1458 gimple_set_location (x, tf_loc);
1459 gimple_seq_add_stmt (&eh_seq, x);
1461 tmp = build_int_cst (integer_type_node, eh_index);
1462 last_case = build_case_label (tmp, NULL,
1463 create_artificial_label (tf_loc));
1464 case_label_vec.quick_push (last_case);
1465 last_case_index++;
1467 x = gimple_build_label (CASE_LABEL (last_case));
1468 gimple_seq_add_stmt (&eh_seq, x);
1469 emit_resx (&eh_seq, tf->region);
1472 x = gimple_build_label (finally_label);
1473 gimple_seq_add_stmt (&tf->top_p_seq, x);
1475 lower_eh_constructs_1 (state, &finally);
1476 gimple_seq_add_seq (&tf->top_p_seq, finally);
1478 /* Redirect each incoming goto edge. */
1479 q = tf->goto_queue;
1480 qe = q + tf->goto_queue_active;
1481 j = last_case_index + tf->may_return;
1482 /* Prepare the assignments to finally_tmp that are executed upon the
1483 entrance through a particular edge. */
1484 for (; q < qe; ++q)
1486 gimple_seq mod = NULL;
1487 int switch_id;
1488 unsigned int case_index;
1490 if (q->index < 0)
1492 x = gimple_build_assign (finally_tmp,
1493 build_int_cst (integer_type_node,
1494 return_index));
1495 gimple_seq_add_stmt (&mod, x);
1496 do_return_redirection (q, finally_label, mod);
1497 switch_id = return_index;
1499 else
1501 x = gimple_build_assign (finally_tmp,
1502 build_int_cst (integer_type_node, q->index));
1503 gimple_seq_add_stmt (&mod, x);
1504 do_goto_redirection (q, finally_label, mod, tf);
1505 switch_id = q->index;
1508 case_index = j + q->index;
1509 if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
1511 tree case_lab;
1512 tmp = build_int_cst (integer_type_node, switch_id);
1513 case_lab = build_case_label (tmp, NULL,
1514 create_artificial_label (tf_loc));
1515 /* We store the cont_stmt in the pointer map, so that we can recover
1516 it in the loop below. */
1517 if (!cont_map)
1518 cont_map = new hash_map<tree, gimple>;
1519 cont_map->put (case_lab, q->cont_stmt);
1520 case_label_vec.quick_push (case_lab);
1523 for (j = last_case_index; j < last_case_index + nlabels; j++)
1525 gimple cont_stmt;
1527 last_case = case_label_vec[j];
1529 gcc_assert (last_case);
1530 gcc_assert (cont_map);
1532 cont_stmt = *cont_map->get (last_case);
1534 x = gimple_build_label (CASE_LABEL (last_case));
1535 gimple_seq_add_stmt (&switch_body, x);
1536 gimple_seq_add_stmt (&switch_body, cont_stmt);
1537 maybe_record_in_goto_queue (state, cont_stmt);
1539 if (cont_map)
1540 delete cont_map;
1542 replace_goto_queue (tf);
1544 /* Make sure that the last case is the default label, as one is required.
1545 Then sort the labels, which is also required in GIMPLE. */
1546 CASE_LOW (last_case) = NULL;
1547 tree tem = case_label_vec.pop ();
1548 gcc_assert (tem == last_case);
1549 sort_case_labels (case_label_vec);
1551 /* Build the switch statement, setting last_case to be the default
1552 label. */
1553 switch_stmt = gimple_build_switch (finally_tmp, last_case,
1554 case_label_vec);
1555 gimple_set_location (switch_stmt, finally_loc);
1557 /* Need to link SWITCH_STMT after running replace_goto_queue
1558 due to not wanting to process the same goto stmts twice. */
1559 gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
1560 gimple_seq_add_seq (&tf->top_p_seq, switch_body);
1563 /* Decide whether or not we are going to duplicate the finally block.
1564 There are several considerations.
1566 First, if this is Java, then the finally block contains code
1567 written by the user. It has line numbers associated with it,
1568 so duplicating the block means it's difficult to set a breakpoint.
1569 Since controlling code generation via -g is verboten, we simply
1570 never duplicate code without optimization.
1572 Second, we'd like to prevent egregious code growth. One way to
1573 do this is to estimate the size of the finally block, multiply
1574 that by the number of copies we'd need to make, and compare against
1575 the estimate of the size of the switch machinery we'd have to add. */
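/* Worked example of the heuristic below (numbers purely illustrative):
   a finally block that count_insns_seq scores at 4, reachable from 3
   destinations, gives f_estimate = (4 + 1) * 3 = 15 and
   sw_estimate = 10 + 2 * 3 = 16; at -O2 the test
   f_estimate < 100 || f_estimate < sw_estimate * 2 succeeds, so the
   finally block is copied rather than routed through a switch.  */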
1577 static bool
1578 decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
1580 int f_estimate, sw_estimate;
1581 gimple eh_else;
1583 /* If there's an EH_ELSE involved, the exception path is separate
1584 and really doesn't come into play for this computation. */
1585 eh_else = get_eh_else (finally);
1586 if (eh_else)
1588 ndests -= may_throw;
1589 finally = gimple_eh_else_n_body (eh_else);
1592 if (!optimize)
1594 gimple_stmt_iterator gsi;
1596 if (ndests == 1)
1597 return true;
1599 for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1601 gimple stmt = gsi_stmt (gsi);
1602 if (!is_gimple_debug (stmt) && !gimple_clobber_p (stmt))
1603 return false;
1605 return true;
1608 /* Finally estimate N times, plus N gotos. */
1609 f_estimate = count_insns_seq (finally, &eni_size_weights);
1610 f_estimate = (f_estimate + 1) * ndests;
1612 /* Switch statement (cost 10), N variable assignments, N gotos. */
1613 sw_estimate = 10 + 2 * ndests;
1615 /* Optimize for size clearly wants our best guess. */
1616 if (optimize_function_for_size_p (cfun))
1617 return f_estimate < sw_estimate;
1619 /* ??? These numbers are completely made up so far. */
1620 if (optimize > 1)
1621 return f_estimate < 100 || f_estimate < sw_estimate * 2;
1622 else
1623 return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
1626 /* REG is the enclosing region for a possible cleanup region, or the region
1627 itself. Returns TRUE if such a region would be unreachable.
1629 Cleanup regions within a must-not-throw region aren't actually reachable
1630 even if there are throwing stmts within them, because the personality
1631 routine will call terminate before unwinding. */
1633 static bool
1634 cleanup_is_dead_in (eh_region reg)
1636 while (reg && reg->type == ERT_CLEANUP)
1637 reg = reg->outer;
1638 return (reg && reg->type == ERT_MUST_NOT_THROW);
1641 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_FINALLY node
1642 to a sequence of labels and blocks, plus the exception region trees
1643 that record all the magic. This is complicated by the need to
1644 arrange for the FINALLY block to be executed on all exits. */
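/* In outline, the choice made below is:
     ndests == 0			the finally is unreachable; drop it
     finally cannot fall through	lower_try_finally_nofallthru
     ndests == 1			lower_try_finally_onedest
     decide_copy_try_finally true	lower_try_finally_copy
     otherwise				lower_try_finally_switch  */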
1646 static gimple_seq
1647 lower_try_finally (struct leh_state *state, gimple tp)
1649 struct leh_tf_state this_tf;
1650 struct leh_state this_state;
1651 int ndests;
1652 gimple_seq old_eh_seq;
1654 /* Process the try block. */
1656 memset (&this_tf, 0, sizeof (this_tf));
1657 this_tf.try_finally_expr = tp;
1658 this_tf.top_p = tp;
1659 this_tf.outer = state;
1660 if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
1662 this_tf.region = gen_eh_region_cleanup (state->cur_region);
1663 this_state.cur_region = this_tf.region;
1665 else
1667 this_tf.region = NULL;
1668 this_state.cur_region = state->cur_region;
1671 this_state.ehp_region = state->ehp_region;
1672 this_state.tf = &this_tf;
1674 old_eh_seq = eh_seq;
1675 eh_seq = NULL;
1677 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1679 /* Determine if the try block is escaped through the bottom. */
1680 this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1682 /* Determine if any exceptions are possible within the try block. */
1683 if (this_tf.region)
1684 this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
1685 if (this_tf.may_throw)
1686 honor_protect_cleanup_actions (state, &this_state, &this_tf);
1688 /* Determine how many edges (still) reach the finally block. Or rather,
1689 how many destinations are reached by the finally block. Use this to
1690 determine how we process the finally block itself. */
1692 ndests = this_tf.dest_array.length ();
1693 ndests += this_tf.may_fallthru;
1694 ndests += this_tf.may_return;
1695 ndests += this_tf.may_throw;
1697 /* If the FINALLY block is not reachable, dike it out. */
1698 if (ndests == 0)
1700 gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
1701 gimple_try_set_cleanup (tp, NULL);
1703 /* If the finally block doesn't fall through, then any destination
1704 we might try to impose there isn't reached either. There may be
1705 some minor amount of cleanup and redirection still needed. */
1706 else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
1707 lower_try_finally_nofallthru (state, &this_tf);
1709 /* We can easily special-case redirection to a single destination. */
1710 else if (ndests == 1)
1711 lower_try_finally_onedest (state, &this_tf);
1712 else if (decide_copy_try_finally (ndests, this_tf.may_throw,
1713 gimple_try_cleanup (tp)))
1714 lower_try_finally_copy (state, &this_tf);
1715 else
1716 lower_try_finally_switch (state, &this_tf);
1718 /* If someone requested we add a label at the end of the transformed
1719 block, do so. */
1720 if (this_tf.fallthru_label)
1722 /* This must be reached only if ndests == 0. */
1723 gimple x = gimple_build_label (this_tf.fallthru_label);
1724 gimple_seq_add_stmt (&this_tf.top_p_seq, x);
1727 this_tf.dest_array.release ();
1728 free (this_tf.goto_queue);
1729 if (this_tf.goto_queue_map)
1730 delete this_tf.goto_queue_map;
1732 /* If there was an old (aka outer) eh_seq, append the current eh_seq.
1733 If there was no old eh_seq, then the append is trivially already done. */
1734 if (old_eh_seq)
1736 if (eh_seq == NULL)
1737 eh_seq = old_eh_seq;
1738 else
1740 gimple_seq new_eh_seq = eh_seq;
1741 eh_seq = old_eh_seq;
1742 gimple_seq_add_seq (&eh_seq, new_eh_seq);
1746 return this_tf.top_p_seq;
1749 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_CATCH with a
1750 list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
1751 exception region trees that record all the magic. */
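/* Schematically (labels invented), for

     try { body; } catch (T1) { h1; } catch (T2) { h2; }

   the lowered form is roughly

     body;
     goto over;
   and on the exception path:
     <post landing pad>
     EH_DISPATCH <try region>
     RESX
     L1: h1; goto over;
     L2: h2; goto over;
   over:;  */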
1753 static gimple_seq
1754 lower_catch (struct leh_state *state, gimple tp)
1756 eh_region try_region = NULL;
1757 struct leh_state this_state = *state;
1758 gimple_stmt_iterator gsi;
1759 tree out_label;
1760 gimple_seq new_seq, cleanup;
1761 gimple x;
1762 location_t try_catch_loc = gimple_location (tp);
1764 if (flag_exceptions)
1766 try_region = gen_eh_region_try (state->cur_region);
1767 this_state.cur_region = try_region;
1770 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1772 if (!eh_region_may_contain_throw (try_region))
1773 return gimple_try_eval (tp);
1775 new_seq = NULL;
1776 emit_eh_dispatch (&new_seq, try_region);
1777 emit_resx (&new_seq, try_region);
1779 this_state.cur_region = state->cur_region;
1780 this_state.ehp_region = try_region;
1782 out_label = NULL;
1783 cleanup = gimple_try_cleanup (tp);
1784 for (gsi = gsi_start (cleanup);
1785 !gsi_end_p (gsi);
1786 gsi_next (&gsi))
1788 eh_catch c;
1789 gimple gcatch;
1790 gimple_seq handler;
1792 gcatch = gsi_stmt (gsi);
1793 c = gen_eh_region_catch (try_region, gimple_catch_types (gcatch));
1795 handler = gimple_catch_handler (gcatch);
1796 lower_eh_constructs_1 (&this_state, &handler);
1798 c->label = create_artificial_label (UNKNOWN_LOCATION);
1799 x = gimple_build_label (c->label);
1800 gimple_seq_add_stmt (&new_seq, x);
1802 gimple_seq_add_seq (&new_seq, handler);
1804 if (gimple_seq_may_fallthru (new_seq))
1806 if (!out_label)
1807 out_label = create_artificial_label (try_catch_loc);
1809 x = gimple_build_goto (out_label);
1810 gimple_seq_add_stmt (&new_seq, x);
1812 if (!c->type_list)
1813 break;
1816 gimple_try_set_cleanup (tp, new_seq);
1818 return frob_into_branch_around (tp, try_region, out_label);
1821 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with a
1822 GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1823 region trees that record all the magic. */
1825 static gimple_seq
1826 lower_eh_filter (struct leh_state *state, gimple tp)
1828 struct leh_state this_state = *state;
1829 eh_region this_region = NULL;
1830 gimple inner, x;
1831 gimple_seq new_seq;
1833 inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1835 if (flag_exceptions)
1837 this_region = gen_eh_region_allowed (state->cur_region,
1838 gimple_eh_filter_types (inner));
1839 this_state.cur_region = this_region;
1842 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1844 if (!eh_region_may_contain_throw (this_region))
1845 return gimple_try_eval (tp);
1847 new_seq = NULL;
1848 this_state.cur_region = state->cur_region;
1849 this_state.ehp_region = this_region;
1851 emit_eh_dispatch (&new_seq, this_region);
1852 emit_resx (&new_seq, this_region);
1854 this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1855 x = gimple_build_label (this_region->u.allowed.label);
1856 gimple_seq_add_stmt (&new_seq, x);
1858 lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
1859 gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1861 gimple_try_set_cleanup (tp, new_seq);
1863 return frob_into_branch_around (tp, this_region, NULL);
1866 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with
1867 a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1868 plus the exception region trees that record all the magic. */
1870 static gimple_seq
1871 lower_eh_must_not_throw (struct leh_state *state, gimple tp)
1873 struct leh_state this_state = *state;
1875 if (flag_exceptions)
1877 gimple inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1878 eh_region this_region;
1880 this_region = gen_eh_region_must_not_throw (state->cur_region);
1881 this_region->u.must_not_throw.failure_decl
1882 = gimple_eh_must_not_throw_fndecl (inner);
1883 this_region->u.must_not_throw.failure_loc
1884 = LOCATION_LOCUS (gimple_location (tp));
1886 /* In order to get mangling applied to this decl, we must mark it
1887 used now. Otherwise, pass_ipa_free_lang_data won't think it
1888 needs to happen. */
1889 TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1891 this_state.cur_region = this_region;
1894 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1896 return gimple_try_eval (tp);
1899 /* Implement a cleanup expression. This is similar to try-finally,
1900 except that we only execute the cleanup block for exception edges. */
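/* E.g. (schematic):

     try { body; } cleanup { d (); }

   runs d () only when an exception escapes BODY; on the normal path
   control falls straight through, unlike try-finally where the
   cleanup runs on every exit.  */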
1902 static gimple_seq
1903 lower_cleanup (struct leh_state *state, gimple tp)
1905 struct leh_state this_state = *state;
1906 eh_region this_region = NULL;
1907 struct leh_tf_state fake_tf;
1908 gimple_seq result;
1909 bool cleanup_dead = cleanup_is_dead_in (state->cur_region);
1911 if (flag_exceptions && !cleanup_dead)
1913 this_region = gen_eh_region_cleanup (state->cur_region);
1914 this_state.cur_region = this_region;
1917 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1919 if (cleanup_dead || !eh_region_may_contain_throw (this_region))
1920 return gimple_try_eval (tp);
1922 /* Build enough of a try-finally state so that we can reuse
1923 honor_protect_cleanup_actions. */
1924 memset (&fake_tf, 0, sizeof (fake_tf));
1925 fake_tf.top_p = fake_tf.try_finally_expr = tp;
1926 fake_tf.outer = state;
1927 fake_tf.region = this_region;
1928 fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1929 fake_tf.may_throw = true;
1931 honor_protect_cleanup_actions (state, NULL, &fake_tf);
1933 if (fake_tf.may_throw)
1935 /* In this case honor_protect_cleanup_actions had nothing to do,
1936 and we should process this normally. */
1937 lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
1938 result = frob_into_branch_around (tp, this_region,
1939 fake_tf.fallthru_label);
1941 else
1943 /* In this case honor_protect_cleanup_actions did nearly all of
1944 the work. All we have left is to append the fallthru_label. */
1946 result = gimple_try_eval (tp);
1947 if (fake_tf.fallthru_label)
1949 gimple x = gimple_build_label (fake_tf.fallthru_label);
1950 gimple_seq_add_stmt (&result, x);
1953 return result;
1956 /* Main loop for lowering eh constructs. Also moves gsi to the next
1957 statement. */
1959 static void
1960 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
1962 gimple_seq replace;
1963 gimple x;
1964 gimple stmt = gsi_stmt (*gsi);
1966 switch (gimple_code (stmt))
1968 case GIMPLE_CALL:
1970 tree fndecl = gimple_call_fndecl (stmt);
1971 tree rhs, lhs;
1973 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
1974 switch (DECL_FUNCTION_CODE (fndecl))
1976 case BUILT_IN_EH_POINTER:
1977 /* The front end may have generated a call to
1978 __builtin_eh_pointer (0) within a catch region. Replace
1979 this zero argument with the current catch region number. */
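/* Illustratively: inside the handler for EH region N the call becomes
   __builtin_eh_pointer (N); outside any handler region the call is removed
   below and any LHS it had is replaced with a null pointer.  */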
1980 if (state->ehp_region)
1982 tree nr = build_int_cst (integer_type_node,
1983 state->ehp_region->index);
1984 gimple_call_set_arg (stmt, 0, nr);
1986 else
1988 /* The user has done something silly. Remove it. */
1989 rhs = null_pointer_node;
1990 goto do_replace;
1992 break;
1994 case BUILT_IN_EH_FILTER:
1995 /* ??? This should never appear, but since it's a builtin it
1996 is open to abuse by users. Just remove it and
1997 replace the use with the arbitrary value zero. */
1998 rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
1999 do_replace:
2000 lhs = gimple_call_lhs (stmt);
2001 x = gimple_build_assign (lhs, rhs);
2002 gsi_insert_before (gsi, x, GSI_SAME_STMT);
2003 /* FALLTHRU */
2005 case BUILT_IN_EH_COPY_VALUES:
2006 /* Likewise this should not appear. Remove it. */
2007 gsi_remove (gsi, true);
2008 return;
2010 default:
2011 break;
2014 /* FALLTHRU */
2016 case GIMPLE_ASSIGN:
2017 /* If the stmt can throw use a new temporary for the assignment
2018 to a LHS. This makes sure the old value of the LHS is
2019 available on the EH edge. Only do so for statements that
2020 potentially fall through (no noreturn calls e.g.), otherwise
2021 this new assignment might create fake fallthru regions. */
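/* Roughly, a throwing "lhs = foo ();" becomes "tmp = foo (); lhs = tmp;",
   so the old value of LHS is still available should foo throw and control
   transfer along the EH edge.  */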
2022 if (stmt_could_throw_p (stmt)
2023 && gimple_has_lhs (stmt)
2024 && gimple_stmt_may_fallthru (stmt)
2025 && !tree_could_throw_p (gimple_get_lhs (stmt))
2026 && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
2028 tree lhs = gimple_get_lhs (stmt);
2029 tree tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
2030 gimple s = gimple_build_assign (lhs, tmp);
2031 gimple_set_location (s, gimple_location (stmt));
2032 gimple_set_block (s, gimple_block (stmt));
2033 gimple_set_lhs (stmt, tmp);
2034 if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
2035 || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
2036 DECL_GIMPLE_REG_P (tmp) = 1;
2037 gsi_insert_after (gsi, s, GSI_SAME_STMT);
2039 /* Look for things that can throw exceptions, and record them. */
2040 if (state->cur_region && stmt_could_throw_p (stmt))
2042 record_stmt_eh_region (state->cur_region, stmt);
2043 note_eh_region_may_contain_throw (state->cur_region);
2045 break;
2047 case GIMPLE_COND:
2048 case GIMPLE_GOTO:
2049 case GIMPLE_RETURN:
2050 maybe_record_in_goto_queue (state, stmt);
2051 break;
2053 case GIMPLE_SWITCH:
2054 verify_norecord_switch_expr (state, stmt);
2055 break;
2057 case GIMPLE_TRY:
2058 if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
2059 replace = lower_try_finally (state, stmt);
2060 else
2062 x = gimple_seq_first_stmt (gimple_try_cleanup (stmt));
2063 if (!x)
2065 replace = gimple_try_eval (stmt);
2066 lower_eh_constructs_1 (state, &replace);
2068 else
2069 switch (gimple_code (x))
2071 case GIMPLE_CATCH:
2072 replace = lower_catch (state, stmt);
2073 break;
2074 case GIMPLE_EH_FILTER:
2075 replace = lower_eh_filter (state, stmt);
2076 break;
2077 case GIMPLE_EH_MUST_NOT_THROW:
2078 replace = lower_eh_must_not_throw (state, stmt);
2079 break;
2080 case GIMPLE_EH_ELSE:
2081 /* This code is only valid with GIMPLE_TRY_FINALLY. */
2082 gcc_unreachable ();
2083 default:
2084 replace = lower_cleanup (state, stmt);
2085 break;
2089 /* Remove the old stmt and insert the transformed sequence
2090 instead. */
2091 gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
2092 gsi_remove (gsi, true);
2094 /* Return since we don't want gsi_next () */
2095 return;
2097 case GIMPLE_EH_ELSE:
2098 /* We should be eliminating this in lower_try_finally et al. */
2099 gcc_unreachable ();
2101 default:
2102 /* A type, a decl, or some kind of statement that we're not
2103 interested in. Don't walk them. */
2104 break;
2107 gsi_next (gsi);
2110 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
2112 static void
2113 lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
2115 gimple_stmt_iterator gsi;
2116 for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
2117 lower_eh_constructs_2 (state, &gsi);
2120 namespace {
2122 const pass_data pass_data_lower_eh =
2124 GIMPLE_PASS, /* type */
2125 "eh", /* name */
2126 OPTGROUP_NONE, /* optinfo_flags */
2127 TV_TREE_EH, /* tv_id */
2128 PROP_gimple_lcf, /* properties_required */
2129 PROP_gimple_leh, /* properties_provided */
2130 0, /* properties_destroyed */
2131 0, /* todo_flags_start */
2132 0, /* todo_flags_finish */
2135 class pass_lower_eh : public gimple_opt_pass
2137 public:
2138 pass_lower_eh (gcc::context *ctxt)
2139 : gimple_opt_pass (pass_data_lower_eh, ctxt)
2142 /* opt_pass methods: */
2143 virtual unsigned int execute (function *);
2145 }; // class pass_lower_eh
2147 unsigned int
2148 pass_lower_eh::execute (function *fun)
2150 struct leh_state null_state;
2151 gimple_seq bodyp;
2153 bodyp = gimple_body (current_function_decl);
2154 if (bodyp == NULL)
2155 return 0;
2157 finally_tree = new hash_table<finally_tree_hasher> (31);
2158 eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2159 memset (&null_state, 0, sizeof (null_state));
2161 collect_finally_tree_1 (bodyp, NULL);
2162 lower_eh_constructs_1 (&null_state, &bodyp);
2163 gimple_set_body (current_function_decl, bodyp);
2165 /* We assume there's a return statement, or something, at the end of
2166 the function, and thus plopping the EH sequence afterward won't
2167 change anything. */
2168 gcc_assert (!gimple_seq_may_fallthru (bodyp));
2169 gimple_seq_add_seq (&bodyp, eh_seq);
2171 /* We assume that since BODYP already existed, adding EH_SEQ to it
2172 didn't change its value, and we don't have to re-set the function. */
2173 gcc_assert (bodyp == gimple_body (current_function_decl));
2175 delete finally_tree;
2176 finally_tree = NULL;
2177 BITMAP_FREE (eh_region_may_contain_throw_map);
2178 eh_seq = NULL;
2180 /* If this function needs a language specific EH personality routine
2181 and the frontend didn't already set one do so now. */
2182 if (function_needs_eh_personality (fun) == eh_personality_lang
2183 && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2184 DECL_FUNCTION_PERSONALITY (current_function_decl)
2185 = lang_hooks.eh_personality ();
2187 return 0;
2190 } // anon namespace
2192 gimple_opt_pass *
2193 make_pass_lower_eh (gcc::context *ctxt)
2195 return new pass_lower_eh (ctxt);
2198 /* Create the multiple edges from an EH_DISPATCH statement to all of
2199 the possible handlers for its EH region. Return true if there's
2200 no fallthru edge; false if there is. */
2202 bool
2203 make_eh_dispatch_edges (gimple stmt)
2205 eh_region r;
2206 eh_catch c;
2207 basic_block src, dst;
2209 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2210 src = gimple_bb (stmt);
2212 switch (r->type)
2214 case ERT_TRY:
2215 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2217 dst = label_to_block (c->label);
2218 make_edge (src, dst, 0);
2220 /* A catch-all handler doesn't have a fallthru. */
2221 if (c->type_list == NULL)
2222 return false;
2224 break;
2226 case ERT_ALLOWED_EXCEPTIONS:
2227 dst = label_to_block (r->u.allowed.label);
2228 make_edge (src, dst, 0);
2229 break;
2231 default:
2232 gcc_unreachable ();
2235 return true;
2238 /* Create the single EH edge from STMT to its nearest landing pad,
2239 if there is such a landing pad within the current function. */
2241 void
2242 make_eh_edges (gimple stmt)
2244 basic_block src, dst;
2245 eh_landing_pad lp;
2246 int lp_nr;
2248 lp_nr = lookup_stmt_eh_lp (stmt);
2249 if (lp_nr <= 0)
2250 return;
2252 lp = get_eh_landing_pad_from_number (lp_nr);
2253 gcc_assert (lp != NULL);
2255 src = gimple_bb (stmt);
2256 dst = label_to_block (lp->post_landing_pad);
2257 make_edge (src, dst, EDGE_EH);
2260 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2261 do not actually perform the final edge redirection.
2263 CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2264 we intend to change the destination EH region as well; this means
2265 EH_LANDING_PAD_NR must already be set on the destination block label.
2266 If false, we're being called from generic cfg manipulation code and we
2267 should preserve our place within the region tree. */
2269 static void
2270 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2272 eh_landing_pad old_lp, new_lp;
2273 basic_block old_bb;
2274 gimple throw_stmt;
2275 int old_lp_nr, new_lp_nr;
2276 tree old_label, new_label;
2277 edge_iterator ei;
2278 edge e;
2280 old_bb = edge_in->dest;
2281 old_label = gimple_block_label (old_bb);
2282 old_lp_nr = EH_LANDING_PAD_NR (old_label);
2283 gcc_assert (old_lp_nr > 0);
2284 old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2286 throw_stmt = last_stmt (edge_in->src);
2287 gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2289 new_label = gimple_block_label (new_bb);
2291 /* Look for an existing region that might be using NEW_BB already. */
2292 new_lp_nr = EH_LANDING_PAD_NR (new_label);
2293 if (new_lp_nr)
2295 new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2296 gcc_assert (new_lp);
2298 /* Unless CHANGE_REGION is true, the new and old landing pad
2299 had better be associated with the same EH region. */
2300 gcc_assert (change_region || new_lp->region == old_lp->region);
2302 else
2304 new_lp = NULL;
2305 gcc_assert (!change_region);
2308 /* Notice when we redirect the last EH edge away from OLD_BB. */
2309 FOR_EACH_EDGE (e, ei, old_bb->preds)
2310 if (e != edge_in && (e->flags & EDGE_EH))
2311 break;
2313 if (new_lp)
2315 /* NEW_LP already exists. If there are still edges into OLD_LP,
2316 there's nothing to do with the EH tree. If there are no more
2317 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2318 If CHANGE_REGION is true, then our caller is expecting to remove
2319 the landing pad. */
2320 if (e == NULL && !change_region)
2321 remove_eh_landing_pad (old_lp);
2323 else
2325 /* No correct landing pad exists. If there are no more edges
2326 into OLD_LP, then we can simply re-use the existing landing pad.
2327 Otherwise, we have to create a new landing pad. */
2328 if (e == NULL)
2330 EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2331 new_lp = old_lp;
2333 else
2334 new_lp = gen_eh_landing_pad (old_lp->region);
2335 new_lp->post_landing_pad = new_label;
2336 EH_LANDING_PAD_NR (new_label) = new_lp->index;
2339 /* Maybe move the throwing statement to the new region. */
2340 if (old_lp != new_lp)
2342 remove_stmt_from_eh_lp (throw_stmt);
2343 add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2347 /* Redirect EH edge E to NEW_BB. */
2349 edge
2350 redirect_eh_edge (edge edge_in, basic_block new_bb)
2352 redirect_eh_edge_1 (edge_in, new_bb, false);
2353 return ssa_redirect_edge (edge_in, new_bb);
2356 /* This is a subroutine of gimple_redirect_edge_and_branch. Update the
2357 labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2358 The actual edge update will happen in the caller. */
2360 void
2361 redirect_eh_dispatch_edge (gimple stmt, edge e, basic_block new_bb)
2363 tree new_lab = gimple_block_label (new_bb);
2364 bool any_changed = false;
2365 basic_block old_bb;
2366 eh_region r;
2367 eh_catch c;
2369 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2370 switch (r->type)
2372 case ERT_TRY:
2373 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2375 old_bb = label_to_block (c->label);
2376 if (old_bb == e->dest)
2378 c->label = new_lab;
2379 any_changed = true;
2382 break;
2384 case ERT_ALLOWED_EXCEPTIONS:
2385 old_bb = label_to_block (r->u.allowed.label);
2386 gcc_assert (old_bb == e->dest);
2387 r->u.allowed.label = new_lab;
2388 any_changed = true;
2389 break;
2391 default:
2392 gcc_unreachable ();
2395 gcc_assert (any_changed);
2398 /* Helper function for operation_could_trap_p and stmt_could_throw_p. */
2400 bool
2401 operation_could_trap_helper_p (enum tree_code op,
2402 bool fp_operation,
2403 bool honor_trapv,
2404 bool honor_nans,
2405 bool honor_snans,
2406 tree divisor,
2407 bool *handled)
2409 *handled = true;
2410 switch (op)
2412 case TRUNC_DIV_EXPR:
2413 case CEIL_DIV_EXPR:
2414 case FLOOR_DIV_EXPR:
2415 case ROUND_DIV_EXPR:
2416 case EXACT_DIV_EXPR:
2417 case CEIL_MOD_EXPR:
2418 case FLOOR_MOD_EXPR:
2419 case ROUND_MOD_EXPR:
2420 case TRUNC_MOD_EXPR:
2421 case RDIV_EXPR:
2422 if (honor_snans || honor_trapv)
2423 return true;
2424 if (fp_operation)
2425 return flag_trapping_math;
2426 if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2427 return true;
2428 return false;
2430 case LT_EXPR:
2431 case LE_EXPR:
2432 case GT_EXPR:
2433 case GE_EXPR:
2434 case LTGT_EXPR:
2435 /* Some floating point comparisons may trap. */
2436 return honor_nans;
2438 case EQ_EXPR:
2439 case NE_EXPR:
2440 case UNORDERED_EXPR:
2441 case ORDERED_EXPR:
2442 case UNLT_EXPR:
2443 case UNLE_EXPR:
2444 case UNGT_EXPR:
2445 case UNGE_EXPR:
2446 case UNEQ_EXPR:
2447 return honor_snans;
2449 case CONVERT_EXPR:
2450 case FIX_TRUNC_EXPR:
2451 /* Conversion of floating point might trap. */
2452 return honor_nans;
2454 case NEGATE_EXPR:
2455 case ABS_EXPR:
2456 case CONJ_EXPR:
2457 /* These operations don't trap with floating point. */
2458 if (honor_trapv)
2459 return true;
2460 return false;
2462 case PLUS_EXPR:
2463 case MINUS_EXPR:
2464 case MULT_EXPR:
2465 /* Any floating arithmetic may trap. */
2466 if (fp_operation && flag_trapping_math)
2467 return true;
2468 if (honor_trapv)
2469 return true;
2470 return false;
2472 case COMPLEX_EXPR:
2473 case CONSTRUCTOR:
2474 /* Constructing an object cannot trap. */
2475 return false;
2477 default:
2478 /* Any floating arithmetic may trap. */
2479 if (fp_operation && flag_trapping_math)
2480 return true;
2482 *handled = false;
2483 return false;
2487 /* Return true if operation OP may trap. FP_OPERATION is true if OP is applied
2488 to floating-point values. HONOR_TRAPV is true if OP is applied to integer
2489 type operands that may trap. If OP is a division operator, DIVISOR contains
2490 the value of the divisor. */
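/* Illustrative example: operation_could_trap_p (TRUNC_DIV_EXPR, false,
   false, integer_zero_node) returns true, because a constant zero divisor
   is treated as trapping by the helper above.  */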
2492 bool
2493 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2494 tree divisor)
2496 bool honor_nans = (fp_operation && flag_trapping_math
2497 && !flag_finite_math_only);
2498 bool honor_snans = fp_operation && flag_signaling_nans != 0;
2499 bool handled;
2501 if (TREE_CODE_CLASS (op) != tcc_comparison
2502 && TREE_CODE_CLASS (op) != tcc_unary
2503 && TREE_CODE_CLASS (op) != tcc_binary)
2504 return false;
2506 return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2507 honor_nans, honor_snans, divisor,
2508 &handled);
2512 /* Returns true if it is possible to prove that the index of
2513 an array access REF (an ARRAY_REF expression) falls into the
2514 array bounds. */
2516 static bool
2517 in_array_bounds_p (tree ref)
2519 tree idx = TREE_OPERAND (ref, 1);
2520 tree min, max;
2522 if (TREE_CODE (idx) != INTEGER_CST)
2523 return false;
2525 min = array_ref_low_bound (ref);
2526 max = array_ref_up_bound (ref);
2527 if (!min
2528 || !max
2529 || TREE_CODE (min) != INTEGER_CST
2530 || TREE_CODE (max) != INTEGER_CST)
2531 return false;
2533 if (tree_int_cst_lt (idx, min)
2534 || tree_int_cst_lt (max, idx))
2535 return false;
2537 return true;
2540 /* Returns true if it is possible to prove that the range of
2541 an array access REF (an ARRAY_RANGE_REF expression) falls
2542 into the array bounds. */
2544 static bool
2545 range_in_array_bounds_p (tree ref)
2547 tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
2548 tree range_min, range_max, min, max;
2550 range_min = TYPE_MIN_VALUE (domain_type);
2551 range_max = TYPE_MAX_VALUE (domain_type);
2552 if (!range_min
2553 || !range_max
2554 || TREE_CODE (range_min) != INTEGER_CST
2555 || TREE_CODE (range_max) != INTEGER_CST)
2556 return false;
2558 min = array_ref_low_bound (ref);
2559 max = array_ref_up_bound (ref);
2560 if (!min
2561 || !max
2562 || TREE_CODE (min) != INTEGER_CST
2563 || TREE_CODE (max) != INTEGER_CST)
2564 return false;
2566 if (tree_int_cst_lt (range_min, min)
2567 || tree_int_cst_lt (max, range_max))
2568 return false;
2570 return true;
2573 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2574 location or floating point arithmetic. Cf. the rtl version, may_trap_p.
2575 This routine expects only GIMPLE lhs or rhs input. */
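/* Illustrative examples: an ARRAY_REF whose base cannot trap is considered
   trapping only when in_array_bounds_p below fails to prove the constant
   index in range; a MEM_REF through an arbitrary pointer is considered
   trapping unless TREE_THIS_NOTRAP is set on it.  */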
2577 bool
2578 tree_could_trap_p (tree expr)
2580 enum tree_code code;
2581 bool fp_operation = false;
2582 bool honor_trapv = false;
2583 tree t, base, div = NULL_TREE;
2585 if (!expr)
2586 return false;
2588 code = TREE_CODE (expr);
2589 t = TREE_TYPE (expr);
2591 if (t)
2593 if (COMPARISON_CLASS_P (expr))
2594 fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2595 else
2596 fp_operation = FLOAT_TYPE_P (t);
2597 honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2600 if (TREE_CODE_CLASS (code) == tcc_binary)
2601 div = TREE_OPERAND (expr, 1);
2602 if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2603 return true;
2605 restart:
2606 switch (code)
2608 case COMPONENT_REF:
2609 case REALPART_EXPR:
2610 case IMAGPART_EXPR:
2611 case BIT_FIELD_REF:
2612 case VIEW_CONVERT_EXPR:
2613 case WITH_SIZE_EXPR:
2614 expr = TREE_OPERAND (expr, 0);
2615 code = TREE_CODE (expr);
2616 goto restart;
2618 case ARRAY_RANGE_REF:
2619 base = TREE_OPERAND (expr, 0);
2620 if (tree_could_trap_p (base))
2621 return true;
2622 if (TREE_THIS_NOTRAP (expr))
2623 return false;
2624 return !range_in_array_bounds_p (expr);
2626 case ARRAY_REF:
2627 base = TREE_OPERAND (expr, 0);
2628 if (tree_could_trap_p (base))
2629 return true;
2630 if (TREE_THIS_NOTRAP (expr))
2631 return false;
2632 return !in_array_bounds_p (expr);
2634 case TARGET_MEM_REF:
2635 case MEM_REF:
2636 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
2637 && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
2638 return true;
2639 if (TREE_THIS_NOTRAP (expr))
2640 return false;
2641 /* We cannot prove that the access is in-bounds when we have
2642 variable-index TARGET_MEM_REFs. */
2643 if (code == TARGET_MEM_REF
2644 && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
2645 return true;
2646 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2648 tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
2649 offset_int off = mem_ref_offset (expr);
2650 if (wi::neg_p (off, SIGNED))
2651 return true;
2652 if (TREE_CODE (base) == STRING_CST)
2653 return wi::leu_p (TREE_STRING_LENGTH (base), off);
2654 else if (DECL_SIZE_UNIT (base) == NULL_TREE
2655 || TREE_CODE (DECL_SIZE_UNIT (base)) != INTEGER_CST
2656 || wi::leu_p (wi::to_offset (DECL_SIZE_UNIT (base)), off))
2657 return true;
2658 /* Now we are sure the first byte of the access is inside
2659 the object. */
2660 return false;
2662 return true;
2664 case INDIRECT_REF:
2665 return !TREE_THIS_NOTRAP (expr);
2667 case ASM_EXPR:
2668 return TREE_THIS_VOLATILE (expr);
2670 case CALL_EXPR:
2671 t = get_callee_fndecl (expr);
2672 /* Assume that calls to weak functions may trap. */
2673 if (!t || !DECL_P (t))
2674 return true;
2675 if (DECL_WEAK (t))
2676 return tree_could_trap_p (t);
2677 return false;
2679 case FUNCTION_DECL:
2680 /* Assume that accesses to weak functions may trap, unless we know
2681 they are certainly defined in current TU or in some other
2682 LTO partition. */
2683 if (DECL_WEAK (expr) && !DECL_COMDAT (expr))
2685 struct cgraph_node *node;
2686 if (!DECL_EXTERNAL (expr))
2687 return false;
2688 node = cgraph_node::get (expr)->function_symbol ();
2689 if (node && node->in_other_partition)
2690 return false;
2691 return true;
2693 return false;
2695 case VAR_DECL:
2696 /* Assume that accesses to weak vars may trap, unless we know
2697 they are certainly defined in current TU or in some other
2698 LTO partition. */
2699 if (DECL_WEAK (expr) && !DECL_COMDAT (expr))
2701 varpool_node *node;
2702 if (!DECL_EXTERNAL (expr))
2703 return false;
2704 node = varpool_node::get (expr)->ultimate_alias_target ();
2705 if (node && node->in_other_partition)
2706 return false;
2707 return true;
2709 return false;
2711 default:
2712 return false;
2717 /* Helper for stmt_could_throw_p. Return true if STMT (assumed to be
2718 an assignment or a conditional) may throw. */
2720 static bool
2721 stmt_could_throw_1_p (gimple stmt)
2723 enum tree_code code = gimple_expr_code (stmt);
2724 bool honor_nans = false;
2725 bool honor_snans = false;
2726 bool fp_operation = false;
2727 bool honor_trapv = false;
2728 tree t;
2729 size_t i;
2730 bool handled, ret;
2732 if (TREE_CODE_CLASS (code) == tcc_comparison
2733 || TREE_CODE_CLASS (code) == tcc_unary
2734 || TREE_CODE_CLASS (code) == tcc_binary)
2736 if (is_gimple_assign (stmt)
2737 && TREE_CODE_CLASS (code) == tcc_comparison)
2738 t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2739 else if (gimple_code (stmt) == GIMPLE_COND)
2740 t = TREE_TYPE (gimple_cond_lhs (stmt));
2741 else
2742 t = gimple_expr_type (stmt);
2743 fp_operation = FLOAT_TYPE_P (t);
2744 if (fp_operation)
2746 honor_nans = flag_trapping_math && !flag_finite_math_only;
2747 honor_snans = flag_signaling_nans != 0;
2749 else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2750 honor_trapv = true;
2753 /* Check if the main expression may trap. */
2754 t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
2755 ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2756 honor_nans, honor_snans, t,
2757 &handled);
2758 if (handled)
2759 return ret;
2761 /* If the expression does not trap, see if any of the individual operands may
2762 trap. */
2763 for (i = 0; i < gimple_num_ops (stmt); i++)
2764 if (tree_could_trap_p (gimple_op (stmt, i)))
2765 return true;
2767 return false;
2771 /* Return true if statement STMT could throw an exception. */
2773 bool
2774 stmt_could_throw_p (gimple stmt)
2776 if (!flag_exceptions)
2777 return false;
2779 /* The only statements that can throw an exception are assignments,
2780 conditionals, calls, resx, and asms. */
2781 switch (gimple_code (stmt))
2783 case GIMPLE_RESX:
2784 return true;
2786 case GIMPLE_CALL:
2787 return !gimple_call_nothrow_p (stmt);
2789 case GIMPLE_ASSIGN:
2790 case GIMPLE_COND:
2791 if (!cfun->can_throw_non_call_exceptions)
2792 return false;
2793 return stmt_could_throw_1_p (stmt);
2795 case GIMPLE_ASM:
2796 if (!cfun->can_throw_non_call_exceptions)
2797 return false;
2798 return gimple_asm_volatile_p (stmt);
2800 default:
2801 return false;
2806 /* Return true if expression T could throw an exception. */
2808 bool
2809 tree_could_throw_p (tree t)
2811 if (!flag_exceptions)
2812 return false;
2813 if (TREE_CODE (t) == MODIFY_EXPR)
2815 if (cfun->can_throw_non_call_exceptions
2816 && tree_could_trap_p (TREE_OPERAND (t, 0)))
2817 return true;
2818 t = TREE_OPERAND (t, 1);
2821 if (TREE_CODE (t) == WITH_SIZE_EXPR)
2822 t = TREE_OPERAND (t, 0);
2823 if (TREE_CODE (t) == CALL_EXPR)
2824 return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2825 if (cfun->can_throw_non_call_exceptions)
2826 return tree_could_trap_p (t);
2827 return false;
2830 /* Return true if STMT can throw an exception that is not caught within
2831 the current function (CFUN). */
2833 bool
2834 stmt_can_throw_external (gimple stmt)
2836 int lp_nr;
2838 if (!stmt_could_throw_p (stmt))
2839 return false;
2841 lp_nr = lookup_stmt_eh_lp (stmt);
2842 return lp_nr == 0;
2845 /* Return true if STMT can throw an exception that is caught within
2846 the current function (CFUN). */
2848 bool
2849 stmt_can_throw_internal (gimple stmt)
2851 int lp_nr;
2853 if (!stmt_could_throw_p (stmt))
2854 return false;
2856 lp_nr = lookup_stmt_eh_lp (stmt);
2857 return lp_nr > 0;
2860 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
2861 remove any entry it might have from the EH table. Return true if
2862 any change was made. */
2864 bool
2865 maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
2867 if (stmt_could_throw_p (stmt))
2868 return false;
2869 return remove_stmt_from_eh_lp_fn (ifun, stmt);
2872 /* Likewise, but always use the current function. */
2874 bool
2875 maybe_clean_eh_stmt (gimple stmt)
2877 return maybe_clean_eh_stmt_fn (cfun, stmt);
2880 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
2881 OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
2882 in the table if it should be in there. Return TRUE if a replacement was
2883 done that may require an EH edge purge. */
2885 bool
2886 maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
2888 int lp_nr = lookup_stmt_eh_lp (old_stmt);
2890 if (lp_nr != 0)
2892 bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);
2894 if (new_stmt == old_stmt && new_stmt_could_throw)
2895 return false;
2897 remove_stmt_from_eh_lp (old_stmt);
2898 if (new_stmt_could_throw)
2900 add_stmt_to_eh_lp (new_stmt, lp_nr);
2901 return false;
2903 else
2904 return true;
2907 return false;
2910 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
2911 in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT. The MAP
2912 operand is the return value of duplicate_eh_regions. */
2914 bool
2915 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
2916 struct function *old_fun, gimple old_stmt,
2917 hash_map<void *, void *> *map,
2918 int default_lp_nr)
2920 int old_lp_nr, new_lp_nr;
2922 if (!stmt_could_throw_p (new_stmt))
2923 return false;
2925 old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
2926 if (old_lp_nr == 0)
2928 if (default_lp_nr == 0)
2929 return false;
2930 new_lp_nr = default_lp_nr;
2932 else if (old_lp_nr > 0)
2934 eh_landing_pad old_lp, new_lp;
2936 old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
2937 new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
2938 new_lp_nr = new_lp->index;
2940 else
2942 eh_region old_r, new_r;
2944 old_r = (*old_fun->eh->region_array)[-old_lp_nr];
2945 new_r = static_cast<eh_region> (*map->get (old_r));
2946 new_lp_nr = -new_r->index;
2949 add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
2950 return true;
2953 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
2954 and thus no remapping is required. */
2956 bool
2957 maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
2959 int lp_nr;
2961 if (!stmt_could_throw_p (new_stmt))
2962 return false;
2964 lp_nr = lookup_stmt_eh_lp (old_stmt);
2965 if (lp_nr == 0)
2966 return false;
2968 add_stmt_to_eh_lp (new_stmt, lp_nr);
2969 return true;
2972 /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
2973 GIMPLE_TRY) that are similar enough to be considered the same. Currently
2974 this only handles handlers consisting of a single call, as that's the
2975 important case for C++: a destructor call for a particular object showing
2976 up in multiple handlers. */
2978 static bool
2979 same_handler_p (gimple_seq oneh, gimple_seq twoh)
2981 gimple_stmt_iterator gsi;
2982 gimple ones, twos;
2983 unsigned int ai;
2985 gsi = gsi_start (oneh);
2986 if (!gsi_one_before_end_p (gsi))
2987 return false;
2988 ones = gsi_stmt (gsi);
2990 gsi = gsi_start (twoh);
2991 if (!gsi_one_before_end_p (gsi))
2992 return false;
2993 twos = gsi_stmt (gsi);
2995 if (!is_gimple_call (ones)
2996 || !is_gimple_call (twos)
2997 || gimple_call_lhs (ones)
2998 || gimple_call_lhs (twos)
2999 || gimple_call_chain (ones)
3000 || gimple_call_chain (twos)
3001 || !gimple_call_same_target_p (ones, twos)
3002 || gimple_call_num_args (ones) != gimple_call_num_args (twos))
3003 return false;
3005 for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
3006 if (!operand_equal_p (gimple_call_arg (ones, ai),
3007 gimple_call_arg (twos, ai), 0))
3008 return false;
3010 return true;
3013 /* Optimize
3014 try { A() } finally { try { ~B() } catch { ~A() } }
3015 try { ... } finally { ~A() }
3016 into
3017 try { A() } catch { ~B() }
3018 try { ~B() ... } finally { ~A() }
3020 This occurs frequently in C++, where A is a local variable and B is a
3021 temporary used in the initializer for A. */
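/* One hypothetical C++ source pattern with this shape is a local
   declaration like "A a = B ();": the temporary B is destroyed after a's
   initialization and a has its own destructor for the enclosing scope,
   yielding the nested try constructs matched above.  */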
3023 static void
3024 optimize_double_finally (gimple one, gimple two)
3026 gimple oneh;
3027 gimple_stmt_iterator gsi;
3028 gimple_seq cleanup;
3030 cleanup = gimple_try_cleanup (one);
3031 gsi = gsi_start (cleanup);
3032 if (!gsi_one_before_end_p (gsi))
3033 return;
3035 oneh = gsi_stmt (gsi);
3036 if (gimple_code (oneh) != GIMPLE_TRY
3037 || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
3038 return;
3040 if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
3042 gimple_seq seq = gimple_try_eval (oneh);
3044 gimple_try_set_cleanup (one, seq);
3045 gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
3046 seq = copy_gimple_seq_and_replace_locals (seq);
3047 gimple_seq_add_seq (&seq, gimple_try_eval (two));
3048 gimple_try_set_eval (two, seq);
3052 /* Perform EH refactoring optimizations that are simpler to do when code
3053 flow has been lowered but EH structures haven't. */
3055 static void
3056 refactor_eh_r (gimple_seq seq)
3058 gimple_stmt_iterator gsi;
3059 gimple one, two;
3061 one = NULL;
3062 two = NULL;
3063 gsi = gsi_start (seq);
3064 while (1)
3066 one = two;
3067 if (gsi_end_p (gsi))
3068 two = NULL;
3069 else
3070 two = gsi_stmt (gsi);
3071 if (one
3072 && two
3073 && gimple_code (one) == GIMPLE_TRY
3074 && gimple_code (two) == GIMPLE_TRY
3075 && gimple_try_kind (one) == GIMPLE_TRY_FINALLY
3076 && gimple_try_kind (two) == GIMPLE_TRY_FINALLY)
3077 optimize_double_finally (one, two);
3078 if (one)
3079 switch (gimple_code (one))
3081 case GIMPLE_TRY:
3082 refactor_eh_r (gimple_try_eval (one));
3083 refactor_eh_r (gimple_try_cleanup (one));
3084 break;
3085 case GIMPLE_CATCH:
3086 refactor_eh_r (gimple_catch_handler (one));
3087 break;
3088 case GIMPLE_EH_FILTER:
3089 refactor_eh_r (gimple_eh_filter_failure (one));
3090 break;
3091 case GIMPLE_EH_ELSE:
3092 refactor_eh_r (gimple_eh_else_n_body (one));
3093 refactor_eh_r (gimple_eh_else_e_body (one));
3094 break;
3095 default:
3096 break;
3098 if (two)
3099 gsi_next (&gsi);
3100 else
3101 break;
3105 namespace {
3107 const pass_data pass_data_refactor_eh =
3109 GIMPLE_PASS, /* type */
3110 "ehopt", /* name */
3111 OPTGROUP_NONE, /* optinfo_flags */
3112 TV_TREE_EH, /* tv_id */
3113 PROP_gimple_lcf, /* properties_required */
3114 0, /* properties_provided */
3115 0, /* properties_destroyed */
3116 0, /* todo_flags_start */
3117 0, /* todo_flags_finish */
3120 class pass_refactor_eh : public gimple_opt_pass
3122 public:
3123 pass_refactor_eh (gcc::context *ctxt)
3124 : gimple_opt_pass (pass_data_refactor_eh, ctxt)
3127 /* opt_pass methods: */
3128 virtual bool gate (function *) { return flag_exceptions != 0; }
3129 virtual unsigned int execute (function *)
3131 refactor_eh_r (gimple_body (current_function_decl));
3132 return 0;
3135 }; // class pass_refactor_eh
3137 } // anon namespace
3139 gimple_opt_pass *
3140 make_pass_refactor_eh (gcc::context *ctxt)
3142 return new pass_refactor_eh (ctxt);
3145 /* At the end of gimple optimization, we can lower RESX. */
3147 static bool
3148 lower_resx (basic_block bb, gimple stmt, hash_map<eh_region, tree> *mnt_map)
3150 int lp_nr;
3151 eh_region src_r, dst_r;
3152 gimple_stmt_iterator gsi;
3153 gimple x;
3154 tree fn, src_nr;
3155 bool ret = false;
3157 lp_nr = lookup_stmt_eh_lp (stmt);
3158 if (lp_nr != 0)
3159 dst_r = get_eh_region_from_lp_number (lp_nr);
3160 else
3161 dst_r = NULL;
3163 src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3164 gsi = gsi_last_bb (bb);
3166 if (src_r == NULL)
3168 /* We can wind up with no source region when pass_cleanup_eh shows
3169 that there are no entries into an eh region and deletes it, but
3170 then the block that contains the resx isn't removed. This can
3171 happen without optimization when the switch statement created by
3172 lower_try_finally_switch isn't simplified to remove the eh case.
3174 Resolve this by expanding the resx node to an abort. */
3176 fn = builtin_decl_implicit (BUILT_IN_TRAP);
3177 x = gimple_build_call (fn, 0);
3178 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3180 while (EDGE_COUNT (bb->succs) > 0)
3181 remove_edge (EDGE_SUCC (bb, 0));
3183 else if (dst_r)
3185 /* When we have a destination region, we resolve this by copying
3186 the exception pointer and filter values into place, and changing the edge
3187 to immediately after the landing pad. */
3188 edge e;
3190 if (lp_nr < 0)
3192 basic_block new_bb;
3193 tree lab;
3195 /* We are resuming into a MUST_NOT_THROW region. Expand a call to
3196 the failure decl into a new block, if needed. */
3197 gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3199 tree *slot = mnt_map->get (dst_r);
3200 if (slot == NULL)
3202 gimple_stmt_iterator gsi2;
3204 new_bb = create_empty_bb (bb);
3205 add_bb_to_loop (new_bb, bb->loop_father);
3206 lab = gimple_block_label (new_bb);
3207 gsi2 = gsi_start_bb (new_bb);
3209 fn = dst_r->u.must_not_throw.failure_decl;
3210 x = gimple_build_call (fn, 0);
3211 gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3212 gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3214 mnt_map->put (dst_r, lab);
3216 else
3218 lab = *slot;
3219 new_bb = label_to_block (lab);
3222 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3223 e = make_edge (bb, new_bb, EDGE_FALLTHRU);
3224 e->count = bb->count;
3225 e->probability = REG_BR_PROB_BASE;
3227 else
3229 edge_iterator ei;
3230 tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3232 fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3233 src_nr = build_int_cst (integer_type_node, src_r->index);
3234 x = gimple_build_call (fn, 2, dst_nr, src_nr);
3235 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3237 /* Update the flags for the outgoing edge. */
3238 e = single_succ_edge (bb);
3239 gcc_assert (e->flags & EDGE_EH);
3240 e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3242 /* If there are no more EH users of the landing pad, delete it. */
3243 FOR_EACH_EDGE (e, ei, e->dest->preds)
3244 if (e->flags & EDGE_EH)
3245 break;
3246 if (e == NULL)
3248 eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3249 remove_eh_landing_pad (lp);
3253 ret = true;
3255 else
3257 tree var;
3259 /* When we don't have a destination region, this exception escapes
3260 up the call chain. We resolve this by generating a call to the
3261 _Unwind_Resume library function. */
3263 /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3264 with no arguments for C++ and Java. Check for that. */
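/* Illustrative shape of the sequence emitted in the non-EABI branch below:
     tmp = __builtin_eh_pointer (src_nr);
     _Unwind_Resume (tmp);  */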
3265 if (src_r->use_cxa_end_cleanup)
3267 fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
3268 x = gimple_build_call (fn, 0);
3269 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3271 else
3273 fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
3274 src_nr = build_int_cst (integer_type_node, src_r->index);
3275 x = gimple_build_call (fn, 1, src_nr);
3276 var = create_tmp_var (ptr_type_node, NULL);
3277 var = make_ssa_name (var, x);
3278 gimple_call_set_lhs (x, var);
3279 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3281 fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
3282 x = gimple_build_call (fn, 1, var);
3283 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3286 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3289 gsi_remove (&gsi, true);
3291 return ret;
3294 namespace {
3296 const pass_data pass_data_lower_resx =
3298 GIMPLE_PASS, /* type */
3299 "resx", /* name */
3300 OPTGROUP_NONE, /* optinfo_flags */
3301 TV_TREE_EH, /* tv_id */
3302 PROP_gimple_lcf, /* properties_required */
3303 0, /* properties_provided */
3304 0, /* properties_destroyed */
3305 0, /* todo_flags_start */
3306 0, /* todo_flags_finish */
3309 class pass_lower_resx : public gimple_opt_pass
3311 public:
3312 pass_lower_resx (gcc::context *ctxt)
3313 : gimple_opt_pass (pass_data_lower_resx, ctxt)
3316 /* opt_pass methods: */
3317 virtual bool gate (function *) { return flag_exceptions != 0; }
3318 virtual unsigned int execute (function *);
3320 }; // class pass_lower_resx
3322 unsigned
3323 pass_lower_resx::execute (function *fun)
3325 basic_block bb;
3326 bool dominance_invalidated = false;
3327 bool any_rewritten = false;
3329 hash_map<eh_region, tree> mnt_map;
3331 FOR_EACH_BB_FN (bb, fun)
3333 gimple last = last_stmt (bb);
3334 if (last && is_gimple_resx (last))
3336 dominance_invalidated |= lower_resx (bb, last, &mnt_map);
3337 any_rewritten = true;
3341 if (dominance_invalidated)
3343 free_dominance_info (CDI_DOMINATORS);
3344 free_dominance_info (CDI_POST_DOMINATORS);
3347 return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3350 } // anon namespace
3352 gimple_opt_pass *
3353 make_pass_lower_resx (gcc::context *ctxt)
3355 return new pass_lower_resx (ctxt);
3358 /* Try to optimize var = {v} {CLOBBER} stmts followed just by
3359 external throw. */
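/* Editorial note: because the exception escapes the function, no handler in
   this function can observe the clobbered variables, so the clobber
   statements are simply deleted rather than sunk.  */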
3361 static void
3362 optimize_clobbers (basic_block bb)
3364 gimple_stmt_iterator gsi = gsi_last_bb (bb);
3365 bool any_clobbers = false;
3366 bool seen_stack_restore = false;
3367 edge_iterator ei;
3368 edge e;
3370 /* Only optimize anything if the bb contains at least one clobber,
3371 ends with resx (checked by caller), optionally contains some
3372 debug stmts or labels, or at most one __builtin_stack_restore
3373 call, and has an incoming EH edge. */
3374 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3376 gimple stmt = gsi_stmt (gsi);
3377 if (is_gimple_debug (stmt))
3378 continue;
3379 if (gimple_clobber_p (stmt))
3381 any_clobbers = true;
3382 continue;
3384 if (!seen_stack_restore
3385 && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
3387 seen_stack_restore = true;
3388 continue;
3390 if (gimple_code (stmt) == GIMPLE_LABEL)
3391 break;
3392 return;
3394 if (!any_clobbers)
3395 return;
3396 FOR_EACH_EDGE (e, ei, bb->preds)
3397 if (e->flags & EDGE_EH)
3398 break;
3399 if (e == NULL)
3400 return;
3401 gsi = gsi_last_bb (bb);
3402 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3404 gimple stmt = gsi_stmt (gsi);
3405 if (!gimple_clobber_p (stmt))
3406 continue;
3407 unlink_stmt_vdef (stmt);
3408 gsi_remove (&gsi, true);
3409 release_defs (stmt);
3413 /* Try to sink var = {v} {CLOBBER} stmts followed just by
3414 internal throw to successor BB. */
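/* Editorial note: here the exception is handled within the function, so the
   clobbers are moved after the labels of the single EH successor instead of
   being dropped; most of the work below is keeping the virtual operands
   (VUSE/VDEF chains) consistent across the move.  */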
3416 static int
3417 sink_clobbers (basic_block bb)
3419 edge e;
3420 edge_iterator ei;
3421 gimple_stmt_iterator gsi, dgsi;
3422 basic_block succbb;
3423 bool any_clobbers = false;
3424 unsigned todo = 0;
3426 /* Only optimize if BB has a single EH successor and
3427 all predecessor edges are EH too. */
3428 if (!single_succ_p (bb)
3429 || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
3430 return 0;
3432 FOR_EACH_EDGE (e, ei, bb->preds)
3434 if ((e->flags & EDGE_EH) == 0)
3435 return 0;
3438 /* And BB contains only CLOBBER stmts before the final
3439 RESX. */
3440 gsi = gsi_last_bb (bb);
3441 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3443 gimple stmt = gsi_stmt (gsi);
3444 if (is_gimple_debug (stmt))
3445 continue;
3446 if (gimple_code (stmt) == GIMPLE_LABEL)
3447 break;
3448 if (!gimple_clobber_p (stmt))
3449 return 0;
3450 any_clobbers = true;
3452 if (!any_clobbers)
3453 return 0;
3455 edge succe = single_succ_edge (bb);
3456 succbb = succe->dest;
3458 /* See if there is a virtual PHI node to take an updated virtual
3459 operand from. */
3460 gimple vphi = NULL;
3461 tree vuse = NULL_TREE;
3462 for (gsi = gsi_start_phis (succbb); !gsi_end_p (gsi); gsi_next (&gsi))
3464 tree res = gimple_phi_result (gsi_stmt (gsi));
3465 if (virtual_operand_p (res))
3467 vphi = gsi_stmt (gsi);
3468 vuse = res;
3469 break;
3473 dgsi = gsi_after_labels (succbb);
3474 gsi = gsi_last_bb (bb);
3475 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3477 gimple stmt = gsi_stmt (gsi);
3478 tree lhs;
3479 if (is_gimple_debug (stmt))
3480 continue;
3481 if (gimple_code (stmt) == GIMPLE_LABEL)
3482 break;
3483 lhs = gimple_assign_lhs (stmt);
3484 /* Unfortunately we don't have dominance info updated at this
3485 point, so checking if
3486 dominated_by_p (CDI_DOMINATORS, succbb,
3487 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0)))
3488 would be too costly. Thus, avoid sinking any clobbers that
3489 refer to non-(D) SSA_NAMEs. */
3490 if (TREE_CODE (lhs) == MEM_REF
3491 && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
3492 && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
3494 unlink_stmt_vdef (stmt);
3495 gsi_remove (&gsi, true);
3496 release_defs (stmt);
3497 continue;
3500 /* As we do not change stmt order when sinking across a
3501 forwarder edge we can keep virtual operands in place. */
3502 gsi_remove (&gsi, false);
3503 gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
3505 /* But adjust virtual operands if we sunk across a PHI node. */
3506 if (vuse)
3508 gimple use_stmt;
3509 imm_use_iterator iter;
3510 use_operand_p use_p;
3511 FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
3512 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3513 SET_USE (use_p, gimple_vdef (stmt));
3514 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
3516 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
3517 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
3519 /* Adjust the incoming virtual operand. */
3520 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
3521 SET_USE (gimple_vuse_op (stmt), vuse);
3523 /* If there isn't a single predecessor but no virtual PHI node
3524 arrange for virtual operands to be renamed. */
3525 else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
3526 && !single_pred_p (succbb))
3528 /* In this case there will be no use of the VDEF of this stmt.
3529 ??? Unless this is a secondary opportunity and we have not
3530 removed unreachable blocks yet, so we cannot assert this.
3531 Which also means we will end up renaming too many times. */
3532 SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
3533 mark_virtual_operands_for_renaming (cfun);
3534 todo |= TODO_update_ssa_only_virtuals;
3538 return todo;
3541 /* At the end of inlining, we can lower EH_DISPATCH. Return true when
3542 we have found some duplicate labels and removed some edges. */
3544 static bool
3545 lower_eh_dispatch (basic_block src, gimple stmt)
3547 gimple_stmt_iterator gsi;
3548 int region_nr;
3549 eh_region r;
3550 tree filter, fn;
3551 gimple x;
3552 bool redirected = false;
3554 region_nr = gimple_eh_dispatch_region (stmt);
3555 r = get_eh_region_from_number (region_nr);
3557 gsi = gsi_last_bb (src);
3559 switch (r->type)
3561 case ERT_TRY:
3563 auto_vec<tree> labels;
3564 tree default_label = NULL;
3565 eh_catch c;
3566 edge_iterator ei;
3567 edge e;
3568 hash_set<tree> seen_values;
3570 /* Collect the labels for a switch. Zero the post_landing_pad
3571 field because we'll no longer have anything keeping these labels
3572 in existence and the optimizer will be free to merge these
3573 blocks at will. */
3574 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3576 tree tp_node, flt_node, lab = c->label;
3577 bool have_label = false;
3579 c->label = NULL;
3580 tp_node = c->type_list;
3581 flt_node = c->filter_list;
3583 if (tp_node == NULL)
3585 default_label = lab;
3586 break;
3590 /* Filter out duplicate labels that arise when this handler
3591 is shadowed by an earlier one. When no labels are
3592 attached to the handler anymore, we remove
3593 the corresponding edge and then we delete unreachable
3594 blocks at the end of this pass. */
3595 if (! seen_values.contains (TREE_VALUE (flt_node)))
3597 tree t = build_case_label (TREE_VALUE (flt_node),
3598 NULL, lab);
3599 labels.safe_push (t);
3600 seen_values.add (TREE_VALUE (flt_node));
3601 have_label = true;
3604 tp_node = TREE_CHAIN (tp_node);
3605 flt_node = TREE_CHAIN (flt_node);
3607 while (tp_node);
3608 if (! have_label)
3610 remove_edge (find_edge (src, label_to_block (lab)));
3611 redirected = true;
3615 /* Clean up the edge flags. */
3616 FOR_EACH_EDGE (e, ei, src->succs)
3618 if (e->flags & EDGE_FALLTHRU)
3620 /* If there was no catch-all, use the fallthru edge. */
3621 if (default_label == NULL)
3622 default_label = gimple_block_label (e->dest);
3623 e->flags &= ~EDGE_FALLTHRU;
3626 gcc_assert (default_label != NULL);
3628 /* Don't generate a switch if there's only a default case.
3629 This is common in the form of try { A; } catch (...) { B; }. */
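/* When other handlers exist, the else branch below builds, roughly:
     filter = __builtin_eh_filter (region_nr);
     switch (filter) { case <filter value>: goto <handler label>; ...
                       default: goto <default_label>; }  */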
3630 if (!labels.exists ())
3632 e = single_succ_edge (src);
3633 e->flags |= EDGE_FALLTHRU;
3635 else
3637 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3638 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3639 region_nr));
3640 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
3641 filter = make_ssa_name (filter, x);
3642 gimple_call_set_lhs (x, filter);
3643 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3645 /* Turn the default label into a default case. */
3646 default_label = build_case_label (NULL, NULL, default_label);
3647 sort_case_labels (labels);
3649 x = gimple_build_switch (filter, default_label, labels);
3650 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3653 break;
3655 case ERT_ALLOWED_EXCEPTIONS:
3657 edge b_e = BRANCH_EDGE (src);
3658 edge f_e = FALLTHRU_EDGE (src);
3660 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3661 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3662 region_nr));
3663 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
3664 filter = make_ssa_name (filter, x);
3665 gimple_call_set_lhs (x, filter);
3666 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3668 r->u.allowed.label = NULL;
3669 x = gimple_build_cond (EQ_EXPR, filter,
3670 build_int_cst (TREE_TYPE (filter),
3671 r->u.allowed.filter),
3672 NULL_TREE, NULL_TREE);
3673 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3675 b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3676 f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3678 break;
3680 default:
3681 gcc_unreachable ();
3684 /* Replace the EH_DISPATCH with the SWITCH or COND generated above. */
3685 gsi_remove (&gsi, true);
3686 return redirected;
3689 namespace {
3691 const pass_data pass_data_lower_eh_dispatch =
3693 GIMPLE_PASS, /* type */
3694 "ehdisp", /* name */
3695 OPTGROUP_NONE, /* optinfo_flags */
3696 TV_TREE_EH, /* tv_id */
3697 PROP_gimple_lcf, /* properties_required */
3698 0, /* properties_provided */
3699 0, /* properties_destroyed */
3700 0, /* todo_flags_start */
3701 0, /* todo_flags_finish */
3704 class pass_lower_eh_dispatch : public gimple_opt_pass
3706 public:
3707 pass_lower_eh_dispatch (gcc::context *ctxt)
3708 : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
3711 /* opt_pass methods: */
3712 virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
3713 virtual unsigned int execute (function *);
3715 }; // class pass_lower_eh_dispatch
3717 unsigned
3718 pass_lower_eh_dispatch::execute (function *fun)
3720 basic_block bb;
3721 int flags = 0;
3722 bool redirected = false;
3724 assign_filter_values ();
3726 FOR_EACH_BB_FN (bb, fun)
3728 gimple last = last_stmt (bb);
3729 if (last == NULL)
3730 continue;
3731 if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3733 redirected |= lower_eh_dispatch (bb, last);
3734 flags |= TODO_update_ssa_only_virtuals;
3736 else if (gimple_code (last) == GIMPLE_RESX)
3738 if (stmt_can_throw_external (last))
3739 optimize_clobbers (bb);
3740 else
3741 flags |= sink_clobbers (bb);
3745 if (redirected)
3746 delete_unreachable_blocks ();
3747 return flags;
3750 } // anon namespace
3752 gimple_opt_pass *
3753 make_pass_lower_eh_dispatch (gcc::context *ctxt)
3755 return new pass_lower_eh_dispatch (ctxt);
3758 /* Walk statements, see what regions and, optionally, landing pads
3759 are really referenced.
3761 Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
3762 and in LP_REACHABLE an sbitmap with bits set for reachable landing pads.
3764 Passing NULL for LP_REACHABLE is valid; in this case only reachable
3765 regions are marked.
3767 The caller is responsible for freeing the returned sbitmaps. */
3769 static void
3770 mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
3772 sbitmap r_reachable, lp_reachable;
3773 basic_block bb;
3774 bool mark_landing_pads = (lp_reachablep != NULL);
3775 gcc_checking_assert (r_reachablep != NULL);
3777 r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
3778 bitmap_clear (r_reachable);
3779 *r_reachablep = r_reachable;
3781 if (mark_landing_pads)
3783 lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
3784 bitmap_clear (lp_reachable);
3785 *lp_reachablep = lp_reachable;
3787 else
3788 lp_reachable = NULL;
3790 FOR_EACH_BB_FN (bb, cfun)
3792 gimple_stmt_iterator gsi;
3794 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3796 gimple stmt = gsi_stmt (gsi);
3798 if (mark_landing_pads)
3800 int lp_nr = lookup_stmt_eh_lp (stmt);
3802 /* Negative LP numbers are MUST_NOT_THROW regions which
3803 are not considered BB enders. */
3804 if (lp_nr < 0)
3805 bitmap_set_bit (r_reachable, -lp_nr);
3807 /* Positive LP numbers are real landing pads, and BB enders. */
3808 else if (lp_nr > 0)
3810 gcc_assert (gsi_one_before_end_p (gsi));
3811 eh_region region = get_eh_region_from_lp_number (lp_nr);
3812 bitmap_set_bit (r_reachable, region->index);
3813 bitmap_set_bit (lp_reachable, lp_nr);
3817 /* Avoid removing regions referenced from RESX/EH_DISPATCH. */
3818 switch (gimple_code (stmt))
3820 case GIMPLE_RESX:
3821 bitmap_set_bit (r_reachable, gimple_resx_region (stmt));
3822 break;
3823 case GIMPLE_EH_DISPATCH:
3824 bitmap_set_bit (r_reachable, gimple_eh_dispatch_region (stmt));
3825 break;
3826 default:
3827 break;
3833 /* Remove unreachable handlers and unreachable landing pads. */
3835 static void
3836 remove_unreachable_handlers (void)
3838 sbitmap r_reachable, lp_reachable;
3839 eh_region region;
3840 eh_landing_pad lp;
3841 unsigned i;
3843 mark_reachable_handlers (&r_reachable, &lp_reachable);
3845 if (dump_file)
3847 fprintf (dump_file, "Before removal of unreachable regions:\n");
3848 dump_eh_tree (dump_file, cfun);
3849 fprintf (dump_file, "Reachable regions: ");
3850 dump_bitmap_file (dump_file, r_reachable);
3851 fprintf (dump_file, "Reachable landing pads: ");
3852 dump_bitmap_file (dump_file, lp_reachable);
3855 if (dump_file)
3857 FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3858 if (region && !bitmap_bit_p (r_reachable, region->index))
3859 fprintf (dump_file,
3860 "Removing unreachable region %d\n",
3861 region->index);
3864 remove_unreachable_eh_regions (r_reachable);
3866 FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3867 if (lp && !bitmap_bit_p (lp_reachable, lp->index))
3869 if (dump_file)
3870 fprintf (dump_file,
3871 "Removing unreachable landing pad %d\n",
3872 lp->index);
3873 remove_eh_landing_pad (lp);
3876 if (dump_file)
3878 fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
3879 dump_eh_tree (dump_file, cfun);
3880 fprintf (dump_file, "\n\n");
3883 sbitmap_free (r_reachable);
3884 sbitmap_free (lp_reachable);
3886 #ifdef ENABLE_CHECKING
3887 verify_eh_tree (cfun);
3888 #endif
3891 /* Remove unreachable handlers if any landing pads have been removed after
3892 the last ehcleanup pass (due to gimple_purge_dead_eh_edges). */
3894 void
3895 maybe_remove_unreachable_handlers (void)
3897 eh_landing_pad lp;
3898 unsigned i;
3900 if (cfun->eh == NULL)
3901 return;
3903 FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3904 if (lp && lp->post_landing_pad)
3906 if (label_to_block (lp->post_landing_pad) == NULL)
3908 remove_unreachable_handlers ();
3909 return;
3914 /* Remove regions that do not have landing pads. This assumes
3915 that remove_unreachable_handlers has already been run, and
3916 that we've just manipulated the landing pads since then.
3918 Preserve regions with landing pads and regions that prevent
3919 exceptions from propagating further, even if these regions
3920 are not reachable. */
3922 static void
3923 remove_unreachable_handlers_no_lp (void)
3925 eh_region region;
3926 sbitmap r_reachable;
3927 unsigned i;
3929 mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
3931 FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3933 if (! region)
3934 continue;
3936 if (region->landing_pads != NULL
3937 || region->type == ERT_MUST_NOT_THROW)
3938 bitmap_set_bit (r_reachable, region->index);
3940 if (dump_file
3941 && !bitmap_bit_p (r_reachable, region->index))
3942 fprintf (dump_file,
3943 "Removing unreachable region %d\n",
3944 region->index);
3947 remove_unreachable_eh_regions (r_reachable);
3949 sbitmap_free (r_reachable);
3952 /* Undo critical edge splitting on an EH landing pad. Earlier, we
3953 optimistically split all sorts of edges, including EH edges. The
3954 optimization passes in between may not have needed them; if not,
3955 we should undo the split.
3957 Recognize this case by having one EH edge incoming to the BB and
3958 one normal edge outgoing; BB should be empty apart from the
3959 post_landing_pad label.
3961 Note that this is slightly different from the empty handler case
3962 handled by cleanup_empty_eh, in that the handler may still
3963 contain real code but the landing pad has been separated from the
3964 handler. As such, cleanup_empty_eh relies on this transformation
3965 having been done first. */
3967 static bool
3968 unsplit_eh (eh_landing_pad lp)
3970 basic_block bb = label_to_block (lp->post_landing_pad);
3971 gimple_stmt_iterator gsi;
3972 edge e_in, e_out;
3974 /* Quickly check the edge counts on BB for singularity. */
3975 if (!single_pred_p (bb) || !single_succ_p (bb))
3976 return false;
3977 e_in = single_pred_edge (bb);
3978 e_out = single_succ_edge (bb);
3980 /* Input edge must be EH and output edge must be normal. */
3981 if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
3982 return false;
3984 /* The block must be empty except for the labels and debug insns. */
3985 gsi = gsi_after_labels (bb);
3986 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
3987 gsi_next_nondebug (&gsi);
3988 if (!gsi_end_p (gsi))
3989 return false;
3991 /* The destination block must not already have a landing pad
3992 for a different region. */
3993 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
3995 gimple stmt = gsi_stmt (gsi);
3996 tree lab;
3997 int lp_nr;
3999 if (gimple_code (stmt) != GIMPLE_LABEL)
4000 break;
4001 lab = gimple_label_label (stmt);
4002 lp_nr = EH_LANDING_PAD_NR (lab);
4003 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4004 return false;
4007 /* The new destination block must not already be a destination of
4008 the source block, lest we merge fallthru and EH edges and end up
4009 thoroughly confused. */
4010 if (find_edge (e_in->src, e_out->dest))
4011 return false;
4013 /* ??? We can get degenerate phis due to cfg cleanups. I would have
4014 thought this should have been cleaned up by a phicprop pass, but
4015 that doesn't appear to handle virtuals. Propagate by hand. */
4016 if (!gimple_seq_empty_p (phi_nodes (bb)))
4018 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
4020 gimple use_stmt, phi = gsi_stmt (gsi);
4021 tree lhs = gimple_phi_result (phi);
4022 tree rhs = gimple_phi_arg_def (phi, 0);
4023 use_operand_p use_p;
4024 imm_use_iterator iter;
4026 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4028 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4029 SET_USE (use_p, rhs);
4032 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
4033 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
4035 remove_phi_node (&gsi, true);
4039 if (dump_file && (dump_flags & TDF_DETAILS))
4040 fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
4041 lp->index, e_out->dest->index);
4043 /* Redirect the edge. Since redirect_eh_edge_1 expects to be moving
4044 a successor edge, humor it. But do the real CFG change with the
4045 predecessor of E_OUT in order to preserve the ordering of arguments
4046 to the PHI nodes in E_OUT->DEST. */
4047 redirect_eh_edge_1 (e_in, e_out->dest, false);
4048 redirect_edge_pred (e_out, e_in->src);
4049 e_out->flags = e_in->flags;
4050 e_out->probability = e_in->probability;
4051 e_out->count = e_in->count;
4052 remove_edge (e_in);
4054 return true;
4057 /* Examine each landing pad block and see if it matches unsplit_eh. */
4059 static bool
4060 unsplit_all_eh (void)
4062 bool changed = false;
4063 eh_landing_pad lp;
4064 int i;
4066 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4067 if (lp)
4068 changed |= unsplit_eh (lp);
4070 return changed;
4073 /* A subroutine of cleanup_empty_eh. Redirect all EH edges incoming
4074 to OLD_BB to NEW_BB; return true on success, false on failure.
4076 OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
4077 PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
4078 Virtual PHIs may be deleted and marked for renaming. */
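/* This works in three steps: first record, for every PHI in NEW_BB, the
   value each redirected edge should contribute (redirect_edge_var_map_add);
   second, check that every PHI in OLD_BB has been accounted for; only
   then redirect the EH edges and let flush_pending_stmts materialize
   the new PHI arguments. */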
4080 static bool
4081 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
4082 edge old_bb_out, bool change_region)
4084 gimple_stmt_iterator ngsi, ogsi;
4085 edge_iterator ei;
4086 edge e;
4087 bitmap ophi_handled;
4089 /* The destination block must not be a regular successor for any
4090 of the preds of the landing pad. Thus, avoid turning
4091 <..>
4092 | \ EH
4093 | <..>
4094 | /
4095 <..>
4096 into
4097 <..>
4098 | | EH
4099 <..>
4100 which CFG verification would choke on. See PR45172 and PR51089. */
4101 FOR_EACH_EDGE (e, ei, old_bb->preds)
4102 if (find_edge (e->src, new_bb))
4103 return false;
4105 FOR_EACH_EDGE (e, ei, old_bb->preds)
4106 redirect_edge_var_map_clear (e);
4108 ophi_handled = BITMAP_ALLOC (NULL);
4110 /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
4111 for the edges we're going to move. */
4112 for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
4114 gimple ophi, nphi = gsi_stmt (ngsi);
4115 tree nresult, nop;
4117 nresult = gimple_phi_result (nphi);
4118 nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
4120 /* Find the corresponding PHI in OLD_BB so we can forward-propagate
4121 the source ssa_name. */
4122 ophi = NULL;
4123 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4125 ophi = gsi_stmt (ogsi);
4126 if (gimple_phi_result (ophi) == nop)
4127 break;
4128 ophi = NULL;
4131 /* If we did find the corresponding PHI, copy those inputs. */
4132 if (ophi)
4134 /* If NOP is used somewhere else beyond phis in new_bb, give up. */
4135 if (!has_single_use (nop))
4137 imm_use_iterator imm_iter;
4138 use_operand_p use_p;
4140 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
4142 if (!gimple_debug_bind_p (USE_STMT (use_p))
4143 && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
4144 || gimple_bb (USE_STMT (use_p)) != new_bb))
4145 goto fail;
4148 bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
4149 FOR_EACH_EDGE (e, ei, old_bb->preds)
4151 location_t oloc;
4152 tree oop;
4154 if ((e->flags & EDGE_EH) == 0)
4155 continue;
4156 oop = gimple_phi_arg_def (ophi, e->dest_idx);
4157 oloc = gimple_phi_arg_location (ophi, e->dest_idx);
4158 redirect_edge_var_map_add (e, nresult, oop, oloc);
4161 /* If we didn't find the PHI, then whether it's a real variable or a VOP,
4162 we know from the fact that OLD_BB is tree_empty_eh_handler_p that the
4163 variable is unchanged from input to the block, and we can simply
4164 re-use the input to NEW_BB from the OLD_BB_OUT edge. */
4165 else
4167 location_t nloc
4168 = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
4169 FOR_EACH_EDGE (e, ei, old_bb->preds)
4170 redirect_edge_var_map_add (e, nresult, nop, nloc);
4174 /* Second, verify that all PHIs from OLD_BB have been handled. If not,
4175 we don't know what values from the other edges into NEW_BB to use. */
4176 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4178 gimple ophi = gsi_stmt (ogsi);
4179 tree oresult = gimple_phi_result (ophi);
4180 if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
4181 goto fail;
4184 /* Finally, move the edges and update the PHIs. */
4185 for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
4186 if (e->flags & EDGE_EH)
4188 /* ??? CFG manipulation routines do not try to update loop
4189 form on edge redirection. Do so manually here for now. */
4190 /* Redirecting a loop entry or latch edge will either create
4191 a multiple-entry loop or rotate the loop. If the loops merge,
4192 we may have created a loop with multiple latches.
4193 None of this is easily fixed, so cancel the affected loop
4194 and mark the other loop as possibly having multiple latches. */
4195 if (e->dest == e->dest->loop_father->header)
4197 e->dest->loop_father->header = NULL;
4198 e->dest->loop_father->latch = NULL;
4199 new_bb->loop_father->latch = NULL;
4200 loops_state_set (LOOPS_NEED_FIXUP|LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
4202 redirect_eh_edge_1 (e, new_bb, change_region);
4203 redirect_edge_succ (e, new_bb);
4204 flush_pending_stmts (e);
4206 else
4207 ei_next (&ei);
4209 BITMAP_FREE (ophi_handled);
4210 return true;
4212 fail:
4213 FOR_EACH_EDGE (e, ei, old_bb->preds)
4214 redirect_edge_var_map_clear (e);
4215 BITMAP_FREE (ophi_handled);
4216 return false;
4219 /* A subroutine of cleanup_empty_eh. Move a landing pad LP from its
4220 old region to NEW_REGION at BB. */
4222 static void
4223 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
4224 eh_landing_pad lp, eh_region new_region)
4226 gimple_stmt_iterator gsi;
4227 eh_landing_pad *pp;
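/* Unlink LP from the landing pad list of its current region, then
   push it onto the front of NEW_REGION's list. */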
4229 for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
4230 continue;
4231 *pp = lp->next_lp;
4233 lp->region = new_region;
4234 lp->next_lp = new_region->landing_pads;
4235 new_region->landing_pads = lp;
4237 /* Delete the RESX that was matched within the empty handler block. */
4238 gsi = gsi_last_bb (bb);
4239 unlink_stmt_vdef (gsi_stmt (gsi));
4240 gsi_remove (&gsi, true);
4242 /* Clean up E_OUT for the fallthru. */
4243 e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
4244 e_out->probability = REG_BR_PROB_BASE;
4247 /* A subroutine of cleanup_empty_eh. Handle more complex cases of
4248 unsplitting than unsplit_eh was prepared to handle, e.g. when
4249 multiple incoming edges and phis are involved. */
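/* The landing pad keeps its current region here, which is why
   cleanup_empty_eh_merge_phis is called with CHANGE_REGION false. */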
4251 static bool
4252 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
4254 gimple_stmt_iterator gsi;
4255 tree lab;
4257 /* We really ought not have totally lost everything following
4258 a landing pad label. Given that BB is empty, there had better
4259 be a successor. */
4260 gcc_assert (e_out != NULL);
4262 /* The destination block must not already have a landing pad
4263 for a different region. */
4264 lab = NULL;
4265 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4267 gimple stmt = gsi_stmt (gsi);
4268 int lp_nr;
4270 if (gimple_code (stmt) != GIMPLE_LABEL)
4271 break;
4272 lab = gimple_label_label (stmt);
4273 lp_nr = EH_LANDING_PAD_NR (lab);
4274 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4275 return false;
4278 /* Attempt to move the PHIs into the successor block. */
4279 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
4281 if (dump_file && (dump_flags & TDF_DETAILS))
4282 fprintf (dump_file,
4283 "Unsplit EH landing pad %d to block %i "
4284 "(via cleanup_empty_eh).\n",
4285 lp->index, e_out->dest->index);
4286 return true;
4289 return false;
4292 /* Return true if edge E_FIRST is part of an empty infinite loop
4293 or leads to such a loop through a series of single successor
4294 empty bbs. */
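/* Walk the chain of empty single-successor blocks starting at E_FIRST,
   marking visited blocks via their AUX field; reaching a marked block
   again means the chain loops back on itself. The marks are cleared
   before returning. */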
4296 static bool
4297 infinite_empty_loop_p (edge e_first)
4299 bool inf_loop = false;
4300 edge e;
4302 if (e_first->dest == e_first->src)
4303 return true;
4305 e_first->src->aux = (void *) 1;
4306 for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
4308 gimple_stmt_iterator gsi;
4309 if (e->dest->aux)
4311 inf_loop = true;
4312 break;
4314 e->dest->aux = (void *) 1;
4315 gsi = gsi_after_labels (e->dest);
4316 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4317 gsi_next_nondebug (&gsi);
4318 if (!gsi_end_p (gsi))
4319 break;
4321 e_first->src->aux = NULL;
4322 for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
4323 e->dest->aux = NULL;
4325 return inf_loop;
4328 /* Examine the block associated with LP to determine if it's an empty
4329 handler for its EH region. If so, attempt to redirect EH edges to
4330 an outer region. Return true if the CFG was updated in any way. This
4331 is similar to jump forwarding, just across EH edges. */
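/* In outline: clobber statements are removed or sunk first; a block
   that is then completely empty is handed to the unsplitting code;
   otherwise the block must consist of a single RESX, whose destination
   region determines what happens: no region or a MUST_NOT_THROW region
   means the EH edges can simply be removed, otherwise the PHIs are
   merged into the outer landing pad or, failing that, the landing pad
   itself is moved to the outer region. */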
4333 static bool
4334 cleanup_empty_eh (eh_landing_pad lp)
4336 basic_block bb = label_to_block (lp->post_landing_pad);
4337 gimple_stmt_iterator gsi;
4338 gimple resx;
4339 eh_region new_region;
4340 edge_iterator ei;
4341 edge e, e_out;
4342 bool has_non_eh_pred;
4343 bool ret = false;
4344 int new_lp_nr;
4346 /* There can be zero or one edges out of BB. This is the quickest test. */
4347 switch (EDGE_COUNT (bb->succs))
4349 case 0:
4350 e_out = NULL;
4351 break;
4352 case 1:
4353 e_out = single_succ_edge (bb);
4354 break;
4355 default:
4356 return false;
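/* If BB ends in a RESX, remove or sink any variable clobber statements
   first, so that they do not prevent the block from being recognized
   below as containing nothing but the RESX. */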
4359 resx = last_stmt (bb);
4360 if (resx && is_gimple_resx (resx))
4362 if (stmt_can_throw_external (resx))
4363 optimize_clobbers (bb);
4364 else if (sink_clobbers (bb))
4365 ret = true;
4368 gsi = gsi_after_labels (bb);
4370 /* Make sure to skip debug statements. */
4371 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4372 gsi_next_nondebug (&gsi);
4374 /* If the block is totally empty, look for more unsplitting cases. */
4375 if (gsi_end_p (gsi))
4377 /* For the degenerate case of an infinite loop, bail out.
4378 If bb has no successors and is totally empty, which can happen e.g.
4379 because of an incorrect noreturn attribute, bail out too. */
4380 if (e_out == NULL
4381 || infinite_empty_loop_p (e_out))
4382 return ret;
4384 return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
4387 /* The block should consist only of a single RESX statement, modulo a
4388 preceding call to __builtin_stack_restore if there is no outgoing
4389 edge, since the call can be eliminated in this case. */
4390 resx = gsi_stmt (gsi);
4391 if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
4393 gsi_next (&gsi);
4394 resx = gsi_stmt (gsi);
4396 if (!is_gimple_resx (resx))
4397 return ret;
4398 gcc_assert (gsi_one_before_end_p (gsi));
4400 /* Determine if there are non-EH edges, or resx edges into the handler. */
4401 has_non_eh_pred = false;
4402 FOR_EACH_EDGE (e, ei, bb->preds)
4403 if (!(e->flags & EDGE_EH))
4404 has_non_eh_pred = true;
4406 /* Find the handler enclosing the empty handler by looking at
4407 where the RESX instruction was vectored. */
4408 new_lp_nr = lookup_stmt_eh_lp (resx);
4409 new_region = get_eh_region_from_lp_number (new_lp_nr);
4411 /* If there's no destination region within the current function,
4412 redirection is trivial via removing the throwing statements from
4413 the EH region, removing the EH edges, and allowing the block
4414 to go unreachable. */
4415 if (new_region == NULL)
4417 gcc_assert (e_out == NULL);
4418 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4419 if (e->flags & EDGE_EH)
4421 gimple stmt = last_stmt (e->src);
4422 remove_stmt_from_eh_lp (stmt);
4423 remove_edge (e);
4425 else
4426 ei_next (&ei);
4427 goto succeed;
4430 /* If the destination region is a MUST_NOT_THROW, allow the runtime
4431 to handle the abort and allow the blocks to go unreachable. */
4432 if (new_region->type == ERT_MUST_NOT_THROW)
4434 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4435 if (e->flags & EDGE_EH)
4437 gimple stmt = last_stmt (e->src);
4438 remove_stmt_from_eh_lp (stmt);
4439 add_stmt_to_eh_lp (stmt, new_lp_nr);
4440 remove_edge (e);
4442 else
4443 ei_next (&ei);
4444 goto succeed;
4447 /* Try to redirect the EH edges and merge the PHIs into the destination
4448 landing pad block. If the merge succeeds, we'll already have redirected
4449 all the EH edges. The handler itself will go unreachable if there were
4450 no normal edges. */
4451 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
4452 goto succeed;
4454 /* Finally, if all input edges are EH edges, then we can (potentially)
4455 reduce the number of transfers from the runtime by moving the landing
4456 pad from the original region to the new region. This is a win when
4457 we remove the last CLEANUP region along a particular exception
4458 propagation path. Since nothing changes except for the region with
4459 which the landing pad is associated, the PHI nodes do not need to be
4460 adjusted at all. */
4461 if (!has_non_eh_pred)
4463 cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
4464 if (dump_file && (dump_flags & TDF_DETAILS))
4465 fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
4466 lp->index, new_region->index);
4468 /* ??? The CFG didn't change, but we may have rendered the
4469 old EH region unreachable. Trigger a cleanup there. */
4470 return true;
4473 return ret;
4475 succeed:
4476 if (dump_file && (dump_flags & TDF_DETAILS))
4477 fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
4478 remove_eh_landing_pad (lp);
4479 return true;
4482 /* Do a post-order traversal of the EH region tree. Examine each
4483 post_landing_pad block and see if we can eliminate it as empty. */
4485 static bool
4486 cleanup_all_empty_eh (void)
4488 bool changed = false;
4489 eh_landing_pad lp;
4490 int i;
4492 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4493 if (lp)
4494 changed |= cleanup_empty_eh (lp);
4496 return changed;
4499 /* Perform cleanups and lowering of exception handling:
4500 1) cleanup regions with handlers doing nothing are optimized out,
4501 2) MUST_NOT_THROW regions that became dead because of 1) are optimized out,
4502 3) info about regions containing instructions, and about regions
4503 reachable via local EH edges, is collected,
4504 4) the EH tree is pruned of regions no longer necessary.
4506 TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
4507 Unify those that have the same failure decl and locus. */
4510 static unsigned int
4511 execute_cleanup_eh_1 (void)
4513 /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
4514 looking up unreachable landing pads. */
4515 remove_unreachable_handlers ();
4517 /* Watch out for the region tree vanishing due to all regions being unreachable. */
4518 if (cfun->eh->region_tree)
4520 bool changed = false;
4522 if (optimize)
4523 changed |= unsplit_all_eh ();
4524 changed |= cleanup_all_empty_eh ();
4526 if (changed)
4528 free_dominance_info (CDI_DOMINATORS);
4529 free_dominance_info (CDI_POST_DOMINATORS);
4531 /* We delayed all basic block deletion, as we may have performed
4532 cleanups on EH edges while non-EH edges were still present. */
4533 delete_unreachable_blocks ();
4535 /* We manipulated the landing pads. Remove any region that no
4536 longer has a landing pad. */
4537 remove_unreachable_handlers_no_lp ();
4539 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4543 return 0;
4546 namespace {
4548 const pass_data pass_data_cleanup_eh =
4550 GIMPLE_PASS, /* type */
4551 "ehcleanup", /* name */
4552 OPTGROUP_NONE, /* optinfo_flags */
4553 TV_TREE_EH, /* tv_id */
4554 PROP_gimple_lcf, /* properties_required */
4555 0, /* properties_provided */
4556 0, /* properties_destroyed */
4557 0, /* todo_flags_start */
4558 0, /* todo_flags_finish */
4561 class pass_cleanup_eh : public gimple_opt_pass
4563 public:
4564 pass_cleanup_eh (gcc::context *ctxt)
4565 : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
4568 /* opt_pass methods: */
4569 opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
4570 virtual bool gate (function *fun)
4572 return fun->eh != NULL && fun->eh->region_tree != NULL;
4575 virtual unsigned int execute (function *);
4577 }; // class pass_cleanup_eh
4579 unsigned int
4580 pass_cleanup_eh::execute (function *fun)
4582 int ret = execute_cleanup_eh_1 ();
4584 /* If the function no longer needs an EH personality routine,
4585 clear it. This exposes cross-language inlining opportunities
4586 and avoids references to a never defined personality routine. */
4587 if (DECL_FUNCTION_PERSONALITY (current_function_decl)
4588 && function_needs_eh_personality (fun) != eh_personality_lang)
4589 DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
4591 return ret;
4594 } // anon namespace
4596 gimple_opt_pass *
4597 make_pass_cleanup_eh (gcc::context *ctxt)
4599 return new pass_cleanup_eh (ctxt);
4602 /* Verify that BB, which contains STMT as its last statement, has precisely
4603 the edge that make_eh_edges would create. */
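/* Specifically: BB must have at most one EH edge; an EH edge must be
   present exactly when STMT has a landing pad and can throw; and that
   edge must lead to the landing pad's post-landing-pad block. */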
4605 DEBUG_FUNCTION bool
4606 verify_eh_edges (gimple stmt)
4608 basic_block bb = gimple_bb (stmt);
4609 eh_landing_pad lp = NULL;
4610 int lp_nr;
4611 edge_iterator ei;
4612 edge e, eh_edge;
4614 lp_nr = lookup_stmt_eh_lp (stmt);
4615 if (lp_nr > 0)
4616 lp = get_eh_landing_pad_from_number (lp_nr);
4618 eh_edge = NULL;
4619 FOR_EACH_EDGE (e, ei, bb->succs)
4621 if (e->flags & EDGE_EH)
4623 if (eh_edge)
4625 error ("BB %i has multiple EH edges", bb->index);
4626 return true;
4628 else
4629 eh_edge = e;
4633 if (lp == NULL)
4635 if (eh_edge)
4637 error ("BB %i can not throw but has an EH edge", bb->index);
4638 return true;
4640 return false;
4643 if (!stmt_could_throw_p (stmt))
4645 error ("BB %i last statement has incorrectly set lp", bb->index);
4646 return true;
4649 if (eh_edge == NULL)
4651 error ("BB %i is missing an EH edge", bb->index);
4652 return true;
4655 if (eh_edge->dest != label_to_block (lp->post_landing_pad))
4657 error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
4658 return true;
4661 return false;
4664 /* Similarly, but handle GIMPLE_EH_DISPATCH specifically. */
4666 DEBUG_FUNCTION bool
4667 verify_eh_dispatch_edge (gimple stmt)
4669 eh_region r;
4670 eh_catch c;
4671 basic_block src, dst;
4672 bool want_fallthru = true;
4673 edge_iterator ei;
4674 edge e, fall_edge;
4676 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
4677 src = gimple_bb (stmt);
4679 FOR_EACH_EDGE (e, ei, src->succs)
4680 gcc_assert (e->aux == NULL);
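/* Edges the dispatch is expected to have are marked below by setting
   their AUX field; the loop at the end then checks that every successor
   edge is either the single fallthru or one of the marked edges,
   clearing the marks as it goes. */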
4682 switch (r->type)
4684 case ERT_TRY:
4685 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
4687 dst = label_to_block (c->label);
4688 e = find_edge (src, dst);
4689 if (e == NULL)
4691 error ("BB %i is missing an edge", src->index);
4692 return true;
4694 e->aux = (void *)e;
4696 /* A catch-all handler doesn't have a fallthru. */
4697 if (c->type_list == NULL)
4699 want_fallthru = false;
4700 break;
4703 break;
4705 case ERT_ALLOWED_EXCEPTIONS:
4706 dst = label_to_block (r->u.allowed.label);
4707 e = find_edge (src, dst);
4708 if (e == NULL)
4710 error ("BB %i is missing an edge", src->index);
4711 return true;
4713 e->aux = (void *)e;
4714 break;
4716 default:
4717 gcc_unreachable ();
4720 fall_edge = NULL;
4721 FOR_EACH_EDGE (e, ei, src->succs)
4723 if (e->flags & EDGE_FALLTHRU)
4725 if (fall_edge != NULL)
4727 error ("BB %i too many fallthru edges", src->index);
4728 return true;
4730 fall_edge = e;
4732 else if (e->aux)
4733 e->aux = NULL;
4734 else
4736 error ("BB %i has incorrect edge", src->index);
4737 return true;
4740 if ((fall_edge != NULL) ^ want_fallthru)
4742 error ("BB %i has incorrect fallthru edge", src->index);
4743 return true;
4746 return false;