/* Exception handling semantics and decomposition for trees.
   Copyright (C) 2003-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree-pass.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "cfgcleanup.h"
#include "gimple-iterator.h"
#include "tree-into-ssa.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "gimple-low.h"
#include "stringpool.h"
/* In some instances a tree and a gimple need to be stored in the same table,
   i.e. in hash tables.  This is a structure to do this.  */
typedef union {tree *tp; tree t; gimple *g;} treemple;
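
/* For illustration only (not from the original sources): a treemple lets
   one hash table key off either a LABEL_DECL tree or a GIMPLE statement:

     treemple key;
     key.t = some_label_decl;   // when tracking a label (a tree)
     key.g = some_try_stmt;     // when tracking a GIMPLE_TRY

   Which member is active is implied by the context of use.  */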

/* Misc functions used in this file.  */

/* Remember and lookup EH landing pad data for arbitrary statements.
   Really this means any statement that could_throw_p.  We could
   stuff this information into the stmt_ann data structure, but:

   (1) We absolutely rely on this information being kept until
   we get to rtl.  Once we're done with lowering here, if we lose
   the information there's no way to recover it!

   (2) There are many more statements that *cannot* throw as
   compared to those that can.  We should be saving some amount
   of space by only allocating memory for those that can throw.  */

/* Add statement T in function IFUN to landing pad NUM.  */

static void
add_stmt_to_eh_lp_fn (struct function *ifun, gimple *t, int num)
{
  gcc_assert (num != 0);

  if (!get_eh_throw_stmt_table (ifun))
    set_eh_throw_stmt_table (ifun, hash_map<gimple *, int>::create_ggc (31));

  gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
}

/* Add statement T in the current function (cfun) to EH landing pad NUM.  */

void
add_stmt_to_eh_lp (gimple *t, int num)
{
  add_stmt_to_eh_lp_fn (cfun, t, num);
}

/* Add statement T to the single EH landing pad in REGION.  */

static void
record_stmt_eh_region (eh_region region, gimple *t)
{
  if (region == NULL)
    return;
  if (region->type == ERT_MUST_NOT_THROW)
    add_stmt_to_eh_lp_fn (cfun, t, -region->index);
  else
    {
      eh_landing_pad lp = region->landing_pads;
      if (lp == NULL)
        lp = gen_eh_landing_pad (region);
      else
        gcc_assert (lp->next_lp == NULL);
      add_stmt_to_eh_lp_fn (cfun, t, lp->index);
    }
}

/* Remove statement T in function IFUN from its EH landing pad.  */

bool
remove_stmt_from_eh_lp_fn (struct function *ifun, gimple *t)
{
  if (!get_eh_throw_stmt_table (ifun))
    return false;

  if (!get_eh_throw_stmt_table (ifun)->get (t))
    return false;

  get_eh_throw_stmt_table (ifun)->remove (t);
  return true;
}

/* Remove statement T in the current function (cfun) from its
   EH landing pad.  */

bool
remove_stmt_from_eh_lp (gimple *t)
{
  return remove_stmt_from_eh_lp_fn (cfun, t);
}

/* Determine if statement T is inside an EH region in function IFUN.
   Positive numbers indicate a landing pad index; negative numbers
   indicate a MUST_NOT_THROW region index; zero indicates that the
   statement is not recorded in the region table.  */

int
lookup_stmt_eh_lp_fn (struct function *ifun, const gimple *t)
{
  if (ifun->eh->throw_stmt_table == NULL)
    return 0;

  int *lp_nr = ifun->eh->throw_stmt_table->get (const_cast <gimple *> (t));
  return lp_nr ? *lp_nr : 0;
}
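
/* For illustration only (not from the original sources): given the
   encoding above, a caller can decode the result like so:

     int lp_nr = lookup_stmt_eh_lp_fn (ifun, stmt);
     if (lp_nr > 0)
       ;  // stmt may throw; control lands at landing pad number lp_nr
     else if (lp_nr < 0)
       ;  // stmt sits inside MUST_NOT_THROW region number -lp_nr
     else
       ;  // stmt is not recorded in the throw table at all
*/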

/* Likewise, but always use the current function.  */

int
lookup_stmt_eh_lp (const gimple *t)
{
  /* We can get called from initialized data when -fnon-call-exceptions
     is on; prevent crash.  */
  if (cfun == NULL)
    return 0;
  return lookup_stmt_eh_lp_fn (cfun, t);
}

/* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
   nodes and LABEL_DECL nodes.  We will use this during the second phase to
   determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */

struct finally_tree_node
{
  /* When storing a GIMPLE_TRY, we have to record a gimple.  However
     when deciding whether a GOTO to a certain LABEL_DECL (which is a
     tree) leaves the TRY block, it's necessary to record a tree in
     this field.  Thus a treemple is used.  */
  treemple child;
  gtry *parent;
};

/* Hashtable helpers.  */

struct finally_tree_hasher : free_ptr_hash <finally_tree_node>
{
  static inline hashval_t hash (const finally_tree_node *);
  static inline bool equal (const finally_tree_node *,
                            const finally_tree_node *);
};

inline hashval_t
finally_tree_hasher::hash (const finally_tree_node *v)
{
  return (intptr_t)v->child.t >> 4;
}

inline bool
finally_tree_hasher::equal (const finally_tree_node *v,
                            const finally_tree_node *c)
{
  return v->child.t == c->child.t;
}

/* Note that this table is *not* marked GTY.  It is short-lived.  */
static hash_table<finally_tree_hasher> *finally_tree;

static void
record_in_finally_tree (treemple child, gtry *parent)
{
  struct finally_tree_node *n;
  finally_tree_node **slot;

  n = XNEW (struct finally_tree_node);
  n->child = child;
  n->parent = parent;

  slot = finally_tree->find_slot (n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}

static void
collect_finally_tree (gimple *stmt, gtry *region);

/* Go through the gimple sequence.  Works with collect_finally_tree to
   record all GIMPLE_LABEL and GIMPLE_TRY statements.  */

static void
collect_finally_tree_1 (gimple_seq seq, gtry *region)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
    collect_finally_tree (gsi_stmt (gsi), region);
}

static void
collect_finally_tree (gimple *stmt, gtry *region)
{
  treemple temp;

  switch (gimple_code (stmt))
    {
    case GIMPLE_LABEL:
      temp.t = gimple_label_label (as_a <glabel *> (stmt));
      record_in_finally_tree (temp, region);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
        {
          temp.g = stmt;
          record_in_finally_tree (temp, region);
          collect_finally_tree_1 (gimple_try_eval (stmt),
                                  as_a <gtry *> (stmt));
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
        {
          collect_finally_tree_1 (gimple_try_eval (stmt), region);
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      break;

    case GIMPLE_CATCH:
      collect_finally_tree_1 (gimple_catch_handler (
                                as_a <gcatch *> (stmt)),
                              region);
      break;

    case GIMPLE_EH_FILTER:
      collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
      break;

    case GIMPLE_EH_ELSE:
      {
        geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
        collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region);
        collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region);
      }
      break;

    default:
      /* A type, a decl, or some kind of statement that we're not
         interested in.  Don't walk them.  */
      break;
    }
}

/* Use the finally tree to determine if a jump from START to TARGET
   would leave the try_finally node that START lives in.  */

static bool
outside_finally_tree (treemple start, gimple *target)
{
  struct finally_tree_node n, *p;

  do
    {
      n.child = start;
      p = finally_tree->find (&n);
      if (!p)
        return true;
      start.g = p->parent;
    }
  while (start.g != target);

  return false;
}
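
/* For illustration only (not from the original sources): in

     try { if (c) goto done; }
     finally { cleanup (); }
     done:;

   the label DONE was not recorded under the GIMPLE_TRY_FINALLY node, so
   walking parent links from the goto's label never reaches that try node
   and outside_finally_tree returns true: the goto escapes the try block
   and must be queued for redirection through the finally code.  */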

/* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
   nodes into a set of gotos, magic labels, and eh regions.
   The eh region creation is straight-forward, but frobbing all the gotos
   and such into shape isn't.  */

/* The sequence into which we record all EH stuff.  This will be
   placed at the end of the function when we're all done.  */
static gimple_seq eh_seq;

/* Record whether an EH region contains something that can throw,
   indexed by EH region number.  */
static bitmap eh_region_may_contain_throw_map;

/* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
   statements that are seen to escape this GIMPLE_TRY_FINALLY node.
   The idea is to record a gimple statement for everything except for
   the conditionals, which get their labels recorded.  Since labels are
   of type 'tree', we need this node to store both gimple and tree
   objects.  REPL_STMT is the sequence used to replace the goto/return
   statement.  CONT_STMT is used to store the statement that allows
   the return/goto to jump to the original destination.  */

struct goto_queue_node
{
  treemple stmt;
  location_t location;
  gimple_seq repl_stmt;
  gimple *cont_stmt;
  int index;
  /* This is used when index >= 0 to indicate that stmt is a label (as
     opposed to a goto stmt).  */
  bool is_label;
};

/* State of the world while lowering.  */

struct leh_state
{
  /* What's "current" while constructing the eh region tree.  These
     correspond to variables of the same name in cfun->eh, which we
     don't have easy access to.  */
  eh_region cur_region;

  /* What's "current" for the purposes of __builtin_eh_pointer.  For
     a CATCH, this is the associated TRY.  For an EH_FILTER, this is
     the associated ALLOWED_EXCEPTIONS, etc.  */
  eh_region ehp_region;

  /* Processing of TRY_FINALLY requires a bit more state.  This is
     split out into a separate structure so that we don't have to
     copy so much when processing other nodes.  */
  struct leh_tf_state *tf;

  /* Outer non-cleanup region.  */
  eh_region outer_non_cleanup;
};

struct leh_tf_state
{
  /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
     try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
     this so that outside_finally_tree can reliably reference the tree used
     in the collect_finally_tree data structures.  */
  gtry *try_finally_expr;
  gtry *top_p;

  /* While lowering a top_p usually it is expanded into multiple statements,
     thus we need the following field to store them.  */
  gimple_seq top_p_seq;

  /* The state outside this try_finally node.  */
  struct leh_state *outer;

  /* The exception region created for it.  */
  eh_region region;

  /* The goto queue.  */
  struct goto_queue_node *goto_queue;
  size_t goto_queue_size;
  size_t goto_queue_active;

  /* Pointer map to help in searching goto_queue when it is large.  */
  hash_map<gimple *, goto_queue_node *> *goto_queue_map;

  /* The set of unique labels seen as entries in the goto queue.  */
  vec<tree> dest_array;

  /* A label to be added at the end of the completed transformed
     sequence.  It will be set if may_fallthru was true *at one time*,
     though subsequent transformations may have cleared that flag.  */
  tree fallthru_label;

  /* True if it is possible to fall out the bottom of the try block.
     Cleared if the fallthru is converted to a goto.  */
  bool may_fallthru;

  /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
  bool may_return;

  /* True if the finally block can receive an exception edge.
     Cleared if the exception case is handled by code duplication.  */
  bool may_throw;
};

static gimple_seq lower_eh_must_not_throw (struct leh_state *, gtry *);

/* Search for STMT in the goto queue.  Return the replacement,
   or null if the statement isn't in the queue.  */

#define LARGE_GOTO_QUEUE 20

static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);

static gimple_seq
find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
{
  unsigned int i;

  if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
    {
      for (i = 0; i < tf->goto_queue_active; i++)
        if (tf->goto_queue[i].stmt.g == stmt.g)
          return tf->goto_queue[i].repl_stmt;
      return NULL;
    }

  /* If we have a large number of entries in the goto_queue, create a
     pointer map and use that for searching.  */

  if (!tf->goto_queue_map)
    {
      tf->goto_queue_map = new hash_map<gimple *, goto_queue_node *>;
      for (i = 0; i < tf->goto_queue_active; i++)
        {
          bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
                                                  &tf->goto_queue[i]);
          gcc_assert (!existed);
        }
    }

  goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
  if (slot != NULL)
    return ((*slot)->repl_stmt);

  return NULL;
}
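
/* For illustration only (not from the original sources): with
   LARGE_GOTO_QUEUE == 20, a queue of up to 19 entries is scanned
   linearly, which is cheap for the common small case; only at 20 or more
   entries is the one-time cost of building goto_queue_map paid, after
   which each lookup is a single hash probe instead of an O(n) scan.  */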

/* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
   lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
   then we can just splat it in, otherwise we add the new stmts immediately
   after the GIMPLE_COND and redirect.  */

static void
replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
                                gimple_stmt_iterator *gsi)
{
  tree label;
  gimple_seq new_seq;
  treemple temp;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  temp.tp = tp;
  new_seq = find_goto_replacement (tf, temp);
  if (!new_seq)
    return;

  if (gimple_seq_singleton_p (new_seq)
      && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
    {
      *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
      return;
    }

  label = create_artificial_label (loc);
  /* Set the new label for the GIMPLE_COND.  */
  *tp = label;

  gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
  gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
}

/* The real work of replace_goto_queue.  Returns with TSI updated to
   point to the next statement.  */

static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);

static void
replace_goto_queue_1 (gimple *stmt, struct leh_tf_state *tf,
                      gimple_stmt_iterator *gsi)
{
  gimple_seq seq;
  treemple temp;
  temp.g = NULL;

  switch (gimple_code (stmt))
    {
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      temp.g = stmt;
      seq = find_goto_replacement (tf, temp);
      if (seq)
        {
          gimple_stmt_iterator i;
          seq = gimple_seq_copy (seq);
          for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
            gimple_set_location (gsi_stmt (i), gimple_location (stmt));
          gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
          gsi_remove (gsi, false);
          return;
        }
      break;

    case GIMPLE_COND:
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
      break;

    case GIMPLE_TRY:
      replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
      break;
    case GIMPLE_CATCH:
      replace_goto_queue_stmt_list (gimple_catch_handler_ptr (
                                      as_a <gcatch *> (stmt)),
                                    tf);
      break;
    case GIMPLE_EH_FILTER:
      replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
      break;
    case GIMPLE_EH_ELSE:
      {
        geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
        replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt),
                                      tf);
        replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt),
                                      tf);
      }
      break;

    default:
      /* These won't have gotos in them.  */
      break;
    }

  gsi_next (gsi);
}

/* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */

static void
replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
{
  gimple_stmt_iterator gsi = gsi_start (*seq);

  while (!gsi_end_p (gsi))
    replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
}

/* Replace all goto queue members.  */

static void
replace_goto_queue (struct leh_tf_state *tf)
{
  if (tf->goto_queue_active == 0)
    return;
  replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
  replace_goto_queue_stmt_list (&eh_seq, tf);
}

/* Add a new record to the goto queue contained in TF.  NEW_STMT is the
   data to be added, IS_LABEL indicates whether NEW_STMT is a label or
   a gimple return.  */

static void
record_in_goto_queue (struct leh_tf_state *tf,
                      treemple new_stmt,
                      int index,
                      bool is_label,
                      location_t location)
{
  size_t active, size;
  struct goto_queue_node *q;

  gcc_assert (!tf->goto_queue_map);

  active = tf->goto_queue_active;
  size = tf->goto_queue_size;
  if (active >= size)
    {
      size = (size ? size * 2 : 32);
      tf->goto_queue_size = size;
      tf->goto_queue
        = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
    }

  q = &tf->goto_queue[active];
  tf->goto_queue_active = active + 1;

  memset (q, 0, sizeof (*q));
  q->stmt = new_stmt;
  q->index = index;
  q->location = location;
  q->is_label = is_label;
}

/* Record the LABEL label in the goto queue contained in TF.
   TF is not null.  */

static void
record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
                            location_t location)
{
  int index;
  treemple temp, new_stmt;

  if (!label)
    return;

  /* Computed and non-local gotos do not get processed.  Given
     their nature we can neither tell whether we've escaped the
     finally block nor redirect them if we knew.  */
  if (TREE_CODE (label) != LABEL_DECL)
    return;

  /* No need to record gotos that don't leave the try block.  */
  temp.t = label;
  if (!outside_finally_tree (temp, tf->try_finally_expr))
    return;

  if (! tf->dest_array.exists ())
    {
      tf->dest_array.create (10);
      tf->dest_array.quick_push (label);
      index = 0;
    }
  else
    {
      int n = tf->dest_array.length ();
      for (index = 0; index < n; ++index)
        if (tf->dest_array[index] == label)
          break;
      if (index == n)
        tf->dest_array.safe_push (label);
    }

  /* In the case of a GOTO we want to record the destination label,
     since with a GIMPLE_COND we have an easy access to the then/else
     labels.  */
  new_stmt = stmt;
  record_in_goto_queue (tf, new_stmt, index, true, location);
}

/* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
   node, and if so record that fact in the goto queue associated with that
   try_finally node.  */

static void
maybe_record_in_goto_queue (struct leh_state *state, gimple *stmt)
{
  struct leh_tf_state *tf = state->tf;
  treemple new_stmt;

  if (!tf)
    return;

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
        gcond *cond_stmt = as_a <gcond *> (stmt);
        new_stmt.tp = gimple_op_ptr (cond_stmt, 2);
        record_in_goto_queue_label (tf, new_stmt,
                                    gimple_cond_true_label (cond_stmt),
                                    EXPR_LOCATION (*new_stmt.tp));
        new_stmt.tp = gimple_op_ptr (cond_stmt, 3);
        record_in_goto_queue_label (tf, new_stmt,
                                    gimple_cond_false_label (cond_stmt),
                                    EXPR_LOCATION (*new_stmt.tp));
      }
      break;
    case GIMPLE_GOTO:
      new_stmt.g = stmt;
      record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
                                  gimple_location (stmt));
      break;

    case GIMPLE_RETURN:
      tf->may_return = true;
      new_stmt.g = stmt;
      record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
      break;

    default:
      gcc_unreachable ();
    }
}

#if CHECKING_P
/* We do not process GIMPLE_SWITCHes for now.  As long as the original source
   was in fact structured, and we've not yet done jump threading, then none
   of the labels will leave outer GIMPLE_TRY_FINALLY nodes.  Verify this.  */

static void
verify_norecord_switch_expr (struct leh_state *state,
                             gswitch *switch_expr)
{
  struct leh_tf_state *tf = state->tf;
  size_t i, n;

  if (!tf)
    return;

  n = gimple_switch_num_labels (switch_expr);

  for (i = 0; i < n; ++i)
    {
      treemple temp;
      tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
      temp.t = lab;
      gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
    }
}
#else
#define verify_norecord_switch_expr(state, switch_expr)
#endif

/* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
   non-null, insert it before the new branch.  */

static void
do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
{
  gimple *x;

  /* In the case of a return, the queue node must be a gimple statement.  */
  gcc_assert (!q->is_label);

  /* Note that the return value may have already been computed, e.g.,

        int x;
        int foo (void)
        {
          x = 0;
          try {
            return x;
          } finally {
            x++;
          }
        }

     should return 0, not 1.  We don't have to do anything to make
     this happen because the return value has been placed in the
     RESULT_DECL already.  */

  q->cont_stmt = q->stmt.g;

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Similar, but easier, for GIMPLE_GOTO.  */

static void
do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
                     struct leh_tf_state *tf)
{
  ggoto *x;

  gcc_assert (q->is_label);

  q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Emit a standard landing pad sequence into SEQ for REGION.  */

static void
emit_post_landing_pad (gimple_seq *seq, eh_region region)
{
  eh_landing_pad lp = region->landing_pads;
  glabel *x;

  if (lp == NULL)
    lp = gen_eh_landing_pad (region);

  lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
  EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;

  x = gimple_build_label (lp->post_landing_pad);
  gimple_seq_add_stmt (seq, x);
}

/* Emit a RESX statement into SEQ for REGION.  */

static void
emit_resx (gimple_seq *seq, eh_region region)
{
  gresx *x = gimple_build_resx (region->index);
  gimple_seq_add_stmt (seq, x);
  if (region->outer)
    record_stmt_eh_region (region->outer, x);
}

/* Note that the current EH region may contain a throw, or a
   call to a function which itself may contain a throw.  */

static void
note_eh_region_may_contain_throw (eh_region region)
{
  while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
    {
      if (region->type == ERT_MUST_NOT_THROW)
        break;
      region = region->outer;
      if (region == NULL)
        break;
    }
}

/* Check if REGION has been marked as containing a throw.  If REGION is
   NULL, this predicate is false.  */

static inline bool
eh_region_may_contain_throw (eh_region r)
{
  return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
}

/* We want to transform
        try { body; } catch { stuff; }
   to
        normal_sequence:
          body;
          over:
        eh_sequence:
          landing_pad:
          stuff;
          goto over;

   TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
   should be placed before the second operand, or NULL.  OVER is
   an existing label that should be put at the exit, or NULL.  */

static gimple_seq
frob_into_branch_around (gtry *tp, eh_region region, tree over)
{
  gimple *x;
  gimple_seq cleanup, result;
  location_t loc = gimple_location (tp);

  cleanup = gimple_try_cleanup (tp);
  result = gimple_try_eval (tp);

  if (region)
    emit_post_landing_pad (&eh_seq, region);

  if (gimple_seq_may_fallthru (cleanup))
    {
      if (!over)
        over = create_artificial_label (loc);
      x = gimple_build_goto (over);
      gimple_set_location (x, loc);
      gimple_seq_add_stmt (&cleanup, x);
    }
  gimple_seq_add_seq (&eh_seq, cleanup);

  if (over)
    {
      x = gimple_build_label (over);
      gimple_seq_add_stmt (&result, x);
    }
  return result;
}

/* A subroutine of lower_try_finally.  Duplicate the tree rooted at T.
   Make sure to record all new labels found.  */

static gimple_seq
lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
                             location_t loc)
{
  gtry *region = NULL;
  gimple_seq new_seq;
  gimple_stmt_iterator gsi;
  location_t last_loc = UNKNOWN_LOCATION;

  new_seq = copy_gimple_seq_and_replace_locals (seq);

  for (gsi = gsi_last (new_seq); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      /* We duplicate __builtin_stack_restore at -O0 in the hope of eliminating
         it on the EH paths.  When it is not eliminated, give it the next
         location in the sequence or make it transparent in the debug info.  */
      if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
        gimple_set_location (stmt, last_loc);
      else if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
        {
          tree block = gimple_block (stmt);
          gimple_set_location (stmt, loc);
          gimple_set_block (stmt, block);
        }
      else
        last_loc = gimple_location (stmt);
    }

  if (outer_state->tf)
    region = outer_state->tf->try_finally_expr;
  collect_finally_tree_1 (new_seq, region);

  return new_seq;
}

/* A subroutine of lower_try_finally.  Create a fallthru label for
   the given try_finally state.  The only tricky bit here is that
   we have to make sure to record the label in our outer context.  */

static tree
lower_try_finally_fallthru_label (struct leh_tf_state *tf)
{
  tree label = tf->fallthru_label;
  treemple temp;

  if (!label)
    {
      label = create_artificial_label (gimple_location (tf->try_finally_expr));
      tf->fallthru_label = label;
      if (tf->outer->tf)
        {
          temp.t = label;
          record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
        }
    }
  return label;
}

/* A subroutine of lower_try_finally.  If FINALLY consists of a
   GIMPLE_EH_ELSE node, return it.  */

static inline geh_else *
get_eh_else (gimple_seq finally)
{
  gimple *x = gimple_seq_first_stmt (finally);
  if (gimple_code (x) == GIMPLE_EH_ELSE)
    {
      gcc_assert (gimple_seq_singleton_p (finally));
      return as_a <geh_else *> (x);
    }
  return NULL;
}

/* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
   langhook returns non-null, then the language requires that the exception
   path out of a try_finally be treated specially.  To wit: the code within
   the finally block may not itself throw an exception.  We have two choices
   here.  First we can duplicate the finally block and wrap it in a
   must_not_throw region.  Second, we can generate code like

        try {
          finally_block;
        } catch {
          if (fintmp == eh_edge)
            protect_cleanup_actions;
        }

   where "fintmp" is the temporary used in the switch statement generation
   alternative considered below.  For the nonce, we always choose the first
   option.

   THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */

static void
honor_protect_cleanup_actions (struct leh_state *outer_state,
                               struct leh_state *this_state,
                               struct leh_tf_state *tf)
{
  gimple_seq finally = gimple_try_cleanup (tf->top_p);

  /* EH_ELSE doesn't come from user code; only compiler generated stuff.
     It does need to be handled here, so as to separate the (different)
     EH path from the normal path.  But we should not attempt to wrap
     it with a must-not-throw node (which indeed gets in the way).  */
  if (geh_else *eh_else = get_eh_else (finally))
    {
      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
      finally = gimple_eh_else_e_body (eh_else);

      /* Let the ELSE see the exception that's being processed, but
         since the cleanup is outside the try block, process it with
         outer_state, otherwise it may be used as a cleanup for
         itself, and Bad Things (TM) ensue.  */
      eh_region save_ehp = outer_state->ehp_region;
      outer_state->ehp_region = this_state->cur_region;
      lower_eh_constructs_1 (outer_state, &finally);
      outer_state->ehp_region = save_ehp;
    }
  else
    {
      /* First check for nothing to do.  */
      if (lang_hooks.eh_protect_cleanup_actions == NULL)
        return;
      tree actions = lang_hooks.eh_protect_cleanup_actions ();
      if (actions == NULL)
        return;

      if (this_state)
        finally = lower_try_finally_dup_block (finally, outer_state,
          gimple_location (tf->try_finally_expr));

      /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
         set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
         to be in an enclosing scope, but needs to be implemented at this level
         to avoid a nesting violation (see wrap_temporary_cleanups in
         cp/decl.c).  Since it's logically at an outer level, we should call
         terminate before we get to it, so strip it away before adding the
         MUST_NOT_THROW filter.  */
      gimple_stmt_iterator gsi = gsi_start (finally);
      gimple *x = gsi_stmt (gsi);
      if (gimple_code (x) == GIMPLE_TRY
          && gimple_try_kind (x) == GIMPLE_TRY_CATCH
          && gimple_try_catch_is_cleanup (x))
        {
          gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
          gsi_remove (&gsi, false);
        }

      /* Wrap the block with protect_cleanup_actions as the action.  */
      geh_mnt *eh_mnt = gimple_build_eh_must_not_throw (actions);
      gtry *try_stmt = gimple_build_try (finally,
                                         gimple_seq_alloc_with_stmt (eh_mnt),
                                         GIMPLE_TRY_CATCH);
      finally = lower_eh_must_not_throw (outer_state, try_stmt);
    }

  /* Drop all of this into the exception sequence.  */
  emit_post_landing_pad (&eh_seq, tf->region);
  gimple_seq_add_seq (&eh_seq, finally);
  if (gimple_seq_may_fallthru (finally))
    emit_resx (&eh_seq, tf->region);

  /* Having now been handled, EH isn't to be considered with
     the rest of the outgoing edges.  */
  tf->may_throw = false;
}

/* A subroutine of lower_try_finally.  We have determined that there is
   no fallthru edge out of the finally block.  This means that there is
   no outgoing edge corresponding to any incoming edge.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_nofallthru (struct leh_state *state,
                              struct leh_tf_state *tf)
{
  tree lab;
  gimple *x;
  geh_else *eh_else;
  gimple_seq finally;
  struct goto_queue_node *q, *qe;

  lab = create_artificial_label (gimple_location (tf->try_finally_expr));

  /* We expect that tf->top_p is a GIMPLE_TRY.  */
  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  x = gimple_build_label (lab);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  for (; q < qe; ++q)
    if (q->index < 0)
      do_return_redirection (q, lab, NULL);
    else
      do_goto_redirection (q, lab, NULL, tf);

  replace_goto_queue (tf);

  /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      finally = gimple_eh_else_n_body (eh_else);
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
        {
          finally = gimple_eh_else_e_body (eh_else);
          lower_eh_constructs_1 (state, &finally);

          emit_post_landing_pad (&eh_seq, tf->region);
          gimple_seq_add_seq (&eh_seq, finally);
        }
    }
  else
    {
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
        {
          emit_post_landing_pad (&eh_seq, tf->region);

          x = gimple_build_goto (lab);
          gimple_set_location (x, gimple_location (tf->try_finally_expr));
          gimple_seq_add_stmt (&eh_seq, x);
        }
    }
}

/* A subroutine of lower_try_finally.  We have determined that there is
   exactly one destination of the finally block.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  geh_else *eh_else;
  glabel *label_stmt;
  gimple *x;
  gimple_seq finally;
  gimple_stmt_iterator gsi;
  tree finally_label;
  location_t loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* Since there's only one destination, and the destination edge can only
     either be EH or non-EH, that implies that all of our incoming edges
     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      if (tf->may_throw)
        finally = gimple_eh_else_e_body (eh_else);
      else
        finally = gimple_eh_else_n_body (eh_else);
    }

  lower_eh_constructs_1 (state, &finally);

  for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
        {
          tree block = gimple_block (stmt);
          gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
          gimple_set_block (stmt, block);
        }
    }

  if (tf->may_throw)
    {
      /* Only reachable via the exception edge.  Add the given label to
         the head of the FINALLY block.  Append a RESX at the end.  */
      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, finally);
      emit_resx (&eh_seq, tf->region);
      return;
    }

  if (tf->may_fallthru)
    {
      /* Only reachable via the fallthru edge.  Do nothing but let
         the two blocks run together; we'll fall out the bottom.  */
      gimple_seq_add_seq (&tf->top_p_seq, finally);
      return;
    }

  finally_label = create_artificial_label (loc);
  label_stmt = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, label_stmt);

  gimple_seq_add_seq (&tf->top_p_seq, finally);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;

  if (tf->may_return)
    {
      /* Reachable by return expressions only.  Redirect them.  */
      for (; q < qe; ++q)
        do_return_redirection (q, finally_label, NULL);
      replace_goto_queue (tf);
    }
  else
    {
      /* Reachable by goto expressions only.  Redirect them.  */
      for (; q < qe; ++q)
        do_goto_redirection (q, finally_label, NULL, tf);
      replace_goto_queue (tf);

      if (tf->dest_array[0] == tf->fallthru_label)
        {
          /* Reachable by goto to fallthru label only.  Redirect it
             to the new label (already created, sadly), and do not
             emit the final branch out, or the fallthru label.  */
          tf->fallthru_label = NULL;
          return;
        }
    }

  /* Place the original return/goto to the original destination
     immediately after the finally block.  */
  x = tf->goto_queue[0].cont_stmt;
  gimple_seq_add_stmt (&tf->top_p_seq, x);
  maybe_record_in_goto_queue (state, x);
}

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by duplicating the
   finally block for every destination.  */

static void
lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
{
  gimple_seq finally;
  gimple_seq new_stmt;
  gimple_seq seq;
  gimple *x;
  geh_else *eh_else;
  tree tmp;
  location_t tf_loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);

  /* Notice EH_ELSE, and simplify some of the remaining code
     by considering FINALLY to be the normal return path only.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    finally = gimple_eh_else_n_body (eh_else);

  tf->top_p_seq = gimple_try_eval (tf->top_p);
  new_stmt = NULL;

  if (tf->may_fallthru)
    {
      seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);
      gimple_seq_add_seq (&new_stmt, seq);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&new_stmt, x);
    }

  if (tf->may_throw)
    {
      /* We don't need to copy the EH path of EH_ELSE,
         since it is only emitted once.  */
      if (eh_else)
        seq = gimple_eh_else_e_body (eh_else);
      else
        seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);

      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, seq);
      emit_resx (&eh_seq, tf->region);
    }

  if (tf->goto_queue)
    {
      struct goto_queue_node *q, *qe;
      int return_index, index;
      struct labels_s
      {
        struct goto_queue_node *q;
        tree label;
      } *labels;

      return_index = tf->dest_array.length ();
      labels = XCNEWVEC (struct labels_s, return_index + 1);

      q = tf->goto_queue;
      qe = q + tf->goto_queue_active;
      for (; q < qe; q++)
        {
          index = q->index < 0 ? return_index : q->index;

          if (!labels[index].q)
            labels[index].q = q;
        }

      for (index = 0; index < return_index + 1; index++)
        {
          tree lab;

          q = labels[index].q;
          if (! q)
            continue;

          lab = labels[index].label
            = create_artificial_label (tf_loc);

          if (index == return_index)
            do_return_redirection (q, lab, NULL);
          else
            do_goto_redirection (q, lab, NULL, tf);

          x = gimple_build_label (lab);
          gimple_seq_add_stmt (&new_stmt, x);

          seq = lower_try_finally_dup_block (finally, state, q->location);
          lower_eh_constructs_1 (state, &seq);
          gimple_seq_add_seq (&new_stmt, seq);

          gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
          maybe_record_in_goto_queue (state, q->cont_stmt);
        }

      for (q = tf->goto_queue; q < qe; q++)
        {
          tree lab;

          index = q->index < 0 ? return_index : q->index;

          if (labels[index].q == q)
            continue;

          lab = labels[index].label;

          if (index == return_index)
            do_return_redirection (q, lab, NULL);
          else
            do_goto_redirection (q, lab, NULL, tf);
        }

      replace_goto_queue (tf);
      free (labels);
    }

  /* Need to link new stmts after running replace_goto_queue due
     to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
}
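
/* For illustration only (not from the original sources): for

     try { if (c) goto out; }
     finally { cleanup (); }

   the copy strategy emits one duplicate of the finally code per
   destination, roughly:

     <try body, goto rewritten to goto L_out_copy>
     cleanup ();        // copy for the fallthru path
     goto fallthru;
     L_out_copy:
     cleanup ();        // copy for the escaping goto
     goto out;

   plus, if the try body can throw, one more copy appended to eh_seq
   behind a landing pad and followed by a RESX.  */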

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by instrumenting
   each incoming edge and creating a switch statement at the end of the
   finally block that branches to the appropriate destination.  */

static void
lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  tree finally_tmp, finally_label;
  int return_index, eh_index, fallthru_index;
  int nlabels, ndests, j, last_case_index;
  tree last_case;
  auto_vec<tree> case_label_vec;
  gimple_seq switch_body = NULL;
  gimple *x;
  geh_else *eh_else;
  tree tmp;
  gimple *switch_stmt;
  gimple_seq finally;
  hash_map<tree, gimple *> *cont_map = NULL;
  /* The location of the TRY_FINALLY stmt.  */
  location_t tf_loc = gimple_location (tf->try_finally_expr);
  /* The location of the finally block.  */
  location_t finally_loc;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Mash the TRY block to the head of the chain.  */
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* The location of the finally is either the last stmt in the finally
     block or the location of the TRY_FINALLY itself.  */
  x = gimple_seq_last_stmt (finally);
  finally_loc = x ? gimple_location (x) : tf_loc;

  /* Prepare for switch statement generation.  */
  nlabels = tf->dest_array.length ();
  return_index = nlabels;
  eh_index = return_index + tf->may_return;
  fallthru_index = eh_index + (tf->may_throw && !eh_else);
  ndests = fallthru_index + tf->may_fallthru;

  finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
  finally_label = create_artificial_label (finally_loc);

  /* We use vec::quick_push on case_label_vec throughout this function,
     since we know the size in advance and allocate precisely as much
     space as is needed.  */
  case_label_vec.create (ndests);
  last_case = NULL;
  last_case_index = 0;

  /* Begin inserting code for getting to the finally block.  Things
     are done in this order to correspond to the sequence the code is
     laid out.  */

  if (tf->may_fallthru)
    {
      x = gimple_build_assign (finally_tmp,
                               build_int_cst (integer_type_node,
                                              fallthru_index));
      gimple_set_location (x, finally_loc);
      gimple_seq_add_stmt (&tf->top_p_seq, x);

      tmp = build_int_cst (integer_type_node, fallthru_index);
      last_case = build_case_label (tmp, NULL,
                                    create_artificial_label (finally_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, finally_loc);
      gimple_seq_add_stmt (&switch_body, x);
    }

  /* For EH_ELSE, emit the exception path (plus resx) now, then
     subsequently we only need consider the normal path.  */
  if (eh_else)
    {
      if (tf->may_throw)
        {
          finally = gimple_eh_else_e_body (eh_else);
          lower_eh_constructs_1 (state, &finally);

          emit_post_landing_pad (&eh_seq, tf->region);
          gimple_seq_add_seq (&eh_seq, finally);
          emit_resx (&eh_seq, tf->region);
        }

      finally = gimple_eh_else_n_body (eh_else);
    }
  else if (tf->may_throw)
    {
      emit_post_landing_pad (&eh_seq, tf->region);

      x = gimple_build_assign (finally_tmp,
                               build_int_cst (integer_type_node, eh_index));
      gimple_seq_add_stmt (&eh_seq, x);

      x = gimple_build_goto (finally_label);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&eh_seq, x);

      tmp = build_int_cst (integer_type_node, eh_index);
      last_case = build_case_label (tmp, NULL,
                                    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&eh_seq, x);
      emit_resx (&eh_seq, tf->region);
    }

  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  lower_eh_constructs_1 (state, &finally);
  gimple_seq_add_seq (&tf->top_p_seq, finally);

  /* Redirect each incoming goto edge.  */
  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  j = last_case_index + tf->may_return;
  /* Prepare the assignments to finally_tmp that are executed upon the
     entrance through a particular edge.  */
  for (; q < qe; ++q)
    {
      gimple_seq mod = NULL;
      int switch_id;
      unsigned int case_index;

      if (q->index < 0)
        {
          x = gimple_build_assign (finally_tmp,
                                   build_int_cst (integer_type_node,
                                                  return_index));
          gimple_seq_add_stmt (&mod, x);
          do_return_redirection (q, finally_label, mod);
          switch_id = return_index;
        }
      else
        {
          x = gimple_build_assign (finally_tmp,
                                   build_int_cst (integer_type_node, q->index));
          gimple_seq_add_stmt (&mod, x);
          do_goto_redirection (q, finally_label, mod, tf);
          switch_id = q->index;
        }

      case_index = j + q->index;
      if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
        {
          tree case_lab;
          tmp = build_int_cst (integer_type_node, switch_id);
          case_lab = build_case_label (tmp, NULL,
                                       create_artificial_label (tf_loc));
          /* We store the cont_stmt in the pointer map, so that we can recover
             it in the loop below.  */
          if (!cont_map)
            cont_map = new hash_map<tree, gimple *>;
          cont_map->put (case_lab, q->cont_stmt);
          case_label_vec.quick_push (case_lab);
        }
    }
  for (j = last_case_index; j < last_case_index + nlabels; j++)
    {
      gimple *cont_stmt;

      last_case = case_label_vec[j];

      gcc_assert (last_case);
      gcc_assert (cont_map);

      cont_stmt = *cont_map->get (last_case);

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);
      gimple_seq_add_stmt (&switch_body, cont_stmt);
      maybe_record_in_goto_queue (state, cont_stmt);
    }
  if (cont_map)
    delete cont_map;

  replace_goto_queue (tf);

  /* Make sure that the last case is the default label, as one is required.
     Then sort the labels, which is also required in GIMPLE.  */
  CASE_LOW (last_case) = NULL;
  tree tem = case_label_vec.pop ();
  gcc_assert (tem == last_case);
  sort_case_labels (case_label_vec);

  /* Build the switch statement, setting last_case to be the default
     label.  */
  switch_stmt = gimple_build_switch (finally_tmp, last_case,
                                     case_label_vec);
  gimple_set_location (switch_stmt, finally_loc);

  /* Need to link SWITCH_STMT after running replace_goto_queue
     due to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
  gimple_seq_add_seq (&tf->top_p_seq, switch_body);
}
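
/* For illustration only (not from the original sources): with a fallthru
   path and one escaping goto, the switch strategy produces roughly

     <try body>
     finally_tmp = <fallthru_index>;
     finally:
       cleanup ();              // single copy of the finally block
       switch (finally_tmp)
         {
         case <fallthru_index>: goto fallthru;
         default: goto out;     // the recorded destination
         }

   trading one extra assignment and dispatch per edge for not duplicating
   the finally block at all.  */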

/* Decide whether or not we are going to duplicate the finally block.
   There are several considerations.

   Mainly, we'd like to prevent egregious code growth.  One way to
   do this is to estimate the size of the finally block, multiply
   that by the number of copies we'd need to make, and compare against
   the estimate of the size of the switch machinery we'd have to add.  */

static bool
decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
{
  int f_estimate, sw_estimate;
  geh_else *eh_else;

  /* If there's an EH_ELSE involved, the exception path is separate
     and really doesn't come into play for this computation.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      ndests -= may_throw;
      finally = gimple_eh_else_n_body (eh_else);
    }

  if (!optimize)
    {
      gimple_stmt_iterator gsi;

      if (ndests == 1)
        return true;

      for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          /* Duplicate __builtin_stack_restore in the hope of eliminating it
             on the EH paths and, consequently, useless cleanups.  */
          gimple *stmt = gsi_stmt (gsi);
          if (!is_gimple_debug (stmt)
              && !gimple_clobber_p (stmt)
              && !gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
            return false;
        }
      return true;
    }

  /* Finally estimate N times, plus N gotos.  */
  f_estimate = estimate_num_insns_seq (finally, &eni_size_weights);
  f_estimate = (f_estimate + 1) * ndests;

  /* Switch statement (cost 10), N variable assignments, N gotos.  */
  sw_estimate = 10 + 2 * ndests;

  /* Optimize for size clearly wants our best guess.  */
  if (optimize_function_for_size_p (cfun))
    return f_estimate < sw_estimate;

  /* ??? These numbers are completely made up so far.  */
  if (optimize > 1)
    return f_estimate < 100 || f_estimate < sw_estimate * 2;
  else
    return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
}
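
/* For illustration only (not from the original sources): a finally block
   that estimate_num_insns_seq scores at 10 insns with ndests == 3 gives
   f_estimate = (10 + 1) * 3 = 33 and sw_estimate = 10 + 2 * 3 = 16.
   At -O2 the test "33 < 100" holds, so the block is copied; when
   optimizing for size, "33 < 16" fails and the switch form is used.  */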

/* REG is the current region of a LEH state: the enclosing region for a
   possible cleanup region, or the region itself.  Returns TRUE if such
   a region would be unreachable.

   Cleanup regions within a must-not-throw region aren't actually reachable
   even if there are throwing stmts within them, because the personality
   routine will call terminate before unwinding.  */

static bool
cleanup_is_dead_in (leh_state *state)
{
  if (flag_checking)
    {
      eh_region reg = state->cur_region;
      while (reg && reg->type == ERT_CLEANUP)
        reg = reg->outer;

      gcc_assert (reg == state->outer_non_cleanup);
    }

  eh_region reg = state->outer_non_cleanup;
  return (reg && reg->type == ERT_MUST_NOT_THROW);
}

/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY node
   to a sequence of labels and blocks, plus the exception region trees
   that record all the magic.  This is complicated by the need to
   arrange for the FINALLY block to be executed on all exits.  */

static gimple_seq
lower_try_finally (struct leh_state *state, gtry *tp)
{
  struct leh_tf_state this_tf;
  struct leh_state this_state;
  int ndests;
  gimple_seq old_eh_seq;

  /* Process the try block.  */

  memset (&this_tf, 0, sizeof (this_tf));
  this_tf.try_finally_expr = tp;
  this_tf.top_p = tp;
  this_tf.outer = state;
  if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state))
    {
      this_tf.region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_tf.region;
    }
  else
    {
      this_tf.region = NULL;
      this_state.cur_region = state->cur_region;
    }

  this_state.outer_non_cleanup = state->outer_non_cleanup;
  this_state.ehp_region = state->ehp_region;
  this_state.tf = &this_tf;

  old_eh_seq = eh_seq;
  eh_seq = NULL;

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  /* Determine if the try block is escaped through the bottom.  */
  this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));

  /* Determine if any exceptions are possible within the try block.  */
  if (this_tf.region)
    this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
  if (this_tf.may_throw)
    honor_protect_cleanup_actions (state, &this_state, &this_tf);

  /* Determine how many edges (still) reach the finally block.  Or rather,
     how many destinations are reached by the finally block.  Use this to
     determine how we process the finally block itself.  */

  ndests = this_tf.dest_array.length ();
  ndests += this_tf.may_fallthru;
  ndests += this_tf.may_return;
  ndests += this_tf.may_throw;

  /* If the FINALLY block is not reachable, dike it out.  */
  if (ndests == 0)
    {
      gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
      gimple_try_set_cleanup (tp, NULL);
    }
  /* If the finally block doesn't fall through, then any destination
     we might try to impose there isn't reached either.  There may be
     some minor amount of cleanup and redirection still needed.  */
  else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
    lower_try_finally_nofallthru (state, &this_tf);

  /* We can easily special-case redirection to a single destination.  */
  else if (ndests == 1)
    lower_try_finally_onedest (state, &this_tf);
  else if (decide_copy_try_finally (ndests, this_tf.may_throw,
                                    gimple_try_cleanup (tp)))
    lower_try_finally_copy (state, &this_tf);
  else
    lower_try_finally_switch (state, &this_tf);

  /* If someone requested we add a label at the end of the transformed
     block, do so.  */
  if (this_tf.fallthru_label)
    {
      /* This must be reached only if ndests == 0.  */
      gimple *x = gimple_build_label (this_tf.fallthru_label);
      gimple_seq_add_stmt (&this_tf.top_p_seq, x);
    }

  this_tf.dest_array.release ();
  free (this_tf.goto_queue);
  if (this_tf.goto_queue_map)
    delete this_tf.goto_queue_map;

  /* If there was an old (aka outer) eh_seq, append the current eh_seq.
     If there was no old eh_seq, then the append is trivially already done.  */
  if (old_eh_seq)
    {
      if (eh_seq == NULL)
        eh_seq = old_eh_seq;
      else
        {
          gimple_seq new_eh_seq = eh_seq;
          eh_seq = old_eh_seq;
          gimple_seq_add_seq (&eh_seq, new_eh_seq);
        }
    }

  return this_tf.top_p_seq;
}
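
/* For illustration only (not from the original sources): the dispatch
   above picks, in order: drop the unreachable finally (ndests == 0), the
   nofallthru form, the onedest form (ndests == 1), and otherwise either
   N copies of the finally block or one copy plus a dispatch switch, as
   arbitrated by decide_copy_try_finally.  */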

/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
   list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
   exception region trees that record all the magic.  */

static gimple_seq
lower_catch (struct leh_state *state, gtry *tp)
{
  eh_region try_region = NULL;
  struct leh_state this_state = *state;
  gimple_stmt_iterator gsi;
  tree out_label;
  gimple_seq new_seq, cleanup;
  gimple *x;
  geh_dispatch *eh_dispatch;
  location_t try_catch_loc = gimple_location (tp);
  location_t catch_loc = UNKNOWN_LOCATION;

  if (flag_exceptions)
    {
      try_region = gen_eh_region_try (state->cur_region);
      this_state.cur_region = try_region;
      this_state.outer_non_cleanup = this_state.cur_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (!eh_region_may_contain_throw (try_region))
    return gimple_try_eval (tp);

  new_seq = NULL;
  eh_dispatch = gimple_build_eh_dispatch (try_region->index);
  gimple_seq_add_stmt (&new_seq, eh_dispatch);
  emit_resx (&new_seq, try_region);

  this_state.cur_region = state->cur_region;
  this_state.outer_non_cleanup = state->outer_non_cleanup;
  this_state.ehp_region = try_region;

  /* Add eh_seq from lowering EH in the cleanup sequence after the cleanup
     itself, so that e.g. for coverage purposes the nested cleanups don't
     appear before the cleanup body.  See PR64634 for details.  */
  gimple_seq old_eh_seq = eh_seq;
  eh_seq = NULL;

  out_label = NULL;
  cleanup = gimple_try_cleanup (tp);
  for (gsi = gsi_start (cleanup);
       !gsi_end_p (gsi);
       gsi_next (&gsi))
    {
      eh_catch c;
      gcatch *catch_stmt;
      gimple_seq handler;

      catch_stmt = as_a <gcatch *> (gsi_stmt (gsi));
      if (catch_loc == UNKNOWN_LOCATION)
        catch_loc = gimple_location (catch_stmt);
      c = gen_eh_region_catch (try_region, gimple_catch_types (catch_stmt));

      handler = gimple_catch_handler (catch_stmt);
      lower_eh_constructs_1 (&this_state, &handler);

      c->label = create_artificial_label (UNKNOWN_LOCATION);
      x = gimple_build_label (c->label);
      gimple_seq_add_stmt (&new_seq, x);

      gimple_seq_add_seq (&new_seq, handler);

      if (gimple_seq_may_fallthru (new_seq))
        {
          if (!out_label)
            out_label = create_artificial_label (try_catch_loc);

          x = gimple_build_goto (out_label);
          gimple_seq_add_stmt (&new_seq, x);
        }
      if (!c->type_list)
        break;
    }

  /* Try to set a location on the dispatching construct to avoid inheriting
     the location of the previous statement.  */
  gimple_set_location (eh_dispatch, catch_loc);

  gimple_try_set_cleanup (tp, new_seq);

  gimple_seq new_eh_seq = eh_seq;
  eh_seq = old_eh_seq;
  gimple_seq ret_seq = frob_into_branch_around (tp, try_region, out_label);
  gimple_seq_add_seq (&eh_seq, new_eh_seq);
  return ret_seq;
}

/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with a
   GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
   region trees that record all the magic.  */

static gimple_seq
lower_eh_filter (struct leh_state *state, gtry *tp)
{
  struct leh_state this_state = *state;
  eh_region this_region = NULL;
  gimple *inner, *x;
  gimple_seq new_seq;

  inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));

  if (flag_exceptions)
    {
      this_region = gen_eh_region_allowed (state->cur_region,
                                           gimple_eh_filter_types (inner));
      this_state.cur_region = this_region;
      this_state.outer_non_cleanup = this_state.cur_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (!eh_region_may_contain_throw (this_region))
    return gimple_try_eval (tp);

  this_state.cur_region = state->cur_region;
  this_state.ehp_region = this_region;

  new_seq = NULL;
  x = gimple_build_eh_dispatch (this_region->index);
  gimple_set_location (x, gimple_location (tp));
  gimple_seq_add_stmt (&new_seq, x);
  emit_resx (&new_seq, this_region);

  this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
  x = gimple_build_label (this_region->u.allowed.label);
  gimple_seq_add_stmt (&new_seq, x);

  lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
  gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));

  gimple_try_set_cleanup (tp, new_seq);

  return frob_into_branch_around (tp, this_region, NULL);
}

/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
   a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
   plus the exception region trees that record all the magic.  */

static gimple_seq
lower_eh_must_not_throw (struct leh_state *state, gtry *tp)
{
  struct leh_state this_state = *state;

  if (flag_exceptions)
    {
      gimple *inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
      eh_region this_region;

      this_region = gen_eh_region_must_not_throw (state->cur_region);
      this_region->u.must_not_throw.failure_decl
        = gimple_eh_must_not_throw_fndecl (
            as_a <geh_mnt *> (inner));
      this_region->u.must_not_throw.failure_loc
        = LOCATION_LOCUS (gimple_location (tp));

      /* In order to get mangling applied to this decl, we must mark it
         used now.  Otherwise, pass_ipa_free_lang_data won't think it
         needs to happen.  */
      TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;

      this_state.cur_region = this_region;
      this_state.outer_non_cleanup = this_state.cur_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  return gimple_try_eval (tp);
}

/* Implement a cleanup expression.  This is similar to try-finally,
   except that we only execute the cleanup block for exception edges.  */

static gimple_seq
lower_cleanup (struct leh_state *state, gtry *tp)
{
  struct leh_state this_state = *state;
  eh_region this_region = NULL;
  struct leh_tf_state fake_tf;
  gimple_seq result;
  bool cleanup_dead = cleanup_is_dead_in (state);

  if (flag_exceptions && !cleanup_dead)
    {
      this_region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_region;
      this_state.outer_non_cleanup = state->outer_non_cleanup;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (cleanup_dead || !eh_region_may_contain_throw (this_region))
    return gimple_try_eval (tp);

  /* Build enough of a try-finally state so that we can reuse
     honor_protect_cleanup_actions.  */
  memset (&fake_tf, 0, sizeof (fake_tf));
  fake_tf.top_p = fake_tf.try_finally_expr = tp;
  fake_tf.outer = state;
  fake_tf.region = this_region;
  fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
  fake_tf.may_throw = true;

  honor_protect_cleanup_actions (state, NULL, &fake_tf);

  if (fake_tf.may_throw)
    {
      /* In this case honor_protect_cleanup_actions had nothing to do,
         and we should process this normally.  */
      lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
      result = frob_into_branch_around (tp, this_region,
                                        fake_tf.fallthru_label);
    }
  else
    {
      /* In this case honor_protect_cleanup_actions did nearly all of
         the work.  All we have left is to append the fallthru_label.  */
      result = gimple_try_eval (tp);
      if (fake_tf.fallthru_label)
        {
          gimple *x = gimple_build_label (fake_tf.fallthru_label);
          gimple_seq_add_stmt (&result, x);
        }
    }
  return result;
}
2000 /* Main loop for lowering eh constructs. Also moves gsi to the next
2004 lower_eh_constructs_2 (struct leh_state
*state
, gimple_stmt_iterator
*gsi
)
2008 gimple
*stmt
= gsi_stmt (*gsi
);
2010 switch (gimple_code (stmt
))
2014 tree fndecl
= gimple_call_fndecl (stmt
);
2017 if (fndecl
&& fndecl_built_in_p (fndecl
, BUILT_IN_NORMAL
))
2018 switch (DECL_FUNCTION_CODE (fndecl
))
2020 case BUILT_IN_EH_POINTER
:
2021 /* The front end may have generated a call to
2022 __builtin_eh_pointer (0) within a catch region. Replace
2023 this zero argument with the current catch region number. */
2024 if (state
->ehp_region
)
2026 tree nr
= build_int_cst (integer_type_node
,
2027 state
->ehp_region
->index
);
2028 gimple_call_set_arg (stmt
, 0, nr
);
2032 /* The user has dome something silly. Remove it. */
2033 rhs
= null_pointer_node
;
2038 case BUILT_IN_EH_FILTER
:
2039 /* ??? This should never appear, but since it's a builtin it
2040 is accessible to abuse by users. Just remove it and
2041 replace the use with the arbitrary value zero. */
2042 rhs
= build_int_cst (TREE_TYPE (TREE_TYPE (fndecl
)), 0);
2044 lhs
= gimple_call_lhs (stmt
);
2045 x
= gimple_build_assign (lhs
, rhs
);
2046 gsi_insert_before (gsi
, x
, GSI_SAME_STMT
);
2049 case BUILT_IN_EH_COPY_VALUES
:
2050 /* Likewise this should not appear. Remove it. */
2051 gsi_remove (gsi
, true);
2061 /* If the stmt can throw, use a new temporary for the assignment
2062 to a LHS. This makes sure the old value of the LHS is
2063 available on the EH edge. Only do so for statements that
2064 potentially fall through (no noreturn calls e.g.), otherwise
2065 this new assignment might create fake fallthru regions. */
2066 if (stmt_could_throw_p (cfun
, stmt
)
2067 && gimple_has_lhs (stmt
)
2068 && gimple_stmt_may_fallthru (stmt
)
2069 && !tree_could_throw_p (gimple_get_lhs (stmt
))
2070 && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt
))))
2072 tree lhs
= gimple_get_lhs (stmt
);
2073 tree tmp
= create_tmp_var (TREE_TYPE (lhs
));
2074 gimple
*s
= gimple_build_assign (lhs
, tmp
);
2075 gimple_set_location (s
, gimple_location (stmt
));
2076 gimple_set_block (s
, gimple_block (stmt
));
2077 gimple_set_lhs (stmt
, tmp
);
2078 gsi_insert_after (gsi
, s
, GSI_SAME_STMT
);
2080 /* Look for things that can throw exceptions, and record them. */
2081 if (state
->cur_region
&& stmt_could_throw_p (cfun
, stmt
))
2083 record_stmt_eh_region (state
->cur_region
, stmt
);
2084 note_eh_region_may_contain_throw (state
->cur_region
);
2091 maybe_record_in_goto_queue (state
, stmt
);
2095 verify_norecord_switch_expr (state
, as_a
<gswitch
*> (stmt
));
    case GIMPLE_TRY:
      {
        gtry *try_stmt = as_a <gtry *> (stmt);
        if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
          replace = lower_try_finally (state, try_stmt);
        else
          {
            x = gimple_seq_first_stmt (gimple_try_cleanup (try_stmt));
            if (!x)
              {
                replace = gimple_try_eval (try_stmt);
                lower_eh_constructs_1 (state, &replace);
              }
            else
              switch (gimple_code (x))
                {
                case GIMPLE_CATCH:
                  replace = lower_catch (state, try_stmt);
                  break;
                case GIMPLE_EH_FILTER:
                  replace = lower_eh_filter (state, try_stmt);
                  break;
                case GIMPLE_EH_MUST_NOT_THROW:
                  replace = lower_eh_must_not_throw (state, try_stmt);
                  break;
                case GIMPLE_EH_ELSE:
                  /* This code is only valid with GIMPLE_TRY_FINALLY.  */
                  gcc_unreachable ();
                default:
                  replace = lower_cleanup (state, try_stmt);
                  break;
                }
          }
      }

      /* Remove the old stmt and insert the transformed sequence
         instead.  */
      gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
      gsi_remove (gsi, true);

      /* Return since we don't want gsi_next ().  */
      return;

    case GIMPLE_EH_ELSE:
      /* We should be eliminating this in lower_try_finally et al.  */
      gcc_unreachable ();

    default:
      /* A type, a decl, or some kind of statement that we're not
         interested in.  Don't walk them.  */
      break;
    }

  gsi_next (gsi);
}
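/* For illustration, a hedged sketch of the BUILT_IN_EH_POINTER rewrite
   above: inside a catch region whose eh_region has index 1, the front
   end's

     ptr = __builtin_eh_pointer (0);

   is rewritten by lower_eh_constructs_2 into

     ptr = __builtin_eh_pointer (1);

   The region number 1 is made up for the example; the real value
   comes from state->ehp_region->index.  */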
/* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */

static void
lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
{
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
    lower_eh_constructs_2 (state, &gsi);
}
namespace {

const pass_data pass_data_lower_eh =
{
  GIMPLE_PASS, /* type */
  "eh", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  PROP_gimple_leh, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_eh : public gimple_opt_pass
{
public:
  pass_lower_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_eh, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_lower_eh
unsigned int
pass_lower_eh::execute (function *fun)
{
  struct leh_state null_state;
  gimple_seq bodyp;

  bodyp = gimple_body (current_function_decl);
  if (bodyp == NULL)
    return 0;

  finally_tree = new hash_table<finally_tree_hasher> (31);
  eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
  memset (&null_state, 0, sizeof (null_state));

  collect_finally_tree_1 (bodyp, NULL);
  lower_eh_constructs_1 (&null_state, &bodyp);
  gimple_set_body (current_function_decl, bodyp);

  /* We assume there's a return statement, or something, at the end of
     the function, and thus plopping the EH sequence afterward won't
     change anything.  */
  gcc_assert (!gimple_seq_may_fallthru (bodyp));
  gimple_seq_add_seq (&bodyp, eh_seq);

  /* We assume that since BODYP already existed, adding EH_SEQ to it
     didn't change its value, and we don't have to re-set the function.  */
  gcc_assert (bodyp == gimple_body (current_function_decl));

  delete finally_tree;
  finally_tree = NULL;
  BITMAP_FREE (eh_region_may_contain_throw_map);
  eh_seq = NULL;

  /* If this function needs a language specific EH personality routine
     and the frontend didn't already set one do so now.  */
  if (function_needs_eh_personality (fun) == eh_personality_lang
      && !DECL_FUNCTION_PERSONALITY (current_function_decl))
    DECL_FUNCTION_PERSONALITY (current_function_decl)
      = lang_hooks.eh_personality ();

  return 0;
}

} // anon namespace
gimple_opt_pass *
make_pass_lower_eh (gcc::context *ctxt)
{
  return new pass_lower_eh (ctxt);
}
/* Create the multiple edges from an EH_DISPATCH statement to all of
   the possible handlers for its EH region.  Return true if there's
   no fallthru edge; false if there is.  */

bool
make_eh_dispatch_edges (geh_dispatch *stmt)
{
  eh_region r;
  eh_catch c;
  basic_block src, dst;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  src = gimple_bb (stmt);

  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
        {
          dst = label_to_block (cfun, c->label);
          make_edge (src, dst, 0);

          /* A catch-all handler doesn't have a fallthru.  */
          if (c->type_list == NULL)
            return false;
        }
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      dst = label_to_block (cfun, r->u.allowed.label);
      make_edge (src, dst, 0);
      break;

    default:
      gcc_unreachable ();
    }

  return true;
}
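/* Sketch of the edges created above (hypothetical CFG): for

     try { ... }
     catch (A) { ... }    <-- handler label L_A
     catch (...) { ... }  <-- handler label L_all

   the EH_DISPATCH block gets one outgoing edge to L_A and one to
   L_all, and because the second handler is a catch-all
   (type_list == NULL) make_eh_dispatch_edges returns false, i.e. no
   fallthru edge is needed.  */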
/* Create the single EH edge from STMT to its nearest landing pad,
   if there is such a landing pad within the current function.  */

void
make_eh_edges (gimple *stmt)
{
  basic_block src, dst;
  eh_landing_pad lp;
  int lp_nr;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr <= 0)
    return;

  lp = get_eh_landing_pad_from_number (lp_nr);
  gcc_assert (lp != NULL);

  src = gimple_bb (stmt);
  dst = label_to_block (cfun, lp->post_landing_pad);
  make_edge (src, dst, EDGE_EH);
}
/* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
   do not actually perform the final edge redirection.

   CHANGE_REGION is true when we're being called from cleanup_empty_eh and
   we intend to change the destination EH region as well; this means
   EH_LANDING_PAD_NR must already be set on the destination block label.
   If false, we're being called from generic cfg manipulation code and we
   should preserve our place within the region tree.  */

static void
redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
{
  eh_landing_pad old_lp, new_lp;
  basic_block old_bb;
  gimple *throw_stmt;
  int old_lp_nr, new_lp_nr;
  tree old_label, new_label;
  edge_iterator ei;
  edge e;

  old_bb = edge_in->dest;
  old_label = gimple_block_label (old_bb);
  old_lp_nr = EH_LANDING_PAD_NR (old_label);
  gcc_assert (old_lp_nr > 0);
  old_lp = get_eh_landing_pad_from_number (old_lp_nr);

  throw_stmt = last_stmt (edge_in->src);
  gcc_checking_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);

  new_label = gimple_block_label (new_bb);

  /* Look for an existing region that might be using NEW_BB already.  */
  new_lp_nr = EH_LANDING_PAD_NR (new_label);
  if (new_lp_nr)
    {
      new_lp = get_eh_landing_pad_from_number (new_lp_nr);
      gcc_assert (new_lp);

      /* Unless CHANGE_REGION is true, the new and old landing pad
         had better be associated with the same EH region.  */
      gcc_assert (change_region || new_lp->region == old_lp->region);
    }
  else
    {
      new_lp = NULL;
      gcc_assert (!change_region);
    }

  /* Notice when we redirect the last EH edge away from OLD_BB.  */
  FOR_EACH_EDGE (e, ei, old_bb->preds)
    if (e != edge_in && (e->flags & EDGE_EH))
      break;

  if (new_lp)
    {
      /* NEW_LP already exists.  If there are still edges into OLD_LP,
         there's nothing to do with the EH tree.  If there are no more
         edges into OLD_LP, then we want to remove OLD_LP as it is unused.
         If CHANGE_REGION is true, then our caller is expecting to remove
         the landing pad.  */
      if (e == NULL && !change_region)
        remove_eh_landing_pad (old_lp);
    }
  else
    {
      /* No correct landing pad exists.  If there are no more edges
         into OLD_LP, then we can simply re-use the existing landing pad.
         Otherwise, we have to create a new landing pad.  */
      if (e == NULL)
        {
          EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
          new_lp = old_lp;
        }
      else
        new_lp = gen_eh_landing_pad (old_lp->region);
      new_lp->post_landing_pad = new_label;
      EH_LANDING_PAD_NR (new_label) = new_lp->index;
    }

  /* Maybe move the throwing statement to the new region.  */
  if (old_lp != new_lp)
    {
      remove_stmt_from_eh_lp (throw_stmt);
      add_stmt_to_eh_lp (throw_stmt, new_lp->index);
    }
}
/* Redirect EH edge E to NEW_BB.  */

edge
redirect_eh_edge (edge edge_in, basic_block new_bb)
{
  redirect_eh_edge_1 (edge_in, new_bb, false);
  return ssa_redirect_edge (edge_in, new_bb);
}
/* This is a subroutine of gimple_redirect_edge_and_branch.  Update the
   labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
   The actual edge update will happen in the caller.  */

void
redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb)
{
  tree new_lab = gimple_block_label (new_bb);
  bool any_changed = false;
  basic_block old_bb;
  eh_region r;
  eh_catch c;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
        {
          old_bb = label_to_block (cfun, c->label);
          if (old_bb == e->dest)
            {
              c->label = new_lab;
              any_changed = true;
            }
        }
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      old_bb = label_to_block (cfun, r->u.allowed.label);
      gcc_assert (old_bb == e->dest);
      r->u.allowed.label = new_lab;
      any_changed = true;
      break;

    default:
      gcc_unreachable ();
    }

  gcc_assert (any_changed);
}
/* Helper function for operation_could_trap_p and stmt_could_throw_p.  */

static bool
operation_could_trap_helper_p (enum tree_code op,
                               bool fp_operation,
                               bool honor_trapv,
                               bool honor_nans,
                               bool honor_snans,
                               tree divisor,
                               bool *handled)
{
  *handled = true;
  switch (op)
    {
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case TRUNC_MOD_EXPR:
    case RDIV_EXPR:
      if (honor_snans)
        return true;
      if (fp_operation)
        return flag_trapping_math;
      if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
        return true;
      return false;

    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
    case LTGT_EXPR:
      /* Some floating point comparisons may trap.  */
      return honor_nans;

    case EQ_EXPR:
    case NE_EXPR:
    case UNORDERED_EXPR:
    case ORDERED_EXPR:
    case UNLT_EXPR:
    case UNLE_EXPR:
    case UNGT_EXPR:
    case UNGE_EXPR:
    case UNEQ_EXPR:
      return honor_snans;

    case NEGATE_EXPR:
    case ABS_EXPR:
    case CONJ_EXPR:
      /* These operations don't trap with floating point.  */
      if (honor_trapv)
        return true;
      return false;

    case ABSU_EXPR:
      /* ABSU_EXPR never traps.  */
      return false;

    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
      /* Any floating arithmetic may trap.  */
      if (fp_operation && flag_trapping_math)
        return true;
      if (honor_trapv)
        return true;
      return false;

    case COMPLEX_EXPR:
    case CONSTRUCTOR:
      /* Constructing an object cannot trap.  */
      return false;

    case COND_EXPR:
    case VEC_COND_EXPR:
      /* Whether *COND_EXPR can trap depends on whether the
         first argument can trap, so signal it as not handled.
         Whether lhs is floating or not doesn't matter.  */
      *handled = false;
      return false;

    default:
      /* Any floating arithmetic may trap.  */
      if (fp_operation && flag_trapping_math)
        return true;

      *handled = false;
      return false;
    }
}
/* Return true if operation OP may trap.  FP_OPERATION is true if OP is applied
   on floating-point values.  HONOR_TRAPV is true if OP is applied on integer
   type operands that may trap.  If OP is a division operator, DIVISOR contains
   the value of the divisor.  */

bool
operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
                        tree divisor)
{
  bool honor_nans = (fp_operation && flag_trapping_math
                     && !flag_finite_math_only);
  bool honor_snans = fp_operation && flag_signaling_nans != 0;
  bool handled;

  /* This function cannot tell whether or not COND_EXPR and VEC_COND_EXPR could
     trap, because that depends on the respective condition op.  */
  gcc_assert (op != COND_EXPR && op != VEC_COND_EXPR);

  if (TREE_CODE_CLASS (op) != tcc_comparison
      && TREE_CODE_CLASS (op) != tcc_unary
      && TREE_CODE_CLASS (op) != tcc_binary)
    return false;

  return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
                                        honor_nans, honor_snans, divisor,
                                        &handled);
}
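/* Usage sketch (values are illustrative, not from a caller in this
   file): an integer division whose divisor is not known to be a
   nonzero constant is considered trapping, so

     operation_could_trap_p (TRUNC_DIV_EXPR, false, false, divisor)

   returns true when DIVISOR is NULL_TREE, non-constant, or zero, and
   false for, say, build_int_cst (integer_type_node, 8).  */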
/* Returns true if it is possible to prove that the index of
   an array access REF (an ARRAY_REF expression) falls into the
   array bounds.  */

static bool
in_array_bounds_p (tree ref)
{
  tree idx = TREE_OPERAND (ref, 1);
  tree min, max;

  if (TREE_CODE (idx) != INTEGER_CST)
    return false;

  min = array_ref_low_bound (ref);
  max = array_ref_up_bound (ref);
  if (!min
      || !max
      || TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    return false;

  if (tree_int_cst_lt (idx, min)
      || tree_int_cst_lt (max, idx))
    return false;

  return true;
}
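/* Example (assumed declarations, for illustration only): for
   "int a[10];" the access a[4] has a constant index within
   [array_ref_low_bound, array_ref_up_bound] = [0, 9], so
   in_array_bounds_p returns true; for a[12], or a[i] with
   non-constant i, it returns false and the access is conservatively
   treated as possibly trapping.  */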
/* Returns true if it is possible to prove that the range of
   an array access REF (an ARRAY_RANGE_REF expression) falls
   into the array bounds.  */

static bool
range_in_array_bounds_p (tree ref)
{
  tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
  tree range_min, range_max, min, max;

  range_min = TYPE_MIN_VALUE (domain_type);
  range_max = TYPE_MAX_VALUE (domain_type);
  if (!range_min
      || !range_max
      || TREE_CODE (range_min) != INTEGER_CST
      || TREE_CODE (range_max) != INTEGER_CST)
    return false;

  min = array_ref_low_bound (ref);
  max = array_ref_up_bound (ref);
  if (!min
      || !max
      || TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    return false;

  if (tree_int_cst_lt (range_min, min)
      || tree_int_cst_lt (max, range_max))
    return false;

  return true;
}
/* Return true if EXPR can trap, as in dereferencing an invalid pointer
   location or floating point arithmetic.  C.f. the rtl version, may_trap_p.
   This routine expects only GIMPLE lhs or rhs input.  */

bool
tree_could_trap_p (tree expr)
{
  enum tree_code code;
  bool fp_operation = false;
  bool honor_trapv = false;
  tree t, base, div = NULL_TREE;

  if (!expr)
    return false;

  /* In COND_EXPR and VEC_COND_EXPR only the condition may trap, but
     they won't appear as operands in GIMPLE form, so this is just for the
     GENERIC uses where it needs to recurse on the operands and so
     *COND_EXPR itself doesn't trap.  */
  if (TREE_CODE (expr) == COND_EXPR || TREE_CODE (expr) == VEC_COND_EXPR)
    return false;

  code = TREE_CODE (expr);
  t = TREE_TYPE (expr);

  if (t)
    {
      if (COMPARISON_CLASS_P (expr))
        fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
      else
        fp_operation = FLOAT_TYPE_P (t);
      honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
    }

  if (TREE_CODE_CLASS (code) == tcc_binary)
    div = TREE_OPERAND (expr, 1);
  if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
    return true;

 restart:
  switch (code)
    {
    case COMPONENT_REF:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case BIT_FIELD_REF:
    case VIEW_CONVERT_EXPR:
    case WITH_SIZE_EXPR:
      expr = TREE_OPERAND (expr, 0);
      code = TREE_CODE (expr);
      goto restart;

    case ARRAY_RANGE_REF:
      base = TREE_OPERAND (expr, 0);
      if (tree_could_trap_p (base))
        return true;
      if (TREE_THIS_NOTRAP (expr))
        return false;
      return !range_in_array_bounds_p (expr);

    case ARRAY_REF:
      base = TREE_OPERAND (expr, 0);
      if (tree_could_trap_p (base))
        return true;
      if (TREE_THIS_NOTRAP (expr))
        return false;
      return !in_array_bounds_p (expr);

    case TARGET_MEM_REF:
    case MEM_REF:
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
          && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
        return true;
      if (TREE_THIS_NOTRAP (expr))
        return false;
      /* We cannot prove that the access is in-bounds when we have
         variable-index TARGET_MEM_REFs.  */
      if (code == TARGET_MEM_REF
          && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
        return true;
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
        {
          tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
          poly_offset_int off = mem_ref_offset (expr);
          if (maybe_lt (off, 0))
            return true;
          if (TREE_CODE (base) == STRING_CST)
            return maybe_le (TREE_STRING_LENGTH (base), off);
          tree size = DECL_SIZE_UNIT (base);
          if (size == NULL_TREE
              || !poly_int_tree_p (size)
              || maybe_le (wi::to_poly_offset (size), off))
            return true;
          /* Now we are sure the first byte of the access is inside
             the object.  */
          return false;
        }
      return true;

    case INDIRECT_REF:
      return !TREE_THIS_NOTRAP (expr);

    case ASM_EXPR:
      return TREE_THIS_VOLATILE (expr);

    case CALL_EXPR:
      t = get_callee_fndecl (expr);
      /* Assume that calls to weak functions may trap.  */
      if (!t || !DECL_P (t))
        return true;
      if (DECL_WEAK (t))
        return tree_could_trap_p (t);
      return false;

    case FUNCTION_DECL:
      /* Assume that accesses to weak functions may trap, unless we know
         they are certainly defined in current TU or in some other
         LTO partition.  */
      if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
        {
          cgraph_node *node = cgraph_node::get (expr);
          if (node)
            node = node->function_symbol ();
          return !(node && node->in_other_partition);
        }
      return false;

    case VAR_DECL:
      /* Assume that accesses to weak vars may trap, unless we know
         they are certainly defined in current TU or in some other
         LTO partition.  */
      if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
        {
          varpool_node *node = varpool_node::get (expr);
          if (node)
            node = node->ultimate_alias_target ();
          return !(node && node->in_other_partition);
        }
      return false;

    default:
      return false;
    }
}
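/* Illustrative behavior (hypothetical operands, default flags):
   tree_could_trap_p returns true for a dereference *p of an arbitrary
   pointer (a MEM_REF without TREE_THIS_NOTRAP), for a[i] with unknown
   i, and for a call through a weak undefined function; it returns
   false for accesses to ordinary local variables and for string
   literal bytes proven in range by the MEM_REF/STRING_CST check
   above.  */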
/* Return non-NULL if there is an integer operation with trapping overflow
   we can rewrite into non-trapping.  Called via walk_tree from
   rewrite_to_non_trapping_overflow.  */

static tree
find_trapping_overflow (tree *tp, int *walk_subtrees, void *data)
{
  if (EXPR_P (*tp)
      && ANY_INTEGRAL_TYPE_P (TREE_TYPE (*tp))
      && !operation_no_trapping_overflow (TREE_TYPE (*tp), TREE_CODE (*tp)))
    return *tp;
  if (IS_TYPE_OR_DECL_P (*tp)
      || (TREE_CODE (*tp) == SAVE_EXPR && data == NULL))
    *walk_subtrees = 0;
  return NULL_TREE;
}
/* Rewrite selected operations into unsigned arithmetics, so that they
   don't trap on overflow.  */

static tree
replace_trapping_overflow (tree *tp, int *walk_subtrees, void *data)
{
  if (find_trapping_overflow (tp, walk_subtrees, data))
    {
      tree type = TREE_TYPE (*tp);
      tree utype = unsigned_type_for (type);
      *walk_subtrees = 0;
      int len = TREE_OPERAND_LENGTH (*tp);
      for (int i = 0; i < len; ++i)
        walk_tree (&TREE_OPERAND (*tp, i), replace_trapping_overflow,
                   data, (hash_set<tree> *) data);

      if (TREE_CODE (*tp) == ABS_EXPR)
        {
          TREE_SET_CODE (*tp, ABSU_EXPR);
          TREE_TYPE (*tp) = utype;
          *tp = fold_convert (type, *tp);
        }
      else
        {
          TREE_TYPE (*tp) = utype;
          len = TREE_OPERAND_LENGTH (*tp);
          for (int i = 0; i < len; ++i)
            TREE_OPERAND (*tp, i)
              = fold_convert (utype, TREE_OPERAND (*tp, i));
          *tp = fold_convert (type, *tp);
        }
    }
  return NULL_TREE;
}
2826 using unsigned arithmetics to avoid traps in it. */
2829 rewrite_to_non_trapping_overflow (tree expr
)
2833 hash_set
<tree
> pset
;
2834 if (!walk_tree (&expr
, find_trapping_overflow
, &pset
, &pset
))
2836 expr
= unshare_expr (expr
);
2838 walk_tree (&expr
, replace_trapping_overflow
, &pset
, &pset
);
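/* A sketch of the rewrite performed above, with made-up operands:
   under -ftrapv the expression "a + b" of type int may trap on
   overflow, so it is rewritten into the equivalent non-trapping

     (int) ((unsigned int) a + (unsigned int) b)

   and an ABS_EXPR becomes "(int) ABSU_EXPR <a>", using the unsigned
   type returned by unsigned_type_for.  */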
/* Helper for stmt_could_throw_p.  Return true if STMT (assumed to be an
   assignment or a conditional) may throw.  */

static bool
stmt_could_throw_1_p (gassign *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  bool honor_nans = false;
  bool honor_snans = false;
  bool fp_operation = false;
  bool honor_trapv = false;
  tree t;
  size_t i;
  bool handled, ret;

  if (TREE_CODE_CLASS (code) == tcc_comparison
      || TREE_CODE_CLASS (code) == tcc_unary
      || TREE_CODE_CLASS (code) == tcc_binary)
    {
      if (TREE_CODE_CLASS (code) == tcc_comparison)
        t = TREE_TYPE (gimple_assign_rhs1 (stmt));
      else
        t = gimple_expr_type (stmt);
      fp_operation = FLOAT_TYPE_P (t);
      if (fp_operation)
        {
          honor_nans = flag_trapping_math && !flag_finite_math_only;
          honor_snans = flag_signaling_nans != 0;
        }
      else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
        honor_trapv = true;
    }

  /* First check the LHS.  */
  if (tree_could_trap_p (gimple_assign_lhs (stmt)))
    return true;

  /* Check if the main expression may trap.  */
  ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
                                       honor_nans, honor_snans,
                                       gimple_assign_rhs2 (stmt),
                                       &handled);
  if (handled)
    return ret;

  /* If the expression does not trap, see if any of the individual operands may
     trap.  */
  for (i = 1; i < gimple_num_ops (stmt); i++)
    if (tree_could_trap_p (gimple_op (stmt, i)))
      return true;

  return false;
}
/* Return true if statement STMT within FUN could throw an exception.  */

bool
stmt_could_throw_p (function *fun, gimple *stmt)
{
  if (!flag_exceptions)
    return false;

  /* The only statements that can throw an exception are assignments,
     conditionals, calls, resx, and asms.  */
  switch (gimple_code (stmt))
    {
    case GIMPLE_RESX:
      return true;

    case GIMPLE_CALL:
      return !gimple_call_nothrow_p (as_a <gcall *> (stmt));

    case GIMPLE_COND:
      {
        if (fun && !fun->can_throw_non_call_exceptions)
          return false;
        gcond *cond = as_a <gcond *> (stmt);
        tree lhs = gimple_cond_lhs (cond);
        return operation_could_trap_p (gimple_cond_code (cond),
                                       FLOAT_TYPE_P (TREE_TYPE (lhs)),
                                       false, NULL_TREE);
      }

    case GIMPLE_ASSIGN:
      if ((fun && !fun->can_throw_non_call_exceptions)
          || gimple_clobber_p (stmt))
        return false;
      return stmt_could_throw_1_p (as_a <gassign *> (stmt));

    case GIMPLE_ASM:
      if (fun && !fun->can_throw_non_call_exceptions)
        return false;
      return gimple_asm_volatile_p (as_a <gasm *> (stmt));

    default:
      return false;
    }
}
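/* Illustrative answers (assuming flag_exceptions is set): a call
   "foo ()" not marked nothrow reports true; an assignment like
   "x = y / z" reports true only when fun->can_throw_non_call_exceptions
   is set (e.g. under -fnon-call-exceptions) and the division could
   trap; a clobber never throws.  */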
/* Return true if STMT in function FUN must be assumed necessary because of
   non-call exceptions.  */

bool
stmt_unremovable_because_of_non_call_eh_p (function *fun, gimple *stmt)
{
  return (fun->can_throw_non_call_exceptions
          && !fun->can_delete_dead_exceptions
          && stmt_could_throw_p (fun, stmt));
}
/* Return true if expression T could throw an exception.  */

bool
tree_could_throw_p (tree t)
{
  if (!flag_exceptions)
    return false;
  if (TREE_CODE (t) == MODIFY_EXPR)
    {
      if (cfun->can_throw_non_call_exceptions
          && tree_could_trap_p (TREE_OPERAND (t, 0)))
        return true;
      t = TREE_OPERAND (t, 1);
    }

  if (TREE_CODE (t) == WITH_SIZE_EXPR)
    t = TREE_OPERAND (t, 0);
  if (TREE_CODE (t) == CALL_EXPR)
    return (call_expr_flags (t) & ECF_NOTHROW) == 0;
  if (cfun->can_throw_non_call_exceptions)
    return tree_could_trap_p (t);
  return false;
}
/* Return true if STMT can throw an exception that is not caught within its
   function FUN.  FUN can be NULL but the function is extra conservative
   then.  */

bool
stmt_can_throw_external (function *fun, gimple *stmt)
{
  int lp_nr;

  if (!stmt_could_throw_p (fun, stmt))
    return false;
  if (!fun)
    return true;

  lp_nr = lookup_stmt_eh_lp_fn (fun, stmt);
  return lp_nr == 0;
}
/* Return true if STMT can throw an exception that is caught within its
   function FUN.  */

bool
stmt_can_throw_internal (function *fun, gimple *stmt)
{
  int lp_nr;

  gcc_checking_assert (fun);
  if (!stmt_could_throw_p (fun, stmt))
    return false;

  lp_nr = lookup_stmt_eh_lp_fn (fun, stmt);
  return lp_nr > 0;
}
/* Given a statement STMT in IFUN, if STMT can no longer throw, then
   remove any entry it might have from the EH table.  Return true if
   any change was made.  */

bool
maybe_clean_eh_stmt_fn (struct function *ifun, gimple *stmt)
{
  if (stmt_could_throw_p (ifun, stmt))
    return false;
  return remove_stmt_from_eh_lp_fn (ifun, stmt);
}
/* Likewise, but always use the current function.  */

bool
maybe_clean_eh_stmt (gimple *stmt)
{
  return maybe_clean_eh_stmt_fn (cfun, stmt);
}
/* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
   OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
   in the table if it should be in there.  Return TRUE if a replacement was
   done that may require an EH edge purge.  */

bool
maybe_clean_or_replace_eh_stmt (gimple *old_stmt, gimple *new_stmt)
{
  int lp_nr = lookup_stmt_eh_lp (old_stmt);

  if (lp_nr != 0)
    {
      bool new_stmt_could_throw = stmt_could_throw_p (cfun, new_stmt);

      if (new_stmt == old_stmt && new_stmt_could_throw)
        return false;

      remove_stmt_from_eh_lp (old_stmt);
      if (new_stmt_could_throw)
        {
          add_stmt_to_eh_lp (new_stmt, lp_nr);
          return false;
        }
      else
        return true;
    }

  return false;
}
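/* Usage sketch (hypothetical caller): after folding replaces a
   throwing call OLD with a plain assignment NEW that cannot throw,

     if (maybe_clean_or_replace_eh_stmt (old, new))
       gimple_purge_dead_eh_edges (gimple_bb (new));

   the true return value signals that the statement left the EH table
   and any now-dead EH edges must be purged by the caller.  */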
/* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
   in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT.  The MAP
   operand is the return value of duplicate_eh_regions.  */

bool
maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple *new_stmt,
                            struct function *old_fun, gimple *old_stmt,
                            hash_map<void *, void *> *map,
                            int default_lp_nr)
{
  int old_lp_nr, new_lp_nr;

  if (!stmt_could_throw_p (new_fun, new_stmt))
    return false;

  old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
  if (old_lp_nr == 0)
    {
      if (default_lp_nr == 0)
        return false;
      new_lp_nr = default_lp_nr;
    }
  else if (old_lp_nr > 0)
    {
      eh_landing_pad old_lp, new_lp;

      old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
      new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
      new_lp_nr = new_lp->index;
    }
  else
    {
      eh_region old_r, new_r;

      old_r = (*old_fun->eh->region_array)[-old_lp_nr];
      new_r = static_cast<eh_region> (*map->get (old_r));
      new_lp_nr = -new_r->index;
    }

  add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
  return true;
}
/* Similar, but both OLD_STMT and NEW_STMT are within the current function,
   and thus no remapping is required.  */

bool
maybe_duplicate_eh_stmt (gimple *new_stmt, gimple *old_stmt)
{
  int lp_nr;

  if (!stmt_could_throw_p (cfun, new_stmt))
    return false;

  lp_nr = lookup_stmt_eh_lp (old_stmt);
  if (lp_nr == 0)
    return false;

  add_stmt_to_eh_lp (new_stmt, lp_nr);
  return true;
}
/* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
   GIMPLE_TRY) that are similar enough to be considered the same.  Currently
   this only handles handlers consisting of a single call, as that's the
   important case for C++: a destructor call for a particular object showing
   up in multiple handlers.  */

static bool
same_handler_p (gimple_seq oneh, gimple_seq twoh)
{
  gimple_stmt_iterator gsi;
  gimple *ones, *twos;
  unsigned int ai;

  gsi = gsi_start (oneh);
  if (!gsi_one_before_end_p (gsi))
    return false;
  ones = gsi_stmt (gsi);

  gsi = gsi_start (twoh);
  if (!gsi_one_before_end_p (gsi))
    return false;
  twos = gsi_stmt (gsi);

  if (!is_gimple_call (ones)
      || !is_gimple_call (twos)
      || gimple_call_lhs (ones)
      || gimple_call_lhs (twos)
      || gimple_call_chain (ones)
      || gimple_call_chain (twos)
      || !gimple_call_same_target_p (ones, twos)
      || gimple_call_num_args (ones) != gimple_call_num_args (twos))
    return false;

  for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
    if (!operand_equal_p (gimple_call_arg (ones, ai),
                          gimple_call_arg (twos, ai), 0))
      return false;

  return true;
}
/* Optimize
    try { A() } finally { try { ~B() } catch { ~A() } }
    try { ... } finally { ~A() }
   into
    try { A() } catch { ~B() }
    try { ~B() ... } finally { ~A() }

   This occurs frequently in C++, where A is a local variable and B is a
   temporary used in the initializer for A.  */

static void
optimize_double_finally (gtry *one, gtry *two)
{
  gimple *oneh;
  gimple_stmt_iterator gsi;
  gimple_seq cleanup;

  cleanup = gimple_try_cleanup (one);
  gsi = gsi_start (cleanup);
  if (!gsi_one_before_end_p (gsi))
    return;

  oneh = gsi_stmt (gsi);
  if (gimple_code (oneh) != GIMPLE_TRY
      || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
    return;

  if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
    {
      gimple_seq seq = gimple_try_eval (oneh);

      gimple_try_set_cleanup (one, seq);
      gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
      seq = copy_gimple_seq_and_replace_locals (seq);
      gimple_seq_add_seq (&seq, gimple_try_eval (two));
      gimple_try_set_eval (two, seq);
    }
}
/* Perform EH refactoring optimizations that are simpler to do when code
   flow has been lowered but EH structures haven't.  */

static void
refactor_eh_r (gimple_seq seq)
{
  gimple_stmt_iterator gsi;
  gimple *one, *two;

  one = NULL;
  two = NULL;
  gsi = gsi_start (seq);
  while (1)
    {
      one = two;
      if (gsi_end_p (gsi))
        two = NULL;
      else
        two = gsi_stmt (gsi);
      if (one && two)
        if (gtry *try_one = dyn_cast <gtry *> (one))
          if (gtry *try_two = dyn_cast <gtry *> (two))
            if (gimple_try_kind (try_one) == GIMPLE_TRY_FINALLY
                && gimple_try_kind (try_two) == GIMPLE_TRY_FINALLY)
              optimize_double_finally (try_one, try_two);
      if (one)
        switch (gimple_code (one))
          {
          case GIMPLE_TRY:
            refactor_eh_r (gimple_try_eval (one));
            refactor_eh_r (gimple_try_cleanup (one));
            break;
          case GIMPLE_CATCH:
            refactor_eh_r (gimple_catch_handler (as_a <gcatch *> (one)));
            break;
          case GIMPLE_EH_FILTER:
            refactor_eh_r (gimple_eh_filter_failure (one));
            break;
          case GIMPLE_EH_ELSE:
            {
              geh_else *eh_else_stmt = as_a <geh_else *> (one);
              refactor_eh_r (gimple_eh_else_n_body (eh_else_stmt));
              refactor_eh_r (gimple_eh_else_e_body (eh_else_stmt));
            }
            break;
          default:
            break;
          }
      if (two)
        gsi_next (&gsi);
      else
        break;
    }
}
namespace {

const pass_data pass_data_refactor_eh =
{
  GIMPLE_PASS, /* type */
  "ehopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_refactor_eh : public gimple_opt_pass
{
public:
  pass_refactor_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_refactor_eh, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_exceptions != 0; }
  virtual unsigned int execute (function *)
    {
      refactor_eh_r (gimple_body (current_function_decl));
      return 0;
    }

}; // class pass_refactor_eh

} // anon namespace
gimple_opt_pass *
make_pass_refactor_eh (gcc::context *ctxt)
{
  return new pass_refactor_eh (ctxt);
}
/* At the end of gimple optimization, we can lower RESX.  */

static bool
lower_resx (basic_block bb, gresx *stmt,
            hash_map<eh_region, tree> *mnt_map)
{
  int lp_nr;
  eh_region src_r, dst_r;
  gimple_stmt_iterator gsi;
  gimple *x;
  tree fn, src_nr;
  bool ret = false;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0)
    dst_r = get_eh_region_from_lp_number (lp_nr);
  else
    dst_r = NULL;

  src_r = get_eh_region_from_number (gimple_resx_region (stmt));
  gsi = gsi_last_bb (bb);

  if (src_r == NULL)
    {
      /* We can wind up with no source region when pass_cleanup_eh shows
         that there are no entries into an eh region and deletes it, but
         then the block that contains the resx isn't removed.  This can
         happen without optimization when the switch statement created by
         lower_try_finally_switch isn't simplified to remove the eh case.

         Resolve this by expanding the resx node to an abort.  */

      fn = builtin_decl_implicit (BUILT_IN_TRAP);
      x = gimple_build_call (fn, 0);
      gsi_insert_before (&gsi, x, GSI_SAME_STMT);

      while (EDGE_COUNT (bb->succs) > 0)
        remove_edge (EDGE_SUCC (bb, 0));
    }
  else if (dst_r)
    {
      /* When we have a destination region, we resolve this by copying
         the excptr and filter values into place, and changing the edge
         to immediately after the landing pad.  */
      edge e;

      if (lp_nr < 0)
        {
          basic_block new_bb;
          tree lab;

          /* We are resuming into a MUST_NOT_THROW region.  Expand a call to
             the failure decl into a new block, if needed.  */
          gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);

          tree *slot = mnt_map->get (dst_r);
          if (slot == NULL)
            {
              gimple_stmt_iterator gsi2;

              new_bb = create_empty_bb (bb);
              new_bb->count = bb->count;
              add_bb_to_loop (new_bb, bb->loop_father);
              lab = gimple_block_label (new_bb);
              gsi2 = gsi_start_bb (new_bb);

              fn = dst_r->u.must_not_throw.failure_decl;
              x = gimple_build_call (fn, 0);
              gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
              gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);

              mnt_map->put (dst_r, lab);
            }
          else
            {
              lab = *slot;
              new_bb = label_to_block (cfun, lab);
            }

          gcc_assert (EDGE_COUNT (bb->succs) == 0);
          e = make_single_succ_edge (bb, new_bb, EDGE_FALLTHRU);
        }
      else
        {
          edge_iterator ei;
          tree dst_nr = build_int_cst (integer_type_node, dst_r->index);

          fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
          src_nr = build_int_cst (integer_type_node, src_r->index);
          x = gimple_build_call (fn, 2, dst_nr, src_nr);
          gsi_insert_before (&gsi, x, GSI_SAME_STMT);

          /* Update the flags for the outgoing edge.  */
          e = single_succ_edge (bb);
          gcc_assert (e->flags & EDGE_EH);
          e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
          e->probability = profile_probability::always ();

          /* If there are no more EH users of the landing pad, delete it.  */
          FOR_EACH_EDGE (e, ei, e->dest->preds)
            if (e->flags & EDGE_EH)
              break;
          if (e == NULL)
            {
              eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
              remove_eh_landing_pad (lp);
            }
        }

      ret = true;
    }
  else
    {
      tree var;

      /* When we don't have a destination region, this exception escapes
         up the call chain.  We resolve this by generating a call to the
         _Unwind_Resume library function.  */

      /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
         with no arguments for C++.  Check for that.  */
      if (src_r->use_cxa_end_cleanup)
        {
          fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
          x = gimple_build_call (fn, 0);
          gsi_insert_before (&gsi, x, GSI_SAME_STMT);
        }
      else
        {
          fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
          src_nr = build_int_cst (integer_type_node, src_r->index);
          x = gimple_build_call (fn, 1, src_nr);
          var = create_tmp_var (ptr_type_node);
          var = make_ssa_name (var, x);
          gimple_call_set_lhs (x, var);
          gsi_insert_before (&gsi, x, GSI_SAME_STMT);

          /* When exception handling is delegated to a caller function, we
             have to guarantee that shadow memory variables living on stack
             will be cleaned before control is given to a parent function.  */
          if (sanitize_flags_p (SANITIZE_ADDRESS))
            {
              tree decl
                = builtin_decl_implicit (BUILT_IN_ASAN_HANDLE_NO_RETURN);
              gimple *g = gimple_build_call (decl, 0);
              gimple_set_location (g, gimple_location (stmt));
              gsi_insert_before (&gsi, g, GSI_SAME_STMT);
            }

          fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
          x = gimple_build_call (fn, 1, var);
          gsi_insert_before (&gsi, x, GSI_SAME_STMT);
        }

      gcc_assert (EDGE_COUNT (bb->succs) == 0);
    }

  gsi_remove (&gsi, true);

  return ret;
}
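/* A sketch of the escaping-exception case above (region numbers made
   up for the example): a "resx 2" whose region has no destination
   landing pad in this function becomes

     _1 = __builtin_eh_pointer (2);
     _Unwind_Resume (_1);

   while a resx into a MUST_NOT_THROW region instead becomes a call to
   the region's failure decl, typically std::terminate for C++.  */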
namespace {

const pass_data pass_data_lower_resx =
{
  GIMPLE_PASS, /* type */
  "resx", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_resx : public gimple_opt_pass
{
public:
  pass_lower_resx (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_resx, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_exceptions != 0; }
  virtual unsigned int execute (function *);

}; // class pass_lower_resx
unsigned int
pass_lower_resx::execute (function *fun)
{
  basic_block bb;
  bool dominance_invalidated = false;
  bool any_rewritten = false;

  hash_map<eh_region, tree> mnt_map;

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple *last = last_stmt (bb);
      if (last && is_gimple_resx (last))
        {
          dominance_invalidated |=
            lower_resx (bb, as_a <gresx *> (last), &mnt_map);
          any_rewritten = true;
        }
    }

  if (dominance_invalidated)
    {
      free_dominance_info (CDI_DOMINATORS);
      free_dominance_info (CDI_POST_DOMINATORS);
    }

  return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
}

} // anon namespace
gimple_opt_pass *
make_pass_lower_resx (gcc::context *ctxt)
{
  return new pass_lower_resx (ctxt);
}
/* Try to optimize var = {v} {CLOBBER} stmts followed just by
   external throw.  */

static void
optimize_clobbers (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  bool any_clobbers = false;
  bool seen_stack_restore = false;
  edge_iterator ei;
  edge e;

  /* Only optimize anything if the bb contains at least one clobber,
     ends with resx (checked by caller), optionally contains some
     debug stmts or labels, or at most one __builtin_stack_restore
     call, and has an incoming EH edge.  */
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
        continue;
      if (gimple_clobber_p (stmt))
        {
          any_clobbers = true;
          continue;
        }
      if (!seen_stack_restore
          && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
        {
          seen_stack_restore = true;
          continue;
        }
      if (gimple_code (stmt) == GIMPLE_LABEL)
        break;
      return;
    }
  if (!any_clobbers)
    return;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (e->flags & EDGE_EH)
      break;
  if (e == NULL)
    return;
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (!gimple_clobber_p (stmt))
        continue;
      unlink_stmt_vdef (stmt);
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}
/* Try to sink var = {v} {CLOBBER} stmts followed just by
   internal throw to successor BB.
   SUNK, if not NULL, is an array of sequences indexed by basic-block
   index to sink to and to pick up sinking opportunities from.
   If FOUND_OPPORTUNITY is not NULL then do not perform the optimization
   but set *FOUND_OPPORTUNITY to true.  */

static int
sink_clobbers (basic_block bb,
               gimple_seq *sunk = NULL, bool *found_opportunity = NULL)
{
  edge e;
  edge_iterator ei;
  gimple_stmt_iterator gsi, dgsi;
  basic_block succbb;
  bool any_clobbers = false;
  unsigned todo = 0;

  /* Only optimize if BB has a single EH successor and
     all predecessor edges are EH too.  */
  if (!single_succ_p (bb)
      || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
    return 0;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if ((e->flags & EDGE_EH) == 0)
        return 0;
    }

  /* And BB contains only CLOBBER stmts before the final
     RESX.  */
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
        continue;
      if (gimple_code (stmt) == GIMPLE_LABEL)
        break;
      if (!gimple_clobber_p (stmt))
        return 0;
      any_clobbers = true;
    }
  if (!any_clobbers && (!sunk || gimple_seq_empty_p (sunk[bb->index])))
    return 0;

  /* If this was a dry run, tell it we found clobbers to sink.  */
  if (found_opportunity)
    {
      *found_opportunity = true;
      return 0;
    }

  edge succe = single_succ_edge (bb);
  succbb = succe->dest;

  /* See if there is a virtual PHI node to take an updated virtual
     operand from.  */
  gphi *vphi = NULL;
  for (gphi_iterator gpi = gsi_start_phis (succbb);
       !gsi_end_p (gpi); gsi_next (&gpi))
    {
      tree res = gimple_phi_result (gpi.phi ());
      if (virtual_operand_p (res))
        {
          vphi = gpi.phi ();
          break;
        }
    }

  gimple *first_sunk = NULL;
  gimple *last_sunk = NULL;
  if (sunk && !(succbb->flags & BB_VISITED))
    dgsi = gsi_start (sunk[succbb->index]);
  else
    dgsi = gsi_after_labels (succbb);
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      tree lhs;
      if (is_gimple_debug (stmt))
        continue;
      if (gimple_code (stmt) == GIMPLE_LABEL)
        break;
      lhs = gimple_assign_lhs (stmt);
      /* Unfortunately we don't have dominance info updated at this
         point, so checking if
         dominated_by_p (CDI_DOMINATORS, succbb,
                         gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0)))
         would be too costly.  Thus, avoid sinking any clobbers that
         refer to non-(D) SSA_NAMEs.  */
      if (TREE_CODE (lhs) == MEM_REF
          && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
          && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
        {
          unlink_stmt_vdef (stmt);
          gsi_remove (&gsi, true);
          release_defs (stmt);
          continue;
        }

      /* As we do not change stmt order when sinking across a
         forwarder edge we can keep virtual operands in place.  */
      gsi_remove (&gsi, false);
      gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
      if (!first_sunk)
        first_sunk = stmt;
      last_sunk = stmt;
    }
  if (sunk && !gimple_seq_empty_p (sunk[bb->index]))
    {
      if (!first_sunk)
        first_sunk = gsi_stmt (gsi_last (sunk[bb->index]));
      last_sunk = gsi_stmt (gsi_start (sunk[bb->index]));
      gsi_insert_seq_before_without_update (&dgsi,
                                            sunk[bb->index], GSI_NEW_STMT);
      sunk[bb->index] = NULL;
    }
  if (first_sunk)
    {
      /* Adjust virtual operands if we sunk across a virtual PHI.  */
      if (vphi)
        {
          imm_use_iterator iter;
          use_operand_p use_p;
          gimple *use_stmt;
          tree phi_def = gimple_phi_result (vphi);
          FOR_EACH_IMM_USE_STMT (use_stmt, iter, phi_def)
            FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
              SET_USE (use_p, gimple_vdef (first_sunk));
          if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phi_def))
            {
              SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (first_sunk)) = 1;
              SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phi_def) = 0;
            }
          SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe),
                   gimple_vuse (last_sunk));
          SET_USE (gimple_vuse_op (last_sunk), phi_def);
        }
      /* If there isn't a single predecessor but no virtual PHI node
         arrange for virtual operands to be renamed.  */
      else if (!single_pred_p (succbb)
               && TREE_CODE (gimple_vuse (last_sunk)) == SSA_NAME)
        {
          mark_virtual_operand_for_renaming (gimple_vuse (last_sunk));
          todo |= TODO_update_ssa_only_virtuals;
        }
    }

  return todo;
}
/* At the end of inlining, we can lower EH_DISPATCH.  Return true when
   we have found some duplicate labels and removed some edges.  */

static bool
lower_eh_dispatch (basic_block src, geh_dispatch *stmt)
{
  gimple_stmt_iterator gsi;
  int region_nr;
  eh_region r;
  tree filter, fn;
  gimple *x;
  bool redirected = false;

  region_nr = gimple_eh_dispatch_region (stmt);
  r = get_eh_region_from_number (region_nr);

  gsi = gsi_last_bb (src);

  switch (r->type)
    {
    case ERT_TRY:
      {
        auto_vec<tree> labels;
        tree default_label = NULL;
        eh_catch c;
        edge_iterator ei;
        edge e;
        hash_set<tree> seen_values;

        /* Collect the labels for a switch.  Zero the post_landing_pad
           field because we'll no longer have anything keeping these labels
           in existence and the optimizer will be free to merge these
           blocks at will.  */
        for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
          {
            tree tp_node, flt_node, lab = c->label;
            bool have_label = false;

            c->label = NULL;
            tp_node = c->type_list;
            flt_node = c->filter_list;

            if (tp_node == NULL)
              {
                default_label = lab;
                break;
              }
            do
              {
                /* Filter out duplicate labels that arise when this handler
                   is shadowed by an earlier one.  When no labels are
                   attached to the handler anymore, we remove
                   the corresponding edge and then we delete unreachable
                   blocks at the end of this pass.  */
                if (! seen_values.contains (TREE_VALUE (flt_node)))
                  {
                    tree t = build_case_label (TREE_VALUE (flt_node),
                                               NULL, lab);
                    labels.safe_push (t);
                    seen_values.add (TREE_VALUE (flt_node));
                    have_label = true;
                  }

                tp_node = TREE_CHAIN (tp_node);
                flt_node = TREE_CHAIN (flt_node);
              }
            while (tp_node);
            if (! have_label)
              {
                remove_edge (find_edge (src, label_to_block (cfun, lab)));
                redirected = true;
              }
          }

        /* Clean up the edge flags.  */
        FOR_EACH_EDGE (e, ei, src->succs)
          {
            if (e->flags & EDGE_FALLTHRU)
              {
                /* If there was no catch-all, use the fallthru edge.  */
                if (default_label == NULL)
                  default_label = gimple_block_label (e->dest);
                e->flags &= ~EDGE_FALLTHRU;
              }
          }
        gcc_assert (default_label != NULL);

        /* Don't generate a switch if there's only a default case.
           This is common in the form of try { A; } catch (...) { B; }.  */
        if (!labels.exists ())
          {
            e = single_succ_edge (src);
            e->flags |= EDGE_FALLTHRU;
          }
        else
          {
            fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
            x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
                                                         region_nr));
            filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
            filter = make_ssa_name (filter, x);
            gimple_call_set_lhs (x, filter);
            gimple_set_location (x, gimple_location (stmt));
            gsi_insert_before (&gsi, x, GSI_SAME_STMT);

            /* Turn the default label into a default case.  */
            default_label = build_case_label (NULL, NULL, default_label);
            sort_case_labels (labels);

            x = gimple_build_switch (filter, default_label, labels);
            gimple_set_location (x, gimple_location (stmt));
            gsi_insert_before (&gsi, x, GSI_SAME_STMT);
          }
      }
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      {
        edge b_e = BRANCH_EDGE (src);
        edge f_e = FALLTHRU_EDGE (src);

        fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
        x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
                                                     region_nr));
        filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
        filter = make_ssa_name (filter, x);
        gimple_call_set_lhs (x, filter);
        gimple_set_location (x, gimple_location (stmt));
        gsi_insert_before (&gsi, x, GSI_SAME_STMT);

        r->u.allowed.label = NULL;
        x = gimple_build_cond (EQ_EXPR, filter,
                               build_int_cst (TREE_TYPE (filter),
                                              r->u.allowed.filter),
                               NULL_TREE, NULL_TREE);
        gsi_insert_before (&gsi, x, GSI_SAME_STMT);

        b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
        f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Replace the EH_DISPATCH with the SWITCH or COND generated above.  */
  gsi_remove (&gsi, true);
  return redirected;
}
namespace {

const pass_data pass_data_lower_eh_dispatch =
{
  GIMPLE_PASS, /* type */
  "ehdisp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_eh_dispatch : public gimple_opt_pass
{
public:
  pass_lower_eh_dispatch (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
  virtual unsigned int execute (function *);

}; // class pass_lower_eh_dispatch
unsigned int
pass_lower_eh_dispatch::execute (function *fun)
{
  basic_block bb;
  int flags = 0;
  bool redirected = false;
  bool any_resx_to_process = false;

  assign_filter_values ();

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple *last = last_stmt (bb);
      if (last == NULL)
        continue;
      if (gimple_code (last) == GIMPLE_EH_DISPATCH)
        {
          redirected |= lower_eh_dispatch (bb,
                                           as_a <geh_dispatch *> (last));
          flags |= TODO_update_ssa_only_virtuals;
        }
      else if (gimple_code (last) == GIMPLE_RESX)
        {
          if (stmt_can_throw_external (fun, last))
            optimize_clobbers (bb);
          else if (!any_resx_to_process)
            sink_clobbers (bb, NULL, &any_resx_to_process);
        }
      bb->flags &= ~BB_VISITED;
    }
  if (redirected)
    {
      free_dominance_info (CDI_DOMINATORS);
      delete_unreachable_blocks ();
    }

  if (any_resx_to_process)
    {
      /* Make sure to catch all secondary sinking opportunities by processing
         blocks in RPO order and after all CFG modifications from lowering
         and unreachable block removal.  */
      int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (fun));
      int rpo_n = pre_and_rev_post_order_compute_fn (fun, NULL, rpo, false);
      gimple_seq *sunk = XCNEWVEC (gimple_seq, last_basic_block_for_fn (fun));
      for (int i = 0; i < rpo_n; ++i)
        {
          bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]);
          gimple *last = last_stmt (bb);
          if (last
              && gimple_code (last) == GIMPLE_RESX
              && !stmt_can_throw_external (fun, last))
            flags |= sink_clobbers (bb, sunk);
          /* If there were any clobbers sunk into this BB, insert them now.  */
          if (!gimple_seq_empty_p (sunk[bb->index]))
            {
              gimple_stmt_iterator gsi = gsi_after_labels (bb);
              gsi_insert_seq_before (&gsi, sunk[bb->index], GSI_NEW_STMT);
              sunk[bb->index] = NULL;
            }
          bb->flags |= BB_VISITED;
        }
      free (rpo);
      free (sunk);
    }

  return flags;
}

} // anon namespace
*ctxt
)
3986 return new pass_lower_eh_dispatch (ctxt
);
/* Walk statements, see what regions and, optionally, landing pads
   are really referenced.

   Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
   and in LP_REACHABLE an sbitmap with bits set for reachable landing pads.

   Passing NULL for LP_REACHABLE is valid, in this case only reachable
   regions are marked.

   The caller is responsible for freeing the returned sbitmaps.  */

static void
mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
{
  sbitmap r_reachable, lp_reachable;
  basic_block bb;
  bool mark_landing_pads = (lp_reachablep != NULL);
  gcc_checking_assert (r_reachablep != NULL);

  r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
  bitmap_clear (r_reachable);
  *r_reachablep = r_reachable;

  if (mark_landing_pads)
    {
      lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
      bitmap_clear (lp_reachable);
      *lp_reachablep = lp_reachable;
    }
  else
    lp_reachable = NULL;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);

          if (mark_landing_pads)
            {
              int lp_nr = lookup_stmt_eh_lp (stmt);

              /* Negative LP numbers are MUST_NOT_THROW regions which
                 are not considered BB enders.  */
              if (lp_nr < 0)
                bitmap_set_bit (r_reachable, -lp_nr);

              /* Positive LP numbers are real landing pads, and BB enders.  */
              else if (lp_nr > 0)
                {
                  gcc_assert (gsi_one_before_end_p (gsi));
                  eh_region region = get_eh_region_from_lp_number (lp_nr);
                  bitmap_set_bit (r_reachable, region->index);
                  bitmap_set_bit (lp_reachable, lp_nr);
                }
            }

          /* Avoid removing regions referenced from RESX/EH_DISPATCH.  */
          switch (gimple_code (stmt))
            {
            case GIMPLE_RESX:
              bitmap_set_bit (r_reachable,
                              gimple_resx_region (as_a <gresx *> (stmt)));
              break;
            case GIMPLE_EH_DISPATCH:
              bitmap_set_bit (r_reachable,
                              gimple_eh_dispatch_region (
                                as_a <geh_dispatch *> (stmt)));
              break;
            case GIMPLE_CALL:
              if (gimple_call_builtin_p (stmt, BUILT_IN_EH_COPY_VALUES))
                for (int i = 0; i < 2; ++i)
                  {
                    tree rt = gimple_call_arg (stmt, i);
                    HOST_WIDE_INT ri = tree_to_shwi (rt);

                    gcc_assert (ri == (int)ri);
                    bitmap_set_bit (r_reachable, ri);
                  }
              break;
            default:
              break;
            }
        }
    }
}
/* Remove unreachable handlers and unreachable landing pads.  */

static void
remove_unreachable_handlers (void)
{
  sbitmap r_reachable, lp_reachable;
  eh_region region;
  eh_landing_pad lp;
  unsigned i;

  mark_reachable_handlers (&r_reachable, &lp_reachable);

  if (dump_file)
    {
      fprintf (dump_file, "Before removal of unreachable regions:\n");
      dump_eh_tree (dump_file, cfun);
      fprintf (dump_file, "Reachable regions: ");
      dump_bitmap_file (dump_file, r_reachable);
      fprintf (dump_file, "Reachable landing pads: ");
      dump_bitmap_file (dump_file, lp_reachable);
    }

  if (dump_file)
    {
      FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
        if (region && !bitmap_bit_p (r_reachable, region->index))
          fprintf (dump_file,
                   "Removing unreachable region %d\n",
                   region->index);
    }

  remove_unreachable_eh_regions (r_reachable);

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
    if (lp && !bitmap_bit_p (lp_reachable, lp->index))
      {
        if (dump_file)
          fprintf (dump_file,
                   "Removing unreachable landing pad %d\n",
                   lp->index);
        remove_eh_landing_pad (lp);
      }

  if (dump_file)
    {
      fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
      dump_eh_tree (dump_file, cfun);
      fprintf (dump_file, "\n\n");
    }

  sbitmap_free (r_reachable);
  sbitmap_free (lp_reachable);

  if (flag_checking)
    verify_eh_tree (cfun);
}
/* Remove unreachable handlers if any landing pads have been removed after
   last ehcleanup pass (due to gimple_purge_dead_eh_edges).  */

void
maybe_remove_unreachable_handlers (void)
{
  eh_landing_pad lp;
  unsigned i;

  if (cfun->eh == NULL)
    return;

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
    if (lp
        && (lp->post_landing_pad == NULL_TREE
            || label_to_block (cfun, lp->post_landing_pad) == NULL))
      {
        remove_unreachable_handlers ();
        return;
      }
}
/* Remove regions that do not have landing pads.  This assumes
   that remove_unreachable_handlers has already been run, and
   that we've just manipulated the landing pads since then.

   Preserve regions with landing pads and regions that prevent
   exceptions from propagating further, even if these regions
   are not reachable.  */

static void
remove_unreachable_handlers_no_lp (void)
{
  eh_region region;
  sbitmap r_reachable;
  unsigned i;

  mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
    {
      if (region == NULL)
        continue;
      if (region->landing_pads != NULL
          || region->type == ERT_MUST_NOT_THROW)
        bitmap_set_bit (r_reachable, region->index);

      if (dump_file
          && !bitmap_bit_p (r_reachable, region->index))
        fprintf (dump_file,
                 "Removing unreachable region %d\n",
                 region->index);
    }

  remove_unreachable_eh_regions (r_reachable);

  sbitmap_free (r_reachable);
}
/* Undo critical edge splitting on an EH landing pad.  Earlier, we
   optimistically split all sorts of edges, including EH edges.  The
   optimization passes in between may not have needed them; if not,
   we should undo the split.

   Recognize this case by having one EH edge incoming to the BB and
   one normal edge outgoing; BB should be empty apart from the
   post_landing_pad label.

   Note that this is slightly different from the empty handler case
   handled by cleanup_empty_eh, in that the actual handler may yet
   have actual code but the landing pad has been separated from the
   handler.  As such, cleanup_empty_eh relies on this transformation
   having been done first.  */

static bool
unsplit_eh (eh_landing_pad lp)
{
  basic_block bb = label_to_block (cfun, lp->post_landing_pad);
  gimple_stmt_iterator gsi;
  edge e_in, e_out;

  /* Quickly check the edge counts on BB for singularity.  */
  if (!single_pred_p (bb) || !single_succ_p (bb))
    return false;
  e_in = single_pred_edge (bb);
  e_out = single_succ_edge (bb);

  /* Input edge must be EH and output edge must be normal.  */
  if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
    return false;

  /* The block must be empty except for the labels and debug insns.  */
  gsi = gsi_after_labels (bb);
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);
  if (!gsi_end_p (gsi))
    return false;

  /* The destination block must not already have a landing pad
     for a different region.  */
  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
      tree lab;
      int lp_nr;

      if (!label_stmt)
        break;
      lab = gimple_label_label (label_stmt);
      lp_nr = EH_LANDING_PAD_NR (lab);
      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
        return false;
    }

  /* The new destination block must not already be a destination of
     the source block, lest we merge fallthru and eh edges and get
     all sorts of confused.  */
  if (find_edge (e_in->src, e_out->dest))
    return false;

  /* ??? We can get degenerate phis due to cfg cleanups.  I would have
     thought this should have been cleaned up by a phicprop pass, but
     that doesn't appear to handle virtuals.  Propagate by hand.  */
  if (!gimple_seq_empty_p (phi_nodes (bb)))
    {
      for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); )
        {
          gimple *use_stmt;
          gphi *phi = gpi.phi ();
          tree lhs = gimple_phi_result (phi);
          tree rhs = gimple_phi_arg_def (phi, 0);
          use_operand_p use_p;
          imm_use_iterator iter;

          FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
            {
              FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                SET_USE (use_p, rhs);
            }

          if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
            SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;

          remove_phi_node (&gpi, true);
        }
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
             lp->index, e_out->dest->index);

  /* Redirect the edge.  Since redirect_eh_edge_1 expects to be moving
     a successor edge, humor it.  But do the real CFG change with the
     predecessor of E_OUT in order to preserve the ordering of arguments
     to the PHI nodes in E_OUT->DEST.  */
  redirect_eh_edge_1 (e_in, e_out->dest, false);
  redirect_edge_pred (e_out, e_in->src);
  e_out->flags = e_in->flags;
  e_out->probability = e_in->probability;
  remove_edge (e_in);

  return true;
}
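/* Sketch of the unsplit (hypothetical block numbers): an empty landing
   pad block BB3 with one incoming EH edge BB1->BB3 and one outgoing
   normal edge BB3->BB4 is removed by redirecting the EH edge straight
   to BB4, i.e. BB1->BB3->BB4 becomes BB1->BB4, undoing the earlier
   critical-edge split.  */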
/* Examine each landing pad block and see if it matches unsplit_eh.  */

static bool
unsplit_all_eh (void)
{
  bool changed = false;
  eh_landing_pad lp;
  int i;

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp)
      changed |= unsplit_eh (lp);

  return changed;
}
/* Wrapper around unsplit_all_eh that makes it usable everywhere.  */

void
unsplit_eh_edges (void)
{
  bool changed;

  /* unsplit_all_eh can die looking up unreachable landing pads.  */
  maybe_remove_unreachable_handlers ();

  changed = unsplit_all_eh ();

  /* If EH edges have been unsplit, delete unreachable forwarder blocks.  */
  if (changed)
    {
      free_dominance_info (CDI_DOMINATORS);
      free_dominance_info (CDI_POST_DOMINATORS);
      delete_unreachable_blocks ();
    }
}
/* A subroutine of cleanup_empty_eh.  Redirect all EH edges incoming
   to OLD_BB to NEW_BB; return true on success, false on failure.

   OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
   PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
   Virtual PHIs may be deleted and marked for renaming.  */

static bool
cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
                             edge old_bb_out, bool change_region)
{
  gphi_iterator ngsi, ogsi;
  edge_iterator ei;
  edge e;
  bitmap ophi_handled;

  /* The destination block must not be a regular successor for any
     of the preds of the landing pad.  Thus, avoid turning
        <..>
         |  \
         |  <..>
         |  /
        <..>
     into
        <..>
         |  | EH
         |  <..>
         |  /
        <..>
     which CFG verification would choke on.  See PR45172 and PR51089.  */
  if (!single_pred_p (new_bb))
    FOR_EACH_EDGE (e, ei, old_bb->preds)
      if (find_edge (e->src, new_bb))
        return false;

  FOR_EACH_EDGE (e, ei, old_bb->preds)
    redirect_edge_var_map_clear (e);

  ophi_handled = BITMAP_ALLOC (NULL);

  /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
     for the edges we're going to move.  */
  for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
    {
      gphi *ophi, *nphi = ngsi.phi ();
      tree nresult, nop;

      nresult = gimple_phi_result (nphi);
      nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);

      /* Find the corresponding PHI in OLD_BB so we can forward-propagate
         the source ssa_name.  */
      ophi = NULL;
      for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
        {
          ophi = ogsi.phi ();
          if (gimple_phi_result (ophi) == nop)
            break;
          ophi = NULL;
        }

      /* If we did find the corresponding PHI, copy those inputs.  */
      if (ophi)
        {
          /* If NOP is used somewhere else beyond phis in new_bb, give up.  */
          if (!has_single_use (nop))
            {
              imm_use_iterator imm_iter;
              use_operand_p use_p;

              FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
                {
                  if (!gimple_debug_bind_p (USE_STMT (use_p))
                      && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
                          || gimple_bb (USE_STMT (use_p)) != new_bb))
                    goto fail;
                }
            }
          bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
          FOR_EACH_EDGE (e, ei, old_bb->preds)
            {
              location_t oloc;
              tree oop;

              if ((e->flags & EDGE_EH) == 0)
                continue;
              oop = gimple_phi_arg_def (ophi, e->dest_idx);
              oloc = gimple_phi_arg_location (ophi, e->dest_idx);
              redirect_edge_var_map_add (e, nresult, oop, oloc);
            }
        }
      /* If we didn't find the PHI, if it's a real variable or a VOP, we know
         from the fact that OLD_BB is tree_empty_eh_handler_p that the
         variable is unchanged from input to the block and we can simply
         re-use the input to NEW_BB from the OLD_BB_OUT edge.  */
      else
        {
          location_t nloc
            = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
          FOR_EACH_EDGE (e, ei, old_bb->preds)
            redirect_edge_var_map_add (e, nresult, nop, nloc);
        }
    }

  /* Second, verify that all PHIs from OLD_BB have been handled.  If not,
     we don't know what values from the other edges into NEW_BB to use.  */
  for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
    {
      gphi *ophi = ogsi.phi ();
      tree oresult = gimple_phi_result (ophi);
      if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
        goto fail;
    }

  /* Finally, move the edges and update the PHIs.  */
  for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
    if (e->flags & EDGE_EH)
      {
        /* ??? CFG manipulation routines do not try to update loop
           form on edge redirection.  Do so manually here for now.  */
        /* If we redirect a loop entry or latch edge that will either create
           a multiple entry loop or rotate the loop.  If the loops merge
           we may have created a loop with multiple latches.
           All of this isn't easily fixed thus cancel the affected loop
           and mark the other loop as possibly having multiple latches.  */
        if (e->dest == e->dest->loop_father->header)
          {
            mark_loop_for_removal (e->dest->loop_father);
            new_bb->loop_father->latch = NULL;
            loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
          }
        redirect_eh_edge_1 (e, new_bb, change_region);
        redirect_edge_succ (e, new_bb);
        flush_pending_stmts (e);
      }
    else
      ei_next (&ei);

  BITMAP_FREE (ophi_handled);
  return true;

 fail:
  FOR_EACH_EDGE (e, ei, old_bb->preds)
    redirect_edge_var_map_clear (e);
  BITMAP_FREE (ophi_handled);
  return false;
}
4483 /* A subroutine of cleanup_empty_eh. Move a landing pad LP from its
4484 old region to NEW_REGION at BB. */
4487 cleanup_empty_eh_move_lp (basic_block bb
, edge e_out
,
4488 eh_landing_pad lp
, eh_region new_region
)
  gimple_stmt_iterator gsi;
  eh_landing_pad *pp;

  for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
    continue;
  *pp = lp->next_lp;

  lp->region = new_region;
  lp->next_lp = new_region->landing_pads;
  new_region->landing_pads = lp;

  /* Delete the RESX that was matched within the empty handler block.  */
  gsi = gsi_last_bb (bb);
  unlink_stmt_vdef (gsi_stmt (gsi));
  gsi_remove (&gsi, true);

  /* Clean up E_OUT for the fallthru.  */
  e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
  e_out->probability = profile_probability::always ();
}
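
/* The unlink loop above is the classic pointer-to-pointer idiom for
   removing an element from a singly linked list without special-casing
   the list head.  A minimal standalone sketch of the same idiom, using
   a hypothetical NODE type that is not part of GCC:

     struct node { struct node *next; };

     void
     unlink (struct node **head, struct node *victim)
     {
       struct node **pp;
       for (pp = head; *pp != victim; pp = &(*pp)->next)
         continue;
       *pp = victim->next;
     }
*/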

/* A subroutine of cleanup_empty_eh.  Handle more complex cases of
   unsplitting than unsplit_eh was prepared to handle, e.g. when
   multiple incoming edges and phis are involved.  */

static bool
cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
{
  gimple_stmt_iterator gsi;
  tree lab;

  /* We really ought not have totally lost everything following
     a landing pad label.  Given that BB is empty, there had better
     be a successor.  */
  gcc_assert (e_out != NULL);

  /* The destination block must not already have a landing pad
     for a different region.  */
  lab = NULL;
  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
      int lp_nr;

      if (!stmt)
        break;
      lab = gimple_label_label (stmt);
      lp_nr = EH_LANDING_PAD_NR (lab);
      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
        return false;
    }

  /* Attempt to move the PHIs into the successor block.  */
  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "Unsplit EH landing pad %d to block %i "
                 "(via cleanup_empty_eh).\n",
                 lp->index, e_out->dest->index);
      return true;
    }

  return false;
}
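
/* For illustration (a hedged sketch, not from the original sources):
   unsplitting here collapses

       <throwing stmts> --EH--> LP_BB (labels and PHIs only)
                                  |
                                  v
                                DEST: real handler code

   into direct EH edges from the throwing statements to DEST, provided
   DEST carries no label belonging to a different EH region and the PHI
   merge performed by cleanup_empty_eh_merge_phis succeeds.  */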

/* Return true if edge E_FIRST is part of an empty infinite loop
   or leads to such a loop through a series of single successor
   empty basic blocks.  */

static bool
infinite_empty_loop_p (edge e_first)
{
  bool inf_loop = false;
  edge e;

  if (e_first->dest == e_first->src)
    return true;

  e_first->src->aux = (void *) 1;
  for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
    {
      gimple_stmt_iterator gsi;
      if (e->dest->aux)
        {
          inf_loop = true;
          break;
        }
      e->dest->aux = (void *) 1;
      gsi = gsi_after_labels (e->dest);
      if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
        gsi_next_nondebug (&gsi);
      if (!gsi_end_p (gsi))
        break;
    }
  e_first->src->aux = NULL;
  for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
    e->dest->aux = NULL;

  return inf_loop;
}
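
/* For illustration (a hedged example): given the chain

       E_FIRST -> BB1 (empty) -> BB2 (empty) -> BB1

   the walk above marks BB1 and BB2 through their AUX fields, detects
   the revisit of BB1, and returns true; the second loop then clears
   exactly the marks that were set.  The self-loop case, where
   E_FIRST->dest equals E_FIRST->src, is recognized up front without
   any marking.  */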

/* Examine the block associated with LP to determine if it's an empty
   handler for its EH region.  If so, attempt to redirect EH edges to
   an outer region.  Return true if the CFG was updated in any way.
   This is similar to jump forwarding, just across EH edges.  */

static bool
cleanup_empty_eh (eh_landing_pad lp)
{
  basic_block bb = label_to_block (cfun, lp->post_landing_pad);
  gimple_stmt_iterator gsi;
  gimple *resx;
  eh_region new_region;
  edge_iterator ei;
  edge e, e_out;
  bool has_non_eh_pred;
  bool ret = false;
  int new_lp_nr;

  /* There can be zero or one edges out of BB.  This is the quickest test.  */
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      e_out = NULL;
      break;
    case 1:
      e_out = single_succ_edge (bb);
      break;
    default:
      return false;
    }

  gsi = gsi_last_nondebug_bb (bb);
  resx = gsi_stmt (gsi);
  if (resx && is_gimple_resx (resx))
    {
      if (stmt_can_throw_external (cfun, resx))
        optimize_clobbers (bb);
      else if (sink_clobbers (bb))
        ret = true;
    }

  gsi = gsi_after_labels (bb);

  /* Make sure to skip debug statements.  */
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);

  /* If the block is totally empty, look for more unsplitting cases.  */
  if (gsi_end_p (gsi))
    {
      /* For the degenerate case of an infinite loop bail out.
         If bb has no successors and is totally empty, which can happen e.g.
         because of an incorrect noreturn attribute, bail out too.  */
      if (e_out == NULL
          || infinite_empty_loop_p (e_out))
        return ret;

      return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
    }

  /* The block should consist only of a single RESX statement, modulo a
     preceding call to __builtin_stack_restore if there is no outgoing
     edge, since the call can be eliminated in this case.  */
  resx = gsi_stmt (gsi);
  if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
    {
      gsi_next_nondebug (&gsi);
      resx = gsi_stmt (gsi);
    }
  if (!is_gimple_resx (resx))
    return ret;
  gcc_assert (gsi_one_nondebug_before_end_p (gsi));

  /* Determine if there are non-EH edges, or resx edges into the handler.  */
  has_non_eh_pred = false;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (!(e->flags & EDGE_EH))
      has_non_eh_pred = true;

  /* Find the handler that's outer of the empty handler by looking at
     where the RESX instruction was vectored.  */
  new_lp_nr = lookup_stmt_eh_lp (resx);
  new_region = get_eh_region_from_lp_number (new_lp_nr);

  /* If there's no destination region within the current function,
     redirection is trivial via removing the throwing statements from
     the EH region, removing the EH edges, and allowing the block
     to go unreachable.  */
  if (new_region == NULL)
    {
      gcc_assert (e_out == NULL);
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
        if (e->flags & EDGE_EH)
          {
            gimple *stmt = last_stmt (e->src);
            remove_stmt_from_eh_lp (stmt);
            remove_edge (e);
          }
        else
          ei_next (&ei);
      goto succeed;
    }

  /* If the destination region is a MUST_NOT_THROW, allow the runtime
     to handle the abort and allow the blocks to go unreachable.  */
  if (new_region->type == ERT_MUST_NOT_THROW)
    {
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
        if (e->flags & EDGE_EH)
          {
            gimple *stmt = last_stmt (e->src);
            remove_stmt_from_eh_lp (stmt);
            add_stmt_to_eh_lp (stmt, new_lp_nr);
            remove_edge (e);
          }
        else
          ei_next (&ei);
      goto succeed;
    }
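
  /* A hedged aside (not in the original sources): MUST_NOT_THROW regions
     typically come from places like destructors run during unwinding or
     noexcept functions; re-homing the throwing statements to that region
     lets the unwinder abort directly instead of bouncing through this
     now-empty handler first.  */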

  /* Try to redirect the EH edges and merge the PHIs into the destination
     landing pad block.  If the merge succeeds, we'll already have redirected
     all the EH edges.  The handler itself will go unreachable if there were
     no normal edges.  */
  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
    goto succeed;

  /* Finally, if all input edges are EH edges, then we can (potentially)
     reduce the number of transfers from the runtime by moving the landing
     pad from the original region to the new region.  This is a win when
     we remove the last CLEANUP region along a particular exception
     propagation path.  Since nothing changes except for the region with
     which the landing pad is associated, the PHI nodes do not need to be
     adjusted at all.  */
  if (!has_non_eh_pred)
    {
      cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
                 lp->index, new_region->index);

      /* ???  The CFG didn't change, but we may have rendered the
         old EH region unreachable.  Trigger a cleanup there.  */
      return true;
    }

  return ret;

 succeed:
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
  remove_eh_landing_pad (lp);
  return true;
}
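
/* For illustration (a hedged source-level example): in C++, a cleanup
   whose handler ends up doing no work, e.g.

     struct S { ~S () { } };
     void f () { S s; g (); }

   leaves behind a landing pad whose post-landing-pad block is just a
   RESX to the outer region once the destructor body is optimized away;
   cleanup_empty_eh is what removes such pads.  */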

/* Do a post-order traversal of the EH region tree.  Examine each
   post_landing_pad block and see if we can eliminate it as empty.  */

static bool
cleanup_all_empty_eh (void)
{
  bool changed = false;
  eh_landing_pad lp;
  int i;

  /* The post-order traversal may lead to quadratic behavior in the
     redirection of incoming EH edges from inner LPs, so first walk the
     region tree from inner to outer LPs to try to eliminate these
     edges.  */
  for (i = vec_safe_length (cfun->eh->lp_array) - 1; i >= 1; --i)
    {
      lp = (*cfun->eh->lp_array)[i];
      if (lp)
        changed |= cleanup_empty_eh (lp);
    }

  /* Now do the post-order traversal to eliminate outer empty LPs.  */
  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp)
      changed |= cleanup_empty_eh (lp);

  return changed;
}
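
/* A hedged complexity note: for a chain of N nested empty cleanup
   regions, the inner-to-outer sweep above redirects each incoming EH
   edge roughly once, about O(N) work in total, whereas eliminating
   outer pads first could re-redirect the same inner edges at every
   level, approaching O(N^2).  */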

/* Perform cleanups and lowering of exception handling:
    1) cleanup regions whose handlers do nothing are optimized out;
    2) MUST_NOT_THROW regions that became dead because of 1) are
       optimized out;
    3) info about regions that contain instructions, and regions
       reachable via local EH edges, is collected;
    4) the EH tree is pruned of regions that are no longer necessary.

   TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
         Unify those that have the same failure decl and locus.  */

static unsigned int
execute_cleanup_eh_1 (void)
{
  /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
     looking up unreachable landing pads.  */
  remove_unreachable_handlers ();

  /* Watch out for the region tree vanishing due to all unreachable.  */
  if (cfun->eh->region_tree)
    {
      bool changed = false;

      if (optimize)
        changed |= unsplit_all_eh ();
      changed |= cleanup_all_empty_eh ();

      if (changed)
        {
          free_dominance_info (CDI_DOMINATORS);
          free_dominance_info (CDI_POST_DOMINATORS);

          /* We delayed all basic block deletion, as we may have performed
             cleanups on EH edges while non-EH edges were still present.  */
          delete_unreachable_blocks ();

          /* We manipulated the landing pads.  Remove any region that no
             longer has a landing pad.  */
          remove_unreachable_handlers_no_lp ();

          return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
        }
    }

  return 0;
}

namespace {

const pass_data pass_data_cleanup_eh =
{
  GIMPLE_PASS, /* type */
  "ehcleanup", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_cleanup_eh : public gimple_opt_pass
{
public:
  pass_cleanup_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
  virtual bool gate (function *fun)
    {
      return fun->eh != NULL && fun->eh->region_tree != NULL;
    }

  virtual unsigned int execute (function *);

}; // class pass_cleanup_eh

unsigned int
pass_cleanup_eh::execute (function *fun)
{
  int ret = execute_cleanup_eh_1 ();

  /* If the function no longer needs an EH personality routine,
     clear it.  This exposes cross-language inlining opportunities
     and avoids references to a never-defined personality routine.  */
  if (DECL_FUNCTION_PERSONALITY (current_function_decl)
      && function_needs_eh_personality (fun) != eh_personality_lang)
    DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;

  return ret;
}

} // anon namespace

gimple_opt_pass *
make_pass_cleanup_eh (gcc::context *ctxt)
{
  return new pass_cleanup_eh (ctxt);
}
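
/* A hedged note: the clone method above is needed because this pass is
   instantiated more than once in the pass pipeline (its per-instance
   dump files show up as ehcleanup1, ehcleanup2, ...); the pass manager
   clones the template object returned by make_pass_cleanup_eh for each
   scheduled occurrence.  */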

/* Disable warnings about missing quoting in GCC diagnostics for
   the verification errors.  Their format strings don't follow GCC
   diagnostic conventions but are only used for debugging.  */
#if __GNUC__ >= 10
#  pragma GCC diagnostic push
#  pragma GCC diagnostic ignored "-Wformat-diag"
#endif

/* Verify that BB, which contains STMT as its last statement, has
   precisely the edge that make_eh_edges would create.  */

DEBUG_FUNCTION bool
verify_eh_edges (gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  eh_landing_pad lp = NULL;
  int lp_nr;
  edge_iterator ei;
  edge e, eh_edge;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr > 0)
    lp = get_eh_landing_pad_from_number (lp_nr);

  eh_edge = NULL;
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      if (e->flags & EDGE_EH)
        {
          if (eh_edge)
            {
              error ("BB %i has multiple EH edges", bb->index);
              return true;
            }
          else
            eh_edge = e;
        }
    }

  if (lp == NULL)
    {
      if (eh_edge)
        {
          error ("BB %i cannot throw but has an EH edge", bb->index);
          return true;
        }
      return false;
    }

  if (!stmt_could_throw_p (cfun, stmt))
    {
      error ("BB %i last statement has incorrectly set lp", bb->index);
      return true;
    }

  if (eh_edge == NULL)
    {
      error ("BB %i is missing an EH edge", bb->index);
      return true;
    }

  if (eh_edge->dest != label_to_block (cfun, lp->post_landing_pad))
    {
      error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
      return true;
    }

  return false;
}
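
/* A hedged usage note: this checker runs under the GIMPLE CFG verifier
   (see tree-cfg.c) on potentially-throwing last statements, so checking
   builds report stale or missing EH edges as soon as a pass leaves the
   CFG inconsistent with the EH tables.  */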

/* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */

DEBUG_FUNCTION bool
verify_eh_dispatch_edge (geh_dispatch *stmt)
{
  eh_region r;
  eh_catch c;
  basic_block src, dst;
  bool want_fallthru = true;
  edge_iterator ei;
  edge e, fall_edge;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  src = gimple_bb (stmt);

  FOR_EACH_EDGE (e, ei, src->succs)
    gcc_assert (e->aux == NULL);

  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
        {
          dst = label_to_block (cfun, c->label);
          e = find_edge (src, dst);
          if (e == NULL)
            {
              error ("BB %i is missing an edge", src->index);
              return true;
            }
          e->aux = (void *) e;

          /* A catch-all handler doesn't have a fallthru.  */
          if (c->type_list == NULL)
            {
              want_fallthru = false;
              break;
            }
        }
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      dst = label_to_block (cfun, r->u.allowed.label);
      e = find_edge (src, dst);
      if (e == NULL)
        {
          error ("BB %i is missing an edge", src->index);
          return true;
        }
      e->aux = (void *) e;
      break;

    default:
      gcc_unreachable ();
    }

  fall_edge = NULL;
  FOR_EACH_EDGE (e, ei, src->succs)
    {
      if (e->flags & EDGE_FALLTHRU)
        {
          if (fall_edge != NULL)
            {
              error ("BB %i too many fallthru edges", src->index);
              return true;
            }
          fall_edge = e;
        }
      else if (e->aux)
        e->aux = NULL;
      else
        {
          error ("BB %i has incorrect edge", src->index);
          return true;
        }
    }
  if ((fall_edge != NULL) ^ want_fallthru)
    {
      error ("BB %i has incorrect fallthru edge", src->index);
      return true;
    }

  return false;
}
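
/* A hedged note on the AUX discipline above: every edge the dispatch
   is expected to have is marked by pointing its AUX field at the edge
   itself; the final scan then treats any unmarked, non-fallthru edge
   as spurious and, on the success path, clears each mark so that all
   successor edges leave with AUX NULL again, matching the assertion
   on entry.  */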

#if __GNUC__ >= 10
#  pragma GCC diagnostic pop
#endif