/* Exception handling semantics and decomposition for trees.
   Copyright (C) 2003-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "fold-const.h"
#include "insn-config.h"
#include "cfgcleanup.h"
#include "internal-fn.h"
#include "gimple-iterator.h"
#include "tree-into-ssa.h"
#include "tree-inline.h"
#include "tree-pass.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "gimple-low.h"
/* In some instances a tree and a gimple need to be stored in the same table,
   i.e. in hash tables.  This is a structure to do this.  */
typedef union {tree *tp; tree t; gimple g;} treemple;
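
/* Illustrative sketch (not part of the original file): a treemple carries
   whichever of the three representations the context calls for, and the
   consumer must know which member is live, as with any C union.  The
   hypothetical function below shows both sides of the tree/gimple divide.  */
#if 0
static void
treemple_example (glabel *label_stmt, gtry *try_stmt)
{
  treemple as_tree, as_stmt;
  as_tree.t = gimple_label_label (label_stmt);  /* store a LABEL_DECL tree */
  as_stmt.g = try_stmt;                         /* store a GIMPLE_TRY stmt */
}
#endif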
/* Misc functions used in this file.  */

/* Remember and lookup EH landing pad data for arbitrary statements.
   Really this means any statement that could_throw_p.  We could
   stuff this information into the stmt_ann data structure, but:

   (1) We absolutely rely on this information being kept until
   we get to rtl.  Once we're done with lowering here, if we lose
   the information there's no way to recover it!

   (2) There are many more statements that *cannot* throw as
   compared to those that can.  We should be saving some amount
   of space by only allocating memory for those that can throw.  */
/* Add statement T in function IFUN to landing pad NUM.  */

static void
add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
{
  gcc_assert (num != 0);

  if (!get_eh_throw_stmt_table (ifun))
    set_eh_throw_stmt_table (ifun, hash_map<gimple, int>::create_ggc (31));

  gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
}
/* Add statement T in the current function (cfun) to EH landing pad NUM.  */

void
add_stmt_to_eh_lp (gimple t, int num)
{
  add_stmt_to_eh_lp_fn (cfun, t, num);
}
/* Add statement T to the single EH landing pad in REGION.  */

static void
record_stmt_eh_region (eh_region region, gimple t)
{
  if (region == NULL)
    return;
  if (region->type == ERT_MUST_NOT_THROW)
    add_stmt_to_eh_lp_fn (cfun, t, -region->index);
  else
    {
      eh_landing_pad lp = region->landing_pads;
      if (lp == NULL)
	lp = gen_eh_landing_pad (region);
      else
	gcc_assert (lp->next_lp == NULL);
      add_stmt_to_eh_lp_fn (cfun, t, lp->index);
    }
}
/* Remove statement T in function IFUN from its EH landing pad.  */

bool
remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
{
  if (!get_eh_throw_stmt_table (ifun))
    return false;

  if (!get_eh_throw_stmt_table (ifun)->get (t))
    return false;

  get_eh_throw_stmt_table (ifun)->remove (t);
  return true;
}
/* Remove statement T in the current function (cfun) from its
   EH landing pad.  */

bool
remove_stmt_from_eh_lp (gimple t)
{
  return remove_stmt_from_eh_lp_fn (cfun, t);
}
/* Determine if statement T is inside an EH region in function IFUN.
   Positive numbers indicate a landing pad index; negative numbers
   indicate a MUST_NOT_THROW region index; zero indicates that the
   statement is not recorded in the region table.  */

int
lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
{
  if (ifun->eh->throw_stmt_table == NULL)
    return 0;

  int *lp_nr = ifun->eh->throw_stmt_table->get (t);
  return lp_nr ? *lp_nr : 0;
}
/* Likewise, but always use the current function.  */

int
lookup_stmt_eh_lp (gimple t)
{
  /* We can get called from initialized data when -fnon-call-exceptions
     is on; prevent crash.  */
  if (!cfun)
    return 0;
  return lookup_stmt_eh_lp_fn (cfun, t);
}
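
/* Illustrative sketch (hypothetical caller, not from the original source)
   of how the sign convention documented above is decoded:  */
#if 0
static void
lookup_example (gimple stmt)
{
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr > 0)
    ;  /* STMT may throw; LP_NR is its EH landing pad index.  */
  else if (lp_nr < 0)
    ;  /* STMT sits in the MUST_NOT_THROW region numbered -LP_NR.  */
  else
    ;  /* STMT is not recorded in the region table at all.  */
}
#endif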
/* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
   nodes and LABEL_DECL nodes.  We will use this during the second phase to
   determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */

struct finally_tree_node
{
  /* When storing a GIMPLE_TRY, we have to record a gimple.  However
     when deciding whether a GOTO to a certain LABEL_DECL (which is a
     tree) leaves the TRY block, it's necessary to record a tree in
     this field.  Thus a treemple is used.  */
  treemple child;
  gtry *parent;
};
/* Hashtable helpers.  */

struct finally_tree_hasher : free_ptr_hash <finally_tree_node>
{
  static inline hashval_t hash (const finally_tree_node *);
  static inline bool equal (const finally_tree_node *,
			    const finally_tree_node *);
};

inline hashval_t
finally_tree_hasher::hash (const finally_tree_node *v)
{
  return (intptr_t)v->child.t >> 4;
}

inline bool
finally_tree_hasher::equal (const finally_tree_node *v,
			    const finally_tree_node *c)
{
  return v->child.t == c->child.t;
}

/* Note that this table is *not* marked GTY.  It is short-lived.  */
static hash_table<finally_tree_hasher> *finally_tree;
static void
record_in_finally_tree (treemple child, gtry *parent)
{
  struct finally_tree_node *n;
  finally_tree_node **slot;

  n = XNEW (struct finally_tree_node);
  n->child = child;
  n->parent = parent;

  slot = finally_tree->find_slot (n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}

static void
collect_finally_tree (gimple stmt, gtry *region);
/* Go through the gimple sequence.  Works with collect_finally_tree to
   record all GIMPLE_LABEL and GIMPLE_TRY statements.  */

static void
collect_finally_tree_1 (gimple_seq seq, gtry *region)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
    collect_finally_tree (gsi_stmt (gsi), region);
}
static void
collect_finally_tree (gimple stmt, gtry *region)
{
  treemple temp;

  switch (gimple_code (stmt))
    {
    case GIMPLE_LABEL:
      temp.t = gimple_label_label (as_a <glabel *> (stmt));
      record_in_finally_tree (temp, region);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
	{
	  temp.g = stmt;
	  record_in_finally_tree (temp, region);
	  collect_finally_tree_1 (gimple_try_eval (stmt),
				  as_a <gtry *> (stmt));
	  collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
	}
      else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
	{
	  collect_finally_tree_1 (gimple_try_eval (stmt), region);
	  collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
	}
      break;

    case GIMPLE_CATCH:
      collect_finally_tree_1 (gimple_catch_handler (
				as_a <gcatch *> (stmt)),
			      region);
      break;

    case GIMPLE_EH_FILTER:
      collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
      break;

    case GIMPLE_EH_ELSE:
      {
	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
	collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region);
	collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region);
      }
      break;

    default:
      /* A type, a decl, or some kind of statement that we're not
	 interested in.  Don't walk them.  */
      break;
    }
}
/* Use the finally tree to determine if a jump from START to TARGET
   would leave the try_finally node that START lives in.  */

static bool
outside_finally_tree (treemple start, gimple target)
{
  struct finally_tree_node n, *p;

  do
    {
      n.child = start;
      p = finally_tree->find (&n);
      if (!p)
	return true;
      start.g = p->parent;
    }
  while (start.g != target);

  return false;
}
/* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
   nodes into a set of gotos, magic labels, and eh regions.
   The eh region creation is straight-forward, but frobbing all the gotos
   and such into shape isn't.  */

/* The sequence into which we record all EH stuff.  This will be
   placed at the end of the function when we're all done.  */
static gimple_seq eh_seq;

/* Record whether an EH region contains something that can throw,
   indexed by EH region number.  */
static bitmap eh_region_may_contain_throw_map;

/* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
   statements that are seen to escape this GIMPLE_TRY_FINALLY node.
   The idea is to record a gimple statement for everything except for
   the conditionals, which get their labels recorded.  Since labels are
   of type 'tree', we need this node to store both gimple and tree
   objects.  REPL_STMT is the sequence used to replace the goto/return
   statement.  CONT_STMT is used to store the statement that allows
   the return/goto to jump to the original destination.  */

struct goto_queue_node
{
  treemple stmt;
  location_t location;
  gimple_seq repl_stmt;
  gimple cont_stmt;
  int index;
  /* This is used when index >= 0 to indicate that stmt is a label (as
     opposed to a goto stmt).  */
  bool is_label;
};
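
/* For orientation, an illustrative example (not from the original source):
   given

	try { ... goto out; ... } finally { F; }
     out:;

   the goto escapes the GIMPLE_TRY_FINALLY, so it is queued.  Its REPL_STMT
   becomes a branch to the start of the lowered finally code, and its
   CONT_STMT is the original "goto out;", re-emitted after (a copy of) F so
   that control still reaches the old destination.  */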
/* State of the world while lowering.  */

struct leh_state
{
  /* What's "current" while constructing the eh region tree.  These
     correspond to variables of the same name in cfun->eh, which we
     don't have easy access to.  */
  eh_region cur_region;

  /* What's "current" for the purposes of __builtin_eh_pointer.  For
     a CATCH, this is the associated TRY.  For an EH_FILTER, this is
     the associated ALLOWED_EXCEPTIONS, etc.  */
  eh_region ehp_region;

  /* Processing of TRY_FINALLY requires a bit more state.  This is
     split out into a separate structure so that we don't have to
     copy so much when processing other nodes.  */
  struct leh_tf_state *tf;
};

struct leh_tf_state
{
  /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
     try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
     this so that outside_finally_tree can reliably reference the tree used
     in the collect_finally_tree data structures.  */
  gtry *try_finally_expr;
  gtry *top_p;

  /* While lowering a top_p usually it is expanded into multiple statements,
     thus we need the following field to store them.  */
  gimple_seq top_p_seq;

  /* The state outside this try_finally node.  */
  struct leh_state *outer;

  /* The exception region created for it.  */
  eh_region region;

  /* The goto queue.  */
  struct goto_queue_node *goto_queue;
  size_t goto_queue_size;
  size_t goto_queue_active;

  /* Pointer map to help in searching goto_queue when it is large.  */
  hash_map<gimple, goto_queue_node *> *goto_queue_map;

  /* The set of unique labels seen as entries in the goto queue.  */
  vec<tree> dest_array;

  /* A label to be added at the end of the completed transformed
     sequence.  It will be set if may_fallthru was true *at one time*,
     though subsequent transformations may have cleared that flag.  */
  tree fallthru_label;

  /* True if it is possible to fall out the bottom of the try block.
     Cleared if the fallthru is converted to a goto.  */
  bool may_fallthru;

  /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
  bool may_return;

  /* True if the finally block can receive an exception edge.
     Cleared if the exception case is handled by code duplication.  */
  bool may_throw;
};
static gimple_seq lower_eh_must_not_throw (struct leh_state *, gtry *);

/* Search for STMT in the goto queue.  Return the replacement,
   or null if the statement isn't in the queue.  */

#define LARGE_GOTO_QUEUE 20

static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);
static gimple_seq
find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
{
  unsigned int i;

  if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
    {
      for (i = 0; i < tf->goto_queue_active; i++)
	if (tf->goto_queue[i].stmt.g == stmt.g)
	  return tf->goto_queue[i].repl_stmt;
      return NULL;
    }

  /* If we have a large number of entries in the goto_queue, create a
     pointer map and use that for searching.  */

  if (!tf->goto_queue_map)
    {
      tf->goto_queue_map = new hash_map<gimple, goto_queue_node *>;
      for (i = 0; i < tf->goto_queue_active; i++)
	{
	  bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
						  &tf->goto_queue[i]);
	  gcc_assert (!existed);
	}
    }

  goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
  if (slot != NULL)
    return ((*slot)->repl_stmt);

  return NULL;
}
/* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
   lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
   then we can just splat it in, otherwise we add the new stmts immediately
   after the GIMPLE_COND and redirect.  */

static void
replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
				gimple_stmt_iterator *gsi)
{
  tree label;
  gimple_seq new_seq;
  treemple temp;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  temp.tp = tp;
  new_seq = find_goto_replacement (tf, temp);
  if (!new_seq)
    return;

  if (gimple_seq_singleton_p (new_seq)
      && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
    {
      *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
      return;
    }

  label = create_artificial_label (loc);
  /* Set the new label for the GIMPLE_COND.  */
  *tp = label;

  gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
  gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
}
/* The real work of replace_goto_queue.  Returns with TSI updated to
   point to the next statement.  */

static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);

static void
replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
		      gimple_stmt_iterator *gsi)
{
  gimple_seq seq;
  treemple temp;
  temp.g = NULL;

  switch (gimple_code (stmt))
    {
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      temp.g = stmt;
      seq = find_goto_replacement (tf, temp);
      if (seq)
	{
	  gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
	  gsi_remove (gsi, false);
	  return;
	}
      break;

    case GIMPLE_COND:
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
      break;

    case GIMPLE_TRY:
      replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
      break;
    case GIMPLE_CATCH:
      replace_goto_queue_stmt_list (gimple_catch_handler_ptr (
				      as_a <gcatch *> (stmt)),
				    tf);
      break;
    case GIMPLE_EH_FILTER:
      replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
      break;
    case GIMPLE_EH_ELSE:
      {
	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
	replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt),
				      tf);
	replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt),
				      tf);
      }
      break;

    default:
      /* These won't have gotos in them.  */
      break;
    }

  gsi_next (gsi);
}
/* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */

static void
replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
{
  gimple_stmt_iterator gsi = gsi_start (*seq);

  while (!gsi_end_p (gsi))
    replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
}

/* Replace all goto queue members.  */

static void
replace_goto_queue (struct leh_tf_state *tf)
{
  if (tf->goto_queue_active == 0)
    return;
  replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
  replace_goto_queue_stmt_list (&eh_seq, tf);
}
/* Add a new record to the goto queue contained in TF.  NEW_STMT is the
   data to be added, IS_LABEL indicates whether NEW_STMT is a label or
   a gimple return statement.  */

static void
record_in_goto_queue (struct leh_tf_state *tf,
		      treemple new_stmt,
		      int index,
		      bool is_label,
		      location_t location)
{
  size_t active, size;
  struct goto_queue_node *q;

  gcc_assert (!tf->goto_queue_map);

  active = tf->goto_queue_active;
  size = tf->goto_queue_size;
  if (active >= size)
    {
      size = (size ? size * 2 : 32);
      tf->goto_queue_size = size;
      tf->goto_queue
	= XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
    }

  q = &tf->goto_queue[active];
  tf->goto_queue_active = active + 1;

  memset (q, 0, sizeof (*q));
  q->stmt = new_stmt;
  q->index = index;
  q->location = location;
  q->is_label = is_label;
}
/* Record the LABEL label in the goto queue contained in TF.
   TF is not null.  */

static void
record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
			    location_t location)
{
  int index;
  treemple temp, new_stmt;

  if (!label)
    return;

  /* Computed and non-local gotos do not get processed.  Given
     their nature we can neither tell whether we've escaped the
     finally block nor redirect them if we knew.  */
  if (TREE_CODE (label) != LABEL_DECL)
    return;

  /* No need to record gotos that don't leave the try block.  */
  temp.t = label;
  if (!outside_finally_tree (temp, tf->try_finally_expr))
    return;

  if (! tf->dest_array.exists ())
    {
      tf->dest_array.create (10);
      tf->dest_array.quick_push (label);
      index = 0;
    }
  else
    {
      int n = tf->dest_array.length ();
      for (index = 0; index < n; ++index)
	if (tf->dest_array[index] == label)
	  break;
      if (index == n)
	tf->dest_array.safe_push (label);
    }

  /* In the case of a GOTO we want to record the destination label,
     since with a GIMPLE_COND we have an easy access to the then/else
     labels.  */
  new_stmt = stmt;
  record_in_goto_queue (tf, new_stmt, index, true, location);
}
/* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
   node, and if so record that fact in the goto queue associated with that
   try_finally node.  */

static void
maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
{
  struct leh_tf_state *tf = state->tf;
  treemple new_stmt;

  if (!tf)
    return;

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	gcond *cond_stmt = as_a <gcond *> (stmt);
	new_stmt.tp = gimple_op_ptr (cond_stmt, 2);
	record_in_goto_queue_label (tf, new_stmt,
				    gimple_cond_true_label (cond_stmt),
				    EXPR_LOCATION (*new_stmt.tp));
	new_stmt.tp = gimple_op_ptr (cond_stmt, 3);
	record_in_goto_queue_label (tf, new_stmt,
				    gimple_cond_false_label (cond_stmt),
				    EXPR_LOCATION (*new_stmt.tp));
      }
      break;
    case GIMPLE_GOTO:
      new_stmt.g = stmt;
      record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
				  gimple_location (stmt));
      break;

    case GIMPLE_RETURN:
      tf->may_return = true;
      new_stmt.g = stmt;
      record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
      break;

    default:
      gcc_unreachable ();
    }
}
#ifdef ENABLE_CHECKING
/* We do not process GIMPLE_SWITCHes for now.  As long as the original source
   was in fact structured, and we've not yet done jump threading, then none
   of the labels will leave outer GIMPLE_TRY_FINALLY nodes.  Verify this.  */

static void
verify_norecord_switch_expr (struct leh_state *state,
			     gswitch *switch_expr)
{
  struct leh_tf_state *tf = state->tf;
  size_t i, n;

  if (!tf)
    return;

  n = gimple_switch_num_labels (switch_expr);

  for (i = 0; i < n; ++i)
    {
      treemple temp;
      tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
      temp.t = lab;
      gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
    }
}
#else
#define verify_norecord_switch_expr(state, switch_expr)
#endif
/* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
   non-null, insert it before the new branch.  */

static void
do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
{
  gimple x;

  /* In the case of a return, the queue node must be a gimple statement.  */
  gcc_assert (!q->is_label);

  /* Note that the return value may have already been computed, e.g.,

	int x;
	int foo (void)
	{
	  x = 0;
	  try {
	    return x;
	  } finally {
	    x++;
	  }
	}

     should return 0, not 1.  We don't have to do anything to make
     this happen because the return value has been placed in the
     RESULT_DECL already.  */

  q->cont_stmt = q->stmt.g;

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}
/* Similar, but easier, for GIMPLE_GOTO.  */

static void
do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
		     struct leh_tf_state *tf)
{
  ggoto *x;

  gcc_assert (q->is_label);

  q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}
/* Emit a standard landing pad sequence into SEQ for REGION.  */

static void
emit_post_landing_pad (gimple_seq *seq, eh_region region)
{
  eh_landing_pad lp = region->landing_pads;
  glabel *x;

  if (lp == NULL)
    lp = gen_eh_landing_pad (region);

  lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
  EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;

  x = gimple_build_label (lp->post_landing_pad);
  gimple_seq_add_stmt (seq, x);
}
/* Emit a RESX statement into SEQ for REGION.  */

static void
emit_resx (gimple_seq *seq, eh_region region)
{
  gresx *x = gimple_build_resx (region->index);
  gimple_seq_add_stmt (seq, x);
  if (region->outer)
    record_stmt_eh_region (region->outer, x);
}

/* Emit an EH_DISPATCH statement into SEQ for REGION.  */

static void
emit_eh_dispatch (gimple_seq *seq, eh_region region)
{
  geh_dispatch *x = gimple_build_eh_dispatch (region->index);
  gimple_seq_add_stmt (seq, x);
}
/* Note that the current EH region may contain a throw, or a
   call to a function which itself may contain a throw.  */

static void
note_eh_region_may_contain_throw (eh_region region)
{
  while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
    {
      if (region->type == ERT_MUST_NOT_THROW)
	break;
      region = region->outer;
      if (region == NULL)
	break;
    }
}

/* Check if REGION has been marked as containing a throw.  If REGION is
   NULL, this predicate is false.  */

static inline bool
eh_region_may_contain_throw (eh_region r)
{
  return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
}
/* We want to transform
	try { body; } catch { stuff; }
   to
	normal_sequence:
	  body;
	  over:
	eh_sequence:
	  landing_pad:
	  stuff;
	  goto over;

   TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
   should be placed before the second operand, or NULL.  OVER is
   an existing label that should be put at the exit, or NULL.  */

static gimple_seq
frob_into_branch_around (gtry *tp, eh_region region, tree over)
{
  gimple x;
  gimple_seq cleanup, result;
  location_t loc = gimple_location (tp);

  cleanup = gimple_try_cleanup (tp);
  result = gimple_try_eval (tp);

  if (region)
    emit_post_landing_pad (&eh_seq, region);

  if (gimple_seq_may_fallthru (cleanup))
    {
      if (!over)
	over = create_artificial_label (loc);
      x = gimple_build_goto (over);
      gimple_set_location (x, loc);
      gimple_seq_add_stmt (&cleanup, x);
    }
  gimple_seq_add_seq (&eh_seq, cleanup);

  if (over)
    {
      x = gimple_build_label (over);
      gimple_seq_add_stmt (&result, x);
    }
  return result;
}
/* A subroutine of lower_try_finally.  Duplicate the tree rooted at T.
   Make sure to record all new labels found.  */

static gimple_seq
lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
			     location_t loc)
{
  gtry *region = NULL;
  gimple_seq new_seq;
  gimple_stmt_iterator gsi;

  new_seq = copy_gimple_seq_and_replace_locals (seq);

  for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
	{
	  tree block = gimple_block (stmt);
	  gimple_set_location (stmt, loc);
	  gimple_set_block (stmt, block);
	}
    }

  if (outer_state->tf)
    region = outer_state->tf->try_finally_expr;
  collect_finally_tree_1 (new_seq, region);

  return new_seq;
}
/* A subroutine of lower_try_finally.  Create a fallthru label for
   the given try_finally state.  The only tricky bit here is that
   we have to make sure to record the label in our outer context.  */

static tree
lower_try_finally_fallthru_label (struct leh_tf_state *tf)
{
  tree label = tf->fallthru_label;
  treemple temp;

  if (!label)
    {
      label = create_artificial_label (gimple_location (tf->try_finally_expr));
      tf->fallthru_label = label;
      if (tf->outer->tf)
	{
	  temp.t = label;
	  record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
	}
    }
  return label;
}
/* A subroutine of lower_try_finally.  If FINALLY consists of a
   GIMPLE_EH_ELSE node, return it.  */

static inline geh_else *
get_eh_else (gimple_seq finally)
{
  gimple x = gimple_seq_first_stmt (finally);
  if (gimple_code (x) == GIMPLE_EH_ELSE)
    {
      gcc_assert (gimple_seq_singleton_p (finally));
      return as_a <geh_else *> (x);
    }
  return NULL;
}
/* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
   langhook returns non-null, then the language requires that the exception
   path out of a try_finally be treated specially.  To wit: the code within
   the finally block may not itself throw an exception.  We have two choices
   here.  First we can duplicate the finally block and wrap it in a
   must_not_throw region.  Second, we can generate code like

	try {
	  finally_block;
	} catch {
	  if (fintmp == eh_edge)
	    protect_cleanup_actions;
	}

   where "fintmp" is the temporary used in the switch statement generation
   alternative considered below.  For the nonce, we always choose the first
   option.

   THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */

static void
honor_protect_cleanup_actions (struct leh_state *outer_state,
			       struct leh_state *this_state,
			       struct leh_tf_state *tf)
{
  tree protect_cleanup_actions;
  gimple_stmt_iterator gsi;
  bool finally_may_fallthru;
  gimple_seq finally;
  gimple x;
  geh_mnt *eh_mnt;
  gtry *try_stmt;
  geh_else *eh_else;

  /* First check for nothing to do.  */
  if (lang_hooks.eh_protect_cleanup_actions == NULL)
    return;
  protect_cleanup_actions = lang_hooks.eh_protect_cleanup_actions ();
  if (protect_cleanup_actions == NULL)
    return;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Duplicate the FINALLY block.  Only need to do this for try-finally,
     and not for cleanups.  If we've got an EH_ELSE, extract it now.  */
  if (eh_else)
    {
      finally = gimple_eh_else_e_body (eh_else);
      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
    }
  else if (this_state)
    finally = lower_try_finally_dup_block (finally, outer_state,
	gimple_location (tf->try_finally_expr));
  finally_may_fallthru = gimple_seq_may_fallthru (finally);

  /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
     set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
     to be in an enclosing scope, but needs to be implemented at this level
     to avoid a nesting violation (see wrap_temporary_cleanups in
     cp/decl.c).  Since it's logically at an outer level, we should call
     terminate before we get to it, so strip it away before adding the
     MUST_NOT_THROW filter.  */
  gsi = gsi_start (finally);
  x = gsi_stmt (gsi);
  if (gimple_code (x) == GIMPLE_TRY
      && gimple_try_kind (x) == GIMPLE_TRY_CATCH
      && gimple_try_catch_is_cleanup (x))
    {
      gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
      gsi_remove (&gsi, false);
    }

  /* Wrap the block with protect_cleanup_actions as the action.  */
  eh_mnt = gimple_build_eh_must_not_throw (protect_cleanup_actions);
  try_stmt = gimple_build_try (finally, gimple_seq_alloc_with_stmt (eh_mnt),
			       GIMPLE_TRY_CATCH);
  finally = lower_eh_must_not_throw (outer_state, try_stmt);

  /* Drop all of this into the exception sequence.  */
  emit_post_landing_pad (&eh_seq, tf->region);
  gimple_seq_add_seq (&eh_seq, finally);
  if (finally_may_fallthru)
    emit_resx (&eh_seq, tf->region);

  /* Having now been handled, EH isn't to be considered with
     the rest of the outgoing edges.  */
  tf->may_throw = false;
}
/* A subroutine of lower_try_finally.  We have determined that there is
   no fallthru edge out of the finally block.  This means that there is
   no outgoing edge corresponding to any incoming edge.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_nofallthru (struct leh_state *state,
			      struct leh_tf_state *tf)
{
  tree lab;
  gimple x;
  geh_else *eh_else;
  gimple_seq finally;
  struct goto_queue_node *q, *qe;

  lab = create_artificial_label (gimple_location (tf->try_finally_expr));

  /* We expect that tf->top_p is a GIMPLE_TRY.  */
  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  x = gimple_build_label (lab);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  for (; q < qe; ++q)
    if (q->index < 0)
      do_return_redirection (q, lab, NULL);
    else
      do_goto_redirection (q, lab, NULL, tf);

  replace_goto_queue (tf);

  /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      finally = gimple_eh_else_n_body (eh_else);
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
	{
	  finally = gimple_eh_else_e_body (eh_else);
	  lower_eh_constructs_1 (state, &finally);

	  emit_post_landing_pad (&eh_seq, tf->region);
	  gimple_seq_add_seq (&eh_seq, finally);
	}
    }
  else
    {
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
	{
	  emit_post_landing_pad (&eh_seq, tf->region);

	  x = gimple_build_goto (lab);
	  gimple_set_location (x, gimple_location (tf->try_finally_expr));
	  gimple_seq_add_stmt (&eh_seq, x);
	}
    }
}
/* A subroutine of lower_try_finally.  We have determined that there is
   exactly one destination of the finally block.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  geh_else *eh_else;
  glabel *label_stmt;
  gimple x;
  gimple_seq finally;
  gimple_stmt_iterator gsi;
  tree finally_label;
  location_t loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* Since there's only one destination, and the destination edge can only
     either be EH or non-EH, that implies that all of our incoming edges
     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      if (tf->may_throw)
	finally = gimple_eh_else_e_body (eh_else);
      else
	finally = gimple_eh_else_n_body (eh_else);
    }

  lower_eh_constructs_1 (state, &finally);

  for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
	{
	  tree block = gimple_block (stmt);
	  gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
	  gimple_set_block (stmt, block);
	}
    }

  if (tf->may_throw)
    {
      /* Only reachable via the exception edge.  Add the given label to
	 the head of the FINALLY block.  Append a RESX at the end.  */
      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, finally);
      emit_resx (&eh_seq, tf->region);
      return;
    }

  if (tf->may_fallthru)
    {
      /* Only reachable via the fallthru edge.  Do nothing but let
	 the two blocks run together; we'll fall out the bottom.  */
      gimple_seq_add_seq (&tf->top_p_seq, finally);
      return;
    }

  finally_label = create_artificial_label (loc);
  label_stmt = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, label_stmt);

  gimple_seq_add_seq (&tf->top_p_seq, finally);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;

  if (tf->may_return)
    {
      /* Reachable by return expressions only.  Redirect them.  */
      for (; q < qe; ++q)
	do_return_redirection (q, finally_label, NULL);
      replace_goto_queue (tf);
    }
  else
    {
      /* Reachable by goto expressions only.  Redirect them.  */
      for (; q < qe; ++q)
	do_goto_redirection (q, finally_label, NULL, tf);
      replace_goto_queue (tf);

      if (tf->dest_array[0] == tf->fallthru_label)
	{
	  /* Reachable by goto to fallthru label only.  Redirect it
	     to the new label (already created, sadly), and do not
	     emit the final branch out, or the fallthru label.  */
	  tf->fallthru_label = NULL;
	  return;
	}
    }

  /* Place the original return/goto to the original destination
     immediately after the finally block.  */
  x = tf->goto_queue[0].cont_stmt;
  gimple_seq_add_stmt (&tf->top_p_seq, x);
  maybe_record_in_goto_queue (state, x);
}
/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by duplicating the
   finally block for every destination.  */

static void
lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
{
  gimple_seq finally;
  gimple_seq new_stmt;
  gimple_seq seq;
  gimple x;
  geh_else *eh_else;
  tree tmp;
  location_t tf_loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);

  /* Notice EH_ELSE, and simplify some of the remaining code
     by considering FINALLY to be the normal return path only.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    finally = gimple_eh_else_n_body (eh_else);

  tf->top_p_seq = gimple_try_eval (tf->top_p);
  new_stmt = NULL;

  if (tf->may_fallthru)
    {
      seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);
      gimple_seq_add_seq (&new_stmt, seq);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&new_stmt, x);
    }

  if (tf->may_throw)
    {
      /* We don't need to copy the EH path of EH_ELSE,
	 since it is only emitted once.  */
      if (eh_else)
	seq = gimple_eh_else_e_body (eh_else);
      else
	seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);

      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, seq);
      emit_resx (&eh_seq, tf->region);
    }

  if (tf->goto_queue)
    {
      struct goto_queue_node *q, *qe;
      int return_index, index;
      struct labels_s
      {
	struct goto_queue_node *q;
	tree label;
      } *labels;

      return_index = tf->dest_array.length ();
      labels = XCNEWVEC (struct labels_s, return_index + 1);

      q = tf->goto_queue;
      qe = q + tf->goto_queue_active;
      for (; q < qe; q++)
	{
	  index = q->index < 0 ? return_index : q->index;

	  if (!labels[index].q)
	    labels[index].q = q;
	}

      for (index = 0; index < return_index + 1; index++)
	{
	  tree lab;

	  q = labels[index].q;
	  if (! q)
	    continue;

	  lab = labels[index].label
	    = create_artificial_label (tf_loc);

	  if (index == return_index)
	    do_return_redirection (q, lab, NULL);
	  else
	    do_goto_redirection (q, lab, NULL, tf);

	  x = gimple_build_label (lab);
	  gimple_seq_add_stmt (&new_stmt, x);

	  seq = lower_try_finally_dup_block (finally, state, q->location);
	  lower_eh_constructs_1 (state, &seq);
	  gimple_seq_add_seq (&new_stmt, seq);

	  gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
	  maybe_record_in_goto_queue (state, q->cont_stmt);
	}

      for (q = tf->goto_queue; q < qe; q++)
	{
	  tree lab;

	  index = q->index < 0 ? return_index : q->index;

	  if (labels[index].q == q)
	    continue;

	  lab = labels[index].label;

	  if (index == return_index)
	    do_return_redirection (q, lab, NULL);
	  else
	    do_goto_redirection (q, lab, NULL, tf);
	}

      replace_goto_queue (tf);
      free (labels);
    }

  /* Need to link new stmts after running replace_goto_queue due
     to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
}
/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by instrumenting
   each incoming edge and creating a switch statement at the end of the
   finally block that branches to the appropriate destination.  */

static void
lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  tree finally_tmp, finally_label;
  int return_index, eh_index, fallthru_index;
  int nlabels, ndests, j, last_case_index;
  tree last_case;
  vec<tree> case_label_vec;
  gimple_seq switch_body = NULL;
  gimple x;
  geh_else *eh_else;
  tree tmp;
  gswitch *switch_stmt;
  gimple_seq finally;
  hash_map<tree, gimple> *cont_map = NULL;
  /* The location of the TRY_FINALLY stmt.  */
  location_t tf_loc = gimple_location (tf->try_finally_expr);
  /* The location of the finally block.  */
  location_t finally_loc;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Mash the TRY block to the head of the chain.  */
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* The location of the finally is either the last stmt in the finally
     block or the location of the TRY_FINALLY itself.  */
  x = gimple_seq_last_stmt (finally);
  finally_loc = x ? gimple_location (x) : tf_loc;

  /* Prepare for switch statement generation.  */
  nlabels = tf->dest_array.length ();
  return_index = nlabels;
  eh_index = return_index + tf->may_return;
  fallthru_index = eh_index + (tf->may_throw && !eh_else);
  ndests = fallthru_index + tf->may_fallthru;

  finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
  finally_label = create_artificial_label (finally_loc);

  /* We use vec::quick_push on case_label_vec throughout this function,
     since we know the size in advance and allocate precisely as much
     space as needed.  */
  case_label_vec.create (ndests);
  last_case = NULL;
  last_case_index = 0;

  /* Begin inserting code for getting to the finally block.  Things
     are done in this order to correspond to the sequence the code is
     laid out.  */

  if (tf->may_fallthru)
    {
      x = gimple_build_assign (finally_tmp,
			       build_int_cst (integer_type_node,
					      fallthru_index));
      gimple_seq_add_stmt (&tf->top_p_seq, x);

      tmp = build_int_cst (integer_type_node, fallthru_index);
      last_case = build_case_label (tmp, NULL,
				    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&switch_body, x);
    }

  /* For EH_ELSE, emit the exception path (plus resx) now, then
     subsequently we only need consider the normal path.  */
  if (eh_else)
    {
      if (tf->may_throw)
	{
	  finally = gimple_eh_else_e_body (eh_else);
	  lower_eh_constructs_1 (state, &finally);

	  emit_post_landing_pad (&eh_seq, tf->region);
	  gimple_seq_add_seq (&eh_seq, finally);
	  emit_resx (&eh_seq, tf->region);
	}

      finally = gimple_eh_else_n_body (eh_else);
    }
  else if (tf->may_throw)
    {
      emit_post_landing_pad (&eh_seq, tf->region);

      x = gimple_build_assign (finally_tmp,
			       build_int_cst (integer_type_node, eh_index));
      gimple_seq_add_stmt (&eh_seq, x);

      x = gimple_build_goto (finally_label);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&eh_seq, x);

      tmp = build_int_cst (integer_type_node, eh_index);
      last_case = build_case_label (tmp, NULL,
				    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&eh_seq, x);
      emit_resx (&eh_seq, tf->region);
    }

  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  lower_eh_constructs_1 (state, &finally);
  gimple_seq_add_seq (&tf->top_p_seq, finally);

  /* Redirect each incoming goto edge.  */
  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  j = last_case_index + tf->may_return;
  /* Prepare the assignments to finally_tmp that are executed upon the
     entrance through a particular edge.  */
  for (; q < qe; ++q)
    {
      gimple_seq mod = NULL;
      int switch_id;
      unsigned int case_index;

      if (q->index < 0)
	{
	  x = gimple_build_assign (finally_tmp,
				   build_int_cst (integer_type_node,
						  return_index));
	  gimple_seq_add_stmt (&mod, x);
	  do_return_redirection (q, finally_label, mod);
	  switch_id = return_index;
	}
      else
	{
	  x = gimple_build_assign (finally_tmp,
				   build_int_cst (integer_type_node, q->index));
	  gimple_seq_add_stmt (&mod, x);
	  do_goto_redirection (q, finally_label, mod, tf);
	  switch_id = q->index;
	}

      case_index = j + q->index;
      if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
	{
	  tree case_lab;
	  tmp = build_int_cst (integer_type_node, switch_id);
	  case_lab = build_case_label (tmp, NULL,
				       create_artificial_label (tf_loc));
	  /* We store the cont_stmt in the pointer map, so that we can recover
	     it in the loop below.  */
	  if (!cont_map)
	    cont_map = new hash_map<tree, gimple>;
	  cont_map->put (case_lab, q->cont_stmt);
	  case_label_vec.quick_push (case_lab);
	}
    }
  for (j = last_case_index; j < last_case_index + nlabels; j++)
    {
      gimple cont_stmt;

      last_case = case_label_vec[j];

      gcc_assert (last_case);
      gcc_assert (cont_map);

      cont_stmt = *cont_map->get (last_case);

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);
      gimple_seq_add_stmt (&switch_body, cont_stmt);
      maybe_record_in_goto_queue (state, cont_stmt);
    }
  if (cont_map)
    delete cont_map;

  replace_goto_queue (tf);

  /* Make sure that the last case is the default label, as one is required.
     Then sort the labels, which is also required in GIMPLE.  */
  CASE_LOW (last_case) = NULL;
  tree tem = case_label_vec.pop ();
  gcc_assert (tem == last_case);
  sort_case_labels (case_label_vec);

  /* Build the switch statement, setting last_case to be the default
     label.  */
  switch_stmt = gimple_build_switch (finally_tmp, last_case,
				     case_label_vec);
  gimple_set_location (switch_stmt, finally_loc);

  /* Need to link SWITCH_STMT after running replace_goto_queue
     due to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
  gimple_seq_add_seq (&tf->top_p_seq, switch_body);
}
/* Decide whether or not we are going to duplicate the finally block.
   There are several considerations.

   First, if this is Java, then the finally block contains code
   written by the user.  It has line numbers associated with it,
   so duplicating the block means it's difficult to set a breakpoint.
   Since controlling code generation via -g is verboten, we simply
   never duplicate code without optimization.

   Second, we'd like to prevent egregious code growth.  One way to
   do this is to estimate the size of the finally block, multiply
   that by the number of copies we'd need to make, and compare against
   the estimate of the size of the switch machinery we'd have to add.  */

static bool
decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
{
  int f_estimate, sw_estimate;
  geh_else *eh_else;

  /* If there's an EH_ELSE involved, the exception path is separate
     and really doesn't come into play for this computation.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      ndests -= may_throw;
      finally = gimple_eh_else_n_body (eh_else);
    }

  if (!optimize)
    {
      gimple_stmt_iterator gsi;

      if (ndests == 1)
	return true;

      for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  if (!is_gimple_debug (stmt) && !gimple_clobber_p (stmt))
	    return false;
	}
      return true;
    }

  /* Finally estimate N times, plus N gotos.  */
  f_estimate = count_insns_seq (finally, &eni_size_weights);
  f_estimate = (f_estimate + 1) * ndests;

  /* Switch statement (cost 10), N variable assignments, N gotos.  */
  sw_estimate = 10 + 2 * ndests;

  /* Optimize for size clearly wants our best guess.  */
  if (optimize_function_for_size_p (cfun))
    return f_estimate < sw_estimate;

  /* ??? These numbers are completely made up so far.  */
  if (optimize > 1)
    return f_estimate < 100 || f_estimate < sw_estimate * 2;
  else
    return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
}
/* REG is the enclosing region for a possible cleanup region, or the region
   itself.  Returns TRUE if such a region would be unreachable.

   Cleanup regions within a must-not-throw region aren't actually reachable
   even if there are throwing stmts within them, because the personality
   routine will call terminate before unwinding.  */

static bool
cleanup_is_dead_in (eh_region reg)
{
  while (reg && reg->type == ERT_CLEANUP)
    reg = reg->outer;
  return (reg && reg->type == ERT_MUST_NOT_THROW);
}
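
/* For example (illustrative): a region chain CLEANUP -> CLEANUP ->
   MUST_NOT_THROW makes the inner cleanup dead, while CLEANUP -> TRY
   leaves it reachable, so cleanup_is_dead_in returns true only for
   the former.  */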
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY node
   to a sequence of labels and blocks, plus the exception region trees
   that record all the magic.  This is complicated by the need to
   arrange for the FINALLY block to be executed on all exits.  */

static gimple_seq
lower_try_finally (struct leh_state *state, gtry *tp)
{
  struct leh_tf_state this_tf;
  struct leh_state this_state;
  int ndests;
  gimple_seq old_eh_seq;

  /* Process the try block.  */

  memset (&this_tf, 0, sizeof (this_tf));
  this_tf.try_finally_expr = tp;
  this_tf.top_p = tp;
  this_tf.outer = state;
  if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
    {
      this_tf.region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_tf.region;
    }
  else
    {
      this_tf.region = NULL;
      this_state.cur_region = state->cur_region;
    }

  this_state.ehp_region = state->ehp_region;
  this_state.tf = &this_tf;

  old_eh_seq = eh_seq;
  eh_seq = NULL;

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  /* Determine if the try block is escaped through the bottom.  */
  this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));

  /* Determine if any exceptions are possible within the try block.  */
  if (this_tf.region)
    this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
  if (this_tf.may_throw)
    honor_protect_cleanup_actions (state, &this_state, &this_tf);

  /* Determine how many edges (still) reach the finally block.  Or rather,
     how many destinations are reached by the finally block.  Use this to
     determine how we process the finally block itself.  */

  ndests = this_tf.dest_array.length ();
  ndests += this_tf.may_fallthru;
  ndests += this_tf.may_return;
  ndests += this_tf.may_throw;

  /* If the FINALLY block is not reachable, dike it out.  */
  if (ndests == 0)
    {
      gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
      gimple_try_set_cleanup (tp, NULL);
    }
  /* If the finally block doesn't fall through, then any destination
     we might try to impose there isn't reached either.  There may be
     some minor amount of cleanup and redirection still needed.  */
  else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
    lower_try_finally_nofallthru (state, &this_tf);

  /* We can easily special-case redirection to a single destination.  */
  else if (ndests == 1)
    lower_try_finally_onedest (state, &this_tf);
  else if (decide_copy_try_finally (ndests, this_tf.may_throw,
				    gimple_try_cleanup (tp)))
    lower_try_finally_copy (state, &this_tf);
  else
    lower_try_finally_switch (state, &this_tf);

  /* If someone requested we add a label at the end of the transformed
     block, do so.  */
  if (this_tf.fallthru_label)
    {
      /* This must be reached only if ndests == 0.  */
      gimple x = gimple_build_label (this_tf.fallthru_label);
      gimple_seq_add_stmt (&this_tf.top_p_seq, x);
    }

  this_tf.dest_array.release ();
  free (this_tf.goto_queue);
  if (this_tf.goto_queue_map)
    delete this_tf.goto_queue_map;

  /* If there was an old (aka outer) eh_seq, append the current eh_seq.
     If there was no old eh_seq, then the append is trivially already done.  */
  if (old_eh_seq)
    {
      if (eh_seq == NULL)
	eh_seq = old_eh_seq;
      else
	{
	  gimple_seq new_eh_seq = eh_seq;
	  eh_seq = old_eh_seq;
	  gimple_seq_add_seq (&eh_seq, new_eh_seq);
	}
    }

  return this_tf.top_p_seq;
}
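
/* Dispatch recap (illustrative summary, not in the original source):

     ndests == 0                        -> delete the finally block
     finally cannot fall through        -> lower_try_finally_nofallthru
     ndests == 1                        -> lower_try_finally_onedest
     decide_copy_try_finally () is true -> lower_try_finally_copy
     otherwise                          -> lower_try_finally_switch  */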
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
   list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
   exception region trees that record all the magic.  */

static gimple_seq
lower_catch (struct leh_state *state, gtry *tp)
{
  eh_region try_region = NULL;
  struct leh_state this_state = *state;
  gimple_stmt_iterator gsi;
  tree out_label;
  gimple_seq new_seq, cleanup;
  gimple x;
  location_t try_catch_loc = gimple_location (tp);

  if (flag_exceptions)
    {
      try_region = gen_eh_region_try (state->cur_region);
      this_state.cur_region = try_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (!eh_region_may_contain_throw (try_region))
    return gimple_try_eval (tp);

  new_seq = NULL;
  emit_eh_dispatch (&new_seq, try_region);
  emit_resx (&new_seq, try_region);

  this_state.cur_region = state->cur_region;
  this_state.ehp_region = try_region;

  /* Add eh_seq from lowering EH in the cleanup sequence after the cleanup
     itself, so that e.g. for coverage purposes the nested cleanups don't
     appear before the cleanup body.  See PR64634 for details.  */
  gimple_seq old_eh_seq = eh_seq;
  eh_seq = NULL;

  out_label = NULL;
  cleanup = gimple_try_cleanup (tp);
  for (gsi = gsi_start (cleanup);
       !gsi_end_p (gsi);
       gsi_next (&gsi))
    {
      eh_catch c;
      gcatch *catch_stmt;
      gimple_seq handler;

      catch_stmt = as_a <gcatch *> (gsi_stmt (gsi));
      c = gen_eh_region_catch (try_region, gimple_catch_types (catch_stmt));

      handler = gimple_catch_handler (catch_stmt);
      lower_eh_constructs_1 (&this_state, &handler);

      c->label = create_artificial_label (UNKNOWN_LOCATION);
      x = gimple_build_label (c->label);
      gimple_seq_add_stmt (&new_seq, x);

      gimple_seq_add_seq (&new_seq, handler);

      if (gimple_seq_may_fallthru (new_seq))
	{
	  if (!out_label)
	    out_label = create_artificial_label (try_catch_loc);

	  x = gimple_build_goto (out_label);
	  gimple_seq_add_stmt (&new_seq, x);
	}
      if (!c->type_list)
	break;
    }

  gimple_try_set_cleanup (tp, new_seq);

  gimple_seq new_eh_seq = eh_seq;
  eh_seq = old_eh_seq;
  gimple_seq ret_seq = frob_into_branch_around (tp, try_region, out_label);
  gimple_seq_add_seq (&eh_seq, new_eh_seq);
  return ret_seq;
}
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with a
   GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
   region trees that record all the magic.  */

static gimple_seq
lower_eh_filter (struct leh_state *state, gtry *tp)
{
  struct leh_state this_state = *state;
  eh_region this_region = NULL;
  gimple inner, x;
  gimple_seq new_seq;

  inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));

  if (flag_exceptions)
    {
      this_region = gen_eh_region_allowed (state->cur_region,
					   gimple_eh_filter_types (inner));
      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (!eh_region_may_contain_throw (this_region))
    return gimple_try_eval (tp);

  new_seq = NULL;
  this_state.cur_region = state->cur_region;
  this_state.ehp_region = this_region;

  emit_eh_dispatch (&new_seq, this_region);
  emit_resx (&new_seq, this_region);

  this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
  x = gimple_build_label (this_region->u.allowed.label);
  gimple_seq_add_stmt (&new_seq, x);

  lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
  gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));

  gimple_try_set_cleanup (tp, new_seq);

  return frob_into_branch_around (tp, this_region, NULL);
}
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
   a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
   plus the exception region trees that record all the magic.  */

static gimple_seq
lower_eh_must_not_throw (struct leh_state *state, gtry *tp)
{
  struct leh_state this_state = *state;

  if (flag_exceptions)
    {
      gimple inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
      eh_region this_region;

      this_region = gen_eh_region_must_not_throw (state->cur_region);
      this_region->u.must_not_throw.failure_decl
	= gimple_eh_must_not_throw_fndecl (
	    as_a <geh_mnt *> (inner));
      this_region->u.must_not_throw.failure_loc
	= LOCATION_LOCUS (gimple_location (tp));

      /* In order to get mangling applied to this decl, we must mark it
	 used now.  Otherwise, pass_ipa_free_lang_data won't think it
	 needs to happen.  */
      TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;

      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  return gimple_try_eval (tp);
}
/* Implement a cleanup expression.  This is similar to try-finally,
   except that we only execute the cleanup block for exception edges.  */

static gimple_seq
lower_cleanup (struct leh_state *state, gtry *tp)
{
  struct leh_state this_state = *state;
  eh_region this_region = NULL;
  struct leh_tf_state fake_tf;
  gimple_seq result;
  bool cleanup_dead = cleanup_is_dead_in (state->cur_region);

  if (flag_exceptions && !cleanup_dead)
    {
      this_region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (cleanup_dead || !eh_region_may_contain_throw (this_region))
    return gimple_try_eval (tp);

  /* Build enough of a try-finally state so that we can reuse
     honor_protect_cleanup_actions.  */
  memset (&fake_tf, 0, sizeof (fake_tf));
  fake_tf.top_p = fake_tf.try_finally_expr = tp;
  fake_tf.outer = state;
  fake_tf.region = this_region;
  fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
  fake_tf.may_throw = true;

  honor_protect_cleanup_actions (state, NULL, &fake_tf);

  if (fake_tf.may_throw)
    {
      /* In this case honor_protect_cleanup_actions had nothing to do,
	 and we should process this normally.  */
      lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
      result = frob_into_branch_around (tp, this_region,
					fake_tf.fallthru_label);
    }
  else
    {
      /* In this case honor_protect_cleanup_actions did nearly all of
	 the work.  All we have left is to append the fallthru_label.  */

      result = gimple_try_eval (tp);
      if (fake_tf.fallthru_label)
	{
	  gimple x = gimple_build_label (fake_tf.fallthru_label);
	  gimple_seq_add_stmt (&result, x);
	}
    }
  return result;
}
/* Main loop for lowering eh constructs.  Also moves gsi to the next
   statement.  */

static void
lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
{
  gimple_seq replace;
  gimple x;
  gimple stmt = gsi_stmt (*gsi);

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
	tree fndecl = gimple_call_fndecl (stmt);
	tree rhs, lhs;

	if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	  switch (DECL_FUNCTION_CODE (fndecl))
	    {
	    case BUILT_IN_EH_POINTER:
	      /* The front end may have generated a call to
		 __builtin_eh_pointer (0) within a catch region.  Replace
		 this zero argument with the current catch region number.  */
	      if (state->ehp_region)
		{
		  tree nr = build_int_cst (integer_type_node,
					   state->ehp_region->index);
		  gimple_call_set_arg (stmt, 0, nr);
		}
	      else
		{
		  /* The user has done something silly.  Remove it.  */
		  rhs = null_pointer_node;
		  goto do_replace;
		}
	      break;

	    case BUILT_IN_EH_FILTER:
	      /* ??? This should never appear, but since it's a builtin it
		 is accessible to abuse by users.  Just remove it and
		 replace the use with the arbitrary value zero.  */
	      rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
	    do_replace:
	      lhs = gimple_call_lhs (stmt);
	      x = gimple_build_assign (lhs, rhs);
	      gsi_insert_before (gsi, x, GSI_SAME_STMT);
	      /* fall through */

	    case BUILT_IN_EH_COPY_VALUES:
	      /* Likewise this should not appear.  Remove it.  */
	      gsi_remove (gsi, true);
	      return;

	    default:
	      break;
	    }
      }
      /* FALLTHRU */

    case GIMPLE_ASSIGN:
      /* If the stmt can throw use a new temporary for the assignment
	 to a LHS.  This makes sure the old value of the LHS is
	 available on the EH edge.  Only do so for statements that
	 potentially fall through (no noreturn calls e.g.), otherwise
	 this new assignment might create fake fallthru regions.  */
      if (stmt_could_throw_p (stmt)
	  && gimple_has_lhs (stmt)
	  && gimple_stmt_may_fallthru (stmt)
	  && !tree_could_throw_p (gimple_get_lhs (stmt))
	  && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
	{
	  tree lhs = gimple_get_lhs (stmt);
	  tree tmp = create_tmp_var (TREE_TYPE (lhs));
	  gimple s = gimple_build_assign (lhs, tmp);
	  gimple_set_location (s, gimple_location (stmt));
	  gimple_set_block (s, gimple_block (stmt));
	  gimple_set_lhs (stmt, tmp);
	  if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
	      || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
	    DECL_GIMPLE_REG_P (tmp) = 1;
	  gsi_insert_after (gsi, s, GSI_SAME_STMT);
	}
      /* Look for things that can throw exceptions, and record them.  */
      if (state->cur_region && stmt_could_throw_p (stmt))
	{
	  record_stmt_eh_region (state->cur_region, stmt);
	  note_eh_region_may_contain_throw (state->cur_region);
	}
      break;

    case GIMPLE_COND:
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      maybe_record_in_goto_queue (state, stmt);
      break;

    case GIMPLE_SWITCH:
      verify_norecord_switch_expr (state, as_a <gswitch *> (stmt));
      break;

    case GIMPLE_TRY:
      {
	gtry *try_stmt = as_a <gtry *> (stmt);
	if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
	  replace = lower_try_finally (state, try_stmt);
	else
	  {
	    x = gimple_seq_first_stmt (gimple_try_cleanup (try_stmt));
	    if (!x)
	      {
		replace = gimple_try_eval (try_stmt);
		lower_eh_constructs_1 (state, &replace);
	      }
	    else
	      switch (gimple_code (x))
		{
		case GIMPLE_CATCH:
		  replace = lower_catch (state, try_stmt);
		  break;
		case GIMPLE_EH_FILTER:
		  replace = lower_eh_filter (state, try_stmt);
		  break;
		case GIMPLE_EH_MUST_NOT_THROW:
		  replace = lower_eh_must_not_throw (state, try_stmt);
		  break;
		case GIMPLE_EH_ELSE:
		  /* This code is only valid with GIMPLE_TRY_FINALLY.  */
		  gcc_unreachable ();
		default:
		  replace = lower_cleanup (state, try_stmt);
		  break;
		}
	  }
      }

      /* Remove the old stmt and insert the transformed sequence
	 instead.  */
      gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
      gsi_remove (gsi, true);

      /* Return since we don't want gsi_next ().  */
      return;

    case GIMPLE_EH_ELSE:
      /* We should be eliminating this in lower_try_finally et al.  */
      gcc_unreachable ();

    default:
      /* A type, a decl, or some kind of statement that we're not
	 interested in.  Don't walk them.  */
      break;
    }

  gsi_next (gsi);
}
/* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2.  */

static void
lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
{
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
    lower_eh_constructs_2 (state, &gsi);
}
namespace {

const pass_data pass_data_lower_eh =
{
  GIMPLE_PASS, /* type */
  "eh", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  PROP_gimple_leh, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_eh : public gimple_opt_pass
{
public:
  pass_lower_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_eh, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_lower_eh
unsigned int
pass_lower_eh::execute (function *fun)
{
  struct leh_state null_state;
  gimple_seq bodyp;

  bodyp = gimple_body (current_function_decl);
  if (bodyp == NULL)
    return 0;

  finally_tree = new hash_table<finally_tree_hasher> (31);
  eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
  memset (&null_state, 0, sizeof (null_state));

  collect_finally_tree_1 (bodyp, NULL);
  lower_eh_constructs_1 (&null_state, &bodyp);
  gimple_set_body (current_function_decl, bodyp);

  /* We assume there's a return statement, or something, at the end of
     the function, and thus plopping the EH sequence afterward won't
     change anything.  */
  gcc_assert (!gimple_seq_may_fallthru (bodyp));
  gimple_seq_add_seq (&bodyp, eh_seq);

  /* We assume that since BODYP already existed, adding EH_SEQ to it
     didn't change its value, and we don't have to re-set the function.  */
  gcc_assert (bodyp == gimple_body (current_function_decl));

  delete finally_tree;
  finally_tree = NULL;
  BITMAP_FREE (eh_region_may_contain_throw_map);
  eh_seq = NULL;

  /* If this function needs a language specific EH personality routine
     and the frontend didn't already set one do so now.  */
  if (function_needs_eh_personality (fun) == eh_personality_lang
      && !DECL_FUNCTION_PERSONALITY (current_function_decl))
    DECL_FUNCTION_PERSONALITY (current_function_decl)
      = lang_hooks.eh_personality ();

  return 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_lower_eh (gcc::context *ctxt)
{
  return new pass_lower_eh (ctxt);
}
/* Create the multiple edges from an EH_DISPATCH statement to all of
   the possible handlers for its EH region.  Return true if there's
   no fallthru edge; false if there is.  */

bool
make_eh_dispatch_edges (geh_dispatch *stmt)
{
  eh_region r;
  eh_catch c;
  basic_block src, dst;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  src = gimple_bb (stmt);

  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	{
	  dst = label_to_block (c->label);
	  make_edge (src, dst, 0);

	  /* A catch-all handler doesn't have a fallthru.  */
	  if (c->type_list == NULL)
	    return false;
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      dst = label_to_block (r->u.allowed.label);
      make_edge (src, dst, 0);
      break;

    default:
      gcc_unreachable ();
    }

  return true;
}
/* Create the single EH edge from STMT to its nearest landing pad,
   if there is such a landing pad within the current function.  */

void
make_eh_edges (gimple stmt)
{
  basic_block src, dst;
  eh_landing_pad lp;
  int lp_nr;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr <= 0)
    return;

  lp = get_eh_landing_pad_from_number (lp_nr);
  gcc_assert (lp != NULL);

  src = gimple_bb (stmt);
  dst = label_to_block (lp->post_landing_pad);
  make_edge (src, dst, EDGE_EH);
}
/* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
   do not actually perform the final edge redirection.

   CHANGE_REGION is true when we're being called from cleanup_empty_eh and
   we intend to change the destination EH region as well; this means
   EH_LANDING_PAD_NR must already be set on the destination block label.
   If false, we're being called from generic cfg manipulation code and we
   should preserve our place within the region tree.  */

static void
redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
{
  eh_landing_pad old_lp, new_lp;
  basic_block old_bb;
  gimple throw_stmt;
  int old_lp_nr, new_lp_nr;
  tree old_label, new_label;
  edge_iterator ei;
  edge e;

  old_bb = edge_in->dest;
  old_label = gimple_block_label (old_bb);
  old_lp_nr = EH_LANDING_PAD_NR (old_label);
  gcc_assert (old_lp_nr > 0);
  old_lp = get_eh_landing_pad_from_number (old_lp_nr);

  throw_stmt = last_stmt (edge_in->src);
  gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);

  new_label = gimple_block_label (new_bb);

  /* Look for an existing region that might be using NEW_BB already.  */
  new_lp_nr = EH_LANDING_PAD_NR (new_label);
  if (new_lp_nr)
    {
      new_lp = get_eh_landing_pad_from_number (new_lp_nr);
      gcc_assert (new_lp);

      /* Unless CHANGE_REGION is true, the new and old landing pad
	 had better be associated with the same EH region.  */
      gcc_assert (change_region || new_lp->region == old_lp->region);
    }
  else
    {
      new_lp = NULL;
      gcc_assert (!change_region);
    }

  /* Notice when we redirect the last EH edge away from OLD_BB.  */
  FOR_EACH_EDGE (e, ei, old_bb->preds)
    if (e != edge_in && (e->flags & EDGE_EH))
      break;

  if (new_lp)
    {
      /* NEW_LP already exists.  If there are still edges into OLD_LP,
	 there's nothing to do with the EH tree.  If there are no more
	 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
	 If CHANGE_REGION is true, then our caller is expecting to remove
	 the landing pad.  */
      if (e == NULL && !change_region)
	remove_eh_landing_pad (old_lp);
    }
  else
    {
      /* No correct landing pad exists.  If there are no more edges
	 into OLD_LP, then we can simply re-use the existing landing pad.
	 Otherwise, we have to create a new landing pad.  */
      if (e == NULL)
	{
	  EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
	  new_lp = old_lp;
	}
      else
	new_lp = gen_eh_landing_pad (old_lp->region);
      new_lp->post_landing_pad = new_label;
      EH_LANDING_PAD_NR (new_label) = new_lp->index;
    }

  /* Maybe move the throwing statement to the new region.  */
  if (old_lp != new_lp)
    {
      remove_stmt_from_eh_lp (throw_stmt);
      add_stmt_to_eh_lp (throw_stmt, new_lp->index);
    }
}
/* Redirect EH edge E to NEW_BB.  */

edge
redirect_eh_edge (edge edge_in, basic_block new_bb)
{
  redirect_eh_edge_1 (edge_in, new_bb, false);
  return ssa_redirect_edge (edge_in, new_bb);
}
/* This is a subroutine of gimple_redirect_edge_and_branch.  Update the
   labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
   The actual edge update will happen in the caller.  */

void
redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb)
{
  tree new_lab = gimple_block_label (new_bb);
  bool any_changed = false;
  basic_block old_bb;
  eh_region r;
  eh_catch c;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	{
	  old_bb = label_to_block (c->label);
	  if (old_bb == e->dest)
	    {
	      c->label = new_lab;
	      any_changed = true;
	    }
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      old_bb = label_to_block (r->u.allowed.label);
      gcc_assert (old_bb == e->dest);
      r->u.allowed.label = new_lab;
      any_changed = true;
      break;

    default:
      gcc_unreachable ();
    }

  gcc_assert (any_changed);
}
/* Helper function for operation_could_trap_p and stmt_could_throw_p.  */

static bool
operation_could_trap_helper_p (enum tree_code op,
			       bool fp_operation,
			       bool honor_trapv,
			       bool honor_nans,
			       bool honor_snans,
			       tree divisor,
			       bool *handled)
{
  *handled = true;
  switch (op)
    {
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case TRUNC_MOD_EXPR:
    case RDIV_EXPR:
      if (honor_snans || honor_trapv)
	return true;
      if (fp_operation)
	return flag_trapping_math;
      if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
	return true;
      return false;

    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
    case LTGT_EXPR:
      /* Some floating point comparisons may trap.  */
      return honor_nans;

    case EQ_EXPR:
    case NE_EXPR:
    case UNORDERED_EXPR:
    case ORDERED_EXPR:
    case UNLT_EXPR:
    case UNLE_EXPR:
    case UNGT_EXPR:
    case UNGE_EXPR:
    case UNEQ_EXPR:
      return honor_snans;

    case NEGATE_EXPR:
    case ABS_EXPR:
    case CONJ_EXPR:
      /* These operations don't trap with floating point.  */
      if (honor_trapv)
	return true;
      return false;

    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
      /* Any floating arithmetic may trap.  */
      if (fp_operation && flag_trapping_math)
	return true;
      if (honor_trapv)
	return true;
      return false;

    case COMPLEX_EXPR:
    case CONSTRUCTOR:
      /* Constructing an object cannot trap.  */
      return false;

    default:
      /* Any floating arithmetic may trap.  */
      if (fp_operation && flag_trapping_math)
	return true;

      *handled = false;
      return false;
    }
}
/* Return true if operation OP may trap.  FP_OPERATION is true if OP is applied
   on floating-point values.  HONOR_TRAPV is true if OP is applied on integer
   type operands that may trap.  If OP is a division operator, DIVISOR contains
   the value of the divisor.  */

bool
operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
			tree divisor)
{
  bool honor_nans = (fp_operation && flag_trapping_math
		     && !flag_finite_math_only);
  bool honor_snans = fp_operation && flag_signaling_nans != 0;
  bool handled;

  if (TREE_CODE_CLASS (op) != tcc_comparison
      && TREE_CODE_CLASS (op) != tcc_unary
      && TREE_CODE_CLASS (op) != tcc_binary)
    return false;

  return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
					honor_nans, honor_snans, divisor,
					&handled);
}
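/* Illustrative example (editorial addition, not from the GCC sources):

     bool t = operation_could_trap_p (TRUNC_DIV_EXPR, false, false,
				      integer_zero_node);

   yields true, since an integer division whose divisor is not known to
   be nonzero may trap; with a nonzero constant divisor and no -ftrapv
   it would yield false.  */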
/* Returns true if it is possible to prove that the index of
   an array access REF (an ARRAY_REF expression) falls into the
   array bounds.  */

static bool
in_array_bounds_p (tree ref)
{
  tree idx = TREE_OPERAND (ref, 1);
  tree min, max;

  if (TREE_CODE (idx) != INTEGER_CST)
    return false;

  min = array_ref_low_bound (ref);
  max = array_ref_up_bound (ref);
  if (!min
      || !max
      || TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    return false;

  if (tree_int_cst_lt (idx, min)
      || tree_int_cst_lt (max, idx))
    return false;

  return true;
}
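/* Illustrative example (editorial addition, not from the GCC sources):
   for "int a[10]" the access a[4] has idx == 4, min == 0 and max == 9,
   all INTEGER_CSTs, so in_array_bounds_p returns true; a[i] with a
   variable i fails the INTEGER_CST test and returns false, i.e. it is
   not provably in bounds.  */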
/* Returns true if it is possible to prove that the range of
   an array access REF (an ARRAY_RANGE_REF expression) falls
   into the array bounds.  */

static bool
range_in_array_bounds_p (tree ref)
{
  tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
  tree range_min, range_max, min, max;

  range_min = TYPE_MIN_VALUE (domain_type);
  range_max = TYPE_MAX_VALUE (domain_type);
  if (!range_min
      || !range_max
      || TREE_CODE (range_min) != INTEGER_CST
      || TREE_CODE (range_max) != INTEGER_CST)
    return false;

  min = array_ref_low_bound (ref);
  max = array_ref_up_bound (ref);
  if (!min
      || !max
      || TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    return false;

  if (tree_int_cst_lt (range_min, min)
      || tree_int_cst_lt (max, range_max))
    return false;

  return true;
}
/* Return true if EXPR can trap, as in dereferencing an invalid pointer
   location or floating point arithmetic.  C.f. the rtl version, may_trap_p.
   This routine expects only GIMPLE lhs or rhs input.  */

bool
tree_could_trap_p (tree expr)
{
  enum tree_code code;
  bool fp_operation = false;
  bool honor_trapv = false;
  tree t, base, div = NULL_TREE;

  if (!expr)
    return false;

  code = TREE_CODE (expr);
  t = TREE_TYPE (expr);

  if (t)
    {
      if (COMPARISON_CLASS_P (expr))
	fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
      else
	fp_operation = FLOAT_TYPE_P (t);
      honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
    }

  if (TREE_CODE_CLASS (code) == tcc_binary)
    div = TREE_OPERAND (expr, 1);
  if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
    return true;

 restart:
  switch (code)
    {
    case COMPONENT_REF:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case BIT_FIELD_REF:
    case VIEW_CONVERT_EXPR:
    case WITH_SIZE_EXPR:
      expr = TREE_OPERAND (expr, 0);
      code = TREE_CODE (expr);
      goto restart;

    case ARRAY_RANGE_REF:
      base = TREE_OPERAND (expr, 0);
      if (tree_could_trap_p (base))
	return true;
      if (TREE_THIS_NOTRAP (expr))
	return false;
      return !range_in_array_bounds_p (expr);

    case ARRAY_REF:
      base = TREE_OPERAND (expr, 0);
      if (tree_could_trap_p (base))
	return true;
      if (TREE_THIS_NOTRAP (expr))
	return false;
      return !in_array_bounds_p (expr);

    case TARGET_MEM_REF:
    case MEM_REF:
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
	  && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
	return true;
      if (TREE_THIS_NOTRAP (expr))
	return false;
      /* We cannot prove that the access is in-bounds when we have
	 variable-index TARGET_MEM_REFs.  */
      if (code == TARGET_MEM_REF
	  && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
	return true;
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
	{
	  tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
	  offset_int off = mem_ref_offset (expr);
	  if (wi::neg_p (off, SIGNED))
	    return true;
	  if (TREE_CODE (base) == STRING_CST)
	    return wi::leu_p (TREE_STRING_LENGTH (base), off);
	  else if (DECL_SIZE_UNIT (base) == NULL_TREE
		   || TREE_CODE (DECL_SIZE_UNIT (base)) != INTEGER_CST
		   || wi::leu_p (wi::to_offset (DECL_SIZE_UNIT (base)), off))
	    return true;
	  /* Now we are sure the first byte of the access is inside
	     the object.  */
	  return false;
	}
      return true;

    case INDIRECT_REF:
      return !TREE_THIS_NOTRAP (expr);

    case ASM_EXPR:
      return TREE_THIS_VOLATILE (expr);

    case CALL_EXPR:
      t = get_callee_fndecl (expr);
      /* Assume that calls to weak functions may trap.  */
      if (!t || !DECL_P (t))
	return true;
      if (DECL_WEAK (t))
	return tree_could_trap_p (t);
      return false;

    case FUNCTION_DECL:
      /* Assume that accesses to weak functions may trap, unless we know
	 they are certainly defined in current TU or in some other
	 LTO partition.  */
      if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
	{
	  cgraph_node *node = cgraph_node::get (expr);
	  if (node)
	    node = node->function_symbol ();
	  return !(node && node->in_other_partition);
	}
      return false;

    case VAR_DECL:
      /* Assume that accesses to weak vars may trap, unless we know
	 they are certainly defined in current TU or in some other
	 LTO partition.  */
      if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
	{
	  varpool_node *node = varpool_node::get (expr);
	  if (node)
	    node = node->ultimate_alias_target ();
	  return !(node && node->in_other_partition);
	}
      return false;

    default:
      return false;
    }
}
/* Helper for stmt_could_throw_p.  Return true if STMT (assumed to be
   an assignment or a conditional) may throw.  */

static bool
stmt_could_throw_1_p (gimple stmt)
{
  enum tree_code code = gimple_expr_code (stmt);
  bool honor_nans = false;
  bool honor_snans = false;
  bool fp_operation = false;
  bool honor_trapv = false;
  tree t;
  size_t i;
  bool handled, ret;

  if (TREE_CODE_CLASS (code) == tcc_comparison
      || TREE_CODE_CLASS (code) == tcc_unary
      || TREE_CODE_CLASS (code) == tcc_binary)
    {
      if (is_gimple_assign (stmt)
	  && TREE_CODE_CLASS (code) == tcc_comparison)
	t = TREE_TYPE (gimple_assign_rhs1 (stmt));
      else if (gimple_code (stmt) == GIMPLE_COND)
	t = TREE_TYPE (gimple_cond_lhs (stmt));
      else
	t = gimple_expr_type (stmt);
      fp_operation = FLOAT_TYPE_P (t);
      if (fp_operation)
	{
	  honor_nans = flag_trapping_math && !flag_finite_math_only;
	  honor_snans = flag_signaling_nans != 0;
	}
      else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
	honor_trapv = true;
    }

  /* Check if the main expression may trap.  */
  t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
  ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
				       honor_nans, honor_snans, t,
				       &handled);
  if (handled)
    return ret;

  /* If the expression does not trap, see if any of the individual operands may
     trap.  */
  for (i = 0; i < gimple_num_ops (stmt); i++)
    if (tree_could_trap_p (gimple_op (stmt, i)))
      return true;

  return false;
}
/* Return true if statement STMT could throw an exception.  */

bool
stmt_could_throw_p (gimple stmt)
{
  if (!flag_exceptions)
    return false;

  /* The only statements that can throw an exception are assignments,
     conditionals, calls, resx, and asms.  */
  switch (gimple_code (stmt))
    {
    case GIMPLE_RESX:
      return true;

    case GIMPLE_CALL:
      return !gimple_call_nothrow_p (as_a <gcall *> (stmt));

    case GIMPLE_ASSIGN:
    case GIMPLE_COND:
      if (!cfun->can_throw_non_call_exceptions)
	return false;
      return stmt_could_throw_1_p (stmt);

    case GIMPLE_ASM:
      if (!cfun->can_throw_non_call_exceptions)
	return false;
      return gimple_asm_volatile_p (as_a <gasm *> (stmt));

    default:
      return false;
    }
}
/* Return true if expression T could throw an exception.  */

bool
tree_could_throw_p (tree t)
{
  if (!flag_exceptions)
    return false;
  if (TREE_CODE (t) == MODIFY_EXPR)
    {
      if (cfun->can_throw_non_call_exceptions
	  && tree_could_trap_p (TREE_OPERAND (t, 0)))
	return true;
      t = TREE_OPERAND (t, 1);
    }

  if (TREE_CODE (t) == WITH_SIZE_EXPR)
    t = TREE_OPERAND (t, 0);
  if (TREE_CODE (t) == CALL_EXPR)
    return (call_expr_flags (t) & ECF_NOTHROW) == 0;
  if (cfun->can_throw_non_call_exceptions)
    return tree_could_trap_p (t);
  return false;
}
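/* Illustrative example (editorial addition, not from the GCC sources):
   with -fnon-call-exceptions, tree_could_throw_p on "*p = x" returns
   true because the store through p could trap; without it, only a
   CALL_EXPR lacking ECF_NOTHROW reports true.  */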
/* Return true if STMT can throw an exception that is not caught within
   the current function (CFUN).  */

bool
stmt_can_throw_external (gimple stmt)
{
  int lp_nr;

  if (!stmt_could_throw_p (stmt))
    return false;

  lp_nr = lookup_stmt_eh_lp (stmt);
  return lp_nr == 0;
}
/* Return true if STMT can throw an exception that is caught within
   the current function (CFUN).  */

bool
stmt_can_throw_internal (gimple stmt)
{
  int lp_nr;

  if (!stmt_could_throw_p (stmt))
    return false;

  lp_nr = lookup_stmt_eh_lp (stmt);
  return lp_nr > 0;
}
/* Given a statement STMT in IFUN, if STMT can no longer throw, then
   remove any entry it might have from the EH table.  Return true if
   any change was made.  */

bool
maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
{
  if (stmt_could_throw_p (stmt))
    return false;
  return remove_stmt_from_eh_lp_fn (ifun, stmt);
}
/* Likewise, but always use the current function.  */

bool
maybe_clean_eh_stmt (gimple stmt)
{
  return maybe_clean_eh_stmt_fn (cfun, stmt);
}
/* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
   OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
   in the table if it should be in there.  Return TRUE if a replacement was
   done that may require an EH edge purge.  */

bool
maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
{
  int lp_nr = lookup_stmt_eh_lp (old_stmt);

  if (lp_nr != 0)
    {
      bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);

      if (new_stmt == old_stmt && new_stmt_could_throw)
	return false;

      remove_stmt_from_eh_lp (old_stmt);
      if (new_stmt_could_throw)
	{
	  add_stmt_to_eh_lp (new_stmt, lp_nr);
	  return false;
	}
      else
	return true;
    }

  return false;
}
/* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
   in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT.  The MAP
   operand is the return value of duplicate_eh_regions.  */

bool
maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
			    struct function *old_fun, gimple old_stmt,
			    hash_map<void *, void *> *map,
			    int default_lp_nr)
{
  int old_lp_nr, new_lp_nr;

  if (!stmt_could_throw_p (new_stmt))
    return false;

  old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
  if (old_lp_nr == 0)
    {
      if (default_lp_nr == 0)
	return false;
      new_lp_nr = default_lp_nr;
    }
  else if (old_lp_nr > 0)
    {
      eh_landing_pad old_lp, new_lp;

      old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
      new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
      new_lp_nr = new_lp->index;
    }
  else
    {
      eh_region old_r, new_r;

      old_r = (*old_fun->eh->region_array)[-old_lp_nr];
      new_r = static_cast<eh_region> (*map->get (old_r));
      new_lp_nr = -new_r->index;
    }

  add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
  return true;
}
/* Similar, but both OLD_STMT and NEW_STMT are within the current function,
   and thus no remapping is required.  */

bool
maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
{
  int lp_nr;

  if (!stmt_could_throw_p (new_stmt))
    return false;

  lp_nr = lookup_stmt_eh_lp (old_stmt);
  if (lp_nr == 0)
    return false;

  add_stmt_to_eh_lp (new_stmt, lp_nr);
  return true;
}
/* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
   GIMPLE_TRY) that are similar enough to be considered the same.  Currently
   this only handles handlers consisting of a single call, as that's the
   important case for C++: a destructor call for a particular object showing
   up in multiple handlers.  */

static bool
same_handler_p (gimple_seq oneh, gimple_seq twoh)
{
  gimple_stmt_iterator gsi;
  gimple ones, twos;
  unsigned int ai;

  gsi = gsi_start (oneh);
  if (!gsi_one_before_end_p (gsi))
    return false;
  ones = gsi_stmt (gsi);

  gsi = gsi_start (twoh);
  if (!gsi_one_before_end_p (gsi))
    return false;
  twos = gsi_stmt (gsi);

  if (!is_gimple_call (ones)
      || !is_gimple_call (twos)
      || gimple_call_lhs (ones)
      || gimple_call_lhs (twos)
      || gimple_call_chain (ones)
      || gimple_call_chain (twos)
      || !gimple_call_same_target_p (ones, twos)
      || gimple_call_num_args (ones) != gimple_call_num_args (twos))
    return false;

  for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
    if (!operand_equal_p (gimple_call_arg (ones, ai),
			  gimple_call_arg (twos, ai), 0))
      return false;

  return true;
}
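/* Illustrative example (editorial addition, not from the GCC sources):
   two cleanup sequences each consisting solely of "~A (&a);" with the
   same callee and argument satisfy same_handler_p; that is what lets
   optimize_double_finally below merge the duplicated destructor
   call.  */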
/* Optimize
    try { A() } finally { try { ~B() } catch { ~A() } }
    try { ... } finally { ~A() }
   into
    try { A() } catch { ~B() }
    try { ~B() ... } finally { ~A() }

   This occurs frequently in C++, where A is a local variable and B is a
   temporary used in the initializer for A.  */

static void
optimize_double_finally (gtry *one, gtry *two)
{
  gimple oneh;
  gimple_stmt_iterator gsi;
  gimple_seq cleanup;

  cleanup = gimple_try_cleanup (one);
  gsi = gsi_start (cleanup);
  if (!gsi_one_before_end_p (gsi))
    return;

  oneh = gsi_stmt (gsi);
  if (gimple_code (oneh) != GIMPLE_TRY
      || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
    return;

  if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
    {
      gimple_seq seq = gimple_try_eval (oneh);

      gimple_try_set_cleanup (one, seq);
      gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
      seq = copy_gimple_seq_and_replace_locals (seq);
      gimple_seq_add_seq (&seq, gimple_try_eval (two));
      gimple_try_set_eval (two, seq);
    }
}
/* Perform EH refactoring optimizations that are simpler to do when code
   flow has been lowered but EH structures haven't.  */

static void
refactor_eh_r (gimple_seq seq)
{
  gimple_stmt_iterator gsi;
  gimple one, two;

  one = NULL;
  two = NULL;
  gsi = gsi_start (seq);
  while (1)
    {
      one = two;
      if (gsi_end_p (gsi))
	two = NULL;
      else
	two = gsi_stmt (gsi);
      if (one && two)
	if (gtry *try_one = dyn_cast <gtry *> (one))
	  if (gtry *try_two = dyn_cast <gtry *> (two))
	    if (gimple_try_kind (try_one) == GIMPLE_TRY_FINALLY
		&& gimple_try_kind (try_two) == GIMPLE_TRY_FINALLY)
	      optimize_double_finally (try_one, try_two);
      if (one)
	switch (gimple_code (one))
	  {
	  case GIMPLE_TRY:
	    refactor_eh_r (gimple_try_eval (one));
	    refactor_eh_r (gimple_try_cleanup (one));
	    break;
	  case GIMPLE_CATCH:
	    refactor_eh_r (gimple_catch_handler (as_a <gcatch *> (one)));
	    break;
	  case GIMPLE_EH_FILTER:
	    refactor_eh_r (gimple_eh_filter_failure (one));
	    break;
	  case GIMPLE_EH_ELSE:
	    {
	      geh_else *eh_else_stmt = as_a <geh_else *> (one);
	      refactor_eh_r (gimple_eh_else_n_body (eh_else_stmt));
	      refactor_eh_r (gimple_eh_else_e_body (eh_else_stmt));
	    }
	    break;
	  default:
	    break;
	  }
      if (two)
	gsi_next (&gsi);
      else
	break;
    }
}
namespace {

const pass_data pass_data_refactor_eh =
{
  GIMPLE_PASS, /* type */
  "ehopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_refactor_eh : public gimple_opt_pass
{
public:
  pass_refactor_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_refactor_eh, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_exceptions != 0; }
  virtual unsigned int execute (function *)
    {
      refactor_eh_r (gimple_body (current_function_decl));
      return 0;
    }

}; // class pass_refactor_eh

} // anon namespace

gimple_opt_pass *
make_pass_refactor_eh (gcc::context *ctxt)
{
  return new pass_refactor_eh (ctxt);
}
/* At the end of gimple optimization, we can lower RESX.  */

static bool
lower_resx (basic_block bb, gresx *stmt,
	    hash_map<eh_region, tree> *mnt_map)
{
  int lp_nr;
  eh_region src_r, dst_r;
  gimple_stmt_iterator gsi;
  gimple x;
  tree fn, src_nr;
  bool ret = false;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0)
    dst_r = get_eh_region_from_lp_number (lp_nr);
  else
    dst_r = NULL;

  src_r = get_eh_region_from_number (gimple_resx_region (stmt));
  gsi = gsi_last_bb (bb);

  if (src_r == NULL)
    {
      /* We can wind up with no source region when pass_cleanup_eh shows
	 that there are no entries into an eh region and deletes it, but
	 then the block that contains the resx isn't removed.  This can
	 happen without optimization when the switch statement created by
	 lower_try_finally_switch isn't simplified to remove the eh case.

	 Resolve this by expanding the resx node to an abort.  */

      fn = builtin_decl_implicit (BUILT_IN_TRAP);
      x = gimple_build_call (fn, 0);
      gsi_insert_before (&gsi, x, GSI_SAME_STMT);

      while (EDGE_COUNT (bb->succs) > 0)
	remove_edge (EDGE_SUCC (bb, 0));
    }
  else if (dst_r)
    {
      /* When we have a destination region, we resolve this by copying
	 the excptr and filter values into place, and changing the edge
	 to immediately after the landing pad.  */
      edge e;

      if (lp_nr < 0)
	{
	  basic_block new_bb;
	  tree lab;

	  /* We are resuming into a MUST_NOT_THROW region.  Expand a call to
	     the failure decl into a new block, if needed.  */
	  gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);

	  tree *slot = mnt_map->get (dst_r);
	  if (slot == NULL)
	    {
	      gimple_stmt_iterator gsi2;

	      new_bb = create_empty_bb (bb);
	      add_bb_to_loop (new_bb, bb->loop_father);
	      lab = gimple_block_label (new_bb);
	      gsi2 = gsi_start_bb (new_bb);

	      fn = dst_r->u.must_not_throw.failure_decl;
	      x = gimple_build_call (fn, 0);
	      gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
	      gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);

	      mnt_map->put (dst_r, lab);
	    }
	  else
	    {
	      lab = *slot;
	      new_bb = label_to_block (lab);
	    }

	  gcc_assert (EDGE_COUNT (bb->succs) == 0);
	  e = make_edge (bb, new_bb, EDGE_FALLTHRU);
	  e->count = bb->count;
	  e->probability = REG_BR_PROB_BASE;
	}
      else
	{
	  edge_iterator ei;
	  tree dst_nr = build_int_cst (integer_type_node, dst_r->index);

	  fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
	  src_nr = build_int_cst (integer_type_node, src_r->index);
	  x = gimple_build_call (fn, 2, dst_nr, src_nr);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	  /* Update the flags for the outgoing edge.  */
	  e = single_succ_edge (bb);
	  gcc_assert (e->flags & EDGE_EH);
	  e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;

	  /* If there are no more EH users of the landing pad, delete it.  */
	  FOR_EACH_EDGE (e, ei, e->dest->preds)
	    if (e->flags & EDGE_EH)
	      break;
	  if (e == NULL)
	    {
	      eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
	      remove_eh_landing_pad (lp);
	    }
	}

      ret = true;
    }
  else
    {
      tree var;

      /* When we don't have a destination region, this exception escapes
	 up the call chain.  We resolve this by generating a call to the
	 _Unwind_Resume library function.  */

      /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
	 with no arguments for C++ and Java.  Check for that.  */
      if (src_r->use_cxa_end_cleanup)
	{
	  fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
	  x = gimple_build_call (fn, 0);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
	}
      else
	{
	  fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
	  src_nr = build_int_cst (integer_type_node, src_r->index);
	  x = gimple_build_call (fn, 1, src_nr);
	  var = create_tmp_var (ptr_type_node);
	  var = make_ssa_name (var, x);
	  gimple_call_set_lhs (x, var);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	  fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
	  x = gimple_build_call (fn, 1, var);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
	}

      gcc_assert (EDGE_COUNT (bb->succs) == 0);
    }

  gsi_remove (&gsi, true);

  return ret;
}
namespace {

const pass_data pass_data_lower_resx =
{
  GIMPLE_PASS, /* type */
  "resx", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_resx : public gimple_opt_pass
{
public:
  pass_lower_resx (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_resx, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_exceptions != 0; }
  virtual unsigned int execute (function *);

}; // class pass_lower_resx

unsigned int
pass_lower_resx::execute (function *fun)
{
  basic_block bb;
  bool dominance_invalidated = false;
  bool any_rewritten = false;

  hash_map<eh_region, tree> mnt_map;

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple last = last_stmt (bb);
      if (last && is_gimple_resx (last))
	{
	  dominance_invalidated |=
	    lower_resx (bb, as_a <gresx *> (last), &mnt_map);
	  any_rewritten = true;
	}
    }

  if (dominance_invalidated)
    {
      free_dominance_info (CDI_DOMINATORS);
      free_dominance_info (CDI_POST_DOMINATORS);
    }

  return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_lower_resx (gcc::context *ctxt)
{
  return new pass_lower_resx (ctxt);
}
/* Try to optimize var = {v} {CLOBBER} stmts followed just by
   external throw.  */

static void
optimize_clobbers (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  bool any_clobbers = false;
  bool seen_stack_restore = false;
  edge_iterator ei;
  edge e;

  /* Only optimize anything if the bb contains at least one clobber,
     ends with resx (checked by caller), optionally contains some
     debug stmts or labels, or at most one __builtin_stack_restore
     call, and has an incoming EH edge.  */
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_clobber_p (stmt))
	{
	  any_clobbers = true;
	  continue;
	}
      if (!seen_stack_restore
	  && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
	{
	  seen_stack_restore = true;
	  continue;
	}
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      return;
    }
  if (!any_clobbers)
    return;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (e->flags & EDGE_EH)
      break;
  if (e == NULL)
    return;
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (!gimple_clobber_p (stmt))
	continue;
      unlink_stmt_vdef (stmt);
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}
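/* Illustrative example (editorial addition, not from the GCC sources):
   a candidate block looks roughly like

     <L1>:
     a = {v} {CLOBBER};
     b = {v} {CLOBBER};
     resx 1;   // throws externally

   reached only via EH edges; the clobbers are dead on that path and
   are removed above.  */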
/* Try to sink var = {v} {CLOBBER} stmts followed just by
   internal throw to successor BB.  */

static int
sink_clobbers (basic_block bb)
{
  edge e;
  edge_iterator ei;
  gimple_stmt_iterator gsi, dgsi;
  basic_block succbb;
  bool any_clobbers = false;
  unsigned todo = 0;

  /* Only optimize if BB has a single EH successor and
     all predecessor edges are EH too.  */
  if (!single_succ_p (bb)
      || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
    return 0;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if ((e->flags & EDGE_EH) == 0)
	return 0;
    }

  /* And BB contains only CLOBBER stmts before the final
     RESX.  */
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      if (!gimple_clobber_p (stmt))
	return 0;
      any_clobbers = true;
    }
  if (!any_clobbers)
    return 0;

  edge succe = single_succ_edge (bb);
  succbb = succe->dest;

  /* See if there is a virtual PHI node to take an updated virtual
     operand from.  */
  gphi *vphi = NULL;
  tree vuse = NULL_TREE;
  for (gphi_iterator gpi = gsi_start_phis (succbb);
       !gsi_end_p (gpi); gsi_next (&gpi))
    {
      tree res = gimple_phi_result (gpi.phi ());
      if (virtual_operand_p (res))
	{
	  vphi = gpi.phi ();
	  vuse = res;
	  break;
	}
    }

  dgsi = gsi_after_labels (succbb);
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      tree lhs;
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      lhs = gimple_assign_lhs (stmt);
      /* Unfortunately we don't have dominance info updated at this
	 point, so checking if
	 dominated_by_p (CDI_DOMINATORS, succbb,
			 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0)))
	 would be too costly.  Thus, avoid sinking any clobbers that
	 refer to non-(D) SSA_NAMEs.  */
      if (TREE_CODE (lhs) == MEM_REF
	  && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
	  && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
	{
	  unlink_stmt_vdef (stmt);
	  gsi_remove (&gsi, true);
	  release_defs (stmt);
	  continue;
	}

      /* As we do not change stmt order when sinking across a
	 forwarder edge we can keep virtual operands in place.  */
      gsi_remove (&gsi, false);
      gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);

      /* But adjust virtual operands if we sunk across a PHI node.  */
      if (vuse)
	{
	  gimple use_stmt;
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
	      SET_USE (use_p, gimple_vdef (stmt));
	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
	    {
	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
	    }
	  /* Adjust the incoming virtual operand.  */
	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
	  SET_USE (gimple_vuse_op (stmt), vuse);
	}
      /* If there isn't a single predecessor but no virtual PHI node
	 arrange for virtual operands to be renamed.  */
      else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
	       && !single_pred_p (succbb))
	{
	  /* In this case there will be no use of the VDEF of this stmt.
	     ??? Unless this is a secondary opportunity and we have not
	     removed unreachable blocks yet, so we cannot assert this.
	     Which also means we will end up renaming too many times.  */
	  SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
	  mark_virtual_operands_for_renaming (cfun);
	  todo |= TODO_update_ssa_only_virtuals;
	}
    }

  return todo;
}
/* At the end of inlining, we can lower EH_DISPATCH.  Return true when
   we have found some duplicate labels and removed some edges.  */

static bool
lower_eh_dispatch (basic_block src, geh_dispatch *stmt)
{
  gimple_stmt_iterator gsi;
  int region_nr;
  eh_region r;
  tree filter, fn;
  gimple x;
  bool redirected = false;

  region_nr = gimple_eh_dispatch_region (stmt);
  r = get_eh_region_from_number (region_nr);

  gsi = gsi_last_bb (src);

  switch (r->type)
    {
    case ERT_TRY:
      {
	auto_vec<tree> labels;
	tree default_label = NULL;
	eh_catch c;
	edge_iterator ei;
	edge e;
	hash_set<tree> seen_values;

	/* Collect the labels for a switch.  Zero the post_landing_pad
	   field because we'll no longer have anything keeping these labels
	   in existence and the optimizer will be free to merge these
	   blocks at will.  */
	for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	  {
	    tree tp_node, flt_node, lab = c->label;
	    bool have_label = false;

	    c->label = NULL;
	    tp_node = c->type_list;
	    flt_node = c->filter_list;

	    if (tp_node == NULL)
	      {
		default_label = lab;
		break;
	      }
	    do
	      {
		/* Filter out duplicate labels that arise when this handler
		   is shadowed by an earlier one.  When no labels are
		   attached to the handler anymore, we remove
		   the corresponding edge and then we delete unreachable
		   blocks at the end of this pass.  */
		if (! seen_values.contains (TREE_VALUE (flt_node)))
		  {
		    tree t = build_case_label (TREE_VALUE (flt_node),
					       NULL, lab);
		    labels.safe_push (t);
		    seen_values.add (TREE_VALUE (flt_node));
		    have_label = true;
		  }

		tp_node = TREE_CHAIN (tp_node);
		flt_node = TREE_CHAIN (flt_node);
	      }
	    while (tp_node);
	    if (! have_label)
	      {
		remove_edge (find_edge (src, label_to_block (lab)));
		redirected = true;
	      }
	  }

	/* Clean up the edge flags.  */
	FOR_EACH_EDGE (e, ei, src->succs)
	  {
	    if (e->flags & EDGE_FALLTHRU)
	      {
		/* If there was no catch-all, use the fallthru edge.  */
		if (default_label == NULL)
		  default_label = gimple_block_label (e->dest);
		e->flags &= ~EDGE_FALLTHRU;
	      }
	  }
	gcc_assert (default_label != NULL);

	/* Don't generate a switch if there's only a default case.
	   This is common in the form of try { A; } catch (...) { B; }.  */
	if (!labels.exists ())
	  {
	    e = single_succ_edge (src);
	    e->flags |= EDGE_FALLTHRU;
	  }
	else
	  {
	    fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
	    x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
							 region_nr));
	    filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
	    filter = make_ssa_name (filter, x);
	    gimple_call_set_lhs (x, filter);
	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	    /* Turn the default label into a default case.  */
	    default_label = build_case_label (NULL, NULL, default_label);
	    sort_case_labels (labels);

	    x = gimple_build_switch (filter, default_label, labels);
	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
	  }
      }
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      {
	edge b_e = BRANCH_EDGE (src);
	edge f_e = FALLTHRU_EDGE (src);

	fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
	x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
						     region_nr));
	filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
	filter = make_ssa_name (filter, x);
	gimple_call_set_lhs (x, filter);
	gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	r->u.allowed.label = NULL;
	x = gimple_build_cond (EQ_EXPR, filter,
			       build_int_cst (TREE_TYPE (filter),
					      r->u.allowed.filter),
			       NULL_TREE, NULL_TREE);
	gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
	f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Replace the EH_DISPATCH with the SWITCH or COND generated above.  */
  gsi_remove (&gsi, true);
  return redirected;
}
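/* Illustrative sketch (editorial addition, not from the GCC sources):
   for an ERT_TRY region with two typed handlers, the eh_dispatch above
   is replaced by roughly

     filter = __builtin_eh_filter (region_nr);
     switch (filter)
       {
       case 1: goto <catch_int>;
       case 2: goto <catch_double>;
       default: goto <fallthru>;
       }

   where the case values are the filter values assigned by
   assign_filter_values.  */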
namespace {

const pass_data pass_data_lower_eh_dispatch =
{
  GIMPLE_PASS, /* type */
  "ehdisp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_eh_dispatch : public gimple_opt_pass
{
public:
  pass_lower_eh_dispatch (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
  virtual unsigned int execute (function *);

}; // class pass_lower_eh_dispatch

unsigned int
pass_lower_eh_dispatch::execute (function *fun)
{
  basic_block bb;
  int flags = 0;
  bool redirected = false;

  assign_filter_values ();

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple last = last_stmt (bb);
      if (last == NULL)
	continue;
      if (gimple_code (last) == GIMPLE_EH_DISPATCH)
	{
	  redirected |= lower_eh_dispatch (bb,
					   as_a <geh_dispatch *> (last));
	  flags |= TODO_update_ssa_only_virtuals;
	}
      else if (gimple_code (last) == GIMPLE_RESX)
	{
	  if (stmt_can_throw_external (last))
	    optimize_clobbers (bb);
	  else
	    flags |= sink_clobbers (bb);
	}
    }

  if (redirected)
    delete_unreachable_blocks ();
  return flags;
}

} // anon namespace

gimple_opt_pass *
make_pass_lower_eh_dispatch (gcc::context *ctxt)
{
  return new pass_lower_eh_dispatch (ctxt);
}
/* Walk statements, see what regions and, optionally, landing pads
   are really referenced.

   Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
   and in LP_REACHABLE an sbitmap with bits set for reachable landing pads.

   Passing NULL for LP_REACHABLE is valid, in this case only reachable
   regions are marked.

   The caller is responsible for freeing the returned sbitmaps.  */

static void
mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
{
  sbitmap r_reachable, lp_reachable;
  basic_block bb;
  bool mark_landing_pads = (lp_reachablep != NULL);
  gcc_checking_assert (r_reachablep != NULL);

  r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
  bitmap_clear (r_reachable);
  *r_reachablep = r_reachable;

  if (mark_landing_pads)
    {
      lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
      bitmap_clear (lp_reachable);
      *lp_reachablep = lp_reachable;
    }
  else
    lp_reachable = NULL;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);

	  if (mark_landing_pads)
	    {
	      int lp_nr = lookup_stmt_eh_lp (stmt);

	      /* Negative LP numbers are MUST_NOT_THROW regions which
		 are not considered BB enders.  */
	      if (lp_nr < 0)
		bitmap_set_bit (r_reachable, -lp_nr);

	      /* Positive LP numbers are real landing pads, and BB enders.  */
	      else if (lp_nr > 0)
		{
		  gcc_assert (gsi_one_before_end_p (gsi));
		  eh_region region = get_eh_region_from_lp_number (lp_nr);
		  bitmap_set_bit (r_reachable, region->index);
		  bitmap_set_bit (lp_reachable, lp_nr);
		}
	    }

	  /* Avoid removing regions referenced from RESX/EH_DISPATCH.  */
	  switch (gimple_code (stmt))
	    {
	    case GIMPLE_RESX:
	      bitmap_set_bit (r_reachable,
			      gimple_resx_region (as_a <gresx *> (stmt)));
	      break;
	    case GIMPLE_EH_DISPATCH:
	      bitmap_set_bit (r_reachable,
			      gimple_eh_dispatch_region (
				as_a <geh_dispatch *> (stmt)));
	      break;
	    case GIMPLE_CALL:
	      if (gimple_call_builtin_p (stmt, BUILT_IN_EH_COPY_VALUES))
		for (int i = 0; i < 2; ++i)
		  {
		    tree rt = gimple_call_arg (stmt, i);
		    HOST_WIDE_INT ri = tree_to_shwi (rt);

		    gcc_assert (ri == (int)ri);
		    bitmap_set_bit (r_reachable, ri);
		  }
	      break;
	    default:
	      break;
	    }
	}
    }
}
/* Remove unreachable handlers and unreachable landing pads.  */

static void
remove_unreachable_handlers (void)
{
  sbitmap r_reachable, lp_reachable;
  eh_region region;
  eh_landing_pad lp;
  unsigned i;

  mark_reachable_handlers (&r_reachable, &lp_reachable);

  if (dump_file)
    {
      fprintf (dump_file, "Before removal of unreachable regions:\n");
      dump_eh_tree (dump_file, cfun);
      fprintf (dump_file, "Reachable regions: ");
      dump_bitmap_file (dump_file, r_reachable);
      fprintf (dump_file, "Reachable landing pads: ");
      dump_bitmap_file (dump_file, lp_reachable);
    }

  if (dump_file)
    {
      FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
	if (region && !bitmap_bit_p (r_reachable, region->index))
	  fprintf (dump_file,
		   "Removing unreachable region %d\n",
		   region->index);
    }

  remove_unreachable_eh_regions (r_reachable);

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
    if (lp && !bitmap_bit_p (lp_reachable, lp->index))
      {
	if (dump_file)
	  fprintf (dump_file,
		   "Removing unreachable landing pad %d\n",
		   lp->index);
	remove_eh_landing_pad (lp);
      }

  if (dump_file)
    {
      fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
      dump_eh_tree (dump_file, cfun);
      fprintf (dump_file, "\n\n");
    }

  sbitmap_free (r_reachable);
  sbitmap_free (lp_reachable);

#ifdef ENABLE_CHECKING
  verify_eh_tree (cfun);
#endif
}
/* Remove unreachable handlers if any landing pads have been removed after
   last ehcleanup pass (due to gimple_purge_dead_eh_edges).  */

void
maybe_remove_unreachable_handlers (void)
{
  eh_landing_pad lp;
  unsigned i;

  if (cfun->eh == NULL)
    return;

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
    if (lp && lp->post_landing_pad)
      {
	if (label_to_block (lp->post_landing_pad) == NULL)
	  {
	    remove_unreachable_handlers ();
	    return;
	  }
      }
}
/* Remove regions that do not have landing pads.  This assumes
   that remove_unreachable_handlers has already been run, and
   that we've just manipulated the landing pads since then.

   Preserve regions with landing pads and regions that prevent
   exceptions from propagating further, even if these regions
   are not reachable.  */

static void
remove_unreachable_handlers_no_lp (void)
{
  eh_region region;
  sbitmap r_reachable;
  unsigned i;

  mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
    {
      if (region)
	{
	  if (region->landing_pads != NULL
	      || region->type == ERT_MUST_NOT_THROW)
	    bitmap_set_bit (r_reachable, region->index);

	  if (dump_file
	      && !bitmap_bit_p (r_reachable, region->index))
	    fprintf (dump_file,
		     "Removing unreachable region %d\n",
		     region->index);
	}
    }

  remove_unreachable_eh_regions (r_reachable);

  sbitmap_free (r_reachable);
}
/* Undo critical edge splitting on an EH landing pad.  Earlier, we
   optimistically split all sorts of edges, including EH edges.  The
   optimization passes in between may not have needed them; if not,
   we should undo the split.

   Recognize this case by having one EH edge incoming to the BB and
   one normal edge outgoing; BB should be empty apart from the
   post_landing_pad label.

   Note that this is slightly different from the empty handler case
   handled by cleanup_empty_eh, in that the actual handler may yet
   have actual code but the landing pad has been separated from the
   handler.  As such, cleanup_empty_eh relies on this transformation
   having been done first.  */

static bool
unsplit_eh (eh_landing_pad lp)
{
  basic_block bb = label_to_block (lp->post_landing_pad);
  gimple_stmt_iterator gsi;
  edge e_in, e_out;

  /* Quickly check the edge counts on BB for singularity.  */
  if (!single_pred_p (bb) || !single_succ_p (bb))
    return false;
  e_in = single_pred_edge (bb);
  e_out = single_succ_edge (bb);

  /* Input edge must be EH and output edge must be normal.  */
  if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
    return false;

  /* The block must be empty except for the labels and debug insns.  */
  gsi = gsi_after_labels (bb);
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);
  if (!gsi_end_p (gsi))
    return false;

  /* The destination block must not already have a landing pad
     for a different region.  */
  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
      tree lab;
      int lp_nr;

      if (!label_stmt)
	break;
      lab = gimple_label_label (label_stmt);
      lp_nr = EH_LANDING_PAD_NR (lab);
      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
	return false;
    }

  /* The new destination block must not already be a destination of
     the source block, lest we merge fallthru and eh edges and get
     all sorts of confused.  */
  if (find_edge (e_in->src, e_out->dest))
    return false;

  /* ??? We can get degenerate phis due to cfg cleanups.  I would have
     thought this should have been cleaned up by a phicprop pass, but
     that doesn't appear to handle virtuals.  Propagate by hand.  */
  if (!gimple_seq_empty_p (phi_nodes (bb)))
    {
      for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); )
	{
	  gimple use_stmt;
	  gphi *phi = gpi.phi ();
	  tree lhs = gimple_phi_result (phi);
	  tree rhs = gimple_phi_arg_def (phi, 0);
	  use_operand_p use_p;
	  imm_use_iterator iter;

	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, rhs);
	    }

	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
	    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;

	  remove_phi_node (&gpi, true);
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
	     lp->index, e_out->dest->index);

  /* Redirect the edge.  Since redirect_eh_edge_1 expects to be moving
     a successor edge, humor it.  But do the real CFG change with the
     predecessor of E_OUT in order to preserve the ordering of arguments
     to the PHI nodes in E_OUT->DEST.  */
  redirect_eh_edge_1 (e_in, e_out->dest, false);
  redirect_edge_pred (e_out, e_in->src);
  e_out->flags = e_in->flags;
  e_out->probability = e_in->probability;
  e_out->count = e_in->count;
  remove_edge (e_in);

  return true;
}
/* Examine each landing pad block and see if it matches unsplit_eh.  */

static bool
unsplit_all_eh (void)
{
  bool changed = false;
  eh_landing_pad lp;
  int i;

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp)
      changed |= unsplit_eh (lp);

  return changed;
}
/* A subroutine of cleanup_empty_eh.  Redirect all EH edges incoming
   to OLD_BB to NEW_BB; return true on success, false on failure.

   OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
   PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
   Virtual PHIs may be deleted and marked for renaming.  */

static bool
cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
			     edge old_bb_out, bool change_region)
{
  gphi_iterator ngsi, ogsi;
  edge_iterator ei;
  edge e;
  bitmap ophi_handled;

  /* The destination block must not be a regular successor for any
     of the preds of the landing pad.  Thus, avoid turning
        <..>
	 |  \
	 |  <..>
	 |  /
	<..>
     into
        <..>
	 |  |
	 |  |
	<..>
     which CFG verification would choke on.  See PR45172 and PR51089.  */
  FOR_EACH_EDGE (e, ei, old_bb->preds)
    if (find_edge (e->src, new_bb))
      return false;

  FOR_EACH_EDGE (e, ei, old_bb->preds)
    redirect_edge_var_map_clear (e);

  ophi_handled = BITMAP_ALLOC (NULL);

  /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
     for the edges we're going to move.  */
  for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
    {
      gphi *ophi, *nphi = ngsi.phi ();
      tree nresult, nop;

      nresult = gimple_phi_result (nphi);
      nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);

      /* Find the corresponding PHI in OLD_BB so we can forward-propagate
	 the source ssa_name.  */
      ophi = NULL;
      for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
	{
	  ophi = ogsi.phi ();
	  if (gimple_phi_result (ophi) == nop)
	    break;
	  ophi = NULL;
	}

      /* If we did find the corresponding PHI, copy those inputs.  */
      if (ophi)
	{
	  /* If NOP is used somewhere else beyond phis in new_bb, give up.  */
	  if (!has_single_use (nop))
	    {
	      imm_use_iterator imm_iter;
	      use_operand_p use_p;

	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
		{
		  if (!gimple_debug_bind_p (USE_STMT (use_p))
		      && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
			  || gimple_bb (USE_STMT (use_p)) != new_bb))
		    goto fail;
		}
	    }
	  bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
	  FOR_EACH_EDGE (e, ei, old_bb->preds)
	    {
	      location_t oloc;
	      tree oop;

	      if ((e->flags & EDGE_EH) == 0)
		continue;
	      oop = gimple_phi_arg_def (ophi, e->dest_idx);
	      oloc = gimple_phi_arg_location (ophi, e->dest_idx);
	      redirect_edge_var_map_add (e, nresult, oop, oloc);
	    }
	}
      /* If we didn't find the PHI, if it's a real variable or a VOP, we know
	 from the fact that OLD_BB is tree_empty_eh_handler_p that the
	 variable is unchanged from input to the block and we can simply
	 re-use the input to NEW_BB from the OLD_BB_OUT edge.  */
      else
	{
	  location_t nloc
	    = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
	  FOR_EACH_EDGE (e, ei, old_bb->preds)
	    redirect_edge_var_map_add (e, nresult, nop, nloc);
	}
    }

  /* Second, verify that all PHIs from OLD_BB have been handled.  If not,
     we don't know what values from the other edges into NEW_BB to use.  */
  for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
    {
      gphi *ophi = ogsi.phi ();
      tree oresult = gimple_phi_result (ophi);
      if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
	goto fail;
    }

  /* Finally, move the edges and update the PHIs.  */
  for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
    if (e->flags & EDGE_EH)
      {
	/* ??? CFG manipulation routines do not try to update loop
	   form on edge redirection.  Do so manually here for now.  */
	/* If we redirect a loop entry or latch edge, that will either create
	   a multiple entry loop or rotate the loop.  If the loops merge
	   we may have created a loop with multiple latches.
	   All of this isn't easily fixed thus cancel the affected loop
	   and mark the other loop as possibly having multiple latches.  */
	if (e->dest == e->dest->loop_father->header)
	  {
	    mark_loop_for_removal (e->dest->loop_father);
	    new_bb->loop_father->latch = NULL;
	    loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
	  }
	redirect_eh_edge_1 (e, new_bb, change_region);
	redirect_edge_succ (e, new_bb);
	flush_pending_stmts (e);
      }
    else
      ei_next (&ei);

  BITMAP_FREE (ophi_handled);
  return true;

 fail:
  FOR_EACH_EDGE (e, ei, old_bb->preds)
    redirect_edge_var_map_clear (e);
  BITMAP_FREE (ophi_handled);
  return false;
}
/* A subroutine of cleanup_empty_eh.  Move a landing pad LP from its
   old region to NEW_REGION at BB.  */

static void
cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
			  eh_landing_pad lp, eh_region new_region)
{
  gimple_stmt_iterator gsi;
  eh_landing_pad *pp;

  for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
    continue;
  *pp = lp->next_lp;

  lp->region = new_region;
  lp->next_lp = new_region->landing_pads;
  new_region->landing_pads = lp;

  /* Delete the RESX that was matched within the empty handler block.  */
  gsi = gsi_last_bb (bb);
  unlink_stmt_vdef (gsi_stmt (gsi));
  gsi_remove (&gsi, true);

  /* Clean up E_OUT for the fallthru.  */
  e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
  e_out->probability = REG_BR_PROB_BASE;
}
/* A subroutine of cleanup_empty_eh.  Handle more complex cases of
   unsplitting than unsplit_eh was prepared to handle, e.g. when
   multiple incoming edges and phis are involved.  */

static bool
cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
{
  gimple_stmt_iterator gsi;
  tree lab;

  /* We really ought not have totally lost everything following
     a landing pad label.  Given that BB is empty, there had better
     be a successor.  */
  gcc_assert (e_out != NULL);

  /* The destination block must not already have a landing pad
     for a different region.  */
  lab = NULL;
  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
      int lp_nr;

      if (!stmt)
	break;
      lab = gimple_label_label (stmt);
      lp_nr = EH_LANDING_PAD_NR (lab);
      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
	return false;
    }

  /* Attempt to move the PHIs into the successor block.  */
  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Unsplit EH landing pad %d to block %i "
		 "(via cleanup_empty_eh).\n",
		 lp->index, e_out->dest->index);
      return true;
    }

  return false;
}
/* Return true if edge E_FIRST is part of an empty infinite loop
   or leads to such a loop through a series of single successor
   empty bbs.  */

static bool
infinite_empty_loop_p (edge e_first)
{
  bool inf_loop = false;
  edge e;

  if (e_first->dest == e_first->src)
    return true;

  e_first->src->aux = (void *) 1;
  for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
    {
      gimple_stmt_iterator gsi;
      if (e->dest->aux)
	{
	  inf_loop = true;
	  break;
	}
      e->dest->aux = (void *) 1;
      gsi = gsi_after_labels (e->dest);
      if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
	gsi_next_nondebug (&gsi);
      if (!gsi_end_p (gsi))
	break;
    }
  e_first->src->aux = NULL;
  for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
    e->dest->aux = NULL;

  return inf_loop;
}
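/* Illustrative example (editorial addition, not from the GCC sources):
   a self-loop "L: goto L;" makes e_first->dest == e_first->src and
   returns true immediately; a chain of empty single-successor blocks
   that eventually revisits a marked block is detected via the aux
   flags set and cleared above.  */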
/* Examine the block associated with LP to determine if it's an empty
   handler for its EH region.  If so, attempt to redirect EH edges to
   an outer region.  Return true if the CFG was updated in any way.  This
   is similar to jump forwarding, just across EH edges.  */

static bool
cleanup_empty_eh (eh_landing_pad lp)
{
  basic_block bb = label_to_block (lp->post_landing_pad);
  gimple_stmt_iterator gsi;
  gimple resx;
  eh_region new_region;
  edge_iterator ei;
  edge e, e_out;
  bool has_non_eh_pred;
  bool ret = false;
  int new_lp_nr;

  /* There can be zero or one edges out of BB.  This is the quickest test.  */
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      e_out = NULL;
      break;
    case 1:
      e_out = single_succ_edge (bb);
      break;
    default:
      return false;
    }

  resx = last_stmt (bb);
  if (resx && is_gimple_resx (resx))
    {
      if (stmt_can_throw_external (resx))
	optimize_clobbers (bb);
      else if (sink_clobbers (bb))
	ret = true;
    }

  gsi = gsi_after_labels (bb);

  /* Make sure to skip debug statements.  */
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);

  /* If the block is totally empty, look for more unsplitting cases.  */
  if (gsi_end_p (gsi))
    {
      /* For the degenerate case of an infinite loop bail out.
	 If bb has no successors and is totally empty, which can happen e.g.
	 because of incorrect noreturn attribute, bail out too.  */
      if (e_out == NULL
	  || infinite_empty_loop_p (e_out))
	return ret;

      return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
    }

  /* The block should consist only of a single RESX statement, modulo a
     preceding call to __builtin_stack_restore if there is no outgoing
     edge, since the call can be eliminated in this case.  */
  resx = gsi_stmt (gsi);
  if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
    {
      gsi_next (&gsi);
      resx = gsi_stmt (gsi);
    }
  if (!is_gimple_resx (resx))
    return ret;
  gcc_assert (gsi_one_before_end_p (gsi));

  /* Determine if there are non-EH edges, or resx edges into the handler.  */
  has_non_eh_pred = false;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (!(e->flags & EDGE_EH))
      has_non_eh_pred = true;

  /* Find the handler that's outer of the empty handler by looking at
     where the RESX instruction was vectored.  */
  new_lp_nr = lookup_stmt_eh_lp (resx);
  new_region = get_eh_region_from_lp_number (new_lp_nr);

  /* If there's no destination region within the current function,
     redirection is trivial via removing the throwing statements from
     the EH region, removing the EH edges, and allowing the block
     to go unreachable.  */
  if (new_region == NULL)
    {
      gcc_assert (e_out == NULL);
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
	if (e->flags & EDGE_EH)
	  {
	    gimple stmt = last_stmt (e->src);
	    remove_stmt_from_eh_lp (stmt);
	    remove_edge (e);
	  }
	else
	  ei_next (&ei);
      goto succeed;
    }

  /* If the destination region is a MUST_NOT_THROW, allow the runtime
     to handle the abort and allow the blocks to go unreachable.  */
  if (new_region->type == ERT_MUST_NOT_THROW)
    {
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
	if (e->flags & EDGE_EH)
	  {
	    gimple stmt = last_stmt (e->src);
	    remove_stmt_from_eh_lp (stmt);
	    add_stmt_to_eh_lp (stmt, new_lp_nr);
	    remove_edge (e);
	  }
	else
	  ei_next (&ei);
      goto succeed;
    }

  /* Try to redirect the EH edges and merge the PHIs into the destination
     landing pad block.  If the merge succeeds, we'll already have redirected
     all the EH edges.  The handler itself will go unreachable if there were
     no normal edges.  */
  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
    goto succeed;

  /* Finally, if all input edges are EH edges, then we can (potentially)
     reduce the number of transfers from the runtime by moving the landing
     pad from the original region to the new region.  This is a win when
     we remove the last CLEANUP region along a particular exception
     propagation path.  Since nothing changes except for the region with
     which the landing pad is associated, the PHI nodes do not need to be
     adjusted at all.  */
  if (!has_non_eh_pred)
    {
      cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
		 lp->index, new_region->index);

      /* ??? The CFG didn't change, but we may have rendered the
	 old EH region unreachable.  Trigger a cleanup there.  */
      return true;
    }

  return ret;

 succeed:
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
  remove_eh_landing_pad (lp);
  return true;
}
/* Do a post-order traversal of the EH region tree.  Examine each
   post_landing_pad block and see if we can eliminate it as empty.  */

static bool
cleanup_all_empty_eh (void)
{
  bool changed = false;
  eh_landing_pad lp;
  int i;

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp)
      changed |= cleanup_empty_eh (lp);

  return changed;
}
/* Perform cleanups and lowering of exception handling
    1) cleanup regions whose handlers do nothing are optimized out
    2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
    3) Info about regions that contain instructions, and regions
       reachable via local EH edges is collected
    4) The EH tree is pruned for regions no longer necessary.

   TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
	 Unify those that have the same failure decl and locus.
*/

static unsigned int
execute_cleanup_eh_1 (void)
{
  /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
     looking up unreachable landing pads.  */
  remove_unreachable_handlers ();

  /* Watch out for the region tree vanishing due to all unreachable.  */
  if (cfun->eh->region_tree)
    {
      bool changed = false;

      if (optimize)
	changed |= unsplit_all_eh ();
      changed |= cleanup_all_empty_eh ();

      if (changed)
	{
	  free_dominance_info (CDI_DOMINATORS);
	  free_dominance_info (CDI_POST_DOMINATORS);

	  /* We delayed all basic block deletion, as we may have performed
	     cleanups on EH edges while non-EH edges were still present.  */
	  delete_unreachable_blocks ();

	  /* We manipulated the landing pads.  Remove any region that no
	     longer has a landing pad.  */
	  remove_unreachable_handlers_no_lp ();

	  return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
	}
    }

  return 0;
}
namespace {

const pass_data pass_data_cleanup_eh =
{
  GIMPLE_PASS, /* type */
  "ehcleanup", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
class pass_cleanup_eh : public gimple_opt_pass
{
public:
  pass_cleanup_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
  virtual bool gate (function *fun)
    {
      return fun->eh != NULL && fun->eh->region_tree != NULL;
    }

  virtual unsigned int execute (function *);

}; // class pass_cleanup_eh
unsigned int
pass_cleanup_eh::execute (function *fun)
{
  int ret = execute_cleanup_eh_1 ();

  /* If the function no longer needs an EH personality routine,
     clear it.  This exposes cross-language inlining opportunities
     and avoids references to a never-defined personality routine.  */
  if (DECL_FUNCTION_PERSONALITY (current_function_decl)
      && function_needs_eh_personality (fun) != eh_personality_lang)
    DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;

  return ret;
}
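/* Hedged illustration of the personality clearing above: a C++
   function whose only EH construct was an empty cleanup ends up with
   no region tree after this pass; dropping DECL_FUNCTION_PERSONALITY
   then lets it be inlined into, say, a C caller that has no
   personality routine of its own.  */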
} // anon namespace

gimple_opt_pass *
make_pass_cleanup_eh (gcc::context *ctxt)
{
  return new pass_cleanup_eh (ctxt);
}
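/* Hedged usage note: the pass manager instantiates the pass through
   this factory; in the GCC sources the registration lives in
   passes.def as something like

       NEXT_PASS (pass_cleanup_eh);

   (shown for illustration; consult passes.def for the exact pipeline
   position).  */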
/* Verify that BB, which contains STMT as its last statement, has
   precisely the edges that make_eh_edges would create.  */

DEBUG_FUNCTION bool
verify_eh_edges (gimple stmt)
{
  basic_block bb = gimple_bb (stmt);
  eh_landing_pad lp = NULL;
  int lp_nr;
  edge_iterator ei;
  edge e, eh_edge;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr > 0)
    lp = get_eh_landing_pad_from_number (lp_nr);

  eh_edge = NULL;
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      if (e->flags & EDGE_EH)
	{
	  if (eh_edge)
	    {
	      error ("BB %i has multiple EH edges", bb->index);
	      return true;
	    }
	  else
	    eh_edge = e;
	}
    }

  if (lp == NULL)
    {
      if (eh_edge)
	{
	  error ("BB %i can not throw but has an EH edge", bb->index);
	  return true;
	}
      return false;
    }

  if (!stmt_could_throw_p (stmt))
    {
      error ("BB %i last statement has incorrectly set lp", bb->index);
      return true;
    }

  if (eh_edge == NULL)
    {
      error ("BB %i is missing an EH edge", bb->index);
      return true;
    }

  if (eh_edge->dest != label_to_block (lp->post_landing_pad))
    {
      error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
      return true;
    }

  return false;
}
/* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */

DEBUG_FUNCTION bool
verify_eh_dispatch_edge (geh_dispatch *stmt)
{
  eh_region r;
  eh_catch c;
  basic_block src, dst;
  bool want_fallthru = true;
  edge_iterator ei;
  edge e, fall_edge;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  src = gimple_bb (stmt);

  FOR_EACH_EDGE (e, ei, src->succs)
    gcc_assert (e->aux == NULL);

  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	{
	  dst = label_to_block (c->label);
	  e = find_edge (src, dst);
	  if (e == NULL)
	    {
	      error ("BB %i is missing an edge", src->index);
	      return true;
	    }
	  e->aux = (void *)e;

	  /* A catch-all handler doesn't have a fallthru.  */
	  if (c->type_list == NULL)
	    {
	      want_fallthru = false;
	      break;
	    }
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      dst = label_to_block (r->u.allowed.label);
      e = find_edge (src, dst);
      if (e == NULL)
	{
	  error ("BB %i is missing an edge", src->index);
	  return true;
	}
      e->aux = (void *)e;
      break;

    default:
      gcc_unreachable ();
    }

  fall_edge = NULL;
  FOR_EACH_EDGE (e, ei, src->succs)
    {
      if (e->flags & EDGE_FALLTHRU)
	{
	  if (fall_edge != NULL)
	    {
	      error ("BB %i too many fallthru edges", src->index);
	      return true;
	    }
	  fall_edge = e;
	}
      else if (e->aux)
	e->aux = NULL;
      else
	{
	  error ("BB %i has incorrect edge", src->index);
	  return true;
	}
    }

  if ((fall_edge != NULL) ^ want_fallthru)
    {
      error ("BB %i has incorrect fallthru edge", src->index);
      return true;
    }

  return false;
}
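/* Hedged illustration of the CFG shape this checker expects (BB
   numbers are made up):

       <bb 4>:
       eh_dispatch 2;		// dispatch for ERT_TRY region 2

   with one outgoing edge per catch label of region 2, plus a single
   fallthru edge that re-raises (RESX) when no catch matched -- unless
   a catch-all handler makes the fallthru unnecessary.  */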