/* Exception handling semantics and decomposition for trees.
   Copyright (C) 2003-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hash-table.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-inline.h"
#include "tree-pass.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "gimple-low.h"
/* In some instances a tree and a gimple statement need to be stored in
   the same table, i.e. in hash tables.  This is a union to do this.  */

typedef union {tree *tp; tree t; gimple g;} treemple;
/* Misc functions used in this file.  */

/* Remember and lookup EH landing pad data for arbitrary statements.
   Really this means any statement that could_throw_p.  We could
   stuff this information into the stmt_ann data structure, but:

   (1) We absolutely rely on this information being kept until
   we get to rtl.  Once we're done with lowering here, if we lose
   the information there's no way to recover it!

   (2) There are many more statements that *cannot* throw as
   compared to those that can.  We should be saving some amount
   of space by only allocating memory for those that can throw.  */
/* Add statement T in function IFUN to landing pad NUM.  */

static void
add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
{
  gcc_assert (num != 0);

  if (!get_eh_throw_stmt_table (ifun))
    set_eh_throw_stmt_table (ifun, hash_map<gimple, int>::create_ggc (31));

  gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
}
/* Add statement T in the current function (cfun) to EH landing pad NUM.  */

void
add_stmt_to_eh_lp (gimple t, int num)
{
  add_stmt_to_eh_lp_fn (cfun, t, num);
}
/* Add statement T to the single EH landing pad in REGION.  */

static void
record_stmt_eh_region (eh_region region, gimple t)
{
  if (region == NULL)
    return;
  if (region->type == ERT_MUST_NOT_THROW)
    add_stmt_to_eh_lp_fn (cfun, t, -region->index);
  else
    {
      eh_landing_pad lp = region->landing_pads;
      if (lp == NULL)
        lp = gen_eh_landing_pad (region);
      else
        gcc_assert (lp->next_lp == NULL);
      add_stmt_to_eh_lp_fn (cfun, t, lp->index);
    }
}
/* Remove statement T in function IFUN from its EH landing pad.  */

bool
remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
{
  if (!get_eh_throw_stmt_table (ifun))
    return false;

  if (!get_eh_throw_stmt_table (ifun)->get (t))
    return false;

  get_eh_throw_stmt_table (ifun)->remove (t);
  return true;
}
/* Remove statement T in the current function (cfun) from its
   EH landing pad.  */

bool
remove_stmt_from_eh_lp (gimple t)
{
  return remove_stmt_from_eh_lp_fn (cfun, t);
}
/* Determine if statement T is inside an EH region in function IFUN.
   Positive numbers indicate a landing pad index; negative numbers
   indicate a MUST_NOT_THROW region index; zero indicates that the
   statement is not recorded in the region table.  */

int
lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
{
  if (ifun->eh->throw_stmt_table == NULL)
    return 0;

  int *lp_nr = ifun->eh->throw_stmt_table->get (t);
  return lp_nr ? *lp_nr : 0;
}
/* Likewise, but always use the current function.  */

int
lookup_stmt_eh_lp (gimple t)
{
  /* We can get called from initialized data when -fnon-call-exceptions
     is on; prevent crash.  */
  if (!cfun)
    return 0;
  return lookup_stmt_eh_lp_fn (cfun, t);
}
/* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
   nodes and LABEL_DECL nodes.  We will use this during the second phase to
   determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */

struct finally_tree_node
{
  /* When storing a GIMPLE_TRY, we have to record a gimple.  However
     when deciding whether a GOTO to a certain LABEL_DECL (which is a
     tree) leaves the TRY block, it's necessary to record a tree in
     this field.  Thus a treemple is used.  */
  treemple child;
  gimple_try parent;
};
/* Hashtable helpers.  */

struct finally_tree_hasher : typed_free_remove <finally_tree_node>
{
  typedef finally_tree_node value_type;
  typedef finally_tree_node compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
finally_tree_hasher::hash (const value_type *v)
{
  return (intptr_t)v->child.t >> 4;
}

inline bool
finally_tree_hasher::equal (const value_type *v, const compare_type *c)
{
  return v->child.t == c->child.t;
}

/* Note that this table is *not* marked GTY.  It is short-lived.  */
static hash_table<finally_tree_hasher> *finally_tree;
static void
record_in_finally_tree (treemple child, gimple_try parent)
{
  struct finally_tree_node *n;
  finally_tree_node **slot;

  n = XNEW (struct finally_tree_node);
  n->child = child;
  n->parent = parent;

  slot = finally_tree->find_slot (n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}
static void
collect_finally_tree (gimple stmt, gimple_try region);
/* Go through the gimple sequence.  Works with collect_finally_tree to
   record all GIMPLE_LABEL and GIMPLE_TRY statements.  */

static void
collect_finally_tree_1 (gimple_seq seq, gimple_try region)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
    collect_finally_tree (gsi_stmt (gsi), region);
}
static void
collect_finally_tree (gimple stmt, gimple_try region)
{
  treemple temp;

  switch (gimple_code (stmt))
    {
    case GIMPLE_LABEL:
      temp.t = gimple_label_label (stmt);
      record_in_finally_tree (temp, region);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
        {
          temp.g = stmt;
          record_in_finally_tree (temp, region);
          collect_finally_tree_1 (gimple_try_eval (stmt),
                                  as_a <gimple_try> (stmt));
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
        {
          collect_finally_tree_1 (gimple_try_eval (stmt), region);
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      break;

    case GIMPLE_CATCH:
      collect_finally_tree_1 (gimple_catch_handler (
                                as_a <gimple_catch> (stmt)),
                              region);
      break;

    case GIMPLE_EH_FILTER:
      collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
      break;

    case GIMPLE_EH_ELSE:
      {
        gimple_eh_else eh_else_stmt = as_a <gimple_eh_else> (stmt);
        collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region);
        collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region);
      }
      break;

    default:
      /* A type, a decl, or some kind of statement that we're not
         interested in.  Don't walk them.  */
      break;
    }
}
/* Use the finally tree to determine if a jump from START to TARGET
   would leave the try_finally node that START lives in.  */

static bool
outside_finally_tree (treemple start, gimple target)
{
  struct finally_tree_node n, *p;

  do
    {
      n.child = start;
      p = finally_tree->find (&n);
      if (!p)
        return true;
      start.g = p->parent;
    }
  while (start.g != target);

  return false;
}
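
/* An illustrative example, added for exposition (not part of the
   original sources): given

	try { goto out; } finally { cleanup (); }
	out:;

   collect_finally_tree records the pair (out, <the GIMPLE_TRY>) as well
   as a pair for the GIMPLE_TRY node itself, so outside_finally_tree can
   walk from a jump's destination up through the recorded parents; if the
   walk never reaches TARGET, the jump escapes the try_finally and must
   be redirected through the finally block.  */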
/* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
   nodes into a set of gotos, magic labels, and eh regions.
   The eh region creation is straight-forward, but frobbing all the gotos
   and such into shape isn't.  */

/* The sequence into which we record all EH stuff.  This will be
   placed at the end of the function when we're all done.  */
static gimple_seq eh_seq;

/* Record whether an EH region contains something that can throw,
   indexed by EH region number.  */
static bitmap eh_region_may_contain_throw_map;

/* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
   statements that are seen to escape this GIMPLE_TRY_FINALLY node.
   The idea is to record a gimple statement for everything except for
   the conditionals, which get their labels recorded.  Since labels are
   of type 'tree', we need this node to store both gimple and tree
   objects.  REPL_STMT is the sequence used to replace the goto/return
   statement.  CONT_STMT is used to store the statement that allows
   the return/goto to jump to the original destination.  */
struct goto_queue_node
{
  treemple stmt;
  location_t location;
  gimple_seq repl_stmt;
  gimple cont_stmt;
  int index;
  /* This is used when index >= 0 to indicate that stmt is a label (as
     opposed to a goto stmt).  */
  bool is_label;
};
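
/* An illustrative sketch, added for exposition (not original text): for

	try { return x; } finally { f (); }

   the escaping GIMPLE_RETURN is queued with REPL_STMT lowering to
   "goto <finally_label>;", while CONT_STMT retains the original
   "return x;" so it can be re-emitted after (a copy of) the finally
   block has run.  */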
/* State of the world while lowering.  */

struct leh_state
{
  /* What's "current" while constructing the eh region tree.  These
     correspond to variables of the same name in cfun->eh, which we
     don't have easy access to.  */
  eh_region cur_region;

  /* What's "current" for the purposes of __builtin_eh_pointer.  For
     a CATCH, this is the associated TRY.  For an EH_FILTER, this is
     the associated ALLOWED_EXCEPTIONS, etc.  */
  eh_region ehp_region;

  /* Processing of TRY_FINALLY requires a bit more state.  This is
     split out into a separate structure so that we don't have to
     copy so much when processing other nodes.  */
  struct leh_tf_state *tf;
};
struct leh_tf_state
{
  /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
     try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
     this so that outside_finally_tree can reliably reference the tree used
     in the collect_finally_tree data structures.  */
  gimple_try try_finally_expr;
  gimple_try top_p;

  /* While lowering a top_p usually it is expanded into multiple statements,
     thus we need the following field to store them.  */
  gimple_seq top_p_seq;

  /* The state outside this try_finally node.  */
  struct leh_state *outer;

  /* The exception region created for it.  */
  eh_region region;

  /* The goto queue.  */
  struct goto_queue_node *goto_queue;
  size_t goto_queue_size;
  size_t goto_queue_active;

  /* Pointer map to help in searching goto_queue when it is large.  */
  hash_map<gimple, goto_queue_node *> *goto_queue_map;

  /* The set of unique labels seen as entries in the goto queue.  */
  vec<tree> dest_array;

  /* A label to be added at the end of the completed transformed
     sequence.  It will be set if may_fallthru was true *at one time*,
     though subsequent transformations may have cleared that flag.  */
  tree fallthru_label;

  /* True if it is possible to fall out the bottom of the try block.
     Cleared if the fallthru is converted to a goto.  */
  bool may_fallthru;

  /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
  bool may_return;

  /* True if the finally block can receive an exception edge.
     Cleared if the exception case is handled by code duplication.  */
  bool may_throw;
};
static gimple_seq lower_eh_must_not_throw (struct leh_state *, gimple_try);
/* Search for STMT in the goto queue.  Return the replacement,
   or null if the statement isn't in the queue.  */

#define LARGE_GOTO_QUEUE 20

static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);
static gimple_seq
find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
{
  unsigned int i;

  if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
    {
      for (i = 0; i < tf->goto_queue_active; i++)
        if (tf->goto_queue[i].stmt.g == stmt.g)
          return tf->goto_queue[i].repl_stmt;
      return NULL;
    }

  /* If we have a large number of entries in the goto_queue, create a
     pointer map and use that for searching.  */

  if (!tf->goto_queue_map)
    {
      tf->goto_queue_map = new hash_map<gimple, goto_queue_node *>;
      for (i = 0; i < tf->goto_queue_active; i++)
        {
          bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
                                                  &tf->goto_queue[i]);
          gcc_assert (!existed);
        }
    }

  goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
  if (slot != NULL)
    return ((*slot)->repl_stmt);

  return NULL;
}
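
/* The lookup strategy above in miniature (an added sketch; the helper
   names are hypothetical, not part of this file):

	if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
	  q = linear_scan (tf, stmt);	/- O(n), no allocation -/
	else
	  q = hashed_lookup (tf, stmt);	/- builds goto_queue_map once -/

   i.e. small queues pay at most LARGE_GOTO_QUEUE pointer compares,
   while large queues amortize one map construction over many O(1)
   lookups.  */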
/* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
   lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
   then we can just splat it in, otherwise we add the new stmts immediately
   after the GIMPLE_COND and redirect.  */

static void
replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
                                gimple_stmt_iterator *gsi)
{
  tree label;
  gimple_seq new_seq;
  treemple temp;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  temp.tp = tp;
  new_seq = find_goto_replacement (tf, temp);
  if (!new_seq)
    return;

  if (gimple_seq_singleton_p (new_seq)
      && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
    {
      *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
      return;
    }

  label = create_artificial_label (loc);
  /* Set the new label for the GIMPLE_COND.  */
  *tp = label;

  gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
  gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
}
/* The real work of replace_goto_queue.  Returns with TSI updated to
   point to the next statement.  */

static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);

static void
replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
                      gimple_stmt_iterator *gsi)
{
  gimple_seq seq;
  treemple temp;

  switch (gimple_code (stmt))
    {
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      temp.g = stmt;
      seq = find_goto_replacement (tf, temp);
      if (seq)
        {
          gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
          gsi_remove (gsi, false);
          return;
        }
      break;

    case GIMPLE_COND:
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
      break;

    case GIMPLE_TRY:
      replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
      break;
    case GIMPLE_CATCH:
      replace_goto_queue_stmt_list (gimple_catch_handler_ptr (
                                      as_a <gimple_catch> (stmt)),
                                    tf);
      break;
    case GIMPLE_EH_FILTER:
      replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
      break;
    case GIMPLE_EH_ELSE:
      {
        gimple_eh_else eh_else_stmt = as_a <gimple_eh_else> (stmt);
        replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt),
                                      tf);
        replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt),
                                      tf);
      }
      break;

    default:
      /* These won't have gotos in them.  */
      break;
    }

  gsi_next (gsi);
}
/* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */

static void
replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
{
  gimple_stmt_iterator gsi = gsi_start (*seq);

  while (!gsi_end_p (gsi))
    replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
}
/* Replace all goto queue members.  */

static void
replace_goto_queue (struct leh_tf_state *tf)
{
  if (tf->goto_queue_active == 0)
    return;
  replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
  replace_goto_queue_stmt_list (&eh_seq, tf);
}
/* Add a new record to the goto queue contained in TF.  NEW_STMT is the
   data to be added, IS_LABEL indicates whether NEW_STMT is a label or
   a gimple return statement.  */

static void
record_in_goto_queue (struct leh_tf_state *tf,
                      treemple new_stmt,
                      int index,
                      bool is_label,
                      location_t location)
{
  size_t active, size;
  struct goto_queue_node *q;

  gcc_assert (!tf->goto_queue_map);

  active = tf->goto_queue_active;
  size = tf->goto_queue_size;
  if (active >= size)
    {
      size = (size ? size * 2 : 32);
      tf->goto_queue_size = size;
      tf->goto_queue
        = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
    }

  q = &tf->goto_queue[active];
  tf->goto_queue_active = active + 1;

  memset (q, 0, sizeof (*q));
  q->stmt = new_stmt;
  q->index = index;
  q->location = location;
  q->is_label = is_label;
}
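
/* A note on the growth policy above (added commentary, not original
   text): capacities run 32, 64, 128, ..., so N insertions trigger only
   O(log N) calls to XRESIZEVEC and amortized O(1) copying per queued
   node.  */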
/* Record the LABEL label in the goto queue contained in TF.
   TF is not null.  */

static void
record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
                            location_t location)
{
  int index;
  treemple temp, new_stmt;

  if (!label)
    return;

  /* Computed and non-local gotos do not get processed.  Given
     their nature we can neither tell whether we've escaped the
     finally block nor redirect them if we knew.  */
  if (TREE_CODE (label) != LABEL_DECL)
    return;

  /* No need to record gotos that don't leave the try block.  */
  temp.t = label;
  if (!outside_finally_tree (temp, tf->try_finally_expr))
    return;

  if (! tf->dest_array.exists ())
    {
      tf->dest_array.create (10);
      tf->dest_array.quick_push (label);
      index = 0;
    }
  else
    {
      int n = tf->dest_array.length ();
      for (index = 0; index < n; ++index)
        if (tf->dest_array[index] == label)
          break;
      if (index == n)
        tf->dest_array.safe_push (label);
    }

  /* In the case of a GOTO we want to record the destination label,
     since with a GIMPLE_COND we have an easy access to the then/else
     labels.  */
  new_stmt = stmt;
  record_in_goto_queue (tf, new_stmt, index, true, location);
}
/* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
   node, and if so record that fact in the goto queue associated with that
   try_finally node.  */

static void
maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
{
  struct leh_tf_state *tf = state->tf;
  treemple new_stmt;

  if (!tf)
    return;

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      new_stmt.tp = gimple_op_ptr (stmt, 2);
      record_in_goto_queue_label (tf, new_stmt, gimple_cond_true_label (stmt),
                                  EXPR_LOCATION (*new_stmt.tp));
      new_stmt.tp = gimple_op_ptr (stmt, 3);
      record_in_goto_queue_label (tf, new_stmt, gimple_cond_false_label (stmt),
                                  EXPR_LOCATION (*new_stmt.tp));
      break;
    case GIMPLE_GOTO:
      new_stmt.g = stmt;
      record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
                                  gimple_location (stmt));
      break;

    case GIMPLE_RETURN:
      tf->may_return = true;
      new_stmt.g = stmt;
      record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
      break;

    default:
      gcc_unreachable ();
    }
}
#ifdef ENABLE_CHECKING
/* We do not process GIMPLE_SWITCHes for now.  As long as the original source
   was in fact structured, and we've not yet done jump threading, then none
   of the labels will leave outer GIMPLE_TRY_FINALLY nodes.  Verify this.  */

static void
verify_norecord_switch_expr (struct leh_state *state,
                             gimple_switch switch_expr)
{
  struct leh_tf_state *tf = state->tf;
  size_t i, n;

  if (!tf)
    return;

  n = gimple_switch_num_labels (switch_expr);

  for (i = 0; i < n; ++i)
    {
      treemple temp;
      tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
      temp.t = lab;
      gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
    }
}
#else
#define verify_norecord_switch_expr(state, switch_expr)
#endif
/* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
   non-null, insert it before the new branch.  */

static void
do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
{
  gimple x;

  /* In the case of a return, the queue node must be a gimple statement.  */
  gcc_assert (!q->is_label);

  /* Note that the return value may have already been computed, e.g.,

	int x;
	int foo (void)
	{
	  x = 0;
	  try {
	    return x;
	  } finally {
	    x++;
	  }
	}

     should return 0, not 1.  We don't have to do anything to make
     this happen because the return value has been placed in the
     RESULT_DECL already.  */

  q->cont_stmt = q->stmt.g;

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}
/* Similar, but easier, for GIMPLE_GOTO.  */

static void
do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
                     struct leh_tf_state *tf)
{
  gimple x;

  gcc_assert (q->is_label);

  q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}
/* Emit a standard landing pad sequence into SEQ for REGION.  */

static void
emit_post_landing_pad (gimple_seq *seq, eh_region region)
{
  eh_landing_pad lp = region->landing_pads;
  gimple x;

  if (lp == NULL)
    lp = gen_eh_landing_pad (region);

  lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
  EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;

  x = gimple_build_label (lp->post_landing_pad);
  gimple_seq_add_stmt (seq, x);
}
/* Emit a RESX statement into SEQ for REGION.  */

static void
emit_resx (gimple_seq *seq, eh_region region)
{
  gimple_resx x = gimple_build_resx (region->index);
  gimple_seq_add_stmt (seq, x);
  if (region->outer)
    record_stmt_eh_region (region->outer, x);
}
/* Emit an EH_DISPATCH statement into SEQ for REGION.  */

static void
emit_eh_dispatch (gimple_seq *seq, eh_region region)
{
  gimple_eh_dispatch x = gimple_build_eh_dispatch (region->index);
  gimple_seq_add_stmt (seq, x);
}
/* Note that the current EH region may contain a throw, or a
   call to a function which itself may contain a throw.  */

static void
note_eh_region_may_contain_throw (eh_region region)
{
  while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
    {
      if (region->type == ERT_MUST_NOT_THROW)
        break;
      region = region->outer;
      if (region == NULL)
        break;
    }
}
/* Check if REGION has been marked as containing a throw.  If REGION is
   NULL, this predicate is false.  */

static inline bool
eh_region_may_contain_throw (eh_region r)
{
  return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
}
/* We want to transform
	try { body; } catch { stuff; }
   to
	normal_sequence:
	  body;
	  over:
	eh_sequence:
	  landing_pad:
	  stuff;
	  goto over;

   TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
   should be placed before the second operand, or NULL.  OVER is
   an existing label that should be put at the exit, or NULL.  */

static gimple_seq
frob_into_branch_around (gimple_try tp, eh_region region, tree over)
{
  gimple x;
  gimple_seq cleanup, result;
  location_t loc = gimple_location (tp);

  cleanup = gimple_try_cleanup (tp);
  result = gimple_try_eval (tp);

  if (region)
    emit_post_landing_pad (&eh_seq, region);

  if (gimple_seq_may_fallthru (cleanup))
    {
      if (!over)
        over = create_artificial_label (loc);
      x = gimple_build_goto (over);
      gimple_set_location (x, loc);
      gimple_seq_add_stmt (&cleanup, x);
    }
  gimple_seq_add_seq (&eh_seq, cleanup);

  if (over)
    {
      x = gimple_build_label (over);
      gimple_seq_add_stmt (&result, x);
    }
  return result;
}
/* A subroutine of lower_try_finally.  Duplicate the tree rooted at T.
   Make sure to record all new labels found.  */

static gimple_seq
lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
                             location_t loc)
{
  gimple_try region = NULL;
  gimple_seq new_seq;
  gimple_stmt_iterator gsi;

  new_seq = copy_gimple_seq_and_replace_locals (seq);

  for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
        {
          tree block = gimple_block (stmt);
          gimple_set_location (stmt, loc);
          gimple_set_block (stmt, block);
        }
    }

  if (outer_state->tf)
    region = outer_state->tf->try_finally_expr;
  collect_finally_tree_1 (new_seq, region);

  return new_seq;
}
/* A subroutine of lower_try_finally.  Create a fallthru label for
   the given try_finally state.  The only tricky bit here is that
   we have to make sure to record the label in our outer context.  */

static tree
lower_try_finally_fallthru_label (struct leh_tf_state *tf)
{
  tree label = tf->fallthru_label;
  treemple temp;

  if (!label)
    {
      label = create_artificial_label (gimple_location (tf->try_finally_expr));
      tf->fallthru_label = label;
      if (tf->outer->tf)
        {
          temp.t = label;
          record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
        }
    }
  return label;
}
/* A subroutine of lower_try_finally.  If FINALLY consists of a
   GIMPLE_EH_ELSE node, return it.  */

static inline gimple_eh_else
get_eh_else (gimple_seq finally)
{
  gimple x = gimple_seq_first_stmt (finally);
  if (gimple_code (x) == GIMPLE_EH_ELSE)
    {
      gcc_assert (gimple_seq_singleton_p (finally));
      return as_a <gimple_eh_else> (x);
    }
  return NULL;
}
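
/* For exposition (not original text): a GIMPLE_EH_ELSE must be the sole
   statement of a TRY_FINALLY cleanup, giving roughly

	try { body; } finally { EH_ELSE { n_body } { e_body } }

   where N_BODY runs on the normal exits (fallthru, goto, return) and
   E_BODY runs only on the exception path.  */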
/* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
   langhook returns non-null, then the language requires that the exception
   path out of a try_finally be treated specially.  To wit: the code within
   the finally block may not itself throw an exception.  We have two choices
   here.  First we can duplicate the finally block and wrap it in a
   must_not_throw region.  Second, we can generate code like

	try {
	  finally_block;
	} catch {
	  if (fintmp == eh_edge)
	    protect_cleanup_actions;
	}

   where "fintmp" is the temporary used in the switch statement generation
   alternative considered below.  For the nonce, we always choose the first
   option.

   THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */

static void
honor_protect_cleanup_actions (struct leh_state *outer_state,
                               struct leh_state *this_state,
                               struct leh_tf_state *tf)
{
  tree protect_cleanup_actions;
  gimple_stmt_iterator gsi;
  bool finally_may_fallthru;
  gimple_seq finally;
  gimple x;
  gimple_eh_must_not_throw eh_mnt;
  gimple_try try_stmt;
  gimple_eh_else eh_else;

  /* First check for nothing to do.  */
  if (lang_hooks.eh_protect_cleanup_actions == NULL)
    return;
  protect_cleanup_actions = lang_hooks.eh_protect_cleanup_actions ();
  if (protect_cleanup_actions == NULL)
    return;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Duplicate the FINALLY block.  Only need to do this for try-finally,
     and not for cleanups.  If we've got an EH_ELSE, extract it now.  */
  if (eh_else)
    {
      finally = gimple_eh_else_e_body (eh_else);
      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
    }
  else if (this_state)
    finally = lower_try_finally_dup_block (finally, outer_state,
        gimple_location (tf->try_finally_expr));
  finally_may_fallthru = gimple_seq_may_fallthru (finally);

  /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
     set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
     to be in an enclosing scope, but needs to be implemented at this level
     to avoid a nesting violation (see wrap_temporary_cleanups in
     cp/decl.c).  Since it's logically at an outer level, we should call
     terminate before we get to it, so strip it away before adding the
     MUST_NOT_THROW filter.  */
  gsi = gsi_start (finally);
  x = gsi_stmt (gsi);
  if (gimple_code (x) == GIMPLE_TRY
      && gimple_try_kind (x) == GIMPLE_TRY_CATCH
      && gimple_try_catch_is_cleanup (x))
    {
      gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
      gsi_remove (&gsi, false);
    }

  /* Wrap the block with protect_cleanup_actions as the action.  */
  eh_mnt = gimple_build_eh_must_not_throw (protect_cleanup_actions);
  try_stmt = gimple_build_try (finally, gimple_seq_alloc_with_stmt (eh_mnt),
                               GIMPLE_TRY_CATCH);
  finally = lower_eh_must_not_throw (outer_state, try_stmt);

  /* Drop all of this into the exception sequence.  */
  emit_post_landing_pad (&eh_seq, tf->region);
  gimple_seq_add_seq (&eh_seq, finally);
  if (finally_may_fallthru)
    emit_resx (&eh_seq, tf->region);

  /* Having now been handled, EH isn't to be considered with
     the rest of the outgoing edges.  */
  tf->may_throw = false;
}
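
/* Sketch of the shape produced above, added for exposition: the
   exception path out of the try_finally becomes approximately

	landing_pad:
	  try { finally_block; } catch { MUST_NOT_THROW; }
	  resx;		(only if finally_block may fall through)

   which implements e.g. the C++ rule that a cleanup throwing during
   unwinding calls std::terminate.  */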
/* A subroutine of lower_try_finally.  We have determined that there is
   no fallthru edge out of the finally block.  This means that there is
   no outgoing edge corresponding to any incoming edge.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_nofallthru (struct leh_state *state,
                              struct leh_tf_state *tf)
{
  tree lab;
  gimple x;
  gimple_eh_else eh_else;
  gimple_seq finally;
  struct goto_queue_node *q, *qe;

  lab = create_artificial_label (gimple_location (tf->try_finally_expr));

  /* We expect that tf->top_p is a GIMPLE_TRY.  */
  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  x = gimple_build_label (lab);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  for (; q < qe; ++q)
    if (q->index < 0)
      do_return_redirection (q, lab, NULL);
    else
      do_goto_redirection (q, lab, NULL, tf);

  replace_goto_queue (tf);

  /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      finally = gimple_eh_else_n_body (eh_else);
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
        {
          finally = gimple_eh_else_e_body (eh_else);
          lower_eh_constructs_1 (state, &finally);

          emit_post_landing_pad (&eh_seq, tf->region);
          gimple_seq_add_seq (&eh_seq, finally);
        }
    }
  else
    {
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
        {
          emit_post_landing_pad (&eh_seq, tf->region);

          x = gimple_build_goto (lab);
          gimple_set_location (x, gimple_location (tf->try_finally_expr));
          gimple_seq_add_stmt (&eh_seq, x);
        }
    }
}
/* A subroutine of lower_try_finally.  We have determined that there is
   exactly one destination of the finally block.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  gimple_eh_else eh_else;
  gimple_label label_stmt;
  gimple x;
  gimple_seq finally;
  gimple_stmt_iterator gsi;
  tree finally_label;
  location_t loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* Since there's only one destination, and the destination edge can only
     either be EH or non-EH, that implies that all of our incoming edges
     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      if (tf->may_throw)
        finally = gimple_eh_else_e_body (eh_else);
      else
        finally = gimple_eh_else_n_body (eh_else);
    }

  lower_eh_constructs_1 (state, &finally);

  for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
        {
          tree block = gimple_block (stmt);
          gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
          gimple_set_block (stmt, block);
        }
    }

  if (tf->may_throw)
    {
      /* Only reachable via the exception edge.  Add the given label to
         the head of the FINALLY block.  Append a RESX at the end.  */
      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, finally);
      emit_resx (&eh_seq, tf->region);
      return;
    }

  if (tf->may_fallthru)
    {
      /* Only reachable via the fallthru edge.  Do nothing but let
         the two blocks run together; we'll fall out the bottom.  */
      gimple_seq_add_seq (&tf->top_p_seq, finally);
      return;
    }

  finally_label = create_artificial_label (loc);
  label_stmt = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, label_stmt);

  gimple_seq_add_seq (&tf->top_p_seq, finally);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;

  if (tf->may_return)
    {
      /* Reachable by return expressions only.  Redirect them.  */
      for (; q < qe; ++q)
        do_return_redirection (q, finally_label, NULL);
      replace_goto_queue (tf);
    }
  else
    {
      /* Reachable by goto expressions only.  Redirect them.  */
      for (; q < qe; ++q)
        do_goto_redirection (q, finally_label, NULL, tf);
      replace_goto_queue (tf);

      if (tf->dest_array[0] == tf->fallthru_label)
        {
          /* Reachable by goto to fallthru label only.  Redirect it
             to the new label (already created, sadly), and do not
             emit the final branch out, or the fallthru label.  */
          tf->fallthru_label = NULL;
          return;
        }
    }

  /* Place the original return/goto to the original destination
     immediately after the finally block.  */
  x = tf->goto_queue[0].cont_stmt;
  gimple_seq_add_stmt (&tf->top_p_seq, x);
  maybe_record_in_goto_queue (state, x);
}
/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by duplicating the
   finally block for every destination.  */

static void
lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
{
  gimple_seq finally;
  gimple_seq new_stmt;
  gimple_seq seq;
  gimple x;
  gimple_eh_else eh_else;
  tree tmp;
  location_t tf_loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);

  /* Notice EH_ELSE, and simplify some of the remaining code
     by considering FINALLY to be the normal return path only.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    finally = gimple_eh_else_n_body (eh_else);

  tf->top_p_seq = gimple_try_eval (tf->top_p);
  new_stmt = NULL;

  if (tf->may_fallthru)
    {
      seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);
      gimple_seq_add_seq (&new_stmt, seq);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&new_stmt, x);
    }

  if (tf->may_throw)
    {
      /* We don't need to copy the EH path of EH_ELSE,
         since it is only emitted once.  */
      if (eh_else)
        seq = gimple_eh_else_e_body (eh_else);
      else
        seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);

      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, seq);
      emit_resx (&eh_seq, tf->region);
    }

  if (tf->goto_queue)
    {
      struct goto_queue_node *q, *qe;
      int return_index, index;
      struct labels_s
      {
        struct goto_queue_node *q;
        tree label;
      } *labels;

      return_index = tf->dest_array.length ();
      labels = XCNEWVEC (struct labels_s, return_index + 1);

      q = tf->goto_queue;
      qe = q + tf->goto_queue_active;
      for (; q < qe; q++)
        {
          index = q->index < 0 ? return_index : q->index;

          if (!labels[index].q)
            labels[index].q = q;
        }

      for (index = 0; index < return_index + 1; index++)
        {
          tree lab;

          q = labels[index].q;
          if (! q)
            continue;

          lab = labels[index].label
            = create_artificial_label (tf_loc);

          if (index == return_index)
            do_return_redirection (q, lab, NULL);
          else
            do_goto_redirection (q, lab, NULL, tf);

          x = gimple_build_label (lab);
          gimple_seq_add_stmt (&new_stmt, x);

          seq = lower_try_finally_dup_block (finally, state, q->location);
          lower_eh_constructs_1 (state, &seq);
          gimple_seq_add_seq (&new_stmt, seq);

          gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
          maybe_record_in_goto_queue (state, q->cont_stmt);
        }

      for (q = tf->goto_queue; q < qe; q++)
        {
          tree lab;

          index = q->index < 0 ? return_index : q->index;

          if (labels[index].q == q)
            continue;

          lab = labels[index].label;

          if (index == return_index)
            do_return_redirection (q, lab, NULL);
          else
            do_goto_redirection (q, lab, NULL, tf);
        }

      replace_goto_queue (tf);
      free (labels);
    }

  /* Need to link new stmts after running replace_goto_queue due
     to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
}
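
/* Illustrative result of the copy strategy, added for exposition: for a
   finally block F reached by a fallthru edge, one escaping goto L, and
   the exception edge, the output is approximately

	body;
	F; goto <fallthru_label>;	(copy for the fallthru edge)
	L': F; goto L;			(copy for the escaping goto)
	landing_pad:
	  F; resx;			(copy for the exception edge)

   one duplicate of F per destination, each followed by that
   destination's continuation statement.  */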
/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by instrumenting
   each incoming edge and creating a switch statement at the end of the
   finally block that branches to the appropriate destination.  */

static void
lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  tree finally_tmp, finally_label;
  int return_index, eh_index, fallthru_index;
  int nlabels, ndests, j, last_case_index;
  tree last_case;
  vec<tree> case_label_vec;
  gimple_seq switch_body = NULL;
  gimple x;
  gimple_eh_else eh_else;
  tree tmp;
  gimple switch_stmt;
  gimple_seq finally;
  hash_map<tree, gimple> *cont_map = NULL;
  /* The location of the TRY_FINALLY stmt.  */
  location_t tf_loc = gimple_location (tf->try_finally_expr);
  /* The location of the finally block.  */
  location_t finally_loc;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Mash the TRY block to the head of the chain.  */
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* The location of the finally is either the last stmt in the finally
     block or the location of the TRY_FINALLY itself.  */
  x = gimple_seq_last_stmt (finally);
  finally_loc = x ? gimple_location (x) : tf_loc;

  /* Prepare for switch statement generation.  */
  nlabels = tf->dest_array.length ();
  return_index = nlabels;
  eh_index = return_index + tf->may_return;
  fallthru_index = eh_index + (tf->may_throw && !eh_else);
  ndests = fallthru_index + tf->may_fallthru;

  finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
  finally_label = create_artificial_label (finally_loc);

  /* We use vec::quick_push on case_label_vec throughout this function,
     since we know the size in advance and allocate precisely as much
     space as needed.  */
  case_label_vec.create (ndests);
  last_case = NULL;
  last_case_index = 0;

  /* Begin inserting code for getting to the finally block.  Things
     are done in this order to correspond to the sequence the code is
     laid out.  */

  if (tf->may_fallthru)
    {
      x = gimple_build_assign (finally_tmp,
                               build_int_cst (integer_type_node,
                                              fallthru_index));
      gimple_seq_add_stmt (&tf->top_p_seq, x);

      tmp = build_int_cst (integer_type_node, fallthru_index);
      last_case = build_case_label (tmp, NULL,
                                    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&switch_body, x);
    }

  /* For EH_ELSE, emit the exception path (plus resx) now, then
     subsequently we only need consider the normal path.  */
  if (eh_else)
    {
      if (tf->may_throw)
        {
          finally = gimple_eh_else_e_body (eh_else);
          lower_eh_constructs_1 (state, &finally);

          emit_post_landing_pad (&eh_seq, tf->region);
          gimple_seq_add_seq (&eh_seq, finally);
          emit_resx (&eh_seq, tf->region);
        }

      finally = gimple_eh_else_n_body (eh_else);
    }
  else if (tf->may_throw)
    {
      emit_post_landing_pad (&eh_seq, tf->region);

      x = gimple_build_assign (finally_tmp,
                               build_int_cst (integer_type_node, eh_index));
      gimple_seq_add_stmt (&eh_seq, x);

      x = gimple_build_goto (finally_label);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&eh_seq, x);

      tmp = build_int_cst (integer_type_node, eh_index);
      last_case = build_case_label (tmp, NULL,
                                    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&eh_seq, x);
      emit_resx (&eh_seq, tf->region);
    }

  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  lower_eh_constructs_1 (state, &finally);
  gimple_seq_add_seq (&tf->top_p_seq, finally);

  /* Redirect each incoming goto edge.  */
  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  j = last_case_index + tf->may_return;
  /* Prepare the assignments to finally_tmp that are executed upon the
     entrance through a particular edge.  */
  for (; q < qe; ++q)
    {
      gimple_seq mod = NULL;
      int switch_id;
      unsigned int case_index;

      if (q->index < 0)
        {
          x = gimple_build_assign (finally_tmp,
                                   build_int_cst (integer_type_node,
                                                  return_index));
          gimple_seq_add_stmt (&mod, x);
          do_return_redirection (q, finally_label, mod);
          switch_id = return_index;
        }
      else
        {
          x = gimple_build_assign (finally_tmp,
                                   build_int_cst (integer_type_node, q->index));
          gimple_seq_add_stmt (&mod, x);
          do_goto_redirection (q, finally_label, mod, tf);
          switch_id = q->index;
        }

      case_index = j + q->index;
      if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
        {
          tree case_lab;
          tmp = build_int_cst (integer_type_node, switch_id);
          case_lab = build_case_label (tmp, NULL,
                                       create_artificial_label (tf_loc));
          /* We store the cont_stmt in the pointer map, so that we can recover
             it in the loop below.  */
          if (!cont_map)
            cont_map = new hash_map<tree, gimple>;
          cont_map->put (case_lab, q->cont_stmt);
          case_label_vec.quick_push (case_lab);
        }
    }
  for (j = last_case_index; j < last_case_index + nlabels; j++)
    {
      gimple cont_stmt;

      last_case = case_label_vec[j];

      gcc_assert (last_case);
      gcc_assert (cont_map);

      cont_stmt = *cont_map->get (last_case);

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);
      gimple_seq_add_stmt (&switch_body, cont_stmt);
      maybe_record_in_goto_queue (state, cont_stmt);
    }
  if (cont_map)
    delete cont_map;

  replace_goto_queue (tf);

  /* Make sure that the last case is the default label, as one is required.
     Then sort the labels, which is also required in GIMPLE.  */
  CASE_LOW (last_case) = NULL;
  tree tem = case_label_vec.pop ();
  gcc_assert (tem == last_case);
  sort_case_labels (case_label_vec);

  /* Build the switch statement, setting last_case to be the default
     label.  */
  switch_stmt = gimple_build_switch (finally_tmp, last_case,
                                     case_label_vec);
  gimple_set_location (switch_stmt, finally_loc);

  /* Need to link SWITCH_STMT after running replace_goto_queue
     due to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
  gimple_seq_add_seq (&tf->top_p_seq, switch_body);
}
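
/* Illustrative result of the switch strategy, added for exposition:
   each incoming edge stores a discriminator and jumps to a single copy
   of the finally block, approximately

	body;
	finally_tmp = <fallthru_index>; goto finally_label;
	landing_pad:
	  finally_tmp = <eh_index>; goto finally_label;
	finally_label:
	  F;
	  switch (finally_tmp)
	    {
	    case <fallthru_index>: goto <fallthru_label>;
	    case <eh_index>: resx;
	    default: <cont_stmt of the matching queued goto/return>;
	    }  */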
/* Decide whether or not we are going to duplicate the finally block.
   There are several considerations.

   First, if this is Java, then the finally block contains code
   written by the user.  It has line numbers associated with it,
   so duplicating the block means it's difficult to set a breakpoint.
   Since controlling code generation via -g is verboten, we simply
   never duplicate code without optimization.

   Second, we'd like to prevent egregious code growth.  One way to
   do this is to estimate the size of the finally block, multiply
   that by the number of copies we'd need to make, and compare against
   the estimate of the size of the switch machinery we'd have to add.  */

static bool
decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
{
  int f_estimate, sw_estimate;
  gimple_eh_else eh_else;

  /* If there's an EH_ELSE involved, the exception path is separate
     and really doesn't come into play for this computation.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      ndests -= may_throw;
      finally = gimple_eh_else_n_body (eh_else);
    }

  if (!optimize)
    {
      gimple_stmt_iterator gsi;

      if (ndests == 1)
        return true;

      for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          if (!is_gimple_debug (stmt) && !gimple_clobber_p (stmt))
            return false;
        }
      return true;
    }

  /* Finally estimate N times, plus N gotos.  */
  f_estimate = count_insns_seq (finally, &eni_size_weights);
  f_estimate = (f_estimate + 1) * ndests;

  /* Switch statement (cost 10), N variable assignments, N gotos.  */
  sw_estimate = 10 + 2 * ndests;

  /* Optimize for size clearly wants our best guess.  */
  if (optimize_function_for_size_p (cfun))
    return f_estimate < sw_estimate;

  /* ??? These numbers are completely made up so far.  */
  if (optimize > 1)
    return f_estimate < 100 || f_estimate < sw_estimate * 2;
  else
    return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
}
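
/* A worked example of the heuristic above (added commentary, not
   original text): a finally block of 5 insns with 3 destinations gives
   f_estimate = (5 + 1) * 3 = 18 and sw_estimate = 10 + 2 * 3 = 16.
   When optimizing for size, 18 < 16 fails, so the switch form is used;
   at -O2 the 18 < 100 test accepts copying instead.  */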
/* REG is the enclosing region for a possible cleanup region, or the region
   itself.  Returns TRUE if such a region would be unreachable.

   Cleanup regions within a must-not-throw region aren't actually reachable
   even if there are throwing stmts within them, because the personality
   routine will call terminate before unwinding.  */

static bool
cleanup_is_dead_in (eh_region reg)
{
  while (reg && reg->type == ERT_CLEANUP)
    reg = reg->outer;
  return (reg && reg->type == ERT_MUST_NOT_THROW);
}
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY node
   to a sequence of labels and blocks, plus the exception region trees
   that record all the magic.  This is complicated by the need to
   arrange for the FINALLY block to be executed on all exits.  */

static gimple_seq
lower_try_finally (struct leh_state *state, gimple_try tp)
{
  struct leh_tf_state this_tf;
  struct leh_state this_state;
  int ndests;
  gimple_seq old_eh_seq;

  /* Process the try block.  */

  memset (&this_tf, 0, sizeof (this_tf));
  this_tf.try_finally_expr = tp;
  this_tf.top_p = tp;
  this_tf.outer = state;
  if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
    {
      this_tf.region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_tf.region;
    }
  else
    {
      this_tf.region = NULL;
      this_state.cur_region = state->cur_region;
    }

  this_state.ehp_region = state->ehp_region;
  this_state.tf = &this_tf;

  old_eh_seq = eh_seq;
  eh_seq = NULL;

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  /* Determine if the try block is escaped through the bottom.  */
  this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));

  /* Determine if any exceptions are possible within the try block.  */
  if (this_tf.region)
    this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
  if (this_tf.may_throw)
    honor_protect_cleanup_actions (state, &this_state, &this_tf);

  /* Determine how many edges (still) reach the finally block.  Or rather,
     how many destinations are reached by the finally block.  Use this to
     determine how we process the finally block itself.  */

  ndests = this_tf.dest_array.length ();
  ndests += this_tf.may_fallthru;
  ndests += this_tf.may_return;
  ndests += this_tf.may_throw;

  /* If the FINALLY block is not reachable, dike it out.  */
  if (ndests == 0)
    {
      gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
      gimple_try_set_cleanup (tp, NULL);
    }
  /* If the finally block doesn't fall through, then any destination
     we might try to impose there isn't reached either.  There may be
     some minor amount of cleanup and redirection still needed.  */
  else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
    lower_try_finally_nofallthru (state, &this_tf);

  /* We can easily special-case redirection to a single destination.  */
  else if (ndests == 1)
    lower_try_finally_onedest (state, &this_tf);
  else if (decide_copy_try_finally (ndests, this_tf.may_throw,
                                    gimple_try_cleanup (tp)))
    lower_try_finally_copy (state, &this_tf);
  else
    lower_try_finally_switch (state, &this_tf);

  /* If someone requested we add a label at the end of the transformed
     block, do so.  */
  if (this_tf.fallthru_label)
    {
      /* This must be reached only if ndests == 0.  */
      gimple x = gimple_build_label (this_tf.fallthru_label);
      gimple_seq_add_stmt (&this_tf.top_p_seq, x);
    }

  this_tf.dest_array.release ();
  free (this_tf.goto_queue);
  if (this_tf.goto_queue_map)
    delete this_tf.goto_queue_map;

  /* If there was an old (aka outer) eh_seq, append the current eh_seq.
     If there was no old eh_seq, then the append is trivially already done.  */
  if (old_eh_seq)
    {
      if (eh_seq == NULL)
        eh_seq = old_eh_seq;
      else
        {
          gimple_seq new_eh_seq = eh_seq;
          eh_seq = old_eh_seq;
          gimple_seq_add_seq (&eh_seq, new_eh_seq);
        }
    }

  return this_tf.top_p_seq;
}
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
   list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
   exception region trees that records all the magic.  */

static gimple_seq
lower_catch (struct leh_state *state, gimple_try tp)
{
  eh_region try_region = NULL;
  struct leh_state this_state = *state;
  gimple_stmt_iterator gsi;
  tree out_label;
  gimple_seq new_seq, cleanup;
  gimple x;
  location_t try_catch_loc = gimple_location (tp);

  if (flag_exceptions)
    {
      try_region = gen_eh_region_try (state->cur_region);
      this_state.cur_region = try_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (!eh_region_may_contain_throw (try_region))
    return gimple_try_eval (tp);

  new_seq = NULL;
  emit_eh_dispatch (&new_seq, try_region);
  emit_resx (&new_seq, try_region);

  this_state.cur_region = state->cur_region;
  this_state.ehp_region = try_region;

  out_label = NULL;
  cleanup = gimple_try_cleanup (tp);
  for (gsi = gsi_start (cleanup);
       !gsi_end_p (gsi);
       gsi_next (&gsi))
    {
      eh_catch c;
      gimple_catch gcatch;
      gimple_seq handler;

      gcatch = as_a <gimple_catch> (gsi_stmt (gsi));
      c = gen_eh_region_catch (try_region, gimple_catch_types (gcatch));

      handler = gimple_catch_handler (gcatch);
      lower_eh_constructs_1 (&this_state, &handler);

      c->label = create_artificial_label (UNKNOWN_LOCATION);
      x = gimple_build_label (c->label);
      gimple_seq_add_stmt (&new_seq, x);

      gimple_seq_add_seq (&new_seq, handler);

      if (gimple_seq_may_fallthru (new_seq))
        {
          if (!out_label)
            out_label = create_artificial_label (try_catch_loc);

          x = gimple_build_goto (out_label);
          gimple_seq_add_stmt (&new_seq, x);
        }
      if (!c->type_list)
        break;
    }

  gimple_try_set_cleanup (tp, new_seq);

  return frob_into_branch_around (tp, try_region, out_label);
}
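
/* Illustrative shape of the lowering performed here, added for
   exposition:

	body;
	goto over;
	landing_pad:
	  eh_dispatch <try_region>;	(branch to the matching handler)
	  resx;				(no handler matched: rethrow)
	<L1>: handler1; goto over;
	<L2>: handler2; goto over;
	over:;  */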
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with a
   GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
   region trees that record all the magic.  */

static gimple_seq
lower_eh_filter (struct leh_state *state, gimple_try tp)
{
  struct leh_state this_state = *state;
  eh_region this_region = NULL;
  gimple inner, x;
  gimple_seq new_seq = NULL;

  inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));

  if (flag_exceptions)
    {
      this_region = gen_eh_region_allowed (state->cur_region,
                                           gimple_eh_filter_types (inner));
      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (!eh_region_may_contain_throw (this_region))
    return gimple_try_eval (tp);

  this_state.cur_region = state->cur_region;
  this_state.ehp_region = this_region;

  emit_eh_dispatch (&new_seq, this_region);
  emit_resx (&new_seq, this_region);

  this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
  x = gimple_build_label (this_region->u.allowed.label);
  gimple_seq_add_stmt (&new_seq, x);

  lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
  gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));

  gimple_try_set_cleanup (tp, new_seq);

  return frob_into_branch_around (tp, this_region, NULL);
}
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
   a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
   plus the exception region trees that record all the magic.  */

static gimple_seq
lower_eh_must_not_throw (struct leh_state *state, gimple_try tp)
{
  struct leh_state this_state = *state;

  if (flag_exceptions)
    {
      gimple inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
      eh_region this_region;

      this_region = gen_eh_region_must_not_throw (state->cur_region);
      this_region->u.must_not_throw.failure_decl
        = gimple_eh_must_not_throw_fndecl (
            as_a <gimple_eh_must_not_throw> (inner));
      this_region->u.must_not_throw.failure_loc
        = LOCATION_LOCUS (gimple_location (tp));

      /* In order to get mangling applied to this decl, we must mark it
         used now.  Otherwise, pass_ipa_free_lang_data won't think it
         needs to happen.  */
      TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;

      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  return gimple_try_eval (tp);
}
/* Implement a cleanup expression.  This is similar to try-finally,
   except that we only execute the cleanup block for exception edges.  */

static gimple_seq
lower_cleanup (struct leh_state *state, gimple_try tp)
{
  struct leh_state this_state = *state;
  eh_region this_region = NULL;
  struct leh_tf_state fake_tf;
  gimple_seq result;
  bool cleanup_dead = cleanup_is_dead_in (state->cur_region);

  if (flag_exceptions && !cleanup_dead)
    {
      this_region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (cleanup_dead || !eh_region_may_contain_throw (this_region))
    return gimple_try_eval (tp);

  /* Build enough of a try-finally state so that we can reuse
     honor_protect_cleanup_actions.  */
  memset (&fake_tf, 0, sizeof (fake_tf));
  fake_tf.top_p = fake_tf.try_finally_expr = tp;
  fake_tf.outer = state;
  fake_tf.region = this_region;
  fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
  fake_tf.may_throw = true;

  honor_protect_cleanup_actions (state, NULL, &fake_tf);

  if (fake_tf.may_throw)
    {
      /* In this case honor_protect_cleanup_actions had nothing to do,
         and we should process this normally.  */
      lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
      result = frob_into_branch_around (tp, this_region,
                                        fake_tf.fallthru_label);
    }
  else
    {
      /* In this case honor_protect_cleanup_actions did nearly all of
         the work.  All we have left is to append the fallthru_label.  */
      result = gimple_try_eval (tp);
      if (fake_tf.fallthru_label)
        {
          gimple x = gimple_build_label (fake_tf.fallthru_label);
          gimple_seq_add_stmt (&result, x);
        }
    }
  return result;
}
/* Main loop for lowering eh constructs.  Also moves gsi to the next
   statement.  */

static void
lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
{
  gimple_seq replace;
  gimple x;
  gimple stmt = gsi_stmt (*gsi);

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
        tree fndecl = gimple_call_fndecl (stmt);
        tree rhs, lhs;

        if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
          switch (DECL_FUNCTION_CODE (fndecl))
            {
            case BUILT_IN_EH_POINTER:
              /* The front end may have generated a call to
                 __builtin_eh_pointer (0) within a catch region.  Replace
                 this zero argument with the current catch region number.  */
              if (state->ehp_region)
                {
                  tree nr = build_int_cst (integer_type_node,
                                           state->ehp_region->index);
                  gimple_call_set_arg (stmt, 0, nr);
                }
              else
                {
                  /* The user has done something silly.  Remove it.  */
                  rhs = null_pointer_node;
                  goto do_replace;
                }
              break;

            case BUILT_IN_EH_FILTER:
              /* ??? This should never appear, but since it's a builtin it
                 is accessible to abuse by users.  Just remove it and
                 replace the use with the arbitrary value zero.  */
              rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
            do_replace:
              lhs = gimple_call_lhs (stmt);
              x = gimple_build_assign (lhs, rhs);
              gsi_insert_before (gsi, x, GSI_SAME_STMT);
              /* FALLTHRU */

            case BUILT_IN_EH_COPY_VALUES:
              /* Likewise this should not appear.  Remove it.  */
              gsi_remove (gsi, true);
              return;

            default:
              break;
            }
      }
      /* FALLTHRU */

    case GIMPLE_ASSIGN:
      /* If the stmt can throw use a new temporary for the assignment
         to a LHS.  This makes sure the old value of the LHS is
         available on the EH edge.  Only do so for statements that
         potentially fall through (no noreturn calls e.g.), otherwise
         this new assignment might create fake fallthru regions.  */
      if (stmt_could_throw_p (stmt)
          && gimple_has_lhs (stmt)
          && gimple_stmt_may_fallthru (stmt)
          && !tree_could_throw_p (gimple_get_lhs (stmt))
          && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
        {
          tree lhs = gimple_get_lhs (stmt);
          tree tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
          gimple s = gimple_build_assign (lhs, tmp);
          gimple_set_location (s, gimple_location (stmt));
          gimple_set_block (s, gimple_block (stmt));
          gimple_set_lhs (stmt, tmp);
          if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
              || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
            DECL_GIMPLE_REG_P (tmp) = 1;
          gsi_insert_after (gsi, s, GSI_SAME_STMT);
        }
      /* Look for things that can throw exceptions, and record them.  */
      if (state->cur_region && stmt_could_throw_p (stmt))
        {
          record_stmt_eh_region (state->cur_region, stmt);
          note_eh_region_may_contain_throw (state->cur_region);
        }
      break;

    case GIMPLE_COND:
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      maybe_record_in_goto_queue (state, stmt);
      break;

    case GIMPLE_SWITCH:
      verify_norecord_switch_expr (state, as_a <gimple_switch> (stmt));
      break;

    case GIMPLE_TRY:
      {
        gimple_try try_stmt = as_a <gimple_try> (stmt);
        if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
          replace = lower_try_finally (state, try_stmt);
        else
          {
            x = gimple_seq_first_stmt (gimple_try_cleanup (try_stmt));
            if (!x)
              {
                replace = gimple_try_eval (try_stmt);
                lower_eh_constructs_1 (state, &replace);
              }
            else
              switch (gimple_code (x))
                {
                case GIMPLE_CATCH:
                  replace = lower_catch (state, try_stmt);
                  break;
                case GIMPLE_EH_FILTER:
                  replace = lower_eh_filter (state, try_stmt);
                  break;
                case GIMPLE_EH_MUST_NOT_THROW:
                  replace = lower_eh_must_not_throw (state, try_stmt);
                  break;
                case GIMPLE_EH_ELSE:
                  /* This code is only valid with GIMPLE_TRY_FINALLY.  */
                  gcc_unreachable ();
                default:
                  replace = lower_cleanup (state, try_stmt);
                  break;
                }
          }
      }

      /* Remove the old stmt and insert the transformed sequence
         instead.  */
      gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
      gsi_remove (gsi, true);

      /* Return since we don't want gsi_next ().  */
      return;

    case GIMPLE_EH_ELSE:
      /* We should be eliminating this in lower_try_finally et al.  */
      gcc_unreachable ();

    default:
      /* A type, a decl, or some kind of statement that we're not
         interested in.  Don't walk them.  */
      break;
    }

  gsi_next (gsi);
}
/* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2.  */

static void
lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
{
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
    lower_eh_constructs_2 (state, &gsi);
}
namespace {

const pass_data pass_data_lower_eh =
{
  GIMPLE_PASS, /* type */
  "eh", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  PROP_gimple_leh, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
class pass_lower_eh : public gimple_opt_pass
{
public:
  pass_lower_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_eh, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_lower_eh
unsigned int
pass_lower_eh::execute (function *fun)
{
  struct leh_state null_state;
  gimple_seq bodyp;

  bodyp = gimple_body (current_function_decl);
  if (bodyp == NULL)
    return 0;

  finally_tree = new hash_table<finally_tree_hasher> (31);
  eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
  memset (&null_state, 0, sizeof (null_state));

  collect_finally_tree_1 (bodyp, NULL);
  lower_eh_constructs_1 (&null_state, &bodyp);
  gimple_set_body (current_function_decl, bodyp);

  /* We assume there's a return statement, or something, at the end of
     the function, and thus plopping the EH sequence afterward won't
     change anything.  */
  gcc_assert (!gimple_seq_may_fallthru (bodyp));
  gimple_seq_add_seq (&bodyp, eh_seq);

  /* We assume that since BODYP already existed, adding EH_SEQ to it
     didn't change its value, and we don't have to re-set the function.  */
  gcc_assert (bodyp == gimple_body (current_function_decl));

  delete finally_tree;
  finally_tree = NULL;
  BITMAP_FREE (eh_region_may_contain_throw_map);
  eh_seq = NULL;

  /* If this function needs a language specific EH personality routine
     and the frontend didn't already set one do so now.  */
  if (function_needs_eh_personality (fun) == eh_personality_lang
      && !DECL_FUNCTION_PERSONALITY (current_function_decl))
    DECL_FUNCTION_PERSONALITY (current_function_decl)
      = lang_hooks.eh_personality ();

  return 0;
}

} // anon namespace
2196 make_pass_lower_eh (gcc::context
*ctxt
)
2198 return new pass_lower_eh (ctxt
);
/* Create the multiple edges from an EH_DISPATCH statement to all of
   the possible handlers for its EH region.  Return true if there's
   no fallthru edge; false if there is.  */

bool
make_eh_dispatch_edges (gimple_eh_dispatch stmt)
{
  eh_region r;
  eh_catch c;
  basic_block src, dst;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  src = gimple_bb (stmt);

  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	{
	  dst = label_to_block (c->label);
	  make_edge (src, dst, 0);

	  /* A catch-all handler doesn't have a fallthru.  */
	  if (c->type_list == NULL)
	    return false;
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      dst = label_to_block (r->u.allowed.label);
      make_edge (src, dst, 0);
      break;

    default:
      gcc_unreachable ();
    }

  return true;
}
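/* As an illustrative sketch (hypothetical labels, not from the sources):
   for

       try { ... } catch (A) { L1: ... } catch (...) { L2: ... }

   the ERT_TRY case above creates one edge from the EH_DISPATCH block to
   each of L1 and L2, and because the final handler is a catch-all
   (type_list == NULL) the function reports that no fallthru edge is
   needed.  */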
/* Create the single EH edge from STMT to its nearest landing pad,
   if there is such a landing pad within the current function.  */

void
make_eh_edges (gimple stmt)
{
  basic_block src, dst;
  eh_landing_pad lp;
  int lp_nr;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr <= 0)
    return;

  lp = get_eh_landing_pad_from_number (lp_nr);
  gcc_assert (lp != NULL);

  src = gimple_bb (stmt);
  dst = label_to_block (lp->post_landing_pad);
  make_edge (src, dst, EDGE_EH);
}
/* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
   do not actually perform the final edge redirection.

   CHANGE_REGION is true when we're being called from cleanup_empty_eh and
   we intend to change the destination EH region as well; this means
   EH_LANDING_PAD_NR must already be set on the destination block label.
   If false, we're being called from generic cfg manipulation code and we
   should preserve our place within the region tree.  */

static void
redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
{
  eh_landing_pad old_lp, new_lp;
  basic_block old_bb;
  gimple throw_stmt;
  int old_lp_nr, new_lp_nr;
  tree old_label, new_label;
  edge_iterator ei;
  edge e;

  old_bb = edge_in->dest;
  old_label = gimple_block_label (old_bb);
  old_lp_nr = EH_LANDING_PAD_NR (old_label);
  gcc_assert (old_lp_nr > 0);
  old_lp = get_eh_landing_pad_from_number (old_lp_nr);

  throw_stmt = last_stmt (edge_in->src);
  gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);

  new_label = gimple_block_label (new_bb);

  /* Look for an existing region that might be using NEW_BB already.  */
  new_lp_nr = EH_LANDING_PAD_NR (new_label);
  if (new_lp_nr)
    {
      new_lp = get_eh_landing_pad_from_number (new_lp_nr);
      gcc_assert (new_lp);

      /* Unless CHANGE_REGION is true, the new and old landing pad
	 had better be associated with the same EH region.  */
      gcc_assert (change_region || new_lp->region == old_lp->region);
    }
  else
    {
      new_lp = NULL;
      gcc_assert (!change_region);
    }

  /* Notice when we redirect the last EH edge away from OLD_BB.  */
  FOR_EACH_EDGE (e, ei, old_bb->preds)
    if (e != edge_in && (e->flags & EDGE_EH))
      break;

  if (new_lp)
    {
      /* NEW_LP already exists.  If there are still edges into OLD_LP,
	 there's nothing to do with the EH tree.  If there are no more
	 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
	 If CHANGE_REGION is true, then our caller is expecting to remove
	 the landing pad.  */
      if (e == NULL && !change_region)
	remove_eh_landing_pad (old_lp);
    }
  else
    {
      /* No correct landing pad exists.  If there are no more edges
	 into OLD_LP, then we can simply re-use the existing landing pad.
	 Otherwise, we have to create a new landing pad.  */
      if (e == NULL)
	{
	  EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
	  new_lp = old_lp;
	}
      else
	new_lp = gen_eh_landing_pad (old_lp->region);
      new_lp->post_landing_pad = new_label;
      EH_LANDING_PAD_NR (new_label) = new_lp->index;
    }

  /* Maybe move the throwing statement to the new region.  */
  if (old_lp != new_lp)
    {
      remove_stmt_from_eh_lp (throw_stmt);
      add_stmt_to_eh_lp (throw_stmt, new_lp->index);
    }
}
/* Redirect EH edge E to NEW_BB.  */

edge
redirect_eh_edge (edge edge_in, basic_block new_bb)
{
  redirect_eh_edge_1 (edge_in, new_bb, false);
  return ssa_redirect_edge (edge_in, new_bb);
}
/* This is a subroutine of gimple_redirect_edge_and_branch.  Update the
   labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
   The actual edge update will happen in the caller.  */

void
redirect_eh_dispatch_edge (gimple_eh_dispatch stmt, edge e, basic_block new_bb)
{
  tree new_lab = gimple_block_label (new_bb);
  bool any_changed = false;
  basic_block old_bb;
  eh_region r;
  eh_catch c;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	{
	  old_bb = label_to_block (c->label);
	  if (old_bb == e->dest)
	    {
	      c->label = new_lab;
	      any_changed = true;
	    }
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      old_bb = label_to_block (r->u.allowed.label);
      gcc_assert (old_bb == e->dest);
      r->u.allowed.label = new_lab;
      any_changed = true;
      break;

    default:
      gcc_unreachable ();
    }

  gcc_assert (any_changed);
}
/* Helper function for operation_could_trap_p and stmt_could_throw_p.  */

static bool
operation_could_trap_helper_p (enum tree_code op,
			       bool fp_operation,
			       bool honor_trapv,
			       bool honor_nans,
			       bool honor_snans,
			       tree divisor,
			       bool *handled)
{
  *handled = true;
  switch (op)
    {
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case TRUNC_MOD_EXPR:
    case RDIV_EXPR:
      if (honor_snans || honor_trapv)
	return true;
      if (fp_operation)
	return flag_trapping_math;
      if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
	return true;
      return false;

    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
    case LTGT_EXPR:
      /* Some floating point comparisons may trap.  */
      return honor_nans;

    case EQ_EXPR:
    case NE_EXPR:
    case UNORDERED_EXPR:
    case ORDERED_EXPR:
    case UNLT_EXPR:
    case UNLE_EXPR:
    case UNGT_EXPR:
    case UNGE_EXPR:
    case UNEQ_EXPR:
      return honor_snans;

    case FIX_TRUNC_EXPR:
      /* Conversion of floating point might trap.  */
      return honor_nans;

    case NEGATE_EXPR:
    case ABS_EXPR:
    case CONJ_EXPR:
      /* These operations don't trap with floating point.  */
      if (honor_trapv)
	return true;
      return false;

    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
      /* Any floating arithmetic may trap.  */
      if (fp_operation && flag_trapping_math)
	return true;
      if (honor_trapv)
	return true;
      return false;

    case COMPLEX_EXPR:
    case CONSTRUCTOR:
      /* Constructing an object cannot trap.  */
      return false;

    default:
      /* Any floating arithmetic may trap.  */
      if (fp_operation && flag_trapping_math)
	return true;

      *handled = false;
      return false;
    }
}
/* Return true if operation OP may trap.  FP_OPERATION is true if OP is applied
   on floating-point values.  HONOR_TRAPV is true if OP is applied on integer
   type operands that may trap.  If OP is a division operator, DIVISOR contains
   the value of the divisor.  */

bool
operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
			tree divisor)
{
  bool honor_nans = (fp_operation && flag_trapping_math
		     && !flag_finite_math_only);
  bool honor_snans = fp_operation && flag_signaling_nans != 0;
  bool handled;

  if (TREE_CODE_CLASS (op) != tcc_comparison
      && TREE_CODE_CLASS (op) != tcc_unary
      && TREE_CODE_CLASS (op) != tcc_binary)
    return false;

  return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
					honor_nans, honor_snans, divisor,
					&handled);
}
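/* E.g. (a sketch): with -ftrapping-math in effect,
   operation_could_trap_p (RDIV_EXPR, true, false, x) is true for any
   floating-point divisor X, while for an integer TRUNC_DIV_EXPR it is
   true exactly when X is not a constant known to be nonzero.  */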
/* Returns true if it is possible to prove that the index of
   an array access REF (an ARRAY_REF expression) falls into the
   array bounds.  */

static bool
in_array_bounds_p (tree ref)
{
  tree idx = TREE_OPERAND (ref, 1);
  tree min, max;

  if (TREE_CODE (idx) != INTEGER_CST)
    return false;

  min = array_ref_low_bound (ref);
  max = array_ref_up_bound (ref);
  if (!min
      || !max
      || TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    return false;

  if (tree_int_cst_lt (idx, min)
      || tree_int_cst_lt (max, idx))
    return false;

  return true;
}
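/* For example (a sketch): given "int a[10]", the access a[4] has a
   constant index within the constant bounds [0, 9] and is provably in
   bounds, whereas a[i] with a variable index, or an access to an array
   with non-constant bounds, cannot be proven and the function returns
   false.  */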
/* Returns true if it is possible to prove that the range of
   an array access REF (an ARRAY_RANGE_REF expression) falls
   into the array bounds.  */

static bool
range_in_array_bounds_p (tree ref)
{
  tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
  tree range_min, range_max, min, max;

  range_min = TYPE_MIN_VALUE (domain_type);
  range_max = TYPE_MAX_VALUE (domain_type);
  if (!range_min
      || !range_max
      || TREE_CODE (range_min) != INTEGER_CST
      || TREE_CODE (range_max) != INTEGER_CST)
    return false;

  min = array_ref_low_bound (ref);
  max = array_ref_up_bound (ref);
  if (!min
      || !max
      || TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    return false;

  if (tree_int_cst_lt (range_min, min)
      || tree_int_cst_lt (max, range_max))
    return false;

  return true;
}
/* Return true if EXPR can trap, as in dereferencing an invalid pointer
   location or floating point arithmetic.  C.f. the rtl version, may_trap_p.
   This routine expects only GIMPLE lhs or rhs input.  */

bool
tree_could_trap_p (tree expr)
{
  enum tree_code code;
  bool fp_operation = false;
  bool honor_trapv = false;
  tree t, base, div = NULL_TREE;

  if (!expr)
    return false;

  code = TREE_CODE (expr);
  t = TREE_TYPE (expr);

  if (t)
    {
      if (COMPARISON_CLASS_P (expr))
	fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
      else
	fp_operation = FLOAT_TYPE_P (t);
      honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
    }

  if (TREE_CODE_CLASS (code) == tcc_binary)
    div = TREE_OPERAND (expr, 1);
  if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
    return true;

 restart:
  switch (code)
    {
    case COMPONENT_REF:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case BIT_FIELD_REF:
    case VIEW_CONVERT_EXPR:
    case WITH_SIZE_EXPR:
      expr = TREE_OPERAND (expr, 0);
      code = TREE_CODE (expr);
      goto restart;

    case ARRAY_RANGE_REF:
      base = TREE_OPERAND (expr, 0);
      if (tree_could_trap_p (base))
	return true;
      if (TREE_THIS_NOTRAP (expr))
	return false;
      return !range_in_array_bounds_p (expr);

    case ARRAY_REF:
      base = TREE_OPERAND (expr, 0);
      if (tree_could_trap_p (base))
	return true;
      if (TREE_THIS_NOTRAP (expr))
	return false;
      return !in_array_bounds_p (expr);

    case TARGET_MEM_REF:
    case MEM_REF:
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
	  && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
	return true;
      if (TREE_THIS_NOTRAP (expr))
	return false;
      /* We cannot prove that the access is in-bounds when we have
	 variable-index TARGET_MEM_REFs.  */
      if (code == TARGET_MEM_REF
	  && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
	return true;
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
	{
	  tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
	  offset_int off = mem_ref_offset (expr);
	  if (wi::neg_p (off, SIGNED))
	    return true;
	  if (TREE_CODE (base) == STRING_CST)
	    return wi::leu_p (TREE_STRING_LENGTH (base), off);
	  else if (DECL_SIZE_UNIT (base) == NULL_TREE
		   || TREE_CODE (DECL_SIZE_UNIT (base)) != INTEGER_CST
		   || wi::leu_p (wi::to_offset (DECL_SIZE_UNIT (base)), off))
	    return true;
	  /* Now we are sure the first byte of the access is inside
	     the object.  */
	  return false;
	}
      return true;

    case INDIRECT_REF:
      return !TREE_THIS_NOTRAP (expr);

    case ASM_EXPR:
      return TREE_THIS_VOLATILE (expr);

    case CALL_EXPR:
      t = get_callee_fndecl (expr);
      /* Assume that calls to weak functions may trap.  */
      if (!t || !DECL_P (t))
	return true;
      if (DECL_WEAK (t))
	return tree_could_trap_p (t);
      return false;

    case FUNCTION_DECL:
      /* Assume that accesses to weak functions may trap, unless we know
	 they are certainly defined in current TU or in some other
	 LTO partition.  */
      if (DECL_WEAK (expr) && !DECL_COMDAT (expr))
	{
	  struct cgraph_node *node;
	  if (!DECL_EXTERNAL (expr))
	    return false;
	  node = cgraph_node::get (expr)->function_symbol ();
	  if (node && node->in_other_partition)
	    return false;
	  return true;
	}
      return false;

    case VAR_DECL:
      /* Assume that accesses to weak vars may trap, unless we know
	 they are certainly defined in current TU or in some other
	 LTO partition.  */
      if (DECL_WEAK (expr) && !DECL_COMDAT (expr))
	{
	  varpool_node *node;
	  if (!DECL_EXTERNAL (expr))
	    return false;
	  node = varpool_node::get (expr)->ultimate_alias_target ();
	  if (node && node->in_other_partition)
	    return false;
	  return true;
	}
      return false;

    default:
      return false;
    }
}
/* Helper for stmt_could_throw_p.  Return true if STMT (assumed to be
   an assignment or a conditional) may throw.  */

static bool
stmt_could_throw_1_p (gimple stmt)
{
  enum tree_code code = gimple_expr_code (stmt);
  bool honor_nans = false;
  bool honor_snans = false;
  bool fp_operation = false;
  bool honor_trapv = false;
  tree t;
  size_t i;
  bool handled, ret;

  if (TREE_CODE_CLASS (code) == tcc_comparison
      || TREE_CODE_CLASS (code) == tcc_unary
      || TREE_CODE_CLASS (code) == tcc_binary)
    {
      if (is_gimple_assign (stmt)
	  && TREE_CODE_CLASS (code) == tcc_comparison)
	t = TREE_TYPE (gimple_assign_rhs1 (stmt));
      else if (gimple_code (stmt) == GIMPLE_COND)
	t = TREE_TYPE (gimple_cond_lhs (stmt));
      else
	t = gimple_expr_type (stmt);
      fp_operation = FLOAT_TYPE_P (t);
      if (fp_operation)
	{
	  honor_nans = flag_trapping_math && !flag_finite_math_only;
	  honor_snans = flag_signaling_nans != 0;
	}
      else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
	honor_trapv = true;
    }

  /* Check if the main expression may trap.  */
  t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
  ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
				       honor_nans, honor_snans, t,
				       &handled);
  if (handled)
    return ret;

  /* If the expression does not trap, see if any of the individual operands may
     trap.  */
  for (i = 0; i < gimple_num_ops (stmt); i++)
    if (tree_could_trap_p (gimple_op (stmt, i)))
      return true;

  return false;
}
/* Return true if statement STMT could throw an exception.  */

bool
stmt_could_throw_p (gimple stmt)
{
  if (!flag_exceptions)
    return false;

  /* The only statements that can throw an exception are assignments,
     conditionals, calls, resx, and asms.  */
  switch (gimple_code (stmt))
    {
    case GIMPLE_RESX:
      return true;

    case GIMPLE_CALL:
      return !gimple_call_nothrow_p (stmt);

    case GIMPLE_ASSIGN:
    case GIMPLE_COND:
      if (!cfun->can_throw_non_call_exceptions)
	return false;
      return stmt_could_throw_1_p (stmt);

    case GIMPLE_ASM:
      if (!cfun->can_throw_non_call_exceptions)
	return false;
      return gimple_asm_volatile_p (as_a <gimple_asm> (stmt));

    default:
      return false;
    }
}
/* Return true if expression T could throw an exception.  */

bool
tree_could_throw_p (tree t)
{
  if (!flag_exceptions)
    return false;
  if (TREE_CODE (t) == MODIFY_EXPR)
    {
      if (cfun->can_throw_non_call_exceptions
	  && tree_could_trap_p (TREE_OPERAND (t, 0)))
	return true;
      t = TREE_OPERAND (t, 1);
    }

  if (TREE_CODE (t) == WITH_SIZE_EXPR)
    t = TREE_OPERAND (t, 0);
  if (TREE_CODE (t) == CALL_EXPR)
    return (call_expr_flags (t) & ECF_NOTHROW) == 0;
  if (cfun->can_throw_non_call_exceptions)
    return tree_could_trap_p (t);
  return false;
}
/* Return true if STMT can throw an exception that is not caught within
   the current function (CFUN).  */

bool
stmt_can_throw_external (gimple stmt)
{
  int lp_nr;

  if (!stmt_could_throw_p (stmt))
    return false;

  lp_nr = lookup_stmt_eh_lp (stmt);
  return lp_nr == 0;
}

/* Return true if STMT can throw an exception that is caught within
   the current function (CFUN).  */

bool
stmt_can_throw_internal (gimple stmt)
{
  int lp_nr;

  if (!stmt_could_throw_p (stmt))
    return false;

  lp_nr = lookup_stmt_eh_lp (stmt);
  return lp_nr > 0;
}
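/* For example (a sketch): for a call statement recorded against landing
   pad 2, stmt_can_throw_internal is true and stmt_can_throw_external is
   false; with no EH table entry (lp_nr == 0) the reverse holds, and a
   negative lp_nr (a MUST_NOT_THROW region) makes both return false.  */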
/* Given a statement STMT in IFUN, if STMT can no longer throw, then
   remove any entry it might have from the EH table.  Return true if
   any change was made.  */

bool
maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
{
  if (stmt_could_throw_p (stmt))
    return false;
  return remove_stmt_from_eh_lp_fn (ifun, stmt);
}

/* Likewise, but always use the current function.  */

bool
maybe_clean_eh_stmt (gimple stmt)
{
  return maybe_clean_eh_stmt_fn (cfun, stmt);
}
/* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
   OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
   in the table if it should be in there.  Return TRUE if a replacement was
   done that may require an EH edge purge.  */

bool
maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
{
  int lp_nr = lookup_stmt_eh_lp (old_stmt);

  if (lp_nr != 0)
    {
      bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);

      if (new_stmt == old_stmt && new_stmt_could_throw)
	return false;

      remove_stmt_from_eh_lp (old_stmt);
      if (new_stmt_could_throw)
	{
	  add_stmt_to_eh_lp (new_stmt, lp_nr);
	  return false;
	}
      else
	return true;
    }

  return false;
}
/* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
   in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT.  The MAP
   operand is the return value of duplicate_eh_regions.  */

bool
maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
			    struct function *old_fun, gimple old_stmt,
			    hash_map<void *, void *> *map,
			    int default_lp_nr)
{
  int old_lp_nr, new_lp_nr;

  if (!stmt_could_throw_p (new_stmt))
    return false;

  old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
  if (old_lp_nr == 0)
    {
      if (default_lp_nr == 0)
	return false;
      new_lp_nr = default_lp_nr;
    }
  else if (old_lp_nr > 0)
    {
      eh_landing_pad old_lp, new_lp;

      old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
      new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
      new_lp_nr = new_lp->index;
    }
  else
    {
      eh_region old_r, new_r;

      old_r = (*old_fun->eh->region_array)[-old_lp_nr];
      new_r = static_cast<eh_region> (*map->get (old_r));
      new_lp_nr = -new_r->index;
    }

  add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
  return true;
}
/* Similar, but both OLD_STMT and NEW_STMT are within the current function,
   and thus no remapping is required.  */

bool
maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
{
  int lp_nr;

  if (!stmt_could_throw_p (new_stmt))
    return false;

  lp_nr = lookup_stmt_eh_lp (old_stmt);
  if (lp_nr == 0)
    return false;

  add_stmt_to_eh_lp (new_stmt, lp_nr);
  return true;
}
/* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
   GIMPLE_TRY) that are similar enough to be considered the same.  Currently
   this only handles handlers consisting of a single call, as that's the
   important case for C++: a destructor call for a particular object showing
   up in multiple handlers.  */

static bool
same_handler_p (gimple_seq oneh, gimple_seq twoh)
{
  gimple_stmt_iterator gsi;
  gimple ones, twos;
  unsigned int ai;

  gsi = gsi_start (oneh);
  if (!gsi_one_before_end_p (gsi))
    return false;
  ones = gsi_stmt (gsi);

  gsi = gsi_start (twoh);
  if (!gsi_one_before_end_p (gsi))
    return false;
  twos = gsi_stmt (gsi);

  if (!is_gimple_call (ones)
      || !is_gimple_call (twos)
      || gimple_call_lhs (ones)
      || gimple_call_lhs (twos)
      || gimple_call_chain (ones)
      || gimple_call_chain (twos)
      || !gimple_call_same_target_p (ones, twos)
      || gimple_call_num_args (ones) != gimple_call_num_args (twos))
    return false;

  for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
    if (!operand_equal_p (gimple_call_arg (ones, ai),
			  gimple_call_arg (twos, ai), 0))
      return false;

  return true;
}
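/* E.g. (a sketch): two cleanup sequences each consisting solely of the
   call "~A (&a);" with identical arguments compare equal here, which is
   what allows optimize_double_finally below to merge the duplicated
   destructor call.  */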
    try { A() } finally { try { ~B() } catch { ~A() } }
    try { ... } finally { ~A() }
   into
    try { A() } catch { ~B() }
    try { ~B() ... } finally { ~A() }

   This occurs frequently in C++, where A is a local variable and B is a
   temporary used in the initializer for A.  */

static void
optimize_double_finally (gimple one, gimple two)
{
  gimple oneh;
  gimple_stmt_iterator gsi;
  gimple_seq cleanup;

  cleanup = gimple_try_cleanup (one);
  gsi = gsi_start (cleanup);
  if (!gsi_one_before_end_p (gsi))
    return;

  oneh = gsi_stmt (gsi);
  if (gimple_code (oneh) != GIMPLE_TRY
      || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
    return;

  if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
    {
      gimple_seq seq = gimple_try_eval (oneh);

      gimple_try_set_cleanup (one, seq);
      gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
      seq = copy_gimple_seq_and_replace_locals (seq);
      gimple_seq_add_seq (&seq, gimple_try_eval (two));
      gimple_try_set_eval (two, seq);
    }
}
/* Perform EH refactoring optimizations that are simpler to do when code
   flow has been lowered but EH structures haven't.  */

static void
refactor_eh_r (gimple_seq seq)
{
  gimple_stmt_iterator gsi;
  gimple one, two;

  one = NULL;
  two = NULL;
  gsi = gsi_start (seq);
  while (1)
    {
      one = two;
      if (gsi_end_p (gsi))
	two = NULL;
      else
	two = gsi_stmt (gsi);
      if (one
	  && two
	  && gimple_code (one) == GIMPLE_TRY
	  && gimple_code (two) == GIMPLE_TRY
	  && gimple_try_kind (one) == GIMPLE_TRY_FINALLY
	  && gimple_try_kind (two) == GIMPLE_TRY_FINALLY)
	optimize_double_finally (one, two);
      if (one)
	switch (gimple_code (one))
	  {
	  case GIMPLE_TRY:
	    refactor_eh_r (gimple_try_eval (one));
	    refactor_eh_r (gimple_try_cleanup (one));
	    break;
	  case GIMPLE_CATCH:
	    refactor_eh_r (gimple_catch_handler (as_a <gimple_catch> (one)));
	    break;
	  case GIMPLE_EH_FILTER:
	    refactor_eh_r (gimple_eh_filter_failure (one));
	    break;
	  case GIMPLE_EH_ELSE:
	    {
	      gimple_eh_else eh_else_stmt = as_a <gimple_eh_else> (one);
	      refactor_eh_r (gimple_eh_else_n_body (eh_else_stmt));
	      refactor_eh_r (gimple_eh_else_e_body (eh_else_stmt));
	    }
	    break;
	  default:
	    break;
	  }
      if (two)
	gsi_next (&gsi);
      else
	break;
    }
}
namespace {

const pass_data pass_data_refactor_eh =
{
  GIMPLE_PASS, /* type */
  "ehopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_refactor_eh : public gimple_opt_pass
{
public:
  pass_refactor_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_refactor_eh, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_exceptions != 0; }
  virtual unsigned int execute (function *)
    {
      refactor_eh_r (gimple_body (current_function_decl));
      return 0;
    }

}; // class pass_refactor_eh

} // anon namespace

gimple_opt_pass *
make_pass_refactor_eh (gcc::context *ctxt)
{
  return new pass_refactor_eh (ctxt);
}
/* At the end of gimple optimization, we can lower RESX.  */

static bool
lower_resx (basic_block bb, gimple_resx stmt,
	    hash_map<eh_region, tree> *mnt_map)
{
  int lp_nr;
  eh_region src_r, dst_r;
  gimple_stmt_iterator gsi;
  gimple x;
  tree fn, src_nr;
  bool ret = false;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0)
    dst_r = get_eh_region_from_lp_number (lp_nr);
  else
    dst_r = NULL;

  src_r = get_eh_region_from_number (gimple_resx_region (stmt));
  gsi = gsi_last_bb (bb);

  if (src_r == NULL)
    {
      /* We can wind up with no source region when pass_cleanup_eh shows
	 that there are no entries into an eh region and deletes it, but
	 then the block that contains the resx isn't removed.  This can
	 happen without optimization when the switch statement created by
	 lower_try_finally_switch isn't simplified to remove the eh case.

	 Resolve this by expanding the resx node to an abort.  */

      fn = builtin_decl_implicit (BUILT_IN_TRAP);
      x = gimple_build_call (fn, 0);
      gsi_insert_before (&gsi, x, GSI_SAME_STMT);

      while (EDGE_COUNT (bb->succs) > 0)
	remove_edge (EDGE_SUCC (bb, 0));
    }
  else if (dst_r)
    {
      /* When we have a destination region, we resolve this by copying
	 the excptr and filter values into place, and changing the edge
	 to immediately after the landing pad.  */
      edge e;

      if (lp_nr < 0)
	{
	  basic_block new_bb;
	  tree lab;

	  /* We are resuming into a MUST_NOT_THROW region.  Expand a call to
	     the failure decl into a new block, if needed.  */
	  gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);

	  tree *slot = mnt_map->get (dst_r);
	  if (slot == NULL)
	    {
	      gimple_stmt_iterator gsi2;

	      new_bb = create_empty_bb (bb);
	      add_bb_to_loop (new_bb, bb->loop_father);
	      lab = gimple_block_label (new_bb);
	      gsi2 = gsi_start_bb (new_bb);

	      fn = dst_r->u.must_not_throw.failure_decl;
	      x = gimple_build_call (fn, 0);
	      gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
	      gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);

	      mnt_map->put (dst_r, lab);
	    }
	  else
	    {
	      lab = *slot;
	      new_bb = label_to_block (lab);
	    }

	  gcc_assert (EDGE_COUNT (bb->succs) == 0);
	  e = make_edge (bb, new_bb, EDGE_FALLTHRU);
	  e->count = bb->count;
	  e->probability = REG_BR_PROB_BASE;
	}
      else
	{
	  edge_iterator ei;
	  tree dst_nr = build_int_cst (integer_type_node, dst_r->index);

	  fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
	  src_nr = build_int_cst (integer_type_node, src_r->index);
	  x = gimple_build_call (fn, 2, dst_nr, src_nr);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	  /* Update the flags for the outgoing edge.  */
	  e = single_succ_edge (bb);
	  gcc_assert (e->flags & EDGE_EH);
	  e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;

	  /* If there are no more EH users of the landing pad, delete it.  */
	  FOR_EACH_EDGE (e, ei, e->dest->preds)
	    if (e->flags & EDGE_EH)
	      break;
	  if (e == NULL)
	    {
	      eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
	      remove_eh_landing_pad (lp);
	    }
	}

      ret = true;
    }
  else
    {
      tree var;

      /* When we don't have a destination region, this exception escapes
	 up the call chain.  We resolve this by generating a call to the
	 _Unwind_Resume library function.  */

      /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
	 with no arguments for C++ and Java.  Check for that.  */
      if (src_r->use_cxa_end_cleanup)
	{
	  fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
	  x = gimple_build_call (fn, 0);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
	}
      else
	{
	  fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
	  src_nr = build_int_cst (integer_type_node, src_r->index);
	  x = gimple_build_call (fn, 1, src_nr);
	  var = create_tmp_var (ptr_type_node, NULL);
	  var = make_ssa_name (var, x);
	  gimple_call_set_lhs (x, var);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	  fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
	  x = gimple_build_call (fn, 1, var);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
	}

      gcc_assert (EDGE_COUNT (bb->succs) == 0);
    }

  gsi_remove (&gsi, true);

  return ret;
}
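/* For instance (a sketch of the generated GIMPLE, with a hypothetical
   SSA name): a "resx 1" whose exception escapes the function becomes

       _5 = __builtin_eh_pointer (1);
       __builtin_unwind_resume (_5);

   whereas a resx with a destination region inside the function becomes
   a __builtin_eh_copy_values (dst, src) call with the EH edge rewritten
   into a fallthru edge, as coded above.  */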
namespace {

const pass_data pass_data_lower_resx =
{
  GIMPLE_PASS, /* type */
  "resx", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_resx : public gimple_opt_pass
{
public:
  pass_lower_resx (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_resx, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_exceptions != 0; }
  virtual unsigned int execute (function *);

}; // class pass_lower_resx

unsigned int
pass_lower_resx::execute (function *fun)
{
  basic_block bb;
  bool dominance_invalidated = false;
  bool any_rewritten = false;

  hash_map<eh_region, tree> mnt_map;

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple last = last_stmt (bb);
      if (last && is_gimple_resx (last))
	{
	  dominance_invalidated |=
	    lower_resx (bb, as_a <gimple_resx> (last), &mnt_map);
	  any_rewritten = true;
	}
    }

  if (dominance_invalidated)
    {
      free_dominance_info (CDI_DOMINATORS);
      free_dominance_info (CDI_POST_DOMINATORS);
    }

  return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_lower_resx (gcc::context *ctxt)
{
  return new pass_lower_resx (ctxt);
}
/* Try to optimize var = {v} {CLOBBER} stmts followed just by
   external throw.  */

static void
optimize_clobbers (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  bool any_clobbers = false;
  bool seen_stack_restore = false;
  edge_iterator ei;
  edge e;

  /* Only optimize anything if the bb contains at least one clobber,
     ends with resx (checked by caller), optionally contains some
     debug stmts or labels, or at most one __builtin_stack_restore
     call, and has an incoming EH edge.  */
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_clobber_p (stmt))
	{
	  any_clobbers = true;
	  continue;
	}
      if (!seen_stack_restore
	  && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
	{
	  seen_stack_restore = true;
	  continue;
	}
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      return;
    }
  if (!any_clobbers)
    return;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (e->flags & EDGE_EH)
      break;
  if (e == NULL)
    return;
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (!gimple_clobber_p (stmt))
	continue;
      unlink_stmt_vdef (stmt);
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}
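/* E.g. (a sketch): in a block such as

       x ={v} {CLOBBER};
       y ={v} {CLOBBER};
       resx 2;   <-- throws externally

   the clobbers are dead on the path out of the function and are removed
   outright by the loop above.  */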
/* Try to sink var = {v} {CLOBBER} stmts followed just by
   internal throw to successor BB.  */

static int
sink_clobbers (basic_block bb)
{
  edge e;
  edge_iterator ei;
  gimple_stmt_iterator gsi, dgsi;
  basic_block succbb;
  bool any_clobbers = false;
  unsigned todo = 0;

  /* Only optimize if BB has a single EH successor and
     all predecessor edges are EH too.  */
  if (!single_succ_p (bb)
      || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
    return 0;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if ((e->flags & EDGE_EH) == 0)
	return 0;
    }

  /* And BB contains only CLOBBER stmts before the final
     RESX.  */
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      if (!gimple_clobber_p (stmt))
	return 0;
      any_clobbers = true;
    }
  if (!any_clobbers)
    return 0;

  edge succe = single_succ_edge (bb);
  succbb = succe->dest;

  /* See if there is a virtual PHI node to take an updated virtual
     operand from.  */
  gimple vphi = NULL;
  tree vuse = NULL_TREE;
  for (gsi = gsi_start_phis (succbb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree res = gimple_phi_result (gsi_stmt (gsi));
      if (virtual_operand_p (res))
	{
	  vphi = gsi_stmt (gsi);
	  vuse = res;
	  break;
	}
    }

  dgsi = gsi_after_labels (succbb);
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      tree lhs;
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      lhs = gimple_assign_lhs (stmt);
      /* Unfortunately we don't have dominance info updated at this
	 point, so checking if
	 dominated_by_p (CDI_DOMINATORS, succbb,
			 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0)))
	 would be too costly.  Thus, avoid sinking any clobbers that
	 refer to non-(D) SSA_NAMEs.  */
      if (TREE_CODE (lhs) == MEM_REF
	  && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
	  && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
	{
	  unlink_stmt_vdef (stmt);
	  gsi_remove (&gsi, true);
	  release_defs (stmt);
	  continue;
	}

      /* As we do not change stmt order when sinking across a
	 forwarder edge we can keep virtual operands in place.  */
      gsi_remove (&gsi, false);
      gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);

      /* But adjust virtual operands if we sunk across a PHI node.  */
      if (vuse)
	{
	  gimple use_stmt;
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
	      SET_USE (use_p, gimple_vdef (stmt));
	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
	    {
	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
	    }
	  /* Adjust the incoming virtual operand.  */
	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
	  SET_USE (gimple_vuse_op (stmt), vuse);
	}
      /* If there isn't a single predecessor but no virtual PHI node
	 arrange for virtual operands to be renamed.  */
      else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
	       && !single_pred_p (succbb))
	{
	  /* In this case there will be no use of the VDEF of this stmt.
	     ??? Unless this is a secondary opportunity and we have not
	     removed unreachable blocks yet, so we cannot assert this.
	     Which also means we will end up renaming too many times.  */
	  SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
	  mark_virtual_operands_for_renaming (cfun);
	  todo |= TODO_update_ssa_only_virtuals;
	}
    }

  return todo;
}
/* At the end of inlining, we can lower EH_DISPATCH.  Return true when
   we have found some duplicate labels and removed some edges.  */

static bool
lower_eh_dispatch (basic_block src, gimple_eh_dispatch stmt)
{
  gimple_stmt_iterator gsi;
  int region_nr;
  eh_region r;
  tree filter, fn;
  gimple x;
  bool redirected = false;

  region_nr = gimple_eh_dispatch_region (stmt);
  r = get_eh_region_from_number (region_nr);

  gsi = gsi_last_bb (src);

  switch (r->type)
    {
    case ERT_TRY:
      {
	auto_vec<tree> labels;
	tree default_label = NULL;
	eh_catch c;
	edge_iterator ei;
	edge e;
	hash_set<tree> seen_values;

	/* Collect the labels for a switch.  Zero the post_landing_pad
	   field because we'll no longer have anything keeping these labels
	   in existence and the optimizer will be free to merge these
	   blocks at will.  */
	for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	  {
	    tree tp_node, flt_node, lab = c->label;
	    bool have_label = false;

	    c->label = NULL;
	    tp_node = c->type_list;
	    flt_node = c->filter_list;

	    if (tp_node == NULL)
	      {
		default_label = lab;
		break;
	      }
	    do
	      {
		/* Filter out duplicate labels that arise when this handler
		   is shadowed by an earlier one.  When no labels are
		   attached to the handler anymore, we remove
		   the corresponding edge and then we delete unreachable
		   blocks at the end of this pass.  */
		if (! seen_values.contains (TREE_VALUE (flt_node)))
		  {
		    tree t = build_case_label (TREE_VALUE (flt_node),
					       NULL, lab);
		    labels.safe_push (t);
		    seen_values.add (TREE_VALUE (flt_node));
		    have_label = true;
		  }

		tp_node = TREE_CHAIN (tp_node);
		flt_node = TREE_CHAIN (flt_node);
	      }
	    while (tp_node);
	    if (! have_label)
	      {
		remove_edge (find_edge (src, label_to_block (lab)));
		redirected = true;
	      }
	  }

	/* Clean up the edge flags.  */
	FOR_EACH_EDGE (e, ei, src->succs)
	  {
	    if (e->flags & EDGE_FALLTHRU)
	      {
		/* If there was no catch-all, use the fallthru edge.  */
		if (default_label == NULL)
		  default_label = gimple_block_label (e->dest);
		e->flags &= ~EDGE_FALLTHRU;
	      }
	  }
	gcc_assert (default_label != NULL);

	/* Don't generate a switch if there's only a default case.
	   This is common in the form of try { A; } catch (...) { B; }.  */
	if (!labels.exists ())
	  {
	    e = single_succ_edge (src);
	    e->flags |= EDGE_FALLTHRU;
	  }
	else
	  {
	    fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
	    x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
							 region_nr));
	    filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
	    filter = make_ssa_name (filter, x);
	    gimple_call_set_lhs (x, filter);
	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	    /* Turn the default label into a default case.  */
	    default_label = build_case_label (NULL, NULL, default_label);
	    sort_case_labels (labels);

	    x = gimple_build_switch (filter, default_label, labels);
	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
	  }
      }
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      {
	edge b_e = BRANCH_EDGE (src);
	edge f_e = FALLTHRU_EDGE (src);

	fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
	x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
						     region_nr));
	filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
	filter = make_ssa_name (filter, x);
	gimple_call_set_lhs (x, filter);
	gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	r->u.allowed.label = NULL;
	x = gimple_build_cond (EQ_EXPR, filter,
			       build_int_cst (TREE_TYPE (filter),
					      r->u.allowed.filter),
			       NULL_TREE, NULL_TREE);
	gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
	f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Replace the EH_DISPATCH with the SWITCH or COND generated above.  */
  gsi_remove (&gsi, true);
  return redirected;
}
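/* As an illustrative sketch (hypothetical labels and SSA names): an
   "eh_dispatch 1" for a try with two typed handlers lowers to

       filter_5 = __builtin_eh_filter (1);
       switch (filter_5) <default: Ld, case 1: L1, case 2: L2>

   with one case per distinct filter value collected above; for
   ERT_ALLOWED_EXCEPTIONS a two-way conditional on the filter value is
   emitted instead.  */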
namespace {

const pass_data pass_data_lower_eh_dispatch =
{
  GIMPLE_PASS, /* type */
  "ehdisp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_eh_dispatch : public gimple_opt_pass
{
public:
  pass_lower_eh_dispatch (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
  virtual unsigned int execute (function *);

}; // class pass_lower_eh_dispatch

unsigned int
pass_lower_eh_dispatch::execute (function *fun)
{
  basic_block bb;
  int flags = 0;
  bool redirected = false;

  assign_filter_values ();

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple last = last_stmt (bb);
      if (last == NULL)
	continue;
      if (gimple_code (last) == GIMPLE_EH_DISPATCH)
	{
	  redirected |= lower_eh_dispatch (bb,
					   as_a <gimple_eh_dispatch> (last));
	  flags |= TODO_update_ssa_only_virtuals;
	}
      else if (gimple_code (last) == GIMPLE_RESX)
	{
	  if (stmt_can_throw_external (last))
	    optimize_clobbers (bb);
	  else
	    flags |= sink_clobbers (bb);
	}
    }

  if (redirected)
    delete_unreachable_blocks ();
  return flags;
}

} // anon namespace

gimple_opt_pass *
make_pass_lower_eh_dispatch (gcc::context *ctxt)
{
  return new pass_lower_eh_dispatch (ctxt);
}
/* Walk statements, see what regions and, optionally, landing pads
   are really referenced.

   Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
   and in LP_REACHABLE an sbitmap with bits set for reachable landing pads.

   Passing NULL for LP_REACHABLE is valid, in this case only reachable
   regions are marked.

   The caller is responsible for freeing the returned sbitmaps.  */

static void
mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
{
  sbitmap r_reachable, lp_reachable;
  basic_block bb;
  bool mark_landing_pads = (lp_reachablep != NULL);
  gcc_checking_assert (r_reachablep != NULL);

  r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
  bitmap_clear (r_reachable);
  *r_reachablep = r_reachable;

  if (mark_landing_pads)
    {
      lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
      bitmap_clear (lp_reachable);
      *lp_reachablep = lp_reachable;
    }
  else
    lp_reachable = NULL;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);

	  if (mark_landing_pads)
	    {
	      int lp_nr = lookup_stmt_eh_lp (stmt);

	      /* Negative LP numbers are MUST_NOT_THROW regions which
		 are not considered BB enders.  */
	      if (lp_nr < 0)
		bitmap_set_bit (r_reachable, -lp_nr);

	      /* Positive LP numbers are real landing pads, and BB enders.  */
	      else if (lp_nr > 0)
		{
		  gcc_assert (gsi_one_before_end_p (gsi));
		  eh_region region = get_eh_region_from_lp_number (lp_nr);
		  bitmap_set_bit (r_reachable, region->index);
		  bitmap_set_bit (lp_reachable, lp_nr);
		}
	    }

	  /* Avoid removing regions referenced from RESX/EH_DISPATCH.  */
	  switch (gimple_code (stmt))
	    {
	    case GIMPLE_RESX:
	      bitmap_set_bit (r_reachable,
			      gimple_resx_region (as_a <gimple_resx> (stmt)));
	      break;
	    case GIMPLE_EH_DISPATCH:
	      bitmap_set_bit (r_reachable,
			      gimple_eh_dispatch_region (
				as_a <gimple_eh_dispatch> (stmt)));
	      break;
	    default:
	      break;
	    }
	}
    }
}
/* Remove unreachable handlers and unreachable landing pads.  */

static void
remove_unreachable_handlers (void)
{
  sbitmap r_reachable, lp_reachable;
  eh_region region;
  eh_landing_pad lp;
  unsigned i;

  mark_reachable_handlers (&r_reachable, &lp_reachable);

  if (dump_file)
    {
      fprintf (dump_file, "Before removal of unreachable regions:\n");
      dump_eh_tree (dump_file, cfun);
      fprintf (dump_file, "Reachable regions: ");
      dump_bitmap_file (dump_file, r_reachable);
      fprintf (dump_file, "Reachable landing pads: ");
      dump_bitmap_file (dump_file, lp_reachable);
    }

  if (dump_file)
    {
      FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
	if (region && !bitmap_bit_p (r_reachable, region->index))
	  fprintf (dump_file,
		   "Removing unreachable region %d\n",
		   region->index);
    }

  remove_unreachable_eh_regions (r_reachable);

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
    if (lp && !bitmap_bit_p (lp_reachable, lp->index))
      {
	if (dump_file)
	  fprintf (dump_file,
		   "Removing unreachable landing pad %d\n",
		   lp->index);
	remove_eh_landing_pad (lp);
      }

  if (dump_file)
    {
      fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
      dump_eh_tree (dump_file, cfun);
      fprintf (dump_file, "\n\n");
    }

  sbitmap_free (r_reachable);
  sbitmap_free (lp_reachable);

#ifdef ENABLE_CHECKING
  verify_eh_tree (cfun);
#endif
}
/* Remove unreachable handlers if any landing pads have been removed after
   last ehcleanup pass (due to gimple_purge_dead_eh_edges).  */

void
maybe_remove_unreachable_handlers (void)
{
  eh_landing_pad lp;
  unsigned i;

  if (cfun->eh == NULL)
    return;

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
    if (lp && lp->post_landing_pad)
      {
	if (label_to_block (lp->post_landing_pad) == NULL)
	  {
	    remove_unreachable_handlers ();
	    return;
	  }
      }
}
/* Remove regions that do not have landing pads.  This assumes
   that remove_unreachable_handlers has already been run, and
   that we've just manipulated the landing pads since then.

   Preserve regions with landing pads and regions that prevent
   exceptions from propagating further, even if these regions
   are not reachable.  */

static void
remove_unreachable_handlers_no_lp (void)
{
  eh_region region;
  sbitmap r_reachable;
  unsigned i;

  mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
    {
      if (region)
	{
	  if (region->landing_pads != NULL
	      || region->type == ERT_MUST_NOT_THROW)
	    bitmap_set_bit (r_reachable, region->index);
	  if (dump_file
	      && !bitmap_bit_p (r_reachable, region->index))
	    fprintf (dump_file,
		     "Removing unreachable region %d\n",
		     region->index);
	}
    }

  remove_unreachable_eh_regions (r_reachable);

  sbitmap_free (r_reachable);
}
/* Undo critical edge splitting on an EH landing pad.  Earlier, we
   optimistically split all sorts of edges, including EH edges.  The
   optimization passes in between may not have needed them; if not,
   we should undo the split.

   Recognize this case by having one EH edge incoming to the BB and
   one normal edge outgoing; BB should be empty apart from the
   post_landing_pad label.

   Note that this is slightly different from the empty handler case
   handled by cleanup_empty_eh, in that the actual handler may yet
   have actual code but the landing pad has been separated from the
   handler.  As such, cleanup_empty_eh relies on this transformation
   having been done first.  */

static bool
unsplit_eh (eh_landing_pad lp)
{
  basic_block bb = label_to_block (lp->post_landing_pad);
  gimple_stmt_iterator gsi;
  edge e_in, e_out;

  /* Quickly check the edge counts on BB for singularity.  */
  if (!single_pred_p (bb) || !single_succ_p (bb))
    return false;
  e_in = single_pred_edge (bb);
  e_out = single_succ_edge (bb);

  /* Input edge must be EH and output edge must be normal.  */
  if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
    return false;

  /* The block must be empty except for the labels and debug insns.  */
  gsi = gsi_after_labels (bb);
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);
  if (!gsi_end_p (gsi))
    return false;

  /* The destination block must not already have a landing pad
     for a different region.  */
  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      tree lab;
      int lp_nr;

      if (gimple_code (stmt) != GIMPLE_LABEL)
	break;
      lab = gimple_label_label (stmt);
      lp_nr = EH_LANDING_PAD_NR (lab);
      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
	return false;
    }

  /* The new destination block must not already be a destination of
     the source block, lest we merge fallthru and eh edges and get
     all sorts of confused.  */
  if (find_edge (e_in->src, e_out->dest))
    return false;

  /* ??? We can get degenerate phis due to cfg cleanups.  I would have
     thought this should have been cleaned up by a phicprop pass, but
     that doesn't appear to handle virtuals.  Propagate by hand.  */
  if (!gimple_seq_empty_p (phi_nodes (bb)))
    {
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
	{
	  gimple use_stmt, phi = gsi_stmt (gsi);
	  tree lhs = gimple_phi_result (phi);
	  tree rhs = gimple_phi_arg_def (phi, 0);
	  use_operand_p use_p;
	  imm_use_iterator iter;

	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, rhs);
	    }

	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
	    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;

	  remove_phi_node (&gsi, true);
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
	     lp->index, e_out->dest->index);

  /* Redirect the edge.  Since redirect_eh_edge_1 expects to be moving
     a successor edge, humor it.  But do the real CFG change with the
     predecessor of E_OUT in order to preserve the ordering of arguments
     to the PHI nodes in E_OUT->DEST.  */
  redirect_eh_edge_1 (e_in, e_out->dest, false);
  redirect_edge_pred (e_out, e_in->src);
  e_out->flags = e_in->flags;
  e_out->probability = e_in->probability;
  e_out->count = e_in->count;
  remove_edge (e_in);

  return true;
}
/* Examine each landing pad block and see if it matches unsplit_eh.  */

static bool
unsplit_all_eh (void)
{
  bool changed = false;
  eh_landing_pad lp;
  int i;

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp)
      changed |= unsplit_eh (lp);

  return changed;
}
/* A subroutine of cleanup_empty_eh.  Redirect all EH edges incoming
   to OLD_BB to NEW_BB; return true on success, false on failure.

   OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
   PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
   Virtual PHIs may be deleted and marked for renaming.  */

static bool
cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
			     edge old_bb_out, bool change_region)
{
  gimple_stmt_iterator ngsi, ogsi;
  edge_iterator ei;
  edge e;
  bitmap ophi_handled;

  /* The destination block must not be a regular successor for any
     of the preds of the landing pad.  Thus, avoid turning an EH edge
     into a second regular edge between the same pair of blocks,
     which CFG verification would choke on.  See PR45172 and PR51089.  */
  FOR_EACH_EDGE (e, ei, old_bb->preds)
    if (find_edge (e->src, new_bb))
      return false;

  FOR_EACH_EDGE (e, ei, old_bb->preds)
    redirect_edge_var_map_clear (e);

  ophi_handled = BITMAP_ALLOC (NULL);

  /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
     for the edges we're going to move.  */
  for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
    {
      gimple ophi, nphi = gsi_stmt (ngsi);
      tree nresult, nop;

      nresult = gimple_phi_result (nphi);
      nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);

      /* Find the corresponding PHI in OLD_BB so we can forward-propagate
	 the source ssa_name.  */
      ophi = NULL;
      for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
	{
	  ophi = gsi_stmt (ogsi);
	  if (gimple_phi_result (ophi) == nop)
	    break;
	  ophi = NULL;
	}

      /* If we did find the corresponding PHI, copy those inputs.  */
      if (ophi)
	{
	  /* If NOP is used somewhere else beyond phis in new_bb, give up.  */
	  if (!has_single_use (nop))
	    {
	      imm_use_iterator imm_iter;
	      use_operand_p use_p;

	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
		{
		  if (!gimple_debug_bind_p (USE_STMT (use_p))
		      && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
			  || gimple_bb (USE_STMT (use_p)) != new_bb))
		    goto fail;
		}
	    }
	  bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
	  FOR_EACH_EDGE (e, ei, old_bb->preds)
	    {
	      location_t oloc;
	      tree oop;

	      if ((e->flags & EDGE_EH) == 0)
		continue;
	      oop = gimple_phi_arg_def (ophi, e->dest_idx);
	      oloc = gimple_phi_arg_location (ophi, e->dest_idx);
	      redirect_edge_var_map_add (e, nresult, oop, oloc);
	    }
	}
      /* If we didn't find the PHI, if it's a real variable or a VOP, we know
	 from the fact that OLD_BB is tree_empty_eh_handler_p that the
	 variable is unchanged from input to the block and we can simply
	 re-use the input to NEW_BB from the OLD_BB_OUT edge.  */
      else
	{
	  location_t nloc
	    = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
	  FOR_EACH_EDGE (e, ei, old_bb->preds)
	    redirect_edge_var_map_add (e, nresult, nop, nloc);
	}
    }

  /* Second, verify that all PHIs from OLD_BB have been handled.  If not,
     we don't know what values from the other edges into NEW_BB to use.  */
  for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
    {
      gimple ophi = gsi_stmt (ogsi);
      tree oresult = gimple_phi_result (ophi);
      if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
	goto fail;
    }

  /* Finally, move the edges and update the PHIs.  */
  for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
    if (e->flags & EDGE_EH)
      {
	/* ??? CFG manipulation routines do not try to update loop
	   form on edge redirection.  Do so manually here for now.  */
	/* If we redirect a loop entry or latch edge that will either create
	   a multiple entry loop or rotate the loop.  If the loops merge
	   we may have created a loop with multiple latches.
	   All of this isn't easily fixed thus cancel the affected loop
	   and mark the other loop as possibly having multiple latches.  */
	if (e->dest == e->dest->loop_father->header)
	  {
	    mark_loop_for_removal (e->dest->loop_father);
	    new_bb->loop_father->latch = NULL;
	    loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
	  }
	redirect_eh_edge_1 (e, new_bb, change_region);
	redirect_edge_succ (e, new_bb);
	flush_pending_stmts (e);
      }
    else
      ei_next (&ei);

  BITMAP_FREE (ophi_handled);
  return true;

 fail:
  FOR_EACH_EDGE (e, ei, old_bb->preds)
    redirect_edge_var_map_clear (e);
  BITMAP_FREE (ophi_handled);
  return false;
}
/* A subroutine of cleanup_empty_eh.  Move a landing pad LP from its
   old region to NEW_REGION at BB.  */

static void
cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
			  eh_landing_pad lp, eh_region new_region)
{
  gimple_stmt_iterator gsi;
  eh_landing_pad *pp;

  for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
    continue;
  *pp = lp->next_lp;

  lp->region = new_region;
  lp->next_lp = new_region->landing_pads;
  new_region->landing_pads = lp;

  /* Delete the RESX that was matched within the empty handler block.  */
  gsi = gsi_last_bb (bb);
  unlink_stmt_vdef (gsi_stmt (gsi));
  gsi_remove (&gsi, true);

  /* Clean up E_OUT for the fallthru.  */
  e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
  e_out->probability = REG_BR_PROB_BASE;
}
/* A subroutine of cleanup_empty_eh.  Handle more complex cases of
   unsplitting than unsplit_eh was prepared to handle, e.g. when
   multiple incoming edges and phis are involved.  */

static bool
cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
{
  gimple_stmt_iterator gsi;
  tree lab;

  /* We really ought not have totally lost everything following
     a landing pad label.  Given that BB is empty, there had better
     be a successor.  */
  gcc_assert (e_out != NULL);

  /* The destination block must not already have a landing pad
     for a different region.  */
  lab = NULL;
  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      int lp_nr;

      if (gimple_code (stmt) != GIMPLE_LABEL)
	break;
      lab = gimple_label_label (stmt);
      lp_nr = EH_LANDING_PAD_NR (lab);
      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
	return false;
    }

  /* Attempt to move the PHIs into the successor block.  */
  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Unsplit EH landing pad %d to block %i "
		 "(via cleanup_empty_eh).\n",
		 lp->index, e_out->dest->index);
      return true;
    }

  return false;
}
/* Return true if edge E_FIRST is part of an empty infinite loop
   or leads to such a loop through a series of single successor
   empty bbs.  */

static bool
infinite_empty_loop_p (edge e_first)
{
  bool inf_loop = false;
  edge e;

  if (e_first->dest == e_first->src)
    return true;

  e_first->src->aux = (void *) 1;
  for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
    {
      gimple_stmt_iterator gsi;
      if (e->dest->aux)
	{
	  inf_loop = true;
	  break;
	}
      e->dest->aux = (void *) 1;
      gsi = gsi_after_labels (e->dest);
      if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
	gsi_next_nondebug (&gsi);
      if (!gsi_end_p (gsi))
	break;
    }
  e_first->src->aux = NULL;
  for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
    e->dest->aux = NULL;

  return inf_loop;
}
/* Examine the block associated with LP to determine if it's an empty
   handler for its EH region.  If so, attempt to redirect EH edges to
   an outer region.  Return true if the CFG was updated in any way.  This
   is similar to jump forwarding, just across EH edges.  */

static bool
cleanup_empty_eh (eh_landing_pad lp)
{
  basic_block bb = label_to_block (lp->post_landing_pad);
  gimple_stmt_iterator gsi;
  gimple resx;
  eh_region new_region;
  edge_iterator ei;
  edge e, e_out;
  bool has_non_eh_pred;
  bool ret = false;
  int new_lp_nr;

  /* There can be zero or one edges out of BB.  This is the quickest test.  */
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      e_out = NULL;
      break;
    case 1:
      e_out = single_succ_edge (bb);
      break;
    default:
      return false;
    }

  resx = last_stmt (bb);
  if (resx && is_gimple_resx (resx))
    {
      if (stmt_can_throw_external (resx))
	optimize_clobbers (bb);
      else if (sink_clobbers (bb))
	ret = true;
    }

  gsi = gsi_after_labels (bb);

  /* Make sure to skip debug statements.  */
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);

  /* If the block is totally empty, look for more unsplitting cases.  */
  if (gsi_end_p (gsi))
    {
      /* For the degenerate case of an infinite loop bail out.
	 If bb has no successors and is totally empty, which can happen e.g.
	 because of incorrect noreturn attribute, bail out too.  */
      if (e_out == NULL
	  || infinite_empty_loop_p (e_out))
	return ret;

      return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
    }

  /* The block should consist only of a single RESX statement, modulo a
     preceding call to __builtin_stack_restore if there is no outgoing
     edge, since the call can be eliminated in this case.  */
  resx = gsi_stmt (gsi);
  if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
    {
      gsi_next (&gsi);
      resx = gsi_stmt (gsi);
    }
  if (!is_gimple_resx (resx))
    return ret;
  gcc_assert (gsi_one_before_end_p (gsi));

  /* Determine if there are non-EH edges, or resx edges into the handler.  */
  has_non_eh_pred = false;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (!(e->flags & EDGE_EH))
      has_non_eh_pred = true;

  /* Find the handler that's outer of the empty handler by looking at
     where the RESX instruction was vectored.  */
  new_lp_nr = lookup_stmt_eh_lp (resx);
  new_region = get_eh_region_from_lp_number (new_lp_nr);

  /* If there's no destination region within the current function,
     redirection is trivial via removing the throwing statements from
     the EH region, removing the EH edges, and allowing the block
     to go unreachable.  */
  if (new_region == NULL)
    {
      gcc_assert (e_out == NULL);
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
	if (e->flags & EDGE_EH)
	  {
	    gimple stmt = last_stmt (e->src);
	    remove_stmt_from_eh_lp (stmt);
	    remove_edge (e);
	  }
	else
	  ei_next (&ei);
      goto succeed;
    }

  /* If the destination region is a MUST_NOT_THROW, allow the runtime
     to handle the abort and allow the blocks to go unreachable.  */
  if (new_region->type == ERT_MUST_NOT_THROW)
    {
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
	if (e->flags & EDGE_EH)
	  {
	    gimple stmt = last_stmt (e->src);
	    remove_stmt_from_eh_lp (stmt);
	    add_stmt_to_eh_lp (stmt, new_lp_nr);
	    remove_edge (e);
	  }
	else
	  ei_next (&ei);
      goto succeed;
    }

  /* Try to redirect the EH edges and merge the PHIs into the destination
     landing pad block.  If the merge succeeds, we'll already have redirected
     all the EH edges.  The handler itself will go unreachable if there were
     no normal edges.  */
  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
    goto succeed;

  /* Finally, if all input edges are EH edges, then we can (potentially)
     reduce the number of transfers from the runtime by moving the landing
     pad from the original region to the new region.  This is a win when
     we remove the last CLEANUP region along a particular exception
     propagation path.  Since nothing changes except for the region with
     which the landing pad is associated, the PHI nodes do not need to be
     adjusted at all.  */
  if (!has_non_eh_pred)
    {
      cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
		 lp->index, new_region->index);

      /* ??? The CFG didn't change, but we may have rendered the
	 old EH region unreachable.  Trigger a cleanup there.  */
      return true;
    }

  return ret;

 succeed:
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
  remove_eh_landing_pad (lp);
  return true;
}
/* Do a post-order traversal of the EH region tree.  Examine each
   post_landing_pad block and see if we can eliminate it as empty.  */

static bool
cleanup_all_empty_eh (void)
{
  bool changed = false;
  eh_landing_pad lp;
  int i;

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp)
      changed |= cleanup_empty_eh (lp);

  return changed;
}
/* Perform cleanups and lowering of exception handling:
    1) cleanup regions with handlers that do nothing are optimized out
    2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
    3) info about regions containing instructions, and regions reachable
       via local EH edges, is collected
    4) the EH tree is pruned of regions no longer necessary.

   TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
	 Unify those that have the same failure decl and locus.  */

static unsigned int
execute_cleanup_eh_1 (void)
{
  /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
     looking up unreachable landing pads.  */
  remove_unreachable_handlers ();

  /* Watch out for the region tree vanishing due to all unreachable.  */
  if (cfun->eh->region_tree)
    {
      bool changed = false;

      if (optimize)
	changed |= unsplit_all_eh ();
      changed |= cleanup_all_empty_eh ();

      if (changed)
	{
	  free_dominance_info (CDI_DOMINATORS);
	  free_dominance_info (CDI_POST_DOMINATORS);

	  /* We delayed all basic block deletion, as we may have performed
	     cleanups on EH edges while non-EH edges were still present.  */
	  delete_unreachable_blocks ();

	  /* We manipulated the landing pads.  Remove any region that no
	     longer has a landing pad.  */
	  remove_unreachable_handlers_no_lp ();

	  return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
	}
    }

  return 0;
}
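/* How the return value is consumed (a sketch of standard pass-manager
   behavior, not code from this file): the TODO flags propagate out
   through pass_cleanup_eh::execute below, and the pass manager then
   performs the requested work after the pass finishes, roughly

     if (todo & TODO_cleanup_cfg)
       cleanup_tree_cfg ();
     if (todo & TODO_update_ssa_only_virtuals)
       update_ssa (TODO_update_ssa_only_virtuals);

   so this function never has to clean up the CFG itself.  */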
namespace {

const pass_data pass_data_cleanup_eh =
{
  GIMPLE_PASS, /* type */
  "ehcleanup", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
class pass_cleanup_eh : public gimple_opt_pass
{
public:
  pass_cleanup_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
  virtual bool gate (function *fun)
    {
      return fun->eh != NULL && fun->eh->region_tree != NULL;
    }

  virtual unsigned int execute (function *);

}; // class pass_cleanup_eh
unsigned int
pass_cleanup_eh::execute (function *fun)
{
  int ret = execute_cleanup_eh_1 ();

  /* If the function no longer needs an EH personality routine, clear it.
     This exposes cross-language inlining opportunities and avoids
     references to a never-defined personality routine.  */
  if (DECL_FUNCTION_PERSONALITY (current_function_decl)
      && function_needs_eh_personality (fun) != eh_personality_lang)
    DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;

  return ret;
}

} // anon namespace

gimple_opt_pass *
make_pass_cleanup_eh (gcc::context *ctxt)
{
  return new pass_cleanup_eh (ctxt);
}
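/* For reference, a hedged sketch of how this factory is wired up (the
   authoritative list lives in passes.def, not in this file):

     NEXT_PASS (pass_cleanup_eh);

   Because clone () is provided above, the pass manager may instantiate
   "ehcleanup" more than once in the pipeline.  */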
/* Verify that BB, which contains STMT as its last statement, has precisely
   the edge that make_eh_edges would create.  */

DEBUG_FUNCTION bool
verify_eh_edges (gimple stmt)
{
  basic_block bb = gimple_bb (stmt);
  eh_landing_pad lp = NULL;
  int lp_nr;
  edge_iterator ei;
  edge e, eh_edge;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr > 0)
    lp = get_eh_landing_pad_from_number (lp_nr);

  eh_edge = NULL;
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      if (e->flags & EDGE_EH)
	{
	  if (eh_edge)
	    {
	      error ("BB %i has multiple EH edges", bb->index);
	      return true;
	    }
	  eh_edge = e;
	}
    }

  if (lp == NULL)
    {
      if (eh_edge)
	{
	  error ("BB %i can not throw but has an EH edge", bb->index);
	  return true;
	}
      return false;
    }

  if (!stmt_could_throw_p (stmt))
    {
      error ("BB %i last statement has incorrectly set lp", bb->index);
      return true;
    }

  if (eh_edge == NULL)
    {
      error ("BB %i is missing an EH edge", bb->index);
      return true;
    }

  if (eh_edge->dest != label_to_block (lp->post_landing_pad))
    {
      error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
      return true;
    }

  return false;
}
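/* Usage note (an assumption about the caller, not stated in this file):
   this checker is meant to be driven by the CFG verifier on each basic
   block's last statement, along the lines of

     if (verify_eh_edges (last_stmt (bb)))
       err = true;

   where a true return indicates that an inconsistency was reported.  */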
/* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */

DEBUG_FUNCTION bool
verify_eh_dispatch_edge (gimple_eh_dispatch stmt)
{
  eh_region r;
  eh_catch c;
  basic_block src, dst;
  bool want_fallthru = true;
  edge_iterator ei;
  edge e, fall_edge;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  src = gimple_bb (stmt);

  FOR_EACH_EDGE (e, ei, src->succs)
    gcc_assert (e->aux == NULL);

  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c; c = c->next_catch)
	{
	  dst = label_to_block (c->label);
	  e = find_edge (src, dst);
	  if (e == NULL)
	    {
	      error ("BB %i is missing an edge", src->index);
	      return true;
	    }
	  e->aux = (void *)e;

	  /* A catch-all handler doesn't have a fallthru.  */
	  if (c->type_list == NULL)
	    {
	      want_fallthru = false;
	      break;
	    }
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      dst = label_to_block (r->u.allowed.label);
      e = find_edge (src, dst);
      if (e == NULL)
	{
	  error ("BB %i is missing an edge", src->index);
	  return true;
	}
      e->aux = (void *)e;
      break;

    default:
      gcc_unreachable ();
    }

  fall_edge = NULL;
  FOR_EACH_EDGE (e, ei, src->succs)
    {
      if (e->flags & EDGE_FALLTHRU)
	{
	  if (fall_edge != NULL)
	    {
	      error ("BB %i too many fallthru edges", src->index);
	      return true;
	    }
	  fall_edge = e;
	}
      else if (e->aux)
	e->aux = NULL;
      else
	{
	  error ("BB %i has incorrect edge", src->index);
	  return true;
	}
    }

  if ((fall_edge != NULL) ^ want_fallthru)
    {
      error ("BB %i has incorrect fallthru edge", src->index);
      return true;
    }

  return false;
}
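/* Illustrative GIMPLE shape this verifier expects (a sketch for an
   ERT_TRY region with one typed catch handler; not from a real dump):

	<bb 4>:
	  eh_dispatch 2;	;; one edge per catch label, plus a
				;; fallthru edge, because the handler
				;; list is not catch-all

   For a catch-all handler (type_list == NULL) want_fallthru is false,
   so any remaining EDGE_FALLTHRU is diagnosed as an error.  */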