/* Exception handling semantics and decomposition for trees.
   Copyright (C) 2003-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hash-table.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-inline.h"
#include "tree-pass.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "gimple-low.h"
/* In some instances a tree and a gimple need to be stored in the same table,
   i.e. in hash tables.  This is a structure to do this.  */
typedef union {tree *tp; tree t; gimple g;} treemple;
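/* An illustrative sketch (not part of the original source): callers fill
   in whichever member matches the object in hand, e.g.

	treemple temp;
	temp.t = label;	    -- a LABEL_DECL, when recording a goto target
	temp.g = try_stmt;  -- a GIMPLE_TRY, when recording a finally node

   so a single hash table can be keyed by either kind of node.  */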
/* Misc functions used in this file.  */

/* Remember and lookup EH landing pad data for arbitrary statements.
   Really this means any statement that could_throw_p.  We could
   stuff this information into the stmt_ann data structure, but:

   (1) We absolutely rely on this information being kept until
       we get to rtl.  Once we're done with lowering here, if we lose
       the information there's no way to recover it!

   (2) There are many more statements that *cannot* throw as
       compared to those that can.  We should be saving some amount
       of space by only allocating memory for those that can throw.  */
/* Add statement T in function IFUN to landing pad NUM.  */

static void
add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
{
  gcc_assert (num != 0);

  if (!get_eh_throw_stmt_table (ifun))
    set_eh_throw_stmt_table (ifun, hash_map<gimple, int>::create_ggc (31));

  gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
}
/* Add statement T in the current function (cfun) to EH landing pad NUM.  */

static void
add_stmt_to_eh_lp (gimple t, int num)
{
  add_stmt_to_eh_lp_fn (cfun, t, num);
}
/* Add statement T to the single EH landing pad in REGION.  */

static void
record_stmt_eh_region (eh_region region, gimple t)
{
  if (region == NULL)
    return;
  if (region->type == ERT_MUST_NOT_THROW)
    add_stmt_to_eh_lp_fn (cfun, t, -region->index);
  else
    {
      eh_landing_pad lp = region->landing_pads;
      if (lp == NULL)
	lp = gen_eh_landing_pad (region);
      else
	gcc_assert (lp->next_lp == NULL);
      add_stmt_to_eh_lp_fn (cfun, t, lp->index);
    }
}
/* Remove statement T in function IFUN from its EH landing pad.  */

bool
remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
{
  if (!get_eh_throw_stmt_table (ifun))
    return false;

  if (!get_eh_throw_stmt_table (ifun)->get (t))
    return false;

  get_eh_throw_stmt_table (ifun)->remove (t);
  return true;
}
/* Remove statement T in the current function (cfun) from its
   EH landing pad.  */

bool
remove_stmt_from_eh_lp (gimple t)
{
  return remove_stmt_from_eh_lp_fn (cfun, t);
}
/* Determine if statement T is inside an EH region in function IFUN.
   Positive numbers indicate a landing pad index; negative numbers
   indicate a MUST_NOT_THROW region index; zero indicates that the
   statement is not recorded in the region table.  */

int
lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
{
  if (ifun->eh->throw_stmt_table == NULL)
    return 0;

  int *lp_nr = ifun->eh->throw_stmt_table->get (t);
  return lp_nr ? *lp_nr : 0;
}
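/* Illustrative usage (hypothetical statements, for exposition only):
   after add_stmt_to_eh_lp (stmt, 2), lookup_stmt_eh_lp (stmt) yields 2;
   after record_stmt_eh_region on a MUST_NOT_THROW region with index 3,
   the lookup yields -3; a statement that was never recorded yields 0.  */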
/* Likewise, but always use the current function.  */

int
lookup_stmt_eh_lp (gimple t)
{
  /* We can get called from initialized data when -fnon-call-exceptions
     is on; prevent crash.  */
  if (cfun == NULL)
    return 0;
  return lookup_stmt_eh_lp_fn (cfun, t);
}
/* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
   nodes and LABEL_DECL nodes.  We will use this during the second phase to
   determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */

struct finally_tree_node
{
  /* When storing a GIMPLE_TRY, we have to record a gimple.  However
     when deciding whether a GOTO to a certain LABEL_DECL (which is a
     tree) leaves the TRY block, it's necessary to record a tree in
     this field.  Thus a treemple is used.  */
  treemple child;
  gimple parent;
};
/* Hashtable helpers.  */

struct finally_tree_hasher : typed_free_remove <finally_tree_node>
{
  typedef finally_tree_node value_type;
  typedef finally_tree_node compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
finally_tree_hasher::hash (const value_type *v)
{
  return (intptr_t)v->child.t >> 4;
}

inline bool
finally_tree_hasher::equal (const value_type *v, const compare_type *c)
{
  return v->child.t == c->child.t;
}

/* Note that this table is *not* marked GTY.  It is short-lived.  */
static hash_table<finally_tree_hasher> *finally_tree;
static void
record_in_finally_tree (treemple child, gimple parent)
{
  struct finally_tree_node *n;
  finally_tree_node **slot;

  n = XNEW (struct finally_tree_node);
  n->child = child;
  n->parent = parent;

  slot = finally_tree->find_slot (n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}

static void
collect_finally_tree (gimple stmt, gimple region);
/* Go through the gimple sequence.  Works with collect_finally_tree to
   record all GIMPLE_LABEL and GIMPLE_TRY statements.  */

static void
collect_finally_tree_1 (gimple_seq seq, gimple region)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
    collect_finally_tree (gsi_stmt (gsi), region);
}
static void
collect_finally_tree (gimple stmt, gimple region)
{
  treemple temp;

  switch (gimple_code (stmt))
    {
    case GIMPLE_LABEL:
      temp.t = gimple_label_label (stmt);
      record_in_finally_tree (temp, region);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
	{
	  temp.g = stmt;
	  record_in_finally_tree (temp, region);
	  collect_finally_tree_1 (gimple_try_eval (stmt), stmt);
	  collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
	}
      else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
	{
	  collect_finally_tree_1 (gimple_try_eval (stmt), region);
	  collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
	}
      break;

    case GIMPLE_CATCH:
      collect_finally_tree_1 (gimple_catch_handler (stmt), region);
      break;

    case GIMPLE_EH_FILTER:
      collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
      break;

    case GIMPLE_EH_ELSE:
      collect_finally_tree_1 (gimple_eh_else_n_body (stmt), region);
      collect_finally_tree_1 (gimple_eh_else_e_body (stmt), region);
      break;

    default:
      /* A type, a decl, or some kind of statement that we're not
	 interested in.  Don't walk them.  */
      break;
    }
}
/* Use the finally tree to determine if a jump from START to TARGET
   would leave the try_finally node that START lives in.  */

static bool
outside_finally_tree (treemple start, gimple target)
{
  struct finally_tree_node n, *p;

  do
    {
      n.child = start;
      p = finally_tree->find (&n);
      if (!p)
	return true;
      start.g = p->parent;
    }
  while (start.g != target);

  return false;
}
/* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
   nodes into a set of gotos, magic labels, and eh regions.
   The eh region creation is straight-forward, but frobbing all the gotos
   and such into shape isn't.  */

/* The sequence into which we record all EH stuff.  This will be
   placed at the end of the function when we're all done.  */
static gimple_seq eh_seq;

/* Record whether an EH region contains something that can throw,
   indexed by EH region number.  */
static bitmap eh_region_may_contain_throw_map;
/* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
   statements that are seen to escape this GIMPLE_TRY_FINALLY node.
   The idea is to record a gimple statement for everything except for
   the conditionals, which get their labels recorded.  Since labels are
   of type 'tree', we need this node to store both gimple and tree
   objects.  REPL_STMT is the sequence used to replace the goto/return
   statement.  CONT_STMT is used to store the statement that allows
   the return/goto to jump to the original destination.  */
struct goto_queue_node
{
  treemple stmt;
  location_t location;
  gimple_seq repl_stmt;
  gimple cont_stmt;
  int index;
  /* This is used when index >= 0 to indicate that stmt is a label (as
     opposed to a goto stmt).  */
  bool is_label;
};
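/* A hedged example of how the queue is used (source fragment and labels
   invented for illustration): given

	try { if (p) goto out; } finally { f (); }
	out:;

   the escaping goto gets a queue entry whose REPL_STMT becomes
   "goto finally_label;" (possibly preceded by a selector assignment)
   and whose CONT_STMT is "goto out;", emitted after the finally block
   so control still reaches the original destination.  */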
/* State of the world while lowering.  */

struct leh_state
{
  /* What's "current" while constructing the eh region tree.  These
     correspond to variables of the same name in cfun->eh, which we
     don't have easy access to.  */
  eh_region cur_region;

  /* What's "current" for the purposes of __builtin_eh_pointer.  For
     a CATCH, this is the associated TRY.  For an EH_FILTER, this is
     the associated ALLOWED_EXCEPTIONS, etc.  */
  eh_region ehp_region;

  /* Processing of TRY_FINALLY requires a bit more state.  This is
     split out into a separate structure so that we don't have to
     copy so much when processing other nodes.  */
  struct leh_tf_state *tf;
};
struct leh_tf_state
{
  /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
     try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
     this so that outside_finally_tree can reliably reference the tree used
     in the collect_finally_tree data structures.  */
  gimple try_finally_expr;
  gimple top_p;

  /* While lowering, top_p is usually expanded into multiple statements;
     the following field stores them.  */
  gimple_seq top_p_seq;

  /* The state outside this try_finally node.  */
  struct leh_state *outer;

  /* The exception region created for it.  */
  eh_region region;

  /* The goto queue.  */
  struct goto_queue_node *goto_queue;
  size_t goto_queue_size;
  size_t goto_queue_active;

  /* Pointer map to help in searching goto_queue when it is large.  */
  hash_map<gimple, goto_queue_node *> *goto_queue_map;

  /* The set of unique labels seen as entries in the goto queue.  */
  vec<tree> dest_array;

  /* A label to be added at the end of the completed transformed
     sequence.  It will be set if may_fallthru was true *at one time*,
     though subsequent transformations may have cleared that flag.  */
  tree fallthru_label;

  /* True if it is possible to fall out the bottom of the try block.
     Cleared if the fallthru is converted to a goto.  */
  bool may_fallthru;

  /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
  bool may_return;

  /* True if the finally block can receive an exception edge.
     Cleared if the exception case is handled by code duplication.  */
  bool may_throw;
};
static gimple_seq lower_eh_must_not_throw (struct leh_state *, gimple);

/* Search for STMT in the goto queue.  Return the replacement,
   or null if the statement isn't in the queue.  */

#define LARGE_GOTO_QUEUE 20

static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);
static gimple_seq
find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
{
  unsigned int i;

  if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
    {
      for (i = 0; i < tf->goto_queue_active; i++)
	if (tf->goto_queue[i].stmt.g == stmt.g)
	  return tf->goto_queue[i].repl_stmt;
      return NULL;
    }

  /* If we have a large number of entries in the goto_queue, create a
     pointer map and use that for searching.  */

  if (!tf->goto_queue_map)
    {
      tf->goto_queue_map = new hash_map<gimple, goto_queue_node *>;
      for (i = 0; i < tf->goto_queue_active; i++)
	{
	  bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
						  &tf->goto_queue[i]);
	  gcc_assert (!existed);
	}
    }

  goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
  if (slot != NULL)
    return ((*slot)->repl_stmt);

  return NULL;
}
/* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
   lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
   then we can just splat it in, otherwise we add the new stmts immediately
   after the GIMPLE_COND and redirect.  */

static void
replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
				gimple_stmt_iterator *gsi)
{
  tree label;
  gimple_seq new_seq;
  treemple temp;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  temp.tp = tp;
  new_seq = find_goto_replacement (tf, temp);
  if (!new_seq)
    return;

  if (gimple_seq_singleton_p (new_seq)
      && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
    {
      *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
      return;
    }

  label = create_artificial_label (loc);
  /* Set the new label for the GIMPLE_COND.  */
  *tp = label;

  gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
  gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
}
/* The real work of replace_goto_queue.  Returns with the iterator updated
   to point to the next statement.  */

static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);
static void
replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
		      gimple_stmt_iterator *gsi)
{
  gimple_seq seq;
  treemple temp;
  temp.g = NULL;

  switch (gimple_code (stmt))
    {
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      temp.g = stmt;
      seq = find_goto_replacement (tf, temp);
      if (seq)
	{
	  gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
	  gsi_remove (gsi, false);
	  return;
	}
      break;

    case GIMPLE_COND:
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
      break;

    case GIMPLE_TRY:
      replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
      break;
    case GIMPLE_CATCH:
      replace_goto_queue_stmt_list (gimple_catch_handler_ptr (stmt), tf);
      break;
    case GIMPLE_EH_FILTER:
      replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
      break;
    case GIMPLE_EH_ELSE:
      replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (stmt), tf);
      break;

    default:
      /* These won't have gotos in them.  */
      break;
    }

  gsi_next (gsi);
}
/* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */

static void
replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
{
  gimple_stmt_iterator gsi = gsi_start (*seq);

  while (!gsi_end_p (gsi))
    replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
}
/* Replace all goto queue members.  */

static void
replace_goto_queue (struct leh_tf_state *tf)
{
  if (tf->goto_queue_active == 0)
    return;
  replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
  replace_goto_queue_stmt_list (&eh_seq, tf);
}
/* Add a new record to the goto queue contained in TF.  NEW_STMT is the
   data to be added, IS_LABEL indicates whether NEW_STMT is a label or
   a gimple return statement.  */

static void
record_in_goto_queue (struct leh_tf_state *tf,
		      treemple new_stmt,
		      int index,
		      bool is_label,
		      location_t location)
{
  size_t active, size;
  struct goto_queue_node *q;

  gcc_assert (!tf->goto_queue_map);

  active = tf->goto_queue_active;
  size = tf->goto_queue_size;
  if (active >= size)
    {
      size = (size ? size * 2 : 32);
      tf->goto_queue_size = size;
      tf->goto_queue
	= XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
    }

  q = &tf->goto_queue[active];
  tf->goto_queue_active = active + 1;

  memset (q, 0, sizeof (*q));
  q->stmt = new_stmt;
  q->index = index;
  q->location = location;
  q->is_label = is_label;
}
/* Record the LABEL label in the goto queue contained in TF.
   TF is not null.  */

static void
record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
			    location_t location)
{
  int index;
  treemple temp, new_stmt;

  if (!label)
    return;

  /* Computed and non-local gotos do not get processed.  Given
     their nature we can neither tell whether we've escaped the
     finally block nor redirect them if we knew.  */
  if (TREE_CODE (label) != LABEL_DECL)
    return;

  /* No need to record gotos that don't leave the try block.  */
  temp.t = label;
  if (!outside_finally_tree (temp, tf->try_finally_expr))
    return;

  if (! tf->dest_array.exists ())
    {
      tf->dest_array.create (10);
      tf->dest_array.quick_push (label);
      index = 0;
    }
  else
    {
      int n = tf->dest_array.length ();
      for (index = 0; index < n; ++index)
	if (tf->dest_array[index] == label)
	  break;
      if (index == n)
	tf->dest_array.safe_push (label);
    }

  /* In the case of a GOTO we want to record the destination label,
     since with a GIMPLE_COND we have an easy access to the then/else
     labels.  */
  new_stmt = stmt;
  record_in_goto_queue (tf, new_stmt, index, true, location);
}
/* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
   node, and if so record that fact in the goto queue associated with that
   try_finally node.  */

static void
maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
{
  struct leh_tf_state *tf = state->tf;
  treemple new_stmt;

  if (!tf)
    return;

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      new_stmt.tp = gimple_op_ptr (stmt, 2);
      record_in_goto_queue_label (tf, new_stmt, gimple_cond_true_label (stmt),
				  EXPR_LOCATION (*new_stmt.tp));
      new_stmt.tp = gimple_op_ptr (stmt, 3);
      record_in_goto_queue_label (tf, new_stmt, gimple_cond_false_label (stmt),
				  EXPR_LOCATION (*new_stmt.tp));
      break;
    case GIMPLE_GOTO:
      new_stmt.g = stmt;
      record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
				  gimple_location (stmt));
      break;

    case GIMPLE_RETURN:
      tf->may_return = true;
      new_stmt.g = stmt;
      record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
      break;

    default:
      gcc_unreachable ();
    }
}
#ifdef ENABLE_CHECKING
/* We do not process GIMPLE_SWITCHes for now.  As long as the original source
   was in fact structured, and we've not yet done jump threading, then none
   of the labels will leave outer GIMPLE_TRY_FINALLY nodes.  Verify this.  */

static void
verify_norecord_switch_expr (struct leh_state *state, gimple switch_expr)
{
  struct leh_tf_state *tf = state->tf;
  size_t i, n;

  if (!tf)
    return;

  n = gimple_switch_num_labels (switch_expr);

  for (i = 0; i < n; ++i)
    {
      treemple temp;
      tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
      temp.t = lab;
      gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
    }
}
#else
#define verify_norecord_switch_expr(state, switch_expr)
#endif
/* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
   non-null, insert it before the new branch.  */

static void
do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
{
  gimple x;

  /* In the case of a return, the queue node must be a gimple statement.  */
  gcc_assert (!q->is_label);

  /* Note that the return value may have already been computed, e.g.,

	int x;
	int foo (void)
	{
	  x = 0;
	  try {
	    return x;
	  } finally {
	    x++;
	  }
	}

     should return 0, not 1.  We don't have to do anything to make
     this happen because the return value has been placed in the
     RESULT_DECL already.  */

  q->cont_stmt = q->stmt.g;

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}
/* Similar, but easier, for GIMPLE_GOTO.  */

static void
do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
		     struct leh_tf_state *tf)
{
  gimple x;

  gcc_assert (q->is_label);

  q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}
/* Emit a standard landing pad sequence into SEQ for REGION.  */

static void
emit_post_landing_pad (gimple_seq *seq, eh_region region)
{
  eh_landing_pad lp = region->landing_pads;
  gimple x;

  if (lp == NULL)
    lp = gen_eh_landing_pad (region);

  lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
  EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;

  x = gimple_build_label (lp->post_landing_pad);
  gimple_seq_add_stmt (seq, x);
}
/* Emit a RESX statement into SEQ for REGION.  */

static void
emit_resx (gimple_seq *seq, eh_region region)
{
  gimple x = gimple_build_resx (region->index);
  gimple_seq_add_stmt (seq, x);
  if (region->outer)
    record_stmt_eh_region (region->outer, x);
}
/* Emit an EH_DISPATCH statement into SEQ for REGION.  */

static void
emit_eh_dispatch (gimple_seq *seq, eh_region region)
{
  gimple x = gimple_build_eh_dispatch (region->index);
  gimple_seq_add_stmt (seq, x);
}
/* Note that the current EH region may contain a throw, or a
   call to a function which itself may contain a throw.  */

static void
note_eh_region_may_contain_throw (eh_region region)
{
  while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
    {
      if (region->type == ERT_MUST_NOT_THROW)
	break;
      region = region->outer;
      if (region == NULL)
	break;
    }
}
/* Check if REGION has been marked as containing a throw.  If REGION is
   NULL, this predicate is false.  */

static inline bool
eh_region_may_contain_throw (eh_region r)
{
  return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
}
/* We want to transform
	try { body; } catch { stuff; }
   to
	normal_sequence:
	  body;
	  over:
	eh_sequence:
	  landing_pad:
	  stuff;
	  goto over;

   TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
   should be placed before the second operand, or NULL.  OVER is
   an existing label that should be put at the exit, or NULL.  */

static gimple_seq
frob_into_branch_around (gimple tp, eh_region region, tree over)
{
  gimple x;
  gimple_seq cleanup, result;
  location_t loc = gimple_location (tp);

  cleanup = gimple_try_cleanup (tp);
  result = gimple_try_eval (tp);

  if (region)
    emit_post_landing_pad (&eh_seq, region);

  if (gimple_seq_may_fallthru (cleanup))
    {
      if (!over)
	over = create_artificial_label (loc);
      x = gimple_build_goto (over);
      gimple_set_location (x, loc);
      gimple_seq_add_stmt (&cleanup, x);
    }
  gimple_seq_add_seq (&eh_seq, cleanup);

  if (over)
    {
      x = gimple_build_label (over);
      gimple_seq_add_stmt (&result, x);
    }
  return result;
}
/* A subroutine of lower_try_finally.  Duplicate the tree rooted at T.
   Make sure to record all new labels found.  */

static gimple_seq
lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
			     location_t loc)
{
  gimple region = NULL;
  gimple_seq new_seq;
  gimple_stmt_iterator gsi;

  new_seq = copy_gimple_seq_and_replace_locals (seq);

  for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
	{
	  tree block = gimple_block (stmt);
	  gimple_set_location (stmt, loc);
	  gimple_set_block (stmt, block);
	}
    }

  if (outer_state->tf)
    region = outer_state->tf->try_finally_expr;
  collect_finally_tree_1 (new_seq, region);

  return new_seq;
}
/* A subroutine of lower_try_finally.  Create a fallthru label for
   the given try_finally state.  The only tricky bit here is that
   we have to make sure to record the label in our outer context.  */

static tree
lower_try_finally_fallthru_label (struct leh_tf_state *tf)
{
  tree label = tf->fallthru_label;
  treemple temp;

  if (!label)
    {
      label = create_artificial_label (gimple_location (tf->try_finally_expr));
      tf->fallthru_label = label;
      if (tf->outer->tf)
	{
	  temp.t = label;
	  record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
	}
    }
  return label;
}
/* A subroutine of lower_try_finally.  If FINALLY consists of a
   GIMPLE_EH_ELSE node, return it.  */

static inline gimple
get_eh_else (gimple_seq finally)
{
  gimple x = gimple_seq_first_stmt (finally);
  if (gimple_code (x) == GIMPLE_EH_ELSE)
    {
      gcc_assert (gimple_seq_singleton_p (finally));
      return x;
    }
  return NULL;
}
/* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
   langhook returns non-null, then the language requires that the exception
   path out of a try_finally be treated specially.  To wit: the code within
   the finally block may not itself throw an exception.  We have two choices
   here.  First we can duplicate the finally block and wrap it in a
   must_not_throw region.  Second, we can generate code like

	try {
	  finally_block;
	} catch {
	  if (fintmp == eh_edge)
	    protect_cleanup_actions;
	}

   where "fintmp" is the temporary used in the switch statement generation
   alternative considered below.  For the nonce, we always choose the first
   option.

   THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */

static void
honor_protect_cleanup_actions (struct leh_state *outer_state,
			       struct leh_state *this_state,
			       struct leh_tf_state *tf)
{
  tree protect_cleanup_actions;
  gimple_stmt_iterator gsi;
  bool finally_may_fallthru;
  gimple_seq finally;
  gimple x, eh_else;

  /* First check for nothing to do.  */
  if (lang_hooks.eh_protect_cleanup_actions == NULL)
    return;
  protect_cleanup_actions = lang_hooks.eh_protect_cleanup_actions ();
  if (protect_cleanup_actions == NULL)
    return;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Duplicate the FINALLY block.  Only need to do this for try-finally,
     and not for cleanups.  If we've got an EH_ELSE, extract it now.  */
  if (eh_else)
    {
      finally = gimple_eh_else_e_body (eh_else);
      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
    }
  else if (this_state)
    finally = lower_try_finally_dup_block (finally, outer_state,
	gimple_location (tf->try_finally_expr));
  finally_may_fallthru = gimple_seq_may_fallthru (finally);

  /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
     set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
     to be in an enclosing scope, but needs to be implemented at this level
     to avoid a nesting violation (see wrap_temporary_cleanups in
     cp/decl.c).  Since it's logically at an outer level, we should call
     terminate before we get to it, so strip it away before adding the
     MUST_NOT_THROW filter.  */
  gsi = gsi_start (finally);
  x = gsi_stmt (gsi);
  if (gimple_code (x) == GIMPLE_TRY
      && gimple_try_kind (x) == GIMPLE_TRY_CATCH
      && gimple_try_catch_is_cleanup (x))
    {
      gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
      gsi_remove (&gsi, false);
    }

  /* Wrap the block with protect_cleanup_actions as the action.  */
  x = gimple_build_eh_must_not_throw (protect_cleanup_actions);
  x = gimple_build_try (finally, gimple_seq_alloc_with_stmt (x),
			GIMPLE_TRY_CATCH);
  finally = lower_eh_must_not_throw (outer_state, x);

  /* Drop all of this into the exception sequence.  */
  emit_post_landing_pad (&eh_seq, tf->region);
  gimple_seq_add_seq (&eh_seq, finally);
  if (finally_may_fallthru)
    emit_resx (&eh_seq, tf->region);

  /* Having now been handled, EH isn't to be considered with
     the rest of the outgoing edges.  */
  tf->may_throw = false;
}
/* A subroutine of lower_try_finally.  We have determined that there is
   no fallthru edge out of the finally block.  This means that there is
   no outgoing edge corresponding to any incoming edge.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_nofallthru (struct leh_state *state,
			      struct leh_tf_state *tf)
{
  tree lab;
  gimple x, eh_else;
  gimple_seq finally;
  struct goto_queue_node *q, *qe;

  lab = create_artificial_label (gimple_location (tf->try_finally_expr));

  /* We expect that tf->top_p is a GIMPLE_TRY.  */
  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  x = gimple_build_label (lab);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  for (; q < qe; ++q)
    if (q->index < 0)
      do_return_redirection (q, lab, NULL);
    else
      do_goto_redirection (q, lab, NULL, tf);

  replace_goto_queue (tf);

  /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      finally = gimple_eh_else_n_body (eh_else);
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
	{
	  finally = gimple_eh_else_e_body (eh_else);
	  lower_eh_constructs_1 (state, &finally);

	  emit_post_landing_pad (&eh_seq, tf->region);
	  gimple_seq_add_seq (&eh_seq, finally);
	}
    }
  else
    {
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
	{
	  emit_post_landing_pad (&eh_seq, tf->region);

	  x = gimple_build_goto (lab);
	  gimple_set_location (x, gimple_location (tf->try_finally_expr));
	  gimple_seq_add_stmt (&eh_seq, x);
	}
    }
}
/* A subroutine of lower_try_finally.  We have determined that there is
   exactly one destination of the finally block.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  gimple x;
  gimple_seq finally;
  gimple_stmt_iterator gsi;
  tree finally_label;
  location_t loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* Since there's only one destination, and the destination edge can only
     either be EH or non-EH, that implies that all of our incoming edges
     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
  x = get_eh_else (finally);
  if (x)
    {
      if (tf->may_throw)
	finally = gimple_eh_else_e_body (x);
      else
	finally = gimple_eh_else_n_body (x);
    }

  lower_eh_constructs_1 (state, &finally);

  for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
	{
	  tree block = gimple_block (stmt);
	  gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
	  gimple_set_block (stmt, block);
	}
    }

  if (tf->may_throw)
    {
      /* Only reachable via the exception edge.  Add the given label to
         the head of the FINALLY block.  Append a RESX at the end.  */
      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, finally);
      emit_resx (&eh_seq, tf->region);
      return;
    }

  if (tf->may_fallthru)
    {
      /* Only reachable via the fallthru edge.  Do nothing but let
	 the two blocks run together; we'll fall out the bottom.  */
      gimple_seq_add_seq (&tf->top_p_seq, finally);
      return;
    }

  finally_label = create_artificial_label (loc);
  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  gimple_seq_add_seq (&tf->top_p_seq, finally);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;

  if (tf->may_return)
    {
      /* Reachable by return expressions only.  Redirect them.  */
      for (; q < qe; ++q)
	do_return_redirection (q, finally_label, NULL);
      replace_goto_queue (tf);
    }
  else
    {
      /* Reachable by goto expressions only.  Redirect them.  */
      for (; q < qe; ++q)
	do_goto_redirection (q, finally_label, NULL, tf);
      replace_goto_queue (tf);

      if (tf->dest_array[0] == tf->fallthru_label)
	{
	  /* Reachable by goto to fallthru label only.  Redirect it
	     to the new label (already created, sadly), and do not
	     emit the final branch out, or the fallthru label.  */
	  tf->fallthru_label = NULL;
	  return;
	}
    }

  /* Place the original return/goto to the original destination
     immediately after the finally block.  */
  x = tf->goto_queue[0].cont_stmt;
  gimple_seq_add_stmt (&tf->top_p_seq, x);
  maybe_record_in_goto_queue (state, x);
}
/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by duplicating the
   finally block for every destination.  */
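/* An illustrative sketch of the resulting shape (simplified; labels
   invented for exposition): with destinations D1 and D2 plus a fallthru,
   and finally body F, we emit roughly

	<try body>
	F; goto fallthru_label;
	L1: F; goto D1;
	L2: F; goto D2;

   with each escaping goto/return redirected to its own copy of F.  */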
static void
lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
{
  gimple_seq finally;
  gimple_seq new_stmt;
  gimple_seq seq;
  gimple x, eh_else;
  tree tmp;
  location_t tf_loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);

  /* Notice EH_ELSE, and simplify some of the remaining code
     by considering FINALLY to be the normal return path only.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    finally = gimple_eh_else_n_body (eh_else);

  tf->top_p_seq = gimple_try_eval (tf->top_p);
  new_stmt = NULL;

  if (tf->may_fallthru)
    {
      seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);
      gimple_seq_add_seq (&new_stmt, seq);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&new_stmt, x);
    }

  if (tf->may_throw)
    {
      /* We don't need to copy the EH path of EH_ELSE,
	 since it is only emitted once.  */
      if (eh_else)
	seq = gimple_eh_else_e_body (eh_else);
      else
	seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);

      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, seq);
      emit_resx (&eh_seq, tf->region);
    }

  if (tf->goto_queue)
    {
      struct goto_queue_node *q, *qe;
      int return_index, index;
      struct labels_s
      {
	struct goto_queue_node *q;
	tree label;
      } *labels;

      return_index = tf->dest_array.length ();
      labels = XCNEWVEC (struct labels_s, return_index + 1);

      q = tf->goto_queue;
      qe = q + tf->goto_queue_active;
      for (; q < qe; q++)
	{
	  index = q->index < 0 ? return_index : q->index;

	  if (!labels[index].q)
	    labels[index].q = q;
	}

      for (index = 0; index < return_index + 1; index++)
	{
	  tree lab;

	  q = labels[index].q;
	  if (! q)
	    continue;

	  lab = labels[index].label
	    = create_artificial_label (tf_loc);

	  if (index == return_index)
	    do_return_redirection (q, lab, NULL);
	  else
	    do_goto_redirection (q, lab, NULL, tf);

	  x = gimple_build_label (lab);
	  gimple_seq_add_stmt (&new_stmt, x);

	  seq = lower_try_finally_dup_block (finally, state, q->location);
	  lower_eh_constructs_1 (state, &seq);
	  gimple_seq_add_seq (&new_stmt, seq);

	  gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
	  maybe_record_in_goto_queue (state, q->cont_stmt);
	}

      for (q = tf->goto_queue; q < qe; q++)
	{
	  tree lab;

	  index = q->index < 0 ? return_index : q->index;

	  if (labels[index].q == q)
	    continue;

	  lab = labels[index].label;

	  if (index == return_index)
	    do_return_redirection (q, lab, NULL);
	  else
	    do_goto_redirection (q, lab, NULL, tf);
	}

      replace_goto_queue (tf);
      free (labels);
    }

  /* Need to link new stmts after running replace_goto_queue due
     to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
}
/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by instrumenting
   each incoming edge and creating a switch statement at the end of the
   finally block that branches to the appropriate destination.  */
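/* An illustrative sketch of the generated shape (simplified; not emitted
   verbatim): each incoming edge first stores a selector in finally_tmp
   and branches to the single shared copy of the finally block, which
   dispatches afterward:

	finally_tmp = 0; goto finally_label;	-- fallthru edge
	finally_tmp = 1; goto finally_label;	-- escaping goto
	finally_label:
	  <finally block>
	  switch (finally_tmp)
	    {
	    case 0: goto fallthru_label;
	    case 1: goto D1;
	    ...
	    }
*/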
static void
lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  tree finally_tmp, finally_label;
  int return_index, eh_index, fallthru_index;
  int nlabels, ndests, j, last_case_index;
  tree last_case;
  vec<tree> case_label_vec;
  gimple_seq switch_body = NULL;
  gimple x, eh_else;
  tree tmp;
  gimple switch_stmt;
  gimple_seq finally;
  hash_map<tree, gimple> *cont_map = NULL;
  /* The location of the TRY_FINALLY stmt.  */
  location_t tf_loc = gimple_location (tf->try_finally_expr);
  /* The location of the finally block.  */
  location_t finally_loc;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Mash the TRY block to the head of the chain.  */
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* The location of the finally is either the last stmt in the finally
     block or the location of the TRY_FINALLY itself.  */
  x = gimple_seq_last_stmt (finally);
  finally_loc = x ? gimple_location (x) : tf_loc;

  /* Prepare for switch statement generation.  */
  nlabels = tf->dest_array.length ();
  return_index = nlabels;
  eh_index = return_index + tf->may_return;
  fallthru_index = eh_index + (tf->may_throw && !eh_else);
  ndests = fallthru_index + tf->may_fallthru;

  finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
  finally_label = create_artificial_label (finally_loc);

  /* We use vec::quick_push on case_label_vec throughout this function,
     since we know the size in advance and allocate precisely as much
     space as needed.  */
  case_label_vec.create (ndests);
  last_case = NULL;
  last_case_index = 0;

  /* Begin inserting code for getting to the finally block.  Things
     are done in this order to correspond to the sequence the code is
     laid out.  */

  if (tf->may_fallthru)
    {
      x = gimple_build_assign (finally_tmp,
			       build_int_cst (integer_type_node,
					      fallthru_index));
      gimple_seq_add_stmt (&tf->top_p_seq, x);

      tmp = build_int_cst (integer_type_node, fallthru_index);
      last_case = build_case_label (tmp, NULL,
				    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&switch_body, x);
    }

  /* For EH_ELSE, emit the exception path (plus resx) now, then
     subsequently we only need consider the normal path.  */
  if (eh_else)
    {
      if (tf->may_throw)
	{
	  finally = gimple_eh_else_e_body (eh_else);
	  lower_eh_constructs_1 (state, &finally);

	  emit_post_landing_pad (&eh_seq, tf->region);
	  gimple_seq_add_seq (&eh_seq, finally);
	  emit_resx (&eh_seq, tf->region);
	}

      finally = gimple_eh_else_n_body (eh_else);
    }
  else if (tf->may_throw)
    {
      emit_post_landing_pad (&eh_seq, tf->region);

      x = gimple_build_assign (finally_tmp,
			       build_int_cst (integer_type_node, eh_index));
      gimple_seq_add_stmt (&eh_seq, x);

      x = gimple_build_goto (finally_label);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&eh_seq, x);

      tmp = build_int_cst (integer_type_node, eh_index);
      last_case = build_case_label (tmp, NULL,
				    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&eh_seq, x);
      emit_resx (&eh_seq, tf->region);
    }

  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  lower_eh_constructs_1 (state, &finally);
  gimple_seq_add_seq (&tf->top_p_seq, finally);

  /* Redirect each incoming goto edge.  */
  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  j = last_case_index + tf->may_return;
  /* Prepare the assignments to finally_tmp that are executed upon the
     entrance through a particular edge.  */
  for (; q < qe; ++q)
    {
      gimple_seq mod = NULL;
      int switch_id;
      unsigned int case_index;

      if (q->index < 0)
	{
	  x = gimple_build_assign (finally_tmp,
				   build_int_cst (integer_type_node,
						  return_index));
	  gimple_seq_add_stmt (&mod, x);
	  do_return_redirection (q, finally_label, mod);
	  switch_id = return_index;
	}
      else
	{
	  x = gimple_build_assign (finally_tmp,
				   build_int_cst (integer_type_node, q->index));
	  gimple_seq_add_stmt (&mod, x);
	  do_goto_redirection (q, finally_label, mod, tf);
	  switch_id = q->index;
	}

      case_index = j + q->index;
      if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
	{
	  tree case_lab;
	  tmp = build_int_cst (integer_type_node, switch_id);
	  case_lab = build_case_label (tmp, NULL,
				       create_artificial_label (tf_loc));
	  /* We store the cont_stmt in the pointer map, so that we can recover
	     it in the loop below.  */
	  if (!cont_map)
	    cont_map = new hash_map<tree, gimple>;
	  cont_map->put (case_lab, q->cont_stmt);
	  case_label_vec.quick_push (case_lab);
	}
    }
  for (j = last_case_index; j < last_case_index + nlabels; j++)
    {
      gimple cont_stmt;

      last_case = case_label_vec[j];

      gcc_assert (last_case);
      gcc_assert (cont_map);

      cont_stmt = *cont_map->get (last_case);

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);
      gimple_seq_add_stmt (&switch_body, cont_stmt);
      maybe_record_in_goto_queue (state, cont_stmt);
    }
  if (cont_map)
    delete cont_map;

  replace_goto_queue (tf);

  /* Make sure that the last case is the default label, as one is required.
     Then sort the labels, which is also required in GIMPLE.  */
  CASE_LOW (last_case) = NULL;
  tree tem = case_label_vec.pop ();
  gcc_assert (tem == last_case);
  sort_case_labels (case_label_vec);

  /* Build the switch statement, setting last_case to be the default
     label.  */
  switch_stmt = gimple_build_switch (finally_tmp, last_case,
				     case_label_vec);
  gimple_set_location (switch_stmt, finally_loc);

  /* Need to link SWITCH_STMT after running replace_goto_queue
     due to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
  gimple_seq_add_seq (&tf->top_p_seq, switch_body);
}
/* Decide whether or not we are going to duplicate the finally block.
   There are several considerations.

   First, if this is Java, then the finally block contains code
   written by the user.  It has line numbers associated with it,
   so duplicating the block means it's difficult to set a breakpoint.
   Since controlling code generation via -g is verboten, we simply
   never duplicate code without optimization.

   Second, we'd like to prevent egregious code growth.  One way to
   do this is to estimate the size of the finally block, multiply
   that by the number of copies we'd need to make, and compare against
   the estimate of the size of the switch machinery we'd have to add.  */

static bool
decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
{
  int f_estimate, sw_estimate;
  gimple eh_else;

  /* If there's an EH_ELSE involved, the exception path is separate
     and really doesn't come into play for this computation.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      ndests -= may_throw;
      finally = gimple_eh_else_n_body (eh_else);
    }

  if (!optimize)
    {
      gimple_stmt_iterator gsi;

      if (ndests == 1)
	return true;

      for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  if (!is_gimple_debug (stmt) && !gimple_clobber_p (stmt))
	    return false;
	}
      return true;
    }

  /* Finally estimate N times, plus N gotos.  */
  f_estimate = count_insns_seq (finally, &eni_size_weights);
  f_estimate = (f_estimate + 1) * ndests;

  /* Switch statement (cost 10), N variable assignments, N gotos.  */
  sw_estimate = 10 + 2 * ndests;

  /* Optimize for size clearly wants our best guess.  */
  if (optimize_function_for_size_p (cfun))
    return f_estimate < sw_estimate;

  /* ??? These numbers are completely made up so far.  */
  if (optimize > 1)
    return f_estimate < 100 || f_estimate < sw_estimate * 2;
  else
    return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
}
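/* A worked example of the heuristic in decide_copy_try_finally above
   (numbers invented purely for illustration): a finally block counted
   as 9 instructions with 4 destinations gives

	f_estimate  = (9 + 1) * 4 = 40
	sw_estimate = 10 + 2 * 4  = 18

   so when optimizing for size (40 < 18 is false) we use the switch,
   while at -O2 (40 < 100) we still duplicate.  */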
/* REG is the enclosing region for a possible cleanup region, or the region
   itself.  Returns TRUE if such a region would be unreachable.

   Cleanup regions within a must-not-throw region aren't actually reachable
   even if there are throwing stmts within them, because the personality
   routine will call terminate before unwinding.  */

static bool
cleanup_is_dead_in (eh_region reg)
{
  while (reg && reg->type == ERT_CLEANUP)
    reg = reg->outer;
  return (reg && reg->type == ERT_MUST_NOT_THROW);
}
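/* For example (assumed nesting, for illustration only): a cleanup region
   whose outer chain looks like CLEANUP -> CLEANUP -> MUST_NOT_THROW is
   dead, since the personality routine calls terminate before any
   unwinding could reach it; the same chain ending in a TRY region is
   live.  */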
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY node
   to a sequence of labels and blocks, plus the exception region trees
   that record all the magic.  This is complicated by the need to
   arrange for the FINALLY block to be executed on all exits.  */

static gimple_seq
lower_try_finally (struct leh_state *state, gimple tp)
{
  struct leh_tf_state this_tf;
  struct leh_state this_state;
  int ndests;
  gimple_seq old_eh_seq;

  /* Process the try block.  */

  memset (&this_tf, 0, sizeof (this_tf));
  this_tf.try_finally_expr = tp;
  this_tf.top_p = tp;
  this_tf.outer = state;
  if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
    {
      this_tf.region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_tf.region;
    }
  else
    {
      this_tf.region = NULL;
      this_state.cur_region = state->cur_region;
    }

  this_state.ehp_region = state->ehp_region;
  this_state.tf = &this_tf;

  old_eh_seq = eh_seq;
  eh_seq = NULL;

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  /* Determine if the try block is escaped through the bottom.  */
  this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));

  /* Determine if any exceptions are possible within the try block.  */
  if (this_tf.region)
    this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
  if (this_tf.may_throw)
    honor_protect_cleanup_actions (state, &this_state, &this_tf);

  /* Determine how many edges (still) reach the finally block.  Or rather,
     how many destinations are reached by the finally block.  Use this to
     determine how we process the finally block itself.  */

  ndests = this_tf.dest_array.length ();
  ndests += this_tf.may_fallthru;
  ndests += this_tf.may_return;
  ndests += this_tf.may_throw;

  /* If the FINALLY block is not reachable, dike it out.  */
  if (ndests == 0)
    {
      gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
      gimple_try_set_cleanup (tp, NULL);
    }
  /* If the finally block doesn't fall through, then any destination
     we might try to impose there isn't reached either.  There may be
     some minor amount of cleanup and redirection still needed.  */
  else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
    lower_try_finally_nofallthru (state, &this_tf);

  /* We can easily special-case redirection to a single destination.  */
  else if (ndests == 1)
    lower_try_finally_onedest (state, &this_tf);
  else if (decide_copy_try_finally (ndests, this_tf.may_throw,
				    gimple_try_cleanup (tp)))
    lower_try_finally_copy (state, &this_tf);
  else
    lower_try_finally_switch (state, &this_tf);

  /* If someone requested we add a label at the end of the transformed
     block, do so.  */
  if (this_tf.fallthru_label)
    {
      /* This must be reached only if ndests == 0.  */
      gimple x = gimple_build_label (this_tf.fallthru_label);
      gimple_seq_add_stmt (&this_tf.top_p_seq, x);
    }

  this_tf.dest_array.release ();
  free (this_tf.goto_queue);
  if (this_tf.goto_queue_map)
    delete this_tf.goto_queue_map;

  /* If there was an old (aka outer) eh_seq, append the current eh_seq.
     If there was no old eh_seq, then the append is trivially already done.  */
  if (old_eh_seq)
    {
      if (eh_seq == NULL)
	eh_seq = old_eh_seq;
      else
	{
	  gimple_seq new_eh_seq = eh_seq;
	  eh_seq = old_eh_seq;
	  gimple_seq_add_seq (&eh_seq, new_eh_seq);
	}
    }

  return this_tf.top_p_seq;
}
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
   list of GIMPLE_CATCH nodes to a sequence of labels and blocks, plus the
   exception region trees that record all the magic.  */

static gimple_seq
lower_catch (struct leh_state *state, gimple tp)
{
  eh_region try_region = NULL;
  struct leh_state this_state = *state;
  gimple_stmt_iterator gsi;
  tree out_label;
  gimple_seq new_seq, cleanup;
  gimple x;
  location_t try_catch_loc = gimple_location (tp);

  if (flag_exceptions)
    {
      try_region = gen_eh_region_try (state->cur_region);
      this_state.cur_region = try_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (!eh_region_may_contain_throw (try_region))
    return gimple_try_eval (tp);

  new_seq = NULL;
  emit_eh_dispatch (&new_seq, try_region);
  emit_resx (&new_seq, try_region);

  this_state.cur_region = state->cur_region;
  this_state.ehp_region = try_region;

  out_label = NULL;
  cleanup = gimple_try_cleanup (tp);
  for (gsi = gsi_start (cleanup);
       !gsi_end_p (gsi);
       gsi_next (&gsi))
    {
      eh_catch c;
      gimple gcatch;
      gimple_seq handler;

      gcatch = gsi_stmt (gsi);
      c = gen_eh_region_catch (try_region, gimple_catch_types (gcatch));

      handler = gimple_catch_handler (gcatch);
      lower_eh_constructs_1 (&this_state, &handler);

      c->label = create_artificial_label (UNKNOWN_LOCATION);
      x = gimple_build_label (c->label);
      gimple_seq_add_stmt (&new_seq, x);

      gimple_seq_add_seq (&new_seq, handler);

      if (gimple_seq_may_fallthru (new_seq))
	{
	  if (!out_label)
	    out_label = create_artificial_label (try_catch_loc);

	  x = gimple_build_goto (out_label);
	  gimple_seq_add_stmt (&new_seq, x);
	}
      if (!c->type_list)
	break;
    }

  gimple_try_set_cleanup (tp, new_seq);

  return frob_into_branch_around (tp, try_region, out_label);
}
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with a
   GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
   region trees that record all the magic.  */

static gimple_seq
lower_eh_filter (struct leh_state *state, gimple tp)
{
  struct leh_state this_state = *state;
  eh_region this_region = NULL;
  gimple inner, x;
  gimple_seq new_seq;

  inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));

  if (flag_exceptions)
    {
      this_region = gen_eh_region_allowed (state->cur_region,
					   gimple_eh_filter_types (inner));
      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (!eh_region_may_contain_throw (this_region))
    return gimple_try_eval (tp);

  new_seq = NULL;
  this_state.cur_region = state->cur_region;
  this_state.ehp_region = this_region;

  emit_eh_dispatch (&new_seq, this_region);
  emit_resx (&new_seq, this_region);

  this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
  x = gimple_build_label (this_region->u.allowed.label);
  gimple_seq_add_stmt (&new_seq, x);

  lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
  gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));

  gimple_try_set_cleanup (tp, new_seq);

  return frob_into_branch_around (tp, this_region, NULL);
}
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
   a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
   plus the exception region trees that record all the magic.  */

static gimple_seq
lower_eh_must_not_throw (struct leh_state *state, gimple tp)
{
  struct leh_state this_state = *state;

  if (flag_exceptions)
    {
      gimple inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
      eh_region this_region;

      this_region = gen_eh_region_must_not_throw (state->cur_region);
      this_region->u.must_not_throw.failure_decl
	= gimple_eh_must_not_throw_fndecl (inner);
      this_region->u.must_not_throw.failure_loc
	= LOCATION_LOCUS (gimple_location (tp));

      /* In order to get mangling applied to this decl, we must mark it
	 used now.  Otherwise, pass_ipa_free_lang_data won't think it
	 needs to happen.  */
      TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;

      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  return gimple_try_eval (tp);
}
/* Implement a cleanup expression.  This is similar to try-finally,
   except that we only execute the cleanup block for exception edges.  */

static gimple_seq
lower_cleanup (struct leh_state *state, gimple tp)
{
  struct leh_state this_state = *state;
  eh_region this_region = NULL;
  struct leh_tf_state fake_tf;
  gimple_seq result;
  bool cleanup_dead = cleanup_is_dead_in (state->cur_region);

  if (flag_exceptions && !cleanup_dead)
    {
      this_region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (cleanup_dead || !eh_region_may_contain_throw (this_region))
    return gimple_try_eval (tp);

  /* Build enough of a try-finally state so that we can reuse
     honor_protect_cleanup_actions.  */
  memset (&fake_tf, 0, sizeof (fake_tf));
  fake_tf.top_p = fake_tf.try_finally_expr = tp;
  fake_tf.outer = state;
  fake_tf.region = this_region;
  fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
  fake_tf.may_throw = true;

  honor_protect_cleanup_actions (state, NULL, &fake_tf);

  if (fake_tf.may_throw)
    {
      /* In this case honor_protect_cleanup_actions had nothing to do,
	 and we should process this normally.  */
      lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
      result = frob_into_branch_around (tp, this_region,
					fake_tf.fallthru_label);
    }
  else
    {
      /* In this case honor_protect_cleanup_actions did nearly all of
	 the work.  All we have left is to append the fallthru_label.  */
      result = gimple_try_eval (tp);
      if (fake_tf.fallthru_label)
	{
	  gimple x = gimple_build_label (fake_tf.fallthru_label);
	  gimple_seq_add_stmt (&result, x);
	}
    }
  return result;
}
/* Main loop for lowering eh constructs.  Also moves gsi to the next
   statement.  */

static void
lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
{
  gimple_seq replace;
  gimple x;
  gimple stmt = gsi_stmt (*gsi);

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
	tree fndecl = gimple_call_fndecl (stmt);
	tree rhs, lhs;

	if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	  switch (DECL_FUNCTION_CODE (fndecl))
	    {
	    case BUILT_IN_EH_POINTER:
	      /* The front end may have generated a call to
		 __builtin_eh_pointer (0) within a catch region.  Replace
		 this zero argument with the current catch region number.  */
	      if (state->ehp_region)
		{
		  tree nr = build_int_cst (integer_type_node,
					   state->ehp_region->index);
		  gimple_call_set_arg (stmt, 0, nr);
		}
	      else
		{
		  /* The user has done something silly.  Remove it.  */
		  rhs = null_pointer_node;
		  goto do_replace;
		}
	      break;

	    case BUILT_IN_EH_FILTER:
	      /* ??? This should never appear, but since it's a builtin it
		 is accessible to abuse by users.  Just remove it and
		 replace the use with the arbitrary value zero.  */
	      rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
	    do_replace:
	      lhs = gimple_call_lhs (stmt);
	      x = gimple_build_assign (lhs, rhs);
	      gsi_insert_before (gsi, x, GSI_SAME_STMT);
	      /* fall through */

	    case BUILT_IN_EH_COPY_VALUES:
	      /* Likewise this should not appear.  Remove it.  */
	      gsi_remove (gsi, true);
	      return;

	    default:
	      break;
	    }
      }
      /* FALLTHRU */

    case GIMPLE_ASSIGN:
      /* If the stmt can throw use a new temporary for the assignment
	 to a LHS.  This makes sure the old value of the LHS is
	 available on the EH edge.  Only do so for statements that
	 potentially fall through (no noreturn calls e.g.), otherwise
	 this new assignment might create fake fallthru regions.  */
      if (stmt_could_throw_p (stmt)
	  && gimple_has_lhs (stmt)
	  && gimple_stmt_may_fallthru (stmt)
	  && !tree_could_throw_p (gimple_get_lhs (stmt))
	  && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
	{
	  tree lhs = gimple_get_lhs (stmt);
	  tree tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
	  gimple s = gimple_build_assign (lhs, tmp);
	  gimple_set_location (s, gimple_location (stmt));
	  gimple_set_block (s, gimple_block (stmt));
	  gimple_set_lhs (stmt, tmp);
	  if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
	      || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
	    DECL_GIMPLE_REG_P (tmp) = 1;
	  gsi_insert_after (gsi, s, GSI_SAME_STMT);
	}
      /* Look for things that can throw exceptions, and record them.  */
      if (state->cur_region && stmt_could_throw_p (stmt))
	{
	  record_stmt_eh_region (state->cur_region, stmt);
	  note_eh_region_may_contain_throw (state->cur_region);
	}
      break;

    case GIMPLE_COND:
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      maybe_record_in_goto_queue (state, stmt);
      break;

    case GIMPLE_SWITCH:
      verify_norecord_switch_expr (state, stmt);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
	replace = lower_try_finally (state, stmt);
      else
	{
	  x = gimple_seq_first_stmt (gimple_try_cleanup (stmt));
	  if (!x)
	    {
	      replace = gimple_try_eval (stmt);
	      lower_eh_constructs_1 (state, &replace);
	    }
	  else
	    switch (gimple_code (x))
	      {
	      case GIMPLE_CATCH:
		replace = lower_catch (state, stmt);
		break;
	      case GIMPLE_EH_FILTER:
		replace = lower_eh_filter (state, stmt);
		break;
	      case GIMPLE_EH_MUST_NOT_THROW:
		replace = lower_eh_must_not_throw (state, stmt);
		break;
	      case GIMPLE_EH_ELSE:
		/* This code is only valid with GIMPLE_TRY_FINALLY.  */
		gcc_unreachable ();
	      default:
		replace = lower_cleanup (state, stmt);
		break;
	      }
	}

      /* Remove the old stmt and insert the transformed sequence
	 instead.  */
      gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
      gsi_remove (gsi, true);

      /* Return since we don't want gsi_next ().  */
      return;

    case GIMPLE_EH_ELSE:
      /* We should be eliminating this in lower_try_finally et al.  */
      gcc_unreachable ();

    default:
      /* A type, a decl, or some kind of statement that we're not
	 interested in.  Don't walk them.  */
      break;
    }

  gsi_next (gsi);
}
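/* An illustrative example of the LHS-temporary trick used for throwing
   assignments above (source fragment invented for exposition): for

	x = foo ();	-- x is live on the EH edge

   we emit

	tmp = foo ();
	x = tmp;

   so that if foo throws, the EH path still sees the old value of x.  */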
/* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2.  */

static void
lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
{
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
    lower_eh_constructs_2 (state, &gsi);
}
namespace {

const pass_data pass_data_lower_eh =
{
  GIMPLE_PASS, /* type */
  "eh", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  PROP_gimple_leh, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_eh : public gimple_opt_pass
{
public:
  pass_lower_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_eh, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_lower_eh
unsigned int
pass_lower_eh::execute (function *fun)
{
  struct leh_state null_state;
  gimple_seq bodyp;

  bodyp = gimple_body (current_function_decl);
  if (bodyp == NULL)
    return 0;

  finally_tree = new hash_table<finally_tree_hasher> (31);
  eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
  memset (&null_state, 0, sizeof (null_state));

  collect_finally_tree_1 (bodyp, NULL);
  lower_eh_constructs_1 (&null_state, &bodyp);
  gimple_set_body (current_function_decl, bodyp);

  /* We assume there's a return statement, or something, at the end of
     the function, and thus plopping the EH sequence afterward won't
     change anything.  */
  gcc_assert (!gimple_seq_may_fallthru (bodyp));
  gimple_seq_add_seq (&bodyp, eh_seq);

  /* We assume that since BODYP already existed, adding EH_SEQ to it
     didn't change its value, and we don't have to re-set the function.  */
  gcc_assert (bodyp == gimple_body (current_function_decl));

  delete finally_tree;
  finally_tree = NULL;
  BITMAP_FREE (eh_region_may_contain_throw_map);
  eh_seq = NULL;

  /* If this function needs a language specific EH personality routine
     and the frontend didn't already set one do so now.  */
  if (function_needs_eh_personality (fun) == eh_personality_lang
      && !DECL_FUNCTION_PERSONALITY (current_function_decl))
    DECL_FUNCTION_PERSONALITY (current_function_decl)
      = lang_hooks.eh_personality ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_lower_eh (gcc::context *ctxt)
{
  return new pass_lower_eh (ctxt);
}
/* Create the multiple edges from an EH_DISPATCH statement to all of
   the possible handlers for its EH region.  Return true if there's
   no fallthru edge; false if there is.  */

bool
make_eh_dispatch_edges (gimple stmt)
{
  eh_region r;
  eh_catch c;
  basic_block src, dst;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  src = gimple_bb (stmt);

  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	{
	  dst = label_to_block (c->label);
	  make_edge (src, dst, 0);

	  /* A catch-all handler doesn't have a fallthru.  */
	  if (c->type_list == NULL)
	    return false;
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      dst = label_to_block (r->u.allowed.label);
      make_edge (src, dst, 0);
      break;

    default:
      gcc_unreachable ();
    }

  return true;
}
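
/* A hedged illustration, not in the original source: for

     try { ... }
     catch (A) { ... }
     catch (...) { ... }

   the EH_DISPATCH block gains one outgoing edge per handler label; the
   trailing catch-all has a NULL type_list, so the loop above exits
   early and reports that no fallthru edge is needed.  */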
/* Create the single EH edge from STMT to its nearest landing pad,
   if there is such a landing pad within the current function.  */

void
make_eh_edges (gimple stmt)
{
  basic_block src, dst;
  eh_landing_pad lp;
  int lp_nr;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr <= 0)
    return;

  lp = get_eh_landing_pad_from_number (lp_nr);
  gcc_assert (lp != NULL);

  src = gimple_bb (stmt);
  dst = label_to_block (lp->post_landing_pad);
  make_edge (src, dst, EDGE_EH);
}
/* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
   do not actually perform the final edge redirection.

   CHANGE_REGION is true when we're being called from cleanup_empty_eh and
   we intend to change the destination EH region as well; this means
   EH_LANDING_PAD_NR must already be set on the destination block label.
   If false, we're being called from generic cfg manipulation code and we
   should preserve our place within the region tree.  */

static void
redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
{
  eh_landing_pad old_lp, new_lp;
  basic_block old_bb;
  gimple throw_stmt;
  int old_lp_nr, new_lp_nr;
  tree old_label, new_label;
  edge_iterator ei;
  edge e;

  old_bb = edge_in->dest;
  old_label = gimple_block_label (old_bb);
  old_lp_nr = EH_LANDING_PAD_NR (old_label);
  gcc_assert (old_lp_nr > 0);
  old_lp = get_eh_landing_pad_from_number (old_lp_nr);

  throw_stmt = last_stmt (edge_in->src);
  gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);

  new_label = gimple_block_label (new_bb);

  /* Look for an existing region that might be using NEW_BB already.  */
  new_lp_nr = EH_LANDING_PAD_NR (new_label);
  if (new_lp_nr)
    {
      new_lp = get_eh_landing_pad_from_number (new_lp_nr);
      gcc_assert (new_lp);

      /* Unless CHANGE_REGION is true, the new and old landing pad
	 had better be associated with the same EH region.  */
      gcc_assert (change_region || new_lp->region == old_lp->region);
    }
  else
    {
      new_lp = NULL;
      gcc_assert (!change_region);
    }

  /* Notice when we redirect the last EH edge away from OLD_BB.  */
  FOR_EACH_EDGE (e, ei, old_bb->preds)
    if (e != edge_in && (e->flags & EDGE_EH))
      break;

  if (new_lp)
    {
      /* NEW_LP already exists.  If there are still edges into OLD_LP,
	 there's nothing to do with the EH tree.  If there are no more
	 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
	 If CHANGE_REGION is true, then our caller is expecting to remove
	 the landing pad.  */
      if (e == NULL && !change_region)
	remove_eh_landing_pad (old_lp);
    }
  else
    {
      /* No correct landing pad exists.  If there are no more edges
	 into OLD_LP, then we can simply re-use the existing landing pad.
	 Otherwise, we have to create a new landing pad.  */
      if (e == NULL)
	{
	  EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
	  new_lp = old_lp;
	}
      else
	new_lp = gen_eh_landing_pad (old_lp->region);
      new_lp->post_landing_pad = new_label;
      EH_LANDING_PAD_NR (new_label) = new_lp->index;
    }

  /* Maybe move the throwing statement to the new region.  */
  if (old_lp != new_lp)
    {
      remove_stmt_from_eh_lp (throw_stmt);
      add_stmt_to_eh_lp (throw_stmt, new_lp->index);
    }
}
/* Redirect EH edge E to NEW_BB.  */

edge
redirect_eh_edge (edge edge_in, basic_block new_bb)
{
  redirect_eh_edge_1 (edge_in, new_bb, false);
  return ssa_redirect_edge (edge_in, new_bb);
}
/* This is a subroutine of gimple_redirect_edge_and_branch.  Update the
   labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
   The actual edge update will happen in the caller.  */

void
redirect_eh_dispatch_edge (gimple stmt, edge e, basic_block new_bb)
{
  tree new_lab = gimple_block_label (new_bb);
  bool any_changed = false;
  basic_block old_bb;
  eh_region r;
  eh_catch c;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	{
	  old_bb = label_to_block (c->label);
	  if (old_bb == e->dest)
	    {
	      c->label = new_lab;
	      any_changed = true;
	    }
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      old_bb = label_to_block (r->u.allowed.label);
      gcc_assert (old_bb == e->dest);
      r->u.allowed.label = new_lab;
      any_changed = true;
      break;

    default:
      gcc_unreachable ();
    }

  gcc_assert (any_changed);
}
/* Helper function for operation_could_trap_p and stmt_could_throw_p.  */

static bool
operation_could_trap_helper_p (enum tree_code op,
			       bool fp_operation,
			       bool honor_trapv,
			       bool honor_nans,
			       bool honor_snans,
			       tree divisor,
			       bool *handled)
{
  *handled = true;
  switch (op)
    {
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case TRUNC_MOD_EXPR:
    case RDIV_EXPR:
      if (honor_snans || honor_trapv)
	return true;
      if (fp_operation)
	return flag_trapping_math;
      if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
	return true;
      return false;

    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
    case LTGT_EXPR:
      /* Some floating point comparisons may trap.  */
      return honor_nans;

    case EQ_EXPR:
    case NE_EXPR:
    case UNORDERED_EXPR:
    case ORDERED_EXPR:
    case UNLT_EXPR:
    case UNLE_EXPR:
    case UNGT_EXPR:
    case UNGE_EXPR:
    case UNEQ_EXPR:
      return honor_snans;

    case CONVERT_EXPR:
    case FIX_TRUNC_EXPR:
      /* Conversion of floating point might trap.  */
      return honor_nans;

    case NEGATE_EXPR:
    case ABS_EXPR:
    case CONJ_EXPR:
      /* These operations don't trap with floating point.  */
      if (honor_trapv)
	return true;
      return false;

    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
      /* Any floating arithmetic may trap.  */
      if (fp_operation && flag_trapping_math)
	return true;
      if (honor_trapv)
	return true;
      return false;

    case COMPLEX_EXPR:
    case CONSTRUCTOR:
      /* Constructing an object cannot trap.  */
      return false;

    default:
      /* Any floating arithmetic may trap.  */
      if (fp_operation && flag_trapping_math)
	return true;

      *handled = false;
      return false;
    }
}
/* Return true if operation OP may trap.  FP_OPERATION is true if OP is applied
   on floating-point values.  HONOR_TRAPV is true if OP is applied on integer
   type operands that may trap.  If OP is a division operator, DIVISOR contains
   the value of the divisor.  */

bool
operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
			tree divisor)
{
  bool honor_nans = (fp_operation && flag_trapping_math
		     && !flag_finite_math_only);
  bool honor_snans = fp_operation && flag_signaling_nans != 0;
  bool handled;

  if (TREE_CODE_CLASS (op) != tcc_comparison
      && TREE_CODE_CLASS (op) != tcc_unary
      && TREE_CODE_CLASS (op) != tcc_binary)
    return false;

  return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
					honor_nans, honor_snans, divisor,
					&handled);
}
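
/* A hedged usage sketch, not in the original source: a caller that
   wants to speculate a division might guard it as

     if (!operation_could_trap_p (TRUNC_DIV_EXPR, false, false, divisor))
       hoist_the_division ();

   where hoist_the_division is a stand-in for whatever transformation
   is being guarded, and divisor is the second operand tree.  */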
/* Returns true if it is possible to prove that the index of
   an array access REF (an ARRAY_REF expression) falls into the
   array bounds.  */

static bool
in_array_bounds_p (tree ref)
{
  tree idx = TREE_OPERAND (ref, 1);
  tree min, max;

  if (TREE_CODE (idx) != INTEGER_CST)
    return false;

  min = array_ref_low_bound (ref);
  max = array_ref_up_bound (ref);
  if (!min
      || !max
      || TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    return false;

  if (tree_int_cst_lt (idx, min)
      || tree_int_cst_lt (max, idx))
    return false;

  return true;
}
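
/* A hedged illustration, not in the original source: given

     int a[10];

   the access a[4] has idx = 4 within [0, 9] and the function returns
   true; a[12] falls outside and returns false; and a[i] with a
   non-constant i returns false, since nothing can be proved here.  */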
/* Returns true if it is possible to prove that the range of
   an array access REF (an ARRAY_RANGE_REF expression) falls
   into the array bounds.  */

static bool
range_in_array_bounds_p (tree ref)
{
  tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
  tree range_min, range_max, min, max;

  range_min = TYPE_MIN_VALUE (domain_type);
  range_max = TYPE_MAX_VALUE (domain_type);
  if (!range_min
      || !range_max
      || TREE_CODE (range_min) != INTEGER_CST
      || TREE_CODE (range_max) != INTEGER_CST)
    return false;

  min = array_ref_low_bound (ref);
  max = array_ref_up_bound (ref);
  if (!min
      || !max
      || TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    return false;

  if (tree_int_cst_lt (range_min, min)
      || tree_int_cst_lt (max, range_max))
    return false;

  return true;
}
/* Return true if EXPR can trap, as in dereferencing an invalid pointer
   location or floating point arithmetic.  C.f. the rtl version, may_trap_p.
   This routine expects only GIMPLE lhs or rhs input.  */

bool
tree_could_trap_p (tree expr)
{
  enum tree_code code;
  bool fp_operation = false;
  bool honor_trapv = false;
  tree t, base, div = NULL_TREE;

  if (!expr)
    return false;

  code = TREE_CODE (expr);
  t = TREE_TYPE (expr);

  if (t)
    {
      if (COMPARISON_CLASS_P (expr))
	fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
      else
	fp_operation = FLOAT_TYPE_P (t);
      honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
    }

  if (TREE_CODE_CLASS (code) == tcc_binary)
    div = TREE_OPERAND (expr, 1);
  if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
    return true;

 restart:
  switch (code)
    {
    case COMPONENT_REF:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case BIT_FIELD_REF:
    case VIEW_CONVERT_EXPR:
    case WITH_SIZE_EXPR:
      expr = TREE_OPERAND (expr, 0);
      code = TREE_CODE (expr);
      goto restart;

    case ARRAY_RANGE_REF:
      base = TREE_OPERAND (expr, 0);
      if (tree_could_trap_p (base))
	return true;
      if (TREE_THIS_NOTRAP (expr))
	return false;
      return !range_in_array_bounds_p (expr);

    case ARRAY_REF:
      base = TREE_OPERAND (expr, 0);
      if (tree_could_trap_p (base))
	return true;
      if (TREE_THIS_NOTRAP (expr))
	return false;
      return !in_array_bounds_p (expr);

    case TARGET_MEM_REF:
    case MEM_REF:
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
	  && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
	return true;
      if (TREE_THIS_NOTRAP (expr))
	return false;
      /* We cannot prove that the access is in-bounds when we have
	 variable-index TARGET_MEM_REFs.  */
      if (code == TARGET_MEM_REF
	  && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
	return true;
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
	{
	  tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
	  offset_int off = mem_ref_offset (expr);
	  if (wi::neg_p (off, SIGNED))
	    return true;
	  if (TREE_CODE (base) == STRING_CST)
	    return wi::leu_p (TREE_STRING_LENGTH (base), off);
	  else if (DECL_SIZE_UNIT (base) == NULL_TREE
		   || TREE_CODE (DECL_SIZE_UNIT (base)) != INTEGER_CST
		   || wi::leu_p (wi::to_offset (DECL_SIZE_UNIT (base)), off))
	    return true;
	  /* Now we are sure the first byte of the access is inside
	     the object.  */
	  return false;
	}
      return true;

    case INDIRECT_REF:
      return !TREE_THIS_NOTRAP (expr);

    case ASM_EXPR:
      return TREE_THIS_VOLATILE (expr);

    case CALL_EXPR:
      t = get_callee_fndecl (expr);
      /* Assume that calls to weak functions may trap.  */
      if (!t || !DECL_P (t))
	return true;
      if (DECL_WEAK (t))
	return tree_could_trap_p (t);
      return false;

    case FUNCTION_DECL:
      /* Assume that accesses to weak functions may trap, unless we know
	 they are certainly defined in current TU or in some other
	 LTO partition.  */
      if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
	{
	  cgraph_node *node = cgraph_node::get (expr);
	  if (node)
	    node = node->function_symbol ();
	  return !(node && node->in_other_partition);
	}
      return false;

    case VAR_DECL:
      /* Assume that accesses to weak vars may trap, unless we know
	 they are certainly defined in current TU or in some other
	 LTO partition.  */
      if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
	{
	  varpool_node *node = varpool_node::get (expr);
	  if (node)
	    node = node->ultimate_alias_target ();
	  return !(node && node->in_other_partition);
	}
      return false;

    default:
      return false;
    }
}
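
/* A hedged illustration, not in the original source: a weak external
   may resolve to a null address at link time, as in

     extern int counter __attribute__ ((weak));
     int get (void) { return counter; }

   so the VAR_DECL case above conservatively answers true for weak,
   non-comdat externals unless they are known to be defined in another
   LTO partition.  */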
/* Helper for stmt_could_throw_p.  Return true if STMT (assumed to be
   an assignment or a conditional) may throw.  */

static bool
stmt_could_throw_1_p (gimple stmt)
{
  enum tree_code code = gimple_expr_code (stmt);
  bool honor_nans = false;
  bool honor_snans = false;
  bool fp_operation = false;
  bool honor_trapv = false;
  tree t;
  size_t i;
  bool handled, ret;

  if (TREE_CODE_CLASS (code) == tcc_comparison
      || TREE_CODE_CLASS (code) == tcc_unary
      || TREE_CODE_CLASS (code) == tcc_binary)
    {
      if (is_gimple_assign (stmt)
	  && TREE_CODE_CLASS (code) == tcc_comparison)
	t = TREE_TYPE (gimple_assign_rhs1 (stmt));
      else if (gimple_code (stmt) == GIMPLE_COND)
	t = TREE_TYPE (gimple_cond_lhs (stmt));
      else
	t = gimple_expr_type (stmt);
      fp_operation = FLOAT_TYPE_P (t);
      if (fp_operation)
	{
	  honor_nans = flag_trapping_math && !flag_finite_math_only;
	  honor_snans = flag_signaling_nans != 0;
	}
      else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
	honor_trapv = true;
    }

  /* Check if the main expression may trap.  */
  t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
  ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
				       honor_nans, honor_snans, t,
				       &handled);
  if (handled)
    return ret;

  /* If the expression does not trap, see if any of the individual operands may
     trap.  */
  for (i = 0; i < gimple_num_ops (stmt); i++)
    if (tree_could_trap_p (gimple_op (stmt, i)))
      return true;

  return false;
}
/* Return true if statement STMT could throw an exception.  */

bool
stmt_could_throw_p (gimple stmt)
{
  if (!flag_exceptions)
    return false;

  /* The only statements that can throw an exception are assignments,
     conditionals, calls, resx, and asms.  */
  switch (gimple_code (stmt))
    {
    case GIMPLE_RESX:
      return true;

    case GIMPLE_CALL:
      return !gimple_call_nothrow_p (stmt);

    case GIMPLE_ASSIGN:
    case GIMPLE_COND:
      if (!cfun->can_throw_non_call_exceptions)
	return false;
      return stmt_could_throw_1_p (stmt);

    case GIMPLE_ASM:
      if (!cfun->can_throw_non_call_exceptions)
	return false;
      return gimple_asm_volatile_p (stmt);

    default:
      return false;
    }
}
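
/* A hedged usage sketch, not in the original source: these predicates
   compose with the EH table lookup, e.g.

     if (stmt_could_throw_p (stmt) && lookup_stmt_eh_lp (stmt) > 0)
       handle_internal_throw (stmt);

   where handle_internal_throw is a stand-in; a positive landing pad
   number refines "could throw" into "throws to a pad in this
   function", mirroring stmt_can_throw_internal below.  */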
/* Return true if expression T could throw an exception.  */

bool
tree_could_throw_p (tree t)
{
  if (!flag_exceptions)
    return false;
  if (TREE_CODE (t) == MODIFY_EXPR)
    {
      if (cfun->can_throw_non_call_exceptions
	  && tree_could_trap_p (TREE_OPERAND (t, 0)))
	return true;
      t = TREE_OPERAND (t, 1);
    }

  if (TREE_CODE (t) == WITH_SIZE_EXPR)
    t = TREE_OPERAND (t, 0);
  if (TREE_CODE (t) == CALL_EXPR)
    return (call_expr_flags (t) & ECF_NOTHROW) == 0;
  if (cfun->can_throw_non_call_exceptions)
    return tree_could_trap_p (t);
  return false;
}
/* Return true if STMT can throw an exception that is not caught within
   the current function (CFUN).  */

bool
stmt_can_throw_external (gimple stmt)
{
  int lp_nr;

  if (!stmt_could_throw_p (stmt))
    return false;

  lp_nr = lookup_stmt_eh_lp (stmt);
  return lp_nr == 0;
}
/* Return true if STMT can throw an exception that is caught within
   the current function (CFUN).  */

bool
stmt_can_throw_internal (gimple stmt)
{
  int lp_nr;

  if (!stmt_could_throw_p (stmt))
    return false;

  lp_nr = lookup_stmt_eh_lp (stmt);
  return lp_nr > 0;
}
/* Given a statement STMT in IFUN, if STMT can no longer throw, then
   remove any entry it might have from the EH table.  Return true if
   any change was made.  */

bool
maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
{
  if (stmt_could_throw_p (stmt))
    return false;
  return remove_stmt_from_eh_lp_fn (ifun, stmt);
}

/* Likewise, but always use the current function.  */

bool
maybe_clean_eh_stmt (gimple stmt)
{
  return maybe_clean_eh_stmt_fn (cfun, stmt);
}
/* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
   OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
   in the table if it should be in there.  Return TRUE if a replacement was
   done that may require an EH edge purge.  */

bool
maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
{
  int lp_nr = lookup_stmt_eh_lp (old_stmt);

  if (lp_nr != 0)
    {
      bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);

      if (new_stmt == old_stmt && new_stmt_could_throw)
	return false;

      remove_stmt_from_eh_lp (old_stmt);
      if (new_stmt_could_throw)
	{
	  add_stmt_to_eh_lp (new_stmt, lp_nr);
	  return false;
	}
      else
	return true;
    }

  return false;
}
/* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
   in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT.  The MAP
   operand is the return value of duplicate_eh_regions.  */

bool
maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
			    struct function *old_fun, gimple old_stmt,
			    hash_map<void *, void *> *map,
			    int default_lp_nr)
{
  int old_lp_nr, new_lp_nr;

  if (!stmt_could_throw_p (new_stmt))
    return false;

  old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
  if (old_lp_nr == 0)
    {
      if (default_lp_nr == 0)
	return false;
      new_lp_nr = default_lp_nr;
    }
  else if (old_lp_nr > 0)
    {
      eh_landing_pad old_lp, new_lp;

      old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
      new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
      new_lp_nr = new_lp->index;
    }
  else
    {
      eh_region old_r, new_r;

      old_r = (*old_fun->eh->region_array)[-old_lp_nr];
      new_r = static_cast<eh_region> (*map->get (old_r));
      new_lp_nr = -new_r->index;
    }

  add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
  return true;
}
/* Similar, but both OLD_STMT and NEW_STMT are within the current function,
   and thus no remapping is required.  */

bool
maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
{
  int lp_nr;

  if (!stmt_could_throw_p (new_stmt))
    return false;

  lp_nr = lookup_stmt_eh_lp (old_stmt);
  if (lp_nr == 0)
    return false;

  add_stmt_to_eh_lp (new_stmt, lp_nr);
  return true;
}
/* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
   GIMPLE_TRY) that are similar enough to be considered the same.  Currently
   this only handles handlers consisting of a single call, as that's the
   important case for C++: a destructor call for a particular object showing
   up in multiple handlers.  */

static bool
same_handler_p (gimple_seq oneh, gimple_seq twoh)
{
  gimple_stmt_iterator gsi;
  gimple ones, twos;
  unsigned int ai;

  gsi = gsi_start (oneh);
  if (!gsi_one_before_end_p (gsi))
    return false;
  ones = gsi_stmt (gsi);

  gsi = gsi_start (twoh);
  if (!gsi_one_before_end_p (gsi))
    return false;
  twos = gsi_stmt (gsi);

  if (!is_gimple_call (ones)
      || !is_gimple_call (twos)
      || gimple_call_lhs (ones)
      || gimple_call_lhs (twos)
      || gimple_call_chain (ones)
      || gimple_call_chain (twos)
      || !gimple_call_same_target_p (ones, twos)
      || gimple_call_num_args (ones) != gimple_call_num_args (twos))
    return false;

  for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
    if (!operand_equal_p (gimple_call_arg (ones, ai),
			  gimple_call_arg (twos, ai), 0))
      return false;

  return true;
}
/* Optimize
    try { A() } finally { try { ~B() } catch { ~A() } }
    try { ... } finally { ~A() }
   to
    try { A() } catch { ~B() }
    try { ~B() ... } finally { ~A() }

   This occurs frequently in C++, where A is a local variable and B is a
   temporary used in the initializer for A.  */

static void
optimize_double_finally (gimple one, gimple two)
{
  gimple oneh;
  gimple_stmt_iterator gsi;
  gimple_seq cleanup;

  cleanup = gimple_try_cleanup (one);
  gsi = gsi_start (cleanup);
  if (!gsi_one_before_end_p (gsi))
    return;

  oneh = gsi_stmt (gsi);
  if (gimple_code (oneh) != GIMPLE_TRY
      || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
    return;

  if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
    {
      gimple_seq seq = gimple_try_eval (oneh);

      gimple_try_set_cleanup (one, seq);
      gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
      seq = copy_gimple_seq_and_replace_locals (seq);
      gimple_seq_add_seq (&seq, gimple_try_eval (two));
      gimple_try_set_eval (two, seq);
    }
}
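
/* A hedged C++-level illustration, not in the original source: for

     A a = A (B ());

   where B is a temporary used to initialize the local a, gimplification
   produces the nested try/finally shown above; the ~A() handler appears
   both in the inner catch and in the following finally, so
   same_handler_p matches the two calls and the rewrite folds the ~B()
   cleanup into the second region, removing the duplicated handler.  */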
/* Perform EH refactoring optimizations that are simpler to do when code
   flow has been lowered but EH structures haven't.  */

static void
refactor_eh_r (gimple_seq seq)
{
  gimple_stmt_iterator gsi;
  gimple one, two;

  one = NULL;
  two = NULL;
  gsi = gsi_start (seq);
  while (1)
    {
      one = two;
      if (gsi_end_p (gsi))
	two = NULL;
      else
	two = gsi_stmt (gsi);
      if (one
	  && two
	  && gimple_code (one) == GIMPLE_TRY
	  && gimple_code (two) == GIMPLE_TRY
	  && gimple_try_kind (one) == GIMPLE_TRY_FINALLY
	  && gimple_try_kind (two) == GIMPLE_TRY_FINALLY)
	optimize_double_finally (one, two);
      if (one)
	switch (gimple_code (one))
	  {
	  case GIMPLE_TRY:
	    refactor_eh_r (gimple_try_eval (one));
	    refactor_eh_r (gimple_try_cleanup (one));
	    break;
	  case GIMPLE_CATCH:
	    refactor_eh_r (gimple_catch_handler (one));
	    break;
	  case GIMPLE_EH_FILTER:
	    refactor_eh_r (gimple_eh_filter_failure (one));
	    break;
	  case GIMPLE_EH_ELSE:
	    refactor_eh_r (gimple_eh_else_n_body (one));
	    refactor_eh_r (gimple_eh_else_e_body (one));
	    break;
	  default:
	    break;
	  }
      if (two)
	gsi_next (&gsi);
      else
	break;
    }
}
const pass_data pass_data_refactor_eh =
{
  GIMPLE_PASS, /* type */
  "ehopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_refactor_eh : public gimple_opt_pass
{
public:
  pass_refactor_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_refactor_eh, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_exceptions != 0; }
  virtual unsigned int execute (function *)
    {
      refactor_eh_r (gimple_body (current_function_decl));
      return 0;
    }

}; // class pass_refactor_eh

gimple_opt_pass *
make_pass_refactor_eh (gcc::context *ctxt)
{
  return new pass_refactor_eh (ctxt);
}
/* At the end of gimple optimization, we can lower RESX.  */

static bool
lower_resx (basic_block bb, gimple stmt, hash_map<eh_region, tree> *mnt_map)
{
  int lp_nr;
  eh_region src_r, dst_r;
  gimple_stmt_iterator gsi;
  gimple x;
  tree fn, src_nr;
  bool ret = false;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0)
    dst_r = get_eh_region_from_lp_number (lp_nr);
  else
    dst_r = NULL;

  src_r = get_eh_region_from_number (gimple_resx_region (stmt));
  gsi = gsi_last_bb (bb);

  if (src_r == NULL)
    {
      /* We can wind up with no source region when pass_cleanup_eh shows
	 that there are no entries into an eh region and deletes it, but
	 then the block that contains the resx isn't removed.  This can
	 happen without optimization when the switch statement created by
	 lower_try_finally_switch isn't simplified to remove the eh case.

	 Resolve this by expanding the resx node to an abort.  */

      fn = builtin_decl_implicit (BUILT_IN_TRAP);
      x = gimple_build_call (fn, 0);
      gsi_insert_before (&gsi, x, GSI_SAME_STMT);

      while (EDGE_COUNT (bb->succs) > 0)
	remove_edge (EDGE_SUCC (bb, 0));
    }
  else if (dst_r)
    {
      /* When we have a destination region, we resolve this by copying
	 the excptr and filter values into place, and changing the edge
	 to immediately after the landing pad.  */
      edge e;

      if (lp_nr < 0)
	{
	  basic_block new_bb;
	  tree lab;

	  /* We are resuming into a MUST_NOT_THROW region.  Expand a call to
	     the failure decl into a new block, if needed.  */
	  gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);

	  tree *slot = mnt_map->get (dst_r);
	  if (slot == NULL)
	    {
	      gimple_stmt_iterator gsi2;

	      new_bb = create_empty_bb (bb);
	      add_bb_to_loop (new_bb, bb->loop_father);
	      lab = gimple_block_label (new_bb);
	      gsi2 = gsi_start_bb (new_bb);

	      fn = dst_r->u.must_not_throw.failure_decl;
	      x = gimple_build_call (fn, 0);
	      gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
	      gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);

	      mnt_map->put (dst_r, lab);
	    }
	  else
	    {
	      lab = *slot;
	      new_bb = label_to_block (lab);
	    }

	  gcc_assert (EDGE_COUNT (bb->succs) == 0);
	  e = make_edge (bb, new_bb, EDGE_FALLTHRU);
	  e->count = bb->count;
	  e->probability = REG_BR_PROB_BASE;
	}
      else
	{
	  edge_iterator ei;
	  tree dst_nr = build_int_cst (integer_type_node, dst_r->index);

	  fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
	  src_nr = build_int_cst (integer_type_node, src_r->index);
	  x = gimple_build_call (fn, 2, dst_nr, src_nr);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	  /* Update the flags for the outgoing edge.  */
	  e = single_succ_edge (bb);
	  gcc_assert (e->flags & EDGE_EH);
	  e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;

	  /* If there are no more EH users of the landing pad, delete it.  */
	  FOR_EACH_EDGE (e, ei, e->dest->preds)
	    if (e->flags & EDGE_EH)
	      break;
	  if (e == NULL)
	    {
	      eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
	      remove_eh_landing_pad (lp);
	    }
	}

      ret = true;
    }
  else
    {
      tree var;

      /* When we don't have a destination region, this exception escapes
	 up the call chain.  We resolve this by generating a call to the
	 _Unwind_Resume library function.  */

      /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
	 with no arguments for C++ and Java.  Check for that.  */
      if (src_r->use_cxa_end_cleanup)
	{
	  fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
	  x = gimple_build_call (fn, 0);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
	}
      else
	{
	  fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
	  src_nr = build_int_cst (integer_type_node, src_r->index);
	  x = gimple_build_call (fn, 1, src_nr);
	  var = create_tmp_var (ptr_type_node, NULL);
	  var = make_ssa_name (var, x);
	  gimple_call_set_lhs (x, var);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	  fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
	  x = gimple_build_call (fn, 1, var);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
	}

      gcc_assert (EDGE_COUNT (bb->succs) == 0);
    }

  gsi_remove (&gsi, true);

  return ret;
}
const pass_data pass_data_lower_resx =
{
  GIMPLE_PASS, /* type */
  "resx", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_resx : public gimple_opt_pass
{
public:
  pass_lower_resx (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_resx, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_exceptions != 0; }
  virtual unsigned int execute (function *);

}; // class pass_lower_resx

unsigned int
pass_lower_resx::execute (function *fun)
{
  basic_block bb;
  bool dominance_invalidated = false;
  bool any_rewritten = false;

  hash_map<eh_region, tree> mnt_map;

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple last = last_stmt (bb);
      if (last && is_gimple_resx (last))
	{
	  dominance_invalidated |= lower_resx (bb, last, &mnt_map);
	  any_rewritten = true;
	}
    }

  if (dominance_invalidated)
    {
      free_dominance_info (CDI_DOMINATORS);
      free_dominance_info (CDI_POST_DOMINATORS);
    }

  return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
}

gimple_opt_pass *
make_pass_lower_resx (gcc::context *ctxt)
{
  return new pass_lower_resx (ctxt);
}
/* Try to optimize var = {v} {CLOBBER} stmts followed just by
   external throw.  */

static void
optimize_clobbers (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  bool any_clobbers = false;
  bool seen_stack_restore = false;
  edge_iterator ei;
  edge e;

  /* Only optimize anything if the bb contains at least one clobber,
     ends with resx (checked by caller), optionally contains some
     debug stmts or labels, or at most one __builtin_stack_restore
     call, and has an incoming EH edge.  */
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_clobber_p (stmt))
	{
	  any_clobbers = true;
	  continue;
	}
      if (!seen_stack_restore
	  && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
	{
	  seen_stack_restore = true;
	  continue;
	}
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      return;
    }
  if (!any_clobbers)
    return;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (e->flags & EDGE_EH)
      break;
  if (e == NULL)
    return;
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (!gimple_clobber_p (stmt))
	continue;
      unlink_stmt_vdef (stmt);
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}
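
/* A hedged GIMPLE-level illustration, not in the original source: a
   block this function targets typically looks like

     <L1>:
     x = {v} {CLOBBER};
     y = {v} {CLOBBER};
     resx 2;

   where the resx throws externally; the clobbers only mark the ends of
   storage lifetimes, so they are dead here and the second loop above
   deletes them.  */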
/* Try to sink var = {v} {CLOBBER} stmts followed just by
   internal throw to successor BB.  */

static unsigned
sink_clobbers (basic_block bb)
{
  edge e;
  edge_iterator ei;
  gimple_stmt_iterator gsi, dgsi;
  basic_block succbb;
  bool any_clobbers = false;
  unsigned todo = 0;

  /* Only optimize if BB has a single EH successor and
     all predecessor edges are EH too.  */
  if (!single_succ_p (bb)
      || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
    return 0;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if ((e->flags & EDGE_EH) == 0)
	return 0;
    }

  /* And BB contains only CLOBBER stmts before the final
     RESX.  */
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      if (!gimple_clobber_p (stmt))
	return 0;
      any_clobbers = true;
    }
  if (!any_clobbers)
    return 0;

  edge succe = single_succ_edge (bb);
  succbb = succe->dest;

  /* See if there is a virtual PHI node to take an updated virtual
     operand from.  */
  gimple vphi = NULL;
  tree vuse = NULL_TREE;
  for (gsi = gsi_start_phis (succbb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree res = gimple_phi_result (gsi_stmt (gsi));
      if (virtual_operand_p (res))
	{
	  vphi = gsi_stmt (gsi);
	  vuse = res;
	  break;
	}
    }

  dgsi = gsi_after_labels (succbb);
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      tree lhs;
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      lhs = gimple_assign_lhs (stmt);
      /* Unfortunately we don't have dominance info updated at this
	 point, so checking if
	 dominated_by_p (CDI_DOMINATORS, succbb,
			 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0)))
	 would be too costly.  Thus, avoid sinking any clobbers that
	 refer to non-(D) SSA_NAMEs.  */
      if (TREE_CODE (lhs) == MEM_REF
	  && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
	  && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
	{
	  unlink_stmt_vdef (stmt);
	  gsi_remove (&gsi, true);
	  release_defs (stmt);
	  continue;
	}

      /* As we do not change stmt order when sinking across a
	 forwarder edge we can keep virtual operands in place.  */
      gsi_remove (&gsi, false);
      gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);

      /* But adjust virtual operands if we sunk across a PHI node.  */
      if (vuse)
	{
	  gimple use_stmt;
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
	      SET_USE (use_p, gimple_vdef (stmt));
	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
	    {
	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
	    }
	  /* Adjust the incoming virtual operand.  */
	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
	  SET_USE (gimple_vuse_op (stmt), vuse);
	}
      /* If there isn't a single predecessor but no virtual PHI node
	 arrange for virtual operands to be renamed.  */
      else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
	       && !single_pred_p (succbb))
	{
	  /* In this case there will be no use of the VDEF of this stmt.
	     ??? Unless this is a secondary opportunity and we have not
	     removed unreachable blocks yet, so we cannot assert this.
	     Which also means we will end up renaming too many times.  */
	  SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
	  mark_virtual_operands_for_renaming (cfun);
	  todo |= TODO_update_ssa_only_virtuals;
	}
    }

  return todo;
}
/* At the end of inlining, we can lower EH_DISPATCH.  Return true when
   we have found some duplicate labels and removed some edges.  */

static bool
lower_eh_dispatch (basic_block src, gimple stmt)
{
  gimple_stmt_iterator gsi;
  int region_nr;
  eh_region r;
  tree filter, fn;
  gimple x;
  bool redirected = false;

  region_nr = gimple_eh_dispatch_region (stmt);
  r = get_eh_region_from_number (region_nr);

  gsi = gsi_last_bb (src);

  switch (r->type)
    {
    case ERT_TRY:
      {
	auto_vec<tree> labels;
	tree default_label = NULL;
	eh_catch c;
	edge_iterator ei;
	edge e;
	hash_set<tree> seen_values;

	/* Collect the labels for a switch.  Zero the post_landing_pad
	   field because we'll no longer have anything keeping these labels
	   in existence and the optimizer will be free to merge these
	   blocks at will.  */
	for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	  {
	    tree tp_node, flt_node, lab = c->label;
	    bool have_label = false;

	    c->label = NULL;
	    tp_node = c->type_list;
	    flt_node = c->filter_list;

	    if (tp_node == NULL)
	      {
		default_label = lab;
		break;
	      }
	    do
	      {
		/* Filter out duplicate labels that arise when this handler
		   is shadowed by an earlier one.  When no labels are
		   attached to the handler anymore, we remove
		   the corresponding edge and then we delete unreachable
		   blocks at the end of this pass.  */
		if (! seen_values.contains (TREE_VALUE (flt_node)))
		  {
		    tree t = build_case_label (TREE_VALUE (flt_node),
					       NULL, lab);
		    labels.safe_push (t);
		    seen_values.add (TREE_VALUE (flt_node));
		    have_label = true;
		  }

		tp_node = TREE_CHAIN (tp_node);
		flt_node = TREE_CHAIN (flt_node);
	      }
	    while (tp_node);
	    if (! have_label)
	      {
		remove_edge (find_edge (src, label_to_block (lab)));
		redirected = true;
	      }
	  }

	/* Clean up the edge flags.  */
	FOR_EACH_EDGE (e, ei, src->succs)
	  {
	    if (e->flags & EDGE_FALLTHRU)
	      {
		/* If there was no catch-all, use the fallthru edge.  */
		if (default_label == NULL)
		  default_label = gimple_block_label (e->dest);
		e->flags &= ~EDGE_FALLTHRU;
	      }
	  }
	gcc_assert (default_label != NULL);

	/* Don't generate a switch if there's only a default case.
	   This is common in the form of try { A; } catch (...) { B; }.  */
	if (!labels.exists ())
	  {
	    e = single_succ_edge (src);
	    e->flags |= EDGE_FALLTHRU;
	  }
	else
	  {
	    fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
	    x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
							 region_nr));
	    filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
	    filter = make_ssa_name (filter, x);
	    gimple_call_set_lhs (x, filter);
	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	    /* Turn the default label into a default case.  */
	    default_label = build_case_label (NULL, NULL, default_label);
	    sort_case_labels (labels);

	    x = gimple_build_switch (filter, default_label, labels);
	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
	  }
      }
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      {
	edge b_e = BRANCH_EDGE (src);
	edge f_e = FALLTHRU_EDGE (src);

	fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
	x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
						     region_nr));
	filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
	filter = make_ssa_name (filter, x);
	gimple_call_set_lhs (x, filter);
	gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	r->u.allowed.label = NULL;
	x = gimple_build_cond (EQ_EXPR, filter,
			       build_int_cst (TREE_TYPE (filter),
					      r->u.allowed.filter),
			       NULL_TREE, NULL_TREE);
	gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
	f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Replace the EH_DISPATCH with the SWITCH or COND generated above.  */
  gsi_remove (&gsi, true);
  return redirected;
}
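
/* A hedged illustration, not in the original source: an ERT_TRY
   dispatch with two non-catch-all handlers lowers to roughly

     filter.N = __builtin_eh_filter (region_nr);
     switch (filter.N)
       {
       case 1: goto catch_A;
       case 2: goto catch_B;
       default: goto after_dispatch;
       }

   with the case values being the filter values assigned earlier by
   assign_filter_values; filter.N, catch_A, catch_B and after_dispatch
   are illustrative names only.  */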
const pass_data pass_data_lower_eh_dispatch =
{
  GIMPLE_PASS, /* type */
  "ehdisp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_eh_dispatch : public gimple_opt_pass
{
public:
  pass_lower_eh_dispatch (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
  virtual unsigned int execute (function *);

}; // class pass_lower_eh_dispatch

unsigned int
pass_lower_eh_dispatch::execute (function *fun)
{
  basic_block bb;
  int flags = 0;
  bool redirected = false;

  assign_filter_values ();

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple last = last_stmt (bb);
      if (last == NULL)
	continue;
      if (gimple_code (last) == GIMPLE_EH_DISPATCH)
	{
	  redirected |= lower_eh_dispatch (bb, last);
	  flags |= TODO_update_ssa_only_virtuals;
	}
      else if (gimple_code (last) == GIMPLE_RESX)
	{
	  if (stmt_can_throw_external (last))
	    optimize_clobbers (bb);
	  else
	    flags |= sink_clobbers (bb);
	}
    }

  if (redirected)
    delete_unreachable_blocks ();
  return flags;
}

gimple_opt_pass *
make_pass_lower_eh_dispatch (gcc::context *ctxt)
{
  return new pass_lower_eh_dispatch (ctxt);
}
/* Walk statements, see what regions and, optionally, landing pads
   are really referenced.

   Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
   and in LP_REACHABLE an sbitmap with bits set for reachable landing pads.

   Passing NULL for LP_REACHABLE is valid, in this case only reachable
   regions are marked.

   The caller is responsible for freeing the returned sbitmaps.  */

static void
mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
{
  sbitmap r_reachable, lp_reachable;
  basic_block bb;
  bool mark_landing_pads = (lp_reachablep != NULL);
  gcc_checking_assert (r_reachablep != NULL);

  r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
  bitmap_clear (r_reachable);
  *r_reachablep = r_reachable;

  if (mark_landing_pads)
    {
      lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
      bitmap_clear (lp_reachable);
      *lp_reachablep = lp_reachable;
    }
  else
    lp_reachable = NULL;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);

	  if (mark_landing_pads)
	    {
	      int lp_nr = lookup_stmt_eh_lp (stmt);

	      /* Negative LP numbers are MUST_NOT_THROW regions which
		 are not considered BB enders.  */
	      if (lp_nr < 0)
		bitmap_set_bit (r_reachable, -lp_nr);

	      /* Positive LP numbers are real landing pads, and BB enders.  */
	      else if (lp_nr > 0)
		{
		  gcc_assert (gsi_one_before_end_p (gsi));
		  eh_region region = get_eh_region_from_lp_number (lp_nr);
		  bitmap_set_bit (r_reachable, region->index);
		  bitmap_set_bit (lp_reachable, lp_nr);
		}
	    }

	  /* Avoid removing regions referenced from RESX/EH_DISPATCH.  */
	  switch (gimple_code (stmt))
	    {
	    case GIMPLE_RESX:
	      bitmap_set_bit (r_reachable, gimple_resx_region (stmt));
	      break;
	    case GIMPLE_EH_DISPATCH:
	      bitmap_set_bit (r_reachable, gimple_eh_dispatch_region (stmt));
	      break;
	    default:
	      break;
	    }
	}
    }
}
/* Remove unreachable handlers and unreachable landing pads.  */

static void
remove_unreachable_handlers (void)
{
  sbitmap r_reachable, lp_reachable;
  eh_region region;
  eh_landing_pad lp;
  unsigned i;

  mark_reachable_handlers (&r_reachable, &lp_reachable);

  if (dump_file)
    {
      fprintf (dump_file, "Before removal of unreachable regions:\n");
      dump_eh_tree (dump_file, cfun);
      fprintf (dump_file, "Reachable regions: ");
      dump_bitmap_file (dump_file, r_reachable);
      fprintf (dump_file, "Reachable landing pads: ");
      dump_bitmap_file (dump_file, lp_reachable);
    }

  if (dump_file)
    {
      FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
	if (region && !bitmap_bit_p (r_reachable, region->index))
	  fprintf (dump_file,
		   "Removing unreachable region %d\n",
		   region->index);
    }

  remove_unreachable_eh_regions (r_reachable);

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
    if (lp && !bitmap_bit_p (lp_reachable, lp->index))
      {
	if (dump_file)
	  fprintf (dump_file,
		   "Removing unreachable landing pad %d\n",
		   lp->index);
	remove_eh_landing_pad (lp);
      }

  if (dump_file)
    {
      fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
      dump_eh_tree (dump_file, cfun);
      fprintf (dump_file, "\n\n");
    }

  sbitmap_free (r_reachable);
  sbitmap_free (lp_reachable);

#ifdef ENABLE_CHECKING
  verify_eh_tree (cfun);
#endif
}
/* Remove unreachable handlers if any landing pads have been removed after
   last ehcleanup pass (due to gimple_purge_dead_eh_edges).  */

void
maybe_remove_unreachable_handlers (void)
{
  eh_landing_pad lp;
  unsigned i;

  if (cfun->eh == NULL)
    return;

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
    if (lp && lp->post_landing_pad)
      {
	if (label_to_block (lp->post_landing_pad) == NULL)
	  {
	    remove_unreachable_handlers ();
	    return;
	  }
      }
}
/* Remove regions that do not have landing pads.  This assumes
   that remove_unreachable_handlers has already been run, and
   that we've just manipulated the landing pads since then.

   Preserve regions with landing pads and regions that prevent
   exceptions from propagating further, even if these regions
   are not reachable.  */

static void
remove_unreachable_handlers_no_lp (void)
{
  eh_region region;
  sbitmap r_reachable;
  unsigned i;

  mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
    {
      if (! region)
	continue;
      if (region->landing_pads != NULL
	  || region->type == ERT_MUST_NOT_THROW)
	bitmap_set_bit (r_reachable, region->index);
      if (dump_file
	  && !bitmap_bit_p (r_reachable, region->index))
	fprintf (dump_file,
		 "Removing unreachable region %d\n",
		 region->index);
    }

  remove_unreachable_eh_regions (r_reachable);

  sbitmap_free (r_reachable);
}
/* Undo critical edge splitting on an EH landing pad.  Earlier, we
   optimistically split all sorts of edges, including EH edges.  The
   optimization passes in between may not have needed them; if not,
   we should undo the split.

   Recognize this case by having one EH edge incoming to the BB and
   one normal edge outgoing; BB should be empty apart from the
   post_landing_pad label.

   Note that this is slightly different from the empty handler case
   handled by cleanup_empty_eh, in that the actual handler may yet
   have actual code but the landing pad has been separated from the
   handler.  As such, cleanup_empty_eh relies on this transformation
   having been done first.  */

static bool
unsplit_eh (eh_landing_pad lp)
{
  basic_block bb = label_to_block (lp->post_landing_pad);
  gimple_stmt_iterator gsi;
  edge e_in, e_out;

  /* Quickly check the edge counts on BB for singularity.  */
  if (!single_pred_p (bb) || !single_succ_p (bb))
    return false;
  e_in = single_pred_edge (bb);
  e_out = single_succ_edge (bb);

  /* Input edge must be EH and output edge must be normal.  */
  if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
    return false;

  /* The block must be empty except for the labels and debug insns.  */
  gsi = gsi_after_labels (bb);
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);
  if (!gsi_end_p (gsi))
    return false;

  /* The destination block must not already have a landing pad
     for a different region.  */
  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      tree lab;
      int lp_nr;

      if (gimple_code (stmt) != GIMPLE_LABEL)
	break;
      lab = gimple_label_label (stmt);
      lp_nr = EH_LANDING_PAD_NR (lab);
      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
	return false;
    }

  /* The new destination block must not already be a destination of
     the source block, lest we merge fallthru and eh edges and get
     all sorts of confused.  */
  if (find_edge (e_in->src, e_out->dest))
    return false;

  /* ??? We can get degenerate phis due to cfg cleanups.  I would have
     thought this should have been cleaned up by a phicprop pass, but
     that doesn't appear to handle virtuals.  Propagate by hand.  */
  if (!gimple_seq_empty_p (phi_nodes (bb)))
    {
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
	{
	  gimple use_stmt, phi = gsi_stmt (gsi);
	  tree lhs = gimple_phi_result (phi);
	  tree rhs = gimple_phi_arg_def (phi, 0);
	  use_operand_p use_p;
	  imm_use_iterator iter;

	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, rhs);
	    }

	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
	    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;

	  remove_phi_node (&gsi, true);
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
	     lp->index, e_out->dest->index);

  /* Redirect the edge.  Since redirect_eh_edge_1 expects to be moving
     a successor edge, humor it.  But do the real CFG change with the
     predecessor of E_OUT in order to preserve the ordering of arguments
     to the PHI nodes in E_OUT->DEST.  */
  redirect_eh_edge_1 (e_in, e_out->dest, false);
  redirect_edge_pred (e_out, e_in->src);
  e_out->flags = e_in->flags;
  e_out->probability = e_in->probability;
  e_out->count = e_in->count;
  remove_edge (e_in);

  return true;
}
/* Examine each landing pad block and see if it matches unsplit_eh.  */

static bool
unsplit_all_eh (void)
{
  bool changed = false;
  eh_landing_pad lp;
  int i;

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp)
      changed |= unsplit_eh (lp);

  return changed;
}
/* A subroutine of cleanup_empty_eh.  Redirect all EH edges incoming
   to OLD_BB to NEW_BB; return true on success, false on failure.

   OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
   PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
   Virtual PHIs may be deleted and marked for renaming.  */

static bool
cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
			     edge old_bb_out, bool change_region)
{
  gimple_stmt_iterator ngsi, ogsi;
  edge_iterator ei;
  edge e;
  bitmap ophi_handled;

  /* The destination block must not be a regular successor for any
     of the preds of the landing pad.  Thus, avoid turning the EH edge
     into a regular successor of a block that already reaches NEW_BB,
     which CFG verification would choke on.  See PR45172 and PR51089.  */
  FOR_EACH_EDGE (e, ei, old_bb->preds)
    if (find_edge (e->src, new_bb))
      return false;

  FOR_EACH_EDGE (e, ei, old_bb->preds)
    redirect_edge_var_map_clear (e);

  ophi_handled = BITMAP_ALLOC (NULL);

  /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
     for the edges we're going to move.  */
  for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
    {
      gimple ophi, nphi = gsi_stmt (ngsi);
      tree nresult, nop;

      nresult = gimple_phi_result (nphi);
      nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);

      /* Find the corresponding PHI in OLD_BB so we can forward-propagate
	 the source ssa_name.  */
      ophi = NULL;
      for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
	{
	  ophi = gsi_stmt (ogsi);
	  if (gimple_phi_result (ophi) == nop)
	    break;
	  ophi = NULL;
	}

      /* If we did find the corresponding PHI, copy those inputs.  */
      if (ophi)
	{
	  /* If NOP is used somewhere else beyond phis in new_bb, give up.  */
	  if (!has_single_use (nop))
	    {
	      imm_use_iterator imm_iter;
	      use_operand_p use_p;

	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
		{
		  if (!gimple_debug_bind_p (USE_STMT (use_p))
		      && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
			  || gimple_bb (USE_STMT (use_p)) != new_bb))
		    goto fail;
		}
	    }
	  bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
	  FOR_EACH_EDGE (e, ei, old_bb->preds)
	    {
	      location_t oloc;
	      tree oop;

	      if ((e->flags & EDGE_EH) == 0)
		continue;
	      oop = gimple_phi_arg_def (ophi, e->dest_idx);
	      oloc = gimple_phi_arg_location (ophi, e->dest_idx);
	      redirect_edge_var_map_add (e, nresult, oop, oloc);
	    }
	}
      /* If we didn't find the PHI, if it's a real variable or a VOP, we know
	 from the fact that OLD_BB is tree_empty_eh_handler_p that the
	 variable is unchanged from input to the block and we can simply
	 re-use the input to NEW_BB from the OLD_BB_OUT edge.  */
      else
	{
	  location_t nloc
	    = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
	  FOR_EACH_EDGE (e, ei, old_bb->preds)
	    redirect_edge_var_map_add (e, nresult, nop, nloc);
	}
    }

  /* Second, verify that all PHIs from OLD_BB have been handled.  If not,
     we don't know what values from the other edges into NEW_BB to use.  */
  for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
    {
      gimple ophi = gsi_stmt (ogsi);
      tree oresult = gimple_phi_result (ophi);
      if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
	goto fail;
    }

  /* Finally, move the edges and update the PHIs.  */
  for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
    if (e->flags & EDGE_EH)
      {
	/* ??? CFG manipulation routines do not try to update loop
	   form on edge redirection.  Do so manually here for now.  */
	/* Redirecting a loop entry or latch edge will either create
	   a multiple entry loop or rotate the loop.  If the loops merge
	   we may have created a loop with multiple latches.
	   All of this isn't easily fixed thus cancel the affected loop
	   and mark the other loop as possibly having multiple latches.  */
	if (e->dest == e->dest->loop_father->header)
	  {
	    mark_loop_for_removal (e->dest->loop_father);
	    new_bb->loop_father->latch = NULL;
	    loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
	  }
	redirect_eh_edge_1 (e, new_bb, change_region);
	redirect_edge_succ (e, new_bb);
	flush_pending_stmts (e);
      }
    else
      ei_next (&ei);

  BITMAP_FREE (ophi_handled);
  return true;

 fail:
  FOR_EACH_EDGE (e, ei, old_bb->preds)
    redirect_edge_var_map_clear (e);
  BITMAP_FREE (ophi_handled);
  return false;
}
/* A subroutine of cleanup_empty_eh.  Move a landing pad LP from its
   old region to NEW_REGION at BB.  */

static void
cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
			  eh_landing_pad lp, eh_region new_region)
{
  gimple_stmt_iterator gsi;
  eh_landing_pad *pp;

  for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
    continue;
  *pp = lp->next_lp;

  lp->region = new_region;
  lp->next_lp = new_region->landing_pads;
  new_region->landing_pads = lp;

  /* Delete the RESX that was matched within the empty handler block.  */
  gsi = gsi_last_bb (bb);
  unlink_stmt_vdef (gsi_stmt (gsi));
  gsi_remove (&gsi, true);

  /* Clean up E_OUT for the fallthru.  */
  e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
  e_out->probability = REG_BR_PROB_BASE;
}
/* A subroutine of cleanup_empty_eh.  Handle more complex cases of
   unsplitting than unsplit_eh was prepared to handle, e.g. when
   multiple incoming edges and phis are involved.  */

static bool
cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
{
  gimple_stmt_iterator gsi;
  tree lab;

  /* We really ought not have totally lost everything following
     a landing pad label.  Given that BB is empty, there had better
     be a successor.  */
  gcc_assert (e_out != NULL);

  /* The destination block must not already have a landing pad
     for a different region.  */
  lab = NULL;
  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      int lp_nr;

      if (gimple_code (stmt) != GIMPLE_LABEL)
	break;
      lab = gimple_label_label (stmt);
      lp_nr = EH_LANDING_PAD_NR (lab);
      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
	return false;
    }

  /* Attempt to move the PHIs into the successor block.  */
  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Unsplit EH landing pad %d to block %i "
		 "(via cleanup_empty_eh).\n",
		 lp->index, e_out->dest->index);
      return true;
    }

  return false;
}
/* Return true if edge E_FIRST is part of an empty infinite loop
   or leads to such a loop through a series of single successor
   empty blocks.  */

static bool
infinite_empty_loop_p (edge e_first)
{
  bool inf_loop = false;
  edge e;

  if (e_first->dest == e_first->src)
    return true;

  e_first->src->aux = (void *) 1;
  for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
    {
      gimple_stmt_iterator gsi;
      if (e->dest->aux)
	{
	  inf_loop = true;
	  break;
	}
      e->dest->aux = (void *) 1;
      gsi = gsi_after_labels (e->dest);
      if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
	gsi_next_nondebug (&gsi);
      if (!gsi_end_p (gsi))
	break;
    }
  e_first->src->aux = NULL;
  for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
    e->dest->aux = NULL;

  return inf_loop;
}
/* Examine the block associated with LP to determine if it's an empty
   handler for its EH region.  If so, attempt to redirect EH edges to
   an outer region.  Return true if the CFG was updated in any way.  This
   is similar to jump forwarding, just across EH edges.  */

static bool
cleanup_empty_eh (eh_landing_pad lp)
{
  basic_block bb = label_to_block (lp->post_landing_pad);
  gimple_stmt_iterator gsi;
  gimple resx;
  eh_region new_region;
  edge_iterator ei;
  edge e, e_out;
  bool has_non_eh_pred;
  bool ret = false;
  int new_lp_nr;

  /* There can be zero or one edges out of BB.  This is the quickest test.  */
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      e_out = NULL;
      break;
    case 1:
      e_out = single_succ_edge (bb);
      break;
    default:
      return false;
    }

  resx = last_stmt (bb);
  if (resx && is_gimple_resx (resx))
    {
      if (stmt_can_throw_external (resx))
	optimize_clobbers (bb);
      else if (sink_clobbers (bb))
	ret = true;
    }

  gsi = gsi_after_labels (bb);

  /* Make sure to skip debug statements.  */
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);

  /* If the block is totally empty, look for more unsplitting cases.  */
  if (gsi_end_p (gsi))
    {
      /* For the degenerate case of an infinite loop bail out.
	 If bb has no successors and is totally empty, which can happen e.g.
	 because of incorrect noreturn attribute, bail out too.  */
      if (e_out == NULL
	  || infinite_empty_loop_p (e_out))
	return ret;

      return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
    }

  /* The block should consist only of a single RESX statement, modulo a
     preceding call to __builtin_stack_restore if there is no outgoing
     edge, since the call can be eliminated in this case.  */
  resx = gsi_stmt (gsi);
  if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
    {
      gsi_next (&gsi);
      resx = gsi_stmt (gsi);
    }
  if (!is_gimple_resx (resx))
    return ret;
  gcc_assert (gsi_one_before_end_p (gsi));

  /* Determine if there are non-EH edges, or resx edges into the handler.  */
  has_non_eh_pred = false;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (!(e->flags & EDGE_EH))
      has_non_eh_pred = true;

  /* Find the handler that's outer of the empty handler by looking at
     where the RESX instruction was vectored.  */
  new_lp_nr = lookup_stmt_eh_lp (resx);
  new_region = get_eh_region_from_lp_number (new_lp_nr);

  /* If there's no destination region within the current function,
     redirection is trivial via removing the throwing statements from
     the EH region, removing the EH edges, and allowing the block
     to go unreachable.  */
  if (new_region == NULL)
    {
      gcc_assert (e_out == NULL);
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
	if (e->flags & EDGE_EH)
	  {
	    gimple stmt = last_stmt (e->src);
	    remove_stmt_from_eh_lp (stmt);
	    remove_edge (e);
	  }
	else
	  ei_next (&ei);
      goto succeed;
    }

  /* If the destination region is a MUST_NOT_THROW, allow the runtime
     to handle the abort and allow the blocks to go unreachable.  */
  if (new_region->type == ERT_MUST_NOT_THROW)
    {
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
	if (e->flags & EDGE_EH)
	  {
	    gimple stmt = last_stmt (e->src);
	    remove_stmt_from_eh_lp (stmt);
	    add_stmt_to_eh_lp (stmt, new_lp_nr);
	    remove_edge (e);
	  }
	else
	  ei_next (&ei);
      goto succeed;
    }

  /* Try to redirect the EH edges and merge the PHIs into the destination
     landing pad block.  If the merge succeeds, we'll already have redirected
     all the EH edges.  The handler itself will go unreachable if there were
     no normal edges.  */
  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
    goto succeed;

  /* Finally, if all input edges are EH edges, then we can (potentially)
     reduce the number of transfers from the runtime by moving the landing
     pad from the original region to the new region.  This is a win when
     we remove the last CLEANUP region along a particular exception
     propagation path.  Since nothing changes except for the region with
     which the landing pad is associated, the PHI nodes do not need to be
     adjusted at all.  */
  if (!has_non_eh_pred)
    {
      cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
		 lp->index, new_region->index);

      /* ??? The CFG didn't change, but we may have rendered the
	 old EH region unreachable.  Trigger a cleanup there.  */
      return true;
    }

  return ret;

 succeed:
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
  remove_eh_landing_pad (lp);
  return true;
}
/* Do a post-order traversal of the EH region tree.  Examine each
   post_landing_pad block and see if we can eliminate it as empty.  */

static bool
cleanup_all_empty_eh (void)
{
  bool changed = false;
  eh_landing_pad lp;
  int i;

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp)
      changed |= cleanup_empty_eh (lp);

  return changed;
}
/* Perform cleanups and lowering of exception handling
    1) cleanups regions with handlers doing nothing are optimized out
    2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
    3) Info about regions that are containing instructions, and regions
       reachable via local EH edges is collected
    4) Eh tree is pruned for regions no longer necessary.

   TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
	 Unify those that have the same failure decl and locus.  */

static unsigned int
execute_cleanup_eh_1 (void)
{
  /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
     looking up unreachable landing pads.  */
  remove_unreachable_handlers ();

  /* Watch out for the region tree vanishing due to all unreachable.  */
  if (cfun->eh->region_tree)
    {
      bool changed = false;

      if (optimize)
	changed |= unsplit_all_eh ();
      changed |= cleanup_all_empty_eh ();

      if (changed)
	{
	  free_dominance_info (CDI_DOMINATORS);
	  free_dominance_info (CDI_POST_DOMINATORS);

	  /* We delayed all basic block deletion, as we may have performed
	     cleanups on EH edges while non-EH edges were still present.  */
	  delete_unreachable_blocks ();

	  /* We manipulated the landing pads.  Remove any region that no
	     longer has a landing pad.  */
	  remove_unreachable_handlers_no_lp ();

	  return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
	}
    }

  return 0;
}
const pass_data pass_data_cleanup_eh =
{
  GIMPLE_PASS, /* type */
  "ehcleanup", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_cleanup_eh : public gimple_opt_pass
{
public:
  pass_cleanup_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
  virtual bool gate (function *fun)
    {
      return fun->eh != NULL && fun->eh->region_tree != NULL;
    }

  virtual unsigned int execute (function *);

}; // class pass_cleanup_eh

unsigned int
pass_cleanup_eh::execute (function *fun)
{
  int ret = execute_cleanup_eh_1 ();

  /* If the function no longer needs an EH personality routine
     clear it.  This exposes cross-language inlining opportunities
     and avoids references to a never defined personality routine.  */
  if (DECL_FUNCTION_PERSONALITY (current_function_decl)
      && function_needs_eh_personality (fun) != eh_personality_lang)
    DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;

  return ret;
}

gimple_opt_pass *
make_pass_cleanup_eh (gcc::context *ctxt)
{
  return new pass_cleanup_eh (ctxt);
}
/* Verify that the BB containing STMT as its last statement has
   precisely the edge that make_eh_edges would create.  */

DEBUG_FUNCTION bool
verify_eh_edges (gimple stmt)
{
  basic_block bb = gimple_bb (stmt);
  eh_landing_pad lp = NULL;
  int lp_nr;
  edge_iterator ei;
  edge e, eh_edge;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr > 0)
    lp = get_eh_landing_pad_from_number (lp_nr);

  eh_edge = NULL;
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      if (e->flags & EDGE_EH)
	{
	  if (eh_edge)
	    {
	      error ("BB %i has multiple EH edges", bb->index);
	      return true;
	    }
	  else
	    eh_edge = e;
	}
    }

  if (lp == NULL)
    {
      if (eh_edge)
	{
	  error ("BB %i can not throw but has an EH edge", bb->index);
	  return true;
	}
      return false;
    }

  if (!stmt_could_throw_p (stmt))
    {
      error ("BB %i last statement has incorrectly set lp", bb->index);
      return true;
    }

  if (eh_edge == NULL)
    {
      error ("BB %i is missing an EH edge", bb->index);
      return true;
    }

  if (eh_edge->dest != label_to_block (lp->post_landing_pad))
    {
      error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
      return true;
    }

  return false;
}
/* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */

DEBUG_FUNCTION bool
verify_eh_dispatch_edge (gimple stmt)
{
  eh_region r;
  eh_catch c;
  basic_block src, dst;
  bool want_fallthru = true;
  edge_iterator ei;
  edge e, fall_edge;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  src = gimple_bb (stmt);

  FOR_EACH_EDGE (e, ei, src->succs)
    gcc_assert (e->aux == NULL);

  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	{
	  dst = label_to_block (c->label);
	  e = find_edge (src, dst);
	  if (e == NULL)
	    {
	      error ("BB %i is missing an edge", src->index);
	      return true;
	    }
	  e->aux = (void *)e;

	  /* A catch-all handler doesn't have a fallthru.  */
	  if (c->type_list == NULL)
	    {
	      want_fallthru = false;
	      break;
	    }
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      dst = label_to_block (r->u.allowed.label);
      e = find_edge (src, dst);
      if (e == NULL)
	{
	  error ("BB %i is missing an edge", src->index);
	  return true;
	}
      e->aux = (void *)e;
      break;

    default:
      gcc_unreachable ();
    }

  fall_edge = NULL;
  FOR_EACH_EDGE (e, ei, src->succs)
    {
      if (e->flags & EDGE_FALLTHRU)
	{
	  if (fall_edge != NULL)
	    {
	      error ("BB %i too many fallthru edges", src->index);
	      return true;
	    }
	  fall_edge = e;
	}
      else if (e->aux)
	e->aux = NULL;
      else
	{
	  error ("BB %i has incorrect edge", src->index);
	  return true;
	}
    }
  if ((fall_edge != NULL) ^ want_fallthru)
    {
      error ("BB %i has incorrect fallthru edge", src->index);
      return true;
    }

  return false;
}