/* Exception handling semantics and decomposition for trees.
   Copyright (C) 2003-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "tree.h"
#include "expr.h"
#include "calls.h"
#include "flags.h"
#include "function.h"
#include "except.h"
#include "pointer-set.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "tree-inline.h"
#include "tree-pass.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "target.h"
#include "cfgloop.h"
#include "gimple-low.h"
/* In some instances a tree and a gimple need to be stored in the same
   table, i.e. in hash tables.  This is a structure to do this.  */
typedef union {tree *tp; tree t; gimple g;} treemple;
/* Misc functions used in this file.  */

/* Remember and lookup EH landing pad data for arbitrary statements.
   Really this means any statement that could_throw_p.  We could
   stuff this information into the stmt_ann data structure, but:

   (1) We absolutely rely on this information being kept until
   we get to rtl.  Once we're done with lowering here, if we lose
   the information there's no way to recover it!

   (2) There are many more statements that *cannot* throw as
   compared to those that can.  We should be saving some amount
   of space by only allocating memory for those that can throw.  */
/* Add statement T in function IFUN to landing pad NUM.  */

static void
add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
{
  struct throw_stmt_node *n;
  void **slot;

  gcc_assert (num != 0);

  n = ggc_alloc<throw_stmt_node> ();
  n->stmt = t;
  n->lp_nr = num;

  if (!get_eh_throw_stmt_table (ifun))
    set_eh_throw_stmt_table (ifun, htab_create_ggc (31, struct_ptr_hash,
						    struct_ptr_eq,
						    ggc_free));

  slot = htab_find_slot (get_eh_throw_stmt_table (ifun), n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}
/* Add statement T in the current function (cfun) to EH landing pad NUM.  */

static void
add_stmt_to_eh_lp (gimple t, int num)
{
  add_stmt_to_eh_lp_fn (cfun, t, num);
}
/* Add statement T to the single EH landing pad in REGION.  */

static void
record_stmt_eh_region (eh_region region, gimple t)
{
  if (region == NULL)
    return;
  if (region->type == ERT_MUST_NOT_THROW)
    add_stmt_to_eh_lp_fn (cfun, t, -region->index);
  else
    {
      eh_landing_pad lp = region->landing_pads;
      if (lp == NULL)
	lp = gen_eh_landing_pad (region);
      else
	gcc_assert (lp->next_lp == NULL);
      add_stmt_to_eh_lp_fn (cfun, t, lp->index);
    }
}
/* Remove statement T in function IFUN from its EH landing pad.  */

bool
remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
{
  struct throw_stmt_node dummy;
  void **slot;

  if (!get_eh_throw_stmt_table (ifun))
    return false;

  dummy.stmt = t;
  slot = htab_find_slot (get_eh_throw_stmt_table (ifun), &dummy,
			 NO_INSERT);
  if (slot)
    {
      htab_clear_slot (get_eh_throw_stmt_table (ifun), slot);
      return true;
    }
  else
    return false;
}
/* Remove statement T in the current function (cfun) from its
   EH landing pad.  */

bool
remove_stmt_from_eh_lp (gimple t)
{
  return remove_stmt_from_eh_lp_fn (cfun, t);
}
/* Determine if statement T is inside an EH region in function IFUN.
   Positive numbers indicate a landing pad index; negative numbers
   indicate a MUST_NOT_THROW region index; zero indicates that the
   statement is not recorded in the region table.  */

int
lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
{
  struct throw_stmt_node *p, n;

  if (ifun->eh->throw_stmt_table == NULL)
    return 0;

  n.stmt = t;
  p = (struct throw_stmt_node *) htab_find (ifun->eh->throw_stmt_table, &n);
  return p ? p->lp_nr : 0;
}
/* Likewise, but always use the current function.  */

int
lookup_stmt_eh_lp (gimple t)
{
  /* We can get called from initialized data when -fnon-call-exceptions
     is on; prevent crash.  */
  if (!cfun)
    return 0;
  return lookup_stmt_eh_lp_fn (cfun, t);
}
/* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
   nodes and LABEL_DECL nodes.  We will use this during the second phase to
   determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */

struct finally_tree_node
{
  /* When storing a GIMPLE_TRY, we have to record a gimple.  However
     when deciding whether a GOTO to a certain LABEL_DECL (which is a
     tree) leaves the TRY block, it's necessary to record a tree in
     this field.  Thus a treemple is used.  */
  treemple child;
  gimple parent;
};
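/* So for a body like

	try { L: ...; goto K; } finally { ... }
	K: ...;

   this pass roughly records L -> the GIMPLE_TRY_FINALLY, and the
   GIMPLE_TRY_FINALLY -> its enclosing try (or NULL); the second phase
   can then see that the goto to K escapes the try body.  */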
/* Hashtable helpers.  */

struct finally_tree_hasher : typed_free_remove <finally_tree_node>
{
  typedef finally_tree_node value_type;
  typedef finally_tree_node compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};
inline hashval_t
finally_tree_hasher::hash (const value_type *v)
{
  return (intptr_t)v->child.t >> 4;
}
inline bool
finally_tree_hasher::equal (const value_type *v, const compare_type *c)
{
  return v->child.t == c->child.t;
}
/* Note that this table is *not* marked GTY.  It is short-lived.  */
static hash_table<finally_tree_hasher> *finally_tree;
static void
record_in_finally_tree (treemple child, gimple parent)
{
  struct finally_tree_node *n;
  finally_tree_node **slot;

  n = XNEW (struct finally_tree_node);
  n->child = child;
  n->parent = parent;

  slot = finally_tree->find_slot (n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}
static void
collect_finally_tree (gimple stmt, gimple region);
/* Go through the gimple sequence.  Works with collect_finally_tree to
   record all GIMPLE_LABEL and GIMPLE_TRY statements.  */

static void
collect_finally_tree_1 (gimple_seq seq, gimple region)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
    collect_finally_tree (gsi_stmt (gsi), region);
}
static void
collect_finally_tree (gimple stmt, gimple region)
{
  treemple temp;

  switch (gimple_code (stmt))
    {
    case GIMPLE_LABEL:
      temp.t = gimple_label_label (stmt);
      record_in_finally_tree (temp, region);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
        {
          temp.g = stmt;
          record_in_finally_tree (temp, region);
          collect_finally_tree_1 (gimple_try_eval (stmt), stmt);
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
        {
          collect_finally_tree_1 (gimple_try_eval (stmt), region);
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      break;

    case GIMPLE_CATCH:
      collect_finally_tree_1 (gimple_catch_handler (stmt), region);
      break;

    case GIMPLE_EH_FILTER:
      collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
      break;

    case GIMPLE_EH_ELSE:
      collect_finally_tree_1 (gimple_eh_else_n_body (stmt), region);
      collect_finally_tree_1 (gimple_eh_else_e_body (stmt), region);
      break;

    default:
      /* A type, a decl, or some kind of statement that we're not
	 interested in.  Don't walk them.  */
      break;
    }
}
/* Use the finally tree to determine if a jump from START to TARGET
   would leave the try_finally node that START lives in.  */

static bool
outside_finally_tree (treemple start, gimple target)
{
  struct finally_tree_node n, *p;

  do
    {
      n.child = start;
      p = finally_tree->find (&n);
      if (!p)
	return true;
      start.g = p->parent;
    }
  while (start.g != target);

  return false;
}
/* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
   nodes into a set of gotos, magic labels, and eh regions.
   The eh region creation is straightforward, but frobbing all the gotos
   and such into shape isn't.  */

/* The sequence into which we record all EH stuff.  This will be
   placed at the end of the function when we're all done.  */
static gimple_seq eh_seq;

/* Record whether an EH region contains something that can throw,
   indexed by EH region number.  */
static bitmap eh_region_may_contain_throw_map;
/* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
   statements that are seen to escape this GIMPLE_TRY_FINALLY node.
   The idea is to record a gimple statement for everything except for
   the conditionals, which get their labels recorded.  Since labels are
   of type 'tree', we need this node to store both gimple and tree
   objects.  REPL_STMT is the sequence used to replace the goto/return
   statement.  CONT_STMT is used to store the statement that allows
   the return/goto to jump to the original destination.  */

struct goto_queue_node
{
  treemple stmt;
  location_t location;
  gimple_seq repl_stmt;
  gimple cont_stmt;
  int index;
  /* This is used when index >= 0 to indicate that stmt is a label (as
     opposed to a goto stmt).  */
  bool is_label;
};
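/* Thus a queued GIMPLE_RETURN gets INDEX -1 and IS_LABEL false, while a
   goto to the Nth distinct escaping label gets INDEX N and IS_LABEL true;
   see record_in_goto_queue_label and maybe_record_in_goto_queue below.  */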
/* State of the world while lowering.  */

struct leh_state
{
  /* What's "current" while constructing the eh region tree.  These
     correspond to variables of the same name in cfun->eh, which we
     don't have easy access to.  */
  eh_region cur_region;

  /* What's "current" for the purposes of __builtin_eh_pointer.  For
     a CATCH, this is the associated TRY.  For an EH_FILTER, this is
     the associated ALLOWED_EXCEPTIONS, etc.  */
  eh_region ehp_region;

  /* Processing of TRY_FINALLY requires a bit more state.  This is
     split out into a separate structure so that we don't have to
     copy so much when processing other nodes.  */
  struct leh_tf_state *tf;
};
struct leh_tf_state
{
  /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
     try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
     this so that outside_finally_tree can reliably reference the tree used
     in the collect_finally_tree data structures.  */
  gimple try_finally_expr;
  gimple top_p;

  /* While lowering, TOP_P is usually expanded into multiple statements,
     so we need the following field to store them.  */
  gimple_seq top_p_seq;

  /* The state outside this try_finally node.  */
  struct leh_state *outer;

  /* The exception region created for it.  */
  eh_region region;

  /* The goto queue.  */
  struct goto_queue_node *goto_queue;
  size_t goto_queue_size;
  size_t goto_queue_active;

  /* Pointer map to help in searching goto_queue when it is large.  */
  hash_map<gimple, goto_queue_node *> *goto_queue_map;

  /* The set of unique labels seen as entries in the goto queue.  */
  vec<tree> dest_array;

  /* A label to be added at the end of the completed transformed
     sequence.  It will be set if may_fallthru was true *at one time*,
     though subsequent transformations may have cleared that flag.  */
  tree fallthru_label;

  /* True if it is possible to fall out the bottom of the try block.
     Cleared if the fallthru is converted to a goto.  */
  bool may_fallthru;

  /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
  bool may_return;

  /* True if the finally block can receive an exception edge.
     Cleared if the exception case is handled by code duplication.  */
  bool may_throw;
};
static gimple_seq lower_eh_must_not_throw (struct leh_state *, gimple);
/* Search for STMT in the goto queue.  Return the replacement,
   or null if the statement isn't in the queue.  */

#define LARGE_GOTO_QUEUE 20

static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);

static gimple_seq
find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
{
  unsigned int i;

  if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
    {
      for (i = 0; i < tf->goto_queue_active; i++)
	if (tf->goto_queue[i].stmt.g == stmt.g)
	  return tf->goto_queue[i].repl_stmt;
      return NULL;
    }

  /* If we have a large number of entries in the goto_queue, create a
     pointer map and use that for searching.  */

  if (!tf->goto_queue_map)
    {
      tf->goto_queue_map = new hash_map<gimple, goto_queue_node *>;
      for (i = 0; i < tf->goto_queue_active; i++)
	{
	  bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
						  &tf->goto_queue[i]);
	  gcc_assert (!existed);
	}
    }

  goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
  if (slot != NULL)
    return ((*slot)->repl_stmt);

  return NULL;
}
/* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
   lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
   then we can just splat it in, otherwise we add the new stmts immediately
   after the GIMPLE_COND and redirect.  */

static void
replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
				gimple_stmt_iterator *gsi)
{
  tree label;
  gimple_seq new_seq;
  treemple temp;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  temp.tp = tp;
  new_seq = find_goto_replacement (tf, temp);
  if (!new_seq)
    return;

  if (gimple_seq_singleton_p (new_seq)
      && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
    {
      *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
      return;
    }

  label = create_artificial_label (loc);
  /* Set the new label for the GIMPLE_COND.  */
  *tp = label;

  gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
  gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
}
/* The real work of replace_goto_queue.  Returns with TSI updated to
   point to the next statement.  */

static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);

static void
replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
		      gimple_stmt_iterator *gsi)
{
  gimple_seq seq;
  treemple temp;
  temp.g = NULL;

  switch (gimple_code (stmt))
    {
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      temp.g = stmt;
      seq = find_goto_replacement (tf, temp);
      if (seq)
	{
	  gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
	  gsi_remove (gsi, false);
	  return;
	}
      break;

    case GIMPLE_COND:
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
      break;

    case GIMPLE_TRY:
      replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
      break;
    case GIMPLE_CATCH:
      replace_goto_queue_stmt_list (gimple_catch_handler_ptr (stmt), tf);
      break;
    case GIMPLE_EH_FILTER:
      replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
      break;
    case GIMPLE_EH_ELSE:
      replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (stmt), tf);
      break;

    default:
      /* These won't have gotos in them.  */
      break;
    }

  gsi_next (gsi);
}
/* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */

static void
replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
{
  gimple_stmt_iterator gsi = gsi_start (*seq);

  while (!gsi_end_p (gsi))
    replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
}
/* Replace all goto queue members.  */

static void
replace_goto_queue (struct leh_tf_state *tf)
{
  if (tf->goto_queue_active == 0)
    return;
  replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
  replace_goto_queue_stmt_list (&eh_seq, tf);
}
/* Add a new record to the goto queue contained in TF.  NEW_STMT is the
   data to be added, IS_LABEL indicates whether NEW_STMT is a label or
   a gimple return statement.  */

static void
record_in_goto_queue (struct leh_tf_state *tf,
                      treemple new_stmt,
                      int index,
                      bool is_label,
                      location_t location)
{
  size_t active, size;
  struct goto_queue_node *q;

  gcc_assert (!tf->goto_queue_map);

  active = tf->goto_queue_active;
  size = tf->goto_queue_size;
  if (active >= size)
    {
      size = (size ? size * 2 : 32);
      tf->goto_queue_size = size;
      tf->goto_queue
         = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
    }

  q = &tf->goto_queue[active];
  tf->goto_queue_active = active + 1;

  memset (q, 0, sizeof (*q));
  q->stmt = new_stmt;
  q->index = index;
  q->location = location;
  q->is_label = is_label;
}
/* Record the LABEL label in the goto queue contained in TF.
   TF is not null.  */

static void
record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
			    location_t location)
{
  int index;
  treemple temp, new_stmt;

  if (!label)
    return;

  /* Computed and non-local gotos do not get processed.  Given
     their nature we can neither tell whether we've escaped the
     finally block nor redirect them if we knew.  */
  if (TREE_CODE (label) != LABEL_DECL)
    return;

  /* No need to record gotos that don't leave the try block.  */
  temp.t = label;
  if (!outside_finally_tree (temp, tf->try_finally_expr))
    return;

  if (! tf->dest_array.exists ())
    {
      tf->dest_array.create (10);
      tf->dest_array.quick_push (label);
      index = 0;
    }
  else
    {
      int n = tf->dest_array.length ();
      for (index = 0; index < n; ++index)
        if (tf->dest_array[index] == label)
          break;
      if (index == n)
        tf->dest_array.safe_push (label);
    }

  /* In the case of a GOTO we want to record the destination label,
     since with a GIMPLE_COND we have easy access to the then/else
     labels.  */
  new_stmt = stmt;
  record_in_goto_queue (tf, new_stmt, index, true, location);
}
/* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
   node, and if so record that fact in the goto queue associated with that
   try_finally node.  */

static void
maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
{
  struct leh_tf_state *tf = state->tf;
  treemple new_stmt;

  if (!tf)
    return;

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      new_stmt.tp = gimple_op_ptr (stmt, 2);
      record_in_goto_queue_label (tf, new_stmt, gimple_cond_true_label (stmt),
				  EXPR_LOCATION (*new_stmt.tp));
      new_stmt.tp = gimple_op_ptr (stmt, 3);
      record_in_goto_queue_label (tf, new_stmt, gimple_cond_false_label (stmt),
				  EXPR_LOCATION (*new_stmt.tp));
      break;
    case GIMPLE_GOTO:
      new_stmt.g = stmt;
      record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
				  gimple_location (stmt));
      break;

    case GIMPLE_RETURN:
      tf->may_return = true;
      new_stmt.g = stmt;
      record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
      break;

    default:
      gcc_unreachable ();
    }
}
#ifdef ENABLE_CHECKING
/* We do not process GIMPLE_SWITCHes for now.  As long as the original source
   was in fact structured, and we've not yet done jump threading, then none
   of the labels will leave outer GIMPLE_TRY_FINALLY nodes.  Verify this.  */

static void
verify_norecord_switch_expr (struct leh_state *state, gimple switch_expr)
{
  struct leh_tf_state *tf = state->tf;
  size_t i, n;

  if (!tf)
    return;

  n = gimple_switch_num_labels (switch_expr);

  for (i = 0; i < n; ++i)
    {
      treemple temp;
      tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
      temp.t = lab;
      gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
    }
}
#else
#define verify_norecord_switch_expr(state, switch_expr)
#endif
/* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
   non-null, insert it before the new branch.  */

static void
do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
{
  gimple x;

  /* In the case of a return, the queue node must be a gimple statement.  */
  gcc_assert (!q->is_label);

  /* Note that the return value may have already been computed, e.g.,

	int x;
	int foo (void)
	{
	  x = 0;
	  try {
	    return x;
	  } finally {
	    x++;
	  }
	}

     should return 0, not 1.  We don't have to do anything to make
     this happen because the return value has been placed in the
     RESULT_DECL already.  */

  q->cont_stmt = q->stmt.g;

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}
/* Similar, but easier, for GIMPLE_GOTO.  */

static void
do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
		     struct leh_tf_state *tf)
{
  gimple x;

  gcc_assert (q->is_label);

  q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}
/* Emit a standard landing pad sequence into SEQ for REGION.  */

static void
emit_post_landing_pad (gimple_seq *seq, eh_region region)
{
  eh_landing_pad lp = region->landing_pads;
  gimple x;

  if (lp == NULL)
    lp = gen_eh_landing_pad (region);

  lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
  EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;

  x = gimple_build_label (lp->post_landing_pad);
  gimple_seq_add_stmt (seq, x);
}
/* Emit a RESX statement into SEQ for REGION.  */

static void
emit_resx (gimple_seq *seq, eh_region region)
{
  gimple x = gimple_build_resx (region->index);
  gimple_seq_add_stmt (seq, x);
  if (region->outer)
    record_stmt_eh_region (region->outer, x);
}
/* Emit an EH_DISPATCH statement into SEQ for REGION.  */

static void
emit_eh_dispatch (gimple_seq *seq, eh_region region)
{
  gimple x = gimple_build_eh_dispatch (region->index);
  gimple_seq_add_stmt (seq, x);
}
/* Note that the current EH region may contain a throw, or a
   call to a function which itself may contain a throw.  */

static void
note_eh_region_may_contain_throw (eh_region region)
{
  while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
    {
      if (region->type == ERT_MUST_NOT_THROW)
	break;
      region = region->outer;
      if (region == NULL)
	break;
    }
}
/* Check if REGION has been marked as containing a throw.  If REGION is
   NULL, this predicate is false.  */

static inline bool
eh_region_may_contain_throw (eh_region r)
{
  return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
}
/* We want to transform
	try { body; } catch { stuff; }
   to
	normal_sequence:
	  body;
	  over:
	eh_sequence:
	  landing_pad:
	  stuff;
	  goto over;

   TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
   should be placed before the second operand, or NULL.  OVER is
   an existing label that should be put at the exit, or NULL.  */

static gimple_seq
frob_into_branch_around (gimple tp, eh_region region, tree over)
{
  gimple x;
  gimple_seq cleanup, result;
  location_t loc = gimple_location (tp);

  cleanup = gimple_try_cleanup (tp);
  result = gimple_try_eval (tp);

  if (region)
    emit_post_landing_pad (&eh_seq, region);

  if (gimple_seq_may_fallthru (cleanup))
    {
      if (!over)
	over = create_artificial_label (loc);
      x = gimple_build_goto (over);
      gimple_set_location (x, loc);
      gimple_seq_add_stmt (&cleanup, x);
    }
  gimple_seq_add_seq (&eh_seq, cleanup);

  if (over)
    {
      x = gimple_build_label (over);
      gimple_seq_add_stmt (&result, x);
    }
  return result;
}
/* A subroutine of lower_try_finally.  Duplicate the tree rooted at T.
   Make sure to record all new labels found.  */

static gimple_seq
lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
			     location_t loc)
{
  gimple region = NULL;
  gimple_seq new_seq;
  gimple_stmt_iterator gsi;

  new_seq = copy_gimple_seq_and_replace_locals (seq);

  for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
	{
	  tree block = gimple_block (stmt);
	  gimple_set_location (stmt, loc);
	  gimple_set_block (stmt, block);
	}
    }

  if (outer_state->tf)
    region = outer_state->tf->try_finally_expr;
  collect_finally_tree_1 (new_seq, region);

  return new_seq;
}
/* A subroutine of lower_try_finally.  Create a fallthru label for
   the given try_finally state.  The only tricky bit here is that
   we have to make sure to record the label in our outer context.  */

static tree
lower_try_finally_fallthru_label (struct leh_tf_state *tf)
{
  tree label = tf->fallthru_label;
  treemple temp;

  if (!label)
    {
      label = create_artificial_label (gimple_location (tf->try_finally_expr));
      tf->fallthru_label = label;
      if (tf->outer->tf)
        {
          temp.t = label;
          record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
        }
    }
  return label;
}
/* A subroutine of lower_try_finally.  If FINALLY consists of a
   GIMPLE_EH_ELSE node, return it.  */

static gimple
get_eh_else (gimple_seq finally)
{
  gimple x = gimple_seq_first_stmt (finally);
  if (gimple_code (x) == GIMPLE_EH_ELSE)
    {
      gcc_assert (gimple_seq_singleton_p (finally));
      return x;
    }
  return NULL;
}
/* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
   langhook returns non-null, then the language requires that the exception
   path out of a try_finally be treated specially.  To wit: the code within
   the finally block may not itself throw an exception.  We have two choices
   here.  First we can duplicate the finally block and wrap it in a
   must_not_throw region.  Second, we can generate code like

	try {
	  finally_block;
	} catch {
	  if (fintmp == eh_edge)
	    protect_cleanup_actions;
	}

   where "fintmp" is the temporary used in the switch statement generation
   alternative considered below.  For the nonce, we always choose the first
   option.

   THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */

static void
honor_protect_cleanup_actions (struct leh_state *outer_state,
			       struct leh_state *this_state,
			       struct leh_tf_state *tf)
{
  tree protect_cleanup_actions;
  gimple_stmt_iterator gsi;
  bool finally_may_fallthru;
  gimple_seq finally;
  gimple x, eh_else;

  /* First check for nothing to do.  */
  if (lang_hooks.eh_protect_cleanup_actions == NULL)
    return;
  protect_cleanup_actions = lang_hooks.eh_protect_cleanup_actions ();
  if (protect_cleanup_actions == NULL)
    return;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Duplicate the FINALLY block.  Only need to do this for try-finally,
     and not for cleanups.  If we've got an EH_ELSE, extract it now.  */
  if (eh_else)
    {
      finally = gimple_eh_else_e_body (eh_else);
      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
    }
  else if (this_state)
    finally = lower_try_finally_dup_block (finally, outer_state,
	gimple_location (tf->try_finally_expr));
  finally_may_fallthru = gimple_seq_may_fallthru (finally);

  /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
     set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
     to be in an enclosing scope, but needs to be implemented at this level
     to avoid a nesting violation (see wrap_temporary_cleanups in
     cp/decl.c).  Since it's logically at an outer level, we should call
     terminate before we get to it, so strip it away before adding the
     MUST_NOT_THROW filter.  */
  gsi = gsi_start (finally);
  x = gsi_stmt (gsi);
  if (gimple_code (x) == GIMPLE_TRY
      && gimple_try_kind (x) == GIMPLE_TRY_CATCH
      && gimple_try_catch_is_cleanup (x))
    {
      gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
      gsi_remove (&gsi, false);
    }

  /* Wrap the block with protect_cleanup_actions as the action.  */
  x = gimple_build_eh_must_not_throw (protect_cleanup_actions);
  x = gimple_build_try (finally, gimple_seq_alloc_with_stmt (x),
			GIMPLE_TRY_CATCH);
  finally = lower_eh_must_not_throw (outer_state, x);

  /* Drop all of this into the exception sequence.  */
  emit_post_landing_pad (&eh_seq, tf->region);
  gimple_seq_add_seq (&eh_seq, finally);
  if (finally_may_fallthru)
    emit_resx (&eh_seq, tf->region);

  /* Having now been handled, EH isn't to be considered with
     the rest of the outgoing edges.  */
  tf->may_throw = false;
}
/* A subroutine of lower_try_finally.  We have determined that there is
   no fallthru edge out of the finally block.  This means that there is
   no outgoing edge corresponding to any incoming edge.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_nofallthru (struct leh_state *state,
			      struct leh_tf_state *tf)
{
  tree lab;
  gimple x, eh_else;
  gimple_seq finally;
  struct goto_queue_node *q, *qe;

  lab = create_artificial_label (gimple_location (tf->try_finally_expr));

  /* We expect that tf->top_p is a GIMPLE_TRY.  */
  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  x = gimple_build_label (lab);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  for (; q < qe; ++q)
    if (q->index < 0)
      do_return_redirection (q, lab, NULL);
    else
      do_goto_redirection (q, lab, NULL, tf);

  replace_goto_queue (tf);

  /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      finally = gimple_eh_else_n_body (eh_else);
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
	{
	  finally = gimple_eh_else_e_body (eh_else);
	  lower_eh_constructs_1 (state, &finally);

	  emit_post_landing_pad (&eh_seq, tf->region);
	  gimple_seq_add_seq (&eh_seq, finally);
	}
    }
  else
    {
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
	{
	  emit_post_landing_pad (&eh_seq, tf->region);

	  x = gimple_build_goto (lab);
	  gimple_set_location (x, gimple_location (tf->try_finally_expr));
	  gimple_seq_add_stmt (&eh_seq, x);
	}
    }
}
/* A subroutine of lower_try_finally.  We have determined that there is
   exactly one destination of the finally block.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  gimple x;
  gimple_seq finally;
  gimple_stmt_iterator gsi;
  tree finally_label;
  location_t loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* Since there's only one destination, and the destination edge can only
     either be EH or non-EH, that implies that all of our incoming edges
     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
  x = get_eh_else (finally);
  if (x)
    {
      if (tf->may_throw)
	finally = gimple_eh_else_e_body (x);
      else
	finally = gimple_eh_else_n_body (x);
    }

  lower_eh_constructs_1 (state, &finally);

  for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
	{
	  tree block = gimple_block (stmt);
	  gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
	  gimple_set_block (stmt, block);
	}
    }

  if (tf->may_throw)
    {
      /* Only reachable via the exception edge.  Add the given label to
         the head of the FINALLY block.  Append a RESX at the end.  */
      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, finally);
      emit_resx (&eh_seq, tf->region);
      return;
    }

  if (tf->may_fallthru)
    {
      /* Only reachable via the fallthru edge.  Do nothing but let
	 the two blocks run together; we'll fall out the bottom.  */
      gimple_seq_add_seq (&tf->top_p_seq, finally);
      return;
    }

  finally_label = create_artificial_label (loc);
  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  gimple_seq_add_seq (&tf->top_p_seq, finally);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;

  if (tf->may_return)
    {
      /* Reachable by return expressions only.  Redirect them.  */
      for (; q < qe; ++q)
	do_return_redirection (q, finally_label, NULL);
      replace_goto_queue (tf);
    }
  else
    {
      /* Reachable by goto expressions only.  Redirect them.  */
      for (; q < qe; ++q)
	do_goto_redirection (q, finally_label, NULL, tf);
      replace_goto_queue (tf);

      if (tf->dest_array[0] == tf->fallthru_label)
	{
	  /* Reachable by goto to fallthru label only.  Redirect it
	     to the new label (already created, sadly), and do not
	     emit the final branch out, or the fallthru label.  */
	  tf->fallthru_label = NULL;
	  return;
	}
    }

  /* Place the original return/goto to the original destination
     immediately after the finally block.  */
  x = tf->goto_queue[0].cont_stmt;
  gimple_seq_add_stmt (&tf->top_p_seq, x);
  maybe_record_in_goto_queue (state, x);
}
/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by duplicating the
   finally block for every destination.  */

static void
lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
{
  gimple_seq finally;
  gimple_seq new_stmt;
  gimple_seq seq;
  gimple x, eh_else;
  tree tmp;
  location_t tf_loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);

  /* Notice EH_ELSE, and simplify some of the remaining code
     by considering FINALLY to be the normal return path only.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    finally = gimple_eh_else_n_body (eh_else);

  tf->top_p_seq = gimple_try_eval (tf->top_p);
  new_stmt = NULL;

  if (tf->may_fallthru)
    {
      seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);
      gimple_seq_add_seq (&new_stmt, seq);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&new_stmt, x);
    }

  if (tf->may_throw)
    {
      /* We don't need to copy the EH path of EH_ELSE,
	 since it is only emitted once.  */
      if (eh_else)
	seq = gimple_eh_else_e_body (eh_else);
      else
	seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);

      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, seq);
      emit_resx (&eh_seq, tf->region);
    }

  if (tf->goto_queue)
    {
      struct goto_queue_node *q, *qe;
      int return_index, index;
      struct labels_s
      {
	struct goto_queue_node *q;
	tree label;
      } *labels;

      return_index = tf->dest_array.length ();
      labels = XCNEWVEC (struct labels_s, return_index + 1);

      q = tf->goto_queue;
      qe = q + tf->goto_queue_active;
      for (; q < qe; q++)
	{
	  index = q->index < 0 ? return_index : q->index;

	  if (!labels[index].q)
	    labels[index].q = q;
	}

      for (index = 0; index < return_index + 1; index++)
	{
	  tree lab;

	  q = labels[index].q;
	  if (! q)
	    continue;

	  lab = labels[index].label
	    = create_artificial_label (tf_loc);

	  if (index == return_index)
	    do_return_redirection (q, lab, NULL);
	  else
	    do_goto_redirection (q, lab, NULL, tf);

	  x = gimple_build_label (lab);
	  gimple_seq_add_stmt (&new_stmt, x);

	  seq = lower_try_finally_dup_block (finally, state, q->location);
	  lower_eh_constructs_1 (state, &seq);
	  gimple_seq_add_seq (&new_stmt, seq);

	  gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
	  maybe_record_in_goto_queue (state, q->cont_stmt);
	}

      for (q = tf->goto_queue; q < qe; q++)
	{
	  tree lab;

	  index = q->index < 0 ? return_index : q->index;

	  if (labels[index].q == q)
	    continue;

	  lab = labels[index].label;

	  if (index == return_index)
	    do_return_redirection (q, lab, NULL);
	  else
	    do_goto_redirection (q, lab, NULL, tf);
	}

      replace_goto_queue (tf);
      free (labels);
    }

  /* Need to link new stmts after running replace_goto_queue due
     to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
}
/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by instrumenting
   each incoming edge and creating a switch statement at the end of the
   finally block that branches to the appropriate destination.  */

static void
lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  tree finally_tmp, finally_label;
  int return_index, eh_index, fallthru_index;
  int nlabels, ndests, j, last_case_index;
  tree last_case;
  vec<tree> case_label_vec;
  gimple_seq switch_body = NULL;
  gimple x, eh_else;
  tree tmp;
  gimple switch_stmt;
  gimple_seq finally;
  hash_map<tree, gimple> *cont_map = NULL;
  /* The location of the TRY_FINALLY stmt.  */
  location_t tf_loc = gimple_location (tf->try_finally_expr);
  /* The location of the finally block.  */
  location_t finally_loc;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Mash the TRY block to the head of the chain.  */
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* The location of the finally is either the last stmt in the finally
     block or the location of the TRY_FINALLY itself.  */
  x = gimple_seq_last_stmt (finally);
  finally_loc = x ? gimple_location (x) : tf_loc;

  /* Prepare for switch statement generation.  */
  nlabels = tf->dest_array.length ();
  return_index = nlabels;
  eh_index = return_index + tf->may_return;
  fallthru_index = eh_index + (tf->may_throw && !eh_else);
  ndests = fallthru_index + tf->may_fallthru;

  finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
  finally_label = create_artificial_label (finally_loc);

  /* We use vec::quick_push on case_label_vec throughout this function,
     since we know the size in advance and allocate precisely as much
     space as needed.  */
  case_label_vec.create (ndests);
  last_case = NULL;
  last_case_index = 0;

  /* Begin inserting code for getting to the finally block.  Things
     are done in this order to correspond to the sequence the code is
     laid out.  */

  if (tf->may_fallthru)
    {
      x = gimple_build_assign (finally_tmp,
			       build_int_cst (integer_type_node,
					      fallthru_index));
      gimple_seq_add_stmt (&tf->top_p_seq, x);

      tmp = build_int_cst (integer_type_node, fallthru_index);
      last_case = build_case_label (tmp, NULL,
				    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&switch_body, x);
    }

  /* For EH_ELSE, emit the exception path (plus resx) now, then
     subsequently we only need consider the normal path.  */
  if (eh_else)
    {
      if (tf->may_throw)
	{
	  finally = gimple_eh_else_e_body (eh_else);
	  lower_eh_constructs_1 (state, &finally);

	  emit_post_landing_pad (&eh_seq, tf->region);
	  gimple_seq_add_seq (&eh_seq, finally);
	  emit_resx (&eh_seq, tf->region);
	}

      finally = gimple_eh_else_n_body (eh_else);
    }
  else if (tf->may_throw)
    {
      emit_post_landing_pad (&eh_seq, tf->region);

      x = gimple_build_assign (finally_tmp,
			       build_int_cst (integer_type_node, eh_index));
      gimple_seq_add_stmt (&eh_seq, x);

      x = gimple_build_goto (finally_label);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&eh_seq, x);

      tmp = build_int_cst (integer_type_node, eh_index);
      last_case = build_case_label (tmp, NULL,
				    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&eh_seq, x);
      emit_resx (&eh_seq, tf->region);
    }

  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  lower_eh_constructs_1 (state, &finally);
  gimple_seq_add_seq (&tf->top_p_seq, finally);

  /* Redirect each incoming goto edge.  */
  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  j = last_case_index + tf->may_return;
  /* Prepare the assignments to finally_tmp that are executed upon the
     entrance through a particular edge.  */
  for (; q < qe; ++q)
    {
      gimple_seq mod = NULL;
      int switch_id;
      unsigned int case_index;

      if (q->index < 0)
	{
	  x = gimple_build_assign (finally_tmp,
				   build_int_cst (integer_type_node,
						  return_index));
	  gimple_seq_add_stmt (&mod, x);
	  do_return_redirection (q, finally_label, mod);
	  switch_id = return_index;
	}
      else
	{
	  x = gimple_build_assign (finally_tmp,
				   build_int_cst (integer_type_node, q->index));
	  gimple_seq_add_stmt (&mod, x);
	  do_goto_redirection (q, finally_label, mod, tf);
	  switch_id = q->index;
	}

      case_index = j + q->index;
      if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
	{
	  tree case_lab;
	  tmp = build_int_cst (integer_type_node, switch_id);
	  case_lab = build_case_label (tmp, NULL,
				       create_artificial_label (tf_loc));
	  /* We store the cont_stmt in the pointer map, so that we can recover
	     it in the loop below.  */
	  if (!cont_map)
	    cont_map = new hash_map<tree, gimple>;
	  cont_map->put (case_lab, q->cont_stmt);
	  case_label_vec.quick_push (case_lab);
	}
    }
  for (j = last_case_index; j < last_case_index + nlabels; j++)
    {
      gimple cont_stmt;

      last_case = case_label_vec[j];

      gcc_assert (last_case);
      gcc_assert (cont_map);

      cont_stmt = *cont_map->get (last_case);

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);
      gimple_seq_add_stmt (&switch_body, cont_stmt);
      maybe_record_in_goto_queue (state, cont_stmt);
    }
  if (cont_map)
    delete cont_map;

  replace_goto_queue (tf);

  /* Make sure that the last case is the default label, as one is required.
     Then sort the labels, which is also required in GIMPLE.  */
  CASE_LOW (last_case) = NULL;
  tree tem = case_label_vec.pop ();
  gcc_assert (tem == last_case);
  sort_case_labels (case_label_vec);

  /* Build the switch statement, setting last_case to be the default
     label.  */
  switch_stmt = gimple_build_switch (finally_tmp, last_case,
				     case_label_vec);
  gimple_set_location (switch_stmt, finally_loc);

  /* Need to link SWITCH_STMT after running replace_goto_queue
     due to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
  gimple_seq_add_seq (&tf->top_p_seq, switch_body);
}
/* Decide whether or not we are going to duplicate the finally block.
   There are several considerations.

   First, if this is Java, then the finally block contains code
   written by the user.  It has line numbers associated with it,
   so duplicating the block means it's difficult to set a breakpoint.
   Since controlling code generation via -g is verboten, we simply
   never duplicate code without optimization.

   Second, we'd like to prevent egregious code growth.  One way to
   do this is to estimate the size of the finally block, multiply
   that by the number of copies we'd need to make, and compare against
   the estimate of the size of the switch machinery we'd have to add.  */

static bool
decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
{
  int f_estimate, sw_estimate;
  gimple eh_else;

  /* If there's an EH_ELSE involved, the exception path is separate
     and really doesn't come into play for this computation.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      ndests -= may_throw;
      finally = gimple_eh_else_n_body (eh_else);
    }

  if (!optimize)
    {
      gimple_stmt_iterator gsi;

      if (ndests == 1)
	return true;

      for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  if (!is_gimple_debug (stmt) && !gimple_clobber_p (stmt))
	    return false;
	}
      return true;
    }

  /* Finally estimate N times, plus N gotos.  */
  f_estimate = count_insns_seq (finally, &eni_size_weights);
  f_estimate = (f_estimate + 1) * ndests;

  /* Switch statement (cost 10), N variable assignments, N gotos.  */
  sw_estimate = 10 + 2 * ndests;

  /* Optimize for size clearly wants our best guess.  */
  if (optimize_function_for_size_p (cfun))
    return f_estimate < sw_estimate;

  /* ??? These numbers are completely made up so far.  */
  if (optimize > 1)
    return f_estimate < 100 || f_estimate < sw_estimate * 2;
  else
    return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
}
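/* For instance, a 5-insn finally block with 3 destinations gives
   f_estimate = (5 + 1) * 3 = 18 and sw_estimate = 10 + 2 * 3 = 16, so
   optimizing for size we would build the switch (18 >= 16), while at
   -O2 we would copy (18 < 100).  */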
/* REG is the enclosing region for a possible cleanup region, or the region
   itself.  Returns TRUE if such a region would be unreachable.

   Cleanup regions within a must-not-throw region aren't actually reachable
   even if there are throwing stmts within them, because the personality
   routine will call terminate before unwinding.  */

static bool
cleanup_is_dead_in (eh_region reg)
{
  while (reg && reg->type == ERT_CLEANUP)
    reg = reg->outer;
  return (reg && reg->type == ERT_MUST_NOT_THROW);
}
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY node
   to a sequence of labels and blocks, plus the exception region trees
   that record all the magic.  This is complicated by the need to
   arrange for the FINALLY block to be executed on all exits.  */

static gimple_seq
lower_try_finally (struct leh_state *state, gimple tp)
{
  struct leh_tf_state this_tf;
  struct leh_state this_state;
  int ndests;
  gimple_seq old_eh_seq;

  /* Process the try block.  */

  memset (&this_tf, 0, sizeof (this_tf));
  this_tf.try_finally_expr = tp;
  this_tf.top_p = tp;
  this_tf.outer = state;
  if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
    {
      this_tf.region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_tf.region;
    }
  else
    {
      this_tf.region = NULL;
      this_state.cur_region = state->cur_region;
    }

  this_state.ehp_region = state->ehp_region;
  this_state.tf = &this_tf;

  old_eh_seq = eh_seq;
  eh_seq = NULL;

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  /* Determine if the try block is escaped through the bottom.  */
  this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));

  /* Determine if any exceptions are possible within the try block.  */
  if (this_tf.region)
    this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
  if (this_tf.may_throw)
    honor_protect_cleanup_actions (state, &this_state, &this_tf);

  /* Determine how many edges (still) reach the finally block.  Or rather,
     how many destinations are reached by the finally block.  Use this to
     determine how we process the finally block itself.  */

  ndests = this_tf.dest_array.length ();
  ndests += this_tf.may_fallthru;
  ndests += this_tf.may_return;
  ndests += this_tf.may_throw;

  /* If the FINALLY block is not reachable, dike it out.  */
  if (ndests == 0)
    {
      gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
      gimple_try_set_cleanup (tp, NULL);
    }
  /* If the finally block doesn't fall through, then any destination
     we might try to impose there isn't reached either.  There may be
     some minor amount of cleanup and redirection still needed.  */
  else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
    lower_try_finally_nofallthru (state, &this_tf);

  /* We can easily special-case redirection to a single destination.  */
  else if (ndests == 1)
    lower_try_finally_onedest (state, &this_tf);
  else if (decide_copy_try_finally (ndests, this_tf.may_throw,
				    gimple_try_cleanup (tp)))
    lower_try_finally_copy (state, &this_tf);
  else
    lower_try_finally_switch (state, &this_tf);

  /* If someone requested we add a label at the end of the transformed
     block, do so.  */
  if (this_tf.fallthru_label)
    {
      /* This must be reached only if ndests == 0.  */
      gimple x = gimple_build_label (this_tf.fallthru_label);
      gimple_seq_add_stmt (&this_tf.top_p_seq, x);
    }

  this_tf.dest_array.release ();
  free (this_tf.goto_queue);
  if (this_tf.goto_queue_map)
    delete this_tf.goto_queue_map;

  /* If there was an old (aka outer) eh_seq, append the current eh_seq.
     If there was no old eh_seq, then the append is trivially already done.  */
  if (old_eh_seq)
    {
      if (eh_seq == NULL)
	eh_seq = old_eh_seq;
      else
	{
	  gimple_seq new_eh_seq = eh_seq;
	  eh_seq = old_eh_seq;
	  gimple_seq_add_seq (&eh_seq, new_eh_seq);
	}
    }

  return this_tf.top_p_seq;
}
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
   list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
   exception region trees that record all the magic.  */

static gimple_seq
lower_catch (struct leh_state *state, gimple tp)
{
  eh_region try_region = NULL;
  struct leh_state this_state = *state;
  gimple_stmt_iterator gsi;
  tree out_label;
  gimple_seq new_seq, cleanup;
  gimple x;
  location_t try_catch_loc = gimple_location (tp);

  if (flag_exceptions)
    {
      try_region = gen_eh_region_try (state->cur_region);
      this_state.cur_region = try_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (!eh_region_may_contain_throw (try_region))
    return gimple_try_eval (tp);

  new_seq = NULL;
  emit_eh_dispatch (&new_seq, try_region);
  emit_resx (&new_seq, try_region);

  this_state.cur_region = state->cur_region;
  this_state.ehp_region = try_region;

  out_label = NULL;
  cleanup = gimple_try_cleanup (tp);
  for (gsi = gsi_start (cleanup);
       !gsi_end_p (gsi);
       gsi_next (&gsi))
    {
      eh_catch c;
      gimple gcatch;
      gimple_seq handler;

      gcatch = gsi_stmt (gsi);
      c = gen_eh_region_catch (try_region, gimple_catch_types (gcatch));

      handler = gimple_catch_handler (gcatch);
      lower_eh_constructs_1 (&this_state, &handler);

      c->label = create_artificial_label (UNKNOWN_LOCATION);
      x = gimple_build_label (c->label);
      gimple_seq_add_stmt (&new_seq, x);

      gimple_seq_add_seq (&new_seq, handler);

      if (gimple_seq_may_fallthru (new_seq))
	{
	  if (!out_label)
	    out_label = create_artificial_label (try_catch_loc);

	  x = gimple_build_goto (out_label);
	  gimple_seq_add_stmt (&new_seq, x);
	}
      if (!c->type_list)
	break;
    }

  gimple_try_set_cleanup (tp, new_seq);

  return frob_into_branch_around (tp, try_region, out_label);
}
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with a
   GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
   region trees that record all the magic.  */

static gimple_seq
lower_eh_filter (struct leh_state *state, gimple tp)
{
  struct leh_state this_state = *state;
  eh_region this_region = NULL;
  gimple inner, x;
  gimple_seq new_seq;

  inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));

  if (flag_exceptions)
    {
      this_region = gen_eh_region_allowed (state->cur_region,
					   gimple_eh_filter_types (inner));
      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (!eh_region_may_contain_throw (this_region))
    return gimple_try_eval (tp);

  new_seq = NULL;
  this_state.cur_region = state->cur_region;
  this_state.ehp_region = this_region;

  emit_eh_dispatch (&new_seq, this_region);
  emit_resx (&new_seq, this_region);

  this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
  x = gimple_build_label (this_region->u.allowed.label);
  gimple_seq_add_stmt (&new_seq, x);

  lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
  gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));

  gimple_try_set_cleanup (tp, new_seq);

  return frob_into_branch_around (tp, this_region, NULL);
}
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
   a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
   plus the exception region trees that record all the magic.  */

static gimple_seq
lower_eh_must_not_throw (struct leh_state *state, gimple tp)
{
  struct leh_state this_state = *state;

  if (flag_exceptions)
    {
      gimple inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
      eh_region this_region;

      this_region = gen_eh_region_must_not_throw (state->cur_region);
      this_region->u.must_not_throw.failure_decl
	= gimple_eh_must_not_throw_fndecl (inner);
      this_region->u.must_not_throw.failure_loc
	= LOCATION_LOCUS (gimple_location (tp));

      /* In order to get mangling applied to this decl, we must mark it
	 used now.  Otherwise, pass_ipa_free_lang_data won't think it
	 needs to happen.  */
      TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;

      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  return gimple_try_eval (tp);
}
/* Implement a cleanup expression.  This is similar to try-finally,
   except that we only execute the cleanup block for exception edges.  */

static gimple_seq
lower_cleanup (struct leh_state *state, gimple tp)
{
  struct leh_state this_state = *state;
  eh_region this_region = NULL;
  struct leh_tf_state fake_tf;
  gimple_seq result;
  bool cleanup_dead = cleanup_is_dead_in (state->cur_region);

  if (flag_exceptions && !cleanup_dead)
    {
      this_region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (cleanup_dead || !eh_region_may_contain_throw (this_region))
    return gimple_try_eval (tp);

  /* Build enough of a try-finally state so that we can reuse
     honor_protect_cleanup_actions.  */
  memset (&fake_tf, 0, sizeof (fake_tf));
  fake_tf.top_p = fake_tf.try_finally_expr = tp;
  fake_tf.outer = state;
  fake_tf.region = this_region;
  fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
  fake_tf.may_throw = true;

  honor_protect_cleanup_actions (state, NULL, &fake_tf);

  if (fake_tf.may_throw)
    {
      /* In this case honor_protect_cleanup_actions had nothing to do,
	 and we should process this normally.  */
      lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
      result = frob_into_branch_around (tp, this_region,
                                        fake_tf.fallthru_label);
    }
  else
    {
      /* In this case honor_protect_cleanup_actions did nearly all of
	 the work.  All we have left is to append the fallthru_label.  */

      result = gimple_try_eval (tp);
      if (fake_tf.fallthru_label)
	{
	  gimple x = gimple_build_label (fake_tf.fallthru_label);
	  gimple_seq_add_stmt (&result, x);
	}
    }
  return result;
}
/* Main loop for lowering eh constructs.  Also moves gsi to the next
   statement.  */

static void
lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
{
  gimple_seq replace;
  gimple x;
  gimple stmt = gsi_stmt (*gsi);

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
	tree fndecl = gimple_call_fndecl (stmt);
	tree rhs, lhs;

	if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	  switch (DECL_FUNCTION_CODE (fndecl))
	    {
	    case BUILT_IN_EH_POINTER:
	      /* The front end may have generated a call to
		 __builtin_eh_pointer (0) within a catch region.  Replace
		 this zero argument with the current catch region number.  */
	      if (state->ehp_region)
		{
		  tree nr = build_int_cst (integer_type_node,
					   state->ehp_region->index);
		  gimple_call_set_arg (stmt, 0, nr);
		}
	      else
		{
		  /* The user has done something silly.  Remove it.  */
		  rhs = null_pointer_node;
		  goto do_replace;
		}
	      break;

	    case BUILT_IN_EH_FILTER:
	      /* ??? This should never appear, but since it's a builtin it
		 is accessible to abuse by users.  Just remove it and
		 replace the use with the arbitrary value zero.  */
	      rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
	    do_replace:
	      lhs = gimple_call_lhs (stmt);
	      x = gimple_build_assign (lhs, rhs);
	      gsi_insert_before (gsi, x, GSI_SAME_STMT);
	      /* FALLTHRU */

	    case BUILT_IN_EH_COPY_VALUES:
	      /* Likewise this should not appear.  Remove it.  */
	      gsi_remove (gsi, true);
	      return;

	    default:
	      break;
	    }
      }
      /* FALLTHRU */

    case GIMPLE_ASSIGN:
      /* If the stmt can throw use a new temporary for the assignment
         to a LHS.  This makes sure the old value of the LHS is
	 available on the EH edge.  Only do so for statements that
	 potentially fall through (no noreturn calls e.g.), otherwise
	 this new assignment might create fake fallthru regions.  */
      if (stmt_could_throw_p (stmt)
	  && gimple_has_lhs (stmt)
	  && gimple_stmt_may_fallthru (stmt)
	  && !tree_could_throw_p (gimple_get_lhs (stmt))
	  && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
	{
	  tree lhs = gimple_get_lhs (stmt);
	  tree tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
	  gimple s = gimple_build_assign (lhs, tmp);
	  gimple_set_location (s, gimple_location (stmt));
	  gimple_set_block (s, gimple_block (stmt));
	  gimple_set_lhs (stmt, tmp);
	  if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
	      || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
	    DECL_GIMPLE_REG_P (tmp) = 1;
	  gsi_insert_after (gsi, s, GSI_SAME_STMT);
	}
      /* Look for things that can throw exceptions, and record them.  */
      if (state->cur_region && stmt_could_throw_p (stmt))
	{
	  record_stmt_eh_region (state->cur_region, stmt);
	  note_eh_region_may_contain_throw (state->cur_region);
	}
      break;

    case GIMPLE_COND:
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      maybe_record_in_goto_queue (state, stmt);
      break;

    case GIMPLE_SWITCH:
      verify_norecord_switch_expr (state, stmt);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
	replace = lower_try_finally (state, stmt);
      else
	{
	  x = gimple_seq_first_stmt (gimple_try_cleanup (stmt));
	  if (!x)
	    {
	      replace = gimple_try_eval (stmt);
	      lower_eh_constructs_1 (state, &replace);
	    }
	  else
	    switch (gimple_code (x))
	      {
	      case GIMPLE_CATCH:
		replace = lower_catch (state, stmt);
		break;
	      case GIMPLE_EH_FILTER:
		replace = lower_eh_filter (state, stmt);
		break;
	      case GIMPLE_EH_MUST_NOT_THROW:
		replace = lower_eh_must_not_throw (state, stmt);
		break;
	      case GIMPLE_EH_ELSE:
		/* This code is only valid with GIMPLE_TRY_FINALLY.  */
		gcc_unreachable ();
	      default:
		replace = lower_cleanup (state, stmt);
		break;
	      }
	}

      /* Remove the old stmt and insert the transformed sequence
	 instead.  */
      gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
      gsi_remove (gsi, true);

      /* Return since we don't want gsi_next ().  */
      return;

    case GIMPLE_EH_ELSE:
      /* We should be eliminating this in lower_try_finally et al.  */
      gcc_unreachable ();

    default:
      /* A type, a decl, or some kind of statement that we're not
	 interested in.  Don't walk them.  */
      break;
    }

  gsi_next (gsi);
}
/* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2.  */

static void
lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
{
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
    lower_eh_constructs_2 (state, &gsi);
}
namespace {

const pass_data pass_data_lower_eh =
{
  GIMPLE_PASS, /* type */
  "eh", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  PROP_gimple_leh, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_eh : public gimple_opt_pass
{
public:
  pass_lower_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_eh, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_lower_eh
unsigned int
pass_lower_eh::execute (function *fun)
{
  struct leh_state null_state;
  gimple_seq bodyp;

  bodyp = gimple_body (current_function_decl);
  if (bodyp == NULL)
    return 0;

  finally_tree = new hash_table<finally_tree_hasher> (31);
  eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
  memset (&null_state, 0, sizeof (null_state));

  collect_finally_tree_1 (bodyp, NULL);
  lower_eh_constructs_1 (&null_state, &bodyp);
  gimple_set_body (current_function_decl, bodyp);

  /* We assume there's a return statement, or something, at the end of
     the function, and thus plopping the EH sequence afterward won't
     change anything.  */
  gcc_assert (!gimple_seq_may_fallthru (bodyp));
  gimple_seq_add_seq (&bodyp, eh_seq);

  /* We assume that since BODYP already existed, adding EH_SEQ to it
     didn't change its value, and we don't have to re-set the function.  */
  gcc_assert (bodyp == gimple_body (current_function_decl));

  delete finally_tree;
  finally_tree = NULL;
  BITMAP_FREE (eh_region_may_contain_throw_map);
  eh_seq = NULL;

  /* If this function needs a language specific EH personality routine
     and the frontend didn't already set one do so now.  */
  if (function_needs_eh_personality (fun) == eh_personality_lang
      && !DECL_FUNCTION_PERSONALITY (current_function_decl))
    DECL_FUNCTION_PERSONALITY (current_function_decl)
      = lang_hooks.eh_personality ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_lower_eh (gcc::context *ctxt)
{
  return new pass_lower_eh (ctxt);
}
/* Create the multiple edges from an EH_DISPATCH statement to all of
   the possible handlers for its EH region.  Return true if a fallthru
   edge to the following block is still needed; false if a catch-all
   handler consumes every exception, making a fallthru impossible.  */

bool
make_eh_dispatch_edges (gimple stmt)
{
  eh_region r;
  eh_catch c;
  basic_block src, dst;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  src = gimple_bb (stmt);

  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	{
	  dst = label_to_block (c->label);
	  make_edge (src, dst, 0);

	  /* A catch-all handler doesn't have a fallthru.  */
	  if (c->type_list == NULL)
	    return false;
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      dst = label_to_block (r->u.allowed.label);
      make_edge (src, dst, 0);
      break;

    default:
      gcc_unreachable ();
    }

  return true;
}
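
/* Illustrative example only: for

       try { ... } catch (int) { ... } catch (...) { ... }

   the block holding the eh_dispatch gets one outgoing edge per catch
   label; because the trailing catch-all consumes every exception, no
   fallthru edge to the following block remains.  */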
/* Create the single EH edge from STMT to its nearest landing pad,
   if there is such a landing pad within the current function.  */

void
make_eh_edges (gimple stmt)
{
  basic_block src, dst;
  eh_landing_pad lp;
  int lp_nr;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr <= 0)
    return;

  lp = get_eh_landing_pad_from_number (lp_nr);
  gcc_assert (lp != NULL);

  src = gimple_bb (stmt);
  dst = label_to_block (lp->post_landing_pad);
  make_edge (src, dst, EDGE_EH);
}
/* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
   do not actually perform the final edge redirection.

   CHANGE_REGION is true when we're being called from cleanup_empty_eh and
   we intend to change the destination EH region as well; this means
   EH_LANDING_PAD_NR must already be set on the destination block label.
   If false, we're being called from generic cfg manipulation code and we
   should preserve our place within the region tree.  */

static void
redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
{
  eh_landing_pad old_lp, new_lp;
  basic_block old_bb;
  gimple throw_stmt;
  int old_lp_nr, new_lp_nr;
  tree old_label, new_label;
  edge_iterator ei;
  edge e;

  old_bb = edge_in->dest;
  old_label = gimple_block_label (old_bb);
  old_lp_nr = EH_LANDING_PAD_NR (old_label);
  gcc_assert (old_lp_nr > 0);
  old_lp = get_eh_landing_pad_from_number (old_lp_nr);

  throw_stmt = last_stmt (edge_in->src);
  gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);

  new_label = gimple_block_label (new_bb);

  /* Look for an existing region that might be using NEW_BB already.  */
  new_lp_nr = EH_LANDING_PAD_NR (new_label);
  if (new_lp_nr)
    {
      new_lp = get_eh_landing_pad_from_number (new_lp_nr);
      gcc_assert (new_lp);

      /* Unless CHANGE_REGION is true, the new and old landing pad
	 had better be associated with the same EH region.  */
      gcc_assert (change_region || new_lp->region == old_lp->region);
    }
  else
    {
      new_lp = NULL;
      gcc_assert (!change_region);
    }

  /* Notice when we redirect the last EH edge away from OLD_BB.  */
  FOR_EACH_EDGE (e, ei, old_bb->preds)
    if (e != edge_in && (e->flags & EDGE_EH))
      break;

  if (new_lp)
    {
      /* NEW_LP already exists.  If there are still edges into OLD_LP,
	 there's nothing to do with the EH tree.  If there are no more
	 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
	 If CHANGE_REGION is true, then our caller is expecting to remove
	 the landing pad.  */
      if (e == NULL && !change_region)
	remove_eh_landing_pad (old_lp);
    }
  else
    {
      /* No correct landing pad exists.  If there are no more edges
	 into OLD_LP, then we can simply re-use the existing landing pad.
	 Otherwise, we have to create a new landing pad.  */
      if (e == NULL)
	{
	  EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
	  new_lp = old_lp;
	}
      else
	new_lp = gen_eh_landing_pad (old_lp->region);
      new_lp->post_landing_pad = new_label;
      EH_LANDING_PAD_NR (new_label) = new_lp->index;
    }

  /* Maybe move the throwing statement to the new region.  */
  if (old_lp != new_lp)
    {
      remove_stmt_from_eh_lp (throw_stmt);
      add_stmt_to_eh_lp (throw_stmt, new_lp->index);
    }
}
/* Redirect EH edge E to NEW_BB.  */

edge
redirect_eh_edge (edge edge_in, basic_block new_bb)
{
  redirect_eh_edge_1 (edge_in, new_bb, false);
  return ssa_redirect_edge (edge_in, new_bb);
}
/* This is a subroutine of gimple_redirect_edge_and_branch.  Update the
   labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
   The actual edge update will happen in the caller.  */

void
redirect_eh_dispatch_edge (gimple stmt, edge e, basic_block new_bb)
{
  tree new_lab = gimple_block_label (new_bb);
  bool any_changed = false;
  basic_block old_bb;
  eh_region r;
  eh_catch c;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	{
	  old_bb = label_to_block (c->label);
	  if (old_bb == e->dest)
	    {
	      c->label = new_lab;
	      any_changed = true;
	    }
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      old_bb = label_to_block (r->u.allowed.label);
      gcc_assert (old_bb == e->dest);
      r->u.allowed.label = new_lab;
      any_changed = true;
      break;

    default:
      gcc_unreachable ();
    }

  gcc_assert (any_changed);
}
/* Helper function for operation_could_trap_p and stmt_could_throw_p.  */

static bool
operation_could_trap_helper_p (enum tree_code op,
			       bool fp_operation,
			       bool honor_trapv,
			       bool honor_nans,
			       bool honor_snans,
			       tree divisor,
			       bool *handled)
{
  *handled = true;
  switch (op)
    {
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case TRUNC_MOD_EXPR:
    case RDIV_EXPR:
      if (honor_snans || honor_trapv)
	return true;
      if (fp_operation)
	return flag_trapping_math;
      if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
	return true;
      return false;

    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
    case LTGT_EXPR:
      /* Some floating point comparisons may trap.  */
      return honor_nans;

    case EQ_EXPR:
    case NE_EXPR:
    case UNORDERED_EXPR:
    case ORDERED_EXPR:
    case UNLT_EXPR:
    case UNLE_EXPR:
    case UNGT_EXPR:
    case UNGE_EXPR:
    case UNEQ_EXPR:
      return honor_snans;

    case CONVERT_EXPR:
    case FIX_TRUNC_EXPR:
      /* Conversion of floating point might trap.  */
      return honor_nans;

    case NEGATE_EXPR:
    case ABS_EXPR:
    case CONJ_EXPR:
      /* These operations don't trap with floating point.  */
      if (honor_trapv)
	return true;
      return false;

    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
      /* Any floating arithmetic may trap.  */
      if (fp_operation && flag_trapping_math)
	return true;
      if (honor_trapv)
	return true;
      return false;

    case COMPLEX_EXPR:
    case CONSTRUCTOR:
      /* Constructing an object cannot trap.  */
      return false;

    default:
      /* Any floating arithmetic may trap.  */
      if (fp_operation && flag_trapping_math)
	return true;

      *handled = false;
      return false;
    }
}
/* Return true if operation OP may trap.  FP_OPERATION is true if OP is applied
   on floating-point values.  HONOR_TRAPV is true if OP is applied on integer
   type operands that may trap.  If OP is a division operator, DIVISOR contains
   the value of the divisor.  */

bool
operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
			tree divisor)
{
  bool honor_nans = (fp_operation && flag_trapping_math
		     && !flag_finite_math_only);
  bool honor_snans = fp_operation && flag_signaling_nans != 0;
  bool handled;

  if (TREE_CODE_CLASS (op) != tcc_comparison
      && TREE_CODE_CLASS (op) != tcc_unary
      && TREE_CODE_CLASS (op) != tcc_binary)
    return false;

  return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
					honor_nans, honor_snans, divisor,
					&handled);
}
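
/* Example uses (illustrative): integer division is considered
   trapping unless the divisor is a nonzero constant, so

       operation_could_trap_p (TRUNC_DIV_EXPR, false, false, divisor)

   is true for a variable DIVISOR; a floating-point PLUS_EXPR is
   considered trapping only when flag_trapping_math is set.  */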
/* Returns true if it is possible to prove that the index of
   an array access REF (an ARRAY_REF expression) falls into the
   array bounds.  */

static bool
in_array_bounds_p (tree ref)
{
  tree idx = TREE_OPERAND (ref, 1);
  tree min, max;

  if (TREE_CODE (idx) != INTEGER_CST)
    return false;

  min = array_ref_low_bound (ref);
  max = array_ref_up_bound (ref);
  if (!min
      || !max
      || TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    return false;

  if (tree_int_cst_lt (idx, min)
      || tree_int_cst_lt (max, idx))
    return false;

  return true;
}
/* Returns true if it is possible to prove that the range of
   an array access REF (an ARRAY_RANGE_REF expression) falls
   into the array bounds.  */

static bool
range_in_array_bounds_p (tree ref)
{
  tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
  tree range_min, range_max, min, max;

  range_min = TYPE_MIN_VALUE (domain_type);
  range_max = TYPE_MAX_VALUE (domain_type);
  if (!range_min
      || !range_max
      || TREE_CODE (range_min) != INTEGER_CST
      || TREE_CODE (range_max) != INTEGER_CST)
    return false;

  min = array_ref_low_bound (ref);
  max = array_ref_up_bound (ref);
  if (!min
      || !max
      || TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    return false;

  if (tree_int_cst_lt (range_min, min)
      || tree_int_cst_lt (max, range_max))
    return false;

  return true;
}
/* Return true if EXPR can trap, as in dereferencing an invalid pointer
   location or floating point arithmetic.  C.f. the rtl version, may_trap_p.
   This routine expects only GIMPLE lhs or rhs input.  */

bool
tree_could_trap_p (tree expr)
{
  enum tree_code code;
  bool fp_operation = false;
  bool honor_trapv = false;
  tree t, base, div = NULL_TREE;

  if (!expr)
    return false;

  code = TREE_CODE (expr);
  t = TREE_TYPE (expr);

  if (t)
    {
      if (COMPARISON_CLASS_P (expr))
	fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
      else
	fp_operation = FLOAT_TYPE_P (t);
      honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
    }

  if (TREE_CODE_CLASS (code) == tcc_binary)
    div = TREE_OPERAND (expr, 1);
  if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
    return true;

 restart:
  switch (code)
    {
    case COMPONENT_REF:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case BIT_FIELD_REF:
    case VIEW_CONVERT_EXPR:
    case WITH_SIZE_EXPR:
      expr = TREE_OPERAND (expr, 0);
      code = TREE_CODE (expr);
      goto restart;

    case ARRAY_RANGE_REF:
      base = TREE_OPERAND (expr, 0);
      if (tree_could_trap_p (base))
	return true;
      if (TREE_THIS_NOTRAP (expr))
	return false;
      return !range_in_array_bounds_p (expr);

    case ARRAY_REF:
      base = TREE_OPERAND (expr, 0);
      if (tree_could_trap_p (base))
	return true;
      if (TREE_THIS_NOTRAP (expr))
	return false;
      return !in_array_bounds_p (expr);

    case TARGET_MEM_REF:
    case MEM_REF:
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
	  && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
	return true;
      if (TREE_THIS_NOTRAP (expr))
	return false;
      /* We cannot prove that the access is in-bounds when we have
	 variable-index TARGET_MEM_REFs.  */
      if (code == TARGET_MEM_REF
	  && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
	return true;
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
	{
	  tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
	  offset_int off = mem_ref_offset (expr);
	  if (wi::neg_p (off, SIGNED))
	    return true;
	  if (TREE_CODE (base) == STRING_CST)
	    return wi::leu_p (TREE_STRING_LENGTH (base), off);
	  else if (DECL_SIZE_UNIT (base) == NULL_TREE
		   || TREE_CODE (DECL_SIZE_UNIT (base)) != INTEGER_CST
		   || wi::leu_p (wi::to_offset (DECL_SIZE_UNIT (base)), off))
	    return true;
	  /* Now we are sure the first byte of the access is inside
	     the object.  */
	  return false;
	}
      return true;

    case INDIRECT_REF:
      return !TREE_THIS_NOTRAP (expr);

    case ASM_EXPR:
      return TREE_THIS_VOLATILE (expr);

    case CALL_EXPR:
      t = get_callee_fndecl (expr);
      /* Assume that calls to weak functions may trap.  */
      if (!t || !DECL_P (t))
	return true;
      if (DECL_WEAK (t))
	return tree_could_trap_p (t);
      return false;

    case FUNCTION_DECL:
      /* Assume that accesses to weak functions may trap, unless we know
	 they are certainly defined in current TU or in some other
	 LTO partition.  */
      if (DECL_WEAK (expr) && !DECL_COMDAT (expr))
	{
	  struct cgraph_node *node;
	  if (!DECL_EXTERNAL (expr))
	    return false;
	  node = cgraph_node::get (expr)->function_symbol ();
	  if (node && node->in_other_partition)
	    return false;
	  return true;
	}
      return false;

    case VAR_DECL:
      /* Assume that accesses to weak vars may trap, unless we know
	 they are certainly defined in current TU or in some other
	 LTO partition.  */
      if (DECL_WEAK (expr) && !DECL_COMDAT (expr))
	{
	  varpool_node *node;
	  if (!DECL_EXTERNAL (expr))
	    return false;
	  node = varpool_node::get (expr)->ultimate_alias_target ();
	  if (node && node->in_other_partition)
	    return false;
	  return true;
	}
      return false;

    default:
      return false;
    }
}
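
/* A few illustrative outcomes (not exhaustive): a dereference *p with
   no TREE_THIS_NOTRAP annotation could trap; a MEM_REF based on the
   address of a decl traps only when the constant offset lies outside
   the decl's size; and a call through a weak, possibly undefined
   function is conservatively assumed to trap.  */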
/* Helper for stmt_could_throw_p.  Return true if STMT (assumed to be an
   assignment or a conditional) may throw.  */

static bool
stmt_could_throw_1_p (gimple stmt)
{
  enum tree_code code = gimple_expr_code (stmt);
  bool honor_nans = false;
  bool honor_snans = false;
  bool fp_operation = false;
  bool honor_trapv = false;
  tree t;
  size_t i;
  bool handled, ret;

  if (TREE_CODE_CLASS (code) == tcc_comparison
      || TREE_CODE_CLASS (code) == tcc_unary
      || TREE_CODE_CLASS (code) == tcc_binary)
    {
      if (is_gimple_assign (stmt)
	  && TREE_CODE_CLASS (code) == tcc_comparison)
	t = TREE_TYPE (gimple_assign_rhs1 (stmt));
      else if (gimple_code (stmt) == GIMPLE_COND)
	t = TREE_TYPE (gimple_cond_lhs (stmt));
      else
	t = gimple_expr_type (stmt);
      fp_operation = FLOAT_TYPE_P (t);
      if (fp_operation)
	{
	  honor_nans = flag_trapping_math && !flag_finite_math_only;
	  honor_snans = flag_signaling_nans != 0;
	}
      else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
	honor_trapv = true;
    }

  /* Check if the main expression may trap.  */
  t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
  ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
				       honor_nans, honor_snans, t,
				       &handled);
  if (handled)
    return ret;

  /* If the expression does not trap, see if any of the individual operands may
     trap.  */
  for (i = 0; i < gimple_num_ops (stmt); i++)
    if (tree_could_trap_p (gimple_op (stmt, i)))
      return true;

  return false;
}
/* Return true if statement STMT could throw an exception.  */

bool
stmt_could_throw_p (gimple stmt)
{
  if (!flag_exceptions)
    return false;

  /* The only statements that can throw an exception are assignments,
     conditionals, calls, resx, and asms.  */
  switch (gimple_code (stmt))
    {
    case GIMPLE_RESX:
      return true;

    case GIMPLE_CALL:
      return !gimple_call_nothrow_p (stmt);

    case GIMPLE_ASSIGN:
    case GIMPLE_COND:
      if (!cfun->can_throw_non_call_exceptions)
	return false;
      return stmt_could_throw_1_p (stmt);

    case GIMPLE_ASM:
      if (!cfun->can_throw_non_call_exceptions)
	return false;
      return gimple_asm_volatile_p (stmt);

    default:
      return false;
    }
}
/* Return true if expression T could throw an exception.  */

bool
tree_could_throw_p (tree t)
{
  if (!flag_exceptions)
    return false;
  if (TREE_CODE (t) == MODIFY_EXPR)
    {
      if (cfun->can_throw_non_call_exceptions
	  && tree_could_trap_p (TREE_OPERAND (t, 0)))
	return true;
      t = TREE_OPERAND (t, 1);
    }

  if (TREE_CODE (t) == WITH_SIZE_EXPR)
    t = TREE_OPERAND (t, 0);
  if (TREE_CODE (t) == CALL_EXPR)
    return (call_expr_flags (t) & ECF_NOTHROW) == 0;
  if (cfun->can_throw_non_call_exceptions)
    return tree_could_trap_p (t);
  return false;
}
/* Return true if STMT can throw an exception that is not caught within
   the current function (CFUN).  */

bool
stmt_can_throw_external (gimple stmt)
{
  int lp_nr;

  if (!stmt_could_throw_p (stmt))
    return false;

  lp_nr = lookup_stmt_eh_lp (stmt);
  return lp_nr == 0;
}

/* Return true if STMT can throw an exception that is caught within
   the current function (CFUN).  */

bool
stmt_can_throw_internal (gimple stmt)
{
  int lp_nr;

  if (!stmt_could_throw_p (stmt))
    return false;

  lp_nr = lookup_stmt_eh_lp (stmt);
  return lp_nr > 0;
}
/* Given a statement STMT in IFUN, if STMT can no longer throw, then
   remove any entry it might have from the EH table.  Return true if
   any change was made.  */

bool
maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
{
  if (stmt_could_throw_p (stmt))
    return false;
  return remove_stmt_from_eh_lp_fn (ifun, stmt);
}

/* Likewise, but always use the current function.  */

bool
maybe_clean_eh_stmt (gimple stmt)
{
  return maybe_clean_eh_stmt_fn (cfun, stmt);
}
/* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
   OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
   in the table if it should be in there.  Return TRUE if a replacement was
   done that may require an EH edge purge.  */

bool
maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
{
  int lp_nr = lookup_stmt_eh_lp (old_stmt);

  if (lp_nr != 0)
    {
      bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);

      if (new_stmt == old_stmt && new_stmt_could_throw)
	return false;

      remove_stmt_from_eh_lp (old_stmt);
      if (new_stmt_could_throw)
	{
	  add_stmt_to_eh_lp (new_stmt, lp_nr);
	  return false;
	}
      else
	return true;
    }

  return false;
}
/* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
   in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT.  The MAP
   operand is the return value of duplicate_eh_regions.  */

bool
maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
			    struct function *old_fun, gimple old_stmt,
			    hash_map<void *, void *> *map,
			    int default_lp_nr)
{
  int old_lp_nr, new_lp_nr;

  if (!stmt_could_throw_p (new_stmt))
    return false;

  old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
  if (old_lp_nr == 0)
    {
      if (default_lp_nr == 0)
	return false;
      new_lp_nr = default_lp_nr;
    }
  else if (old_lp_nr > 0)
    {
      eh_landing_pad old_lp, new_lp;

      old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
      new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
      new_lp_nr = new_lp->index;
    }
  else
    {
      eh_region old_r, new_r;

      old_r = (*old_fun->eh->region_array)[-old_lp_nr];
      new_r = static_cast<eh_region> (*map->get (old_r));
      new_lp_nr = -new_r->index;
    }

  add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
  return true;
}

/* Similar, but both OLD_STMT and NEW_STMT are within the current function,
   and thus no remapping is required.  */

bool
maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
{
  int lp_nr;

  if (!stmt_could_throw_p (new_stmt))
    return false;

  lp_nr = lookup_stmt_eh_lp (old_stmt);
  if (lp_nr == 0)
    return false;

  add_stmt_to_eh_lp (new_stmt, lp_nr);
  return true;
}
/* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
   GIMPLE_TRY) that are similar enough to be considered the same.  Currently
   this only handles handlers consisting of a single call, as that's the
   important case for C++: a destructor call for a particular object showing
   up in multiple handlers.  */

static bool
same_handler_p (gimple_seq oneh, gimple_seq twoh)
{
  gimple_stmt_iterator gsi;
  gimple ones, twos;
  unsigned int ai;

  gsi = gsi_start (oneh);
  if (!gsi_one_before_end_p (gsi))
    return false;
  ones = gsi_stmt (gsi);

  gsi = gsi_start (twoh);
  if (!gsi_one_before_end_p (gsi))
    return false;
  twos = gsi_stmt (gsi);

  if (!is_gimple_call (ones)
      || !is_gimple_call (twos)
      || gimple_call_lhs (ones)
      || gimple_call_lhs (twos)
      || gimple_call_chain (ones)
      || gimple_call_chain (twos)
      || !gimple_call_same_target_p (ones, twos)
      || gimple_call_num_args (ones) != gimple_call_num_args (twos))
    return false;

  for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
    if (!operand_equal_p (gimple_call_arg (ones, ai),
			  gimple_call_arg (twos, ai), 0))
      return false;

  return true;
}
/* Optimize
    try { A() } finally { try { ~B() } catch { ~A() } }
    try { ... } finally { ~A() }
   into
    try { A() } catch { ~B() }
    try { ~B() ... } finally { ~A() }

   This occurs frequently in C++, where A is a local variable and B is a
   temporary used in the initializer for A.  */

static void
optimize_double_finally (gimple one, gimple two)
{
  gimple oneh;
  gimple_stmt_iterator gsi;
  gimple_seq cleanup;

  cleanup = gimple_try_cleanup (one);
  gsi = gsi_start (cleanup);
  if (!gsi_one_before_end_p (gsi))
    return;

  oneh = gsi_stmt (gsi);
  if (gimple_code (oneh) != GIMPLE_TRY
      || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
    return;

  if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
    {
      gimple_seq seq = gimple_try_eval (oneh);

      gimple_try_set_cleanup (one, seq);
      gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
      seq = copy_gimple_seq_and_replace_locals (seq);
      gimple_seq_add_seq (&seq, gimple_try_eval (two));
      gimple_try_set_eval (two, seq);
    }
}
/* Perform EH refactoring optimizations that are simpler to do when code
   flow has been lowered but EH structures haven't.  */

static void
refactor_eh_r (gimple_seq seq)
{
  gimple_stmt_iterator gsi;
  gimple one, two;

  one = NULL;
  two = NULL;
  gsi = gsi_start (seq);
  while (1)
    {
      one = two;
      if (gsi_end_p (gsi))
	two = NULL;
      else
	two = gsi_stmt (gsi);
      if (one && two
	  && gimple_code (one) == GIMPLE_TRY
	  && gimple_code (two) == GIMPLE_TRY
	  && gimple_try_kind (one) == GIMPLE_TRY_FINALLY
	  && gimple_try_kind (two) == GIMPLE_TRY_FINALLY)
	optimize_double_finally (one, two);
      if (one)
	switch (gimple_code (one))
	  {
	  case GIMPLE_TRY:
	    refactor_eh_r (gimple_try_eval (one));
	    refactor_eh_r (gimple_try_cleanup (one));
	    break;
	  case GIMPLE_CATCH:
	    refactor_eh_r (gimple_catch_handler (one));
	    break;
	  case GIMPLE_EH_FILTER:
	    refactor_eh_r (gimple_eh_filter_failure (one));
	    break;
	  case GIMPLE_EH_ELSE:
	    refactor_eh_r (gimple_eh_else_n_body (one));
	    refactor_eh_r (gimple_eh_else_e_body (one));
	    break;
	  default:
	    break;
	  }
      if (two)
	gsi_next (&gsi);
      else
	break;
    }
}
namespace {

const pass_data pass_data_refactor_eh =
{
  GIMPLE_PASS, /* type */
  "ehopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_refactor_eh : public gimple_opt_pass
{
public:
  pass_refactor_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_refactor_eh, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_exceptions != 0; }
  virtual unsigned int execute (function *)
    {
      refactor_eh_r (gimple_body (current_function_decl));
      return 0;
    }

}; // class pass_refactor_eh

} // anon namespace

gimple_opt_pass *
make_pass_refactor_eh (gcc::context *ctxt)
{
  return new pass_refactor_eh (ctxt);
}
/* At the end of gimple optimization, we can lower RESX.  */

static bool
lower_resx (basic_block bb, gimple stmt, hash_map<eh_region, tree> *mnt_map)
{
  int lp_nr;
  eh_region src_r, dst_r;
  gimple_stmt_iterator gsi;
  gimple x;
  tree fn, src_nr;
  bool ret = false;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0)
    dst_r = get_eh_region_from_lp_number (lp_nr);
  else
    dst_r = NULL;

  src_r = get_eh_region_from_number (gimple_resx_region (stmt));
  gsi = gsi_last_bb (bb);

  if (src_r == NULL)
    {
      /* We can wind up with no source region when pass_cleanup_eh shows
	 that there are no entries into an eh region and deletes it, but
	 then the block that contains the resx isn't removed.  This can
	 happen without optimization when the switch statement created by
	 lower_try_finally_switch isn't simplified to remove the eh case.

	 Resolve this by expanding the resx node to an abort.  */

      fn = builtin_decl_implicit (BUILT_IN_TRAP);
      x = gimple_build_call (fn, 0);
      gsi_insert_before (&gsi, x, GSI_SAME_STMT);

      while (EDGE_COUNT (bb->succs) > 0)
	remove_edge (EDGE_SUCC (bb, 0));
    }
  else if (dst_r)
    {
      /* When we have a destination region, we resolve this by copying
	 the excptr and filter values into place, and changing the edge
	 to immediately after the landing pad.  */
      edge e;

      if (lp_nr < 0)
	{
	  basic_block new_bb;
	  tree lab;

	  /* We are resuming into a MUST_NOT_THROW region.  Expand a call to
	     the failure decl into a new block, if needed.  */
	  gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);

	  tree *slot = mnt_map->get (dst_r);
	  if (slot == NULL)
	    {
	      gimple_stmt_iterator gsi2;

	      new_bb = create_empty_bb (bb);
	      add_bb_to_loop (new_bb, bb->loop_father);
	      lab = gimple_block_label (new_bb);
	      gsi2 = gsi_start_bb (new_bb);

	      fn = dst_r->u.must_not_throw.failure_decl;
	      x = gimple_build_call (fn, 0);
	      gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
	      gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);

	      mnt_map->put (dst_r, lab);
	    }
	  else
	    {
	      lab = *slot;
	      new_bb = label_to_block (lab);
	    }

	  gcc_assert (EDGE_COUNT (bb->succs) == 0);
	  e = make_edge (bb, new_bb, EDGE_FALLTHRU);
	  e->count = bb->count;
	  e->probability = REG_BR_PROB_BASE;
	}
      else
	{
	  edge_iterator ei;
	  tree dst_nr = build_int_cst (integer_type_node, dst_r->index);

	  fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
	  src_nr = build_int_cst (integer_type_node, src_r->index);
	  x = gimple_build_call (fn, 2, dst_nr, src_nr);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	  /* Update the flags for the outgoing edge.  */
	  e = single_succ_edge (bb);
	  gcc_assert (e->flags & EDGE_EH);
	  e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;

	  /* If there are no more EH users of the landing pad, delete it.  */
	  FOR_EACH_EDGE (e, ei, e->dest->preds)
	    if (e->flags & EDGE_EH)
	      break;
	  if (e == NULL)
	    {
	      eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
	      remove_eh_landing_pad (lp);
	    }
	}

      ret = true;
    }
  else
    {
      tree var;

      /* When we don't have a destination region, this exception escapes
	 up the call chain.  We resolve this by generating a call to the
	 _Unwind_Resume library function.  */

      /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
	 with no arguments for C++ and Java.  Check for that.  */
      if (src_r->use_cxa_end_cleanup)
	{
	  fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
	  x = gimple_build_call (fn, 0);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
	}
      else
	{
	  fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
	  src_nr = build_int_cst (integer_type_node, src_r->index);
	  x = gimple_build_call (fn, 1, src_nr);
	  var = create_tmp_var (ptr_type_node, NULL);
	  var = make_ssa_name (var, x);
	  gimple_call_set_lhs (x, var);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	  fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
	  x = gimple_build_call (fn, 1, var);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
	}

      gcc_assert (EDGE_COUNT (bb->succs) == 0);
    }

  gsi_remove (&gsi, true);

  return ret;
}
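
/* An illustrative lowering, assuming source region 1 and no handler
   for it within the current function: a trailing

       resx 1;

   becomes roughly

       _1 = __builtin_eh_pointer (1);
       _Unwind_Resume (_1);

   whereas a resx that does have a destination region is replaced by a
   __builtin_eh_copy_values call and a fallthru edge to just past the
   destination landing pad.  */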
namespace {

const pass_data pass_data_lower_resx =
{
  GIMPLE_PASS, /* type */
  "resx", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_resx : public gimple_opt_pass
{
public:
  pass_lower_resx (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_resx, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_exceptions != 0; }
  virtual unsigned int execute (function *);

}; // class pass_lower_resx

unsigned int
pass_lower_resx::execute (function *fun)
{
  basic_block bb;
  bool dominance_invalidated = false;
  bool any_rewritten = false;

  hash_map<eh_region, tree> mnt_map;

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple last = last_stmt (bb);
      if (last && is_gimple_resx (last))
	{
	  dominance_invalidated |= lower_resx (bb, last, &mnt_map);
	  any_rewritten = true;
	}
    }

  if (dominance_invalidated)
    {
      free_dominance_info (CDI_DOMINATORS);
      free_dominance_info (CDI_POST_DOMINATORS);
    }

  return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_lower_resx (gcc::context *ctxt)
{
  return new pass_lower_resx (ctxt);
}
/* Try to optimize var = {v} {CLOBBER} stmts followed just by
   external throw.  */

static void
optimize_clobbers (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  bool any_clobbers = false;
  bool seen_stack_restore = false;
  edge_iterator ei;
  edge e;

  /* Only optimize anything if the bb contains at least one clobber,
     ends with resx (checked by caller), optionally contains some
     debug stmts or labels, or at most one __builtin_stack_restore
     call, and has an incoming EH edge.  */
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_clobber_p (stmt))
	{
	  any_clobbers = true;
	  continue;
	}
      if (!seen_stack_restore
	  && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
	{
	  seen_stack_restore = true;
	  continue;
	}
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      return;
    }
  if (!any_clobbers)
    return;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (e->flags & EDGE_EH)
      break;
  if (e == NULL)
    return;
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (!gimple_clobber_p (stmt))
	continue;
      unlink_stmt_vdef (stmt);
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}
/* Try to sink var = {v} {CLOBBER} stmts followed just by
   internal throw to successor BB.  */

static int
sink_clobbers (basic_block bb)
{
  edge e;
  edge_iterator ei;
  gimple_stmt_iterator gsi, dgsi;
  basic_block succbb;
  bool any_clobbers = false;
  unsigned todo = 0;

  /* Only optimize if BB has a single EH successor and
     all predecessor edges are EH too.  */
  if (!single_succ_p (bb)
      || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
    return 0;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if ((e->flags & EDGE_EH) == 0)
	return 0;
    }

  /* And BB contains only CLOBBER stmts before the final
     RESX.  */
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      if (!gimple_clobber_p (stmt))
	return 0;
      any_clobbers = true;
    }
  if (!any_clobbers)
    return 0;

  edge succe = single_succ_edge (bb);
  succbb = succe->dest;

  /* See if there is a virtual PHI node to take an updated virtual
     operand from.  */
  gimple vphi = NULL;
  tree vuse = NULL_TREE;
  for (gsi = gsi_start_phis (succbb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree res = gimple_phi_result (gsi_stmt (gsi));
      if (virtual_operand_p (res))
	{
	  vphi = gsi_stmt (gsi);
	  vuse = res;
	  break;
	}
    }

  dgsi = gsi_after_labels (succbb);
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      tree lhs;
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      lhs = gimple_assign_lhs (stmt);
      /* Unfortunately we don't have dominance info updated at this
	 point, so checking if
	 dominated_by_p (CDI_DOMINATORS, succbb,
			 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0)))
	 would be too costly.  Thus, avoid sinking any clobbers that
	 refer to non-(D) SSA_NAMEs.  */
      if (TREE_CODE (lhs) == MEM_REF
	  && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
	  && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
	{
	  unlink_stmt_vdef (stmt);
	  gsi_remove (&gsi, true);
	  release_defs (stmt);
	  continue;
	}

      /* As we do not change stmt order when sinking across a
	 forwarder edge we can keep virtual operands in place.  */
      gsi_remove (&gsi, false);
      gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);

      /* But adjust virtual operands if we sunk across a PHI node.  */
      if (vuse)
	{
	  gimple use_stmt;
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
	      SET_USE (use_p, gimple_vdef (stmt));
	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
	    {
	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
	    }
	  /* Adjust the incoming virtual operand.  */
	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
	  SET_USE (gimple_vuse_op (stmt), vuse);
	}
      /* If there isn't a single predecessor but no virtual PHI node
	 arrange for virtual operands to be renamed.  */
      else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
	       && !single_pred_p (succbb))
	{
	  /* In this case there will be no use of the VDEF of this stmt.
	     ??? Unless this is a secondary opportunity and we have not
	     removed unreachable blocks yet, so we cannot assert this.
	     Which also means we will end up renaming too many times.  */
	  SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
	  mark_virtual_operands_for_renaming (cfun);
	  todo |= TODO_update_ssa_only_virtuals;
	}
    }

  return todo;
}
/* At the end of inlining, we can lower EH_DISPATCH.  Return true when
   we have found some duplicate labels and removed some edges.  */

static bool
lower_eh_dispatch (basic_block src, gimple stmt)
{
  gimple_stmt_iterator gsi;
  int region_nr;
  eh_region r;
  tree filter, fn;
  gimple x;
  bool redirected = false;

  region_nr = gimple_eh_dispatch_region (stmt);
  r = get_eh_region_from_number (region_nr);

  gsi = gsi_last_bb (src);

  switch (r->type)
    {
    case ERT_TRY:
      {
	auto_vec<tree> labels;
	tree default_label = NULL;
	eh_catch c;
	edge_iterator ei;
	edge e;
	hash_set<tree> seen_values;

	/* Collect the labels for a switch.  Zero the post_landing_pad
	   field because we'll no longer have anything keeping these labels
	   in existence and the optimizer will be free to merge these
	   blocks at will.  */
	for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	  {
	    tree tp_node, flt_node, lab = c->label;
	    bool have_label = false;

	    c->label = NULL;
	    tp_node = c->type_list;
	    flt_node = c->filter_list;

	    if (tp_node == NULL)
	      {
		default_label = lab;
		break;
	      }
	    do
	      {
		/* Filter out duplicate labels that arise when this handler
		   is shadowed by an earlier one.  When no labels are
		   attached to the handler anymore, we remove
		   the corresponding edge and then we delete unreachable
		   blocks at the end of this pass.  */
		if (! seen_values.contains (TREE_VALUE (flt_node)))
		  {
		    tree t = build_case_label (TREE_VALUE (flt_node),
					       NULL, lab);
		    labels.safe_push (t);
		    seen_values.add (TREE_VALUE (flt_node));
		    have_label = true;
		  }

		tp_node = TREE_CHAIN (tp_node);
		flt_node = TREE_CHAIN (flt_node);
	      }
	    while (tp_node);
	    if (! have_label)
	      {
		remove_edge (find_edge (src, label_to_block (lab)));
		redirected = true;
	      }
	  }

	/* Clean up the edge flags.  */
	FOR_EACH_EDGE (e, ei, src->succs)
	  {
	    if (e->flags & EDGE_FALLTHRU)
	      {
		/* If there was no catch-all, use the fallthru edge.  */
		if (default_label == NULL)
		  default_label = gimple_block_label (e->dest);
		e->flags &= ~EDGE_FALLTHRU;
	      }
	  }
	gcc_assert (default_label != NULL);

	/* Don't generate a switch if there's only a default case.
	   This is common in the form of try { A; } catch (...) { B; }.  */
	if (!labels.exists ())
	  {
	    e = single_succ_edge (src);
	    e->flags |= EDGE_FALLTHRU;
	  }
	else
	  {
	    fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
	    x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
							 region_nr));
	    filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
	    filter = make_ssa_name (filter, x);
	    gimple_call_set_lhs (x, filter);
	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	    /* Turn the default label into a default case.  */
	    default_label = build_case_label (NULL, NULL, default_label);
	    sort_case_labels (labels);

	    x = gimple_build_switch (filter, default_label, labels);
	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
	  }
      }
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      {
	edge b_e = BRANCH_EDGE (src);
	edge f_e = FALLTHRU_EDGE (src);

	fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
	x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
						     region_nr));
	filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
	filter = make_ssa_name (filter, x);
	gimple_call_set_lhs (x, filter);
	gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	r->u.allowed.label = NULL;
	x = gimple_build_cond (EQ_EXPR, filter,
			       build_int_cst (TREE_TYPE (filter),
					      r->u.allowed.filter),
			       NULL_TREE, NULL_TREE);
	gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
	f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Replace the EH_DISPATCH with the SWITCH or COND generated above.  */
  gsi_remove (&gsi, true);
  return redirected;
}
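
/* An illustrative result, assuming an ERT_TRY region 1 whose two
   catch clauses were assigned filter values 1 and 2: the dispatch
   becomes roughly

       _1 = __builtin_eh_filter (1);
       switch (_1) <case 1: L1, case 2: L2, default: Ld>

   while an ERT_ALLOWED_EXCEPTIONS dispatch turns into a two-way
   conditional comparing the filter value instead.  */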
namespace {

const pass_data pass_data_lower_eh_dispatch =
{
  GIMPLE_PASS, /* type */
  "ehdisp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_eh_dispatch : public gimple_opt_pass
{
public:
  pass_lower_eh_dispatch (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
  virtual unsigned int execute (function *);

}; // class pass_lower_eh_dispatch

unsigned int
pass_lower_eh_dispatch::execute (function *fun)
{
  basic_block bb;
  int flags = 0;
  bool redirected = false;

  assign_filter_values ();

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple last = last_stmt (bb);
      if (last == NULL)
	continue;
      if (gimple_code (last) == GIMPLE_EH_DISPATCH)
	{
	  redirected |= lower_eh_dispatch (bb, last);
	  flags |= TODO_update_ssa_only_virtuals;
	}
      else if (gimple_code (last) == GIMPLE_RESX)
	{
	  if (stmt_can_throw_external (last))
	    optimize_clobbers (bb);
	  else
	    flags |= sink_clobbers (bb);
	}
    }

  if (redirected)
    delete_unreachable_blocks ();
  return flags;
}

} // anon namespace

gimple_opt_pass *
make_pass_lower_eh_dispatch (gcc::context *ctxt)
{
  return new pass_lower_eh_dispatch (ctxt);
}
/* Walk statements, see what regions and, optionally, landing pads
   are really referenced.

   Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
   and in LP_REACHABLE an sbitmap with bits set for reachable landing pads.

   Passing NULL for LP_REACHABLE is valid, in this case only reachable
   regions are marked.

   The caller is responsible for freeing the returned sbitmaps.  */

static void
mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
{
  sbitmap r_reachable, lp_reachable;
  basic_block bb;
  bool mark_landing_pads = (lp_reachablep != NULL);
  gcc_checking_assert (r_reachablep != NULL);

  r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
  bitmap_clear (r_reachable);
  *r_reachablep = r_reachable;

  if (mark_landing_pads)
    {
      lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
      bitmap_clear (lp_reachable);
      *lp_reachablep = lp_reachable;
    }
  else
    lp_reachable = NULL;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);

	  if (mark_landing_pads)
	    {
	      int lp_nr = lookup_stmt_eh_lp (stmt);

	      /* Negative LP numbers are MUST_NOT_THROW regions which
		 are not considered BB enders.  */
	      if (lp_nr < 0)
		bitmap_set_bit (r_reachable, -lp_nr);

	      /* Positive LP numbers are real landing pads, and BB enders.  */
	      else if (lp_nr > 0)
		{
		  gcc_assert (gsi_one_before_end_p (gsi));
		  eh_region region = get_eh_region_from_lp_number (lp_nr);
		  bitmap_set_bit (r_reachable, region->index);
		  bitmap_set_bit (lp_reachable, lp_nr);
		}
	    }

	  /* Avoid removing regions referenced from RESX/EH_DISPATCH.  */
	  switch (gimple_code (stmt))
	    {
	    case GIMPLE_RESX:
	      bitmap_set_bit (r_reachable, gimple_resx_region (stmt));
	      break;
	    case GIMPLE_EH_DISPATCH:
	      bitmap_set_bit (r_reachable, gimple_eh_dispatch_region (stmt));
	      break;
	    default:
	      break;
	    }
	}
    }
}
/* Remove unreachable handlers and unreachable landing pads.  */

static void
remove_unreachable_handlers (void)
{
  sbitmap r_reachable, lp_reachable;
  eh_region region;
  eh_landing_pad lp;
  unsigned i;

  mark_reachable_handlers (&r_reachable, &lp_reachable);

  if (dump_file)
    {
      fprintf (dump_file, "Before removal of unreachable regions:\n");
      dump_eh_tree (dump_file, cfun);
      fprintf (dump_file, "Reachable regions: ");
      dump_bitmap_file (dump_file, r_reachable);
      fprintf (dump_file, "Reachable landing pads: ");
      dump_bitmap_file (dump_file, lp_reachable);
    }

  if (dump_file)
    {
      FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
	if (region && !bitmap_bit_p (r_reachable, region->index))
	  fprintf (dump_file,
		   "Removing unreachable region %d\n",
		   region->index);
    }

  remove_unreachable_eh_regions (r_reachable);

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
    if (lp && !bitmap_bit_p (lp_reachable, lp->index))
      {
	if (dump_file)
	  fprintf (dump_file,
		   "Removing unreachable landing pad %d\n",
		   lp->index);
	remove_eh_landing_pad (lp);
      }

  if (dump_file)
    {
      fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
      dump_eh_tree (dump_file, cfun);
      fprintf (dump_file, "\n\n");
    }

  sbitmap_free (r_reachable);
  sbitmap_free (lp_reachable);

#ifdef ENABLE_CHECKING
  verify_eh_tree (cfun);
#endif
}
/* Remove unreachable handlers if any landing pads have been removed after
   last ehcleanup pass (due to gimple_purge_dead_eh_edges).  */

void
maybe_remove_unreachable_handlers (void)
{
  eh_landing_pad lp;
  unsigned i;

  if (cfun->eh == NULL)
    return;

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
    if (lp && lp->post_landing_pad)
      {
	if (label_to_block (lp->post_landing_pad) == NULL)
	  {
	    remove_unreachable_handlers ();
	    return;
	  }
      }
}
/* Remove regions that do not have landing pads.  This assumes
   that remove_unreachable_handlers has already been run, and
   that we've just manipulated the landing pads since then.

   Preserve regions with landing pads and regions that prevent
   exceptions from propagating further, even if these regions
   are not reachable.  */

static void
remove_unreachable_handlers_no_lp (void)
{
  eh_region region;
  sbitmap r_reachable;
  unsigned i;

  mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
    {
      if (! region)
	continue;

      if (region->landing_pads != NULL
	  || region->type == ERT_MUST_NOT_THROW)
	bitmap_set_bit (r_reachable, region->index);

      if (dump_file
	  && !bitmap_bit_p (r_reachable, region->index))
	fprintf (dump_file,
		 "Removing unreachable region %d\n",
		 region->index);
    }

  remove_unreachable_eh_regions (r_reachable);

  sbitmap_free (r_reachable);
}
/* Undo critical edge splitting on an EH landing pad.  Earlier, we
   optimistically split all sorts of edges, including EH edges.  The
   optimization passes in between may not have needed them; if not,
   we should undo the split.

   Recognize this case by having one EH edge incoming to the BB and
   one normal edge outgoing; BB should be empty apart from the
   post_landing_pad label.

   Note that this is slightly different from the empty handler case
   handled by cleanup_empty_eh, in that the actual handler may yet
   have actual code but the landing pad has been separated from the
   handler.  As such, cleanup_empty_eh relies on this transformation
   having been done first.  */

static bool
unsplit_eh (eh_landing_pad lp)
{
  basic_block bb = label_to_block (lp->post_landing_pad);
  gimple_stmt_iterator gsi;
  edge e_in, e_out;

  /* Quickly check the edge counts on BB for singularity.  */
  if (!single_pred_p (bb) || !single_succ_p (bb))
    return false;
  e_in = single_pred_edge (bb);
  e_out = single_succ_edge (bb);

  /* Input edge must be EH and output edge must be normal.  */
  if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
    return false;

  /* The block must be empty except for the labels and debug insns.  */
  gsi = gsi_after_labels (bb);
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);
  if (!gsi_end_p (gsi))
    return false;

  /* The destination block must not already have a landing pad
     for a different region.  */
  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      tree lab;
      int lp_nr;

      if (gimple_code (stmt) != GIMPLE_LABEL)
	break;
      lab = gimple_label_label (stmt);
      lp_nr = EH_LANDING_PAD_NR (lab);
      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
	return false;
    }

  /* The new destination block must not already be a destination of
     the source block, lest we merge fallthru and eh edges and get
     all sorts of confused.  */
  if (find_edge (e_in->src, e_out->dest))
    return false;

  /* ??? We can get degenerate phis due to cfg cleanups.  I would have
     thought this should have been cleaned up by a phicprop pass, but
     that doesn't appear to handle virtuals.  Propagate by hand.  */
  if (!gimple_seq_empty_p (phi_nodes (bb)))
    {
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
	{
	  gimple use_stmt, phi = gsi_stmt (gsi);
	  tree lhs = gimple_phi_result (phi);
	  tree rhs = gimple_phi_arg_def (phi, 0);
	  use_operand_p use_p;
	  imm_use_iterator iter;

	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, rhs);
	    }

	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
	    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;

	  remove_phi_node (&gsi, true);
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
	     lp->index, e_out->dest->index);

  /* Redirect the edge.  Since redirect_eh_edge_1 expects to be moving
     a successor edge, humor it.  But do the real CFG change with the
     predecessor of E_OUT in order to preserve the ordering of arguments
     to the PHI nodes in E_OUT->DEST.  */
  redirect_eh_edge_1 (e_in, e_out->dest, false);
  redirect_edge_pred (e_out, e_in->src);
  e_out->flags = e_in->flags;
  e_out->probability = e_in->probability;
  e_out->count = e_in->count;
  remove_edge (e_in);

  return true;
}
/* Examine each landing pad block and see if it matches unsplit_eh.  */

static bool
unsplit_all_eh (void)
{
  bool changed = false;
  eh_landing_pad lp;
  int i;

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp)
      changed |= unsplit_eh (lp);

  return changed;
}
/* A subroutine of cleanup_empty_eh.  Redirect all EH edges incoming
   to OLD_BB to NEW_BB; return true on success, false on failure.

   OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
   PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
   Virtual PHIs may be deleted and marked for renaming.  */

static bool
cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
			     edge old_bb_out, bool change_region)
{
  gimple_stmt_iterator ngsi, ogsi;
  edge_iterator ei;
  edge e;
  bitmap ophi_handled;

  /* The destination block must not be a regular successor for any
     of the preds of the landing pad.  Thus, avoid turning a pair of
     edges into crossing edges into NEW_BB, which CFG verification
     would choke on.  See PR45172 and PR51089.  */
  FOR_EACH_EDGE (e, ei, old_bb->preds)
    if (find_edge (e->src, new_bb))
      return false;

  FOR_EACH_EDGE (e, ei, old_bb->preds)
    redirect_edge_var_map_clear (e);

  ophi_handled = BITMAP_ALLOC (NULL);

  /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
     for the edges we're going to move.  */
  for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
    {
      gimple ophi, nphi = gsi_stmt (ngsi);
      tree nresult, nop;

      nresult = gimple_phi_result (nphi);
      nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);

      /* Find the corresponding PHI in OLD_BB so we can forward-propagate
	 the source ssa_name.  */
      ophi = NULL;
      for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
	{
	  ophi = gsi_stmt (ogsi);
	  if (gimple_phi_result (ophi) == nop)
	    break;
	  ophi = NULL;
	}

      /* If we did find the corresponding PHI, copy those inputs.  */
      if (ophi)
	{
	  /* If NOP is used somewhere else beyond phis in new_bb, give up.  */
	  if (!has_single_use (nop))
	    {
	      imm_use_iterator imm_iter;
	      use_operand_p use_p;

	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
		{
		  if (!gimple_debug_bind_p (USE_STMT (use_p))
		      && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
			  || gimple_bb (USE_STMT (use_p)) != new_bb))
		    goto fail;
		}
	    }
	  bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
	  FOR_EACH_EDGE (e, ei, old_bb->preds)
	    {
	      location_t oloc;
	      tree oop;

	      if ((e->flags & EDGE_EH) == 0)
		continue;
	      oop = gimple_phi_arg_def (ophi, e->dest_idx);
	      oloc = gimple_phi_arg_location (ophi, e->dest_idx);
	      redirect_edge_var_map_add (e, nresult, oop, oloc);
	    }
	}
      /* If we didn't find the PHI, if it's a real variable or a VOP, we know
	 from the fact that OLD_BB is tree_empty_eh_handler_p that the
	 variable is unchanged from input to the block and we can simply
	 re-use the input to NEW_BB from the OLD_BB_OUT edge.  */
      else
	{
	  location_t nloc
	    = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
	  FOR_EACH_EDGE (e, ei, old_bb->preds)
	    redirect_edge_var_map_add (e, nresult, nop, nloc);
	}
    }

  /* Second, verify that all PHIs from OLD_BB have been handled.  If not,
     we don't know what values from the other edges into NEW_BB to use.  */
  for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
    {
      gimple ophi = gsi_stmt (ogsi);
      tree oresult = gimple_phi_result (ophi);
      if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
	goto fail;
    }

  /* Finally, move the edges and update the PHIs.  */
  for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
    if (e->flags & EDGE_EH)
      {
	/* ??? CFG manipulation routines do not try to update loop
	   form on edge redirection.  Do so manually here for now.  */
	/* If we redirect a loop entry or latch edge, that will either
	   create a multiple entry loop or rotate the loop.  If the loops
	   merge we may have created a loop with multiple latches.
	   All of this isn't easily fixed thus cancel the affected loop
	   and mark the other loop as possibly having multiple latches.  */
	if (e->dest == e->dest->loop_father->header)
	  {
	    e->dest->loop_father->header = NULL;
	    e->dest->loop_father->latch = NULL;
	    new_bb->loop_father->latch = NULL;
	    loops_state_set (LOOPS_NEED_FIXUP|LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
	  }
	redirect_eh_edge_1 (e, new_bb, change_region);
	redirect_edge_succ (e, new_bb);
	flush_pending_stmts (e);
      }
    else
      ei_next (&ei);

  BITMAP_FREE (ophi_handled);
  return true;

 fail:
  FOR_EACH_EDGE (e, ei, old_bb->preds)
    redirect_edge_var_map_clear (e);
  BITMAP_FREE (ophi_handled);
  return false;
}
/* A subroutine of cleanup_empty_eh.  Move a landing pad LP from its
   old region to NEW_REGION at BB.  */

static void
cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
			  eh_landing_pad lp, eh_region new_region)
{
  gimple_stmt_iterator gsi;
  eh_landing_pad *pp;

  for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
    continue;
  *pp = lp->next_lp;

  lp->region = new_region;
  lp->next_lp = new_region->landing_pads;
  new_region->landing_pads = lp;

  /* Delete the RESX that was matched within the empty handler block.  */
  gsi = gsi_last_bb (bb);
  unlink_stmt_vdef (gsi_stmt (gsi));
  gsi_remove (&gsi, true);

  /* Clean up E_OUT for the fallthru.  */
  e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
  e_out->probability = REG_BR_PROB_BASE;
}
/* A subroutine of cleanup_empty_eh.  Handle more complex cases of
   unsplitting than unsplit_eh was prepared to handle, e.g. when
   multiple incoming edges and phis are involved.  */

static bool
cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
{
  gimple_stmt_iterator gsi;
  tree lab;

  /* We really ought not have totally lost everything following
     a landing pad label.  Given that BB is empty, there had better
     be a successor.  */
  gcc_assert (e_out != NULL);

  /* The destination block must not already have a landing pad
     for a different region.  */
  lab = NULL;
  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      int lp_nr;

      if (gimple_code (stmt) != GIMPLE_LABEL)
	break;
      lab = gimple_label_label (stmt);
      lp_nr = EH_LANDING_PAD_NR (lab);
      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
	return false;
    }

  /* Attempt to move the PHIs into the successor block.  */
  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Unsplit EH landing pad %d to block %i "
		 "(via cleanup_empty_eh).\n",
		 lp->index, e_out->dest->index);
      return true;
    }

  return false;
}
/* Return true if edge E_FIRST is part of an empty infinite loop
   or leads to such a loop through a series of single successor
   empty bbs.  */

static bool
infinite_empty_loop_p (edge e_first)
{
  bool inf_loop = false;
  edge e;

  if (e_first->dest == e_first->src)
    return true;

  e_first->src->aux = (void *) 1;
  for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
    {
      gimple_stmt_iterator gsi;
      if (e->dest->aux)
	{
	  inf_loop = true;
	  break;
	}
      e->dest->aux = (void *) 1;
      gsi = gsi_after_labels (e->dest);
      if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
	gsi_next_nondebug (&gsi);
      if (!gsi_end_p (gsi))
	break;
    }
  e_first->src->aux = NULL;
  for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
    e->dest->aux = NULL;

  return inf_loop;
}
/* Examine the block associated with LP to determine if it's an empty
   handler for its EH region.  If so, attempt to redirect EH edges to
   an outer region.  Return true if the CFG was updated in any way.  This
   is similar to jump forwarding, just across EH edges.  */

static bool
cleanup_empty_eh (eh_landing_pad lp)
{
  basic_block bb = label_to_block (lp->post_landing_pad);
  gimple_stmt_iterator gsi;
  gimple resx;
  eh_region new_region;
  edge_iterator ei;
  edge e, e_out;
  bool has_non_eh_pred;
  bool ret = false;
  int new_lp_nr;

  /* There can be zero or one edges out of BB.  This is the quickest test.  */
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      e_out = NULL;
      break;
    case 1:
      e_out = single_succ_edge (bb);
      break;
    default:
      return false;
    }

  resx = last_stmt (bb);
  if (resx && is_gimple_resx (resx))
    {
      if (stmt_can_throw_external (resx))
	optimize_clobbers (bb);
      else if (sink_clobbers (bb))
	ret = true;
    }

  gsi = gsi_after_labels (bb);

  /* Make sure to skip debug statements.  */
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);

  /* If the block is totally empty, look for more unsplitting cases.  */
  if (gsi_end_p (gsi))
    {
      /* For the degenerate case of an infinite loop bail out.
	 If bb has no successors and is totally empty, which can happen e.g.
	 because of incorrect noreturn attribute, bail out too.  */
      if (e_out == NULL
	  || infinite_empty_loop_p (e_out))
	return ret;

      return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
    }

  /* The block should consist only of a single RESX statement, modulo a
     preceding call to __builtin_stack_restore if there is no outgoing
     edge, since the call can be eliminated in this case.  */
  resx = gsi_stmt (gsi);
  if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
    {
      gsi_next (&gsi);
      resx = gsi_stmt (gsi);
    }
  if (!is_gimple_resx (resx))
    return ret;
  gcc_assert (gsi_one_before_end_p (gsi));

  /* Determine if there are non-EH edges, or resx edges into the handler.  */
  has_non_eh_pred = false;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (!(e->flags & EDGE_EH))
      has_non_eh_pred = true;

  /* Find the handler that's outer of the empty handler by looking at
     where the RESX instruction was vectored.  */
  new_lp_nr = lookup_stmt_eh_lp (resx);
  new_region = get_eh_region_from_lp_number (new_lp_nr);

  /* If there's no destination region within the current function,
     redirection is trivial via removing the throwing statements from
     the EH region, removing the EH edges, and allowing the block
     to go unreachable.  */
  if (new_region == NULL)
    {
      gcc_assert (e_out == NULL);
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
	if (e->flags & EDGE_EH)
	  {
	    gimple stmt = last_stmt (e->src);
	    remove_stmt_from_eh_lp (stmt);
	    remove_edge (e);
	  }
	else
	  ei_next (&ei);
      goto succeed;
    }

  /* If the destination region is a MUST_NOT_THROW, allow the runtime
     to handle the abort and allow the blocks to go unreachable.  */
  if (new_region->type == ERT_MUST_NOT_THROW)
    {
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
	if (e->flags & EDGE_EH)
	  {
	    gimple stmt = last_stmt (e->src);
	    remove_stmt_from_eh_lp (stmt);
	    add_stmt_to_eh_lp (stmt, new_lp_nr);
	    remove_edge (e);
	  }
	else
	  ei_next (&ei);
      goto succeed;
    }

  /* Try to redirect the EH edges and merge the PHIs into the destination
     landing pad block.  If the merge succeeds, we'll already have redirected
     all the EH edges.  The handler itself will go unreachable if there were
     no normal edges.  */
  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
    goto succeed;

  /* Finally, if all input edges are EH edges, then we can (potentially)
     reduce the number of transfers from the runtime by moving the landing
     pad from the original region to the new region.  This is a win when
     we remove the last CLEANUP region along a particular exception
     propagation path.  Since nothing changes except for the region with
     which the landing pad is associated, the PHI nodes do not need to be
     adjusted at all.  */
  if (!has_non_eh_pred)
    {
      cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
		 lp->index, new_region->index);

      /* ??? The CFG didn't change, but we may have rendered the
	 old EH region unreachable.  Trigger a cleanup there.  */
      return true;
    }

  return ret;

 succeed:
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
  remove_eh_landing_pad (lp);
  return true;
}
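
/* For illustration: a C++ cleanup whose destructor call was inlined
   away to nothing leaves a landing pad that merely does

       <landing pad>:
       resx 1;

   cleanup_empty_eh forwards the incoming EH edges to the outer
   region's landing pad, or removes them entirely when the exception
   would leave the function, much like jump threading across an empty
   block.  */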
/* Do a post-order traversal of the EH region tree.  Examine each
   post_landing_pad block and see if we can eliminate it as empty.  */

static bool
cleanup_all_empty_eh (void)
{
  bool changed = false;
  eh_landing_pad lp;
  int i;

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp)
      changed |= cleanup_empty_eh (lp);

  return changed;
}
/* Perform cleanups and lowering of exception handling
    1) cleanups regions with handlers doing nothing are optimized out
    2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
    3) Info about regions that are containing instructions, and regions
       reachable via local EH edges is collected
    4) Eh tree is pruned for regions no longer necessary.

   TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
	 Unify those that have the same failure decl and locus.
*/

static unsigned int
execute_cleanup_eh_1 (void)
{
  /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
     looking up unreachable landing pads.  */
  remove_unreachable_handlers ();

  /* Watch out for the region tree vanishing due to all unreachable.  */
  if (cfun->eh->region_tree)
    {
      bool changed = false;

      if (optimize)
	changed |= unsplit_all_eh ();
      changed |= cleanup_all_empty_eh ();

      if (changed)
	{
	  free_dominance_info (CDI_DOMINATORS);
	  free_dominance_info (CDI_POST_DOMINATORS);

	  /* We delayed all basic block deletion, as we may have performed
	     cleanups on EH edges while non-EH edges were still present.  */
	  delete_unreachable_blocks ();

	  /* We manipulated the landing pads.  Remove any region that no
	     longer has a landing pad.  */
	  remove_unreachable_handlers_no_lp ();

	  return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
	}
    }

  return 0;
}
namespace {

const pass_data pass_data_cleanup_eh =
{
  GIMPLE_PASS, /* type */
  "ehcleanup", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_cleanup_eh : public gimple_opt_pass
{
public:
  pass_cleanup_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
  virtual bool gate (function *fun)
    {
      return fun->eh != NULL && fun->eh->region_tree != NULL;
    }

  virtual unsigned int execute (function *);

}; // class pass_cleanup_eh

unsigned int
pass_cleanup_eh::execute (function *fun)
{
  int ret = execute_cleanup_eh_1 ();

  /* If the function no longer needs an EH personality routine
     clear it.  This exposes cross-language inlining opportunities
     and avoids references to a never defined personality routine.  */
  if (DECL_FUNCTION_PERSONALITY (current_function_decl)
      && function_needs_eh_personality (fun) != eh_personality_lang)
    DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;

  return ret;
}

} // anon namespace

gimple_opt_pass *
make_pass_cleanup_eh (gcc::context *ctxt)
{
  return new pass_cleanup_eh (ctxt);
}
/* Verify that BB containing STMT as the last statement, has precisely the
   edge that make_eh_edges would create.  */

DEBUG_FUNCTION bool
verify_eh_edges (gimple stmt)
{
  basic_block bb = gimple_bb (stmt);
  eh_landing_pad lp = NULL;
  int lp_nr;
  edge_iterator ei;
  edge e, eh_edge;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr > 0)
    lp = get_eh_landing_pad_from_number (lp_nr);

  eh_edge = NULL;
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      if (e->flags & EDGE_EH)
	{
	  if (eh_edge)
	    {
	      error ("BB %i has multiple EH edges", bb->index);
	      return true;
	    }
	  else
	    eh_edge = e;
	}
    }

  if (lp == NULL)
    {
      if (eh_edge)
	{
	  error ("BB %i can not throw but has an EH edge", bb->index);
	  return true;
	}
      return false;
    }

  if (!stmt_could_throw_p (stmt))
    {
      error ("BB %i last statement has incorrectly set lp", bb->index);
      return true;
    }

  if (eh_edge == NULL)
    {
      error ("BB %i is missing an EH edge", bb->index);
      return true;
    }

  if (eh_edge->dest != label_to_block (lp->post_landing_pad))
    {
      error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
      return true;
    }

  return false;
}
/* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */

DEBUG_FUNCTION bool
verify_eh_dispatch_edge (gimple stmt)
{
  eh_region r;
  eh_catch c;
  basic_block src, dst;
  bool want_fallthru = true;
  edge_iterator ei;
  edge e, fall_edge;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  src = gimple_bb (stmt);

  FOR_EACH_EDGE (e, ei, src->succs)
    gcc_assert (e->aux == NULL);

  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	{
	  dst = label_to_block (c->label);
	  e = find_edge (src, dst);
	  if (e == NULL)
	    {
	      error ("BB %i is missing an edge", src->index);
	      return true;
	    }
	  e->aux = (void *)e;

	  /* A catch-all handler doesn't have a fallthru.  */
	  if (c->type_list == NULL)
	    {
	      want_fallthru = false;
	      break;
	    }
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      dst = label_to_block (r->u.allowed.label);
      e = find_edge (src, dst);
      if (e == NULL)
	{
	  error ("BB %i is missing an edge", src->index);
	  return true;
	}
      e->aux = (void *)e;
      break;

    default:
      gcc_unreachable ();
    }

  fall_edge = NULL;
  FOR_EACH_EDGE (e, ei, src->succs)
    {
      if (e->flags & EDGE_FALLTHRU)
	{
	  if (fall_edge != NULL)
	    {
	      error ("BB %i too many fallthru edges", src->index);
	      return true;
	    }
	  fall_edge = e;
	}
      else if (e->aux)
	e->aux = NULL;
      else
	{
	  error ("BB %i has incorrect edge", src->index);
	  return true;
	}
    }
  if ((fall_edge != NULL) ^ want_fallthru)
    {
      error ("BB %i has incorrect fallthru edge", src->index);
      return true;
    }

  return false;
}