/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to the
     block's successor.  Simplification of the branch instruction is
     performed by the underlying infrastructure, so the branch can be
     converted to a simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
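/* An informal illustration (the labels are invented for exposition): a
   "forwarder block" contains no active insns and only passes control
   to its single successor, e.g.

	L1: goto L2;

   Edge forwarding retargets every edge entering L1 directly at L2;
   the now-unreachable L1 is then removed.  */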
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "timevar.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "toplev.h"
#include "cselib.h"
#include "params.h"
#include "tm_p.h"
#include "target.h"
#include "cfglayout.h"
#include "emit-rtl.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "expr.h"
#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
/* Set to true when we are running the first pass of the
   try_optimize_cfg loop.  */
static bool first_pass;
static bool try_crossjump_to_edge (int, edge, edge);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
static bool old_insns_match_p (int, rtx, rtx);
static basic_block merge_blocks_move (edge, basic_block, basic_block, int);
static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (int, edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static int mentions_nonequal_regs (rtx *, void *);
static void merge_memattrs (rtx, rtx);
/* Set flags for a newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
}
/* Recompute the forwarder flag after the block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
  else
    bb->flags &= ~BB_FORWARDER_BLOCK;
}
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */
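/* As an illustrative sketch (the labels are hypothetical):

	if (cond) goto L1;		if (!cond) goto L2;
	goto L2;		==>
     L1:			     L1:

   The conditional branch is inverted so that the block holding the
   unconditional jump becomes dead and can be deleted.  */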
static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}
/* Attempt to prove that the operation is a NOOP using CSElib, or mark
   its effect on the register.  Used by jump threading.  */
static bool
mark_effect (rtx exp, regset nonequal)
{
  int regno;
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
	 value is dead so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
	{
	  dest = XEXP (exp, 0);
	  regno = REGNO (dest);
	  CLEAR_REGNO_REG_SET (nonequal, regno);
	  if (regno < FIRST_PSEUDO_REGISTER)
	    {
	      int n = hard_regno_nregs[regno][GET_MODE (dest)];
	      while (--n > 0)
		CLEAR_REGNO_REG_SET (nonequal, regno + n);
	    }
	}
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
	return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
	return false;
      if (!REG_P (dest))
	return true;
      regno = REGNO (dest);
      SET_REGNO_REG_SET (nonequal, regno);
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = hard_regno_nregs[regno][GET_MODE (dest)];
	  while (--n > 0)
	    SET_REGNO_REG_SET (nonequal, regno + n);
	}
      return false;

    default:
      return false;
    }
}
/* Return nonzero if X is a register set in regset DATA.
   Called via for_each_rtx.  */

static int
mentions_nonequal_regs (rtx *x, void *data)
{
  regset nonequal = (regset) data;
  if (REG_P (*x))
    {
      int regno;

      regno = REGNO (*x);
      if (REGNO_REG_SET_P (nonequal, regno))
	return 1;
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = hard_regno_nregs[regno][GET_MODE (*x)];
	  while (--n > 0)
	    if (REGNO_REG_SET_P (nonequal, regno + n))
	      return 1;
	}
    }
  return 0;
}
/* Attempt to prove that the basic block B will have no side effects and
   always continues along the same edge if reached via E.  Return that
   edge if it exists, otherwise NULL.  */
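/* An illustrative example with hypothetical registers: if E->src ends
   in "if (r1 == 0) goto ..." and block B, entered when r1 == 0, ends
   in "if (r1 <= 0) goto L2" without modifying r1, the second test is
   already decided, so the jump may be threaded from E straight to L2.  */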
static edge
thread_jump (int mode, edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (b->flags & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  /* The second branch must end with an onlyjump, as we will eliminate
     the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we
     can't safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
	b->flags |= BB_NONTHREADABLE_BLOCK;
	return NULL;
      }

  cselib_init (false);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued via the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == PARALLEL)
	    {
	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
	    }
	  else
	    failed |= mark_effect (pat, nonequal);
	}

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not equal to the
     former block.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;

  /* In case liveness information is available, we need to prove equivalence
     only of the live values.  */
  if (mode & CLEANUP_UPDATE_LIFE)
    AND_REG_SET (nonequal, b->il.rtl->global_live_at_end);

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}
/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */
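/* Conceptually (a simplified sketch): for each successor edge of B we
   walk the chain of forwarder blocks B -> F1 -> F2 -> ... -> T and
   retarget the edge at T directly.  The walk is bounded by
   n_basic_blocks so that a cycle of forwarders cannot loop forever.  */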
static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
    return false;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass | (b->flags & BB_DIRTY);

      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we can succeed in forwarding the
	 fallthru edge to the same place as the branch edge of a conditional
	 branch and turn the conditional branch into an unconditional one.  */
      if (e->flags & EDGE_COMPLEX)
	{
	  ei_next (&ei);
	  continue;
	}

      target = first = e->dest;
      counter = NUM_FIXED_BLOCKS;

      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
	 up jumps that cross between hot/cold sections.

	 Basic block partitioning may result in some jumps that appear
	 to be optimizable (or blocks that appear to be mergeable), but which
	 really must be left untouched (they are required to make it safely
	 across partition boundaries).  See the comments at the top of
	 bb-reorder.c:partition_hot_cold_basic_blocks for complete
	 details.  */

      if (first != EXIT_BLOCK_PTR
	  && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
	return false;

      while (counter < n_basic_blocks)
	{
	  basic_block new_target = NULL;
	  bool new_target_threaded = false;
	  may_thread |= target->flags & BB_DIRTY;

	  if (FORWARDER_BLOCK_P (target)
	      && !(single_succ_edge (target)->flags & EDGE_CROSSING)
	      && single_succ (target) != EXIT_BLOCK_PTR)
	    {
	      /* Bypass trivial infinite loops.  */
	      new_target = single_succ (target);
	      if (target == new_target)
		counter = n_basic_blocks;
	    }

	  /* Allow threading only over one edge at a time to simplify
	     updating of probabilities.  */
	  else if ((mode & CLEANUP_THREADING) && may_thread)
	    {
	      edge t = thread_jump (mode, e, target);
	      if (t)
		{
		  if (!threaded_edges)
		    threaded_edges = xmalloc (sizeof (*threaded_edges)
					      * n_basic_blocks);
		  else
		    {
		      int i;

		      /* Detect an infinite loop across blocks not
			 including the start block.  */
		      for (i = 0; i < nthreaded_edges; ++i)
			if (threaded_edges[i] == t)
			  break;
		      if (i < nthreaded_edges)
			{
			  counter = n_basic_blocks;
			  break;
			}
		    }

		  /* Detect an infinite loop across the start block.  */
		  if (t->dest == b)
		    break;

		  gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
		  threaded_edges[nthreaded_edges++] = t;

		  new_target = t->dest;
		  new_target_threaded = true;
		}
	    }

	  if (!new_target)
	    break;

	  /* Avoid killing loop pre-headers, as they are the place the loop
	     optimizer wants to hoist code to.

	     For fallthru forwarders, the LOOP_BEG note must appear between
	     the header of the block and the CODE_LABEL of the loop; for
	     non-forwarders it must appear before the JUMP_INSN.  */
	  if ((mode & CLEANUP_PRE_LOOP) && optimize && flag_loop_optimize)
	    {
	      rtx insn = (EDGE_SUCC (target, 0)->flags & EDGE_FALLTHRU
			  ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target)));

	      if (!NOTE_P (insn))
		insn = NEXT_INSN (insn);

	      for (; insn && !LABEL_P (insn) && !INSN_P (insn);
		   insn = NEXT_INSN (insn))
		if (NOTE_P (insn)
		    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
		  break;

	      if (insn && NOTE_P (insn))
		break;

	      /* Do not clean up branches to just past the end of a loop
		 at this time; it can mess up the loop optimizer's
		 recognition of some patterns.  */

	      insn = PREV_INSN (BB_HEAD (target));
	      if (insn && NOTE_P (insn)
		  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
		break;
	    }

	  counter++;
	  target = new_target;
	  threaded |= new_target_threaded;
	}

      if (counter >= n_basic_blocks)
	{
	  if (dump_file)
	    fprintf (dump_file, "Infinite loop in BB %i.\n",
		     target->index);
	}
      else if (target == first)
	; /* We didn't do anything.  */
      else
	{
	  /* Save the values now, as the edge may get removed.  */
	  gcov_type edge_count = e->count;
	  int edge_probability = e->probability;
	  int edge_frequency;
	  int n = 0;

	  /* Don't force if target is exit block.  */
	  if (threaded && target != EXIT_BLOCK_PTR)
	    {
	      notice_new_block (redirect_edge_and_branch_force (e, target));
	      if (dump_file)
		fprintf (dump_file, "Conditionals threaded.\n");
	    }
	  else if (!redirect_edge_and_branch (e, target))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Forwarding edge %i->%i to %i failed.\n",
			 b->index, e->dest->index, target->index);
	      ei_next (&ei);
	      continue;
	    }

	  /* We successfully forwarded the edge.  Now update profile
	     data: for each edge we traversed in the chain, remove
	     the original edge's execution count.  */
	  edge_frequency = ((edge_probability * b->frequency
			     + REG_BR_PROB_BASE / 2)
			    / REG_BR_PROB_BASE);

	  if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
	    b->flags |= BB_FORWARDER_BLOCK;

	  do
	    {
	      edge t;

	      if (!single_succ_p (first))
		{
		  gcc_assert (n < nthreaded_edges);
		  t = threaded_edges [n++];
		  gcc_assert (t->src == first);
		  update_bb_profile_for_threading (first, edge_frequency,
						   edge_count, t);
		  update_br_prob_note (first);
		}
	      else
		{
		  first->count -= edge_count;
		  if (first->count < 0)
		    first->count = 0;
		  first->frequency -= edge_frequency;
		  if (first->frequency < 0)
		    first->frequency = 0;
		  /* It is possible that as the result of
		     threading we've removed edge as it is
		     threaded to the fallthru edge.  Avoid
		     getting out of sync.  */
		  if (n < nthreaded_edges
		      && first == threaded_edges [n]->src)
		    n++;
		  t = single_succ_edge (first);
		}

	      t->count -= edge_count;
	      if (t->count < 0)
		t->count = 0;
	      first = t->dest;
	    }
	  while (first != target);

	  changed = true;
	  continue;
	}
      ei_next (&ei);
    }

  if (threaded_edges)
    free (threaded_edges);
  return changed;
}
/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */
static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx barrier;
  bool only_notes;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  only_notes = squeeze_notes (&BB_HEAD (a), &BB_END (a));
  gcc_assert (!only_notes);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  a->flags |= BB_DIRTY;

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
	     a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */
static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx barrier, real_b_end;
  rtx label, table;
  bool only_notes;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B temporarily add the jump table
     to block B so that it will also be moved to the correct location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    BB_END (b) = table;

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  only_notes = squeeze_notes (&BB_HEAD (b), &BB_END (b));
  gcc_assert (!only_notes);

  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of B.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
	     b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so that cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return the basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */
static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (b) != BB_PARTITION (c))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;
      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
	fprintf (dump_file, "Merged %d and %d without moving.\n",
		 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;
      edge_iterator ei;

      /* Avoid overactive code motion, as the forwarder blocks should be
	 eliminated by edge redirection instead.  One exception might have
	 been if B is a forwarder block and C has no fallthru edge, but
	 that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
	return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
	 and loop notes.  This is done by squeezing out all the notes
	 and leaving them there to lie.  Not ideal, but functional.  */

      FOR_EACH_EDGE (tmp_edge, ei, c->succs)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      c_has_outgoing_fallthru = (tmp_edge != NULL);

      FOR_EACH_EDGE (tmp_edge, ei, b->preds)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
	next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
	 not have an outgoing fallthru, then it can be moved
	 immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
	{
	  merge_blocks_move_successor_nojumps (b, c);
	  return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
	}

      /* If B does not have an incoming fallthru, then it can be moved
	 immediately before C without introducing or modifying jumps.
	 C cannot be the first block, so we do not have to worry about
	 accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
	{
	  basic_block bb;

	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
	    return NULL;
	  bb = force_nonfallthru (b_fallthru_edge);
	  if (bb)
	    notice_new_block (bb);
	}

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
    }

  return NULL;
}
/* Remove the memory attributes of the MEM expressions X and Y
   if they are not equal.  */
static void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
    {
      if (! MEM_ATTRS (x))
	MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
	MEM_ATTRS (x) = 0;
      else
	{
	  rtx mem_size;

	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
	    {
	      set_mem_alias_set (x, 0);
	      set_mem_alias_set (y, 0);
	    }

	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
	    {
	      set_mem_expr (x, 0);
	      set_mem_expr (y, 0);
	      set_mem_offset (x, 0);
	      set_mem_offset (y, 0);
	    }
	  else if (MEM_OFFSET (x) != MEM_OFFSET (y))
	    {
	      set_mem_offset (x, 0);
	      set_mem_offset (y, 0);
	    }

	  if (!MEM_SIZE (x))
	    mem_size = NULL_RTX;
	  else if (!MEM_SIZE (y))
	    mem_size = NULL_RTX;
	  else
	    mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
				     INTVAL (MEM_SIZE (y))));
	  set_mem_size (x, mem_size);
	  set_mem_size (y, mem_size);

	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
	  set_mem_align (y, MEM_ALIGN (x));
	}
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return;

	  for (j = 0; j < XVECLEN (x, i); j++)
	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

	  break;

	case 'e':
	  merge_memattrs (XEXP (x, i), XEXP (y, i));
	}
    }
  return;
}
/* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */
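/* Informally: two insns whose patterns are, e.g., both
   (set (reg 1) (plus (reg 2) (reg 3))) are equivalent; before reload a
   match may also be established by substituting identical REG_EQUAL
   note contents into the sources.  */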
static bool
old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return false;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return false;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.  */

  if (CALL_P (i1)
      && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
			CALL_INSN_FUNCTION_USAGE (i2))
	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
    return false;

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
	 death notes must also be compared before it is certain that
	 the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);

      return false;

    done:
      ;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return true;

  /* Do not do EQUIV substitution after reload.  First, we're undoing the
     work of reload_cse.  Second, we may be undoing the work of the post-
     reload splitting pass.  */
  /* ??? Possibly add a new phase switch variable that can be used by
     targets to disallow the troublesome insns after splitting.  */
  if (!reload_completed)
    {
      /* The following code helps take care of G++ cleanups.  */
      rtx equiv1 = find_reg_equal_equiv_note (i1);
      rtx equiv2 = find_reg_equal_equiv_note (i2);

      if (equiv1 && equiv2
	  /* If the equivalences are not to a constant, they may
	     reference pseudos that no longer exist, so we can't
	     use them.  */
	  && (! reload_completed
	      || (CONSTANT_P (XEXP (equiv1, 0))
		  && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
	{
	  rtx s1 = single_set (i1);
	  rtx s2 = single_set (i2);
	  if (s1 != 0 && s2 != 0
	      && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
	    {
	      validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
	      validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
	      if (! rtx_renumbered_equal_p (p1, p2))
		cancel_changes (0);
	      else if (apply_change_group ())
		return true;
	    }
	}
    }

  return false;
}
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of equivalent instructions.  Store the first insns for that
   sequence in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */
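/* A hypothetical example: if BB1 and BB2 both end with the two insns
   "r3 = r1 + r2" and "mem[r4] = r3" (followed by equivalent simple
   jumps, which are skipped), the function returns 2 and sets *F1 and
   *F2 to the first insn of each matched tail, which the caller can
   then share by crossjumping.  */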
static int
flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
		      basic_block bb2, rtx *f1, rtx *f2)
{
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except for unconditional jump as insn.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
	ninsns++;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* Ignore notes.  */
      while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
	i1 = PREV_INSN (i1);

      while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
	i2 = PREV_INSN (i2);

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
	break;

      if (!old_insns_match_p (mode, i1, i2))
	break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
	{
	  /* If the merged insns have different REG_EQUAL notes, then
	     remove them.  */
	  rtx equiv1 = find_reg_equal_equiv_note (i1);
	  rtx equiv2 = find_reg_equal_equiv_note (i2);

	  if (equiv1 && !equiv2)
	    remove_note (i1, equiv1);
	  else if (!equiv1 && equiv2)
	    remove_note (i2, equiv2);
	  else if (equiv1 && equiv2
		   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
	    {
	      remove_note (i1, equiv1);
	      remove_note (i2, equiv2);
	    }

	  afterlast1 = last1, afterlast2 = last2;
	  last1 = i1, last2 = i2;
	  ninsns++;
	}

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, ninsns--;
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}
/* Return true iff the condbranches at the end of BB1 and BB2 match.  */
bool
condjump_equiv_p (struct equiv_info *info, bool call_init)
{
  basic_block bb1 = info->x_block;
  basic_block bb2 = info->y_block;
  edge b1 = BRANCH_EDGE (bb1);
  edge b2 = BRANCH_EDGE (bb2);
  edge f1 = FALLTHRU_EDGE (bb1);
  edge f2 = FALLTHRU_EDGE (bb2);
  bool reverse, match;
  rtx set1, set2, cond1, cond2;
  rtx src1, src2;
  enum rtx_code code1, code2;

  /* Get around possible forwarders on fallthru edges.  Other cases
     should be optimized out already.  */
  if (FORWARDER_BLOCK_P (f1->dest))
    f1 = single_succ_edge (f1->dest);

  if (FORWARDER_BLOCK_P (f2->dest))
    f2 = single_succ_edge (f2->dest);

  /* To simplify use of this function, return false if there are
     unneeded forwarder blocks.  These will get eliminated later
     during cleanup_cfg.  */
  if (FORWARDER_BLOCK_P (f1->dest)
      || FORWARDER_BLOCK_P (f2->dest)
      || FORWARDER_BLOCK_P (b1->dest)
      || FORWARDER_BLOCK_P (b2->dest))
    return false;

  if (f1->dest == f2->dest && b1->dest == b2->dest)
    reverse = false;
  else if (f1->dest == b2->dest && b1->dest == f2->dest)
    reverse = true;
  else
    return false;

  set1 = pc_set (BB_END (bb1));
  set2 = pc_set (BB_END (bb2));
  if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    reverse = !reverse;

  src1 = SET_SRC (set1);
  src2 = SET_SRC (set2);
  cond1 = XEXP (src1, 0);
  cond2 = XEXP (src2, 0);
  code1 = GET_CODE (cond1);
  if (reverse)
    code2 = reversed_comparison_code (cond2, BB_END (bb2));
  else
    code2 = GET_CODE (cond2);

  if (code2 == UNKNOWN)
    return false;

  if (call_init && !struct_equiv_init (STRUCT_EQUIV_START | info->mode, info))
    gcc_unreachable ();
  /* Make the sources of the pc sets unreadable so that when we call
     insns_match_p it won't process them.
     The death_notes_match_p from insns_match_p won't see the local registers
     used for the pc set, but that could only cause missed optimizations when
     there are actually condjumps that use stack registers.  */
  SET_SRC (set1) = pc_rtx;
  SET_SRC (set2) = pc_rtx;
  /* Verify codes and operands match.  */
  if (code1 == code2)
    {
      match = (insns_match_p (BB_END (bb1), BB_END (bb2), info)
	       && rtx_equiv_p (&XEXP (cond1, 0), XEXP (cond2, 0), 1, info)
	       && rtx_equiv_p (&XEXP (cond1, 1), XEXP (cond2, 1), 1, info));

    }
  else if (code1 == swap_condition (code2))
    {
      match = (insns_match_p (BB_END (bb1), BB_END (bb2), info)
	       && rtx_equiv_p (&XEXP (cond1, 1), XEXP (cond2, 0), 1, info)
	       && rtx_equiv_p (&XEXP (cond1, 0), XEXP (cond2, 1), 1, info));

    }
  else
    match = false;
  SET_SRC (set1) = src1;
  SET_SRC (set2) = src2;
  match &= verify_changes (0);

  /* If we return true, we will join the blocks.  Which means that
     we will only have one branch prediction bit to work with.  Thus
     we require the existing branches to have probabilities that are
     roughly similar.  */
  if (match
      && !optimize_size
      && maybe_hot_bb_p (bb1)
      && maybe_hot_bb_p (bb2))
    {
      int prob2;

      if (b1->dest == b2->dest)
	prob2 = b2->probability;
      else
	/* Do not use f2 probability as f2 may be forwarded.  */
	prob2 = REG_BR_PROB_BASE - b2->probability;

      /* Fail if the difference in probabilities is greater than 50%.
	 This rules out two well-predicted branches with opposite
	 outcomes.  */
      if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
		     bb1->index, bb2->index, b1->probability, prob2);

	  match = false;
	}
    }

  if (dump_file && match)
    fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
	     bb1->index, bb2->index);

  return match;
}
/* Return true iff outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */
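/* E.g. (informal): two blocks that both end in "if (r1 == 0) goto L1"
   with fallthru to L2 have matching outgoing edges; so does a pair
   whose conditions are reversed and whose branch and fallthru
   destinations are swapped.  */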
static bool
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;
  edge_iterator ei;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (single_succ_p (bb1)
      && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
    return (single_succ_p (bb2)
	    && (single_succ_edge (bb2)->flags
		& (EDGE_COMPLEX | EDGE_FAKE)) == 0
	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (EDGE_COUNT (bb1->succs) == 2
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (EDGE_COUNT (bb2->succs) != 2
	  || !any_condjump_p (BB_END (bb2))
	  || !onlyjump_p (BB_END (bb2)))
	return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
	 should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
	f1 = single_succ_edge (f1->dest);

      if (FORWARDER_BLOCK_P (f2->dest))
	f2 = single_succ_edge (f2->dest);

      /* To simplify use of this function, return false if there are
	 unneeded forwarder blocks.  These will get eliminated later
	 during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
	  || FORWARDER_BLOCK_P (f2->dest)
	  || FORWARDER_BLOCK_P (b1->dest)
	  || FORWARDER_BLOCK_P (b2->dest))
	return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
	reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
	reverse = true;
      else
	return false;

      set1 = pc_set (BB_END (bb1));
      set2 = pc_set (BB_END (bb2));
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
	reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
	code2 = reversed_comparison_code (cond2, BB_END (bb2));
      else
	code2 = GET_CODE (cond2);

      if (code2 == UNKNOWN)
	return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
	       || (code1 == swap_condition (code2)
		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
					      XEXP (cond2, 0))
		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
					      XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
	 we will only have one branch prediction bit to work with.  Thus
	 we require the existing branches to have probabilities that are
	 roughly similar.  */
      if (match
	  && !optimize_size
	  && maybe_hot_bb_p (bb1)
	  && maybe_hot_bb_p (bb2))
	{
	  int prob2;

	  if (b1->dest == b2->dest)
	    prob2 = b2->probability;
	  else
	    /* Do not use f2 probability as f2 may be forwarded.  */
	    prob2 = REG_BR_PROB_BASE - b2->probability;

	  /* Fail if the difference in probabilities is greater than 50%.
	     This rules out two well-predicted branches with opposite
	     outcomes.  */
	  if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
			 bb1->index, bb2->index, b1->probability, prob2);

	      return false;
	    }
	}

      if (dump_file && match)
	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
		 bb1->index, bb2->index);

      return match;
    }

  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

  /* Check whether there are tablejumps in the end of BB1 and BB2.
     Return true if they are identical.  */
    {
      rtx label1, label2;
      rtx table1, table2;

      if (tablejump_p (BB_END (bb1), &label1, &table1)
	  && tablejump_p (BB_END (bb2), &label2, &table2)
	  && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
	{
	  /* The labels should never be the same rtx.  If they really are same
	     the jump tables are same too. So disable crossjumping of blocks BB1
	     and BB2 because when deleting the common insns in the end of BB1
	     by delete_basic_block () the jump table would be deleted too.  */
	  /* If LABEL2 is referenced in BB1->END do not do anything
	     because we would lose information when replacing
	     LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
	  if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
	    {
	      /* Set IDENTICAL to true when the tables are identical.  */
	      bool identical = false;
	      rtx p1, p2;

	      p1 = PATTERN (table1);
	      p2 = PATTERN (table2);
	      if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
		identical = true;
	      else if (GET_CODE (p1) == ADDR_DIFF_VEC
		       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
		       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
		       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
		{
		  int i;

		  identical = true;
		  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
		    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
		      identical = false;
		}

	      if (identical)
		{
		  replace_label_data rr;
		  bool match;

		  /* Temporarily replace references to LABEL1 with LABEL2
		     in BB1->END so that we could compare the instructions.  */
		  rr.r1 = label1;
		  rr.r2 = label2;
		  rr.update_label_nuses = false;
		  for_each_rtx (&BB_END (bb1), replace_label, &rr);

		  match = old_insns_match_p (mode, BB_END (bb1), BB_END (bb2));
		  if (dump_file && match)
		    fprintf (dump_file,
			     "Tablejumps in bb %i and %i match.\n",
			     bb1->index, bb2->index);

		  /* Set the original label in BB1->END because when deleting
		     a block whose end is a tablejump, the tablejump referenced
		     from the instruction is deleted too.  */
		  rr.r1 = label2;
		  rr.r2 = label1;
		  for_each_rtx (&BB_END (bb1), replace_label, &rr);

		  return match;
		}
	    }
	  return false;
	}
    }

  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.  */
  if (!old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
    return false;

  /* Search the outgoing edges, ensure that the counts do match, find possible
     fallthru and exception handling edges since these need more
     validation.  */
  if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
    return false;

  FOR_EACH_EDGE (e1, ei, bb1->succs)
    {
      e2 = EDGE_SUCC (bb2, ei.index);

      if (e1->flags & EDGE_EH)
	nehedges1++;

      if (e2->flags & EDGE_EH)
	nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
	fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
	fallthru2 = e2;
    }

  /* If number of edges of various types does not match, fail.  */
  if (nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* Fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
			? single_succ (fallthru1->dest): fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
			? single_succ (fallthru2->dest): fallthru2->dest);

      if (d1 != d2)
	return false;
    }

  /* Ensure the same EH region.  */
  {
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);

    if (!n1 && n2)
      return false;

    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
      return false;
  }

  /* We don't need to match the rest of edges as above checks should be enough
     to ensure that they are equivalent.  */
  return true;
}
/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */
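/* Schematically (an illustrative sketch): when E1->src and E2->src end
   in the same insn sequence, E1->src is split just before its copy of
   the sequence and the resulting edge is redirected to (a possibly
   split-off tail of) E2->src, leaving only one copy of the common
   code.  */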
static bool
try_crossjump_to_edge (int mode, edge e1, edge e2)
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  rtx newpos1, newpos2;
  edge s;
  edge_iterator ei;

  newpos1 = newpos2 = NULL_RTX;

  /* If we have partitioned hot/cold basic blocks, it is a bad idea
     to try this optimization.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition && no_new_pseudos)
    return false;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (single_pred_p (src1)
      && FORWARDER_BLOCK_P (src1))
    e1 = single_pred_edge (src1), src1 = e1->src;

  if (single_pred_p (src2)
      && FORWARDER_BLOCK_P (src2))
    e2 = single_pred_edge (src2), src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than one forwarder block would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (single_succ (e1->dest)))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (single_succ (e2->dest)))
    return false;

  /* Likewise with dead code (possibly newly created by the other
     optimizations of cfg_cleanup).  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);

  /* Don't proceed with the crossjump unless we found a sufficient number
     of matching instructions or the 'from' block was totally matched
     (such that its predecessors will hopefully be redirected and the
     block removed).  */
  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
      && (newpos1 != BB_HEAD (src1)))
    return false;

  /* Here we know that the insns in the end of SRC1 which are common with SRC2
     will be deleted.
     If we have tablejumps in the end of SRC1 and SRC2
     they have been already compared for equivalence in outgoing_edges_match ()
     so replace the references to TABLE1 by references to TABLE2.  */
    {
      rtx label1, label2;
      rtx table1, table2;

      if (tablejump_p (BB_END (src1), &label1, &table1)
	  && tablejump_p (BB_END (src2), &label2, &table2)
	  && label1 != label2)
	{
	  replace_label_data rr;
	  rtx insn;

	  /* Replace references to LABEL1 with LABEL2.  */
	  rr.r1 = label1;
	  rr.r2 = label2;
	  rr.update_label_nuses = true;
	  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	    {
	      /* Do not replace the label in SRC1->END because when deleting
		 a block whose end is a tablejump, the tablejump referenced
		 from the instruction is deleted too.  */
	      if (insn != BB_END (src1))
		for_each_rtx (&insn, replace_label, &rr);
	    }
	}
    }

  /* Avoid splitting if possible.  We must always split when SRC2 has
     EH predecessor edges, or we may end up with basic blocks with both
     normal and EH predecessor edges.  */
  if (newpos2 == BB_HEAD (src2)
      && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
    redirect_to = src2;
  else
    {
      if (newpos2 == BB_HEAD (src2))
	{
	  /* Skip possible basic block header.  */
	  if (LABEL_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  if (NOTE_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	}

      if (dump_file)
	fprintf (dump_file, "Splitting bb %i before %i insns\n",
		 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (dump_file)
    fprintf (dump_file,
	     "Cross jumping from bb %i to bb %i; %i common insns\n",
	     src1->index, src2->index, nmatch);

  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;
  /* We may have some registers visible through the block.  */
  redirect_to->flags |= BB_DIRTY;

  /* Recompute the frequencies and counts of outgoing edges.  */
  FOR_EACH_EDGE (s, ei, redirect_to->succs)
    {
      edge s2;
      edge_iterator ei;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
	d = single_succ (d);

      FOR_EACH_EDGE (s2, ei, src1->succs)
	{
	  basic_block d2 = s2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = single_succ (d2);
	  if (d == d2)
	    break;
	}

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
	 that there is no more than one in the chain, so we can't run
	 into infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
	{
	  single_succ_edge (s->dest)->count += s2->count;
	  s->dest->count += s2->count;
	  s->dest->frequency += EDGE_FREQUENCY (s);
	}

      if (FORWARDER_BLOCK_P (s2->dest))
	{
	  single_succ_edge (s2->dest)->count -= s2->count;
	  if (single_succ_edge (s2->dest)->count < 0)
	    single_succ_edge (s2->dest)->count = 0;
	  s2->dest->count -= s2->count;
	  s2->dest->frequency -= EDGE_FREQUENCY (s);
	  if (s2->dest->frequency < 0)
	    s2->dest->frequency = 0;
	  if (s2->dest->count < 0)
	    s2->dest->count = 0;
	}

      if (!redirect_to->frequency && !src1->frequency)
	s->probability = (s->probability + s2->probability) / 2;
      else
	s->probability
	  = ((s->probability * redirect_to->frequency +
	      s2->probability * src1->frequency)
	     / (redirect_to->frequency + src1->frequency));
    }

  update_br_prob_note (redirect_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (LABEL_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  if (NOTE_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = single_succ (redirect_from);

  redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);
  if (redirect_to != src2)
    update_forwarder_flag (src2);

  return true;
}
/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */
static bool
try_crossjump_bb (int mode, basic_block bb)
{
  edge e, e2, fallthru;
  edge_iterator ei;
  unsigned max, ix, ix2;
  basic_block ev, ev2;
  bool changed;

  /* Nothing to do if there are not at least two incoming edges.  */
  if (EDGE_COUNT (bb->preds) < 2)
    return false;

  /* Don't crossjump if this block ends in a computed jump,
     unless we are optimizing for size.  */
  if (!optimize_size
      && bb != EXIT_BLOCK_PTR
      && computed_jump_p (BB_END (bb)))
    return false;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
      BB_PARTITION (EDGE_PRED (bb, 1)->src)
      || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  fallthru = NULL;
  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);

  if (EDGE_COUNT (bb->preds) > max)
    return false;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (e->flags & EDGE_FALLTHRU)
	fallthru = e;
    }

  changed = false;
  for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
    {
      e = EDGE_PRED (ev, ix);
      ix++;

      /* As noted above, first try with the fallthru predecessor.  */
      if (fallthru)
	{
	  /* Don't combine the fallthru edge into anything else.
	     If there is a match, we'll do it the other way around.  */
	  if (e == fallthru)
	    continue;
	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && (!(e->src->flags & BB_DIRTY)
		  && !(fallthru->src->flags & BB_DIRTY)))
	    continue;

	  if (try_crossjump_to_edge (mode, e, fallthru))
	    {
	      changed = true;
	      ix = 0;
	      ev = bb;
	      continue;
	    }
	}

      /* Non-obvious work limiting check: Recognize that we're going
	 to call try_crossjump_bb on every basic block.  So if we have
	 two blocks with lots of outgoing edges (a switch) and they
	 share lots of common destinations, then we would do the
	 cross-jump check once for each common destination.

	 Now, if the blocks actually are cross-jump candidates, then
	 all of their destinations will be shared.  Which means that
	 we only need check them for cross-jump candidacy once.  We
	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
	 choosing to do the check from the block for which the edge
	 in question is the first successor of A.  */
      if (EDGE_SUCC (e->src, 0) != e)
	continue;

      for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
	{
	  e2 = EDGE_PRED (ev2, ix2);
	  ix2++;

	  if (e2 == e)
	    continue;

	  /* We've already checked the fallthru edge above.  */
	  if (e2 == fallthru)
	    continue;

	  /* The "first successor" check above only prevents multiple
	     checks of crossjump(A,B).  In order to prevent redundant
	     checks of crossjump(B,A), require that A be the block
	     with the lowest index.  */
	  if (e->src->index > e2->src->index)
	    continue;

	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && (!(e->src->flags & BB_DIRTY)
		  && !(e2->src->flags & BB_DIRTY)))
	    continue;

	  if (try_crossjump_to_edge (mode, e, e2))
	    {
	      changed = true;
	      ev2 = bb;
	      ix = 0;
	      break;
	    }
	}
    }

  return changed;
}
/* Do simple CFG optimizations - basic block merging, simplifying of jump
   instructions, etc.  Return nonzero if changes were made.  */
static bool
try_optimize_cfg (int mode)
{
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  basic_block bb, b, next;

  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
    clear_bb_flags ();

  FOR_EACH_BB (bb)
    update_forwarder_flag (bb);

  if (! targetm.cannot_modify_jumps_p ())
    {
      first_pass = true;
      /* Attempt to merge blocks as made possible by edge removal.  If
	 a block has only one successor, and the successor has only
	 one predecessor, they may be combined.  */
      do
	{
	  changed = false;
	  iterations++;

	  if (dump_file)
	    fprintf (dump_file,
		     "\n\ntry_optimize_cfg iteration %i\n\n",
		     iterations);

	  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
	    {
	      basic_block c;
	      edge s;
	      bool changed_here = false;

	      /* Delete trivially dead basic blocks.  */
	      while (EDGE_COUNT (b->preds) == 0)
		{
		  c = b->prev_bb;
		  if (dump_file)
		    fprintf (dump_file, "Deleting block %i.\n",
			     b->index);

		  delete_basic_block (b);
		  if (!(mode & CLEANUP_CFGLAYOUT))
		    changed = true;
		  b = c;
		}

	      /* Remove code labels no longer used.  */
	      if (single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
		  && LABEL_P (BB_HEAD (b))
		  /* If the previous block ends with a branch to this
		     block, we can't delete the label.  Normally this
		     is a condjump that is yet to be simplified, but
		     if CASE_DROPS_THRU, this can be a tablejump with
		     some element going to the same place as the
		     default (fallthru).  */
		  && (single_pred (b) == ENTRY_BLOCK_PTR
		      || !JUMP_P (BB_END (single_pred (b)))
		      || ! label_is_jump_target_p (BB_HEAD (b),
						   BB_END (single_pred (b)))))
		{
		  rtx label = BB_HEAD (b);

		  delete_insn_chain (label, label);
		  /* In the case the label is undeletable, move it after the
		     BASIC_BLOCK note.  */
		  if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
		    {
		      rtx bb_note = NEXT_INSN (BB_HEAD (b));

		      reorder_insns_nobb (label, label, bb_note);
		      BB_HEAD (b) = bb_note;
		    }
		  if (dump_file)
		    fprintf (dump_file, "Deleted label in block %i.\n",
			     b->index);
		}

	      /* If we fall through an empty block, we can remove it.  */
	      if (!(mode & CLEANUP_CFGLAYOUT)
		  && single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !LABEL_P (BB_HEAD (b))
		  && FORWARDER_BLOCK_P (b)
		  /* Note that forwarder_block_p true ensures that
		     there is a successor for this block.  */
		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
		  && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
		{
		  if (dump_file)
		    fprintf (dump_file,
			     "Deleting fallthru block %i.\n",
			     b->index);

		  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
		  redirect_edge_succ_nodup (single_pred_edge (b),
					    single_succ (b));
		  delete_basic_block (b);
		  changed = true;
		  b = c;
		}

	      if (single_succ_p (b)
		  && (s = single_succ_edge (b))
		  && !(s->flags & EDGE_COMPLEX)
		  && (c = s->dest) != EXIT_BLOCK_PTR
		  && single_pred_p (c)
		  && b != c)
		{
		  /* When not in cfg_layout mode use code aware of reordering
		     INSN.  This code possibly creates new basic blocks so it
		     does not fit merge_blocks interface and is kept here in
		     hope that it will become useless once more of compiler
		     is transformed to use cfg_layout mode.  */

		  if ((mode & CLEANUP_CFGLAYOUT)
		      && can_merge_blocks_p (b, c))
		    {
		      merge_blocks (b, c);
		      update_forwarder_flag (b);
		      changed_here = true;
		    }
		  else if (!(mode & CLEANUP_CFGLAYOUT)
			   /* If the jump insn has side effects,
			      we can't kill the edge.  */
			   && (!JUMP_P (BB_END (b))
			       || (reload_completed
				   ? simplejump_p (BB_END (b))
				   : (onlyjump_p (BB_END (b))
				      && !tablejump_p (BB_END (b),
						       NULL, NULL))))
			   && (next = merge_blocks_move (s, b, c, mode)))
		    {
		      b = next;
		      changed_here = true;
		    }
		}

	      /* Simplify branch over branch.  */
	      if ((mode & CLEANUP_EXPENSIVE)
		  && !(mode & CLEANUP_CFGLAYOUT)
		  && try_simplify_condjump (b))
		changed_here = true;

	      /* If B has a single outgoing edge, but uses a
		 non-trivial jump instruction without side-effects, we
		 can either delete the jump entirely, or replace it
		 with a simple unconditional jump.  */
	      if (single_succ_p (b)
		  && single_succ (b) != EXIT_BLOCK_PTR
		  && onlyjump_p (BB_END (b))
		  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
		  && try_redirect_by_replacing_jump (single_succ_edge (b),
						     single_succ (b),
						     (mode & CLEANUP_CFGLAYOUT) != 0))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}

	      /* Simplify branch to branch.  */
	      if (try_forward_edges (mode, b))
		changed_here = true;

	      /* Look for shared code between blocks.  */
	      if ((mode & CLEANUP_CROSSJUMP)
		  && try_crossjump_bb (mode, b))
		changed_here = true;

	      /* Don't get confused by the index shift caused by
		 deleting blocks.  */
	      if (!changed_here)
		b = b->next_bb;
	      else
		changed = true;
	    }

	  if ((mode & CLEANUP_CROSSJUMP)
	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
	    changed = true;

#ifdef ENABLE_CHECKING
	  if (changed)
	    verify_flow_info ();
#endif

	  changed_overall |= changed;
	  first_pass = false;
	}
      while (changed);
    }

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  FOR_ALL_BB (b)
    b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);

  return changed_overall;
}
/* Delete all unreachable basic blocks.  */
bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, next_bb;

  find_unreachable_blocks ();

  /* Delete all unreachable basic blocks.  */

  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
    {
      next_bb = b->next_bb;

      if (!(b->flags & BB_REACHABLE))
	{
	  delete_basic_block (b);
	  changed = true;
	}
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}
/* Merge sequential blocks if possible.  */
bool
merge_seq_blocks (void)
{
  basic_block bb;
  bool changed = false;

  for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
    {
      if (single_succ_p (bb)
	  && can_merge_blocks_p (bb, single_succ (bb)))
	{
	  /* Merge the blocks and retry.  */
	  merge_blocks (bb, single_succ (bb));
	  changed = true;
	  continue;
	}

      bb = bb->next_bb;
    }

  return changed;
}
/* Tidy the CFG by deleting unreachable code and whatnot.  */
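/* A typical call, as made from various passes in the compiler, might
   look like e.g.

	cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP);

   enabling code motion and tail merging on top of the basic cleanups.  */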
bool
cleanup_cfg (int mode)
{
  bool changed = false;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
	 now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE))
	  && !reload_completed)
	delete_trivially_dead_insns (get_insns(), max_reg_num ());
    }

  compact_blocks ();

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks (), changed = true;
      if (mode & CLEANUP_UPDATE_LIFE)
	{
	  /* Cleaning up CFG introduces more opportunities for dead code
	     removal that in turn may introduce more opportunities for
	     cleaning up the CFG.  */
	  if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
						 PROP_DEATH_NOTES
						 | PROP_SCAN_DEAD_CODE
						 | PROP_KILL_DEAD_CODE
						 | ((mode & CLEANUP_LOG_LINKS)
						    ? PROP_LOG_LINKS : 0)))
	    break;
	}
      else if (!(mode & CLEANUP_NO_INSN_DEL)
	       && (mode & CLEANUP_EXPENSIVE)
	       && !reload_completed)
	{
	  if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
	    break;
	}
      else
	break;
      delete_dead_jumptables ();
    }

  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}
static unsigned int
rest_of_handle_jump (void)
{
  delete_unreachable_blocks ();

  if (cfun->tail_call_emit)
    fixup_tail_calls ();
  return 0;
}

struct tree_opt_pass pass_jump =
{
  "sibling",                            /* name */
  NULL,                                 /* gate */
  rest_of_handle_jump,                  /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_JUMP,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_ggc_collect,                     /* todo_flags_start */
  TODO_dump_func |
  TODO_verify_flow,                     /* todo_flags_finish */
  'i'                                   /* letter */
};
static unsigned int
rest_of_handle_jump2 (void)
{
  /* Turn NOTE_INSN_EXPECTED_VALUE into REG_BR_PROB.  Do this
     before jump optimization switches branch directions.  */
  if (flag_guess_branch_prob)
    expected_value_to_br_prob ();

  delete_trivially_dead_insns (get_insns (), max_reg_num ());
  reg_scan (get_insns (), max_reg_num ());
  if (dump_file)
    dump_flow_info (dump_file);
  cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0) | CLEANUP_PRE_LOOP
	       | (flag_thread_jumps ? CLEANUP_THREADING : 0));

  create_loop_notes ();

  purge_line_number_notes ();

  if (optimize)
    cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_PRE_LOOP);

  /* Jump optimization, and the removal of NULL pointer checks, may
     have reduced the number of instructions substantially.  CSE, and
     future passes, allocate arrays whose dimensions involve the
     maximum instruction UID, so if we can reduce the maximum UID
     we'll save big on memory.  */
  renumber_insns (dump_file);
  return 0;
}

struct tree_opt_pass pass_jump2 =
{
  "jump",                               /* name */
  NULL,                                 /* gate */
  rest_of_handle_jump2,                 /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_JUMP,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_ggc_collect,                     /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
  'j'                                   /* letter */
};