/*
 * Flow - walk the linearized flowgraph, simplifying it as we
 * go along.
 *
 * Copyright (C) 2004 Linus Torvalds
 */
16 #include "expression.h"
17 #include "linearize.h"
/*
 * Monotonically increasing "generation" counter used to tag basic
 * blocks during graph walks (reachability marking, dominator
 * searches) so each walk can tell which blocks it already visited.
 */
unsigned long bb_generation;
24 * Dammit, if we have a phi-node followed by a conditional
25 * branch on that phi-node, we should damn well be able to
26 * do something about the source. Maybe.
28 static int rewrite_branch(struct basic_block
*bb
,
29 struct basic_block
**ptr
,
30 struct basic_block
*old
,
31 struct basic_block
*new)
33 if (*ptr
!= old
|| new == old
|| !bb
->ep
)
36 /* We might find new if-conversions or non-dominating CSEs */
37 /* we may also create new dead cycles */
38 repeat_phase
|= REPEAT_CSE
| REPEAT_CFG_CLEANUP
;
40 replace_bb_in_list(&bb
->children
, old
, new, 1);
41 remove_bb_from_list(&old
->parents
, bb
, 1);
42 add_bb(&new->parents
, bb
);
47 * Return the known truth value of a pseudo, or -1 if
50 static int pseudo_truth_value(pseudo_t pseudo
)
52 switch (pseudo
->type
) {
54 return !!pseudo
->value
;
57 struct instruction
*insn
= pseudo
->def
;
59 /* A symbol address is always considered true.. */
60 if (insn
->opcode
== OP_SYMADDR
&& insn
->target
== pseudo
)
70 * Does a basic block depend on the pseudos that "src" defines?
72 static int bb_depends_on(struct basic_block
*target
, struct basic_block
*src
)
76 FOR_EACH_PTR(src
->defines
, pseudo
) {
77 if (pseudo_in_list(target
->needs
, pseudo
))
79 } END_FOR_EACH_PTR(pseudo
);
84 * This really should be handled by bb_depends_on()
85 * which efficiently check the dependence using the
86 * defines - needs liveness info. Problem is that
87 * there is no liveness done on OP_PHI & OP_PHISRC.
89 * This function add the missing dependency checks.
91 static int bb_depends_on_phi(struct basic_block
*target
, struct basic_block
*src
)
93 struct instruction
*insn
;
94 FOR_EACH_PTR(src
->insns
, insn
) {
97 if (insn
->opcode
!= OP_PHI
)
99 if (pseudo_in_list(target
->needs
, insn
->target
))
101 } END_FOR_EACH_PTR(insn
);
106 * When we reach here, we have:
107 * - a basic block that ends in a conditional branch and
108 * that has no side effects apart from the pseudos it
110 * - the phi-node that the conditional branch depends on
111 * - full pseudo liveness information
113 * We need to check if any of the _sources_ of the phi-node
114 * may be constant, and not actually need this block at all.
116 static int try_to_simplify_bb(struct basic_block
*bb
, struct instruction
*first
, struct instruction
*second
)
123 * This a due to improper dominance tracking during
124 * simplify_symbol_usage()/conversion to SSA form.
125 * No sane simplification can be done when we have this.
127 bogus
= bb_list_size(bb
->parents
) != pseudo_list_size(first
->phi_list
);
129 FOR_EACH_PTR(first
->phi_list
, phi
) {
130 struct instruction
*def
= phi
->def
;
131 struct basic_block
*source
, *target
;
133 struct instruction
*br
;
140 if (!pseudo
|| !source
)
142 br
= last_instruction(source
->insns
);
145 if (br
->opcode
!= OP_CBR
&& br
->opcode
!= OP_BR
)
147 true = pseudo_truth_value(pseudo
);
150 target
= true ? second
->bb_true
: second
->bb_false
;
151 if (bb_depends_on(target
, bb
))
153 if (bb_depends_on_phi(target
, bb
))
155 changed
|= rewrite_branch(source
, &br
->bb_true
, bb
, target
);
156 changed
|= rewrite_branch(source
, &br
->bb_false
, bb
, target
);
157 if (changed
&& !bogus
)
158 kill_use(THIS_ADDRESS(phi
));
159 } END_FOR_EACH_PTR(phi
);
163 static int bb_has_side_effects(struct basic_block
*bb
)
165 struct instruction
*insn
;
166 FOR_EACH_PTR(bb
->insns
, insn
) {
167 switch (insn
->opcode
) {
169 /* FIXME! This should take "const" etc into account */
177 /* FIXME! This should take "volatile" etc into account */
183 } END_FOR_EACH_PTR(insn
);
187 static int simplify_phi_branch(struct basic_block
*bb
, struct instruction
*br
)
189 pseudo_t cond
= br
->cond
;
190 struct instruction
*def
;
192 if (cond
->type
!= PSEUDO_REG
)
195 if (def
->bb
!= bb
|| def
->opcode
!= OP_PHI
)
197 if (bb_has_side_effects(bb
))
199 return try_to_simplify_bb(bb
, def
, br
);
202 static int simplify_branch_branch(struct basic_block
*bb
, struct instruction
*br
,
203 struct basic_block
**target_p
, int true)
205 struct basic_block
*target
= *target_p
, *final
;
206 struct instruction
*insn
;
211 insn
= last_instruction(target
->insns
);
212 if (!insn
|| insn
->opcode
!= OP_CBR
|| insn
->cond
!= br
->cond
)
215 * Ahhah! We've found a branch to a branch on the same conditional!
216 * Now we just need to see if we can rewrite the branch..
219 final
= true ? insn
->bb_true
: insn
->bb_false
;
220 if (bb_has_side_effects(target
))
221 goto try_to_rewrite_target
;
222 if (bb_depends_on(final
, target
))
223 goto try_to_rewrite_target
;
224 if (bb_depends_on_phi(final
, target
))
226 return rewrite_branch(bb
, target_p
, target
, final
);
228 try_to_rewrite_target
:
230 * If we're the only parent, at least we can rewrite the
231 * now-known second branch.
233 if (bb_list_size(target
->parents
) != 1)
235 insert_branch(target
, insn
, final
);
239 static int simplify_one_branch(struct basic_block
*bb
, struct instruction
*br
)
241 if (simplify_phi_branch(bb
, br
))
243 return simplify_branch_branch(bb
, br
, &br
->bb_true
, 1) |
244 simplify_branch_branch(bb
, br
, &br
->bb_false
, 0);
247 static int simplify_branch_nodes(struct entrypoint
*ep
)
250 struct basic_block
*bb
;
252 FOR_EACH_PTR(ep
->bbs
, bb
) {
253 struct instruction
*br
= last_instruction(bb
->insns
);
255 if (!br
|| br
->opcode
!= OP_CBR
)
257 changed
|= simplify_one_branch(bb
, br
);
258 } END_FOR_EACH_PTR(bb
);
/*
 * This is called late - when we have intra-bb liveness information..
 */
int simplify_flow(struct entrypoint *ep)
{
	return simplify_branch_nodes(ep);
}
/* Append the use list "src" onto "*dst" (thin ptr_list wrapper). */
static inline void concat_user_list(struct pseudo_user_list *src, struct pseudo_user_list **dst)
{
	concat_ptr_list((struct ptr_list *)src, (struct ptr_list **)dst);
}
275 void convert_instruction_target(struct instruction
*insn
, pseudo_t src
)
278 struct pseudo_user
*pu
;
280 * Go through the "insn->users" list and replace them all..
282 target
= insn
->target
;
285 FOR_EACH_PTR(target
->users
, pu
) {
286 if (*pu
->userp
!= VOID
) {
287 assert(*pu
->userp
== target
);
290 } END_FOR_EACH_PTR(pu
);
291 if (has_use_list(src
))
292 concat_user_list(target
->users
, &src
->users
);
293 target
->users
= NULL
;
296 void convert_load_instruction(struct instruction
*insn
, pseudo_t src
)
298 convert_instruction_target(insn
, src
);
299 /* Turn the load into a no-op */
300 insn
->opcode
= OP_LNOP
;
304 static int overlapping_memop(struct instruction
*a
, struct instruction
*b
)
306 unsigned int a_start
= bytes_to_bits(a
->offset
);
307 unsigned int b_start
= bytes_to_bits(b
->offset
);
308 unsigned int a_size
= a
->size
;
309 unsigned int b_size
= b
->size
;
311 if (a_size
+ a_start
<= b_start
)
313 if (b_size
+ b_start
<= a_start
)
318 static inline int same_memop(struct instruction
*a
, struct instruction
*b
)
320 return a
->offset
== b
->offset
&& a
->size
== b
->size
;
323 static inline int distinct_symbols(pseudo_t a
, pseudo_t b
)
325 if (a
->type
!= PSEUDO_SYM
)
327 if (b
->type
!= PSEUDO_SYM
)
329 return a
->sym
!= b
->sym
;
333 * Return 1 if "dom" dominates the access to "pseudo"
336 * Return 0 if it doesn't, and -1 if you don't know.
338 int dominates(pseudo_t pseudo
, struct instruction
*insn
, struct instruction
*dom
, int local
)
340 int opcode
= dom
->opcode
;
342 if (opcode
== OP_CALL
|| opcode
== OP_ENTRY
)
343 return local
? 0 : -1;
344 if (opcode
!= OP_LOAD
&& opcode
!= OP_STORE
)
346 if (dom
->src
!= pseudo
) {
349 /* We don't think two explicitly different symbols ever alias */
350 if (distinct_symbols(insn
->src
, dom
->src
))
352 /* We could try to do some alias analysis here */
355 if (!same_memop(insn
, dom
)) {
356 if (dom
->opcode
== OP_LOAD
)
358 if (!overlapping_memop(insn
, dom
))
365 static int phisrc_in_bb(struct pseudo_list
*list
, struct basic_block
*bb
)
368 FOR_EACH_PTR(list
, p
) {
369 if (p
->def
->bb
== bb
)
371 } END_FOR_EACH_PTR(p
);
376 static int find_dominating_parents(pseudo_t pseudo
, struct instruction
*insn
,
377 struct basic_block
*bb
, unsigned long generation
, struct pseudo_list
**dominators
,
380 struct basic_block
*parent
;
385 FOR_EACH_PTR(bb
->parents
, parent
) {
386 struct instruction
*one
;
387 struct instruction
*br
;
390 FOR_EACH_PTR_REVERSE(parent
->insns
, one
) {
394 dominance
= dominates(pseudo
, insn
, one
, local
);
396 if (one
->opcode
== OP_LOAD
)
402 goto found_dominator
;
403 } END_FOR_EACH_PTR_REVERSE(one
);
405 if (parent
->generation
== generation
)
407 parent
->generation
= generation
;
409 if (!find_dominating_parents(pseudo
, insn
, parent
, generation
, dominators
, local
))
414 if (dominators
&& phisrc_in_bb(*dominators
, parent
))
416 br
= delete_last_instruction(&parent
->insns
);
417 phi
= alloc_phi(parent
, one
->target
, one
->size
);
418 phi
->ident
= phi
->ident
? : pseudo
->ident
;
419 add_instruction(&parent
->insns
, br
);
420 use_pseudo(insn
, phi
, add_pseudo(dominators
, phi
));
421 } END_FOR_EACH_PTR(parent
);
426 * We should probably sort the phi list just to make it easier to compare
427 * later for equality.
429 void rewrite_load_instruction(struct instruction
*insn
, struct pseudo_list
*dominators
)
434 * Check for somewhat common case of duplicate
437 new = first_pseudo(dominators
)->def
->src1
;
438 FOR_EACH_PTR(dominators
, phi
) {
439 if (new != phi
->def
->src1
)
441 new->ident
= new->ident
? : phi
->ident
;
442 } END_FOR_EACH_PTR(phi
);
445 * All the same pseudo - mark the phi-nodes unused
446 * and convert the load into a LNOP and replace the
449 FOR_EACH_PTR(dominators
, phi
) {
450 kill_instruction(phi
->def
);
451 } END_FOR_EACH_PTR(phi
);
452 convert_load_instruction(insn
, new);
456 /* We leave symbol pseudos with a bogus usage list here */
457 if (insn
->src
->type
!= PSEUDO_SYM
)
458 kill_use(&insn
->src
);
459 insn
->opcode
= OP_PHI
;
460 insn
->phi_list
= dominators
;
463 static int find_dominating_stores(pseudo_t pseudo
, struct instruction
*insn
,
464 unsigned long generation
, int local
)
466 struct basic_block
*bb
= insn
->bb
;
467 struct instruction
*one
, *dom
= NULL
;
468 struct pseudo_list
*dominators
;
471 /* Unreachable load? Undo it */
473 insn
->opcode
= OP_LNOP
;
478 FOR_EACH_PTR(bb
->insns
, one
) {
482 dominance
= dominates(pseudo
, insn
, one
, local
);
484 /* Ignore partial load dominators */
485 if (one
->opcode
== OP_LOAD
)
495 } END_FOR_EACH_PTR(one
);
497 warning(pseudo
->sym
->pos
, "unable to find symbol read");
504 convert_load_instruction(insn
, dom
->target
);
508 /* OK, go find the parents */
509 bb
->generation
= generation
;
512 if (!find_dominating_parents(pseudo
, insn
, bb
, generation
, &dominators
, local
))
515 /* This happens with initial assignments to structures etc.. */
520 convert_load_instruction(insn
, value_pseudo(insn
->type
, 0));
525 * If we find just one dominating instruction, we
526 * can turn it into a direct thing. Otherwise we'll
527 * have to turn the load into a phi-node of the
530 rewrite_load_instruction(insn
, dominators
);
534 static void kill_store(struct instruction
*insn
)
538 insn
->opcode
= OP_SNOP
;
539 kill_use(&insn
->target
);
543 /* Kill a pseudo that is dead on exit from the bb */
544 static void kill_dead_stores(pseudo_t pseudo
, unsigned long generation
, struct basic_block
*bb
, int local
)
546 struct instruction
*insn
;
547 struct basic_block
*parent
;
549 if (bb
->generation
== generation
)
551 bb
->generation
= generation
;
552 FOR_EACH_PTR_REVERSE(bb
->insns
, insn
) {
553 int opcode
= insn
->opcode
;
555 if (opcode
!= OP_LOAD
&& opcode
!= OP_STORE
) {
558 if (opcode
== OP_CALL
)
562 if (insn
->src
== pseudo
) {
563 if (opcode
== OP_LOAD
)
570 if (insn
->src
->type
!= PSEUDO_SYM
)
572 } END_FOR_EACH_PTR_REVERSE(insn
);
574 FOR_EACH_PTR(bb
->parents
, parent
) {
575 struct basic_block
*child
;
576 FOR_EACH_PTR(parent
->children
, child
) {
577 if (child
&& child
!= bb
)
579 } END_FOR_EACH_PTR(child
);
580 kill_dead_stores(pseudo
, generation
, parent
, local
);
581 } END_FOR_EACH_PTR(parent
);
585 * This should see if the "insn" trivially dominates some previous store, and kill the
586 * store if unnecessary.
588 static void kill_dominated_stores(pseudo_t pseudo
, struct instruction
*insn
,
589 unsigned long generation
, struct basic_block
*bb
, int local
, int found
)
591 struct instruction
*one
;
592 struct basic_block
*parent
;
594 /* Unreachable store? Undo it */
599 if (bb
->generation
== generation
)
601 bb
->generation
= generation
;
602 FOR_EACH_PTR_REVERSE(bb
->insns
, one
) {
610 dominance
= dominates(pseudo
, insn
, one
, local
);
615 if (one
->opcode
== OP_LOAD
)
618 } END_FOR_EACH_PTR_REVERSE(one
);
621 warning(bb
->pos
, "Unable to find instruction");
625 FOR_EACH_PTR(bb
->parents
, parent
) {
626 struct basic_block
*child
;
627 FOR_EACH_PTR(parent
->children
, child
) {
628 if (child
&& child
!= bb
)
630 } END_FOR_EACH_PTR(child
);
631 kill_dominated_stores(pseudo
, insn
, generation
, parent
, local
, found
);
632 } END_FOR_EACH_PTR(parent
);
635 void check_access(struct instruction
*insn
)
637 pseudo_t pseudo
= insn
->src
;
639 if (insn
->bb
&& pseudo
->type
== PSEUDO_SYM
) {
640 int offset
= insn
->offset
, bit
= bytes_to_bits(offset
) + insn
->size
;
641 struct symbol
*sym
= pseudo
->sym
;
643 if (sym
->bit_size
> 0 && (offset
< 0 || bit
> sym
->bit_size
))
644 warning(insn
->pos
, "invalid access %s '%s' (%d %d)",
645 offset
< 0 ? "below" : "past the end of",
646 show_ident(sym
->ident
), offset
,
647 bits_to_bytes(sym
->bit_size
));
651 static void simplify_one_symbol(struct entrypoint
*ep
, struct symbol
*sym
)
654 struct pseudo_user
*pu
;
658 /* Never used as a symbol? */
659 pseudo
= sym
->pseudo
;
663 /* We don't do coverage analysis of volatiles.. */
664 if (sym
->ctype
.modifiers
& MOD_VOLATILE
)
667 /* ..and symbols with external visibility need more care */
668 mod
= sym
->ctype
.modifiers
& (MOD_NONLOCAL
| MOD_STATIC
| MOD_ADDRESSABLE
);
670 goto external_visibility
;
672 FOR_EACH_PTR(pseudo
->users
, pu
) {
673 /* We know that the symbol-pseudo use is the "src" in the instruction */
674 struct instruction
*insn
= pu
->insn
;
676 switch (insn
->opcode
) {
684 mod
|= MOD_ADDRESSABLE
;
685 goto external_visibility
;
692 warning(sym
->pos
, "symbol '%s' pseudo used in unexpected way", show_ident(sym
->ident
));
694 } END_FOR_EACH_PTR(pu
);
698 FOR_EACH_PTR_REVERSE(pseudo
->users
, pu
) {
699 struct instruction
*insn
= pu
->insn
;
700 if (insn
->opcode
== OP_LOAD
)
701 all
&= find_dominating_stores(pseudo
, insn
, ++bb_generation
, !mod
);
702 } END_FOR_EACH_PTR_REVERSE(pu
);
704 /* If we converted all the loads, remove the stores. They are dead */
706 FOR_EACH_PTR(pseudo
->users
, pu
) {
707 struct instruction
*insn
= pu
->insn
;
708 if (insn
->opcode
== OP_STORE
)
710 } END_FOR_EACH_PTR(pu
);
713 * If we couldn't take the shortcut, see if we can at least kill some
716 FOR_EACH_PTR(pseudo
->users
, pu
) {
717 struct instruction
*insn
= pu
->insn
;
718 if (insn
->opcode
== OP_STORE
)
719 kill_dominated_stores(pseudo
, insn
, ++bb_generation
, insn
->bb
, !mod
, 0);
720 } END_FOR_EACH_PTR(pu
);
722 if (!(mod
& (MOD_NONLOCAL
| MOD_STATIC
))) {
723 struct basic_block
*bb
;
724 FOR_EACH_PTR(ep
->bbs
, bb
) {
726 kill_dead_stores(pseudo
, ++bb_generation
, bb
, !mod
);
727 } END_FOR_EACH_PTR(bb
);
734 void simplify_symbol_usage(struct entrypoint
*ep
)
738 FOR_EACH_PTR(ep
->accesses
, pseudo
) {
739 simplify_one_symbol(ep
, pseudo
->sym
);
740 } END_FOR_EACH_PTR(pseudo
);
743 static void mark_bb_reachable(struct basic_block
*bb
, unsigned long generation
)
745 struct basic_block
*child
;
747 if (bb
->generation
== generation
)
749 bb
->generation
= generation
;
750 FOR_EACH_PTR(bb
->children
, child
) {
751 mark_bb_reachable(child
, generation
);
752 } END_FOR_EACH_PTR(child
);
755 static void kill_defs(struct instruction
*insn
)
757 pseudo_t target
= insn
->target
;
759 if (!has_use_list(target
))
761 if (target
->def
!= insn
)
764 convert_instruction_target(insn
, VOID
);
767 void kill_bb(struct basic_block
*bb
)
769 struct instruction
*insn
;
770 struct basic_block
*child
, *parent
;
772 FOR_EACH_PTR(bb
->insns
, insn
) {
773 kill_instruction_force(insn
);
776 * We kill unreachable instructions even if they
777 * otherwise aren't "killable" (e.g. volatile loads)
779 } END_FOR_EACH_PTR(insn
);
782 FOR_EACH_PTR(bb
->children
, child
) {
783 remove_bb_from_list(&child
->parents
, bb
, 0);
784 } END_FOR_EACH_PTR(child
);
787 FOR_EACH_PTR(bb
->parents
, parent
) {
788 remove_bb_from_list(&parent
->children
, bb
, 0);
789 } END_FOR_EACH_PTR(parent
);
793 void kill_unreachable_bbs(struct entrypoint
*ep
)
795 struct basic_block
*bb
;
796 unsigned long generation
= ++bb_generation
;
798 mark_bb_reachable(ep
->entry
->bb
, generation
);
799 FOR_EACH_PTR(ep
->bbs
, bb
) {
800 if (bb
->generation
== generation
)
802 /* Mark it as being dead */
805 DELETE_CURRENT_PTR(bb
);
806 } END_FOR_EACH_PTR(bb
);
807 PACK_PTR_LIST(&ep
->bbs
);
810 static int rewrite_parent_branch(struct basic_block
*bb
, struct basic_block
*old
, struct basic_block
*new)
813 struct instruction
*insn
= last_instruction(bb
->insns
);
818 /* Infinite loops: let's not "optimize" them.. */
822 switch (insn
->opcode
) {
824 changed
|= rewrite_branch(bb
, &insn
->bb_false
, old
, new);
827 changed
|= rewrite_branch(bb
, &insn
->bb_true
, old
, new);
831 struct multijmp
*jmp
;
832 FOR_EACH_PTR(insn
->multijmp_list
, jmp
) {
833 changed
|= rewrite_branch(bb
, &jmp
->target
, old
, new);
834 } END_FOR_EACH_PTR(jmp
);
843 static struct basic_block
* rewrite_branch_bb(struct basic_block
*bb
, struct instruction
*br
)
845 struct basic_block
*parent
;
846 struct basic_block
*target
= br
->bb_true
;
847 struct basic_block
*false = br
->bb_false
;
849 if (br
->opcode
== OP_CBR
) {
850 pseudo_t cond
= br
->cond
;
851 if (cond
->type
!= PSEUDO_VAL
)
853 target
= cond
->value
? target
: false;
857 * We can't do FOR_EACH_PTR() here, because the parent list
858 * may change when we rewrite the parent.
860 while ((parent
= first_basic_block(bb
->parents
)) != NULL
) {
861 if (!rewrite_parent_branch(parent
, bb
, target
))
867 static void vrfy_bb_in_list(struct basic_block
*bb
, struct basic_block_list
*list
)
870 struct basic_block
*tmp
;
871 int no_bb_in_list
= 0;
873 FOR_EACH_PTR(list
, tmp
) {
876 } END_FOR_EACH_PTR(tmp
);
877 assert(no_bb_in_list
);
881 static void vrfy_parents(struct basic_block
*bb
)
883 struct basic_block
*tmp
;
884 FOR_EACH_PTR(bb
->parents
, tmp
) {
885 vrfy_bb_in_list(bb
, tmp
->children
);
886 } END_FOR_EACH_PTR(tmp
);
889 static void vrfy_children(struct basic_block
*bb
)
891 struct basic_block
*tmp
;
892 struct instruction
*br
= last_instruction(bb
->insns
);
895 assert(!bb
->children
);
898 switch (br
->opcode
) {
899 struct multijmp
*jmp
;
901 vrfy_bb_in_list(br
->bb_false
, bb
->children
);
904 vrfy_bb_in_list(br
->bb_true
, bb
->children
);
907 case OP_COMPUTEDGOTO
:
908 FOR_EACH_PTR(br
->multijmp_list
, jmp
) {
909 vrfy_bb_in_list(jmp
->target
, bb
->children
);
910 } END_FOR_EACH_PTR(jmp
);
916 FOR_EACH_PTR(bb
->children
, tmp
) {
917 vrfy_bb_in_list(bb
, tmp
->parents
);
918 } END_FOR_EACH_PTR(tmp
);
/* Verify both directions of one block's CFG links. */
static void vrfy_bb_flow(struct basic_block *bb)
{
	vrfy_children(bb);
	vrfy_parents(bb);
}
927 void vrfy_flow(struct entrypoint
*ep
)
929 struct basic_block
*bb
;
930 struct basic_block
*entry
= ep
->entry
->bb
;
932 FOR_EACH_PTR(ep
->bbs
, bb
) {
936 } END_FOR_EACH_PTR(bb
);
940 void pack_basic_blocks(struct entrypoint
*ep
)
942 struct basic_block
*bb
;
944 /* See if we can merge a bb into another one.. */
945 FOR_EACH_PTR(ep
->bbs
, bb
) {
946 struct instruction
*first
, *insn
;
947 struct basic_block
*parent
, *child
, *last
;
949 if (!bb_reachable(bb
))
955 FOR_EACH_PTR(bb
->insns
, first
) {
958 switch (first
->opcode
) {
959 case OP_NOP
: case OP_LNOP
: case OP_SNOP
:
963 struct basic_block
*replace
;
964 replace
= rewrite_branch_bb(bb
, first
);
974 } END_FOR_EACH_PTR(first
);
978 * See if we only have one parent..
981 FOR_EACH_PTR(bb
->parents
, parent
) {
988 } END_FOR_EACH_PTR(parent
);
991 if (!parent
|| parent
== bb
)
995 * Goodie. See if the parent can merge..
997 FOR_EACH_PTR(parent
->children
, child
) {
1000 } END_FOR_EACH_PTR(child
);
1005 repeat_phase
|= REPEAT_CSE
;
1007 parent
->children
= bb
->children
;
1008 bb
->children
= NULL
;
1011 FOR_EACH_PTR(parent
->children
, child
) {
1012 replace_bb_in_list(&child
->parents
, bb
, parent
, 0);
1013 } END_FOR_EACH_PTR(child
);
1015 kill_instruction(delete_last_instruction(&parent
->insns
));
1016 FOR_EACH_PTR(bb
->insns
, insn
) {
1018 assert(insn
->bb
== bb
);
1021 add_instruction(&parent
->insns
, insn
);
1022 } END_FOR_EACH_PTR(insn
);
1026 /* nothing to do */;
1027 } END_FOR_EACH_PTR(bb
);