2 * Copyright (C) 2004 Linus Torvalds
17 #include "expression.h"
18 #include "linearize.h"
/*
 * Monotonic generation counter used to tag basic blocks during
 * graph walks (see mark_bb_reachable(), kill_dead_stores_bb()),
 * so each walk can cheaply detect already-visited blocks.
 */
unsigned long bb_generation;
26 // remove phi-sources from a removed edge
28 // :note: It's possible to have several edges between the same BBs.
29 // It's common with switches but it's also possible with branches.
30 // This function will only remove a single phi-source per edge.
31 int remove_phisources(struct basic_block
*par
, struct basic_block
*old
)
33 struct instruction
*insn
;
36 FOR_EACH_PTR(old
->insns
, insn
) {
41 if (insn
->opcode
!= OP_PHI
)
44 // found a phi-node in the target BB,
45 // now look after its phi-sources.
46 FOR_EACH_PTR(insn
->phi_list
, phi
) {
47 struct instruction
*phisrc
= phi
->def
;
51 assert(phisrc
->phi_node
== insn
);
52 if (phisrc
->bb
!= par
)
54 // found a phi-source corresponding to this edge:
55 // remove it but avoid the recursive killing.
56 REPLACE_CURRENT_PTR(phi
, VOID
);
57 remove_use(&phisrc
->src
);
59 changed
|= REPEAT_CSE
;
60 // Only the first one must be removed.
62 } END_FOR_EACH_PTR(phi
);
64 } END_FOR_EACH_PTR(insn
);
69 // remove all phisources but the one corresponding to the given target
70 static int remove_other_phisources(struct basic_block
*bb
, struct multijmp_list
*list
, struct basic_block
*target
)
75 FOR_EACH_PTR(list
, jmp
) {
76 if (jmp
->target
== target
) {
80 changed
|= remove_phisources(bb
, jmp
->target
);
81 } END_FOR_EACH_PTR(jmp
);
86 * Dammit, if we have a phi-node followed by a conditional
87 * branch on that phi-node, we should damn well be able to
88 * do something about the source. Maybe.
90 static int rewrite_branch(struct basic_block
*bb
,
91 struct basic_block
**ptr
,
92 struct basic_block
*old
,
93 struct basic_block
*new)
95 if (*ptr
!= old
|| new == old
|| !bb
->ep
)
98 /* We might find new if-conversions or non-dominating CSEs */
99 /* we may also create new dead cycles */
100 repeat_phase
|= REPEAT_CSE
| REPEAT_CFG_CLEANUP
;
102 replace_bb_in_list(&bb
->children
, old
, new, 1);
103 remove_bb_from_list(&old
->parents
, bb
, 1);
104 add_bb(&new->parents
, bb
);
109 * Return the known truth value of a pseudo, or -1 if
112 static int pseudo_truth_value(pseudo_t pseudo
)
114 switch (pseudo
->type
) {
116 return !!pseudo
->value
;
119 struct instruction
*insn
= pseudo
->def
;
121 /* A symbol address is always considered true.. */
122 if (insn
->opcode
== OP_SYMADDR
&& insn
->target
== pseudo
)
132 * Does a basic block depend on the pseudos that "src" defines?
134 static int bb_depends_on(struct basic_block
*target
, struct basic_block
*src
)
138 FOR_EACH_PTR(src
->defines
, pseudo
) {
139 if (pseudo_in_list(target
->needs
, pseudo
))
141 } END_FOR_EACH_PTR(pseudo
);
146 * This really should be handled by bb_depends_on()
147 * which efficiently check the dependence using the
148 * defines - needs liveness info. Problem is that
149 * there is no liveness done on OP_PHI & OP_PHISRC.
151 * This function add the missing dependency checks.
153 static int bb_depends_on_phi(struct basic_block
*target
, struct basic_block
*src
)
155 struct instruction
*insn
;
156 FOR_EACH_PTR(src
->insns
, insn
) {
159 if (insn
->opcode
!= OP_PHI
)
161 if (pseudo_in_list(target
->needs
, insn
->target
))
163 } END_FOR_EACH_PTR(insn
);
168 // does the BB contains ignorable instructions but a final branch?
169 // :note: something could be done for phi-sources but ... we'll see.
170 static bool bb_is_forwarder(struct basic_block
*bb
)
172 struct instruction
*insn
;
174 FOR_EACH_PTR(bb
->insns
, insn
) {
177 switch (insn
->opcode
) {
179 case OP_INLINED_CALL
:
187 } END_FOR_EACH_PTR(insn
);
193 // check if the sources of a phi-node match with the parent BBs
194 static bool phi_check(struct instruction
*node
)
196 struct basic_block
*bb
;
199 PREPARE_PTR_LIST(node
->bb
->parents
, bb
);
200 FOR_EACH_PTR(node
->phi_list
, phi
) {
201 if (phi
== VOID
|| !phi
->def
)
203 if (phi
->def
->bb
!= bb
)
206 } END_FOR_EACH_PTR(phi
);
214 * When we reach here, we have:
215 * - a basic block that ends in a conditional branch and
216 * that has no side effects apart from the pseudos it
218 * - the phi-node that the conditional branch depends on
219 * - full pseudo liveness information
221 * We need to check if any of the _sources_ of the phi-node
222 * may be constant, and not actually need this block at all.
224 static int try_to_simplify_bb(struct basic_block
*bb
, struct instruction
*first
, struct instruction
*second
)
231 * This a due to improper dominance tracking during
232 * simplify_symbol_usage()/conversion to SSA form.
233 * No sane simplification can be done when we have this.
235 bogus
= !phi_check(first
);
237 FOR_EACH_PTR(first
->phi_list
, phi
) {
238 struct instruction
*def
= phi
->def
;
239 struct basic_block
*source
, *target
;
241 struct instruction
*br
;
248 if (!pseudo
|| !source
)
250 br
= last_instruction(source
->insns
);
253 if (br
->opcode
!= OP_CBR
&& br
->opcode
!= OP_BR
)
255 cond
= pseudo_truth_value(pseudo
);
258 target
= cond
? second
->bb_true
: second
->bb_false
;
259 if (bb_depends_on(target
, bb
))
261 if (bb_depends_on_phi(target
, bb
))
263 changed
|= rewrite_branch(source
, &br
->bb_true
, bb
, target
);
264 changed
|= rewrite_branch(source
, &br
->bb_false
, bb
, target
);
265 if (changed
&& !bogus
)
266 kill_use(THIS_ADDRESS(phi
));
267 } END_FOR_EACH_PTR(phi
);
271 static int bb_has_side_effects(struct basic_block
*bb
)
273 struct instruction
*insn
;
274 FOR_EACH_PTR(bb
->insns
, insn
) {
277 switch (insn
->opcode
) {
279 /* FIXME! This should take "const" etc into account */
285 if (insn
->is_volatile
)
294 /* FIXME! This should take "volatile" etc into account */
300 } END_FOR_EACH_PTR(insn
);
304 static int simplify_phi_branch(struct basic_block
*bb
, struct instruction
*br
)
306 pseudo_t cond
= br
->cond
;
307 struct instruction
*def
;
309 if (cond
->type
!= PSEUDO_REG
)
312 if (def
->bb
!= bb
|| def
->opcode
!= OP_PHI
)
314 if (bb_has_side_effects(bb
))
316 return try_to_simplify_bb(bb
, def
, br
);
319 static int simplify_branch_branch(struct basic_block
*bb
, struct instruction
*br
,
320 struct basic_block
**target_p
, int bb_true
)
322 struct basic_block
*target
= *target_p
, *final
;
323 struct instruction
*insn
;
328 insn
= last_instruction(target
->insns
);
329 if (!insn
|| insn
->opcode
!= OP_CBR
|| insn
->cond
!= br
->cond
)
332 * Ahhah! We've found a branch to a branch on the same conditional!
333 * Now we just need to see if we can rewrite the branch..
336 final
= bb_true
? insn
->bb_true
: insn
->bb_false
;
337 if (bb_has_side_effects(target
))
338 goto try_to_rewrite_target
;
339 if (bb_depends_on(final
, target
))
340 goto try_to_rewrite_target
;
341 if (bb_depends_on_phi(final
, target
))
343 return rewrite_branch(bb
, target_p
, target
, final
);
345 try_to_rewrite_target
:
347 * If we're the only parent, at least we can rewrite the
348 * now-known second branch.
350 if (bb_list_size(target
->parents
) != 1)
352 convert_to_jump(insn
, final
);
356 static int simplify_one_branch(struct basic_block
*bb
, struct instruction
*br
)
358 if (simplify_phi_branch(bb
, br
))
360 return simplify_branch_branch(bb
, br
, &br
->bb_true
, 1) |
361 simplify_branch_branch(bb
, br
, &br
->bb_false
, 0);
364 static int simplify_branch_nodes(struct entrypoint
*ep
)
367 struct basic_block
*bb
;
369 FOR_EACH_PTR(ep
->bbs
, bb
) {
370 struct instruction
*br
= last_instruction(bb
->insns
);
372 if (!br
|| br
->opcode
!= OP_CBR
)
374 changed
|= simplify_one_branch(bb
, br
);
375 } END_FOR_EACH_PTR(bb
);
/*
 * This is called late - when we have intra-bb liveness information..
 */
int simplify_flow(struct entrypoint *ep)
{
	return simplify_branch_nodes(ep);
}
/* Append all users of 'src' onto the '*dst' user list. */
static inline void concat_user_list(struct pseudo_user_list *src, struct pseudo_user_list **dst)
{
	copy_ptr_list((struct ptr_list **)dst, (struct ptr_list *)src);
}
392 void convert_instruction_target(struct instruction
*insn
, pseudo_t src
)
395 struct pseudo_user
*pu
;
397 * Go through the "insn->users" list and replace them all..
399 target
= insn
->target
;
402 FOR_EACH_PTR(target
->users
, pu
) {
403 if (*pu
->userp
!= VOID
) {
404 assert(*pu
->userp
== target
);
407 } END_FOR_EACH_PTR(pu
);
408 if (has_use_list(src
))
409 concat_user_list(target
->users
, &src
->users
);
410 target
->users
= NULL
;
413 static int overlapping_memop(struct instruction
*a
, struct instruction
*b
)
415 unsigned int a_start
= bytes_to_bits(a
->offset
);
416 unsigned int b_start
= bytes_to_bits(b
->offset
);
417 unsigned int a_size
= a
->size
;
418 unsigned int b_size
= b
->size
;
420 if (a_size
+ a_start
<= b_start
)
422 if (b_size
+ b_start
<= a_start
)
427 static inline int same_memop(struct instruction
*a
, struct instruction
*b
)
429 return a
->offset
== b
->offset
&& a
->size
== b
->size
;
432 static inline int distinct_symbols(pseudo_t a
, pseudo_t b
)
434 if (a
->type
!= PSEUDO_SYM
)
436 if (b
->type
!= PSEUDO_SYM
)
438 return a
->sym
!= b
->sym
;
442 * Return 1 if "dom" dominates the access to "pseudo"
445 * Return 0 if it doesn't, and -1 if you don't know.
447 int dominates(struct instruction
*insn
, struct instruction
*dom
, int local
)
449 switch (dom
->opcode
) {
450 case OP_CALL
: case OP_ENTRY
:
451 return local
? 0 : -1;
452 case OP_LOAD
: case OP_STORE
:
455 if (dom
->clobber_memory
)
457 if (dom
->output_memory
)
464 if (dom
->src
!= insn
->src
) {
467 /* We don't think two explicitly different symbols ever alias */
468 if (distinct_symbols(insn
->src
, dom
->src
))
470 /* We could try to do some alias analysis here */
473 if (!same_memop(insn
, dom
)) {
474 if (!overlapping_memop(insn
, dom
))
481 /* Kill a pseudo that is dead on exit from the bb */
483 // * the variable is not global but may have its address used (local/non-local)
484 // * the stores are only needed by others functions which would do some
485 // loads via the escaped address
486 // We start by the terminating BB (normal exit BB + no-return/unreachable)
487 // We walkup the BB' intruction backward
488 // * we're only concerned by loads, stores & calls
489 // * if we reach a call -> we have to stop if var is non-local
490 // * if we reach a load of our var -> we have to stop
491 // * if we reach a store of our var -> we can kill it, it's dead
492 // * we can ignore other stores & loads if the var is local
493 // * if we reach another store or load done via non-symbol access
494 // (so done via some address calculation) -> we have to stop
495 // If we reach the top of the BB we can recurse into the parents BBs.
496 static void kill_dead_stores_bb(pseudo_t pseudo
, unsigned long generation
, struct basic_block
*bb
, int local
)
498 struct instruction
*insn
;
499 struct basic_block
*parent
;
501 if (bb
->generation
== generation
)
503 bb
->generation
= generation
;
504 FOR_EACH_PTR_REVERSE(bb
->insns
, insn
) {
507 switch (insn
->opcode
) {
509 if (insn
->src
== pseudo
)
513 if (insn
->src
== pseudo
) {
514 kill_instruction_force(insn
);
524 if (!local
&& insn
->src
->type
!= PSEUDO_SYM
)
526 } END_FOR_EACH_PTR_REVERSE(insn
);
528 FOR_EACH_PTR(bb
->parents
, parent
) {
529 if (bb_list_size(parent
->children
) > 1)
531 kill_dead_stores_bb(pseudo
, generation
, parent
, local
);
532 } END_FOR_EACH_PTR(parent
);
535 void check_access(struct instruction
*insn
)
537 pseudo_t pseudo
= insn
->src
;
539 if (insn
->bb
&& pseudo
->type
== PSEUDO_SYM
) {
540 int offset
= insn
->offset
, bit
= bytes_to_bits(offset
) + insn
->size
;
541 struct symbol
*sym
= pseudo
->sym
;
543 if (sym
->bit_size
> 0 && (offset
< 0 || bit
> sym
->bit_size
)) {
546 warning(insn
->pos
, "invalid access %s '%s' (%d %d)",
547 offset
< 0 ? "below" : "past the end of",
548 show_ident(sym
->ident
), offset
,
549 bits_to_bytes(sym
->bit_size
));
555 static struct pseudo_user
*first_user(pseudo_t p
)
557 struct pseudo_user
*pu
;
558 FOR_EACH_PTR(p
->users
, pu
) {
562 } END_FOR_EACH_PTR(pu
);
566 void kill_dead_stores(struct entrypoint
*ep
, pseudo_t addr
, int local
)
568 unsigned long generation
;
569 struct basic_block
*bb
;
571 switch (pseudo_user_list_size(addr
->users
)) {
576 struct pseudo_user
*pu
= first_user(addr
);
577 struct instruction
*insn
= pu
->insn
;
578 if (insn
->opcode
== OP_STORE
) {
579 kill_instruction_force(insn
);
587 generation
= ++bb_generation
;
588 FOR_EACH_PTR(ep
->bbs
, bb
) {
591 kill_dead_stores_bb(addr
, generation
, bb
, local
);
592 } END_FOR_EACH_PTR(bb
);
595 static void mark_bb_reachable(struct basic_block
*bb
, unsigned long generation
)
597 struct basic_block
*child
;
599 if (bb
->generation
== generation
)
601 bb
->generation
= generation
;
602 FOR_EACH_PTR(bb
->children
, child
) {
603 mark_bb_reachable(child
, generation
);
604 } END_FOR_EACH_PTR(child
);
607 static void kill_defs(struct instruction
*insn
)
609 pseudo_t target
= insn
->target
;
611 if (!has_use_list(target
))
613 if (target
->def
!= insn
)
616 convert_instruction_target(insn
, VOID
);
619 void kill_bb(struct basic_block
*bb
)
621 struct instruction
*insn
;
622 struct basic_block
*child
, *parent
;
624 FOR_EACH_PTR(bb
->insns
, insn
) {
627 kill_instruction_force(insn
);
630 * We kill unreachable instructions even if they
631 * otherwise aren't "killable" (e.g. volatile loads)
633 } END_FOR_EACH_PTR(insn
);
636 FOR_EACH_PTR(bb
->children
, child
) {
637 remove_bb_from_list(&child
->parents
, bb
, 0);
638 } END_FOR_EACH_PTR(child
);
641 FOR_EACH_PTR(bb
->parents
, parent
) {
642 remove_bb_from_list(&parent
->children
, bb
, 0);
643 } END_FOR_EACH_PTR(parent
);
647 void kill_unreachable_bbs(struct entrypoint
*ep
)
649 struct basic_block
*bb
;
650 unsigned long generation
= ++bb_generation
;
652 mark_bb_reachable(ep
->entry
->bb
, generation
);
653 FOR_EACH_PTR(ep
->bbs
, bb
) {
654 if (bb
->generation
== generation
)
656 /* Mark it as being dead */
659 DELETE_CURRENT_PTR(bb
);
660 } END_FOR_EACH_PTR(bb
);
661 PACK_PTR_LIST(&ep
->bbs
);
664 static int rewrite_parent_branch(struct basic_block
*bb
, struct basic_block
*old
, struct basic_block
*new)
667 struct instruction
*insn
= last_instruction(bb
->insns
);
672 /* Infinite loops: let's not "optimize" them.. */
676 switch (insn
->opcode
) {
678 changed
|= rewrite_branch(bb
, &insn
->bb_false
, old
, new);
681 changed
|= rewrite_branch(bb
, &insn
->bb_true
, old
, new);
685 struct multijmp
*jmp
;
686 FOR_EACH_PTR(insn
->multijmp_list
, jmp
) {
687 changed
|= rewrite_branch(bb
, &jmp
->target
, old
, new);
688 } END_FOR_EACH_PTR(jmp
);
697 static struct basic_block
* rewrite_branch_bb(struct basic_block
*bb
, struct instruction
*br
)
699 struct basic_block
*parent
;
700 struct basic_block
*target
= br
->bb_true
;
702 if (br
->opcode
== OP_CBR
) {
703 pseudo_t cond
= br
->cond
;
704 if (cond
->type
!= PSEUDO_VAL
)
706 target
= cond
->value
? target
: br
->bb_false
;
710 * We can't do FOR_EACH_PTR() here, because the parent list
711 * may change when we rewrite the parent.
713 while ((parent
= first_basic_block(bb
->parents
)) != NULL
) {
714 if (!rewrite_parent_branch(parent
, bb
, target
))
720 static void vrfy_bb_in_list(struct basic_block
*bb
, struct basic_block_list
*list
)
723 struct basic_block
*tmp
;
724 int no_bb_in_list
= 0;
726 FOR_EACH_PTR(list
, tmp
) {
729 } END_FOR_EACH_PTR(tmp
);
730 assert(no_bb_in_list
);
734 static void vrfy_parents(struct basic_block
*bb
)
736 struct basic_block
*tmp
;
737 FOR_EACH_PTR(bb
->parents
, tmp
) {
738 vrfy_bb_in_list(bb
, tmp
->children
);
739 } END_FOR_EACH_PTR(tmp
);
742 static void vrfy_children(struct basic_block
*bb
)
744 struct basic_block
*tmp
;
745 struct instruction
*br
= last_instruction(bb
->insns
);
748 assert(!bb
->children
);
751 switch (br
->opcode
) {
752 struct multijmp
*jmp
;
754 vrfy_bb_in_list(br
->bb_false
, bb
->children
);
757 vrfy_bb_in_list(br
->bb_true
, bb
->children
);
760 case OP_COMPUTEDGOTO
:
761 FOR_EACH_PTR(br
->multijmp_list
, jmp
) {
762 vrfy_bb_in_list(jmp
->target
, bb
->children
);
763 } END_FOR_EACH_PTR(jmp
);
769 FOR_EACH_PTR(bb
->children
, tmp
) {
770 vrfy_bb_in_list(bb
, tmp
->parents
);
771 } END_FOR_EACH_PTR(tmp
);
/* Verify both edge directions of one BB.
 * NOTE(review): body was lost in extraction; reconstructed as the
 * composition of the two checkers above — confirm against upstream. */
static void vrfy_bb_flow(struct basic_block *bb)
{
	vrfy_children(bb);
	vrfy_parents(bb);
}
780 void vrfy_flow(struct entrypoint
*ep
)
782 struct basic_block
*bb
;
783 struct basic_block
*entry
= ep
->entry
->bb
;
785 FOR_EACH_PTR(ep
->bbs
, bb
) {
789 } END_FOR_EACH_PTR(bb
);
794 // change a switch or a conditional branch into a branch
795 int convert_to_jump(struct instruction
*insn
, struct basic_block
*target
)
797 struct basic_block
*bb
= insn
->bb
;
798 struct basic_block
*child
;
799 int changed
= REPEAT_CSE
;
801 switch (insn
->opcode
) {
803 changed
|= remove_phisources(insn
->bb
, insn
->bb_true
== target
? insn
->bb_false
: insn
->bb_true
);
806 changed
|= remove_other_phisources(insn
->bb
, insn
->multijmp_list
, target
);
809 kill_use(&insn
->cond
);
810 insn
->bb_true
= target
;
811 insn
->bb_false
= NULL
;
814 insn
->opcode
= OP_BR
;
816 FOR_EACH_PTR(bb
->children
, child
) {
817 if (child
== target
) {
818 target
= NULL
; // leave first occurence
821 DELETE_CURRENT_PTR(child
);
822 remove_bb_from_list(&child
->parents
, bb
, 1);
823 changed
|= REPEAT_CFG_CLEANUP
;
824 } END_FOR_EACH_PTR(child
);
825 PACK_PTR_LIST(&bb
->children
);
826 repeat_phase
|= changed
;
830 static int retarget_parents(struct basic_block
*bb
, struct basic_block
*target
)
832 struct basic_block
*parent
;
835 * We can't do FOR_EACH_PTR() here, because the parent list
836 * may change when we rewrite the parent.
838 while ((parent
= first_basic_block(bb
->parents
))) {
839 if (!rewrite_parent_branch(parent
, bb
, target
))
843 return REPEAT_CFG_CLEANUP
;
846 static void remove_merging_phisrc(struct instruction
*insn
, struct basic_block
*bot
)
848 struct instruction
*node
= insn
->phi_node
;
852 kill_instruction(insn
);
856 FOR_EACH_PTR(node
->phi_list
, phi
) {
857 struct instruction
*phisrc
;
862 if (phisrc
->bb
== bot
) {
863 kill_instruction(insn
);
866 } END_FOR_EACH_PTR(phi
);
869 static void remove_merging_phi(struct basic_block
*top
, struct instruction
*insn
)
873 FOR_EACH_PTR(insn
->phi_list
, phi
) {
874 struct instruction
*def
;
883 convert_instruction_target(insn
, def
->src
);
884 kill_instruction(def
);
885 kill_instruction(insn
);
886 } END_FOR_EACH_PTR(phi
);
891 // @top: the first BB to be merged
892 // @bot: the second BB to be merged
893 static int merge_bb(struct basic_block
*top
, struct basic_block
*bot
)
895 struct instruction
*insn
;
896 struct basic_block
*bb
;
901 top
->children
= bot
->children
;
902 bot
->children
= NULL
;
905 FOR_EACH_PTR(top
->children
, bb
) {
906 replace_bb_in_list(&bb
->parents
, bot
, top
, 1);
907 } END_FOR_EACH_PTR(bb
);
909 FOR_EACH_PTR(top
->insns
, insn
) {
912 if (insn
->opcode
!= OP_PHISOURCE
)
914 remove_merging_phisrc(insn
, bot
);
915 } END_FOR_EACH_PTR(insn
);
917 kill_instruction(delete_last_instruction(&top
->insns
));
918 FOR_EACH_PTR(bot
->insns
, insn
) {
921 assert(insn
->bb
== bot
);
922 switch (insn
->opcode
) {
924 remove_merging_phi(top
, insn
);
928 add_instruction(&top
->insns
, insn
);
929 } END_FOR_EACH_PTR(insn
);
932 return REPEAT_CFG_CLEANUP
;
936 // early simplification of the CFG
937 // Three things are done here:
938 // # inactive BB are removed
939 // # branches to a 'forwarder' BB are redirected to the forwardee.
940 // # merge single-child/single-parent BBs.
941 int simplify_cfg_early(struct entrypoint
*ep
)
943 struct basic_block
*bb
;
946 FOR_EACH_PTR_REVERSE(ep
->bbs
, bb
) {
947 struct instruction
*insn
;
948 struct basic_block
*tgt
;
951 DELETE_CURRENT_PTR(bb
);
952 changed
= REPEAT_CFG_CLEANUP
;
956 insn
= last_instruction(bb
->insns
);
959 switch (insn
->opcode
) {
962 if (bb_is_forwarder(bb
))
963 changed
|= retarget_parents(bb
, tgt
);
964 else if (bb_list_size(tgt
->parents
) == 1)
965 changed
|= merge_bb(bb
, tgt
);
968 } END_FOR_EACH_PTR_REVERSE(bb
);
972 void pack_basic_blocks(struct entrypoint
*ep
)
974 struct basic_block
*bb
;
976 /* See if we can merge a bb into another one.. */
977 FOR_EACH_PTR(ep
->bbs
, bb
) {
978 struct instruction
*first
;
979 struct basic_block
*parent
, *child
, *last
;
981 if (!bb_reachable(bb
))
987 FOR_EACH_PTR(bb
->insns
, first
) {
990 switch (first
->opcode
) {
992 case OP_INLINED_CALL
:
996 struct basic_block
*replace
;
997 replace
= rewrite_branch_bb(bb
, first
);
1007 } END_FOR_EACH_PTR(first
);
1011 * See if we only have one parent..
1014 FOR_EACH_PTR(bb
->parents
, parent
) {
1021 } END_FOR_EACH_PTR(parent
);
1024 if (!parent
|| parent
== bb
)
1028 * Goodie. See if the parent can merge..
1030 FOR_EACH_PTR(parent
->children
, child
) {
1033 } END_FOR_EACH_PTR(child
);
1035 repeat_phase
|= merge_bb(parent
, bb
);
1038 /* nothing to do */;
1039 } END_FOR_EACH_PTR(bb
);