2 * Copyright (C) 2004 Linus Torvalds
17 #include "expression.h"
18 #include "linearize.h"
22 #include "flowgraph.h"
/* Generation counter: bumped once per CFG walk; blocks are tagged with
 * it (bb->generation) so a walk never visits the same block twice. */
24 unsigned long bb_generation
;
27 * Dammit, if we have a phi-node followed by a conditional
28 * branch on that phi-node, we should damn well be able to
29 * do something about the source. Maybe.
31 static int rewrite_branch(struct basic_block
*bb
,
32 struct basic_block
**ptr
,
33 struct basic_block
*old
,
34 struct basic_block
*new)
36 if (*ptr
!= old
|| new == old
|| !bb
->ep
)
39 /* We might find new if-conversions or non-dominating CSEs */
40 /* we may also create new dead cycles */
41 repeat_phase
|= REPEAT_CSE
| REPEAT_CFG_CLEANUP
;
43 replace_bb_in_list(&bb
->children
, old
, new, 1);
44 remove_bb_from_list(&old
->parents
, bb
, 1);
45 add_bb(&new->parents
, bb
);
50 // returns the phi-node corresponding to a phi-source
51 static struct instruction
*get_phinode(struct instruction
*phisrc
)
53 struct pseudo_user
*pu
;
55 FOR_EACH_PTR(phisrc
->target
->users
, pu
) {
56 struct instruction
*user
;
61 assert(user
->opcode
== OP_PHI
);
63 } END_FOR_EACH_PTR(pu
);
69 * Return the known truth value of a pseudo, or -1 if
72 static int pseudo_truth_value(pseudo_t pseudo
)
74 switch (pseudo
->type
) {
76 return !!pseudo
->value
;
79 struct instruction
*insn
= pseudo
->def
;
81 /* A symbol address is always considered true.. */
82 if (insn
->opcode
== OP_SYMADDR
&& insn
->target
== pseudo
)
92 // check if the BB is empty or only contains phi-sources
93 static int bb_is_trivial(struct basic_block
*bb
)
95 struct instruction
*insn
;
98 FOR_EACH_PTR(bb
->insns
, insn
) {
101 switch (insn
->opcode
) {
102 case OP_TERMINATOR
... OP_TERMINATOR_END
:
105 case OP_INLINED_CALL
:
113 } END_FOR_EACH_PTR(insn
);
120 * Does a basic block depend on the pseudos that "src" defines?
122 static int bb_depends_on(struct basic_block
*target
, struct basic_block
*src
)
126 FOR_EACH_PTR(src
->defines
, pseudo
) {
127 if (pseudo_in_list(target
->needs
, pseudo
))
129 } END_FOR_EACH_PTR(pseudo
);
134 * This really should be handled by bb_depends_on()
135 * which efficiently check the dependence using the
136 * defines - needs liveness info. Problem is that
137 * there is no liveness done on OP_PHI & OP_PHISRC.
139 * This function add the missing dependency checks.
141 static int bb_depends_on_phi(struct basic_block
*target
, struct basic_block
*src
)
143 struct instruction
*insn
;
144 FOR_EACH_PTR(src
->insns
, insn
) {
147 if (insn
->opcode
!= OP_PHI
)
149 if (pseudo_in_list(target
->needs
, insn
->target
))
151 } END_FOR_EACH_PTR(insn
);
156 // does the BB contains ignorable instructions but a final branch?
157 // :note: something could be done for phi-sources but ... we'll see.
158 static bool bb_is_forwarder(struct basic_block
*bb
)
160 struct instruction
*insn
;
162 FOR_EACH_PTR(bb
->insns
, insn
) {
165 switch (insn
->opcode
) {
167 case OP_INLINED_CALL
:
175 } END_FOR_EACH_PTR(insn
);
181 // do jump threading in dominated BBs
182 // @dom: the BB which must dominate the modified BBs.
183 // @old: the old target BB
184 // @new: the new target BB
185 // @return: 0 if no changes have been made, 1 otherwise.
187 // In all BB dominated by @dom, rewrite branches to @old into branches to @new
/* In all BBs dominated by @dom, rewrite branches targeting @old into
 * branches targeting @new (BR, CBR, SWITCH and COMPUTEDGOTO edges).
 * NOTE(review): this block was mangled by extraction - case labels,
 * breaks and the loop-restart lines are missing; do not compile as-is. */
188 static int retarget_bb(struct basic_block
*dom
, struct basic_block
*old
, struct basic_block
*new)
190 struct basic_block
*bb
;
197 FOR_EACH_PTR(old
->parents
, bb
) {
198 struct instruction
*last
;
199 struct multijmp
*jmp
;
/* only touch blocks actually dominated by @dom */
201 if (!domtree_dominates(dom
, bb
))
203 last
= last_instruction(bb
->insns
);
204 switch (last
->opcode
) {
206 changed
|= rewrite_branch(bb
, &last
->bb_true
, old
, new);
209 changed
|= rewrite_branch(bb
, &last
->bb_true
, old
, new);
210 changed
|= rewrite_branch(bb
, &last
->bb_false
, old
, new);
213 case OP_COMPUTEDGOTO
:
214 FOR_EACH_PTR(last
->multijmp_list
, jmp
) {
215 changed
|= rewrite_branch(bb
, &jmp
->target
, old
, new);
216 } END_FOR_EACH_PTR(jmp
);
222 // since rewrite_branch() will modify old->parents() the list
223 // iteration won't work correctly. Several solutions exist for
224 // this but in this case the simplest is to restart the loop.
226 } END_FOR_EACH_PTR(bb
);
/* If @insn's block is trivial and its immediate dominator ends in a
 * CBR on the same condition, retarget the dominator's successors so
 * they skip this block.
 * NOTE(review): mangled by extraction - the guards around 'trivial'
 * and the return statements are missing; do not compile as-is. */
230 static int simplify_cbr_cbr(struct instruction
*insn
)
232 struct instruction
*last
;
233 struct basic_block
*bot
= insn
->bb
;
234 struct basic_block
*top
= bot
->idom
;
241 trivial
= bb_is_trivial(bot
);
246 last
= last_instruction(top
->insns
);
/* dominator must end in a CBR on the very same condition */
247 if (last
->opcode
!= OP_CBR
|| last
->cond
!= insn
->cond
)
250 changed
|= retarget_bb(last
->bb_true
, bot
, insn
->bb_true
);
251 changed
|= retarget_bb(last
->bb_false
, bot
, insn
->bb_false
);
256 * When we reach here, we have:
257 * - a basic block that ends in a conditional branch and
258 * that has no side effects apart from the pseudos it
260 * - the phi-node that the conditional branch depends on
261 * - full pseudo liveness information
263 * We need to check if any of the _sources_ of the phi-node
264 * may be constant, and not actually need this block at all.
/* NOTE(review): mangled by extraction - phi-source extraction,
 * truth-value guards and several continue/return lines are missing;
 * do not compile as-is. */
266 static int try_to_simplify_bb(struct basic_block
*bb
, struct instruction
*first
, struct instruction
*second
)
273 * This is due to improper dominance tracking during
274 * simplify_symbol_usage()/conversion to SSA form.
275 * No sane simplification can be done when we have this.
277 bogus
= bb_list_size(bb
->parents
) != pseudo_list_size(first
->phi_list
);
279 FOR_EACH_PTR(first
->phi_list
, phi
) {
280 struct instruction
*def
= phi
->def
;
281 struct basic_block
*source
, *target
;
283 struct instruction
*br
;
290 if (!pseudo
|| !source
)
292 br
= last_instruction(source
->insns
);
/* only plain/conditional branches can be redirected */
295 if (br
->opcode
!= OP_CBR
&& br
->opcode
!= OP_BR
)
297 cond
= pseudo_truth_value(pseudo
);
/* pick the destination the known truth value selects */
300 target
= cond
? second
->bb_true
: second
->bb_false
;
301 if (bb_depends_on(target
, bb
))
303 if (bb_depends_on_phi(target
, bb
))
305 changed
|= rewrite_branch(source
, &br
->bb_true
, bb
, target
);
306 changed
|= rewrite_branch(source
, &br
->bb_false
, bb
, target
);
307 if (changed
&& !bogus
)
308 kill_use(THIS_ADDRESS(phi
));
309 } END_FOR_EACH_PTR(phi
);
/* Whether @bb contains any instruction with a side effect (calls,
 * volatile accesses, stores, asm ...).
 * NOTE(review): mangled by extraction - the opcode case labels and
 * return statements are missing; do not compile as-is. */
313 static int bb_has_side_effects(struct basic_block
*bb
)
315 struct instruction
*insn
;
316 FOR_EACH_PTR(bb
->insns
, insn
) {
319 switch (insn
->opcode
) {
321 /* FIXME! This should take "const" etc into account */
327 if (insn
->is_volatile
)
336 /* FIXME! This should take "volatile" etc into account */
342 } END_FOR_EACH_PTR(insn
);
346 static int simplify_phi_branch(struct basic_block
*bb
, struct instruction
*br
)
348 pseudo_t cond
= br
->cond
;
349 struct instruction
*def
;
351 if (cond
->type
!= PSEUDO_REG
)
354 if (def
->bb
!= bb
|| def
->opcode
!= OP_PHI
)
356 if (bb_has_side_effects(bb
))
358 return try_to_simplify_bb(bb
, def
, br
);
361 static int simplify_branch_branch(struct basic_block
*bb
, struct instruction
*br
,
362 struct basic_block
**target_p
, int bb_true
)
364 struct basic_block
*target
= *target_p
, *final
;
365 struct instruction
*insn
;
370 insn
= last_instruction(target
->insns
);
371 if (!insn
|| insn
->opcode
!= OP_CBR
|| insn
->cond
!= br
->cond
)
374 * Ahhah! We've found a branch to a branch on the same conditional!
375 * Now we just need to see if we can rewrite the branch..
378 final
= bb_true
? insn
->bb_true
: insn
->bb_false
;
379 if (bb_has_side_effects(target
))
380 goto try_to_rewrite_target
;
381 if (bb_depends_on(final
, target
))
382 goto try_to_rewrite_target
;
383 if (bb_depends_on_phi(final
, target
))
385 return rewrite_branch(bb
, target_p
, target
, final
);
387 try_to_rewrite_target
:
389 * If we're the only parent, at least we can rewrite the
390 * now-known second branch.
392 if (bb_list_size(target
->parents
) != 1)
394 insert_branch(target
, insn
, final
);
398 static int simplify_one_branch(struct basic_block
*bb
, struct instruction
*br
)
400 if (simplify_phi_branch(bb
, br
))
402 if (simplify_cbr_cbr(br
))
404 return simplify_branch_branch(bb
, br
, &br
->bb_true
, 1) |
405 simplify_branch_branch(bb
, br
, &br
->bb_false
, 0);
408 static int simplify_branch_nodes(struct entrypoint
*ep
)
411 struct basic_block
*bb
;
413 FOR_EACH_PTR(ep
->bbs
, bb
) {
414 struct instruction
*br
= last_instruction(bb
->insns
);
416 if (!br
|| br
->opcode
!= OP_CBR
)
418 changed
|= simplify_one_branch(bb
, br
);
419 } END_FOR_EACH_PTR(bb
);
/*
 * This is called late - when we have intra-bb liveness information..
 */
int simplify_flow(struct entrypoint *ep)
{
	return simplify_branch_nodes(ep);
}
/* Append the users in @src to the user list at @dst. */
static inline void concat_user_list(struct pseudo_user_list *src, struct pseudo_user_list **dst)
{
	copy_ptr_list((struct ptr_list **)dst, (struct ptr_list *)src);
}
436 void convert_instruction_target(struct instruction
*insn
, pseudo_t src
)
439 struct pseudo_user
*pu
;
441 * Go through the "insn->users" list and replace them all..
443 target
= insn
->target
;
446 FOR_EACH_PTR(target
->users
, pu
) {
447 if (*pu
->userp
!= VOID
) {
448 assert(*pu
->userp
== target
);
451 } END_FOR_EACH_PTR(pu
);
452 if (has_use_list(src
))
453 concat_user_list(target
->users
, &src
->users
);
454 target
->users
= NULL
;
457 static int overlapping_memop(struct instruction
*a
, struct instruction
*b
)
459 unsigned int a_start
= bytes_to_bits(a
->offset
);
460 unsigned int b_start
= bytes_to_bits(b
->offset
);
461 unsigned int a_size
= a
->size
;
462 unsigned int b_size
= b
->size
;
464 if (a_size
+ a_start
<= b_start
)
466 if (b_size
+ b_start
<= a_start
)
471 static inline int same_memop(struct instruction
*a
, struct instruction
*b
)
473 return a
->offset
== b
->offset
&& a
->size
== b
->size
;
476 static inline int distinct_symbols(pseudo_t a
, pseudo_t b
)
478 if (a
->type
!= PSEUDO_SYM
)
480 if (b
->type
!= PSEUDO_SYM
)
482 return a
->sym
!= b
->sym
;
486 * Return 1 if "dom" dominates the access to "pseudo"
489 * Return 0 if it doesn't, and -1 if you don't know.
491 int dominates(pseudo_t pseudo
, struct instruction
*insn
, struct instruction
*dom
, int local
)
493 int opcode
= dom
->opcode
;
495 if (opcode
== OP_CALL
|| opcode
== OP_ENTRY
)
496 return local
? 0 : -1;
497 if (opcode
!= OP_LOAD
&& opcode
!= OP_STORE
)
499 if (dom
->src
!= pseudo
) {
502 /* We don't think two explicitly different symbols ever alias */
503 if (distinct_symbols(insn
->src
, dom
->src
))
505 /* We could try to do some alias analysis here */
508 if (!same_memop(insn
, dom
)) {
509 if (!overlapping_memop(insn
, dom
))
516 /* Kill a pseudo that is dead on exit from the bb */
518 // * the variable is not global but may have its address used (local/non-local)
519 // * the stores are only needed by other functions which would do some
520 // loads via the escaped address
521 // We start by the terminating BB (normal exit BB + no-return/unreachable)
522 // We walk up the BB's instructions backward
523 // * we're only concerned by loads, stores & calls
524 // * if we reach a call -> we have to stop if var is non-local
525 // * if we reach a load of our var -> we have to stop
526 // * if we reach a store of our var -> we can kill it, it's dead
527 // * we can ignore other stores & loads if the var is local
528 // * if we reach another store or load done via non-symbol access
529 // (so done via some address calculation) -> we have to stop
530 // If we reach the top of the BB we can recurse into the parents BBs.
/* Backward-walk @bb (then recurse into single-exit parents) looking
 * for stores to @pseudo that are dead on exit; generation-tagging
 * prevents revisiting a block.
 * NOTE(review): mangled by extraction - the opcode case labels,
 * continue/return lines and some guards are missing; do not compile
 * as-is. */
531 static void kill_dead_stores_bb(pseudo_t pseudo
, unsigned long generation
, struct basic_block
*bb
, int local
)
533 struct instruction
*insn
;
534 struct basic_block
*parent
;
/* already visited in this walk */
536 if (bb
->generation
== generation
)
538 bb
->generation
= generation
;
539 FOR_EACH_PTR_REVERSE(bb
->insns
, insn
) {
542 switch (insn
->opcode
) {
544 if (insn
->src
== pseudo
)
548 if (insn
->src
== pseudo
) {
/* a store to our var before any read: it is dead */
549 kill_instruction_force(insn
);
/* non-symbol (computed-address) access: must stop */
559 if (!local
&& insn
->src
->type
!= PSEUDO_SYM
)
561 } END_FOR_EACH_PTR_REVERSE(insn
);
563 FOR_EACH_PTR(bb
->parents
, parent
) {
/* only continue through parents whose sole exit is this BB */
564 if (bb_list_size(parent
->children
) > 1)
566 kill_dead_stores_bb(pseudo
, generation
, parent
, local
);
567 } END_FOR_EACH_PTR(parent
);
570 void check_access(struct instruction
*insn
)
572 pseudo_t pseudo
= insn
->src
;
574 if (insn
->bb
&& pseudo
->type
== PSEUDO_SYM
) {
575 int offset
= insn
->offset
, bit
= bytes_to_bits(offset
) + insn
->size
;
576 struct symbol
*sym
= pseudo
->sym
;
578 if (sym
->bit_size
> 0 && (offset
< 0 || bit
> sym
->bit_size
)) {
581 warning(insn
->pos
, "invalid access %s '%s' (%d %d)",
582 offset
< 0 ? "below" : "past the end of",
583 show_ident(sym
->ident
), offset
,
584 bits_to_bytes(sym
->bit_size
));
590 static struct pseudo_user
*first_user(pseudo_t p
)
592 struct pseudo_user
*pu
;
593 FOR_EACH_PTR(p
->users
, pu
) {
597 } END_FOR_EACH_PTR(pu
);
601 void kill_dead_stores(struct entrypoint
*ep
, pseudo_t addr
, int local
)
603 unsigned long generation
;
604 struct basic_block
*bb
;
606 switch (pseudo_user_list_size(addr
->users
)) {
611 struct pseudo_user
*pu
= first_user(addr
);
612 struct instruction
*insn
= pu
->insn
;
613 if (insn
->opcode
== OP_STORE
) {
614 kill_instruction_force(insn
);
622 generation
= ++bb_generation
;
623 FOR_EACH_PTR(ep
->bbs
, bb
) {
626 kill_dead_stores_bb(addr
, generation
, bb
, local
);
627 } END_FOR_EACH_PTR(bb
);
630 static void mark_bb_reachable(struct basic_block
*bb
, unsigned long generation
)
632 struct basic_block
*child
;
634 if (bb
->generation
== generation
)
636 bb
->generation
= generation
;
637 FOR_EACH_PTR(bb
->children
, child
) {
638 mark_bb_reachable(child
, generation
);
639 } END_FOR_EACH_PTR(child
);
642 static void kill_defs(struct instruction
*insn
)
644 pseudo_t target
= insn
->target
;
646 if (!has_use_list(target
))
648 if (target
->def
!= insn
)
651 convert_instruction_target(insn
, VOID
);
654 void kill_bb(struct basic_block
*bb
)
656 struct instruction
*insn
;
657 struct basic_block
*child
, *parent
;
659 FOR_EACH_PTR(bb
->insns
, insn
) {
662 kill_instruction_force(insn
);
665 * We kill unreachable instructions even if they
666 * otherwise aren't "killable" (e.g. volatile loads)
668 } END_FOR_EACH_PTR(insn
);
671 FOR_EACH_PTR(bb
->children
, child
) {
672 remove_bb_from_list(&child
->parents
, bb
, 0);
673 } END_FOR_EACH_PTR(child
);
676 FOR_EACH_PTR(bb
->parents
, parent
) {
677 remove_bb_from_list(&parent
->children
, bb
, 0);
678 } END_FOR_EACH_PTR(parent
);
682 void kill_unreachable_bbs(struct entrypoint
*ep
)
684 struct basic_block
*bb
;
685 unsigned long generation
= ++bb_generation
;
687 mark_bb_reachable(ep
->entry
->bb
, generation
);
688 FOR_EACH_PTR(ep
->bbs
, bb
) {
689 if (bb
->generation
== generation
)
691 /* Mark it as being dead */
694 DELETE_CURRENT_PTR(bb
);
695 } END_FOR_EACH_PTR(bb
);
696 PACK_PTR_LIST(&ep
->bbs
);
699 static int rewrite_parent_branch(struct basic_block
*bb
, struct basic_block
*old
, struct basic_block
*new)
702 struct instruction
*insn
= last_instruction(bb
->insns
);
707 /* Infinite loops: let's not "optimize" them.. */
711 switch (insn
->opcode
) {
713 changed
|= rewrite_branch(bb
, &insn
->bb_false
, old
, new);
716 changed
|= rewrite_branch(bb
, &insn
->bb_true
, old
, new);
720 struct multijmp
*jmp
;
721 FOR_EACH_PTR(insn
->multijmp_list
, jmp
) {
722 changed
|= rewrite_branch(bb
, &jmp
->target
, old
, new);
723 } END_FOR_EACH_PTR(jmp
);
732 static struct basic_block
* rewrite_branch_bb(struct basic_block
*bb
, struct instruction
*br
)
734 struct basic_block
*parent
;
735 struct basic_block
*target
= br
->bb_true
;
737 if (br
->opcode
== OP_CBR
) {
738 pseudo_t cond
= br
->cond
;
739 if (cond
->type
!= PSEUDO_VAL
)
741 target
= cond
->value
? target
: br
->bb_false
;
745 * We can't do FOR_EACH_PTR() here, because the parent list
746 * may change when we rewrite the parent.
748 while ((parent
= first_basic_block(bb
->parents
)) != NULL
) {
749 if (!rewrite_parent_branch(parent
, bb
, target
))
755 static void vrfy_bb_in_list(struct basic_block
*bb
, struct basic_block_list
*list
)
758 struct basic_block
*tmp
;
759 int no_bb_in_list
= 0;
761 FOR_EACH_PTR(list
, tmp
) {
764 } END_FOR_EACH_PTR(tmp
);
765 assert(no_bb_in_list
);
769 static void vrfy_parents(struct basic_block
*bb
)
771 struct basic_block
*tmp
;
772 FOR_EACH_PTR(bb
->parents
, tmp
) {
773 vrfy_bb_in_list(bb
, tmp
->children
);
774 } END_FOR_EACH_PTR(tmp
);
777 static void vrfy_children(struct basic_block
*bb
)
779 struct basic_block
*tmp
;
780 struct instruction
*br
= last_instruction(bb
->insns
);
783 assert(!bb
->children
);
786 switch (br
->opcode
) {
787 struct multijmp
*jmp
;
789 vrfy_bb_in_list(br
->bb_false
, bb
->children
);
792 vrfy_bb_in_list(br
->bb_true
, bb
->children
);
795 case OP_COMPUTEDGOTO
:
796 FOR_EACH_PTR(br
->multijmp_list
, jmp
) {
797 vrfy_bb_in_list(jmp
->target
, bb
->children
);
798 } END_FOR_EACH_PTR(jmp
);
804 FOR_EACH_PTR(bb
->children
, tmp
) {
805 vrfy_bb_in_list(bb
, tmp
->parents
);
806 } END_FOR_EACH_PTR(tmp
);
/*
 * Verify both edge directions of a single BB.
 * NOTE(review): the body was lost entirely in extraction; restored
 * as the obvious composition of the two verifiers above.
 */
static void vrfy_bb_flow(struct basic_block *bb)
{
	vrfy_children(bb);
	vrfy_parents(bb);
}
815 void vrfy_flow(struct entrypoint
*ep
)
817 struct basic_block
*bb
;
818 struct basic_block
*entry
= ep
->entry
->bb
;
820 FOR_EACH_PTR(ep
->bbs
, bb
) {
824 } END_FOR_EACH_PTR(bb
);
828 static int retarget_parents(struct basic_block
*bb
, struct basic_block
*target
)
830 struct basic_block
*parent
;
833 * We can't do FOR_EACH_PTR() here, because the parent list
834 * may change when we rewrite the parent.
836 while ((parent
= first_basic_block(bb
->parents
))) {
837 if (!rewrite_parent_branch(parent
, bb
, target
))
841 return REPEAT_CFG_CLEANUP
;
/* When a phi-source moves into @top during a BB merge, drop the phi
 * entries that would now come from the same block.
 * NOTE(review): mangled by extraction - the phi/def extraction lines
 * are missing; do not compile as-is. */
844 static void remove_merging_phisrc(struct basic_block
*top
, struct instruction
*insn
)
846 struct instruction
*user
= get_phinode(insn
);
849 FOR_EACH_PTR(user
->phi_list
, phi
) {
850 struct instruction
*phisrc
;
/* only entries sourced from the merged-into block are dropped */
855 if (phisrc
->bb
!= top
)
857 REPLACE_CURRENT_PTR(phi
, VOID
);
858 kill_instruction(phisrc
);
859 } END_FOR_EACH_PTR(phi
);
/* During a BB merge, fold away an OP_PHI whose source now lives in
 * @top: rewire users of the phi to the source's pseudo and kill both
 * instructions.
 * NOTE(review): mangled by extraction - the def extraction and the
 * guards between lines 867 and 876 are missing; do not compile as-is. */
862 static void remove_merging_phi(struct basic_block
*top
, struct instruction
*insn
)
866 FOR_EACH_PTR(insn
->phi_list
, phi
) {
867 struct instruction
*def
;
876 convert_instruction_target(insn
, def
->src
);
877 kill_instruction(def
);
878 kill_instruction(insn
);
879 } END_FOR_EACH_PTR(phi
);
884 // @top: the first BB to be merged
885 // @bot: the second BB to be merged
/* Merge @bot into @top: move children and instructions up, fixing
 * phi-nodes/phi-sources that would now be intra-block.
 * NOTE(review): mangled by extraction - case labels, insn->bb
 * reassignment and several statements are missing; do not compile
 * as-is. */
886 static int merge_bb(struct basic_block
*top
, struct basic_block
*bot
)
888 struct instruction
*insn
;
889 struct basic_block
*bb
;
/* @top inherits @bot's successors */
894 top
->children
= bot
->children
;
895 bot
->children
= NULL
;
898 FOR_EACH_PTR(top
->children
, bb
) {
899 replace_bb_in_list(&bb
->parents
, bot
, top
, 1);
900 } END_FOR_EACH_PTR(bb
);
/* drop @top's old terminator before appending @bot's instructions */
902 kill_instruction(delete_last_instruction(&top
->insns
));
903 FOR_EACH_PTR(bot
->insns
, insn
) {
906 assert(insn
->bb
== bot
);
907 switch (insn
->opcode
) {
909 remove_merging_phi(top
, insn
);
912 remove_merging_phisrc(top
, insn
);
916 add_instruction(&top
->insns
, insn
);
917 } END_FOR_EACH_PTR(insn
);
920 return REPEAT_CFG_CLEANUP
;
924 // early simplification of the CFG
925 // Three things are done here:
926 // # inactive BBs are removed
927 // # branches to a 'forwarder' BB are redirected to the forwardee.
928 // # merge single-child/single-parent BBs.
/* NOTE(review): mangled by extraction - the inactive-BB test, the
 * opcode cases and the target extraction are missing; do not compile
 * as-is. */
929 int simplify_cfg_early(struct entrypoint
*ep
)
931 struct basic_block
*bb
;
934 FOR_EACH_PTR_REVERSE(ep
->bbs
, bb
) {
935 struct instruction
*insn
;
936 struct basic_block
*tgt
;
/* drop inactive blocks from the list entirely */
939 DELETE_CURRENT_PTR(bb
);
940 changed
= REPEAT_CFG_CLEANUP
;
944 insn
= last_instruction(bb
->insns
);
947 switch (insn
->opcode
) {
950 if (bb_is_forwarder(bb
))
951 changed
|= retarget_parents(bb
, tgt
);
/* single-parent target: fold the two blocks together */
952 else if (bb_list_size(tgt
->parents
) == 1)
953 changed
|= merge_bb(bb
, tgt
);
956 } END_FOR_EACH_PTR_REVERSE(bb
);
/* Late CFG packing: rewrite trivial branch-only blocks and merge
 * single-parent/single-child pairs.
 * NOTE(review): mangled by extraction - case labels, loop bodies and
 * several guards are missing; do not compile as-is. */
960 void pack_basic_blocks(struct entrypoint
*ep
)
962 struct basic_block
*bb
;
964 /* See if we can merge a bb into another one.. */
965 FOR_EACH_PTR(ep
->bbs
, bb
) {
966 struct instruction
*first
;
967 struct basic_block
*parent
, *child
, *last
;
969 if (!bb_reachable(bb
))
/* scan for the first "real" instruction of the block */
975 FOR_EACH_PTR(bb
->insns
, first
) {
978 switch (first
->opcode
) {
980 case OP_INLINED_CALL
:
984 struct basic_block
*replace
;
985 replace
= rewrite_branch_bb(bb
, first
);
995 } END_FOR_EACH_PTR(first
);
999 * See if we only have one parent..
1002 FOR_EACH_PTR(bb
->parents
, parent
) {
1009 } END_FOR_EACH_PTR(parent
);
1012 if (!parent
|| parent
== bb
)
1016 * Goodie. See if the parent can merge..
1018 FOR_EACH_PTR(parent
->children
, child
) {
1021 } END_FOR_EACH_PTR(child
);
1023 repeat_phase
|= merge_bb(parent
, bb
);
1026 /* nothing to do */;
1027 } END_FOR_EACH_PTR(bb
);