Simplify constant unops
[smatch.git] / flow.c
blob061711dafc7cd0d6457518e88f4c72024636332e
1 /*
2 * Flow - walk the linearized flowgraph, simplifying it as we
3 * go along.
5 * Copyright (C) 2004 Linus Torvalds
6 */
8 #include <string.h>
9 #include <stdarg.h>
10 #include <stdlib.h>
11 #include <stdio.h>
12 #include <stddef.h>
13 #include <assert.h>
15 #include "parse.h"
16 #include "expression.h"
17 #include "linearize.h"
18 #include "flow.h"
/* Monotonic generation counter used to mark basic blocks already
 * visited during a graph walk (see mark_bb_reachable() and the
 * dominator searches below). */
unsigned long bb_generation;
23 * Dammit, if we have a phi-node followed by a conditional
24 * branch on that phi-node, we should damn well be able to
25 * do something about the source. Maybe.
27 static int rewrite_branch(struct basic_block *bb,
28 struct basic_block **ptr,
29 struct basic_block *old,
30 struct basic_block *new)
32 if (*ptr != old || new == old)
33 return 0;
35 /* We might find new if-conversions or non-dominating CSEs */
36 repeat_phase |= REPEAT_CSE;
37 *ptr = new;
38 replace_bb_in_list(&bb->children, old, new, 1);
39 remove_bb_from_list(&old->parents, bb, 1);
40 add_bb(&new->parents, bb);
41 return 1;
45 * Return the known truth value of a pseudo, or -1 if
46 * it's not known.
48 static int pseudo_truth_value(pseudo_t pseudo)
50 switch (pseudo->type) {
51 case PSEUDO_VAL:
52 return !!pseudo->value;
54 case PSEUDO_REG: {
55 struct instruction *insn = pseudo->def;
56 if (insn->opcode == OP_SETVAL && insn->target == pseudo) {
57 struct expression *expr = insn->val;
59 /* A symbol address is always considered true.. */
60 if (!expr)
61 return 1;
62 if (expr->type == EXPR_VALUE)
63 return !!expr->value;
66 /* Fall through */
67 default:
68 return -1;
73 * Does a basic block depend on the pseudos that "src" defines?
75 static int bb_depends_on(struct basic_block *target, struct basic_block *src)
77 pseudo_t pseudo;
79 FOR_EACH_PTR(src->defines, pseudo) {
80 if (pseudo_in_list(target->needs, pseudo))
81 return 1;
82 } END_FOR_EACH_PTR(pseudo);
83 return 0;
87 * When we reach here, we have:
88 * - a basic block that ends in a conditional branch and
89 * that has no side effects apart from the pseudos it
90 * may change.
91 * - the phi-node that the conditional branch depends on
92 * - full pseudo liveness information
94 * We need to check if any of the _sources_ of the phi-node
95 * may be constant, and not actually need this block at all.
97 static int try_to_simplify_bb(struct basic_block *bb, struct instruction *first, struct instruction *second)
99 int changed = 0;
100 pseudo_t phi;
102 FOR_EACH_PTR(first->phi_list, phi) {
103 struct instruction *def = phi->def;
104 struct basic_block *source, *target;
105 pseudo_t pseudo;
106 struct instruction *br;
107 int true;
109 if (!def)
110 continue;
111 source = def->bb;
112 pseudo = def->src1;
113 if (!pseudo || !source)
114 continue;
115 br = last_instruction(source->insns);
116 if (!br)
117 continue;
118 if (br->opcode != OP_BR)
119 continue;
120 true = pseudo_truth_value(pseudo);
121 if (true < 0)
122 continue;
123 target = true ? second->bb_true : second->bb_false;
124 if (bb_depends_on(target, bb))
125 continue;
126 changed |= rewrite_branch(source, &br->bb_true, bb, target);
127 changed |= rewrite_branch(source, &br->bb_false, bb, target);
128 } END_FOR_EACH_PTR(phi);
129 return changed;
132 static int bb_has_side_effects(struct basic_block *bb)
134 struct instruction *insn;
135 FOR_EACH_PTR(bb->insns, insn) {
136 switch (insn->opcode) {
137 case OP_CALL:
138 /* Fixme! This should take "const" etc into account */
139 return 1;
141 case OP_STORE:
142 case OP_CONTEXT:
143 return 1;
145 case OP_ASM:
146 /* Fixme! This should take "volatile" etc into account */
147 return 1;
149 default:
150 continue;
152 } END_FOR_EACH_PTR(insn);
153 return 0;
156 static int simplify_phi_branch(struct basic_block *bb, struct instruction *br)
158 pseudo_t cond = br->cond;
159 struct instruction *def;
161 if (cond->type != PSEUDO_REG)
162 return 0;
163 def = cond->def;
164 if (def->bb != bb || def->opcode != OP_PHI)
165 return 0;
166 if (bb_has_side_effects(bb))
167 return 0;
168 return try_to_simplify_bb(bb, def, br);
171 static int simplify_branch_branch(struct basic_block *bb, struct instruction *br,
172 struct basic_block **target_p, int true)
174 struct basic_block *target = *target_p, *final;
175 struct instruction *insn;
176 int retval;
178 if (target == bb)
179 return 0;
180 insn = last_instruction(target->insns);
181 if (!insn || insn->opcode != OP_BR || insn->cond != br->cond)
182 return 0;
184 * Ahhah! We've found a branch to a branch on the same conditional!
185 * Now we just need to see if we can rewrite the branch..
187 retval = 0;
188 final = true ? insn->bb_true : insn->bb_false;
189 if (bb_has_side_effects(target))
190 goto try_to_rewrite_target;
191 if (bb_depends_on(final, target))
192 goto try_to_rewrite_target;
193 return rewrite_branch(bb, target_p, target, final);
195 try_to_rewrite_target:
197 * If we're the only parent, at least we can rewrite the
198 * now-known second branch.
200 if (bb_list_size(target->parents) != 1)
201 return retval;
202 insert_branch(target, insn, final);
203 return 1;
206 static int simplify_one_branch(struct basic_block *bb, struct instruction *br)
208 if (simplify_phi_branch(bb, br))
209 return 1;
210 return simplify_branch_branch(bb, br, &br->bb_true, 1) |
211 simplify_branch_branch(bb, br, &br->bb_false, 0);
214 static int simplify_branch_nodes(struct entrypoint *ep)
216 int changed = 0;
217 struct basic_block *bb;
219 FOR_EACH_PTR(ep->bbs, bb) {
220 struct instruction *br = last_instruction(bb->insns);
222 if (!br || br->opcode != OP_BR || !br->bb_false)
223 continue;
224 changed |= simplify_one_branch(bb, br);
225 } END_FOR_EACH_PTR(bb);
226 return changed;
/*
 * This is called late - when we have intra-bb liveness information..
 */
int simplify_flow(struct entrypoint *ep)
{
	return simplify_branch_nodes(ep);
}
/* Append the 'src' user list onto '*dst' (thin ptr_list wrapper). */
static inline void concat_user_list(struct pseudo_ptr_list *src, struct pseudo_ptr_list **dst)
{
	concat_ptr_list((struct ptr_list *)src, (struct ptr_list **)dst);
}
242 void convert_instruction_target(struct instruction *insn, pseudo_t src)
244 pseudo_t target, *usep;
247 * Go through the "insn->users" list and replace them all..
249 target = insn->target;
250 if (target == src)
251 return;
252 FOR_EACH_PTR(target->users, usep) {
253 if (*usep != VOID) {
254 assert(*usep == target);
255 *usep = src;
257 } END_FOR_EACH_PTR(usep);
258 concat_user_list(target->users, &src->users);
259 target->users = NULL;
262 void convert_load_instruction(struct instruction *insn, pseudo_t src)
264 convert_instruction_target(insn, src);
265 /* Turn the load into a no-op */
266 insn->opcode = OP_LNOP;
267 insn->bb = NULL;
270 static int overlapping_memop(struct instruction *a, struct instruction *b)
272 unsigned int a_start = a->offset << 3;
273 unsigned int b_start = b->offset << 3;
274 unsigned int a_size = a->size;
275 unsigned int b_size = b->size;
277 if (a_size + a_start <= b_start)
278 return 0;
279 if (b_size + b_start <= a_start)
280 return 0;
281 return 1;
284 static inline int same_memop(struct instruction *a, struct instruction *b)
286 return a->offset == b->offset && a->size == b->size;
290 * Return 1 if "one" dominates the access to 'pseudo'
291 * in insn.
293 * Return 0 if it doesn't, and -1 if you don't know.
295 int dominates(pseudo_t pseudo, struct instruction *insn, struct instruction *dom, int local)
297 int opcode = dom->opcode;
299 if (opcode == OP_CALL || opcode == OP_ENTRY)
300 return local ? 0 : -1;
301 if (opcode != OP_LOAD && opcode != OP_STORE)
302 return 0;
303 if (dom->src != pseudo) {
304 if (local)
305 return 0;
306 /* We don't think two explicitly different symbols ever alias */
307 if (dom->src->type == PSEUDO_SYM)
308 return 0;
309 /* We could try to do some alias analysis here */
310 return -1;
312 if (!same_memop(insn, dom)) {
313 if (dom->opcode == OP_LOAD)
314 return 0;
315 if (!overlapping_memop(insn, dom))
316 return 0;
317 return -1;
319 return 1;
322 static int find_dominating_parents(pseudo_t pseudo, struct instruction *insn,
323 struct basic_block *bb, unsigned long generation, struct pseudo_list **dominators,
324 int local, int loads)
326 struct basic_block *parent;
328 if (!bb->parents)
329 return !!local;
331 if (bb_list_size(bb->parents) > 1)
332 loads = 0;
333 FOR_EACH_PTR(bb->parents, parent) {
334 struct instruction *one;
335 struct instruction *br;
336 pseudo_t phi;
338 FOR_EACH_PTR_REVERSE(parent->insns, one) {
339 int dominance;
340 if (one == insn)
341 goto no_dominance;
342 dominance = dominates(pseudo, insn, one, local);
343 if (dominance < 0) {
344 if (one->opcode == OP_LOAD)
345 continue;
346 return 0;
348 if (!dominance)
349 continue;
350 if (one->opcode == OP_LOAD && !loads)
351 continue;
352 goto found_dominator;
353 } END_FOR_EACH_PTR_REVERSE(one);
354 no_dominance:
355 if (parent->generation == generation)
356 continue;
357 parent->generation = generation;
359 if (!find_dominating_parents(pseudo, insn, parent, generation, dominators, local, loads))
360 return 0;
361 continue;
363 found_dominator:
364 br = delete_last_instruction(&parent->insns);
365 phi = alloc_phi(parent, one->target, one->size);
366 phi->ident = phi->ident ? : pseudo->ident;
367 add_instruction(&parent->insns, br);
368 use_pseudo(phi, add_pseudo(dominators, phi));
369 } END_FOR_EACH_PTR(parent);
370 return 1;
374 * We should probably sort the phi list just to make it easier to compare
375 * later for equality.
377 void rewrite_load_instruction(struct instruction *insn, struct pseudo_list *dominators)
379 pseudo_t new, phi;
382 * Check for somewhat common case of duplicate
383 * phi nodes.
385 new = first_pseudo(dominators)->def->src1;
386 FOR_EACH_PTR(dominators, phi) {
387 if (new != phi->def->src1)
388 goto complex_phi;
389 new->ident = new->ident ? : phi->ident;
390 } END_FOR_EACH_PTR(phi);
393 * All the same pseudo - mark the phi-nodes unused
394 * and convert the load into a LNOP and replace the
395 * pseudo.
397 FOR_EACH_PTR(dominators, phi) {
398 phi->def->bb = NULL;
399 } END_FOR_EACH_PTR(phi);
400 convert_load_instruction(insn, new);
401 return;
403 complex_phi:
404 /* We leave symbol pseudos with a bogus usage list here */
405 if (insn->src->type != PSEUDO_SYM)
406 kill_use(&insn->src);
407 insn->opcode = OP_PHI;
408 insn->phi_list = dominators;
411 static int find_dominating_stores(pseudo_t pseudo, struct instruction *insn,
412 unsigned long generation, int local)
414 struct basic_block *bb = insn->bb;
415 struct instruction *one, *dom = NULL;
416 struct pseudo_list *dominators;
417 int partial;
419 /* Unreachable load? Undo it */
420 if (!bb) {
421 insn->opcode = OP_LNOP;
422 return 1;
425 partial = 0;
426 FOR_EACH_PTR(bb->insns, one) {
427 int dominance;
428 if (one == insn)
429 goto found;
430 dominance = dominates(pseudo, insn, one, local);
431 if (dominance < 0) {
432 /* Ignore partial load dominators */
433 if (one->opcode == OP_LOAD)
434 continue;
435 dom = NULL;
436 partial = 1;
437 continue;
439 if (!dominance)
440 continue;
441 dom = one;
442 partial = 0;
443 } END_FOR_EACH_PTR(one);
444 /* Whaa? */
445 warning(pseudo->sym->pos, "unable to find symbol read");
446 return 0;
447 found:
448 if (partial)
449 return 0;
451 if (dom) {
452 convert_load_instruction(insn, dom->target);
453 return 1;
456 /* Ok, go find the parents */
457 bb->generation = generation;
459 dominators = NULL;
460 if (!find_dominating_parents(pseudo, insn, bb, generation, &dominators, local, 1))
461 return 0;
463 /* This happens with initial assignments to structures etc.. */
464 if (!dominators) {
465 if (!local)
466 return 0;
467 convert_load_instruction(insn, value_pseudo(0));
468 return 1;
472 * If we find just one dominating instruction, we
473 * can turn it into a direct thing. Otherwise we'll
474 * have to turn the load into a phi-node of the
475 * dominators.
477 rewrite_load_instruction(insn, dominators);
478 return 1;
481 static void kill_store(struct instruction *insn)
483 if (insn) {
484 insn->bb = NULL;
485 insn->opcode = OP_SNOP;
486 kill_use(&insn->target);
490 /* Kill a pseudo that is dead on exit from the bb */
491 static void kill_dead_stores(pseudo_t pseudo, unsigned long generation, struct basic_block *bb, int local)
493 struct instruction *insn;
494 struct basic_block *parent;
496 if (bb->generation == generation)
497 return;
498 bb->generation = generation;
499 FOR_EACH_PTR_REVERSE(bb->insns, insn) {
500 int opcode = insn->opcode;
502 if (opcode != OP_LOAD && opcode != OP_STORE) {
503 if (local)
504 continue;
505 if (opcode == OP_CALL)
506 return;
507 continue;
509 if (insn->src == pseudo) {
510 if (opcode == OP_LOAD)
511 return;
512 kill_store(insn);
513 continue;
515 if (local)
516 continue;
517 if (insn->src->type != PSEUDO_SYM)
518 return;
519 } END_FOR_EACH_PTR_REVERSE(insn);
521 FOR_EACH_PTR(bb->parents, parent) {
522 struct basic_block *child;
523 FOR_EACH_PTR(parent->children, child) {
524 if (child && child != bb)
525 return;
526 } END_FOR_EACH_PTR(child);
527 kill_dead_stores(pseudo, generation, parent, local);
528 } END_FOR_EACH_PTR(parent);
532 * This should see if the "insn" trivially dominates some previous store, and kill the
533 * store if unnecessary.
535 static void kill_dominated_stores(pseudo_t pseudo, struct instruction *insn,
536 unsigned long generation, struct basic_block *bb, int local, int found)
538 struct instruction *one;
539 struct basic_block *parent;
541 /* Unreachable store? Undo it */
542 if (!bb) {
543 kill_store(insn);
544 return;
546 if (bb->generation == generation)
547 return;
548 bb->generation = generation;
549 FOR_EACH_PTR_REVERSE(bb->insns, one) {
550 int dominance;
551 if (!found) {
552 if (one != insn)
553 continue;
554 found = 1;
555 continue;
557 dominance = dominates(pseudo, insn, one, local);
558 if (!dominance)
559 continue;
560 if (dominance < 0)
561 return;
562 if (one->opcode == OP_LOAD)
563 return;
564 kill_store(one);
565 } END_FOR_EACH_PTR_REVERSE(one);
567 if (!found) {
568 warning(bb->pos, "Unable to find instruction");
569 return;
572 FOR_EACH_PTR(bb->parents, parent) {
573 struct basic_block *child;
574 FOR_EACH_PTR(parent->children, child) {
575 if (child && child != bb)
576 return;
577 } END_FOR_EACH_PTR(child);
578 kill_dominated_stores(pseudo, insn, generation, parent, local, found);
579 } END_FOR_EACH_PTR(parent);
582 static void simplify_one_symbol(struct entrypoint *ep, struct symbol *sym)
584 pseudo_t pseudo, src, *pp;
585 struct instruction *def;
586 unsigned long mod;
587 int all, stores, complex;
589 /* Never used as a symbol? */
590 pseudo = sym->pseudo;
591 if (!pseudo)
592 return;
594 /* We don't do coverage analysis of volatiles.. */
595 if (sym->ctype.modifiers & MOD_VOLATILE)
596 return;
598 /* ..and symbols with external visibility need more care */
599 mod = sym->ctype.modifiers & (MOD_NONLOCAL | MOD_STATIC | MOD_ADDRESSABLE);
600 if (mod)
601 goto external_visibility;
603 def = NULL;
604 stores = 0;
605 complex = 0;
606 FOR_EACH_PTR(pseudo->users, pp) {
607 /* We know that the symbol-pseudo use is the "src" in the instruction */
608 struct instruction *insn = container(pp, struct instruction, src);
610 switch (insn->opcode) {
611 case OP_STORE:
612 stores++;
613 def = insn;
614 break;
615 case OP_LOAD:
616 break;
617 case OP_SETVAL:
618 if (!insn->bb)
619 continue;
620 mod |= MOD_ADDRESSABLE;
621 goto external_visibility;
622 case OP_NOP:
623 case OP_SNOP:
624 case OP_LNOP:
625 case OP_PHI:
626 continue;
627 default:
628 warning(sym->pos, "symbol '%s' pseudo used in unexpected way", show_ident(sym->ident));
630 complex |= insn->offset;
631 } END_FOR_EACH_PTR(pp);
633 if (complex)
634 goto complex_def;
635 if (stores > 1)
636 goto multi_def;
639 * Goodie, we have a single store (if even that) in the whole
640 * thing. Replace all loads with moves from the pseudo,
641 * replace the store with a def.
643 src = VOID;
644 if (def)
645 src = def->target;
647 FOR_EACH_PTR(pseudo->users, pp) {
648 struct instruction *insn = container(pp, struct instruction, src);
649 if (insn->opcode == OP_LOAD)
650 convert_load_instruction(insn, src);
651 } END_FOR_EACH_PTR(pp);
653 /* Turn the store into a no-op */
654 kill_store(def);
655 return;
657 multi_def:
658 complex_def:
659 external_visibility:
660 all = 1;
661 FOR_EACH_PTR_REVERSE(pseudo->users, pp) {
662 struct instruction *insn = container(pp, struct instruction, src);
663 if (insn->opcode == OP_LOAD)
664 all &= find_dominating_stores(pseudo, insn, ++bb_generation, !mod);
665 } END_FOR_EACH_PTR_REVERSE(pp);
667 /* If we converted all the loads, remove the stores. They are dead */
668 if (all && !mod) {
669 FOR_EACH_PTR(pseudo->users, pp) {
670 struct instruction *insn = container(pp, struct instruction, src);
671 if (insn->opcode == OP_STORE)
672 kill_store(insn);
673 } END_FOR_EACH_PTR(pp);
674 } else {
676 * If we couldn't take the shortcut, see if we can at least kill some
677 * of them..
679 FOR_EACH_PTR(pseudo->users, pp) {
680 struct instruction *insn = container(pp, struct instruction, src);
681 if (insn->opcode == OP_STORE)
682 kill_dominated_stores(pseudo, insn, ++bb_generation, insn->bb, !mod, 0);
683 } END_FOR_EACH_PTR(pp);
685 if (!(mod & (MOD_NONLOCAL | MOD_STATIC))) {
686 struct basic_block *bb;
687 FOR_EACH_PTR(ep->bbs, bb) {
688 if (!bb->children)
689 kill_dead_stores(pseudo, ++bb_generation, bb, !mod);
690 } END_FOR_EACH_PTR(bb);
694 return;
697 void simplify_symbol_usage(struct entrypoint *ep)
699 struct symbol *sym;
701 FOR_EACH_PTR(ep->accesses, sym) {
702 simplify_one_symbol(ep, sym);
703 } END_FOR_EACH_PTR(sym);
706 static void mark_bb_reachable(struct basic_block *bb, unsigned long generation)
708 struct basic_block *child;
710 if (bb->generation == generation)
711 return;
712 bb->generation = generation;
713 FOR_EACH_PTR(bb->children, child) {
714 mark_bb_reachable(child, generation);
715 } END_FOR_EACH_PTR(child);
718 static void kill_defs(struct instruction *insn)
720 pseudo_t target = insn->target;
722 if (!has_use_list(target))
723 return;
724 if (target->def != insn)
725 return;
727 convert_instruction_target(insn, VOID);
730 void kill_bb(struct basic_block *bb)
732 struct instruction *insn;
733 struct basic_block *child, *parent;
735 FOR_EACH_PTR(bb->insns, insn) {
736 kill_instruction(insn);
737 kill_defs(insn);
739 * We kill unreachable instructions even if they
740 * otherwise aren't "killable". Eg volatile loads
741 * etc.
743 insn->bb = NULL;
744 } END_FOR_EACH_PTR(insn);
745 bb->insns = NULL;
747 FOR_EACH_PTR(bb->children, child) {
748 remove_bb_from_list(&child->parents, bb, 0);
749 } END_FOR_EACH_PTR(child);
750 bb->children = NULL;
752 FOR_EACH_PTR(bb->parents, parent) {
753 remove_bb_from_list(&parent->children, bb, 0);
754 } END_FOR_EACH_PTR(parent);
755 bb->parents = NULL;
758 void kill_unreachable_bbs(struct entrypoint *ep)
760 struct basic_block *bb;
761 unsigned long generation = ++bb_generation;
763 mark_bb_reachable(ep->entry->bb, generation);
764 FOR_EACH_PTR(ep->bbs, bb) {
765 if (bb->generation == generation)
766 continue;
767 /* Mark it as being dead */
768 kill_bb(bb);
769 } END_FOR_EACH_PTR(bb);
772 static int rewrite_parent_branch(struct basic_block *bb, struct basic_block *old, struct basic_block *new)
774 int changed = 0;
775 struct instruction *insn = last_instruction(bb->insns);
777 if (!insn)
778 return 0;
780 /* Infinite loops: let's not "optimize" them.. */
781 if (old == new)
782 return 0;
784 switch (insn->opcode) {
785 case OP_BR:
786 changed |= rewrite_branch(bb, &insn->bb_true, old, new);
787 changed |= rewrite_branch(bb, &insn->bb_false, old, new);
788 assert(changed);
789 return changed;
790 case OP_SWITCH: {
791 struct multijmp *jmp;
792 FOR_EACH_PTR(insn->multijmp_list, jmp) {
793 changed |= rewrite_branch(bb, &jmp->target, old, new);
794 } END_FOR_EACH_PTR(jmp);
795 assert(changed);
796 return changed;
798 default:
799 return 0;
803 static struct basic_block * rewrite_branch_bb(struct basic_block *bb, struct instruction *br)
805 struct basic_block *parent;
806 struct basic_block *target = br->bb_true;
807 struct basic_block *false = br->bb_false;
809 if (target && false) {
810 pseudo_t cond = br->cond;
811 if (cond->type != PSEUDO_VAL)
812 return NULL;
813 target = cond->value ? target : false;
817 * We can't do FOR_EACH_PTR() here, because the parent list
818 * may change when we rewrite the parent.
820 while ((parent = first_basic_block(bb->parents)) != NULL) {
821 if (!rewrite_parent_branch(parent, bb, target))
822 return NULL;
824 return target;
827 static void vrfy_bb_in_list(struct basic_block *bb, struct basic_block_list *list)
829 if (bb) {
830 struct basic_block *tmp;
831 int no_bb_in_list = 0;
833 FOR_EACH_PTR(list, tmp) {
834 if (bb == tmp)
835 return;
836 } END_FOR_EACH_PTR(tmp);
837 assert(no_bb_in_list);
841 static void vrfy_parents(struct basic_block *bb)
843 struct basic_block *tmp;
844 FOR_EACH_PTR(bb->parents, tmp) {
845 vrfy_bb_in_list(bb, tmp->children);
846 } END_FOR_EACH_PTR(tmp);
849 static void vrfy_children(struct basic_block *bb)
851 struct basic_block *tmp;
852 struct instruction *br = last_instruction(bb->insns);
854 if (!br) {
855 assert(!bb->children);
856 return;
858 switch (br->opcode) {
859 struct multijmp *jmp;
860 case OP_BR:
861 vrfy_bb_in_list(br->bb_true, bb->children);
862 vrfy_bb_in_list(br->bb_false, bb->children);
863 break;
864 case OP_SWITCH:
865 case OP_COMPUTEDGOTO:
866 FOR_EACH_PTR(br->multijmp_list, jmp) {
867 vrfy_bb_in_list(jmp->target, bb->children);
868 } END_FOR_EACH_PTR(jmp);
869 break;
870 default:
871 break;
874 FOR_EACH_PTR(bb->children, tmp) {
875 vrfy_bb_in_list(bb, tmp->parents);
876 } END_FOR_EACH_PTR(tmp);
/* Verify both directions of one block's CFG edges. */
static void vrfy_bb_flow(struct basic_block *bb)
{
	vrfy_children(bb);
	vrfy_parents(bb);
}
885 void vrfy_flow(struct entrypoint *ep)
887 struct basic_block *bb;
888 struct basic_block *entry = ep->entry->bb;
890 FOR_EACH_PTR(ep->bbs, bb) {
891 if (bb == entry)
892 entry = NULL;
893 vrfy_bb_flow(bb);
894 } END_FOR_EACH_PTR(bb);
895 assert(!entry);
898 void pack_basic_blocks(struct entrypoint *ep)
900 struct basic_block *bb;
902 /* See if we can merge a bb into another one.. */
903 FOR_EACH_PTR(ep->bbs, bb) {
904 struct instruction *first, *insn;
905 struct basic_block *parent, *child, *last;
907 if (!bb_reachable(bb))
908 continue;
911 * Just a branch?
913 FOR_EACH_PTR(bb->insns, first) {
914 if (!first->bb)
915 continue;
916 switch (first->opcode) {
917 case OP_NOP: case OP_LNOP: case OP_SNOP:
918 continue;
919 case OP_BR: {
920 struct basic_block *replace;
921 replace = rewrite_branch_bb(bb, first);
922 if (replace) {
923 kill_bb(bb);
924 goto no_merge;
927 /* fallthrough */
928 default:
929 goto out;
931 } END_FOR_EACH_PTR(first);
933 out:
935 * See if we only have one parent..
937 last = NULL;
938 FOR_EACH_PTR(bb->parents, parent) {
939 if (last) {
940 if (last != parent)
941 goto no_merge;
942 continue;
944 last = parent;
945 } END_FOR_EACH_PTR(parent);
947 parent = last;
948 if (!parent || parent == bb)
949 continue;
952 * Goodie. See if the parent can merge..
954 FOR_EACH_PTR(parent->children, child) {
955 if (child != bb)
956 goto no_merge;
957 } END_FOR_EACH_PTR(child);
960 * Merge the two.
962 repeat_phase |= REPEAT_CSE;
965 * But don't allow phi-source merges after this.
966 * FIXME, FIXME! I really need to think about this.
967 * Is it true? I think it's ok to merge phi-sources,
968 * as long as we keep their relative position in
969 * the stream. It's the re-ordering we can't have.
970 * I think.
972 merge_phi_sources = 0;
974 parent->children = bb->children;
975 bb->children = NULL;
976 bb->parents = NULL;
978 FOR_EACH_PTR(parent->children, child) {
979 replace_bb_in_list(&child->parents, bb, parent, 0);
980 } END_FOR_EACH_PTR(child);
982 delete_last_instruction(&parent->insns);
983 FOR_EACH_PTR(bb->insns, insn) {
984 if (insn->bb) {
985 assert(insn->bb == bb);
986 insn->bb = parent;
988 add_instruction(&parent->insns, insn);
989 } END_FOR_EACH_PTR(insn);
990 bb->insns = NULL;
992 no_merge:
993 /* nothing to do */;
994 } END_FOR_EACH_PTR(bb);