From ae322dde9734f2bd52149e4e06e2f5fee4794b08 Mon Sep 17 00:00:00 2001 From: bje Date: Mon, 16 Aug 2004 12:20:16 +0000 Subject: [PATCH] * basic-block.h: Include "errors.h". (ENABLE_CHECKING): Define to 0 if not already defined. (FOR_EACH_EDGE): Reimplement. (END_FOR_EACH_EDGE): New. * Update all callers: Eliminate explicit iterator variables, use FOR_EACH_EDGE and END_FOR_EACH_EDGE macros. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/edge-vector-branch@86052 138bc75d-0d04-0410-961f-82ee72b054a4 --- gcc/ChangeLog.vec | 9 ++ gcc/basic-block.h | 32 +++++- gcc/bb-reorder.c | 262 +++++++++++++++++++++++++----------------------- gcc/bt-load.c | 12 ++- gcc/cfg.c | 143 +++++++++++++++----------- gcc/cfganal.c | 155 ++++++++++++++++------------ gcc/cfgbuild.c | 35 ++++--- gcc/cfgcleanup.c | 56 ++++++----- gcc/cfgexpand.c | 52 ++++++---- gcc/cfghooks.c | 68 ++++++++----- gcc/cfglayout.c | 59 ++++++----- gcc/cfgloop.c | 145 ++++++++++++++++----------- gcc/cfgloopanal.c | 39 ++++--- gcc/cfgloopmanip.c | 125 ++++++++++++++--------- gcc/cfgrtl.c | 141 +++++++++++++++----------- gcc/config/i386/i386.c | 82 +++++++-------- gcc/config/ia64/ia64.c | 12 ++- gcc/cse.c | 4 +- gcc/df.c | 11 +- gcc/dominance.c | 10 +- gcc/except.c | 26 +++-- gcc/final.c | 4 +- gcc/flow.c | 11 +- gcc/function.c | 37 ++++--- gcc/gcse.c | 120 ++++++++++++---------- gcc/global.c | 21 ++-- gcc/graph.c | 4 +- gcc/ifcvt.c | 27 ++--- gcc/lcm.c | 181 +++++++++++++++++++-------------- gcc/loop-init.c | 10 +- gcc/loop-invariant.c | 4 +- gcc/loop-iv.c | 4 +- gcc/predict.c | 248 +++++++++++++++++++++++++-------------------- gcc/profile.c | 187 ++++++++++++++++++++-------------- gcc/ra-build.c | 2 +- gcc/ra-rewrite.c | 5 +- gcc/ra.c | 4 +- gcc/recog.c | 10 +- gcc/reg-stack.c | 53 ++++++---- gcc/reload1.c | 16 +-- gcc/sbitmap.c | 29 ++++-- gcc/sched-ebb.c | 50 +++++---- gcc/sched-rgn.c | 27 +++-- gcc/tracer.c | 46 +++++---- gcc/tree-cfg.c | 242 +++++++++++++++++++++++++------------------- gcc/tree-into-ssa.c | 35 
++++--- gcc/tree-outof-ssa.c | 14 ++- gcc/tree-pretty-print.c | 62 ++++++------ gcc/tree-sra.c | 4 +- gcc/tree-ssa-ccp.c | 32 +++--- gcc/tree-ssa-dce.c | 10 +- gcc/tree-ssa-dom.c | 18 ++-- gcc/tree-ssa-live.c | 42 ++++---- gcc/tree-ssa-loop-ch.c | 16 +-- gcc/tree-ssa-loop-im.c | 12 ++- gcc/tree-ssa-pre.c | 39 ++++--- gcc/tree-ssa.c | 15 +-- gcc/tree-tailcall.c | 27 +++-- gcc/var-tracking.c | 7 +- 59 files changed, 1829 insertions(+), 1324 deletions(-) diff --git a/gcc/ChangeLog.vec b/gcc/ChangeLog.vec index b880b42eb01..329ac0571f1 100644 --- a/gcc/ChangeLog.vec +++ b/gcc/ChangeLog.vec @@ -1,3 +1,12 @@ +2004-08-16 Ben Elliston + + * basic-block.h: Include "errors.h". + (ENABLE_CHECKING): Define to 0 if not already defined. + (FOR_EACH_EDGE): Reimplement. + (END_FOR_EACH_EDGE): New. + * Update all callers: Eliminate explicit iterator variables, use + FOR_EACH_EDGE and END_FOR_EACH_EDGE macros. + 2004-08-12 Ben Elliston * basic-block.h (FOR_EACH_PRED_EDGE, FOR_EACH_SUCC_EDGE): Remove. diff --git a/gcc/basic-block.h b/gcc/basic-block.h index 488192f5df8..62c9614a944 100644 --- a/gcc/basic-block.h +++ b/gcc/basic-block.h @@ -29,6 +29,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA #include "hard-reg-set.h" #include "predict.h" #include "vec.h" +#include "errors.h" /* Head of register set linked list. 
*/ typedef bitmap_head regset_head; @@ -517,13 +518,34 @@ struct edge_list #define EDGE_CRITICAL_P(e) (EDGE_COUNT ((e)->src->succs) >= 2 \ && EDGE_COUNT ((e)->dest->preds) >= 2) -#define FOR_EACH_EDGE(e, vec, iter) \ - for ((iter) = 0; VEC_iterate (edge, (vec), (iter), (e)); (iter)++) +#ifndef ENABLE_CHECKING +#define ENABLE_CHECKING 0 +#endif #define EDGE_COUNT(ev) VEC_length (edge, (ev)) -#define EDGE_I(ev,i) VEC_index(edge, (ev), (i)) -#define EDGE_PRED(bb,i) VEC_index(edge, (bb)->preds, (i)) -#define EDGE_SUCC(bb,i) VEC_index(edge, (bb)->succs, (i)) +#define EDGE_I(ev,i) VEC_index (edge, (ev), (i)) +#define EDGE_PRED(bb,i) VEC_index (edge, (bb)->preds, (i)) +#define EDGE_SUCC(bb,i) VEC_index (edge, (bb)->succs, (i)) + +#define FOR_EACH_EDGE(EDGE,EDGE_VEC) \ +do { \ + VEC(edge) *__ev = (EDGE_VEC); \ + edge __check_edge; \ + unsigned int __ix; \ + (EDGE) = NULL; \ + for (__ix = 0; VEC_iterate (edge, __ev, __ix, (EDGE)); __ix++) \ + { \ + if (ENABLE_CHECKING) \ + __check_edge = (EDGE); + +#define END_FOR_EACH_EDGE \ + if (ENABLE_CHECKING \ + && (__ix >= EDGE_COUNT (__ev) \ + || EDGE_I (__ev, __ix) != __check_edge)) \ + internal_error ("edge modified in FOR_EACH_EDGE"); \ + } \ +} \ +while (0) struct edge_list * create_edge_list (void); void free_edge_list (struct edge_list *); diff --git a/gcc/bb-reorder.c b/gcc/bb-reorder.c index 45f4b05a285..9abef676700 100644 --- a/gcc/bb-reorder.c +++ b/gcc/bb-reorder.c @@ -224,7 +224,6 @@ find_traces (int *n_traces, struct trace *traces) int i; int number_of_rounds; edge e; - unsigned ix; fibheap_t heap; /* Add one extra round of trace collection when partitioning hot/cold @@ -239,7 +238,7 @@ find_traces (int *n_traces, struct trace *traces) heap = fibheap_new (); max_entry_frequency = 0; max_entry_count = 0; - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs, ix) + FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) { bbd[e->dest->index].heap = heap; bbd[e->dest->index].node = fibheap_insert (heap, bb_to_key (e->dest), @@ -249,6 +248,7 @@ 
find_traces (int *n_traces, struct trace *traces) if (e->dest->count > max_entry_count) max_entry_count = e->dest->count; } + END_FOR_EACH_EDGE; /* Find the traces. */ for (i = 0; i < number_of_rounds; i++) @@ -307,9 +307,8 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n) do { edge e; - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) if (e->dest != EXIT_BLOCK_PTR && e->dest->rbi->visited != trace_n && (e->flags & EDGE_CAN_FALLTHRU) @@ -357,6 +356,7 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n) } } } + END_FOR_EACH_EDGE; bb = bb->rbi->next; } while (bb != back_edge->dest); @@ -444,7 +444,6 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, struct trace *trace; edge best_edge, e; fibheapkey_t key; - unsigned ix; bb = fibheap_extract_min (*heap); bbd[bb->index].heap = NULL; @@ -495,7 +494,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, bb->index, *n_traces - 1); /* Select the successor that will be placed after BB. */ - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { #ifdef ENABLE_CHECKING if (e->flags & EDGE_FAKE) @@ -533,6 +532,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, best_freq = freq; } } + END_FOR_EACH_EDGE; /* If the best destination has multiple predecessors, and can be duplicated cheaper than a jump, don't allow it to be added @@ -542,7 +542,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, best_edge = NULL; /* Add all non-selected successors to the heaps. */ - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (e == best_edge || e->dest == EXIT_BLOCK_PTR @@ -601,9 +601,9 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, (which_heap == new_heap) ? "next" : "this", e->dest->index, (long) key); } - } } + END_FOR_EACH_EDGE; if (best_edge) /* Suitable successor was found. 
*/ { @@ -637,9 +637,12 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, /* Check whether there is another edge from BB. */ edge another_edge; - FOR_EACH_EDGE (another_edge, bb->succs, ix) - if (another_edge != best_edge) - break; + FOR_EACH_EDGE (another_edge, bb->succs) + { + if (another_edge != best_edge) + break; + } + END_FOR_EACH_EDGE; if (!another_edge && copy_bb_p (best_edge->dest, !optimize_size)) @@ -676,25 +679,28 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, */ - FOR_EACH_EDGE (e, bb->succs, ix) - if (e != best_edge - && (e->flags & EDGE_CAN_FALLTHRU) - && !(e->flags & EDGE_COMPLEX) - && !e->dest->rbi->visited - && EDGE_COUNT (e->dest->preds) == 1 - && !e->crossing_edge - && EDGE_COUNT (e->dest->succs) == 1 - && (EDGE_SUCC (e->dest, 0)->flags & EDGE_CAN_FALLTHRU) - && !(EDGE_SUCC (e->dest, 0)->flags & EDGE_COMPLEX) - && EDGE_SUCC (e->dest, 0)->dest == best_edge->dest - && 2 * e->dest->frequency >= EDGE_FREQUENCY (best_edge)) - { - best_edge = e; - if (dump_file) - fprintf (dump_file, "Selecting BB %d\n", - best_edge->dest->index); - break; - } + FOR_EACH_EDGE (e, bb->succs) + { + if (e != best_edge + && (e->flags & EDGE_CAN_FALLTHRU) + && !(e->flags & EDGE_COMPLEX) + && !e->dest->rbi->visited + && EDGE_COUNT (e->dest->preds) == 1 + && !e->crossing_edge + && EDGE_COUNT (e->dest->succs) == 1 + && (EDGE_SUCC (e->dest, 0)->flags & EDGE_CAN_FALLTHRU) + && !(EDGE_SUCC (e->dest, 0)->flags & EDGE_COMPLEX) + && EDGE_SUCC (e->dest, 0)->dest == best_edge->dest + && 2 * e->dest->frequency >= EDGE_FREQUENCY (best_edge)) + { + best_edge = e; + if (dump_file) + fprintf (dump_file, "Selecting BB %d\n", + best_edge->dest->index); + break; + } + } + END_FOR_EACH_EDGE; bb->rbi->next = best_edge->dest; bb = best_edge->dest; @@ -709,7 +715,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, /* The trace is terminated so we have to recount the keys in heap (some block can have a lower key because now one 
of its predecessors is an end of the trace). */ - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (e->dest == EXIT_BLOCK_PTR || e->dest->rbi->visited) @@ -733,6 +739,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, } } } + END_FOR_EACH_EDGE; } fibheap_delete (*heap); @@ -797,7 +804,6 @@ static fibheapkey_t bb_to_key (basic_block bb) { edge e; - unsigned ix; int priority = 0; @@ -808,7 +814,7 @@ bb_to_key (basic_block bb) /* Prefer blocks whose predecessor is an end of some trace or whose predecessor edge is EDGE_DFS_BACK. */ - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) { if ((e->src != ENTRY_BLOCK_PTR && bbd[e->src->index].end_of_trace >= 0) || (e->flags & EDGE_DFS_BACK)) @@ -819,6 +825,7 @@ bb_to_key (basic_block bb) priority = edge_freq; } } + END_FOR_EACH_EDGE; if (priority) /* The block with priority should have significantly lower key. */ @@ -965,10 +972,9 @@ connect_traces (int n_traces, struct trace *traces) /* Find the predecessor traces. */ for (t2 = t; t2 > 0;) { - unsigned ix; best = NULL; best_len = 0; - FOR_EACH_EDGE (e, traces[t2].first->preds, ix) + FOR_EACH_EDGE (e, traces[t2].first->preds) { int si = e->src->index; @@ -986,6 +992,8 @@ connect_traces (int n_traces, struct trace *traces) best_len = traces[bbd[si].end_of_trace].length; } } + END_FOR_EACH_EDGE; + if (best) { best->src->rbi->next = best->dest; @@ -1012,14 +1020,13 @@ connect_traces (int n_traces, struct trace *traces) /* Find the successor traces. */ while (1) { - unsigned ix; /* Find the continuation of the chain. 
*/ best = NULL; best_len = 0; - FOR_EACH_EDGE (e, traces[t].last->succs, ix) + FOR_EACH_EDGE (e, traces[t].last->succs) { int di = e->dest->index; - + if (e->dest != EXIT_BLOCK_PTR && (e->flags & EDGE_CAN_FALLTHRU) && !(e->flags & EDGE_COMPLEX) @@ -1034,6 +1041,7 @@ connect_traces (int n_traces, struct trace *traces) best_len = traces[bbd[di].start_of_trace].length; } } + END_FOR_EACH_EDGE; if (best) { @@ -1052,60 +1060,63 @@ connect_traces (int n_traces, struct trace *traces) else { /* Try to connect the traces by duplication of 1 block. */ - unsigned ix, ix2; edge e2; basic_block next_bb = NULL; bool try_copy = false; - FOR_EACH_EDGE (e, traces[t].last->succs, ix) - if (e->dest != EXIT_BLOCK_PTR - && (e->flags & EDGE_CAN_FALLTHRU) - && !(e->flags & EDGE_COMPLEX) - && (!best || e->probability > best->probability)) - { - edge best2 = NULL; - int best2_len = 0; - - /* If the destination is a start of a trace which is only - one block long, then no need to search the successor - blocks of the trace. Accept it. 
*/ - if (bbd[e->dest->index].start_of_trace >= 0 - && traces[bbd[e->dest->index].start_of_trace].length - == 1) - { - best = e; - try_copy = true; - continue; - } - - FOR_EACH_EDGE (e2, e->dest->succs, ix2) - { - int di = e2->dest->index; - - if (e2->dest == EXIT_BLOCK_PTR - || ((e2->flags & EDGE_CAN_FALLTHRU) - && !(e2->flags & EDGE_COMPLEX) - && bbd[di].start_of_trace >= 0 - && !connected[bbd[di].start_of_trace] - && (EDGE_FREQUENCY (e2) >= freq_threshold) - && (e2->count >= count_threshold) - && (!best2 - || e2->probability > best2->probability - || (e2->probability == best2->probability - && traces[bbd[di].start_of_trace].length - > best2_len)))) - { - best = e; - best2 = e2; - if (e2->dest != EXIT_BLOCK_PTR) - best2_len = traces[bbd[di].start_of_trace].length; - else - best2_len = INT_MAX; - next_bb = e2->dest; - try_copy = true; - } - } - } + FOR_EACH_EDGE (e, traces[t].last->succs) + { + if (e->dest != EXIT_BLOCK_PTR + && (e->flags & EDGE_CAN_FALLTHRU) + && !(e->flags & EDGE_COMPLEX) + && (!best || e->probability > best->probability)) + { + edge best2 = NULL; + int best2_len = 0; + + /* If the destination is a start of a trace which is only + one block long, then no need to search the successor + blocks of the trace. Accept it. 
*/ + if (bbd[e->dest->index].start_of_trace >= 0 + && traces[bbd[e->dest->index].start_of_trace].length + == 1) + { + best = e; + try_copy = true; + continue; + } + + FOR_EACH_EDGE (e2, e->dest->succs) + { + int di = e2->dest->index; + + if (e2->dest == EXIT_BLOCK_PTR + || ((e2->flags & EDGE_CAN_FALLTHRU) + && !(e2->flags & EDGE_COMPLEX) + && bbd[di].start_of_trace >= 0 + && !connected[bbd[di].start_of_trace] + && (EDGE_FREQUENCY (e2) >= freq_threshold) + && (e2->count >= count_threshold) + && (!best2 + || e2->probability > best2->probability + || (e2->probability == best2->probability + && traces[bbd[di].start_of_trace].length + > best2_len)))) + { + best = e; + best2 = e2; + if (e2->dest != EXIT_BLOCK_PTR) + best2_len = traces[bbd[di].start_of_trace].length; + else + best2_len = INT_MAX; + next_bb = e2->dest; + try_copy = true; + } + } + END_FOR_EACH_EDGE; + } + } + END_FOR_EACH_EDGE; if (flag_reorder_blocks_and_partition) try_copy = false; @@ -1251,7 +1262,6 @@ find_rarely_executed_basic_blocks_and_crossing_edges (edge *crossing_edges, basic_block bb; edge e; int i; - unsigned ix; /* Mark which partition (hot/cold) each basic block belongs in. 
*/ @@ -1267,7 +1277,7 @@ find_rarely_executed_basic_blocks_and_crossing_edges (edge *crossing_edges, i = 0; FOR_EACH_BB (bb) - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (e->src != ENTRY_BLOCK_PTR && e->dest != EXIT_BLOCK_PTR @@ -1285,6 +1295,7 @@ find_rarely_executed_basic_blocks_and_crossing_edges (edge *crossing_edges, else e->crossing_edge = false; } + END_FOR_EACH_EDGE; *n_crossing_edges = i; } @@ -1549,36 +1560,37 @@ find_jump_block (basic_block jump_dest) basic_block source_bb = NULL; edge e; rtx insn; - unsigned ix; - FOR_EACH_EDGE (e, jump_dest->preds, ix) - if (e->crossing_edge) - { - basic_block src = e->src; + FOR_EACH_EDGE (e, jump_dest->preds) + { + if (e->crossing_edge) + { + basic_block src = e->src; - /* Check each predecessor to see if it has a label, and contains - only one executable instruction, which is an unconditional jump. - If so, we can use it. */ + /* Check each predecessor to see if it has a label, and contains + only one executable instruction, which is an unconditional jump. + If so, we can use it. 
*/ - if (LABEL_P (BB_HEAD (src))) - for (insn = BB_HEAD (src); - !INSN_P (insn) && insn != NEXT_INSN (BB_END (src)); - insn = NEXT_INSN (insn)) - { - if (INSN_P (insn) - && insn == BB_END (src) - && JUMP_P (insn) - && !any_condjump_p (insn)) - { - source_bb = src; - break; - } - } + if (LABEL_P (BB_HEAD (src))) + for (insn = BB_HEAD (src); + !INSN_P (insn) && insn != NEXT_INSN (BB_END (src)); + insn = NEXT_INSN (insn)) + { + if (INSN_P (insn) + && insn == BB_END (src) + && JUMP_P (insn) + && !any_condjump_p (insn)) + { + source_bb = src; + break; + } + } - if (source_bb) - break; - } - + if (source_bb) + break; + } + } + END_FOR_EACH_EDGE; return source_bb; } @@ -1837,16 +1849,18 @@ add_reg_crossing_jump_notes (void) { basic_block bb; edge e; - unsigned ix; FOR_EACH_BB (bb) - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->crossing_edge - && JUMP_P (BB_END (e->src))) - REG_NOTES (BB_END (e->src)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP, - NULL_RTX, - REG_NOTES (BB_END - (e->src))); + FOR_EACH_EDGE (e, bb->succs) + { + if (e->crossing_edge + && JUMP_P (BB_END (e->src))) + REG_NOTES (BB_END (e->src)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP, + NULL_RTX, + REG_NOTES (BB_END + (e->src))); + } + END_FOR_EACH_EDGE; } /* Basic blocks containing NOTE_INSN_UNLIKELY_EXECUTED_CODE will be diff --git a/gcc/bt-load.c b/gcc/bt-load.c index e86cac9a399..d49f4c30f0a 100644 --- a/gcc/bt-load.c +++ b/gcc/bt-load.c @@ -878,7 +878,6 @@ augment_live_range (bitmap live_range, HARD_REG_SET *btrs_live_in_range, else if (dominated_by_p (CDI_DOMINATORS, head_bb, new_bb)) { edge e; - unsigned ix; int new_block = new_bb->index; bitmap_set_bit (live_range, new_block); @@ -899,8 +898,11 @@ augment_live_range (bitmap live_range, HARD_REG_SET *btrs_live_in_range, fprintf (dump_file, "\n"); } - FOR_EACH_EDGE (e, head_bb->preds, ix) - *tos++ = e->src; + FOR_EACH_EDGE (e, head_bb->preds) + { + *tos++ = e->src; + } + END_FOR_EACH_EDGE; } else abort(); @@ -911,7 +913,6 @@ augment_live_range (bitmap live_range, 
HARD_REG_SET *btrs_live_in_range, if (!bitmap_bit_p (live_range, bb->index)) { edge e; - unsigned ix; bitmap_set_bit (live_range, bb->index); IOR_HARD_REG_SET (*btrs_live_in_range, @@ -925,12 +926,13 @@ augment_live_range (bitmap live_range, HARD_REG_SET *btrs_live_in_range, fprintf (dump_file, "\n"); } - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) { basic_block pred = e->src; if (!bitmap_bit_p (live_range, pred->index)) *tos++ = pred; } + END_FOR_EACH_EDGE; } } diff --git a/gcc/cfg.c b/gcc/cfg.c index 45356f37660..fb85d6a4754 100644 --- a/gcc/cfg.c +++ b/gcc/cfg.c @@ -141,20 +141,19 @@ clear_edges (void) { basic_block bb; edge e; - unsigned ix; FOR_EACH_BB (bb) { - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) free_edge (e); - + END_FOR_EACH_EDGE; VEC_truncate (edge, bb->succs, 0); VEC_truncate (edge, bb->preds, 0); } - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs, ix) + FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) free_edge (e); - + END_FOR_EACH_EDGE; VEC_truncate (edge, EXIT_BLOCK_PTR->preds, 0); VEC_truncate (edge, ENTRY_BLOCK_PTR->succs, 0); @@ -286,7 +285,6 @@ cached_make_edge (sbitmap *edge_cache, basic_block src, basic_block dst, int fla { int use_edge_cache; edge e; - unsigned ix; /* Don't bother with edge cache for ENTRY or EXIT, if there aren't that many edges to them, or we didn't allocate memory for it. */ @@ -307,12 +305,15 @@ cached_make_edge (sbitmap *edge_cache, basic_block src, basic_block dst, int fla /* Fall through. 
*/ case 0: - FOR_EACH_EDGE (e, src->succs, ix) - if (e->dest == dst) - { - e->flags |= flags; - return NULL; - } + FOR_EACH_EDGE (e, src->succs) + { + if (e->dest == dst) + { + e->flags |= flags; + return NULL; + } + } + END_FOR_EACH_EDGE; break; } @@ -353,31 +354,36 @@ remove_edge (edge e) { edge tmp; basic_block src, dest; - unsigned ix; bool found = false; src = e->src; dest = e->dest; - FOR_EACH_EDGE (tmp, src->succs, ix) - if (tmp == e) - { - VEC_unordered_remove (edge, src->succs, ix); - found = true; - break; - } + FOR_EACH_EDGE (tmp, src->succs) + { + if (tmp == e) + { + VEC_unordered_remove (edge, src->succs, __ix); + found = true; + break; + } + } + END_FOR_EACH_EDGE; if (!found) abort (); found = false; - FOR_EACH_EDGE (tmp, dest->preds, ix) - if (tmp == e) - { - VEC_unordered_remove (edge, dest->preds, ix); - found = true; - break; - } + FOR_EACH_EDGE (tmp, dest->preds) + { + if (tmp == e) + { + VEC_unordered_remove (edge, dest->preds, __ix); + found = true; + break; + } + } + END_FOR_EACH_EDGE; if (!found) abort (); @@ -391,17 +397,19 @@ void redirect_edge_succ (edge e, basic_block new_succ) { edge tmp; - unsigned ix; bool found = false; /* Disconnect the edge from the old successor block. */ - FOR_EACH_EDGE (tmp, e->dest->preds, ix) - if (tmp == e) - { - VEC_unordered_remove (edge, e->dest->preds, ix); - found = true; - break; - } + FOR_EACH_EDGE (tmp, e->dest->preds) + { + if (tmp == e) + { + VEC_unordered_remove (edge, e->dest->preds, __ix); + found = true; + break; + } + } + END_FOR_EACH_EDGE; if (!found) abort (); @@ -417,12 +425,14 @@ edge redirect_edge_succ_nodup (edge e, basic_block new_succ) { edge s; - unsigned ix; /* Check whether the edge is already present. 
*/ - FOR_EACH_EDGE (s, e->src->succs, ix) - if (s->dest == new_succ && s != e) - break; + FOR_EACH_EDGE (s, e->src->succs) + { + if (s->dest == new_succ && s != e) + break; + } + END_FOR_EACH_EDGE; if (s) { @@ -446,17 +456,19 @@ void redirect_edge_pred (edge e, basic_block new_pred) { edge tmp; - unsigned ix; bool found = false; /* Disconnect the edge from the old predecessor block. */ - FOR_EACH_EDGE (tmp, e->src->succs, ix) - if (tmp == e) - { - VEC_unordered_remove (edge, e->src->succs, ix); - found = true; - break; - } + FOR_EACH_EDGE (tmp, e->src->succs) + { + if (tmp == e) + { + VEC_unordered_remove (edge, e->src->succs, __ix); + found = true; + break; + } + } + END_FOR_EACH_EDGE; if (!found) abort (); @@ -535,7 +547,6 @@ dump_flow_info (FILE *file) { edge e; int sum; - unsigned ix; gcov_type lsum; fprintf (file, "\nBasic block %d ", bb->index); @@ -551,12 +562,14 @@ dump_flow_info (FILE *file) fprintf (file, ".\n"); fprintf (file, "Predecessors: "); - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) dump_edge_info (file, e, 0); + END_FOR_EACH_EDGE; fprintf (file, "\nSuccessors: "); - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) dump_edge_info (file, e, 1); + END_FOR_EACH_EDGE; fprintf (file, "\nRegisters live at start:"); dump_regset (bb->global_live_at_start, file); @@ -572,27 +585,33 @@ dump_flow_info (FILE *file) It is still practical to have them reported for debugging of simple testcases. 
*/ sum = 0; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) sum += e->probability; + END_FOR_EACH_EDGE; + if (EDGE_COUNT (bb->succs) > 0 && abs (sum - REG_BR_PROB_BASE) > 100) fprintf (file, "Invalid sum of outgoing probabilities %.1f%%\n", sum * 100.0 / REG_BR_PROB_BASE); sum = 0; - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) sum += EDGE_FREQUENCY (e); + END_FOR_EACH_EDGE; + if (abs (sum - bb->frequency) > 100) fprintf (file, "Invalid sum of incomming frequencies %i, should be %i\n", sum, bb->frequency); lsum = 0; - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) lsum += e->count; + END_FOR_EACH_EDGE; if (lsum - bb->count > 100 || lsum - bb->count < -100) fprintf (file, "Invalid sum of incomming counts %i, should be %i\n", (int)lsum, (int)bb->count); lsum = 0; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) lsum += e->count; + END_FOR_EACH_EDGE; if (EDGE_COUNT (bb->succs) > 0 && (lsum - bb->count > 100 || lsum - bb->count < -100)) fprintf (file, "Invalid sum of incomming counts %i, should be %i\n", @@ -769,10 +788,10 @@ alloc_aux_for_edges (int size) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) alloc_aux_for_edge (e, size); + END_FOR_EACH_EDGE; } } } @@ -784,12 +803,12 @@ clear_aux_for_edges (void) { basic_block bb; edge e; - unsigned ix; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) e->aux = NULL; + END_FOR_EACH_EDGE; } } @@ -826,7 +845,7 @@ debug_bb_n (int n) static void dump_cfg_bb_info (FILE *file, basic_block bb) { - unsigned i, ix; + unsigned i; bool first = true; static const char * const bb_bitnames[] = { @@ -851,12 +870,14 @@ dump_cfg_bb_info (FILE *file, basic_block bb) fprintf (file, "\n"); fprintf (file, "Predecessors: "); - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) 
dump_edge_info (file, e, 0); + END_FOR_EACH_EDGE; fprintf (file, "\nSuccessors: "); - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) dump_edge_info (file, e, 1); + END_FOR_EACH_EDGE; fprintf (file, "\n\n"); } diff --git a/gcc/cfganal.c b/gcc/cfganal.c index e6b7927f1ac..25e8210298c 100644 --- a/gcc/cfganal.c +++ b/gcc/cfganal.c @@ -104,15 +104,17 @@ can_fallthru (basic_block src, basic_block target) rtx insn = BB_END (src); rtx insn2; edge e; - unsigned ix; if (target == EXIT_BLOCK_PTR) return true; if (src->next_bb != target) return 0; - FOR_EACH_EDGE (e, src->succs, ix) - if (e->dest == EXIT_BLOCK_PTR && e->flags & EDGE_FALLTHRU) - return 0; + FOR_EACH_EDGE (e, src->succs) + { + if (e->dest == EXIT_BLOCK_PTR && e->flags & EDGE_FALLTHRU) + return 0; + } + END_FOR_EACH_EDGE; insn2 = BB_HEAD (target); if (insn2 && !active_insn_p (insn2)) @@ -129,13 +131,15 @@ bool could_fall_through (basic_block src, basic_block target) { edge e; - unsigned ix; if (target == EXIT_BLOCK_PTR) return true; - FOR_EACH_EDGE (e, src->succs, ix) - if (e->dest == EXIT_BLOCK_PTR && e->flags & EDGE_FALLTHRU) - return 0; + FOR_EACH_EDGE (e, src->succs) + { + if (e->dest == EXIT_BLOCK_PTR && e->flags & EDGE_FALLTHRU) + return 0; + } + END_FOR_EACH_EDGE; return true; } @@ -245,9 +249,8 @@ set_edge_can_fallthru_flag (void) FOR_EACH_BB (bb) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { e->flags &= ~EDGE_CAN_FALLTHRU; @@ -255,6 +258,7 @@ set_edge_can_fallthru_flag (void) if (e->flags & EDGE_FALLTHRU) e->flags |= EDGE_CAN_FALLTHRU; } + END_FOR_EACH_EDGE; /* If the BB ends with an invertible condjump all (2) edges are CAN_FALLTHRU edges. */ @@ -278,7 +282,6 @@ void find_unreachable_blocks (void) { edge e; - unsigned ix; basic_block *tos, *worklist, bb; tos = worklist = xmalloc (sizeof (basic_block) * n_basic_blocks); @@ -292,13 +295,14 @@ find_unreachable_blocks (void) be only one. 
It isn't inconceivable that we might one day directly support Fortran alternate entry points. */ - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs, ix) + FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) { *tos++ = e->dest; /* Mark the block reachable. */ e->dest->flags |= BB_REACHABLE; } + END_FOR_EACH_EDGE; /* Iterate: find everything reachable from what we've already seen. */ @@ -306,12 +310,15 @@ find_unreachable_blocks (void) { basic_block b = *--tos; - FOR_EACH_EDGE (e, b->succs, ix) - if (!(e->dest->flags & BB_REACHABLE)) - { - *tos++ = e->dest; - e->dest->flags |= BB_REACHABLE; - } + FOR_EACH_EDGE (e, b->succs) + { + if (!(e->dest->flags & BB_REACHABLE)) + { + *tos++ = e->dest; + e->dest->flags |= BB_REACHABLE; + } + } + END_FOR_EACH_EDGE; } free (worklist); @@ -338,7 +345,6 @@ create_edge_list (void) int num_edges; int block_count; basic_block bb; - unsigned ix; block_count = n_basic_blocks + 2; /* Include the entry and exit blocks. */ @@ -360,9 +366,13 @@ create_edge_list (void) /* Follow successors of blocks, and register these edges. 
*/ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) - FOR_EACH_EDGE (e, bb->succs, ix) - elist->index_to_edge[num_edges++] = e; - + { + FOR_EACH_EDGE (e, bb->succs) + { + elist->index_to_edge[num_edges++] = e; + } + END_FOR_EACH_EDGE; + } return elist; } @@ -412,12 +422,11 @@ verify_edge_list (FILE *f, struct edge_list *elist) { int pred, succ, index; edge e; - unsigned ix; basic_block bb, p, s; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { pred = e->src->index; succ = e->dest->index; @@ -435,6 +444,7 @@ verify_edge_list (FILE *f, struct edge_list *elist) fprintf (f, "*p* Succ for index %d should be %d not %d\n", index, succ, INDEX_EDGE_SUCC_BB (elist, index)->index); } + END_FOR_EACH_EDGE; } /* We've verified that all the edges are in the list, now lets make sure @@ -445,19 +455,25 @@ verify_edge_list (FILE *f, struct edge_list *elist) { int found_edge = 0; - FOR_EACH_EDGE (e, p->succs, ix) - if (e->dest == s) - { - found_edge = 1; - break; - } + FOR_EACH_EDGE (e, p->succs) + { + if (e->dest == s) + { + found_edge = 1; + break; + } + } + END_FOR_EACH_EDGE; - FOR_EACH_EDGE (e, s->preds, ix) - if (e->src == p) - { - found_edge = 1; - break; - } + FOR_EACH_EDGE (e, s->preds) + { + if (e->src == p) + { + found_edge = 1; + break; + } + } + END_FOR_EACH_EDGE; if (EDGE_INDEX (elist, p, s) == EDGE_INDEX_NO_EDGE && found_edge != 0) @@ -477,11 +493,13 @@ edge find_edge (basic_block pred, basic_block succ) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, pred->succs, ix) - if (e->dest == succ) - return e; + FOR_EACH_EDGE (e, pred->succs) + { + if (e->dest == succ) + return e; + } + END_FOR_EACH_EDGE; return NULL; } @@ -544,16 +562,16 @@ static void remove_fake_predecessors (basic_block bb) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) { if ((e->flags & EDGE_FAKE) == EDGE_FAKE) { remove_edge (e); - ix--; + __ix--; } } + END_FOR_EACH_EDGE; 
} /* This routine will remove all fake edges from the flow graph. If @@ -988,17 +1006,19 @@ flow_dfs_compute_reverse_execute (depth_first_search_ds data) { basic_block bb; edge e; - unsigned ix; while (data->sp > 0) { bb = data->stack[--data->sp]; /* Perform depth-first search on adjacent vertices. */ - FOR_EACH_EDGE (e, bb->preds, ix) - if (!TEST_BIT (data->visited_blocks, - e->src->index - (INVALID_BLOCK + 1))) - flow_dfs_compute_reverse_add_bb (data, e->src); + FOR_EACH_EDGE (e, bb->preds) + { + if (!TEST_BIT (data->visited_blocks, + e->src->index - (INVALID_BLOCK + 1))) + flow_dfs_compute_reverse_add_bb (data, e->src); + } + END_FOR_EACH_EDGE; } /* Determine if there are unvisited basic blocks. */ @@ -1029,7 +1049,6 @@ dfs_enumerate_from (basic_block bb, int reverse, { basic_block *st, lbb; int sp = 0, tv = 0; - unsigned ix; st = xcalloc (rslt_max, sizeof (basic_block)); rslt[tv++] = st[sp++] = bb; @@ -1040,25 +1059,31 @@ dfs_enumerate_from (basic_block bb, int reverse, lbb = st[--sp]; if (reverse) { - FOR_EACH_EDGE (e, lbb->preds, ix) - if (!(e->src->flags & BB_VISITED) && predicate (e->src, data)) - { - if (tv == rslt_max) - abort (); - rslt[tv++] = st[sp++] = e->src; - e->src->flags |= BB_VISITED; - } + FOR_EACH_EDGE (e, lbb->preds) + { + if (!(e->src->flags & BB_VISITED) && predicate (e->src, data)) + { + if (tv == rslt_max) + abort (); + rslt[tv++] = st[sp++] = e->src; + e->src->flags |= BB_VISITED; + } + } + END_FOR_EACH_EDGE; } else { - FOR_EACH_EDGE (e, lbb->succs, ix) - if (!(e->dest->flags & BB_VISITED) && predicate (e->dest, data)) - { - if (tv == rslt_max) - abort (); - rslt[tv++] = st[sp++] = e->dest; - e->dest->flags |= BB_VISITED; - } + FOR_EACH_EDGE (e, lbb->succs) + { + if (!(e->dest->flags & BB_VISITED) && predicate (e->dest, data)) + { + if (tv == rslt_max) + abort (); + rslt[tv++] = st[sp++] = e->dest; + e->dest->flags |= BB_VISITED; + } + } + END_FOR_EACH_EDGE; } } free (st); diff --git a/gcc/cfgbuild.c b/gcc/cfgbuild.c index 
d16690a8c0d..3b6529bbcb8 100644 --- a/gcc/cfgbuild.c +++ b/gcc/cfgbuild.c @@ -249,11 +249,13 @@ make_edges (basic_block min, basic_block max, int update_p) FOR_BB_BETWEEN (bb, min, max->next_bb, next_bb) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->dest != EXIT_BLOCK_PTR) - SET_BIT (edge_cache[bb->index], e->dest->index); + FOR_EACH_EDGE (e, bb->succs) + { + if (e->dest != EXIT_BLOCK_PTR) + SET_BIT (edge_cache[bb->index], e->dest->index); + } + END_FOR_EACH_EDGE; } } @@ -269,7 +271,6 @@ make_edges (basic_block min, basic_block max, int update_p) enum rtx_code code; int force_fallthru = 0; edge e; - unsigned ix; if (LABEL_P (BB_HEAD (bb)) && LABEL_ALT_ENTRY_P (BB_HEAD (bb))) @@ -389,12 +390,16 @@ make_edges (basic_block min, basic_block max, int update_p) /* Find out if we can drop through to the next block. */ insn = NEXT_INSN (insn); - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->dest == EXIT_BLOCK_PTR && e->flags & EDGE_FALLTHRU) - { - insn = 0; - break; - } + FOR_EACH_EDGE (e, bb->succs) + { + if (e->dest == EXIT_BLOCK_PTR && e->flags & EDGE_FALLTHRU) + { + insn = 0; + break; + } + } + END_FOR_EACH_EDGE; + while (insn && NOTE_P (insn) && NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK) @@ -701,7 +706,6 @@ find_many_sub_basic_blocks (sbitmap blocks) FOR_BB_BETWEEN (bb, min, max->next_bb, next_bb) { edge e; - unsigned ix; if (STATE (bb) == BLOCK_ORIGINAL) continue; @@ -709,11 +713,12 @@ find_many_sub_basic_blocks (sbitmap blocks) { bb->count = 0; bb->frequency = 0; - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) { bb->count += e->count; bb->frequency += EDGE_FREQUENCY (e); } + END_FOR_EACH_EDGE; } compute_outgoing_frequencies (bb); @@ -744,17 +749,17 @@ find_sub_basic_blocks (basic_block bb) FOR_BB_BETWEEN (b, min, max->next_bb, next_bb) { edge e; - unsigned ix; if (b != min) { b->count = 0; b->frequency = 0; - FOR_EACH_EDGE (e, b->preds, ix) + FOR_EACH_EDGE (e, b->preds) { b->count += e->count; b->frequency += 
EDGE_FREQUENCY (e); } + END_FOR_EACH_EDGE; } compute_outgoing_frequencies (b); diff --git a/gcc/cfgcleanup.c b/gcc/cfgcleanup.c index 28f401f1186..51e0b8241d5 100644 --- a/gcc/cfgcleanup.c +++ b/gcc/cfgcleanup.c @@ -414,7 +414,6 @@ try_forward_edges (int mode, basic_block b) { bool changed = false; edge e, *threaded_edges = NULL; - unsigned ix; /* If we are partitioning hot/cold basic blocks, we don't want to mess up unconditional or indirect jumps that cross between hot @@ -424,7 +423,7 @@ try_forward_edges (int mode, basic_block b) && find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)) return false; - FOR_EACH_EDGE (e, b->succs, ix) + FOR_EACH_EDGE (e, b->succs) { basic_block target, first; int counter; @@ -615,10 +614,12 @@ try_forward_edges (int mode, basic_block b) } else { - unsigned ix; - FOR_EACH_EDGE (e, first->succs, ix) - e->probability = ((e->probability * REG_BR_PROB_BASE) - / (double) prob); + FOR_EACH_EDGE (e, first->succs) + { + e->probability = ((e->probability * REG_BR_PROB_BASE) + / (double) prob); + } + END_FOR_EACH_EDGE; } update_br_prob_note (first); } @@ -644,6 +645,7 @@ try_forward_edges (int mode, basic_block b) changed = true; } } + END_FOR_EACH_EDGE; if (threaded_edges) free (threaded_edges); @@ -810,7 +812,6 @@ merge_blocks_move (edge e, basic_block b, basic_block c, int mode) edge tmp_edge, b_fallthru_edge; bool c_has_outgoing_fallthru; bool b_has_incoming_fallthru; - unsigned ix; /* Avoid overactive code motion, as the forwarder blocks should be eliminated by edge redirection instead. One exception might have @@ -823,15 +824,21 @@ merge_blocks_move (edge e, basic_block b, basic_block c, int mode) and loop notes. This is done by squeezing out all the notes and leaving them there to lie. Not ideal, but functional. 
*/ - FOR_EACH_EDGE (tmp_edge, c->succs, ix) - if (tmp_edge->flags & EDGE_FALLTHRU) - break; + FOR_EACH_EDGE (tmp_edge, c->succs) + { + if (tmp_edge->flags & EDGE_FALLTHRU) + break; + } + END_FOR_EACH_EDGE; c_has_outgoing_fallthru = (tmp_edge != NULL); - FOR_EACH_EDGE (tmp_edge, b->preds, ix) - if (tmp_edge->flags & EDGE_FALLTHRU) - break; + FOR_EACH_EDGE (tmp_edge, b->preds) + { + if (tmp_edge->flags & EDGE_FALLTHRU) + break; + } + END_FOR_EACH_EDGE; b_has_incoming_fallthru = (tmp_edge != NULL); b_fallthru_edge = tmp_edge; @@ -1187,7 +1194,6 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2) int nehedges1 = 0, nehedges2 = 0; edge fallthru1 = 0, fallthru2 = 0; edge e1, e2; - unsigned ix; /* If BB1 has only one successor, we may be looking at either an unconditional jump, or a fake edge to exit. */ @@ -1398,10 +1404,10 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2) if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs)) return false; - FOR_EACH_EDGE (e1, bb1->succs, ix) + FOR_EACH_EDGE (e1, bb1->succs) { - e2 = EDGE_SUCC (bb2, ix); - + e2 = EDGE_SUCC (bb2, __ix); + if (e1->flags & EDGE_EH) nehedges1++; @@ -1413,6 +1419,7 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2) if (e2->flags & EDGE_FALLTHRU) fallthru2 = e2; } + END_FOR_EACH_EDGE; /* If number of edges of various types does not match, fail. */ if (nehedges1 != nehedges2 @@ -1460,7 +1467,6 @@ try_crossjump_to_edge (int mode, edge e1, edge e2) basic_block redirect_to, redirect_from, to_remove; rtx newpos1, newpos2; edge s; - unsigned ix; newpos1 = newpos2 = NULL_RTX; @@ -1566,16 +1572,15 @@ try_crossjump_to_edge (int mode, edge e1, edge e2) redirect_to->flags |= BB_DIRTY; /* Recompute the frequencies and counts of outgoing edges. 
*/ - FOR_EACH_EDGE (s, redirect_to->succs, ix) + FOR_EACH_EDGE (s, redirect_to->succs) { edge s2; - unsigned ix2; basic_block d = s->dest; if (FORWARDER_BLOCK_P (d)) d = EDGE_SUCC (d, 0)->dest; - FOR_EACH_EDGE (s2, src1->succs, ix2) + FOR_EACH_EDGE (s2, src1->succs) { basic_block d2 = s2->dest; if (FORWARDER_BLOCK_P (d2)) @@ -1583,6 +1588,7 @@ try_crossjump_to_edge (int mode, edge e1, edge e2) if (d == d2) break; } + END_FOR_EACH_EDGE; s->count += s2->count; @@ -1617,6 +1623,7 @@ try_crossjump_to_edge (int mode, edge e1, edge e2) s2->probability * src1->frequency) / (redirect_to->frequency + src1->frequency)); } + END_FOR_EACH_EDGE; update_br_prob_note (redirect_to); @@ -1649,7 +1656,7 @@ try_crossjump_bb (int mode, basic_block bb) { edge e, e2, fallthru; bool changed; - unsigned n, max, ix, ix2; + unsigned max, ix, ix2; basic_block ev, ev2; /* Nothing to do if there is not at least two incoming edges. */ @@ -1674,11 +1681,12 @@ try_crossjump_bb (int mode, basic_block bb) if (EDGE_COUNT (bb->preds) > max) return false; - FOR_EACH_EDGE (e, bb->preds, n) + FOR_EACH_EDGE (e, bb->preds) { if (e->flags & EDGE_FALLTHRU) - fallthru = e; + fallthru = e; } + END_FOR_EACH_EDGE; changed = false; for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); ) diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c index 00a4784e49f..29091cddebb 100644 --- a/gcc/cfgexpand.c +++ b/gcc/cfgexpand.c @@ -136,7 +136,6 @@ expand_gimple_tailcall (basic_block bb, tree stmt) { rtx last = get_last_insn (); edge e; - unsigned ix; int probability; gcov_type count; @@ -163,7 +162,7 @@ expand_gimple_tailcall (basic_block bb, tree stmt) probability = 0; count = 0; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (!(e->flags & (EDGE_ABNORMAL | EDGE_EH))) { @@ -179,9 +178,10 @@ expand_gimple_tailcall (basic_block bb, tree stmt) count += e->count; probability += e->probability; remove_edge (e); - ix--; /* HACK! */ + __ix--; /* HACK!
*/ } } + END_FOR_EACH_EDGE; /* This is somewhat ugly: the call_expr expander often emits instructions after the sibcall (to perform the function return). These confuse the @@ -225,7 +225,6 @@ expand_gimple_basic_block (basic_block bb, FILE * dump_file) tree stmt = NULL; rtx note, last; edge e; - unsigned ix; if (dump_file) { @@ -256,7 +255,7 @@ expand_gimple_basic_block (basic_block bb, FILE * dump_file) NOTE_BASIC_BLOCK (note) = bb; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { /* Clear EDGE_EXECUTABLE. This flag is never used in the backend. */ e->flags &= ~EDGE_EXECUTABLE; @@ -267,9 +266,10 @@ expand_gimple_basic_block (basic_block bb, FILE * dump_file) if (e->flags & EDGE_ABNORMAL) { remove_edge (e); - ix--; + __ix--; } } + END_FOR_EACH_EDGE; for (; !bsi_end_p (bsi); bsi_next (&bsi)) { @@ -322,11 +322,13 @@ construct_init_block (void) { basic_block init_block, first_block; edge e; - unsigned ix; - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs, ix) - if (e->dest == ENTRY_BLOCK_PTR->next_bb) - break; + FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + { + if (e->dest == ENTRY_BLOCK_PTR->next_bb) + break; + } + END_FOR_EACH_EDGE; init_block = create_basic_block (NEXT_INSN (get_insns ()), get_last_insn (), @@ -385,25 +387,31 @@ construct_exit_block (void) exit_block->frequency = EXIT_BLOCK_PTR->frequency; exit_block->count = EXIT_BLOCK_PTR->count; - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds, ix) + ix = 0; + while (ix < EDGE_COUNT (EXIT_BLOCK_PTR->preds)) { + e = EDGE_I (EXIT_BLOCK_PTR->preds, ix); if (!(e->flags & EDGE_ABNORMAL)) - { - redirect_edge_succ (e, exit_block); - ix--; - } + redirect_edge_succ (e, exit_block); + else + ix++; } + e = make_edge (exit_block, EXIT_BLOCK_PTR, EDGE_FALLTHRU); e->probability = REG_BR_PROB_BASE; e->count = EXIT_BLOCK_PTR->count; - FOR_EACH_EDGE (e2, EXIT_BLOCK_PTR->preds, ix) - if (e2 != e) - { - e->count -= e2->count; - exit_block->count -= e2->count; - exit_block->frequency -= EDGE_FREQUENCY (e2); - } + FOR_EACH_EDGE 
(e2, EXIT_BLOCK_PTR->preds) + { + if (e2 != e) + { + e->count -= e2->count; + exit_block->count -= e2->count; + exit_block->frequency -= EDGE_FREQUENCY (e2); + } + } + END_FOR_EACH_EDGE; + if (e->count < 0) e->count = 0; if (exit_block->count < 0) diff --git a/gcc/cfghooks.c b/gcc/cfghooks.c index 83ecab9e098..a260b115407 100644 --- a/gcc/cfghooks.c +++ b/gcc/cfghooks.c @@ -106,7 +106,6 @@ verify_flow_info (void) { int n_fallthru = 0; edge e; - unsigned ix; if (bb->count < 0) { @@ -120,7 +119,7 @@ verify_flow_info (void) bb->index, bb->frequency); err = 1; } - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (last_visited [e->dest->index + 2] == bb) { @@ -160,13 +159,15 @@ verify_flow_info (void) edge_checksum[e->dest->index + 2] += (size_t) e; } + END_FOR_EACH_EDGE; + if (n_fallthru > 1) { error ("Wrong amount of branch edges after unconditional jump %i", bb->index); err = 1; } - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) { if (e->dest != bb) { @@ -180,18 +181,24 @@ verify_flow_info (void) } edge_checksum[e->dest->index + 2] -= (size_t) e; } + END_FOR_EACH_EDGE; } /* Complete edge checksumming for ENTRY and EXIT. 
*/ { edge e; - unsigned ix; - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs, ix) - edge_checksum[e->dest->index + 2] += (size_t) e; + FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + { + edge_checksum[e->dest->index + 2] += (size_t) e; + } + END_FOR_EACH_EDGE; - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds, ix) - edge_checksum[e->dest->index + 2] -= (size_t) e; + FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + { + edge_checksum[e->dest->index + 2] -= (size_t) e; + } + END_FOR_EACH_EDGE; } FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) @@ -223,7 +230,6 @@ void dump_bb (basic_block bb, FILE *outf, int indent) { edge e; - unsigned ix; char *s_indent; s_indent = alloca ((size_t) indent + 1); @@ -248,13 +254,19 @@ dump_bb (basic_block bb, FILE *outf, int indent) putc ('\n', outf); fprintf (outf, ";;%s pred: ", s_indent); - FOR_EACH_EDGE (e, bb->preds, ix) - dump_edge_info (outf, e, 0); + FOR_EACH_EDGE (e, bb->preds) + { + dump_edge_info (outf, e, 0); + } + END_FOR_EACH_EDGE; putc ('\n', outf); fprintf (outf, ";;%s succ: ", s_indent); - FOR_EACH_EDGE (e, bb->succs, ix) - dump_edge_info (outf, e, 1); + FOR_EACH_EDGE (e, bb->succs) + { + dump_edge_info (outf, e, 1); + } + END_FOR_EACH_EDGE; putc ('\n', outf); if (cfg_hooks->dump_bb) @@ -389,7 +401,6 @@ split_edge (edge e) gcov_type count = e->count; int freq = EDGE_FREQUENCY (e); edge f; - unsigned ix; if (!cfg_hooks->split_edge) internal_error ("%s does not support split_edge.", cfg_hooks->name); @@ -417,7 +428,7 @@ split_edge (edge e) if (get_immediate_dominator (CDI_DOMINATORS, EDGE_SUCC (ret, 0)->dest) == EDGE_PRED (ret, 0)->src) { - FOR_EACH_EDGE (f, EDGE_SUCC (ret, 0)->dest->preds, ix) + FOR_EACH_EDGE (f, EDGE_SUCC (ret, 0)->dest->preds) { if (f == EDGE_SUCC (ret, 0)) continue; @@ -426,6 +437,7 @@ split_edge (edge e) EDGE_SUCC (ret, 0)->dest)) break; } + END_FOR_EACH_EDGE; if (!f) set_immediate_dominator (CDI_DOMINATORS, EDGE_SUCC (ret, 0)->dest, ret); @@ -504,7 +516,6 @@ void merge_blocks (basic_block a, basic_block b) { edge e; - 
unsigned ix; if (!cfg_hooks->merge_blocks) internal_error ("%s does not support merge_blocks.", cfg_hooks->name); @@ -520,8 +531,11 @@ merge_blocks (basic_block a, basic_block b) remove_edge (EDGE_SUCC (a, 0)); /* Adjust the edges out of B for the new owner. */ - FOR_EACH_EDGE (e, b->succs, ix) - e->src = a; + FOR_EACH_EDGE (e, b->succs) + { + e->src = a; + } + END_FOR_EACH_EDGE; a->succs = b->succs; a->flags |= b->flags; @@ -550,7 +564,6 @@ make_forwarder_block (basic_block bb, bool (*redirect_edge_p) (edge), { edge e, fallthru; basic_block dummy, jump; - unsigned ix; if (!cfg_hooks->make_forwarder_block) internal_error ("%s does not support make_forwarder_block.", @@ -561,7 +574,7 @@ make_forwarder_block (basic_block bb, bool (*redirect_edge_p) (edge), bb = fallthru->dest; /* Redirect back edges we want to keep. */ - FOR_EACH_EDGE (e, dummy->preds, ix) + FOR_EACH_EDGE (e, dummy->preds) { if (redirect_edge_p (e)) continue; @@ -580,6 +593,7 @@ make_forwarder_block (basic_block bb, bool (*redirect_edge_p) (edge), if (jump) new_bb_cbk (jump); } + END_FOR_EACH_EDGE; if (dom_computed[CDI_DOMINATORS] >= DOM_CONS_OK) { @@ -653,7 +667,6 @@ bool can_duplicate_block_p (basic_block bb) { edge e; - unsigned ix; if (!cfg_hooks->can_duplicate_block_p) internal_error ("%s does not support can_duplicate_block_p.", @@ -664,9 +677,12 @@ can_duplicate_block_p (basic_block bb) /* Duplicating fallthru block to exit would require adding a jump and splitting the real last BB. */ - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->dest == EXIT_BLOCK_PTR && e->flags & EDGE_FALLTHRU) - return false; + FOR_EACH_EDGE (e, bb->succs) + { + if (e->dest == EXIT_BLOCK_PTR && e->flags & EDGE_FALLTHRU) + return false; + } + END_FOR_EACH_EDGE; return cfg_hooks->can_duplicate_block_p (bb); } @@ -678,7 +694,6 @@ basic_block duplicate_block (basic_block bb, edge e) { edge s, n; - unsigned ix; basic_block new_bb; gcov_type new_count = e ? 
e->count : 0; @@ -699,7 +714,7 @@ duplicate_block (basic_block bb, edge e) new_bb->loop_depth = bb->loop_depth; new_bb->flags = bb->flags; - FOR_EACH_EDGE (s, bb->succs, ix) + FOR_EACH_EDGE (s, bb->succs) { /* Since we are creating edges from a new block to successors of another block (which therefore are known to be disjoint), there @@ -716,6 +731,7 @@ duplicate_block (basic_block bb, edge e) n->count = s->count; n->aux = s->aux; } + END_FOR_EACH_EDGE; if (e) { diff --git a/gcc/cfglayout.c b/gcc/cfglayout.c index 213966ce240..17a6593a791 100644 --- a/gcc/cfglayout.c +++ b/gcc/cfglayout.c @@ -635,7 +635,6 @@ fixup_reorder_chain (void) rtx bb_end_insn; basic_block nb; basic_block old_bb; - unsigned ix; if (EDGE_COUNT (bb->succs) == 0) continue; @@ -644,11 +643,14 @@ fixup_reorder_chain (void) a taken jump. */ e_taken = e_fall = NULL; - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->flags & EDGE_FALLTHRU) - e_fall = e; - else if (! (e->flags & EDGE_EH)) - e_taken = e; + FOR_EACH_EDGE (e, bb->succs) + { + if (e->flags & EDGE_FALLTHRU) + e_fall = e; + else if (! (e->flags & EDGE_EH)) + e_taken = e; + } + END_FOR_EACH_EDGE; bb_end_insn = BB_END (bb); if (JUMP_P (bb_end_insn)) @@ -860,11 +862,13 @@ fixup_reorder_chain (void) FOR_EACH_BB (bb) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->flags & EDGE_FALLTHRU) - break; + FOR_EACH_EDGE (e, bb->succs) + { + if (e->flags & EDGE_FALLTHRU) + break; + } + END_FOR_EACH_EDGE; if (e && !can_fallthru (e->src, e->dest)) force_nonfallthru (e); @@ -924,7 +928,6 @@ static void fixup_fallthru_exit_predecessor (void) { edge e; - unsigned ix; basic_block bb = NULL; /* This transformation is not valid before reload, because we might separate @@ -932,9 +935,12 @@ fixup_fallthru_exit_predecessor (void) if (! 
reload_completed) abort (); - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds, ix) - if (e->flags & EDGE_FALLTHRU) - bb = e->src; + FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + { + if (e->flags & EDGE_FALLTHRU) + bb = e->src; + } + END_FOR_EACH_EDGE; if (bb && bb->rbi->next) { @@ -1233,7 +1239,7 @@ cfg_layout_finalize (void) bool can_copy_bbs_p (basic_block *bbs, unsigned n) { - unsigned i, ix; + unsigned i; edge e; int ret = true; @@ -1244,14 +1250,17 @@ can_copy_bbs_p (basic_block *bbs, unsigned n) { /* In case we should redirect abnormal edge during duplication, fail. */ - FOR_EACH_EDGE (e, bbs[i]->succs, ix) - if ((e->flags & EDGE_ABNORMAL) - && e->dest->rbi->duplicated) - { - ret = false; - goto end; - } - + FOR_EACH_EDGE (e, bbs[i]->succs) + { + if ((e->flags & EDGE_ABNORMAL) + && e->dest->rbi->duplicated) + { + ret = false; + goto end; + } + } + END_FOR_EACH_EDGE; + if (!can_duplicate_block_p (bbs[i])) { ret = false; @@ -1289,7 +1298,6 @@ copy_bbs (basic_block *bbs, unsigned n, basic_block *new_bbs, unsigned i, j; basic_block bb, new_bb, dom_bb; edge e; - unsigned ix; /* Duplicate bbs, update dominators, assign bbs to loops. */ for (i = 0; i < n; i++) @@ -1330,7 +1338,7 @@ copy_bbs (basic_block *bbs, unsigned n, basic_block *new_bbs, new_bb = new_bbs[i]; bb = bbs[i]; - FOR_EACH_EDGE (e, new_bb->succs, ix) + FOR_EACH_EDGE (e, new_bb->succs) { for (j = 0; j < n_edges; j++) if (edges[j] && edges[j]->src == bb && edges[j]->dest == e->dest) @@ -1340,6 +1348,7 @@ copy_bbs (basic_block *bbs, unsigned n, basic_block *new_bbs, continue; redirect_edge_and_branch_force (e, e->dest->rbi->copy); } + END_FOR_EACH_EDGE; } /* Clear information about duplicates. 
*/ diff --git a/gcc/cfgloop.c b/gcc/cfgloop.c index 4f94ecba7db..4f0addc8ca5 100644 --- a/gcc/cfgloop.c +++ b/gcc/cfgloop.c @@ -64,11 +64,13 @@ flow_loops_cfg_dump (const struct loops *loops, FILE *file) FOR_EACH_BB (bb) { edge succ; - unsigned ix; fprintf (file, ";; %d succs { ", bb->index); - FOR_EACH_EDGE (succ, bb->succs, ix) - fprintf (file, "%d ", succ->dest->index); + FOR_EACH_EDGE (succ, bb->succs) + { + fprintf (file, "%d ", succ->dest->index); + } + END_FOR_EACH_EDGE; fprintf (file, "}\n"); } @@ -245,15 +247,15 @@ static void flow_loop_entry_edges_find (struct loop *loop) { edge e; - unsigned ix; int num_entries; num_entries = 0; - FOR_EACH_EDGE (e, loop->header->preds, ix) + FOR_EACH_EDGE (e, loop->header->preds) { if (flow_loop_outside_edge_p (loop, e)) num_entries++; } + END_FOR_EACH_EDGE; if (! num_entries) abort (); @@ -261,11 +263,12 @@ flow_loop_entry_edges_find (struct loop *loop) loop->entry_edges = xmalloc (num_entries * sizeof (edge *)); num_entries = 0; - FOR_EACH_EDGE (e, loop->header->preds, ix) + FOR_EACH_EDGE (e, loop->header->preds) { if (flow_loop_outside_edge_p (loop, e)) loop->entry_edges[num_entries++] = e; } + END_FOR_EACH_EDGE; loop->num_entries = num_entries; } @@ -277,7 +280,7 @@ flow_loop_exit_edges_find (struct loop *loop) { edge e; basic_block node, *bbs; - unsigned num_exits, i, ix; + unsigned num_exits, i; loop->exit_edges = NULL; loop->num_exits = 0; @@ -291,13 +294,14 @@ flow_loop_exit_edges_find (struct loop *loop) { node = bbs[i]; - FOR_EACH_EDGE (e, node->succs, ix) + FOR_EACH_EDGE (e, node->succs) { basic_block dest = e->dest; if (!flow_bb_inside_loop_p (loop, dest)) num_exits++; } + END_FOR_EACH_EDGE; } if (! 
num_exits) @@ -313,13 +317,14 @@ flow_loop_exit_edges_find (struct loop *loop) for (i = 0; i < loop->num_nodes; i++) { node = bbs[i]; - FOR_EACH_EDGE (e, node->succs, ix) + FOR_EACH_EDGE (e, node->succs) { basic_block dest = e->dest; if (!flow_bb_inside_loop_p (loop, dest)) loop->exit_edges[num_exits++] = e; - } + } + END_FOR_EACH_EDGE; } free (bbs); loop->num_exits = num_exits; @@ -351,11 +356,10 @@ flow_loop_nodes_find (basic_block header, struct loop *loop) { basic_block node; edge e; - unsigned ix; node = stack[--sp]; - FOR_EACH_EDGE (e, node->preds, ix) + FOR_EACH_EDGE (e, node->preds) { basic_block ancestor = e->src; @@ -368,6 +372,7 @@ flow_loop_nodes_find (basic_block header, struct loop *loop) stack[sp++] = ancestor; } } + END_FOR_EACH_EDGE; } free (stack); } @@ -418,12 +423,11 @@ flow_loop_pre_header_find (basic_block header) { basic_block pre_header; edge e; - unsigned ix; /* If block p is a predecessor of the header and is the only block that the header does not dominate, then it is the pre-header. 
*/ pre_header = NULL; - FOR_EACH_EDGE (e, header->preds, ix) + FOR_EACH_EDGE (e, header->preds) { basic_block node = e->src; @@ -441,6 +445,7 @@ flow_loop_pre_header_find (basic_block header) } } } + END_FOR_EACH_EDGE; return pre_header; } @@ -612,7 +617,6 @@ canonicalize_loop_headers (void) { basic_block header; edge e; - unsigned ix; alloc_aux_for_blocks (sizeof (int)); alloc_aux_for_edges (sizeof (int)); @@ -623,7 +627,7 @@ canonicalize_loop_headers (void) int num_latches = 0; int have_abnormal_edge = 0; - FOR_EACH_EDGE (e, header->preds, ix) + FOR_EACH_EDGE (e, header->preds) { basic_block latch = e->src; @@ -637,6 +641,8 @@ canonicalize_loop_headers (void) LATCH_EDGE (e) = 1; } } + END_FOR_EACH_EDGE; + if (have_abnormal_edge) HEADER_BLOCK (header) = 0; else @@ -670,23 +676,29 @@ canonicalize_loop_headers (void) heavy = NULL; max_freq = 0; - FOR_EACH_EDGE (e, header->preds, ix) - if (LATCH_EDGE (e) && - EDGE_FREQUENCY (e) > max_freq) - max_freq = EDGE_FREQUENCY (e); + FOR_EACH_EDGE (e, header->preds) + { + if (LATCH_EDGE (e) && + EDGE_FREQUENCY (e) > max_freq) + max_freq = EDGE_FREQUENCY (e); + } + END_FOR_EACH_EDGE; - FOR_EACH_EDGE (e, header->preds, ix) - if (LATCH_EDGE (e) && - EDGE_FREQUENCY (e) >= max_freq / HEAVY_EDGE_RATIO) - { - if (heavy) - { - is_heavy = 0; - break; - } - else - heavy = e; - } + FOR_EACH_EDGE (e, header->preds) + { + if (LATCH_EDGE (e) && + EDGE_FREQUENCY (e) >= max_freq / HEAVY_EDGE_RATIO) + { + if (heavy) + { + is_heavy = 0; + break; + } + else + heavy = e; + } + } + END_FOR_EACH_EDGE; if (is_heavy) { @@ -739,7 +751,6 @@ flow_loops_find (struct loops *loops, int flags) int *rc_order; basic_block header; basic_block bb; - unsigned ix; /* This function cannot be repeatedly called with different flags to build up the loop information. The loop tree @@ -777,13 +788,17 @@ flow_loops_find (struct loops *loops, int flags) /* If we have an abnormal predecessor, do not consider the loop (not worth the problems). 
*/ - FOR_EACH_EDGE (e, header->preds, ix) - if (e->flags & EDGE_ABNORMAL) - break; + FOR_EACH_EDGE (e, header->preds) + { + if (e->flags & EDGE_ABNORMAL) + break; + } + END_FOR_EACH_EDGE; + if (e) continue; - FOR_EACH_EDGE (e, header->preds, ix) + FOR_EACH_EDGE (e, header->preds) { basic_block latch = e->src; @@ -806,6 +821,7 @@ flow_loops_find (struct loops *loops, int flags) num_loops++; } } + END_FOR_EACH_EDGE; } /* Allocate loop structures. */ @@ -864,7 +880,7 @@ flow_loops_find (struct loops *loops, int flags) num_loops++; /* Look for the latch for this header block. */ - FOR_EACH_EDGE (e, header->preds, ix) + FOR_EACH_EDGE (e, header->preds) { basic_block latch = e->src; @@ -875,6 +891,7 @@ flow_loops_find (struct loops *loops, int flags) break; } } + END_FOR_EACH_EDGE; flow_loop_tree_node_add (header->loop_father, loop); loop->num_nodes = flow_loop_nodes_find (loop->header, loop); @@ -1045,7 +1062,6 @@ get_loop_exit_edges (const struct loop *loop, unsigned int *n_edges) edge *edges, e; unsigned i, n; basic_block * body; - unsigned ix; if (loop->latch == EXIT_BLOCK_PTR) abort (); @@ -1053,16 +1069,22 @@ get_loop_exit_edges (const struct loop *loop, unsigned int *n_edges) body = get_loop_body (loop); n = 0; for (i = 0; i < loop->num_nodes; i++) - FOR_EACH_EDGE (e, body[i]->succs, ix) - if (!flow_bb_inside_loop_p (loop, e->dest)) - n++; + FOR_EACH_EDGE (e, body[i]->succs) + { + if (!flow_bb_inside_loop_p (loop, e->dest)) + n++; + } + END_FOR_EACH_EDGE; edges = xmalloc (n * sizeof (edge)); *n_edges = n; n = 0; for (i = 0; i < loop->num_nodes; i++) - FOR_EACH_EDGE (e, body[i]->succs, ix) - if (!flow_bb_inside_loop_p (loop, e->dest)) - edges[n++] = e; + FOR_EACH_EDGE (e, body[i]->succs) + { + if (!flow_bb_inside_loop_p (loop, e->dest)) + edges[n++] = e; + } + END_FOR_EACH_EDGE; free (body); return edges; @@ -1186,7 +1208,6 @@ verify_loop_structure (struct loops *loops) struct loop *loop; int err = 0; edge e; - unsigned ix; /* Check sizes. 
*/ sizes = xcalloc (loops->num, sizeof (int)); @@ -1284,9 +1305,12 @@ verify_loop_structure (struct loops *loops) SET_BIT (irreds, bb->index); else RESET_BIT (irreds, bb->index); - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->flags & EDGE_IRREDUCIBLE_LOOP) - e->flags |= EDGE_ALL_FLAGS + 1; + FOR_EACH_EDGE (e, bb->succs) + { + if (e->flags & EDGE_IRREDUCIBLE_LOOP) + e->flags |= EDGE_ALL_FLAGS + 1; + } + END_FOR_EACH_EDGE; } /* Recount it. */ @@ -1307,7 +1331,7 @@ verify_loop_structure (struct loops *loops) error ("Basic block %d should not be marked irreducible.", bb->index); err = 1; } - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if ((e->flags & EDGE_IRREDUCIBLE_LOOP) && !(e->flags & (EDGE_ALL_FLAGS + 1))) @@ -1325,6 +1349,7 @@ verify_loop_structure (struct loops *loops) } e->flags &= ~(EDGE_ALL_FLAGS + 1); } + END_FOR_EACH_EDGE; } free (irreds); } @@ -1338,11 +1363,13 @@ edge loop_latch_edge (const struct loop *loop) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, loop->header->preds, ix) - if (e->src == loop->latch) - break; + FOR_EACH_EDGE (e, loop->header->preds) + { + if (e->src == loop->latch) + break; + } + END_FOR_EACH_EDGE; return e; } @@ -1352,11 +1379,13 @@ edge loop_preheader_edge (const struct loop *loop) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, loop->header->preds, ix) - if (e->src != loop->latch) - break; + FOR_EACH_EDGE (e, loop->header->preds) + { + if (e->src != loop->latch) + break; + } + END_FOR_EACH_EDGE; return e; } diff --git a/gcc/cfgloopanal.c b/gcc/cfgloopanal.c index b8ddcf1063a..4beec2f80f3 100644 --- a/gcc/cfgloopanal.c +++ b/gcc/cfgloopanal.c @@ -274,22 +274,23 @@ mark_irreducible_loops (struct loops *loops) int *queue2 = xmalloc ((last_basic_block + loops->num) * sizeof (int)); int nq, depth; struct loop *cloop; - unsigned ix; /* Reset the flags. 
*/ FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { - unsigned ix; act->flags &= ~BB_IRREDUCIBLE_LOOP; - FOR_EACH_EDGE (e, act->succs, ix) - e->flags &= ~EDGE_IRREDUCIBLE_LOOP; + FOR_EACH_EDGE (e, act->succs) + { + e->flags &= ~EDGE_IRREDUCIBLE_LOOP; + } + END_FOR_EACH_EDGE; } /* Create the edge lists. */ g = new_graph (last_basic_block + loops->num); FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) - FOR_EACH_EDGE (e, act->succs, ix) + FOR_EACH_EDGE (e, act->succs) { /* Ignore edges to exit. */ if (e->dest == EXIT_BLOCK_PTR) @@ -328,6 +329,7 @@ mark_irreducible_loops (struct loops *loops) add_edge (g, src, dest, e); } + END_FOR_EACH_EDGE; /* Find the strongly connected components. Use the algorithm of Tarjan -- first determine the postorder dfs numbering in reversed graph, then @@ -418,7 +420,6 @@ unsigned expected_loop_iterations (const struct loop *loop) { edge e; - unsigned ix; if (loop->header->count) { @@ -427,11 +428,14 @@ expected_loop_iterations (const struct loop *loop) count_in = 0; count_latch = 0; - FOR_EACH_EDGE (e, loop->header->preds, ix) - if (e->src == loop->latch) - count_latch = e->count; - else - count_in += e->count; + FOR_EACH_EDGE (e, loop->header->preds) + { + if (e->src == loop->latch) + count_latch = e->count; + else + count_in += e->count; + } + END_FOR_EACH_EDGE; if (count_in == 0) expected = count_latch * 2; @@ -448,11 +452,14 @@ expected_loop_iterations (const struct loop *loop) freq_in = 0; freq_latch = 0; - FOR_EACH_EDGE (e, loop->header->preds, ix) - if (e->src == loop->latch) - freq_latch = EDGE_FREQUENCY (e); - else - freq_in += EDGE_FREQUENCY (e); + FOR_EACH_EDGE (e, loop->header->preds) + { + if (e->src == loop->latch) + freq_latch = EDGE_FREQUENCY (e); + else + freq_in += EDGE_FREQUENCY (e); + } + END_FOR_EACH_EDGE; if (freq_in == 0) return freq_latch * 2; diff --git a/gcc/cfgloopmanip.c b/gcc/cfgloopmanip.c index e06b5c2077e..27f49cbd87b 100644 --- a/gcc/cfgloopmanip.c +++ b/gcc/cfgloopmanip.c @@ 
-118,10 +118,9 @@ static bool fix_bb_placement (struct loops *loops, basic_block bb) { edge e; - unsigned ix; struct loop *loop = loops->tree_root, *act; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) continue; @@ -133,6 +132,7 @@ fix_bb_placement (struct loops *loops, basic_block bb) if (flow_loop_nested_p (loop, act)) loop = act; } + END_FOR_EACH_EDGE; if (loop == bb->loop_father) return false; @@ -158,7 +158,6 @@ fix_bb_placements (struct loops *loops, basic_block from) basic_block *queue, *qtop, *qbeg, *qend; struct loop *base_loop; edge e; - unsigned ix; /* We pass through blocks back-reachable from FROM, testing whether some of their successors moved to outer loop. It may be necessary to @@ -205,7 +204,7 @@ fix_bb_placements (struct loops *loops, basic_block from) } /* Something has changed, insert predecessors into queue. */ - FOR_EACH_EDGE (e, from->preds, ix) + FOR_EACH_EDGE (e, from->preds) { basic_block pred = e->src; struct loop *nca; @@ -237,6 +236,7 @@ fix_bb_placements (struct loops *loops, basic_block from) qend = queue; SET_BIT (in_queue, pred->index); } + END_FOR_EACH_EDGE; } free (in_queue); free (queue); @@ -254,7 +254,6 @@ fix_irreducible_loops (basic_block from) sbitmap on_stack; edge *edges, e; unsigned n_edges, i; - unsigned ix; if (!(from->flags & BB_IRREDUCIBLE_LOOP)) return; @@ -271,9 +270,12 @@ fix_irreducible_loops (basic_block from) bb = stack[--stack_top]; RESET_BIT (on_stack, bb->index); - FOR_EACH_EDGE (e, bb->preds, ix) - if (e->flags & EDGE_IRREDUCIBLE_LOOP) - break; + FOR_EACH_EDGE (e, bb->preds) + { + if (e->flags & EDGE_IRREDUCIBLE_LOOP) + break; + } + END_FOR_EACH_EDGE; if (e) continue; @@ -284,8 +286,11 @@ fix_irreducible_loops (basic_block from) { n_edges = EDGE_COUNT (bb->succs); edges = xmalloc (n_edges * sizeof (edge)); - FOR_EACH_EDGE (e, bb->succs, ix) - edges[ix] = e; + FOR_EACH_EDGE (e, bb->succs) + { + edges[__ix] = e; + } + END_FOR_EACH_EDGE; } for (i = 0; i < 
n_edges; i++) @@ -323,7 +328,6 @@ remove_path (struct loops *loops, edge e) basic_block *rem_bbs, *bord_bbs, *dom_bbs, from, bb; int i, nrem, n_bord_bbs, n_dom_bbs; sbitmap seen; - unsigned ix; if (!loop_delete_branch_edge (e, 0)) return false; @@ -358,12 +362,15 @@ remove_path (struct loops *loops, edge e) for (i = 0; i < nrem; i++) { bb = rem_bbs[i]; - FOR_EACH_EDGE (ae, bb->succs, ix) - if (ae->dest != EXIT_BLOCK_PTR && !TEST_BIT (seen, ae->dest->index)) - { - SET_BIT (seen, ae->dest->index); - bord_bbs[n_bord_bbs++] = ae->dest; - } + FOR_EACH_EDGE (ae, rem_bbs[i]->succs) + { + if (ae->dest != EXIT_BLOCK_PTR && !TEST_BIT (seen, ae->dest->index)) + { + SET_BIT (seen, ae->dest->index); + bord_bbs[n_bord_bbs++] = ae->dest; + } + } + END_FOR_EACH_EDGE; } /* Remove the path. */ @@ -457,14 +464,16 @@ scale_bbs_frequencies (basic_block *bbs, int nbbs, int num, int den) { int i; edge e; - unsigned ix; for (i = 0; i < nbbs; i++) { bbs[i]->frequency = (bbs[i]->frequency * num) / den; bbs[i]->count = RDIV (bbs[i]->count * num, den); - FOR_EACH_EDGE (e, bbs[i]->succs, ix) - e->count = (e->count * num) /den; + FOR_EACH_EDGE (e, bbs[i]->succs) + { + e->count = (e->count * num) /den; + } + END_FOR_EACH_EDGE; } } @@ -502,7 +511,6 @@ loopify (struct loops *loops, edge latch_edge, edge header_edge, int freq, prob, tot_prob; gcov_type cnt; edge e; - unsigned ix; loop->header = header_edge->dest; loop->latch = latch_edge->src; @@ -537,8 +545,11 @@ loopify (struct loops *loops, edge latch_edge, edge header_edge, /* Fix frequencies. 
*/ switch_bb->frequency = freq; switch_bb->count = cnt; - FOR_EACH_EDGE (e, switch_bb->succs, ix) - e->count = (switch_bb->count * e->probability) / REG_BR_PROB_BASE; + FOR_EACH_EDGE (e, switch_bb->succs) + { + e->count = (switch_bb->count * e->probability) / REG_BR_PROB_BASE; + } + END_FOR_EACH_EDGE; scale_loop_frequencies (loop, prob, tot_prob); scale_loop_frequencies (succ_bb->loop_father, tot_prob - prob, tot_prob); @@ -640,19 +651,22 @@ int fix_loop_placement (struct loop *loop) { basic_block *body; - unsigned i, ix; + unsigned i; edge e; struct loop *father = loop->pred[0], *act; body = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) - FOR_EACH_EDGE (e, body[i]->succs, ix) - if (!flow_bb_inside_loop_p (loop, e->dest)) - { - act = find_common_loop (loop, e->dest->loop_father); - if (flow_loop_nested_p (father, act)) - father = act; - } + FOR_EACH_EDGE (e, body[i]->succs) + { + if (!flow_bb_inside_loop_p (loop, e->dest)) + { + act = find_common_loop (loop, e->dest->loop_father); + if (flow_loop_nested_p (father, act)) + father = act; + } + } + END_FOR_EACH_EDGE; free (body); if (father != loop->outer) @@ -857,7 +871,6 @@ duplicate_loop_to_header_edge (struct loop *loop, edge e, struct loops *loops, int p, freq_in, freq_le, freq_out_orig; int prob_pass_thru, prob_pass_wont_exit, prob_pass_main; int add_irreducible_flag; - unsigned ix; if (e->dest != loop->header) abort (); @@ -994,11 +1007,14 @@ duplicate_loop_to_header_edge (struct loop *loop, edge e, struct loops *loops, if (new_bb->loop_father == target) new_bb->flags |= BB_IRREDUCIBLE_LOOP; - FOR_EACH_EDGE (ae, new_bb->succs, ix) - if (ae->dest->rbi->duplicated - && (ae->src->loop_father == target - || ae->dest->loop_father == target)) - ae->flags |= EDGE_IRREDUCIBLE_LOOP; + FOR_EACH_EDGE (ae, new_bb->succs) + { + if (ae->dest->rbi->duplicated + && (ae->src->loop_father == target + || ae->dest->loop_father == target)) + ae->flags |= EDGE_IRREDUCIBLE_LOOP; + } + END_FOR_EACH_EDGE; } for (i = 0; i 
< n; i++) new_bbs[i]->rbi->duplicated = 0; @@ -1119,25 +1135,28 @@ create_preheader (struct loop *loop, int flags) struct loop *cloop, *ploop; int nentry = 0; bool irred = false; - unsigned ix; cloop = loop->outer; - FOR_EACH_EDGE (e, loop->header->preds, ix) + FOR_EACH_EDGE (e, loop->header->preds) { if (e->src == loop->latch) continue; irred |= (e->flags & EDGE_IRREDUCIBLE_LOOP) != 0; nentry++; } + END_FOR_EACH_EDGE; + if (!nentry) abort (); if (nentry == 1) { - FOR_EACH_EDGE (e, loop->header->preds, ix) - if (e->src != loop->latch) - break; - + FOR_EACH_EDGE (e, loop->header->preds) + { + if (e->src != loop->latch) + break; + } + END_FOR_EACH_EDGE; if (!(flags & CP_SIMPLE_PREHEADERS) || EDGE_COUNT (e->src->succs) == 1) return NULL; } @@ -1156,9 +1175,12 @@ create_preheader (struct loop *loop, int flags) /* Reorganize blocks so that the preheader is not stuck in the middle of the loop. */ - FOR_EACH_EDGE (e, dummy->preds, ix) - if (e->src != loop->latch) - break; + FOR_EACH_EDGE (e, dummy->preds) + { + if (e->src != loop->latch) + break; + } + END_FOR_EACH_EDGE; move_block_after (dummy, e->src); loop->header->loop_father = loop; @@ -1193,7 +1215,7 @@ create_preheaders (struct loops *loops, int flags) void force_single_succ_latches (struct loops *loops) { - unsigned i, ix; + unsigned i; struct loop *loop; edge e; @@ -1203,9 +1225,12 @@ force_single_succ_latches (struct loops *loops) if (loop->latch != loop->header && EDGE_COUNT (loop->latch->succs) == 1) continue; - FOR_EACH_EDGE (e, loop->header->preds, ix) - if (e->src == loop->latch) - break; + FOR_EACH_EDGE (e, loop->header->preds) + { + if (e->src == loop->latch) + break; + } + END_FOR_EACH_EDGE; loop_split_edge_with (e, NULL_RTX); } diff --git a/gcc/cfgrtl.c b/gcc/cfgrtl.c index 571450c3318..115fbbe3771 100644 --- a/gcc/cfgrtl.c +++ b/gcc/cfgrtl.c @@ -469,7 +469,6 @@ rtl_split_block (basic_block bb, void *insnp) basic_block new_bb; rtx insn = insnp; edge e; - unsigned ix; if (!insn) { @@ -494,8 +493,11 @@ 
rtl_split_block (basic_block bb, void *insnp) /* Redirect the outgoing edges. */ new_bb->succs = bb->succs; bb->succs = NULL; - FOR_EACH_EDGE (e, new_bb->succs, ix) - e->src = new_bb; + FOR_EACH_EDGE (e, new_bb->succs) + { + e->src = new_bb; + } + END_FOR_EACH_EDGE; if (bb->global_live_at_start) { @@ -677,7 +679,6 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout) edge tmp; rtx set; int fallthru = 0; - unsigned ix; /* If we are partitioning hot/cold basic blocks, we don't want to mess up unconditional or indirect jumps that cross between hot @@ -688,9 +689,12 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout) return NULL; /* Verify that all targets will be TARGET. */ - FOR_EACH_EDGE (tmp, src->succs, ix) - if (tmp->dest != target && tmp != e) - break; + FOR_EACH_EDGE (tmp, src->succs) + { + if (tmp->dest != target && tmp != e) + break; + } + END_FOR_EACH_EDGE; if (tmp || !onlyjump_p (insn)) return NULL; @@ -1046,7 +1050,6 @@ force_nonfallthru_and_redirect (edge e, basic_block target) /* We can't redirect the entry block. Create an empty block at the start of the function which we use to add the new jump. */ edge tmp; - unsigned ix; bool found = false; basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL, ENTRY_BLOCK_PTR); @@ -1054,13 +1057,16 @@ force_nonfallthru_and_redirect (edge e, basic_block target) /* Change the existing edge's source to be the new block, and add a new edge from the entry block to the new block. 
*/ e->src = bb; - FOR_EACH_EDGE (tmp, ENTRY_BLOCK_PTR->succs, ix) - if (tmp == e) - { - VEC_unordered_remove (edge, ENTRY_BLOCK_PTR->succs, ix); - found = true; - break; - } + FOR_EACH_EDGE (tmp, ENTRY_BLOCK_PTR->succs) + { + if (tmp == e) + { + VEC_unordered_remove (edge, ENTRY_BLOCK_PTR->succs, __ix); + found = true; + break; + } + } + END_FOR_EACH_EDGE; if (!found) abort (); @@ -1204,9 +1210,12 @@ rtl_tidy_fallthru_edge (edge e) edge e2; unsigned ix; - FOR_EACH_EDGE (e2, b->succs, ix) - if (e == e2) - break; + for (ix = 0; ix < EDGE_COUNT (b->succs); ix++) + { + e2 = EDGE_I (b->succs, ix); + if (e == e2) + break; + } /* ??? In a late-running flow pass, other folks may have deleted basic blocks by nopping out blocks, leaving multiple BARRIERs between here @@ -1319,11 +1328,13 @@ rtl_split_edge (edge edge_in) if ((edge_in->flags & EDGE_FALLTHRU) == 0) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, edge_in->dest->preds, ix) - if (e->flags & EDGE_FALLTHRU) - break; + FOR_EACH_EDGE (e, edge_in->dest->preds) + { + if (e->flags & EDGE_FALLTHRU) + break; + } + END_FOR_EACH_EDGE; if (e) force_nonfallthru (e); @@ -1695,9 +1706,8 @@ commit_edge_insertions (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (e->insns.r) { @@ -1705,6 +1715,7 @@ commit_edge_insertions (void) commit_one_edge_insertion (e, false); } } + END_FOR_EACH_EDGE; } if (!changed) @@ -1743,9 +1754,8 @@ commit_edge_insertions_watch_calls (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (e->insns.r) { @@ -1753,6 +1763,7 @@ commit_edge_insertions_watch_calls (void) commit_one_edge_insertion (e, true); } } + END_FOR_EACH_EDGE; } if (!changed) @@ -1982,7 +1993,6 @@ rtl_verify_flow_info_1 (void) int n_fallthru = 0, n_eh = 0, n_call = 0, n_abnormal = 0, n_branch = 0; edge e, fallthru = 
NULL; rtx note; - unsigned ix; if (INSN_P (BB_END (bb)) && (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX)) @@ -1996,7 +2006,7 @@ rtl_verify_flow_info_1 (void) err = 1; } } - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (e->flags & EDGE_FALLTHRU) { @@ -2023,6 +2033,7 @@ rtl_verify_flow_info_1 (void) else if (e->flags & EDGE_ABNORMAL) n_abnormal++; } + END_FOR_EACH_EDGE; if (n_eh && GET_CODE (PATTERN (BB_END (bb))) != RESX && !find_reg_note (BB_END (bb), REG_EH_REGION, NULL_RTX)) @@ -2155,14 +2166,16 @@ rtl_verify_flow_info (void) int num_bb_notes; const rtx rtx_first = get_insns (); basic_block last_bb_seen = ENTRY_BLOCK_PTR, curr_bb = NULL; - unsigned ix; FOR_EACH_BB_REVERSE (bb) { edge e; - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->flags & EDGE_FALLTHRU) - break; + FOR_EACH_EDGE (e, bb->succs) + { + if (e->flags & EDGE_FALLTHRU) + break; + } + END_FOR_EACH_EDGE; if (!e) { rtx insn; @@ -2278,7 +2291,6 @@ purge_dead_edges (basic_block bb) rtx insn = BB_END (bb), note; bool purged = false; bool found; - unsigned ix; /* If this instruction cannot trap, remove REG_EH_REGION notes. */ if (NONJUMP_INSN_P (insn) @@ -2293,7 +2305,7 @@ purge_dead_edges (basic_block bb) } /* Cleanup abnormal edges caused by exceptions or non-local gotos. */ - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (e->flags & EDGE_EH) { @@ -2313,8 +2325,9 @@ purge_dead_edges (basic_block bb) remove_edge (e); bb->flags |= BB_DIRTY; purged = true; - ix --; + __ix --; } + END_FOR_EACH_EDGE; if (JUMP_P (insn)) { @@ -2338,7 +2351,7 @@ purge_dead_edges (basic_block bb) remove_note (insn, note); } - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { /* Avoid abnormal flags to leak from computed jumps turned into simplejumps. 
*/ @@ -2372,8 +2385,9 @@ purge_dead_edges (basic_block bb) bb->flags |= BB_DIRTY; purged = true; remove_edge (e); - ix--; + __ix--; } + END_FOR_EACH_EDGE; if (EDGE_COUNT (bb->succs) == 0 || !purged) return purged; @@ -2426,7 +2440,7 @@ purge_dead_edges (basic_block bb) edge we know that there used to be a jump here and can then safely remove all non-fallthru edges. */ found = false; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (! (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU))) { @@ -2434,19 +2448,22 @@ purge_dead_edges (basic_block bb) break; } } + END_FOR_EACH_EDGE; + if (!found) return purged; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (!(e->flags & EDGE_FALLTHRU)) { bb->flags |= BB_DIRTY; remove_edge (e); purged = true; - ix--; + __ix--; } } + END_FOR_EACH_EDGE; if (EDGE_COUNT (bb->succs) != 1) abort (); @@ -2572,12 +2589,16 @@ cfg_layout_redirect_edge_and_branch (edge e, basic_block dest) unsigned ix; edge tmp, s; - FOR_EACH_EDGE (tmp, src->succs, ix) - if (e == tmp) - { - found = true; - break; - } + FOR_EACH_EDGE (tmp, src->succs) + { + if (e == tmp) + { + found = true; + break; + } + } + END_FOR_EACH_EDGE; + if (!found) abort (); @@ -2937,15 +2958,17 @@ rtl_flow_call_edges_add (sbitmap blocks) if (need_fake_edge_p (insn)) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->dest == EXIT_BLOCK_PTR) - { - insert_insn_on_edge (gen_rtx_USE (VOIDmode, const0_rtx), e); - commit_edge_insertions (); - break; - } + FOR_EACH_EDGE (e, bb->succs) + { + if (e->dest == EXIT_BLOCK_PTR) + { + insert_insn_on_edge (gen_rtx_USE (VOIDmode, const0_rtx), e); + commit_edge_insertions (); + break; + } + } + END_FOR_EACH_EDGE; } } @@ -2972,7 +2995,6 @@ rtl_flow_call_edges_add (sbitmap blocks) { edge e; rtx split_at_insn = insn; - unsigned ix; /* Don't split the block between a call and an insn that should remain in the same block as the call. 
*/ @@ -2988,9 +3010,12 @@ rtl_flow_call_edges_add (sbitmap blocks) #ifdef ENABLE_CHECKING if (split_at_insn == BB_END (bb)) - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->dest == EXIT_BLOCK_PTR) - abort (); + FOR_EACH_EDGE (e, bb->succs) + { + if (e->dest == EXIT_BLOCK_PTR) + abort (); + } + END_FOR_EACH_EDGE; #endif /* Note that the following may create a new basic block diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c index 723f5aa251c..433c3a751f4 100644 --- a/gcc/config/i386/i386.c +++ b/gcc/config/i386/i386.c @@ -15761,49 +15761,51 @@ static void ix86_pad_returns (void) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds, ix) - { - basic_block bb = e->src; - rtx ret = BB_END (bb); - rtx prev; - bool replace = false; - - if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN - || !maybe_hot_bb_p (bb)) - continue; - for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev)) - if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL) - break; - if (prev && GET_CODE (prev) == CODE_LABEL) - { - edge e; - unsigned ix; + FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + { + basic_block bb = e->src; + rtx ret = BB_END (bb); + rtx prev; + bool replace = false; + + if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN + || !maybe_hot_bb_p (bb)) + continue; + for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev)) + if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL) + break; + if (prev && GET_CODE (prev) == CODE_LABEL) + { + edge e; - FOR_EACH_EDGE (e, bb->preds, ix) - if (EDGE_FREQUENCY (e) && e->src->index >= 0 - && !(e->flags & EDGE_FALLTHRU)) + FOR_EACH_EDGE (e, bb->preds) + { + if (EDGE_FREQUENCY (e) && e->src->index >= 0 + && !(e->flags & EDGE_FALLTHRU)) + replace = true; + } + END_FOR_EACH_EDGE; + } + if (!replace) + { + prev = prev_active_insn (ret); + if (prev + && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev)) + || GET_CODE (prev) == CALL_INSN)) replace = true; - } - if (!replace) - { - 
prev = prev_active_insn (ret); - if (prev - && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev)) - || GET_CODE (prev) == CALL_INSN)) - replace = true; - /* Empty functions get branch mispredict even when the jump destination - is not visible to us. */ - if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED) - replace = true; - } - if (replace) - { - emit_insn_before (gen_return_internal_long (), ret); - delete_insn (ret); - } - } + /* Empty functions get branch mispredict even when the jump destination + is not visible to us. */ + if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED) + replace = true; + } + if (replace) + { + emit_insn_before (gen_return_internal_long (), ret); + delete_insn (ret); + } + } + END_FOR_EACH_EDGE; } /* Implement machine specific optimizations. We implement padding of returns diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c index 8475b2d034e..6797d47af21 100644 --- a/gcc/config/ia64/ia64.c +++ b/gcc/config/ia64/ia64.c @@ -2542,12 +2542,14 @@ ia64_expand_prologue (void) if (optimize) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds, ix) - if ((e->flags & EDGE_FAKE) == 0 - && (e->flags & EDGE_FALLTHRU) != 0) - break; + FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + { + if ((e->flags & EDGE_FAKE) == 0 + && (e->flags & EDGE_FALLTHRU) != 0) + break; + } + END_FOR_EACH_EDGE; epilogue_p = (e != NULL); } else diff --git a/gcc/cse.c b/gcc/cse.c index dfb35cdeb2f..db8c4e71fb3 100644 --- a/gcc/cse.c +++ b/gcc/cse.c @@ -7631,7 +7631,6 @@ cse_cc_succs (basic_block bb, rtx cc_reg, rtx cc_src, bool can_change_mode) rtx last_insns[2]; unsigned int i; rtx newreg; - unsigned ix; /* We expect to have two successors. Look at both before picking the final mode for the comparison. 
If we have more successors @@ -7642,7 +7641,7 @@ cse_cc_succs (basic_block bb, rtx cc_reg, rtx cc_src, bool can_change_mode) found_equiv = false; mode = GET_MODE (cc_src); insn_count = 0; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { rtx insn; rtx end; @@ -7765,6 +7764,7 @@ cse_cc_succs (basic_block bb, rtx cc_reg, rtx cc_src, bool can_change_mode) } } } + END_FOR_EACH_EDGE; if (! found_equiv) return VOIDmode; diff --git a/gcc/df.c b/gcc/df.c index 0ac0db9e9a5..4af07157028 100644 --- a/gcc/df.c +++ b/gcc/df.c @@ -3814,7 +3814,6 @@ hybrid_search (basic_block bb, struct dataflow *dataflow, int changed; int i = bb->index; edge e; - unsigned ix; SET_BIT (visited, bb->index); if (!TEST_BIT (pending, bb->index)) @@ -3827,7 +3826,7 @@ hybrid_search (basic_block bb, struct dataflow *dataflow, { \ /* Calculate of predecessor_outs. */ \ bitmap_zero (IN_SET[i]); \ - FOR_EACH_EDGE (e, bb->E_ANTI, ix) \ + FOR_EACH_EDGE (e, bb->E_ANTI) \ { \ if (e->E_ANTI_BB == E_ANTI_START_BB) \ continue; \ @@ -3838,7 +3837,7 @@ hybrid_search (basic_block bb, struct dataflow *dataflow, IN_SET[i], IN_SET[i], \ OUT_SET[e->E_ANTI_BB->index]); \ } \ - \ + END_FOR_EACH_EDGE; \ (*dataflow->transfun)(i, &changed, \ dataflow->in[i], dataflow->out[i], \ dataflow->gen[i], dataflow->kill[i], \ @@ -3847,7 +3846,7 @@ hybrid_search (basic_block bb, struct dataflow *dataflow, if (!changed) \ break; \ \ - FOR_EACH_EDGE (e, bb->E, ix) \ + FOR_EACH_EDGE (e, bb->E) \ { \ if (e->E_BB == E_START_BB || e->E_BB->index == i) \ continue; \ @@ -3857,8 +3856,9 @@ hybrid_search (basic_block bb, struct dataflow *dataflow, \ SET_BIT (pending, e->E_BB->index); \ } \ + END_FOR_EACH_EDGE; \ \ - FOR_EACH_EDGE (e, bb->E, ix) \ + FOR_EACH_EDGE (e, bb->E) \ { \ if (e->E_BB == E_START_BB || e->E_BB->index == i) \ continue; \ @@ -3869,6 +3869,7 @@ hybrid_search (basic_block bb, struct dataflow *dataflow, if (!TEST_BIT (visited, e->E_BB->index)) \ hybrid_search (e->E_BB, dataflow, visited, pending, considered); 
\ } \ + END_FOR_EACH_EDGE; \ } while (0) if (dataflow->dir == DF_FORWARD) diff --git a/gcc/dominance.c b/gcc/dominance.c index afb43f72901..df836610bc2 100644 --- a/gcc/dominance.c +++ b/gcc/dominance.c @@ -519,7 +519,7 @@ calc_idoms (struct dom_info *di, enum cdi_direction reverse) to them. That way we have the smallest node with also a path to us only over nodes behind us. In effect we search for our semidominator. */ - FOR_EACH_EDGE (e, ev, ix) + FOR_EACH_EDGE (e, ev) { TBB k1; basic_block b = (reverse) ? e->dest : e->src; @@ -539,6 +539,7 @@ calc_idoms (struct dom_info *di, enum cdi_direction reverse) if (k1 < k) k = k1; } + END_FOR_EACH_EDGE; di->key[v] = k; link_roots (di, par, v); @@ -849,14 +850,13 @@ recount_dominator (enum cdi_direction dir, basic_block bb) { basic_block dom_bb = NULL; edge e; - unsigned ix; if (!dom_computed[dir]) abort (); if (dir == CDI_DOMINATORS) { - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) { /* Ignore the predecessors that either are not reachable from the entry block, or whose dominator was not determined yet. */ @@ -866,14 +866,16 @@ recount_dominator (enum cdi_direction dir, basic_block bb) if (!dominated_by_p (dir, e->src, bb)) dom_bb = nearest_common_dominator (dir, dom_bb, e->src); } + END_FOR_EACH_EDGE; } else { - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (!dominated_by_p (dir, e->dest, bb)) dom_bb = nearest_common_dominator (dir, dom_bb, e->dest); } + END_FOR_EACH_EDGE; } return dom_bb; diff --git a/gcc/except.c b/gcc/except.c index 4dbdbd90719..9ee346336d7 100644 --- a/gcc/except.c +++ b/gcc/except.c @@ -1456,14 +1456,16 @@ emit_to_new_bb_before (rtx seq, rtx insn) rtx last; basic_block bb; edge e; - unsigned ix; /* If there happens to be an fallthru edge (possibly created by cleanup_cfg call), we don't want it to go into newly created landing pad or other EH construct. 
*/ - FOR_EACH_EDGE (e, BLOCK_FOR_INSN (insn)->preds, ix) - if (e->flags & EDGE_FALLTHRU) - force_nonfallthru (e); + FOR_EACH_EDGE (e, BLOCK_FOR_INSN (insn)->preds) + { + if (e->flags & EDGE_FALLTHRU) + force_nonfallthru (e); + } + END_FOR_EACH_EDGE; last = emit_insn_before (seq, insn); if (BARRIER_P (last)) last = PREV_INSN (last); @@ -2024,7 +2026,6 @@ sjlj_emit_function_exit (void) { rtx seq; edge e; - unsigned ix; start_sequence (); @@ -2038,9 +2039,12 @@ sjlj_emit_function_exit (void) post-dominates all can_throw_internal instructions. This is the last possible moment. */ - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds, ix) - if (e->flags & EDGE_FALLTHRU) - break; + FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + { + if (e->flags & EDGE_FALLTHRU) + break; + } + END_FOR_EACH_EDGE; if (e) { rtx insn; @@ -2206,17 +2210,17 @@ finish_eh_generation (void) FOR_EACH_BB (bb) { edge e; - unsigned ix; bool eh = false; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (e->flags & EDGE_EH) { remove_edge (e); - ix--; + __ix--; eh = true; } } + END_FOR_EACH_EDGE; if (eh) rtl_make_eh_edge (NULL, bb, BB_END (bb)); } diff --git a/gcc/final.c b/gcc/final.c index f26e2abfcf6..1533d116e62 100644 --- a/gcc/final.c +++ b/gcc/final.c @@ -677,7 +677,6 @@ compute_alignments (void) rtx label = BB_HEAD (bb); int fallthru_frequency = 0, branch_frequency = 0, has_fallthru = 0; edge e; - unsigned ix; if (!LABEL_P (label) || probably_never_executed_bb_p (bb)) @@ -685,13 +684,14 @@ compute_alignments (void) max_log = LABEL_ALIGN (label); max_skip = LABEL_ALIGN_MAX_SKIP; - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) { if (e->flags & EDGE_FALLTHRU) has_fallthru = 1, fallthru_frequency += EDGE_FREQUENCY (e); else branch_frequency += EDGE_FREQUENCY (e); } + END_FOR_EACH_EDGE; /* There are two purposes to align block with no fallthru incoming edge: 1) to avoid fetch stalls when branch destination is near cache boundary diff --git a/gcc/flow.c b/gcc/flow.c index 
6679ce046dd..8d0d18adbe2 100644 --- a/gcc/flow.c +++ b/gcc/flow.c @@ -1096,7 +1096,6 @@ calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) int rescan, changed; basic_block bb; edge e; - unsigned ix; bb = *qhead++; if (qhead == qend) @@ -1107,7 +1106,7 @@ calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) CLEAR_REG_SET (new_live_at_end); if (EDGE_COUNT (bb->succs) > 0) - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { basic_block sb = e->dest; @@ -1131,6 +1130,7 @@ calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) if (EH_USES (i)) SET_REGNO_REG_SET (new_live_at_end, i); } + END_FOR_EACH_EDGE; else { /* This might be a noreturn function that throws. And @@ -1263,7 +1263,7 @@ calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) /* Queue all predecessors of BB so that we may re-examine their live_at_end. */ - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) { basic_block pb = e->src; if (pb->aux == NULL) @@ -1274,6 +1274,7 @@ calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) pb->aux = pb; } } + END_FOR_EACH_EDGE; } FREE_REG_SET (tmp); @@ -1366,11 +1367,10 @@ initialize_uninitialized_subregs (void) { rtx insn; edge e; - unsigned ix; int reg, did_something = 0; find_regno_partial_param param; - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs, ix) + FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) { basic_block bb = e->dest; regset map = bb->global_live_at_start; @@ -1405,6 +1405,7 @@ initialize_uninitialized_subregs (void) } }); } + END_FOR_EACH_EDGE; if (did_something) commit_edge_insertions (); diff --git a/gcc/function.c b/gcc/function.c index 19b792a6d48..29aa7e49ff1 100644 --- a/gcc/function.c +++ b/gcc/function.c @@ -4948,7 +4948,6 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED) { int inserted = 0; edge e; - unsigned ix; #if defined (HAVE_sibcall_epilogue) || defined (HAVE_epilogue) || defined 
(HAVE_return) || defined (HAVE_prologue) rtx seq; #endif @@ -4987,9 +4986,13 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED) /* If the exit block has no non-fake predecessors, we don't need an epilogue. */ - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds, ix) - if ((e->flags & EDGE_FAKE) == 0) - break; + FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + { + if ((e->flags & EDGE_FAKE) == 0) + break; + } + END_FOR_EACH_EDGE; + if (e == NULL) goto epilogue_done; @@ -5005,9 +5008,13 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED) basic_block last; rtx label; - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds, ix) - if (e->flags & EDGE_FALLTHRU) - break; + FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + { + if (e->flags & EDGE_FALLTHRU) + break; + } + END_FOR_EACH_EDGE; + if (e == NULL) goto epilogue_done; last = e->src; @@ -5036,7 +5043,7 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED) break; } - FOR_EACH_EDGE (e, last->preds, ix) + FOR_EACH_EDGE (e, last->preds) { basic_block bb = e->src; rtx jump; @@ -5075,6 +5082,7 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED) /* Fix up the CFG for the successful change we just made. */ redirect_edge_succ (e, EXIT_BLOCK_PTR); } + END_FOR_EACH_EDGE; /* Emit a return insn for the exit fallthru block. Whether this is still reachable will be determined later. */ @@ -5092,9 +5100,13 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED) There really shouldn't be a mixture -- either all should have been converted or none, however... */ - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds, ix) - if (e->flags & EDGE_FALLTHRU) - break; + FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + { + if (e->flags & EDGE_FALLTHRU) + break; + } + END_FOR_EACH_EDGE; + if (e == NULL) goto epilogue_done; @@ -5154,7 +5166,7 @@ epilogue_done: #ifdef HAVE_sibcall_epilogue /* Emit sibling epilogues before any sibling call sites. 
*/ - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds, ix) + FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) { basic_block bb = e->src; rtx insn = BB_END (bb); @@ -5179,6 +5191,7 @@ epilogue_done: i = PREV_INSN (insn); newinsn = emit_insn_before (seq, insn); } + END_FOR_EACH_EDGE; #endif #ifdef HAVE_prologue diff --git a/gcc/gcse.c b/gcc/gcse.c index 884d3ebc513..9caba94714c 100644 --- a/gcc/gcse.c +++ b/gcc/gcse.c @@ -3911,7 +3911,6 @@ bypass_block (basic_block bb, rtx setcc, rtx jump) edge e, edest; int i, change; int may_be_loop_header; - unsigned ix; insn = (setcc != NULL) ? setcc : jump; @@ -3924,15 +3923,18 @@ bypass_block (basic_block bb, rtx setcc, rtx jump) may_be_loop_header = false; - FOR_EACH_EDGE (e, bb->preds, ix) - if (e->flags & EDGE_DFS_BACK) - { - may_be_loop_header = true; - break; - } + FOR_EACH_EDGE (e, bb->preds) + { + if (e->flags & EDGE_DFS_BACK) + { + may_be_loop_header = true; + break; + } + } + END_FOR_EACH_EDGE; change = 0; - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) { if (e->flags & EDGE_COMPLEX) continue; @@ -3990,15 +3992,17 @@ bypass_block (basic_block bb, rtx setcc, rtx jump) } else if (GET_CODE (new) == LABEL_REF) { - unsigned ix; dest = BLOCK_FOR_INSN (XEXP (new, 0)); /* Don't bypass edges containing instructions. 
*/ - FOR_EACH_EDGE (edest, bb->succs, ix) - if (edest->dest == dest && edest->insns.r) - { - dest = NULL; - break; - } + FOR_EACH_EDGE (edest, bb->succs) + { + if (edest->dest == dest && edest->insns.r) + { + dest = NULL; + break; + } + } + END_FOR_EACH_EDGE; } else dest = NULL; @@ -4010,13 +4014,15 @@ bypass_block (basic_block bb, rtx setcc, rtx jump) if (dest && setcc && !CC0_P (SET_DEST (PATTERN (setcc)))) { edge e2; - unsigned ix; - FOR_EACH_EDGE (e2, e->src->succs, ix) - if (e2->dest == dest) - { - dest = NULL; - break; - } + FOR_EACH_EDGE (e2, e->src->succs) + { + if (e2->dest == dest) + { + dest = NULL; + break; + } + } + END_FOR_EACH_EDGE; } old_dest = e->dest; @@ -4048,6 +4054,7 @@ bypass_block (basic_block bb, rtx setcc, rtx jump) } } } + END_FOR_EACH_EDGE; return change; } @@ -4226,20 +4233,22 @@ compute_pre_data (void) FOR_EACH_BB (bb) { edge e; - unsigned ix; /* If the current block is the destination of an abnormal edge, we kill all trapping expressions because we won't be able to properly place the instruction on the edge. So make them neither anticipatable nor transparent. This is fairly conservative. 
*/ - FOR_EACH_EDGE (e, bb->preds, ix) - if (e->flags & EDGE_ABNORMAL) - { - sbitmap_difference (antloc[bb->index], antloc[bb->index], trapping_expr); - sbitmap_difference (transp[bb->index], transp[bb->index], trapping_expr); - break; - } - + FOR_EACH_EDGE (e, bb->preds) + { + if (e->flags & EDGE_ABNORMAL) + { + sbitmap_difference (antloc[bb->index], antloc[bb->index], trapping_expr); + sbitmap_difference (transp[bb->index], transp[bb->index], trapping_expr); + break; + } + } + END_FOR_EACH_EDGE; + sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]); sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]); } @@ -4272,9 +4281,8 @@ static int pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr, basic_block bb, char *visited) { edge pred; - unsigned ix; - FOR_EACH_EDGE (pred, bb->preds, ix) + FOR_EACH_EDGE (pred, bb->preds) { basic_block pred_bb = pred->src; @@ -4306,6 +4314,7 @@ pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr, basic_bloc return 1; } } + END_FOR_EACH_EDGE; /* All paths have been checked. */ return 0; @@ -5146,7 +5155,6 @@ hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb, { edge pred; int visited_allocated_locally = 0; - unsigned ix; if (visited == NULL) { @@ -5154,7 +5162,7 @@ hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb, visited = xcalloc (last_basic_block, 1); } - FOR_EACH_EDGE (pred, bb->preds, ix) + FOR_EACH_EDGE (pred, bb->preds) { basic_block pred_bb = pred->src; @@ -5180,6 +5188,7 @@ hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb, break; } } + END_FOR_EACH_EDGE; if (visited_allocated_locally) free (visited); @@ -6528,7 +6537,6 @@ insert_store (struct ls_expr * expr, edge e) rtx reg, insn; basic_block bb; edge tmp; - unsigned ix; /* We did all the deleted before this insert, so if we didn't delete a store, then we haven't set the reaching reg yet either. 
*/ @@ -6545,25 +6553,29 @@ insert_store (struct ls_expr * expr, edge e) insert it at the start of the BB, and reset the insert bits on the other edges so we don't try to insert it on the other edges. */ bb = e->dest; - FOR_EACH_EDGE (tmp, e->dest->preds, ix) - if (!(tmp->flags & EDGE_FAKE)) - { - int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest); - if (index == EDGE_INDEX_NO_EDGE) - abort (); - if (! TEST_BIT (pre_insert_map[index], expr->index)) - break; - } + FOR_EACH_EDGE (tmp, e->dest->preds) + { + if (!(tmp->flags & EDGE_FAKE)) + { + int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest); + if (index == EDGE_INDEX_NO_EDGE) + abort (); + if (! TEST_BIT (pre_insert_map[index], expr->index)) + break; + } + } + END_FOR_EACH_EDGE; /* If tmp is NULL, we found an insertion on every edge, blank the insertion vector for these edges, and insert at the start of the BB. */ if (!tmp && bb != EXIT_BLOCK_PTR) { - FOR_EACH_EDGE (tmp, e->dest->preds, ix) + FOR_EACH_EDGE (tmp, e->dest->preds) { int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest); RESET_BIT (pre_insert_map[index], expr->index); } + END_FOR_EACH_EDGE; insert_insn_start_bb (insn, bb); return 0; } @@ -7087,14 +7099,16 @@ static bool bb_has_well_behaved_predecessors (basic_block bb) { edge pred; - unsigned ix; if (EDGE_COUNT (bb->preds) == 0) return false; - FOR_EACH_EDGE (pred, bb->preds, ix) - if (((pred->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (pred)) - || is_jump_table_basic_block (pred->src)) - return false; + FOR_EACH_EDGE (pred, bb->preds) + { + if (((pred->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (pred)) + || is_jump_table_basic_block (pred->src)) + return false; + } + END_FOR_EACH_EDGE; return true; } @@ -7152,7 +7166,6 @@ eliminate_partially_redundant_loads (basic_block bb, rtx insn, int npred_ok = 0; gcov_type ok_count = 0; /* Redundant load execution count. */ gcov_type critical_count = 0; /* Execution count of critical edges. 
*/ - unsigned ix; /* The execution count of the loads to be added to make the load fully redundant. */ @@ -7170,7 +7183,7 @@ eliminate_partially_redundant_loads (basic_block bb, rtx insn, return; /* Check potential for replacing load with copy for predecessors. */ - FOR_EACH_EDGE (pred, bb->preds, ix) + FOR_EACH_EDGE (pred, bb->preds) { rtx next_pred_bb_end; @@ -7226,6 +7239,7 @@ eliminate_partially_redundant_loads (basic_block bb, rtx insn, unavail_occrs = unoccr; } } + END_FOR_EACH_EDGE; if (npred_ok == 0 /* No load can be replaced by copy. */ || (optimize_size && npred_ok > 1)) /* Prevent exploding the code. */ diff --git a/gcc/global.c b/gcc/global.c index 9f0f824082f..8f52d50d437 100644 --- a/gcc/global.c +++ b/gcc/global.c @@ -747,11 +747,13 @@ global_conflicts (void) regs live across such edges. */ { edge e; - unsigned ix; - FOR_EACH_EDGE (e, b->preds, ix) - if (e->flags & EDGE_ABNORMAL) - break; + FOR_EACH_EDGE (e, b->preds) + { + if (e->flags & EDGE_ABNORMAL) + break; + } + END_FOR_EACH_EDGE; if (e != NULL) { @@ -2322,7 +2324,6 @@ calculate_reg_pav (void) varray_type bbs, new_bbs, temp; basic_block *bb_array; sbitmap wset; - unsigned ix; VARRAY_BB_INIT (bbs, n_basic_blocks, "basic blocks"); VARRAY_BB_INIT (new_bbs, n_basic_blocks, "basic blocks for the next iter."); @@ -2341,10 +2342,13 @@ calculate_reg_pav (void) { bb = bb_array [i]; changed_p = 0; - FOR_EACH_EDGE (e, bb->preds, ix) - changed_p = modify_bb_reg_pav (bb, e->src, changed_p); + FOR_EACH_EDGE (e, bb->preds) + { + changed_p = modify_bb_reg_pav (bb, e->src, changed_p); + } + END_FOR_EACH_EDGE; if (changed_p) - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { succ = e->dest; if (succ->index != EXIT_BLOCK && !TEST_BIT (wset, succ->index)) @@ -2353,6 +2357,7 @@ calculate_reg_pav (void) VARRAY_PUSH_BB (new_bbs, succ); } } + END_FOR_EACH_EDGE; } temp = bbs; bbs = new_bbs; diff --git a/gcc/graph.c b/gcc/graph.c index 22adf275b26..9f408cf0ad9 100644 --- a/gcc/graph.c +++ b/gcc/graph.c 
@@ -310,7 +310,6 @@ print_rtl_graph_with_bb (const char *base, const char *suffix, rtx rtx_first) if ((i = end[INSN_UID (tmp_rtx)]) >= 0) { edge e; - unsigned ix; bb = BASIC_BLOCK (i); @@ -320,7 +319,7 @@ print_rtl_graph_with_bb (const char *base, const char *suffix, rtx rtx_first) /* Now specify the edges to all the successors of this basic block. */ - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (e->dest != EXIT_BLOCK_PTR) { @@ -344,6 +343,7 @@ print_rtl_graph_with_bb (const char *base, const char *suffix, rtx rtx_first) edge_printed = 1; } } + END_FOR_EACH_EDGE; } if (!edge_printed) diff --git a/gcc/ifcvt.c b/gcc/ifcvt.c index 640a8c33716..7ecbeb3f022 100644 --- a/gcc/ifcvt.c +++ b/gcc/ifcvt.c @@ -126,8 +126,7 @@ mark_loop_exit_edges (void) { FOR_EACH_BB (bb) { - unsigned int ix; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (find_common_loop (bb->loop_father, e->dest->loop_father) != bb->loop_father) @@ -135,6 +134,7 @@ mark_loop_exit_edges (void) else e->flags &= ~EDGE_LOOP_EXIT; } + END_FOR_EACH_EDGE; } } @@ -250,11 +250,13 @@ static basic_block block_fallthru (basic_block bb) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) - if (! (e->flags & EDGE_FALLTHRU)) - break; + FOR_EACH_EDGE (e, bb->succs) + { + if (! (e->flags & EDGE_FALLTHRU)) + break; + } + END_FOR_EACH_EDGE; return (e) ? e->dest : NULL_BLOCK; } @@ -2358,7 +2360,6 @@ block_jumps_and_fallthru_p (basic_block cur_bb, basic_block target_bb) rtx insn; rtx end; int n_insns = 0; - unsigned ix; if (!cur_bb || !target_bb) return -1; @@ -2367,7 +2368,7 @@ block_jumps_and_fallthru_p (basic_block cur_bb, basic_block target_bb) if (EDGE_COUNT (cur_bb->succs) == 0) return FALSE; - FOR_EACH_EDGE (cur_edge, cur_bb->succs, ix) + FOR_EACH_EDGE (cur_edge, cur_bb->succs) { if (cur_edge->flags & EDGE_COMPLEX) /* Anything complex isn't what we want. 
*/ @@ -2382,6 +2383,7 @@ block_jumps_and_fallthru_p (basic_block cur_bb, basic_block target_bb) else return -1; } + END_FOR_EACH_EDGE; if ((jump_p & fallthru_p) == 0) return -1; @@ -2428,7 +2430,6 @@ find_if_block (struct ce_if_block * ce_info) int else_predecessors; edge cur_edge; basic_block next; - unsigned ix; ce_info->last_test_bb = test_bb; @@ -2492,20 +2493,22 @@ find_if_block (struct ce_if_block * ce_info) /* Count the number of edges the THEN and ELSE blocks have. */ then_predecessors = 0; - FOR_EACH_EDGE (cur_edge, then_bb->preds, ix) + FOR_EACH_EDGE (cur_edge, then_bb->preds) { then_predecessors++; if (cur_edge->flags & EDGE_COMPLEX) return FALSE; } + END_FOR_EACH_EDGE; else_predecessors = 0; - FOR_EACH_EDGE (cur_edge, else_bb->preds, ix) + FOR_EACH_EDGE (cur_edge, else_bb->preds) { else_predecessors++; if (cur_edge->flags & EDGE_COMPLEX) return FALSE; } + END_FOR_EACH_EDGE; /* The THEN block of an IF-THEN combo must have exactly one predecessor, other than any || blocks which jump to the THEN block. */ @@ -2743,7 +2746,7 @@ block_has_only_trap (basic_block bb) return NULL_RTX; /* The block must have no successors. */ - if (EDGE_COUNT (bb->succs) == 0) + if (EDGE_COUNT (bb->succs) > 0) return NULL_RTX; /* The only instruction in the THEN block must be the trap. */ diff --git a/gcc/lcm.c b/gcc/lcm.c index 56ebdd63488..bdd83cfdd0f 100644 --- a/gcc/lcm.c +++ b/gcc/lcm.c @@ -101,7 +101,7 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin, basic_block bb; edge e; basic_block *worklist, *qin, *qout, *qend; - unsigned int ix, qlen; + unsigned int qlen; /* Allocate a worklist array/queue. Entries are only added to the list if they were not already on the list. So the size is @@ -126,8 +126,11 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin, /* Mark blocks which are predecessors of the exit block so that we can easily identify them below. 
*/ - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds, ix) - e->src->aux = EXIT_BLOCK_PTR; + FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + { + e->src->aux = EXIT_BLOCK_PTR; + } + END_FOR_EACH_EDGE; /* Iterate until the worklist is empty. */ while (qlen) @@ -157,15 +160,18 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin, /* If the in state of this block changed, then we need to add the predecessors of this block to the worklist if they are not already on the worklist. */ - FOR_EACH_EDGE (e, bb->preds, ix) - if (!e->src->aux && e->src != ENTRY_BLOCK_PTR) - { - *qin++ = e->src; - e->src->aux = e; - qlen++; - if (qin >= qend) - qin = worklist; - } + FOR_EACH_EDGE (e, bb->preds) + { + if (!e->src->aux && e->src != ENTRY_BLOCK_PTR) + { + *qin++ = e->src; + e->src->aux = e; + qlen++; + if (qin >= qend) + qin = worklist; + } + } + END_FOR_EACH_EDGE; } clear_aux_for_edges (); @@ -250,7 +256,7 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest, int num_edges, i; edge e; basic_block *worklist, *qin, *qout, *qend, bb; - unsigned int ix, qlen; + unsigned int qlen; num_edges = NUM_EDGES (edge_list); @@ -280,8 +286,11 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest, do not want to be overly optimistic. Consider an outgoing edge from the entry block. That edge should always have a LATER value the same as EARLIEST for that edge. */ - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs, ix) - sbitmap_copy (later[(size_t) e->aux], earliest[(size_t) e->aux]); + FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + { + sbitmap_copy (later[(size_t) e->aux], earliest[(size_t) e->aux]); + } + END_FOR_EACH_EDGE; /* Add all the blocks to the worklist. This prevents an early exit from the loop given our optimistic initialization of LATER above. */ @@ -309,35 +318,44 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest, /* Compute the intersection of LATERIN for each incoming edge to B. 
*/ sbitmap_ones (laterin[bb->index]); - FOR_EACH_EDGE (e, bb->preds, ix) - sbitmap_a_and_b (laterin[bb->index], laterin[bb->index], later[(size_t)e->aux]); + FOR_EACH_EDGE (e, bb->preds) + { + sbitmap_a_and_b (laterin[bb->index], laterin[bb->index], later[(size_t)e->aux]); + } + END_FOR_EACH_EDGE; /* Calculate LATER for all outgoing edges. */ - FOR_EACH_EDGE (e, bb->succs, ix) - if (sbitmap_union_of_diff_cg (later[(size_t) e->aux], - earliest[(size_t) e->aux], - laterin[e->src->index], - antloc[e->src->index]) - /* If LATER for an outgoing edge was changed, then we need - to add the target of the outgoing edge to the worklist. */ - && e->dest != EXIT_BLOCK_PTR && e->dest->aux == 0) - { - *qin++ = e->dest; - e->dest->aux = e; - qlen++; - if (qin >= qend) - qin = worklist; - } + FOR_EACH_EDGE (e, bb->succs) + { + if (sbitmap_union_of_diff_cg (later[(size_t) e->aux], + earliest[(size_t) e->aux], + laterin[e->src->index], + antloc[e->src->index]) + /* If LATER for an outgoing edge was changed, then we need + to add the target of the outgoing edge to the worklist. */ + && e->dest != EXIT_BLOCK_PTR && e->dest->aux == 0) + { + *qin++ = e->dest; + e->dest->aux = e; + qlen++; + if (qin >= qend) + qin = worklist; + } + } + END_FOR_EACH_EDGE; } /* Computation of insertion and deletion points requires computing LATERIN for the EXIT block. We allocated an extra entry in the LATERIN array for just this purpose. 
*/ sbitmap_ones (laterin[last_basic_block]); - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds, ix) - sbitmap_a_and_b (laterin[last_basic_block], - laterin[last_basic_block], - later[(size_t) e->aux]); + FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + { + sbitmap_a_and_b (laterin[last_basic_block], + laterin[last_basic_block], + later[(size_t) e->aux]); + } + END_FOR_EACH_EDGE; clear_aux_for_edges (); free (worklist); @@ -474,7 +492,7 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout, { edge e; basic_block *worklist, *qin, *qout, *qend, bb; - unsigned int ix, qlen; + unsigned int qlen; /* Allocate a worklist array/queue. Entries are only added to the list if they were not already on the list. So the size is @@ -498,8 +516,11 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout, /* Mark blocks which are successors of the entry block so that we can easily identify them below. */ - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs, ix) - e->dest->aux = ENTRY_BLOCK_PTR; + FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + { + e->dest->aux = ENTRY_BLOCK_PTR; + } + END_FOR_EACH_EDGE; /* Iterate until the worklist is empty. */ while (qlen) @@ -530,16 +551,19 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout, /* If the out state of this block changed, then we need to add the successors of this block to the worklist if they are not already on the worklist. 
*/ - FOR_EACH_EDGE (e, bb->succs, ix) - if (!e->dest->aux && e->dest != EXIT_BLOCK_PTR) - { - *qin++ = e->dest; - e->dest->aux = e; - qlen++; - - if (qin >= qend) - qin = worklist; - } + FOR_EACH_EDGE (e, bb->succs) + { + if (!e->dest->aux && e->dest != EXIT_BLOCK_PTR) + { + *qin++ = e->dest; + e->dest->aux = e; + qlen++; + + if (qin >= qend) + qin = worklist; + } + } + END_FOR_EACH_EDGE; } clear_aux_for_edges (); @@ -600,7 +624,6 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest, int num_edges, i; edge e; basic_block *worklist, *tos, bb; - unsigned ix; num_edges = NUM_EDGES (edge_list); @@ -621,8 +644,11 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest, do not want to be overly optimistic. Consider an incoming edge to the exit block. That edge should always have a NEARER value the same as FARTHEST for that edge. */ - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds, ix) - sbitmap_copy (nearer[(size_t)e->aux], farthest[(size_t)e->aux]); + FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + { + sbitmap_copy (nearer[(size_t)e->aux], farthest[(size_t)e->aux]); + } + END_FOR_EACH_EDGE; /* Add all the blocks to the worklist. This prevents an early exit from the loop given our optimistic initialization of NEARER. */ @@ -641,33 +667,42 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest, /* Compute the intersection of NEARER for each outgoing edge from B. */ sbitmap_ones (nearerout[bb->index]); - FOR_EACH_EDGE (e, bb->succs, ix) - sbitmap_a_and_b (nearerout[bb->index], nearerout[bb->index], - nearer[(size_t) e->aux]); + FOR_EACH_EDGE (e, bb->succs) + { + sbitmap_a_and_b (nearerout[bb->index], nearerout[bb->index], + nearer[(size_t) e->aux]); + } + END_FOR_EACH_EDGE; /* Calculate NEARER for all incoming edges. 
*/ - FOR_EACH_EDGE (e, bb->preds, ix) - if (sbitmap_union_of_diff_cg (nearer[(size_t) e->aux], - farthest[(size_t) e->aux], - nearerout[e->dest->index], - st_avloc[e->dest->index]) - /* If NEARER for an incoming edge was changed, then we need - to add the source of the incoming edge to the worklist. */ - && e->src != ENTRY_BLOCK_PTR && e->src->aux == 0) - { - *tos++ = e->src; - e->src->aux = e; - } + FOR_EACH_EDGE (e, bb->preds) + { + if (sbitmap_union_of_diff_cg (nearer[(size_t) e->aux], + farthest[(size_t) e->aux], + nearerout[e->dest->index], + st_avloc[e->dest->index]) + /* If NEARER for an incoming edge was changed, then we need + to add the source of the incoming edge to the worklist. */ + && e->src != ENTRY_BLOCK_PTR && e->src->aux == 0) + { + *tos++ = e->src; + e->src->aux = e; + } + } + END_FOR_EACH_EDGE; } /* Computation of insertion and deletion points requires computing NEAREROUT for the ENTRY block. We allocated an extra entry in the NEAREROUT array for just this purpose. */ sbitmap_ones (nearerout[last_basic_block]); - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs, ix) - sbitmap_a_and_b (nearerout[last_basic_block], - nearerout[last_basic_block], - nearer[(size_t) e->aux]); + FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + { + sbitmap_a_and_b (nearerout[last_basic_block], + nearerout[last_basic_block], + nearer[(size_t) e->aux]); + } + END_FOR_EACH_EDGE; clear_aux_for_edges (); free (tos); @@ -908,9 +943,8 @@ static void make_preds_opaque (basic_block b, int j) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, b->preds, ix) + FOR_EACH_EDGE (e, b->preds) { basic_block pb = e->src; @@ -920,6 +954,7 @@ make_preds_opaque (basic_block b, int j) RESET_BIT (transp[pb->index], j); make_preds_opaque (pb, j); } + END_FOR_EACH_EDGE; } /* Record in LIVE that register REG died. 
*/ diff --git a/gcc/loop-init.c b/gcc/loop-init.c index ea38d53e80c..77a412d758e 100644 --- a/gcc/loop-init.c +++ b/gcc/loop-init.c @@ -35,7 +35,6 @@ loop_optimizer_init (FILE *dumpfile) { struct loops *loops = xcalloc (1, sizeof (struct loops)); edge e; - unsigned ix; static bool first_time = true; if (first_time) @@ -46,9 +45,12 @@ loop_optimizer_init (FILE *dumpfile) /* Avoid annoying special cases of edges going to exit block. */ - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds, ix) - if ((e->flags & EDGE_FALLTHRU) && EDGE_COUNT (e->src->succs) > 1) - split_edge (e); + FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + { + if ((e->flags & EDGE_FALLTHRU) && EDGE_COUNT (e->src->succs) > 1) + split_edge (e); + } + END_FOR_EACH_EDGE; /* Find the loops. */ diff --git a/gcc/loop-invariant.c b/gcc/loop-invariant.c index 52bb4a00385..299a6d96aec 100644 --- a/gcc/loop-invariant.c +++ b/gcc/loop-invariant.c @@ -223,7 +223,6 @@ find_exits (struct loop *loop, basic_block *body, struct loop *outermost_exit = loop, *aexit; bool has_call = false; rtx insn; - unsigned ix; for (i = 0; i < loop->num_nodes; i++) { @@ -240,7 +239,7 @@ find_exits (struct loop *loop, basic_block *body, } } - FOR_EACH_EDGE (e, body[i]->succs, ix) + FOR_EACH_EDGE (e, body[i]->succs) { if (flow_bb_inside_loop_p (loop, e->dest)) continue; @@ -250,6 +249,7 @@ find_exits (struct loop *loop, basic_block *body, outermost_exit = find_common_loop (outermost_exit, e->dest->loop_father); } + END_FOR_EACH_EDGE; continue; } diff --git a/gcc/loop-iv.c b/gcc/loop-iv.c index 8b01bf1c31e..f830f2dae8c 100644 --- a/gcc/loop-iv.c +++ b/gcc/loop-iv.c @@ -2501,8 +2501,7 @@ find_simple_exit (struct loop *loop, struct niter_desc *desc) for (i = 0; i < loop->num_nodes; i++) { - unsigned ix; - FOR_EACH_EDGE (e, body[i]->succs, ix) + FOR_EACH_EDGE (e, body[i]->succs) { if (flow_bb_inside_loop_p (loop, e->dest)) continue; @@ -2519,6 +2518,7 @@ find_simple_exit (struct loop *loop, struct niter_desc *desc) continue; *desc = act; } + 
END_FOR_EACH_EDGE; } if (dump_file) diff --git a/gcc/predict.c b/gcc/predict.c index 9c24cce7e1b..d2844ba62e5 100644 --- a/gcc/predict.c +++ b/gcc/predict.c @@ -287,14 +287,16 @@ dump_prediction (FILE *file, enum br_predictor predictor, int probability, basic_block bb, int used) { edge e; - unsigned ix; if (!file) return; - FOR_EACH_EDGE (e, bb->succs, ix) - if (! (e->flags & EDGE_FALLTHRU)) - break; + FOR_EACH_EDGE (e, bb->succs) + { + if (! (e->flags & EDGE_FALLTHRU)) + break; + } + END_FOR_EACH_EDGE; fprintf (file, " %s heuristics%s: %.1f%%", predictor_info[predictor].name, @@ -429,17 +431,19 @@ combine_predictions_for_bb (FILE *file, basic_block bb) struct edge_prediction *pred; int nedges = 0; edge e, first = NULL, second = NULL; - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) - if (!(e->flags & (EDGE_EH | EDGE_FAKE))) - { - nedges ++; - if (first && !second) - second = e; - if (!first) - first = e; - } + FOR_EACH_EDGE (e, bb->succs) + { + if (!(e->flags & (EDGE_EH | EDGE_FAKE))) + { + nedges ++; + if (first && !second) + second = e; + if (!first) + first = e; + } + } + END_FOR_EACH_EDGE; /* When there is no successor or only one choice, prediction is easy. @@ -449,11 +453,14 @@ combine_predictions_for_bb (FILE *file, basic_block bb) this later. 
*/ if (nedges != 2) { - FOR_EACH_EDGE (e, bb->succs, ix) - if (!(e->flags & (EDGE_EH | EDGE_FAKE))) - e->probability = (REG_BR_PROB_BASE + nedges / 2) / nedges; - else - e->probability = 0; + FOR_EACH_EDGE (e, bb->succs) + { + if (!(e->flags & (EDGE_EH | EDGE_FAKE))) + e->probability = (REG_BR_PROB_BASE + nedges / 2) / nedges; + else + e->probability = 0; + } + END_FOR_EACH_EDGE; bb_ann (bb)->predictions = NULL; if (file) fprintf (file, "%i edges in bb %i predicted to even probabilities\n", @@ -579,7 +586,6 @@ predict_loops (struct loops *loops_info, bool simpleloops) { int header_found = 0; edge e; - unsigned ix; bb = bbs[j]; @@ -593,25 +599,31 @@ predict_loops (struct loops *loops_info, bool simpleloops) /* Loop branch heuristics - predict an edge back to a loop's head as taken. */ - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->dest == loop->header - && e->src == loop->latch) - { - header_found = 1; - predict_edge_def (e, PRED_LOOP_BRANCH, TAKEN); - } + FOR_EACH_EDGE (e, bb->succs) + { + if (e->dest == loop->header + && e->src == loop->latch) + { + header_found = 1; + predict_edge_def (e, PRED_LOOP_BRANCH, TAKEN); + } + } + END_FOR_EACH_EDGE; /* Loop exit heuristics - predict an edge exiting the loop if the conditional has no loop header successors as not taken. */ if (!header_found) - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->dest->index < 0 - || !flow_bb_inside_loop_p (loop, e->dest)) - predict_edge - (e, PRED_LOOP_EXIT, - (REG_BR_PROB_BASE - - predictor_info [(int) PRED_LOOP_EXIT].hitrate) - / exits); + FOR_EACH_EDGE (e, bb->succs) + { + if (e->dest->index < 0 + || !flow_bb_inside_loop_p (loop, e->dest)) + predict_edge + (e, PRED_LOOP_EXIT, + (REG_BR_PROB_BASE + - predictor_info [(int) PRED_LOOP_EXIT].hitrate) + / exits); + } + END_FOR_EACH_EDGE; } /* Free basic blocks from get_loop_body. */ @@ -642,12 +654,11 @@ estimate_probability (struct loops *loops_info) rtx last_insn = BB_END (bb); rtx cond; edge e; - unsigned ix; if (! 
can_predict_insn_p (last_insn)) continue; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { /* Predict early returns to be probable, as we've already taken care for error returns and other are often used for fast paths @@ -685,6 +696,7 @@ estimate_probability (struct loops *loops_info) } } } + END_FOR_EACH_EDGE; cond = get_condition (last_insn, NULL, false, false); if (! cond) @@ -796,17 +808,20 @@ estimate_probability (struct loops *loops_info) ?? In the future we want to make abnormal edges improbable. */ int nedges = 0; edge e; - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { nedges++; if (e->probability != 0) break; } + END_FOR_EACH_EDGE; if (!e) - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) + { e->probability = (REG_BR_PROB_BASE + nedges / 2) / nedges; + } + END_FOR_EACH_EDGE; } } estimate_bb_frequencies (loops_info); @@ -823,13 +838,15 @@ tree_predict_by_opcode (basic_block bb) tree cond; tree op0; tree type; - unsigned ix; if (!stmt || TREE_CODE (stmt) != COND_EXPR) return; - FOR_EACH_EDGE (then_edge, bb->succs, ix) - if (then_edge->flags & EDGE_TRUE_VALUE) - break; + FOR_EACH_EDGE (then_edge, bb->succs) + { + if (then_edge->flags & EDGE_TRUE_VALUE) + break; + } + END_FOR_EACH_EDGE; cond = TREE_OPERAND (stmt, 0); if (TREE_CODE_CLASS (TREE_CODE (cond)) != '<') return; @@ -940,9 +957,8 @@ tree_estimate_probability (void) FOR_EACH_BB (bb) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { /* Predict early returns to be probable, as we've already taken care for error returns and other are often used for fast paths @@ -985,6 +1001,7 @@ tree_estimate_probability (void) } } } + END_FOR_EACH_EDGE; tree_predict_by_opcode (bb); } FOR_EACH_BB (bb) @@ -1127,9 +1144,8 @@ propagate_freq (struct loop *loop) basic_block head = loop->header; basic_block bb; basic_block last; - basic_block nextbb; edge e; - unsigned ix; + basic_block nextbb; /* For each basic 
block we need to visit count number of his predecessors we need to visit first. */ @@ -1139,14 +1155,17 @@ propagate_freq (struct loop *loop) { int count = 0; - FOR_EACH_EDGE (e, bb->preds, ix) - if (BLOCK_INFO (e->src)->tovisit && !(e->flags & EDGE_DFS_BACK)) - count++; - else if (BLOCK_INFO (e->src)->tovisit - && dump_file && !EDGE_INFO (e)->back_edge) - fprintf (dump_file, - "Irreducible region hit, ignoring edge to %i->%i\n", - e->src->index, bb->index); + FOR_EACH_EDGE (e, bb->preds) + { + if (BLOCK_INFO (e->src)->tovisit && !(e->flags & EDGE_DFS_BACK)) + count++; + else if (BLOCK_INFO (e->src)->tovisit + && dump_file && !EDGE_INFO (e)->back_edge) + fprintf (dump_file, + "Irreducible region hit, ignoring edge to %i->%i\n", + e->src->index, bb->index); + } + END_FOR_EACH_EDGE; BLOCK_INFO (bb)->npredecessors = count; } } @@ -1167,30 +1186,35 @@ propagate_freq (struct loop *loop) if (bb != head) { #ifdef ENABLE_CHECKING - FOR_EACH_EDGE (e, bb->preds, ix) - if (BLOCK_INFO (e->src)->tovisit && !(e->flags & EDGE_DFS_BACK)) - abort (); + FOR_EACH_EDGE (e, bb->preds) + { + if (BLOCK_INFO (e->src)->tovisit && !(e->flags & EDGE_DFS_BACK)) + abort (); + } + END_FOR_EACH_EDGE; #endif - FOR_EACH_EDGE (e, bb->preds, ix) - if (EDGE_INFO (e)->back_edge) - { - sreal_add (&cyclic_probability, &cyclic_probability, - &EDGE_INFO (e)->back_edge_prob); - } - else if (!(e->flags & EDGE_DFS_BACK)) - { - sreal tmp; - - /* frequency += (e->probability - * BLOCK_INFO (e->src)->frequency / - REG_BR_PROB_BASE); */ - - sreal_init (&tmp, e->probability, 0); - sreal_mul (&tmp, &tmp, &BLOCK_INFO (e->src)->frequency); - sreal_mul (&tmp, &tmp, &real_inv_br_prob_base); - sreal_add (&frequency, &frequency, &tmp); - } + FOR_EACH_EDGE (e, bb->preds) + { + if (EDGE_INFO (e)->back_edge) + { + sreal_add (&cyclic_probability, &cyclic_probability, + &EDGE_INFO (e)->back_edge_prob); + } + else if (!(e->flags & EDGE_DFS_BACK)) + { + sreal tmp; + + /* frequency += (e->probability * + BLOCK_INFO 
(e->src)->frequency / REG_BR_PROB_BASE); */ + + sreal_init (&tmp, e->probability, 0); + sreal_mul (&tmp, &tmp, &BLOCK_INFO (e->src)->frequency); + sreal_mul (&tmp, &tmp, &real_inv_br_prob_base); + sreal_add (&frequency, &frequency, &tmp); + } + } + END_FOR_EACH_EDGE; if (sreal_compare (&cyclic_probability, &real_zero) == 0) { @@ -1217,37 +1241,43 @@ propagate_freq (struct loop *loop) BLOCK_INFO (bb)->tovisit = 0; /* Compute back edge frequencies. */ - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->dest == head) - { - sreal tmp; - - /* EDGE_INFO (e)->back_edge_prob - = ((e->probability * BLOCK_INFO (bb)->frequency) - / REG_BR_PROB_BASE); */ - - sreal_init (&tmp, e->probability, 0); - sreal_mul (&tmp, &tmp, &BLOCK_INFO (bb)->frequency); - sreal_mul (&EDGE_INFO (e)->back_edge_prob, - &tmp, &real_inv_br_prob_base); - } + FOR_EACH_EDGE (e, bb->succs) + { + if (e->dest == head) + { + sreal tmp; + + /* EDGE_INFO (e)->back_edge_prob + = ((e->probability * BLOCK_INFO (bb)->frequency) + / REG_BR_PROB_BASE); */ + + sreal_init (&tmp, e->probability, 0); + sreal_mul (&tmp, &tmp, &BLOCK_INFO (bb)->frequency); + sreal_mul (&EDGE_INFO (e)->back_edge_prob, + &tmp, &real_inv_br_prob_base); + } + } + END_FOR_EACH_EDGE; /* Propagate to successor blocks. 
*/ - FOR_EACH_EDGE (e, bb->succs, ix) - if (!(e->flags & EDGE_DFS_BACK) - && BLOCK_INFO (e->dest)->npredecessors) - { - BLOCK_INFO (e->dest)->npredecessors--; - if (!BLOCK_INFO (e->dest)->npredecessors) - { - if (!nextbb) - nextbb = e->dest; - else - BLOCK_INFO (last)->next = e->dest; - - last = e->dest; - } - } + FOR_EACH_EDGE (e, bb->succs) + { + if (!(e->flags & EDGE_DFS_BACK) + && BLOCK_INFO (e->dest)->npredecessors) + { + BLOCK_INFO (e->dest)->npredecessors--; + if (!BLOCK_INFO (e->dest)->npredecessors) + { + if (!nextbb) + nextbb = e->dest; + else + BLOCK_INFO (last)->next = e->dest; + + last = e->dest; + } + } + } + END_FOR_EACH_EDGE; } } @@ -1376,16 +1406,16 @@ estimate_bb_frequencies (struct loops *loops) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; - unsigned ix; BLOCK_INFO (bb)->tovisit = 0; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { sreal_init (&EDGE_INFO (e)->back_edge_prob, e->probability, 0); sreal_mul (&EDGE_INFO (e)->back_edge_prob, &EDGE_INFO (e)->back_edge_prob, &real_inv_br_prob_base); } + END_FOR_EACH_EDGE; } /* First compute probabilities locally for each loop from innermost diff --git a/gcc/profile.c b/gcc/profile.c index 85a83a07663..5e5980c385b 100644 --- a/gcc/profile.c +++ b/gcc/profile.c @@ -142,9 +142,8 @@ instrument_edges (struct edge_list *el) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; - unsigned int ix; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { struct edge_info *inf = EDGE_INFO (e); @@ -159,6 +158,7 @@ instrument_edges (struct edge_list *el) (profile_hooks->gen_edge_profiler) (num_instr_edges++, e); } } + END_FOR_EACH_EDGE; } total_num_blocks_created += num_edges; @@ -239,10 +239,12 @@ get_exec_counts (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; - unsigned int ix; - FOR_EACH_EDGE (e, bb->succs, ix) - if (!EDGE_INFO (e)->ignore && !EDGE_INFO (e)->on_tree) - num_edges++; + FOR_EACH_EDGE (e, bb->succs) + { + if (!EDGE_INFO 
(e)->ignore && !EDGE_INFO (e)->on_tree) + num_edges++; + } + END_FOR_EACH_EDGE; } counts = get_coverage_counts (GCOV_COUNTER_ARCS, num_edges, &profile_info); @@ -296,13 +298,19 @@ compute_branch_probabilities (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; - unsigned int ix; - FOR_EACH_EDGE (e, bb->succs, ix) - if (!EDGE_INFO (e)->ignore) - BB_INFO (bb)->succ_count++; - FOR_EACH_EDGE (e, bb->preds, ix) - if (!EDGE_INFO (e)->ignore) - BB_INFO (bb)->pred_count++; + FOR_EACH_EDGE (e, bb->succs) + { + if (!EDGE_INFO (e)->ignore) + BB_INFO (bb)->succ_count++; + } + END_FOR_EACH_EDGE; + + FOR_EACH_EDGE (e, bb->preds) + { + if (!EDGE_INFO (e)->ignore) + BB_INFO (bb)->pred_count++; + } + END_FOR_EACH_EDGE; } /* Avoid predicting entry on exit nodes. */ @@ -318,35 +326,37 @@ compute_branch_probabilities (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; - unsigned int ix; - FOR_EACH_EDGE (e, bb->succs, ix) - if (!EDGE_INFO (e)->ignore && !EDGE_INFO (e)->on_tree) - { - num_edges++; - if (exec_counts) - { - e->count = exec_counts[exec_counts_pos++]; - if (e->count > profile_info->sum_max) - { - error ("corrupted profile info: edge from %i to %i exceeds maximal count", + FOR_EACH_EDGE (e, bb->succs) + { + if (!EDGE_INFO (e)->ignore && !EDGE_INFO (e)->on_tree) + { + num_edges++; + if (exec_counts) + { + e->count = exec_counts[exec_counts_pos++]; + if (e->count > profile_info->sum_max) + { + error ("corrupted profile info: edge from %i to %i exceeds maximal count", + bb->index, e->dest->index); + } + } + else + e->count = 0; + + EDGE_INFO (e)->count_valid = 1; + BB_INFO (bb)->succ_count--; + BB_INFO (e->dest)->pred_count--; + if (dump_file) + { + fprintf (dump_file, "\nRead edge from %i to %i, count:", bb->index, e->dest->index); - } - } - else - e->count = 0; - - EDGE_INFO (e)->count_valid = 1; - BB_INFO (bb)->succ_count--; - BB_INFO (e->dest)->pred_count--; - if (dump_file) - { - fprintf (dump_file, "\nRead edge from %i to %i, count:", - 
bb->index, e->dest->index); - fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, - (HOST_WIDEST_INT) e->count); - } - } + fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, + (HOST_WIDEST_INT) e->count); + } + } + } + END_FOR_EACH_EDGE; } if (dump_file) @@ -383,11 +393,11 @@ compute_branch_probabilities (void) if (bi->succ_count == 0) { edge e; - unsigned ix; gcov_type total = 0; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) total += e->count; + END_FOR_EACH_EDGE; bb->count = total; bi->count_valid = 1; changes = 1; @@ -395,11 +405,11 @@ compute_branch_probabilities (void) else if (bi->pred_count == 0) { edge e; - unsigned int ix; gcov_type total = 0; - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) total += e->count; + END_FOR_EACH_EDGE; bb->count = total; bi->count_valid = 1; changes = 1; @@ -410,18 +420,21 @@ compute_branch_probabilities (void) if (bi->succ_count == 1) { edge e; - unsigned ix; gcov_type total = 0; /* One of the counts will be invalid, but it is zero, so adding it in also doesn't hurt. */ - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) total += e->count; + END_FOR_EACH_EDGE; /* Seedgeh for the invalid edge, and set its count. */ - FOR_EACH_EDGE (e, bb->succs, ix) - if (! EDGE_INFO (e)->count_valid && ! EDGE_INFO (e)->ignore) - break; + FOR_EACH_EDGE (e, bb->succs) + { + if (! EDGE_INFO (e)->count_valid && ! EDGE_INFO (e)->ignore) + break; + } + END_FOR_EACH_EDGE; /* Calculate count for remaining edge by conservation. */ total = bb->count - total; @@ -438,18 +451,21 @@ compute_branch_probabilities (void) if (bi->pred_count == 1) { edge e; - unsigned ix; gcov_type total = 0; /* One of the counts will be invalid, but it is zero, so adding it in also doesn't hurt. */ - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) total += e->count; + END_FOR_EACH_EDGE; /* Search for the invalid edge, and set its count. 
*/ - FOR_EACH_EDGE (e, bb->preds, ix) - if (!EDGE_INFO (e)->count_valid && !EDGE_INFO (e)->ignore) - break; + FOR_EACH_EDGE (e, bb->preds) + { + if (!EDGE_INFO (e)->count_valid && !EDGE_INFO (e)->ignore) + break; + } + END_FOR_EACH_EDGE; /* Calculate count for remaining edge by conservation. */ total = bb->count - total + e->count; @@ -493,7 +509,6 @@ compute_branch_probabilities (void) { edge e; rtx note; - unsigned int ix; if (bb->count < 0) { @@ -501,7 +516,7 @@ compute_branch_probabilities (void) bb->index, (int)bb->count); bb->count = 0; } - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { /* Function may return twice in the cased the called function is setjmp or calls fork, but we can't represent this by extra @@ -524,10 +539,16 @@ compute_branch_probabilities (void) e->count = bb->count / 2; } } + END_FOR_EACH_EDGE; + if (bb->count) { - FOR_EACH_EDGE (e, bb->succs, ix) - e->probability = (e->count * REG_BR_PROB_BASE + bb->count / 2) / bb->count; + FOR_EACH_EDGE (e, bb->succs) + { + e->probability = (e->count * REG_BR_PROB_BASE + bb->count / 2) / bb->count; + } + END_FOR_EACH_EDGE; + if (bb->index >= 0 && block_ends_with_condjump_p (bb) && EDGE_COUNT (bb->succs) >= 2) @@ -538,9 +559,12 @@ compute_branch_probabilities (void) /* Find the branch edge. It is possible that we do have fake edges here. 
*/ - FOR_EACH_EDGE (e, bb->succs, ix) - if (!(e->flags & (EDGE_FAKE | EDGE_FALLTHRU))) - break; + FOR_EACH_EDGE (e, bb->succs) + { + if (!(e->flags & (EDGE_FAKE | EDGE_FALLTHRU))) + break; + } + END_FOR_EACH_EDGE; prob = e->probability; index = prob * 20 / REG_BR_PROB_BASE; @@ -574,22 +598,31 @@ compute_branch_probabilities (void) { int total = 0; - FOR_EACH_EDGE (e, bb->succs, ix) - if (!(e->flags & (EDGE_COMPLEX | EDGE_FAKE))) - total ++; + FOR_EACH_EDGE (e, bb->succs) + { + if (!(e->flags & (EDGE_COMPLEX | EDGE_FAKE))) + total ++; + } + END_FOR_EACH_EDGE; if (total) { - FOR_EACH_EDGE (e, bb->succs, ix) - if (!(e->flags & (EDGE_COMPLEX | EDGE_FAKE))) - e->probability = REG_BR_PROB_BASE / total; - else - e->probability = 0; + FOR_EACH_EDGE (e, bb->succs) + { + if (!(e->flags & (EDGE_COMPLEX | EDGE_FAKE))) + e->probability = REG_BR_PROB_BASE / total; + else + e->probability = 0; + } + END_FOR_EACH_EDGE; } else { total += EDGE_COUNT (bb->succs); - FOR_EACH_EDGE (e, bb->succs, ix) - e->probability = REG_BR_PROB_BASE / total; + FOR_EACH_EDGE (e, bb->succs) + { + e->probability = REG_BR_PROB_BASE / total; + } + END_FOR_EACH_EDGE; } if (bb->index >= 0 && block_ends_with_condjump_p (bb) @@ -730,7 +763,6 @@ branch_prob (void) int need_exit_edge = 0, need_entry_edge = 0; int have_exit_edge = 0, have_entry_edge = 0; edge e; - unsigned ix; /* Functions returning multiple times are not handled by extra edges. Instead we simply allow negative counts on edges from exit to the @@ -738,7 +770,7 @@ branch_prob (void) with the extra edges because that would result in flowgraph that needs to have fake edges outside the spanning tree. 
*/ - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL)) && e->dest != EXIT_BLOCK_PTR) @@ -746,7 +778,9 @@ branch_prob (void) if (e->dest == EXIT_BLOCK_PTR) have_exit_edge = 1; } - FOR_EACH_EDGE (e, bb->preds, ix) + END_FOR_EACH_EDGE; + + FOR_EACH_EDGE (e, bb->preds) { if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL)) && e->src != ENTRY_BLOCK_PTR) @@ -754,6 +788,7 @@ branch_prob (void) if (e->src == ENTRY_BLOCK_PTR) have_entry_edge = 1; } + END_FOR_EACH_EDGE; if (need_exit_edge && !have_exit_edge) { @@ -862,12 +897,11 @@ branch_prob (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { edge e; - unsigned ix; offset = gcov_write_tag (GCOV_TAG_ARCS); gcov_write_unsigned (BB_TO_GCOV_INDEX (bb)); - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { struct edge_info *i = EDGE_INFO (e); if (!i->ignore) @@ -885,6 +919,7 @@ branch_prob (void) gcov_write_unsigned (flag_bits); } } + END_FOR_EACH_EDGE; gcov_write_length (offset); } diff --git a/gcc/ra-build.c b/gcc/ra-build.c index 9fe1bad6b11..df20e6e5db3 100644 --- a/gcc/ra-build.c +++ b/gcc/ra-build.c @@ -939,7 +939,7 @@ live_in (struct df *df, struct curr_use *use, rtx insn) return; if (bb != BLOCK_FOR_INSN (insn)) { - edge e = NULL; + edge e; unsigned HOST_WIDE_INT undef = use->undefined; struct ra_bb_info *info = (struct ra_bb_info *) bb->aux; if (EDGE_COUNT (bb->preds) == 0) diff --git a/gcc/ra-rewrite.c b/gcc/ra-rewrite.c index 987c0664ea3..4e49234ac55 100644 --- a/gcc/ra-rewrite.c +++ b/gcc/ra-rewrite.c @@ -1346,7 +1346,6 @@ rewrite_program2 (bitmap new_deaths) nl_first_reload = ri.nl_size; if (ri.num_reloads) { - unsigned ix; int in_ir = 0; edge e; int num = 0; @@ -1354,7 +1353,7 @@ rewrite_program2 (bitmap new_deaths) HARD_REG_SET cum_colors, colors; CLEAR_HARD_REG_SET (cum_colors); - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) { int j; @@ -1371,6 +1370,8 @@ rewrite_program2 (bitmap 
new_deaths) IOR_HARD_REG_SET (cum_colors, colors); num++; } + END_FOR_EACH_EDGE; + if (num == 5) in_ir = 1; diff --git a/gcc/ra.c b/gcc/ra.c index 5ffe077e0a3..2249dddd4c0 100644 --- a/gcc/ra.c +++ b/gcc/ra.c @@ -679,9 +679,8 @@ reg_alloc (void) if (last) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds, ix) + FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) { basic_block bb = e->src; last = BB_END (bb); @@ -695,6 +694,7 @@ reg_alloc (void) emit_insn_after (insns, last); } } + END_FOR_EACH_EDGE; } /* Setup debugging levels. */ diff --git a/gcc/recog.c b/gcc/recog.c index c7fb554be4e..cc826f879e9 100644 --- a/gcc/recog.c +++ b/gcc/recog.c @@ -3141,12 +3141,14 @@ peephole2_optimize (FILE *dump_file ATTRIBUTE_UNUSED) /* Re-insert the EH_REGION notes. */ if (note || (was_call && nonlocal_goto_handler_labels)) { - unsigned ix; edge eh_edge; - FOR_EACH_EDGE (eh_edge, bb->succs, ix) - if (eh_edge->flags & (EDGE_EH | EDGE_ABNORMAL_CALL)) - break; + FOR_EACH_EDGE (eh_edge, bb->succs) + { + if (eh_edge->flags & (EDGE_EH | EDGE_ABNORMAL_CALL)) + break; + } + END_FOR_EACH_EDGE; for (x = try ; x != before_try ; x = PREV_INSN (x)) if (CALL_P (x) diff --git a/gcc/reg-stack.c b/gcc/reg-stack.c index 0316d8c8145..fdb1a6c5e41 100644 --- a/gcc/reg-stack.c +++ b/gcc/reg-stack.c @@ -442,12 +442,14 @@ reg_to_stack (FILE *file) FOR_EACH_BB_REVERSE (bb) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, bb->preds, ix) - if (!(e->flags & EDGE_DFS_BACK) - && e->src != ENTRY_BLOCK_PTR) - BLOCK_INFO (bb)->predecessors++; + FOR_EACH_EDGE (e, bb->preds) + { + if (!(e->flags & EDGE_DFS_BACK) + && e->src != ENTRY_BLOCK_PTR) + BLOCK_INFO (bb)->predecessors++; + } + END_FOR_EACH_EDGE; } /* Create the replacement registers up front. */ @@ -2556,7 +2558,6 @@ convert_regs_entry (void) int inserted = 0; edge e; basic_block block; - unsigned ix; FOR_EACH_BB_REVERSE (block) { @@ -2585,7 +2586,7 @@ convert_regs_entry (void) Note that we are inserting converted code here. 
This code is never seen by the convert_regs pass. */ - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs, ix) + FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) { basic_block block = e->dest; block_info bi = BLOCK_INFO (block); @@ -2607,6 +2608,7 @@ convert_regs_entry (void) bi->stack_in.top = top; } + END_FOR_EACH_EDGE; return inserted; } @@ -2793,7 +2795,6 @@ convert_regs_1 (FILE *file, basic_block block) rtx insn, next; edge e, beste = NULL; bool control_flow_insn_deleted = false; - unsigned ix; inserted = 0; deleted = 0; @@ -2804,7 +2805,7 @@ convert_regs_1 (FILE *file, basic_block block) if multiple such exists, take one with largest count, prefer critical one (as splitting critical edges is more expensive), or one with lowest index, to avoid random changes with different orders of the edges. */ - FOR_EACH_EDGE (e, block->preds, ix) + FOR_EACH_EDGE (e, block->preds) { if (e->flags & EDGE_DFS_BACK) ; @@ -2827,6 +2828,7 @@ convert_regs_1 (FILE *file, basic_block block) else if (e->src->index < beste->src->index) beste = e; } + END_FOR_EACH_EDGE; /* Initialize stack at block entry. */ if (bi->stack_in.top == -2) @@ -2955,7 +2957,7 @@ convert_regs_1 (FILE *file, basic_block block) bi->stack_out = regstack; /* Compensate the back edges, as those wasn't visited yet. 
*/ - FOR_EACH_EDGE (e, block->succs, ix) + FOR_EACH_EDGE (e, block->succs) { if (e->flags & EDGE_DFS_BACK || (e->dest == EXIT_BLOCK_PTR)) @@ -2966,7 +2968,9 @@ convert_regs_1 (FILE *file, basic_block block) inserted |= compensate_edge (e, file); } } - FOR_EACH_EDGE (e, block->preds, ix) + END_FOR_EACH_EDGE; + + FOR_EACH_EDGE (e, block->preds) { if (e != beste && !(e->flags & EDGE_DFS_BACK) && e->src != ENTRY_BLOCK_PTR) @@ -2976,6 +2980,7 @@ convert_regs_1 (FILE *file, basic_block block) inserted |= compensate_edge (e, file); } } + END_FOR_EACH_EDGE; return inserted; } @@ -3001,7 +3006,6 @@ convert_regs_2 (FILE *file, basic_block block) do { edge e; - unsigned ix; block = *--sp; @@ -3018,13 +3022,16 @@ convert_regs_2 (FILE *file, basic_block block) stack the successor in all cases and hand over the task of fixing up the discrepancy to convert_regs_1. */ - FOR_EACH_EDGE (e, block->succs, ix) - if (! (e->flags & EDGE_DFS_BACK)) - { - BLOCK_INFO (e->dest)->predecessors--; - if (!BLOCK_INFO (e->dest)->predecessors) - *sp++ = e->dest; - } + FOR_EACH_EDGE (e, block->succs) + { + if (! (e->flags & EDGE_DFS_BACK)) + { + BLOCK_INFO (e->dest)->predecessors--; + if (!BLOCK_INFO (e->dest)->predecessors) + *sp++ = e->dest; + } + } + END_FOR_EACH_EDGE; inserted |= convert_regs_1 (file, block); BLOCK_INFO (block)->done = 1; @@ -3044,7 +3051,6 @@ convert_regs (FILE *file) int inserted; basic_block b; edge e; - unsigned ix; /* Initialize uninitialized registers on function entry. */ inserted = convert_regs_entry (); @@ -3059,8 +3065,11 @@ convert_regs (FILE *file) /* Process all blocks reachable from all entry points. */ - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs, ix) - inserted |= convert_regs_2 (file, e->dest); + FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + { + inserted |= convert_regs_2 (file, e->dest); + } + END_FOR_EACH_EDGE; /* ??? Process all unreachable blocks. Though there's no excuse for keeping these even when not optimizing. 
*/ diff --git a/gcc/reload1.c b/gcc/reload1.c index 3de09553daf..33c4ba87a48 100644 --- a/gcc/reload1.c +++ b/gcc/reload1.c @@ -8047,11 +8047,10 @@ fixup_abnormal_edges (void) FOR_EACH_BB (bb) { edge e; - unsigned ix; /* Look for cases we are interested in - calls or instructions causing exceptions. */ - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (e->flags & EDGE_ABNORMAL_CALL) break; @@ -8059,16 +8058,21 @@ fixup_abnormal_edges (void) == (EDGE_ABNORMAL | EDGE_EH)) break; } + END_FOR_EACH_EDGE; + if (e && !CALL_P (BB_END (bb)) && !can_throw_internal (BB_END (bb))) { rtx insn = BB_END (bb), stop = NEXT_INSN (BB_END (bb)); rtx next; - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->flags & EDGE_FALLTHRU) - break; + FOR_EACH_EDGE (e, bb->succs) + { + if (e->flags & EDGE_FALLTHRU) + break; + } + END_FOR_EACH_EDGE; + /* Get past the new insns generated. Allow notes, as the insns may be already deleted. */ while ((NONJUMP_INSN_P (insn) || NOTE_P (insn)) diff --git a/gcc/sbitmap.c b/gcc/sbitmap.c index 74cf52cf346..11031355219 100644 --- a/gcc/sbitmap.c +++ b/gcc/sbitmap.c @@ -517,11 +517,12 @@ sbitmap_intersection_of_succs (sbitmap dst, sbitmap *src, int bb) edge e; unsigned ix; - FOR_EACH_EDGE (e, b->succs, ix) + for (ix = 0; ix < EDGE_COUNT (b->succs); ix++) { + e = EDGE_SUCC (b, ix); if (e->dest == EXIT_BLOCK_PTR) continue; - + sbitmap_copy (dst, src[e->dest->index]); break; } @@ -529,11 +530,12 @@ sbitmap_intersection_of_succs (sbitmap dst, sbitmap *src, int bb) if (!e) sbitmap_ones (dst); else - for (++ix; VEC_iterate (edge, b->succs, ix, e); ix++) + for (++ix; ix < EDGE_COUNT (b->succs); ix++) { unsigned int i; sbitmap_ptr p, r; + e = EDGE_SUCC (b, ix); if (e->dest == EXIT_BLOCK_PTR) continue; @@ -555,8 +557,9 @@ sbitmap_intersection_of_preds (sbitmap dst, sbitmap *src, int bb) edge e; unsigned ix; - FOR_EACH_EDGE (e, b->preds, ix) + for (ix = 0; ix < EDGE_COUNT (b->preds); ix++) { + e = EDGE_PRED (b, ix); if (e->src == 
ENTRY_BLOCK_PTR) continue; @@ -567,11 +570,12 @@ sbitmap_intersection_of_preds (sbitmap dst, sbitmap *src, int bb) if (!e) sbitmap_ones (dst); else - for (++ix; VEC_iterate (edge, b->preds, ix, e); ix++) + for (++ix; ix < EDGE_COUNT (b->preds); ix++) { unsigned int i; sbitmap_ptr p, r; + e = EDGE_PRED (b, ix); if (e->src == ENTRY_BLOCK_PTR) continue; @@ -593,8 +597,9 @@ sbitmap_union_of_succs (sbitmap dst, sbitmap *src, int bb) edge e; unsigned ix; - FOR_EACH_EDGE (e, b->succs, ix) + for (ix = 0; ix < EDGE_COUNT (b->succs); ix++) { + e = EDGE_SUCC (b, ix); if (e->dest == EXIT_BLOCK_PTR) continue; @@ -602,14 +607,15 @@ sbitmap_union_of_succs (sbitmap dst, sbitmap *src, int bb) break; } - if (e == 0) + if (ix == EDGE_COUNT (b->succs)) sbitmap_zero (dst); else - for (ix++; VEC_iterate (edge, b->succs, ix, e); ix++) + for (ix++; ix < EDGE_COUNT (b->succs); ix++) { unsigned int i; sbitmap_ptr p, r; + e = EDGE_SUCC (b, ix); if (e->dest == EXIT_BLOCK_PTR) continue; @@ -631,7 +637,7 @@ sbitmap_union_of_preds (sbitmap dst, sbitmap *src, int bb) edge e; unsigned ix; - FOR_EACH_EDGE (e, b->preds, ix) + for (ix = 0; ix < EDGE_COUNT (b->preds); ix++) { if (e->src== ENTRY_BLOCK_PTR) continue; @@ -640,14 +646,15 @@ sbitmap_union_of_preds (sbitmap dst, sbitmap *src, int bb) break; } - if (e == 0) + if (ix == EDGE_COUNT (b->preds)) sbitmap_zero (dst); else - for (ix++; VEC_iterate (edge, b->preds, ix, e); ix++) + for (ix++; ix < EDGE_COUNT (b->preds); ix++) { unsigned int i; sbitmap_ptr p, r; + e = EDGE_PRED (b, ix); if (e->src == ENTRY_BLOCK_PTR) continue; diff --git a/gcc/sched-ebb.c b/gcc/sched-ebb.c index 2befa2d757f..07013574014 100644 --- a/gcc/sched-ebb.c +++ b/gcc/sched-ebb.c @@ -175,20 +175,22 @@ compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used, { basic_block b = BLOCK_FOR_INSN (insn); edge e; - unsigned ix; - - FOR_EACH_EDGE (e, b->succs, ix) - if (e->flags & EDGE_FALLTHRU) - /* The jump may be a by-product of a branch that has been merged - in the 
main codepath after being conditionalized. Therefore - it may guard the fallthrough block from using a value that has - conditionally overwritten that of the main codepath. So we - consider that it restores the value of the main codepath. */ - bitmap_operation (set, e->dest->global_live_at_start, cond_set, - BITMAP_AND); - else - bitmap_operation (used, used, e->dest->global_live_at_start, - BITMAP_IOR); + + FOR_EACH_EDGE (e, b->succs) + { + if (e->flags & EDGE_FALLTHRU) + /* The jump may be a by-product of a branch that has been merged + in the main codepath after being conditionalized. Therefore + it may guard the fallthrough block from using a value that has + conditionally overwritten that of the main codepath. So we + consider that it restores the value of the main codepath. */ + bitmap_operation (set, e->dest->global_live_at_start, cond_set, + BITMAP_AND); + else + bitmap_operation (used, used, e->dest->global_live_at_start, + BITMAP_IOR); + } + END_FOR_EACH_EDGE; } /* Used in schedule_insns to initialize current_sched_info for scheduling @@ -283,7 +285,6 @@ fix_basic_block_boundaries (basic_block bb, basic_block last, rtx head, { edge f; rtx h; - unsigned ix; /* An obscure special case, where we do have partially dead instruction scheduled after last control flow instruction. @@ -296,9 +297,12 @@ fix_basic_block_boundaries (basic_block bb, basic_block last, rtx head, do the split and re-emit it back in case this will ever trigger problem. 
*/ - FOR_EACH_EDGE (f, bb->prev_bb->succs, ix) - if (f->flags & EDGE_FALLTHRU) - break; + FOR_EACH_EDGE (f, bb->prev_bb->succs) + { + if (f->flags & EDGE_FALLTHRU) + break; + } + END_FOR_EACH_EDGE; if (f) { @@ -594,14 +598,16 @@ schedule_ebbs (FILE *dump_file) for (;;) { edge e; - unsigned ix; tail = BB_END (bb); if (bb->next_bb == EXIT_BLOCK_PTR || LABEL_P (BB_HEAD (bb->next_bb))) break; - FOR_EACH_EDGE (e, bb->succs, ix) - if ((e->flags & EDGE_FALLTHRU) != 0) - break; + FOR_EACH_EDGE (e, bb->succs) + { + if ((e->flags & EDGE_FALLTHRU) != 0) + break; + } + END_FOR_EACH_EDGE; if (! e) break; if (e->probability <= probability_cutoff) diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c index 090c9fc96a3..a52a4e73d92 100644 --- a/gcc/sched-rgn.c +++ b/gcc/sched-rgn.c @@ -803,7 +803,6 @@ find_rgns (struct edge_list *edge_list) if (TEST_BIT (header, bb->index) && TEST_BIT (inner, bb->index)) { edge e; - unsigned ix; basic_block jbb; /* Now check that the loop is reducible. We do this separate @@ -845,9 +844,12 @@ find_rgns (struct edge_list *edge_list) /* Decrease degree of all I's successors for topological ordering. */ - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->dest != EXIT_BLOCK_PTR) - --degree[e->dest->index]; + FOR_EACH_EDGE (e, bb->succs) + { + if (e->dest != EXIT_BLOCK_PTR) + --degree[e->dest->index]; + } + END_FOR_EACH_EDGE; /* Estimate # insns, and count # blocks in the region. */ num_bbs = 1; @@ -879,9 +881,8 @@ find_rgns (struct edge_list *edge_list) else { edge e; - unsigned ix; - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) { if (e->src == ENTRY_BLOCK_PTR) continue; @@ -901,6 +902,7 @@ find_rgns (struct edge_list *edge_list) } } } + END_FOR_EACH_EDGE; } /* Now add all the blocks in the loop to the queue. 
@@ -936,10 +938,9 @@ find_rgns (struct edge_list *edge_list) while (head < tail && !too_large_failure) { edge e; - unsigned ix; child = queue[++head]; - FOR_EACH_EDGE (e, BASIC_BLOCK (child)->preds, ix) + FOR_EACH_EDGE (e, BASIC_BLOCK (child)->preds) { node = e->src->index; @@ -963,6 +964,7 @@ find_rgns (struct edge_list *edge_list) } } } + END_FOR_EACH_EDGE; } if (tail >= 0 && !too_large_failure) @@ -994,9 +996,12 @@ find_rgns (struct edge_list *edge_list) CONTAINING_RGN (child) = nr_regions; queue[head] = queue[tail--]; - FOR_EACH_EDGE (e, BASIC_BLOCK (child)->succs, ix) - if (e->dest != EXIT_BLOCK_PTR) - --degree[e->dest->index]; + FOR_EACH_EDGE (e, BASIC_BLOCK (child)->succs) + { + if (e->dest != EXIT_BLOCK_PTR) + --degree[e->dest->index]; + } + END_FOR_EACH_EDGE; } else --head; diff --git a/gcc/tracer.c b/gcc/tracer.c index d3f9e98dfe8..e58411e1c3d 100644 --- a/gcc/tracer.c +++ b/gcc/tracer.c @@ -118,11 +118,13 @@ find_best_successor (basic_block bb) { edge e; edge best = NULL; - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) - if (!best || better_p (e, best)) - best = e; + FOR_EACH_EDGE (e, bb->succs) + { + if (!best || better_p (e, best)) + best = e; + } + END_FOR_EACH_EDGE; if (!best || ignore_bb_p (best->dest)) return NULL; if (best->probability <= probability_cutoff) @@ -137,11 +139,13 @@ find_best_predecessor (basic_block bb) { edge e; edge best = NULL; - unsigned ix; - FOR_EACH_EDGE (e, bb->preds, ix) - if (!best || better_p (e, best)) - best = e; + FOR_EACH_EDGE (e, bb->preds) + { + if (!best || better_p (e, best)) + best = e; + } + END_FOR_EACH_EDGE; if (!best || ignore_bb_p (best->src)) return NULL; if (EDGE_FREQUENCY (best) * REG_BR_PROB_BASE @@ -276,12 +280,14 @@ tail_duplicate (void) && can_duplicate_block_p (bb2)) { edge e; - unsigned ix; basic_block old = bb2; - FOR_EACH_EDGE (e, bb2->preds, ix) - if (e->src == bb) - break; + FOR_EACH_EDGE (e, bb2->preds) + { + if (e->src == bb) + break; + } + END_FOR_EACH_EDGE; nduplicated += counts 
[bb2->index]; bb2 = duplicate_block (bb2, e); @@ -330,17 +336,19 @@ layout_superblocks (void) while (bb != EXIT_BLOCK_PTR) { - unsigned ix; edge e, best = NULL; while (end->rbi->next) end = end->rbi->next; - FOR_EACH_EDGE (e, end->succs, ix) - if (e->dest != EXIT_BLOCK_PTR - && e->dest != EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest - && !e->dest->rbi->visited - && (!best || EDGE_FREQUENCY (e) > EDGE_FREQUENCY (best))) - best = e; + FOR_EACH_EDGE (e, end->succs) + { + if (e->dest != EXIT_BLOCK_PTR + && e->dest != EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest + && !e->dest->rbi->visited + && (!best || EDGE_FREQUENCY (e) > EDGE_FREQUENCY (best))) + best = e; + } + END_FOR_EACH_EDGE; if (best) { diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c index 19c1cbd67a1..46a481e924b 100644 --- a/gcc/tree-cfg.c +++ b/gcc/tree-cfg.c @@ -1936,7 +1936,6 @@ cleanup_control_expr_graph (basic_block bb, block_stmt_iterator bsi) if (EDGE_COUNT (bb->succs) > 1) { - unsigned ix; edge e; switch (TREE_CODE (expr)) @@ -1960,17 +1959,19 @@ cleanup_control_expr_graph (basic_block bb, block_stmt_iterator bsi) return false; /* Remove all the edges except the one that is always executed. */ - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (e != taken_edge) { taken_edge->probability += e->probability; taken_edge->count += e->count; ssa_remove_edge (e); - ix--; + __ix--; retval = true; } } + END_FOR_EACH_EDGE; + if (taken_edge->probability > REG_BR_PROB_BASE) taken_edge->probability = REG_BR_PROB_BASE; } @@ -2185,7 +2186,6 @@ static void compute_dominance_frontiers_1 (bitmap *frontiers, basic_block bb, sbitmap done) { edge e; - unsigned ix; basic_block c; SET_BIT (done, bb->index); @@ -2202,13 +2202,14 @@ compute_dominance_frontiers_1 (bitmap *frontiers, basic_block bb, sbitmap done) } /* Find blocks conforming to rule (1) above. 
*/ - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) continue; if (get_immediate_dominator (CDI_DOMINATORS, e->dest) != bb) bitmap_set_bit (frontiers[bb->index], e->dest->index); } + END_FOR_EACH_EDGE; /* Find blocks conforming to rule (2). */ for (c = first_dom_son (CDI_DOMINATORS, bb); @@ -2384,7 +2385,6 @@ static void tree_cfg2vcg (FILE *file) { edge e; - unsigned ix; basic_block bb; const char *funcname = lang_hooks.decl_printable_name (current_function_decl, 2); @@ -2395,7 +2395,7 @@ tree_cfg2vcg (FILE *file) fprintf (file, "node: { title: \"EXIT\" label: \"EXIT\" }\n"); /* Write blocks and edges. */ - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs, ix) + FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) { fprintf (file, "edge: { sourcename: \"ENTRY\" targetname: \"%d\"", e->dest->index); @@ -2407,6 +2407,8 @@ tree_cfg2vcg (FILE *file) fprintf (file, " }\n"); } + END_FOR_EACH_EDGE; + fputc ('\n', file); FOR_EACH_BB (bb) @@ -2440,7 +2442,7 @@ tree_cfg2vcg (FILE *file) bb->index, bb->index, head_name, head_line, end_name, end_line); - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) fprintf (file, "edge: { sourcename: \"%d\" targetname: \"EXIT\"", bb->index); @@ -2454,6 +2456,7 @@ tree_cfg2vcg (FILE *file) fprintf (file, " }\n"); } + END_FOR_EACH_EDGE; if (bb->next_bb != EXIT_BLOCK_PTR) fputc ('\n', file); @@ -2593,7 +2596,6 @@ disband_implicit_edges (void) block_stmt_iterator last; edge e; tree stmt, label; - unsigned ix; FOR_EACH_BB (bb) { @@ -2606,7 +2608,7 @@ disband_implicit_edges (void) from cfg_remove_useless_stmts here since it violates the invariants for tree--cfg correspondence and thus fits better here where we do it anyway. 
*/ - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (e->dest != bb->next_bb) continue; @@ -2619,6 +2621,7 @@ disband_implicit_edges (void) abort (); e->flags |= EDGE_FALLTHRU; } + END_FOR_EACH_EDGE; continue; } @@ -2646,9 +2649,12 @@ disband_implicit_edges (void) continue; /* Find a fallthru edge and emit the goto if necessary. */ - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->flags & EDGE_FALLTHRU) - break; + FOR_EACH_EDGE (e, bb->succs) + { + if (e->flags & EDGE_FALLTHRU) + break; + } + END_FOR_EACH_EDGE; if (!e || e->dest == bb->next_bb) continue; @@ -3000,15 +3006,17 @@ bsi_commit_edge_inserts (int *new_blocks) basic_block bb; edge e; int blocks; - unsigned ix; blocks = n_basic_blocks; bsi_commit_edge_inserts_1 (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)); FOR_EACH_BB (bb) - FOR_EACH_EDGE (e, bb->succs, ix) - bsi_commit_edge_inserts_1 (e); + FOR_EACH_EDGE (e, bb->succs) + { + bsi_commit_edge_inserts_1 (e); + } + END_FOR_EACH_EDGE; if (new_blocks) *new_blocks = n_basic_blocks - blocks; @@ -3059,7 +3067,6 @@ tree_split_edge (edge edge_in) edge new_edge, e; tree phi; int i, num_elem; - unsigned ix; /* Abnormal edges cannot be split. */ if (edge_in->flags & EDGE_ABNORMAL) @@ -3071,9 +3078,13 @@ tree_split_edge (edge edge_in) /* Place the new block in the block list. Try to keep the new block near its "logical" location. This is of most help to humans looking at debugging dumps. 
*/ - FOR_EACH_EDGE (e, dest->preds, ix) - if (e->src->next_bb == dest) - break; + FOR_EACH_EDGE (e, dest->preds) + { + if (e->src->next_bb == dest) + break; + } + END_FOR_EACH_EDGE; + if (!e) after_bb = dest->prev_bb; else @@ -3494,7 +3505,6 @@ tree_verify_flow_info (void) block_stmt_iterator bsi; tree stmt; edge e; - unsigned ix; if (ENTRY_BLOCK_PTR->stmt_list) { @@ -3508,12 +3518,15 @@ tree_verify_flow_info (void) err = 1; } - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds, ix) - if (e->flags & EDGE_FALLTHRU) - { - error ("Fallthru to exit from bb %d\n", e->src->index); - err = 1; - } + FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + { + if (e->flags & EDGE_FALLTHRU) + { + error ("Fallthru to exit from bb %d\n", e->src->index); + err = 1; + } + } + END_FOR_EACH_EDGE; FOR_EACH_BB (bb) { @@ -3574,14 +3587,16 @@ tree_verify_flow_info (void) if (is_ctrl_stmt (stmt)) { - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->flags & EDGE_FALLTHRU) - { - error ("Fallthru edge after a control statement in bb %d \n", - bb->index); - err = 1; - } + FOR_EACH_EDGE (e, bb->succs) + { + if (e->flags & EDGE_FALLTHRU) + { + error ("Fallthru edge after a control statement in bb %d \n", + bb->index); + err = 1; + } + } + END_FOR_EACH_EDGE; } switch (TREE_CODE (stmt)) @@ -3639,16 +3654,18 @@ tree_verify_flow_info (void) { /* FIXME. We should double check that the labels in the destination blocks have their address taken. 
*/ - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) - if ((e->flags & (EDGE_FALLTHRU | EDGE_TRUE_VALUE - | EDGE_FALSE_VALUE)) - || !(e->flags & EDGE_ABNORMAL)) - { - error ("Wrong outgoing edge flags at end of bb %d\n", - bb->index); - err = 1; - } + FOR_EACH_EDGE (e, bb->succs) + { + if ((e->flags & (EDGE_FALLTHRU | EDGE_TRUE_VALUE + | EDGE_FALSE_VALUE)) + || !(e->flags & EDGE_ABNORMAL)) + { + error ("Wrong outgoing edge flags at end of bb %d\n", + bb->index); + err = 1; + } + } + END_FOR_EACH_EDGE; } break; @@ -3717,7 +3734,7 @@ tree_verify_flow_info (void) err = 1; } - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (!e->dest->aux) { @@ -3734,6 +3751,7 @@ tree_verify_flow_info (void) err = 1; } } + END_FOR_EACH_EDGE; /* Check that we have all of them. */ for (i = 0; i < n; ++i) @@ -3749,8 +3767,11 @@ tree_verify_flow_info (void) } } - FOR_EACH_EDGE (e, bb->succs, ix) - e->dest->aux = (void *)0; + FOR_EACH_EDGE (e, bb->succs) + { + e->dest->aux = (void *) 0; + } + END_FOR_EACH_EDGE; } default: ; @@ -3773,7 +3794,6 @@ tree_make_forwarder_block (edge fallthru) edge e; basic_block dummy, bb; tree phi, new_phi, var, prev, next; - unsigned ix; dummy = fallthru->src; bb = fallthru->dest; @@ -3803,7 +3823,7 @@ tree_make_forwarder_block (edge fallthru) set_phi_nodes (bb, prev); /* Add the arguments we have stored on edges. */ - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) { if (e == fallthru) continue; @@ -3815,6 +3835,7 @@ tree_make_forwarder_block (edge fallthru) PENDING_STMT (e) = NULL; } + END_FOR_EACH_EDGE; } @@ -3827,7 +3848,6 @@ tree_forwarder_block_p (basic_block bb) { block_stmt_iterator bsi; edge e; - unsigned ix; /* If we have already determined that this block is not forwardable, then no further checks are necessary. */ @@ -3846,12 +3866,15 @@ tree_forwarder_block_p (basic_block bb) } /* Successors of the entry block are not forwarders. 
*/ - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs, ix) - if (e->dest == bb) - { - bb_ann (bb)->forwardable = 0; - return false; - } + FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + { + if (e->dest == bb) + { + bb_ann (bb)->forwardable = 0; + return false; + } + } + END_FOR_EACH_EDGE; /* BB can not have any PHI nodes. This could potentially be relaxed early in compilation if we re-rewrote the variables appearing in @@ -3904,8 +3927,6 @@ thread_jumps (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { - unsigned ix; - /* Don't waste time on unreachable blocks. */ if (EDGE_COUNT (bb->preds) == 0) continue; @@ -3921,7 +3942,7 @@ thread_jumps (void) /* Examine each of our block's successors to see if it is forwardable. */ - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { /* If the edge is abnormal or its destination is not forwardable, then there's nothing to do. */ @@ -3983,7 +4004,7 @@ thread_jumps (void) retval = true; old_dest = e->dest; e = redirect_edge_and_branch (e, dest); - ix--; + __ix--; if (!old) { @@ -4010,7 +4031,7 @@ thread_jumps (void) { tmp = EDGE_SUCC (old_dest, 0)->dest; - if (EDGE_COUNT (old_dest->preds) != 0) + if (EDGE_COUNT (old_dest->preds) > 0) break; delete_basic_block (old_dest); @@ -4042,6 +4063,7 @@ thread_jumps (void) } } } + END_FOR_EACH_EDGE; /* Reset the forwardable bit on our block since it's no longer in a forwarding chain path. */ @@ -4096,12 +4118,14 @@ tree_try_redirect_by_replacing_jump (edge e, basic_block target) edge tmp; block_stmt_iterator b; tree stmt; - unsigned ix; /* Verify that all targets will be TARGET. 
*/ - FOR_EACH_EDGE (tmp, src->succs, ix) - if (tmp->dest != target && tmp != e) - break; + FOR_EACH_EDGE (tmp, src->succs) + { + if (tmp->dest != target && tmp != e) + break; + } + END_FOR_EACH_EDGE; if (tmp) return NULL; @@ -4223,15 +4247,17 @@ tree_split_block (basic_block bb, void *stmt) tree act; basic_block new_bb; edge e; - unsigned ix; new_bb = create_empty_bb (bb); /* Redirect the outgoing edges. */ new_bb->succs = bb->succs; bb->succs = NULL; - FOR_EACH_EDGE (e, new_bb->succs, ix) - e->src = new_bb; + FOR_EACH_EDGE (e, new_bb->succs) + { + e->src = new_bb; + } + END_FOR_EACH_EDGE; if (stmt && TREE_CODE ((tree) stmt) == LABEL_EXPR) stmt = NULL; @@ -4429,10 +4455,12 @@ static void print_pred_bbs (FILE *file, basic_block bb) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, bb->preds, ix) - fprintf (file, "bb_%d", e->src->index); + FOR_EACH_EDGE (e, bb->preds) + { + fprintf (file, "bb_%d", e->src->index); + } + END_FOR_EACH_EDGE; } @@ -4442,10 +4470,12 @@ static void print_succ_bbs (FILE *file, basic_block bb) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) - fprintf (file, "bb_%d", e->src->index); + FOR_EACH_EDGE (e, bb->succs) + { + fprintf (file, "bb_%d", e->src->index); + } + END_FOR_EACH_EDGE; } @@ -4613,15 +4643,17 @@ tree_flow_call_edges_add (sbitmap blocks) if (need_fake_edge_p (t)) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->dest == EXIT_BLOCK_PTR) - { - bsi_insert_on_edge (e, build_empty_stmt ()); - bsi_commit_edge_inserts ((int *)NULL); - break; - } + FOR_EACH_EDGE (e, bb->succs) + { + if (e->dest == EXIT_BLOCK_PTR) + { + bsi_insert_on_edge (e, build_empty_stmt ()); + bsi_commit_edge_inserts ((int *)NULL); + break; + } + } + END_FOR_EACH_EDGE; } } @@ -4658,10 +4690,12 @@ tree_flow_call_edges_add (sbitmap blocks) #ifdef ENABLE_CHECKING if (stmt == last_stmt) { - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->dest == EXIT_BLOCK_PTR) - abort (); + FOR_EACH_EDGE (e, bb->succs) + { + if (e->dest == 
EXIT_BLOCK_PTR) + abort (); + } + END_FOR_EACH_EDGE; } #endif @@ -4693,20 +4727,20 @@ tree_purge_dead_eh_edges (basic_block bb) bool changed = false; edge e; tree stmt = last_stmt (bb); - unsigned ix; if (stmt && tree_can_throw_internal (stmt)) return false; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (e->flags & EDGE_EH) { ssa_remove_edge (e); - ix--; + __ix--; changed = true; } } + END_FOR_EACH_EDGE; return changed; } @@ -4755,15 +4789,17 @@ split_critical_edges (void) { basic_block bb; edge e; - unsigned ix; FOR_ALL_BB (bb) { - FOR_EACH_EDGE (e, bb->succs, ix) - if (EDGE_CRITICAL_P (e) && !(e->flags & EDGE_ABNORMAL)) - { - split_edge (e); - } + FOR_EACH_EDGE (e, bb->succs) + { + if (EDGE_CRITICAL_P (e) && !(e->flags & EDGE_ABNORMAL)) + { + split_edge (e); + } + } + END_FOR_EACH_EDGE; } } @@ -4868,7 +4904,6 @@ execute_warn_function_return (void) #endif tree last; edge e; - unsigned ix; if (warn_missing_noreturn && !TREE_THIS_VOLATILE (cfun->decl) @@ -4879,14 +4914,14 @@ execute_warn_function_return (void) /* If we have a path to EXIT, then we do return. */ if (TREE_THIS_VOLATILE (cfun->decl) - && EDGE_COUNT (EXIT_BLOCK_PTR->preds) != 0) + && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0) { #ifdef USE_MAPPED_LOCATION location = UNKNOWN_LOCATION; #else locus = NULL; #endif - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds, ix) + FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) { last = last_stmt (e->src); if (TREE_CODE (last) == RETURN_EXPR @@ -4896,7 +4931,9 @@ execute_warn_function_return (void) && (locus = EXPR_LOCUS (last)) != NULL) #endif break; - } + } + END_FOR_EACH_EDGE; + #ifdef USE_MAPPED_LOCATION if (location == UNKNOWN_LOCATION) location = cfun->function_end_locus; @@ -4911,10 +4948,10 @@ execute_warn_function_return (void) /* If we see "return;" in some basic block, then we do reach the end without returning a value. 
*/ else if (warn_return_type - && EDGE_COUNT (EXIT_BLOCK_PTR->preds) != 0 + && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0 && !VOID_TYPE_P (TREE_TYPE (TREE_TYPE (cfun->decl)))) { - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds, ix) + FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) { tree last = last_stmt (e->src); if (TREE_CODE (last) == RETURN_EXPR @@ -4934,6 +4971,7 @@ execute_warn_function_return (void) break; } } + END_FOR_EACH_EDGE; } } diff --git a/gcc/tree-into-ssa.c b/gcc/tree-into-ssa.c index afde12169a6..9d7303fa4a8 100644 --- a/gcc/tree-into-ssa.c +++ b/gcc/tree-into-ssa.c @@ -224,13 +224,12 @@ compute_global_livein (bitmap livein, bitmap def_blocks) while (tos != worklist) { edge e; - unsigned ix; /* Pull a block off the worklist. */ bb = *--tos; /* For each predecessor block. */ - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) { basic_block pred = e->src; int pred_index = pred->index; @@ -244,6 +243,7 @@ compute_global_livein (bitmap livein, bitmap def_blocks) bitmap_set_bit (livein, pred_index); } } + END_FOR_EACH_EDGE; } free (worklist); @@ -299,9 +299,9 @@ ssa_mark_phi_uses (struct dom_walk_data *walk_data, basic_block bb) sbitmap kills = gd->kills; edge e; tree phi, use; - unsigned uid, ix; + unsigned uid; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) continue; @@ -319,6 +319,7 @@ ssa_mark_phi_uses (struct dom_walk_data *walk_data, basic_block bb) set_livein_block (use, bb); } } + END_FOR_EACH_EDGE; } /* Call back for walk_dominator_tree used to collect definition sites @@ -827,15 +828,17 @@ ssa_rewrite_initialize_block (struct dom_walk_data *walk_data, basic_block bb) = (struct rewrite_block_data *)VARRAY_TOP_GENERIC_PTR (walk_data->block_data_stack); sbitmap names_to_rename = walk_data->global_data; edge e; - unsigned ix; bool abnormal_phi; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "\n\nRenaming block #%d\n\n", bb->index); - FOR_EACH_EDGE (e, bb->preds, ix) - if (e->flags 
& EDGE_ABNORMAL) - break; + FOR_EACH_EDGE (e, bb->preds) + { + if (e->flags & EDGE_ABNORMAL) + break; + } + END_FOR_EACH_EDGE; abnormal_phi = (e != NULL); /* Step 1. Register new definitions for every PHI node in the block. @@ -870,9 +873,8 @@ rewrite_add_phi_arguments (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED, basic_block bb) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { tree phi; @@ -890,6 +892,7 @@ rewrite_add_phi_arguments (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED, add_phi_arg (&phi, currdef, e); } } + END_FOR_EACH_EDGE; } /* Ditto, for ssa name rewriting. */ @@ -898,11 +901,10 @@ static void ssa_rewrite_phi_arguments (struct dom_walk_data *walk_data, basic_block bb) { edge e; - unsigned ix; sbitmap names_to_rename = walk_data->global_data; use_operand_p op; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { tree phi; @@ -923,6 +925,7 @@ ssa_rewrite_phi_arguments (struct dom_walk_data *walk_data, basic_block bb) SSA_NAME_OCCURS_IN_ABNORMAL_PHI (USE_FROM_PTR (op)) = 1; } } + END_FOR_EACH_EDGE; } /* SSA Rewriting Step 5. Restore the current reaching definition for each @@ -1124,9 +1127,11 @@ insert_phi_nodes_for (tree var, bitmap *dfs, varray_type *work_stack) /* If we are rewriting ssa names, add also the phi arguments. */ if (TREE_CODE (var) == SSA_NAME) { - unsigned ix; - FOR_EACH_EDGE (e, bb->preds, ix) - add_phi_arg (&phi, var, e); + FOR_EACH_EDGE (e, bb->preds) + { + add_phi_arg (&phi, var, e); + } + END_FOR_EACH_EDGE; } } while (0)); diff --git a/gcc/tree-outof-ssa.c b/gcc/tree-outof-ssa.c index 8f01036087e..5dddc6de19a 100644 --- a/gcc/tree-outof-ssa.c +++ b/gcc/tree-outof-ssa.c @@ -581,14 +581,14 @@ coalesce_abnormal_edges (var_map map, conflict_graph graph, root_var_p rv) edge e; tree phi, var, tmp; int x, y; - unsigned ix; /* Code cannot be inserted on abnormal edges. Look for all abnormal edges, and coalesce any PHI results with their arguments across that edge. 
*/ FOR_EACH_BB (bb) - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) + { if (e->dest != EXIT_BLOCK_PTR && e->flags & EDGE_ABNORMAL) for (phi = phi_nodes (e->dest); phi; phi = PHI_CHAIN (phi)) { @@ -657,6 +657,8 @@ coalesce_abnormal_edges (var_map map, conflict_graph graph, root_var_p rv) } } } + } + END_FOR_EACH_EDGE; } @@ -1942,9 +1944,11 @@ rewrite_trees (var_map map, tree *values) phi = phi_nodes (bb); if (phi) { - unsigned ix; - FOR_EACH_EDGE (e, bb->preds, ix) - eliminate_phi (e, phi_arg_from_edge (phi, e), g); + FOR_EACH_EDGE (e, bb->preds) + { + eliminate_phi (e, phi_arg_from_edge (phi, e), g); + } + END_FOR_EACH_EDGE; } } diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c index ead487c3baf..956ad89cd5b 100644 --- a/gcc/tree-pretty-print.c +++ b/gcc/tree-pretty-print.c @@ -2096,7 +2096,6 @@ dump_bb_header (pretty_printer *buffer, basic_block bb, int indent, int flags) { edge e; tree stmt; - unsigned ix; if (flags & TDF_BLOCKS) { @@ -2120,17 +2119,20 @@ dump_bb_header (pretty_printer *buffer, basic_block bb, int indent, int flags) pp_string (buffer, "# PRED:"); pp_write_text_to_stream (buffer); - FOR_EACH_EDGE (e, bb->preds, ix) - if (flags & TDF_SLIM) - { - pp_string (buffer, " "); - if (e->src == ENTRY_BLOCK_PTR) - pp_string (buffer, "ENTRY"); - else - pp_decimal_int (buffer, e->src->index); - } - else - dump_edge_info (buffer->buffer->stream, e, 0); + FOR_EACH_EDGE (e, bb->preds) + { + if (flags & TDF_SLIM) + { + pp_string (buffer, " "); + if (e->src == ENTRY_BLOCK_PTR) + pp_string (buffer, "ENTRY"); + else + pp_decimal_int (buffer, e->src->index); + } + else + dump_edge_info (buffer->buffer->stream, e, 0); + } + END_FOR_EACH_EDGE; pp_newline (buffer); } else @@ -2154,23 +2156,25 @@ static void dump_bb_end (pretty_printer *buffer, basic_block bb, int indent, int flags) { edge e; - unsigned ix; INDENT (indent); pp_string (buffer, "# SUCC:"); pp_write_text_to_stream (buffer); - FOR_EACH_EDGE (e, bb->succs, ix) - if (flags & 
TDF_SLIM) - { - pp_string (buffer, " "); - if (e->dest == EXIT_BLOCK_PTR) - pp_string (buffer, "EXIT"); - else - pp_decimal_int (buffer, e->dest->index); - } - else - dump_edge_info (buffer->buffer->stream, e, 1); + FOR_EACH_EDGE (e, bb->succs) + { + if (flags & TDF_SLIM) + { + pp_string (buffer, " "); + if (e->dest == EXIT_BLOCK_PTR) + pp_string (buffer, "EXIT"); + else + pp_decimal_int (buffer, e->dest->index); + } + else + dump_edge_info (buffer->buffer->stream, e, 1); + } + END_FOR_EACH_EDGE; pp_newline (buffer); } @@ -2226,13 +2230,15 @@ dump_implicit_edges (pretty_printer *buffer, basic_block bb, int indent, int flags) { edge e; - unsigned ix; /* If there is a fallthru edge, we may need to add an artificial goto to the dump. */ - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->flags & EDGE_FALLTHRU) - break; + FOR_EACH_EDGE (e, bb->succs) + { + if (e->flags & EDGE_FALLTHRU) + break; + } + END_FOR_EACH_EDGE; if (e && e->dest != bb->next_bb) { INDENT (indent); diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c index e824215eebe..430aaf3ba4e 100644 --- a/gcc/tree-sra.c +++ b/gcc/tree-sra.c @@ -1681,11 +1681,10 @@ void insert_edge_copies (tree stmt, basic_block bb) { edge e; - unsigned ix; bool first_copy; first_copy = true; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { /* We don't need to insert copies on abnormal edges. The value of the scalar replacement is not guaranteed to @@ -1701,6 +1700,7 @@ insert_edge_copies (tree stmt, basic_block bb) bsi_insert_on_edge (e, lhd_unsave_expr_now (stmt)); } } + END_FOR_EACH_EDGE; } /* Helper function to insert LIST before BSI, and set up line number info. */ diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c index c2940055824..0f198f08675 100644 --- a/gcc/tree-ssa-ccp.c +++ b/gcc/tree-ssa-ccp.c @@ -302,7 +302,6 @@ simulate_block (basic_block block) block_stmt_iterator j; unsigned int normal_edge_count; edge e, normal_edge; - unsigned ix; /* Note that we have simulated this block. 
*/ SET_BIT (executable_blocks, block->index); @@ -320,7 +319,7 @@ simulate_block (basic_block block) normal_edge_count = 0; normal_edge = NULL; - FOR_EACH_EDGE (e, block->succs, ix) + FOR_EACH_EDGE (e, block->succs) { if (e->flags & EDGE_ABNORMAL) { @@ -331,10 +330,11 @@ simulate_block (basic_block block) normal_edge_count++; normal_edge = e; } - } - - if (normal_edge_count == 1) - add_control_edge (normal_edge); + } + END_FOR_EACH_EDGE; + + if (normal_edge_count == 1) + add_control_edge (normal_edge); } } @@ -840,10 +840,12 @@ static void add_outgoing_control_edges (basic_block bb) { edge e; - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) - add_control_edge (e); + FOR_EACH_EDGE (e, bb->succs) + { + add_control_edge (e); + } + END_FOR_EACH_EDGE; } @@ -1187,7 +1189,6 @@ initialize (void) edge e; basic_block bb; sbitmap virtual_var; - unsigned ix; /* Worklists of SSA edges. */ VARRAY_TREE_INIT (ssa_edges, 20, "ssa_edges"); @@ -1218,7 +1219,6 @@ initialize (void) v_must_def_optype v_must_defs; size_t x; int vary; - unsigned ix; /* Get the default value for each definition. */ for (i = bsi_start (bb); !bsi_end_p (i); bsi_next (&i)) @@ -1256,8 +1256,11 @@ initialize (void) } } - FOR_EACH_EDGE (e, bb->succs, ix) - e->flags &= ~EDGE_EXECUTABLE; + FOR_EACH_EDGE (e, bb->succs) + { + e->flags &= ~EDGE_EXECUTABLE; + } + END_FOR_EACH_EDGE; } /* Now process PHI nodes. */ @@ -1304,7 +1307,7 @@ initialize (void) /* Seed the algorithm by adding the successors of the entry block to the edge worklist. 
*/ - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs, ix) + FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) { if (e->dest != EXIT_BLOCK_PTR) { @@ -1312,6 +1315,7 @@ initialize (void) cfg_blocks_add (e->dest); } } + END_FOR_EACH_EDGE; } diff --git a/gcc/tree-ssa-dce.c b/gcc/tree-ssa-dce.c index 1c02d02b413..7912d5605d1 100644 --- a/gcc/tree-ssa-dce.c +++ b/gcc/tree-ssa-dce.c @@ -468,10 +468,12 @@ find_obviously_necessary_stmts (struct edge_list *el) and we currently do not have a means to recognize the finite ones. */ FOR_EACH_BB (bb) { - unsigned ix; - FOR_EACH_EDGE (e, bb->succs, ix) - if (e->flags & EDGE_DFS_BACK) - mark_control_dependent_edges_necessary (e->dest, el); + FOR_EACH_EDGE (e, bb->succs) + { + if (e->flags & EDGE_DFS_BACK) + mark_control_dependent_edges_necessary (e->dest, el); + } + END_FOR_EACH_EDGE; } } } diff --git a/gcc/tree-ssa-dom.c b/gcc/tree-ssa-dom.c index 99af2e59554..b8ca396a5f8 100644 --- a/gcc/tree-ssa-dom.c +++ b/gcc/tree-ssa-dom.c @@ -845,7 +845,6 @@ thread_across_edge (struct dom_walk_data *walk_data, edge e) { tree cond, cached_lhs; edge e1; - unsigned ix; /* Do not forward entry edges into the loop. In the case loop has multiple entry edges we may end up in constructing irreducible @@ -854,9 +853,12 @@ thread_across_edge (struct dom_walk_data *walk_data, edge e) edges forward to the same destination block. */ if (!e->flags & EDGE_DFS_BACK) { - FOR_EACH_EDGE (e1, e->dest->preds, ix) - if (e1->flags & EDGE_DFS_BACK) - break; + FOR_EACH_EDGE (e1, e->dest->preds) + { + if (e1->flags & EDGE_DFS_BACK) + break; + } + END_FOR_EACH_EDGE; if (e1) return; } @@ -1417,9 +1419,8 @@ single_incoming_edge_ignoring_loop_edges (basic_block bb) { edge retval = NULL; edge e; - unsigned ix; - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) { /* A loop back edge can be identified by the destination of the edge dominating the source of the edge. */ @@ -1435,6 +1436,7 @@ single_incoming_edge_ignoring_loop_edges (basic_block bb) it. 
*/ retval = e; } + END_FOR_EACH_EDGE; return retval; } @@ -2544,12 +2546,11 @@ cprop_into_successor_phis (basic_block bb, bitmap nonzero_vars) { edge e; - unsigned ix; /* This can get rather expensive if the implementation is naive in how it finds the phi alternative associated with a particular edge. */ - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { tree phi; int phi_num_args; @@ -2628,6 +2629,7 @@ cprop_into_successor_phis (basic_block bb, } } } + END_FOR_EACH_EDGE; } diff --git a/gcc/tree-ssa-live.c b/gcc/tree-ssa-live.c index 833a6cff8bf..4683c0fd228 100644 --- a/gcc/tree-ssa-live.c +++ b/gcc/tree-ssa-live.c @@ -511,7 +511,6 @@ live_worklist (tree_live_info_p live, varray_type stack, int i) tree var; basic_block def_bb = NULL; edge e; - unsigned ix; var_map map = live->map; var = partition_to_var (map, i); @@ -528,18 +527,21 @@ live_worklist (tree_live_info_p live, varray_type stack, int i) b = VARRAY_TOP_INT (stack); VARRAY_POP (stack); - FOR_EACH_EDGE (e, BASIC_BLOCK (b)->preds, ix) - if (e->src != ENTRY_BLOCK_PTR) - { - /* Its not live on entry to the block its defined in. */ - if (e->src == def_bb) - continue; - if (!bitmap_bit_p (live->livein[i], e->src->index)) - { - bitmap_set_bit (live->livein[i], e->src->index); - VARRAY_PUSH_INT (stack, e->src->index); - } - } + FOR_EACH_EDGE (e, BASIC_BLOCK (b)->preds) + { + if (e->src != ENTRY_BLOCK_PTR) + { + /* Its not live on entry to the block its defined in. 
*/ + if (e->src == def_bb) + continue; + if (!bitmap_bit_p (live->livein[i], e->src->index)) + { + bitmap_set_bit (live->livein[i], e->src->index); + VARRAY_PUSH_INT (stack, e->src->index); + } + } + } + END_FOR_EACH_EDGE; } } @@ -586,7 +588,6 @@ calculate_live_on_entry (var_map map) tree phi, var, stmt; tree op; edge e; - unsigned ix; varray_type stack; block_stmt_iterator bsi; use_optype uses; @@ -669,7 +670,7 @@ calculate_live_on_entry (var_map map) bb = ENTRY_BLOCK_PTR; num = 0; - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { int entry_block = e->dest->index; if (e->dest == EXIT_BLOCK_PTR) @@ -743,6 +744,7 @@ calculate_live_on_entry (var_map map) } } } + END_FOR_EACH_EDGE; if (num > 0) abort (); #endif @@ -763,7 +765,6 @@ calculate_live_on_exit (tree_live_info_p liveinfo) bitmap *on_exit; basic_block bb; edge e; - unsigned ix; tree t, phi; bitmap on_entry; var_map map = liveinfo->map; @@ -792,9 +793,12 @@ calculate_live_on_exit (tree_live_info_p liveinfo) on_entry = live_entry_blocks (liveinfo, i); EXECUTE_IF_SET_IN_BITMAP (on_entry, 0, b, { - FOR_EACH_EDGE (e, BASIC_BLOCK (b)->preds, ix) - if (e->src != ENTRY_BLOCK_PTR) - bitmap_set_bit (on_exit[e->src->index], i); + FOR_EACH_EDGE (e, BASIC_BLOCK (b)->preds) + { + if (e->src != ENTRY_BLOCK_PTR) + bitmap_set_bit (on_exit[e->src->index], i); + } + END_FOR_EACH_EDGE; }); } diff --git a/gcc/tree-ssa-loop-ch.c b/gcc/tree-ssa-loop-ch.c index 38a0565dd68..1b607426c6c 100644 --- a/gcc/tree-ssa-loop-ch.c +++ b/gcc/tree-ssa-loop-ch.c @@ -172,7 +172,6 @@ duplicate_blocks (varray_type bbs_to_duplicate) for (i = 0; i < VARRAY_ACTIVE_SIZE (bbs_to_duplicate); i++) { - unsigned ix; preheader_edge = VARRAY_GENERIC_PTR_NOGC (bbs_to_duplicate, i); header = preheader_edge->dest; @@ -193,14 +192,16 @@ duplicate_blocks (varray_type bbs_to_duplicate) PENDING_STMT (preheader_edge) = NULL; /* Add the phi arguments to the outgoing edges. 
*/ - FOR_EACH_EDGE (e, header->succs, ix) + FOR_EACH_EDGE (e, header->succs) { edge e1; - unsigned ix1; - FOR_EACH_EDGE (e1, new_header->succs, ix1) - if (e1->dest == e->dest) - break; - if (e1 == NULL || ix1 > EDGE_COUNT (new_header->succs)) + FOR_EACH_EDGE (e1, new_header->succs) + { + if (e1->dest == e->dest) + break; + } + END_FOR_EACH_EDGE; + if (e1 == NULL) abort (); for (phi = phi_nodes (e->dest); phi; phi = TREE_CHAIN (phi)) @@ -209,6 +210,7 @@ duplicate_blocks (varray_type bbs_to_duplicate) add_phi_arg (&phi, def, e1); } } + END_FOR_EACH_EDGE; } calculate_dominance_info (CDI_DOMINATORS); diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c index 944e6ba2763..10547fa08f6 100644 --- a/gcc/tree-ssa-loop-im.c +++ b/gcc/tree-ssa-loop-im.c @@ -1185,7 +1185,7 @@ static void fill_always_executed_in (struct loop *loop, sbitmap contains_call) { basic_block bb = NULL, *bbs, last = NULL; - unsigned i, ix; + unsigned i; edge e; struct loop *inn_loop = loop; @@ -1203,9 +1203,13 @@ fill_always_executed_in (struct loop *loop, sbitmap contains_call) if (TEST_BIT (contains_call, bb->index)) break; - FOR_EACH_EDGE (e, bb->succs, ix) - if (!flow_bb_inside_loop_p (loop, e->dest)) - break; + FOR_EACH_EDGE (e, bb->succs) + { + if (!flow_bb_inside_loop_p (loop, e->dest)) + break; + } + END_FOR_EACH_EDGE; + if (e) break; diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c index 5954affbc0c..9266be554fd 100644 --- a/gcc/tree-ssa-pre.c +++ b/gcc/tree-ssa-pre.c @@ -1134,13 +1134,15 @@ compute_antic_aux (basic_block block) setting the BB_VISITED flag. */ if (! 
(block->flags & BB_VISITED)) { - unsigned ix; - FOR_EACH_EDGE (e, block->preds, ix) - if (e->flags & EDGE_ABNORMAL) - { - block->flags |= BB_VISITED; - break; - } + FOR_EACH_EDGE (e, block->preds) + { + if (e->flags & EDGE_ABNORMAL) + { + block->flags |= BB_VISITED; + break; + } + } + END_FOR_EACH_EDGE; } if (block->flags & BB_VISITED) { @@ -1171,12 +1173,14 @@ compute_antic_aux (basic_block block) varray_type worklist; edge e; size_t i; - unsigned ix; basic_block bprime, first; VARRAY_BB_INIT (worklist, 1, "succ"); - FOR_EACH_EDGE (e, block->succs, ix) - VARRAY_PUSH_BB (worklist, e->dest); + FOR_EACH_EDGE (e, block->succs) + { + VARRAY_PUSH_BB (worklist, e->dest); + } + END_FOR_EACH_EDGE; first = VARRAY_BB (worklist, 0); set_copy (ANTIC_OUT, ANTIC_IN (first)); @@ -1442,7 +1446,6 @@ insert_aux (basic_block block) edge pred; basic_block bprime; tree eprime; - unsigned ix; val = get_value_handle (node->expr); if (bitmap_set_contains_value (PHI_GEN (block), val)) @@ -1456,7 +1459,7 @@ insert_aux (basic_block block) avail = xcalloc (last_basic_block, sizeof (tree)); - FOR_EACH_EDGE (pred, block->preds, ix) + FOR_EACH_EDGE (pred, block->preds) { tree vprime; tree edoubleprime; @@ -1512,6 +1515,8 @@ insert_aux (basic_block block) abort (); } } + END_FOR_EACH_EDGE; + /* If we can insert it, it's not the same value already existing along every predecessor, and it's defined by some predecessor, it is @@ -1528,7 +1533,7 @@ insert_aux (basic_block block) } /* Make the necessary insertions. */ - FOR_EACH_EDGE (pred, block->preds, ix) + FOR_EACH_EDGE (pred, block->preds) { tree stmts = alloc_stmt_list (); tree builtexpr; @@ -1544,7 +1549,9 @@ insert_aux (basic_block block) bsi_commit_edge_inserts (NULL); avail[bprime->index] = builtexpr; } - } + } + END_FOR_EACH_EDGE; + /* Now build a phi for the new variable. 
*/ temp = create_tmp_var (type, "prephitmp"); add_referenced_tmp_var (temp); @@ -1560,11 +1567,13 @@ insert_aux (basic_block block) bitmap_value_replace_in_set (AVAIL_OUT (block), PHI_RESULT (temp)); - FOR_EACH_EDGE (pred, block->preds, ix) + FOR_EACH_EDGE (pred, block->preds) { add_phi_arg (&temp, avail[pred->src->index], pred); } + END_FOR_EACH_EDGE; + if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Created phi "); diff --git a/gcc/tree-ssa.c b/gcc/tree-ssa.c index 12e4656fe9a..e641eadbe69 100644 --- a/gcc/tree-ssa.c +++ b/gcc/tree-ssa.c @@ -266,13 +266,15 @@ static bool verify_phi_args (tree phi, basic_block bb, basic_block *definition_block) { edge e; - unsigned ix; bool err = false; int i, phi_num_args = PHI_NUM_ARGS (phi); /* Mark all the incoming edges. */ - FOR_EACH_EDGE (e, bb->preds, ix) - e->aux = (void *) 1; + FOR_EACH_EDGE (e, bb->preds) + { + e->aux = (void *) 1; + } + END_FOR_EACH_EDGE; for (i = 0; i < phi_num_args; i++) { @@ -316,7 +318,7 @@ verify_phi_args (tree phi, basic_block bb, basic_block *definition_block) e->aux = (void *) 2; } - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) { if (e->aux != (void *) 2) { @@ -327,6 +329,7 @@ verify_phi_args (tree phi, basic_block bb, basic_block *definition_block) } e->aux = (void *) 0; } + END_FOR_EACH_EDGE; error: if (err) @@ -591,11 +594,10 @@ verify_ssa (void) { edge e; tree phi; - unsigned ix; block_stmt_iterator bsi; /* Make sure that all edges have a clear 'aux' field. */ - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) { if (e->aux) { @@ -604,6 +606,7 @@ verify_ssa (void) goto err; } } + END_FOR_EACH_EDGE; /* Verify the arguments for every PHI node in the block. 
*/ for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) diff --git a/gcc/tree-tailcall.c b/gcc/tree-tailcall.c index 6c03565fc37..2c513911470 100644 --- a/gcc/tree-tailcall.c +++ b/gcc/tree-tailcall.c @@ -190,7 +190,6 @@ independent_of_stmt_p (tree expr, tree at, block_stmt_iterator bsi) { basic_block bb, call_bb, at_bb; edge e; - unsigned ix; if (is_gimple_min_invariant (expr)) return expr; @@ -231,9 +230,13 @@ independent_of_stmt_p (tree expr, tree at, block_stmt_iterator bsi) break; } - FOR_EACH_EDGE (e, bb->preds, ix) - if (e->src->aux) - break; + FOR_EACH_EDGE (e, bb->preds) + { + if (e->src->aux) + break; + } + END_FOR_EACH_EDGE; + if (!e) abort (); @@ -362,7 +365,6 @@ find_tail_calls (basic_block bb, struct tailcall **ret) bool tail_recursion; struct tailcall *nw; edge e; - unsigned ix; tree m, a; basic_block abb; stmt_ann_t ann; @@ -408,9 +410,11 @@ find_tail_calls (basic_block bb, struct tailcall **ret) if (bsi_end_p (bsi)) { /* Recurse to the predecessors. */ - FOR_EACH_EDGE (e, bb->preds, ix) - find_tail_calls (e->src, ret); - + FOR_EACH_EDGE (e, bb->preds) + { + find_tail_calls (e->src, ret); + } + END_FOR_EACH_EDGE; return; } @@ -801,7 +805,6 @@ static void tree_optimize_tail_calls_1 (bool opt_tailcalls) { edge e; - unsigned ix; bool phis_constructed = false; struct tailcall *tailcalls = NULL, *act, *next; bool changed = false; @@ -813,7 +816,7 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls) if (opt_tailcalls) opt_tailcalls = suitable_for_tail_call_opt_p (); - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds, ix) + FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) { /* Only traverse the normal exits, i.e. those that end with return statement. */ @@ -823,6 +826,7 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls) && TREE_CODE (stmt) == RETURN_EXPR) find_tail_calls (e->src, &tailcalls); } + END_FOR_EACH_EDGE; /* Construct the phi nodes and accumulators if necessary. 
*/ a_acc = m_acc = NULL_TREE; @@ -896,7 +900,7 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls) if (a_acc || m_acc) { /* Modify the remaining return statements. */ - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds, ix) + FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) { stmt = last_stmt (e->src); @@ -904,6 +908,7 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls) && TREE_CODE (stmt) == RETURN_EXPR) adjust_return_value (e->src, m_acc, a_acc); } + END_FOR_EACH_EDGE; } if (changed) diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c index 79580a6d1eb..47a34b4a237 100644 --- a/gcc/var-tracking.c +++ b/gcc/var-tracking.c @@ -1690,7 +1690,6 @@ vt_find_locations (void) int *bb_order; int *rc_order; int i; - unsigned ix; /* Compute reverse completion order of depth first search of the CFG so that the data-flow runs faster. */ @@ -1735,15 +1734,16 @@ vt_find_locations (void) /* Calculate the IN set as union of predecessor OUT sets. */ dataflow_set_clear (&VTI (bb)->in); - FOR_EACH_EDGE (e, bb->preds, ix) + FOR_EACH_EDGE (e, bb->preds) { dataflow_set_union (&VTI (bb)->in, &VTI (e->src)->out); } + END_FOR_EACH_EDGE; changed = compute_bb_dataflow (bb); if (changed) { - FOR_EACH_EDGE (e, bb->succs, ix) + FOR_EACH_EDGE (e, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) continue; @@ -1770,6 +1770,7 @@ vt_find_locations (void) e->dest); } } + END_FOR_EACH_EDGE; } } } -- 2.11.4.GIT