From a84786da06e20beb8fc8f38149f26ab8217d983c Mon Sep 17 00:00:00 2001 From: bje Date: Sat, 18 Sep 2004 02:54:46 +0000 Subject: [PATCH] * basic-block.h (ei_safe_edge): New function. (FOR_EACH_EDGE): Rewrite; include iterator argument. (END_FOR_EACH_EDGE): Remove. * Update all callers. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/edge-vector-branch@87694 138bc75d-0d04-0410-961f-82ee72b054a4 --- gcc/ChangeLog.vec | 22 +++++++++++- gcc/basic-block.h | 39 ++++++--------------- gcc/bb-reorder.c | 59 ++++++++++++++----------------- gcc/bt-load.c | 8 ++--- gcc/cfg.c | 50 ++++++++++++-------------- gcc/cfganal.c | 52 +++++++++++++-------------- gcc/cfgbuild.c | 20 +++++------ gcc/cfgcleanup.c | 29 ++++++++-------- gcc/cfgexpand.c | 8 ++--- gcc/cfghooks.c | 37 +++++++++----------- gcc/cfglayout.c | 21 ++++++----- gcc/cfgloop.c | 85 ++++++++++++++++++++++----------------------- gcc/cfgloopanal.c | 14 ++++---- gcc/cfgloopmanip.c | 51 +++++++++++++-------------- gcc/cfgrtl.c | 56 +++++++++++++++-------------- gcc/config/i386/i386.c | 8 ++--- gcc/config/ia64/ia64.c | 3 +- gcc/cse.c | 4 +-- gcc/df.c | 10 +++--- gcc/dominance.c | 7 ++-- gcc/except.c | 4 +-- gcc/final.c | 6 ++-- gcc/flow.c | 11 +++--- gcc/function.c | 10 +++--- gcc/gcse.c | 29 +++++++--------- gcc/global.c | 11 +++--- gcc/graph.c | 4 +-- gcc/ifcvt.c | 19 +++++----- gcc/lcm.c | 44 ++++++++++------------- gcc/loop-invariant.c | 4 +-- gcc/loop-iv.c | 18 +++++----- gcc/loop-unroll.c | 6 ++-- gcc/postreload-gcse.c | 8 ++--- gcc/predict.c | 62 +++++++++++++++------------------ gcc/profile.c | 73 ++++++++++++++++++-------------------- gcc/ra-rewrite.c | 4 +-- gcc/ra.c | 4 +-- gcc/recog.c | 4 +-- gcc/reg-stack.c | 26 +++++++------- gcc/reload1.c | 7 ++-- gcc/sched-ebb.c | 12 +++---- gcc/sched-rgn.c | 13 +++---- gcc/tracer.c | 16 ++++----- gcc/tree-cfg.c | 81 ++++++++++++++++++++---------------------- gcc/tree-if-conv.c | 24 ++++++------- gcc/tree-into-ssa.c | 24 ++++++------- gcc/tree-outof-ssa.c | 8 ++--- gcc/tree-pretty-print.c | 12 +++---- gcc/tree-sra.c | 4 +-- gcc/tree-ssa-dce.c | 4 +-- gcc/tree-ssa-dom.c | 12 +++---- gcc/tree-ssa-live.c | 13 ++++--- gcc/tree-ssa-loop-ch.c | 8 ++--- gcc/tree-ssa-loop-im.c | 4 +-- gcc/tree-ssa-loop-ivopts.c | 4 +-- gcc/tree-ssa-loop-manip.c | 11 +++--- gcc/tree-ssa-pre.c | 18 +++++----- gcc/tree-ssa-propagate.c | 15 ++++---- gcc/tree-ssa-threadupdate.c | 4 +-- gcc/tree-ssa.c | 11 +++--- gcc/tree-tailcall.c | 15 ++++---- gcc/var-tracking.c | 7 ++-- 62 files changed, 589 insertions(+), 668 deletions(-) diff --git a/gcc/ChangeLog.vec b/gcc/ChangeLog.vec index 3ba8f17d1c5..e05d23bc3d5 100644 --- a/gcc/ChangeLog.vec +++ b/gcc/ChangeLog.vec @@ -1,4 +1,24 @@ -2004-09-17 Ben Elliston +2004-09-18 Ben Elliston + + * basic-block.h (ei_safe_edge): New function. + (FOR_EACH_EDGE): Rewrite; include iterator argument. + (END_FOR_EACH_EDGE): Remove. 
+ * bb-reorder.c, bt-load.c, cfg.c, cfganal.c, cfgbuild.c, + cfgcleanup.c, cfgexpand.c, cfghooks.c, cfglayout.c, cfgloop.c, + cfgloopanal.c, cfgloopmanip.c, cfgrtl.c, cse.c, df.c, dominance.c, + except.c, final.c, flow.c, function.c, gcse.c, global.c, graph.c, + ifcvt.c, lcm.c, loop-invariant.c, loop-iv.c, loop-unroll.c, + postreload-gcse.c, predict.c, profile.c, ra-rewrite.c, ra.c, + recog.c, reg-stack.c, reload1.c, sched-ebb.c, sched-rgn.c, + tracer.c, tree-cfg.c, tree-if-conv.c, tree-into-ssa.c, + tree-outof-ssa.c, tree-pretty-print.c, tree-sra.c, tree-ssa-dce.c, + tree-ssa-dom.c, tree-ssa-live.c, tree-ssa-loop-ch.c, + tree-ssa-loop-im.c, tree-ssa-loop-ivopts.c, tree-ssa-loop-manip.c, + tree-ssa-pre.c, tree-ssa-propagate.c, tree-ssa-threadupdate.c, + tree-ssa.c, tree-tailcall.c, var-tracking.c, config/i386/i386.c, + config/ia64/ia64.c: Update all callers. + +2004-09-17 Ben Elliston * basic-block.h (struct edge_stack): Remove. (edge_iterator): New type. diff --git a/gcc/basic-block.h b/gcc/basic-block.h index 5a01f29aa60..0549efd9c3e 100644 --- a/gcc/basic-block.h +++ b/gcc/basic-block.h @@ -602,35 +602,16 @@ ei_edge (edge_iterator i) return EDGE_I (i.container, i.index); } -#define FOR_EACH_EDGE(EDGE,EDGE_VEC) \ -do { \ - VEC(edge) *__ev = (EDGE_VEC); \ - edge __check_edge; \ - unsigned int __ix; \ - unsigned int __num_edges = EDGE_COUNT (__ev); \ - (EDGE) = NULL; \ - for (__ix = 0; VEC_iterate (edge, __ev, __ix, (EDGE)); __ix++) \ - { \ - if (ENABLE_VEC_CHECKING) \ - __check_edge = (EDGE); - -#define END_FOR_EACH_EDGE \ - if (ENABLE_VEC_CHECKING \ - && (__ix >= EDGE_COUNT (__ev) \ - || EDGE_I (__ev, __ix) != __check_edge)) \ - internal_error ("edge modified in FOR_EACH_EDGE: %s:%s", \ - __FILE__, __FUNCTION__); \ - } \ - if (ENABLE_VEC_CHECKING \ - && __num_edges > EDGE_COUNT (__ev)) \ - internal_error ("insufficient edges FOR_EACH_EDGE: %s:%s", \ - __FILE__, __FUNCTION__); \ - if (ENABLE_VEC_CHECKING \ - && __num_edges < EDGE_COUNT (__ev)) \ - internal_error ("excess edges FOR_EACH_EDGE: %s:%s", \ - __FILE__, __FUNCTION__); \ -} \ -while (0) +static inline edge +ei_safe_edge (edge_iterator i) +{ + return !ei_end_p (i) ? ei_edge (i) : NULL; +} + +#define FOR_EACH_EDGE(EDGE,ITER,EDGE_VEC) \ + for ((EDGE) = NULL, (ITER) = ei_start ((EDGE_VEC)); \ + ((EDGE) = ei_safe_edge ((ITER))); \ + ei_next (&(ITER))) struct edge_list * create_edge_list (void); void free_edge_list (struct edge_list *); diff --git a/gcc/bb-reorder.c b/gcc/bb-reorder.c index ff7ef436a8a..4d1da90cef9 100644 --- a/gcc/bb-reorder.c +++ b/gcc/bb-reorder.c @@ -229,6 +229,7 @@ find_traces (int *n_traces, struct trace *traces) int i; int number_of_rounds; edge e; + edge_iterator ei; fibheap_t heap; /* Add one extra round of trace collection when partitioning hot/cold @@ -243,7 +244,7 @@ find_traces (int *n_traces, struct trace *traces) heap = fibheap_new (); max_entry_frequency = 0; max_entry_count = 0; - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { bbd[e->dest->index].heap = heap; bbd[e->dest->index].node = fibheap_insert (heap, bb_to_key (e->dest), @@ -253,7 +254,6 @@ find_traces (int *n_traces, struct trace *traces) if (e->dest->count > max_entry_count) max_entry_count = e->dest->count; } - END_FOR_EACH_EDGE; /* Find the traces. 
*/ for (i = 0; i < number_of_rounds; i++) @@ -312,8 +312,9 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n) do { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) if (e->dest != EXIT_BLOCK_PTR && e->dest->rbi->visited != trace_n && (e->flags & EDGE_CAN_FALLTHRU) @@ -361,7 +362,6 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n) } } } - END_FOR_EACH_EDGE; bb = bb->rbi->next; } while (bb != back_edge->dest); @@ -451,6 +451,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, struct trace *trace; edge best_edge, e; fibheapkey_t key; + edge_iterator ei; bb = fibheap_extract_min (*heap); bbd[bb->index].heap = NULL; @@ -501,7 +502,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, bb->index, *n_traces - 1); /* Select the successor that will be placed after BB. */ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { gcc_assert (!(e->flags & EDGE_FAKE)); @@ -536,7 +537,6 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, best_freq = freq; } } - END_FOR_EACH_EDGE; /* If the best destination has multiple predecessors, and can be duplicated cheaper than a jump, don't allow it to be added @@ -546,7 +546,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, best_edge = NULL; /* Add all non-selected successors to the heaps. */ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e == best_edge || e->dest == EXIT_BLOCK_PTR @@ -607,7 +607,6 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, } } } - END_FOR_EACH_EDGE; if (best_edge) /* Suitable successor was found. */ { @@ -641,12 +640,11 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, /* Check whether there is another edge from BB. */ edge another_edge; - FOR_EACH_EDGE (another_edge, bb->succs) + FOR_EACH_EDGE (another_edge, ei, bb->succs) { if (another_edge != best_edge) break; } - END_FOR_EACH_EDGE; if (!another_edge && copy_bb_p (best_edge->dest, !optimize_size)) @@ -683,7 +681,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, */ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e != best_edge && (e->flags & EDGE_CAN_FALLTHRU) @@ -704,7 +702,6 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, break; } } - END_FOR_EACH_EDGE; bb->rbi->next = best_edge->dest; bb = best_edge->dest; @@ -719,7 +716,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, /* The trace is terminated so we have to recount the keys in heap (some block can have a lower key because now one of its predecessors is an end of the trace). */ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == EXIT_BLOCK_PTR || e->dest->rbi->visited) @@ -743,7 +740,6 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, } } } - END_FOR_EACH_EDGE; } fibheap_delete (*heap); @@ -809,7 +805,7 @@ static fibheapkey_t bb_to_key (basic_block bb) { edge e; - + edge_iterator ei; int priority = 0; /* Do not start in probably never executed blocks. */ @@ -820,7 +816,7 @@ bb_to_key (basic_block bb) /* Prefer blocks whose predecessor is an end of some trace or whose predecessor edge is EDGE_DFS_BACK. 
*/ - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if ((e->src != ENTRY_BLOCK_PTR && bbd[e->src->index].end_of_trace >= 0) || (e->flags & EDGE_DFS_BACK)) @@ -831,7 +827,6 @@ bb_to_key (basic_block bb) priority = edge_freq; } } - END_FOR_EACH_EDGE; if (priority) /* The block with priority should have significantly lower key. */ @@ -978,9 +973,10 @@ connect_traces (int n_traces, struct trace *traces) /* Find the predecessor traces. */ for (t2 = t; t2 > 0;) { + edge_iterator ei; best = NULL; best_len = 0; - FOR_EACH_EDGE (e, traces[t2].first->preds) + FOR_EACH_EDGE (e, ei, traces[t2].first->preds) { int si = e->src->index; @@ -998,7 +994,6 @@ connect_traces (int n_traces, struct trace *traces) best_len = traces[bbd[si].end_of_trace].length; } } - END_FOR_EACH_EDGE; if (best) { @@ -1027,9 +1022,10 @@ connect_traces (int n_traces, struct trace *traces) while (1) { /* Find the continuation of the chain. */ + edge_iterator ei; best = NULL; best_len = 0; - FOR_EACH_EDGE (e, traces[t].last->succs) + FOR_EACH_EDGE (e, ei, traces[t].last->succs) { int di = e->dest->index; @@ -1047,7 +1043,6 @@ connect_traces (int n_traces, struct trace *traces) best_len = traces[bbd[di].start_of_trace].length; } } - END_FOR_EACH_EDGE; if (best) { @@ -1070,13 +1065,14 @@ connect_traces (int n_traces, struct trace *traces) basic_block next_bb = NULL; bool try_copy = false; - FOR_EACH_EDGE (e, traces[t].last->succs) + FOR_EACH_EDGE (e, ei, traces[t].last->succs) { if (e->dest != EXIT_BLOCK_PTR && (e->flags & EDGE_CAN_FALLTHRU) && !(e->flags & EDGE_COMPLEX) && (!best || e->probability > best->probability)) { + edge_iterator ei; edge best2 = NULL; int best2_len = 0; @@ -1092,7 +1088,7 @@ connect_traces (int n_traces, struct trace *traces) continue; } - FOR_EACH_EDGE (e2, e->dest->succs) + FOR_EACH_EDGE (e2, ei, e->dest->succs) { int di = e2->dest->index; @@ -1119,10 +1115,8 @@ connect_traces (int n_traces, struct trace *traces) try_copy = true; } } - END_FOR_EACH_EDGE; } } - END_FOR_EACH_EDGE; if (flag_reorder_blocks_and_partition) try_copy = false; @@ -1271,6 +1265,7 @@ find_rarely_executed_basic_blocks_and_crossing_edges (edge *crossing_edges, bool has_hot_blocks = false; edge e; int i; + edge_iterator ei; /* Mark which partition (hot/cold) each basic block belongs in. */ @@ -1290,13 +1285,12 @@ find_rarely_executed_basic_blocks_and_crossing_edges (edge *crossing_edges, the hot partition (if there is one). */ if (has_hot_blocks) - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) if (e->dest->index >= 0) { BB_SET_PARTITION (e->dest, BB_HOT_PARTITION); break; } - END_FOR_EACH_EDGE; /* Mark every edge that crosses between sections. 
*/ @@ -1304,7 +1298,7 @@ find_rarely_executed_basic_blocks_and_crossing_edges (edge *crossing_edges, if (targetm.have_named_sections) { FOR_EACH_BB (bb) - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->src != ENTRY_BLOCK_PTR && e->dest != EXIT_BLOCK_PTR @@ -1322,7 +1316,6 @@ find_rarely_executed_basic_blocks_and_crossing_edges (edge *crossing_edges, else e->flags &= ~EDGE_CROSSING; } - END_FOR_EACH_EDGE; } *n_crossing_edges = i; } @@ -1572,8 +1565,9 @@ find_jump_block (basic_block jump_dest) basic_block source_bb = NULL; edge e; rtx insn; + edge_iterator ei; - FOR_EACH_EDGE (e, jump_dest->preds) + FOR_EACH_EDGE (e, ei, jump_dest->preds) if (e->flags & EDGE_CROSSING) { basic_block src = e->src; @@ -1600,7 +1594,6 @@ find_jump_block (basic_block jump_dest) if (source_bb) break; } - END_FOR_EACH_EDGE; return source_bb; } @@ -1858,9 +1851,10 @@ add_reg_crossing_jump_notes (void) { basic_block bb; edge e; + edge_iterator ei; FOR_EACH_BB (bb) - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if ((e->flags & EDGE_CROSSING) && JUMP_P (BB_END (e->src))) @@ -1869,7 +1863,6 @@ add_reg_crossing_jump_notes (void) REG_NOTES (BB_END (e->src))); } - END_FOR_EACH_EDGE; } /* Basic blocks containing NOTE_INSN_UNLIKELY_EXECUTED_CODE will be diff --git a/gcc/bt-load.c b/gcc/bt-load.c index 47d0af6078e..567905eaa99 100644 --- a/gcc/bt-load.c +++ b/gcc/bt-load.c @@ -878,6 +878,7 @@ augment_live_range (bitmap live_range, HARD_REG_SET *btrs_live_in_range, else { edge e; + edge_iterator ei; int new_block = new_bb->index; gcc_assert (dominated_by_p (CDI_DOMINATORS, head_bb, new_bb)); @@ -900,11 +901,10 @@ augment_live_range (bitmap live_range, HARD_REG_SET *btrs_live_in_range, fprintf (dump_file, "\n"); } - FOR_EACH_EDGE (e, head_bb->preds) + FOR_EACH_EDGE (e, ei, head_bb->preds) { *tos++ = e->src; } - END_FOR_EACH_EDGE; } while (tos != worklist) @@ -913,6 +913,7 @@ augment_live_range (bitmap live_range, HARD_REG_SET *btrs_live_in_range, if (!bitmap_bit_p (live_range, bb->index)) { edge e; + edge_iterator ei; bitmap_set_bit (live_range, bb->index); IOR_HARD_REG_SET (*btrs_live_in_range, @@ -926,13 +927,12 @@ augment_live_range (bitmap live_range, HARD_REG_SET *btrs_live_in_range, fprintf (dump_file, "\n"); } - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { basic_block pred = e->src; if (!bitmap_bit_p (live_range, pred->index)) *tos++ = pred; } - END_FOR_EACH_EDGE; } } diff --git a/gcc/cfg.c b/gcc/cfg.c index 36193f9fb59..32daba24088 100644 --- a/gcc/cfg.c +++ b/gcc/cfg.c @@ -144,19 +144,18 @@ clear_edges (void) { basic_block bb; edge e; + edge_iterator ei; FOR_EACH_BB (bb) { - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) free_edge (e); - END_FOR_EACH_EDGE; VEC_truncate (edge, bb->succs, 0); VEC_truncate (edge, bb->preds, 0); } - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) free_edge (e); - END_FOR_EACH_EDGE; VEC_truncate (edge, EXIT_BLOCK_PTR->preds, 0); VEC_truncate (edge, ENTRY_BLOCK_PTR->succs, 0); @@ -285,6 +284,7 @@ cached_make_edge (sbitmap *edge_cache, basic_block src, basic_block dst, int fla { int use_edge_cache; edge e; + edge_iterator ei; /* Don't bother with edge cache for ENTRY or EXIT, if there aren't that many edges to them, or we didn't allocate memory for it. */ @@ -305,7 +305,7 @@ cached_make_edge (sbitmap *edge_cache, basic_block src, basic_block dst, int fla /* Fall through. 
*/ case 0: - FOR_EACH_EDGE (e, src->succs) + FOR_EACH_EDGE (e, ei, src->succs) { if (e->dest == dst) { @@ -313,7 +313,6 @@ cached_make_edge (sbitmap *edge_cache, basic_block src, basic_block dst, int fla return NULL; } } - END_FOR_EACH_EDGE; break; } @@ -431,14 +430,14 @@ edge redirect_edge_succ_nodup (edge e, basic_block new_succ) { edge s; + edge_iterator ei; /* Check whether the edge is already present. */ - FOR_EACH_EDGE (s, e->src->succs) + FOR_EACH_EDGE (s, ei, e->src->succs) { if (s->dest == new_succ && s != e) break; } - END_FOR_EACH_EDGE; if (s) { @@ -507,22 +506,21 @@ check_bb_profile (basic_block bb, FILE * file) edge e; int sum = 0; gcov_type lsum; + edge_iterator ei; if (profile_status == PROFILE_ABSENT) return; if (bb != EXIT_BLOCK_PTR) { - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) sum += e->probability; - END_FOR_EACH_EDGE; if (EDGE_COUNT (bb->succs) && abs (sum - REG_BR_PROB_BASE) > 100) fprintf (file, "Invalid sum of outgoing probabilities %.1f%%\n", sum * 100.0 / REG_BR_PROB_BASE); lsum = 0; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) lsum += e->count; - END_FOR_EACH_EDGE; if (EDGE_COUNT (bb->succs) && (lsum - bb->count > 100 || lsum - bb->count < -100)) fprintf (file, "Invalid sum of outgoing counts %i, should be %i\n", @@ -531,17 +529,15 @@ check_bb_profile (basic_block bb, FILE * file) if (bb != ENTRY_BLOCK_PTR) { sum = 0; - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) sum += EDGE_FREQUENCY (e); - END_FOR_EACH_EDGE; if (abs (sum - bb->frequency) > 100) fprintf (file, "Invalid sum of incoming frequencies %i, should be %i\n", sum, bb->frequency); lsum = 0; - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) lsum += e->count; - END_FOR_EACH_EDGE; if (lsum - bb->count > 100 || lsum - bb->count < -100) fprintf (file, "Invalid sum of incoming counts %i, should be %i\n", (int) lsum, (int) bb->count); @@ -607,6 +603,7 @@ dump_flow_info (FILE *file) FOR_EACH_BB (bb) { edge e; + edge_iterator ei; fprintf (file, "\nBasic block %d ", bb->index); fprintf (file, "prev %d, next %d, ", @@ -621,14 +618,12 @@ dump_flow_info (FILE *file) fprintf (file, ".\n"); fprintf (file, "Predecessors: "); - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) dump_edge_info (file, e, 0); - END_FOR_EACH_EDGE; fprintf (file, "\nSuccessors: "); - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) dump_edge_info (file, e, 1); - END_FOR_EACH_EDGE; fprintf (file, "\nRegisters live at start:"); dump_regset (bb->global_live_at_start, file); @@ -820,10 +815,10 @@ alloc_aux_for_edges (int size) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) alloc_aux_for_edge (e, size); - END_FOR_EACH_EDGE; } } } @@ -838,9 +833,9 @@ clear_aux_for_edges (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { - FOR_EACH_EDGE (e, bb->succs) + edge_iterator ei; + FOR_EACH_EDGE (e, ei, bb->succs) e->aux = NULL; - END_FOR_EACH_EDGE; } } @@ -877,6 +872,7 @@ static void dump_cfg_bb_info (FILE *file, basic_block bb) { unsigned i; + edge_iterator ei; bool first = true; static const char * const bb_bitnames[] = { @@ -901,14 +897,12 @@ dump_cfg_bb_info (FILE *file, basic_block bb) fprintf (file, "\n"); fprintf (file, "Predecessors: "); - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) dump_edge_info (file, e, 0); - END_FOR_EACH_EDGE; fprintf (file, "\nSuccessors: "); - FOR_EACH_EDGE (e, bb->succs) + 
FOR_EACH_EDGE (e, ei, bb->succs) dump_edge_info (file, e, 1); - END_FOR_EACH_EDGE; fprintf (file, "\n\n"); } diff --git a/gcc/cfganal.c b/gcc/cfganal.c index f412dbf8290..2076de07568 100644 --- a/gcc/cfganal.c +++ b/gcc/cfganal.c @@ -105,17 +105,17 @@ can_fallthru (basic_block src, basic_block target) rtx insn = BB_END (src); rtx insn2; edge e; + edge_iterator ei; if (target == EXIT_BLOCK_PTR) return true; if (src->next_bb != target) return 0; - FOR_EACH_EDGE (e, src->succs) + FOR_EACH_EDGE (e, ei, src->succs) { if (e->dest == EXIT_BLOCK_PTR && e->flags & EDGE_FALLTHRU) return 0; } - END_FOR_EACH_EDGE; insn2 = BB_HEAD (target); if (insn2 && !active_insn_p (insn2)) @@ -132,15 +132,15 @@ bool could_fall_through (basic_block src, basic_block target) { edge e; + edge_iterator ei; if (target == EXIT_BLOCK_PTR) return true; - FOR_EACH_EDGE (e, src->succs) + FOR_EACH_EDGE (e, ei, src->succs) { if (e->dest == EXIT_BLOCK_PTR && e->flags & EDGE_FALLTHRU) return 0; } - END_FOR_EACH_EDGE; return true; } @@ -246,8 +246,9 @@ set_edge_can_fallthru_flag (void) FOR_EACH_BB (bb) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { e->flags &= ~EDGE_CAN_FALLTHRU; @@ -255,7 +256,6 @@ set_edge_can_fallthru_flag (void) if (e->flags & EDGE_FALLTHRU) e->flags |= EDGE_CAN_FALLTHRU; } - END_FOR_EACH_EDGE; /* If the BB ends with an invertible condjump all (2) edges are CAN_FALLTHRU edges. */ @@ -279,6 +279,7 @@ void find_unreachable_blocks (void) { edge e; + edge_iterator ei; basic_block *tos, *worklist, bb; tos = worklist = xmalloc (sizeof (basic_block) * n_basic_blocks); @@ -292,14 +293,13 @@ find_unreachable_blocks (void) be only one. It isn't inconceivable that we might one day directly support Fortran alternate entry points. */ - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { *tos++ = e->dest; /* Mark the block reachable. */ e->dest->flags |= BB_REACHABLE; } - END_FOR_EACH_EDGE; /* Iterate: find everything reachable from what we've already seen. */ @@ -307,7 +307,7 @@ find_unreachable_blocks (void) { basic_block b = *--tos; - FOR_EACH_EDGE (e, b->succs) + FOR_EACH_EDGE (e, ei, b->succs) { if (!(e->dest->flags & BB_REACHABLE)) { @@ -315,7 +315,6 @@ find_unreachable_blocks (void) e->dest->flags |= BB_REACHABLE; } } - END_FOR_EACH_EDGE; } free (worklist); @@ -342,6 +341,7 @@ create_edge_list (void) int num_edges; int block_count; basic_block bb; + edge_iterator ei; block_count = n_basic_blocks + 2; /* Include the entry and exit blocks. */ @@ -364,11 +364,10 @@ create_edge_list (void) /* Follow successors of blocks, and register these edges. 
*/ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { elist->index_to_edge[num_edges++] = e; } - END_FOR_EACH_EDGE; } return elist; } @@ -420,10 +419,11 @@ verify_edge_list (FILE *f, struct edge_list *elist) int pred, succ, index; edge e; basic_block bb, p, s; + edge_iterator ei; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { pred = e->src->index; succ = e->dest->index; @@ -441,7 +441,6 @@ verify_edge_list (FILE *f, struct edge_list *elist) fprintf (f, "*p* Succ for index %d should be %d not %d\n", index, succ, INDEX_EDGE_SUCC_BB (elist, index)->index); } - END_FOR_EACH_EDGE; } /* We've verified that all the edges are in the list, now lets make sure @@ -452,7 +451,7 @@ verify_edge_list (FILE *f, struct edge_list *elist) { int found_edge = 0; - FOR_EACH_EDGE (e, p->succs) + FOR_EACH_EDGE (e, ei, p->succs) { if (e->dest == s) { @@ -460,9 +459,8 @@ verify_edge_list (FILE *f, struct edge_list *elist) break; } } - END_FOR_EACH_EDGE; - FOR_EACH_EDGE (e, s->preds) + FOR_EACH_EDGE (e, ei, s->preds) { if (e->src == p) { @@ -470,7 +468,6 @@ verify_edge_list (FILE *f, struct edge_list *elist) break; } } - END_FOR_EACH_EDGE; if (EDGE_INDEX (elist, p, s) == EDGE_INDEX_NO_EDGE && found_edge != 0) @@ -490,13 +487,13 @@ edge find_edge (basic_block pred, basic_block succ) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, pred->succs) + FOR_EACH_EDGE (e, ei, pred->succs) { if (e->dest == succ) return e; } - END_FOR_EACH_EDGE; return NULL; } @@ -983,19 +980,19 @@ flow_dfs_compute_reverse_execute (depth_first_search_ds data) { basic_block bb; edge e; + edge_iterator ei; while (data->sp > 0) { bb = data->stack[--data->sp]; /* Perform depth-first search on adjacent vertices. */ - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (!TEST_BIT (data->visited_blocks, e->src->index - (INVALID_BLOCK + 1))) flow_dfs_compute_reverse_add_bb (data, e->src); } - END_FOR_EACH_EDGE; } /* Determine if there are unvisited basic blocks. */ @@ -1033,10 +1030,11 @@ dfs_enumerate_from (basic_block bb, int reverse, while (sp) { edge e; + edge_iterator ei; lbb = st[--sp]; if (reverse) { - FOR_EACH_EDGE (e, lbb->preds) + FOR_EACH_EDGE (e, ei, lbb->preds) { if (!(e->src->flags & BB_VISITED) && predicate (e->src, data)) { @@ -1045,11 +1043,10 @@ dfs_enumerate_from (basic_block bb, int reverse, e->src->flags |= BB_VISITED; } } - END_FOR_EACH_EDGE; } else { - FOR_EACH_EDGE (e, lbb->succs) + FOR_EACH_EDGE (e, ei, lbb->succs) { if (!(e->dest->flags & BB_VISITED) && predicate (e->dest, data)) { @@ -1058,7 +1055,6 @@ dfs_enumerate_from (basic_block bb, int reverse, e->dest->flags |= BB_VISITED; } } - END_FOR_EACH_EDGE; } } free (st); @@ -1088,6 +1084,7 @@ static void compute_dominance_frontiers_1 (bitmap *frontiers, basic_block bb, sbitmap done) { edge e; + edge_iterator ei; basic_block c; SET_BIT (done, bb->index); @@ -1104,14 +1101,13 @@ compute_dominance_frontiers_1 (bitmap *frontiers, basic_block bb, sbitmap done) } /* Find blocks conforming to rule (1) above. */ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) continue; if (get_immediate_dominator (CDI_DOMINATORS, e->dest) != bb) bitmap_set_bit (frontiers[bb->index], e->dest->index); } - END_FOR_EACH_EDGE; /* Find blocks conforming to rule (2). 
*/ for (c = first_dom_son (CDI_DOMINATORS, bb); diff --git a/gcc/cfgbuild.c b/gcc/cfgbuild.c index 7e484ee39ec..69bdaf14477 100644 --- a/gcc/cfgbuild.c +++ b/gcc/cfgbuild.c @@ -251,13 +251,13 @@ make_edges (basic_block min, basic_block max, int update_p) FOR_BB_BETWEEN (bb, min, max->next_bb, next_bb) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest != EXIT_BLOCK_PTR) SET_BIT (edge_cache[bb->index], e->dest->index); } - END_FOR_EACH_EDGE; } } @@ -273,6 +273,7 @@ make_edges (basic_block min, basic_block max, int update_p) enum rtx_code code; int force_fallthru = 0; edge e; + edge_iterator ei; if (LABEL_P (BB_HEAD (bb)) && LABEL_ALT_ENTRY_P (BB_HEAD (bb))) @@ -391,7 +392,7 @@ make_edges (basic_block min, basic_block max, int update_p) /* Find out if we can drop through to the next block. */ insn = NEXT_INSN (insn); - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == EXIT_BLOCK_PTR && e->flags & EDGE_FALLTHRU) { @@ -399,7 +400,6 @@ make_edges (basic_block min, basic_block max, int update_p) break; } } - END_FOR_EACH_EDGE; while (insn && NOTE_P (insn) @@ -647,6 +647,7 @@ static void compute_outgoing_frequencies (basic_block b) { edge e, f; + edge_iterator ei; if (EDGE_COUNT (b->succs) == 2) { @@ -676,12 +677,11 @@ compute_outgoing_frequencies (basic_block b) } guess_outgoing_edge_probabilities (b); if (b->count) - FOR_EACH_EDGE (e, b->succs) + FOR_EACH_EDGE (e, ei, b->succs) { e->count = ((b->count * e->probability + REG_BR_PROB_BASE / 2) / REG_BR_PROB_BASE); } - END_FOR_EACH_EDGE; } /* Assume that someone emitted code with control flow instructions to the @@ -719,6 +719,7 @@ find_many_sub_basic_blocks (sbitmap blocks) FOR_BB_BETWEEN (bb, min, max->next_bb, next_bb) { edge e; + edge_iterator ei; if (STATE (bb) == BLOCK_ORIGINAL) continue; @@ -726,12 +727,11 @@ find_many_sub_basic_blocks (sbitmap blocks) { bb->count = 0; bb->frequency = 0; - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { bb->count += e->count; bb->frequency += EDGE_FREQUENCY (e); } - END_FOR_EACH_EDGE; } compute_outgoing_frequencies (bb); @@ -762,17 +762,17 @@ find_sub_basic_blocks (basic_block bb) FOR_BB_BETWEEN (b, min, max->next_bb, next_bb) { edge e; + edge_iterator ei; if (b != min) { b->count = 0; b->frequency = 0; - FOR_EACH_EDGE (e, b->preds) + FOR_EACH_EDGE (e, ei, b->preds) { b->count += e->count; b->frequency += EDGE_FREQUENCY (e); } - END_FOR_EACH_EDGE; } compute_outgoing_frequencies (b); diff --git a/gcc/cfgcleanup.c b/gcc/cfgcleanup.c index 8b5ea9f3da5..4bd7bd36ecd 100644 --- a/gcc/cfgcleanup.c +++ b/gcc/cfgcleanup.c @@ -644,12 +644,12 @@ try_forward_edges (int mode, basic_block b) } else { - FOR_EACH_EDGE (e, first->succs) + edge_iterator ei; + FOR_EACH_EDGE (e, ei, first->succs) { e->probability = ((e->probability * REG_BR_PROB_BASE) / (double) prob); } - END_FOR_EACH_EDGE; } update_br_prob_note (first); } @@ -863,6 +863,7 @@ merge_blocks_move (edge e, basic_block b, basic_block c, int mode) edge tmp_edge, b_fallthru_edge; bool c_has_outgoing_fallthru; bool b_has_incoming_fallthru; + edge_iterator ei; /* Avoid overactive code motion, as the forwarder blocks should be eliminated by edge redirection instead. One exception might have @@ -875,21 +876,19 @@ merge_blocks_move (edge e, basic_block b, basic_block c, int mode) and loop notes. This is done by squeezing out all the notes and leaving them there to lie. Not ideal, but functional. 
*/ - FOR_EACH_EDGE (tmp_edge, c->succs) + FOR_EACH_EDGE (tmp_edge, ei, c->succs) { if (tmp_edge->flags & EDGE_FALLTHRU) break; } - END_FOR_EACH_EDGE; c_has_outgoing_fallthru = (tmp_edge != NULL); - FOR_EACH_EDGE (tmp_edge, b->preds) + FOR_EACH_EDGE (tmp_edge, ei, b->preds) { if (tmp_edge->flags & EDGE_FALLTHRU) break; } - END_FOR_EACH_EDGE; b_has_incoming_fallthru = (tmp_edge != NULL); b_fallthru_edge = tmp_edge; @@ -1246,6 +1245,7 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2) int nehedges1 = 0, nehedges2 = 0; edge fallthru1 = 0, fallthru2 = 0; edge e1, e2; + edge_iterator ei; /* If BB1 has only one successor, we may be looking at either an unconditional jump, or a fake edge to exit. */ @@ -1456,10 +1456,10 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2) if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs)) return false; - FOR_EACH_EDGE (e1, bb1->succs) + FOR_EACH_EDGE (e1, ei, bb1->succs) { /* FIXME: Don't use private iterator. */ - e2 = EDGE_SUCC (bb2, __ix); + e2 = EDGE_SUCC (bb2, ei.index); if (e1->flags & EDGE_EH) nehedges1++; @@ -1472,7 +1472,6 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2) if (e2->flags & EDGE_FALLTHRU) fallthru2 = e2; } - END_FOR_EACH_EDGE; /* If number of edges of various types does not match, fail. */ if (nehedges1 != nehedges2 @@ -1520,6 +1519,7 @@ try_crossjump_to_edge (int mode, edge e1, edge e2) basic_block redirect_to, redirect_from, to_remove; rtx newpos1, newpos2; edge s; + edge_iterator ei; newpos1 = newpos2 = NULL_RTX; @@ -1637,15 +1637,16 @@ try_crossjump_to_edge (int mode, edge e1, edge e2) redirect_to->flags |= BB_DIRTY; /* Recompute the frequencies and counts of outgoing edges. */ - FOR_EACH_EDGE (s, redirect_to->succs) + FOR_EACH_EDGE (s, ei, redirect_to->succs) { edge s2; + edge_iterator ei; basic_block d = s->dest; if (FORWARDER_BLOCK_P (d)) d = EDGE_SUCC (d, 0)->dest; - FOR_EACH_EDGE (s2, src1->succs) + FOR_EACH_EDGE (s2, ei, src1->succs) { basic_block d2 = s2->dest; if (FORWARDER_BLOCK_P (d2)) @@ -1653,7 +1654,6 @@ try_crossjump_to_edge (int mode, edge e1, edge e2) if (d == d2) break; } - END_FOR_EACH_EDGE; s->count += s2->count; @@ -1688,7 +1688,6 @@ try_crossjump_to_edge (int mode, edge e1, edge e2) s2->probability * src1->frequency) / (redirect_to->frequency + src1->frequency)); } - END_FOR_EACH_EDGE; update_br_prob_note (redirect_to); @@ -1723,6 +1722,7 @@ try_crossjump_bb (int mode, basic_block bb) bool changed; unsigned max, ix, ix2; basic_block ev, ev2; + edge_iterator ei; /* Nothing to do if there is not at least two incoming edges. 
*/ if (EDGE_COUNT (bb->preds) < 2) @@ -1752,12 +1752,11 @@ try_crossjump_bb (int mode, basic_block bb) if (EDGE_COUNT (bb->preds) > max) return false; - FOR_EACH_EDGE (e, bb->preds); + FOR_EACH_EDGE (e, ei, bb->preds) { if (e->flags & EDGE_FALLTHRU) fallthru = e; } - END_FOR_EACH_EDGE; changed = false; for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); ) diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c index 1a2f30549f9..90e666fd8ef 100644 --- a/gcc/cfgexpand.c +++ b/gcc/cfgexpand.c @@ -1084,13 +1084,13 @@ construct_init_block (void) { basic_block init_block, first_block; edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { if (e->dest == ENTRY_BLOCK_PTR->next_bb) break; } - END_FOR_EACH_EDGE; init_block = create_basic_block (NEXT_INSN (get_insns ()), get_last_insn (), @@ -1123,6 +1123,7 @@ construct_exit_block (void) basic_block exit_block; edge e, e2; unsigned ix; + edge_iterator ei; /* Make sure the locus is set to the end of the function, so that epilogue line numbers and warnings are set properly. */ @@ -1163,7 +1164,7 @@ construct_exit_block (void) e->probability = REG_BR_PROB_BASE; e->count = EXIT_BLOCK_PTR->count; - FOR_EACH_EDGE (e2, EXIT_BLOCK_PTR->preds) + FOR_EACH_EDGE (e2, ei, EXIT_BLOCK_PTR->preds) { if (e2 != e) { @@ -1172,7 +1173,6 @@ construct_exit_block (void) exit_block->frequency -= EDGE_FREQUENCY (e2); } } - END_FOR_EACH_EDGE; if (e->count < 0) e->count = 0; diff --git a/gcc/cfghooks.c b/gcc/cfghooks.c index fc0c628adc6..22d3403e29c 100644 --- a/gcc/cfghooks.c +++ b/gcc/cfghooks.c @@ -106,6 +106,7 @@ verify_flow_info (void) { int n_fallthru = 0; edge e; + edge_iterator ei; if (bb->count < 0) { @@ -119,7 +120,7 @@ verify_flow_info (void) bb->index, bb->frequency); err = 1; } - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (last_visited [e->dest->index + 2] == bb) { @@ -159,7 +160,6 @@ verify_flow_info (void) edge_checksum[e->dest->index + 2] += (size_t) e; } - END_FOR_EACH_EDGE; if (n_fallthru > 1) { @@ -167,7 +167,7 @@ verify_flow_info (void) err = 1; } - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (e->dest != bb) { @@ -181,24 +181,22 @@ verify_flow_info (void) } edge_checksum[e->dest->index + 2] -= (size_t) e; } - END_FOR_EACH_EDGE; } /* Complete edge checksumming for ENTRY and EXIT. 
*/ { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { edge_checksum[e->dest->index + 2] += (size_t) e; } - END_FOR_EACH_EDGE; - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { edge_checksum[e->dest->index + 2] -= (size_t) e; } - END_FOR_EACH_EDGE; } FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) @@ -230,6 +228,7 @@ void dump_bb (basic_block bb, FILE *outf, int indent) { edge e; + edge_iterator ei; char *s_indent; s_indent = alloca ((size_t) indent + 1); @@ -254,19 +253,17 @@ dump_bb (basic_block bb, FILE *outf, int indent) putc ('\n', outf); fprintf (outf, ";;%s pred: ", s_indent); - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { dump_edge_info (outf, e, 0); } - END_FOR_EACH_EDGE; putc ('\n', outf); fprintf (outf, ";;%s succ: ", s_indent); - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { dump_edge_info (outf, e, 1); } - END_FOR_EACH_EDGE; putc ('\n', outf); if (cfg_hooks->dump_bb) @@ -428,7 +425,8 @@ split_edge (edge e) if (get_immediate_dominator (CDI_DOMINATORS, EDGE_SUCC (ret, 0)->dest) == EDGE_PRED (ret, 0)->src) { - FOR_EACH_EDGE (f, EDGE_SUCC (ret, 0)->dest->preds) + edge_iterator ei; + FOR_EACH_EDGE (f, ei, EDGE_SUCC (ret, 0)->dest->preds) { if (f == EDGE_SUCC (ret, 0)) continue; @@ -437,7 +435,6 @@ split_edge (edge e) EDGE_SUCC (ret, 0)->dest)) break; } - END_FOR_EACH_EDGE; if (!f) set_immediate_dominator (CDI_DOMINATORS, EDGE_SUCC (ret, 0)->dest, ret); @@ -516,6 +513,7 @@ void merge_blocks (basic_block a, basic_block b) { edge e; + edge_iterator ei; if (!cfg_hooks->merge_blocks) internal_error ("%s does not support merge_blocks.", cfg_hooks->name); @@ -531,11 +529,10 @@ merge_blocks (basic_block a, basic_block b) remove_edge (EDGE_SUCC (a, 0)); /* Adjust the edges out of B for the new owner. */ - FOR_EACH_EDGE (e, b->succs) + FOR_EACH_EDGE (e, ei, b->succs) { e->src = a; } - END_FOR_EACH_EDGE; a->succs = b->succs; a->flags |= b->flags; @@ -670,6 +667,7 @@ bool can_duplicate_block_p (basic_block bb) { edge e; + edge_iterator ei; if (!cfg_hooks->can_duplicate_block_p) internal_error ("%s does not support can_duplicate_block_p.", @@ -680,12 +678,11 @@ can_duplicate_block_p (basic_block bb) /* Duplicating fallthru block to exit would require adding a jump and splitting the real last BB. */ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == EXIT_BLOCK_PTR && e->flags & EDGE_FALLTHRU) return false; } - END_FOR_EACH_EDGE; return cfg_hooks->can_duplicate_block_p (bb); } @@ -699,6 +696,7 @@ duplicate_block (basic_block bb, edge e) edge s, n; basic_block new_bb; gcov_type new_count = e ? 
e->count : 0; + edge_iterator ei; if (!cfg_hooks->duplicate_block) internal_error ("%s does not support duplicate_block.", @@ -715,7 +713,7 @@ duplicate_block (basic_block bb, edge e) new_bb->loop_depth = bb->loop_depth; new_bb->flags = bb->flags; - FOR_EACH_EDGE (s, bb->succs) + FOR_EACH_EDGE (s, ei, bb->succs) { /* Since we are creating edges from a new block to successors of another block (which therefore are known to be disjoint), there @@ -732,7 +730,6 @@ duplicate_block (basic_block bb, edge e) n->count = s->count; n->aux = s->aux; } - END_FOR_EACH_EDGE; if (e) { diff --git a/gcc/cfglayout.c b/gcc/cfglayout.c index d19c6c6c35f..483cfaf22c4 100644 --- a/gcc/cfglayout.c +++ b/gcc/cfglayout.c @@ -632,6 +632,7 @@ fixup_reorder_chain (void) rtx bb_end_insn; basic_block nb; basic_block old_bb; + edge_iterator ei; if (EDGE_COUNT (bb->succs) == 0) continue; @@ -640,14 +641,13 @@ fixup_reorder_chain (void) a taken jump. */ e_taken = e_fall = NULL; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->flags & EDGE_FALLTHRU) e_fall = e; else if (! (e->flags & EDGE_EH)) e_taken = e; } - END_FOR_EACH_EDGE; bb_end_insn = BB_END (bb); if (JUMP_P (bb_end_insn)) @@ -864,13 +864,13 @@ fixup_reorder_chain (void) FOR_EACH_BB (bb) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->flags & EDGE_FALLTHRU) break; } - END_FOR_EACH_EDGE; if (e && !can_fallthru (e->src, e->dest)) force_nonfallthru (e); @@ -926,6 +926,7 @@ static void fixup_fallthru_exit_predecessor (void) { edge e; + edge_iterator ei; basic_block bb = NULL; /* This transformation is not valid before reload, because we might @@ -933,12 +934,11 @@ fixup_fallthru_exit_predecessor (void) value. */ gcc_assert (reload_completed); - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { if (e->flags & EDGE_FALLTHRU) bb = e->src; } - END_FOR_EACH_EDGE; if (bb && bb->rbi->next) { @@ -1238,8 +1238,8 @@ can_copy_bbs_p (basic_block *bbs, unsigned n) for (i = 0; i < n; i++) { /* In case we should redirect abnormal edge during duplication, fail. */ - - FOR_EACH_EDGE (e, bbs[i]->succs) + edge_iterator ei; + FOR_EACH_EDGE (e, ei, bbs[i]->succs) { if ((e->flags & EDGE_ABNORMAL) && e->dest->rbi->duplicated) @@ -1248,7 +1248,6 @@ can_copy_bbs_p (basic_block *bbs, unsigned n) goto end; } } - END_FOR_EACH_EDGE; if (!can_duplicate_block_p (bbs[i])) { @@ -1324,10 +1323,11 @@ copy_bbs (basic_block *bbs, unsigned n, basic_block *new_bbs, new_edges[j] = NULL; for (i = 0; i < n; i++) { + edge_iterator ei; new_bb = new_bbs[i]; bb = bbs[i]; - FOR_EACH_EDGE (e, new_bb->succs) + FOR_EACH_EDGE (e, ei, new_bb->succs) { for (j = 0; j < n_edges; j++) if (edges[j] && edges[j]->src == bb && edges[j]->dest == e->dest) @@ -1337,7 +1337,6 @@ copy_bbs (basic_block *bbs, unsigned n, basic_block *new_bbs, continue; redirect_edge_and_branch_force (e, e->dest->rbi->copy); } - END_FOR_EACH_EDGE; } /* Clear information about duplicates. 
*/ diff --git a/gcc/cfgloop.c b/gcc/cfgloop.c index 81989373676..99120d5106e 100644 --- a/gcc/cfgloop.c +++ b/gcc/cfgloop.c @@ -64,13 +64,13 @@ flow_loops_cfg_dump (const struct loops *loops, FILE *file) FOR_EACH_BB (bb) { edge succ; + edge_iterator ei; fprintf (file, ";; %d succs { ", bb->index); - FOR_EACH_EDGE (succ, bb->succs) + FOR_EACH_EDGE (succ, ei, bb->succs) { fprintf (file, "%d ", succ->dest->index); } - END_FOR_EACH_EDGE; fprintf (file, "}\n"); } @@ -245,27 +245,26 @@ static void flow_loop_entry_edges_find (struct loop *loop) { edge e; + edge_iterator ei; int num_entries; num_entries = 0; - FOR_EACH_EDGE (e, loop->header->preds) + FOR_EACH_EDGE (e, ei, loop->header->preds) { if (flow_loop_outside_edge_p (loop, e)) num_entries++; } - END_FOR_EACH_EDGE; gcc_assert (num_entries); loop->entry_edges = xmalloc (num_entries * sizeof (edge *)); num_entries = 0; - FOR_EACH_EDGE (e, loop->header->preds) + FOR_EACH_EDGE (e, ei, loop->header->preds) { if (flow_loop_outside_edge_p (loop, e)) loop->entry_edges[num_entries++] = e; } - END_FOR_EACH_EDGE; loop->num_entries = num_entries; } @@ -289,16 +288,16 @@ flow_loop_exit_edges_find (struct loop *loop) bbs = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) { + edge_iterator ei; node = bbs[i]; - FOR_EACH_EDGE (e, node->succs) + FOR_EACH_EDGE (e, ei, node->succs) { basic_block dest = e->dest; if (!flow_bb_inside_loop_p (loop, dest)) num_exits++; } - END_FOR_EACH_EDGE; } if (! num_exits) @@ -313,8 +312,9 @@ flow_loop_exit_edges_find (struct loop *loop) num_exits = 0; for (i = 0; i < loop->num_nodes; i++) { + edge_iterator ei; node = bbs[i]; - FOR_EACH_EDGE (e, node->succs) + FOR_EACH_EDGE (e, ei, node->succs) { basic_block dest = e->dest; @@ -324,7 +324,6 @@ flow_loop_exit_edges_find (struct loop *loop) loop->exit_edges[num_exits++] = e; } } - END_FOR_EACH_EDGE; } free (bbs); loop->num_exits = num_exits; @@ -356,10 +355,11 @@ flow_loop_nodes_find (basic_block header, struct loop *loop) { basic_block node; edge e; + edge_iterator ei; node = stack[--sp]; - FOR_EACH_EDGE (e, node->preds) + FOR_EACH_EDGE (e, ei, node->preds) { basic_block ancestor = e->src; @@ -372,7 +372,6 @@ flow_loop_nodes_find (basic_block header, struct loop *loop) stack[sp++] = ancestor; } } - END_FOR_EACH_EDGE; } free (stack); } @@ -399,9 +398,10 @@ mark_single_exit_loops (struct loops *loops) FOR_EACH_BB (bb) { + edge_iterator ei; if (bb->loop_father == loops->tree_root) continue; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) continue; @@ -421,7 +421,6 @@ mark_single_exit_loops (struct loops *loops) loop->single_exit = e; } } - END_FOR_EACH_EDGE; } for (i = 1; i < loops->num; i++) @@ -481,11 +480,12 @@ flow_loop_pre_header_find (basic_block header) { basic_block pre_header; edge e; + edge_iterator ei; /* If block p is a predecessor of the header and is the only block that the header does not dominate, then it is the pre-header. */ pre_header = NULL; - FOR_EACH_EDGE (e, header->preds) + FOR_EACH_EDGE (e, ei, header->preds) { basic_block node = e->src; @@ -503,7 +503,6 @@ flow_loop_pre_header_find (basic_block header) } } } - END_FOR_EACH_EDGE; return pre_header; } @@ -682,10 +681,11 @@ canonicalize_loop_headers (void) /* Split blocks so that each loop has only single latch. 
*/ FOR_EACH_BB (header) { + edge_iterator ei; int num_latches = 0; int have_abnormal_edge = 0; - FOR_EACH_EDGE (e, header->preds) + FOR_EACH_EDGE (e, ei, header->preds) { basic_block latch = e->src; @@ -699,7 +699,6 @@ canonicalize_loop_headers (void) LATCH_EDGE (e) = 1; } } - END_FOR_EACH_EDGE; if (have_abnormal_edge) HEADER_BLOCK (header) = 0; @@ -725,6 +724,7 @@ canonicalize_loop_headers (void) { int max_freq, is_heavy; edge heavy, tmp_edge; + edge_iterator ei; if (HEADER_BLOCK (header) <= 1) continue; @@ -734,15 +734,14 @@ canonicalize_loop_headers (void) heavy = NULL; max_freq = 0; - FOR_EACH_EDGE (e, header->preds) + FOR_EACH_EDGE (e, ei, header->preds) { if (LATCH_EDGE (e) && EDGE_FREQUENCY (e) > max_freq) max_freq = EDGE_FREQUENCY (e); } - END_FOR_EACH_EDGE; - FOR_EACH_EDGE (e, header->preds) + FOR_EACH_EDGE (e, ei, header->preds) { if (LATCH_EDGE (e) && EDGE_FREQUENCY (e) >= max_freq / HEAVY_EDGE_RATIO) @@ -756,7 +755,6 @@ canonicalize_loop_headers (void) heavy = e; } } - END_FOR_EACH_EDGE; if (is_heavy) { @@ -839,23 +837,23 @@ flow_loops_find (struct loops *loops, int flags) num_loops = 0; FOR_EACH_BB (header) { + edge_iterator ei; int more_latches = 0; header->loop_depth = 0; /* If we have an abnormal predecessor, do not consider the loop (not worth the problems). */ - FOR_EACH_EDGE (e, header->preds) + FOR_EACH_EDGE (e, ei, header->preds) { if (e->flags & EDGE_ABNORMAL) break; } - END_FOR_EACH_EDGE; if (e) continue; - FOR_EACH_EDGE (e, header->preds) + FOR_EACH_EDGE (e, ei, header->preds) { basic_block latch = e->src; @@ -876,7 +874,6 @@ flow_loops_find (struct loops *loops, int flags) num_loops++; } } - END_FOR_EACH_EDGE; } /* Allocate loop structures. */ @@ -920,6 +917,7 @@ flow_loops_find (struct loops *loops, int flags) for (b = 0; b < n_basic_blocks; b++) { struct loop *loop; + edge_iterator ei; /* Search the nodes of the CFG in reverse completion order so that we can find outer loops first. */ @@ -935,7 +933,7 @@ flow_loops_find (struct loops *loops, int flags) num_loops++; /* Look for the latch for this header block. 
*/ - FOR_EACH_EDGE (e, header->preds) + FOR_EACH_EDGE (e, ei, header->preds) { basic_block latch = e->src; @@ -946,7 +944,6 @@ flow_loops_find (struct loops *loops, int flags) break; } } - END_FOR_EACH_EDGE; flow_loop_tree_node_add (header->loop_father, loop); loop->num_nodes = flow_loop_nodes_find (loop->header, loop); @@ -1124,6 +1121,7 @@ get_loop_body_in_bfs_order (const struct loop *loop) while (i < loop->num_nodes) { edge e; + edge_iterator ei; if (!bitmap_bit_p (visited, bb->index)) { @@ -1132,7 +1130,7 @@ get_loop_body_in_bfs_order (const struct loop *loop) blocks[i++] = bb; } - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (flow_bb_inside_loop_p (loop, e->dest)) { @@ -1143,7 +1141,6 @@ get_loop_body_in_bfs_order (const struct loop *loop) } } } - END_FOR_EACH_EDGE; gcc_assert (i >= vc); @@ -1161,28 +1158,27 @@ get_loop_exit_edges (const struct loop *loop, unsigned int *n_edges) edge *edges, e; unsigned i, n; basic_block * body; + edge_iterator ei; gcc_assert (loop->latch != EXIT_BLOCK_PTR); body = get_loop_body (loop); n = 0; for (i = 0; i < loop->num_nodes; i++) - FOR_EACH_EDGE (e, body[i]->succs) + FOR_EACH_EDGE (e, ei, body[i]->succs) { if (!flow_bb_inside_loop_p (loop, e->dest)) n++; } - END_FOR_EACH_EDGE; edges = xmalloc (n * sizeof (edge)); *n_edges = n; n = 0; for (i = 0; i < loop->num_nodes; i++) - FOR_EACH_EDGE (e, body[i]->succs) + FOR_EACH_EDGE (e, ei, body[i]->succs) { if (!flow_bb_inside_loop_p (loop, e->dest)) edges[n++] = e; } - END_FOR_EACH_EDGE; free (body); return edges; @@ -1395,16 +1391,16 @@ verify_loop_structure (struct loops *loops) irreds = sbitmap_alloc (last_basic_block); FOR_EACH_BB (bb) { + edge_iterator ei; if (bb->flags & BB_IRREDUCIBLE_LOOP) SET_BIT (irreds, bb->index); else RESET_BIT (irreds, bb->index); - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->flags & EDGE_IRREDUCIBLE_LOOP) e->flags |= EDGE_ALL_FLAGS + 1; } - END_FOR_EACH_EDGE; } /* Recount it. */ @@ -1413,6 +1409,8 @@ verify_loop_structure (struct loops *loops) /* Compare. 
*/ FOR_EACH_BB (bb) { + edge_iterator ei; + if ((bb->flags & BB_IRREDUCIBLE_LOOP) && !TEST_BIT (irreds, bb->index)) { @@ -1425,7 +1423,7 @@ verify_loop_structure (struct loops *loops) error ("Basic block %d should not be marked irreducible.", bb->index); err = 1; } - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if ((e->flags & EDGE_IRREDUCIBLE_LOOP) && !(e->flags & (EDGE_ALL_FLAGS + 1))) @@ -1443,7 +1441,6 @@ verify_loop_structure (struct loops *loops) } e->flags &= ~(EDGE_ALL_FLAGS + 1); } - END_FOR_EACH_EDGE; } free (irreds); } @@ -1454,9 +1451,10 @@ verify_loop_structure (struct loops *loops) memset (sizes, 0, sizeof (unsigned) * loops->num); FOR_EACH_BB (bb) { + edge_iterator ei; if (bb->loop_father == loops->tree_root) continue; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) continue; @@ -1482,7 +1480,6 @@ verify_loop_structure (struct loops *loops) } } } - END_FOR_EACH_EDGE; } for (i = 1; i < loops->num; i++) @@ -1520,13 +1517,13 @@ edge loop_latch_edge (const struct loop *loop) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, loop->header->preds) + FOR_EACH_EDGE (e, ei, loop->header->preds) { if (e->src == loop->latch) break; } - END_FOR_EACH_EDGE; return e; } @@ -1536,13 +1533,13 @@ edge loop_preheader_edge (const struct loop *loop) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, loop->header->preds) + FOR_EACH_EDGE (e, ei, loop->header->preds) { if (e->src != loop->latch) break; } - END_FOR_EACH_EDGE; return e; } diff --git a/gcc/cfgloopanal.c b/gcc/cfgloopanal.c index ab8e6dfac55..ff2950d2864 100644 --- a/gcc/cfgloopanal.c +++ b/gcc/cfgloopanal.c @@ -267,6 +267,7 @@ mark_irreducible_loops (struct loops *loops) { basic_block act; edge e; + edge_iterator ei; int i, src, dest; struct graph *g; int *queue1 = xmalloc ((last_basic_block + loops->num) * sizeof (int)); @@ -278,18 +279,17 @@ mark_irreducible_loops (struct loops *loops) FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { act->flags &= ~BB_IRREDUCIBLE_LOOP; - FOR_EACH_EDGE (e, act->succs) + FOR_EACH_EDGE (e, ei, act->succs) { e->flags &= ~EDGE_IRREDUCIBLE_LOOP; } - END_FOR_EACH_EDGE; } /* Create the edge lists. */ g = new_graph (last_basic_block + loops->num); FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) - FOR_EACH_EDGE (e, act->succs) + FOR_EACH_EDGE (e, ei, act->succs) { /* Ignore edges to exit. */ if (e->dest == EXIT_BLOCK_PTR) @@ -328,7 +328,6 @@ mark_irreducible_loops (struct loops *loops) add_edge (g, src, dest, e); } - END_FOR_EACH_EDGE; /* Find the strongly connected components. 
Use the algorithm of Tarjan -- first determine the postorder dfs numbering in reversed graph, then @@ -419,6 +418,7 @@ unsigned expected_loop_iterations (const struct loop *loop) { edge e; + edge_iterator ei; if (loop->header->count) { @@ -427,14 +427,13 @@ expected_loop_iterations (const struct loop *loop) count_in = 0; count_latch = 0; - FOR_EACH_EDGE (e, loop->header->preds) + FOR_EACH_EDGE (e, ei, loop->header->preds) { if (e->src == loop->latch) count_latch = e->count; else count_in += e->count; } - END_FOR_EACH_EDGE; if (count_in == 0) expected = count_latch * 2; @@ -451,14 +450,13 @@ expected_loop_iterations (const struct loop *loop) freq_in = 0; freq_latch = 0; - FOR_EACH_EDGE (e, loop->header->preds) + FOR_EACH_EDGE (e, ei, loop->header->preds) { if (e->src == loop->latch) freq_latch = EDGE_FREQUENCY (e); else freq_in += EDGE_FREQUENCY (e); } - END_FOR_EACH_EDGE; if (freq_in == 0) return freq_latch * 2; diff --git a/gcc/cfgloopmanip.c b/gcc/cfgloopmanip.c index 5d004f3bf91..d1bd66af617 100644 --- a/gcc/cfgloopmanip.c +++ b/gcc/cfgloopmanip.c @@ -117,9 +117,10 @@ static bool fix_bb_placement (struct loops *loops, basic_block bb) { edge e; + edge_iterator ei; struct loop *loop = loops->tree_root, *act; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) continue; @@ -131,7 +132,6 @@ fix_bb_placement (struct loops *loops, basic_block bb) if (flow_loop_nested_p (loop, act)) loop = act; } - END_FOR_EACH_EDGE; if (loop == bb->loop_father) return false; @@ -183,6 +183,7 @@ fix_bb_placements (struct loops *loops, basic_block from) while (qbeg != qend) { + edge_iterator ei; from = *qbeg; qbeg++; if (qbeg == qtop) @@ -203,7 +204,7 @@ fix_bb_placements (struct loops *loops, basic_block from) } /* Something has changed, insert predecessors into queue. */ - FOR_EACH_EDGE (e, from->preds) + FOR_EACH_EDGE (e, ei, from->preds) { basic_block pred = e->src; struct loop *nca; @@ -235,7 +236,6 @@ fix_bb_placements (struct loops *loops, basic_block from) qend = queue; SET_BIT (in_queue, pred->index); } - END_FOR_EACH_EDGE; } free (in_queue); free (queue); @@ -266,15 +266,15 @@ fix_irreducible_loops (basic_block from) while (stack_top) { + edge_iterator ei; bb = stack[--stack_top]; RESET_BIT (on_stack, bb->index); - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (e->flags & EDGE_IRREDUCIBLE_LOOP) break; } - END_FOR_EACH_EDGE; if (e) continue; @@ -285,12 +285,11 @@ fix_irreducible_loops (basic_block from) { n_edges = EDGE_COUNT (bb->succs); edges = xmalloc (n_edges * sizeof (edge)); - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { /* FIXME: Don't use private iterator. */ - edges[__ix] = e; + edges[ei.index] = e; } - END_FOR_EACH_EDGE; } for (i = 0; i < n_edges; i++) @@ -362,8 +361,9 @@ remove_path (struct loops *loops, edge e) SET_BIT (seen, rem_bbs[i]->index); for (i = 0; i < nrem; i++) { + edge_iterator ei; bb = rem_bbs[i]; - FOR_EACH_EDGE (ae, rem_bbs[i]->succs) + FOR_EACH_EDGE (ae, ei, rem_bbs[i]->succs) { if (ae->dest != EXIT_BLOCK_PTR && !TEST_BIT (seen, ae->dest->index)) { @@ -371,7 +371,6 @@ remove_path (struct loops *loops, edge e) bord_bbs[n_bord_bbs++] = ae->dest; } } - END_FOR_EACH_EDGE; } /* Remove the path. 
*/ @@ -468,13 +467,13 @@ scale_bbs_frequencies (basic_block *bbs, int nbbs, int num, int den) for (i = 0; i < nbbs; i++) { + edge_iterator ei; bbs[i]->frequency = (bbs[i]->frequency * num) / den; bbs[i]->count = RDIV (bbs[i]->count * num, den); - FOR_EACH_EDGE (e, bbs[i]->succs) + FOR_EACH_EDGE (e, ei, bbs[i]->succs) { e->count = (e->count * num) /den; } - END_FOR_EACH_EDGE; } } @@ -512,6 +511,7 @@ loopify (struct loops *loops, edge latch_edge, edge header_edge, int freq, prob, tot_prob; gcov_type cnt; edge e; + edge_iterator ei; loop->header = header_edge->dest; loop->latch = latch_edge->src; @@ -546,11 +546,10 @@ loopify (struct loops *loops, edge latch_edge, edge header_edge, /* Fix frequencies. */ switch_bb->frequency = freq; switch_bb->count = cnt; - FOR_EACH_EDGE (e, switch_bb->succs) + FOR_EACH_EDGE (e, ei, switch_bb->succs) { e->count = (switch_bb->count * e->probability) / REG_BR_PROB_BASE; } - END_FOR_EACH_EDGE; scale_loop_frequencies (loop, prob, tot_prob); scale_loop_frequencies (succ_bb->loop_father, tot_prob - prob, tot_prob); @@ -654,11 +653,12 @@ fix_loop_placement (struct loop *loop) basic_block *body; unsigned i; edge e; + edge_iterator ei; struct loop *father = loop->pred[0], *act; body = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) - FOR_EACH_EDGE (e, body[i]->succs) + FOR_EACH_EDGE (e, ei, body[i]->succs) { if (!flow_bb_inside_loop_p (loop, e->dest)) { @@ -667,7 +667,6 @@ fix_loop_placement (struct loop *loop) father = act; } } - END_FOR_EACH_EDGE; free (body); if (father != loop->outer) @@ -1016,18 +1015,18 @@ duplicate_loop_to_header_edge (struct loop *loop, edge e, struct loops *loops, new_bbs[i]->rbi->duplicated = 1; for (i = 0; i < n; i++) { + edge_iterator ei; new_bb = new_bbs[i]; if (new_bb->loop_father == target) new_bb->flags |= BB_IRREDUCIBLE_LOOP; - FOR_EACH_EDGE (ae, new_bb->succs) + FOR_EACH_EDGE (ae, ei, new_bb->succs) { if (ae->dest->rbi->duplicated && (ae->src->loop_father == target || ae->dest->loop_father == target)) ae->flags |= EDGE_IRREDUCIBLE_LOOP; } - END_FOR_EACH_EDGE; } for (i = 0; i < n; i++) new_bbs[i]->rbi->duplicated = 0; @@ -1148,27 +1147,26 @@ create_preheader (struct loop *loop, int flags) struct loop *cloop, *ploop; int nentry = 0; bool irred = false; + edge_iterator ei; cloop = loop->outer; - FOR_EACH_EDGE (e, loop->header->preds) + FOR_EACH_EDGE (e, ei, loop->header->preds) { if (e->src == loop->latch) continue; irred |= (e->flags & EDGE_IRREDUCIBLE_LOOP) != 0; nentry++; } - END_FOR_EACH_EDGE; gcc_assert (nentry); if (nentry == 1) { - FOR_EACH_EDGE (e, loop->header->preds) + FOR_EACH_EDGE (e, ei, loop->header->preds) { if (e->src != loop->latch) break; } - END_FOR_EACH_EDGE; if (!(flags & CP_SIMPLE_PREHEADERS) || EDGE_COUNT (e->src->succs) == 1) return NULL; } @@ -1187,12 +1185,11 @@ create_preheader (struct loop *loop, int flags) /* Reorganize blocks so that the preheader is not stuck in the middle of the loop. 
*/ - FOR_EACH_EDGE (e, dummy->preds) + FOR_EACH_EDGE (e, ei, dummy->preds) { if (e->src != loop->latch) break; } - END_FOR_EACH_EDGE; move_block_after (dummy, e->src); loop->header->loop_father = loop; @@ -1233,16 +1230,16 @@ force_single_succ_latches (struct loops *loops) for (i = 1; i < loops->num; i++) { + edge_iterator ei; loop = loops->parray[i]; if (loop->latch != loop->header && EDGE_COUNT (loop->latch->succs) == 1) continue; - FOR_EACH_EDGE (e, loop->header->preds) + FOR_EACH_EDGE (e, ei, loop->header->preds) { if (e->src == loop->latch) break; } - END_FOR_EACH_EDGE; loop_split_edge_with (e, NULL_RTX); } diff --git a/gcc/cfgrtl.c b/gcc/cfgrtl.c index d02e54cc105..0977fd6dc61 100644 --- a/gcc/cfgrtl.c +++ b/gcc/cfgrtl.c @@ -459,6 +459,7 @@ rtl_split_block (basic_block bb, void *insnp) basic_block new_bb; rtx insn = insnp; edge e; + edge_iterator ei; if (!insn) { @@ -484,11 +485,10 @@ rtl_split_block (basic_block bb, void *insnp) /* Redirect the outgoing edges. */ new_bb->succs = bb->succs; bb->succs = NULL; - FOR_EACH_EDGE (e, new_bb->succs) + FOR_EACH_EDGE (e, ei, new_bb->succs) { e->src = new_bb; } - END_FOR_EACH_EDGE; if (bb->global_live_at_start) { @@ -673,6 +673,7 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout) edge tmp; rtx set; int fallthru = 0; + edge_iterator ei; /* If we are partitioning hot/cold basic blocks, we don't want to mess up unconditional or indirect jumps that cross between hot @@ -690,12 +691,11 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout) return NULL; /* Verify that all targets will be TARGET. */ - FOR_EACH_EDGE (tmp, src->succs) + FOR_EACH_EDGE (tmp, ei, src->succs) { if (tmp->dest != target && tmp != e) break; } - END_FOR_EACH_EDGE; if (tmp || !onlyjump_p (insn)) return NULL; @@ -1331,13 +1331,13 @@ rtl_split_edge (edge edge_in) if ((edge_in->flags & EDGE_FALLTHRU) == 0) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, edge_in->dest->preds) + FOR_EACH_EDGE (e, ei, edge_in->dest->preds) { if (e->flags & EDGE_FALLTHRU) break; } - END_FOR_EACH_EDGE; if (e) force_nonfallthru (e); @@ -1710,8 +1710,9 @@ commit_edge_insertions (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->insns.r) { @@ -1719,7 +1720,6 @@ commit_edge_insertions (void) commit_one_edge_insertion (e, false); } } - END_FOR_EACH_EDGE; } if (!changed) @@ -1757,8 +1757,9 @@ commit_edge_insertions_watch_calls (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->insns.r) { @@ -1766,7 +1767,6 @@ commit_edge_insertions_watch_calls (void) commit_one_edge_insertion (e, true); } } - END_FOR_EACH_EDGE; } if (!changed) @@ -1995,6 +1995,7 @@ rtl_verify_flow_info_1 (void) int n_fallthru = 0, n_eh = 0, n_call = 0, n_abnormal = 0, n_branch = 0; edge e, fallthru = NULL; rtx note; + edge_iterator ei; if (INSN_P (BB_END (bb)) && (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX)) @@ -2008,7 +2009,7 @@ rtl_verify_flow_info_1 (void) err = 1; } } - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->flags & EDGE_FALLTHRU) { @@ -2039,7 +2040,6 @@ rtl_verify_flow_info_1 (void) else if (e->flags & EDGE_ABNORMAL) n_abnormal++; } - END_FOR_EACH_EDGE; if (n_eh && GET_CODE (PATTERN (BB_END (bb))) != RESX && !find_reg_note (BB_END (bb), REG_EH_REGION, NULL_RTX)) @@ -2176,12 +2176,13 @@ 
rtl_verify_flow_info (void) FOR_EACH_BB_REVERSE (bb) { edge e; - FOR_EACH_EDGE (e, bb->succs) + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->flags & EDGE_FALLTHRU) break; } - END_FOR_EACH_EDGE; if (!e) { rtx insn; @@ -2298,6 +2299,7 @@ purge_dead_edges (basic_block bb) rtx insn = BB_END (bb), note; bool purged = false; bool found; + edge_iterator ei; /* If this instruction cannot trap, remove REG_EH_REGION notes. */ if (NONJUMP_INSN_P (insn) @@ -2461,7 +2463,7 @@ purge_dead_edges (basic_block bb) edge we know that there used to be a jump here and can then safely remove all non-fallthru edges. */ found = false; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (! (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU))) { @@ -2469,7 +2471,6 @@ purge_dead_edges (basic_block bb) break; } } - END_FOR_EACH_EDGE; if (!found) return purged; @@ -2610,17 +2611,18 @@ cfg_layout_redirect_edge_and_branch (edge e, basic_block dest) bool found = false; unsigned ix = 0; edge tmp, s; + edge_iterator ei; - FOR_EACH_EDGE (tmp, src->succs) + FOR_EACH_EDGE (tmp, ei, src->succs) { if (e == tmp) { found = true; - ix = __ix; + /* FIXME: Don't access iterator directly. */ + ix = ei.index; break; } } - END_FOR_EACH_EDGE; if (!found) abort (); @@ -2991,8 +2993,9 @@ rtl_flow_call_edges_add (sbitmap blocks) if (need_fake_edge_p (insn)) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) { @@ -3001,7 +3004,6 @@ rtl_flow_call_edges_add (sbitmap blocks) break; } } - END_FOR_EACH_EDGE; } } @@ -3043,11 +3045,13 @@ rtl_flow_call_edges_add (sbitmap blocks) #ifdef ENABLE_CHECKING if (split_at_insn == BB_END (bb)) - FOR_EACH_EDGE (e, bb->succs) - { - gcc_assert (e->dest != EXIT_BLOCK_PTR); - } - END_FOR_EACH_EDGE; + { + edge_iterator ei; + FOR_EACH_EDGE (e, ei, bb->succs) + { + gcc_assert (e->dest != EXIT_BLOCK_PTR); + } + } #endif /* Note that the following may create a new basic block diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c index 3d7b2bcd9ee..80d9d7e0467 100644 --- a/gcc/config/i386/i386.c +++ b/gcc/config/i386/i386.c @@ -14828,8 +14828,9 @@ static void ix86_pad_returns (void) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { basic_block bb = e->src; rtx ret = BB_END (bb); @@ -14845,14 +14846,14 @@ ix86_pad_returns (void) if (prev && GET_CODE (prev) == CODE_LABEL) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (EDGE_FREQUENCY (e) && e->src->index >= 0 && !(e->flags & EDGE_FALLTHRU)) replace = true; } - END_FOR_EACH_EDGE; } if (!replace) { @@ -14872,7 +14873,6 @@ ix86_pad_returns (void) delete_insn (ret); } } - END_FOR_EACH_EDGE; } /* Implement machine specific optimizations. 
We implement padding of returns diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c index 966c032d24d..9ffb2cb8d11 100644 --- a/gcc/config/ia64/ia64.c +++ b/gcc/config/ia64/ia64.c @@ -2030,13 +2030,12 @@ ia64_expand_prologue (void) { edge e; - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { if ((e->flags & EDGE_FAKE) == 0 && (e->flags & EDGE_FALLTHRU) != 0) break; } - END_FOR_EACH_EDGE; epilogue_p = (e != NULL); } else diff --git a/gcc/cse.c b/gcc/cse.c index 257033e92c7..e1e6b5e5ea9 100644 --- a/gcc/cse.c +++ b/gcc/cse.c @@ -7391,6 +7391,7 @@ cse_cc_succs (basic_block bb, rtx cc_reg, rtx cc_src, bool can_change_mode) rtx last_insns[2]; unsigned int i; rtx newreg; + edge_iterator ei; /* We expect to have two successors. Look at both before picking the final mode for the comparison. If we have more successors @@ -7401,7 +7402,7 @@ cse_cc_succs (basic_block bb, rtx cc_reg, rtx cc_src, bool can_change_mode) found_equiv = false; mode = GET_MODE (cc_src); insn_count = 0; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { rtx insn; rtx end; @@ -7522,7 +7523,6 @@ cse_cc_succs (basic_block bb, rtx cc_reg, rtx cc_src, bool can_change_mode) } } } - END_FOR_EACH_EDGE; if (! found_equiv) return VOIDmode; diff --git a/gcc/df.c b/gcc/df.c index 68ae1383189..4d0c0060e87 100644 --- a/gcc/df.c +++ b/gcc/df.c @@ -3789,6 +3789,7 @@ hybrid_search (basic_block bb, struct dataflow *dataflow, int changed; int i = bb->index; edge e; + edge_iterator ei; SET_BIT (visited, bb->index); gcc_assert (TEST_BIT (pending, bb->index)); @@ -3800,7 +3801,7 @@ hybrid_search (basic_block bb, struct dataflow *dataflow, { \ /* Calculate of predecessor_outs. */ \ bitmap_zero (IN_SET[i]); \ - FOR_EACH_EDGE (e, bb->E_ANTI) \ + FOR_EACH_EDGE (e, ei, bb->E_ANTI) \ { \ if (e->E_ANTI_BB == E_ANTI_START_BB) \ continue; \ @@ -3811,7 +3812,6 @@ hybrid_search (basic_block bb, struct dataflow *dataflow, IN_SET[i], IN_SET[i], \ OUT_SET[e->E_ANTI_BB->index]); \ } \ - END_FOR_EACH_EDGE; \ (*dataflow->transfun)(i, &changed, \ dataflow->in[i], dataflow->out[i], \ dataflow->gen[i], dataflow->kill[i], \ @@ -3820,7 +3820,7 @@ hybrid_search (basic_block bb, struct dataflow *dataflow, if (!changed) \ break; \ \ - FOR_EACH_EDGE (e, bb->E) \ + FOR_EACH_EDGE (e, ei, bb->E) \ { \ if (e->E_BB == E_START_BB || e->E_BB->index == i) \ continue; \ @@ -3830,9 +3830,8 @@ hybrid_search (basic_block bb, struct dataflow *dataflow, \ SET_BIT (pending, e->E_BB->index); \ } \ - END_FOR_EACH_EDGE; \ \ - FOR_EACH_EDGE (e, bb->E) \ + FOR_EACH_EDGE (e, ei, bb->E) \ { \ if (e->E_BB == E_START_BB || e->E_BB->index == i) \ continue; \ @@ -3843,7 +3842,6 @@ hybrid_search (basic_block bb, struct dataflow *dataflow, if (!TEST_BIT (visited, e->E_BB->index)) \ hybrid_search (e->E_BB, dataflow, visited, pending, considered); \ } \ - END_FOR_EACH_EDGE; \ } while (0) if (dataflow->dir == DF_FORWARD) diff --git a/gcc/dominance.c b/gcc/dominance.c index 9be3915e7fc..29232f99d3e 100644 --- a/gcc/dominance.c +++ b/gcc/dominance.c @@ -836,12 +836,13 @@ recount_dominator (enum cdi_direction dir, basic_block bb) { basic_block dom_bb = NULL; edge e; + edge_iterator ei; gcc_assert (dom_computed[dir]); if (dir == CDI_DOMINATORS) { - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { /* Ignore the predecessors that either are not reachable from the entry block, or whose dominator was not determined yet. 
*/ @@ -851,16 +852,14 @@ recount_dominator (enum cdi_direction dir, basic_block bb) if (!dominated_by_p (dir, e->src, bb)) dom_bb = nearest_common_dominator (dir, dom_bb, e->src); } - END_FOR_EACH_EDGE; } else { - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (!dominated_by_p (dir, e->dest, bb)) dom_bb = nearest_common_dominator (dir, dom_bb, e->dest); } - END_FOR_EACH_EDGE; } return dom_bb; diff --git a/gcc/except.c b/gcc/except.c index 74a2932ce42..33344160338 100644 --- a/gcc/except.c +++ b/gcc/except.c @@ -2023,6 +2023,7 @@ sjlj_emit_function_exit (void) { rtx seq; edge e; + edge_iterator ei; start_sequence (); @@ -2036,12 +2037,11 @@ sjlj_emit_function_exit (void) post-dominates all can_throw_internal instructions. This is the last possible moment. */ - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { if (e->flags & EDGE_FALLTHRU) break; } - END_FOR_EACH_EDGE; if (e) { rtx insn; diff --git a/gcc/final.c b/gcc/final.c index a18688ba90b..3893c8f47b9 100644 --- a/gcc/final.c +++ b/gcc/final.c @@ -1,4 +1,4 @@ -/* Convert RTL to assembler code and output it, for GNU compiler. +/* Convert to assembler code and output it, for GNU compiler. Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. @@ -677,6 +677,7 @@ compute_alignments (void) rtx label = BB_HEAD (bb); int fallthru_frequency = 0, branch_frequency = 0, has_fallthru = 0; edge e; + edge_iterator ei; if (!LABEL_P (label) || probably_never_executed_bb_p (bb)) @@ -684,14 +685,13 @@ compute_alignments (void) max_log = LABEL_ALIGN (label); max_skip = LABEL_ALIGN_MAX_SKIP; - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (e->flags & EDGE_FALLTHRU) has_fallthru = 1, fallthru_frequency += EDGE_FREQUENCY (e); else branch_frequency += EDGE_FREQUENCY (e); } - END_FOR_EACH_EDGE; /* There are two purposes to align block with no fallthru incoming edge: 1) to avoid fetch stalls when branch destination is near cache boundary diff --git a/gcc/flow.c b/gcc/flow.c index a06eae9427a..8e43f4c9391 100644 --- a/gcc/flow.c +++ b/gcc/flow.c @@ -1091,6 +1091,7 @@ calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) int rescan, changed; basic_block bb; edge e; + edge_iterator ei; bb = *qhead++; if (qhead == qend) @@ -1101,7 +1102,7 @@ calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) CLEAR_REG_SET (new_live_at_end); if (EDGE_COUNT (bb->succs) > 0) - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { basic_block sb = e->dest; @@ -1125,7 +1126,6 @@ calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) if (EH_USES (i)) SET_REGNO_REG_SET (new_live_at_end, i); } - END_FOR_EACH_EDGE; else { /* This might be a noreturn function that throws. And @@ -1258,7 +1258,7 @@ calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) /* Queue all predecessors of BB so that we may re-examine their live_at_end. 
*/ - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { basic_block pb = e->src; if (pb->aux == NULL) @@ -1269,7 +1269,6 @@ calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) pb->aux = pb; } } - END_FOR_EACH_EDGE; } FREE_REG_SET (tmp); @@ -1364,8 +1363,9 @@ initialize_uninitialized_subregs (void) edge e; int reg, did_something = 0; find_regno_partial_param param; + edge_iterator ei; - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { basic_block bb = e->dest; regset map = bb->global_live_at_start; @@ -1400,7 +1400,6 @@ initialize_uninitialized_subregs (void) } }); } - END_FOR_EACH_EDGE; if (did_something) commit_edge_insertions (); diff --git a/gcc/function.c b/gcc/function.c index 36a2f7475e9..f738493332d 100644 --- a/gcc/function.c +++ b/gcc/function.c @@ -4958,6 +4958,7 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED) #ifdef HAVE_return unsigned ix; #endif + edge_iterator ei; #ifdef HAVE_prologue if (HAVE_prologue) @@ -4986,12 +4987,11 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED) /* If the exit block has no non-fake predecessors, we don't need an epilogue. */ - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { if ((e->flags & EDGE_FAKE) == 0) break; } - END_FOR_EACH_EDGE; if (e == NULL) goto epilogue_done; @@ -5008,12 +5008,11 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED) basic_block last; rtx label; - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { if (e->flags & EDGE_FALLTHRU) break; } - END_FOR_EACH_EDGE; if (e == NULL) goto epilogue_done; @@ -5115,12 +5114,11 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED) There really shouldn't be a mixture -- either all should have been converted or none, however... */ - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { if (e->flags & EDGE_FALLTHRU) break; } - END_FOR_EACH_EDGE; if (e == NULL) goto epilogue_done; diff --git a/gcc/gcse.c b/gcc/gcse.c index b9dc272851f..b5d27626cd7 100644 --- a/gcc/gcse.c +++ b/gcc/gcse.c @@ -3571,6 +3571,7 @@ bypass_block (basic_block bb, rtx setcc, rtx jump) int may_be_loop_header; unsigned removed_p; unsigned ix; + edge_iterator ei; insn = (setcc != NULL) ? setcc : jump; @@ -3583,7 +3584,7 @@ bypass_block (basic_block bb, rtx setcc, rtx jump) may_be_loop_header = false; - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (e->flags & EDGE_DFS_BACK) { @@ -3591,7 +3592,6 @@ bypass_block (basic_block bb, rtx setcc, rtx jump) break; } } - END_FOR_EACH_EDGE; change = 0; for (ix = 0; VEC_iterate (edge, bb->preds, ix, e); ) @@ -3665,7 +3665,7 @@ bypass_block (basic_block bb, rtx setcc, rtx jump) { dest = BLOCK_FOR_INSN (XEXP (new, 0)); /* Don't bypass edges containing instructions. 
*/ - FOR_EACH_EDGE (edest, bb->succs) + FOR_EACH_EDGE (edest, ei, bb->succs) { if (edest->dest == dest && edest->insns.r) { @@ -3673,7 +3673,6 @@ bypass_block (basic_block bb, rtx setcc, rtx jump) break; } } - END_FOR_EACH_EDGE; } else dest = NULL; @@ -3685,7 +3684,7 @@ bypass_block (basic_block bb, rtx setcc, rtx jump) if (dest && setcc && !CC0_P (SET_DEST (PATTERN (setcc)))) { edge e2; - FOR_EACH_EDGE (e2, e->src->succs) + FOR_EACH_EDGE (e2, ei, e->src->succs) { if (e2->dest == dest) { @@ -3693,7 +3692,6 @@ bypass_block (basic_block bb, rtx setcc, rtx jump) break; } } - END_FOR_EACH_EDGE; } old_dest = e->dest; @@ -3907,12 +3905,13 @@ compute_pre_data (void) FOR_EACH_BB (bb) { edge e; + edge_iterator ei; /* If the current block is the destination of an abnormal edge, we kill all trapping expressions because we won't be able to properly place the instruction on the edge. So make them neither anticipatable nor transparent. This is fairly conservative. */ - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (e->flags & EDGE_ABNORMAL) { @@ -3921,7 +3920,6 @@ compute_pre_data (void) break; } } - END_FOR_EACH_EDGE; sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]); sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]); @@ -3955,8 +3953,9 @@ static int pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr, basic_block bb, char *visited) { edge pred; + edge_iterator ei; - FOR_EACH_EDGE (pred, bb->preds) + FOR_EACH_EDGE (pred, ei, bb->preds) { basic_block pred_bb = pred->src; @@ -3988,7 +3987,6 @@ pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr, basic_bloc return 1; } } - END_FOR_EACH_EDGE; /* All paths have been checked. */ return 0; @@ -4835,6 +4833,7 @@ static int hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb, char *visited) { edge pred; + edge_iterator ei; int visited_allocated_locally = 0; if (visited == NULL) @@ -4843,7 +4842,7 @@ hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb, visited = xcalloc (last_basic_block, 1); } - FOR_EACH_EDGE (pred, bb->preds) + FOR_EACH_EDGE (pred, ei, bb->preds) { basic_block pred_bb = pred->src; @@ -4869,7 +4868,6 @@ hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb, break; } } - END_FOR_EACH_EDGE; if (visited_allocated_locally) free (visited); @@ -6213,6 +6211,7 @@ insert_store (struct ls_expr * expr, edge e) rtx reg, insn; basic_block bb; edge tmp; + edge_iterator ei; /* We did all the deleted before this insert, so if we didn't delete a store, then we haven't set the reaching reg yet either. */ @@ -6229,7 +6228,7 @@ insert_store (struct ls_expr * expr, edge e) insert it at the start of the BB, and reset the insert bits on the other edges so we don't try to insert it on the other edges. */ bb = e->dest; - FOR_EACH_EDGE (tmp, e->dest->preds) + FOR_EACH_EDGE (tmp, ei, e->dest->preds) { if (!(tmp->flags & EDGE_FAKE)) { @@ -6239,18 +6238,16 @@ insert_store (struct ls_expr * expr, edge e) break; } } - END_FOR_EACH_EDGE; /* If tmp is NULL, we found an insertion on every edge, blank the insertion vector for these edges, and insert at the start of the BB. 
*/ if (!tmp && bb != EXIT_BLOCK_PTR) { - FOR_EACH_EDGE (tmp, e->dest->preds) + FOR_EACH_EDGE (tmp, ei, e->dest->preds) { int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest); RESET_BIT (pre_insert_map[index], expr->index); } - END_FOR_EACH_EDGE; insert_insn_start_bb (insn, bb); return 0; } diff --git a/gcc/global.c b/gcc/global.c index d7a68f10ad8..dd024cf45ea 100644 --- a/gcc/global.c +++ b/gcc/global.c @@ -748,13 +748,13 @@ global_conflicts (void) regs live across such edges. */ { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, b->preds) + FOR_EACH_EDGE (e, ei, b->preds) { if (e->flags & EDGE_ABNORMAL) break; } - END_FOR_EACH_EDGE; if (e != NULL) { @@ -2342,15 +2342,15 @@ calculate_reg_pav (void) sbitmap_zero (wset); for (i = 0; i < nel; i++) { + edge_iterator ei; bb = bb_array [i]; changed_p = 0; - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { changed_p = modify_bb_reg_pav (bb, e->src, changed_p); } - END_FOR_EACH_EDGE; if (changed_p) - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { succ = e->dest; if (succ->index != EXIT_BLOCK && !TEST_BIT (wset, succ->index)) @@ -2359,7 +2359,6 @@ calculate_reg_pav (void) VARRAY_PUSH_BB (new_bbs, succ); } } - END_FOR_EACH_EDGE; } temp = bbs; bbs = new_bbs; diff --git a/gcc/graph.c b/gcc/graph.c index 5075ad2af23..c9c15051898 100644 --- a/gcc/graph.c +++ b/gcc/graph.c @@ -308,6 +308,7 @@ print_rtl_graph_with_bb (const char *base, rtx rtx_first) if ((i = end[INSN_UID (tmp_rtx)]) >= 0) { edge e; + edge_iterator ei; bb = BASIC_BLOCK (i); @@ -317,7 +318,7 @@ print_rtl_graph_with_bb (const char *base, rtx rtx_first) /* Now specify the edges to all the successors of this basic block. */ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest != EXIT_BLOCK_PTR) { @@ -341,7 +342,6 @@ print_rtl_graph_with_bb (const char *base, rtx rtx_first) edge_printed = 1; } } - END_FOR_EACH_EDGE; } if (!edge_printed) diff --git a/gcc/ifcvt.c b/gcc/ifcvt.c index ef788242c36..da8a632c595 100644 --- a/gcc/ifcvt.c +++ b/gcc/ifcvt.c @@ -126,7 +126,8 @@ mark_loop_exit_edges (void) { FOR_EACH_BB (bb) { - FOR_EACH_EDGE (e, bb->succs) + edge_iterator ei; + FOR_EACH_EDGE (e, ei, bb->succs) { if (find_common_loop (bb->loop_father, e->dest->loop_father) != bb->loop_father) @@ -134,7 +135,6 @@ mark_loop_exit_edges (void) else e->flags &= ~EDGE_LOOP_EXIT; } - END_FOR_EACH_EDGE; } } @@ -250,13 +250,13 @@ static basic_block block_fallthru (basic_block bb) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->flags & EDGE_FALLTHRU) break; } - END_FOR_EACH_EDGE; return (e) ? e->dest : NULL_BLOCK; } @@ -2361,6 +2361,7 @@ block_jumps_and_fallthru_p (basic_block cur_bb, basic_block target_bb) rtx insn; rtx end; int n_insns = 0; + edge_iterator ei; if (!cur_bb || !target_bb) return -1; @@ -2369,7 +2370,7 @@ block_jumps_and_fallthru_p (basic_block cur_bb, basic_block target_bb) if (EDGE_COUNT (cur_bb->succs) == 0) return FALSE; - FOR_EACH_EDGE (cur_edge, cur_bb->succs) + FOR_EACH_EDGE (cur_edge, ei, cur_bb->succs) { if (cur_edge->flags & EDGE_COMPLEX) /* Anything complex isn't what we want. 
*/ @@ -2384,7 +2385,6 @@ block_jumps_and_fallthru_p (basic_block cur_bb, basic_block target_bb) else return -1; } - END_FOR_EACH_EDGE; if ((jump_p & fallthru_p) == 0) return -1; @@ -2431,6 +2431,7 @@ find_if_block (struct ce_if_block * ce_info) int else_predecessors; edge cur_edge; basic_block next; + edge_iterator ei; ce_info->last_test_bb = test_bb; @@ -2494,22 +2495,20 @@ find_if_block (struct ce_if_block * ce_info) /* Count the number of edges the THEN and ELSE blocks have. */ then_predecessors = 0; - FOR_EACH_EDGE (cur_edge, then_bb->preds) + FOR_EACH_EDGE (cur_edge, ei, then_bb->preds) { then_predecessors++; if (cur_edge->flags & EDGE_COMPLEX) return FALSE; } - END_FOR_EACH_EDGE; else_predecessors = 0; - FOR_EACH_EDGE (cur_edge, else_bb->preds) + FOR_EACH_EDGE (cur_edge, ei, else_bb->preds) { else_predecessors++; if (cur_edge->flags & EDGE_COMPLEX) return FALSE; } - END_FOR_EACH_EDGE; /* The THEN block of an IF-THEN combo must have exactly one predecessor, other than any || blocks which jump to the THEN block. */ diff --git a/gcc/lcm.c b/gcc/lcm.c index eb5fa63f3ad..39c6fe2eb4d 100644 --- a/gcc/lcm.c +++ b/gcc/lcm.c @@ -102,6 +102,7 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin, edge e; basic_block *worklist, *qin, *qout, *qend; unsigned int qlen; + edge_iterator ei; /* Allocate a worklist array/queue. Entries are only added to the list if they were not already on the list. So the size is @@ -126,11 +127,10 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin, /* Mark blocks which are predecessors of the exit block so that we can easily identify them below. */ - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { e->src->aux = EXIT_BLOCK_PTR; } - END_FOR_EACH_EDGE; /* Iterate until the worklist is empty. */ while (qlen) @@ -160,7 +160,7 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin, /* If the in state of this block changed, then we need to add the predecessors of this block to the worklist if they are not already on the worklist. */ - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (!e->src->aux && e->src != ENTRY_BLOCK_PTR) { @@ -171,7 +171,6 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin, qin = worklist; } } - END_FOR_EACH_EDGE; } clear_aux_for_edges (); @@ -257,6 +256,7 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest, edge e; basic_block *worklist, *qin, *qout, *qend, bb; unsigned int qlen; + edge_iterator ei; num_edges = NUM_EDGES (edge_list); @@ -286,11 +286,10 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest, do not want to be overly optimistic. Consider an outgoing edge from the entry block. That edge should always have a LATER value the same as EARLIEST for that edge. */ - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { sbitmap_copy (later[(size_t) e->aux], earliest[(size_t) e->aux]); } - END_FOR_EACH_EDGE; /* Add all the blocks to the worklist. This prevents an early exit from the loop given our optimistic initialization of LATER above. */ @@ -319,15 +318,14 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest, /* Compute the intersection of LATERIN for each incoming edge to B. 
*/ sbitmap_ones (laterin[bb->index]); - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { sbitmap_a_and_b (laterin[bb->index], laterin[bb->index], later[(size_t)e->aux]); } - END_FOR_EACH_EDGE; /* Calculate LATER for all outgoing edges. */ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (sbitmap_union_of_diff_cg (later[(size_t) e->aux], earliest[(size_t) e->aux], @@ -344,20 +342,18 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest, qin = worklist; } } - END_FOR_EACH_EDGE; } /* Computation of insertion and deletion points requires computing LATERIN for the EXIT block. We allocated an extra entry in the LATERIN array for just this purpose. */ sbitmap_ones (laterin[last_basic_block]); - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { sbitmap_a_and_b (laterin[last_basic_block], laterin[last_basic_block], later[(size_t) e->aux]); } - END_FOR_EACH_EDGE; clear_aux_for_edges (); free (worklist); @@ -496,6 +492,7 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout, edge e; basic_block *worklist, *qin, *qout, *qend, bb; unsigned int qlen; + edge_iterator ei; /* Allocate a worklist array/queue. Entries are only added to the list if they were not already on the list. So the size is @@ -519,11 +516,10 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout, /* Mark blocks which are successors of the entry block so that we can easily identify them below. */ - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { e->dest->aux = ENTRY_BLOCK_PTR; } - END_FOR_EACH_EDGE; /* Iterate until the worklist is empty. */ while (qlen) @@ -555,7 +551,7 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout, /* If the out state of this block changed, then we need to add the successors of this block to the worklist if they are not already on the worklist. */ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (!e->dest->aux && e->dest != EXIT_BLOCK_PTR) { @@ -567,7 +563,6 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout, qin = worklist; } } - END_FOR_EACH_EDGE; } clear_aux_for_edges (); @@ -628,6 +623,7 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest, int num_edges, i; edge e; basic_block *worklist, *tos, bb; + edge_iterator ei; num_edges = NUM_EDGES (edge_list); @@ -648,11 +644,10 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest, do not want to be overly optimistic. Consider an incoming edge to the exit block. That edge should always have a NEARER value the same as FARTHEST for that edge. */ - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { sbitmap_copy (nearer[(size_t)e->aux], farthest[(size_t)e->aux]); } - END_FOR_EACH_EDGE; /* Add all the blocks to the worklist. This prevents an early exit from the loop given our optimistic initialization of NEARER. */ @@ -671,15 +666,14 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest, /* Compute the intersection of NEARER for each outgoing edge from B. */ sbitmap_ones (nearerout[bb->index]); - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { sbitmap_a_and_b (nearerout[bb->index], nearerout[bb->index], nearer[(size_t) e->aux]); } - END_FOR_EACH_EDGE; /* Calculate NEARER for all incoming edges. 
*/ - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (sbitmap_union_of_diff_cg (nearer[(size_t) e->aux], farthest[(size_t) e->aux], @@ -693,20 +687,18 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest, e->src->aux = e; } } - END_FOR_EACH_EDGE; } /* Computation of insertion and deletion points requires computing NEAREROUT for the ENTRY block. We allocated an extra entry in the NEAREROUT array for just this purpose. */ sbitmap_ones (nearerout[last_basic_block]); - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { sbitmap_a_and_b (nearerout[last_basic_block], nearerout[last_basic_block], nearer[(size_t) e->aux]); } - END_FOR_EACH_EDGE; clear_aux_for_edges (); free (tos); @@ -948,8 +940,9 @@ static void make_preds_opaque (basic_block b, int j) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, b->preds) + FOR_EACH_EDGE (e, ei, b->preds) { basic_block pb = e->src; @@ -959,7 +952,6 @@ make_preds_opaque (basic_block b, int j) RESET_BIT (transp[pb->index], j); make_preds_opaque (pb, j); } - END_FOR_EACH_EDGE; } /* Record in LIVE that register REG died. */ diff --git a/gcc/loop-invariant.c b/gcc/loop-invariant.c index 40d52606081..438a1a0ab3a 100644 --- a/gcc/loop-invariant.c +++ b/gcc/loop-invariant.c @@ -219,6 +219,7 @@ find_exits (struct loop *loop, basic_block *body, bitmap may_exit, bitmap has_exit) { unsigned i; + edge_iterator ei; edge e; struct loop *outermost_exit = loop, *aexit; bool has_call = false; @@ -239,7 +240,7 @@ find_exits (struct loop *loop, basic_block *body, } } - FOR_EACH_EDGE (e, body[i]->succs) + FOR_EACH_EDGE (e, ei, body[i]->succs) { if (flow_bb_inside_loop_p (loop, e->dest)) continue; @@ -249,7 +250,6 @@ find_exits (struct loop *loop, basic_block *body, outermost_exit = find_common_loop (outermost_exit, e->dest->loop_father); } - END_FOR_EACH_EDGE; continue; } diff --git a/gcc/loop-iv.c b/gcc/loop-iv.c index 2dcbb820ddb..bebf3e0b308 100644 --- a/gcc/loop-iv.c +++ b/gcc/loop-iv.c @@ -2481,7 +2481,7 @@ check_simple_exit (struct loop *loop, edge e, struct niter_desc *desc) { basic_block exit_bb; rtx condition, at; - edge ei; + edge ein; exit_bb = e->src; desc->simple_p = false; @@ -2498,18 +2498,18 @@ check_simple_exit (struct loop *loop, edge e, struct niter_desc *desc) if (!any_condjump_p (BB_END (exit_bb))) return; - ei = EDGE_SUCC (exit_bb, 0); - if (ei == e) - ei = EDGE_SUCC (exit_bb, 1); + ein = EDGE_SUCC (exit_bb, 0); + if (ein == e) + ein = EDGE_SUCC (exit_bb, 1); desc->out_edge = e; - desc->in_edge = ei; + desc->in_edge = ein; /* Test whether the condition is suitable. 
*/ - if (!(condition = get_condition (BB_END (ei->src), &at, false, false))) + if (!(condition = get_condition (BB_END (ein->src), &at, false, false))) return; - if (ei->flags & EDGE_FALLTHRU) + if (ein->flags & EDGE_FALLTHRU) { condition = reversed_condition (condition); if (!condition) @@ -2531,13 +2531,14 @@ find_simple_exit (struct loop *loop, struct niter_desc *desc) edge e; struct niter_desc act; bool any = false; + edge_iterator ei; desc->simple_p = false; body = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) { - FOR_EACH_EDGE (e, body[i]->succs) + FOR_EACH_EDGE (e, ei, body[i]->succs) { if (flow_bb_inside_loop_p (loop, e->dest)) continue; @@ -2554,7 +2555,6 @@ find_simple_exit (struct loop *loop, struct niter_desc *desc) continue; *desc = act; } - END_FOR_EACH_EDGE; } if (dump_file) diff --git a/gcc/loop-unroll.c b/gcc/loop-unroll.c index 12fa4e23fcd..bef985fcac0 100644 --- a/gcc/loop-unroll.c +++ b/gcc/loop-unroll.c @@ -426,7 +426,7 @@ peel_loop_completely (struct loops *loops, struct loop *loop) sbitmap wont_exit; unsigned HOST_WIDE_INT npeel; unsigned n_remove_edges, i; - edge *remove_edges, ei; + edge *remove_edges, ein; struct niter_desc *desc = get_simple_loop_desc (loop); npeel = desc->niter; @@ -456,12 +456,12 @@ peel_loop_completely (struct loops *loops, struct loop *loop) free (remove_edges); } - ei = desc->in_edge; + ein = desc->in_edge; free_simple_loop_desc (loop); /* Now remove the unreachable part of the last iteration and cancel the loop. */ - remove_path (loops, ei); + remove_path (loops, ein); if (dump_file) fprintf (dump_file, ";; Peeled loop completely, %d times\n", (int) npeel); diff --git a/gcc/postreload-gcse.c b/gcc/postreload-gcse.c index 8690af2c902..92a564b00aa 100644 --- a/gcc/postreload-gcse.c +++ b/gcc/postreload-gcse.c @@ -1036,11 +1036,12 @@ static bool bb_has_well_behaved_predecessors (basic_block bb) { edge pred; + edge_iterator ei; if (EDGE_COUNT (bb->preds) == 0) return false; - FOR_EACH_EDGE (pred, bb->preds) + FOR_EACH_EDGE (pred, ei, bb->preds) { if ((pred->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (pred)) return false; @@ -1048,7 +1049,6 @@ bb_has_well_behaved_predecessors (basic_block bb) if (JUMP_TABLE_DATA_P (BB_END (pred->src))) return false; } - END_FOR_EACH_EDGE; return true; } @@ -1083,6 +1083,7 @@ eliminate_partially_redundant_load (basic_block bb, rtx insn, int npred_ok = 0; gcov_type ok_count = 0; /* Redundant load execution count. */ gcov_type critical_count = 0; /* Execution count of critical edges. */ + edge_iterator ei; /* The execution count of the loads to be added to make the load fully redundant. */ @@ -1098,7 +1099,7 @@ eliminate_partially_redundant_load (basic_block bb, rtx insn, return; /* Check potential for replacing load with copy for predecessors. */ - FOR_EACH_EDGE (pred, bb->preds) + FOR_EACH_EDGE (pred, ei, bb->preds) { rtx next_pred_bb_end; @@ -1160,7 +1161,6 @@ eliminate_partially_redundant_load (basic_block bb, rtx insn, rollback_unoccr = unoccr; } } - END_FOR_EACH_EDGE; if (/* No load can be replaced by copy. */ npred_ok == 0 diff --git a/gcc/predict.c b/gcc/predict.c index 7881d31597b..51668344087 100644 --- a/gcc/predict.c +++ b/gcc/predict.c @@ -287,16 +287,16 @@ dump_prediction (FILE *file, enum br_predictor predictor, int probability, basic_block bb, int used) { edge e; + edge_iterator ei; if (!file) return; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (! 
(e->flags & EDGE_FALLTHRU)) break; } - END_FOR_EACH_EDGE; fprintf (file, " %s heuristics%s: %.1f%%", predictor_info[predictor].name, @@ -324,22 +324,21 @@ set_even_probabilities (basic_block bb) { int nedges = 0; edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (!(e->flags & (EDGE_EH | EDGE_FAKE))) nedges ++; } - END_FOR_EACH_EDGE; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (!(e->flags & (EDGE_EH | EDGE_FAKE))) e->probability = (REG_BR_PROB_BASE + nedges / 2) / nedges; else e->probability = 0; } - END_FOR_EACH_EDGE; } /* Combine all REG_BR_PRED notes into single probability and attach REG_BR_PROB @@ -464,8 +463,9 @@ combine_predictions_for_bb (FILE *file, basic_block bb) struct edge_prediction *pred; int nedges = 0; edge e, first = NULL, second = NULL; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (!(e->flags & (EDGE_EH | EDGE_FAKE))) { @@ -476,7 +476,6 @@ combine_predictions_for_bb (FILE *file, basic_block bb) first = e; } } - END_FOR_EACH_EDGE; /* When there is no successor or only one choice, prediction is easy. @@ -616,6 +615,7 @@ predict_loops (struct loops *loops_info, bool simpleloops) { int header_found = 0; edge e; + edge_iterator ei; bb = bbs[j]; @@ -629,7 +629,7 @@ predict_loops (struct loops *loops_info, bool simpleloops) /* Loop branch heuristics - predict an edge back to a loop's head as taken. */ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == loop->header && e->src == loop->latch) @@ -638,12 +638,11 @@ predict_loops (struct loops *loops_info, bool simpleloops) predict_edge_def (e, PRED_LOOP_BRANCH, TAKEN); } } - END_FOR_EACH_EDGE; /* Loop exit heuristics - predict an edge exiting the loop if the conditional has no loop header successors as not taken. */ if (!header_found) - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest->index < 0 || !flow_bb_inside_loop_p (loop, e->dest)) @@ -653,7 +652,6 @@ predict_loops (struct loops *loops_info, bool simpleloops) - predictor_info [(int) PRED_LOOP_EXIT].hitrate) / exits); } - END_FOR_EACH_EDGE; } /* Free basic blocks from get_loop_body. */ @@ -782,11 +780,12 @@ estimate_probability (struct loops *loops_info) { rtx last_insn = BB_END (bb); edge e; + edge_iterator ei; if (! can_predict_insn_p (last_insn)) continue; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { /* Predict early returns to be probable, as we've already taken care for error returns and other are often used for fast paths @@ -824,7 +823,6 @@ estimate_probability (struct loops *loops_info) } } } - END_FOR_EACH_EDGE; bb_estimate_probability_locally (bb); } @@ -840,6 +838,7 @@ estimate_probability (struct loops *loops_info) notes. 
*/ FOR_EACH_BB (bb) { + edge_iterator ei; rtx last_insn = BB_END (bb); if (!can_predict_insn_p (last_insn)) @@ -850,19 +849,17 @@ estimate_probability (struct loops *loops_info) int nedges = 0; edge e; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { nedges++; if (e->probability != 0) break; } - END_FOR_EACH_EDGE; if (!e) - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { e->probability = (REG_BR_PROB_BASE + nedges / 2) / nedges; } - END_FOR_EACH_EDGE; } } estimate_bb_frequencies (loops_info); @@ -889,15 +886,15 @@ tree_predict_by_opcode (basic_block bb) tree cond; tree op0; tree type; + edge_iterator ei; if (!stmt || TREE_CODE (stmt) != COND_EXPR) return; - FOR_EACH_EDGE (then_edge, bb->succs) + FOR_EACH_EDGE (then_edge, ei, bb->succs) { if (then_edge->flags & EDGE_TRUE_VALUE) break; } - END_FOR_EACH_EDGE; cond = TREE_OPERAND (stmt, 0); if (TREE_CODE_CLASS (TREE_CODE (cond)) != '<') return; @@ -1008,8 +1005,9 @@ tree_estimate_probability (void) FOR_EACH_BB (bb) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { /* Predict early returns to be probable, as we've already taken care for error returns and other are often used for fast paths @@ -1052,7 +1050,6 @@ tree_estimate_probability (void) } } } - END_FOR_EACH_EDGE; tree_predict_by_opcode (bb); } FOR_EACH_BB (bb) @@ -1206,9 +1203,10 @@ propagate_freq (struct loop *loop) { if (BLOCK_INFO (bb)->tovisit) { + edge_iterator ei; int count = 0; - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (BLOCK_INFO (e->src)->tovisit && !(e->flags & EDGE_DFS_BACK)) count++; @@ -1218,7 +1216,6 @@ propagate_freq (struct loop *loop) "Irreducible region hit, ignoring edge to %i->%i\n", e->src->index, bb->index); } - END_FOR_EACH_EDGE; BLOCK_INFO (bb)->npredecessors = count; } } @@ -1227,6 +1224,7 @@ propagate_freq (struct loop *loop) last = head; for (bb = head; bb; bb = nextbb) { + edge_iterator ei; sreal cyclic_probability, frequency; memcpy (&cyclic_probability, &real_zero, sizeof (real_zero)); @@ -1239,15 +1237,14 @@ propagate_freq (struct loop *loop) if (bb != head) { #ifdef ENABLE_CHECKING - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (BLOCK_INFO (e->src)->tovisit && !(e->flags & EDGE_DFS_BACK)) abort (); } - END_FOR_EACH_EDGE; #endif - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (EDGE_INFO (e)->back_edge) { @@ -1268,7 +1265,6 @@ propagate_freq (struct loop *loop) sreal_add (&frequency, &frequency, &tmp); } } - END_FOR_EACH_EDGE; if (sreal_compare (&cyclic_probability, &real_zero) == 0) { @@ -1295,7 +1291,7 @@ propagate_freq (struct loop *loop) BLOCK_INFO (bb)->tovisit = 0; /* Compute back edge frequencies. */ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == head) { @@ -1311,10 +1307,9 @@ propagate_freq (struct loop *loop) &tmp, &real_inv_br_prob_base); } } - END_FOR_EACH_EDGE; /* Propagate to successor blocks. 
*/ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (!(e->flags & EDGE_DFS_BACK) && BLOCK_INFO (e->dest)->npredecessors) @@ -1331,7 +1326,6 @@ propagate_freq (struct loop *loop) } } } - END_FOR_EACH_EDGE; } } @@ -1460,16 +1454,16 @@ estimate_bb_frequencies (struct loops *loops) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; + edge_iterator ei; BLOCK_INFO (bb)->tovisit = 0; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { sreal_init (&EDGE_INFO (e)->back_edge_prob, e->probability, 0); sreal_mul (&EDGE_INFO (e)->back_edge_prob, &EDGE_INFO (e)->back_edge_prob, &real_inv_br_prob_base); } - END_FOR_EACH_EDGE; } /* First compute probabilities locally for each loop from innermost diff --git a/gcc/profile.c b/gcc/profile.c index 78449433e51..1eac149d89e 100644 --- a/gcc/profile.c +++ b/gcc/profile.c @@ -142,8 +142,9 @@ instrument_edges (struct edge_list *el) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { struct edge_info *inf = EDGE_INFO (e); @@ -158,7 +159,6 @@ instrument_edges (struct edge_list *el) (profile_hooks->gen_edge_profiler) (num_instr_edges++, e); } } - END_FOR_EACH_EDGE; } total_num_blocks_created += num_edges; @@ -240,12 +240,13 @@ get_exec_counts (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; - FOR_EACH_EDGE (e, bb->succs) + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->succs) { if (!EDGE_INFO (e)->ignore && !EDGE_INFO (e)->on_tree) num_edges++; } - END_FOR_EACH_EDGE; } counts = get_coverage_counts (GCOV_COUNTER_ARCS, num_edges, &profile_info); @@ -299,19 +300,19 @@ compute_branch_probabilities (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; - FOR_EACH_EDGE (e, bb->succs) + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->succs) { if (!EDGE_INFO (e)->ignore) BB_INFO (bb)->succ_count++; } - END_FOR_EACH_EDGE; - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (!EDGE_INFO (e)->ignore) BB_INFO (bb)->pred_count++; } - END_FOR_EACH_EDGE; } /* Avoid predicting entry on exit nodes. */ @@ -327,8 +328,9 @@ compute_branch_probabilities (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (!EDGE_INFO (e)->ignore && !EDGE_INFO (e)->on_tree) { @@ -357,7 +359,6 @@ compute_branch_probabilities (void) } } } - END_FOR_EACH_EDGE; } if (dump_file) @@ -394,11 +395,11 @@ compute_branch_probabilities (void) if (bi->succ_count == 0) { edge e; + edge_iterator ei; gcov_type total = 0; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) total += e->count; - END_FOR_EACH_EDGE; bb->count = total; bi->count_valid = 1; changes = 1; @@ -406,11 +407,11 @@ compute_branch_probabilities (void) else if (bi->pred_count == 0) { edge e; + edge_iterator ei; gcov_type total = 0; - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) total += e->count; - END_FOR_EACH_EDGE; bb->count = total; bi->count_valid = 1; changes = 1; @@ -421,21 +422,20 @@ compute_branch_probabilities (void) if (bi->succ_count == 1) { edge e; + edge_iterator ei; gcov_type total = 0; /* One of the counts will be invalid, but it is zero, so adding it in also doesn't hurt. */ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) total += e->count; - END_FOR_EACH_EDGE; /* Seedgeh for the invalid edge, and set its count. 
*/ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (! EDGE_INFO (e)->count_valid && ! EDGE_INFO (e)->ignore) break; } - END_FOR_EACH_EDGE; /* Calculate count for remaining edge by conservation. */ total = bb->count - total; @@ -452,21 +452,20 @@ compute_branch_probabilities (void) if (bi->pred_count == 1) { edge e; + edge_iterator ei; gcov_type total = 0; /* One of the counts will be invalid, but it is zero, so adding it in also doesn't hurt. */ - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) total += e->count; - END_FOR_EACH_EDGE; /* Search for the invalid edge, and set its count. */ - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (!EDGE_INFO (e)->count_valid && !EDGE_INFO (e)->ignore) break; } - END_FOR_EACH_EDGE; /* Calculate count for remaining edge by conservation. */ total = bb->count - total + e->count; @@ -509,6 +508,7 @@ compute_branch_probabilities (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; + edge_iterator ei; rtx note; if (bb->count < 0) @@ -517,7 +517,7 @@ compute_branch_probabilities (void) bb->index, (int)bb->count); bb->count = 0; } - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { /* Function may return twice in the cased the called function is setjmp or calls fork, but we can't represent this by extra @@ -540,15 +540,13 @@ compute_branch_probabilities (void) e->count = bb->count / 2; } } - END_FOR_EACH_EDGE; if (bb->count) { - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { e->probability = (e->count * REG_BR_PROB_BASE + bb->count / 2) / bb->count; } - END_FOR_EACH_EDGE; if (bb->index >= 0 && block_ends_with_condjump_p (bb) @@ -560,12 +558,11 @@ compute_branch_probabilities (void) /* Find the branch edge. It is possible that we do have fake edges here. */ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (!(e->flags & (EDGE_FAKE | EDGE_FALLTHRU))) break; } - END_FOR_EACH_EDGE; prob = e->probability; index = prob * 20 / REG_BR_PROB_BASE; @@ -599,31 +596,28 @@ compute_branch_probabilities (void) { int total = 0; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (!(e->flags & (EDGE_COMPLEX | EDGE_FAKE))) total ++; } - END_FOR_EACH_EDGE; if (total) { - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (!(e->flags & (EDGE_COMPLEX | EDGE_FAKE))) e->probability = REG_BR_PROB_BASE / total; else e->probability = 0; } - END_FOR_EACH_EDGE; } else { total += EDGE_COUNT (bb->succs); - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { e->probability = REG_BR_PROB_BASE / total; } - END_FOR_EACH_EDGE; } if (bb->index >= 0 && block_ends_with_condjump_p (bb) @@ -770,6 +764,7 @@ branch_prob (void) int need_exit_edge = 0, need_entry_edge = 0; int have_exit_edge = 0, have_entry_edge = 0; edge e; + edge_iterator ei; /* Functions returning multiple times are not handled by extra edges. Instead we simply allow negative counts on edges from exit to the @@ -777,7 +772,7 @@ branch_prob (void) with the extra edges because that would result in flowgraph that needs to have fake edges outside the spanning tree. 
*/ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL)) && e->dest != EXIT_BLOCK_PTR) @@ -785,9 +780,8 @@ branch_prob (void) if (e->dest == EXIT_BLOCK_PTR) have_exit_edge = 1; } - END_FOR_EACH_EDGE; - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL)) && e->src != ENTRY_BLOCK_PTR) @@ -795,7 +789,6 @@ branch_prob (void) if (e->src == ENTRY_BLOCK_PTR) have_entry_edge = 1; } - END_FOR_EACH_EDGE; if (need_exit_edge && !have_exit_edge) { @@ -904,11 +897,12 @@ branch_prob (void) FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { edge e; + edge_iterator ei; offset = gcov_write_tag (GCOV_TAG_ARCS); gcov_write_unsigned (BB_TO_GCOV_INDEX (bb)); - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { struct edge_info *i = EDGE_INFO (e); if (!i->ignore) @@ -926,7 +920,6 @@ branch_prob (void) gcov_write_unsigned (flag_bits); } } - END_FOR_EACH_EDGE; gcov_write_length (offset); } diff --git a/gcc/ra-rewrite.c b/gcc/ra-rewrite.c index 41a84911927..5bc30f3d117 100644 --- a/gcc/ra-rewrite.c +++ b/gcc/ra-rewrite.c @@ -1344,11 +1344,12 @@ rewrite_program2 (bitmap new_deaths) int in_ir = 0; edge e; int num = 0; + edge_iterator ei; HARD_REG_SET cum_colors, colors; CLEAR_HARD_REG_SET (cum_colors); - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { int j; @@ -1365,7 +1366,6 @@ rewrite_program2 (bitmap new_deaths) IOR_HARD_REG_SET (cum_colors, colors); num++; } - END_FOR_EACH_EDGE; if (num == 5) in_ir = 1; diff --git a/gcc/ra.c b/gcc/ra.c index 61f9069621d..0b84dfc90b0 100644 --- a/gcc/ra.c +++ b/gcc/ra.c @@ -682,8 +682,9 @@ reg_alloc (void) if (last) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { basic_block bb = e->src; last = BB_END (bb); @@ -697,7 +698,6 @@ reg_alloc (void) emit_insn_after (insns, last); } } - END_FOR_EACH_EDGE; } /* Setup debugging levels. */ diff --git a/gcc/recog.c b/gcc/recog.c index 96eb5d33779..3bc00d70b16 100644 --- a/gcc/recog.c +++ b/gcc/recog.c @@ -3116,13 +3116,13 @@ peephole2_optimize (FILE *dump_file ATTRIBUTE_UNUSED) if (note || (was_call && nonlocal_goto_handler_labels)) { edge eh_edge; + edge_iterator ei; - FOR_EACH_EDGE (eh_edge, bb->succs) + FOR_EACH_EDGE (eh_edge, ei, bb->succs) { if (eh_edge->flags & (EDGE_EH | EDGE_ABNORMAL_CALL)) break; } - END_FOR_EACH_EDGE; for (x = try ; x != before_try ; x = PREV_INSN (x)) if (CALL_P (x) diff --git a/gcc/reg-stack.c b/gcc/reg-stack.c index 1c2bb237d9d..b36005d2a59 100644 --- a/gcc/reg-stack.c +++ b/gcc/reg-stack.c @@ -442,14 +442,14 @@ reg_to_stack (FILE *file) FOR_EACH_BB_REVERSE (bb) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (!(e->flags & EDGE_DFS_BACK) && e->src != ENTRY_BLOCK_PTR) BLOCK_INFO (bb)->predecessors++; } - END_FOR_EACH_EDGE; } /* Create the replacement registers up front. */ @@ -2532,6 +2532,7 @@ convert_regs_entry (void) { int inserted = 0; edge e; + edge_iterator ei; basic_block block; FOR_EACH_BB_REVERSE (block) @@ -2561,7 +2562,7 @@ convert_regs_entry (void) Note that we are inserting converted code here. This code is never seen by the convert_regs pass. 
*/ - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { basic_block block = e->dest; block_info bi = BLOCK_INFO (block); @@ -2583,7 +2584,6 @@ convert_regs_entry (void) bi->stack_in.top = top; } - END_FOR_EACH_EDGE; return inserted; } @@ -2769,6 +2769,7 @@ convert_regs_1 (FILE *file, basic_block block) rtx insn, next; edge e, beste = NULL; bool control_flow_insn_deleted = false; + edge_iterator ei; inserted = 0; deleted = 0; @@ -2779,7 +2780,7 @@ convert_regs_1 (FILE *file, basic_block block) if multiple such exists, take one with largest count, prefer critical one (as splitting critical edges is more expensive), or one with lowest index, to avoid random changes with different orders of the edges. */ - FOR_EACH_EDGE (e, block->preds) + FOR_EACH_EDGE (e, ei, block->preds) { if (e->flags & EDGE_DFS_BACK) ; @@ -2802,7 +2803,6 @@ convert_regs_1 (FILE *file, basic_block block) else if (e->src->index < beste->src->index) beste = e; } - END_FOR_EACH_EDGE; /* Initialize stack at block entry. */ if (bi->stack_in.top == -2) @@ -2929,7 +2929,7 @@ convert_regs_1 (FILE *file, basic_block block) bi->stack_out = regstack; /* Compensate the back edges, as those wasn't visited yet. */ - FOR_EACH_EDGE (e, block->succs) + FOR_EACH_EDGE (e, ei, block->succs) { if (e->flags & EDGE_DFS_BACK || (e->dest == EXIT_BLOCK_PTR)) @@ -2939,9 +2939,8 @@ convert_regs_1 (FILE *file, basic_block block) inserted |= compensate_edge (e, file); } } - END_FOR_EACH_EDGE; - FOR_EACH_EDGE (e, block->preds) + FOR_EACH_EDGE (e, ei, block->preds) { if (e != beste && !(e->flags & EDGE_DFS_BACK) && e->src != ENTRY_BLOCK_PTR) @@ -2950,7 +2949,6 @@ convert_regs_1 (FILE *file, basic_block block) inserted |= compensate_edge (e, file); } } - END_FOR_EACH_EDGE; return inserted; } @@ -2976,6 +2974,7 @@ convert_regs_2 (FILE *file, basic_block block) do { edge e; + edge_iterator ei; block = *--sp; @@ -2992,7 +2991,7 @@ convert_regs_2 (FILE *file, basic_block block) stack the successor in all cases and hand over the task of fixing up the discrepancy to convert_regs_1. */ - FOR_EACH_EDGE (e, block->succs) + FOR_EACH_EDGE (e, ei, block->succs) { if (! (e->flags & EDGE_DFS_BACK)) { @@ -3001,7 +3000,6 @@ convert_regs_2 (FILE *file, basic_block block) *sp++ = e->dest; } } - END_FOR_EACH_EDGE; inserted |= convert_regs_1 (file, block); BLOCK_INFO (block)->done = 1; @@ -3021,6 +3019,7 @@ convert_regs (FILE *file) int inserted; basic_block b; edge e; + edge_iterator ei; /* Initialize uninitialized registers on function entry. */ inserted = convert_regs_entry (); @@ -3035,11 +3034,10 @@ convert_regs (FILE *file) /* Process all blocks reachable from all entry points. */ - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { inserted |= convert_regs_2 (file, e->dest); } - END_FOR_EACH_EDGE; /* ??? Process all unreachable blocks. Though there's no excuse for keeping these even when not optimizing. */ diff --git a/gcc/reload1.c b/gcc/reload1.c index 7c865caaff9..4d9dd796978 100644 --- a/gcc/reload1.c +++ b/gcc/reload1.c @@ -8031,10 +8031,11 @@ fixup_abnormal_edges (void) FOR_EACH_BB (bb) { edge e; + edge_iterator ei; /* Look for cases we are interested in - calls or instructions causing exceptions. 
*/ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->flags & EDGE_ABNORMAL_CALL) break; @@ -8042,7 +8043,6 @@ fixup_abnormal_edges (void) == (EDGE_ABNORMAL | EDGE_EH)) break; } - END_FOR_EACH_EDGE; if (e && !CALL_P (BB_END (bb)) && !can_throw_internal (BB_END (bb))) @@ -8050,12 +8050,11 @@ fixup_abnormal_edges (void) rtx insn = BB_END (bb), stop = NEXT_INSN (BB_END (bb)); rtx next; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->flags & EDGE_FALLTHRU) break; } - END_FOR_EACH_EDGE; /* Get past the new insns generated. Allow notes, as the insns may be already deleted. */ diff --git a/gcc/sched-ebb.c b/gcc/sched-ebb.c index f07b0099034..e951c8eefd9 100644 --- a/gcc/sched-ebb.c +++ b/gcc/sched-ebb.c @@ -175,8 +175,9 @@ compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used, { basic_block b = BLOCK_FOR_INSN (insn); edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, b->succs) + FOR_EACH_EDGE (e, ei, b->succs) { if (e->flags & EDGE_FALLTHRU) /* The jump may be a by-product of a branch that has been merged @@ -190,7 +191,6 @@ compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used, bitmap_operation (used, used, e->dest->global_live_at_start, BITMAP_IOR); } - END_FOR_EACH_EDGE; } /* Used in schedule_insns to initialize current_sched_info for scheduling @@ -284,6 +284,7 @@ fix_basic_block_boundaries (basic_block bb, basic_block last, rtx head, { edge f; rtx h; + edge_iterator ei; /* An obscure special case, where we do have partially dead instruction scheduled after last control flow instruction. @@ -296,12 +297,11 @@ fix_basic_block_boundaries (basic_block bb, basic_block last, rtx head, do the split and re-emit it back in case this will ever trigger problem. */ - FOR_EACH_EDGE (f, bb->prev_bb->succs) + FOR_EACH_EDGE (f, ei, bb->prev_bb->succs) { if (f->flags & EDGE_FALLTHRU) break; } - END_FOR_EACH_EDGE; if (f) { @@ -596,16 +596,16 @@ schedule_ebbs (FILE *dump_file) for (;;) { edge e; + edge_iterator ei; tail = BB_END (bb); if (bb->next_bb == EXIT_BLOCK_PTR || LABEL_P (BB_HEAD (bb->next_bb))) break; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if ((e->flags & EDGE_FALLTHRU) != 0) break; } - END_FOR_EACH_EDGE; if (! e) break; if (e->probability <= probability_cutoff) diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c index 8b10d33e8b3..b5d9fc8c289 100644 --- a/gcc/sched-rgn.c +++ b/gcc/sched-rgn.c @@ -802,6 +802,7 @@ find_rgns (struct edge_list *edge_list) if (TEST_BIT (header, bb->index) && TEST_BIT (inner, bb->index)) { edge e; + edge_iterator ei; basic_block jbb; /* Now check that the loop is reducible. We do this separate @@ -843,12 +844,11 @@ find_rgns (struct edge_list *edge_list) /* Decrease degree of all I's successors for topological ordering. */ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest != EXIT_BLOCK_PTR) --degree[e->dest->index]; } - END_FOR_EACH_EDGE; /* Estimate # insns, and count # blocks in the region. */ num_bbs = 1; @@ -881,7 +881,7 @@ find_rgns (struct edge_list *edge_list) { edge e; - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (e->src == ENTRY_BLOCK_PTR) continue; @@ -901,7 +901,6 @@ find_rgns (struct edge_list *edge_list) } } } - END_FOR_EACH_EDGE; } /* Now add all the blocks in the loop to the queue. 
@@ -939,7 +938,7 @@ find_rgns (struct edge_list *edge_list) edge e; child = queue[++head]; - FOR_EACH_EDGE (e, BASIC_BLOCK (child)->preds) + FOR_EACH_EDGE (e, ei, BASIC_BLOCK (child)->preds) { node = e->src->index; @@ -963,7 +962,6 @@ find_rgns (struct edge_list *edge_list) } } } - END_FOR_EACH_EDGE; } if (tail >= 0 && !too_large_failure) @@ -995,12 +993,11 @@ find_rgns (struct edge_list *edge_list) CONTAINING_RGN (child) = nr_regions; queue[head] = queue[tail--]; - FOR_EACH_EDGE (e, BASIC_BLOCK (child)->succs) + FOR_EACH_EDGE (e, ei, BASIC_BLOCK (child)->succs) { if (e->dest != EXIT_BLOCK_PTR) --degree[e->dest->index]; } - END_FOR_EACH_EDGE; } else --head; diff --git a/gcc/tracer.c b/gcc/tracer.c index 42328b118ca..77335c9bcfe 100644 --- a/gcc/tracer.c +++ b/gcc/tracer.c @@ -118,13 +118,13 @@ find_best_successor (basic_block bb) { edge e; edge best = NULL; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (!best || better_p (e, best)) best = e; } - END_FOR_EACH_EDGE; if (!best || ignore_bb_p (best->dest)) return NULL; if (best->probability <= probability_cutoff) @@ -139,13 +139,13 @@ find_best_predecessor (basic_block bb) { edge e; edge best = NULL; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (!best || better_p (e, best)) best = e; } - END_FOR_EACH_EDGE; if (!best || ignore_bb_p (best->src)) return NULL; if (EDGE_FREQUENCY (best) * REG_BR_PROB_BASE @@ -279,14 +279,14 @@ tail_duplicate (void) && can_duplicate_block_p (bb2)) { edge e; + edge_iterator ei; basic_block old = bb2; - FOR_EACH_EDGE (e, bb2->preds) + FOR_EACH_EDGE (e, ei, bb2->preds) { if (e->src == bb) break; } - END_FOR_EACH_EDGE; nduplicated += counts [bb2->index]; bb2 = duplicate_block (bb2, e); @@ -335,11 +335,12 @@ layout_superblocks (void) while (bb != EXIT_BLOCK_PTR) { + edge_iterator ei; edge e, best = NULL; while (end->rbi->next) end = end->rbi->next; - FOR_EACH_EDGE (e, end->succs) + FOR_EACH_EDGE (e, ei, end->succs) { if (e->dest != EXIT_BLOCK_PTR && e->dest != EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest @@ -347,7 +348,6 @@ layout_superblocks (void) && (!best || EDGE_FREQUENCY (e) > EDGE_FREQUENCY (best))) best = e; } - END_FOR_EACH_EDGE; if (best) { diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c index 2fb4b2bc04f..bdadb885e24 100644 --- a/gcc/tree-cfg.c +++ b/gcc/tree-cfg.c @@ -2301,6 +2301,7 @@ static void tree_cfg2vcg (FILE *file) { edge e; + edge_iterator ei; basic_block bb; const char *funcname = lang_hooks.decl_printable_name (current_function_decl, 2); @@ -2311,7 +2312,7 @@ tree_cfg2vcg (FILE *file) fprintf (file, "node: { title: \"EXIT\" label: \"EXIT\" }\n"); /* Write blocks and edges. 
*/ - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { fprintf (file, "edge: { sourcename: \"ENTRY\" targetname: \"%d\"", e->dest->index); @@ -2323,7 +2324,6 @@ tree_cfg2vcg (FILE *file) fprintf (file, " }\n"); } - END_FOR_EACH_EDGE; fputc ('\n', file); @@ -2358,7 +2358,7 @@ tree_cfg2vcg (FILE *file) bb->index, bb->index, head_name, head_line, end_name, end_line); - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) fprintf (file, "edge: { sourcename: \"%d\" targetname: \"EXIT\"", bb->index); @@ -2372,7 +2372,6 @@ tree_cfg2vcg (FILE *file) fprintf (file, " }\n"); } - END_FOR_EACH_EDGE; if (bb->next_bb != EXIT_BLOCK_PTR) fputc ('\n', file); @@ -2507,6 +2506,7 @@ disband_implicit_edges (void) basic_block bb; block_stmt_iterator last; edge e; + edge_iterator ei; tree stmt, label; FOR_EACH_BB (bb) @@ -2520,7 +2520,7 @@ disband_implicit_edges (void) from cfg_remove_useless_stmts here since it violates the invariants for tree--cfg correspondence and thus fits better here where we do it anyway. */ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest != bb->next_bb) continue; @@ -2533,7 +2533,6 @@ disband_implicit_edges (void) gcc_unreachable (); e->flags |= EDGE_FALLTHRU; } - END_FOR_EACH_EDGE; continue; } @@ -2560,12 +2559,11 @@ disband_implicit_edges (void) continue; /* Find a fallthru edge and emit the goto if necessary. */ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->flags & EDGE_FALLTHRU) break; } - END_FOR_EACH_EDGE; if (!e || e->dest == bb->next_bb) continue; @@ -2925,17 +2923,17 @@ bsi_commit_edge_inserts (int *new_blocks) basic_block bb; edge e; int blocks; + edge_iterator ei; blocks = n_basic_blocks; bsi_commit_edge_inserts_1 (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)); FOR_EACH_BB (bb) - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { bsi_commit_edge_inserts_1 (e); } - END_FOR_EACH_EDGE; if (new_blocks) *new_blocks = n_basic_blocks - blocks; @@ -3004,6 +3002,7 @@ tree_split_edge (edge edge_in) edge new_edge, e; tree phi; int i, num_elem; + edge_iterator ei; /* Abnormal edges cannot be split. */ gcc_assert (!(edge_in->flags & EDGE_ABNORMAL)); @@ -3014,12 +3013,11 @@ tree_split_edge (edge edge_in) /* Place the new block in the block list. Try to keep the new block near its "logical" location. This is of most help to humans looking at debugging dumps. */ - FOR_EACH_EDGE (e, dest->preds) + FOR_EACH_EDGE (e, ei, dest->preds) { if (e->src->next_bb == dest) break; } - END_FOR_EACH_EDGE; if (!e) after_bb = dest->prev_bb; @@ -3439,6 +3437,7 @@ tree_verify_flow_info (void) block_stmt_iterator bsi; tree stmt; edge e; + edge_iterator ei; if (ENTRY_BLOCK_PTR->stmt_list) { @@ -3452,7 +3451,7 @@ tree_verify_flow_info (void) err = 1; } - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { if (e->flags & EDGE_FALLTHRU) { @@ -3460,7 +3459,6 @@ tree_verify_flow_info (void) err = 1; } } - END_FOR_EACH_EDGE; FOR_EACH_BB (bb) { @@ -3521,7 +3519,7 @@ tree_verify_flow_info (void) if (is_ctrl_stmt (stmt)) { - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->flags & EDGE_FALLTHRU) { @@ -3530,7 +3528,6 @@ tree_verify_flow_info (void) err = 1; } } - END_FOR_EACH_EDGE; } switch (TREE_CODE (stmt)) @@ -3588,7 +3585,7 @@ tree_verify_flow_info (void) { /* FIXME. We should double check that the labels in the destination blocks have their address taken. 
*/ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if ((e->flags & (EDGE_FALLTHRU | EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)) @@ -3599,7 +3596,6 @@ tree_verify_flow_info (void) err = 1; } } - END_FOR_EACH_EDGE; } break; @@ -3667,7 +3663,7 @@ tree_verify_flow_info (void) err = 1; } - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (!e->dest->aux) { @@ -3684,7 +3680,6 @@ tree_verify_flow_info (void) err = 1; } } - END_FOR_EACH_EDGE; /* Check that we have all of them. */ for (i = 0; i < n; ++i) @@ -3700,11 +3695,10 @@ tree_verify_flow_info (void) } } - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { e->dest->aux = (void *) 0; } - END_FOR_EACH_EDGE; } default: ; @@ -3725,6 +3719,7 @@ static void tree_make_forwarder_block (edge fallthru) { edge e; + edge_iterator ei; basic_block dummy, bb; tree phi, new_phi, var, prev, next; @@ -3756,7 +3751,7 @@ tree_make_forwarder_block (edge fallthru) set_phi_nodes (bb, prev); /* Add the arguments we have stored on edges. */ - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (e == fallthru) continue; @@ -3768,7 +3763,6 @@ tree_make_forwarder_block (edge fallthru) PENDING_STMT (e) = NULL; } - END_FOR_EACH_EDGE; } @@ -3781,6 +3775,7 @@ tree_forwarder_block_p (basic_block bb) { block_stmt_iterator bsi; edge e; + edge_iterator ei; /* If we have already determined that this block is not forwardable, then no further checks are necessary. */ @@ -3799,7 +3794,7 @@ tree_forwarder_block_p (basic_block bb) } /* Successors of the entry block are not forwarders. */ - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { if (e->dest == bb) { @@ -3807,7 +3802,6 @@ tree_forwarder_block_p (basic_block bb) return false; } } - END_FOR_EACH_EDGE; /* BB can not have any PHI nodes. This could potentially be relaxed early in compilation if we re-rewrote the variables appearing in @@ -4059,14 +4053,14 @@ tree_try_redirect_by_replacing_jump (edge e, basic_block target) edge tmp; block_stmt_iterator b; tree stmt; + edge_iterator ei; /* Verify that all targets will be TARGET. */ - FOR_EACH_EDGE (tmp, src->succs) + FOR_EACH_EDGE (tmp, ei, src->succs) { if (tmp->dest != target && tmp != e) break; } - END_FOR_EACH_EDGE; if (tmp) return NULL; @@ -4186,17 +4180,17 @@ tree_split_block (basic_block bb, void *stmt) tree act; basic_block new_bb; edge e; + edge_iterator ei; new_bb = create_empty_bb (bb); /* Redirect the outgoing edges. */ new_bb->succs = bb->succs; bb->succs = NULL; - FOR_EACH_EDGE (e, new_bb->succs) + FOR_EACH_EDGE (e, ei, new_bb->succs) { e->src = new_bb; } - END_FOR_EACH_EDGE; if (stmt && TREE_CODE ((tree) stmt) == LABEL_EXPR) stmt = NULL; @@ -4410,12 +4404,12 @@ static void print_pred_bbs (FILE *file, basic_block bb) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { fprintf (file, "bb_%d", e->src->index); } - END_FOR_EACH_EDGE; } @@ -4425,12 +4419,12 @@ static void print_succ_bbs (FILE *file, basic_block bb) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { fprintf (file, "bb_%d", e->src->index); } - END_FOR_EACH_EDGE; } @@ -4589,6 +4583,7 @@ tree_flow_call_edges_add (sbitmap blocks) Handle this by adding a dummy instruction in a new last basic block. 
*/ if (check_last_block) { + edge_iterator ei; basic_block bb = EXIT_BLOCK_PTR->prev_bb; block_stmt_iterator bsi = bsi_last (bb); tree t = NULL_TREE; @@ -4599,7 +4594,7 @@ tree_flow_call_edges_add (sbitmap blocks) { edge e; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) { @@ -4608,7 +4603,6 @@ tree_flow_call_edges_add (sbitmap blocks) break; } } - END_FOR_EACH_EDGE; } } @@ -4645,9 +4639,9 @@ tree_flow_call_edges_add (sbitmap blocks) #ifdef ENABLE_CHECKING if (stmt == last_stmt) { - FOR_EACH_EDGE (e, bb->succs) + edge_iterator ei; + FOR_EACH_EDGE (e, ei, bb->succs) gcc_assert (e->dest != EXIT_BLOCK_PTR); - END_FOR_EACH_EDGE; } #endif @@ -4742,17 +4736,17 @@ split_critical_edges (void) { basic_block bb; edge e; + edge_iterator ei; FOR_ALL_BB (bb) { - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (EDGE_CRITICAL_P (e) && !(e->flags & EDGE_ABNORMAL)) { split_edge (e); } } - END_FOR_EACH_EDGE; } } @@ -4858,6 +4852,7 @@ execute_warn_function_return (void) #endif tree last; edge e; + edge_iterator ei; if (warn_missing_noreturn && !TREE_THIS_VOLATILE (cfun->decl) @@ -4875,7 +4870,7 @@ execute_warn_function_return (void) #else locus = NULL; #endif - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { last = last_stmt (e->src); if (TREE_CODE (last) == RETURN_EXPR @@ -4886,7 +4881,6 @@ execute_warn_function_return (void) #endif break; } - END_FOR_EACH_EDGE; #ifdef USE_MAPPED_LOCATION if (location == UNKNOWN_LOCATION) @@ -4905,7 +4899,7 @@ execute_warn_function_return (void) && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0 && !VOID_TYPE_P (TREE_TYPE (TREE_TYPE (cfun->decl)))) { - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { tree last = last_stmt (e->src); if (TREE_CODE (last) == RETURN_EXPR @@ -4925,7 +4919,6 @@ execute_warn_function_return (void) break; } } - END_FOR_EACH_EDGE; } } diff --git a/gcc/tree-if-conv.c b/gcc/tree-if-conv.c index 19c9abd853a..204c6eb311a 100644 --- a/gcc/tree-if-conv.c +++ b/gcc/tree-if-conv.c @@ -469,6 +469,7 @@ static bool if_convertable_bb_p (struct loop *loop, basic_block bb, bool exit_bb_seen) { edge e; + edge_iterator ei; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "----------[%d]-------------\n", bb->index); @@ -490,7 +491,7 @@ if_convertable_bb_p (struct loop *loop, basic_block bb, bool exit_bb_seen) } /* Be less adventurous and handle only normal edges. */ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH | EDGE_ABNORMAL | EDGE_IRREDUCIBLE_LOOP)) @@ -500,7 +501,6 @@ if_convertable_bb_p (struct loop *loop, basic_block bb, bool exit_bb_seen) return false; } } - END_FOR_EACH_EDGE; return true; } @@ -524,6 +524,7 @@ if_convertable_loop_p (struct loop *loop, bool for_vectorizer ATTRIBUTE_UNUSED) block_stmt_iterator itr; unsigned int i; edge e; + edge_iterator ei; bool exit_bb_seen = false; /* Handle only inner most loop. */ @@ -556,12 +557,11 @@ if_convertable_loop_p (struct loop *loop, bool for_vectorizer ATTRIBUTE_UNUSED) /* If one of the loop header's edge is exit edge then do not apply if-conversion. 
*/ - FOR_EACH_EDGE (e, loop->header->succs) + FOR_EACH_EDGE (e, ei, loop->header->succs) { if (e->flags & EDGE_LOOP_EXIT) return false; } - END_FOR_EACH_EDGE; compute_immediate_uses (TDFA_USE_OPS|TDFA_USE_VOPS, NULL); @@ -682,8 +682,9 @@ find_phi_replacement_condition (basic_block bb, tree *cond, basic_block p2 = NULL; basic_block true_bb = NULL; tree tmp_cond; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (p1 == NULL) p1 = e->src; @@ -693,7 +694,6 @@ find_phi_replacement_condition (basic_block bb, tree *cond, p2 = e->src; } } - END_FOR_EACH_EDGE; /* Use condition that is not TRUTH_NOT_EXPR in conditional modify expr. */ tmp_cond = p1->aux; @@ -873,6 +873,7 @@ combine_blocks (struct loop *loop) if (bb == exit_bb) { edge new_e; + edge_iterator ei; /* Connect this node with loop header. */ new_e = make_edge (ifc_bbs[0], bb, EDGE_FALLTHRU); @@ -881,7 +882,7 @@ combine_blocks (struct loop *loop) if (exit_bb != loop->latch) { /* Redirect non-exit edge to loop->latch. */ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (!(e->flags & EDGE_LOOP_EXIT)) { @@ -889,7 +890,6 @@ combine_blocks (struct loop *loop) set_immediate_dominator (CDI_DOMINATORS, loop->latch, bb); } } - END_FOR_EACH_EDGE; } continue; } @@ -965,12 +965,12 @@ static bool pred_blocks_visited_p (basic_block bb, bitmap *visited) { edge e; - FOR_EACH_EDGE (e, bb->preds) + edge_iterator ei; + FOR_EACH_EDGE (e, ei, bb->preds) { if (!bitmap_bit_p (*visited, e->src->index)) return false; } - END_FOR_EACH_EDGE; return true; } @@ -1038,9 +1038,10 @@ static bool bb_with_exit_edge_p (basic_block bb) { edge e; + edge_iterator ei; bool exit_edge_found = false; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->flags & EDGE_LOOP_EXIT) { @@ -1048,7 +1049,6 @@ bb_with_exit_edge_p (basic_block bb) break; } } - END_FOR_EACH_EDGE; return exit_edge_found; } diff --git a/gcc/tree-into-ssa.c b/gcc/tree-into-ssa.c index cab4f18cfbe..1953455f03e 100644 --- a/gcc/tree-into-ssa.c +++ b/gcc/tree-into-ssa.c @@ -223,12 +223,13 @@ compute_global_livein (bitmap livein, bitmap def_blocks) while (tos != worklist) { edge e; + edge_iterator ei; /* Pull a block off the worklist. */ bb = *--tos; /* For each predecessor block. */ - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { basic_block pred = e->src; int pred_index = pred->index; @@ -242,7 +243,6 @@ compute_global_livein (bitmap livein, bitmap def_blocks) bitmap_set_bit (livein, pred_index); } } - END_FOR_EACH_EDGE; } free (worklist); @@ -299,8 +299,9 @@ ssa_mark_phi_uses (struct dom_walk_data *walk_data, basic_block bb) edge e; tree phi, use; unsigned uid; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) continue; @@ -318,7 +319,6 @@ ssa_mark_phi_uses (struct dom_walk_data *walk_data, basic_block bb) set_livein_block (use, bb); } } - END_FOR_EACH_EDGE; } /* Call back for walk_dominator_tree used to collect definition sites @@ -732,16 +732,16 @@ ssa_rewrite_initialize_block (struct dom_walk_data *walk_data, basic_block bb) sbitmap names_to_rename = walk_data->global_data; edge e; bool abnormal_phi; + edge_iterator ei; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "\n\nRenaming block #%d\n\n", bb->index); - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (e->flags & EDGE_ABNORMAL) break; } - END_FOR_EACH_EDGE; abnormal_phi = (e != NULL); /* Step 1. 
Register new definitions for every PHI node in the block. @@ -776,8 +776,9 @@ rewrite_add_phi_arguments (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED, basic_block bb) { edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { tree phi; @@ -795,7 +796,6 @@ rewrite_add_phi_arguments (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED, add_phi_arg (&phi, currdef, e); } } - END_FOR_EACH_EDGE; } /* Ditto, for ssa name rewriting. */ @@ -806,8 +806,9 @@ ssa_rewrite_phi_arguments (struct dom_walk_data *walk_data, basic_block bb) edge e; sbitmap names_to_rename = walk_data->global_data; use_operand_p op; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { tree phi; @@ -828,7 +829,6 @@ ssa_rewrite_phi_arguments (struct dom_walk_data *walk_data, basic_block bb) SSA_NAME_OCCURS_IN_ABNORMAL_PHI (USE_FROM_PTR (op)) = 1; } } - END_FOR_EACH_EDGE; } /* SSA Rewriting Step 5. Restore the current reaching definition for each @@ -1030,11 +1030,11 @@ insert_phi_nodes_for (tree var, bitmap *dfs, varray_type *work_stack) /* If we are rewriting ssa names, add also the phi arguments. */ if (TREE_CODE (var) == SSA_NAME) { - FOR_EACH_EDGE (e, bb->preds) + edge_iterator ei; + FOR_EACH_EDGE (e, ei, bb->preds) { add_phi_arg (&phi, var, e); } - END_FOR_EACH_EDGE; } } while (0)); diff --git a/gcc/tree-outof-ssa.c b/gcc/tree-outof-ssa.c index a5922cc57b9..c7793c38c84 100644 --- a/gcc/tree-outof-ssa.c +++ b/gcc/tree-outof-ssa.c @@ -581,13 +581,14 @@ coalesce_abnormal_edges (var_map map, conflict_graph graph, root_var_p rv) edge e; tree phi, var, tmp; int x, y; + edge_iterator ei; /* Code cannot be inserted on abnormal edges. Look for all abnormal edges, and coalesce any PHI results with their arguments across that edge. 
*/ FOR_EACH_BB (bb) - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest != EXIT_BLOCK_PTR && e->flags & EDGE_ABNORMAL) for (phi = phi_nodes (e->dest); phi; phi = PHI_CHAIN (phi)) @@ -670,7 +671,6 @@ coalesce_abnormal_edges (var_map map, conflict_graph graph, root_var_p rv) } } } - END_FOR_EACH_EDGE; } @@ -1931,11 +1931,11 @@ rewrite_trees (var_map map, tree *values) phi = phi_nodes (bb); if (phi) { - FOR_EACH_EDGE (e, bb->preds) + edge_iterator ei; + FOR_EACH_EDGE (e, ei, bb->preds) { eliminate_phi (e, phi_arg_from_edge (phi, e), g); } - END_FOR_EACH_EDGE; } } diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c index 604c8daf257..0e60099437b 100644 --- a/gcc/tree-pretty-print.c +++ b/gcc/tree-pretty-print.c @@ -2105,6 +2105,7 @@ dump_bb_header (pretty_printer *buffer, basic_block bb, int indent, int flags) { edge e; tree stmt; + edge_iterator ei; if (flags & TDF_BLOCKS) { @@ -2128,7 +2129,7 @@ dump_bb_header (pretty_printer *buffer, basic_block bb, int indent, int flags) pp_string (buffer, "# PRED:"); pp_write_text_to_stream (buffer); - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (flags & TDF_SLIM) { @@ -2141,7 +2142,6 @@ dump_bb_header (pretty_printer *buffer, basic_block bb, int indent, int flags) else dump_edge_info (buffer->buffer->stream, e, 0); } - END_FOR_EACH_EDGE; pp_newline (buffer); } else @@ -2167,12 +2167,13 @@ static void dump_bb_end (pretty_printer *buffer, basic_block bb, int indent, int flags) { edge e; + edge_iterator ei; INDENT (indent); pp_string (buffer, "# SUCC:"); pp_write_text_to_stream (buffer); - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (flags & TDF_SLIM) { @@ -2185,7 +2186,6 @@ dump_bb_end (pretty_printer *buffer, basic_block bb, int indent, int flags) else dump_edge_info (buffer->buffer->stream, e, 1); } - END_FOR_EACH_EDGE; pp_newline (buffer); } @@ -2241,15 +2241,15 @@ dump_implicit_edges (pretty_printer *buffer, basic_block bb, int indent, int flags) { edge e; + edge_iterator ei; /* If there is a fallthru edge, we may need to add an artificial goto to the dump. */ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->flags & EDGE_FALLTHRU) break; } - END_FOR_EACH_EDGE; if (e && e->dest != bb->next_bb) { INDENT (indent); diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c index 1f9669142bb..8683d00875c 100644 --- a/gcc/tree-sra.c +++ b/gcc/tree-sra.c @@ -1661,10 +1661,11 @@ void insert_edge_copies (tree stmt, basic_block bb) { edge e; + edge_iterator ei; bool first_copy; first_copy = true; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { /* We don't need to insert copies on abnormal edges. The value of the scalar replacement is not guaranteed to @@ -1680,7 +1681,6 @@ insert_edge_copies (tree stmt, basic_block bb) bsi_insert_on_edge (e, unsave_expr_now (stmt)); } } - END_FOR_EACH_EDGE; } /* Helper function to insert LIST before BSI, and set up line number info. */ diff --git a/gcc/tree-ssa-dce.c b/gcc/tree-ssa-dce.c index d95a3b46084..0631d3b545f 100644 --- a/gcc/tree-ssa-dce.c +++ b/gcc/tree-ssa-dce.c @@ -495,12 +495,12 @@ find_obviously_necessary_stmts (struct edge_list *el) and we currently do not have a means to recognize the finite ones. 
*/ FOR_EACH_BB (bb) { - FOR_EACH_EDGE (e, bb->succs) + edge_iterator ei; + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->flags & EDGE_DFS_BACK) mark_control_dependent_edges_necessary (e->dest, el); } - END_FOR_EACH_EDGE; } } } diff --git a/gcc/tree-ssa-dom.c b/gcc/tree-ssa-dom.c index 17f8b7077c1..449c458f177 100644 --- a/gcc/tree-ssa-dom.c +++ b/gcc/tree-ssa-dom.c @@ -581,6 +581,7 @@ thread_across_edge (struct dom_walk_data *walk_data, edge e) { tree cond, cached_lhs; edge e1; + edge_iterator ei; /* Do not forward entry edges into the loop. In the case loop has multiple entry edges we may end up in constructing irreducible @@ -589,12 +590,11 @@ thread_across_edge (struct dom_walk_data *walk_data, edge e) edges forward to the same destination block. */ if (!e->flags & EDGE_DFS_BACK) { - FOR_EACH_EDGE (e1, e->dest->preds) + FOR_EACH_EDGE (e1, ei, e->dest->preds) { if (e1->flags & EDGE_DFS_BACK) break; } - END_FOR_EACH_EDGE; if (e1) return; } @@ -1140,8 +1140,9 @@ single_incoming_edge_ignoring_loop_edges (basic_block bb) { edge retval = NULL; edge e; + edge_iterator ei; - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { /* A loop back edge can be identified by the destination of the edge dominating the source of the edge. */ @@ -1157,7 +1158,6 @@ single_incoming_edge_ignoring_loop_edges (basic_block bb) it. */ retval = e; } - END_FOR_EACH_EDGE; return retval; } @@ -2263,11 +2263,12 @@ cprop_into_successor_phis (basic_block bb, bitmap nonzero_vars) { edge e; + edge_iterator ei; /* This can get rather expensive if the implementation is naive in how it finds the phi alternative associated with a particular edge. */ - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { tree phi; int phi_num_args; @@ -2343,7 +2344,6 @@ cprop_into_successor_phis (basic_block bb, } } } - END_FOR_EACH_EDGE; } diff --git a/gcc/tree-ssa-live.c b/gcc/tree-ssa-live.c index 41f37c9eaa8..2c2fa28b854 100644 --- a/gcc/tree-ssa-live.c +++ b/gcc/tree-ssa-live.c @@ -488,6 +488,7 @@ live_worklist (tree_live_info_p live, varray_type stack, int i) basic_block def_bb = NULL; edge e; var_map map = live->map; + edge_iterator ei; var = partition_to_var (map, i); if (SSA_NAME_DEF_STMT (var)) @@ -503,7 +504,7 @@ live_worklist (tree_live_info_p live, varray_type stack, int i) b = VARRAY_TOP_INT (stack); VARRAY_POP (stack); - FOR_EACH_EDGE (e, BASIC_BLOCK (b)->preds) + FOR_EACH_EDGE (e, ei, BASIC_BLOCK (b)->preds) { if (e->src != ENTRY_BLOCK_PTR) { @@ -517,7 +518,6 @@ live_worklist (tree_live_info_p live, varray_type stack, int i) } } } - END_FOR_EACH_EDGE; } } @@ -571,7 +571,7 @@ calculate_live_on_entry (var_map map) #ifdef ENABLE_CHECKING int num; #endif - + edge_iterator ei; saw_def = BITMAP_XMALLOC (); @@ -643,7 +643,7 @@ calculate_live_on_entry (var_map map) bb = ENTRY_BLOCK_PTR; num = 0; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { int entry_block = e->dest->index; if (e->dest == EXIT_BLOCK_PTR) @@ -717,7 +717,6 @@ calculate_live_on_entry (var_map map) } } } - END_FOR_EACH_EDGE; gcc_assert (num <= 0); #endif @@ -765,12 +764,12 @@ calculate_live_on_exit (tree_live_info_p liveinfo) on_entry = live_entry_blocks (liveinfo, i); EXECUTE_IF_SET_IN_BITMAP (on_entry, 0, b, { - FOR_EACH_EDGE (e, BASIC_BLOCK (b)->preds) + edge_iterator ei; + FOR_EACH_EDGE (e, ei, BASIC_BLOCK (b)->preds) { if (e->src != ENTRY_BLOCK_PTR) bitmap_set_bit (on_exit[e->src->index], i); } - END_FOR_EACH_EDGE; }); } diff --git a/gcc/tree-ssa-loop-ch.c b/gcc/tree-ssa-loop-ch.c index fcbefff419c..ff680cb5dbb 100644 --- 
a/gcc/tree-ssa-loop-ch.c +++ b/gcc/tree-ssa-loop-ch.c @@ -101,6 +101,7 @@ static void duplicate_blocks (varray_type bbs_to_duplicate) { unsigned i; + edge_iterator ei; edge preheader_edge, e; basic_block header, new_header; tree phi, new_phi, var; @@ -130,15 +131,15 @@ duplicate_blocks (varray_type bbs_to_duplicate) PENDING_STMT (preheader_edge) = NULL; /* Add the phi arguments to the outgoing edges. */ - FOR_EACH_EDGE (e, header->succs) + FOR_EACH_EDGE (e, ei, header->succs) { edge e1; - FOR_EACH_EDGE (e1, new_header->succs) + edge_iterator ei; + FOR_EACH_EDGE (e1, ei, new_header->succs) { if (e1->dest == e->dest) break; } - END_FOR_EACH_EDGE; if (e1 == NULL) abort (); @@ -148,7 +149,6 @@ duplicate_blocks (varray_type bbs_to_duplicate) add_phi_arg (&phi, def, e1); } } - END_FOR_EACH_EDGE; } calculate_dominance_info (CDI_DOMINATORS); diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c index 0236f3049fc..b06e1d6a358 100644 --- a/gcc/tree-ssa-loop-im.c +++ b/gcc/tree-ssa-loop-im.c @@ -1292,6 +1292,7 @@ fill_always_executed_in (struct loop *loop, sbitmap contains_call) for (i = 0; i < loop->num_nodes; i++) { + edge_iterator ei; bb = bbs[i]; if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb)) @@ -1300,12 +1301,11 @@ fill_always_executed_in (struct loop *loop, sbitmap contains_call) if (TEST_BIT (contains_call, bb->index)) break; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (!flow_bb_inside_loop_p (loop, e->dest)) break; } - END_FOR_EACH_EDGE; if (e) break; diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c index 33af6e855d9..224a68ff3bc 100644 --- a/gcc/tree-ssa-loop-ivopts.c +++ b/gcc/tree-ssa-loop-ivopts.c @@ -1395,15 +1395,15 @@ find_interesting_uses (struct ivopts_data *data) for (i = 0; i < data->current_loop->num_nodes; i++) { + edge_iterator ei; bb = body[i]; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest != EXIT_BLOCK_PTR && !flow_bb_inside_loop_p (data->current_loop, e->dest)) find_interesting_uses_outside (data, e); } - END_FOR_EACH_EDGE; for (phi = phi_nodes (bb); phi; phi = TREE_CHAIN (phi)) find_interesting_uses_stmt (data, phi); diff --git a/gcc/tree-ssa-loop-manip.c b/gcc/tree-ssa-loop-manip.c index fd086d0c001..1eba9f8654e 100644 --- a/gcc/tree-ssa-loop-manip.c +++ b/gcc/tree-ssa-loop-manip.c @@ -123,25 +123,24 @@ add_exit_phis_edge (basic_block exit, tree use) basic_block def_bb = bb_for_stmt (def_stmt); struct loop *def_loop; edge e; + edge_iterator ei; /* Check that some of the edges entering the EXIT block exits a loop in that USE is defined. 
*/ - FOR_EACH_EDGE (e, exit->preds) + FOR_EACH_EDGE (e, ei, exit->preds) { def_loop = find_common_loop (def_bb->loop_father, e->src->loop_father); if (!flow_bb_inside_loop_p (def_loop, e->dest)) break; } - END_FOR_EACH_EDGE; if (!e) return; phi = create_phi_node (use, exit); - FOR_EACH_EDGE (e, exit->preds) + FOR_EACH_EDGE (e, ei, exit->preds) add_phi_arg (&phi, use, e); - END_FOR_EACH_EDGE; SSA_NAME_DEF_STMT (use) = def_stmt; } @@ -190,17 +189,17 @@ get_loops_exits (void) bitmap exits = BITMAP_XMALLOC (); basic_block bb; edge e; + edge_iterator ei; FOR_EACH_BB (bb) { - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) if (e->src != ENTRY_BLOCK_PTR && !flow_bb_inside_loop_p (e->src->loop_father, bb)) { bitmap_set_bit (exits, bb->index); break; } - END_FOR_EACH_EDGE; } return exits; diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c index ea819455cf0..56bed5cb93c 100644 --- a/gcc/tree-ssa-pre.c +++ b/gcc/tree-ssa-pre.c @@ -1128,7 +1128,8 @@ compute_antic_aux (basic_block block) setting the BB_VISITED flag. */ if (! (block->flags & BB_VISITED)) { - FOR_EACH_EDGE (e, block->preds) + edge_iterator ei; + FOR_EACH_EDGE (e, ei, block->preds) { if (e->flags & EDGE_ABNORMAL) { @@ -1136,7 +1137,6 @@ compute_antic_aux (basic_block block) break; } } - END_FOR_EACH_EDGE; } if (block->flags & BB_VISITED) { @@ -1168,13 +1168,13 @@ compute_antic_aux (basic_block block) edge e; size_t i; basic_block bprime, first; + edge_iterator ei; VARRAY_BB_INIT (worklist, 1, "succ"); - FOR_EACH_EDGE (e, block->succs) + FOR_EACH_EDGE (e, ei, block->succs) { VARRAY_PUSH_BB (worklist, e->dest); } - END_FOR_EACH_EDGE; first = VARRAY_BB (worklist, 0); set_copy (ANTIC_OUT, ANTIC_IN (first)); @@ -1438,6 +1438,7 @@ insert_aux (basic_block block) edge pred; basic_block bprime; tree eprime; + edge_iterator ei; val = get_value_handle (node->expr); if (bitmap_set_contains_value (PHI_GEN (block), val)) @@ -1451,7 +1452,7 @@ insert_aux (basic_block block) avail = xcalloc (last_basic_block, sizeof (tree)); - FOR_EACH_EDGE (pred, block->preds) + FOR_EACH_EDGE (pred, ei, block->preds) { tree vprime; tree edoubleprime; @@ -1506,7 +1507,6 @@ insert_aux (basic_block block) (first_s, edoubleprime, 0)); } } - END_FOR_EACH_EDGE; /* If we can insert it, it's not the same value already existing along every predecessor, and @@ -1524,7 +1524,7 @@ insert_aux (basic_block block) } /* Make the necessary insertions. */ - FOR_EACH_EDGE (pred, block->preds) + FOR_EACH_EDGE (pred, ei, block->preds) { tree stmts = alloc_stmt_list (); tree builtexpr; @@ -1541,7 +1541,6 @@ insert_aux (basic_block block) avail[bprime->index] = builtexpr; } } - END_FOR_EACH_EDGE; /* Now build a phi for the new variable. 
*/ temp = create_tmp_var (type, "prephitmp"); @@ -1558,12 +1557,11 @@ insert_aux (basic_block block) bitmap_value_replace_in_set (AVAIL_OUT (block), PHI_RESULT (temp)); - FOR_EACH_EDGE (pred, block->preds) + FOR_EACH_EDGE (pred, ei, block->preds) { add_phi_arg (&temp, avail[pred->src->index], pred); } - END_FOR_EACH_EDGE; if (dump_file && (dump_flags & TDF_DETAILS)) { diff --git a/gcc/tree-ssa-propagate.c b/gcc/tree-ssa-propagate.c index e545d3597cd..5910d633fa4 100644 --- a/gcc/tree-ssa-propagate.c +++ b/gcc/tree-ssa-propagate.c @@ -318,10 +318,10 @@ simulate_stmt (tree stmt) if (stmt_ends_bb_p (stmt)) { edge e; + edge_iterator ei; basic_block bb = bb_for_stmt (stmt); - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) add_control_edge (e); - END_FOR_EACH_EDGE; } } else if (val == SSA_PROP_INTERESTING) @@ -407,6 +407,7 @@ simulate_block (basic_block block) block_stmt_iterator j; unsigned int normal_edge_count; edge e, normal_edge; + edge_iterator ei; /* Note that we have simulated this block. */ SET_BIT (executable_blocks, block->index); @@ -435,7 +436,7 @@ simulate_block (basic_block block) worklist. */ normal_edge_count = 0; normal_edge = NULL; - FOR_EACH_EDGE (e, block->succs) + FOR_EACH_EDGE (e, ei, block->succs) { if (e->flags & EDGE_ABNORMAL) add_control_edge (e); @@ -445,7 +446,6 @@ simulate_block (basic_block block) normal_edge = e; } } - END_FOR_EACH_EDGE; if (normal_edge_count == 1) add_control_edge (normal_edge); @@ -459,6 +459,7 @@ static void ssa_prop_init (void) { edge e; + edge_iterator ei; basic_block bb; /* Worklists of SSA edges. */ @@ -484,14 +485,13 @@ ssa_prop_init (void) for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si)) STMT_IN_SSA_EDGE_WORKLIST (bsi_stmt (si)) = 0; - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) e->flags &= ~EDGE_EXECUTABLE; - END_FOR_EACH_EDGE; } /* Seed the algorithm by adding the successors of the entry block to the edge worklist. */ - FOR_EACH_EDGE (e, ENTRY_BLOCK_PTR->succs) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { if (e->dest != EXIT_BLOCK_PTR) { @@ -499,7 +499,6 @@ ssa_prop_init (void) cfg_blocks_add (e->dest); } } - END_FOR_EACH_EDGE; } diff --git a/gcc/tree-ssa-threadupdate.c b/gcc/tree-ssa-threadupdate.c index d8e4dd1e6ce..bf178e9d0f9 100644 --- a/gcc/tree-ssa-threadupdate.c +++ b/gcc/tree-ssa-threadupdate.c @@ -231,6 +231,7 @@ thread_block (basic_block bb) /* E is an incoming edge into BB that we may or may not want to redirect to a duplicate of BB. */ edge e; + edge_iterator ei; /* ALL indicates whether or not all incoming edges into BB should be threaded to a duplicate of BB. */ @@ -243,7 +244,7 @@ thread_block (basic_block bb) /* Look at each incoming edge into BB. Record each unique outgoing edge that we want to thread an incoming edge to. Also note if all incoming edges are threaded or not. */ - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (!e->aux) { @@ -279,7 +280,6 @@ thread_block (basic_block bb) } } } - END_FOR_EACH_EDGE; /* Now create duplicates of BB. Note that if all incoming edges are threaded, then BB is going to become unreachable. In that case diff --git a/gcc/tree-ssa.c b/gcc/tree-ssa.c index e85a8ee17ea..8ccea165fc7 100644 --- a/gcc/tree-ssa.c +++ b/gcc/tree-ssa.c @@ -267,13 +267,13 @@ verify_phi_args (tree phi, basic_block bb, basic_block *definition_block) edge e; bool err = false; int i, phi_num_args = PHI_NUM_ARGS (phi); + edge_iterator ei; /* Mark all the incoming edges. 
*/ - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { e->aux = (void *) 1; } - END_FOR_EACH_EDGE; for (i = 0; i < phi_num_args; i++) { @@ -317,7 +317,7 @@ verify_phi_args (tree phi, basic_block bb, basic_block *definition_block) e->aux = (void *) 2; } - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (e->aux != (void *) 2) { @@ -328,7 +328,6 @@ verify_phi_args (tree phi, basic_block bb, basic_block *definition_block) } e->aux = (void *) 0; } - END_FOR_EACH_EDGE; error: if (err) @@ -566,10 +565,11 @@ verify_ssa (void) { edge e; tree phi; + edge_iterator ei; block_stmt_iterator bsi; /* Make sure that all edges have a clear 'aux' field. */ - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (e->aux) { @@ -578,7 +578,6 @@ verify_ssa (void) goto err; } } - END_FOR_EACH_EDGE; /* Verify the arguments for every PHI node in the block. */ for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) diff --git a/gcc/tree-tailcall.c b/gcc/tree-tailcall.c index e950151ede1..067e43981aa 100644 --- a/gcc/tree-tailcall.c +++ b/gcc/tree-tailcall.c @@ -190,6 +190,7 @@ independent_of_stmt_p (tree expr, tree at, block_stmt_iterator bsi) { basic_block bb, call_bb, at_bb; edge e; + edge_iterator ei; if (is_gimple_min_invariant (expr)) return expr; @@ -230,12 +231,11 @@ independent_of_stmt_p (tree expr, tree at, block_stmt_iterator bsi) break; } - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { if (e->src->aux) break; } - END_FOR_EACH_EDGE; gcc_assert (e); @@ -409,12 +409,12 @@ find_tail_calls (basic_block bb, struct tailcall **ret) if (bsi_end_p (bsi)) { + edge_iterator ei; /* Recurse to the predecessors. */ - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { find_tail_calls (e->src, ret); } - END_FOR_EACH_EDGE; return; } @@ -817,13 +817,14 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls) bool changed = false; basic_block first = EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest; tree stmt, param, ret_type, tmp, phi; + edge_iterator ei; if (!suitable_for_tail_opt_p ()) return; if (opt_tailcalls) opt_tailcalls = suitable_for_tail_call_opt_p (); - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { /* Only traverse the normal exits, i.e. those that end with return statement. */ @@ -833,7 +834,6 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls) && TREE_CODE (stmt) == RETURN_EXPR) find_tail_calls (e->src, &tailcalls); } - END_FOR_EACH_EDGE; /* Construct the phi nodes and accumulators if necessary. */ a_acc = m_acc = NULL_TREE; @@ -905,7 +905,7 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls) if (a_acc || m_acc) { /* Modify the remaining return statements. */ - FOR_EACH_EDGE (e, EXIT_BLOCK_PTR->preds) + FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds) { stmt = last_stmt (e->src); @@ -913,7 +913,6 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls) && TREE_CODE (stmt) == RETURN_EXPR) adjust_return_value (e->src, m_acc, a_acc); } - END_FOR_EACH_EDGE; } if (changed) diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c index 6a5044e70f7..6fc7e3259e1 100644 --- a/gcc/var-tracking.c +++ b/gcc/var-tracking.c @@ -1725,21 +1725,21 @@ vt_find_locations (void) if (!TEST_BIT (visited, bb->index)) { bool changed; + edge_iterator ei; SET_BIT (visited, bb->index); /* Calculate the IN set as union of predecessor OUT sets. 
*/ dataflow_set_clear (&VTI (bb)->in); - FOR_EACH_EDGE (e, bb->preds) + FOR_EACH_EDGE (e, ei, bb->preds) { dataflow_set_union (&VTI (bb)->in, &VTI (e->src)->out); } - END_FOR_EACH_EDGE; changed = compute_bb_dataflow (bb); if (changed) { - FOR_EACH_EDGE (e, bb->succs) + FOR_EACH_EDGE (e, ei, bb->succs) { if (e->dest == EXIT_BLOCK_PTR) continue; @@ -1766,7 +1766,6 @@ vt_find_locations (void) e->dest); } } - END_FOR_EACH_EDGE; } } } -- 2.11.4.GIT
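
For readers updating out-of-tree callers, the sketch below shows the shape of a
converted caller.  It is illustrative only and not part of the patch: it assumes
the edge-vector-branch declarations in basic-block.h (edge_iterator, ei_safe_edge,
the three-argument FOR_EACH_EDGE), the include preamble is only indicative, and
count_abnormal_succs is a made-up name.

    /* Illustrative sketch, not part of the patch.  Assumes the usual GCC
       include preamble and the edge-vector-branch basic-block.h.  */
    #include "config.h"
    #include "system.h"
    #include "coretypes.h"
    #include "tm.h"
    #include "hard-reg-set.h"
    #include "basic-block.h"

    /* Count the abnormal successor edges of BB using the rewritten,
       iterator-based FOR_EACH_EDGE.  */
    static int
    count_abnormal_succs (basic_block bb)
    {
      edge e;
      edge_iterator ei;		/* The iterator is now passed explicitly.  */
      int n = 0;

      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (e->flags & EDGE_ABNORMAL)
	    n++;
	}
      /* No END_FOR_EACH_EDGE terminator is needed any more.  */

      return n;
    }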