From d0aa45d0e076f4b997afbac938d8808cbe34d0c2 Mon Sep 17 00:00:00 2001
From: aoliva
Date: Fri, 29 Apr 2011 05:22:08 +0000
Subject: [PATCH] * haifa-sched.c (last_nondebug_scheduled_insn): New.
 (rank_for_schedule): Use it.
 (schedule_block): Set it.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@173150 138bc75d-0d04-0410-961f-82ee72b054a4
---
 gcc/ChangeLog     |  6 ++++++
 gcc/haifa-sched.c | 26 +++++++++++---------------
 2 files changed, 17 insertions(+), 15 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index fdbc4f1212e..5bb00379407 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,9 @@
+2011-04-29  Alexandre Oliva
+
+	* haifa-sched.c (last_nondebug_scheduled_insn): New.
+	(rank_for_schedule): Use it.
+	(schedule_block): Set it.
+
 2011-04-28  David Li
 
 	* tree.c (crc32_string): Use crc32_byte.
diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c
index d67aee8f827..15d8f65375f 100644
--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -783,6 +783,12 @@ print_curr_reg_pressure (void)
 /* Pointer to the last instruction scheduled.  */
 static rtx last_scheduled_insn;
 
+/* Pointer to the last nondebug instruction scheduled within the
+   block, or the prev_head of the scheduling block.  Used by
+   rank_for_schedule, so that insns independent of the last scheduled
+   insn will be preferred over dependent instructions.  */
+static rtx last_nondebug_scheduled_insn;
+
 /* Pointer that iterates through the list of unscheduled insns if we
    have a dbg_cnt enabled.  It always points at an insn prior to the
    first unscheduled one.  */
@@ -1158,7 +1164,6 @@ rank_for_schedule (const void *x, const void *y)
 {
   rtx tmp = *(const rtx *) y;
   rtx tmp2 = *(const rtx *) x;
-  rtx last;
   int tmp_class, tmp2_class;
   int val, priority_val, info_val;
 
@@ -1239,24 +1244,13 @@ rank_for_schedule (const void *x, const void *y)
   if (flag_sched_rank_heuristic && info_val)
     return info_val;
 
-  if (flag_sched_last_insn_heuristic)
-    {
-      int i = VEC_length (rtx, scheduled_insns);
-      last = NULL_RTX;
-      while (i-- > 0)
-	{
-	  last = VEC_index (rtx, scheduled_insns, i);
-	  if (NONDEBUG_INSN_P (last))
-	    break;
-	}
-    }
-
   /* Compare insns based on their relation to the last scheduled
      non-debug insn.  */
-  if (flag_sched_last_insn_heuristic && last && NONDEBUG_INSN_P (last))
+  if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
     {
       dep_t dep1;
       dep_t dep2;
+      rtx last = last_nondebug_scheduled_insn;
 
       /* Classify the instructions into three classes:
          1) Data dependent on last schedule insn.
@@ -2967,6 +2961,7 @@ schedule_block (basic_block *target_bb)
 
   /* We start inserting insns after PREV_HEAD.  */
   last_scheduled_insn = nonscheduled_insns_begin = prev_head;
+  last_nondebug_scheduled_insn = NULL_RTX;
 
   gcc_assert ((NOTE_P (last_scheduled_insn)
 	       || DEBUG_INSN_P (last_scheduled_insn))
@@ -3226,7 +3221,8 @@ schedule_block (basic_block *target_bb)
       /* Update counters, etc in the scheduler's front end.  */
       (*current_sched_info->begin_schedule_ready) (insn);
       VEC_safe_push (rtx, heap, scheduled_insns, insn);
-      last_scheduled_insn = insn;
+      gcc_assert (NONDEBUG_INSN_P (insn));
+      last_nondebug_scheduled_insn = last_scheduled_insn = insn;
 
       if (recog_memoized (insn) >= 0)
 	{
-- 
2.11.4.GIT
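
Note (not part of the patch): the deleted loop rescanned the scheduled_insns
vector from the end on every rank_for_schedule comparison, while the new
last_nondebug_scheduled_insn pointer is kept up to date by schedule_block each
time a nondebug insn is scheduled, turning the last-insn heuristic into a
constant-time lookup.  The standalone C sketch below illustrates only that
caching idea; it uses a plain array and an is_debug flag in place of GCC's
rtx/VEC types, and names such as sketch_insn and schedule_one are invented for
the illustration.

/* Standalone sketch (not GCC code): cache the last nondebug scheduled
   "insn" instead of rescanning the scheduled list on every query.  */
#include <stdio.h>
#include <stddef.h>

struct sketch_insn
{
  int uid;
  int is_debug;		/* stands in for DEBUG_INSN_P */
};

#define MAX_SCHEDULED 128

static struct sketch_insn *scheduled[MAX_SCHEDULED];
static size_t n_scheduled;

/* Analogue of last_nondebug_scheduled_insn: updated as insns are
   scheduled, so the ranking heuristic never walks the list backwards.  */
static struct sketch_insn *last_nondebug_scheduled;

/* Old approach: scan the scheduled list from the end on each query.  */
static struct sketch_insn *
find_last_nondebug_by_scan (void)
{
  size_t i = n_scheduled;
  while (i-- > 0)
    if (!scheduled[i]->is_debug)
      return scheduled[i];
  return NULL;
}

/* New approach: update the cached pointer at scheduling time.  */
static void
schedule_one (struct sketch_insn *insn)
{
  scheduled[n_scheduled++] = insn;
  if (!insn->is_debug)
    last_nondebug_scheduled = insn;
}

int
main (void)
{
  struct sketch_insn insns[] = {
    { 1, 0 }, { 2, 1 }, { 3, 0 }, { 4, 1 }, { 5, 1 }
  };
  for (size_t i = 0; i < sizeof insns / sizeof insns[0]; i++)
    schedule_one (&insns[i]);

  /* Both queries agree on insn 3; the cached one is O(1).  */
  printf ("scan: %d, cached: %d\n",
	  find_last_nondebug_by_scan ()->uid,
	  last_nondebug_scheduled->uid);
  return 0;
}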