gcc/haifa-sched.c
1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
6 and currently maintained by, Jim Wilson (wilson@cygnus.com)
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
24 /* Instruction scheduling pass. This file, along with sched-deps.c,
25 contains the generic parts. The actual entry point for the normal
26 instruction scheduling pass is found in sched-rgn.c.
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
31 expected to use. For this pass, we must therefore create the
32 remaining dependencies we need to observe: register dependencies,
33 memory dependencies, dependencies to keep function calls in order,
34 and the dependence between a conditional branch and the setting of
35 condition codes are all dealt with here.
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning values
39 to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we either queue it until sufficient time has elapsed
50 or add it to the ready list. As the instructions are scheduled or
51 when stalls are introduced, the queue advances and dumps insns into
52 the ready list. When all insns down to the lowest priority have
53 been scheduled, the critical path of the basic block has been made
54 as short as possible. The remaining insns are then scheduled in
55 remaining slots.
57 The following list shows the order in which we want to break ties
58 among insns in the ready list:
60 1. choose insn with the longest path to end of bb, ties
61 broken by
62 2. choose insn with least contribution to register pressure,
63 ties broken by
64 3. prefer in-block over interblock motion, ties broken by
65 4. prefer useful over speculative motion, ties broken by
66 5. choose insn with largest control flow probability, ties
67 broken by
68 6. choose insn with the least dependences upon the previously
69 scheduled insn, or finally
70 7. choose the insn which has the most insns dependent on it.
71 8. choose insn with lowest UID.
73 Memory references complicate matters. Only if we can be certain
74 that memory references are not part of the data dependency graph
75 (via true, anti, or output dependence), can we move operations past
76 memory references. To first approximation, reads can be done
77 independently, while writes introduce dependencies. Better
78 approximations will yield fewer dependencies.
80 Before reload, an extended analysis of interblock data dependences
81 is required for interblock scheduling. This is performed in
82 compute_block_backward_dependences ().
84 Dependencies set up by memory references are treated in exactly the
85 same way as other dependencies, by using insn backward dependences
86 INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences
87 INSN_FORW_DEPS for the purpose of forward list scheduling.
89 Having optimized the critical path, we may have also unduly
90 extended the lifetimes of some registers. If an operation requires
91 that constants be loaded into registers, it is certainly desirable
92 to load those constants as early as necessary, but no earlier.
93 I.e., it will not do to load up a bunch of registers at the
94 beginning of a basic block only to use them at the end, if they
95 could be loaded later, since this may result in excessive register
96 utilization.
98 Note that since branches are never in basic blocks, but only end
99 basic blocks, this pass will not move branches. But that is ok,
100 since we can use GNU's delayed branch scheduling pass to take care
101 of this case.
103 Also note that no further optimizations based on algebraic
104 identities are performed, so this pass would be a good one to
105 perform instruction splitting, such as breaking up a multiply
106 instruction into shifts and adds where that is profitable.
108 Given the memory aliasing analysis that this pass should perform,
109 it should be possible to remove redundant stores to memory, and to
110 load values from registers instead of hitting memory.
112 Before reload, speculative insns are moved only if a 'proof' exists
113 that no exception will be caused by this, and if no live registers
114 exist that inhibit the motion (live registers constraints are not
115 represented by data dependence edges).
117 This pass must update information that subsequent passes expect to
118 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
119 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
121 The information in the line number notes is carefully retained by
122 this pass. Notes that refer to the starting and ending of
123 exception regions are also carefully retained by this pass. All
124 other NOTE insns are grouped in their same relative order at the
125 beginning of basic blocks and regions that have been scheduled. */
127 #include "config.h"
128 #include "system.h"
129 #include "coretypes.h"
130 #include "tm.h"
131 #include "diagnostic-core.h"
132 #include "rtl.h"
133 #include "tm_p.h"
134 #include "hard-reg-set.h"
135 #include "regs.h"
136 #include "function.h"
137 #include "flags.h"
138 #include "insn-config.h"
139 #include "insn-attr.h"
140 #include "except.h"
141 #include "recog.h"
142 #include "sched-int.h"
143 #include "target.h"
144 #include "output.h"
145 #include "params.h"
146 #include "vecprim.h"
147 #include "dbgcnt.h"
148 #include "cfgloop.h"
149 #include "ira.h"
150 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
152 #ifdef INSN_SCHEDULING
154 /* issue_rate is the number of insns that can be scheduled in the same
155 machine cycle. It can be defined in the config/mach/mach.h file,
156 otherwise we set it to 1. */
158 int issue_rate;
160 /* sched-verbose controls the amount of debugging output the
161 scheduler prints. It is controlled by -fsched-verbose=N:
162 N>0 and no -DSR : the output is directed to stderr.
163 N>=10 will direct the printouts to stderr (regardless of -dSR).
164 N=1: same as -dSR.
165 N=2: bb's probabilities, detailed ready list info, unit/insn info.
166 N=3: rtl at abort point, control-flow, regions info.
167 N=5: dependences info. */
169 int sched_verbose = 0;
171 /* Debugging file. All printouts are sent to dump, which is always set,
172 either to stderr, or to the dump listing file (-dRS). */
173 FILE *sched_dump = 0;
175 /* This is a placeholder for the scheduler parameters common
176 to all schedulers. */
177 struct common_sched_info_def *common_sched_info;
179 #define INSN_TICK(INSN) (HID (INSN)->tick)
180 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
182 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
183 then it should be recalculated from scratch. */
184 #define INVALID_TICK (-(max_insn_queue_index + 1))
185 /* The minimal value of the INSN_TICK of an instruction. */
186 #define MIN_TICK (-max_insn_queue_index)
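/* A small worked example of the tick encoding above, assuming
   max_insn_queue_index == 31: INVALID_TICK == -32 and MIN_TICK == -31,
   so every valid tick is at least -31 and a tick of INVALID_TICK forces
   the value to be recomputed from scratch.  */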
188 /* List of important notes we must keep around. This is a pointer to the
189 last element in the list. */
190 rtx note_list;
192 static struct spec_info_def spec_info_var;
193 /* Description of the speculative part of the scheduling.
194 If NULL - no speculation. */
195 spec_info_t spec_info = NULL;
197 /* True if a recovery block was added during scheduling of the current block.
198 Used to determine if we need to fix INSN_TICKs. */
199 static bool haifa_recovery_bb_recently_added_p;
201 /* True if a recovery block was added during this scheduling pass.
202 Used to determine if we should have empty memory pools of dependencies
203 after finishing the current region. */
204 bool haifa_recovery_bb_ever_added_p;
206 /* Counters of different types of speculative instructions. */
207 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
209 /* Array used in {unlink, restore}_bb_notes. */
210 static rtx *bb_header = 0;
212 /* Basic block after which recovery blocks will be created. */
213 static basic_block before_recovery;
215 /* Basic block just before the EXIT_BLOCK and after recovery, if we have
216 created it. */
217 basic_block after_recovery;
219 /* FALSE if we add bb to another region, so we don't need to initialize it. */
220 bool adding_bb_to_current_region_p = true;
222 /* Queues, etc. */
224 /* An instruction is ready to be scheduled when all insns preceding it
225 have already been scheduled. It is important to ensure that all
226 insns which use its result will not be executed until its result
227 has been computed. An insn is maintained in one of four structures:
229 (P) the "Pending" set of insns which cannot be scheduled until
230 their dependencies have been satisfied.
231 (Q) the "Queued" set of insns that can be scheduled when sufficient
232 time has passed.
233 (R) the "Ready" list of unscheduled, uncommitted insns.
234 (S) the "Scheduled" list of insns.
236 Initially, all insns are either "Pending" or "Ready" depending on
237 whether their dependencies are satisfied.
239 Insns move from the "Ready" list to the "Scheduled" list as they
240 are committed to the schedule. As this occurs, the insns in the
241 "Pending" list have their dependencies satisfied and move to either
242 the "Ready" list or the "Queued" set depending on whether
243 sufficient time has passed to make them ready. As time passes,
244 insns move from the "Queued" set to the "Ready" list.
246 The "Pending" list (P) consists of the insns in the INSN_FORW_DEPS of the
247 unscheduled insns, i.e., those that are ready, queued, and pending.
248 The "Queued" set (Q) is implemented by the variable `insn_queue'.
249 The "Ready" list (R) is implemented by the variables `ready' and
250 `n_ready'.
251 The "Scheduled" list (S) is the new insn chain built by this pass.
253 The transition (R->S) is implemented in the scheduling loop in
254 `schedule_block' when the best insn to schedule is chosen.
255 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
256 insns move from the ready list to the scheduled list.
257 The transition (Q->R) is implemented in 'queue_to_ready' as time
258 passes or stalls are introduced. */
260 /* Implement a circular buffer to delay instructions until sufficient
261 time has passed. For the new pipeline description interface,
262 MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
263 than the maximal time of instruction execution computed by genattr.c
264 on the basis of the maximal time of functional unit reservations and
265 getting a result. This is the longest time an insn may be queued. */
267 static rtx *insn_queue;
268 static int q_ptr = 0;
269 static int q_size = 0;
270 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
271 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
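/* A small worked example of the wrap-around above, again assuming
   max_insn_queue_index == 31 (a power of two minus one):
   NEXT_Q (31) == 0 and NEXT_Q_AFTER (30, 3) == 1, i.e. the slot
   indices wrap around the circular buffer.  */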
273 #define QUEUE_SCHEDULED (-3)
274 #define QUEUE_NOWHERE (-2)
275 #define QUEUE_READY (-1)
276 /* QUEUE_SCHEDULED - INSN is scheduled.
277 QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in
278 queue or ready list.
279 QUEUE_READY - INSN is in ready list.
280 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
282 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
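/* For example, QUEUE_INDEX (insn) == 2 means the insn is linked into
   insn_queue[2] and becomes ready when q_ptr reaches that slot; the
   negative values encode the special states listed above.  */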
284 /* The following variable describes all current and future
285 reservations of the processor units. */
286 state_t curr_state;
288 /* The following variable is the size of memory representing all
289 current and future reservations of the processor units. */
290 size_t dfa_state_size;
292 /* The following array is used to find the best insn from ready when
293 the automaton pipeline interface is used. */
294 char *ready_try = NULL;
296 /* The ready list. */
297 struct ready_list ready = {NULL, 0, 0, 0, 0};
299 /* The pointer to the ready list (to be removed). */
300 static struct ready_list *readyp = &ready;
302 /* Scheduling clock. */
303 static int clock_var;
305 static int may_trap_exp (const_rtx, int);
307 /* Nonzero iff the address is composed of at most 1 register. */
308 #define CONST_BASED_ADDRESS_P(x) \
309 (REG_P (x) \
310 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
311 || (GET_CODE (x) == LO_SUM)) \
312 && (CONSTANT_P (XEXP (x, 0)) \
313 || CONSTANT_P (XEXP (x, 1)))))
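/* For example, (reg) and (plus (reg) (const_int 8)) satisfy
   CONST_BASED_ADDRESS_P, while (plus (reg) (reg)) does not, since
   neither operand of the PLUS is constant.  */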
315 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
316 as found by analyzing insn's expression. */
319 static int haifa_luid_for_non_insn (rtx x);
321 /* Haifa version of sched_info hooks common to all schedulers. */
322 const struct common_sched_info_def haifa_common_sched_info =
324 NULL, /* fix_recovery_cfg */
325 NULL, /* add_block */
326 NULL, /* estimate_number_of_insns */
327 haifa_luid_for_non_insn, /* luid_for_non_insn */
328 SCHED_PASS_UNKNOWN /* sched_pass_id */
331 const struct sched_scan_info_def *sched_scan_info;
333 /* Mapping from instruction UID to its Logical UID. */
334 VEC (int, heap) *sched_luids = NULL;
336 /* Next LUID to assign to an instruction. */
337 int sched_max_luid = 1;
339 /* Haifa Instruction Data. */
340 VEC (haifa_insn_data_def, heap) *h_i_d = NULL;
342 void (* sched_init_only_bb) (basic_block, basic_block);
344 /* Split block function. Different schedulers might use different functions
345 to keep their internal data consistent. */
346 basic_block (* sched_split_block) (basic_block, rtx);
348 /* Create empty basic block after the specified block. */
349 basic_block (* sched_create_empty_bb) (basic_block);
351 static int
352 may_trap_exp (const_rtx x, int is_store)
354 enum rtx_code code;
356 if (x == 0)
357 return TRAP_FREE;
358 code = GET_CODE (x);
359 if (is_store)
361 if (code == MEM && may_trap_p (x))
362 return TRAP_RISKY;
363 else
364 return TRAP_FREE;
366 if (code == MEM)
368 /* The insn uses memory: a volatile load. */
369 if (MEM_VOLATILE_P (x))
370 return IRISKY;
371 /* An exception-free load. */
372 if (!may_trap_p (x))
373 return IFREE;
374 /* A load with 1 base register, to be further checked. */
375 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
376 return PFREE_CANDIDATE;
377 /* No info on the load, to be further checked. */
378 return PRISKY_CANDIDATE;
380 else
382 const char *fmt;
383 int i, insn_class = TRAP_FREE;
385 /* Neither store nor load, check if it may cause a trap. */
386 if (may_trap_p (x))
387 return TRAP_RISKY;
388 /* Recursive step: walk the insn... */
389 fmt = GET_RTX_FORMAT (code);
390 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
392 if (fmt[i] == 'e')
394 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
395 insn_class = WORST_CLASS (insn_class, tmp_class);
397 else if (fmt[i] == 'E')
399 int j;
400 for (j = 0; j < XVECLEN (x, i); j++)
402 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
403 insn_class = WORST_CLASS (insn_class, tmp_class);
404 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
405 break;
408 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
409 break;
411 return insn_class;
415 /* Classifies rtx X of an insn for the purpose of verifying that X can be
416 executed speculatively (and consequently the insn can be moved
417 speculatively), by examining X, returning:
418 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
419 TRAP_FREE: non-load insn.
420 IFREE: load from a globally safe location.
421 IRISKY: volatile load.
422 PFREE_CANDIDATE, PRISKY_CANDIDATE: load that needs to be checked for
423 being either PFREE or PRISKY. */
425 static int
426 haifa_classify_rtx (const_rtx x)
428 int tmp_class = TRAP_FREE;
429 int insn_class = TRAP_FREE;
430 enum rtx_code code;
432 if (GET_CODE (x) == PARALLEL)
434 int i, len = XVECLEN (x, 0);
436 for (i = len - 1; i >= 0; i--)
438 tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
439 insn_class = WORST_CLASS (insn_class, tmp_class);
440 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
441 break;
444 else
446 code = GET_CODE (x);
447 switch (code)
449 case CLOBBER:
450 /* Test if it is a 'store'. */
451 tmp_class = may_trap_exp (XEXP (x, 0), 1);
452 break;
453 case SET:
454 /* Test if it is a store. */
455 tmp_class = may_trap_exp (SET_DEST (x), 1);
456 if (tmp_class == TRAP_RISKY)
457 break;
458 /* Test if it is a load. */
459 tmp_class =
460 WORST_CLASS (tmp_class,
461 may_trap_exp (SET_SRC (x), 0));
462 break;
463 case COND_EXEC:
464 tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
465 if (tmp_class == TRAP_RISKY)
466 break;
467 tmp_class = WORST_CLASS (tmp_class,
468 may_trap_exp (COND_EXEC_TEST (x), 0));
469 break;
470 case TRAP_IF:
471 tmp_class = TRAP_RISKY;
472 break;
473 default:;
475 insn_class = tmp_class;
478 return insn_class;
482 haifa_classify_insn (const_rtx insn)
484 return haifa_classify_rtx (PATTERN (insn));
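/* For example, (set (mem) (reg)) where the store may trap classifies as
   TRAP_RISKY, a volatile load classifies as IRISKY, and a plain
   (set (reg) (reg)) comes out as TRAP_FREE.  */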
487 /* Forward declarations. */
489 static int priority (rtx);
490 static int rank_for_schedule (const void *, const void *);
491 static void swap_sort (rtx *, int);
492 static void queue_insn (rtx, int);
493 static int schedule_insn (rtx);
494 static void adjust_priority (rtx);
495 static void advance_one_cycle (void);
496 static void extend_h_i_d (void);
499 /* Notes handling mechanism:
500 =========================
501 Generally, NOTES are saved before scheduling and restored after scheduling.
502 The scheduler distinguishes between two types of notes:
504 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
505 Before scheduling a region, a pointer to the note is added to the insn
506 that follows or precedes it. (This happens as part of the data dependence
507 computation). After scheduling an insn, the pointer contained in it is
508 used for regenerating the corresponding note (in reemit_notes).
510 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
511 these notes are put in a list (in rm_other_notes() and
512 unlink_other_notes ()). After scheduling the block, these notes are
513 inserted at the beginning of the block (in schedule_block()). */
515 static void ready_add (struct ready_list *, rtx, bool);
516 static rtx ready_remove_first (struct ready_list *);
517 static rtx ready_remove_first_dispatch (struct ready_list *ready);
519 static void queue_to_ready (struct ready_list *);
520 static int early_queue_to_ready (state_t, struct ready_list *);
522 static void debug_ready_list (struct ready_list *);
524 /* The following functions are used to implement multi-pass scheduling
525 on the first cycle. */
526 static rtx ready_remove (struct ready_list *, int);
527 static void ready_remove_insn (rtx);
529 static void fix_inter_tick (rtx, rtx);
530 static int fix_tick_ready (rtx);
531 static void change_queue_index (rtx, int);
533 /* The following functions are used to implement scheduling of data/control
534 speculative instructions. */
536 static void extend_h_i_d (void);
537 static void init_h_i_d (rtx);
538 static void generate_recovery_code (rtx);
539 static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
540 static void begin_speculative_block (rtx);
541 static void add_to_speculative_block (rtx);
542 static void init_before_recovery (basic_block *);
543 static void create_check_block_twin (rtx, bool);
544 static void fix_recovery_deps (basic_block);
545 static void haifa_change_pattern (rtx, rtx);
546 static void dump_new_block_header (int, basic_block, rtx, rtx);
547 static void restore_bb_notes (basic_block);
548 static void fix_jump_move (rtx);
549 static void move_block_after_check (rtx);
550 static void move_succs (VEC(edge,gc) **, basic_block);
551 static void sched_remove_insn (rtx);
552 static void clear_priorities (rtx, rtx_vec_t *);
553 static void calc_priorities (rtx_vec_t);
554 static void add_jump_dependencies (rtx, rtx);
555 #ifdef ENABLE_CHECKING
556 static int has_edge_p (VEC(edge,gc) *, int);
557 static void check_cfg (rtx, rtx);
558 #endif
560 #endif /* INSN_SCHEDULING */
562 /* Point to state used for the current scheduling pass. */
563 struct haifa_sched_info *current_sched_info;
565 #ifndef INSN_SCHEDULING
566 void
567 schedule_insns (void)
570 #else
572 /* Do register pressure sensitive insn scheduling if the flag is
573 set. */
574 bool sched_pressure_p;
576 /* Map regno -> its pressure class. The map is defined only when
577 SCHED_PRESSURE_P is true. */
578 enum reg_class *sched_regno_pressure_class;
580 /* The current register pressure. Only elements corresponding to
581 pressure classes are defined. */
582 static int curr_reg_pressure[N_REG_CLASSES];
584 /* Saved value of the previous array. */
585 static int saved_reg_pressure[N_REG_CLASSES];
587 /* Registers living at the given scheduling point. */
588 static bitmap curr_reg_live;
590 /* Saved value of the previous array. */
591 static bitmap saved_reg_live;
593 /* Registers mentioned in the current region. */
594 static bitmap region_ref_regs;
596 /* Initiate register pressure related info for scheduling the current
597 region. Currently it only clears the registers mentioned in the
598 current region. */
599 void
600 sched_init_region_reg_pressure_info (void)
602 bitmap_clear (region_ref_regs);
605 /* Update current register pressure related info after birth (if
606 BIRTH_P) or death of register REGNO. */
607 static void
608 mark_regno_birth_or_death (int regno, bool birth_p)
610 enum reg_class pressure_class;
612 pressure_class = sched_regno_pressure_class[regno];
613 if (regno >= FIRST_PSEUDO_REGISTER)
615 if (pressure_class != NO_REGS)
617 if (birth_p)
619 bitmap_set_bit (curr_reg_live, regno);
620 curr_reg_pressure[pressure_class]
621 += (ira_reg_class_max_nregs
622 [pressure_class][PSEUDO_REGNO_MODE (regno)]);
624 else
626 bitmap_clear_bit (curr_reg_live, regno);
627 curr_reg_pressure[pressure_class]
628 -= (ira_reg_class_max_nregs
629 [pressure_class][PSEUDO_REGNO_MODE (regno)]);
633 else if (pressure_class != NO_REGS
634 && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
636 if (birth_p)
638 bitmap_set_bit (curr_reg_live, regno);
639 curr_reg_pressure[pressure_class]++;
641 else
643 bitmap_clear_bit (curr_reg_live, regno);
644 curr_reg_pressure[pressure_class]--;
649 /* Initiate current register pressure related info from living
650 registers given by LIVE. */
651 static void
652 initiate_reg_pressure_info (bitmap live)
654 int i;
655 unsigned int j;
656 bitmap_iterator bi;
658 for (i = 0; i < ira_pressure_classes_num; i++)
659 curr_reg_pressure[ira_pressure_classes[i]] = 0;
660 bitmap_clear (curr_reg_live);
661 EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
662 if (current_nr_blocks == 1 || bitmap_bit_p (region_ref_regs, j))
663 mark_regno_birth_or_death (j, true);
666 /* Mark registers in X as mentioned in the current region. */
667 static void
668 setup_ref_regs (rtx x)
670 int i, j, regno;
671 const RTX_CODE code = GET_CODE (x);
672 const char *fmt;
674 if (REG_P (x))
676 regno = REGNO (x);
677 if (HARD_REGISTER_NUM_P (regno))
678 bitmap_set_range (region_ref_regs, regno,
679 hard_regno_nregs[regno][GET_MODE (x)]);
680 else
681 bitmap_set_bit (region_ref_regs, REGNO (x));
682 return;
684 fmt = GET_RTX_FORMAT (code);
685 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
686 if (fmt[i] == 'e')
687 setup_ref_regs (XEXP (x, i));
688 else if (fmt[i] == 'E')
690 for (j = 0; j < XVECLEN (x, i); j++)
691 setup_ref_regs (XVECEXP (x, i, j));
695 /* Initiate current register pressure related info at the start of
696 basic block BB. */
697 static void
698 initiate_bb_reg_pressure_info (basic_block bb)
700 unsigned int i ATTRIBUTE_UNUSED;
701 rtx insn;
703 if (current_nr_blocks > 1)
704 FOR_BB_INSNS (bb, insn)
705 if (NONDEBUG_INSN_P (insn))
706 setup_ref_regs (PATTERN (insn));
707 initiate_reg_pressure_info (df_get_live_in (bb));
708 #ifdef EH_RETURN_DATA_REGNO
709 if (bb_has_eh_pred (bb))
710 for (i = 0; ; ++i)
712 unsigned int regno = EH_RETURN_DATA_REGNO (i);
714 if (regno == INVALID_REGNUM)
715 break;
716 if (! bitmap_bit_p (df_get_live_in (bb), regno))
717 mark_regno_birth_or_death (regno, true);
719 #endif
722 /* Save current register pressure related info. */
723 static void
724 save_reg_pressure (void)
726 int i;
728 for (i = 0; i < ira_pressure_classes_num; i++)
729 saved_reg_pressure[ira_pressure_classes[i]]
730 = curr_reg_pressure[ira_pressure_classes[i]];
731 bitmap_copy (saved_reg_live, curr_reg_live);
734 /* Restore saved register pressure related info. */
735 static void
736 restore_reg_pressure (void)
738 int i;
740 for (i = 0; i < ira_pressure_classes_num; i++)
741 curr_reg_pressure[ira_pressure_classes[i]]
742 = saved_reg_pressure[ira_pressure_classes[i]];
743 bitmap_copy (curr_reg_live, saved_reg_live);
746 /* Return TRUE if the register is dying after its USE. */
747 static bool
748 dying_use_p (struct reg_use_data *use)
750 struct reg_use_data *next;
752 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
753 if (NONDEBUG_INSN_P (next->insn)
754 && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
755 return false;
756 return true;
759 /* Print info about the current register pressure and its excess for
760 each pressure class. */
761 static void
762 print_curr_reg_pressure (void)
764 int i;
765 enum reg_class cl;
767 fprintf (sched_dump, ";;\t");
768 for (i = 0; i < ira_pressure_classes_num; i++)
770 cl = ira_pressure_classes[i];
771 gcc_assert (curr_reg_pressure[cl] >= 0);
772 fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl],
773 curr_reg_pressure[cl],
774 curr_reg_pressure[cl] - ira_available_class_regs[cl]);
776 fprintf (sched_dump, "\n");
779 /* Pointer to the last instruction scheduled. Used by rank_for_schedule,
780 so that insns independent of the last scheduled insn will be preferred
781 over dependent instructions. */
783 static rtx last_scheduled_insn;
785 /* Cached cost of the instruction. Use the function below to get the cost
786 of the insn. -1 here means that the field is not initialized. */
787 #define INSN_COST(INSN) (HID (INSN)->cost)
789 /* Compute cost of executing INSN.
790 This is the number of cycles between instruction issue and
791 instruction results. */
793 insn_cost (rtx insn)
795 int cost;
797 if (sel_sched_p ())
799 if (recog_memoized (insn) < 0)
800 return 0;
802 cost = insn_default_latency (insn);
803 if (cost < 0)
804 cost = 0;
806 return cost;
809 cost = INSN_COST (insn);
811 if (cost < 0)
813 /* A USE insn, or something else we don't need to
814 understand. We can't pass these directly to
815 result_ready_cost or insn_default_latency because it will
816 trigger a fatal error for unrecognizable insns. */
817 if (recog_memoized (insn) < 0)
819 INSN_COST (insn) = 0;
820 return 0;
822 else
824 cost = insn_default_latency (insn);
825 if (cost < 0)
826 cost = 0;
828 INSN_COST (insn) = cost;
832 return cost;
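/* For example, a USE insn that recog_memoized cannot recognize gets a
   cached cost of 0, while a recognized insn caches its default latency
   (clamped to be non-negative).  */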
835 /* Compute cost of dependence LINK.
836 This is the number of cycles between instruction issue and
837 instruction results.
838 ??? We also use this function to call recog_memoized on all insns. */
840 dep_cost_1 (dep_t link, dw_t dw)
842 rtx insn = DEP_PRO (link);
843 rtx used = DEP_CON (link);
844 int cost;
846 /* A USE insn should never require the value used to be computed.
847 This allows the computation of a function's result and parameter
848 values to overlap the return and call. We don't care about
849 the dependence cost when only decreasing register pressure. */
850 if (recog_memoized (used) < 0)
852 cost = 0;
853 recog_memoized (insn);
855 else
857 enum reg_note dep_type = DEP_TYPE (link);
859 cost = insn_cost (insn);
861 if (INSN_CODE (insn) >= 0)
863 if (dep_type == REG_DEP_ANTI)
864 cost = 0;
865 else if (dep_type == REG_DEP_OUTPUT)
867 cost = (insn_default_latency (insn)
868 - insn_default_latency (used));
869 if (cost <= 0)
870 cost = 1;
872 else if (bypass_p (insn))
873 cost = insn_latency (insn, used);
877 if (targetm.sched.adjust_cost_2)
878 cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
879 dw);
880 else if (targetm.sched.adjust_cost != NULL)
882 /* This variable is used for backward compatibility with the
883 targets. */
884 rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);
886 /* Make it self-cycled, so that if someone tries to walk over this
887 incomplete list they will be caught in an endless loop. */
888 XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
890 /* Targets use only REG_NOTE_KIND of the link. */
891 PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
893 cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
894 insn, cost);
896 free_INSN_LIST_node (dep_cost_rtx_link);
899 if (cost < 0)
900 cost = 0;
903 return cost;
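/* For example, before the target hooks get a chance to adjust the value:
   an anti dependence contributes 0 cycles, an output dependence at least
   1, and a true dependence the producer's full latency (or the bypass
   latency when bypass_p holds).  */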
906 /* Compute cost of dependence LINK.
907 This is the number of cycles between instruction issue and
908 instruction results. */
910 dep_cost (dep_t link)
912 return dep_cost_1 (link, 0);
915 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
916 INSN_PRIORITY explicitly. */
917 void
918 increase_insn_priority (rtx insn, int amount)
920 if (!sel_sched_p ())
922 /* We're dealing with haifa-sched.c INSN_PRIORITY. */
923 if (INSN_PRIORITY_KNOWN (insn))
924 INSN_PRIORITY (insn) += amount;
926 else
928 /* In sel-sched.c INSN_PRIORITY is not kept up to date.
929 Use EXPR_PRIORITY instead. */
930 sel_add_to_insn_priority (insn, amount);
934 /* Return 'true' if DEP should be included in priority calculations. */
935 static bool
936 contributes_to_priority_p (dep_t dep)
938 if (DEBUG_INSN_P (DEP_CON (dep))
939 || DEBUG_INSN_P (DEP_PRO (dep)))
940 return false;
942 /* Critical path is meaningful in block boundaries only. */
943 if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
944 DEP_PRO (dep)))
945 return false;
947 /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
948 then speculative instructions will be less likely to be
949 scheduled. That is because the priority of
950 their producers will increase, and thus the
951 producers will be more likely to be scheduled, thus
952 resolving the dependence. */
953 if (sched_deps_info->generate_spec_deps
954 && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
955 && (DEP_STATUS (dep) & SPECULATIVE))
956 return false;
958 return true;
961 /* Compute the number of nondebug forward deps of an insn. */
963 static int
964 dep_list_size (rtx insn)
966 sd_iterator_def sd_it;
967 dep_t dep;
968 int dbgcount = 0, nodbgcount = 0;
970 if (!MAY_HAVE_DEBUG_INSNS)
971 return sd_lists_size (insn, SD_LIST_FORW);
973 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
975 if (DEBUG_INSN_P (DEP_CON (dep)))
976 dbgcount++;
977 else if (!DEBUG_INSN_P (DEP_PRO (dep)))
978 nodbgcount++;
981 gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, SD_LIST_FORW));
983 return nodbgcount;
986 /* Compute the priority number for INSN. */
987 static int
988 priority (rtx insn)
990 if (! INSN_P (insn))
991 return 0;
993 /* We should not be interested in priority of an already scheduled insn. */
994 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
996 if (!INSN_PRIORITY_KNOWN (insn))
998 int this_priority = -1;
1000 if (dep_list_size (insn) == 0)
1001 /* ??? We should set INSN_PRIORITY to insn_cost when an insn has
1002 some forward deps but all of them are ignored by the
1003 contributes_to_priority hook. At the moment we set priority of
1004 such insn to 0. */
1005 this_priority = insn_cost (insn);
1006 else
1008 rtx prev_first, twin;
1009 basic_block rec;
1011 /* For recovery check instructions we calculate priority slightly
1012 differently than for normal instructions. Instead of walking
1013 through INSN_FORW_DEPS (check) list, we walk through
1014 INSN_FORW_DEPS list of each instruction in the corresponding
1015 recovery block. */
1017 /* Selective scheduling does not define RECOVERY_BLOCK macro. */
1018 rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
1019 if (!rec || rec == EXIT_BLOCK_PTR)
1021 prev_first = PREV_INSN (insn);
1022 twin = insn;
1024 else
1026 prev_first = NEXT_INSN (BB_HEAD (rec));
1027 twin = PREV_INSN (BB_END (rec));
1032 sd_iterator_def sd_it;
1033 dep_t dep;
1035 FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
1037 rtx next;
1038 int next_priority;
1040 next = DEP_CON (dep);
1042 if (BLOCK_FOR_INSN (next) != rec)
1044 int cost;
1046 if (!contributes_to_priority_p (dep))
1047 continue;
1049 if (twin == insn)
1050 cost = dep_cost (dep);
1051 else
1053 struct _dep _dep1, *dep1 = &_dep1;
1055 init_dep (dep1, insn, next, REG_DEP_ANTI);
1057 cost = dep_cost (dep1);
1060 next_priority = cost + priority (next);
1062 if (next_priority > this_priority)
1063 this_priority = next_priority;
1067 twin = PREV_INSN (twin);
1069 while (twin != prev_first);
1072 if (this_priority < 0)
1074 gcc_assert (this_priority == -1);
1076 this_priority = insn_cost (insn);
1079 INSN_PRIORITY (insn) = this_priority;
1080 INSN_PRIORITY_STATUS (insn) = 1;
1083 return INSN_PRIORITY (insn);
1086 /* Macros and functions for keeping the priority queue sorted, and
1087 dealing with queuing and dequeuing of instructions. */
1089 #define SCHED_SORT(READY, N_READY) \
1090 do { if ((N_READY) == 2) \
1091 swap_sort (READY, N_READY); \
1092 else if ((N_READY) > 2) \
1093 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
1094 while (0)
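/* For example, SCHED_SORT with exactly two ready insns reduces to a
   single swap_sort insertion, while three or more ready insns fall back
   to qsort with rank_for_schedule as the comparator.  */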
1096 /* Setup info about the current register pressure impact of scheduling
1097 INSN at the current scheduling point. */
1098 static void
1099 setup_insn_reg_pressure_info (rtx insn)
1101 int i, change, before, after, hard_regno;
1102 int excess_cost_change;
1103 enum machine_mode mode;
1104 enum reg_class cl;
1105 struct reg_pressure_data *pressure_info;
1106 int *max_reg_pressure;
1107 struct reg_use_data *use;
1108 static int death[N_REG_CLASSES];
1110 gcc_checking_assert (!DEBUG_INSN_P (insn));
1112 excess_cost_change = 0;
1113 for (i = 0; i < ira_pressure_classes_num; i++)
1114 death[ira_pressure_classes[i]] = 0;
1115 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1116 if (dying_use_p (use))
1118 cl = sched_regno_pressure_class[use->regno];
1119 if (use->regno < FIRST_PSEUDO_REGISTER)
1120 death[cl]++;
1121 else
1122 death[cl]
1123 += ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
1125 pressure_info = INSN_REG_PRESSURE (insn);
1126 max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
1127 gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
1128 for (i = 0; i < ira_pressure_classes_num; i++)
1130 cl = ira_pressure_classes[i];
1131 gcc_assert (curr_reg_pressure[cl] >= 0);
1132 change = (int) pressure_info[i].set_increase - death[cl];
1133 before = MAX (0, max_reg_pressure[i] - ira_available_class_regs[cl]);
1134 after = MAX (0, max_reg_pressure[i] + change
1135 - ira_available_class_regs[cl]);
1136 hard_regno = ira_class_hard_regs[cl][0];
1137 gcc_assert (hard_regno >= 0);
1138 mode = reg_raw_mode[hard_regno];
1139 excess_cost_change += ((after - before)
1140 * (ira_memory_move_cost[mode][cl][0]
1141 + ira_memory_move_cost[mode][cl][1]));
1143 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
1146 /* Returns a positive value if x is preferred; returns a negative value if
1147 y is preferred. Should never return 0, since that will make the sort
1148 unstable. */
1150 static int
1151 rank_for_schedule (const void *x, const void *y)
1153 rtx tmp = *(const rtx *) y;
1154 rtx tmp2 = *(const rtx *) x;
1155 rtx last;
1156 int tmp_class, tmp2_class;
1157 int val, priority_val, info_val;
1159 if (MAY_HAVE_DEBUG_INSNS)
1161 /* Schedule debug insns as early as possible. */
1162 if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
1163 return -1;
1164 else if (DEBUG_INSN_P (tmp2))
1165 return 1;
1168 /* The insn in a schedule group should be issued first. */
1169 if (flag_sched_group_heuristic &&
1170 SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
1171 return SCHED_GROUP_P (tmp2) ? 1 : -1;
1173 /* Make sure that priority of TMP and TMP2 are initialized. */
1174 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
1176 if (sched_pressure_p)
1178 int diff;
1180 /* Prefer insn whose scheduling results in the smallest register
1181 pressure excess. */
1182 if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
1183 + (INSN_TICK (tmp) > clock_var
1184 ? INSN_TICK (tmp) - clock_var : 0)
1185 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
1186 - (INSN_TICK (tmp2) > clock_var
1187 ? INSN_TICK (tmp2) - clock_var : 0))) != 0)
1188 return diff;
1192 if (sched_pressure_p
1193 && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var))
1195 if (INSN_TICK (tmp) <= clock_var)
1196 return -1;
1197 else if (INSN_TICK (tmp2) <= clock_var)
1198 return 1;
1199 else
1200 return INSN_TICK (tmp) - INSN_TICK (tmp2);
1202 /* Prefer insn with higher priority. */
1203 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
1205 if (flag_sched_critical_path_heuristic && priority_val)
1206 return priority_val;
1208 /* Prefer speculative insn with greater dependencies weakness. */
1209 if (flag_sched_spec_insn_heuristic && spec_info)
1211 ds_t ds1, ds2;
1212 dw_t dw1, dw2;
1213 int dw;
1215 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
1216 if (ds1)
1217 dw1 = ds_weak (ds1);
1218 else
1219 dw1 = NO_DEP_WEAK;
1221 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
1222 if (ds2)
1223 dw2 = ds_weak (ds2);
1224 else
1225 dw2 = NO_DEP_WEAK;
1227 dw = dw2 - dw1;
1228 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
1229 return dw;
1232 info_val = (*current_sched_info->rank) (tmp, tmp2);
1233 if (flag_sched_rank_heuristic && info_val)
1234 return info_val;
1236 if (flag_sched_last_insn_heuristic)
1238 last = last_scheduled_insn;
1240 if (DEBUG_INSN_P (last) && last != current_sched_info->prev_head)
1242 last = PREV_INSN (last);
1243 while (!NONDEBUG_INSN_P (last)
1244 && last != current_sched_info->prev_head);
1247 /* Compare insns based on their relation to the last scheduled
1248 non-debug insn. */
1249 if (flag_sched_last_insn_heuristic && NONDEBUG_INSN_P (last))
1251 dep_t dep1;
1252 dep_t dep2;
1254 /* Classify the instructions into three classes:
1255 1) Data dependent on last scheduled insn.
1256 2) Anti/Output dependent on last scheduled insn.
1257 3) Independent of last scheduled insn, or has latency of one.
1258 Choose the insn from the highest numbered class if different. */
1259 dep1 = sd_find_dep_between (last, tmp, true);
1261 if (dep1 == NULL || dep_cost (dep1) == 1)
1262 tmp_class = 3;
1263 else if (/* Data dependence. */
1264 DEP_TYPE (dep1) == REG_DEP_TRUE)
1265 tmp_class = 1;
1266 else
1267 tmp_class = 2;
1269 dep2 = sd_find_dep_between (last, tmp2, true);
1271 if (dep2 == NULL || dep_cost (dep2) == 1)
1272 tmp2_class = 3;
1273 else if (/* Data dependence. */
1274 DEP_TYPE (dep2) == REG_DEP_TRUE)
1275 tmp2_class = 1;
1276 else
1277 tmp2_class = 2;
1279 if ((val = tmp2_class - tmp_class))
1280 return val;
1283 /* Prefer the insn which has more later insns that depend on it.
1284 This gives the scheduler more freedom when scheduling later
1285 instructions at the expense of added register pressure. */
1287 val = (dep_list_size (tmp2) - dep_list_size (tmp));
1289 if (flag_sched_dep_count_heuristic && val != 0)
1290 return val;
1292 /* If insns are equally good, sort by INSN_LUID (original insn order),
1293 so that we make the sort stable. This minimizes instruction movement,
1294 thus minimizing sched's effect on debugging and cross-jumping. */
1295 return INSN_LUID (tmp) - INSN_LUID (tmp2);
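/* Note that, used with qsort via SCHED_SORT, this comparator leaves the
   most preferred insns at the highest indices of the ready vector, next
   to ready->first, which is where ready_remove_first picks them up.  */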
1298 /* Resort the array A in which only the element at index N may be out of order. */
1300 HAIFA_INLINE static void
1301 swap_sort (rtx *a, int n)
1303 rtx insn = a[n - 1];
1304 int i = n - 2;
1306 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
1308 a[i + 1] = a[i];
1309 i -= 1;
1311 a[i + 1] = insn;
1314 /* Add INSN to the insn queue so that it can be executed at least
1315 N_CYCLES after the currently executing insn. Preserve insns
1316 chain for debugging purposes. */
1318 HAIFA_INLINE static void
1319 queue_insn (rtx insn, int n_cycles)
1321 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1322 rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
1324 gcc_assert (n_cycles <= max_insn_queue_index);
1325 gcc_assert (!DEBUG_INSN_P (insn));
1327 insn_queue[next_q] = link;
1328 q_size += 1;
1330 if (sched_verbose >= 2)
1332 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
1333 (*current_sched_info->print_insn) (insn, 0));
1335 fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
1338 QUEUE_INDEX (insn) = next_q;
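/* For example, queue_insn (insn, 2) links INSN into
   insn_queue[NEXT_Q_AFTER (q_ptr, 2)], so queue_to_ready will move it
   back to the ready list once two more cycles have elapsed.  */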
1341 /* Remove INSN from queue. */
1342 static void
1343 queue_remove (rtx insn)
1345 gcc_assert (QUEUE_INDEX (insn) >= 0);
1346 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
1347 q_size--;
1348 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
1351 /* Return a pointer to the bottom of the ready list, i.e. the insn
1352 with the lowest priority. */
1354 rtx *
1355 ready_lastpos (struct ready_list *ready)
1357 gcc_assert (ready->n_ready >= 1);
1358 return ready->vec + ready->first - ready->n_ready + 1;
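/* For example, with n_ready == 3 and first == 9 the ready insns occupy
   vec[7..9]: vec[9] holds the highest-priority insn and vec[7], whose
   address is returned here, the lowest.  */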
1361 /* Add an element INSN to the ready list so that it ends up with the
1362 lowest/highest priority depending on FIRST_P. */
1364 HAIFA_INLINE static void
1365 ready_add (struct ready_list *ready, rtx insn, bool first_p)
1367 if (!first_p)
1369 if (ready->first == ready->n_ready)
1371 memmove (ready->vec + ready->veclen - ready->n_ready,
1372 ready_lastpos (ready),
1373 ready->n_ready * sizeof (rtx));
1374 ready->first = ready->veclen - 1;
1376 ready->vec[ready->first - ready->n_ready] = insn;
1378 else
1380 if (ready->first == ready->veclen - 1)
1382 if (ready->n_ready)
1383 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
1384 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
1385 ready_lastpos (ready),
1386 ready->n_ready * sizeof (rtx));
1387 ready->first = ready->veclen - 2;
1389 ready->vec[++(ready->first)] = insn;
1392 ready->n_ready++;
1393 if (DEBUG_INSN_P (insn))
1394 ready->n_debug++;
1396 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
1397 QUEUE_INDEX (insn) = QUEUE_READY;
1400 /* Remove the element with the highest priority from the ready list and
1401 return it. */
1403 HAIFA_INLINE static rtx
1404 ready_remove_first (struct ready_list *ready)
1406 rtx t;
1408 gcc_assert (ready->n_ready);
1409 t = ready->vec[ready->first--];
1410 ready->n_ready--;
1411 if (DEBUG_INSN_P (t))
1412 ready->n_debug--;
1413 /* If the queue becomes empty, reset it. */
1414 if (ready->n_ready == 0)
1415 ready->first = ready->veclen - 1;
1417 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
1418 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1420 return t;
1423 /* The following code implements multi-pass scheduling for the first
1424 cycle. In other words, we will try to choose the ready insn which
1425 permits starting the maximum number of insns on the same cycle. */
1427 /* Return a pointer to the element INDEX from the ready list. INDEX for
1428 insn with the highest priority is 0, and the lowest priority has
1429 N_READY - 1. */
1432 ready_element (struct ready_list *ready, int index)
1434 gcc_assert (ready->n_ready && index < ready->n_ready);
1436 return ready->vec[ready->first - index];
1439 /* Remove the element INDEX from the ready list and return it. INDEX
1440 for insn with the highest priority is 0, and the lowest priority
1441 has N_READY - 1. */
1443 HAIFA_INLINE static rtx
1444 ready_remove (struct ready_list *ready, int index)
1446 rtx t;
1447 int i;
1449 if (index == 0)
1450 return ready_remove_first (ready);
1451 gcc_assert (ready->n_ready && index < ready->n_ready);
1452 t = ready->vec[ready->first - index];
1453 ready->n_ready--;
1454 if (DEBUG_INSN_P (t))
1455 ready->n_debug--;
1456 for (i = index; i < ready->n_ready; i++)
1457 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
1458 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1459 return t;
1462 /* Remove INSN from the ready list. */
1463 static void
1464 ready_remove_insn (rtx insn)
1466 int i;
1468 for (i = 0; i < readyp->n_ready; i++)
1469 if (ready_element (readyp, i) == insn)
1471 ready_remove (readyp, i);
1472 return;
1474 gcc_unreachable ();
1477 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
1478 macro. */
1480 void
1481 ready_sort (struct ready_list *ready)
1483 int i;
1484 rtx *first = ready_lastpos (ready);
1486 if (sched_pressure_p)
1488 for (i = 0; i < ready->n_ready; i++)
1489 if (!DEBUG_INSN_P (first[i]))
1490 setup_insn_reg_pressure_info (first[i]);
1492 SCHED_SORT (first, ready->n_ready);
1495 /* PREV is an insn that is ready to execute. Adjust its priority if that
1496 will help shorten or lengthen register lifetimes as appropriate. Also
1497 provide a hook for the target to tweak itself. */
1499 HAIFA_INLINE static void
1500 adjust_priority (rtx prev)
1502 /* ??? There used to be code here to try and estimate how an insn
1503 affected register lifetimes, but it did it by looking at REG_DEAD
1504 notes, which we removed in schedule_region. Nor did it try to
1505 take into account register pressure or anything useful like that.
1507 Revisit when we have a machine model to work with and not before. */
1509 if (targetm.sched.adjust_priority)
1510 INSN_PRIORITY (prev) =
1511 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
1514 /* Advance DFA state STATE on one cycle. */
1515 void
1516 advance_state (state_t state)
1518 if (targetm.sched.dfa_pre_advance_cycle)
1519 targetm.sched.dfa_pre_advance_cycle ();
1521 if (targetm.sched.dfa_pre_cycle_insn)
1522 state_transition (state,
1523 targetm.sched.dfa_pre_cycle_insn ());
1525 state_transition (state, NULL);
1527 if (targetm.sched.dfa_post_cycle_insn)
1528 state_transition (state,
1529 targetm.sched.dfa_post_cycle_insn ());
1531 if (targetm.sched.dfa_post_advance_cycle)
1532 targetm.sched.dfa_post_advance_cycle ();
1535 /* Advance time on one cycle. */
1536 HAIFA_INLINE static void
1537 advance_one_cycle (void)
1539 advance_state (curr_state);
1540 if (sched_verbose >= 6)
1541 fprintf (sched_dump, ";;\tAdvanced a state.\n");
1544 /* Clock at which the previous instruction was issued. */
1545 static int last_clock_var;
1547 /* Update register pressure after scheduling INSN. */
1548 static void
1549 update_register_pressure (rtx insn)
1551 struct reg_use_data *use;
1552 struct reg_set_data *set;
1554 gcc_checking_assert (!DEBUG_INSN_P (insn));
1556 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1557 if (dying_use_p (use) && bitmap_bit_p (curr_reg_live, use->regno))
1558 mark_regno_birth_or_death (use->regno, false);
1559 for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
1560 mark_regno_birth_or_death (set->regno, true);
1563 /* Set up or update (if UPDATE_P) max register pressure (see its
1564 meaning in sched-int.h::_haifa_insn_data) for all current BB insns
1565 after insn AFTER. */
1566 static void
1567 setup_insn_max_reg_pressure (rtx after, bool update_p)
1569 int i, p;
1570 bool eq_p;
1571 rtx insn;
1572 static int max_reg_pressure[N_REG_CLASSES];
1574 save_reg_pressure ();
1575 for (i = 0; i < ira_pressure_classes_num; i++)
1576 max_reg_pressure[ira_pressure_classes[i]]
1577 = curr_reg_pressure[ira_pressure_classes[i]];
1578 for (insn = NEXT_INSN (after);
1579 insn != NULL_RTX && ! BARRIER_P (insn)
1580 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
1581 insn = NEXT_INSN (insn))
1582 if (NONDEBUG_INSN_P (insn))
1584 eq_p = true;
1585 for (i = 0; i < ira_pressure_classes_num; i++)
1587 p = max_reg_pressure[ira_pressure_classes[i]];
1588 if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
1590 eq_p = false;
1591 INSN_MAX_REG_PRESSURE (insn)[i]
1592 = max_reg_pressure[ira_pressure_classes[i]];
1595 if (update_p && eq_p)
1596 break;
1597 update_register_pressure (insn);
1598 for (i = 0; i < ira_pressure_classes_num; i++)
1599 if (max_reg_pressure[ira_pressure_classes[i]]
1600 < curr_reg_pressure[ira_pressure_classes[i]])
1601 max_reg_pressure[ira_pressure_classes[i]]
1602 = curr_reg_pressure[ira_pressure_classes[i]];
1604 restore_reg_pressure ();
1607 /* Update the current register pressure after scheduling INSN. Update
1608 also max register pressure for unscheduled insns of the current
1609 BB. */
1610 static void
1611 update_reg_and_insn_max_reg_pressure (rtx insn)
1613 int i;
1614 int before[N_REG_CLASSES];
1616 for (i = 0; i < ira_pressure_classes_num; i++)
1617 before[i] = curr_reg_pressure[ira_pressure_classes[i]];
1618 update_register_pressure (insn);
1619 for (i = 0; i < ira_pressure_classes_num; i++)
1620 if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
1621 break;
1622 if (i < ira_pressure_classes_num)
1623 setup_insn_max_reg_pressure (insn, true);
1626 /* Set up register pressure at the beginning of basic block BB whose
1627 insns start after insn AFTER. Set up also max register pressure
1628 for all insns of the basic block. */
1629 void
1630 sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
1632 gcc_assert (sched_pressure_p);
1633 initiate_bb_reg_pressure_info (bb);
1634 setup_insn_max_reg_pressure (after, false);
1637 /* INSN is the "currently executing insn". Launch each insn which was
1638 waiting on INSN. READY is the ready list which contains the insns
1639 that are ready to fire. CLOCK is the current cycle. The function
1640 returns necessary cycle advance after issuing the insn (it is not
1641 zero for insns in a schedule group). */
1643 static int
1644 schedule_insn (rtx insn)
1646 sd_iterator_def sd_it;
1647 dep_t dep;
1648 int i;
1649 int advance = 0;
1651 if (sched_verbose >= 1)
1653 struct reg_pressure_data *pressure_info;
1654 char buf[2048];
1656 print_insn (buf, insn, 0);
1657 buf[40] = 0;
1658 fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);
1660 if (recog_memoized (insn) < 0)
1661 fprintf (sched_dump, "nothing");
1662 else
1663 print_reservation (sched_dump, insn);
1664 pressure_info = INSN_REG_PRESSURE (insn);
1665 if (pressure_info != NULL)
1667 fputc (':', sched_dump);
1668 for (i = 0; i < ira_pressure_classes_num; i++)
1669 fprintf (sched_dump, "%s%+d(%d)",
1670 reg_class_names[ira_pressure_classes[i]],
1671 pressure_info[i].set_increase, pressure_info[i].change);
1673 fputc ('\n', sched_dump);
1676 if (sched_pressure_p && !DEBUG_INSN_P (insn))
1677 update_reg_and_insn_max_reg_pressure (insn);
1679 /* Scheduling instruction should have all its dependencies resolved and
1680 should have been removed from the ready list. */
1681 gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK));
1683 /* Reset debug insns invalidated by moving this insn. */
1684 if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
1685 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1686 sd_iterator_cond (&sd_it, &dep);)
1688 rtx dbg = DEP_PRO (dep);
1689 struct reg_use_data *use, *next;
1691 gcc_assert (DEBUG_INSN_P (dbg));
1693 if (sched_verbose >= 6)
1694 fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
1695 INSN_UID (dbg));
1697 /* ??? Rather than resetting the debug insn, we might be able
1698 to emit a debug temp before the just-scheduled insn, but
1699 this would involve checking that the expression at the
1700 point of the debug insn is equivalent to the expression
1701 before the just-scheduled insn. They might not be: the
1702 expression in the debug insn may depend on other insns not
1703 yet scheduled that set MEMs, REGs or even other debug
1704 insns. It's not clear that attempting to preserve debug
1705 information in these cases is worth the effort, given how
1706 uncommon these resets are and the likelihood that the debug
1707 temps introduced won't survive the schedule change. */
1708 INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
1709 df_insn_rescan (dbg);
1711 /* Unknown location doesn't use any registers. */
1712 for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
1714 struct reg_use_data *prev = use;
1716 /* Remove use from the cyclic next_regno_use chain first. */
1717 while (prev->next_regno_use != use)
1718 prev = prev->next_regno_use;
1719 prev->next_regno_use = use->next_regno_use;
1720 next = use->next_insn_use;
1721 free (use);
1723 INSN_REG_USE_LIST (dbg) = NULL;
1725 /* We delete rather than resolve these deps, otherwise we
1726 crash in sched_free_deps(), because forward deps are
1727 expected to be released before backward deps. */
1728 sd_delete_dep (sd_it);
1731 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
1732 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
1734 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
1735 if (INSN_TICK (insn) > clock_var)
1736 /* INSN has been prematurely moved from the queue to the ready list.
1737 This is possible only if following flag is set. */
1738 gcc_assert (flag_sched_stalled_insns);
1740 /* ??? Probably, if INSN is scheduled prematurely, we should leave
1741 INSN_TICK untouched. This is a machine-dependent issue, actually. */
1742 INSN_TICK (insn) = clock_var;
1744 /* Update dependent instructions. */
1745 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
1746 sd_iterator_cond (&sd_it, &dep);)
1748 rtx next = DEP_CON (dep);
1750 /* Resolve the dependence between INSN and NEXT.
1751 sd_resolve_dep () moves current dep to another list thus
1752 advancing the iterator. */
1753 sd_resolve_dep (sd_it);
1755 /* Don't bother trying to mark next as ready if insn is a debug
1756 insn. If insn is the last hard dependency, it will have
1757 already been discounted. */
1758 if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
1759 continue;
1761 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
1763 int effective_cost;
1765 effective_cost = try_ready (next);
1767 if (effective_cost >= 0
1768 && SCHED_GROUP_P (next)
1769 && advance < effective_cost)
1770 advance = effective_cost;
1772 else
1773 /* A check always has only one forward dependence (to the first insn in
1774 the recovery block), therefore, this will be executed only once. */
1776 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
1777 fix_recovery_deps (RECOVERY_BLOCK (insn));
1781 /* This is the place where the scheduler *basically* doesn't need backward
1782 and forward dependencies for INSN anymore. Nevertheless they are used in
1783 heuristics in rank_for_schedule (), early_queue_to_ready () and in
1784 some targets (e.g. rs6000). Thus the earliest place where we *can*
1785 remove dependencies is after the targetm.sched.finish () call in
1786 schedule_block (). But, on the other hand, the safest place to remove
1787 dependencies is when we are finishing scheduling the entire region. As we
1788 don't generate [many] dependencies during scheduling itself, we won't
1789 need memory until the beginning of the next region.
1790 Bottom line: Dependencies are removed for all insns at the end of
1791 scheduling the region. */
1793 /* Annotate the instruction with issue information -- TImode
1794 indicates that the instruction is expected not to be able
1795 to issue on the same cycle as the previous insn. A machine
1796 may use this information to decide how the instruction should
1797 be aligned. */
1798 if (issue_rate > 1
1799 && GET_CODE (PATTERN (insn)) != USE
1800 && GET_CODE (PATTERN (insn)) != CLOBBER
1801 && !DEBUG_INSN_P (insn))
1803 if (reload_completed)
1804 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
1805 last_clock_var = clock_var;
1808 return advance;
1811 /* Functions for handling of notes. */
1813 /* Add note list that ends on FROM_END to the end of TO_ENDP. */
1814 void
1815 concat_note_lists (rtx from_end, rtx *to_endp)
1817 rtx from_start;
1819 /* It's easy when we have nothing to concat. */
1820 if (from_end == NULL)
1821 return;
1823 /* It's also easy when destination is empty. */
1824 if (*to_endp == NULL)
1826 *to_endp = from_end;
1827 return;
1830 from_start = from_end;
1831 while (PREV_INSN (from_start) != NULL)
1832 from_start = PREV_INSN (from_start);
1834 PREV_INSN (from_start) = *to_endp;
1835 NEXT_INSN (*to_endp) = from_start;
1836 *to_endp = from_end;
1839 /* Delete notes between HEAD and TAIL and put them in the chain
1840 of notes ended by NOTE_LIST. */
1841 void
1842 remove_notes (rtx head, rtx tail)
1844 rtx next_tail, insn, next;
1846 note_list = 0;
1847 if (head == tail && !INSN_P (head))
1848 return;
1850 next_tail = NEXT_INSN (tail);
1851 for (insn = head; insn != next_tail; insn = next)
1853 next = NEXT_INSN (insn);
1854 if (!NOTE_P (insn))
1855 continue;
1857 switch (NOTE_KIND (insn))
1859 case NOTE_INSN_BASIC_BLOCK:
1860 continue;
1862 case NOTE_INSN_EPILOGUE_BEG:
1863 if (insn != tail)
1865 remove_insn (insn);
1866 add_reg_note (next, REG_SAVE_NOTE,
1867 GEN_INT (NOTE_INSN_EPILOGUE_BEG));
1868 break;
1870 /* FALLTHRU */
1872 default:
1873 remove_insn (insn);
1875 /* Add the note to list that ends at NOTE_LIST. */
1876 PREV_INSN (insn) = note_list;
1877 NEXT_INSN (insn) = NULL_RTX;
1878 if (note_list)
1879 NEXT_INSN (note_list) = insn;
1880 note_list = insn;
1881 break;
1884 gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
1889 /* Return the head and tail pointers of ebb starting at BEG and ending
1890 at END. */
1891 void
1892 get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
1894 rtx beg_head = BB_HEAD (beg);
1895 rtx beg_tail = BB_END (beg);
1896 rtx end_head = BB_HEAD (end);
1897 rtx end_tail = BB_END (end);
1899 /* Don't include any notes or labels at the beginning of the BEG
1900 basic block, or notes at the end of the END basic block. */
1902 if (LABEL_P (beg_head))
1903 beg_head = NEXT_INSN (beg_head);
1905 while (beg_head != beg_tail)
1906 if (NOTE_P (beg_head))
1907 beg_head = NEXT_INSN (beg_head);
1908 else if (DEBUG_INSN_P (beg_head))
1910 rtx note, next;
1912 for (note = NEXT_INSN (beg_head);
1913 note != beg_tail;
1914 note = next)
1916 next = NEXT_INSN (note);
1917 if (NOTE_P (note))
1919 if (sched_verbose >= 9)
1920 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
1922 reorder_insns_nobb (note, note, PREV_INSN (beg_head));
1924 if (BLOCK_FOR_INSN (note) != beg)
1925 df_insn_change_bb (note, beg);
1927 else if (!DEBUG_INSN_P (note))
1928 break;
1931 break;
1933 else
1934 break;
1936 *headp = beg_head;
1938 if (beg == end)
1939 end_head = beg_head;
1940 else if (LABEL_P (end_head))
1941 end_head = NEXT_INSN (end_head);
1943 while (end_head != end_tail)
1944 if (NOTE_P (end_tail))
1945 end_tail = PREV_INSN (end_tail);
1946 else if (DEBUG_INSN_P (end_tail))
1948 rtx note, prev;
1950 for (note = PREV_INSN (end_tail);
1951 note != end_head;
1952 note = prev)
1954 prev = PREV_INSN (note);
1955 if (NOTE_P (note))
1957 if (sched_verbose >= 9)
1958 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
1960 reorder_insns_nobb (note, note, end_tail);
1962 if (end_tail == BB_END (end))
1963 BB_END (end) = note;
1965 if (BLOCK_FOR_INSN (note) != end)
1966 df_insn_change_bb (note, end);
1968 else if (!DEBUG_INSN_P (note))
1969 break;
1972 break;
1974 else
1975 break;
1977 *tailp = end_tail;
1980 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
1982 int
1983 no_real_insns_p (const_rtx head, const_rtx tail)
1985 while (head != NEXT_INSN (tail))
1987 if (!NOTE_P (head) && !LABEL_P (head))
1988 return 0;
1989 head = NEXT_INSN (head);
1991 return 1;
1994 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
1995 previously found among the insns. Insert them just before HEAD. */
1996 rtx
1997 restore_other_notes (rtx head, basic_block head_bb)
1999 if (note_list != 0)
2001 rtx note_head = note_list;
2003 if (head)
2004 head_bb = BLOCK_FOR_INSN (head);
2005 else
2006 head = NEXT_INSN (bb_note (head_bb));
2008 while (PREV_INSN (note_head))
2010 set_block_for_insn (note_head, head_bb);
2011 note_head = PREV_INSN (note_head);
2013 /* The loop above missed this note. */
2014 set_block_for_insn (note_head, head_bb);
2016 PREV_INSN (note_head) = PREV_INSN (head);
2017 NEXT_INSN (PREV_INSN (head)) = note_head;
2018 PREV_INSN (head) = note_list;
2019 NEXT_INSN (note_list) = head;
2021 if (BLOCK_FOR_INSN (head) != head_bb)
2022 BB_END (head_bb) = note_list;
2024 head = note_head;
2027 return head;
2030 /* Move insns that became ready to fire from queue to ready list. */
2032 static void
2033 queue_to_ready (struct ready_list *ready)
2035 rtx insn;
2036 rtx link;
2037 rtx skip_insn;
2039 q_ptr = NEXT_Q (q_ptr);
2041 if (dbg_cnt (sched_insn) == false)
2042 /* If debug counter is activated do not requeue insn next after
2043 last_scheduled_insn. */
2044 skip_insn = next_nonnote_nondebug_insn (last_scheduled_insn);
2045 else
2046 skip_insn = NULL_RTX;
2048 /* Add all pending insns that can be scheduled without stalls to the
2049 ready list. */
2050 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
2052 insn = XEXP (link, 0);
2053 q_size -= 1;
2055 if (sched_verbose >= 2)
2056 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
2057 (*current_sched_info->print_insn) (insn, 0));
2059 /* If the ready list is full, delay the insn for 1 cycle.
2060 See the comment in schedule_block for the rationale. */
2061 if (!reload_completed
2062 && ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
2063 && !SCHED_GROUP_P (insn)
2064 && insn != skip_insn)
2066 if (sched_verbose >= 2)
2067 fprintf (sched_dump, "requeued because ready full\n");
2068 queue_insn (insn, 1);
2070 else
2072 ready_add (ready, insn, false);
2073 if (sched_verbose >= 2)
2074 fprintf (sched_dump, "moving to ready without stalls\n");
2077 free_INSN_LIST_list (&insn_queue[q_ptr]);
2079 /* If there are no ready insns, stall until one is ready and add all
2080 of the pending insns at that point to the ready list. */
2081 if (ready->n_ready == 0)
2083 int stalls;
2085 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
2087 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2089 for (; link; link = XEXP (link, 1))
2091 insn = XEXP (link, 0);
2092 q_size -= 1;
2094 if (sched_verbose >= 2)
2095 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
2096 (*current_sched_info->print_insn) (insn, 0));
2098 ready_add (ready, insn, false);
2099 if (sched_verbose >= 2)
2100 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
2102 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
2104 advance_one_cycle ();
2106 break;
2109 advance_one_cycle ();
2112 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
2113 clock_var += stalls;
2117 /* Used by early_queue_to_ready. Determines whether it is "ok" to
2118 prematurely move INSN from the queue to the ready list. Currently,
2119 if a target defines the hook 'is_costly_dependence', this function
2120 uses the hook to check whether there exist any dependences which are
2121 considered costly by the target, between INSN and other insns that
2122 have already been scheduled. Dependences are checked up to Y cycles
2123 back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
2124 controlling this value.
2125 (Other considerations could be taken into account, instead or in
2126 addition, depending on user flags and target hooks.) */
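/* Roughly: with -fsched-stalled-insns-dep=2 the loop below walks back over
   at most two dispatch groups (a group ends at an insn left in TImode by
   schedule_insn above) and rejects INSN as soon as the target reports a
   costly dependence against any already-scheduled insn seen on the way.  */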
2128 static bool
2129 ok_for_early_queue_removal (rtx insn)
2131 int n_cycles;
2132 rtx prev_insn = last_scheduled_insn;
2134 if (targetm.sched.is_costly_dependence)
2136 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
2138 for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
2140 int cost;
2142 if (prev_insn == current_sched_info->prev_head)
2144 prev_insn = NULL;
2145 break;
2148 if (!NOTE_P (prev_insn))
2150 dep_t dep;
2152 dep = sd_find_dep_between (prev_insn, insn, true);
2154 if (dep != NULL)
2156 cost = dep_cost (dep);
2158 if (targetm.sched.is_costly_dependence (dep, cost,
2159 flag_sched_stalled_insns_dep - n_cycles))
2160 return false;
2164 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
2165 break;
2168 if (!prev_insn)
2169 break;
2170 prev_insn = PREV_INSN (prev_insn);
2174 return true;
2178 /* Remove insns from the queue, before they become "ready" with respect
2179 to FU latency considerations. */
2181 static int
2182 early_queue_to_ready (state_t state, struct ready_list *ready)
2184 rtx insn;
2185 rtx link;
2186 rtx next_link;
2187 rtx prev_link;
2188 bool move_to_ready;
2189 int cost;
2190 state_t temp_state = alloca (dfa_state_size);
2191 int stalls;
2192 int insns_removed = 0;
2195 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
2196 function:
2198 X == 0: There is no limit on how many queued insns can be removed
2199 prematurely. (flag_sched_stalled_insns = -1).
2201 X >= 1: Only X queued insns can be removed prematurely in each
2202 invocation. (flag_sched_stalled_insns = X).
2204 Otherwise: Early queue removal is disabled.
2205 (flag_sched_stalled_insns = 0)
2208 if (! flag_sched_stalled_insns)
2209 return 0;
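/* Scan every queue slot.  An insn whose DFA transition would succeed right
   now (negative cost below) and that passes ok_for_early_queue_removal is
   moved to the ready list; at most flag_sched_stalled_insns insns are moved
   per call (no limit when the flag is -1).  */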
2211 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
2213 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2215 if (sched_verbose > 6)
2216 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
2218 prev_link = 0;
2219 while (link)
2221 next_link = XEXP (link, 1);
2222 insn = XEXP (link, 0);
2223 if (insn && sched_verbose > 6)
2224 print_rtl_single (sched_dump, insn);
2226 memcpy (temp_state, state, dfa_state_size);
2227 if (recog_memoized (insn) < 0)
2228 /* Use a non-negative cost to indicate that the insn is not ready,
2229 to avoid an infinite Q->R->Q->R... cycle. */
2230 cost = 0;
2231 else
2232 cost = state_transition (temp_state, insn);
2234 if (sched_verbose >= 6)
2235 fprintf (sched_dump, "transition cost = %d\n", cost);
2237 move_to_ready = false;
2238 if (cost < 0)
2240 move_to_ready = ok_for_early_queue_removal (insn);
2241 if (move_to_ready == true)
2243 /* move from Q to R */
2244 q_size -= 1;
2245 ready_add (ready, insn, false);
2247 if (prev_link)
2248 XEXP (prev_link, 1) = next_link;
2249 else
2250 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
2252 free_INSN_LIST_node (link);
2254 if (sched_verbose >= 2)
2255 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
2256 (*current_sched_info->print_insn) (insn, 0));
2258 insns_removed++;
2259 if (insns_removed == flag_sched_stalled_insns)
2260 /* Remove no more than flag_sched_stalled_insns insns
2261 from Q at a time. */
2262 return insns_removed;
2266 if (move_to_ready == false)
2267 prev_link = link;
2269 link = next_link;
2270 } /* while link */
2271 } /* if link */
2273 } /* for stalls.. */
2275 return insns_removed;
2279 /* Print the ready list for debugging purposes. Callable from debugger. */
2281 static void
2282 debug_ready_list (struct ready_list *ready)
2284 rtx *p;
2285 int i;
2287 if (ready->n_ready == 0)
2289 fprintf (sched_dump, "\n");
2290 return;
2293 p = ready_lastpos (ready);
2294 for (i = 0; i < ready->n_ready; i++)
2296 fprintf (sched_dump, " %s:%d",
2297 (*current_sched_info->print_insn) (p[i], 0),
2298 INSN_LUID (p[i]));
2299 if (sched_pressure_p)
2300 fprintf (sched_dump, "(cost=%d",
2301 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
2302 if (INSN_TICK (p[i]) > clock_var)
2303 fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
2304 if (sched_pressure_p)
2305 fprintf (sched_dump, ")");
2307 fprintf (sched_dump, "\n");
2310 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
2311 NOTEs. This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
2312 replaces the epilogue note in the correct basic block. */
2313 void
2314 reemit_notes (rtx insn)
2316 rtx note, last = insn;
2318 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2320 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
2322 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
2324 last = emit_note_before (note_type, last);
2325 remove_note (insn, note);
2330 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
2331 static void
2332 move_insn (rtx insn, rtx last, rtx nt)
2334 if (PREV_INSN (insn) != last)
2336 basic_block bb;
2337 rtx note;
2338 int jump_p = 0;
2340 bb = BLOCK_FOR_INSN (insn);
2342 /* BB_HEAD is either LABEL or NOTE. */
2343 gcc_assert (BB_HEAD (bb) != insn);
2345 if (BB_END (bb) == insn)
2346 /* If this is last instruction in BB, move end marker one
2347 instruction up. */
2349 /* Jumps are always placed at the end of basic block. */
2350 jump_p = control_flow_insn_p (insn);
2352 gcc_assert (!jump_p
2353 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
2354 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
2355 || (common_sched_info->sched_pass_id
2356 == SCHED_EBB_PASS));
2358 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
2360 BB_END (bb) = PREV_INSN (insn);
2363 gcc_assert (BB_END (bb) != last);
2365 if (jump_p)
2366 /* We move the block note along with jump. */
2368 gcc_assert (nt);
2370 note = NEXT_INSN (insn);
2371 while (NOTE_NOT_BB_P (note) && note != nt)
2372 note = NEXT_INSN (note);
2374 if (note != nt
2375 && (LABEL_P (note)
2376 || BARRIER_P (note)))
2377 note = NEXT_INSN (note);
2379 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
2381 else
2382 note = insn;
2384 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
2385 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
2387 NEXT_INSN (note) = NEXT_INSN (last);
2388 PREV_INSN (NEXT_INSN (last)) = note;
2390 NEXT_INSN (last) = insn;
2391 PREV_INSN (insn) = last;
2393 bb = BLOCK_FOR_INSN (last);
2395 if (jump_p)
2397 fix_jump_move (insn);
2399 if (BLOCK_FOR_INSN (insn) != bb)
2400 move_block_after_check (insn);
2402 gcc_assert (BB_END (bb) == last);
2405 df_insn_change_bb (insn, bb);
2407 /* Update BB_END, if needed. */
2408 if (BB_END (bb) == last)
2409 BB_END (bb) = insn;
2412 SCHED_GROUP_P (insn) = 0;
2415 /* Return true if scheduling INSN will finish current clock cycle. */
2416 static bool
2417 insn_finishes_cycle_p (rtx insn)
2419 if (SCHED_GROUP_P (insn))
2420 /* After issuing INSN, the rest of the sched_group will be forced to
2421 issue in order. Don't make any plans for the rest of the cycle. */
2422 return true;
2424 /* Finishing the block will, apparently, finish the cycle. */
2425 if (current_sched_info->insn_finishes_block_p
2426 && current_sched_info->insn_finishes_block_p (insn))
2427 return true;
2429 return false;
2432 /* Define type for target data used in multipass scheduling. */
2433 #ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
2434 # define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
2435 #endif
2436 typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;
2438 /* The following structure describes an entry in the stack of choices. */
2439 struct choice_entry
2441 /* Ordinal number of the issued insn in the ready queue. */
2442 int index;
2443 /* The number of remaining insns whose issue we should still try. */
2444 int rest;
2445 /* The number of issued essential insns. */
2446 int n;
2447 /* State after issuing the insn. */
2448 state_t state;
2449 /* Target-specific data. */
2450 first_cycle_multipass_data_t target_data;
2453 /* The following array is used to implement a stack of choices used in
2454 function max_issue. */
2455 static struct choice_entry *choice_stack;
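/* Each entry above choice_stack[0] records one insn tentatively issued
   during the lookahead search in max_issue: INDEX is its position in the
   ready list, STATE is the DFA state after issuing it, and N counts the
   issued insns that actually changed the DFA state.  choice_stack[0] keeps
   the state at the start of the cycle, which max_issue restores before
   returning.  */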
2457 /* The following variable holds the number of essential insns issued on
2458 the current cycle. An insn is essential if it changes the
2459 processor's state. */
2460 int cycle_issued_insns;
2462 /* This holds the value of the target dfa_lookahead hook. */
2463 int dfa_lookahead;
2465 /* The following variable holds the maximal number of tries of issuing
2466 insns for the first cycle multipass insn scheduling. We define
2467 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
2468 need this constraint if all real insns (with non-negative codes)
2469 had reservations, because in that case the algorithm complexity is
2470 O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the DFA descriptions
2471 might be incomplete and such insns might occur. For such
2472 descriptions, the complexity of the algorithm (without the constraint)
2473 could reach DFA_LOOKAHEAD ** N, where N is the queue length. */
2474 static int max_lookahead_tries;
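/* For example (see the initialization in max_issue below), with
   dfa_lookahead == 4 and issue_rate == 2 this limit works out to
   100 * 4 * 4 == 1600 tries.  */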
2476 /* The following value is the value of the hook
2477 `first_cycle_multipass_dfa_lookahead' at the last call of
2478 `max_issue'. */
2479 static int cached_first_cycle_multipass_dfa_lookahead = 0;
2481 /* The following value is the value of `issue_rate' at the last call of
2482 `sched_init'. */
2483 static int cached_issue_rate = 0;
2485 /* The following function returns the maximal (or close to maximal)
2486 number of insns which can be issued on the same cycle, one of which
2487 is the insn with the best rank (the first insn in READY). To do
2488 this, the function tries different samples of ready insns. READY
2489 is the current queue `ready'. The global array READY_TRY reflects which
2490 insns are already issued in this try. The function stops immediately
2491 if it finds a solution in which all instructions can be issued.
2492 INDEX will contain the index of the best insn in READY. The
2493 function is used only for first cycle multipass scheduling.
2495 PRIVILEGED_N >= 0
2497 This function expects recognized insns only. All USEs,
2498 CLOBBERs, etc must be filtered elsewhere. */
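/* Roughly, the loop below performs a depth-first search over CHOICE_STACK:
   each level tentatively issues one more ready insn that the DFA accepts on
   the current cycle; when nothing more fits (lookahead budget spent, ready
   list exhausted, or issue_rate reached) it records the solution if one of
   the first PRIVILEGED_N insns was issued (or PRIVILEGED_N is zero), and
   then backtracks.  */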
2499 int
2500 max_issue (struct ready_list *ready, int privileged_n, state_t state,
2501 bool first_cycle_insn_p, int *index)
2503 int n, i, all, n_ready, best, delay, tries_num;
2504 int more_issue;
2505 struct choice_entry *top;
2506 rtx insn;
2508 n_ready = ready->n_ready;
2509 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
2510 && privileged_n <= n_ready);
2512 /* Init MAX_LOOKAHEAD_TRIES. */
2513 if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
2515 cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
2516 max_lookahead_tries = 100;
2517 for (i = 0; i < issue_rate; i++)
2518 max_lookahead_tries *= dfa_lookahead;
2521 /* Init max_points. */
2522 more_issue = issue_rate - cycle_issued_insns;
2523 gcc_assert (more_issue >= 0);
2525 /* The number of the issued insns in the best solution. */
2526 best = 0;
2528 top = choice_stack;
2530 /* Set initial state of the search. */
2531 memcpy (top->state, state, dfa_state_size);
2532 top->rest = dfa_lookahead;
2533 top->n = 0;
2534 if (targetm.sched.first_cycle_multipass_begin)
2535 targetm.sched.first_cycle_multipass_begin (&top->target_data,
2536 ready_try, n_ready,
2537 first_cycle_insn_p);
2539 /* Count the number of the insns to search among. */
2540 for (all = i = 0; i < n_ready; i++)
2541 if (!ready_try [i])
2542 all++;
2544 /* I is the index of the insn to try next. */
2545 i = 0;
2546 tries_num = 0;
2547 for (;;)
2549 if (/* If we've reached a dead end or searched enough of what we have
2550 been asked... */
2551 top->rest == 0
2552 /* or have nothing else to try... */
2553 || i >= n_ready
2554 /* or should not issue more. */
2555 || top->n >= more_issue)
2557 /* ??? (... || i == n_ready). */
2558 gcc_assert (i <= n_ready);
2560 /* We should not issue more than issue_rate instructions. */
2561 gcc_assert (top->n <= more_issue);
2563 if (top == choice_stack)
2564 break;
2566 if (best < top - choice_stack)
2568 if (privileged_n)
2570 n = privileged_n;
2571 /* Try to find issued privileged insn. */
2572 while (n && !ready_try[--n]);
2575 if (/* If all insns are equally good... */
2576 privileged_n == 0
2577 /* Or a privileged insn will be issued. */
2578 || ready_try[n])
2579 /* Then we have a solution. */
2581 best = top - choice_stack;
2582 /* This is the index of the insn issued first in this
2583 solution. */
2584 *index = choice_stack [1].index;
2585 if (top->n == more_issue || best == all)
2586 break;
2590 /* Set ready-list index to point to the last insn
2591 ('i++' below will advance it to the next insn). */
2592 i = top->index;
2594 /* Backtrack. */
2595 ready_try [i] = 0;
2597 if (targetm.sched.first_cycle_multipass_backtrack)
2598 targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
2599 ready_try, n_ready);
2601 top--;
2602 memcpy (state, top->state, dfa_state_size);
2604 else if (!ready_try [i])
2606 tries_num++;
2607 if (tries_num > max_lookahead_tries)
2608 break;
2609 insn = ready_element (ready, i);
2610 delay = state_transition (state, insn);
2611 if (delay < 0)
2613 if (state_dead_lock_p (state)
2614 || insn_finishes_cycle_p (insn))
2615 /* We won't issue any more instructions in the next
2616 choice_state. */
2617 top->rest = 0;
2618 else
2619 top->rest--;
2621 n = top->n;
2622 if (memcmp (top->state, state, dfa_state_size) != 0)
2623 n++;
2625 /* Advance to the next choice_entry. */
2626 top++;
2627 /* Initialize it. */
2628 top->rest = dfa_lookahead;
2629 top->index = i;
2630 top->n = n;
2631 memcpy (top->state, state, dfa_state_size);
2632 ready_try [i] = 1;
2634 if (targetm.sched.first_cycle_multipass_issue)
2635 targetm.sched.first_cycle_multipass_issue (&top->target_data,
2636 ready_try, n_ready,
2637 insn,
2638 &((top - 1)
2639 ->target_data));
2641 i = -1;
2645 /* Increase ready-list index. */
2646 i++;
2649 if (targetm.sched.first_cycle_multipass_end)
2650 targetm.sched.first_cycle_multipass_end (best != 0
2651 ? &choice_stack[1].target_data
2652 : NULL);
2654 /* Restore the original state of the DFA. */
2655 memcpy (state, choice_stack->state, dfa_state_size);
2657 return best;
2660 /* The following function chooses an insn from READY and modifies
2661 READY. It is used only for first
2662 cycle multipass scheduling.
2663 Return:
2664 -1 if cycle should be advanced,
2665 0 if INSN_PTR is set to point to the desirable insn,
2666 1 if choose_ready () should be restarted without advancing the cycle. */
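/* The main issue loop in schedule_block below relies on this contract:
   it finishes the current cycle on -1 and simply calls choose_ready
   again on 1.  */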
2667 static int
2668 choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
2669 rtx *insn_ptr)
2671 int lookahead;
2673 if (dbg_cnt (sched_insn) == false)
2675 rtx insn;
2677 insn = next_nonnote_insn (last_scheduled_insn);
2679 if (QUEUE_INDEX (insn) == QUEUE_READY)
2680 /* INSN is in the ready_list. */
2682 ready_remove_insn (insn);
2683 *insn_ptr = insn;
2684 return 0;
2687 /* INSN is in the queue. Advance cycle to move it to the ready list. */
2688 return -1;
2691 lookahead = 0;
2693 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2694 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2695 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
2696 || DEBUG_INSN_P (ready_element (ready, 0)))
2698 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
2699 *insn_ptr = ready_remove_first_dispatch (ready);
2700 else
2701 *insn_ptr = ready_remove_first (ready);
2703 return 0;
2705 else
2707 /* Try to choose the better insn. */
2708 int index = 0, i, n;
2709 rtx insn;
2710 int try_data = 1, try_control = 1;
2711 ds_t ts;
2713 insn = ready_element (ready, 0);
2714 if (INSN_CODE (insn) < 0)
2716 *insn_ptr = ready_remove_first (ready);
2717 return 0;
2720 if (spec_info
2721 && spec_info->flags & (PREFER_NON_DATA_SPEC
2722 | PREFER_NON_CONTROL_SPEC))
2724 for (i = 0, n = ready->n_ready; i < n; i++)
2726 rtx x;
2727 ds_t s;
2729 x = ready_element (ready, i);
2730 s = TODO_SPEC (x);
2732 if (spec_info->flags & PREFER_NON_DATA_SPEC
2733 && !(s & DATA_SPEC))
2735 try_data = 0;
2736 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2737 || !try_control)
2738 break;
2741 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2742 && !(s & CONTROL_SPEC))
2744 try_control = 0;
2745 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2746 break;
2751 ts = TODO_SPEC (insn);
2752 if ((ts & SPECULATIVE)
2753 && (((!try_data && (ts & DATA_SPEC))
2754 || (!try_control && (ts & CONTROL_SPEC)))
2755 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2756 && !targetm.sched
2757 .first_cycle_multipass_dfa_lookahead_guard_spec (insn))))
2758 /* Discard speculative instruction that stands first in the ready
2759 list. */
2761 change_queue_index (insn, 1);
2762 return 1;
2765 ready_try[0] = 0;
2767 for (i = 1; i < ready->n_ready; i++)
2769 insn = ready_element (ready, i);
2771 ready_try [i]
2772 = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2773 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC)));
2776 /* Let the target filter the search space. */
2777 for (i = 1; i < ready->n_ready; i++)
2778 if (!ready_try[i])
2780 insn = ready_element (ready, i);
2782 /* If this insn is recognizable we should have already
2783 recognized it earlier.
2784 ??? Not very clear where this is supposed to be done.
2785 See dep_cost_1. */
2786 gcc_checking_assert (INSN_CODE (insn) >= 0
2787 || recog_memoized (insn) < 0);
2789 ready_try [i]
2790 = (/* INSN_CODE check can be omitted here as it is also done later
2791 in max_issue (). */
2792 INSN_CODE (insn) < 0
2793 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2794 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2795 (insn)));
2798 if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
2800 *insn_ptr = ready_remove_first (ready);
2801 if (sched_verbose >= 4)
2802 fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
2803 (*current_sched_info->print_insn) (*insn_ptr, 0));
2804 return 0;
2806 else
2808 if (sched_verbose >= 4)
2809 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
2810 (*current_sched_info->print_insn)
2811 (ready_element (ready, index), 0));
2813 *insn_ptr = ready_remove (ready, index);
2814 return 0;
2819 /* Use forward list scheduling to rearrange insns of block pointed to by
2820 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2821 region. */
2823 void
2824 schedule_block (basic_block *target_bb)
2826 int i;
2827 bool first_cycle_insn_p;
2828 int can_issue_more;
2829 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2830 int sort_p, advance, start_clock_var;
2832 /* Head/tail info for this block. */
2833 rtx prev_head = current_sched_info->prev_head;
2834 rtx next_tail = current_sched_info->next_tail;
2835 rtx head = NEXT_INSN (prev_head);
2836 rtx tail = PREV_INSN (next_tail);
2838 /* We used to have code to avoid getting parameters moved from hard
2839 argument registers into pseudos.
2841 However, it was removed when it proved to be of marginal benefit
2842 and caused problems because schedule_block and compute_forward_dependences
2843 had different notions of what the "head" insn was. */
2845 gcc_assert (head != tail || INSN_P (head));
2847 haifa_recovery_bb_recently_added_p = false;
2849 /* Debug info. */
2850 if (sched_verbose)
2851 dump_new_block_header (0, *target_bb, head, tail);
2853 state_reset (curr_state);
2855 /* Clear the ready list. */
2856 ready.first = ready.veclen - 1;
2857 ready.n_ready = 0;
2858 ready.n_debug = 0;
2860 /* It is used for first cycle multipass scheduling. */
2861 temp_state = alloca (dfa_state_size);
2863 if (targetm.sched.init)
2864 targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
2866 /* We start inserting insns after PREV_HEAD. */
2867 last_scheduled_insn = prev_head;
2869 gcc_assert ((NOTE_P (last_scheduled_insn)
2870 || DEBUG_INSN_P (last_scheduled_insn))
2871 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2873 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
2874 queue. */
2875 q_ptr = 0;
2876 q_size = 0;
2878 insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
2879 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
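/* INSN_QUEUE is used as a circular buffer of insn lists: slot
   NEXT_Q_AFTER (q_ptr, N) holds the insns that become ready N cycles from
   now, and queue_to_ready advances q_ptr by one slot per clock tick as it
   drains the current slot into the ready list.  */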
2881 /* Start just before the beginning of time. */
2882 clock_var = -1;
2884 /* We need the queue and ready lists and clock_var to be initialized
2885 in try_ready () (which is called through init_ready_list ()). */
2886 (*current_sched_info->init_ready_list) ();
2888 /* The algorithm is O(n^2) in the number of ready insns at any given
2889 time in the worst case. Before reload we are more likely to have
2890 big lists so truncate them to a reasonable size. */
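/* The truncation below keeps the MAX_SCHED_READY_INSNS best insns (debug
   insns are counted separately), is careful not to split a sched group,
   and queues the remaining insns for one cycle.  */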
2891 if (!reload_completed
2892 && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
2894 ready_sort (&ready);
2896 /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
2897 If there are debug insns, we know they're first. */
2898 for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
2899 if (!SCHED_GROUP_P (ready_element (&ready, i)))
2900 break;
2902 if (sched_verbose >= 2)
2904 fprintf (sched_dump,
2905 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2906 fprintf (sched_dump,
2907 ";;\t\t before reload => truncated to %d insns\n", i);
2910 /* Delay all insns past it for 1 cycle. If debug counter is
2911 activated make an exception for the insn right after
2912 last_scheduled_insn. */
2914 rtx skip_insn;
2916 if (dbg_cnt (sched_insn) == false)
2917 skip_insn = next_nonnote_insn (last_scheduled_insn);
2918 else
2919 skip_insn = NULL_RTX;
2921 while (i < ready.n_ready)
2923 rtx insn;
2925 insn = ready_remove (&ready, i);
2927 if (insn != skip_insn)
2928 queue_insn (insn, 1);
2933 /* Now we can restore basic block notes and maintain precise cfg. */
2934 restore_bb_notes (*target_bb);
2936 last_clock_var = -1;
2938 advance = 0;
2940 sort_p = TRUE;
2941 /* Loop until all the insns in BB are scheduled. */
2942 while ((*current_sched_info->schedule_more_p) ())
2946 start_clock_var = clock_var;
2948 clock_var++;
2950 advance_one_cycle ();
2952 /* Add to the ready list all pending insns that can be issued now.
2953 If there are no ready insns, increment clock until one
2954 is ready and add all pending insns at that point to the ready
2955 list. */
2956 queue_to_ready (&ready);
2958 gcc_assert (ready.n_ready);
2960 if (sched_verbose >= 2)
2962 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2963 debug_ready_list (&ready);
2965 advance -= clock_var - start_clock_var;
2967 while (advance > 0);
2969 if (sort_p)
2971 /* Sort the ready list based on priority. */
2972 ready_sort (&ready);
2974 if (sched_verbose >= 2)
2976 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2977 debug_ready_list (&ready);
2981 /* We don't want md sched reorder to even see debug insns, so put
2982 them out right away. */
2983 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
2985 if (control_flow_insn_p (last_scheduled_insn))
2987 *target_bb = current_sched_info->advance_target_bb
2988 (*target_bb, 0);
2990 if (sched_verbose)
2992 rtx x;
2994 x = next_real_insn (last_scheduled_insn);
2995 gcc_assert (x);
2996 dump_new_block_header (1, *target_bb, x, tail);
2999 last_scheduled_insn = bb_note (*target_bb);
3002 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
3004 rtx insn = ready_remove_first (&ready);
3005 gcc_assert (DEBUG_INSN_P (insn));
3006 (*current_sched_info->begin_schedule_ready) (insn,
3007 last_scheduled_insn);
3008 move_insn (insn, last_scheduled_insn,
3009 current_sched_info->next_tail);
3010 last_scheduled_insn = insn;
3011 advance = schedule_insn (insn);
3012 gcc_assert (advance == 0);
3013 if (ready.n_ready > 0)
3014 ready_sort (&ready);
3017 if (!ready.n_ready)
3018 continue;
3021 /* Allow the target to reorder the list, typically for
3022 better instruction bundling. */
3023 if (sort_p && targetm.sched.reorder
3024 && (ready.n_ready == 0
3025 || !SCHED_GROUP_P (ready_element (&ready, 0))))
3026 can_issue_more =
3027 targetm.sched.reorder (sched_dump, sched_verbose,
3028 ready_lastpos (&ready),
3029 &ready.n_ready, clock_var);
3030 else
3031 can_issue_more = issue_rate;
3033 first_cycle_insn_p = true;
3034 cycle_issued_insns = 0;
3035 for (;;)
3037 rtx insn;
3038 int cost;
3039 bool asm_p = false;
3041 if (sched_verbose >= 2)
3043 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
3044 clock_var);
3045 debug_ready_list (&ready);
3046 if (sched_pressure_p)
3047 print_curr_reg_pressure ();
3050 if (ready.n_ready == 0
3051 && can_issue_more
3052 && reload_completed)
3054 /* Allow scheduling insns directly from the queue in case
3055 there's nothing better to do (ready list is empty) but
3056 there are still vacant dispatch slots in the current cycle. */
3057 if (sched_verbose >= 6)
3058 fprintf (sched_dump,";;\t\tSecond chance\n");
3059 memcpy (temp_state, curr_state, dfa_state_size);
3060 if (early_queue_to_ready (temp_state, &ready))
3061 ready_sort (&ready);
3064 if (ready.n_ready == 0
3065 || !can_issue_more
3066 || state_dead_lock_p (curr_state)
3067 || !(*current_sched_info->schedule_more_p) ())
3068 break;
3070 /* Select and remove the insn from the ready list. */
3071 if (sort_p)
3073 int res;
3075 insn = NULL_RTX;
3076 res = choose_ready (&ready, first_cycle_insn_p, &insn);
3078 if (res < 0)
3079 /* Finish cycle. */
3080 break;
3081 if (res > 0)
3082 /* Restart choose_ready (). */
3083 continue;
3085 gcc_assert (insn != NULL_RTX);
3087 else
3088 insn = ready_remove_first (&ready);
3090 if (sched_pressure_p && INSN_TICK (insn) > clock_var)
3092 ready_add (&ready, insn, true);
3093 advance = 1;
3094 break;
3097 if (targetm.sched.dfa_new_cycle
3098 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
3099 insn, last_clock_var,
3100 clock_var, &sort_p))
3101 /* SORT_P is used by the target to override sorting
3102 of the ready list. This is needed when the target
3103 has modified its internal structures expecting that
3104 the insn will be issued next. As we need the insn
3105 to have the highest priority (so it will be returned by
3106 the ready_remove_first call above), we invoke
3107 ready_add (&ready, insn, true).
3108 But, still, there is one issue: INSN can later be
3109 discarded by the scheduler's front end through
3110 current_sched_info->can_schedule_ready_p, hence it won't
3111 be issued next. */
3113 ready_add (&ready, insn, true);
3114 break;
3117 sort_p = TRUE;
3118 memcpy (temp_state, curr_state, dfa_state_size);
3119 if (recog_memoized (insn) < 0)
3121 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
3122 || asm_noperands (PATTERN (insn)) >= 0);
3123 if (!first_cycle_insn_p && asm_p)
3124 /* This asm insn is not the first insn issued on this
3125 cycle. Issue it on the next cycle instead. */
3126 cost = 1;
3127 else
3128 /* A USE insn, or something else we don't need to
3129 understand. We can't pass these directly to
3130 state_transition because it will trigger a
3131 fatal error for unrecognizable insns. */
3132 cost = 0;
3134 else if (sched_pressure_p)
3135 cost = 0;
3136 else
3138 cost = state_transition (temp_state, insn);
3139 if (cost < 0)
3140 cost = 0;
3141 else if (cost == 0)
3142 cost = 1;
3145 if (cost >= 1)
3147 queue_insn (insn, cost);
3148 if (SCHED_GROUP_P (insn))
3150 advance = cost;
3151 break;
3154 continue;
3157 if (current_sched_info->can_schedule_ready_p
3158 && ! (*current_sched_info->can_schedule_ready_p) (insn))
3159 /* We normally get here only if we don't want to move
3160 insn from the split block. */
3162 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
3163 continue;
3166 /* DECISION is made. */
3168 if (TODO_SPEC (insn) & SPECULATIVE)
3169 generate_recovery_code (insn);
3171 if (control_flow_insn_p (last_scheduled_insn)
3172 /* This is used to switch basic blocks by request
3173 from the scheduler front-end (actually, sched-ebb.c only).
3174 It is used to process blocks with a single fallthru
3175 edge. If the succeeding block has a jump, that jump will try to
3176 move to the end of the current bb, thus corrupting the CFG. */
3177 || current_sched_info->advance_target_bb (*target_bb, insn))
3179 *target_bb = current_sched_info->advance_target_bb
3180 (*target_bb, 0);
3182 if (sched_verbose)
3184 rtx x;
3186 x = next_real_insn (last_scheduled_insn);
3187 gcc_assert (x);
3188 dump_new_block_header (1, *target_bb, x, tail);
3191 last_scheduled_insn = bb_note (*target_bb);
3194 /* Update counters, etc in the scheduler's front end. */
3195 (*current_sched_info->begin_schedule_ready) (insn,
3196 last_scheduled_insn);
3198 move_insn (insn, last_scheduled_insn, current_sched_info->next_tail);
3200 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
3201 targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
3203 reemit_notes (insn);
3204 last_scheduled_insn = insn;
3206 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
3208 cycle_issued_insns++;
3209 memcpy (curr_state, temp_state, dfa_state_size);
3212 if (targetm.sched.variable_issue)
3213 can_issue_more =
3214 targetm.sched.variable_issue (sched_dump, sched_verbose,
3215 insn, can_issue_more);
3216 /* A naked CLOBBER or USE generates no instruction, so do
3217 not count them against the issue rate. */
3218 else if (GET_CODE (PATTERN (insn)) != USE
3219 && GET_CODE (PATTERN (insn)) != CLOBBER)
3220 can_issue_more--;
3221 advance = schedule_insn (insn);
3223 /* After issuing an asm insn we should start a new cycle. */
3224 if (advance == 0 && asm_p)
3225 advance = 1;
3226 if (advance != 0)
3227 break;
3229 first_cycle_insn_p = false;
3231 /* Sort the ready list based on priority. This must be
3232 redone here, as schedule_insn may have readied additional
3233 insns that will not be sorted correctly. */
3234 if (ready.n_ready > 0)
3235 ready_sort (&ready);
3237 /* Quickly go through debug insns such that md sched
3238 reorder2 doesn't have to deal with debug insns. */
3239 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
3240 && (*current_sched_info->schedule_more_p) ())
3242 if (control_flow_insn_p (last_scheduled_insn))
3244 *target_bb = current_sched_info->advance_target_bb
3245 (*target_bb, 0);
3247 if (sched_verbose)
3249 rtx x;
3251 x = next_real_insn (last_scheduled_insn);
3252 gcc_assert (x);
3253 dump_new_block_header (1, *target_bb, x, tail);
3256 last_scheduled_insn = bb_note (*target_bb);
3259 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
3261 insn = ready_remove_first (&ready);
3262 gcc_assert (DEBUG_INSN_P (insn));
3263 (*current_sched_info->begin_schedule_ready)
3264 (insn, last_scheduled_insn);
3265 move_insn (insn, last_scheduled_insn,
3266 current_sched_info->next_tail);
3267 advance = schedule_insn (insn);
3268 last_scheduled_insn = insn;
3269 gcc_assert (advance == 0);
3270 if (ready.n_ready > 0)
3271 ready_sort (&ready);
3275 if (targetm.sched.reorder2
3276 && (ready.n_ready == 0
3277 || !SCHED_GROUP_P (ready_element (&ready, 0))))
3279 can_issue_more =
3280 targetm.sched.reorder2 (sched_dump, sched_verbose,
3281 ready.n_ready
3282 ? ready_lastpos (&ready) : NULL,
3283 &ready.n_ready, clock_var);
3288 /* Debug info. */
3289 if (sched_verbose)
3291 fprintf (sched_dump, ";;\tReady list (final): ");
3292 debug_ready_list (&ready);
3295 if (current_sched_info->queue_must_finish_empty)
3296 /* Sanity check -- queue must be empty now. Meaningless if region has
3297 multiple bbs. */
3298 gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
3299 else
3301 /* We must maintain QUEUE_INDEX between blocks in region. */
3302 for (i = ready.n_ready - 1; i >= 0; i--)
3304 rtx x;
3306 x = ready_element (&ready, i);
3307 QUEUE_INDEX (x) = QUEUE_NOWHERE;
3308 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
3311 if (q_size)
3312 for (i = 0; i <= max_insn_queue_index; i++)
3314 rtx link;
3315 for (link = insn_queue[i]; link; link = XEXP (link, 1))
3317 rtx x;
3319 x = XEXP (link, 0);
3320 QUEUE_INDEX (x) = QUEUE_NOWHERE;
3321 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
3323 free_INSN_LIST_list (&insn_queue[i]);
3327 if (sched_verbose)
3328 fprintf (sched_dump, ";; total time = %d\n", clock_var);
3330 if (!current_sched_info->queue_must_finish_empty
3331 || haifa_recovery_bb_recently_added_p)
3333 /* INSN_TICK (the minimum clock tick at which the insn becomes
3334 ready) may not be correct for the insns in the subsequent
3335 blocks of the region. We should either use a correct value of
3336 `clock_var' or modify INSN_TICK. It is better to keep
3337 the clock_var value equal to 0 at the start of a basic block.
3338 Therefore we modify INSN_TICK here. */
3339 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
3342 if (targetm.sched.finish)
3344 targetm.sched.finish (sched_dump, sched_verbose);
3345 /* The target might have added some instructions to the scheduled block
3346 in its md_finish () hook. These new insns don't have any data
3347 initialized, and to identify them we extend h_i_d so that they'll
3348 get zero luids. */
3349 sched_init_luids (NULL, NULL, NULL, NULL);
3352 if (sched_verbose)
3353 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n\n",
3354 INSN_UID (head), INSN_UID (tail));
3356 /* Update head/tail boundaries. */
3357 head = NEXT_INSN (prev_head);
3358 tail = last_scheduled_insn;
3360 head = restore_other_notes (head, NULL);
3362 current_sched_info->head = head;
3363 current_sched_info->tail = tail;
3366 /* Set_priorities: compute priority of each insn in the block. */
3368 int
3369 set_priorities (rtx head, rtx tail)
3371 rtx insn;
3372 int n_insn;
3373 int sched_max_insns_priority =
3374 current_sched_info->sched_max_insns_priority;
3375 rtx prev_head;
3377 if (head == tail && ! INSN_P (head))
3378 gcc_unreachable ();
3380 n_insn = 0;
3382 prev_head = PREV_INSN (head);
3383 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
3385 if (!INSN_P (insn))
3386 continue;
3388 n_insn++;
3389 (void) priority (insn);
3391 gcc_assert (INSN_PRIORITY_KNOWN (insn));
3393 sched_max_insns_priority = MAX (sched_max_insns_priority,
3394 INSN_PRIORITY (insn));
3397 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
3399 return n_insn;
3402 /* Set dump and sched_verbose for the desired debugging output. If no
3403 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
3404 For -fsched-verbose=N, N>=10, print everything to stderr. */
3405 void
3406 setup_sched_dump (void)
3408 sched_verbose = sched_verbose_param;
3409 if (sched_verbose_param == 0 && dump_file)
3410 sched_verbose = 1;
3411 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
3412 ? stderr : dump_file);
3415 /* Initialize some global state for the scheduler. This function works
3416 with the common data shared between all the schedulers. It is called
3417 from the scheduler specific initialization routine. */
3419 void
3420 sched_init (void)
3422 /* Disable speculative loads in their presence if cc0 defined. */
3423 #ifdef HAVE_cc0
3424 flag_schedule_speculative_load = 0;
3425 #endif
3427 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
3428 targetm.sched.dispatch_do (NULL_RTX, DISPATCH_INIT);
3430 sched_pressure_p = (flag_sched_pressure && ! reload_completed
3431 && common_sched_info->sched_pass_id == SCHED_RGN_PASS);
3433 if (sched_pressure_p)
3434 ira_setup_eliminable_regset ();
3436 /* Initialize SPEC_INFO. */
3437 if (targetm.sched.set_sched_flags)
3439 spec_info = &spec_info_var;
3440 targetm.sched.set_sched_flags (spec_info);
3442 if (spec_info->mask != 0)
3444 spec_info->data_weakness_cutoff =
3445 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
3446 spec_info->control_weakness_cutoff =
3447 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
3448 * REG_BR_PROB_BASE) / 100;
3450 else
3451 /* So we won't read anything accidentally. */
3452 spec_info = NULL;
3455 else
3456 /* So we won't read anything accidentally. */
3457 spec_info = 0;
3459 /* Initialize issue_rate. */
3460 if (targetm.sched.issue_rate)
3461 issue_rate = targetm.sched.issue_rate ();
3462 else
3463 issue_rate = 1;
3465 if (cached_issue_rate != issue_rate)
3467 cached_issue_rate = issue_rate;
3468 /* To invalidate max_lookahead_tries: */
3469 cached_first_cycle_multipass_dfa_lookahead = 0;
3472 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
3473 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
3474 else
3475 dfa_lookahead = 0;
3477 if (targetm.sched.init_dfa_pre_cycle_insn)
3478 targetm.sched.init_dfa_pre_cycle_insn ();
3480 if (targetm.sched.init_dfa_post_cycle_insn)
3481 targetm.sched.init_dfa_post_cycle_insn ();
3483 dfa_start ();
3484 dfa_state_size = state_size ();
3486 init_alias_analysis ();
3488 df_set_flags (DF_LR_RUN_DCE);
3489 df_note_add_problem ();
3491 /* More problems needed for interloop dep calculation in SMS. */
3492 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
3494 df_rd_add_problem ();
3495 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
3498 df_analyze ();
3500 /* Do not run DCE after reload, as this can kill nops inserted
3501 by bundling. */
3502 if (reload_completed)
3503 df_clear_flags (DF_LR_RUN_DCE);
3505 regstat_compute_calls_crossed ();
3507 if (targetm.sched.init_global)
3508 targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
3510 if (sched_pressure_p)
3512 int i, max_regno = max_reg_num ();
3514 ira_set_pseudo_classes (sched_verbose ? sched_dump : NULL);
3515 sched_regno_pressure_class
3516 = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
3517 for (i = 0; i < max_regno; i++)
3518 sched_regno_pressure_class[i]
3519 = (i < FIRST_PSEUDO_REGISTER
3520 ? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
3521 : ira_pressure_class_translate[reg_allocno_class (i)]);
3522 curr_reg_live = BITMAP_ALLOC (NULL);
3523 saved_reg_live = BITMAP_ALLOC (NULL);
3524 region_ref_regs = BITMAP_ALLOC (NULL);
3527 curr_state = xmalloc (dfa_state_size);
3530 static void haifa_init_only_bb (basic_block, basic_block);
3532 /* Initialize data structures specific to the Haifa scheduler. */
3533 void
3534 haifa_sched_init (void)
3536 setup_sched_dump ();
3537 sched_init ();
3539 if (spec_info != NULL)
3541 sched_deps_info->use_deps_list = 1;
3542 sched_deps_info->generate_spec_deps = 1;
3545 /* Initialize luids, dependency caches, target and h_i_d for the
3546 whole function. */
3548 bb_vec_t bbs = VEC_alloc (basic_block, heap, n_basic_blocks);
3549 basic_block bb;
3551 sched_init_bbs ();
3553 FOR_EACH_BB (bb)
3554 VEC_quick_push (basic_block, bbs, bb);
3555 sched_init_luids (bbs, NULL, NULL, NULL);
3556 sched_deps_init (true);
3557 sched_extend_target ();
3558 haifa_init_h_i_d (bbs, NULL, NULL, NULL);
3560 VEC_free (basic_block, heap, bbs);
3563 sched_init_only_bb = haifa_init_only_bb;
3564 sched_split_block = sched_split_block_1;
3565 sched_create_empty_bb = sched_create_empty_bb_1;
3566 haifa_recovery_bb_ever_added_p = false;
3568 #ifdef ENABLE_CHECKING
3569 /* This is used mainly for finding bugs in check_cfg () itself.
3570 We must call sched_bbs_init () before check_cfg () because check_cfg ()
3571 assumes that the last insn in the last bb has a non-null successor. */
3572 check_cfg (0, 0);
3573 #endif
3575 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
3576 before_recovery = 0;
3577 after_recovery = 0;
3580 /* Finish work with the data specific to the Haifa scheduler. */
3581 void
3582 haifa_sched_finish (void)
3584 sched_create_empty_bb = NULL;
3585 sched_split_block = NULL;
3586 sched_init_only_bb = NULL;
3588 if (spec_info && spec_info->dump)
3590 char c = reload_completed ? 'a' : 'b';
3592 fprintf (spec_info->dump,
3593 ";; %s:\n", current_function_name ());
3595 fprintf (spec_info->dump,
3596 ";; Procedure %cr-begin-data-spec motions == %d\n",
3597 c, nr_begin_data);
3598 fprintf (spec_info->dump,
3599 ";; Procedure %cr-be-in-data-spec motions == %d\n",
3600 c, nr_be_in_data);
3601 fprintf (spec_info->dump,
3602 ";; Procedure %cr-begin-control-spec motions == %d\n",
3603 c, nr_begin_control);
3604 fprintf (spec_info->dump,
3605 ";; Procedure %cr-be-in-control-spec motions == %d\n",
3606 c, nr_be_in_control);
3609 /* Finalize h_i_d, dependency caches, and luids for the whole
3610 function. Target will be finalized in md_global_finish (). */
3611 sched_deps_finish ();
3612 sched_finish_luids ();
3613 current_sched_info = NULL;
3614 sched_finish ();
3617 /* Free global data used during insn scheduling. This function works with
3618 the common data shared between the schedulers. */
3620 void
3621 sched_finish (void)
3623 haifa_finish_h_i_d ();
3624 if (sched_pressure_p)
3626 free (sched_regno_pressure_class);
3627 BITMAP_FREE (region_ref_regs);
3628 BITMAP_FREE (saved_reg_live);
3629 BITMAP_FREE (curr_reg_live);
3631 free (curr_state);
3633 if (targetm.sched.finish_global)
3634 targetm.sched.finish_global (sched_dump, sched_verbose);
3636 end_alias_analysis ();
3638 regstat_free_calls_crossed ();
3640 dfa_finish ();
3642 #ifdef ENABLE_CHECKING
3643 /* After reload ia64 backend clobbers CFG, so can't check anything. */
3644 if (!reload_completed)
3645 check_cfg (0, 0);
3646 #endif
3649 /* Fix INSN_TICKs of the instructions in the current block as well as
3650 INSN_TICKs of their dependents.
3651 HEAD and TAIL are the begin and the end of the current scheduled block. */
3652 static void
3653 fix_inter_tick (rtx head, rtx tail)
3655 /* Set of instructions with corrected INSN_TICK. */
3656 bitmap_head processed;
3657 /* ??? It is doubtful whether we should assume that a cycle advance happens
3658 on basic block boundaries. Basically, insns that are unconditionally ready
3659 at the start of the block are preferable to those which have
3660 a one cycle dependency on an insn from the previous block. */
3661 int next_clock = clock_var + 1;
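/* For example, if the block just scheduled finished at clock_var == 7,
   NEXT_CLOCK is 8 and an insn with INSN_TICK == 9 ends up with tick
   9 - 8 == 1, i.e. ready one cycle into the next block.  */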
3663 bitmap_initialize (&processed, 0);
3665 /* Iterate over the scheduled instructions and fix their INSN_TICKs and
3666 the INSN_TICKs of their dependent instructions, so that INSN_TICKs are
3667 consistent across different blocks. */
3668 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
3670 if (INSN_P (head))
3672 int tick;
3673 sd_iterator_def sd_it;
3674 dep_t dep;
3676 tick = INSN_TICK (head);
3677 gcc_assert (tick >= MIN_TICK);
3679 /* Fix INSN_TICK of instruction from just scheduled block. */
3680 if (bitmap_set_bit (&processed, INSN_LUID (head)))
3682 tick -= next_clock;
3684 if (tick < MIN_TICK)
3685 tick = MIN_TICK;
3687 INSN_TICK (head) = tick;
3690 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
3692 rtx next;
3694 next = DEP_CON (dep);
3695 tick = INSN_TICK (next);
3697 if (tick != INVALID_TICK
3698 /* If NEXT has its INSN_TICK calculated, fix it.
3699 If not - it will be properly calculated from
3700 scratch later in fix_tick_ready. */
3701 && bitmap_set_bit (&processed, INSN_LUID (next)))
3703 tick -= next_clock;
3705 if (tick < MIN_TICK)
3706 tick = MIN_TICK;
3708 if (tick > INTER_TICK (next))
3709 INTER_TICK (next) = tick;
3710 else
3711 tick = INTER_TICK (next);
3713 INSN_TICK (next) = tick;
3718 bitmap_clear (&processed);
3721 static int haifa_speculate_insn (rtx, ds_t, rtx *);
3723 /* Check if NEXT is ready to be added to the ready or queue list.
3724 If "yes", add it to the proper list.
3725 Returns:
3726 -1 - is not ready yet,
3727 0 - added to the ready list,
3728 0 < N - queued for N cycles. */
3729 int
3730 try_ready (rtx next)
3732 ds_t old_ts, *ts;
3734 ts = &TODO_SPEC (next);
3735 old_ts = *ts;
3737 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
3738 && ((old_ts & HARD_DEP)
3739 || (old_ts & SPECULATIVE)));
3741 if (sd_lists_empty_p (next, SD_LIST_BACK))
3742 /* NEXT has all its dependencies resolved. */
3744 /* Remove HARD_DEP bit from NEXT's status. */
3745 *ts &= ~HARD_DEP;
3747 if (current_sched_info->flags & DO_SPECULATION)
3748 /* Remove all speculative bits from NEXT's status. */
3749 *ts &= ~SPECULATIVE;
3751 else
3753 /* One of NEXT's dependencies has been resolved.
3754 Recalculate NEXT's status. */
3756 *ts &= ~SPECULATIVE & ~HARD_DEP;
3758 if (sd_lists_empty_p (next, SD_LIST_HARD_BACK))
3759 /* Now we've got NEXT with speculative deps only.
3760 1. Look at the deps to see what we have to do.
3761 2. Check if we can do 'todo'. */
3763 sd_iterator_def sd_it;
3764 dep_t dep;
3765 bool first_p = true;
3767 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
3769 ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
3771 if (DEBUG_INSN_P (DEP_PRO (dep))
3772 && !DEBUG_INSN_P (next))
3773 continue;
3775 if (first_p)
3777 first_p = false;
3779 *ts = ds;
3781 else
3782 *ts = ds_merge (*ts, ds);
3785 if (ds_weak (*ts) < spec_info->data_weakness_cutoff)
3786 /* Too few points. */
3787 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3789 else
3790 *ts |= HARD_DEP;
3793 if (*ts & HARD_DEP)
3794 gcc_assert (*ts == old_ts
3795 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
3796 else if (current_sched_info->new_ready)
3797 *ts = current_sched_info->new_ready (next, *ts);
3799 /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then the insn might
3800 have its original pattern or a changed (speculative) one. This is due
3801 to changing the ebb in region scheduling.
3802 * But if (old_ts & SPECULATIVE), then we are pretty sure that the insn
3803 has a speculative pattern.
3805 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
3806 a control-speculative NEXT could have been discarded by sched-rgn.c
3807 (the same case as when discarded by can_schedule_ready_p ()). */
3809 if ((*ts & SPECULATIVE)
3810 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3811 need to change anything. */
3812 && *ts != old_ts)
3814 int res;
3815 rtx new_pat;
3817 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3819 res = haifa_speculate_insn (next, *ts, &new_pat);
3821 switch (res)
3823 case -1:
3824 /* It would be nice to change DEP_STATUS of all dependences,
3825 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3826 so we won't reanalyze anything. */
3827 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3828 break;
3830 case 0:
3831 /* We follow the rule that every speculative insn
3832 has a non-null ORIG_PAT. */
3833 if (!ORIG_PAT (next))
3834 ORIG_PAT (next) = PATTERN (next);
3835 break;
3837 case 1:
3838 if (!ORIG_PAT (next))
3839 /* If we are going to overwrite the original pattern of the insn,
3840 save it. */
3841 ORIG_PAT (next) = PATTERN (next);
3843 haifa_change_pattern (next, new_pat);
3844 break;
3846 default:
3847 gcc_unreachable ();
3851 /* We need to restore pattern only if (*ts == 0), because otherwise it is
3852 either correct (*ts & SPECULATIVE),
3853 or we simply don't care (*ts & HARD_DEP). */
3855 gcc_assert (!ORIG_PAT (next)
3856 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
3858 if (*ts & HARD_DEP)
3860 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3861 control-speculative NEXT could have been discarded by sched-rgn.c
3862 (the same case as when discarded by can_schedule_ready_p ()). */
3863 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3865 change_queue_index (next, QUEUE_NOWHERE);
3866 return -1;
3868 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
3869 /* We should change the pattern of every previously speculative
3870 instruction; we determine whether NEXT was speculative by using
3871 the ORIG_PAT field. There is one exception: speculation checks
3872 have ORIG_PAT set too, so skip them. */
3874 haifa_change_pattern (next, ORIG_PAT (next));
3875 ORIG_PAT (next) = 0;
3878 if (sched_verbose >= 2)
3880 int s = TODO_SPEC (next);
3882 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3883 (*current_sched_info->print_insn) (next, 0));
3885 if (spec_info && spec_info->dump)
3887 if (s & BEGIN_DATA)
3888 fprintf (spec_info->dump, "; data-spec;");
3889 if (s & BEGIN_CONTROL)
3890 fprintf (spec_info->dump, "; control-spec;");
3891 if (s & BE_IN_CONTROL)
3892 fprintf (spec_info->dump, "; in-control-spec;");
3895 fprintf (sched_dump, "\n");
3898 adjust_priority (next);
3900 return fix_tick_ready (next);
3903 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
3904 static int
3905 fix_tick_ready (rtx next)
3907 int tick, delay;
3909 if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
3911 int full_p;
3912 sd_iterator_def sd_it;
3913 dep_t dep;
3915 tick = INSN_TICK (next);
3916 /* If TICK is not equal to INVALID_TICK, then update
3917 INSN_TICK of NEXT with the most recent resolved dependence
3918 cost. Otherwise, recalculate from scratch. */
3919 full_p = (tick == INVALID_TICK);
3921 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
3923 rtx pro = DEP_PRO (dep);
3924 int tick1;
3926 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3928 tick1 = INSN_TICK (pro) + dep_cost (dep);
3929 if (tick1 > tick)
3930 tick = tick1;
3932 if (!full_p)
3933 break;
3936 else
3937 tick = -1;
3939 INSN_TICK (next) = tick;
3941 delay = tick - clock_var;
3942 if (delay <= 0 || sched_pressure_p)
3943 delay = QUEUE_READY;
3945 change_queue_index (next, delay);
3947 return delay;
3950 /* Move NEXT to the proper queue list with (DELAY >= 1),
3951 or add it to the ready list (DELAY == QUEUE_READY),
3952 or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE). */
3953 static void
3954 change_queue_index (rtx next, int delay)
3956 int i = QUEUE_INDEX (next);
3958 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3959 && delay != 0);
3960 gcc_assert (i != QUEUE_SCHEDULED);
3962 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3963 || (delay < 0 && delay == i))
3964 /* We have nothing to do. */
3965 return;
3967 /* Remove NEXT from wherever it is now. */
3968 if (i == QUEUE_READY)
3969 ready_remove_insn (next);
3970 else if (i >= 0)
3971 queue_remove (next);
3973 /* Add it to the proper place. */
3974 if (delay == QUEUE_READY)
3975 ready_add (readyp, next, false);
3976 else if (delay >= 1)
3977 queue_insn (next, delay);
3979 if (sched_verbose >= 2)
3981 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3982 (*current_sched_info->print_insn) (next, 0));
3984 if (delay == QUEUE_READY)
3985 fprintf (sched_dump, " into ready\n");
3986 else if (delay >= 1)
3987 fprintf (sched_dump, " into queue with cost=%d\n", delay);
3988 else
3989 fprintf (sched_dump, " removed from ready or queue lists\n");
3993 static int sched_ready_n_insns = -1;
3995 /* Initialize per region data structures. */
3996 void
3997 sched_extend_ready_list (int new_sched_ready_n_insns)
3999 int i;
4001 if (sched_ready_n_insns == -1)
4002 /* At the first call we need to initialize one more choice_stack
4003 entry. */
4005 i = 0;
4006 sched_ready_n_insns = 0;
4008 else
4009 i = sched_ready_n_insns + 1;
4011 ready.veclen = new_sched_ready_n_insns + issue_rate;
4012 ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);
4014 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
4016 ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
4017 sched_ready_n_insns, sizeof (*ready_try));
4019 /* We allocate +1 element to save initial state in the choice_stack[0]
4020 entry. */
4021 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
4022 new_sched_ready_n_insns + 1);
4024 for (; i <= new_sched_ready_n_insns; i++)
4026 choice_stack[i].state = xmalloc (dfa_state_size);
4028 if (targetm.sched.first_cycle_multipass_init)
4029 targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
4030 .target_data));
4033 sched_ready_n_insns = new_sched_ready_n_insns;
4036 /* Free per region data structures. */
4037 void
4038 sched_finish_ready_list (void)
4040 int i;
4042 free (ready.vec);
4043 ready.vec = NULL;
4044 ready.veclen = 0;
4046 free (ready_try);
4047 ready_try = NULL;
4049 for (i = 0; i <= sched_ready_n_insns; i++)
4051 if (targetm.sched.first_cycle_multipass_fini)
4052 targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
4053 .target_data));
4055 free (choice_stack [i].state);
4057 free (choice_stack);
4058 choice_stack = NULL;
4060 sched_ready_n_insns = -1;
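/* Luid hook for non-insns.  Notes and labels do not need a luid of their
   own; returning 0 makes luids_init_insn (below) give them the current luid
   without advancing sched_max_luid.  */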
4063 static int
4064 haifa_luid_for_non_insn (rtx x)
4066 gcc_assert (NOTE_P (x) || LABEL_P (x));
4068 return 0;
4071 /* Generates recovery code for INSN. */
4072 static void
4073 generate_recovery_code (rtx insn)
4075 if (TODO_SPEC (insn) & BEGIN_SPEC)
4076 begin_speculative_block (insn);
4078 /* Here we have an insn with no dependencies on
4079 instructions other than CHECK_SPEC ones. */
4081 if (TODO_SPEC (insn) & BE_IN_SPEC)
4082 add_to_speculative_block (insn);
4085 /* Helper function.
4086 Tries to add speculative dependencies of type FS between TWIN
4087 and the consumers of INSN's forward dependencies. */
4088 static void
4089 process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
4091 sd_iterator_def sd_it;
4092 dep_t dep;
4094 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
4096 ds_t ds;
4097 rtx consumer;
4099 consumer = DEP_CON (dep);
4101 ds = DEP_STATUS (dep);
4103 if (/* If we want to create speculative dep. */
4104 fs
4105 /* And we can do that because this is a true dep. */
4106 && (ds & DEP_TYPES) == DEP_TRUE)
4108 gcc_assert (!(ds & BE_IN_SPEC));
4110 if (/* If this dep can be overcome with 'begin speculation'. */
4111 ds & BEGIN_SPEC)
4112 /* Then we have a choice: keep the dep 'begin speculative'
4113 or transform it into 'be in speculative'. */
4115 if (/* In try_ready we assert that once an insn has become ready
4116 it can be removed from the ready (or queue) list only
4117 due to a backend decision. Hence we can't let the
4118 probability of the speculative dep decrease. */
4119 ds_weak (ds) <= ds_weak (fs))
4121 ds_t new_ds;
4123 new_ds = (ds & ~BEGIN_SPEC) | fs;
4125 if (/* consumer can 'be in speculative'. */
4126 sched_insn_is_legitimate_for_speculation_p (consumer,
4127 new_ds))
4128 /* Transform it to be in speculative. */
4129 ds = new_ds;
4132 else
4133 /* Mark the dep as 'be in speculative'. */
4134 ds |= fs;
4138 dep_def _new_dep, *new_dep = &_new_dep;
4140 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
4141 sd_add_dep (new_dep, false);
4146 /* Generates recovery code for BEGIN speculative INSN. */
4147 static void
4148 begin_speculative_block (rtx insn)
4150 if (TODO_SPEC (insn) & BEGIN_DATA)
4151 nr_begin_data++;
4152 if (TODO_SPEC (insn) & BEGIN_CONTROL)
4153 nr_begin_control++;
4155 create_check_block_twin (insn, false);
4157 TODO_SPEC (insn) &= ~BEGIN_SPEC;
4160 static void haifa_init_insn (rtx);
4162 /* Generates recovery code for BE_IN speculative INSN. */
4163 static void
4164 add_to_speculative_block (rtx insn)
4166 ds_t ts;
4167 sd_iterator_def sd_it;
4168 dep_t dep;
4169 rtx twins = NULL;
4170 rtx_vec_t priorities_roots;
4172 ts = TODO_SPEC (insn);
4173 gcc_assert (!(ts & ~BE_IN_SPEC));
4175 if (ts & BE_IN_DATA)
4176 nr_be_in_data++;
4177 if (ts & BE_IN_CONTROL)
4178 nr_be_in_control++;
4180 TODO_SPEC (insn) &= ~BE_IN_SPEC;
4181 gcc_assert (!TODO_SPEC (insn));
4183 DONE_SPEC (insn) |= ts;
4185 /* First we convert all simple checks to branchy ones. */
4186 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4187 sd_iterator_cond (&sd_it, &dep);)
4189 rtx check = DEP_PRO (dep);
4191 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
4193 create_check_block_twin (check, true);
4195 /* Restart search. */
4196 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4198 else
4199 /* Continue search. */
4200 sd_iterator_next (&sd_it);
4203 priorities_roots = NULL;
4204 clear_priorities (insn, &priorities_roots);
4206 while (1)
4208 rtx check, twin;
4209 basic_block rec;
4211 /* Get the first backward dependency of INSN. */
4212 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4213 if (!sd_iterator_cond (&sd_it, &dep))
4214 /* INSN has no backward dependencies left. */
4215 break;
4217 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
4218 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
4219 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4221 check = DEP_PRO (dep);
4223 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
4224 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
4226 rec = BLOCK_FOR_INSN (check);
4228 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
4229 haifa_init_insn (twin);
4231 sd_copy_back_deps (twin, insn, true);
4233 if (sched_verbose && spec_info->dump)
4234 /* INSN_BB (insn) isn't determined for twin insns yet.
4235 So we can't use current_sched_info->print_insn. */
4236 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4237 INSN_UID (twin), rec->index);
4239 twins = alloc_INSN_LIST (twin, twins);
4241 /* Add dependences between TWIN and all appropriate
4242 instructions from REC. */
4243 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
4245 rtx pro = DEP_PRO (dep);
4247 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
4249 /* INSN might have dependencies from the instructions from
4250 several recovery blocks. At this iteration we process those
4251 producers that reside in REC. */
4252 if (BLOCK_FOR_INSN (pro) == rec)
4254 dep_def _new_dep, *new_dep = &_new_dep;
4256 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
4257 sd_add_dep (new_dep, false);
4261 process_insn_forw_deps_be_in_spec (insn, twin, ts);
4263 /* Remove all dependencies between INSN and insns in REC. */
4264 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4265 sd_iterator_cond (&sd_it, &dep);)
4267 rtx pro = DEP_PRO (dep);
4269 if (BLOCK_FOR_INSN (pro) == rec)
4270 sd_delete_dep (sd_it);
4271 else
4272 sd_iterator_next (&sd_it);
4276 /* We couldn't have added the dependencies between INSN and TWINS earlier
4277 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
4278 while (twins)
4280 rtx twin;
4282 twin = XEXP (twins, 0);
4285 dep_def _new_dep, *new_dep = &_new_dep;
4287 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4288 sd_add_dep (new_dep, false);
4291 twin = XEXP (twins, 1);
4292 free_INSN_LIST_node (twins);
4293 twins = twin;
4296 calc_priorities (priorities_roots);
4297 VEC_free (rtx, heap, priorities_roots);
4300 /* Extends the array pointed to by P and fills only the new part with zeros. */
4301 void *
4302 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
4304 gcc_assert (new_nmemb >= old_nmemb);
4305 p = XRESIZEVAR (void, p, new_nmemb * size);
4306 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
4307 return p;
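/* For example, sched_extend_ready_list () above grows ready_try this way:
     ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
                                     sched_ready_n_insns, sizeof (*ready_try));
   only the bytes backing the newly added entries are cleared.  */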
4310 /* Helper function.
4311 Find fallthru edge from PRED. */
4312 edge
4313 find_fallthru_edge_from (basic_block pred)
4315 edge e;
4316 basic_block succ;
4318 succ = pred->next_bb;
4319 gcc_assert (succ->prev_bb == pred);
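/* Search whichever edge list is shorter: PRED's successors or SUCC's
   predecessors; the fallthru edge, if it exists, appears in both.  */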
4321 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
4323 e = find_fallthru_edge (pred->succs);
4325 if (e)
4327 gcc_assert (e->dest == succ);
4328 return e;
4331 else
4333 e = find_fallthru_edge (succ->preds);
4335 if (e)
4337 gcc_assert (e->src == pred);
4338 return e;
4342 return NULL;
4345 /* Extend per basic block data structures. */
4346 static void
4347 sched_extend_bb (void)
4349 rtx insn;
4351 /* The following is done to keep current_sched_info->next_tail non null. */
4352 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4353 if (NEXT_INSN (insn) == 0
4354 || (!NOTE_P (insn)
4355 && !LABEL_P (insn)
4356 /* Don't emit a NOTE if it would end up before a BARRIER. */
4357 && !BARRIER_P (NEXT_INSN (insn))))
4359 rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
4360 /* Make the note appear outside the BB. */
4361 set_block_for_insn (note, NULL);
4362 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
4366 /* Init per basic block data structures. */
4367 void
4368 sched_init_bbs (void)
4370 sched_extend_bb ();
4373 /* Initialize BEFORE_RECOVERY variable. */
4374 static void
4375 init_before_recovery (basic_block *before_recovery_ptr)
4377 basic_block last;
4378 edge e;
4380 last = EXIT_BLOCK_PTR->prev_bb;
4381 e = find_fallthru_edge_from (last);
4383 if (e)
4385 /* We create two basic blocks:
4386 1. A single-instruction block, inserted right after E->SRC,
4387 which jumps to
4388 2. An empty block placed right before EXIT_BLOCK.
4389 Recovery blocks will be emitted between these two blocks. */
4391 basic_block single, empty;
4392 rtx x, label;
4394 /* If the fallthrough edge to exit we've found is from the block we've
4395 created before, don't do anything more. */
4396 if (last == after_recovery)
4397 return;
4399 adding_bb_to_current_region_p = false;
4401 single = sched_create_empty_bb (last);
4402 empty = sched_create_empty_bb (single);
4404 /* Add new blocks to the root loop. */
4405 if (current_loops != NULL)
4407 add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
4408 add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
4411 single->count = last->count;
4412 empty->count = last->count;
4413 single->frequency = last->frequency;
4414 empty->frequency = last->frequency;
4415 BB_COPY_PARTITION (single, last);
4416 BB_COPY_PARTITION (empty, last);
4418 redirect_edge_succ (e, single);
4419 make_single_succ_edge (single, empty, 0);
4420 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
4421 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
4423 label = block_label (empty);
4424 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
4425 JUMP_LABEL (x) = label;
4426 LABEL_NUSES (label)++;
4427 haifa_init_insn (x);
4429 emit_barrier_after (x);
4431 sched_init_only_bb (empty, NULL);
4432 sched_init_only_bb (single, NULL);
4433 sched_extend_bb ();
4435 adding_bb_to_current_region_p = true;
4436 before_recovery = single;
4437 after_recovery = empty;
4439 if (before_recovery_ptr)
4440 *before_recovery_ptr = before_recovery;
4442 if (sched_verbose >= 2 && spec_info->dump)
4443 fprintf (spec_info->dump,
4444 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
4445 last->index, single->index, empty->index);
4447 else
4448 before_recovery = last;
4451 /* Returns a new recovery block. */
4452 basic_block
4453 sched_create_recovery_block (basic_block *before_recovery_ptr)
4455 rtx label;
4456 rtx barrier;
4457 basic_block rec;
4459 haifa_recovery_bb_recently_added_p = true;
4460 haifa_recovery_bb_ever_added_p = true;
4462 init_before_recovery (before_recovery_ptr);
4464 barrier = get_last_bb_insn (before_recovery);
4465 gcc_assert (BARRIER_P (barrier));
4467 label = emit_label_after (gen_label_rtx (), barrier);
4469 rec = create_basic_block (label, label, before_recovery);
4471 /* A recovery block always ends with an unconditional jump. */
4472 emit_barrier_after (BB_END (rec));
4474 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
4475 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
4477 if (sched_verbose && spec_info->dump)
4478 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
4479 rec->index);
4481 return rec;
4484 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
4485 and emit necessary jumps. */
4486 void
4487 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
4488 basic_block second_bb)
4490 rtx label;
4491 rtx jump;
4492 int edge_flags;
4494 /* This fixes the incoming edge. */
4495 /* ??? Which other flags should be specified? */
4496 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
4497 /* The partition types compare equal when both blocks are "unpartitioned". */
4498 edge_flags = EDGE_CROSSING;
4499 else
4500 edge_flags = 0;
4502 make_edge (first_bb, rec, edge_flags);
4503 label = block_label (second_bb);
4504 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
4505 JUMP_LABEL (jump) = label;
4506 LABEL_NUSES (label)++;
4508 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
4509 /* The partition types compare equal when both blocks are "unpartitioned". */
4511 /* Rewritten from cfgrtl.c. */
4512 if (flag_reorder_blocks_and_partition
4513 && targetm.have_named_sections)
4515 /* We don't need the same note for the check because
4516 any_condjump_p (check) == true. */
4517 add_reg_note (jump, REG_CROSSING_JUMP, NULL_RTX);
4519 edge_flags = EDGE_CROSSING;
4521 else
4522 edge_flags = 0;
4524 make_single_succ_edge (rec, second_bb, edge_flags);
4525 if (dom_info_available_p (CDI_DOMINATORS))
4526 set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
4529 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
4530 INSN is a simple check that should be converted to a branchy one. */
4531 static void
4532 create_check_block_twin (rtx insn, bool mutate_p)
4534 basic_block rec;
4535 rtx label, check, twin;
4536 ds_t fs;
4537 sd_iterator_def sd_it;
4538 dep_t dep;
4539 dep_def _new_dep, *new_dep = &_new_dep;
4540 ds_t todo_spec;
4542 gcc_assert (ORIG_PAT (insn) != NULL_RTX);
4544 if (!mutate_p)
4545 todo_spec = TODO_SPEC (insn);
4546 else
4548 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
4549 && (TODO_SPEC (insn) & SPECULATIVE) == 0);
4551 todo_spec = CHECK_SPEC (insn);
4554 todo_spec &= SPECULATIVE;
4556 /* Create recovery block. */
4557 if (mutate_p || targetm.sched.needs_block_p (todo_spec))
4559 rec = sched_create_recovery_block (NULL);
4560 label = BB_HEAD (rec);
4562 else
4564 rec = EXIT_BLOCK_PTR;
4565 label = NULL_RTX;
4568 /* Emit CHECK. */
4569 check = targetm.sched.gen_spec_check (insn, label, todo_spec);
4571 if (rec != EXIT_BLOCK_PTR)
4573 /* To have mem_reg alive at the beginning of second_bb,
4574 we emit the check BEFORE insn, so that after splitting,
4575 insn will be at the beginning of second_bb, which will
4576 provide us with the correct life information. */
4577 check = emit_jump_insn_before (check, insn);
4578 JUMP_LABEL (check) = label;
4579 LABEL_NUSES (label)++;
4581 else
4582 check = emit_insn_before (check, insn);
4584 /* Extend data structures. */
4585 haifa_init_insn (check);
4587 /* CHECK is being added to current region. Extend ready list. */
4588 gcc_assert (sched_ready_n_insns != -1);
4589 sched_extend_ready_list (sched_ready_n_insns + 1);
4591 if (current_sched_info->add_remove_insn)
4592 current_sched_info->add_remove_insn (insn, 0);
4594 RECOVERY_BLOCK (check) = rec;
4596 if (sched_verbose && spec_info->dump)
4597 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
4598 (*current_sched_info->print_insn) (check, 0));
4600 gcc_assert (ORIG_PAT (insn));
4602 /* Initialize TWIN (twin is a duplicate of original instruction
4603 in the recovery block). */
4604 if (rec != EXIT_BLOCK_PTR)
4606 sd_iterator_def sd_it;
4607 dep_t dep;
4609 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
4610 if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
4612 struct _dep _dep2, *dep2 = &_dep2;
4614 init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
4616 sd_add_dep (dep2, true);
4619 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
4620 haifa_init_insn (twin);
4622 if (sched_verbose && spec_info->dump)
4623 /* INSN_BB (insn) isn't determined for twin insns yet.
4624 So we can't use current_sched_info->print_insn. */
4625 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4626 INSN_UID (twin), rec->index);
4628 else
4630 ORIG_PAT (check) = ORIG_PAT (insn);
4631 HAS_INTERNAL_DEP (check) = 1;
4632 twin = check;
4633 /* ??? We probably should change all OUTPUT dependencies to
4634 (TRUE | OUTPUT). */
4637 /* Copy all resolved back dependencies of INSN to TWIN. This will
4638 provide correct value for INSN_TICK (TWIN). */
4639 sd_copy_back_deps (twin, insn, true);
4641 if (rec != EXIT_BLOCK_PTR)
4642 /* In case of branchy check, fix CFG. */
4644 basic_block first_bb, second_bb;
4645 rtx jump;
4647 first_bb = BLOCK_FOR_INSN (check);
4648 second_bb = sched_split_block (first_bb, check);
4650 sched_create_recovery_edges (first_bb, rec, second_bb);
4652 sched_init_only_bb (second_bb, first_bb);
4653 sched_init_only_bb (rec, EXIT_BLOCK_PTR);
4655 jump = BB_END (rec);
4656 haifa_init_insn (jump);
4659 /* Move backward dependences from INSN to CHECK and
4660 move forward dependences from INSN to TWIN. */
4662 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
4663 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4665 rtx pro = DEP_PRO (dep);
4666 ds_t ds;
4668 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
4669 check --TRUE--> producer ??? or ANTI ???
4670 twin --TRUE--> producer
4671 twin --ANTI--> check
4673 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
4674 check --ANTI--> producer
4675 twin --ANTI--> producer
4676 twin --ANTI--> check
4678 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
4679 check ~~TRUE~~> producer
4680 twin ~~TRUE~~> producer
4681 twin --ANTI--> check */
4683 ds = DEP_STATUS (dep);
4685 if (ds & BEGIN_SPEC)
4687 gcc_assert (!mutate_p);
4688 ds &= ~BEGIN_SPEC;
4691 init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
4692 sd_add_dep (new_dep, false);
4694 if (rec != EXIT_BLOCK_PTR)
4696 DEP_CON (new_dep) = twin;
4697 sd_add_dep (new_dep, false);
4701 /* Second, remove backward dependencies of INSN. */
4702 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4703 sd_iterator_cond (&sd_it, &dep);)
4705 if ((DEP_STATUS (dep) & BEGIN_SPEC)
4706 || mutate_p)
4707 /* We can delete this dep because we overcome it with
4708 BEGIN_SPECULATION. */
4709 sd_delete_dep (sd_it);
4710 else
4711 sd_iterator_next (&sd_it);
4714 /* Future Speculations. Determine what BE_IN speculations will be like. */
4715 fs = 0;
4717 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
4718 here. */
4720 gcc_assert (!DONE_SPEC (insn));
4722 if (!mutate_p)
4724 ds_t ts = TODO_SPEC (insn);
4726 DONE_SPEC (insn) = ts & BEGIN_SPEC;
4727 CHECK_SPEC (check) = ts & BEGIN_SPEC;
4729 /* Luckiness of future speculations solely depends upon initial
4730 BEGIN speculation. */
4731 if (ts & BEGIN_DATA)
4732 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
4733 if (ts & BEGIN_CONTROL)
4734 fs = set_dep_weak (fs, BE_IN_CONTROL,
4735 get_dep_weak (ts, BEGIN_CONTROL));
4737 else
4738 CHECK_SPEC (check) = CHECK_SPEC (insn);
4740 /* Future speculations: call the helper. */
4741 process_insn_forw_deps_be_in_spec (insn, twin, fs);
4743 if (rec != EXIT_BLOCK_PTR)
4745 /* Which types of dependencies we should use here is,
4746 generally, a machine-dependent question... But, for now,
4747 it is not. */
4749 if (!mutate_p)
4751 init_dep (new_dep, insn, check, REG_DEP_TRUE);
4752 sd_add_dep (new_dep, false);
4754 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4755 sd_add_dep (new_dep, false);
4757 else
4759 if (spec_info->dump)
4760 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
4761 (*current_sched_info->print_insn) (insn, 0));
4763 /* Remove all dependencies of the INSN. */
4765 sd_it = sd_iterator_start (insn, (SD_LIST_FORW
4766 | SD_LIST_BACK
4767 | SD_LIST_RES_BACK));
4768 while (sd_iterator_cond (&sd_it, &dep))
4769 sd_delete_dep (sd_it);
4772 /* If the former check (INSN) was already moved to the ready (or queue)
4773 list, add the new check (CHECK) there too. */
4774 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
4775 try_ready (check);
4777 /* Remove old check from instruction stream and free its
4778 data. */
4779 sched_remove_insn (insn);
4782 init_dep (new_dep, check, twin, REG_DEP_ANTI);
4783 sd_add_dep (new_dep, false);
4785 else
4787 init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
4788 sd_add_dep (new_dep, false);
4791 if (!mutate_p)
4792 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
4793 because it'll be done later in add_to_speculative_block. */
4795 rtx_vec_t priorities_roots = NULL;
4797 clear_priorities (twin, &priorities_roots);
4798 calc_priorities (priorities_roots);
4799 VEC_free (rtx, heap, priorities_roots);
4803 /* Removes dependencies between instructions in the recovery block REC
4804 and the usual region instructions. It keeps the inner dependences
4805 so it won't be necessary to recompute them. */
4806 static void
4807 fix_recovery_deps (basic_block rec)
4809 rtx note, insn, jump, ready_list = 0;
4810 bitmap_head in_ready;
4811 rtx link;
4813 bitmap_initialize (&in_ready, 0);
4815 /* NOTE - a basic block note. */
4816 note = NEXT_INSN (BB_HEAD (rec));
4817 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4818 insn = BB_END (rec);
4819 gcc_assert (JUMP_P (insn));
4820 insn = PREV_INSN (insn);
4824 sd_iterator_def sd_it;
4825 dep_t dep;
4827 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4828 sd_iterator_cond (&sd_it, &dep);)
4830 rtx consumer = DEP_CON (dep);
4832 if (BLOCK_FOR_INSN (consumer) != rec)
4834 sd_delete_dep (sd_it);
4836 if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
4837 ready_list = alloc_INSN_LIST (consumer, ready_list);
4839 else
4841 gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4843 sd_iterator_next (&sd_it);
4847 insn = PREV_INSN (insn);
4849 while (insn != note);
4851 bitmap_clear (&in_ready);
4853 /* Try to add instructions to the ready or queue list. */
4854 for (link = ready_list; link; link = XEXP (link, 1))
4855 try_ready (XEXP (link, 0));
4856 free_INSN_LIST_list (&ready_list);
4858 /* Fix the jump's dependences. */
4859 insn = BB_HEAD (rec);
4860 jump = BB_END (rec);
4862 gcc_assert (LABEL_P (insn));
4863 insn = NEXT_INSN (insn);
4865 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4866 add_jump_dependencies (insn, jump);
4869 /* Change pattern of INSN to NEW_PAT. */
4870 void
4871 sched_change_pattern (rtx insn, rtx new_pat)
4873 int t;
4875 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
4876 gcc_assert (t);
4877 dfa_clear_single_insn_cache (insn);
4880 /* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
4881 instruction data. */
4882 static void
4883 haifa_change_pattern (rtx insn, rtx new_pat)
4885 sched_change_pattern (insn, new_pat);
4887 /* Invalidate INSN_COST, so it'll be recalculated. */
4888 INSN_COST (insn) = -1;
4889 /* Invalidate INSN_TICK, so it'll be recalculated. */
4890 INSN_TICK (insn) = INVALID_TICK;
4893 /* -1 - can't speculate,
4894 0 - for speculation with REQUEST mode it is OK to use
4895 current instruction pattern,
4896 1 - need to change pattern for *NEW_PAT to be speculative. */
4897 int
4898 sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4900 gcc_assert (current_sched_info->flags & DO_SPECULATION
4901 && (request & SPECULATIVE)
4902 && sched_insn_is_legitimate_for_speculation_p (insn, request));
4904 if ((request & spec_info->mask) != request)
4905 return -1;
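/* A request consisting only of BE_IN bits needs no pattern change:
   'be in speculative' status only affects dependencies, so the current
   instruction pattern can be used as is.  */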
4907 if (request & BE_IN_SPEC
4908 && !(request & BEGIN_SPEC))
4909 return 0;
4911 return targetm.sched.speculate_insn (insn, request, new_pat);
4914 static int
4915 haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4917 gcc_assert (sched_deps_info->generate_spec_deps
4918 && !IS_SPECULATION_CHECK_P (insn));
4920 if (HAS_INTERNAL_DEP (insn)
4921 || SCHED_GROUP_P (insn))
4922 return -1;
4924 return sched_speculate_insn (insn, request, new_pat);
4927 /* Print some information about block BB, which starts with HEAD and
4928 ends with TAIL, before scheduling it.
4929 I is zero if the scheduler is about to start with a fresh ebb. */
4930 static void
4931 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4933 if (!i)
4934 fprintf (sched_dump,
4935 ";; ======================================================\n");
4936 else
4937 fprintf (sched_dump,
4938 ";; =====================ADVANCING TO=====================\n");
4939 fprintf (sched_dump,
4940 ";; -- basic block %d from %d to %d -- %s reload\n",
4941 bb->index, INSN_UID (head), INSN_UID (tail),
4942 (reload_completed ? "after" : "before"));
4943 fprintf (sched_dump,
4944 ";; ======================================================\n");
4945 fprintf (sched_dump, "\n");
4948 /* Unlink basic block notes and labels and save them, so they
4949 can be easily restored. We unlink basic block notes in EBBs to
4950 provide backward compatibility with the previous code, as target backends
4951 assume that there'll be only instructions between
4952 current_sched_info->{head and tail}. We restore these notes as soon
4953 as we can.
4954 FIRST (LAST) is the first (last) basic block in the ebb.
4955 NB: In usual case (FIRST == LAST) nothing is really done. */
4956 void
4957 unlink_bb_notes (basic_block first, basic_block last)
4959 /* We DON'T unlink basic block notes of the first block in the ebb. */
4960 if (first == last)
4961 return;
4963 bb_header = XNEWVEC (rtx, last_basic_block);
4965 /* Make a sentinel. */
4966 if (last->next_bb != EXIT_BLOCK_PTR)
4967 bb_header[last->next_bb->index] = 0;
4969 first = first->next_bb;
4972 rtx prev, label, note, next;
4974 label = BB_HEAD (last);
4975 if (LABEL_P (label))
4976 note = NEXT_INSN (label);
4977 else
4978 note = label;
4979 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4981 prev = PREV_INSN (label);
4982 next = NEXT_INSN (note);
4983 gcc_assert (prev && next);
4985 NEXT_INSN (prev) = next;
4986 PREV_INSN (next) = prev;
4988 bb_header[last->index] = label;
4990 if (last == first)
4991 break;
4993 last = last->prev_bb;
4995 while (1);
4998 /* Restore basic block notes.
4999 FIRST is the first basic block in the ebb. */
5000 static void
5001 restore_bb_notes (basic_block first)
5003 if (!bb_header)
5004 return;
5006 /* We DON'T unlink basic block notes of the first block in the ebb. */
5007 first = first->next_bb;
5008 /* Remember: FIRST is actually the second basic block in the ebb. */
5010 while (first != EXIT_BLOCK_PTR
5011 && bb_header[first->index])
5013 rtx prev, label, note, next;
5015 label = bb_header[first->index];
5016 prev = PREV_INSN (label);
5017 next = NEXT_INSN (prev);
5019 if (LABEL_P (label))
5020 note = NEXT_INSN (label);
5021 else
5022 note = label;
5023 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5025 bb_header[first->index] = 0;
5027 NEXT_INSN (prev) = label;
5028 NEXT_INSN (note) = next;
5029 PREV_INSN (next) = note;
5031 first = first->next_bb;
5034 free (bb_header);
5035 bb_header = 0;
5038 /* Helper function.
5039 Fix CFG after both in- and inter-block movement of
5040 control_flow_insn_p JUMP. */
5041 static void
5042 fix_jump_move (rtx jump)
5044 basic_block bb, jump_bb, jump_bb_next;
5046 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
5047 jump_bb = BLOCK_FOR_INSN (jump);
5048 jump_bb_next = jump_bb->next_bb;
5050 gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
5051 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
5053 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
5054 /* if jump_bb_next is not empty. */
5055 BB_END (jump_bb) = BB_END (jump_bb_next);
5057 if (BB_END (bb) != PREV_INSN (jump))
5058 /* Then there are instructions after jump that should be moved
5059 to jump_bb_next. */
5060 BB_END (jump_bb_next) = BB_END (bb);
5061 else
5062 /* Otherwise jump_bb_next is empty. */
5063 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
5065 /* To make assertion in move_insn happy. */
5066 BB_END (bb) = PREV_INSN (jump);
5068 update_bb_for_insn (jump_bb_next);
5071 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
5072 static void
5073 move_block_after_check (rtx jump)
5075 basic_block bb, jump_bb, jump_bb_next;
5076 VEC(edge,gc) *t;
5078 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
5079 jump_bb = BLOCK_FOR_INSN (jump);
5080 jump_bb_next = jump_bb->next_bb;
5082 update_bb_for_insn (jump_bb);
5084 gcc_assert (IS_SPECULATION_CHECK_P (jump)
5085 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
5087 unlink_block (jump_bb_next);
5088 link_block (jump_bb_next, bb);
5090 t = bb->succs;
5091 bb->succs = 0;
5092 move_succs (&(jump_bb->succs), bb);
5093 move_succs (&(jump_bb_next->succs), jump_bb);
5094 move_succs (&t, jump_bb_next);
5096 df_mark_solutions_dirty ();
5098 common_sched_info->fix_recovery_cfg
5099 (bb->index, jump_bb->index, jump_bb_next->index);
5102 /* Helper function for move_block_after_check.
5103 This function attaches the edge vector pointed to by SUCCSP to
5104 block TO. */
5105 static void
5106 move_succs (VEC(edge,gc) **succsp, basic_block to)
5108 edge e;
5109 edge_iterator ei;
5111 gcc_assert (to->succs == 0);
5113 to->succs = *succsp;
5115 FOR_EACH_EDGE (e, ei, to->succs)
5116 e->src = to;
5118 *succsp = 0;
5121 /* Remove INSN from the instruction stream.
5122 INSN should not have any dependencies. */
5123 static void
5124 sched_remove_insn (rtx insn)
5126 sd_finish_insn (insn);
5128 change_queue_index (insn, QUEUE_NOWHERE);
5129 current_sched_info->add_remove_insn (insn, 1);
5130 remove_insn (insn);
5133 /* Clear priorities of all instructions that are forward dependent on INSN.
5134 Store in the vector pointed to by ROOTS_PTR the insns on which priority ()
5135 should be invoked to initialize all cleared priorities. */
5136 static void
5137 clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
5139 sd_iterator_def sd_it;
5140 dep_t dep;
5141 bool insn_is_root_p = true;
5143 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
5145 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
5147 rtx pro = DEP_PRO (dep);
5149 if (INSN_PRIORITY_STATUS (pro) >= 0
5150 && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
5152 /* If DEP doesn't contribute to priority then INSN itself should
5153 be added to priority roots. */
5154 if (contributes_to_priority_p (dep))
5155 insn_is_root_p = false;
5157 INSN_PRIORITY_STATUS (pro) = -1;
5158 clear_priorities (pro, roots_ptr);
5162 if (insn_is_root_p)
5163 VEC_safe_push (rtx, heap, *roots_ptr, insn);
5166 /* Recompute priorities of instructions whose priorities might have been
5167 changed. ROOTS is a vector of instructions whose priority computation will
5168 trigger initialization of all cleared priorities. */
5169 static void
5170 calc_priorities (rtx_vec_t roots)
5172 int i;
5173 rtx insn;
5175 FOR_EACH_VEC_ELT (rtx, roots, i, insn)
5176 priority (insn);
5180 /* Add dependences between JUMP and other instructions in the recovery
5181 block. INSN is the first insn in the recovery block. */
5182 static void
5183 add_jump_dependencies (rtx insn, rtx jump)
5187 insn = NEXT_INSN (insn);
5188 if (insn == jump)
5189 break;
5191 if (dep_list_size (insn) == 0)
5193 dep_def _new_dep, *new_dep = &_new_dep;
5195 init_dep (new_dep, insn, jump, REG_DEP_ANTI);
5196 sd_add_dep (new_dep, false);
5199 while (1);
5201 gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
5204 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
5205 rtx
5206 bb_note (basic_block bb)
5208 rtx note;
5210 note = BB_HEAD (bb);
5211 if (LABEL_P (note))
5212 note = NEXT_INSN (note);
5214 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5215 return note;
5218 #ifdef ENABLE_CHECKING
5219 /* Helper function for check_cfg.
5220 Return nonzero if the edge vector pointed to by EL has an edge with
5221 TYPE in its flags. */
5222 static int
5223 has_edge_p (VEC(edge,gc) *el, int type)
5225 edge e;
5226 edge_iterator ei;
5228 FOR_EACH_EDGE (e, ei, el)
5229 if (e->flags & type)
5230 return 1;
5231 return 0;
5234 /* Search back, starting at INSN, for an insn that is not a
5235 NOTE_INSN_VAR_LOCATION. Don't search beyond HEAD, and return it if
5236 no such insn can be found. */
5237 static inline rtx
5238 prev_non_location_insn (rtx insn, rtx head)
5240 while (insn != head && NOTE_P (insn)
5241 && NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION)
5242 insn = PREV_INSN (insn);
5244 return insn;
5247 /* Check a few properties of the CFG between HEAD and TAIL.
5248 If HEAD (TAIL) is NULL check from the beginning (till the end) of the
5249 instruction stream. */
5250 static void
5251 check_cfg (rtx head, rtx tail)
5253 rtx next_tail;
5254 basic_block bb = 0;
5255 int not_first = 0, not_last;
5257 if (head == NULL)
5258 head = get_insns ();
5259 if (tail == NULL)
5260 tail = get_last_insn ();
5261 next_tail = NEXT_INSN (tail);
5265 not_last = head != tail;
5267 if (not_first)
5268 gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
5269 if (not_last)
5270 gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
5272 if (LABEL_P (head)
5273 || (NOTE_INSN_BASIC_BLOCK_P (head)
5274 && (!not_first
5275 || (not_first && !LABEL_P (PREV_INSN (head))))))
5277 gcc_assert (bb == 0);
5278 bb = BLOCK_FOR_INSN (head);
5279 if (bb != 0)
5280 gcc_assert (BB_HEAD (bb) == head);
5281 else
5282 /* This is the case of a jump table. See inside_basic_block_p (). */
5283 gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
5286 if (bb == 0)
5288 gcc_assert (!inside_basic_block_p (head));
5289 head = NEXT_INSN (head);
5291 else
5293 gcc_assert (inside_basic_block_p (head)
5294 || NOTE_P (head));
5295 gcc_assert (BLOCK_FOR_INSN (head) == bb);
5297 if (LABEL_P (head))
5299 head = NEXT_INSN (head);
5300 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
5302 else
5304 if (control_flow_insn_p (head))
5306 gcc_assert (prev_non_location_insn (BB_END (bb), head)
5307 == head);
5309 if (any_uncondjump_p (head))
5310 gcc_assert (EDGE_COUNT (bb->succs) == 1
5311 && BARRIER_P (NEXT_INSN (head)));
5312 else if (any_condjump_p (head))
5313 gcc_assert (/* Usual case. */
5314 (EDGE_COUNT (bb->succs) > 1
5315 && !BARRIER_P (NEXT_INSN (head)))
5316 /* Or jump to the next instruction. */
5317 || (EDGE_COUNT (bb->succs) == 1
5318 && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
5319 == JUMP_LABEL (head))));
5321 if (BB_END (bb) == head)
5323 if (EDGE_COUNT (bb->succs) > 1)
5324 gcc_assert (control_flow_insn_p (prev_non_location_insn
5325 (head, BB_HEAD (bb)))
5326 || has_edge_p (bb->succs, EDGE_COMPLEX));
5327 bb = 0;
5330 head = NEXT_INSN (head);
5334 not_first = 1;
5336 while (head != next_tail);
5338 gcc_assert (bb == 0);
5341 #endif /* ENABLE_CHECKING */
5343 /* Extend per basic block data structures. */
5344 static void
5345 extend_bb (void)
5347 if (sched_scan_info->extend_bb)
5348 sched_scan_info->extend_bb ();
5351 /* Init data for BB. */
5352 static void
5353 init_bb (basic_block bb)
5355 if (sched_scan_info->init_bb)
5356 sched_scan_info->init_bb (bb);
5359 /* Extend per insn data structures. */
5360 static void
5361 extend_insn (void)
5363 if (sched_scan_info->extend_insn)
5364 sched_scan_info->extend_insn ();
5367 /* Init data structures for INSN. */
5368 static void
5369 init_insn (rtx insn)
5371 if (sched_scan_info->init_insn)
5372 sched_scan_info->init_insn (insn);
5375 /* Init all insns in BB. */
5376 static void
5377 init_insns_in_bb (basic_block bb)
5379 rtx insn;
5381 FOR_BB_INSNS (bb, insn)
5382 init_insn (insn);
5385 /* A driver function to add a set of basic blocks (BBS),
5386 a single basic block (BB), a set of insns (INSNS) or a single insn (INSN)
5387 to the scheduling region. */
5388 void
5389 sched_scan (const struct sched_scan_info_def *ssi,
5390 bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5392 sched_scan_info = ssi;
5394 if (bbs != NULL || bb != NULL)
5396 extend_bb ();
5398 if (bbs != NULL)
5400 unsigned i;
5401 basic_block x;
5403 FOR_EACH_VEC_ELT (basic_block, bbs, i, x)
5404 init_bb (x);
5407 if (bb != NULL)
5408 init_bb (bb);
5411 extend_insn ();
5413 if (bbs != NULL)
5415 unsigned i;
5416 basic_block x;
5418 FOR_EACH_VEC_ELT (basic_block, bbs, i, x)
5419 init_insns_in_bb (x);
5422 if (bb != NULL)
5423 init_insns_in_bb (bb);
5425 if (insns != NULL)
5427 unsigned i;
5428 rtx x;
5430 FOR_EACH_VEC_ELT (rtx, insns, i, x)
5431 init_insn (x);
5434 if (insn != NULL)
5435 init_insn (insn);
5439 /* Extend data structures for logical insn UID. */
5440 static void
5441 luids_extend_insn (void)
5443 int new_luids_max_uid = get_max_uid () + 1;
5445 VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid);
5448 /* Initialize LUID for INSN. */
5449 static void
5450 luids_init_insn (rtx insn)
5452 int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
5453 int luid;
5455 if (i >= 0)
5457 luid = sched_max_luid;
5458 sched_max_luid += i;
5460 else
5461 luid = -1;
5463 SET_INSN_LUID (insn, luid);
5466 /* Initialize luids for BBS, BB, INSNS and INSN.
5467 The hook common_sched_info->luid_for_non_insn () is used to determine
5468 if notes, labels, etc. need luids. */
5469 void
5470 sched_init_luids (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5472 const struct sched_scan_info_def ssi =
5474 NULL, /* extend_bb */
5475 NULL, /* init_bb */
5476 luids_extend_insn, /* extend_insn */
5477 luids_init_insn /* init_insn */
5480 sched_scan (&ssi, bbs, bb, insns, insn);
5483 /* Free LUIDs. */
5484 void
5485 sched_finish_luids (void)
5487 VEC_free (int, heap, sched_luids);
5488 sched_max_luid = 1;
5491 /* Return logical uid of INSN. Helpful while debugging. */
5492 int
5493 insn_luid (rtx insn)
5495 return INSN_LUID (insn);
5498 /* Extend per insn data in the target. */
5499 void
5500 sched_extend_target (void)
5502 if (targetm.sched.h_i_d_extended)
5503 targetm.sched.h_i_d_extended ();
5506 /* Extend global scheduler structures (those that live across calls to
5507 schedule_block) to include information about the just-emitted INSN. */
5508 static void
5509 extend_h_i_d (void)
5511 int reserve = (get_max_uid () + 1
5512 - VEC_length (haifa_insn_data_def, h_i_d));
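/* Grow only when the vector cannot already hold the new uids; growing to
   3 * get_max_uid () / 2 over-allocates so that a burst of newly emitted
   insns does not force a reallocation for every one of them.  */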
5513 if (reserve > 0
5514 && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
5516 VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
5517 3 * get_max_uid () / 2);
5518 sched_extend_target ();
5522 /* Initialize the h_i_d entry of INSN with default values.
5523 Values that are not explicitly initialized here hold zero. */
5524 static void
5525 init_h_i_d (rtx insn)
5527 if (INSN_LUID (insn) > 0)
5529 INSN_COST (insn) = -1;
5530 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
5531 INSN_TICK (insn) = INVALID_TICK;
5532 INTER_TICK (insn) = INVALID_TICK;
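/* Every insn starts out carrying a hard dependency; try_ready () later
   recomputes TODO_SPEC as the insn's backward dependencies get resolved.  */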
5533 TODO_SPEC (insn) = HARD_DEP;
5537 /* Initialize haifa_insn_data for BBS, BB, INSNS and INSN. */
5538 void
5539 haifa_init_h_i_d (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5541 const struct sched_scan_info_def ssi =
5543 NULL, /* extend_bb */
5544 NULL, /* init_bb */
5545 extend_h_i_d, /* extend_insn */
5546 init_h_i_d /* init_insn */
5549 sched_scan (&ssi, bbs, bb, insns, insn);
5552 /* Finalize haifa_insn_data. */
5553 void
5554 haifa_finish_h_i_d (void)
5556 int i;
5557 haifa_insn_data_t data;
5558 struct reg_use_data *use, *next;
5560 FOR_EACH_VEC_ELT (haifa_insn_data_def, h_i_d, i, data)
5562 if (data->reg_pressure != NULL)
5563 free (data->reg_pressure);
5564 for (use = data->reg_use_list; use != NULL; use = next)
5566 next = use->next_insn_use;
5567 free (use);
5570 VEC_free (haifa_insn_data_def, heap, h_i_d);
5573 /* Init data for the new insn INSN. */
5574 static void
5575 haifa_init_insn (rtx insn)
5577 gcc_assert (insn != NULL);
5579 sched_init_luids (NULL, NULL, NULL, insn);
5580 sched_extend_target ();
5581 sched_deps_init (false);
5582 haifa_init_h_i_d (NULL, NULL, NULL, insn);
5584 if (adding_bb_to_current_region_p)
5586 sd_init_insn (insn);
5588 /* Extend dependency caches by one element. */
5589 extend_dependency_caches (1, false);
5593 /* Init data for the new basic block BB which comes after AFTER. */
5594 static void
5595 haifa_init_only_bb (basic_block bb, basic_block after)
5597 gcc_assert (bb != NULL);
5599 sched_init_bbs ();
5601 if (common_sched_info->add_block)
5602 /* This changes only data structures of the front-end. */
5603 common_sched_info->add_block (bb, after);
5606 /* A generic version of sched_split_block (). */
5607 basic_block
5608 sched_split_block_1 (basic_block first_bb, rtx after)
5610 edge e;
5612 e = split_block (first_bb, after);
5613 gcc_assert (e->src == first_bb);
5615 /* sched_split_block emits note if *check == BB_END. Probably it
5616 is better to rip that note off. */
5618 return e->dest;
5621 /* A generic version of sched_create_empty_bb (). */
5622 basic_block
5623 sched_create_empty_bb_1 (basic_block after)
5625 return create_empty_bb (after);
5628 /* Insert PAT as an INSN into the schedule and update the necessary data
5629 structures to account for it. */
5630 rtx
5631 sched_emit_insn (rtx pat)
5633 rtx insn = emit_insn_after (pat, last_scheduled_insn);
5634 last_scheduled_insn = insn;
5635 haifa_init_insn (insn);
5636 return insn;
5639 /* This function returns a candidate satisfying dispatch constraints from
5640 the ready list. */
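/* Selection order, roughly: take the insn at the head of the ready list if
   it fits the current dispatch window, otherwise scan for one that does.
   If nothing fits and the target reports a dispatch violation, fall back to
   the head of the list; otherwise prefer a compare insn, and failing that
   return the head anyway.  */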
5642 static rtx
5643 ready_remove_first_dispatch (struct ready_list *ready)
5645 int i;
5646 rtx insn = ready_element (ready, 0);
5648 if (ready->n_ready == 1
5649 || INSN_CODE (insn) < 0
5650 || !INSN_P (insn)
5651 || !active_insn_p (insn)
5652 || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
5653 return ready_remove_first (ready);
5655 for (i = 1; i < ready->n_ready; i++)
5657 insn = ready_element (ready, i);
5659 if (INSN_CODE (insn) < 0
5660 || !INSN_P (insn)
5661 || !active_insn_p (insn))
5662 continue;
5664 if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
5666 /* Return the I-th element of ready. */
5667 insn = ready_remove (ready, i);
5668 return insn;
5672 if (targetm.sched.dispatch (NULL_RTX, DISPATCH_VIOLATION))
5673 return ready_remove_first (ready);
5675 for (i = 1; i < ready->n_ready; i++)
5677 insn = ready_element (ready, i);
5679 if (INSN_CODE (insn) < 0
5680 || !INSN_P (insn)
5681 || !active_insn_p (insn))
5682 continue;
5684 /* Return i-th element of ready. */
5685 if (targetm.sched.dispatch (insn, IS_CMP))
5686 return ready_remove (ready, i);
5689 return ready_remove_first (ready);
5692 /* Get the number of ready insns in the ready list. */
5694 int
5695 number_in_ready (void)
5697 return ready.n_ready;
5700 /* Return the I-th element of the ready list. */
5702 rtx
5703 get_ready_element (int i)
5705 return ready_element (&ready, i);
5708 #endif /* INSN_SCHEDULING */