gcc/haifa-sched.c
1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
6 and currently maintained by, Jim Wilson (wilson@cygnus.com)
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
24 /* Instruction scheduling pass. This file, along with sched-deps.c,
25    contains the generic parts.  The actual entry point for the
26    normal instruction scheduling pass is found in sched-rgn.c.
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
31 expected to use. For this pass, we must therefore create the
32 remaining dependencies we need to observe: register dependencies,
33 memory dependencies, dependencies to keep function calls in order,
34 and the dependence between a conditional branch and the setting of
35 condition codes are all dealt with here.
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning values
39 to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we either queue it until sufficient time has elapsed
50 or add it to the ready list. As the instructions are scheduled or
51 when stalls are introduced, the queue advances and dumps insns into
52 the ready list. When all insns down to the lowest priority have
53 been scheduled, the critical path of the basic block has been made
54 as short as possible. The remaining insns are then scheduled in
55 remaining slots.
57 The following list shows the order in which we want to break ties
58 among insns in the ready list:
60 1. choose insn with the longest path to end of bb, ties
61 broken by
62 2. choose insn with least contribution to register pressure,
63 ties broken by
64         3.  prefer in-block over interblock motion, ties broken by
65         4.  prefer useful over speculative motion, ties broken by
66 5. choose insn with largest control flow probability, ties
67 broken by
68 6. choose insn with the least dependences upon the previously
69 scheduled insn, or finally
70         7.  choose the insn which has the most insns dependent on it.
71 8. choose insn with lowest UID.
73 Memory references complicate matters. Only if we can be certain
74 that memory references are not part of the data dependency graph
75 (via true, anti, or output dependence), can we move operations past
76    memory references.  To a first approximation, reads can be done
77 independently, while writes introduce dependencies. Better
78 approximations will yield fewer dependencies.
80 Before reload, an extended analysis of interblock data dependences
81 is required for interblock scheduling. This is performed in
82 compute_block_backward_dependences ().
84 Dependencies set up by memory references are treated in exactly the
85 same way as other dependencies, by using insn backward dependences
86 INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences
87    INSN_FORW_DEPS for the purpose of forward list scheduling.
89 Having optimized the critical path, we may have also unduly
90 extended the lifetimes of some registers. If an operation requires
91 that constants be loaded into registers, it is certainly desirable
92 to load those constants as early as necessary, but no earlier.
93 I.e., it will not do to load up a bunch of registers at the
94 beginning of a basic block only to use them at the end, if they
95 could be loaded later, since this may result in excessive register
96 utilization.
98 Note that since branches are never in basic blocks, but only end
99 basic blocks, this pass will not move branches. But that is ok,
100 since we can use GNU's delayed branch scheduling pass to take care
101 of this case.
103 Also note that no further optimizations based on algebraic
104 identities are performed, so this pass would be a good one to
105 perform instruction splitting, such as breaking up a multiply
106 instruction into shifts and adds where that is profitable.
108 Given the memory aliasing analysis that this pass should perform,
109 it should be possible to remove redundant stores to memory, and to
110 load values from registers instead of hitting memory.
112 Before reload, speculative insns are moved only if a 'proof' exists
113 that no exception will be caused by this, and if no live registers
114 exist that inhibit the motion (live registers constraints are not
115 represented by data dependence edges).
117 This pass must update information that subsequent passes expect to
118 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
119 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
121 The information in the line number notes is carefully retained by
122 this pass. Notes that refer to the starting and ending of
123 exception regions are also carefully retained by this pass. All
124 other NOTE insns are grouped in their same relative order at the
125 beginning of basic blocks and regions that have been scheduled. */
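/* As a rough illustration of the tie-breaking order listed above, here is
   a minimal qsort-style comparator sketch.  The struct, its fields and
   toy_rank are hypothetical and cover only rules 1, 2 and the final UID
   rule; the real comparator, with all the heuristics, is
   rank_for_schedule () below.

     struct toy_insn { int path_to_end, pressure_cost, luid; };

     static int
     toy_rank (const void *xp, const void *yp)
     {
       const struct toy_insn *x = *(const struct toy_insn *const *) xp;
       const struct toy_insn *y = *(const struct toy_insn *const *) yp;

       if (x->path_to_end != y->path_to_end)      /* rule 1: critical path  */
         return x->path_to_end - y->path_to_end;
       if (x->pressure_cost != y->pressure_cost)  /* rule 2: reg pressure   */
         return y->pressure_cost - x->pressure_cost;
       return y->luid - x->luid;                  /* rule 8: lowest UID     */
     }

   A positive result means the first argument is preferred, which is the
   convention documented at rank_for_schedule ().  */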
127 #include "config.h"
128 #include "system.h"
129 #include "coretypes.h"
130 #include "tm.h"
131 #include "diagnostic-core.h"
132 #include "rtl.h"
133 #include "tm_p.h"
134 #include "hard-reg-set.h"
135 #include "regs.h"
136 #include "function.h"
137 #include "flags.h"
138 #include "insn-config.h"
139 #include "insn-attr.h"
140 #include "except.h"
141 #include "recog.h"
142 #include "sched-int.h"
143 #include "target.h"
144 #include "output.h"
145 #include "params.h"
146 #include "vecprim.h"
147 #include "dbgcnt.h"
148 #include "cfgloop.h"
149 #include "ira.h"
150 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
152 #ifdef INSN_SCHEDULING
154 /* issue_rate is the number of insns that can be scheduled in the same
155 machine cycle. It can be defined in the config/mach/mach.h file,
156 otherwise we set it to 1. */
158 int issue_rate;
160 /* sched-verbose controls the amount of debugging output the
161 scheduler prints. It is controlled by -fsched-verbose=N:
162    N>0 and no -dSR : the output is directed to stderr.
163 N>=10 will direct the printouts to stderr (regardless of -dSR).
164 N=1: same as -dSR.
165 N=2: bb's probabilities, detailed ready list info, unit/insn info.
166 N=3: rtl at abort point, control-flow, regions info.
167 N=5: dependences info. */
169 int sched_verbose = 0;
171 /* Debugging file.  All printouts are sent to sched_dump, which is always
172    set, either to stderr, or to the dump listing file (-dSR).  */
173 FILE *sched_dump = 0;
175 /* This is a placeholder for the scheduler parameters common
176 to all schedulers. */
177 struct common_sched_info_def *common_sched_info;
179 #define INSN_TICK(INSN) (HID (INSN)->tick)
180 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
182 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
183 then it should be recalculated from scratch. */
184 #define INVALID_TICK (-(max_insn_queue_index + 1))
185 /* The minimal value of the INSN_TICK of an instruction. */
186 #define MIN_TICK (-max_insn_queue_index)
188 /* List of important notes we must keep around. This is a pointer to the
189 last element in the list. */
190 rtx note_list;
192 static struct spec_info_def spec_info_var;
193 /* Description of the speculative part of the scheduling.
194 If NULL - no speculation. */
195 spec_info_t spec_info = NULL;
197 /* True if a recovery block was added during scheduling of the current
198    block.  Used to determine whether we need to fix INSN_TICKs.  */
199 static bool haifa_recovery_bb_recently_added_p;
201 /* True if a recovery block was added during this scheduling pass.
202    Used to determine whether the memory pools of dependencies should be
203    empty after finishing the current region.  */
204 bool haifa_recovery_bb_ever_added_p;
206 /* Counters of different types of speculative instructions. */
207 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
209 /* Array used in {unlink, restore}_bb_notes. */
210 static rtx *bb_header = 0;
212 /* Basic block after which recovery blocks will be created. */
213 static basic_block before_recovery;
215 /* Basic block just before the EXIT_BLOCK and after recovery, if we have
216 created it. */
217 basic_block after_recovery;
219 /* FALSE if we add bb to another region, so we don't need to initialize it. */
220 bool adding_bb_to_current_region_p = true;
222 /* Queues, etc. */
224 /* An instruction is ready to be scheduled when all insns preceding it
225 have already been scheduled. It is important to ensure that all
226 insns which use its result will not be executed until its result
227 has been computed. An insn is maintained in one of four structures:
229 (P) the "Pending" set of insns which cannot be scheduled until
230 their dependencies have been satisfied.
231 (Q) the "Queued" set of insns that can be scheduled when sufficient
232 time has passed.
233 (R) the "Ready" list of unscheduled, uncommitted insns.
234 (S) the "Scheduled" list of insns.
236 Initially, all insns are either "Pending" or "Ready" depending on
237 whether their dependencies are satisfied.
239 Insns move from the "Ready" list to the "Scheduled" list as they
240 are committed to the schedule. As this occurs, the insns in the
241 "Pending" list have their dependencies satisfied and move to either
242 the "Ready" list or the "Queued" set depending on whether
243 sufficient time has passed to make them ready. As time passes,
244 insns move from the "Queued" set to the "Ready" list.
246    The "Pending" list (P) consists of the insns in the INSN_FORW_DEPS of
247    the unscheduled insns, i.e., those that are ready, queued, and pending.
248 The "Queued" set (Q) is implemented by the variable `insn_queue'.
249 The "Ready" list (R) is implemented by the variables `ready' and
250 `n_ready'.
251 The "Scheduled" list (S) is the new insn chain built by this pass.
253 The transition (R->S) is implemented in the scheduling loop in
254 `schedule_block' when the best insn to schedule is chosen.
255 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
256 insns move from the ready list to the scheduled list.
257    The transition (Q->R) is implemented in 'queue_to_ready' as time
258 passes or stalls are introduced. */
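/* Schematically, the transitions described above (this is only a summary
   of the preceding paragraph, not additional semantics):

      P --(deps resolved, no stall needed)--> R
      P --(deps resolved, stall needed)-----> Q
      Q --(enough cycles have passed)-------> R
      R --(chosen in schedule_block)--------> S  */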
260 /* Implement a circular buffer to delay instructions until sufficient
261    time has passed.  For the new pipeline description interface,
262    MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
263    than the maximal instruction execution time computed by genattr.c
264    from the functional unit reservations needed to produce a result.
265    This is the longest time an insn may be queued.  */
267 static rtx *insn_queue;
268 static int q_ptr = 0;
269 static int q_size = 0;
270 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
271 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
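/* For example, assuming a hypothetical max_insn_queue_index of 63 (a power
   of two minus one, as required above), NEXT_Q (63) is 0 and
   NEXT_Q_AFTER (60, 10) is 6: the bitwise AND makes the queue index wrap
   around without a division.  */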
273 #define QUEUE_SCHEDULED (-3)
274 #define QUEUE_NOWHERE (-2)
275 #define QUEUE_READY (-1)
276 /* QUEUE_SCHEDULED - INSN is scheduled.
277 QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in
278 queue or ready list.
279 QUEUE_READY - INSN is in ready list.
280 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
282 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
284 /* The following variable describes all current and future
285    reservations of the processor units.  */
286 state_t curr_state;
288 /* The following variable is the size of the memory representing all
289    current and future reservations of the processor units.  */
290 size_t dfa_state_size;
292 /* The following array is used to find the best insn from ready when
293 the automaton pipeline interface is used. */
294 char *ready_try = NULL;
296 /* The ready list. */
297 struct ready_list ready = {NULL, 0, 0, 0, 0};
299 /* The pointer to the ready list (to be removed). */
300 static struct ready_list *readyp = &ready;
302 /* Scheduling clock. */
303 static int clock_var;
305 static int may_trap_exp (const_rtx, int);
307 /* Nonzero iff the address is composed of at most one register.  */
308 #define CONST_BASED_ADDRESS_P(x) \
309 (REG_P (x) \
310 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
311 || (GET_CODE (x) == LO_SUM)) \
312 && (CONSTANT_P (XEXP (x, 0)) \
313 || CONSTANT_P (XEXP (x, 1)))))
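/* For instance, (reg R), (plus (reg R) (const_int 4)) and
   (lo_sum (reg R) (symbol_ref S)) all satisfy CONST_BASED_ADDRESS_P,
   while (plus (reg R1) (reg R2)) does not, since neither operand is
   CONSTANT_P.  */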
315 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
316 as found by analyzing insn's expression. */
319 static int haifa_luid_for_non_insn (rtx x);
321 /* Haifa version of sched_info hooks common to all headers. */
322 const struct common_sched_info_def haifa_common_sched_info =
324 NULL, /* fix_recovery_cfg */
325 NULL, /* add_block */
326 NULL, /* estimate_number_of_insns */
327 haifa_luid_for_non_insn, /* luid_for_non_insn */
328 SCHED_PASS_UNKNOWN /* sched_pass_id */
331 const struct sched_scan_info_def *sched_scan_info;
333 /* Mapping from instruction UID to its Logical UID. */
334 VEC (int, heap) *sched_luids = NULL;
336 /* Next LUID to assign to an instruction. */
337 int sched_max_luid = 1;
339 /* Haifa Instruction Data. */
340 VEC (haifa_insn_data_def, heap) *h_i_d = NULL;
342 void (* sched_init_only_bb) (basic_block, basic_block);
344 /* Split block function.  Different schedulers might use different
345    functions to keep their internal data consistent.  */
346 basic_block (* sched_split_block) (basic_block, rtx);
348 /* Create empty basic block after the specified block. */
349 basic_block (* sched_create_empty_bb) (basic_block);
351 static int
352 may_trap_exp (const_rtx x, int is_store)
354 enum rtx_code code;
356 if (x == 0)
357 return TRAP_FREE;
358 code = GET_CODE (x);
359 if (is_store)
361 if (code == MEM && may_trap_p (x))
362 return TRAP_RISKY;
363 else
364 return TRAP_FREE;
366 if (code == MEM)
368 /* The insn uses memory: a volatile load. */
369 if (MEM_VOLATILE_P (x))
370 return IRISKY;
371 /* An exception-free load. */
372 if (!may_trap_p (x))
373 return IFREE;
374 /* A load with 1 base register, to be further checked. */
375 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
376 return PFREE_CANDIDATE;
377 /* No info on the load, to be further checked. */
378 return PRISKY_CANDIDATE;
380 else
382 const char *fmt;
383 int i, insn_class = TRAP_FREE;
385 /* Neither store nor load, check if it may cause a trap. */
386 if (may_trap_p (x))
387 return TRAP_RISKY;
388 /* Recursive step: walk the insn... */
389 fmt = GET_RTX_FORMAT (code);
390 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
392 if (fmt[i] == 'e')
394 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
395 insn_class = WORST_CLASS (insn_class, tmp_class);
397 else if (fmt[i] == 'E')
399 int j;
400 for (j = 0; j < XVECLEN (x, i); j++)
402 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
403 insn_class = WORST_CLASS (insn_class, tmp_class);
404 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
405 break;
408 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
409 break;
411 return insn_class;
415 /* Classifies rtx X of an insn for the purpose of verifying that X can be
416 executed speculatively (and consequently the insn can be moved
417 speculatively), by examining X, returning:
418 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
419 TRAP_FREE: non-load insn.
420 IFREE: load from a globally safe location.
421 IRISKY: volatile load.
422    PFREE_CANDIDATE, PRISKY_CANDIDATE: load that needs to be checked for
423    being either PFREE or PRISKY.  */
425 static int
426 haifa_classify_rtx (const_rtx x)
428 int tmp_class = TRAP_FREE;
429 int insn_class = TRAP_FREE;
430 enum rtx_code code;
432 if (GET_CODE (x) == PARALLEL)
434 int i, len = XVECLEN (x, 0);
436 for (i = len - 1; i >= 0; i--)
438 tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
439 insn_class = WORST_CLASS (insn_class, tmp_class);
440 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
441 break;
444 else
446 code = GET_CODE (x);
447 switch (code)
449 case CLOBBER:
450 /* Test if it is a 'store'. */
451 tmp_class = may_trap_exp (XEXP (x, 0), 1);
452 break;
453 case SET:
454 /* Test if it is a store. */
455 tmp_class = may_trap_exp (SET_DEST (x), 1);
456 if (tmp_class == TRAP_RISKY)
457 break;
458 /* Test if it is a load. */
459 tmp_class =
460 WORST_CLASS (tmp_class,
461 may_trap_exp (SET_SRC (x), 0));
462 break;
463 case COND_EXEC:
464 tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
465 if (tmp_class == TRAP_RISKY)
466 break;
467 tmp_class = WORST_CLASS (tmp_class,
468 may_trap_exp (COND_EXEC_TEST (x), 0));
469 break;
470 case TRAP_IF:
471 tmp_class = TRAP_RISKY;
472 break;
473 default:;
475 insn_class = tmp_class;
478 return insn_class;
482 haifa_classify_insn (const_rtx insn)
484 return haifa_classify_rtx (PATTERN (insn));
487 /* Forward declarations. */
489 static int priority (rtx);
490 static int rank_for_schedule (const void *, const void *);
491 static void swap_sort (rtx *, int);
492 static void queue_insn (rtx, int);
493 static int schedule_insn (rtx);
494 static void adjust_priority (rtx);
495 static void advance_one_cycle (void);
496 static void extend_h_i_d (void);
499 /* Notes handling mechanism:
500 =========================
501 Generally, NOTES are saved before scheduling and restored after scheduling.
502 The scheduler distinguishes between two types of notes:
504 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
505 Before scheduling a region, a pointer to the note is added to the insn
506 that follows or precedes it. (This happens as part of the data dependence
507 computation). After scheduling an insn, the pointer contained in it is
508 used for regenerating the corresponding note (in reemit_notes).
510 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
511 these notes are put in a list (in rm_other_notes() and
512 unlink_other_notes ()). After scheduling the block, these notes are
513 inserted at the beginning of the block (in schedule_block()). */
515 static void ready_add (struct ready_list *, rtx, bool);
516 static rtx ready_remove_first (struct ready_list *);
517 static rtx ready_remove_first_dispatch (struct ready_list *ready);
519 static void queue_to_ready (struct ready_list *);
520 static int early_queue_to_ready (state_t, struct ready_list *);
522 static void debug_ready_list (struct ready_list *);
524 /* The following functions are used to implement multi-pass scheduling
525 on the first cycle. */
526 static rtx ready_remove (struct ready_list *, int);
527 static void ready_remove_insn (rtx);
529 static void fix_inter_tick (rtx, rtx);
530 static int fix_tick_ready (rtx);
531 static void change_queue_index (rtx, int);
533 /* The following functions are used to implement scheduling of data/control
534 speculative instructions. */
536 static void extend_h_i_d (void);
537 static void init_h_i_d (rtx);
538 static void generate_recovery_code (rtx);
539 static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
540 static void begin_speculative_block (rtx);
541 static void add_to_speculative_block (rtx);
542 static void init_before_recovery (basic_block *);
543 static void create_check_block_twin (rtx, bool);
544 static void fix_recovery_deps (basic_block);
545 static void haifa_change_pattern (rtx, rtx);
546 static void dump_new_block_header (int, basic_block, rtx, rtx);
547 static void restore_bb_notes (basic_block);
548 static void fix_jump_move (rtx);
549 static void move_block_after_check (rtx);
550 static void move_succs (VEC(edge,gc) **, basic_block);
551 static void sched_remove_insn (rtx);
552 static void clear_priorities (rtx, rtx_vec_t *);
553 static void calc_priorities (rtx_vec_t);
554 static void add_jump_dependencies (rtx, rtx);
555 #ifdef ENABLE_CHECKING
556 static int has_edge_p (VEC(edge,gc) *, int);
557 static void check_cfg (rtx, rtx);
558 #endif
560 #endif /* INSN_SCHEDULING */
562 /* Point to state used for the current scheduling pass. */
563 struct haifa_sched_info *current_sched_info;
565 #ifndef INSN_SCHEDULING
566 void
567 schedule_insns (void)
570 #else
572 /* Do register pressure sensitive insn scheduling if the flag is
573    set.  */
574 bool sched_pressure_p;
576 /* Map regno -> its cover class.  The map is defined only when
577    SCHED_PRESSURE_P is true.  */
578 enum reg_class *sched_regno_cover_class;
580 /* The current register pressure.  Only elements corresponding to cover
581 classes are defined. */
582 static int curr_reg_pressure[N_REG_CLASSES];
584 /* Saved value of the previous array. */
585 static int saved_reg_pressure[N_REG_CLASSES];
587 /* Registers living at the given scheduling point.  */
588 static bitmap curr_reg_live;
590 /* Saved value of the previous array. */
591 static bitmap saved_reg_live;
593 /* Registers mentioned in the current region. */
594 static bitmap region_ref_regs;
596 /* Initiate register pressure related info for scheduling the current
597    region.  Currently it only clears the registers mentioned in the
598    current region.  */
599 void
600 sched_init_region_reg_pressure_info (void)
602 bitmap_clear (region_ref_regs);
605 /* Update current register pressure related info after birth (if
606 BIRTH_P) or death of register REGNO. */
607 static void
608 mark_regno_birth_or_death (int regno, bool birth_p)
610 enum reg_class cover_class;
612 cover_class = sched_regno_cover_class[regno];
613 if (regno >= FIRST_PSEUDO_REGISTER)
615 if (cover_class != NO_REGS)
617 if (birth_p)
619 bitmap_set_bit (curr_reg_live, regno);
620 curr_reg_pressure[cover_class]
621 += ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
623 else
625 bitmap_clear_bit (curr_reg_live, regno);
626 curr_reg_pressure[cover_class]
627 -= ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
631 else if (cover_class != NO_REGS
632 && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
634 if (birth_p)
636 bitmap_set_bit (curr_reg_live, regno);
637 curr_reg_pressure[cover_class]++;
639 else
641 bitmap_clear_bit (curr_reg_live, regno);
642 curr_reg_pressure[cover_class]--;
647 /* Initiate current register pressure related info from living
648 registers given by LIVE. */
649 static void
650 initiate_reg_pressure_info (bitmap live)
652 int i;
653 unsigned int j;
654 bitmap_iterator bi;
656 for (i = 0; i < ira_reg_class_cover_size; i++)
657 curr_reg_pressure[ira_reg_class_cover[i]] = 0;
658 bitmap_clear (curr_reg_live);
659 EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
660 if (current_nr_blocks == 1 || bitmap_bit_p (region_ref_regs, j))
661 mark_regno_birth_or_death (j, true);
664 /* Mark registers in X as mentioned in the current region. */
665 static void
666 setup_ref_regs (rtx x)
668 int i, j, regno;
669 const RTX_CODE code = GET_CODE (x);
670 const char *fmt;
672 if (REG_P (x))
674 regno = REGNO (x);
675 if (regno >= FIRST_PSEUDO_REGISTER)
676 bitmap_set_bit (region_ref_regs, REGNO (x));
677 else
678 for (i = hard_regno_nregs[regno][GET_MODE (x)] - 1; i >= 0; i--)
679 bitmap_set_bit (region_ref_regs, regno + i);
680 return;
682 fmt = GET_RTX_FORMAT (code);
683 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
684 if (fmt[i] == 'e')
685 setup_ref_regs (XEXP (x, i));
686 else if (fmt[i] == 'E')
688 for (j = 0; j < XVECLEN (x, i); j++)
689 setup_ref_regs (XVECEXP (x, i, j));
693 /* Initiate current register pressure related info at the start of
694 basic block BB. */
695 static void
696 initiate_bb_reg_pressure_info (basic_block bb)
698 unsigned int i ATTRIBUTE_UNUSED;
699 rtx insn;
701 if (current_nr_blocks > 1)
702 FOR_BB_INSNS (bb, insn)
703 if (NONDEBUG_INSN_P (insn))
704 setup_ref_regs (PATTERN (insn));
705 initiate_reg_pressure_info (df_get_live_in (bb));
706 #ifdef EH_RETURN_DATA_REGNO
707 if (bb_has_eh_pred (bb))
708 for (i = 0; ; ++i)
710 unsigned int regno = EH_RETURN_DATA_REGNO (i);
712 if (regno == INVALID_REGNUM)
713 break;
714 if (! bitmap_bit_p (df_get_live_in (bb), regno))
715 mark_regno_birth_or_death (regno, true);
717 #endif
720 /* Save current register pressure related info. */
721 static void
722 save_reg_pressure (void)
724 int i;
726 for (i = 0; i < ira_reg_class_cover_size; i++)
727 saved_reg_pressure[ira_reg_class_cover[i]]
728 = curr_reg_pressure[ira_reg_class_cover[i]];
729 bitmap_copy (saved_reg_live, curr_reg_live);
732 /* Restore saved register pressure related info. */
733 static void
734 restore_reg_pressure (void)
736 int i;
738 for (i = 0; i < ira_reg_class_cover_size; i++)
739 curr_reg_pressure[ira_reg_class_cover[i]]
740 = saved_reg_pressure[ira_reg_class_cover[i]];
741 bitmap_copy (curr_reg_live, saved_reg_live);
744 /* Return TRUE if the register is dying after its USE. */
745 static bool
746 dying_use_p (struct reg_use_data *use)
748 struct reg_use_data *next;
750 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
751 if (NONDEBUG_INSN_P (next->insn)
752 && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
753 return false;
754 return true;
757 /* Print info about the current register pressure and its excess for
758 each cover class. */
759 static void
760 print_curr_reg_pressure (void)
762 int i;
763 enum reg_class cl;
765 fprintf (sched_dump, ";;\t");
766 for (i = 0; i < ira_reg_class_cover_size; i++)
768 cl = ira_reg_class_cover[i];
769 gcc_assert (curr_reg_pressure[cl] >= 0);
770 fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl],
771 curr_reg_pressure[cl],
772 curr_reg_pressure[cl] - ira_available_class_regs[cl]);
774 fprintf (sched_dump, "\n");
777 /* Pointer to the last instruction scheduled. Used by rank_for_schedule,
778 so that insns independent of the last scheduled insn will be preferred
779 over dependent instructions. */
781 static rtx last_scheduled_insn;
783 /* Cached cost of the instruction.  Use the function below to get the
784    cost of the insn.  -1 here means that the field is not initialized.  */
785 #define INSN_COST(INSN) (HID (INSN)->cost)
787 /* Compute cost of executing INSN.
788 This is the number of cycles between instruction issue and
789 instruction results. */
791 insn_cost (rtx insn)
793 int cost;
795 if (sel_sched_p ())
797 if (recog_memoized (insn) < 0)
798 return 0;
800 cost = insn_default_latency (insn);
801 if (cost < 0)
802 cost = 0;
804 return cost;
807 cost = INSN_COST (insn);
809 if (cost < 0)
811 /* A USE insn, or something else we don't need to
812 understand. We can't pass these directly to
813 result_ready_cost or insn_default_latency because it will
814 trigger a fatal error for unrecognizable insns. */
815 if (recog_memoized (insn) < 0)
817 INSN_COST (insn) = 0;
818 return 0;
820 else
822 cost = insn_default_latency (insn);
823 if (cost < 0)
824 cost = 0;
826 INSN_COST (insn) = cost;
830 return cost;
833 /* Compute cost of dependence LINK.
834 This is the number of cycles between instruction issue and
835 instruction results.
836 ??? We also use this function to call recog_memoized on all insns. */
838 dep_cost_1 (dep_t link, dw_t dw)
840 rtx insn = DEP_PRO (link);
841 rtx used = DEP_CON (link);
842 int cost;
844 /* A USE insn should never require the value used to be computed.
845 This allows the computation of a function's result and parameter
846    values to overlap the return and call.  We don't care about the
847    dependence cost when only decreasing register pressure.  */
848 if (recog_memoized (used) < 0)
850 cost = 0;
851 recog_memoized (insn);
853 else
855 enum reg_note dep_type = DEP_TYPE (link);
857 cost = insn_cost (insn);
859 if (INSN_CODE (insn) >= 0)
861 if (dep_type == REG_DEP_ANTI)
862 cost = 0;
863 else if (dep_type == REG_DEP_OUTPUT)
865 cost = (insn_default_latency (insn)
866 - insn_default_latency (used));
867 if (cost <= 0)
868 cost = 1;
870 else if (bypass_p (insn))
871 cost = insn_latency (insn, used);
875 if (targetm.sched.adjust_cost_2)
876 cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
877 dw);
878 else if (targetm.sched.adjust_cost != NULL)
880 /* This variable is used for backward compatibility with the
881 targets. */
882 rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);
884    /* Make it self-cycled, so that if someone tries to walk over this
885       incomplete list they will be caught in an endless loop.  */
886 XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
888 /* Targets use only REG_NOTE_KIND of the link. */
889 PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
891 cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
892 insn, cost);
894 free_INSN_LIST_node (dep_cost_rtx_link);
897 if (cost < 0)
898 cost = 0;
901 return cost;
904 /* Compute cost of dependence LINK.
905 This is the number of cycles between instruction issue and
906 instruction results. */
908 dep_cost (dep_t link)
910 return dep_cost_1 (link, 0);
913 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
914 INSN_PRIORITY explicitly. */
915 void
916 increase_insn_priority (rtx insn, int amount)
918 if (!sel_sched_p ())
920 /* We're dealing with haifa-sched.c INSN_PRIORITY. */
921 if (INSN_PRIORITY_KNOWN (insn))
922 INSN_PRIORITY (insn) += amount;
924 else
926 /* In sel-sched.c INSN_PRIORITY is not kept up to date.
927 Use EXPR_PRIORITY instead. */
928 sel_add_to_insn_priority (insn, amount);
932 /* Return 'true' if DEP should be included in priority calculations. */
933 static bool
934 contributes_to_priority_p (dep_t dep)
936 if (DEBUG_INSN_P (DEP_CON (dep))
937 || DEBUG_INSN_P (DEP_PRO (dep)))
938 return false;
940 /* Critical path is meaningful in block boundaries only. */
941 if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
942 DEP_PRO (dep)))
943 return false;
945   /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
946      then speculative instructions will be less likely to be
947      scheduled.  That is because the priority of
948      their producers will increase and, thus, the
949      producers will be more likely to be scheduled,
950      resolving the dependence.  */
951 if (sched_deps_info->generate_spec_deps
952 && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
953 && (DEP_STATUS (dep) & SPECULATIVE))
954 return false;
956 return true;
959 /* Compute the number of nondebug forward deps of an insn. */
961 static int
962 dep_list_size (rtx insn)
964 sd_iterator_def sd_it;
965 dep_t dep;
966 int dbgcount = 0, nodbgcount = 0;
968 if (!MAY_HAVE_DEBUG_INSNS)
969 return sd_lists_size (insn, SD_LIST_FORW);
971 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
973 if (DEBUG_INSN_P (DEP_CON (dep)))
974 dbgcount++;
975 else if (!DEBUG_INSN_P (DEP_PRO (dep)))
976 nodbgcount++;
979 gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, SD_LIST_FORW));
981 return nodbgcount;
984 /* Compute the priority number for INSN. */
985 static int
986 priority (rtx insn)
988 if (! INSN_P (insn))
989 return 0;
991 /* We should not be interested in priority of an already scheduled insn. */
992 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
994 if (!INSN_PRIORITY_KNOWN (insn))
996 int this_priority = -1;
998 if (dep_list_size (insn) == 0)
999     /* ??? We should set INSN_PRIORITY to insn_cost when an insn has
1000 some forward deps but all of them are ignored by
1001 contributes_to_priority hook. At the moment we set priority of
1002 such insn to 0. */
1003 this_priority = insn_cost (insn);
1004 else
1006 rtx prev_first, twin;
1007 basic_block rec;
1009       /* For recovery check instructions we calculate priority slightly
1010          differently than for normal instructions.  Instead of walking
1011 through INSN_FORW_DEPS (check) list, we walk through
1012 INSN_FORW_DEPS list of each instruction in the corresponding
1013 recovery block. */
1015 /* Selective scheduling does not define RECOVERY_BLOCK macro. */
1016 rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
1017 if (!rec || rec == EXIT_BLOCK_PTR)
1019 prev_first = PREV_INSN (insn);
1020 twin = insn;
1022 else
1024 prev_first = NEXT_INSN (BB_HEAD (rec));
1025 twin = PREV_INSN (BB_END (rec));
1030 sd_iterator_def sd_it;
1031 dep_t dep;
1033 FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
1035 rtx next;
1036 int next_priority;
1038 next = DEP_CON (dep);
1040 if (BLOCK_FOR_INSN (next) != rec)
1042 int cost;
1044 if (!contributes_to_priority_p (dep))
1045 continue;
1047 if (twin == insn)
1048 cost = dep_cost (dep);
1049 else
1051 struct _dep _dep1, *dep1 = &_dep1;
1053 init_dep (dep1, insn, next, REG_DEP_ANTI);
1055 cost = dep_cost (dep1);
1058 next_priority = cost + priority (next);
1060 if (next_priority > this_priority)
1061 this_priority = next_priority;
1065 twin = PREV_INSN (twin);
1067 while (twin != prev_first);
1070 if (this_priority < 0)
1072 gcc_assert (this_priority == -1);
1074 this_priority = insn_cost (insn);
1077 INSN_PRIORITY (insn) = this_priority;
1078 INSN_PRIORITY_STATUS (insn) = 1;
1081 return INSN_PRIORITY (insn);
1084 /* Macros and functions for keeping the priority queue sorted, and
1085 dealing with queuing and dequeuing of instructions. */
1087 #define SCHED_SORT(READY, N_READY) \
1088 do { if ((N_READY) == 2) \
1089 swap_sort (READY, N_READY); \
1090 else if ((N_READY) > 2) \
1091 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
1092 while (0)
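/* ready_sort () below invokes this as SCHED_SORT (first, ready->n_ready),
   with FIRST pointing at the lowest-priority end of the ready vector
   (see ready_lastpos).  */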
1094 /* Setup info about the current register pressure impact of scheduling
1095 INSN at the current scheduling point. */
1096 static void
1097 setup_insn_reg_pressure_info (rtx insn)
1099 int i, change, before, after, hard_regno;
1100 int excess_cost_change;
1101 enum machine_mode mode;
1102 enum reg_class cl;
1103 struct reg_pressure_data *pressure_info;
1104 int *max_reg_pressure;
1105 struct reg_use_data *use;
1106 static int death[N_REG_CLASSES];
1108 gcc_checking_assert (!DEBUG_INSN_P (insn));
1110 excess_cost_change = 0;
1111 for (i = 0; i < ira_reg_class_cover_size; i++)
1112 death[ira_reg_class_cover[i]] = 0;
1113 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1114 if (dying_use_p (use))
1116 cl = sched_regno_cover_class[use->regno];
1117 if (use->regno < FIRST_PSEUDO_REGISTER)
1118 death[cl]++;
1119 else
1120 death[cl] += ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
1122 pressure_info = INSN_REG_PRESSURE (insn);
1123 max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
1124 gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
1125 for (i = 0; i < ira_reg_class_cover_size; i++)
1127 cl = ira_reg_class_cover[i];
1128 gcc_assert (curr_reg_pressure[cl] >= 0);
1129 change = (int) pressure_info[i].set_increase - death[cl];
1130 before = MAX (0, max_reg_pressure[i] - ira_available_class_regs[cl]);
1131 after = MAX (0, max_reg_pressure[i] + change
1132 - ira_available_class_regs[cl]);
1133 hard_regno = ira_class_hard_regs[cl][0];
1134 gcc_assert (hard_regno >= 0);
1135 mode = reg_raw_mode[hard_regno];
1136 excess_cost_change += ((after - before)
1137 * (ira_memory_move_cost[mode][cl][0]
1138 + ira_memory_move_cost[mode][cl][1]));
1140 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
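/* A worked example with made-up numbers: if for some cover class the
   maximal pressure seen so far is 2 registers below ira_available_class_regs
   and scheduling INSN would raise it to 1 register above that limit, then
   BEFORE is 0, AFTER is 1, and EXCESS_COST_CHANGE grows by one memory store
   plus one memory load cost for that class -- an estimate of the price of
   the spill this insn would cause.  */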
1143 /* Returns a positive value if x is preferred; returns a negative value if
1144 y is preferred. Should never return 0, since that will make the sort
1145 unstable. */
1147 static int
1148 rank_for_schedule (const void *x, const void *y)
1150 rtx tmp = *(const rtx *) y;
1151 rtx tmp2 = *(const rtx *) x;
1152 rtx last;
1153 int tmp_class, tmp2_class;
1154 int val, priority_val, info_val;
1156 if (MAY_HAVE_DEBUG_INSNS)
1158 /* Schedule debug insns as early as possible. */
1159 if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
1160 return -1;
1161 else if (DEBUG_INSN_P (tmp2))
1162 return 1;
1165   /* The insn in a schedule group should be issued first.  */
1166 if (flag_sched_group_heuristic &&
1167 SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
1168 return SCHED_GROUP_P (tmp2) ? 1 : -1;
1170 /* Make sure that priority of TMP and TMP2 are initialized. */
1171 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
1173 if (sched_pressure_p)
1175 int diff;
1177 /* Prefer insn whose scheduling results in the smallest register
1178 pressure excess. */
1179 if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
1180 + (INSN_TICK (tmp) > clock_var
1181 ? INSN_TICK (tmp) - clock_var : 0)
1182 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
1183 - (INSN_TICK (tmp2) > clock_var
1184 ? INSN_TICK (tmp2) - clock_var : 0))) != 0)
1185 return diff;
1189 if (sched_pressure_p
1190 && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var))
1192 if (INSN_TICK (tmp) <= clock_var)
1193 return -1;
1194 else if (INSN_TICK (tmp2) <= clock_var)
1195 return 1;
1196 else
1197 return INSN_TICK (tmp) - INSN_TICK (tmp2);
1199 /* Prefer insn with higher priority. */
1200 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
1202 if (flag_sched_critical_path_heuristic && priority_val)
1203 return priority_val;
1205 /* Prefer speculative insn with greater dependencies weakness. */
1206 if (flag_sched_spec_insn_heuristic && spec_info)
1208 ds_t ds1, ds2;
1209 dw_t dw1, dw2;
1210 int dw;
1212 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
1213 if (ds1)
1214 dw1 = ds_weak (ds1);
1215 else
1216 dw1 = NO_DEP_WEAK;
1218 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
1219 if (ds2)
1220 dw2 = ds_weak (ds2);
1221 else
1222 dw2 = NO_DEP_WEAK;
1224 dw = dw2 - dw1;
1225 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
1226 return dw;
1229 info_val = (*current_sched_info->rank) (tmp, tmp2);
1230   if (flag_sched_rank_heuristic && info_val)
1231 return info_val;
1233 if (flag_sched_last_insn_heuristic)
1235 last = last_scheduled_insn;
1237 if (DEBUG_INSN_P (last) && last != current_sched_info->prev_head)
1239 last = PREV_INSN (last);
1240 while (!NONDEBUG_INSN_P (last)
1241 && last != current_sched_info->prev_head);
1244 /* Compare insns based on their relation to the last scheduled
1245 non-debug insn. */
1246 if (flag_sched_last_insn_heuristic && NONDEBUG_INSN_P (last))
1248 dep_t dep1;
1249 dep_t dep2;
1251 /* Classify the instructions into three classes:
1252 1) Data dependent on last schedule insn.
1253 2) Anti/Output dependent on last scheduled insn.
1254 3) Independent of last scheduled insn, or has latency of one.
1255 Choose the insn from the highest numbered class if different. */
1256 dep1 = sd_find_dep_between (last, tmp, true);
1258 if (dep1 == NULL || dep_cost (dep1) == 1)
1259 tmp_class = 3;
1260 else if (/* Data dependence. */
1261 DEP_TYPE (dep1) == REG_DEP_TRUE)
1262 tmp_class = 1;
1263 else
1264 tmp_class = 2;
1266 dep2 = sd_find_dep_between (last, tmp2, true);
1268 if (dep2 == NULL || dep_cost (dep2) == 1)
1269 tmp2_class = 3;
1270 else if (/* Data dependence. */
1271 DEP_TYPE (dep2) == REG_DEP_TRUE)
1272 tmp2_class = 1;
1273 else
1274 tmp2_class = 2;
1276 if ((val = tmp2_class - tmp_class))
1277 return val;
1280 /* Prefer the insn which has more later insns that depend on it.
1281 This gives the scheduler more freedom when scheduling later
1282 instructions at the expense of added register pressure. */
1284 val = (dep_list_size (tmp2) - dep_list_size (tmp));
1286 if (flag_sched_dep_count_heuristic && val != 0)
1287 return val;
1289 /* If insns are equally good, sort by INSN_LUID (original insn order),
1290 so that we make the sort stable. This minimizes instruction movement,
1291 thus minimizing sched's effect on debugging and cross-jumping. */
1292 return INSN_LUID (tmp) - INSN_LUID (tmp2);
1295 /* Resort the array A in which only element at index N may be out of order. */
1297 HAIFA_INLINE static void
1298 swap_sort (rtx *a, int n)
1300 rtx insn = a[n - 1];
1301 int i = n - 2;
1303 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
1305 a[i + 1] = a[i];
1306 i -= 1;
1308 a[i + 1] = insn;
1311 /* Add INSN to the insn queue so that it can be executed at least
1312 N_CYCLES after the currently executing insn. Preserve insns
1313 chain for debugging purposes. */
1315 HAIFA_INLINE static void
1316 queue_insn (rtx insn, int n_cycles)
1318 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1319 rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
1321 gcc_assert (n_cycles <= max_insn_queue_index);
1322 gcc_assert (!DEBUG_INSN_P (insn));
1324 insn_queue[next_q] = link;
1325 q_size += 1;
1327 if (sched_verbose >= 2)
1329 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
1330 (*current_sched_info->print_insn) (insn, 0));
1332 fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
1335 QUEUE_INDEX (insn) = next_q;
1338 /* Remove INSN from queue. */
1339 static void
1340 queue_remove (rtx insn)
1342 gcc_assert (QUEUE_INDEX (insn) >= 0);
1343 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
1344 q_size--;
1345 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
1348 /* Return a pointer to the bottom of the ready list, i.e. the insn
1349 with the lowest priority. */
1351 rtx *
1352 ready_lastpos (struct ready_list *ready)
1354 gcc_assert (ready->n_ready >= 1);
1355 return ready->vec + ready->first - ready->n_ready + 1;
1358 /* Add an element INSN to the ready list so that it ends up with the
1359 lowest/highest priority depending on FIRST_P. */
1361 HAIFA_INLINE static void
1362 ready_add (struct ready_list *ready, rtx insn, bool first_p)
1364 if (!first_p)
1366 if (ready->first == ready->n_ready)
1368 memmove (ready->vec + ready->veclen - ready->n_ready,
1369 ready_lastpos (ready),
1370 ready->n_ready * sizeof (rtx));
1371 ready->first = ready->veclen - 1;
1373 ready->vec[ready->first - ready->n_ready] = insn;
1375 else
1377 if (ready->first == ready->veclen - 1)
1379 if (ready->n_ready)
1380 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
1381 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
1382 ready_lastpos (ready),
1383 ready->n_ready * sizeof (rtx));
1384 ready->first = ready->veclen - 2;
1386 ready->vec[++(ready->first)] = insn;
1389 ready->n_ready++;
1390 if (DEBUG_INSN_P (insn))
1391 ready->n_debug++;
1393 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
1394 QUEUE_INDEX (insn) = QUEUE_READY;
1397 /* Remove the element with the highest priority from the ready list and
1398 return it. */
1400 HAIFA_INLINE static rtx
1401 ready_remove_first (struct ready_list *ready)
1403 rtx t;
1405 gcc_assert (ready->n_ready);
1406 t = ready->vec[ready->first--];
1407 ready->n_ready--;
1408 if (DEBUG_INSN_P (t))
1409 ready->n_debug--;
1410 /* If the queue becomes empty, reset it. */
1411 if (ready->n_ready == 0)
1412 ready->first = ready->veclen - 1;
1414 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
1415 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1417 return t;
1420 /* The following code implements multi-pass scheduling for the first
1421    cycle.  In other words, we will try to choose the ready insn that
1422    permits starting the maximum number of insns on the same cycle.  */
1424 /* Return a pointer to the element INDEX from the ready list.  INDEX
1425    for the insn with the highest priority is 0, and the lowest priority
1426    has N_READY - 1.  */
1429 ready_element (struct ready_list *ready, int index)
1431 gcc_assert (ready->n_ready && index < ready->n_ready);
1433 return ready->vec[ready->first - index];
1436 /* Remove the element INDEX from the ready list and return it. INDEX
1437 for insn with the highest priority is 0, and the lowest priority
1438 has N_READY - 1. */
1440 HAIFA_INLINE static rtx
1441 ready_remove (struct ready_list *ready, int index)
1443 rtx t;
1444 int i;
1446 if (index == 0)
1447 return ready_remove_first (ready);
1448 gcc_assert (ready->n_ready && index < ready->n_ready);
1449 t = ready->vec[ready->first - index];
1450 ready->n_ready--;
1451 if (DEBUG_INSN_P (t))
1452 ready->n_debug--;
1453 for (i = index; i < ready->n_ready; i++)
1454 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
1455 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1456 return t;
1459 /* Remove INSN from the ready list. */
1460 static void
1461 ready_remove_insn (rtx insn)
1463 int i;
1465 for (i = 0; i < readyp->n_ready; i++)
1466 if (ready_element (readyp, i) == insn)
1468 ready_remove (readyp, i);
1469 return;
1471 gcc_unreachable ();
1474 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
1475 macro. */
1477 void
1478 ready_sort (struct ready_list *ready)
1480 int i;
1481 rtx *first = ready_lastpos (ready);
1483 if (sched_pressure_p)
1485 for (i = 0; i < ready->n_ready; i++)
1486 if (!DEBUG_INSN_P (first[i]))
1487 setup_insn_reg_pressure_info (first[i]);
1489 SCHED_SORT (first, ready->n_ready);
1492 /* PREV is an insn that is ready to execute. Adjust its priority if that
1493 will help shorten or lengthen register lifetimes as appropriate. Also
1494 provide a hook for the target to tweak itself. */
1496 HAIFA_INLINE static void
1497 adjust_priority (rtx prev)
1499 /* ??? There used to be code here to try and estimate how an insn
1500 affected register lifetimes, but it did it by looking at REG_DEAD
1501 notes, which we removed in schedule_region. Nor did it try to
1502 take into account register pressure or anything useful like that.
1504 Revisit when we have a machine model to work with and not before. */
1506 if (targetm.sched.adjust_priority)
1507 INSN_PRIORITY (prev) =
1508 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
1511 /* Advance DFA state STATE on one cycle. */
1512 void
1513 advance_state (state_t state)
1515 if (targetm.sched.dfa_pre_advance_cycle)
1516 targetm.sched.dfa_pre_advance_cycle ();
1518 if (targetm.sched.dfa_pre_cycle_insn)
1519 state_transition (state,
1520 targetm.sched.dfa_pre_cycle_insn ());
1522 state_transition (state, NULL);
1524 if (targetm.sched.dfa_post_cycle_insn)
1525 state_transition (state,
1526 targetm.sched.dfa_post_cycle_insn ());
1528 if (targetm.sched.dfa_post_advance_cycle)
1529 targetm.sched.dfa_post_advance_cycle ();
1532 /* Advance time on one cycle. */
1533 HAIFA_INLINE static void
1534 advance_one_cycle (void)
1536 advance_state (curr_state);
1537 if (sched_verbose >= 6)
1538 fprintf (sched_dump, ";;\tAdvanced a state.\n");
1541 /* Clock at which the previous instruction was issued. */
1542 static int last_clock_var;
1544 /* Update register pressure after scheduling INSN. */
1545 static void
1546 update_register_pressure (rtx insn)
1548 struct reg_use_data *use;
1549 struct reg_set_data *set;
1551 gcc_checking_assert (!DEBUG_INSN_P (insn));
1553 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1554 if (dying_use_p (use) && bitmap_bit_p (curr_reg_live, use->regno))
1555 mark_regno_birth_or_death (use->regno, false);
1556 for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
1557 mark_regno_birth_or_death (set->regno, true);
1560 /* Set up or update (if UPDATE_P) max register pressure (see its
1561 meaning in sched-int.h::_haifa_insn_data) for all current BB insns
1562 after insn AFTER. */
1563 static void
1564 setup_insn_max_reg_pressure (rtx after, bool update_p)
1566 int i, p;
1567 bool eq_p;
1568 rtx insn;
1569 static int max_reg_pressure[N_REG_CLASSES];
1571 save_reg_pressure ();
1572 for (i = 0; i < ira_reg_class_cover_size; i++)
1573 max_reg_pressure[ira_reg_class_cover[i]]
1574 = curr_reg_pressure[ira_reg_class_cover[i]];
1575 for (insn = NEXT_INSN (after);
1576 insn != NULL_RTX && ! BARRIER_P (insn)
1577 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
1578 insn = NEXT_INSN (insn))
1579 if (NONDEBUG_INSN_P (insn))
1581 eq_p = true;
1582 for (i = 0; i < ira_reg_class_cover_size; i++)
1584 p = max_reg_pressure[ira_reg_class_cover[i]];
1585 if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
1587 eq_p = false;
1588 INSN_MAX_REG_PRESSURE (insn)[i]
1589 = max_reg_pressure[ira_reg_class_cover[i]];
1592 if (update_p && eq_p)
1593 break;
1594 update_register_pressure (insn);
1595 for (i = 0; i < ira_reg_class_cover_size; i++)
1596 if (max_reg_pressure[ira_reg_class_cover[i]]
1597 < curr_reg_pressure[ira_reg_class_cover[i]])
1598 max_reg_pressure[ira_reg_class_cover[i]]
1599 = curr_reg_pressure[ira_reg_class_cover[i]];
1601 restore_reg_pressure ();
1604 /* Update the current register pressure after scheduling INSN. Update
1605 also max register pressure for unscheduled insns of the current
1606 BB. */
1607 static void
1608 update_reg_and_insn_max_reg_pressure (rtx insn)
1610 int i;
1611 int before[N_REG_CLASSES];
1613 for (i = 0; i < ira_reg_class_cover_size; i++)
1614 before[i] = curr_reg_pressure[ira_reg_class_cover[i]];
1615 update_register_pressure (insn);
1616 for (i = 0; i < ira_reg_class_cover_size; i++)
1617 if (curr_reg_pressure[ira_reg_class_cover[i]] != before[i])
1618 break;
1619 if (i < ira_reg_class_cover_size)
1620 setup_insn_max_reg_pressure (insn, true);
1623 /* Set up register pressure at the beginning of basic block BB whose
1624    insns start after insn AFTER.  Set up also max register pressure
1625 for all insns of the basic block. */
1626 void
1627 sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
1629 gcc_assert (sched_pressure_p);
1630 initiate_bb_reg_pressure_info (bb);
1631 setup_insn_max_reg_pressure (after, false);
1634 /* INSN is the "currently executing insn". Launch each insn which was
1635 waiting on INSN. READY is the ready list which contains the insns
1636 that are ready to fire. CLOCK is the current cycle. The function
1637 returns necessary cycle advance after issuing the insn (it is not
1638 zero for insns in a schedule group). */
1640 static int
1641 schedule_insn (rtx insn)
1643 sd_iterator_def sd_it;
1644 dep_t dep;
1645 int i;
1646 int advance = 0;
1648 if (sched_verbose >= 1)
1650 struct reg_pressure_data *pressure_info;
1651 char buf[2048];
1653 print_insn (buf, insn, 0);
1654 buf[40] = 0;
1655 fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);
1657 if (recog_memoized (insn) < 0)
1658 fprintf (sched_dump, "nothing");
1659 else
1660 print_reservation (sched_dump, insn);
1661 pressure_info = INSN_REG_PRESSURE (insn);
1662 if (pressure_info != NULL)
1664 fputc (':', sched_dump);
1665 for (i = 0; i < ira_reg_class_cover_size; i++)
1666 fprintf (sched_dump, "%s%+d(%d)",
1667 reg_class_names[ira_reg_class_cover[i]],
1668 pressure_info[i].set_increase, pressure_info[i].change);
1670 fputc ('\n', sched_dump);
1673 if (sched_pressure_p && !DEBUG_INSN_P (insn))
1674 update_reg_and_insn_max_reg_pressure (insn);
1676 /* Scheduling instruction should have all its dependencies resolved and
1677 should have been removed from the ready list. */
1678 gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK));
1680 /* Reset debug insns invalidated by moving this insn. */
1681 if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
1682 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1683 sd_iterator_cond (&sd_it, &dep);)
1685 rtx dbg = DEP_PRO (dep);
1686 struct reg_use_data *use, *next;
1688 gcc_assert (DEBUG_INSN_P (dbg));
1690 if (sched_verbose >= 6)
1691 fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
1692 INSN_UID (dbg));
1694 /* ??? Rather than resetting the debug insn, we might be able
1695 to emit a debug temp before the just-scheduled insn, but
1696 this would involve checking that the expression at the
1697 point of the debug insn is equivalent to the expression
1698 before the just-scheduled insn. They might not be: the
1699 expression in the debug insn may depend on other insns not
1700 yet scheduled that set MEMs, REGs or even other debug
1701 insns. It's not clear that attempting to preserve debug
1702 information in these cases is worth the effort, given how
1703 uncommon these resets are and the likelihood that the debug
1704 temps introduced won't survive the schedule change. */
1705 INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
1706 df_insn_rescan (dbg);
1708 /* Unknown location doesn't use any registers. */
1709 for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
1711 struct reg_use_data *prev = use;
1713 /* Remove use from the cyclic next_regno_use chain first. */
1714 while (prev->next_regno_use != use)
1715 prev = prev->next_regno_use;
1716 prev->next_regno_use = use->next_regno_use;
1717 next = use->next_insn_use;
1718 free (use);
1720 INSN_REG_USE_LIST (dbg) = NULL;
1722 /* We delete rather than resolve these deps, otherwise we
1723 crash in sched_free_deps(), because forward deps are
1724 expected to be released before backward deps. */
1725 sd_delete_dep (sd_it);
1728 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
1729 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
1731 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
1732 if (INSN_TICK (insn) > clock_var)
1733 /* INSN has been prematurely moved from the queue to the ready list.
1734 This is possible only if following flag is set. */
1735 gcc_assert (flag_sched_stalled_insns);
1737 /* ??? Probably, if INSN is scheduled prematurely, we should leave
1738 INSN_TICK untouched. This is a machine-dependent issue, actually. */
1739 INSN_TICK (insn) = clock_var;
1741 /* Update dependent instructions. */
1742 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
1743 sd_iterator_cond (&sd_it, &dep);)
1745 rtx next = DEP_CON (dep);
1747 /* Resolve the dependence between INSN and NEXT.
1748 sd_resolve_dep () moves current dep to another list thus
1749 advancing the iterator. */
1750 sd_resolve_dep (sd_it);
1752 /* Don't bother trying to mark next as ready if insn is a debug
1753 insn. If insn is the last hard dependency, it will have
1754 already been discounted. */
1755 if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
1756 continue;
1758 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
1760 int effective_cost;
1762 effective_cost = try_ready (next);
1764 if (effective_cost >= 0
1765 && SCHED_GROUP_P (next)
1766 && advance < effective_cost)
1767 advance = effective_cost;
1769 else
1770 /* Check always has only one forward dependence (to the first insn in
1771 the recovery block), therefore, this will be executed only once. */
1773 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
1774 fix_recovery_deps (RECOVERY_BLOCK (insn));
1778   /* This is the place where the scheduler basically no longer needs the
1779      backward and forward dependencies for INSN.  Nevertheless they are used in
1780 heuristics in rank_for_schedule (), early_queue_to_ready () and in
1781 some targets (e.g. rs6000). Thus the earliest place where we *can*
1782 remove dependencies is after targetm.sched.finish () call in
1783      schedule_block ().  But, on the other hand, the safest place to remove
1784 dependencies is when we are finishing scheduling entire region. As we
1785 don't generate [many] dependencies during scheduling itself, we won't
1786 need memory until beginning of next region.
1787 Bottom line: Dependencies are removed for all insns in the end of
1788 scheduling the region. */
1790 /* Annotate the instruction with issue information -- TImode
1791 indicates that the instruction is expected not to be able
1792 to issue on the same cycle as the previous insn. A machine
1793 may use this information to decide how the instruction should
1794 be aligned. */
1795 if (issue_rate > 1
1796 && GET_CODE (PATTERN (insn)) != USE
1797 && GET_CODE (PATTERN (insn)) != CLOBBER
1798 && !DEBUG_INSN_P (insn))
1800 if (reload_completed)
1801 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
1802 last_clock_var = clock_var;
1805 return advance;
1808 /* Functions for handling of notes. */
1810 /* Add note list that ends on FROM_END to the end of TO_ENDP. */
1811 void
1812 concat_note_lists (rtx from_end, rtx *to_endp)
1814 rtx from_start;
1816   /* It's easy when we have nothing to concat.  */
1817 if (from_end == NULL)
1818 return;
1820 /* It's also easy when destination is empty. */
1821 if (*to_endp == NULL)
1823 *to_endp = from_end;
1824 return;
1827 from_start = from_end;
1828 while (PREV_INSN (from_start) != NULL)
1829 from_start = PREV_INSN (from_start);
1831 PREV_INSN (from_start) = *to_endp;
1832 NEXT_INSN (*to_endp) = from_start;
1833 *to_endp = from_end;
1836 /* Delete notes between HEAD and TAIL and put them in the chain
1837 of notes ended by NOTE_LIST. */
1838 void
1839 remove_notes (rtx head, rtx tail)
1841 rtx next_tail, insn, next;
1843 note_list = 0;
1844 if (head == tail && !INSN_P (head))
1845 return;
1847 next_tail = NEXT_INSN (tail);
1848 for (insn = head; insn != next_tail; insn = next)
1850 next = NEXT_INSN (insn);
1851 if (!NOTE_P (insn))
1852 continue;
1854 switch (NOTE_KIND (insn))
1856 case NOTE_INSN_BASIC_BLOCK:
1857 continue;
1859 case NOTE_INSN_EPILOGUE_BEG:
1860 if (insn != tail)
1862 remove_insn (insn);
1863 add_reg_note (next, REG_SAVE_NOTE,
1864 GEN_INT (NOTE_INSN_EPILOGUE_BEG));
1865 break;
1867 /* FALLTHRU */
1869 default:
1870 remove_insn (insn);
1872 /* Add the note to list that ends at NOTE_LIST. */
1873 PREV_INSN (insn) = note_list;
1874 NEXT_INSN (insn) = NULL_RTX;
1875 if (note_list)
1876 NEXT_INSN (note_list) = insn;
1877 note_list = insn;
1878 break;
1881 gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
1886 /* Return the head and tail pointers of ebb starting at BEG and ending
1887 at END. */
1888 void
1889 get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
1891 rtx beg_head = BB_HEAD (beg);
1892 rtx beg_tail = BB_END (beg);
1893 rtx end_head = BB_HEAD (end);
1894 rtx end_tail = BB_END (end);
1896 /* Don't include any notes or labels at the beginning of the BEG
1897      basic block, or notes at the end of the END basic block.  */
1899 if (LABEL_P (beg_head))
1900 beg_head = NEXT_INSN (beg_head);
1902 while (beg_head != beg_tail)
1903 if (NOTE_P (beg_head) || BOUNDARY_DEBUG_INSN_P (beg_head))
1904 beg_head = NEXT_INSN (beg_head);
1905 else
1906 break;
1908 *headp = beg_head;
1910 if (beg == end)
1911 end_head = beg_head;
1912 else if (LABEL_P (end_head))
1913 end_head = NEXT_INSN (end_head);
1915 while (end_head != end_tail)
1916 if (NOTE_P (end_tail) || BOUNDARY_DEBUG_INSN_P (end_tail))
1917 end_tail = PREV_INSN (end_tail);
1918 else
1919 break;
1921 *tailp = end_tail;
1924 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
1927 int no_real_insns_p (const_rtx head, const_rtx tail)
1929 while (head != NEXT_INSN (tail))
1931 if (!NOTE_P (head) && !LABEL_P (head)
1932 && !BOUNDARY_DEBUG_INSN_P (head))
1933 return 0;
1934 head = NEXT_INSN (head);
1936 return 1;
1939 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
1940 previously found among the insns. Insert them just before HEAD. */
1942 rtx restore_other_notes (rtx head, basic_block head_bb)
1944 if (note_list != 0)
1946 rtx note_head = note_list;
1948 if (head)
1949 head_bb = BLOCK_FOR_INSN (head);
1950 else
1951 head = NEXT_INSN (bb_note (head_bb));
1953 while (PREV_INSN (note_head))
1955 set_block_for_insn (note_head, head_bb);
1956 note_head = PREV_INSN (note_head);
1958       /* The loop above missed this note.  */
1959 set_block_for_insn (note_head, head_bb);
1961 PREV_INSN (note_head) = PREV_INSN (head);
1962 NEXT_INSN (PREV_INSN (head)) = note_head;
1963 PREV_INSN (head) = note_list;
1964 NEXT_INSN (note_list) = head;
1966 if (BLOCK_FOR_INSN (head) != head_bb)
1967 BB_END (head_bb) = note_list;
1969 head = note_head;
1972 return head;
1975 /* Move insns that became ready to fire from queue to ready list. */
1977 static void
1978 queue_to_ready (struct ready_list *ready)
1980 rtx insn;
1981 rtx link;
1982 rtx skip_insn;
1984 q_ptr = NEXT_Q (q_ptr);
1986 if (dbg_cnt (sched_insn) == false)
1987     /* If the debug counter is activated, do not requeue the insn right after
1988        last_scheduled_insn.  */
1989 skip_insn = next_nonnote_nondebug_insn (last_scheduled_insn);
1990 else
1991 skip_insn = NULL_RTX;
1993 /* Add all pending insns that can be scheduled without stalls to the
1994 ready list. */
1995 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
1997 insn = XEXP (link, 0);
1998 q_size -= 1;
2000 if (sched_verbose >= 2)
2001 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
2002 (*current_sched_info->print_insn) (insn, 0));
2004 /* If the ready list is full, delay the insn for 1 cycle.
2005 See the comment in schedule_block for the rationale. */
2006 if (!reload_completed
2007 && ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
2008 && !SCHED_GROUP_P (insn)
2009 && insn != skip_insn)
2011 if (sched_verbose >= 2)
2012 fprintf (sched_dump, "requeued because ready full\n");
2013 queue_insn (insn, 1);
2015 else
2017 ready_add (ready, insn, false);
2018 if (sched_verbose >= 2)
2019 fprintf (sched_dump, "moving to ready without stalls\n");
2022 free_INSN_LIST_list (&insn_queue[q_ptr]);
2024 /* If there are no ready insns, stall until one is ready and add all
2025 of the pending insns at that point to the ready list. */
2026 if (ready->n_ready == 0)
2028 int stalls;
2030 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
2032 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2034 for (; link; link = XEXP (link, 1))
2036 insn = XEXP (link, 0);
2037 q_size -= 1;
2039 if (sched_verbose >= 2)
2040 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
2041 (*current_sched_info->print_insn) (insn, 0));
2043 ready_add (ready, insn, false);
2044 if (sched_verbose >= 2)
2045 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
2047 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
2049 advance_one_cycle ();
2051 break;
2054 advance_one_cycle ();
2057 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
2058 clock_var += stalls;
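
/* Minimal standalone model (illustration only, not the real sched-int.h
   macros) of the circular stall-bucket queue walked above: an insn that
   must wait N more cycles lives in bucket (q_ptr + N) modulo the queue
   size, and advancing one cycle simply advances q_ptr.  The names and the
   power-of-two size below are assumptions made for this sketch.  */

#define SKETCH_Q_SIZE 64	/* assumed power of two */

static int sketch_q_ptr;

static int
sketch_bucket_for_stall (int stalls)
{
  /* Roughly what NEXT_Q_AFTER (q_ptr, stalls) computes in the scheduler.  */
  return (sketch_q_ptr + stalls) & (SKETCH_Q_SIZE - 1);
}

static void
sketch_advance_one_cycle (void)
{
  /* Roughly q_ptr = NEXT_Q (q_ptr): the bucket that was one cycle away
     becomes the "ready now" bucket.  */
  sketch_q_ptr = (sketch_q_ptr + 1) & (SKETCH_Q_SIZE - 1);
}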
2062 /* Used by early_queue_to_ready. Determines whether it is "ok" to
2063 prematurely move INSN from the queue to the ready list. Currently,
2064 if a target defines the hook 'is_costly_dependence', this function
2065 uses the hook to check whether there exist any dependences which are
2066 considered costly by the target, between INSN and other insns that
2067 have already been scheduled. Dependences are checked up to Y cycles
2068    back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
2069    controlling this value.
2070    (Other considerations could be taken into account, instead or in
2071    addition, depending on user flags and target hooks.)  */
2073 static bool
2074 ok_for_early_queue_removal (rtx insn)
2076 int n_cycles;
2077 rtx prev_insn = last_scheduled_insn;
2079 if (targetm.sched.is_costly_dependence)
2081 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
2083 for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
2085 int cost;
2087 if (prev_insn == current_sched_info->prev_head)
2089 prev_insn = NULL;
2090 break;
2093 if (!NOTE_P (prev_insn))
2095 dep_t dep;
2097 dep = sd_find_dep_between (prev_insn, insn, true);
2099 if (dep != NULL)
2101 cost = dep_cost (dep);
2103 if (targetm.sched.is_costly_dependence (dep, cost,
2104 flag_sched_stalled_insns_dep - n_cycles))
2105 return false;
2109 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
2110 break;
2113 if (!prev_insn)
2114 break;
2115 prev_insn = PREV_INSN (prev_insn);
2119 return true;
2123 /* Remove insns from the queue, before they become "ready" with respect
2124 to FU latency considerations. */
2126 static int
2127 early_queue_to_ready (state_t state, struct ready_list *ready)
2129 rtx insn;
2130 rtx link;
2131 rtx next_link;
2132 rtx prev_link;
2133 bool move_to_ready;
2134 int cost;
2135 state_t temp_state = alloca (dfa_state_size);
2136 int stalls;
2137 int insns_removed = 0;
2140 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
2141 function:
2143 X == 0: There is no limit on how many queued insns can be removed
2144 prematurely. (flag_sched_stalled_insns = -1).
2146 X >= 1: Only X queued insns can be removed prematurely in each
2147 invocation. (flag_sched_stalled_insns = X).
2149 Otherwise: Early queue removal is disabled.
2150 (flag_sched_stalled_insns = 0)
2153 if (! flag_sched_stalled_insns)
2154 return 0;
2156 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
2158 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2160 if (sched_verbose > 6)
2161 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
2163 prev_link = 0;
2164 while (link)
2166 next_link = XEXP (link, 1);
2167 insn = XEXP (link, 0);
2168 if (insn && sched_verbose > 6)
2169 print_rtl_single (sched_dump, insn);
2171 memcpy (temp_state, state, dfa_state_size);
2172 if (recog_memoized (insn) < 0)
2173 	    /* Use a non-negative cost to mark the insn as not ready,
2174 	       which avoids an infinite Q->R->Q->R... cycle.  */
2175 cost = 0;
2176 else
2177 cost = state_transition (temp_state, insn);
2179 if (sched_verbose >= 6)
2180 fprintf (sched_dump, "transition cost = %d\n", cost);
2182 move_to_ready = false;
2183 if (cost < 0)
2185 move_to_ready = ok_for_early_queue_removal (insn);
2186 if (move_to_ready == true)
2188 /* move from Q to R */
2189 q_size -= 1;
2190 ready_add (ready, insn, false);
2192 if (prev_link)
2193 XEXP (prev_link, 1) = next_link;
2194 else
2195 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
2197 free_INSN_LIST_node (link);
2199 if (sched_verbose >= 2)
2200 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
2201 (*current_sched_info->print_insn) (insn, 0));
2203 insns_removed++;
2204 if (insns_removed == flag_sched_stalled_insns)
2205 /* Remove no more than flag_sched_stalled_insns insns
2206 from Q at a time. */
2207 return insns_removed;
2211 if (move_to_ready == false)
2212 prev_link = link;
2214 link = next_link;
2215 } /* while link */
2216 } /* if link */
2218 } /* for stalls.. */
2220 return insns_removed;
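
/* Concrete example of the knobs above: compiling with
   -fsched-stalled-insns=2 -fsched-stalled-insns-dep=1 lets each call of
   early_queue_to_ready () promote at most two queued insns per cycle, and
   ok_for_early_queue_removal () inspects dependences only against insns
   scheduled within roughly the most recent dispatch group.  */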
2224 /* Print the ready list for debugging purposes. Callable from debugger. */
2226 static void
2227 debug_ready_list (struct ready_list *ready)
2229 rtx *p;
2230 int i;
2232 if (ready->n_ready == 0)
2234 fprintf (sched_dump, "\n");
2235 return;
2238 p = ready_lastpos (ready);
2239 for (i = 0; i < ready->n_ready; i++)
2241 fprintf (sched_dump, " %s:%d",
2242 (*current_sched_info->print_insn) (p[i], 0),
2243 INSN_LUID (p[i]));
2244 if (sched_pressure_p)
2245 fprintf (sched_dump, "(cost=%d",
2246 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
2247 if (INSN_TICK (p[i]) > clock_var)
2248 fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
2249 if (sched_pressure_p)
2250 fprintf (sched_dump, ")");
2252 fprintf (sched_dump, "\n");
2255 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
2256 NOTEs. This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
2257 replaces the epilogue note in the correct basic block. */
2258 void
2259 reemit_notes (rtx insn)
2261 rtx note, last = insn;
2263 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2265 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
2267 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
2269 last = emit_note_before (note_type, last);
2270 remove_note (insn, note);
2275 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
2276 static void
2277 move_insn (rtx insn, rtx last, rtx nt)
2279 if (PREV_INSN (insn) != last)
2281 basic_block bb;
2282 rtx note;
2283 int jump_p = 0;
2285 bb = BLOCK_FOR_INSN (insn);
2287 /* BB_HEAD is either LABEL or NOTE. */
2288 gcc_assert (BB_HEAD (bb) != insn);
2290 if (BB_END (bb) == insn)
2291 /* If this is last instruction in BB, move end marker one
2292 instruction up. */
2294 /* Jumps are always placed at the end of basic block. */
2295 jump_p = control_flow_insn_p (insn);
2297 gcc_assert (!jump_p
2298 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
2299 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
2300 || (common_sched_info->sched_pass_id
2301 == SCHED_EBB_PASS));
2303 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
2305 BB_END (bb) = PREV_INSN (insn);
2308 gcc_assert (BB_END (bb) != last);
2310 if (jump_p)
2311 /* We move the block note along with jump. */
2313 gcc_assert (nt);
2315 note = NEXT_INSN (insn);
2316 while (NOTE_NOT_BB_P (note) && note != nt)
2317 note = NEXT_INSN (note);
2319 if (note != nt
2320 && (LABEL_P (note)
2321 || BARRIER_P (note)))
2322 note = NEXT_INSN (note);
2324 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
2326 else
2327 note = insn;
2329 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
2330 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
2332 NEXT_INSN (note) = NEXT_INSN (last);
2333 PREV_INSN (NEXT_INSN (last)) = note;
2335 NEXT_INSN (last) = insn;
2336 PREV_INSN (insn) = last;
2338 bb = BLOCK_FOR_INSN (last);
2340 if (jump_p)
2342 fix_jump_move (insn);
2344 if (BLOCK_FOR_INSN (insn) != bb)
2345 move_block_after_check (insn);
2347 gcc_assert (BB_END (bb) == last);
2350 df_insn_change_bb (insn, bb);
2352 /* Update BB_END, if needed. */
2353 if (BB_END (bb) == last)
2354 BB_END (bb) = insn;
2357 SCHED_GROUP_P (insn) = 0;
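
/* Worked example of the splice above (ignoring jumps and bb notes): moving
   INSN == C out of the chain A <-> B <-> C <-> D to sit right after
   LAST == X first links B directly to D, then links X <-> C and C to X's
   old successor, leaving ... X <-> C <-> (old NEXT_INSN (X)) ...  */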
2360 /* Return true if scheduling INSN will finish current clock cycle. */
2361 static bool
2362 insn_finishes_cycle_p (rtx insn)
2364 if (SCHED_GROUP_P (insn))
2365     /* After issuing INSN, the rest of the sched_group will be forced to issue
2366 in order. Don't make any plans for the rest of cycle. */
2367 return true;
2369 /* Finishing the block will, apparently, finish the cycle. */
2370 if (current_sched_info->insn_finishes_block_p
2371 && current_sched_info->insn_finishes_block_p (insn))
2372 return true;
2374 return false;
2377 /* Define type for target data used in multipass scheduling. */
2378 #ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
2379 # define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
2380 #endif
2381 typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;
2383 /* The following structure describe an entry of the stack of choices. */
2384 struct choice_entry
2386 /* Ordinal number of the issued insn in the ready queue. */
2387 int index;
2388   /* The number of remaining insns whose issue we should still try.  */
2389 int rest;
2390 /* The number of issued essential insns. */
2391 int n;
2392 /* State after issuing the insn. */
2393 state_t state;
2394 /* Target-specific data. */
2395 first_cycle_multipass_data_t target_data;
2398 /* The following array is used to implement a stack of choices used in
2399 function max_issue. */
2400 static struct choice_entry *choice_stack;
2402 /* The following variable holds the number of essential insns issued on
2403    the current cycle.  An insn is essential if it changes the
2404    processor's state.  */
2405 int cycle_issued_insns;
2407 /* This holds the value of the target dfa_lookahead hook. */
2408 int dfa_lookahead;
2410 /* The following variable is the maximal number of tries of issuing
2411    insns for the first cycle multipass insn scheduling.  We define
2412    this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
2413    need this constraint if all real insns (with non-negative codes)
2414    had reservations, because in that case the algorithm complexity is
2415    O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
2416    might be incomplete and such insns might occur.  For such
2417    descriptions, the complexity of the algorithm (without the constraint)
2418    could reach DFA_LOOKAHEAD ** N, where N is the queue length.  */
2419 static int max_lookahead_tries;
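
/* Worked example: with dfa_lookahead == 4 and issue_rate == 2, the
   initialization in max_issue () below computes
   max_lookahead_tries = 100 * 4 * 4 = 1600.  */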
2421 /* The following value is the value of the hook
2422 `first_cycle_multipass_dfa_lookahead' at the last call of
2423 `max_issue'. */
2424 static int cached_first_cycle_multipass_dfa_lookahead = 0;
2426 /* The following value is the value of `issue_rate' at the last call of
2427 `sched_init'. */
2428 static int cached_issue_rate = 0;
2430 /* The following function returns the maximal (or close to maximal) number
2431    of insns which can be issued on the same cycle, one of which is the
2432    insn with the best rank (the first insn in READY).  To do this, the
2433    function tries different samples of ready insns.  READY is the
2434    current queue `ready'.  The global array READY_TRY reflects which
2435    insns have already been issued in this try.  The function stops
2436    immediately if it finds a solution in which all insns can be issued.
2437    INDEX will contain the index of the best insn in READY.  The following
2438    function is used only for first cycle multipass scheduling.
2440 PRIVILEGED_N >= 0
2442 This function expects recognized insns only. All USEs,
2443 CLOBBERs, etc must be filtered elsewhere. */
2445 int max_issue (struct ready_list *ready, int privileged_n, state_t state,
2446 bool first_cycle_insn_p, int *index)
2448 int n, i, all, n_ready, best, delay, tries_num;
2449 int more_issue;
2450 struct choice_entry *top;
2451 rtx insn;
2453 n_ready = ready->n_ready;
2454 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
2455 && privileged_n <= n_ready);
2457 /* Init MAX_LOOKAHEAD_TRIES. */
2458 if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
2460 cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
2461 max_lookahead_tries = 100;
2462 for (i = 0; i < issue_rate; i++)
2463 max_lookahead_tries *= dfa_lookahead;
2466 /* Init max_points. */
2467 more_issue = issue_rate - cycle_issued_insns;
2468 gcc_assert (more_issue >= 0);
2470 /* The number of the issued insns in the best solution. */
2471 best = 0;
2473 top = choice_stack;
2475 /* Set initial state of the search. */
2476 memcpy (top->state, state, dfa_state_size);
2477 top->rest = dfa_lookahead;
2478 top->n = 0;
2479 if (targetm.sched.first_cycle_multipass_begin)
2480 targetm.sched.first_cycle_multipass_begin (&top->target_data,
2481 ready_try, n_ready,
2482 first_cycle_insn_p);
2484 /* Count the number of the insns to search among. */
2485 for (all = i = 0; i < n_ready; i++)
2486 if (!ready_try [i])
2487 all++;
2489 /* I is the index of the insn to try next. */
2490 i = 0;
2491 tries_num = 0;
2492 for (;;)
2494 if (/* If we've reached a dead end or searched enough of what we have
2495 been asked... */
2496 top->rest == 0
2497 /* or have nothing else to try... */
2498 || i >= n_ready
2499 /* or should not issue more. */
2500 || top->n >= more_issue)
2502 /* ??? (... || i == n_ready). */
2503 gcc_assert (i <= n_ready);
2505 /* We should not issue more than issue_rate instructions. */
2506 gcc_assert (top->n <= more_issue);
2508 if (top == choice_stack)
2509 break;
2511 if (best < top - choice_stack)
2513 if (privileged_n)
2515 n = privileged_n;
2516 /* Try to find issued privileged insn. */
2517 while (n && !ready_try[--n]);
2520 if (/* If all insns are equally good... */
2521 privileged_n == 0
2522 /* Or a privileged insn will be issued. */
2523 || ready_try[n])
2524 /* Then we have a solution. */
2526 best = top - choice_stack;
2527 /* This is the index of the insn issued first in this
2528 solution. */
2529 *index = choice_stack [1].index;
2530 if (top->n == more_issue || best == all)
2531 break;
2535 /* Set ready-list index to point to the last insn
2536 ('i++' below will advance it to the next insn). */
2537 i = top->index;
2539 /* Backtrack. */
2540 ready_try [i] = 0;
2542 if (targetm.sched.first_cycle_multipass_backtrack)
2543 targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
2544 ready_try, n_ready);
2546 top--;
2547 memcpy (state, top->state, dfa_state_size);
2549 else if (!ready_try [i])
2551 tries_num++;
2552 if (tries_num > max_lookahead_tries)
2553 break;
2554 insn = ready_element (ready, i);
2555 delay = state_transition (state, insn);
2556 if (delay < 0)
2558 if (state_dead_lock_p (state)
2559 || insn_finishes_cycle_p (insn))
2560 /* We won't issue any more instructions in the next
2561 choice_state. */
2562 top->rest = 0;
2563 else
2564 top->rest--;
2566 n = top->n;
2567 if (memcmp (top->state, state, dfa_state_size) != 0)
2568 n++;
2570 /* Advance to the next choice_entry. */
2571 top++;
2572 /* Initialize it. */
2573 top->rest = dfa_lookahead;
2574 top->index = i;
2575 top->n = n;
2576 memcpy (top->state, state, dfa_state_size);
2577 ready_try [i] = 1;
2579 if (targetm.sched.first_cycle_multipass_issue)
2580 targetm.sched.first_cycle_multipass_issue (&top->target_data,
2581 ready_try, n_ready,
2582 insn,
2583 &((top - 1)
2584 ->target_data));
2586 i = -1;
2590 /* Increase ready-list index. */
2591 i++;
2594 if (targetm.sched.first_cycle_multipass_end)
2595 targetm.sched.first_cycle_multipass_end (best != 0
2596 ? &choice_stack[1].target_data
2597 : NULL);
2599 /* Restore the original state of the DFA. */
2600 memcpy (state, choice_stack->state, dfa_state_size);
2602 return best;
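
/* Small trace of the search above (illustration): with issue_rate == 2, an
   empty DFA state, READY = {A, B, C} and PRIVILEGED_N == 1 (as choose_ready ()
   passes), max_issue () first pushes A (choice_stack[1]), then tries B on top
   of A; if the DFA accepts both, top->n reaches more_issue, the privileged
   insn A is found issued, and the loop records best == 2 with *INDEX set to
   A's position, the index stored in choice_stack[1].  */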
2605 /* The following function chooses insn from READY and modifies
2606 READY. The following function is used only for first
2607 cycle multipass scheduling.
2608 Return:
2609 -1 if cycle should be advanced,
2610 0 if INSN_PTR is set to point to the desirable insn,
2611 1 if choose_ready () should be restarted without advancing the cycle. */
2612 static int
2613 choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
2614 rtx *insn_ptr)
2616 int lookahead;
2618 if (dbg_cnt (sched_insn) == false)
2620 rtx insn;
2622 insn = next_nonnote_insn (last_scheduled_insn);
2624 if (QUEUE_INDEX (insn) == QUEUE_READY)
2625 /* INSN is in the ready_list. */
2627 ready_remove_insn (insn);
2628 *insn_ptr = insn;
2629 return 0;
2632 /* INSN is in the queue. Advance cycle to move it to the ready list. */
2633 return -1;
2636 lookahead = 0;
2638 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2639 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2640 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
2641 || DEBUG_INSN_P (ready_element (ready, 0)))
2643 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
2644 *insn_ptr = ready_remove_first_dispatch (ready);
2645 else
2646 *insn_ptr = ready_remove_first (ready);
2648 return 0;
2650 else
2652 /* Try to choose the better insn. */
2653 int index = 0, i, n;
2654 rtx insn;
2655 int try_data = 1, try_control = 1;
2656 ds_t ts;
2658 insn = ready_element (ready, 0);
2659 if (INSN_CODE (insn) < 0)
2661 *insn_ptr = ready_remove_first (ready);
2662 return 0;
2665 if (spec_info
2666 && spec_info->flags & (PREFER_NON_DATA_SPEC
2667 | PREFER_NON_CONTROL_SPEC))
2669 for (i = 0, n = ready->n_ready; i < n; i++)
2671 rtx x;
2672 ds_t s;
2674 x = ready_element (ready, i);
2675 s = TODO_SPEC (x);
2677 if (spec_info->flags & PREFER_NON_DATA_SPEC
2678 && !(s & DATA_SPEC))
2680 try_data = 0;
2681 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2682 || !try_control)
2683 break;
2686 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2687 && !(s & CONTROL_SPEC))
2689 try_control = 0;
2690 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2691 break;
2696 ts = TODO_SPEC (insn);
2697 if ((ts & SPECULATIVE)
2698 && (((!try_data && (ts & DATA_SPEC))
2699 || (!try_control && (ts & CONTROL_SPEC)))
2700 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2701 && !targetm.sched
2702 .first_cycle_multipass_dfa_lookahead_guard_spec (insn))))
2703 /* Discard speculative instruction that stands first in the ready
2704 list. */
2706 change_queue_index (insn, 1);
2707 return 1;
2710 ready_try[0] = 0;
2712 for (i = 1; i < ready->n_ready; i++)
2714 insn = ready_element (ready, i);
2716 ready_try [i]
2717 = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2718 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC)));
2721 /* Let the target filter the search space. */
2722 for (i = 1; i < ready->n_ready; i++)
2723 if (!ready_try[i])
2725 insn = ready_element (ready, i);
2727 /* If this insn is recognizable we should have already
2728 recognized it earlier.
2729 ??? Not very clear where this is supposed to be done.
2730 See dep_cost_1. */
2731 gcc_checking_assert (INSN_CODE (insn) >= 0
2732 || recog_memoized (insn) < 0);
2734 ready_try [i]
2735 = (/* INSN_CODE check can be omitted here as it is also done later
2736 in max_issue (). */
2737 INSN_CODE (insn) < 0
2738 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2739 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2740 (insn)));
2743 if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
2745 *insn_ptr = ready_remove_first (ready);
2746 if (sched_verbose >= 4)
2747 fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
2748 (*current_sched_info->print_insn) (*insn_ptr, 0));
2749 return 0;
2751 else
2753 if (sched_verbose >= 4)
2754 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
2755 (*current_sched_info->print_insn)
2756 (ready_element (ready, index), 0));
2758 *insn_ptr = ready_remove (ready, index);
2759 return 0;
2764 /* Use forward list scheduling to rearrange insns of block pointed to by
2765 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2766 region. */
2768 void
2769 schedule_block (basic_block *target_bb)
2771 int i;
2772 bool first_cycle_insn_p;
2773 int can_issue_more;
2774 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2775 int sort_p, advance, start_clock_var;
2777 /* Head/tail info for this block. */
2778 rtx prev_head = current_sched_info->prev_head;
2779 rtx next_tail = current_sched_info->next_tail;
2780 rtx head = NEXT_INSN (prev_head);
2781 rtx tail = PREV_INSN (next_tail);
2783 /* We used to have code to avoid getting parameters moved from hard
2784 argument registers into pseudos.
2786 However, it was removed when it proved to be of marginal benefit
2787 and caused problems because schedule_block and compute_forward_dependences
2788 had different notions of what the "head" insn was. */
2790 gcc_assert (head != tail || INSN_P (head));
2792 haifa_recovery_bb_recently_added_p = false;
2794 /* Debug info. */
2795 if (sched_verbose)
2796 dump_new_block_header (0, *target_bb, head, tail);
2798 state_reset (curr_state);
2800 /* Clear the ready list. */
2801 ready.first = ready.veclen - 1;
2802 ready.n_ready = 0;
2803 ready.n_debug = 0;
2805 /* It is used for first cycle multipass scheduling. */
2806 temp_state = alloca (dfa_state_size);
2808 if (targetm.sched.init)
2809 targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
2811 /* We start inserting insns after PREV_HEAD. */
2812 last_scheduled_insn = prev_head;
2814 gcc_assert ((NOTE_P (last_scheduled_insn)
2815 || BOUNDARY_DEBUG_INSN_P (last_scheduled_insn))
2816 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2818 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
2819 queue. */
2820 q_ptr = 0;
2821 q_size = 0;
2823 insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
2824 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
2826 /* Start just before the beginning of time. */
2827 clock_var = -1;
2829   /* We need the queue and ready lists and clock_var to be initialized
2830      in try_ready () (which is called through init_ready_list ()).  */
2831 (*current_sched_info->init_ready_list) ();
2833 /* The algorithm is O(n^2) in the number of ready insns at any given
2834 time in the worst case. Before reload we are more likely to have
2835 big lists so truncate them to a reasonable size. */
2836 if (!reload_completed
2837 && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
2839 ready_sort (&ready);
2841 /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
2842 If there are debug insns, we know they're first. */
2843 for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
2844 if (!SCHED_GROUP_P (ready_element (&ready, i)))
2845 break;
2847 if (sched_verbose >= 2)
2849 fprintf (sched_dump,
2850 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2851 fprintf (sched_dump,
2852 ";;\t\t before reload => truncated to %d insns\n", i);
2855 /* Delay all insns past it for 1 cycle. If debug counter is
2856 activated make an exception for the insn right after
2857 last_scheduled_insn. */
2859 rtx skip_insn;
2861 if (dbg_cnt (sched_insn) == false)
2862 skip_insn = next_nonnote_insn (last_scheduled_insn);
2863 else
2864 skip_insn = NULL_RTX;
2866 while (i < ready.n_ready)
2868 rtx insn;
2870 insn = ready_remove (&ready, i);
2872 if (insn != skip_insn)
2873 queue_insn (insn, 1);
2878 /* Now we can restore basic block notes and maintain precise cfg. */
2879 restore_bb_notes (*target_bb);
2881 last_clock_var = -1;
2883 advance = 0;
2885 sort_p = TRUE;
2886 /* Loop until all the insns in BB are scheduled. */
2887 while ((*current_sched_info->schedule_more_p) ())
2891 start_clock_var = clock_var;
2893 clock_var++;
2895 advance_one_cycle ();
2897 /* Add to the ready list all pending insns that can be issued now.
2898 If there are no ready insns, increment clock until one
2899 is ready and add all pending insns at that point to the ready
2900 list. */
2901 queue_to_ready (&ready);
2903 gcc_assert (ready.n_ready);
2905 if (sched_verbose >= 2)
2907 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2908 debug_ready_list (&ready);
2910 advance -= clock_var - start_clock_var;
2912 while (advance > 0);
2914 if (sort_p)
2916 /* Sort the ready list based on priority. */
2917 ready_sort (&ready);
2919 if (sched_verbose >= 2)
2921 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2922 debug_ready_list (&ready);
2926       /* We don't want md sched reorder to even see debug insns, so put
2927 them out right away. */
2928 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
2930 if (control_flow_insn_p (last_scheduled_insn))
2932 *target_bb = current_sched_info->advance_target_bb
2933 (*target_bb, 0);
2935 if (sched_verbose)
2937 rtx x;
2939 x = next_real_insn (last_scheduled_insn);
2940 gcc_assert (x);
2941 dump_new_block_header (1, *target_bb, x, tail);
2944 last_scheduled_insn = bb_note (*target_bb);
2947 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
2949 rtx insn = ready_remove_first (&ready);
2950 gcc_assert (DEBUG_INSN_P (insn));
2951 (*current_sched_info->begin_schedule_ready) (insn,
2952 last_scheduled_insn);
2953 move_insn (insn, last_scheduled_insn,
2954 current_sched_info->next_tail);
2955 last_scheduled_insn = insn;
2956 advance = schedule_insn (insn);
2957 gcc_assert (advance == 0);
2958 if (ready.n_ready > 0)
2959 ready_sort (&ready);
2962 if (!ready.n_ready)
2963 continue;
2966 /* Allow the target to reorder the list, typically for
2967 better instruction bundling. */
2968 if (sort_p && targetm.sched.reorder
2969 && (ready.n_ready == 0
2970 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2971 can_issue_more =
2972 targetm.sched.reorder (sched_dump, sched_verbose,
2973 ready_lastpos (&ready),
2974 &ready.n_ready, clock_var);
2975 else
2976 can_issue_more = issue_rate;
2978 first_cycle_insn_p = true;
2979 cycle_issued_insns = 0;
2980 for (;;)
2982 rtx insn;
2983 int cost;
2984 bool asm_p = false;
2986 if (sched_verbose >= 2)
2988 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
2989 clock_var);
2990 debug_ready_list (&ready);
2991 if (sched_pressure_p)
2992 print_curr_reg_pressure ();
2995 if (ready.n_ready == 0
2996 && can_issue_more
2997 && reload_completed)
2999 /* Allow scheduling insns directly from the queue in case
3000 there's nothing better to do (ready list is empty) but
3001 there are still vacant dispatch slots in the current cycle. */
3002 if (sched_verbose >= 6)
3003 fprintf (sched_dump,";;\t\tSecond chance\n");
3004 memcpy (temp_state, curr_state, dfa_state_size);
3005 if (early_queue_to_ready (temp_state, &ready))
3006 ready_sort (&ready);
3009 if (ready.n_ready == 0
3010 || !can_issue_more
3011 || state_dead_lock_p (curr_state)
3012 || !(*current_sched_info->schedule_more_p) ())
3013 break;
3015 /* Select and remove the insn from the ready list. */
3016 if (sort_p)
3018 int res;
3020 insn = NULL_RTX;
3021 res = choose_ready (&ready, first_cycle_insn_p, &insn);
3023 if (res < 0)
3024 /* Finish cycle. */
3025 break;
3026 if (res > 0)
3027 /* Restart choose_ready (). */
3028 continue;
3030 gcc_assert (insn != NULL_RTX);
3032 else
3033 insn = ready_remove_first (&ready);
3035 if (sched_pressure_p && INSN_TICK (insn) > clock_var)
3037 ready_add (&ready, insn, true);
3038 advance = 1;
3039 break;
3042 if (targetm.sched.dfa_new_cycle
3043 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
3044 insn, last_clock_var,
3045 clock_var, &sort_p))
3046 /* SORT_P is used by the target to override sorting
3047 of the ready list. This is needed when the target
3048 has modified its internal structures expecting that
3049 the insn will be issued next. As we need the insn
3050 to have the highest priority (so it will be returned by
3051 the ready_remove_first call above), we invoke
3052 ready_add (&ready, insn, true).
3053 But, still, there is one issue: INSN can be later
3054 discarded by scheduler's front end through
3055 current_sched_info->can_schedule_ready_p, hence, won't
3056 be issued next. */
3058 ready_add (&ready, insn, true);
3059 break;
3062 sort_p = TRUE;
3063 memcpy (temp_state, curr_state, dfa_state_size);
3064 if (recog_memoized (insn) < 0)
3066 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
3067 || asm_noperands (PATTERN (insn)) >= 0);
3068 if (!first_cycle_insn_p && asm_p)
3069 	    /* This is an asm insn that we are trying to issue on a cycle
3070 	       other than the first.  Issue it on the next cycle.  */
3071 cost = 1;
3072 else
3073 /* A USE insn, or something else we don't need to
3074 understand. We can't pass these directly to
3075 state_transition because it will trigger a
3076 fatal error for unrecognizable insns. */
3077 cost = 0;
3079 else if (sched_pressure_p)
3080 cost = 0;
3081 else
3083 cost = state_transition (temp_state, insn);
3084 if (cost < 0)
3085 cost = 0;
3086 else if (cost == 0)
3087 cost = 1;
3090 if (cost >= 1)
3092 queue_insn (insn, cost);
3093 if (SCHED_GROUP_P (insn))
3095 advance = cost;
3096 break;
3099 continue;
3102 if (current_sched_info->can_schedule_ready_p
3103 && ! (*current_sched_info->can_schedule_ready_p) (insn))
3104 /* We normally get here only if we don't want to move
3105 insn from the split block. */
3107 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
3108 continue;
3111 /* DECISION is made. */
3113 if (TODO_SPEC (insn) & SPECULATIVE)
3114 generate_recovery_code (insn);
3116 if (control_flow_insn_p (last_scheduled_insn)
3117 	  /* This is used to switch basic blocks by request
3118 	     from the scheduler front-end (actually, sched-ebb.c only).
3119 	     It is used to process blocks with a single fallthru
3120 	     edge.  If the succeeding block has a jump, that jump would be
3121 	     moved to the end of the current bb, thus corrupting the CFG.  */
3122 || current_sched_info->advance_target_bb (*target_bb, insn))
3124 *target_bb = current_sched_info->advance_target_bb
3125 (*target_bb, 0);
3127 if (sched_verbose)
3129 rtx x;
3131 x = next_real_insn (last_scheduled_insn);
3132 gcc_assert (x);
3133 dump_new_block_header (1, *target_bb, x, tail);
3136 last_scheduled_insn = bb_note (*target_bb);
3139 /* Update counters, etc in the scheduler's front end. */
3140 (*current_sched_info->begin_schedule_ready) (insn,
3141 last_scheduled_insn);
3143 move_insn (insn, last_scheduled_insn, current_sched_info->next_tail);
3145 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
3146 targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
3148 reemit_notes (insn);
3149 last_scheduled_insn = insn;
3151 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
3153 cycle_issued_insns++;
3154 memcpy (curr_state, temp_state, dfa_state_size);
3157 if (targetm.sched.variable_issue)
3158 can_issue_more =
3159 targetm.sched.variable_issue (sched_dump, sched_verbose,
3160 insn, can_issue_more);
3161 /* A naked CLOBBER or USE generates no instruction, so do
3162 not count them against the issue rate. */
3163 else if (GET_CODE (PATTERN (insn)) != USE
3164 && GET_CODE (PATTERN (insn)) != CLOBBER)
3165 can_issue_more--;
3166 advance = schedule_insn (insn);
3168 /* After issuing an asm insn we should start a new cycle. */
3169 if (advance == 0 && asm_p)
3170 advance = 1;
3171 if (advance != 0)
3172 break;
3174 first_cycle_insn_p = false;
3176 /* Sort the ready list based on priority. This must be
3177 redone here, as schedule_insn may have readied additional
3178 insns that will not be sorted correctly. */
3179 if (ready.n_ready > 0)
3180 ready_sort (&ready);
3182 /* Quickly go through debug insns such that md sched
3183 reorder2 doesn't have to deal with debug insns. */
3184 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
3185 && (*current_sched_info->schedule_more_p) ())
3187 if (control_flow_insn_p (last_scheduled_insn))
3189 *target_bb = current_sched_info->advance_target_bb
3190 (*target_bb, 0);
3192 if (sched_verbose)
3194 rtx x;
3196 x = next_real_insn (last_scheduled_insn);
3197 gcc_assert (x);
3198 dump_new_block_header (1, *target_bb, x, tail);
3201 last_scheduled_insn = bb_note (*target_bb);
3204 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
3206 insn = ready_remove_first (&ready);
3207 gcc_assert (DEBUG_INSN_P (insn));
3208 (*current_sched_info->begin_schedule_ready)
3209 (insn, last_scheduled_insn);
3210 move_insn (insn, last_scheduled_insn,
3211 current_sched_info->next_tail);
3212 advance = schedule_insn (insn);
3213 last_scheduled_insn = insn;
3214 gcc_assert (advance == 0);
3215 if (ready.n_ready > 0)
3216 ready_sort (&ready);
3220 if (targetm.sched.reorder2
3221 && (ready.n_ready == 0
3222 || !SCHED_GROUP_P (ready_element (&ready, 0))))
3224 can_issue_more =
3225 targetm.sched.reorder2 (sched_dump, sched_verbose,
3226 ready.n_ready
3227 ? ready_lastpos (&ready) : NULL,
3228 &ready.n_ready, clock_var);
3233 /* Debug info. */
3234 if (sched_verbose)
3236 fprintf (sched_dump, ";;\tReady list (final): ");
3237 debug_ready_list (&ready);
3240 if (current_sched_info->queue_must_finish_empty)
3241 /* Sanity check -- queue must be empty now. Meaningless if region has
3242 multiple bbs. */
3243 gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
3244 else
3246 /* We must maintain QUEUE_INDEX between blocks in region. */
3247 for (i = ready.n_ready - 1; i >= 0; i--)
3249 rtx x;
3251 x = ready_element (&ready, i);
3252 QUEUE_INDEX (x) = QUEUE_NOWHERE;
3253 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
3256 if (q_size)
3257 for (i = 0; i <= max_insn_queue_index; i++)
3259 rtx link;
3260 for (link = insn_queue[i]; link; link = XEXP (link, 1))
3262 rtx x;
3264 x = XEXP (link, 0);
3265 QUEUE_INDEX (x) = QUEUE_NOWHERE;
3266 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
3268 free_INSN_LIST_list (&insn_queue[i]);
3272 if (sched_verbose)
3273 fprintf (sched_dump, ";; total time = %d\n", clock_var);
3275 if (!current_sched_info->queue_must_finish_empty
3276 || haifa_recovery_bb_recently_added_p)
3278 /* INSN_TICK (minimum clock tick at which the insn becomes
3279        ready) may not be correct for the insns in the subsequent
3280 blocks of the region. We should use a correct value of
3281 `clock_var' or modify INSN_TICK. It is better to keep
3282 clock_var value equal to 0 at the start of a basic block.
3283 Therefore we modify INSN_TICK here. */
3284 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
3287 if (targetm.sched.finish)
3289 targetm.sched.finish (sched_dump, sched_verbose);
3290 /* Target might have added some instructions to the scheduled block
3291 in its md_finish () hook. These new insns don't have any data
3292 initialized and to identify them we extend h_i_d so that they'll
3293 get zero luids. */
3294 sched_init_luids (NULL, NULL, NULL, NULL);
3297 if (sched_verbose)
3298 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n\n",
3299 INSN_UID (head), INSN_UID (tail));
3301 /* Update head/tail boundaries. */
3302 head = NEXT_INSN (prev_head);
3303 tail = last_scheduled_insn;
3305 head = restore_other_notes (head, NULL);
3307 current_sched_info->head = head;
3308 current_sched_info->tail = tail;
3311 /* Set_priorities: compute priority of each insn in the block. */
3314 int set_priorities (rtx head, rtx tail)
3316 rtx insn;
3317 int n_insn;
3318 int sched_max_insns_priority =
3319 current_sched_info->sched_max_insns_priority;
3320 rtx prev_head;
3322 if (head == tail && (! INSN_P (head) || BOUNDARY_DEBUG_INSN_P (head)))
3323 gcc_unreachable ();
3325 n_insn = 0;
3327 prev_head = PREV_INSN (head);
3328 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
3330 if (!INSN_P (insn))
3331 continue;
3333 n_insn++;
3334 (void) priority (insn);
3336 gcc_assert (INSN_PRIORITY_KNOWN (insn));
3338 sched_max_insns_priority = MAX (sched_max_insns_priority,
3339 INSN_PRIORITY (insn));
3342 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
3344 return n_insn;
3347 /* Set dump and sched_verbose for the desired debugging output. If no
3348 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
3349 For -fsched-verbose=N, N>=10, print everything to stderr. */
3350 void
3351 setup_sched_dump (void)
3353 sched_verbose = sched_verbose_param;
3354 if (sched_verbose_param == 0 && dump_file)
3355 sched_verbose = 1;
3356 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
3357 ? stderr : dump_file);
3360 /* Initialize some global state for the scheduler. This function works
3361 with the common data shared between all the schedulers. It is called
3362 from the scheduler specific initialization routine. */
3364 void
3365 sched_init (void)
3367 /* Disable speculative loads in their presence if cc0 defined. */
3368 #ifdef HAVE_cc0
3369 flag_schedule_speculative_load = 0;
3370 #endif
3372 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
3373 targetm.sched.dispatch_do (NULL_RTX, DISPATCH_INIT);
3375 sched_pressure_p = (flag_sched_pressure && ! reload_completed
3376 && common_sched_info->sched_pass_id == SCHED_RGN_PASS);
3378 if (sched_pressure_p)
3379 ira_setup_eliminable_regset ();
3381 /* Initialize SPEC_INFO. */
3382 if (targetm.sched.set_sched_flags)
3384 spec_info = &spec_info_var;
3385 targetm.sched.set_sched_flags (spec_info);
3387 if (spec_info->mask != 0)
3389 spec_info->data_weakness_cutoff =
3390 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
3391 spec_info->control_weakness_cutoff =
3392 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
3393 * REG_BR_PROB_BASE) / 100;
3395 else
3396 /* So we won't read anything accidentally. */
3397 spec_info = NULL;
3400 else
3401 /* So we won't read anything accidentally. */
3402 spec_info = 0;
3404 /* Initialize issue_rate. */
3405 if (targetm.sched.issue_rate)
3406 issue_rate = targetm.sched.issue_rate ();
3407 else
3408 issue_rate = 1;
3410 if (cached_issue_rate != issue_rate)
3412 cached_issue_rate = issue_rate;
3413 /* To invalidate max_lookahead_tries: */
3414 cached_first_cycle_multipass_dfa_lookahead = 0;
3417 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
3418 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
3419 else
3420 dfa_lookahead = 0;
3422 if (targetm.sched.init_dfa_pre_cycle_insn)
3423 targetm.sched.init_dfa_pre_cycle_insn ();
3425 if (targetm.sched.init_dfa_post_cycle_insn)
3426 targetm.sched.init_dfa_post_cycle_insn ();
3428 dfa_start ();
3429 dfa_state_size = state_size ();
3431 init_alias_analysis ();
3433 df_set_flags (DF_LR_RUN_DCE);
3434 df_note_add_problem ();
3436 /* More problems needed for interloop dep calculation in SMS. */
3437 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
3439 df_rd_add_problem ();
3440 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
3443 df_analyze ();
3445 /* Do not run DCE after reload, as this can kill nops inserted
3446 by bundling. */
3447 if (reload_completed)
3448 df_clear_flags (DF_LR_RUN_DCE);
3450 regstat_compute_calls_crossed ();
3452 if (targetm.sched.init_global)
3453 targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
3455 if (sched_pressure_p)
3457 int i, max_regno = max_reg_num ();
3459 ira_set_pseudo_classes (sched_verbose ? sched_dump : NULL);
3460 sched_regno_cover_class
3461 = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
3462 for (i = 0; i < max_regno; i++)
3463 sched_regno_cover_class[i]
3464 = (i < FIRST_PSEUDO_REGISTER
3465 ? ira_class_translate[REGNO_REG_CLASS (i)]
3466 : reg_cover_class (i));
3467 curr_reg_live = BITMAP_ALLOC (NULL);
3468 saved_reg_live = BITMAP_ALLOC (NULL);
3469 region_ref_regs = BITMAP_ALLOC (NULL);
3472 curr_state = xmalloc (dfa_state_size);
3475 static void haifa_init_only_bb (basic_block, basic_block);
3477 /* Initialize data structures specific to the Haifa scheduler. */
3478 void
3479 haifa_sched_init (void)
3481 setup_sched_dump ();
3482 sched_init ();
3484 if (spec_info != NULL)
3486 sched_deps_info->use_deps_list = 1;
3487 sched_deps_info->generate_spec_deps = 1;
3490 /* Initialize luids, dependency caches, target and h_i_d for the
3491 whole function. */
3493 bb_vec_t bbs = VEC_alloc (basic_block, heap, n_basic_blocks);
3494 basic_block bb;
3496 sched_init_bbs ();
3498 FOR_EACH_BB (bb)
3499 VEC_quick_push (basic_block, bbs, bb);
3500 sched_init_luids (bbs, NULL, NULL, NULL);
3501 sched_deps_init (true);
3502 sched_extend_target ();
3503 haifa_init_h_i_d (bbs, NULL, NULL, NULL);
3505 VEC_free (basic_block, heap, bbs);
3508 sched_init_only_bb = haifa_init_only_bb;
3509 sched_split_block = sched_split_block_1;
3510 sched_create_empty_bb = sched_create_empty_bb_1;
3511 haifa_recovery_bb_ever_added_p = false;
3513 #ifdef ENABLE_CHECKING
3514 /* This is used preferably for finding bugs in check_cfg () itself.
3515 We must call sched_bbs_init () before check_cfg () because check_cfg ()
3516 assumes that the last insn in the last bb has a non-null successor. */
3517 check_cfg (0, 0);
3518 #endif
3520 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
3521 before_recovery = 0;
3522 after_recovery = 0;
3525 /* Finish work with the data specific to the Haifa scheduler. */
3526 void
3527 haifa_sched_finish (void)
3529 sched_create_empty_bb = NULL;
3530 sched_split_block = NULL;
3531 sched_init_only_bb = NULL;
3533 if (spec_info && spec_info->dump)
3535 char c = reload_completed ? 'a' : 'b';
3537 fprintf (spec_info->dump,
3538 ";; %s:\n", current_function_name ());
3540 fprintf (spec_info->dump,
3541 ";; Procedure %cr-begin-data-spec motions == %d\n",
3542 c, nr_begin_data);
3543 fprintf (spec_info->dump,
3544 ";; Procedure %cr-be-in-data-spec motions == %d\n",
3545 c, nr_be_in_data);
3546 fprintf (spec_info->dump,
3547 ";; Procedure %cr-begin-control-spec motions == %d\n",
3548 c, nr_begin_control);
3549 fprintf (spec_info->dump,
3550 ";; Procedure %cr-be-in-control-spec motions == %d\n",
3551 c, nr_be_in_control);
3554 /* Finalize h_i_d, dependency caches, and luids for the whole
3555 function. Target will be finalized in md_global_finish (). */
3556 sched_deps_finish ();
3557 sched_finish_luids ();
3558 current_sched_info = NULL;
3559 sched_finish ();
3562 /* Free global data used during insn scheduling. This function works with
3563 the common data shared between the schedulers. */
3565 void
3566 sched_finish (void)
3568 haifa_finish_h_i_d ();
3569 if (sched_pressure_p)
3571 free (sched_regno_cover_class);
3572 BITMAP_FREE (region_ref_regs);
3573 BITMAP_FREE (saved_reg_live);
3574 BITMAP_FREE (curr_reg_live);
3576 free (curr_state);
3578 if (targetm.sched.finish_global)
3579 targetm.sched.finish_global (sched_dump, sched_verbose);
3581 end_alias_analysis ();
3583 regstat_free_calls_crossed ();
3585 dfa_finish ();
3587 #ifdef ENABLE_CHECKING
3588 /* After reload ia64 backend clobbers CFG, so can't check anything. */
3589 if (!reload_completed)
3590 check_cfg (0, 0);
3591 #endif
3594 /* Fix INSN_TICKs of the instructions in the current block as well as
3595 INSN_TICKs of their dependents.
3596 HEAD and TAIL are the begin and the end of the current scheduled block. */
3597 static void
3598 fix_inter_tick (rtx head, rtx tail)
3600 /* Set of instructions with corrected INSN_TICK. */
3601 bitmap_head processed;
3602   /* ??? It is doubtful whether we should assume that cycle advance happens on
3603      basic block boundaries.  Basically, insns that are unconditionally ready
3604      at the start of the block are preferable to those which have
3605      a one-cycle dependency on an insn from the previous block.  */
3606 int next_clock = clock_var + 1;
3608 bitmap_initialize (&processed, 0);
3610 /* Iterates over scheduled instructions and fix their INSN_TICKs and
3611 INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
3612 across different blocks. */
3613 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
3615 if (INSN_P (head))
3617 int tick;
3618 sd_iterator_def sd_it;
3619 dep_t dep;
3621 tick = INSN_TICK (head);
3622 gcc_assert (tick >= MIN_TICK);
3624 /* Fix INSN_TICK of instruction from just scheduled block. */
3625 if (bitmap_set_bit (&processed, INSN_LUID (head)))
3627 tick -= next_clock;
3629 if (tick < MIN_TICK)
3630 tick = MIN_TICK;
3632 INSN_TICK (head) = tick;
3635 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
3637 rtx next;
3639 next = DEP_CON (dep);
3640 tick = INSN_TICK (next);
3642 if (tick != INVALID_TICK
3643 /* If NEXT has its INSN_TICK calculated, fix it.
3644 If not - it will be properly calculated from
3645 scratch later in fix_tick_ready. */
3646 && bitmap_set_bit (&processed, INSN_LUID (next)))
3648 tick -= next_clock;
3650 if (tick < MIN_TICK)
3651 tick = MIN_TICK;
3653 if (tick > INTER_TICK (next))
3654 INTER_TICK (next) = tick;
3655 else
3656 tick = INTER_TICK (next);
3658 INSN_TICK (next) = tick;
3663 bitmap_clear (&processed);
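
/* Worked example: if the block just scheduled finished with clock_var == 9,
   then next_clock == 10 above; an insn from that block whose INSN_TICK was
   12 is rebased to 12 - 10 == 2, so in the next block (whose clock starts
   again near 0) it becomes ready two cycles in.  */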
3666 static int haifa_speculate_insn (rtx, ds_t, rtx *);
3668 /* Check if NEXT is ready to be added to the ready or queue list.
3669 If "yes", add it to the proper list.
3670 Returns:
3671 -1 - is not ready yet,
3672 0 - added to the ready list,
3673 0 < N - queued for N cycles. */
3675 int try_ready (rtx next)
3677 ds_t old_ts, *ts;
3679 ts = &TODO_SPEC (next);
3680 old_ts = *ts;
3682 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
3683 && ((old_ts & HARD_DEP)
3684 || (old_ts & SPECULATIVE)));
3686 if (sd_lists_empty_p (next, SD_LIST_BACK))
3687 /* NEXT has all its dependencies resolved. */
3689 /* Remove HARD_DEP bit from NEXT's status. */
3690 *ts &= ~HARD_DEP;
3692 if (current_sched_info->flags & DO_SPECULATION)
3693 /* Remove all speculative bits from NEXT's status. */
3694 *ts &= ~SPECULATIVE;
3696 else
3698     /* One of NEXT's dependencies has been resolved.
3699 Recalculate NEXT's status. */
3701 *ts &= ~SPECULATIVE & ~HARD_DEP;
3703 if (sd_lists_empty_p (next, SD_LIST_HARD_BACK))
3704 /* Now we've got NEXT with speculative deps only.
3705 1. Look at the deps to see what we have to do.
3706 2. Check if we can do 'todo'. */
3708 sd_iterator_def sd_it;
3709 dep_t dep;
3710 bool first_p = true;
3712 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
3714 ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
3716 if (DEBUG_INSN_P (DEP_PRO (dep))
3717 && !DEBUG_INSN_P (next))
3718 continue;
3720 if (first_p)
3722 first_p = false;
3724 *ts = ds;
3726 else
3727 *ts = ds_merge (*ts, ds);
3730 if (ds_weak (*ts) < spec_info->data_weakness_cutoff)
3731 /* Too few points. */
3732 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3734 else
3735 *ts |= HARD_DEP;
3738 if (*ts & HARD_DEP)
3739 gcc_assert (*ts == old_ts
3740 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
3741 else if (current_sched_info->new_ready)
3742 *ts = current_sched_info->new_ready (next, *ts);
3744 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
3745 have its original pattern or changed (speculative) one. This is due
3746 to changing ebb in region scheduling.
3747 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
3748 has speculative pattern.
3750 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
3751 control-speculative NEXT could have been discarded by sched-rgn.c
3752 (the same case as when discarded by can_schedule_ready_p ()). */
3754 if ((*ts & SPECULATIVE)
3755 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3756 need to change anything. */
3757 && *ts != old_ts)
3759 int res;
3760 rtx new_pat;
3762 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3764 res = haifa_speculate_insn (next, *ts, &new_pat);
3766 switch (res)
3768 case -1:
3769 /* It would be nice to change DEP_STATUS of all dependences,
3770 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3771 so we won't reanalyze anything. */
3772 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3773 break;
3775 case 0:
3776 /* We follow the rule, that every speculative insn
3777 has non-null ORIG_PAT. */
3778 if (!ORIG_PAT (next))
3779 ORIG_PAT (next) = PATTERN (next);
3780 break;
3782 case 1:
3783 if (!ORIG_PAT (next))
3784 	  /* If we are going to overwrite the original pattern of the insn,
3785 save it. */
3786 ORIG_PAT (next) = PATTERN (next);
3788 haifa_change_pattern (next, new_pat);
3789 break;
3791 default:
3792 gcc_unreachable ();
3796 /* We need to restore pattern only if (*ts == 0), because otherwise it is
3797 either correct (*ts & SPECULATIVE),
3798 or we simply don't care (*ts & HARD_DEP). */
3800 gcc_assert (!ORIG_PAT (next)
3801 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
3803 if (*ts & HARD_DEP)
3805 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3806 control-speculative NEXT could have been discarded by sched-rgn.c
3807 (the same case as when discarded by can_schedule_ready_p ()). */
3808 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3810 change_queue_index (next, QUEUE_NOWHERE);
3811 return -1;
3813 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
3814     /* We should change the pattern of every previously speculative
3815        instruction - we determine whether NEXT was speculative by checking
3816        the ORIG_PAT field.  With one exception: speculation checks have
3817        ORIG_PAT set too, so skip them.  */
3819 haifa_change_pattern (next, ORIG_PAT (next));
3820 ORIG_PAT (next) = 0;
3823 if (sched_verbose >= 2)
3825 int s = TODO_SPEC (next);
3827 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3828 (*current_sched_info->print_insn) (next, 0));
3830 if (spec_info && spec_info->dump)
3832 if (s & BEGIN_DATA)
3833 fprintf (spec_info->dump, "; data-spec;");
3834 if (s & BEGIN_CONTROL)
3835 fprintf (spec_info->dump, "; control-spec;");
3836 if (s & BE_IN_CONTROL)
3837 fprintf (spec_info->dump, "; in-control-spec;");
3840 fprintf (sched_dump, "\n");
3843 adjust_priority (next);
3845 return fix_tick_ready (next);
3848 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
3849 static int
3850 fix_tick_ready (rtx next)
3852 int tick, delay;
3854 if (!sd_lists_empty_p (next, SD_LIST_RES_BACK))
3856 int full_p;
3857 sd_iterator_def sd_it;
3858 dep_t dep;
3860 tick = INSN_TICK (next);
3861       /* If tick is not equal to INVALID_TICK, then update
3862 INSN_TICK of NEXT with the most recent resolved dependence
3863 cost. Otherwise, recalculate from scratch. */
3864 full_p = (tick == INVALID_TICK);
3866 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
3868 rtx pro = DEP_PRO (dep);
3869 int tick1;
3871 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3873 tick1 = INSN_TICK (pro) + dep_cost (dep);
3874 if (tick1 > tick)
3875 tick = tick1;
3877 if (!full_p)
3878 break;
3881 else
3882 tick = -1;
3884 INSN_TICK (next) = tick;
3886 delay = tick - clock_var;
3887 if (delay <= 0 || sched_pressure_p)
3888 delay = QUEUE_READY;
3890 change_queue_index (next, delay);
3892 return delay;
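
/* Worked example: suppose NEXT has two resolved backward dependences, on
   producers P1 (INSN_TICK == 5, dep_cost == 1) and P2 (INSN_TICK == 7,
   dep_cost == 2), and INSN_TICK (NEXT) was INVALID_TICK.  The loop above
   recomputes the tick from scratch as MAX (5 + 1, 7 + 2) == 9; with
   clock_var == 6 the returned delay is 3, so change_queue_index () puts
   NEXT into the queue three cycles out.  */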
3895 /* Move NEXT to the proper queue list with (DELAY >= 1),
3896 or add it to the ready list (DELAY == QUEUE_READY),
3897 or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE). */
3898 static void
3899 change_queue_index (rtx next, int delay)
3901 int i = QUEUE_INDEX (next);
3903 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3904 && delay != 0);
3905 gcc_assert (i != QUEUE_SCHEDULED);
3907 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3908 || (delay < 0 && delay == i))
3909 /* We have nothing to do. */
3910 return;
3912 /* Remove NEXT from wherever it is now. */
3913 if (i == QUEUE_READY)
3914 ready_remove_insn (next);
3915 else if (i >= 0)
3916 queue_remove (next);
3918 /* Add it to the proper place. */
3919 if (delay == QUEUE_READY)
3920 ready_add (readyp, next, false);
3921 else if (delay >= 1)
3922 queue_insn (next, delay);
3924 if (sched_verbose >= 2)
3926 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3927 (*current_sched_info->print_insn) (next, 0));
3929 if (delay == QUEUE_READY)
3930 fprintf (sched_dump, " into ready\n");
3931 else if (delay >= 1)
3932 fprintf (sched_dump, " into queue with cost=%d\n", delay);
3933 else
3934 fprintf (sched_dump, " removed from ready or queue lists\n");
3938 static int sched_ready_n_insns = -1;
3940 /* Initialize per region data structures. */
3941 void
3942 sched_extend_ready_list (int new_sched_ready_n_insns)
3944 int i;
3946 if (sched_ready_n_insns == -1)
3947 /* At the first call we need to initialize one more choice_stack
3948 entry. */
3950 i = 0;
3951 sched_ready_n_insns = 0;
3953 else
3954 i = sched_ready_n_insns + 1;
3956 ready.veclen = new_sched_ready_n_insns + issue_rate;
3957 ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);
3959 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
3961 ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
3962 sched_ready_n_insns, sizeof (*ready_try));
3964 /* We allocate +1 element to save initial state in the choice_stack[0]
3965 entry. */
3966 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
3967 new_sched_ready_n_insns + 1);
3969 for (; i <= new_sched_ready_n_insns; i++)
3971 choice_stack[i].state = xmalloc (dfa_state_size);
3973 if (targetm.sched.first_cycle_multipass_init)
3974 targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
3975 .target_data));
3978 sched_ready_n_insns = new_sched_ready_n_insns;
3981 /* Free per region data structures. */
3982 void
3983 sched_finish_ready_list (void)
3985 int i;
3987 free (ready.vec);
3988 ready.vec = NULL;
3989 ready.veclen = 0;
3991 free (ready_try);
3992 ready_try = NULL;
3994 for (i = 0; i <= sched_ready_n_insns; i++)
3996 if (targetm.sched.first_cycle_multipass_fini)
3997 targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
3998 .target_data));
4000 free (choice_stack [i].state);
4002 free (choice_stack);
4003 choice_stack = NULL;
4005 sched_ready_n_insns = -1;
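/* The haifa scheduler reserves no separate luids for notes and labels;
   see luids_init_insn for how the returned count is used. */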
4008 static int
4009 haifa_luid_for_non_insn (rtx x)
4011 gcc_assert (NOTE_P (x) || LABEL_P (x));
4013 return 0;
4016 /* Generates recovery code for INSN. */
4017 static void
4018 generate_recovery_code (rtx insn)
4020 if (TODO_SPEC (insn) & BEGIN_SPEC)
4021 begin_speculative_block (insn);
4023 /* Here we have an insn with no dependencies on
4024 instructions other than CHECK_SPEC ones. */
4026 if (TODO_SPEC (insn) & BE_IN_SPEC)
4027 add_to_speculative_block (insn);
4030 /* Helper function.
4031 Tries to add speculative dependencies of type FS between TWIN
4032 and the consumers of INSN's forward dependencies. */
4033 static void
4034 process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
4036 sd_iterator_def sd_it;
4037 dep_t dep;
4039 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
4041 ds_t ds;
4042 rtx consumer;
4044 consumer = DEP_CON (dep);
4046 ds = DEP_STATUS (dep);
4048 if (/* If we want to create speculative dep. */
4049 fs
4050 /* And we can do that because this is a true dep. */
4051 && (ds & DEP_TYPES) == DEP_TRUE)
4053 gcc_assert (!(ds & BE_IN_SPEC));
4055 if (/* If this dep can be overcome with 'begin speculation'. */
4056 ds & BEGIN_SPEC)
4057 /* Then we have a choice: keep the dep 'begin speculative'
4058 or transform it into 'be in speculative'. */
4060 if (/* In try_ready we assert that once an insn became ready
4061 it can be removed from the ready (or queue) list only
4062 due to a backend decision. Hence we can't let the
4063 probability of the speculative dep decrease. */
4064 ds_weak (ds) <= ds_weak (fs))
4066 ds_t new_ds;
4068 new_ds = (ds & ~BEGIN_SPEC) | fs;
4070 if (/* consumer can 'be in speculative'. */
4071 sched_insn_is_legitimate_for_speculation_p (consumer,
4072 new_ds))
4073 /* Transform it to be in speculative. */
4074 ds = new_ds;
4077 else
4078 /* Mark the dep as 'be in speculative'. */
4079 ds |= fs;
4083 dep_def _new_dep, *new_dep = &_new_dep;
4085 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
4086 sd_add_dep (new_dep, false);
4091 /* Generates recovery code for BEGIN speculative INSN. */
4092 static void
4093 begin_speculative_block (rtx insn)
4095 if (TODO_SPEC (insn) & BEGIN_DATA)
4096 nr_begin_data++;
4097 if (TODO_SPEC (insn) & BEGIN_CONTROL)
4098 nr_begin_control++;
4100 create_check_block_twin (insn, false);
4102 TODO_SPEC (insn) &= ~BEGIN_SPEC;
4105 static void haifa_init_insn (rtx);
4107 /* Generates recovery code for BE_IN speculative INSN. */
4108 static void
4109 add_to_speculative_block (rtx insn)
4111 ds_t ts;
4112 sd_iterator_def sd_it;
4113 dep_t dep;
4114 rtx twins = NULL;
4115 rtx_vec_t priorities_roots;
4117 ts = TODO_SPEC (insn);
4118 gcc_assert (!(ts & ~BE_IN_SPEC));
4120 if (ts & BE_IN_DATA)
4121 nr_be_in_data++;
4122 if (ts & BE_IN_CONTROL)
4123 nr_be_in_control++;
4125 TODO_SPEC (insn) &= ~BE_IN_SPEC;
4126 gcc_assert (!TODO_SPEC (insn));
4128 DONE_SPEC (insn) |= ts;
4130 /* First we convert all simple checks to branchy. */
4131 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4132 sd_iterator_cond (&sd_it, &dep);)
4134 rtx check = DEP_PRO (dep);
4136 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
4138 create_check_block_twin (check, true);
4140 /* Restart search. */
4141 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4143 else
4144 /* Continue search. */
4145 sd_iterator_next (&sd_it);
4148 priorities_roots = NULL;
4149 clear_priorities (insn, &priorities_roots);
4151 while (1)
4153 rtx check, twin;
4154 basic_block rec;
4156 /* Get the first backward dependency of INSN. */
4157 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4158 if (!sd_iterator_cond (&sd_it, &dep))
4159 /* INSN has no backward dependencies left. */
4160 break;
4162 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
4163 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
4164 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4166 check = DEP_PRO (dep);
4168 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
4169 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
4171 rec = BLOCK_FOR_INSN (check);
4173 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
4174 haifa_init_insn (twin);
4176 sd_copy_back_deps (twin, insn, true);
4178 if (sched_verbose && spec_info->dump)
4179 /* INSN_BB (insn) isn't determined for twin insns yet.
4180 So we can't use current_sched_info->print_insn. */
4181 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4182 INSN_UID (twin), rec->index);
4184 twins = alloc_INSN_LIST (twin, twins);
4186 /* Add dependences between TWIN and all appropriate
4187 instructions from REC. */
4188 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
4190 rtx pro = DEP_PRO (dep);
4192 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
4194 /* INSN might have dependencies from the instructions from
4195 several recovery blocks. At this iteration we process those
4196 producers that reside in REC. */
4197 if (BLOCK_FOR_INSN (pro) == rec)
4199 dep_def _new_dep, *new_dep = &_new_dep;
4201 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
4202 sd_add_dep (new_dep, false);
4206 process_insn_forw_deps_be_in_spec (insn, twin, ts);
4208 /* Remove all dependencies between INSN and insns in REC. */
4209 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4210 sd_iterator_cond (&sd_it, &dep);)
4212 rtx pro = DEP_PRO (dep);
4214 if (BLOCK_FOR_INSN (pro) == rec)
4215 sd_delete_dep (sd_it);
4216 else
4217 sd_iterator_next (&sd_it);
4221 /* We couldn't have added the dependencies between INSN and TWINS earlier
4222 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
4223 while (twins)
4225 rtx twin;
4227 twin = XEXP (twins, 0);
4230 dep_def _new_dep, *new_dep = &_new_dep;
4232 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4233 sd_add_dep (new_dep, false);
4236 twin = XEXP (twins, 1);
4237 free_INSN_LIST_node (twins);
4238 twins = twin;
4241 calc_priorities (priorities_roots);
4242 VEC_free (rtx, heap, priorities_roots);
4245 /* Extends the array pointed to by P and fills the new part with zeros. */
4246 void *
4247 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
4249 gcc_assert (new_nmemb >= old_nmemb);
4250 p = XRESIZEVAR (void, p, new_nmemb * size);
4251 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
4252 return p;
4255 /* Helper function.
4256 Find fallthru edge from PRED. */
4257 edge
4258 find_fallthru_edge_from (basic_block pred)
4260 edge e;
4261 basic_block succ;
4263 succ = pred->next_bb;
4264 gcc_assert (succ->prev_bb == pred);
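/* Search whichever of the two edge vectors is shorter. */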
4266 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
4268 e = find_fallthru_edge (pred->succs);
4270 if (e)
4272 gcc_assert (e->dest == succ);
4273 return e;
4276 else
4278 e = find_fallthru_edge (succ->preds);
4280 if (e)
4282 gcc_assert (e->src == pred);
4283 return e;
4287 return NULL;
4290 /* Extend per basic block data structures. */
4291 static void
4292 sched_extend_bb (void)
4294 rtx insn;
4296 /* The following is done to keep current_sched_info->next_tail non null. */
4297 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4298 if (NEXT_INSN (insn) == 0
4299 || (!NOTE_P (insn)
4300 && !LABEL_P (insn)
4301 /* Don't emit a NOTE if it would end up before a BARRIER. */
4302 && !BARRIER_P (NEXT_INSN (insn))))
4304 rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
4305 /* Make insn appear outside BB. */
4306 set_block_for_insn (note, NULL);
4307 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
4311 /* Init per basic block data structures. */
4312 void
4313 sched_init_bbs (void)
4315 sched_extend_bb ();
4318 /* Initialize BEFORE_RECOVERY variable. */
4319 static void
4320 init_before_recovery (basic_block *before_recovery_ptr)
4322 basic_block last;
4323 edge e;
4325 last = EXIT_BLOCK_PTR->prev_bb;
4326 e = find_fallthru_edge_from (last);
4328 if (e)
4330 /* We create two basic blocks:
4331 1. A single-instruction block that is inserted right after E->SRC
4332 and jumps to
4333 2. An empty block placed right before EXIT_BLOCK.
4334 Recovery blocks will be emitted between these two blocks. */
4336 basic_block single, empty;
4337 rtx x, label;
4339 /* If the fallthrough edge to exit we've found is from the block we've
4340 created before, don't do anything more. */
4341 if (last == after_recovery)
4342 return;
4344 adding_bb_to_current_region_p = false;
4346 single = sched_create_empty_bb (last);
4347 empty = sched_create_empty_bb (single);
4349 /* Add new blocks to the root loop. */
4350 if (current_loops != NULL)
4352 add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
4353 add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
4356 single->count = last->count;
4357 empty->count = last->count;
4358 single->frequency = last->frequency;
4359 empty->frequency = last->frequency;
4360 BB_COPY_PARTITION (single, last);
4361 BB_COPY_PARTITION (empty, last);
4363 redirect_edge_succ (e, single);
4364 make_single_succ_edge (single, empty, 0);
4365 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
4366 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
4368 label = block_label (empty);
4369 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
4370 JUMP_LABEL (x) = label;
4371 LABEL_NUSES (label)++;
4372 haifa_init_insn (x);
4374 emit_barrier_after (x);
4376 sched_init_only_bb (empty, NULL);
4377 sched_init_only_bb (single, NULL);
4378 sched_extend_bb ();
4380 adding_bb_to_current_region_p = true;
4381 before_recovery = single;
4382 after_recovery = empty;
4384 if (before_recovery_ptr)
4385 *before_recovery_ptr = before_recovery;
4387 if (sched_verbose >= 2 && spec_info->dump)
4388 fprintf (spec_info->dump,
4389 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
4390 last->index, single->index, empty->index);
4392 else
4393 before_recovery = last;
4396 /* Returns new recovery block. */
4397 basic_block
4398 sched_create_recovery_block (basic_block *before_recovery_ptr)
4400 rtx label;
4401 rtx barrier;
4402 basic_block rec;
4404 haifa_recovery_bb_recently_added_p = true;
4405 haifa_recovery_bb_ever_added_p = true;
4407 init_before_recovery (before_recovery_ptr);
4409 barrier = get_last_bb_insn (before_recovery);
4410 gcc_assert (BARRIER_P (barrier));
4412 label = emit_label_after (gen_label_rtx (), barrier);
4414 rec = create_basic_block (label, label, before_recovery);
4416 /* A recovery block always ends with an unconditional jump. */
4417 emit_barrier_after (BB_END (rec));
4419 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
4420 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
4422 if (sched_verbose && spec_info->dump)
4423 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
4424 rec->index);
4426 return rec;
4429 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
4430 and emit necessary jumps. */
4431 void
4432 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
4433 basic_block second_bb)
4435 rtx label;
4436 rtx jump;
4437 int edge_flags;
4439 /* This fixes the incoming edge. */
4440 /* ??? Which other flags should be specified? */
4441 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
4442 /* Partition type is the same, if it is "unpartitioned". */
4443 edge_flags = EDGE_CROSSING;
4444 else
4445 edge_flags = 0;
4447 make_edge (first_bb, rec, edge_flags);
4448 label = block_label (second_bb);
4449 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
4450 JUMP_LABEL (jump) = label;
4451 LABEL_NUSES (label)++;
4453 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
4454 /* Partition type is the same, if it is "unpartitioned". */
4456 /* Rewritten from cfgrtl.c. */
4457 if (flag_reorder_blocks_and_partition
4458 && targetm.have_named_sections)
4460 /* We don't need the same note for the check because
4461 any_condjump_p (check) == true. */
4462 add_reg_note (jump, REG_CROSSING_JUMP, NULL_RTX);
4464 edge_flags = EDGE_CROSSING;
4466 else
4467 edge_flags = 0;
4469 make_single_succ_edge (rec, second_bb, edge_flags);
4470 if (dom_info_available_p (CDI_DOMINATORS))
4471 set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
4474 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
4475 INSN is a simple check that should be converted to a branchy one. */
4476 static void
4477 create_check_block_twin (rtx insn, bool mutate_p)
4479 basic_block rec;
4480 rtx label, check, twin;
4481 ds_t fs;
4482 sd_iterator_def sd_it;
4483 dep_t dep;
4484 dep_def _new_dep, *new_dep = &_new_dep;
4485 ds_t todo_spec;
4487 gcc_assert (ORIG_PAT (insn) != NULL_RTX);
4489 if (!mutate_p)
4490 todo_spec = TODO_SPEC (insn);
4491 else
4493 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
4494 && (TODO_SPEC (insn) & SPECULATIVE) == 0);
4496 todo_spec = CHECK_SPEC (insn);
4499 todo_spec &= SPECULATIVE;
4501 /* Create recovery block. */
4502 if (mutate_p || targetm.sched.needs_block_p (todo_spec))
4504 rec = sched_create_recovery_block (NULL);
4505 label = BB_HEAD (rec);
4507 else
4509 rec = EXIT_BLOCK_PTR;
4510 label = NULL_RTX;
4513 /* Emit CHECK. */
4514 check = targetm.sched.gen_spec_check (insn, label, todo_spec);
4516 if (rec != EXIT_BLOCK_PTR)
4518 /* To have mem_reg alive at the beginning of second_bb,
4519 we emit the check BEFORE insn, so that insn after splitting
4520 will be at the beginning of second_bb, which will
4521 provide us with the correct life information. */
4522 check = emit_jump_insn_before (check, insn);
4523 JUMP_LABEL (check) = label;
4524 LABEL_NUSES (label)++;
4526 else
4527 check = emit_insn_before (check, insn);
4529 /* Extend data structures. */
4530 haifa_init_insn (check);
4532 /* CHECK is being added to current region. Extend ready list. */
4533 gcc_assert (sched_ready_n_insns != -1);
4534 sched_extend_ready_list (sched_ready_n_insns + 1);
4536 if (current_sched_info->add_remove_insn)
4537 current_sched_info->add_remove_insn (insn, 0);
4539 RECOVERY_BLOCK (check) = rec;
4541 if (sched_verbose && spec_info->dump)
4542 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
4543 (*current_sched_info->print_insn) (check, 0));
4545 gcc_assert (ORIG_PAT (insn));
4547 /* Initialize TWIN (twin is a duplicate of original instruction
4548 in the recovery block). */
4549 if (rec != EXIT_BLOCK_PTR)
4551 sd_iterator_def sd_it;
4552 dep_t dep;
4554 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
4555 if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
4557 struct _dep _dep2, *dep2 = &_dep2;
4559 init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
4561 sd_add_dep (dep2, true);
4564 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
4565 haifa_init_insn (twin);
4567 if (sched_verbose && spec_info->dump)
4568 /* INSN_BB (insn) isn't determined for twin insns yet.
4569 So we can't use current_sched_info->print_insn. */
4570 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4571 INSN_UID (twin), rec->index);
4573 else
4575 ORIG_PAT (check) = ORIG_PAT (insn);
4576 HAS_INTERNAL_DEP (check) = 1;
4577 twin = check;
4578 /* ??? We probably should change all OUTPUT dependencies to
4579 (TRUE | OUTPUT). */
4582 /* Copy all resolved back dependencies of INSN to TWIN. This will
4583 provide correct value for INSN_TICK (TWIN). */
4584 sd_copy_back_deps (twin, insn, true);
4586 if (rec != EXIT_BLOCK_PTR)
4587 /* In case of branchy check, fix CFG. */
4589 basic_block first_bb, second_bb;
4590 rtx jump;
4592 first_bb = BLOCK_FOR_INSN (check);
4593 second_bb = sched_split_block (first_bb, check);
4595 sched_create_recovery_edges (first_bb, rec, second_bb);
4597 sched_init_only_bb (second_bb, first_bb);
4598 sched_init_only_bb (rec, EXIT_BLOCK_PTR);
4600 jump = BB_END (rec);
4601 haifa_init_insn (jump);
4604 /* Move backward dependences from INSN to CHECK and
4605 move forward dependences from INSN to TWIN. */
4607 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
4608 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4610 rtx pro = DEP_PRO (dep);
4611 ds_t ds;
4613 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
4614 check --TRUE--> producer ??? or ANTI ???
4615 twin --TRUE--> producer
4616 twin --ANTI--> check
4618 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
4619 check --ANTI--> producer
4620 twin --ANTI--> producer
4621 twin --ANTI--> check
4623 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
4624 check ~~TRUE~~> producer
4625 twin ~~TRUE~~> producer
4626 twin --ANTI--> check */
4628 ds = DEP_STATUS (dep);
4630 if (ds & BEGIN_SPEC)
4632 gcc_assert (!mutate_p);
4633 ds &= ~BEGIN_SPEC;
4636 init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
4637 sd_add_dep (new_dep, false);
4639 if (rec != EXIT_BLOCK_PTR)
4641 DEP_CON (new_dep) = twin;
4642 sd_add_dep (new_dep, false);
4646 /* Second, remove backward dependencies of INSN. */
4647 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4648 sd_iterator_cond (&sd_it, &dep);)
4650 if ((DEP_STATUS (dep) & BEGIN_SPEC)
4651 || mutate_p)
4652 /* We can delete this dep because we overcome it with
4653 BEGIN_SPECULATION. */
4654 sd_delete_dep (sd_it);
4655 else
4656 sd_iterator_next (&sd_it);
4659 /* Future Speculations. Determine what BE_IN speculations will be like. */
4660 fs = 0;
4662 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
4663 here. */
4665 gcc_assert (!DONE_SPEC (insn));
4667 if (!mutate_p)
4669 ds_t ts = TODO_SPEC (insn);
4671 DONE_SPEC (insn) = ts & BEGIN_SPEC;
4672 CHECK_SPEC (check) = ts & BEGIN_SPEC;
4674 /* Luckiness of future speculations solely depends upon initial
4675 BEGIN speculation. */
4676 if (ts & BEGIN_DATA)
4677 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
4678 if (ts & BEGIN_CONTROL)
4679 fs = set_dep_weak (fs, BE_IN_CONTROL,
4680 get_dep_weak (ts, BEGIN_CONTROL));
4682 else
4683 CHECK_SPEC (check) = CHECK_SPEC (insn);
4685 /* Future speculations: call the helper. */
4686 process_insn_forw_deps_be_in_spec (insn, twin, fs);
4688 if (rec != EXIT_BLOCK_PTR)
4690 /* Which types of dependencies we should use here is,
4691 generally, a machine-dependent question... But, for now,
4692 it is not. */
4694 if (!mutate_p)
4696 init_dep (new_dep, insn, check, REG_DEP_TRUE);
4697 sd_add_dep (new_dep, false);
4699 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4700 sd_add_dep (new_dep, false);
4702 else
4704 if (spec_info->dump)
4705 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
4706 (*current_sched_info->print_insn) (insn, 0));
4708 /* Remove all dependencies of the INSN. */
4710 sd_it = sd_iterator_start (insn, (SD_LIST_FORW
4711 | SD_LIST_BACK
4712 | SD_LIST_RES_BACK));
4713 while (sd_iterator_cond (&sd_it, &dep))
4714 sd_delete_dep (sd_it);
4717 /* If former check (INSN) already was moved to the ready (or queue)
4718 list, add new check (CHECK) there too. */
4719 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
4720 try_ready (check);
4722 /* Remove old check from instruction stream and free its
4723 data. */
4724 sched_remove_insn (insn);
4727 init_dep (new_dep, check, twin, REG_DEP_ANTI);
4728 sd_add_dep (new_dep, false);
4730 else
4732 init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
4733 sd_add_dep (new_dep, false);
4736 if (!mutate_p)
4737 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
4738 because it'll be done later in add_to_speculative_block. */
4740 rtx_vec_t priorities_roots = NULL;
4742 clear_priorities (twin, &priorities_roots);
4743 calc_priorities (priorities_roots);
4744 VEC_free (rtx, heap, priorities_roots);
4748 /* Removes dependencies between instructions in the recovery block REC
4749 and usual region instructions. It keeps inner dependences so it
4750 won't be necessary to recompute them. */
4751 static void
4752 fix_recovery_deps (basic_block rec)
4754 rtx note, insn, jump, ready_list = 0;
4755 bitmap_head in_ready;
4756 rtx link;
4758 bitmap_initialize (&in_ready, 0);
4760 /* NOTE - a basic block note. */
4761 note = NEXT_INSN (BB_HEAD (rec));
4762 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4763 insn = BB_END (rec);
4764 gcc_assert (JUMP_P (insn));
4765 insn = PREV_INSN (insn);
4769 sd_iterator_def sd_it;
4770 dep_t dep;
4772 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4773 sd_iterator_cond (&sd_it, &dep);)
4775 rtx consumer = DEP_CON (dep);
4777 if (BLOCK_FOR_INSN (consumer) != rec)
4779 sd_delete_dep (sd_it);
4781 if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
4782 ready_list = alloc_INSN_LIST (consumer, ready_list);
4784 else
4786 gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4788 sd_iterator_next (&sd_it);
4792 insn = PREV_INSN (insn);
4794 while (insn != note);
4796 bitmap_clear (&in_ready);
4798 /* Try to add instructions to the ready or queue list. */
4799 for (link = ready_list; link; link = XEXP (link, 1))
4800 try_ready (XEXP (link, 0));
4801 free_INSN_LIST_list (&ready_list);
4803 /* Fixing jump's dependences. */
4804 insn = BB_HEAD (rec);
4805 jump = BB_END (rec);
4807 gcc_assert (LABEL_P (insn));
4808 insn = NEXT_INSN (insn);
4810 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4811 add_jump_dependencies (insn, jump);
4814 /* Change pattern of INSN to NEW_PAT. */
4815 void
4816 sched_change_pattern (rtx insn, rtx new_pat)
4818 int t;
4820 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
4821 gcc_assert (t);
4822 dfa_clear_single_insn_cache (insn);
4825 /* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
4826 instruction data. */
4827 static void
4828 haifa_change_pattern (rtx insn, rtx new_pat)
4830 sched_change_pattern (insn, new_pat);
4832 /* Invalidate INSN_COST, so it'll be recalculated. */
4833 INSN_COST (insn) = -1;
4834 /* Invalidate INSN_TICK, so it'll be recalculated. */
4835 INSN_TICK (insn) = INVALID_TICK;
4838 /* Return -1 if we can't speculate,
4839 0 if, for speculation with REQUEST mode, it is OK to use the
4840 current instruction pattern, or
4841 1 if the pattern must be changed to *NEW_PAT for the insn to be speculative. */
4842 int
4843 sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4845 gcc_assert (current_sched_info->flags & DO_SPECULATION
4846 && (request & SPECULATIVE)
4847 && sched_insn_is_legitimate_for_speculation_p (insn, request));
4849 if ((request & spec_info->mask) != request)
4850 return -1;
4852 if (request & BE_IN_SPEC
4853 && !(request & BEGIN_SPEC))
4854 return 0;
4856 return targetm.sched.speculate_insn (insn, request, new_pat);
4859 static int
4860 haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4862 gcc_assert (sched_deps_info->generate_spec_deps
4863 && !IS_SPECULATION_CHECK_P (insn));
4865 if (HAS_INTERNAL_DEP (insn)
4866 || SCHED_GROUP_P (insn))
4867 return -1;
4869 return sched_speculate_insn (insn, request, new_pat);
4872 /* Print some information about block BB, which starts with HEAD and
4873 ends with TAIL, before scheduling it.
4874 I is zero if the scheduler is about to start with a fresh ebb. */
4875 static void
4876 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4878 if (!i)
4879 fprintf (sched_dump,
4880 ";; ======================================================\n");
4881 else
4882 fprintf (sched_dump,
4883 ";; =====================ADVANCING TO=====================\n");
4884 fprintf (sched_dump,
4885 ";; -- basic block %d from %d to %d -- %s reload\n",
4886 bb->index, INSN_UID (head), INSN_UID (tail),
4887 (reload_completed ? "after" : "before"));
4888 fprintf (sched_dump,
4889 ";; ======================================================\n");
4890 fprintf (sched_dump, "\n");
4893 /* Unlink basic block notes and labels and save them, so they
4894 can be easily restored. We unlink basic block notes in the EBB to
4895 provide backward compatibility with the previous code, as target backends
4896 assume that there'll be only instructions between
4897 current_sched_info->{head and tail}. We restore these notes as soon
4898 as we can.
4899 FIRST (LAST) is the first (last) basic block in the ebb.
4900 NB: In the usual case (FIRST == LAST) nothing is really done. */
4901 void
4902 unlink_bb_notes (basic_block first, basic_block last)
4904 /* We DON'T unlink basic block notes of the first block in the ebb. */
4905 if (first == last)
4906 return;
4908 bb_header = XNEWVEC (rtx, last_basic_block);
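/* bb_header[i] will hold the unlinked label (or bb note) of block i;
   the zero entry written below for the block after LAST acts as a
   sentinel for restore_bb_notes. */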
4910 /* Make a sentinel. */
4911 if (last->next_bb != EXIT_BLOCK_PTR)
4912 bb_header[last->next_bb->index] = 0;
4914 first = first->next_bb;
4917 rtx prev, label, note, next;
4919 label = BB_HEAD (last);
4920 if (LABEL_P (label))
4921 note = NEXT_INSN (label);
4922 else
4923 note = label;
4924 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4926 prev = PREV_INSN (label);
4927 next = NEXT_INSN (note);
4928 gcc_assert (prev && next);
4930 NEXT_INSN (prev) = next;
4931 PREV_INSN (next) = prev;
4933 bb_header[last->index] = label;
4935 if (last == first)
4936 break;
4938 last = last->prev_bb;
4940 while (1);
4943 /* Restore basic block notes.
4944 FIRST is the first basic block in the ebb. */
4945 static void
4946 restore_bb_notes (basic_block first)
4948 if (!bb_header)
4949 return;
4951 /* We DON'T unlink basic block notes of the first block in the ebb. */
4952 first = first->next_bb;
4953 /* Remember: FIRST is actually the second basic block in the ebb. */
4955 while (first != EXIT_BLOCK_PTR
4956 && bb_header[first->index])
4958 rtx prev, label, note, next;
4960 label = bb_header[first->index];
4961 prev = PREV_INSN (label);
4962 next = NEXT_INSN (prev);
4964 if (LABEL_P (label))
4965 note = NEXT_INSN (label);
4966 else
4967 note = label;
4968 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4970 bb_header[first->index] = 0;
4972 NEXT_INSN (prev) = label;
4973 NEXT_INSN (note) = next;
4974 PREV_INSN (next) = note;
4976 first = first->next_bb;
4979 free (bb_header);
4980 bb_header = 0;
4983 /* Helper function.
4984 Fix CFG after both in- and inter-block movement of
4985 control_flow_insn_p JUMP. */
4986 static void
4987 fix_jump_move (rtx jump)
4989 basic_block bb, jump_bb, jump_bb_next;
4991 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4992 jump_bb = BLOCK_FOR_INSN (jump);
4993 jump_bb_next = jump_bb->next_bb;
4995 gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
4996 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
4998 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
4999 /* if jump_bb_next is not empty. */
5000 BB_END (jump_bb) = BB_END (jump_bb_next);
5002 if (BB_END (bb) != PREV_INSN (jump))
5003 /* Then there are instructions after jump that should be placed
5004 into jump_bb_next. */
5005 BB_END (jump_bb_next) = BB_END (bb);
5006 else
5007 /* Otherwise jump_bb_next is empty. */
5008 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
5010 /* To make assertion in move_insn happy. */
5011 BB_END (bb) = PREV_INSN (jump);
5013 update_bb_for_insn (jump_bb_next);
5016 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
5017 static void
5018 move_block_after_check (rtx jump)
5020 basic_block bb, jump_bb, jump_bb_next;
5021 VEC(edge,gc) *t;
5023 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
5024 jump_bb = BLOCK_FOR_INSN (jump);
5025 jump_bb_next = jump_bb->next_bb;
5027 update_bb_for_insn (jump_bb);
5029 gcc_assert (IS_SPECULATION_CHECK_P (jump)
5030 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
5032 unlink_block (jump_bb_next);
5033 link_block (jump_bb_next, bb);
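/* Rotate the successor lists: BB takes over JUMP_BB's successors,
   JUMP_BB takes JUMP_BB_NEXT's, and JUMP_BB_NEXT receives BB's
   original successors. */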
5035 t = bb->succs;
5036 bb->succs = 0;
5037 move_succs (&(jump_bb->succs), bb);
5038 move_succs (&(jump_bb_next->succs), jump_bb);
5039 move_succs (&t, jump_bb_next);
5041 df_mark_solutions_dirty ();
5043 common_sched_info->fix_recovery_cfg
5044 (bb->index, jump_bb->index, jump_bb_next->index);
5047 /* Helper function for move_block_after_check.
5048 This functions attaches edge vector pointed to by SUCCSP to
5049 block TO. */
5050 static void
5051 move_succs (VEC(edge,gc) **succsp, basic_block to)
5053 edge e;
5054 edge_iterator ei;
5056 gcc_assert (to->succs == 0);
5058 to->succs = *succsp;
5060 FOR_EACH_EDGE (e, ei, to->succs)
5061 e->src = to;
5063 *succsp = 0;
5066 /* Remove INSN from the instruction stream.
5067 INSN should not have any dependencies. */
5068 static void
5069 sched_remove_insn (rtx insn)
5071 sd_finish_insn (insn);
5073 change_queue_index (insn, QUEUE_NOWHERE);
5074 current_sched_info->add_remove_insn (insn, 1);
5075 remove_insn (insn);
5078 /* Clear priorities of all instructions that are forward dependent on INSN.
5079 Store in the vector pointed to by ROOTS_PTR the insns on which priority ()
5080 should be invoked to initialize all cleared priorities. */
5081 static void
5082 clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
5084 sd_iterator_def sd_it;
5085 dep_t dep;
5086 bool insn_is_root_p = true;
5088 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
5090 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
5092 rtx pro = DEP_PRO (dep);
5094 if (INSN_PRIORITY_STATUS (pro) >= 0
5095 && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
5097 /* If DEP doesn't contribute to priority then INSN itself should
5098 be added to priority roots. */
5099 if (contributes_to_priority_p (dep))
5100 insn_is_root_p = false;
5102 INSN_PRIORITY_STATUS (pro) = -1;
5103 clear_priorities (pro, roots_ptr);
5107 if (insn_is_root_p)
5108 VEC_safe_push (rtx, heap, *roots_ptr, insn);
5111 /* Recompute priorities of instructions whose priorities might have been
5112 changed. ROOTS is a vector of instructions whose priority computation will
5113 trigger initialization of all cleared priorities. */
5114 static void
5115 calc_priorities (rtx_vec_t roots)
5117 int i;
5118 rtx insn;
5120 FOR_EACH_VEC_ELT (rtx, roots, i, insn)
5121 priority (insn);
5125 /* Add dependences between JUMP and other instructions in the recovery
5126 block. INSN is the first insn in the recovery block. */
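/* Every insn in the block that does not yet feed another insn becomes
   the producer of an anti dependence to JUMP, so JUMP cannot be
   scheduled before any of them. */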
5127 static void
5128 add_jump_dependencies (rtx insn, rtx jump)
5132 insn = NEXT_INSN (insn);
5133 if (insn == jump)
5134 break;
5136 if (dep_list_size (insn) == 0)
5138 dep_def _new_dep, *new_dep = &_new_dep;
5140 init_dep (new_dep, insn, jump, REG_DEP_ANTI);
5141 sd_add_dep (new_dep, false);
5144 while (1);
5146 gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
5149 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
5150 rtx
5151 bb_note (basic_block bb)
5153 rtx note;
5155 note = BB_HEAD (bb);
5156 if (LABEL_P (note))
5157 note = NEXT_INSN (note);
5159 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5160 return note;
5163 #ifdef ENABLE_CHECKING
5164 /* Helper function for check_cfg.
5165 Return nonzero, if edge vector pointed to by EL has edge with TYPE in
5166 its flags. */
5167 static int
5168 has_edge_p (VEC(edge,gc) *el, int type)
5170 edge e;
5171 edge_iterator ei;
5173 FOR_EACH_EDGE (e, ei, el)
5174 if (e->flags & type)
5175 return 1;
5176 return 0;
5179 /* Search back, starting at INSN, for an insn that is not a
5180 NOTE_INSN_VAR_LOCATION. Don't search beyond HEAD, and return it if
5181 no such insn can be found. */
5182 static inline rtx
5183 prev_non_location_insn (rtx insn, rtx head)
5185 while (insn != head && NOTE_P (insn)
5186 && NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION)
5187 insn = PREV_INSN (insn);
5189 return insn;
5192 /* Check a few properties of the CFG between HEAD and TAIL.
5193 If HEAD (TAIL) is NULL check from the beginning (till the end) of the
5194 instruction stream. */
5195 static void
5196 check_cfg (rtx head, rtx tail)
5198 rtx next_tail;
5199 basic_block bb = 0;
5200 int not_first = 0, not_last;
5202 if (head == NULL)
5203 head = get_insns ();
5204 if (tail == NULL)
5205 tail = get_last_insn ();
5206 next_tail = NEXT_INSN (tail);
5210 not_last = head != tail;
5212 if (not_first)
5213 gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
5214 if (not_last)
5215 gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
5217 if (LABEL_P (head)
5218 || (NOTE_INSN_BASIC_BLOCK_P (head)
5219 && (!not_first
5220 || (not_first && !LABEL_P (PREV_INSN (head))))))
5222 gcc_assert (bb == 0);
5223 bb = BLOCK_FOR_INSN (head);
5224 if (bb != 0)
5225 gcc_assert (BB_HEAD (bb) == head);
5226 else
5227 /* This is the case of a jump table. See inside_basic_block_p (). */
5228 gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
5231 if (bb == 0)
5233 gcc_assert (!inside_basic_block_p (head));
5234 head = NEXT_INSN (head);
5236 else
5238 gcc_assert (inside_basic_block_p (head)
5239 || NOTE_P (head));
5240 gcc_assert (BLOCK_FOR_INSN (head) == bb);
5242 if (LABEL_P (head))
5244 head = NEXT_INSN (head);
5245 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
5247 else
5249 if (control_flow_insn_p (head))
5251 gcc_assert (prev_non_location_insn (BB_END (bb), head)
5252 == head);
5254 if (any_uncondjump_p (head))
5255 gcc_assert (EDGE_COUNT (bb->succs) == 1
5256 && BARRIER_P (NEXT_INSN (head)));
5257 else if (any_condjump_p (head))
5258 gcc_assert (/* Usual case. */
5259 (EDGE_COUNT (bb->succs) > 1
5260 && !BARRIER_P (NEXT_INSN (head)))
5261 /* Or jump to the next instruction. */
5262 || (EDGE_COUNT (bb->succs) == 1
5263 && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
5264 == JUMP_LABEL (head))));
5266 if (BB_END (bb) == head)
5268 if (EDGE_COUNT (bb->succs) > 1)
5269 gcc_assert (control_flow_insn_p (prev_non_location_insn
5270 (head, BB_HEAD (bb)))
5271 || has_edge_p (bb->succs, EDGE_COMPLEX));
5272 bb = 0;
5275 head = NEXT_INSN (head);
5279 not_first = 1;
5281 while (head != next_tail);
5283 gcc_assert (bb == 0);
5286 #endif /* ENABLE_CHECKING */
5288 /* Extend per basic block data structures. */
5289 static void
5290 extend_bb (void)
5292 if (sched_scan_info->extend_bb)
5293 sched_scan_info->extend_bb ();
5296 /* Init data for BB. */
5297 static void
5298 init_bb (basic_block bb)
5300 if (sched_scan_info->init_bb)
5301 sched_scan_info->init_bb (bb);
5304 /* Extend per insn data structures. */
5305 static void
5306 extend_insn (void)
5308 if (sched_scan_info->extend_insn)
5309 sched_scan_info->extend_insn ();
5312 /* Init data structures for INSN. */
5313 static void
5314 init_insn (rtx insn)
5316 if (sched_scan_info->init_insn)
5317 sched_scan_info->init_insn (insn);
5320 /* Init all insns in BB. */
5321 static void
5322 init_insns_in_bb (basic_block bb)
5324 rtx insn;
5326 FOR_BB_INSNS (bb, insn)
5327 init_insn (insn);
5330 /* A driver function to add a set of basic blocks (BBS),
5331 a single basic block (BB), a set of insns (INSNS) or a single insn (INSN)
5332 to the scheduling region. */
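/* A typical use (see sched_init_luids below) supplies only the insn
   hooks and leaves the bb hooks NULL:

     const struct sched_scan_info_def ssi
       = { NULL, NULL, luids_extend_insn, luids_init_insn };
     sched_scan (&ssi, bbs, bb, insns, insn);  */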
5333 void
5334 sched_scan (const struct sched_scan_info_def *ssi,
5335 bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5337 sched_scan_info = ssi;
5339 if (bbs != NULL || bb != NULL)
5341 extend_bb ();
5343 if (bbs != NULL)
5345 unsigned i;
5346 basic_block x;
5348 FOR_EACH_VEC_ELT (basic_block, bbs, i, x)
5349 init_bb (x);
5352 if (bb != NULL)
5353 init_bb (bb);
5356 extend_insn ();
5358 if (bbs != NULL)
5360 unsigned i;
5361 basic_block x;
5363 FOR_EACH_VEC_ELT (basic_block, bbs, i, x)
5364 init_insns_in_bb (x);
5367 if (bb != NULL)
5368 init_insns_in_bb (bb);
5370 if (insns != NULL)
5372 unsigned i;
5373 rtx x;
5375 FOR_EACH_VEC_ELT (rtx, insns, i, x)
5376 init_insn (x);
5379 if (insn != NULL)
5380 init_insn (insn);
5384 /* Extend data structures for logical insn UID. */
5385 static void
5386 luids_extend_insn (void)
5388 int new_luids_max_uid = get_max_uid () + 1;
5390 VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid);
5393 /* Initialize LUID for INSN. */
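/* For non-insns the luid_for_non_insn hook returns the number of luids
   to reserve; a negative return means the rtx gets no luid at all
   (INSN_LUID of -1). */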
5394 static void
5395 luids_init_insn (rtx insn)
5397 int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
5398 int luid;
5400 if (i >= 0)
5402 luid = sched_max_luid;
5403 sched_max_luid += i;
5405 else
5406 luid = -1;
5408 SET_INSN_LUID (insn, luid);
5411 /* Initialize luids for BBS, BB, INSNS and INSN.
5412 The hook common_sched_info->luid_for_non_insn () is used to determine
5413 if notes, labels, etc. need luids. */
5414 void
5415 sched_init_luids (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5417 const struct sched_scan_info_def ssi =
5419 NULL, /* extend_bb */
5420 NULL, /* init_bb */
5421 luids_extend_insn, /* extend_insn */
5422 luids_init_insn /* init_insn */
5425 sched_scan (&ssi, bbs, bb, insns, insn);
5428 /* Free LUIDs. */
5429 void
5430 sched_finish_luids (void)
5432 VEC_free (int, heap, sched_luids);
5433 sched_max_luid = 1;
5436 /* Return logical uid of INSN. Helpful while debugging. */
5437 int
5438 insn_luid (rtx insn)
5440 return INSN_LUID (insn);
5443 /* Extend per insn data in the target. */
5444 void
5445 sched_extend_target (void)
5447 if (targetm.sched.h_i_d_extended)
5448 targetm.sched.h_i_d_extended ();
5451 /* Extend global scheduler structures (those that live across calls to
5452 schedule_block) to include information about the just emitted INSN. */
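/* When the vector runs out of room it is grown to 3/2 of the current
   maximal uid, so that emitting a stream of new insns does not trigger
   a reallocation for each one. */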
5453 static void
5454 extend_h_i_d (void)
5456 int reserve = (get_max_uid () + 1
5457 - VEC_length (haifa_insn_data_def, h_i_d));
5458 if (reserve > 0
5459 && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
5461 VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
5462 3 * get_max_uid () / 2);
5463 sched_extend_target ();
5467 /* Initialize the h_i_d entry of INSN with default values.
5468 Values that are not explicitly initialized here hold zero. */
5469 static void
5470 init_h_i_d (rtx insn)
5472 if (INSN_LUID (insn) > 0)
5474 INSN_COST (insn) = -1;
5475 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
5476 INSN_TICK (insn) = INVALID_TICK;
5477 INTER_TICK (insn) = INVALID_TICK;
5478 TODO_SPEC (insn) = HARD_DEP;
5482 /* Initialize haifa_insn_data for BBS, BB, INSNS and INSN. */
5483 void
5484 haifa_init_h_i_d (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5486 const struct sched_scan_info_def ssi =
5488 NULL, /* extend_bb */
5489 NULL, /* init_bb */
5490 extend_h_i_d, /* extend_insn */
5491 init_h_i_d /* init_insn */
5494 sched_scan (&ssi, bbs, bb, insns, insn);
5497 /* Finalize haifa_insn_data. */
5498 void
5499 haifa_finish_h_i_d (void)
5501 int i;
5502 haifa_insn_data_t data;
5503 struct reg_use_data *use, *next;
5505 FOR_EACH_VEC_ELT (haifa_insn_data_def, h_i_d, i, data)
5507 if (data->reg_pressure != NULL)
5508 free (data->reg_pressure);
5509 for (use = data->reg_use_list; use != NULL; use = next)
5511 next = use->next_insn_use;
5512 free (use);
5515 VEC_free (haifa_insn_data_def, heap, h_i_d);
5518 /* Init data for the new insn INSN. */
5519 static void
5520 haifa_init_insn (rtx insn)
5522 gcc_assert (insn != NULL);
5524 sched_init_luids (NULL, NULL, NULL, insn);
5525 sched_extend_target ();
5526 sched_deps_init (false);
5527 haifa_init_h_i_d (NULL, NULL, NULL, insn);
5529 if (adding_bb_to_current_region_p)
5531 sd_init_insn (insn);
5533 /* Extend dependency caches by one element. */
5534 extend_dependency_caches (1, false);
5538 /* Init data for the new basic block BB which comes after AFTER. */
5539 static void
5540 haifa_init_only_bb (basic_block bb, basic_block after)
5542 gcc_assert (bb != NULL);
5544 sched_init_bbs ();
5546 if (common_sched_info->add_block)
5547 /* This changes only data structures of the front-end. */
5548 common_sched_info->add_block (bb, after);
5551 /* A generic version of sched_split_block (). */
5552 basic_block
5553 sched_split_block_1 (basic_block first_bb, rtx after)
5555 edge e;
5557 e = split_block (first_bb, after);
5558 gcc_assert (e->src == first_bb);
5560 /* sched_split_block emits note if *check == BB_END. Probably it
5561 is better to rip that note off. */
5563 return e->dest;
5566 /* A generic version of sched_create_empty_bb (). */
5567 basic_block
5568 sched_create_empty_bb_1 (basic_block after)
5570 return create_empty_bb (after);
5573 /* Insert PAT as an INSN into the schedule and update the necessary data
5574 structures to account for it. */
5575 rtx
5576 sched_emit_insn (rtx pat)
5578 rtx insn = emit_insn_after (pat, last_scheduled_insn);
5579 last_scheduled_insn = insn;
5580 haifa_init_insn (insn);
5581 return insn;
5584 /* This function returns a candidate satisfying dispatch constraints from
5585 the ready list. */
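/* Preference order: the first ready insn if it fits the current dispatch
   window; otherwise the first insn in the list that does fit; on a
   dispatch violation, fall back to the first ready insn; otherwise prefer
   a compare insn; finally, the first ready insn. */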
5587 static rtx
5588 ready_remove_first_dispatch (struct ready_list *ready)
5590 int i;
5591 rtx insn = ready_element (ready, 0);
5593 if (ready->n_ready == 1
5594 || INSN_CODE (insn) < 0
5595 || !INSN_P (insn)
5596 || !active_insn_p (insn)
5597 || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
5598 return ready_remove_first (ready);
5600 for (i = 1; i < ready->n_ready; i++)
5602 insn = ready_element (ready, i);
5604 if (INSN_CODE (insn) < 0
5605 || !INSN_P (insn)
5606 || !active_insn_p (insn))
5607 continue;
5609 if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
5611 /* Return ith element of ready. */
5612 insn = ready_remove (ready, i);
5613 return insn;
5617 if (targetm.sched.dispatch (NULL_RTX, DISPATCH_VIOLATION))
5618 return ready_remove_first (ready);
5620 for (i = 1; i < ready->n_ready; i++)
5622 insn = ready_element (ready, i);
5624 if (INSN_CODE (insn) < 0
5625 || !INSN_P (insn)
5626 || !active_insn_p (insn))
5627 continue;
5629 /* Return i-th element of ready. */
5630 if (targetm.sched.dispatch (insn, IS_CMP))
5631 return ready_remove (ready, i);
5634 return ready_remove_first (ready);
5637 /* Get the number of ready insns in the ready list. */
5640 number_in_ready (void)
5642 return ready.n_ready;
5645 /* Get the I-th element of the ready list. */
5648 get_ready_element (int i)
5650 return ready_element (&ready, i);
5653 #endif /* INSN_SCHEDULING */