/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for the normal
   instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block motion over interblock motion, ties broken by
   4.  prefer useful motion over speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, or finally
   7.  choose the insn which has the most insns dependent on it, or
   8.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_backward_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using insn backward dependences
   INSN_BACK_DEPS.  INSN_BACK_DEPS are translated into forward dependences
   INSN_FORW_DEPS for the purpose of forward list scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live register constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */
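
/* An illustrative sketch (not part of the pass) of the ready-list/queue
   interplay described above.  It is kept under "#if 0": the real loop
   lives in schedule_block () in this file, and unscheduled_insns_remain ()
   is a hypothetical stand-in for the real termination test.  */
#if 0
static void
list_schedule_sketch (void)
{
  while (unscheduled_insns_remain ())
    {
      /* Move insns whose stall has expired from Q to R.  */
      queue_to_ready (&ready);
      /* Order R by the tie-breaking rules enumerated above.  */
      ready_sort (&ready);
      /* Commit the best insn (R->S); its newly released dependents go
         to R or Q depending on whether their latency has elapsed.  */
      schedule_insn (ready_remove_first (&ready));
      advance_one_cycle ();
    }
}
#endif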
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
#include "target.h"
#include "output.h"
#include "params.h"
#include "vecprim.h"
#include "dbgcnt.h"
#include "cfgloop.h"
#include "ira.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#ifdef INSN_SCHEDULING

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */

int issue_rate;

/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N>0 and no -dSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=1: same as -dSR.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */

static int sched_verbose_param = 0;
int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dRS).  */
FILE *sched_dump = 0;
/* fix_sched_param() is called from toplev.c upon detection
   of the -fsched-verbose=N option.  */
void
fix_sched_param (const char *param, const char *val)
{
  if (!strcmp (param, "verbose"))
    sched_verbose_param = atoi (val);
  else
    warning (0, "fix_sched_param: unknown param: %s", param);
}
/* This is a placeholder for the scheduler parameters common
   to all schedulers.  */
struct common_sched_info_def *common_sched_info;

#define INSN_TICK(INSN) (HID (INSN)->tick)
#define INTER_TICK(INSN) (HID (INSN)->inter_tick)

/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)

/* Issue points are used to distinguish between instructions in max_issue ().
   For now, all instructions are equally good.  */
#define ISSUE_POINTS(INSN) 1

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
rtx note_list;

static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
spec_info_t spec_info = NULL;

/* True, if recovery block was added during scheduling of current block.
   Used to determine, if we need to fix INSN_TICKs.  */
static bool haifa_recovery_bb_recently_added_p;

/* True, if recovery block was added during this scheduling pass.
   Used to determine if we should have empty memory pools of dependencies
   after finishing current region.  */
bool haifa_recovery_bb_ever_added_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx *bb_header = 0;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;

/* Basic block just before the EXIT_BLOCK and after recovery, if we have
   created it.  */
basic_block after_recovery;

/* FALSE if we add bb to another region, so we don't need to initialize it.  */
bool adding_bb_to_current_region_p = true;
/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
   unscheduled insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */
/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal time of instruction execution computed by genattr.c
   from the maximal time of functional unit reservations and of getting
   a result.  This is the longest time an insn may be queued.  */

static rtx *insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)

#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in the
   queue nor in the ready list.
   QUEUE_READY     - INSN is in the ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */

#define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
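
/* Worked example (illustration only): with max_insn_queue_index == 7,
   an insn queued 3 cycles ahead of q_ptr == 6 lands in slot
   NEXT_Q_AFTER (6, 3) == (6 + 3) & 7 == 1, i.e. the index wraps around
   the 8-entry ring instead of running off its end.  */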
/* The following variable holds the state of all current and future
   reservations of the processor units.  */
state_t curr_state;

/* The following variable is the size of memory representing all
   current and future reservations of the processor units.  */
size_t dfa_state_size;

/* The following array is used to find the best insn from ready when
   the automaton pipeline interface is used.  */
char *ready_try = NULL;

/* The ready list.  */
struct ready_list ready = {NULL, 0, 0, 0, 0};

/* The pointer to the ready list (to be removed).  */
static struct ready_list *readyp = &ready;

/* Scheduling clock.  */
static int clock_var;
static int may_trap_exp (const_rtx, int);

/* Nonzero iff the address is comprised from at most 1 register.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)						\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))
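
/* For instance (illustration only), (reg R1), (plus (reg R1) (const_int 4))
   and (lo_sum (reg R1) (symbol_ref "x")) all satisfy CONST_BASED_ADDRESS_P,
   while (plus (reg R1) (reg R2)) does not, since an address built from two
   registers is not based on a single register.  */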
/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  */

static int haifa_luid_for_non_insn (rtx x);

/* Haifa version of sched_info hooks common to all headers.  */
const struct common_sched_info_def haifa_common_sched_info =
  {
    NULL, /* fix_recovery_cfg */
    NULL, /* add_block */
    NULL, /* estimate_number_of_insns */
    haifa_luid_for_non_insn, /* luid_for_non_insn */
    SCHED_PASS_UNKNOWN /* sched_pass_id */
  };
const struct sched_scan_info_def *sched_scan_info;

/* Mapping from instruction UID to its Logical UID.  */
VEC (int, heap) *sched_luids = NULL;

/* Next LUID to assign to an instruction.  */
int sched_max_luid = 1;

/* Haifa Instruction Data.  */
VEC (haifa_insn_data_def, heap) *h_i_d = NULL;

void (* sched_init_only_bb) (basic_block, basic_block);

/* Split block function.  Different schedulers might use different functions
   to keep their internal data consistent.  */
basic_block (* sched_split_block) (basic_block, rtx);

/* Create empty basic block after the specified block.  */
basic_block (* sched_create_empty_bb) (basic_block);
static int
may_trap_exp (const_rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
	return TRAP_RISKY;
      else
	return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory:  a volatile load.  */
      if (MEM_VOLATILE_P (x))
	return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
	return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
	return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
	return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    {
	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
	      insn_class = WORST_CLASS (insn_class, tmp_class);
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = 0; j < XVECLEN (x, i); j++)
		{
		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
		  insn_class = WORST_CLASS (insn_class, tmp_class);
		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
		    break;
		}
	    }
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
      return insn_class;
    }
}
/* Classifies rtx X of an insn for the purpose of verifying that X can be
   executed speculatively (and consequently the insn can be moved
   speculatively), by examining X, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: load that needs to be checked for
   being either PFREE or PRISKY.  */

static int
haifa_classify_rtx (const_rtx x)
{
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (x) == PARALLEL)
    {
      int i, len = XVECLEN (x, 0);

      for (i = len - 1; i >= 0; i--)
	{
	  tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
	  insn_class = WORST_CLASS (insn_class, tmp_class);
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
    }
  else
    {
      code = GET_CODE (x);
      switch (code)
	{
	case CLOBBER:
	  /* Test if it is a 'store'.  */
	  tmp_class = may_trap_exp (XEXP (x, 0), 1);
	  break;
	case SET:
	  /* Test if it is a store.  */
	  tmp_class = may_trap_exp (SET_DEST (x), 1);
	  if (tmp_class == TRAP_RISKY)
	    break;
	  /* Test if it is a load.  */
	  tmp_class =
	    WORST_CLASS (tmp_class,
			 may_trap_exp (SET_SRC (x), 0));
	  break;
	case COND_EXEC:
	  tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
	  if (tmp_class == TRAP_RISKY)
	    break;
	  tmp_class = WORST_CLASS (tmp_class,
				   may_trap_exp (COND_EXEC_TEST (x), 0));
	  break;
	case TRAP_IF:
	  tmp_class = TRAP_RISKY;
	  break;
	default:;
	}
      insn_class = tmp_class;
    }

  return insn_class;
}

int
haifa_classify_insn (const_rtx insn)
{
  return haifa_classify_rtx (PATTERN (insn));
}
/* Forward declarations.  */

static int priority (rtx);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx *, int);
static void queue_insn (rtx, int);
static int schedule_insn (rtx);
static void adjust_priority (rtx);
static void advance_one_cycle (void);
static void extend_h_i_d (void);

/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between two types of notes:

   (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (2) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */

static void ready_add (struct ready_list *, rtx, bool);
static rtx ready_remove_first (struct ready_list *);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

static void debug_ready_list (struct ready_list *);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx);

static int choose_ready (struct ready_list *, rtx *);

static void fix_inter_tick (rtx, rtx);
static int fix_tick_ready (rtx);
static void change_queue_index (rtx, int);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void init_h_i_d (rtx);
static void generate_recovery_code (rtx);
static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
static void begin_speculative_block (rtx);
static void add_to_speculative_block (rtx);
static void init_before_recovery (basic_block *);
static void create_check_block_twin (rtx, bool);
static void fix_recovery_deps (basic_block);
static void haifa_change_pattern (rtx, rtx);
static void dump_new_block_header (int, basic_block, rtx, rtx);
static void restore_bb_notes (basic_block);
static void fix_jump_move (rtx);
static void move_block_after_check (rtx);
static void move_succs (VEC(edge,gc) **, basic_block);
static void sched_remove_insn (rtx);
static void clear_priorities (rtx, rtx_vec_t *);
static void calc_priorities (rtx_vec_t);
static void add_jump_dependencies (rtx, rtx);

#ifdef ENABLE_CHECKING
static int has_edge_p (VEC(edge,gc) *, int);
static void check_cfg (rtx, rtx);
#endif
#endif /* INSN_SCHEDULING */

/* Point to state used for the current scheduling pass.  */
struct haifa_sched_info *current_sched_info;

#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else
/* Do register pressure sensitive insn scheduling if the flag is set
   up.  */
bool sched_pressure_p;

/* Map regno -> its cover class.  The map is defined only when
   SCHED_PRESSURE_P is true.  */
enum reg_class *sched_regno_cover_class;

/* The current register pressure.  Only elements corresponding to cover
   classes are defined.  */
static int curr_reg_pressure[N_REG_CLASSES];

/* Saved value of the previous array.  */
static int saved_reg_pressure[N_REG_CLASSES];

/* Registers living at the given scheduling point.  */
static bitmap curr_reg_live;

/* Saved value of the previous bitmap.  */
static bitmap saved_reg_live;

/* Registers mentioned in the current region.  */
static bitmap region_ref_regs;

/* Initiate register pressure relative info for scheduling the current
   region.  Currently it only clears the registers mentioned in the
   current region.  */
void
sched_init_region_reg_pressure_info (void)
{
  bitmap_clear (region_ref_regs);
}
/* Update current register pressure related info after birth (if
   BIRTH_P) or death of register REGNO.  */
static void
mark_regno_birth_or_death (int regno, bool birth_p)
{
  enum reg_class cover_class;

  cover_class = sched_regno_cover_class[regno];
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (cover_class != NO_REGS)
	{
	  if (birth_p)
	    {
	      bitmap_set_bit (curr_reg_live, regno);
	      curr_reg_pressure[cover_class]
		+= ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
	    }
	  else
	    {
	      bitmap_clear_bit (curr_reg_live, regno);
	      curr_reg_pressure[cover_class]
		-= ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
	    }
	}
    }
  else if (cover_class != NO_REGS
	   && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
    {
      if (birth_p)
	{
	  bitmap_set_bit (curr_reg_live, regno);
	  curr_reg_pressure[cover_class]++;
	}
      else
	{
	  bitmap_clear_bit (curr_reg_live, regno);
	  curr_reg_pressure[cover_class]--;
	}
    }
}
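
/* Worked example (illustration only): for a pseudo whose cover class is
   GENERAL_REGS and whose mode occupies two hard registers, a birth adds 2
   to curr_reg_pressure[GENERAL_REGS] and sets the pseudo's bit in
   curr_reg_live; the matching death subtracts the same 2 and clears it.  */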
/* Initiate current register pressure related info from living
   registers given by LIVE.  */
static void
initiate_reg_pressure_info (bitmap live)
{
  int i;
  unsigned int j;
  bitmap_iterator bi;

  for (i = 0; i < ira_reg_class_cover_size; i++)
    curr_reg_pressure[ira_reg_class_cover[i]] = 0;
  bitmap_clear (curr_reg_live);
  EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
    if (current_nr_blocks == 1 || bitmap_bit_p (region_ref_regs, j))
      mark_regno_birth_or_death (j, true);
}
/* Mark registers in X as mentioned in the current region.  */
static void
setup_ref_regs (rtx x)
{
  int i, j, regno;
  const RTX_CODE code = GET_CODE (x);
  const char *fmt;

  if (REG_P (x))
    {
      regno = REGNO (x);
      if (regno >= FIRST_PSEUDO_REGISTER)
	bitmap_set_bit (region_ref_regs, REGNO (x));
      else
	for (i = hard_regno_nregs[regno][GET_MODE (x)] - 1; i >= 0; i--)
	  bitmap_set_bit (region_ref_regs, regno + i);
      return;
    }
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      setup_ref_regs (XEXP (x, i));
    else if (fmt[i] == 'E')
      {
	for (j = 0; j < XVECLEN (x, i); j++)
	  setup_ref_regs (XVECEXP (x, i, j));
      }
}
/* Initiate current register pressure related info at the start of
   basic block BB.  */
static void
initiate_bb_reg_pressure_info (basic_block bb)
{
  unsigned int i;
  rtx insn;

  if (current_nr_blocks > 1)
    FOR_BB_INSNS (bb, insn)
      if (NONDEBUG_INSN_P (insn))
	setup_ref_regs (PATTERN (insn));
  initiate_reg_pressure_info (df_get_live_in (bb));
#ifdef EH_RETURN_DATA_REGNO
  if (bb_has_eh_pred (bb))
    for (i = 0; ; ++i)
      {
	unsigned int regno = EH_RETURN_DATA_REGNO (i);

	if (regno == INVALID_REGNUM)
	  break;
	if (! bitmap_bit_p (df_get_live_in (bb), regno))
	  mark_regno_birth_or_death (regno, true);
      }
#endif
}
/* Save current register pressure related info.  */
static void
save_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_reg_class_cover_size; i++)
    saved_reg_pressure[ira_reg_class_cover[i]]
      = curr_reg_pressure[ira_reg_class_cover[i]];
  bitmap_copy (saved_reg_live, curr_reg_live);
}

/* Restore saved register pressure related info.  */
static void
restore_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_reg_class_cover_size; i++)
    curr_reg_pressure[ira_reg_class_cover[i]]
      = saved_reg_pressure[ira_reg_class_cover[i]];
  bitmap_copy (curr_reg_live, saved_reg_live);
}
/* Return TRUE if the register is dying after its USE.  */
static bool
dying_use_p (struct reg_use_data *use)
{
  struct reg_use_data *next;

  for (next = use->next_regno_use; next != use; next = next->next_regno_use)
    if (NONDEBUG_INSN_P (next->insn)
	&& QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
      return false;
  return true;
}
/* Print info about the current register pressure and its excess for
   each cover class.  */
static void
print_curr_reg_pressure (void)
{
  int i;
  enum reg_class cl;

  fprintf (sched_dump, ";;\t");
  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cl = ira_reg_class_cover[i];
      gcc_assert (curr_reg_pressure[cl] >= 0);
      fprintf (sched_dump, "  %s:%d(%d)", reg_class_names[cl],
	       curr_reg_pressure[cl],
	       curr_reg_pressure[cl] - ira_available_class_regs[cl]);
    }
  fprintf (sched_dump, "\n");
}
/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */

static rtx last_scheduled_insn;

/* Cached cost of the instruction.  Use the function below to get the cost
   of the insn.  -1 here means that the field is not initialized.  */
#define INSN_COST(INSN) (HID (INSN)->cost)

/* Compute cost of executing INSN.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
insn_cost (rtx insn)
{
  int cost;

  if (sel_sched_p ())
    {
      if (recog_memoized (insn) < 0)
	return 0;

      cost = insn_default_latency (insn);
      if (cost < 0)
	cost = 0;

      return cost;
    }

  cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
	 understand.  We can't pass these directly to
	 result_ready_cost or insn_default_latency because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
	{
	  INSN_COST (insn) = 0;
	  return 0;
	}
      else
	{
	  cost = insn_default_latency (insn);
	  if (cost < 0)
	    cost = 0;

	  INSN_COST (insn) = cost;
	}
    }

  return cost;
}
/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.
   ??? We also use this function to call recog_memoized on all insns.  */
int
dep_cost_1 (dep_t link, dw_t dw)
{
  rtx insn = DEP_PRO (link);
  rtx used = DEP_CON (link);
  int cost;

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  We don't care about the
     dependence cost when only decreasing register pressure.  */
  if (recog_memoized (used) < 0)
    {
      cost = 0;
      recog_memoized (insn);
    }
  else
    {
      enum reg_note dep_type = DEP_TYPE (link);

      cost = insn_cost (insn);

      if (INSN_CODE (insn) >= 0)
	{
	  if (dep_type == REG_DEP_ANTI)
	    cost = 0;
	  else if (dep_type == REG_DEP_OUTPUT)
	    {
	      cost = (insn_default_latency (insn)
		      - insn_default_latency (used));
	      if (cost <= 0)
		cost = 1;
	    }
	  else if (bypass_p (insn))
	    cost = insn_latency (insn, used);
	}

      if (targetm.sched.adjust_cost_2)
	cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
					    dw);
      else if (targetm.sched.adjust_cost != NULL)
	{
	  /* This variable is used for backward compatibility with the
	     targets.  */
	  rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);

	  /* Make it self-cycled, so that if someone tries to walk over this
	     incomplete list he/she will be caught in an endless loop.  */
	  XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;

	  /* Targets use only REG_NOTE_KIND of the link.  */
	  PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));

	  cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
					    insn, cost);

	  free_INSN_LIST_node (dep_cost_rtx_link);
	}

      if (cost < 0)
	cost = 0;
    }

  return cost;
}
/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
dep_cost (dep_t link)
{
  return dep_cost_1 (link, 0);
}
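
/* Illustration of the resulting costs (assuming default latencies and no
   target adjust_cost hooks): if INSN is a multiply with a default latency
   of 3 feeding USED, a true dependence costs 3 cycles; an anti dependence
   costs 0; an output dependence costs the difference of the two default
   latencies, but never less than 1.  */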
/* Use this sel-sched.c friendly function in reorder2 instead of increasing
   INSN_PRIORITY explicitly.  */
void
increase_insn_priority (rtx insn, int amount)
{
  if (!sel_sched_p ())
    {
      /* We're dealing with haifa-sched.c INSN_PRIORITY.  */
      if (INSN_PRIORITY_KNOWN (insn))
	INSN_PRIORITY (insn) += amount;
    }
  else
    {
      /* In sel-sched.c INSN_PRIORITY is not kept up to date.
	 Use EXPR_PRIORITY instead.  */
      sel_add_to_insn_priority (insn, amount);
    }
}
/* Return 'true' if DEP should be included in priority calculations.  */
static bool
contributes_to_priority_p (dep_t dep)
{
  if (DEBUG_INSN_P (DEP_CON (dep))
      || DEBUG_INSN_P (DEP_PRO (dep)))
    return false;

  /* Critical path is meaningful in block boundaries only.  */
  if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
						    DEP_PRO (dep)))
    return false;

  /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set, then speculative
     instructions will be less likely to be scheduled.  That is because
     the priority of their producers will increase, and, thus, the
     producers will more likely be scheduled, thus, resolving the
     dependence.  */
  if (sched_deps_info->generate_spec_deps
      && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
      && (DEP_STATUS (dep) & SPECULATIVE))
    return false;

  return true;
}
/* Compute the number of nondebug forward deps of an insn.  */

static int
dep_list_size (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int dbgcount = 0, nodbgcount = 0;

  if (!MAY_HAVE_DEBUG_INSNS)
    return sd_lists_size (insn, SD_LIST_FORW);

  FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
    {
      if (DEBUG_INSN_P (DEP_CON (dep)))
	dbgcount++;
      else if (!DEBUG_INSN_P (DEP_PRO (dep)))
	nodbgcount++;
    }

  gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, SD_LIST_FORW));

  return nodbgcount;
}
/* Compute the priority number for INSN.  */
static int
priority (rtx insn)
{
  if (! INSN_P (insn))
    return 0;

  /* We should not be interested in priority of an already scheduled insn.  */
  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  if (!INSN_PRIORITY_KNOWN (insn))
    {
      int this_priority = -1;

      if (dep_list_size (insn) == 0)
	/* ??? We should set INSN_PRIORITY to insn_cost when an insn has
	   some forward deps but all of them are ignored by
	   contributes_to_priority hook.  At the moment we set priority of
	   such insns to 0.  */
	this_priority = insn_cost (insn);
      else
	{
	  rtx prev_first, twin;
	  basic_block rec;

	  /* For recovery check instructions we calculate priority slightly
	     differently than for normal instructions.  Instead of walking
	     through the INSN_FORW_DEPS (check) list, we walk through the
	     INSN_FORW_DEPS list of each instruction in the corresponding
	     recovery block.  */

	  /* Selective scheduling does not define RECOVERY_BLOCK macro.  */
	  rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
	  if (!rec || rec == EXIT_BLOCK_PTR)
	    {
	      prev_first = PREV_INSN (insn);
	      twin = insn;
	    }
	  else
	    {
	      prev_first = NEXT_INSN (BB_HEAD (rec));
	      twin = PREV_INSN (BB_END (rec));
	    }

	  do
	    {
	      sd_iterator_def sd_it;
	      dep_t dep;

	      FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
		{
		  rtx next;
		  int next_priority;

		  next = DEP_CON (dep);

		  if (BLOCK_FOR_INSN (next) != rec)
		    {
		      int cost;

		      if (!contributes_to_priority_p (dep))
			continue;

		      if (twin == insn)
			cost = dep_cost (dep);
		      else
			{
			  struct _dep _dep1, *dep1 = &_dep1;

			  init_dep (dep1, insn, next, REG_DEP_ANTI);

			  cost = dep_cost (dep1);
			}

		      next_priority = cost + priority (next);

		      if (next_priority > this_priority)
			this_priority = next_priority;
		    }
		}

	      twin = PREV_INSN (twin);
	    }
	  while (twin != prev_first);
	}

      if (this_priority < 0)
	{
	  gcc_assert (this_priority == -1);

	  this_priority = insn_cost (insn);
	}

      INSN_PRIORITY (insn) = this_priority;
      INSN_PRIORITY_STATUS (insn) = 1;
    }

  return INSN_PRIORITY (insn);
}
/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

#define SCHED_SORT(READY, N_READY)                                   \
do { if ((N_READY) == 2)                                             \
       swap_sort (READY, N_READY);                                   \
     else if ((N_READY) > 2)                                         \
       qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }    \
while (0)
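
/* A note on the comparator convention (illustration only):
   rank_for_schedule () returns a positive value when its first qsort
   argument is the *more* preferred insn, so SCHED_SORT leaves the best
   insn at the highest index, where ready_remove_first () pops it from
   ready->first.  E.g. three insns with priorities {2, 9, 5} end up
   ordered {2, 5, 9} in the vector.  */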
/* Setup info about the current register pressure impact of scheduling
   INSN at the current scheduling point.  */
static void
setup_insn_reg_pressure_info (rtx insn)
{
  int i, change, before, after, hard_regno;
  int excess_cost_change;
  enum machine_mode mode;
  enum reg_class cl;
  struct reg_pressure_data *pressure_info;
  int *max_reg_pressure;
  struct reg_use_data *use;
  static int death[N_REG_CLASSES];

  gcc_checking_assert (!DEBUG_INSN_P (insn));

  excess_cost_change = 0;
  for (i = 0; i < ira_reg_class_cover_size; i++)
    death[ira_reg_class_cover[i]] = 0;
  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (dying_use_p (use))
      {
	cl = sched_regno_cover_class[use->regno];
	if (use->regno < FIRST_PSEUDO_REGISTER)
	  death[cl]++;
	else
	  death[cl] += ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
      }
  pressure_info = INSN_REG_PRESSURE (insn);
  max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
  gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cl = ira_reg_class_cover[i];
      gcc_assert (curr_reg_pressure[cl] >= 0);
      change = (int) pressure_info[i].set_increase - death[cl];
      before = MAX (0, max_reg_pressure[i] - ira_available_class_regs[cl]);
      after = MAX (0, max_reg_pressure[i] + change
		   - ira_available_class_regs[cl]);
      hard_regno = ira_class_hard_regs[cl][0];
      gcc_assert (hard_regno >= 0);
      mode = reg_raw_mode[hard_regno];
      excess_cost_change += ((after - before)
			     * (ira_memory_move_cost[mode][cl][0]
				+ ira_memory_move_cost[mode][cl][1]));
    }
  INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
}
/* Returns a positive value if x is preferred; returns a negative value if
   y is preferred.  Should never return 0, since that will make the sort
   unstable.  */

static int
rank_for_schedule (const void *x, const void *y)
{
  rtx tmp = *(const rtx *) y;
  rtx tmp2 = *(const rtx *) x;
  rtx last;
  int tmp_class, tmp2_class;
  int val, priority_val, info_val;

  if (MAY_HAVE_DEBUG_INSNS)
    {
      /* Schedule debug insns as early as possible.  */
      if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
	return -1;
      else if (DEBUG_INSN_P (tmp2))
	return 1;
    }

  /* The insn in a schedule group should be issued first.  */
  if (flag_sched_group_heuristic &&
      SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
    return SCHED_GROUP_P (tmp2) ? 1 : -1;

  /* Make sure that priority of TMP and TMP2 are initialized.  */
  gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));

  if (sched_pressure_p)
    {
      int diff;

      /* Prefer insn whose scheduling results in the smallest register
	 pressure excess.  */
      if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
		   + (INSN_TICK (tmp) > clock_var
		      ? INSN_TICK (tmp) - clock_var : 0)
		   - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
		   - (INSN_TICK (tmp2) > clock_var
		      ? INSN_TICK (tmp2) - clock_var : 0))) != 0)
	return diff;
    }

  if (sched_pressure_p
      && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var))
    {
      if (INSN_TICK (tmp) <= clock_var)
	return -1;
      else if (INSN_TICK (tmp2) <= clock_var)
	return 1;
      else
	return INSN_TICK (tmp) - INSN_TICK (tmp2);
    }
  /* Prefer insn with higher priority.  */
  priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);

  if (flag_sched_critical_path_heuristic && priority_val)
    return priority_val;

  /* Prefer speculative insn with greater dependencies weakness.  */
  if (flag_sched_spec_insn_heuristic && spec_info)
    {
      ds_t ds1, ds2;
      dw_t dw1, dw2;
      int dw;

      ds1 = TODO_SPEC (tmp) & SPECULATIVE;
      if (ds1)
	dw1 = ds_weak (ds1);
      else
	dw1 = NO_DEP_WEAK;

      ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
      if (ds2)
	dw2 = ds_weak (ds2);
      else
	dw2 = NO_DEP_WEAK;

      dw = dw2 - dw1;
      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
	return dw;
    }

  info_val = (*current_sched_info->rank) (tmp, tmp2);
  if (flag_sched_rank_heuristic && info_val)
    return info_val;

  if (flag_sched_last_insn_heuristic)
    {
      last = last_scheduled_insn;

      if (DEBUG_INSN_P (last) && last != current_sched_info->prev_head)
	do
	  last = PREV_INSN (last);
	while (!NONDEBUG_INSN_P (last)
	       && last != current_sched_info->prev_head);
    }

  /* Compare insns based on their relation to the last scheduled
     non-debug insn.  */
  if (flag_sched_last_insn_heuristic && NONDEBUG_INSN_P (last))
    {
      dep_t dep1;
      dep_t dep2;

      /* Classify the instructions into three classes:
	 1) Data dependent on last scheduled insn.
	 2) Anti/Output dependent on last scheduled insn.
	 3) Independent of last scheduled insn, or has latency of one.
	 Choose the insn from the highest numbered class if different.  */
      dep1 = sd_find_dep_between (last, tmp, true);

      if (dep1 == NULL || dep_cost (dep1) == 1)
	tmp_class = 3;
      else if (/* Data dependence.  */
	       DEP_TYPE (dep1) == REG_DEP_TRUE)
	tmp_class = 1;
      else
	tmp_class = 2;

      dep2 = sd_find_dep_between (last, tmp2, true);

      if (dep2 == NULL || dep_cost (dep2) == 1)
	tmp2_class = 3;
      else if (/* Data dependence.  */
	       DEP_TYPE (dep2) == REG_DEP_TRUE)
	tmp2_class = 1;
      else
	tmp2_class = 2;

      if ((val = tmp2_class - tmp_class))
	return val;
    }

  /* Prefer the insn which has more later insns that depend on it.
     This gives the scheduler more freedom when scheduling later
     instructions at the expense of added register pressure.  */

  val = (dep_list_size (tmp2) - dep_list_size (tmp));

  if (flag_sched_dep_count_heuristic && val != 0)
    return val;

  /* If insns are equally good, sort by INSN_LUID (original insn order),
     so that we make the sort stable.  This minimizes instruction movement,
     thus minimizing sched's effect on debugging and cross-jumping.  */
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
}
/* Resort the array A in which only element at index N may be out of order.  */

HAIFA_INLINE static void
swap_sort (rtx *a, int n)
{
  rtx insn = a[n - 1];
  int i = n - 2;

  while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
    {
      a[i + 1] = a[i];
      i -= 1;
    }
  a[i + 1] = insn;
}
/* Add INSN to the insn queue so that it can be executed at least
   N_CYCLES after the currently executing insn.  Preserve insns
   chain for debugging purposes.  */

HAIFA_INLINE static void
queue_insn (rtx insn, int n_cycles)
{
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
  rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);

  gcc_assert (n_cycles <= max_insn_queue_index);
  gcc_assert (!DEBUG_INSN_P (insn));

  insn_queue[next_q] = link;
  q_size += 1;

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
	       (*current_sched_info->print_insn) (insn, 0));

      fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
    }

  QUEUE_INDEX (insn) = next_q;
}
/* Remove INSN from queue.  */
static void
queue_remove (rtx insn)
{
  gcc_assert (QUEUE_INDEX (insn) >= 0);
  remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
  q_size--;
  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
}
/* Return a pointer to the bottom of the ready list, i.e. the insn
   with the lowest priority.  */

rtx *
ready_lastpos (struct ready_list *ready)
{
  gcc_assert (ready->n_ready >= 1);
  return ready->vec + ready->first - ready->n_ready + 1;
}
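
/* Layout illustration: with veclen == 8, first == 6 and n_ready == 3, the
   live window is vec[4..6]; ready_lastpos () returns &vec[4] (the lowest
   priority insn) and vec[6] is what ready_remove_first () pops next.  */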
/* Add an element INSN to the ready list so that it ends up with the
   lowest/highest priority depending on FIRST_P.  */

HAIFA_INLINE static void
ready_add (struct ready_list *ready, rtx insn, bool first_p)
{
  if (!first_p)
    {
      if (ready->first == ready->n_ready)
	{
	  memmove (ready->vec + ready->veclen - ready->n_ready,
		   ready_lastpos (ready),
		   ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 1;
	}
      ready->vec[ready->first - ready->n_ready] = insn;
    }
  else
    {
      if (ready->first == ready->veclen - 1)
	{
	  if (ready->n_ready)
	    /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
	    memmove (ready->vec + ready->veclen - ready->n_ready - 1,
		     ready_lastpos (ready),
		     ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 2;
	}
      ready->vec[++(ready->first)] = insn;
    }

  ready->n_ready++;
  if (DEBUG_INSN_P (insn))
    ready->n_debug++;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
  QUEUE_INDEX (insn) = QUEUE_READY;
}
/* Remove the element with the highest priority from the ready list and
   return it.  */

HAIFA_INLINE static rtx
ready_remove_first (struct ready_list *ready)
{
  rtx t;

  gcc_assert (ready->n_ready);
  t = ready->vec[ready->first--];
  ready->n_ready--;
  if (DEBUG_INSN_P (t))
    ready->n_debug--;
  /* If the queue becomes empty, reset it.  */
  if (ready->n_ready == 0)
    ready->first = ready->veclen - 1;

  gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
  QUEUE_INDEX (t) = QUEUE_NOWHERE;

  return t;
}
/* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we will try to choose the ready insn which
   permits starting the maximum number of insns on the same cycle.  */

/* Return a pointer to the element INDEX from the ready list.  INDEX for
   the insn with the highest priority is 0, and the lowest priority has
   N_READY - 1.  */

rtx
ready_element (struct ready_list *ready, int index)
{
  gcc_assert (ready->n_ready && index < ready->n_ready);

  return ready->vec[ready->first - index];
}
/* Remove the element INDEX from the ready list and return it.  INDEX
   for the insn with the highest priority is 0, and the lowest priority
   has N_READY - 1.  */

HAIFA_INLINE static rtx
ready_remove (struct ready_list *ready, int index)
{
  rtx t;
  int i;

  if (index == 0)
    return ready_remove_first (ready);
  gcc_assert (ready->n_ready && index < ready->n_ready);
  t = ready->vec[ready->first - index];
  ready->n_ready--;
  if (DEBUG_INSN_P (t))
    ready->n_debug--;
  for (i = index; i < ready->n_ready; i++)
    ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
  QUEUE_INDEX (t) = QUEUE_NOWHERE;
  return t;
}
/* Remove INSN from the ready list.  */
static void
ready_remove_insn (rtx insn)
{
  int i;

  for (i = 0; i < readyp->n_ready; i++)
    if (ready_element (readyp, i) == insn)
      {
	ready_remove (readyp, i);
	return;
      }
  gcc_unreachable ();
}
/* Sort the ready list READY by ascending priority, using the SCHED_SORT
   macro.  */

void
ready_sort (struct ready_list *ready)
{
  int i;
  rtx *first = ready_lastpos (ready);

  if (sched_pressure_p)
    {
      for (i = 0; i < ready->n_ready; i++)
	if (!DEBUG_INSN_P (first[i]))
	  setup_insn_reg_pressure_info (first[i]);
    }
  SCHED_SORT (first, ready->n_ready);
}
/* PREV is an insn that is ready to execute.  Adjust its priority if that
   will help shorten or lengthen register lifetimes as appropriate.  Also
   provide a hook for the target to tweak itself.  */

HAIFA_INLINE static void
adjust_priority (rtx prev)
{
  /* ??? There used to be code here to try and estimate how an insn
     affected register lifetimes, but it did it by looking at REG_DEAD
     notes, which we removed in schedule_region.  Nor did it try to
     take into account register pressure or anything useful like that.

     Revisit when we have a machine model to work with and not before.  */

  if (targetm.sched.adjust_priority)
    INSN_PRIORITY (prev) =
      targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
}
/* Advance DFA state STATE on one cycle.  */
void
advance_state (state_t state)
{
  if (targetm.sched.dfa_pre_advance_cycle)
    targetm.sched.dfa_pre_advance_cycle ();

  if (targetm.sched.dfa_pre_cycle_insn)
    state_transition (state,
		      targetm.sched.dfa_pre_cycle_insn ());

  state_transition (state, NULL);

  if (targetm.sched.dfa_post_cycle_insn)
    state_transition (state,
		      targetm.sched.dfa_post_cycle_insn ());

  if (targetm.sched.dfa_post_advance_cycle)
    targetm.sched.dfa_post_advance_cycle ();
}
/* Advance time on one cycle.  */
HAIFA_INLINE static void
advance_one_cycle (void)
{
  advance_state (curr_state);
  if (sched_verbose >= 6)
    fprintf (sched_dump, ";;\tAdvanced a state.\n");
}
/* Clock at which the previous instruction was issued.  */
static int last_clock_var;

/* Update register pressure after scheduling INSN.  */
static void
update_register_pressure (rtx insn)
{
  struct reg_use_data *use;
  struct reg_set_data *set;

  gcc_checking_assert (!DEBUG_INSN_P (insn));

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (dying_use_p (use) && bitmap_bit_p (curr_reg_live, use->regno))
      mark_regno_birth_or_death (use->regno, false);
  for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
    mark_regno_birth_or_death (set->regno, true);
}
/* Set up or update (if UPDATE_P) max register pressure (see its
   meaning in sched-int.h::_haifa_insn_data) for all current BB insns
   after insn AFTER.  */
static void
setup_insn_max_reg_pressure (rtx after, bool update_p)
{
  int i, p;
  bool eq_p;
  rtx insn;
  static int max_reg_pressure[N_REG_CLASSES];

  save_reg_pressure ();
  for (i = 0; i < ira_reg_class_cover_size; i++)
    max_reg_pressure[ira_reg_class_cover[i]]
      = curr_reg_pressure[ira_reg_class_cover[i]];
  for (insn = NEXT_INSN (after);
       insn != NULL_RTX && ! BARRIER_P (insn)
	 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
       insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn))
      {
	eq_p = true;
	for (i = 0; i < ira_reg_class_cover_size; i++)
	  {
	    p = max_reg_pressure[ira_reg_class_cover[i]];
	    if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
	      {
		eq_p = false;
		INSN_MAX_REG_PRESSURE (insn)[i]
		  = max_reg_pressure[ira_reg_class_cover[i]];
	      }
	  }
	if (update_p && eq_p)
	  break;
	update_register_pressure (insn);
	for (i = 0; i < ira_reg_class_cover_size; i++)
	  if (max_reg_pressure[ira_reg_class_cover[i]]
	      < curr_reg_pressure[ira_reg_class_cover[i]])
	    max_reg_pressure[ira_reg_class_cover[i]]
	      = curr_reg_pressure[ira_reg_class_cover[i]];
      }
  restore_reg_pressure ();
}
/* Update the current register pressure after scheduling INSN.  Update
   also max register pressure for unscheduled insns of the current
   BB.  */
static void
update_reg_and_insn_max_reg_pressure (rtx insn)
{
  int i;
  int before[N_REG_CLASSES];

  for (i = 0; i < ira_reg_class_cover_size; i++)
    before[i] = curr_reg_pressure[ira_reg_class_cover[i]];
  update_register_pressure (insn);
  for (i = 0; i < ira_reg_class_cover_size; i++)
    if (curr_reg_pressure[ira_reg_class_cover[i]] != before[i])
      break;
  if (i < ira_reg_class_cover_size)
    setup_insn_max_reg_pressure (insn, true);
}
/* Set up register pressure at the beginning of basic block BB whose
   insns start after insn AFTER.  Set up also max register pressure
   for all insns of the basic block.  */
void
sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
{
  gcc_assert (sched_pressure_p);
  initiate_bb_reg_pressure_info (bb);
  setup_insn_max_reg_pressure (after, false);
}
/* INSN is the "currently executing insn".  Launch each insn which was
   waiting on INSN.  READY is the ready list which contains the insns
   that are ready to fire.  CLOCK is the current cycle.  The function
   returns the necessary cycle advance after issuing the insn (it is not
   zero for insns in a schedule group).  */

static int
schedule_insn (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int i;
  int advance = 0;

  if (sched_verbose >= 1)
    {
      struct reg_pressure_data *pressure_info;
      char buf[2048];

      print_insn (buf, insn, 0);
      buf[40] = 0;
      fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);

      if (recog_memoized (insn) < 0)
	fprintf (sched_dump, "nothing");
      else
	print_reservation (sched_dump, insn);
      pressure_info = INSN_REG_PRESSURE (insn);
      if (pressure_info != NULL)
	{
	  fputc (':', sched_dump);
	  for (i = 0; i < ira_reg_class_cover_size; i++)
	    fprintf (sched_dump, "%s%+d(%d)",
		     reg_class_names[ira_reg_class_cover[i]],
		     pressure_info[i].set_increase, pressure_info[i].change);
	}
      fputc ('\n', sched_dump);
    }

  if (sched_pressure_p && !DEBUG_INSN_P (insn))
    update_reg_and_insn_max_reg_pressure (insn);

  /* Scheduling instruction should have all its dependencies resolved and
     should have been removed from the ready list.  */
  gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK));

  /* Reset debug insns invalidated by moving this insn.  */
  if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
    for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
	 sd_iterator_cond (&sd_it, &dep);)
      {
	rtx dbg = DEP_PRO (dep);
	struct reg_use_data *use, *next;

	gcc_assert (DEBUG_INSN_P (dbg));

	if (sched_verbose >= 6)
	  fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
		   INSN_UID (dbg));

	/* ??? Rather than resetting the debug insn, we might be able
	   to emit a debug temp before the just-scheduled insn, but
	   this would involve checking that the expression at the
	   point of the debug insn is equivalent to the expression
	   before the just-scheduled insn.  They might not be: the
	   expression in the debug insn may depend on other insns not
	   yet scheduled that set MEMs, REGs or even other debug
	   insns.  It's not clear that attempting to preserve debug
	   information in these cases is worth the effort, given how
	   uncommon these resets are and the likelihood that the debug
	   temps introduced won't survive the schedule change.  */
	INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
	df_insn_rescan (dbg);

	/* Unknown location doesn't use any registers.  */
	for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
	  {
	    struct reg_use_data *prev = use;

	    /* Remove use from the cyclic next_regno_use chain first.  */
	    while (prev->next_regno_use != use)
	      prev = prev->next_regno_use;
	    prev->next_regno_use = use->next_regno_use;
	    next = use->next_insn_use;
	    free (use);
	  }
	INSN_REG_USE_LIST (dbg) = NULL;

	/* We delete rather than resolve these deps, otherwise we
	   crash in sched_free_deps(), because forward deps are
	   expected to be released before backward deps.  */
	sd_delete_dep (sd_it);
      }

  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  gcc_assert (INSN_TICK (insn) >= MIN_TICK);
  if (INSN_TICK (insn) > clock_var)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if the following flag is set.  */
    gcc_assert (flag_sched_stalled_insns);

  /* ??? Probably, if INSN is scheduled prematurely, we should leave
     INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
  INSN_TICK (insn) = clock_var;

  /* Update dependent instructions.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
       sd_iterator_cond (&sd_it, &dep);)
    {
      rtx next = DEP_CON (dep);

      /* Resolve the dependence between INSN and NEXT.
	 sd_resolve_dep () moves current dep to another list thus
	 advancing the iterator.  */
      sd_resolve_dep (sd_it);

      /* Don't bother trying to mark next as ready if insn is a debug
	 insn.  If insn is the last hard dependency, it will have
	 already been discounted.  */
      if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
	continue;

      if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
	{
	  int effective_cost;

	  effective_cost = try_ready (next);

	  if (effective_cost >= 0
	      && SCHED_GROUP_P (next)
	      && advance < effective_cost)
	    advance = effective_cost;
	}
      else
	/* Check always has only one forward dependence (to the first insn in
	   the recovery block), therefore, this will be executed only once.  */
	{
	  gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
	  fix_recovery_deps (RECOVERY_BLOCK (insn));
	}
    }

  /* This is the place where scheduler doesn't *basically* need backward and
     forward dependencies for INSN anymore.  Nevertheless they are used in
     heuristics in rank_for_schedule (), early_queue_to_ready () and in
     some targets (e.g. rs6000).  Thus the earliest place where we *can*
     remove dependencies is after targetm.sched.finish () call in
     schedule_block ().  But, on the other side, the safest place to remove
     dependencies is when we are finishing scheduling entire region.  As we
     don't generate [many] dependencies during scheduling itself, we won't
     need memory until beginning of next region.
     Bottom line: Dependencies are removed for all insns in the end of
     scheduling the region.  */

  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (issue_rate > 1
      && GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER
      && !DEBUG_INSN_P (insn))
    {
      if (reload_completed)
	PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
      last_clock_var = clock_var;
    }

  return advance;
}
1827 /* Functions for handling of notes. */
1829 /* Add note list that ends on FROM_END to the end of TO_ENDP. */
1830 void
1831 concat_note_lists (rtx from_end, rtx *to_endp)
1833 rtx from_start;
1835 /* It's easy when have nothing to concat. */
1836 if (from_end == NULL)
1837 return;
1839 /* It's also easy when destination is empty. */
1840 if (*to_endp == NULL)
1842 *to_endp = from_end;
1843 return;
1846 from_start = from_end;
1847 while (PREV_INSN (from_start) != NULL)
1848 from_start = PREV_INSN (from_start);
1850 PREV_INSN (from_start) = *to_endp;
1851 NEXT_INSN (*to_endp) = from_start;
1852 *to_endp = from_end;
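/* Editorial sketch (not part of GCC): the same tail-to-tail splice as
   concat_note_lists above, on a plain doubly-linked list.  "node",
   "prev" and "next" are hypothetical stand-ins for insns and
   PREV_INSN/NEXT_INSN.  Kept under #if 0 so the listing stays intact.  */
#if 0
struct node { struct node *prev, *next; };

static void
concat_by_tails (struct node *from_end, struct node **to_endp)
{
  struct node *from_start = from_end;

  if (from_end == NULL)
    return;

  if (*to_endp == NULL)
    {
      *to_endp = from_end;
      return;
    }

  /* Walk back to the head of the FROM list...  */
  while (from_start->prev != NULL)
    from_start = from_start->prev;

  /* ...and hook it in after the current tail of TO.  */
  from_start->prev = *to_endp;
  (*to_endp)->next = from_start;
  *to_endp = from_end;
}
#endif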
1855 /* Delete notes between HEAD and TAIL and put them in the chain
1856 of notes ended by NOTE_LIST. */
1857 void
1858 remove_notes (rtx head, rtx tail)
1860 rtx next_tail, insn, next;
1862 note_list = 0;
1863 if (head == tail && !INSN_P (head))
1864 return;
1866 next_tail = NEXT_INSN (tail);
1867 for (insn = head; insn != next_tail; insn = next)
1869 next = NEXT_INSN (insn);
1870 if (!NOTE_P (insn))
1871 continue;
1873 switch (NOTE_KIND (insn))
1875 case NOTE_INSN_BASIC_BLOCK:
1876 continue;
1878 case NOTE_INSN_EPILOGUE_BEG:
1879 if (insn != tail)
1880 {
1881 remove_insn (insn);
1882 add_reg_note (next, REG_SAVE_NOTE,
1883 GEN_INT (NOTE_INSN_EPILOGUE_BEG));
1884 break;
1885 }
1886 /* FALLTHRU */
1888 default:
1889 remove_insn (insn);
1891 /* Add the note to list that ends at NOTE_LIST. */
1892 PREV_INSN (insn) = note_list;
1893 NEXT_INSN (insn) = NULL_RTX;
1894 if (note_list)
1895 NEXT_INSN (note_list) = insn;
1896 note_list = insn;
1897 break;
1900 gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
1905 /* Return the head and tail pointers of ebb starting at BEG and ending
1906 at END. */
1907 void
1908 get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
1910 rtx beg_head = BB_HEAD (beg);
1911 rtx beg_tail = BB_END (beg);
1912 rtx end_head = BB_HEAD (end);
1913 rtx end_tail = BB_END (end);
1915 /* Don't include any notes or labels at the beginning of the BEG
1916 basic block, or notes at the end of the END basic block. */
1918 if (LABEL_P (beg_head))
1919 beg_head = NEXT_INSN (beg_head);
1921 while (beg_head != beg_tail)
1922 if (NOTE_P (beg_head) || BOUNDARY_DEBUG_INSN_P (beg_head))
1923 beg_head = NEXT_INSN (beg_head);
1924 else
1925 break;
1927 *headp = beg_head;
1929 if (beg == end)
1930 end_head = beg_head;
1931 else if (LABEL_P (end_head))
1932 end_head = NEXT_INSN (end_head);
1934 while (end_head != end_tail)
1935 if (NOTE_P (end_tail) || BOUNDARY_DEBUG_INSN_P (end_tail))
1936 end_tail = PREV_INSN (end_tail);
1937 else
1938 break;
1940 *tailp = end_tail;
1943 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
1945 int
1946 no_real_insns_p (const_rtx head, const_rtx tail)
1948 while (head != NEXT_INSN (tail))
1950 if (!NOTE_P (head) && !LABEL_P (head)
1951 && !BOUNDARY_DEBUG_INSN_P (head))
1952 return 0;
1953 head = NEXT_INSN (head);
1955 return 1;
1958 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
1959 previously found among the insns. Insert them just before HEAD. */
1960 rtx
1961 restore_other_notes (rtx head, basic_block head_bb)
1963 if (note_list != 0)
1965 rtx note_head = note_list;
1967 if (head)
1968 head_bb = BLOCK_FOR_INSN (head);
1969 else
1970 head = NEXT_INSN (bb_note (head_bb));
1972 while (PREV_INSN (note_head))
1974 set_block_for_insn (note_head, head_bb);
1975 note_head = PREV_INSN (note_head);
1977 /* In the above loop we've missed this note. */
1978 set_block_for_insn (note_head, head_bb);
1980 PREV_INSN (note_head) = PREV_INSN (head);
1981 NEXT_INSN (PREV_INSN (head)) = note_head;
1982 PREV_INSN (head) = note_list;
1983 NEXT_INSN (note_list) = head;
1985 if (BLOCK_FOR_INSN (head) != head_bb)
1986 BB_END (head_bb) = note_list;
1988 head = note_head;
1991 return head;
1994 /* Move insns that became ready to fire from queue to ready list. */
1996 static void
1997 queue_to_ready (struct ready_list *ready)
1999 rtx insn;
2000 rtx link;
2001 rtx skip_insn;
2003 q_ptr = NEXT_Q (q_ptr);
2005 if (dbg_cnt (sched_insn) == false)
2007 /* If the debug counter is activated, do not requeue the insn right
2008 after last_scheduled_insn. */
2009 skip_insn = next_nonnote_insn (last_scheduled_insn);
2010 while (skip_insn && DEBUG_INSN_P (skip_insn))
2011 skip_insn = next_nonnote_insn (skip_insn);
2013 else
2014 skip_insn = NULL_RTX;
2016 /* Add all pending insns that can be scheduled without stalls to the
2017 ready list. */
2018 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
2020 insn = XEXP (link, 0);
2021 q_size -= 1;
2023 if (sched_verbose >= 2)
2024 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
2025 (*current_sched_info->print_insn) (insn, 0));
2027 /* If the ready list is full, delay the insn for 1 cycle.
2028 See the comment in schedule_block for the rationale. */
2029 if (!reload_completed
2030 && ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
2031 && !SCHED_GROUP_P (insn)
2032 && insn != skip_insn)
2034 if (sched_verbose >= 2)
2035 fprintf (sched_dump, "requeued because ready full\n");
2036 queue_insn (insn, 1);
2038 else
2040 ready_add (ready, insn, false);
2041 if (sched_verbose >= 2)
2042 fprintf (sched_dump, "moving to ready without stalls\n");
2045 free_INSN_LIST_list (&insn_queue[q_ptr]);
2047 /* If there are no ready insns, stall until one is ready and add all
2048 of the pending insns at that point to the ready list. */
2049 if (ready->n_ready == 0)
2051 int stalls;
2053 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
2055 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2057 for (; link; link = XEXP (link, 1))
2059 insn = XEXP (link, 0);
2060 q_size -= 1;
2062 if (sched_verbose >= 2)
2063 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
2064 (*current_sched_info->print_insn) (insn, 0));
2066 ready_add (ready, insn, false);
2067 if (sched_verbose >= 2)
2068 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
2070 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
2072 advance_one_cycle ();
2074 break;
2077 advance_one_cycle ();
2080 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
2081 clock_var += stalls;
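/* Editorial sketch (not part of GCC): conceptually, the insn queue above
   is a circular array of max_insn_queue_index + 1 buckets; Q_PTR names
   the bucket for the current cycle, an insn queued with cost N lands N
   buckets ahead, and each advance_one_cycle () moves Q_PTR forward by
   one.  A hypothetical stand-alone model (the bucket count is made up):  */
#if 0
#include <stdio.h>

#define N_BUCKETS 8	/* stand-in for max_insn_queue_index + 1 */

static int
bucket_after (int q, int n)
{
  return (q + n) % N_BUCKETS;
}

int
main (void)
{
  int q_ptr = 5;

  /* An insn delayed 4 cycles from bucket 5 lands in bucket 1.  */
  printf ("%d\n", bucket_after (q_ptr, 4));
  return 0;
}
#endif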
2085 /* Used by early_queue_to_ready. Determines whether it is "ok" to
2086 prematurely move INSN from the queue to the ready list. Currently,
2087 if a target defines the hook 'is_costly_dependence', this function
2088 uses the hook to check whether there exist any dependences which are
2089 considered costly by the target, between INSN and other insns that
2090 have already been scheduled. Dependences are checked up to Y cycles
2091 back, with default Y=1. The flag -fsched-stalled-insns-dep=Y allows
2092 controlling this value.
2093 (Other considerations could be taken into account instead (or in
2094 addition) depending on user flags and target hooks.) */
2096 static bool
2097 ok_for_early_queue_removal (rtx insn)
2099 int n_cycles;
2100 rtx prev_insn = last_scheduled_insn;
2102 if (targetm.sched.is_costly_dependence)
2104 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
2106 for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
2108 int cost;
2110 if (prev_insn == current_sched_info->prev_head)
2112 prev_insn = NULL;
2113 break;
2116 if (!NOTE_P (prev_insn))
2118 dep_t dep;
2120 dep = sd_find_dep_between (prev_insn, insn, true);
2122 if (dep != NULL)
2124 cost = dep_cost (dep);
2126 if (targetm.sched.is_costly_dependence (dep, cost,
2127 flag_sched_stalled_insns_dep - n_cycles))
2128 return false;
2132 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
2133 break;
2136 if (!prev_insn)
2137 break;
2138 prev_insn = PREV_INSN (prev_insn);
2142 return true;
2146 /* Remove insns from the queue, before they become "ready" with respect
2147 to FU latency considerations. */
2149 static int
2150 early_queue_to_ready (state_t state, struct ready_list *ready)
2152 rtx insn;
2153 rtx link;
2154 rtx next_link;
2155 rtx prev_link;
2156 bool move_to_ready;
2157 int cost;
2158 state_t temp_state = alloca (dfa_state_size);
2159 int stalls;
2160 int insns_removed = 0;
2162 /*
2163 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
2164 function:
2166 X == 0: There is no limit on how many queued insns can be removed
2167 prematurely. (flag_sched_stalled_insns = -1).
2169 X >= 1: Only X queued insns can be removed prematurely in each
2170 invocation. (flag_sched_stalled_insns = X).
2172 Otherwise: Early queue removal is disabled.
2173 (flag_sched_stalled_insns = 0)
2174 */
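/* For example (based solely on the mapping documented above):
   "-fsched-stalled-insns=0" lifts the limit entirely, while
   "-fsched-stalled-insns=3" removes at most three queued insns per
   invocation, and omitting the flag leaves early removal disabled.  */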
2176 if (! flag_sched_stalled_insns)
2177 return 0;
2179 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
2181 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2183 if (sched_verbose > 6)
2184 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
2186 prev_link = 0;
2187 while (link)
2189 next_link = XEXP (link, 1);
2190 insn = XEXP (link, 0);
2191 if (insn && sched_verbose > 6)
2192 print_rtl_single (sched_dump, insn);
2194 memcpy (temp_state, state, dfa_state_size);
2195 if (recog_memoized (insn) < 0)
2196 /* Force a non-negative cost, indicating that the insn is not ready
2197 to move early; this avoids an infinite Q->R->Q->R... cycle. */
2198 cost = 0;
2199 else
2200 cost = state_transition (temp_state, insn);
2202 if (sched_verbose >= 6)
2203 fprintf (sched_dump, "transition cost = %d\n", cost);
2205 move_to_ready = false;
2206 if (cost < 0)
2208 move_to_ready = ok_for_early_queue_removal (insn);
2209 if (move_to_ready == true)
2211 /* move from Q to R */
2212 q_size -= 1;
2213 ready_add (ready, insn, false);
2215 if (prev_link)
2216 XEXP (prev_link, 1) = next_link;
2217 else
2218 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
2220 free_INSN_LIST_node (link);
2222 if (sched_verbose >= 2)
2223 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
2224 (*current_sched_info->print_insn) (insn, 0));
2226 insns_removed++;
2227 if (insns_removed == flag_sched_stalled_insns)
2228 /* Remove no more than flag_sched_stalled_insns insns
2229 from Q at a time. */
2230 return insns_removed;
2234 if (move_to_ready == false)
2235 prev_link = link;
2237 link = next_link;
2238 } /* while link */
2239 } /* if link */
2241 } /* for stalls.. */
2243 return insns_removed;
2247 /* Print the ready list for debugging purposes. Callable from debugger. */
2249 static void
2250 debug_ready_list (struct ready_list *ready)
2252 rtx *p;
2253 int i;
2255 if (ready->n_ready == 0)
2257 fprintf (sched_dump, "\n");
2258 return;
2261 p = ready_lastpos (ready);
2262 for (i = 0; i < ready->n_ready; i++)
2264 fprintf (sched_dump, " %s:%d",
2265 (*current_sched_info->print_insn) (p[i], 0),
2266 INSN_LUID (p[i]));
2267 if (sched_pressure_p)
2268 fprintf (sched_dump, "(cost=%d",
2269 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
2270 if (INSN_TICK (p[i]) > clock_var)
2271 fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
2272 if (sched_pressure_p)
2273 fprintf (sched_dump, ")");
2275 fprintf (sched_dump, "\n");
2278 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
2279 NOTEs. This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
2280 replaces the epilogue note in the correct basic block. */
2281 void
2282 reemit_notes (rtx insn)
2284 rtx note, last = insn;
2286 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2288 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
2290 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
2292 last = emit_note_before (note_type, last);
2293 remove_note (insn, note);
2298 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
2299 static void
2300 move_insn (rtx insn, rtx last, rtx nt)
2302 if (PREV_INSN (insn) != last)
2304 basic_block bb;
2305 rtx note;
2306 int jump_p = 0;
2308 bb = BLOCK_FOR_INSN (insn);
2310 /* BB_HEAD is either LABEL or NOTE. */
2311 gcc_assert (BB_HEAD (bb) != insn);
2313 if (BB_END (bb) == insn)
2314 /* If this is the last instruction in the BB, move the end marker
2315 one instruction up. */
2317 /* Jumps are always placed at the end of basic block. */
2318 jump_p = control_flow_insn_p (insn);
2320 gcc_assert (!jump_p
2321 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
2322 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
2323 || (common_sched_info->sched_pass_id
2324 == SCHED_EBB_PASS));
2326 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
2328 BB_END (bb) = PREV_INSN (insn);
2331 gcc_assert (BB_END (bb) != last);
2333 if (jump_p)
2334 /* We move the block note along with the jump. */
2336 gcc_assert (nt);
2338 note = NEXT_INSN (insn);
2339 while (NOTE_NOT_BB_P (note) && note != nt)
2340 note = NEXT_INSN (note);
2342 if (note != nt
2343 && (LABEL_P (note)
2344 || BARRIER_P (note)))
2345 note = NEXT_INSN (note);
2347 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
2349 else
2350 note = insn;
2352 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
2353 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
2355 NEXT_INSN (note) = NEXT_INSN (last);
2356 PREV_INSN (NEXT_INSN (last)) = note;
2358 NEXT_INSN (last) = insn;
2359 PREV_INSN (insn) = last;
2361 bb = BLOCK_FOR_INSN (last);
2363 if (jump_p)
2365 fix_jump_move (insn);
2367 if (BLOCK_FOR_INSN (insn) != bb)
2368 move_block_after_check (insn);
2370 gcc_assert (BB_END (bb) == last);
2373 df_insn_change_bb (insn, bb);
2375 /* Update BB_END, if needed. */
2376 if (BB_END (bb) == last)
2377 BB_END (bb) = insn;
2380 SCHED_GROUP_P (insn) = 0;
2383 /* Return true if scheduling INSN will finish current clock cycle. */
2384 static bool
2385 insn_finishes_cycle_p (rtx insn)
2387 if (SCHED_GROUP_P (insn))
2388 /* After issuing INSN, the rest of the sched_group will be forced to issue
2389 in order. Don't make any plans for the rest of cycle. */
2390 return true;
2392 /* Finishing the block will, apparently, finish the cycle. */
2393 if (current_sched_info->insn_finishes_block_p
2394 && current_sched_info->insn_finishes_block_p (insn))
2395 return true;
2397 return false;
2400 /* The following structure describes an entry in the stack of choices. */
2401 struct choice_entry
2403 /* Ordinal number of the issued insn in the ready queue. */
2404 int index;
2405 /* The number of remaining insns whose issue we should still try. */
2406 int rest;
2407 /* The number of issued essential insns. */
2408 int n;
2409 /* State after issuing the insn. */
2410 state_t state;
2413 /* The following array is used to implement a stack of choices used in
2414 function max_issue. */
2415 static struct choice_entry *choice_stack;
2417 /* The following variable holds the number of essential insns issued on
2418 the current cycle. An insn is essential if it changes the
2419 processor's state. */
2420 int cycle_issued_insns;
2422 /* This holds the value of the target dfa_lookahead hook. */
2423 int dfa_lookahead;
2425 /* The following variable holds the maximal number of tries of issuing
2426 insns for the first-cycle multipass insn scheduling. We define
2427 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
2428 need this constraint if all real insns (with non-negative codes)
2429 had reservations, because in that case the algorithm complexity would
2430 be O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the DFA descriptions
2431 might be incomplete and such insns might occur. For such
2432 descriptions, the complexity of the algorithm (without the constraint)
2433 could reach DFA_LOOKAHEAD ** N, where N is the queue length. */
2434 static int max_lookahead_tries;
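/* For example, with dfa_lookahead == 4 and issue_rate == 2, the
   initialization in max_issue () below yields
   max_lookahead_tries = 100 * 4 * 4 = 1600.  */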
2436 /* The following is the value of the hook
2437 `first_cycle_multipass_dfa_lookahead' at the last call of
2438 `max_issue'. */
2439 static int cached_first_cycle_multipass_dfa_lookahead = 0;
2441 /* The following is the value of `issue_rate' at the last call of
2442 `sched_init'. */
2443 static int cached_issue_rate = 0;
2445 /* The following function returns the maximal (or close to maximal)
2446 number of insns that can be issued on the same cycle, one of which
2447 is the insn with the best rank (the first insn in READY). To do
2448 this it tries different samples of ready insns. READY is the
2449 current queue `ready'. The global array READY_TRY reflects which
2450 insns are already issued in this try. MAX_POINTS is the sum of points
2451 of all instructions in READY. The function stops immediately if it
2452 reaches a solution in which all instructions can be issued.
2453 INDEX will contain the index of the best insn in READY. This
2454 function is used only for first-cycle multipass scheduling.
2456 PRIVILEGED_N >= 0
2458 This function expects recognized insns only. All USEs,
2459 CLOBBERs, etc must be filtered elsewhere. */
2460 int
2461 max_issue (struct ready_list *ready, int privileged_n, state_t state,
2462 int *index)
2464 int n, i, all, n_ready, best, delay, tries_num, max_points;
2465 int more_issue;
2466 struct choice_entry *top;
2467 rtx insn;
2469 n_ready = ready->n_ready;
2470 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
2471 && privileged_n <= n_ready);
2473 /* Init MAX_LOOKAHEAD_TRIES. */
2474 if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
2476 cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
2477 max_lookahead_tries = 100;
2478 for (i = 0; i < issue_rate; i++)
2479 max_lookahead_tries *= dfa_lookahead;
2482 /* Init max_points. */
2483 max_points = 0;
2484 more_issue = issue_rate - cycle_issued_insns;
2486 /* ??? We used to assert here that we never issue more insns than issue_rate.
2487 However, some targets (e.g. MIPS/SB1) claim lower issue rate than can be
2488 achieved to get better performance. Until these targets are fixed to use
2489 scheduler hooks to manipulate insns priority instead, the assert should
2490 be disabled.
2492 gcc_assert (more_issue >= 0); */
2494 for (i = 0; i < n_ready; i++)
2495 if (!ready_try [i])
2497 if (more_issue-- > 0)
2498 max_points += ISSUE_POINTS (ready_element (ready, i));
2499 else
2500 break;
2503 /* The number of the issued insns in the best solution. */
2504 best = 0;
2506 top = choice_stack;
2508 /* Set initial state of the search. */
2509 memcpy (top->state, state, dfa_state_size);
2510 top->rest = dfa_lookahead;
2511 top->n = 0;
2513 /* Count the number of the insns to search among. */
2514 for (all = i = 0; i < n_ready; i++)
2515 if (!ready_try [i])
2516 all++;
2518 /* I is the index of the insn to try next. */
2519 i = 0;
2520 tries_num = 0;
2521 for (;;)
2523 if (/* If we've reached a dead end or searched enough of what we have
2524 been asked... */
2525 top->rest == 0
2526 /* Or have nothing else to try. */
2527 || i >= n_ready)
2529 /* ??? (... || i == n_ready). */
2530 gcc_assert (i <= n_ready);
2532 if (top == choice_stack)
2533 break;
2535 if (best < top - choice_stack)
2537 if (privileged_n)
2539 n = privileged_n;
2540 /* Try to find issued privileged insn. */
2541 while (n && !ready_try[--n]);
2544 if (/* If all insns are equally good... */
2545 privileged_n == 0
2546 /* Or a privileged insn will be issued. */
2547 || ready_try[n])
2548 /* Then we have a solution. */
2550 best = top - choice_stack;
2551 /* This is the index of the insn issued first in this
2552 solution. */
2553 *index = choice_stack [1].index;
2554 if (top->n == max_points || best == all)
2555 break;
2559 /* Set ready-list index to point to the last insn
2560 ('i++' below will advance it to the next insn). */
2561 i = top->index;
2563 /* Backtrack. */
2564 ready_try [i] = 0;
2565 top--;
2566 memcpy (state, top->state, dfa_state_size);
2568 else if (!ready_try [i])
2570 tries_num++;
2571 if (tries_num > max_lookahead_tries)
2572 break;
2573 insn = ready_element (ready, i);
2574 delay = state_transition (state, insn);
2575 if (delay < 0)
2577 if (state_dead_lock_p (state)
2578 || insn_finishes_cycle_p (insn))
2579 /* We won't issue any more instructions in the next
2580 choice_state. */
2581 top->rest = 0;
2582 else
2583 top->rest--;
2585 n = top->n;
2586 if (memcmp (top->state, state, dfa_state_size) != 0)
2587 n += ISSUE_POINTS (insn);
2589 /* Advance to the next choice_entry. */
2590 top++;
2591 /* Initialize it. */
2592 top->rest = dfa_lookahead;
2593 top->index = i;
2594 top->n = n;
2595 memcpy (top->state, state, dfa_state_size);
2597 ready_try [i] = 1;
2598 i = -1;
2602 /* Increase ready-list index. */
2603 i++;
2606 /* Restore the original state of the DFA. */
2607 memcpy (state, choice_stack->state, dfa_state_size);
2609 return best;
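/* Editorial sketch (not part of GCC): the branch-and-bound idea of
   max_issue in a toy, recursive form.  Here a "cycle" is just
   ISSUE_SLOTS interchangeable slots, whereas the real code consults the
   DFA via state_transition and backtracks with an explicit choice_stack
   instead of recursion; N_TOY and ISSUE_SLOTS are made-up values.  */
#if 0
#define ISSUE_SLOTS 2
#define N_TOY 4

static int toy_used[N_TOY];

/* Return the largest number of toy insns that can still be issued,
   given that USED slots are already taken this cycle.  */
static int
toy_max_issue (int used)
{
  int i, best = 0;

  if (used == ISSUE_SLOTS)
    return 0;			/* Cycle is full: dead end.  */

  for (i = 0; i < N_TOY; i++)
    if (!toy_used[i])
      {
	int n;

	toy_used[i] = 1;	/* Try issuing insn I.  */
	n = 1 + toy_max_issue (used + 1);
	toy_used[i] = 0;	/* Backtrack.  */

	if (n > best)
	  best = n;
      }

  return best;
}
#endif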
2612 /* The following function chooses an insn from READY and modifies
2613 READY. This function is used only for first-cycle
2614 multipass scheduling.
2615 Return:
2616 -1 if cycle should be advanced,
2617 0 if INSN_PTR is set to point to the desirable insn,
2618 1 if choose_ready () should be restarted without advancing the cycle. */
2619 static int
2620 choose_ready (struct ready_list *ready, rtx *insn_ptr)
2622 int lookahead;
2624 if (dbg_cnt (sched_insn) == false)
2626 rtx insn;
2628 insn = next_nonnote_insn (last_scheduled_insn);
2630 if (QUEUE_INDEX (insn) == QUEUE_READY)
2631 /* INSN is in the ready_list. */
2633 ready_remove_insn (insn);
2634 *insn_ptr = insn;
2635 return 0;
2638 /* INSN is in the queue. Advance cycle to move it to the ready list. */
2639 return -1;
2642 lookahead = 0;
2644 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2645 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2646 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
2647 || DEBUG_INSN_P (ready_element (ready, 0)))
2649 *insn_ptr = ready_remove_first (ready);
2650 return 0;
2652 else
2654 /* Try to choose the better insn. */
2655 int index = 0, i, n;
2656 rtx insn;
2657 int try_data = 1, try_control = 1;
2658 ds_t ts;
2660 insn = ready_element (ready, 0);
2661 if (INSN_CODE (insn) < 0)
2663 *insn_ptr = ready_remove_first (ready);
2664 return 0;
2667 if (spec_info
2668 && spec_info->flags & (PREFER_NON_DATA_SPEC
2669 | PREFER_NON_CONTROL_SPEC))
2671 for (i = 0, n = ready->n_ready; i < n; i++)
2673 rtx x;
2674 ds_t s;
2676 x = ready_element (ready, i);
2677 s = TODO_SPEC (x);
2679 if (spec_info->flags & PREFER_NON_DATA_SPEC
2680 && !(s & DATA_SPEC))
2682 try_data = 0;
2683 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2684 || !try_control)
2685 break;
2688 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2689 && !(s & CONTROL_SPEC))
2691 try_control = 0;
2692 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2693 break;
2698 ts = TODO_SPEC (insn);
2699 if ((ts & SPECULATIVE)
2700 && (((!try_data && (ts & DATA_SPEC))
2701 || (!try_control && (ts & CONTROL_SPEC)))
2702 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2703 && !targetm.sched
2704 .first_cycle_multipass_dfa_lookahead_guard_spec (insn))))
2705 /* Discard the speculative instruction that stands first in the ready
2706 list. */
2708 change_queue_index (insn, 1);
2709 return 1;
2712 ready_try[0] = 0;
2714 for (i = 1; i < ready->n_ready; i++)
2716 insn = ready_element (ready, i);
2718 ready_try [i]
2719 = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2720 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC)));
2723 /* Let the target filter the search space. */
2724 for (i = 1; i < ready->n_ready; i++)
2725 if (!ready_try[i])
2727 insn = ready_element (ready, i);
2729 #ifdef ENABLE_CHECKING
2730 /* If this insn is recognizable we should have already
2731 recognized it earlier.
2732 ??? Not very clear where this is supposed to be done.
2733 See dep_cost_1. */
2734 gcc_assert (INSN_CODE (insn) >= 0
2735 || recog_memoized (insn) < 0);
2736 #endif
2738 ready_try [i]
2739 = (/* INSN_CODE check can be omitted here as it is also done later
2740 in max_issue (). */
2741 INSN_CODE (insn) < 0
2742 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2743 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2744 (insn)));
2747 if (max_issue (ready, 1, curr_state, &index) == 0)
2749 *insn_ptr = ready_remove_first (ready);
2750 if (sched_verbose >= 4)
2751 fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
2752 (*current_sched_info->print_insn) (*insn_ptr, 0));
2753 return 0;
2755 else
2757 if (sched_verbose >= 4)
2758 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
2759 (*current_sched_info->print_insn)
2760 (ready_element (ready, index), 0));
2762 *insn_ptr = ready_remove (ready, index);
2763 return 0;
2768 /* Use forward list scheduling to rearrange insns of block pointed to by
2769 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2770 region. */
2772 void
2773 schedule_block (basic_block *target_bb)
2775 int i, first_cycle_insn_p;
2776 int can_issue_more;
2777 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2778 int sort_p, advance, start_clock_var;
2780 /* Head/tail info for this block. */
2781 rtx prev_head = current_sched_info->prev_head;
2782 rtx next_tail = current_sched_info->next_tail;
2783 rtx head = NEXT_INSN (prev_head);
2784 rtx tail = PREV_INSN (next_tail);
2786 /* We used to have code to avoid getting parameters moved from hard
2787 argument registers into pseudos.
2789 However, it was removed when it proved to be of marginal benefit
2790 and caused problems because schedule_block and compute_forward_dependences
2791 had different notions of what the "head" insn was. */
2793 gcc_assert (head != tail || INSN_P (head));
2795 haifa_recovery_bb_recently_added_p = false;
2797 /* Debug info. */
2798 if (sched_verbose)
2799 dump_new_block_header (0, *target_bb, head, tail);
2801 state_reset (curr_state);
2803 /* Clear the ready list. */
2804 ready.first = ready.veclen - 1;
2805 ready.n_ready = 0;
2806 ready.n_debug = 0;
2808 /* It is used for first cycle multipass scheduling. */
2809 temp_state = alloca (dfa_state_size);
2811 if (targetm.sched.init)
2812 targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
2814 /* We start inserting insns after PREV_HEAD. */
2815 last_scheduled_insn = prev_head;
2817 gcc_assert ((NOTE_P (last_scheduled_insn)
2818 || BOUNDARY_DEBUG_INSN_P (last_scheduled_insn))
2819 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2821 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
2822 queue. */
2823 q_ptr = 0;
2824 q_size = 0;
2826 insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
2827 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
2829 /* Start just before the beginning of time. */
2830 clock_var = -1;
2832 /* The queue and ready lists and clock_var need to be initialized
2833 before try_ready () is used (it is called through init_ready_list ()). */
2834 (*current_sched_info->init_ready_list) ();
2836 /* The algorithm is O(n^2) in the number of ready insns at any given
2837 time in the worst case. Before reload we are more likely to have
2838 big lists so truncate them to a reasonable size. */
2839 if (!reload_completed
2840 && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
2842 ready_sort (&ready);
2844 /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
2845 If there are debug insns, we know they're first. */
2846 for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
2847 if (!SCHED_GROUP_P (ready_element (&ready, i)))
2848 break;
2850 if (sched_verbose >= 2)
2852 fprintf (sched_dump,
2853 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2854 fprintf (sched_dump,
2855 ";;\t\t before reload => truncated to %d insns\n", i);
2858 /* Delay all insns past it for 1 cycle. If the debug counter
2859 is activated, make an exception for the insn right after
2860 last_scheduled_insn. */
2862 rtx skip_insn;
2864 if (dbg_cnt (sched_insn) == false)
2865 skip_insn = next_nonnote_insn (last_scheduled_insn);
2866 else
2867 skip_insn = NULL_RTX;
2869 while (i < ready.n_ready)
2871 rtx insn;
2873 insn = ready_remove (&ready, i);
2875 if (insn != skip_insn)
2876 queue_insn (insn, 1);
2881 /* Now we can restore basic block notes and maintain precise cfg. */
2882 restore_bb_notes (*target_bb);
2884 last_clock_var = -1;
2886 advance = 0;
2888 sort_p = TRUE;
2889 /* Loop until all the insns in BB are scheduled. */
2890 while ((*current_sched_info->schedule_more_p) ())
2894 start_clock_var = clock_var;
2896 clock_var++;
2898 advance_one_cycle ();
2900 /* Add to the ready list all pending insns that can be issued now.
2901 If there are no ready insns, increment clock until one
2902 is ready and add all pending insns at that point to the ready
2903 list. */
2904 queue_to_ready (&ready);
2906 gcc_assert (ready.n_ready);
2908 if (sched_verbose >= 2)
2910 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2911 debug_ready_list (&ready);
2913 advance -= clock_var - start_clock_var;
2915 while (advance > 0);
2917 if (sort_p)
2919 /* Sort the ready list based on priority. */
2920 ready_sort (&ready);
2922 if (sched_verbose >= 2)
2924 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2925 debug_ready_list (&ready);
2929 /* We don't want md sched reorder to even see debug insns, so put
2930 them out right away. */
2931 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
2933 if (control_flow_insn_p (last_scheduled_insn))
2935 *target_bb = current_sched_info->advance_target_bb
2936 (*target_bb, 0);
2938 if (sched_verbose)
2940 rtx x;
2942 x = next_real_insn (last_scheduled_insn);
2943 gcc_assert (x);
2944 dump_new_block_header (1, *target_bb, x, tail);
2947 last_scheduled_insn = bb_note (*target_bb);
2950 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
2952 rtx insn = ready_remove_first (&ready);
2953 gcc_assert (DEBUG_INSN_P (insn));
2954 (*current_sched_info->begin_schedule_ready) (insn,
2955 last_scheduled_insn);
2956 move_insn (insn, last_scheduled_insn,
2957 current_sched_info->next_tail);
2958 last_scheduled_insn = insn;
2959 advance = schedule_insn (insn);
2960 gcc_assert (advance == 0);
2961 if (ready.n_ready > 0)
2962 ready_sort (&ready);
2965 if (!ready.n_ready)
2966 continue;
2969 /* Allow the target to reorder the list, typically for
2970 better instruction bundling. */
2971 if (sort_p && targetm.sched.reorder
2972 && (ready.n_ready == 0
2973 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2974 can_issue_more =
2975 targetm.sched.reorder (sched_dump, sched_verbose,
2976 ready_lastpos (&ready),
2977 &ready.n_ready, clock_var);
2978 else
2979 can_issue_more = issue_rate;
2981 first_cycle_insn_p = 1;
2982 cycle_issued_insns = 0;
2983 for (;;)
2985 rtx insn;
2986 int cost;
2987 bool asm_p = false;
2989 if (sched_verbose >= 2)
2991 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
2992 clock_var);
2993 debug_ready_list (&ready);
2994 if (sched_pressure_p)
2995 print_curr_reg_pressure ();
2998 if (ready.n_ready == 0
2999 && can_issue_more
3000 && reload_completed)
3002 /* Allow scheduling insns directly from the queue in case
3003 there's nothing better to do (ready list is empty) but
3004 there are still vacant dispatch slots in the current cycle. */
3005 if (sched_verbose >= 6)
3006 fprintf (sched_dump,";;\t\tSecond chance\n");
3007 memcpy (temp_state, curr_state, dfa_state_size);
3008 if (early_queue_to_ready (temp_state, &ready))
3009 ready_sort (&ready);
3012 if (ready.n_ready == 0
3013 || !can_issue_more
3014 || state_dead_lock_p (curr_state)
3015 || !(*current_sched_info->schedule_more_p) ())
3016 break;
3018 /* Select and remove the insn from the ready list. */
3019 if (sort_p)
3021 int res;
3023 insn = NULL_RTX;
3024 res = choose_ready (&ready, &insn);
3026 if (res < 0)
3027 /* Finish cycle. */
3028 break;
3029 if (res > 0)
3030 /* Restart choose_ready (). */
3031 continue;
3033 gcc_assert (insn != NULL_RTX);
3035 else
3036 insn = ready_remove_first (&ready);
3038 if (sched_pressure_p && INSN_TICK (insn) > clock_var)
3040 ready_add (&ready, insn, true);
3041 advance = 1;
3042 break;
3045 if (targetm.sched.dfa_new_cycle
3046 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
3047 insn, last_clock_var,
3048 clock_var, &sort_p))
3049 /* SORT_P is used by the target to override sorting
3050 of the ready list. This is needed when the target
3051 has modified its internal structures expecting that
3052 the insn will be issued next. As we need the insn
3053 to have the highest priority (so it will be returned by
3054 the ready_remove_first call above), we invoke
3055 ready_add (&ready, insn, true).
3056 But, still, there is one issue: INSN can be later
3057 discarded by the scheduler's front end through
3058 current_sched_info->can_schedule_ready_p, and hence
3059 won't be issued next. */
3061 ready_add (&ready, insn, true);
3062 break;
3065 sort_p = TRUE;
3066 memcpy (temp_state, curr_state, dfa_state_size);
3067 if (recog_memoized (insn) < 0)
3069 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
3070 || asm_noperands (PATTERN (insn)) >= 0);
3071 if (!first_cycle_insn_p && asm_p)
3072 /* This is an asm insn that we are trying to issue on a
3073 cycle other than the first. Issue it on the next cycle. */
3074 cost = 1;
3075 else
3076 /* A USE insn, or something else we don't need to
3077 understand. We can't pass these directly to
3078 state_transition because it will trigger a
3079 fatal error for unrecognizable insns. */
3080 cost = 0;
3082 else if (sched_pressure_p)
3083 cost = 0;
3084 else
3086 cost = state_transition (temp_state, insn);
3087 if (cost < 0)
3088 cost = 0;
3089 else if (cost == 0)
3090 cost = 1;
3093 if (cost >= 1)
3095 queue_insn (insn, cost);
3096 if (SCHED_GROUP_P (insn))
3098 advance = cost;
3099 break;
3102 continue;
3105 if (current_sched_info->can_schedule_ready_p
3106 && ! (*current_sched_info->can_schedule_ready_p) (insn))
3107 /* We normally get here only if we don't want to move
3108 insn from the split block. */
3110 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
3111 continue;
3114 /* DECISION is made. */
3116 if (TODO_SPEC (insn) & SPECULATIVE)
3117 generate_recovery_code (insn);
3119 if (control_flow_insn_p (last_scheduled_insn)
3120 /* This is used to switch basic blocks by request
3121 from the scheduler front-end (actually, sched-ebb.c only).
3122 It is used to process blocks with a single fallthru
3123 edge. If the succeeding block has a jump, that jump would
3124 move to the end of the current bb, thus corrupting the CFG. */
3125 || current_sched_info->advance_target_bb (*target_bb, insn))
3127 *target_bb = current_sched_info->advance_target_bb
3128 (*target_bb, 0);
3130 if (sched_verbose)
3132 rtx x;
3134 x = next_real_insn (last_scheduled_insn);
3135 gcc_assert (x);
3136 dump_new_block_header (1, *target_bb, x, tail);
3139 last_scheduled_insn = bb_note (*target_bb);
3142 /* Update counters, etc in the scheduler's front end. */
3143 (*current_sched_info->begin_schedule_ready) (insn,
3144 last_scheduled_insn);
3146 move_insn (insn, last_scheduled_insn, current_sched_info->next_tail);
3147 reemit_notes (insn);
3148 last_scheduled_insn = insn;
3150 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
3152 cycle_issued_insns++;
3153 memcpy (curr_state, temp_state, dfa_state_size);
3156 if (targetm.sched.variable_issue)
3157 can_issue_more =
3158 targetm.sched.variable_issue (sched_dump, sched_verbose,
3159 insn, can_issue_more);
3160 /* A naked CLOBBER or USE generates no instruction, so do
3161 not count them against the issue rate. */
3162 else if (GET_CODE (PATTERN (insn)) != USE
3163 && GET_CODE (PATTERN (insn)) != CLOBBER)
3164 can_issue_more--;
3165 advance = schedule_insn (insn);
3167 /* After issuing an asm insn we should start a new cycle. */
3168 if (advance == 0 && asm_p)
3169 advance = 1;
3170 if (advance != 0)
3171 break;
3173 first_cycle_insn_p = 0;
3175 /* Sort the ready list based on priority. This must be
3176 redone here, as schedule_insn may have readied additional
3177 insns that will not be sorted correctly. */
3178 if (ready.n_ready > 0)
3179 ready_sort (&ready);
3181 /* Quickly go through debug insns so that md sched
3182 reorder2 doesn't have to deal with them. */
3183 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
3184 && (*current_sched_info->schedule_more_p) ())
3186 if (control_flow_insn_p (last_scheduled_insn))
3188 *target_bb = current_sched_info->advance_target_bb
3189 (*target_bb, 0);
3191 if (sched_verbose)
3193 rtx x;
3195 x = next_real_insn (last_scheduled_insn);
3196 gcc_assert (x);
3197 dump_new_block_header (1, *target_bb, x, tail);
3200 last_scheduled_insn = bb_note (*target_bb);
3203 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
3205 insn = ready_remove_first (&ready);
3206 gcc_assert (DEBUG_INSN_P (insn));
3207 (*current_sched_info->begin_schedule_ready)
3208 (insn, last_scheduled_insn);
3209 move_insn (insn, last_scheduled_insn,
3210 current_sched_info->next_tail);
3211 advance = schedule_insn (insn);
3212 last_scheduled_insn = insn;
3213 gcc_assert (advance == 0);
3214 if (ready.n_ready > 0)
3215 ready_sort (&ready);
3219 if (targetm.sched.reorder2
3220 && (ready.n_ready == 0
3221 || !SCHED_GROUP_P (ready_element (&ready, 0))))
3223 can_issue_more =
3224 targetm.sched.reorder2 (sched_dump, sched_verbose,
3225 ready.n_ready
3226 ? ready_lastpos (&ready) : NULL,
3227 &ready.n_ready, clock_var);
3232 /* Debug info. */
3233 if (sched_verbose)
3235 fprintf (sched_dump, ";;\tReady list (final): ");
3236 debug_ready_list (&ready);
3239 if (current_sched_info->queue_must_finish_empty)
3240 /* Sanity check -- queue must be empty now. Meaningless if the region has
3241 multiple bbs. */
3242 gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
3243 else
3245 /* We must maintain QUEUE_INDEX between blocks in region. */
3246 for (i = ready.n_ready - 1; i >= 0; i--)
3248 rtx x;
3250 x = ready_element (&ready, i);
3251 QUEUE_INDEX (x) = QUEUE_NOWHERE;
3252 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
3255 if (q_size)
3256 for (i = 0; i <= max_insn_queue_index; i++)
3258 rtx link;
3259 for (link = insn_queue[i]; link; link = XEXP (link, 1))
3261 rtx x;
3263 x = XEXP (link, 0);
3264 QUEUE_INDEX (x) = QUEUE_NOWHERE;
3265 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
3267 free_INSN_LIST_list (&insn_queue[i]);
3271 if (sched_verbose)
3272 fprintf (sched_dump, ";; total time = %d\n", clock_var);
3274 if (!current_sched_info->queue_must_finish_empty
3275 || haifa_recovery_bb_recently_added_p)
3277 /* INSN_TICK (minimum clock tick at which the insn becomes
3278 ready) may not be correct for the insns in the subsequent
3279 blocks of the region. We should use a correct value of
3280 `clock_var' or modify INSN_TICK. It is better to keep
3281 clock_var equal to 0 at the start of a basic block.
3282 Therefore we modify INSN_TICK here. */
3283 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
3286 if (targetm.sched.finish)
3288 targetm.sched.finish (sched_dump, sched_verbose);
3289 /* The target might have added some instructions to the scheduled block
3290 in its md_finish () hook. These new insns don't have any data
3291 initialized, and to identify them we extend h_i_d so that they'll
3292 get zero luids. */
3293 sched_init_luids (NULL, NULL, NULL, NULL);
3296 if (sched_verbose)
3297 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n\n",
3298 INSN_UID (head), INSN_UID (tail));
3300 /* Update head/tail boundaries. */
3301 head = NEXT_INSN (prev_head);
3302 tail = last_scheduled_insn;
3304 head = restore_other_notes (head, NULL);
3306 current_sched_info->head = head;
3307 current_sched_info->tail = tail;
3310 /* Set_priorities: compute priority of each insn in the block. */
3312 int
3313 set_priorities (rtx head, rtx tail)
3315 rtx insn;
3316 int n_insn;
3317 int sched_max_insns_priority =
3318 current_sched_info->sched_max_insns_priority;
3319 rtx prev_head;
3321 if (head == tail && (! INSN_P (head) || BOUNDARY_DEBUG_INSN_P (head)))
3322 gcc_unreachable ();
3324 n_insn = 0;
3326 prev_head = PREV_INSN (head);
3327 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
3329 if (!INSN_P (insn))
3330 continue;
3332 n_insn++;
3333 (void) priority (insn);
3335 gcc_assert (INSN_PRIORITY_KNOWN (insn));
3337 sched_max_insns_priority = MAX (sched_max_insns_priority,
3338 INSN_PRIORITY (insn));
3341 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
3343 return n_insn;
3346 /* Set dump and sched_verbose for the desired debugging output. If no
3347 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
3348 For -fsched-verbose=N, N>=10, print everything to stderr. */
3349 void
3350 setup_sched_dump (void)
3352 sched_verbose = sched_verbose_param;
3353 if (sched_verbose_param == 0 && dump_file)
3354 sched_verbose = 1;
3355 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
3356 ? stderr : dump_file);
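/* For example, "-fsched-verbose=2" with no dump file sends the
   scheduler's traces to stderr, while "-fsched-verbose=10" (or higher)
   sends everything to stderr even when a dump file exists.  */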
3359 /* Initialize some global state for the scheduler. This function works
3360 with the common data shared between all the schedulers. It is called
3361 from the scheduler specific initialization routine. */
3363 void
3364 sched_init (void)
3366 /* Disable speculative loads in the presence of cc0, if it is defined. */
3367 #ifdef HAVE_cc0
3368 flag_schedule_speculative_load = 0;
3369 #endif
3371 sched_pressure_p = (flag_sched_pressure && ! reload_completed
3372 && common_sched_info->sched_pass_id == SCHED_RGN_PASS);
3373 if (sched_pressure_p)
3374 ira_setup_eliminable_regset ();
3376 /* Initialize SPEC_INFO. */
3377 if (targetm.sched.set_sched_flags)
3379 spec_info = &spec_info_var;
3380 targetm.sched.set_sched_flags (spec_info);
3382 if (spec_info->mask != 0)
3384 spec_info->data_weakness_cutoff =
3385 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
3386 spec_info->control_weakness_cutoff =
3387 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
3388 * REG_BR_PROB_BASE) / 100;
3390 else
3391 /* So we won't read anything accidentally. */
3392 spec_info = NULL;
3395 else
3396 /* So we won't read anything accidentally. */
3397 spec_info = 0;
3399 /* Initialize issue_rate. */
3400 if (targetm.sched.issue_rate)
3401 issue_rate = targetm.sched.issue_rate ();
3402 else
3403 issue_rate = 1;
3405 if (cached_issue_rate != issue_rate)
3407 cached_issue_rate = issue_rate;
3408 /* To invalidate max_lookahead_tries: */
3409 cached_first_cycle_multipass_dfa_lookahead = 0;
3412 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
3413 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
3414 else
3415 dfa_lookahead = 0;
3417 if (targetm.sched.init_dfa_pre_cycle_insn)
3418 targetm.sched.init_dfa_pre_cycle_insn ();
3420 if (targetm.sched.init_dfa_post_cycle_insn)
3421 targetm.sched.init_dfa_post_cycle_insn ();
3423 dfa_start ();
3424 dfa_state_size = state_size ();
3426 init_alias_analysis ();
3428 df_set_flags (DF_LR_RUN_DCE);
3429 df_note_add_problem ();
3431 /* More problems needed for interloop dep calculation in SMS. */
3432 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
3434 df_rd_add_problem ();
3435 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
3438 df_analyze ();
3440 /* Do not run DCE after reload, as this can kill nops inserted
3441 by bundling. */
3442 if (reload_completed)
3443 df_clear_flags (DF_LR_RUN_DCE);
3445 regstat_compute_calls_crossed ();
3447 if (targetm.sched.init_global)
3448 targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
3450 if (sched_pressure_p)
3452 int i, max_regno = max_reg_num ();
3454 ira_set_pseudo_classes (sched_verbose ? sched_dump : NULL);
3455 sched_regno_cover_class
3456 = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
3457 for (i = 0; i < max_regno; i++)
3458 sched_regno_cover_class[i]
3459 = (i < FIRST_PSEUDO_REGISTER
3460 ? ira_class_translate[REGNO_REG_CLASS (i)]
3461 : reg_cover_class (i));
3462 curr_reg_live = BITMAP_ALLOC (NULL);
3463 saved_reg_live = BITMAP_ALLOC (NULL);
3464 region_ref_regs = BITMAP_ALLOC (NULL);
3467 curr_state = xmalloc (dfa_state_size);
3470 static void haifa_init_only_bb (basic_block, basic_block);
3472 /* Initialize data structures specific to the Haifa scheduler. */
3473 void
3474 haifa_sched_init (void)
3476 setup_sched_dump ();
3477 sched_init ();
3479 if (spec_info != NULL)
3481 sched_deps_info->use_deps_list = 1;
3482 sched_deps_info->generate_spec_deps = 1;
3485 /* Initialize luids, dependency caches, target and h_i_d for the
3486 whole function. */
3488 bb_vec_t bbs = VEC_alloc (basic_block, heap, n_basic_blocks);
3489 basic_block bb;
3491 sched_init_bbs ();
3493 FOR_EACH_BB (bb)
3494 VEC_quick_push (basic_block, bbs, bb);
3495 sched_init_luids (bbs, NULL, NULL, NULL);
3496 sched_deps_init (true);
3497 sched_extend_target ();
3498 haifa_init_h_i_d (bbs, NULL, NULL, NULL);
3500 VEC_free (basic_block, heap, bbs);
3503 sched_init_only_bb = haifa_init_only_bb;
3504 sched_split_block = sched_split_block_1;
3505 sched_create_empty_bb = sched_create_empty_bb_1;
3506 haifa_recovery_bb_ever_added_p = false;
3508 #ifdef ENABLE_CHECKING
3509 /* This is used primarily for finding bugs in check_cfg () itself.
3510 We must call sched_init_bbs () before check_cfg () because check_cfg ()
3511 assumes that the last insn in the last bb has a non-null successor. */
3512 check_cfg (0, 0);
3513 #endif
3515 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
3516 before_recovery = 0;
3517 after_recovery = 0;
3520 /* Finish work with the data specific to the Haifa scheduler. */
3521 void
3522 haifa_sched_finish (void)
3524 sched_create_empty_bb = NULL;
3525 sched_split_block = NULL;
3526 sched_init_only_bb = NULL;
3528 if (spec_info && spec_info->dump)
3530 char c = reload_completed ? 'a' : 'b';
3532 fprintf (spec_info->dump,
3533 ";; %s:\n", current_function_name ());
3535 fprintf (spec_info->dump,
3536 ";; Procedure %cr-begin-data-spec motions == %d\n",
3537 c, nr_begin_data);
3538 fprintf (spec_info->dump,
3539 ";; Procedure %cr-be-in-data-spec motions == %d\n",
3540 c, nr_be_in_data);
3541 fprintf (spec_info->dump,
3542 ";; Procedure %cr-begin-control-spec motions == %d\n",
3543 c, nr_begin_control);
3544 fprintf (spec_info->dump,
3545 ";; Procedure %cr-be-in-control-spec motions == %d\n",
3546 c, nr_be_in_control);
3549 /* Finalize h_i_d, dependency caches, and luids for the whole
3550 function. Target will be finalized in md_global_finish (). */
3551 sched_deps_finish ();
3552 sched_finish_luids ();
3553 current_sched_info = NULL;
3554 sched_finish ();
3557 /* Free global data used during insn scheduling. This function works with
3558 the common data shared between the schedulers. */
3560 void
3561 sched_finish (void)
3563 haifa_finish_h_i_d ();
3564 if (sched_pressure_p)
3566 free (sched_regno_cover_class);
3567 BITMAP_FREE (region_ref_regs);
3568 BITMAP_FREE (saved_reg_live);
3569 BITMAP_FREE (curr_reg_live);
3571 free (curr_state);
3573 if (targetm.sched.finish_global)
3574 targetm.sched.finish_global (sched_dump, sched_verbose);
3576 end_alias_analysis ();
3578 regstat_free_calls_crossed ();
3580 dfa_finish ();
3582 #ifdef ENABLE_CHECKING
3583 /* After reload the ia64 backend clobbers the CFG, so we can't check anything. */
3584 if (!reload_completed)
3585 check_cfg (0, 0);
3586 #endif
3589 /* Fix INSN_TICKs of the instructions in the current block as well as
3590 INSN_TICKs of their dependents.
3591 HEAD and TAIL are the begin and the end of the current scheduled block. */
3592 static void
3593 fix_inter_tick (rtx head, rtx tail)
3595 /* Set of instructions with corrected INSN_TICK. */
3596 bitmap_head processed;
3597 /* ??? It is doubtful whether we should assume that cycle advance happens
3598 on basic block boundaries. Basically insns that are unconditionally
3599 ready at the start of the block are preferable to those which have
3600 a one-cycle dependency on an insn from the previous block. */
3601 int next_clock = clock_var + 1;
3603 bitmap_initialize (&processed, 0);
3605 /* Iterate over the scheduled instructions and fix their INSN_TICKs and
3606 the INSN_TICKs of dependent instructions, so that INSN_TICKs are
3607 consistent across different blocks. */
3608 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
3610 if (INSN_P (head))
3612 int tick;
3613 sd_iterator_def sd_it;
3614 dep_t dep;
3616 tick = INSN_TICK (head);
3617 gcc_assert (tick >= MIN_TICK);
3619 /* Fix the INSN_TICK of an instruction from the just-scheduled block. */
3620 if (!bitmap_bit_p (&processed, INSN_LUID (head)))
3622 bitmap_set_bit (&processed, INSN_LUID (head));
3623 tick -= next_clock;
3625 if (tick < MIN_TICK)
3626 tick = MIN_TICK;
3628 INSN_TICK (head) = tick;
3631 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
3633 rtx next;
3635 next = DEP_CON (dep);
3636 tick = INSN_TICK (next);
3638 if (tick != INVALID_TICK
3639 /* If NEXT has its INSN_TICK calculated, fix it.
3640 If not - it will be properly calculated from
3641 scratch later in fix_tick_ready. */
3642 && !bitmap_bit_p (&processed, INSN_LUID (next)))
3644 bitmap_set_bit (&processed, INSN_LUID (next));
3645 tick -= next_clock;
3647 if (tick < MIN_TICK)
3648 tick = MIN_TICK;
3650 if (tick > INTER_TICK (next))
3651 INTER_TICK (next) = tick;
3652 else
3653 tick = INTER_TICK (next);
3655 INSN_TICK (next) = tick;
3660 bitmap_clear (&processed);
3663 static int haifa_speculate_insn (rtx, ds_t, rtx *);
3665 /* Check if NEXT is ready to be added to the ready or queue list.
3666 If "yes", add it to the proper list.
3667 Returns:
3668 -1 - is not ready yet,
3669 0 - added to the ready list,
3670 0 < N - queued for N cycles. */
3671 int
3672 try_ready (rtx next)
3674 ds_t old_ts, *ts;
3676 ts = &TODO_SPEC (next);
3677 old_ts = *ts;
3679 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
3680 && ((old_ts & HARD_DEP)
3681 || (old_ts & SPECULATIVE)));
3683 if (sd_lists_empty_p (next, SD_LIST_BACK))
3684 /* NEXT has all its dependencies resolved. */
3686 /* Remove HARD_DEP bit from NEXT's status. */
3687 *ts &= ~HARD_DEP;
3689 if (current_sched_info->flags & DO_SPECULATION)
3690 /* Remove all speculative bits from NEXT's status. */
3691 *ts &= ~SPECULATIVE;
3693 else
3695 /* One of NEXT's dependencies has been resolved.
3696 Recalculate NEXT's status. */
3698 *ts &= ~SPECULATIVE & ~HARD_DEP;
3700 if (sd_lists_empty_p (next, SD_LIST_HARD_BACK))
3701 /* Now we've got NEXT with speculative deps only.
3702 1. Look at the deps to see what we have to do.
3703 2. Check if we can do 'todo'. */
3705 sd_iterator_def sd_it;
3706 dep_t dep;
3707 bool first_p = true;
3709 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
3711 ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
3713 if (DEBUG_INSN_P (DEP_PRO (dep))
3714 && !DEBUG_INSN_P (next))
3715 continue;
3717 if (first_p)
3719 first_p = false;
3721 *ts = ds;
3723 else
3724 *ts = ds_merge (*ts, ds);
3727 if (ds_weak (*ts) < spec_info->data_weakness_cutoff)
3728 /* Too few points. */
3729 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3731 else
3732 *ts |= HARD_DEP;
3735 if (*ts & HARD_DEP)
3736 gcc_assert (*ts == old_ts
3737 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
3738 else if (current_sched_info->new_ready)
3739 *ts = current_sched_info->new_ready (next, *ts);
3741 /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then the insn might
3742 have its original pattern or a changed (speculative) one. This is due
3743 to changing ebb in region scheduling.
3744 * But if (old_ts & SPECULATIVE), then we are pretty sure that the insn
3745 has a speculative pattern.
3747 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
3748 control-speculative NEXT could have been discarded by sched-rgn.c
3749 (the same case as when discarded by can_schedule_ready_p ()). */
3751 if ((*ts & SPECULATIVE)
3752 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3753 need to change anything. */
3754 && *ts != old_ts)
3756 int res;
3757 rtx new_pat;
3759 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3761 res = haifa_speculate_insn (next, *ts, &new_pat);
3763 switch (res)
3765 case -1:
3766 /* It would be nice to change DEP_STATUS of all dependences,
3767 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3768 so we won't reanalyze anything. */
3769 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3770 break;
3772 case 0:
3773 /* We follow the rule that every speculative insn
3774 has a non-null ORIG_PAT. */
3775 if (!ORIG_PAT (next))
3776 ORIG_PAT (next) = PATTERN (next);
3777 break;
3779 case 1:
3780 if (!ORIG_PAT (next))
3781 /* If we are going to overwrite the original pattern of the insn,
3782 save it. */
3783 ORIG_PAT (next) = PATTERN (next);
3785 haifa_change_pattern (next, new_pat);
3786 break;
3788 default:
3789 gcc_unreachable ();
3793 /* We need to restore pattern only if (*ts == 0), because otherwise it is
3794 either correct (*ts & SPECULATIVE),
3795 or we simply don't care (*ts & HARD_DEP). */
3797 gcc_assert (!ORIG_PAT (next)
3798 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
3800 if (*ts & HARD_DEP)
3802 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3803 control-speculative NEXT could have been discarded by sched-rgn.c
3804 (the same case as when discarded by can_schedule_ready_p ()). */
3805 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3807 change_queue_index (next, QUEUE_NOWHERE);
3808 return -1;
3810 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
3811 /* We should change the pattern of every previously speculative
3812 instruction - and we determine whether NEXT was speculative by
3813 using the ORIG_PAT field. There is one exception: speculation
3814 checks have ORIG_PAT set too, so skip them. */
3816 haifa_change_pattern (next, ORIG_PAT (next));
3817 ORIG_PAT (next) = 0;
3820 if (sched_verbose >= 2)
3822 int s = TODO_SPEC (next);
3824 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3825 (*current_sched_info->print_insn) (next, 0));
3827 if (spec_info && spec_info->dump)
3829 if (s & BEGIN_DATA)
3830 fprintf (spec_info->dump, "; data-spec;");
3831 if (s & BEGIN_CONTROL)
3832 fprintf (spec_info->dump, "; control-spec;");
3833 if (s & BE_IN_CONTROL)
3834 fprintf (spec_info->dump, "; in-control-spec;");
3837 fprintf (sched_dump, "\n");
3840 adjust_priority (next);
3842 return fix_tick_ready (next);
3845 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
3846 static int
3847 fix_tick_ready (rtx next)
3849 int tick, delay;
3851 if (!sd_lists_empty_p (next, SD_LIST_RES_BACK))
3853 int full_p;
3854 sd_iterator_def sd_it;
3855 dep_t dep;
3857 tick = INSN_TICK (next);
3858 /* If tick is not equal to INVALID_TICK, then update
3859 INSN_TICK of NEXT with the most recent resolved dependence
3860 cost. Otherwise, recalculate it from scratch. */
3861 full_p = (tick == INVALID_TICK);
3863 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
3865 rtx pro = DEP_PRO (dep);
3866 int tick1;
3868 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3870 tick1 = INSN_TICK (pro) + dep_cost (dep);
3871 if (tick1 > tick)
3872 tick = tick1;
3874 if (!full_p)
3875 break;
3878 else
3879 tick = -1;
3881 INSN_TICK (next) = tick;
3883 delay = tick - clock_var;
3884 if (delay <= 0 || sched_pressure_p)
3885 delay = QUEUE_READY;
3887 change_queue_index (next, delay);
3889 return delay;
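/* Editorial example: if the latest resolved dependence gives
   INSN_TICK (next) == clock_var + 3, then delay == 3 and NEXT is queued
   for three cycles; when INSN_TICK (next) <= clock_var (or when
   register-pressure scheduling is in effect) delay becomes QUEUE_READY
   and NEXT goes straight to the ready list.  */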
3892 /* Move NEXT to the proper queue list with (DELAY >= 1),
3893 or add it to the ready list (DELAY == QUEUE_READY),
3894 or remove it from the ready and queue lists entirely (DELAY == QUEUE_NOWHERE). */
3895 static void
3896 change_queue_index (rtx next, int delay)
3898 int i = QUEUE_INDEX (next);
3900 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3901 && delay != 0);
3902 gcc_assert (i != QUEUE_SCHEDULED);
3904 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3905 || (delay < 0 && delay == i))
3906 /* We have nothing to do. */
3907 return;
3909 /* Remove NEXT from wherever it is now. */
3910 if (i == QUEUE_READY)
3911 ready_remove_insn (next);
3912 else if (i >= 0)
3913 queue_remove (next);
3915 /* Add it to the proper place. */
3916 if (delay == QUEUE_READY)
3917 ready_add (readyp, next, false);
3918 else if (delay >= 1)
3919 queue_insn (next, delay);
3921 if (sched_verbose >= 2)
3923 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3924 (*current_sched_info->print_insn) (next, 0));
3926 if (delay == QUEUE_READY)
3927 fprintf (sched_dump, " into ready\n");
3928 else if (delay >= 1)
3929 fprintf (sched_dump, " into queue with cost=%d\n", delay);
3930 else
3931 fprintf (sched_dump, " removed from ready or queue lists\n");
3935 static int sched_ready_n_insns = -1;
3937 /* Initialize per region data structures. */
3938 void
3939 sched_extend_ready_list (int new_sched_ready_n_insns)
3941 int i;
3943 if (sched_ready_n_insns == -1)
3944 /* At the first call we need to initialize one more choice_stack
3945 entry. */
3947 i = 0;
3948 sched_ready_n_insns = 0;
3950 else
3951 i = sched_ready_n_insns + 1;
3953 ready.veclen = new_sched_ready_n_insns + issue_rate;
3954 ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);
3956 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
3958 ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
3959 sched_ready_n_insns, sizeof (*ready_try));
3961 /* We allocate +1 element to save initial state in the choice_stack[0]
3962 entry. */
3963 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
3964 new_sched_ready_n_insns + 1);
3966 for (; i <= new_sched_ready_n_insns; i++)
3967 choice_stack[i].state = xmalloc (dfa_state_size);
3969 sched_ready_n_insns = new_sched_ready_n_insns;
3972 /* Free per region data structures. */
3973 void
3974 sched_finish_ready_list (void)
3976 int i;
3978 free (ready.vec);
3979 ready.vec = NULL;
3980 ready.veclen = 0;
3982 free (ready_try);
3983 ready_try = NULL;
3985 for (i = 0; i <= sched_ready_n_insns; i++)
3986 free (choice_stack [i].state);
3987 free (choice_stack);
3988 choice_stack = NULL;
3990 sched_ready_n_insns = -1;
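/* Note: resetting sched_ready_n_insns to -1 re-arms the first-call check
   in sched_extend_ready_list above, so the next region allocates the
   extra choice_stack[0] entry again.  */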
3993 static int
3994 haifa_luid_for_non_insn (rtx x)
3996 gcc_assert (NOTE_P (x) || LABEL_P (x));
3998 return 0;
4001 /* Generates recovery code for INSN. */
4002 static void
4003 generate_recovery_code (rtx insn)
4005 if (TODO_SPEC (insn) & BEGIN_SPEC)
4006 begin_speculative_block (insn);
4008 /* Here we have an insn with no dependencies on
4009 instructions other than CHECK_SPEC ones. */
4011 if (TODO_SPEC (insn) & BE_IN_SPEC)
4012 add_to_speculative_block (insn);
4015 /* Helper function.
4016 Tries to add speculative dependencies of type FS between TWIN and
4017 the consumers of INSN's forward dependencies. */
4018 static void
4019 process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
4021 sd_iterator_def sd_it;
4022 dep_t dep;
4024 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
4026 ds_t ds;
4027 rtx consumer;
4029 consumer = DEP_CON (dep);
4031 ds = DEP_STATUS (dep);
4033 if (/* If we want to create a speculative dep. */
4034 fs
4035 /* And we can do that because this is a true dep. */
4036 && (ds & DEP_TYPES) == DEP_TRUE)
4038 gcc_assert (!(ds & BE_IN_SPEC));
4040 if (/* If this dep can be overcome with 'begin speculation'. */
4041 ds & BEGIN_SPEC)
4042 /* Then we have a choice: keep the dep 'begin speculative'
4043 or transform it into 'be in speculative'. */
4045 if (/* In try_ready we assert that once an insn becomes ready,
4046 it can be removed from the ready (or queue) list only
4047 by a backend decision. Hence we can't let the
4048 probability of the speculative dep decrease. */
4049 ds_weak (ds) <= ds_weak (fs))
4051 ds_t new_ds;
4053 new_ds = (ds & ~BEGIN_SPEC) | fs;
4055 if (/* consumer can 'be in speculative'. */
4056 sched_insn_is_legitimate_for_speculation_p (consumer,
4057 new_ds))
4058 /* Transform it to be in speculative. */
4059 ds = new_ds;
4062 else
4063 /* Mark the dep as 'be in speculative'. */
4064 ds |= fs;
4068 dep_def _new_dep, *new_dep = &_new_dep;
4070 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
4071 sd_add_dep (new_dep, false);
4076 /* Generates recovery code for BEGIN speculative INSN. */
4077 static void
4078 begin_speculative_block (rtx insn)
4080 if (TODO_SPEC (insn) & BEGIN_DATA)
4081 nr_begin_data++;
4082 if (TODO_SPEC (insn) & BEGIN_CONTROL)
4083 nr_begin_control++;
4085 create_check_block_twin (insn, false);
4087 TODO_SPEC (insn) &= ~BEGIN_SPEC;
4090 static void haifa_init_insn (rtx);
4092 /* Generates recovery code for BE_IN speculative INSN. */
4093 static void
4094 add_to_speculative_block (rtx insn)
4096 ds_t ts;
4097 sd_iterator_def sd_it;
4098 dep_t dep;
4099 rtx twins = NULL;
4100 rtx_vec_t priorities_roots;
4102 ts = TODO_SPEC (insn);
4103 gcc_assert (!(ts & ~BE_IN_SPEC));
4105 if (ts & BE_IN_DATA)
4106 nr_be_in_data++;
4107 if (ts & BE_IN_CONTROL)
4108 nr_be_in_control++;
4110 TODO_SPEC (insn) &= ~BE_IN_SPEC;
4111 gcc_assert (!TODO_SPEC (insn));
4113 DONE_SPEC (insn) |= ts;
4115 /* First we convert all simple checks to branchy ones. */
4116 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4117 sd_iterator_cond (&sd_it, &dep);)
4119 rtx check = DEP_PRO (dep);
4121 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
4123 create_check_block_twin (check, true);
4125 /* Restart search. */
4126 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4128 else
4129 /* Continue search. */
4130 sd_iterator_next (&sd_it);
4133 priorities_roots = NULL;
4134 clear_priorities (insn, &priorities_roots);
4136 while (1)
4138 rtx check, twin;
4139 basic_block rec;
4141 /* Get the first backward dependency of INSN. */
4142 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4143 if (!sd_iterator_cond (&sd_it, &dep))
4144 /* INSN has no backward dependencies left. */
4145 break;
4147 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
4148 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
4149 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4151 check = DEP_PRO (dep);
4153 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
4154 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
4156 rec = BLOCK_FOR_INSN (check);
4158 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
4159 haifa_init_insn (twin);
4161 sd_copy_back_deps (twin, insn, true);
4163 if (sched_verbose && spec_info->dump)
4164 /* INSN_BB (insn) isn't determined for twin insns yet.
4165 So we can't use current_sched_info->print_insn. */
4166 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4167 INSN_UID (twin), rec->index);
4169 twins = alloc_INSN_LIST (twin, twins);
4171 /* Add dependences between TWIN and all appropriate
4172 instructions from REC. */
4173 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
4175 rtx pro = DEP_PRO (dep);
4177 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
4179 /* INSN might have dependencies from the instructions from
4180 several recovery blocks. At this iteration we process those
4181 producers that reside in REC. */
4182 if (BLOCK_FOR_INSN (pro) == rec)
4184 dep_def _new_dep, *new_dep = &_new_dep;
4186 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
4187 sd_add_dep (new_dep, false);
4191 process_insn_forw_deps_be_in_spec (insn, twin, ts);
4193 /* Remove all dependencies between INSN and insns in REC. */
4194 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4195 sd_iterator_cond (&sd_it, &dep);)
4197 rtx pro = DEP_PRO (dep);
4199 if (BLOCK_FOR_INSN (pro) == rec)
4200 sd_delete_dep (sd_it);
4201 else
4202 sd_iterator_next (&sd_it);
4206 /* We couldn't have added the dependencies between INSN and TWINS earlier
4207 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
4208 while (twins)
4210 rtx twin;
4212 twin = XEXP (twins, 0);
4215 dep_def _new_dep, *new_dep = &_new_dep;
4217 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4218 sd_add_dep (new_dep, false);
4221 twin = XEXP (twins, 1);
4222 free_INSN_LIST_node (twins);
4223 twins = twin;
4226 calc_priorities (priorities_roots);
4227 VEC_free (rtx, heap, priorities_roots);
4230 /* Extend the array pointed to by P and zero-fill only the new part. */
4231 void *
4232 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
4234 gcc_assert (new_nmemb >= old_nmemb);
4235 p = XRESIZEVAR (void, p, new_nmemb * size);
4236 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
4237 return p;
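/* Usage sketch (NEW_N and OLD_N are hypothetical): this is how
   sched_extend_ready_list above grows ready_try; only the new tail
   [OLD_N, NEW_N) is zeroed.  */
#if 0
  ready_try = (char *) xrecalloc (ready_try, new_n, old_n,
                                  sizeof (*ready_try));
#endif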
4240 /* Helper function.
4241 Find fallthru edge from PRED. */
4242 edge
4243 find_fallthru_edge (basic_block pred)
4245 edge e;
4246 edge_iterator ei;
4247 basic_block succ;
4249 succ = pred->next_bb;
4250 gcc_assert (succ->prev_bb == pred);
4252 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
4254 FOR_EACH_EDGE (e, ei, pred->succs)
4255 if (e->flags & EDGE_FALLTHRU)
4257 gcc_assert (e->dest == succ);
4258 return e;
4261 else
4263 FOR_EACH_EDGE (e, ei, succ->preds)
4264 if (e->flags & EDGE_FALLTHRU)
4266 gcc_assert (e->src == pred);
4267 return e;
4271 return NULL;
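/* Design note: the fallthru edge, if present, occurs both in PRED's
   successor list and in SUCC's predecessor list, so the loop above walks
   whichever of the two lists is shorter.  */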
4274 /* Extend per basic block data structures. */
4275 static void
4276 sched_extend_bb (void)
4278 rtx insn;
4280 /* The following is done to keep current_sched_info->next_tail non-null. */
4281 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4282 if (NEXT_INSN (insn) == 0
4283 || (!NOTE_P (insn)
4284 && !LABEL_P (insn)
4285 /* Don't emit a NOTE if it would end up before a BARRIER. */
4286 && !BARRIER_P (NEXT_INSN (insn))))
4288 rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
4289 /* Make the note appear outside the BB. */
4290 set_block_for_insn (note, NULL);
4291 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
4295 /* Init per basic block data structures. */
4296 void
4297 sched_init_bbs (void)
4299 sched_extend_bb ();
4302 /* Initialize BEFORE_RECOVERY variable. */
4303 static void
4304 init_before_recovery (basic_block *before_recovery_ptr)
4306 basic_block last;
4307 edge e;
4309 last = EXIT_BLOCK_PTR->prev_bb;
4310 e = find_fallthru_edge (last);
4312 if (e)
4314 /* We create two basic blocks:
4315 1. A single-instruction block that is inserted right after E->SRC
4316 and jumps to
4317 2. An empty block placed right before EXIT_BLOCK.
4318 Recovery blocks will be emitted between these two blocks. */
4320 basic_block single, empty;
4321 rtx x, label;
4323 /* If the fallthrough edge to exit we've found is from the block we've
4324 created before, don't do anything more. */
4325 if (last == after_recovery)
4326 return;
4328 adding_bb_to_current_region_p = false;
4330 single = sched_create_empty_bb (last);
4331 empty = sched_create_empty_bb (single);
4333 /* Add new blocks to the root loop. */
4334 if (current_loops != NULL)
4336 add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
4337 add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
4340 single->count = last->count;
4341 empty->count = last->count;
4342 single->frequency = last->frequency;
4343 empty->frequency = last->frequency;
4344 BB_COPY_PARTITION (single, last);
4345 BB_COPY_PARTITION (empty, last);
4347 redirect_edge_succ (e, single);
4348 make_single_succ_edge (single, empty, 0);
4349 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
4350 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
4352 label = block_label (empty);
4353 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
4354 JUMP_LABEL (x) = label;
4355 LABEL_NUSES (label)++;
4356 haifa_init_insn (x);
4358 emit_barrier_after (x);
4360 sched_init_only_bb (empty, NULL);
4361 sched_init_only_bb (single, NULL);
4362 sched_extend_bb ();
4364 adding_bb_to_current_region_p = true;
4365 before_recovery = single;
4366 after_recovery = empty;
4368 if (before_recovery_ptr)
4369 *before_recovery_ptr = before_recovery;
4371 if (sched_verbose >= 2 && spec_info->dump)
4372 fprintf (spec_info->dump,
4373 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
4374 last->index, single->index, empty->index);
4376 else
4377 before_recovery = last;
4380 /* Returns new recovery block. */
4381 basic_block
4382 sched_create_recovery_block (basic_block *before_recovery_ptr)
4384 rtx label;
4385 rtx barrier;
4386 basic_block rec;
4388 haifa_recovery_bb_recently_added_p = true;
4389 haifa_recovery_bb_ever_added_p = true;
4391 init_before_recovery (before_recovery_ptr);
4393 barrier = get_last_bb_insn (before_recovery);
4394 gcc_assert (BARRIER_P (barrier));
4396 label = emit_label_after (gen_label_rtx (), barrier);
4398 rec = create_basic_block (label, label, before_recovery);
4400 /* A recovery block always ends with an unconditional jump. */
4401 emit_barrier_after (BB_END (rec));
4403 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
4404 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
4406 if (sched_verbose && spec_info->dump)
4407 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
4408 rec->index);
4410 return rec;
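/* Usage sketch, condensed from create_check_block_twin below (JUMP_LABEL
   and other bookkeeping omitted).  */
#if 0
  basic_block rec = sched_create_recovery_block (NULL);
  rtx check = targetm.sched.gen_spec_check (insn, BB_HEAD (rec), todo_spec);
  basic_block first_bb, second_bb;
  check = emit_jump_insn_before (check, insn);
  first_bb = BLOCK_FOR_INSN (check);
  second_bb = sched_split_block (first_bb, check);
  sched_create_recovery_edges (first_bb, rec, second_bb);
#endif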
4413 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
4414 and emit necessary jumps. */
4415 void
4416 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
4417 basic_block second_bb)
4419 rtx label;
4420 rtx jump;
4421 int edge_flags;
4423 /* Fix the incoming edge. */
4424 /* ??? Which other flags should be specified? */
4425 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
4426 /* The partition types are the same when both are "unpartitioned". */
4427 edge_flags = EDGE_CROSSING;
4428 else
4429 edge_flags = 0;
4431 make_edge (first_bb, rec, edge_flags);
4432 label = block_label (second_bb);
4433 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
4434 JUMP_LABEL (jump) = label;
4435 LABEL_NUSES (label)++;
4437 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
4438 /* The partition types are the same when both are "unpartitioned". */
4440 /* Rewritten from cfgrtl.c. */
4441 if (flag_reorder_blocks_and_partition
4442 && targetm.have_named_sections)
4444 /* We don't need the same note for the check because
4445 any_condjump_p (check) == true. */
4446 add_reg_note (jump, REG_CROSSING_JUMP, NULL_RTX);
4448 edge_flags = EDGE_CROSSING;
4450 else
4451 edge_flags = 0;
4453 make_single_succ_edge (rec, second_bb, edge_flags);
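/* Resulting CFG shape (the FIRST_BB -> SECOND_BB fallthru edge comes from
   the earlier block split):
   first_bb --(check jump)--> rec --(jump)--> second_bb
   first_bb -------------(fallthru)---------> second_bb  */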
4456 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
4457 INSN is a simple check that should be converted to a branchy one. */
4458 static void
4459 create_check_block_twin (rtx insn, bool mutate_p)
4461 basic_block rec;
4462 rtx label, check, twin;
4463 ds_t fs;
4464 sd_iterator_def sd_it;
4465 dep_t dep;
4466 dep_def _new_dep, *new_dep = &_new_dep;
4467 ds_t todo_spec;
4469 gcc_assert (ORIG_PAT (insn) != NULL_RTX);
4471 if (!mutate_p)
4472 todo_spec = TODO_SPEC (insn);
4473 else
4475 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
4476 && (TODO_SPEC (insn) & SPECULATIVE) == 0);
4478 todo_spec = CHECK_SPEC (insn);
4481 todo_spec &= SPECULATIVE;
4483 /* Create recovery block. */
4484 if (mutate_p || targetm.sched.needs_block_p (todo_spec))
4486 rec = sched_create_recovery_block (NULL);
4487 label = BB_HEAD (rec);
4489 else
4491 rec = EXIT_BLOCK_PTR;
4492 label = NULL_RTX;
4495 /* Emit CHECK. */
4496 check = targetm.sched.gen_spec_check (insn, label, todo_spec);
4498 if (rec != EXIT_BLOCK_PTR)
4500 /* To have mem_reg alive at the beginning of second_bb,
4501 we emit the check BEFORE insn, so that after splitting,
4502 insn is at the beginning of second_bb, which
4503 provides us with the correct life information. */
4504 check = emit_jump_insn_before (check, insn);
4505 JUMP_LABEL (check) = label;
4506 LABEL_NUSES (label)++;
4508 else
4509 check = emit_insn_before (check, insn);
4511 /* Extend data structures. */
4512 haifa_init_insn (check);
4514 /* CHECK is being added to current region. Extend ready list. */
4515 gcc_assert (sched_ready_n_insns != -1);
4516 sched_extend_ready_list (sched_ready_n_insns + 1);
4518 if (current_sched_info->add_remove_insn)
4519 current_sched_info->add_remove_insn (insn, 0);
4521 RECOVERY_BLOCK (check) = rec;
4523 if (sched_verbose && spec_info->dump)
4524 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
4525 (*current_sched_info->print_insn) (check, 0));
4527 gcc_assert (ORIG_PAT (insn));
4529 /* Initialize TWIN (twin is a duplicate of original instruction
4530 in the recovery block). */
4531 if (rec != EXIT_BLOCK_PTR)
4533 sd_iterator_def sd_it;
4534 dep_t dep;
4536 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
4537 if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
4539 struct _dep _dep2, *dep2 = &_dep2;
4541 init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
4543 sd_add_dep (dep2, true);
4546 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
4547 haifa_init_insn (twin);
4549 if (sched_verbose && spec_info->dump)
4550 /* INSN_BB (insn) isn't determined for twin insns yet.
4551 So we can't use current_sched_info->print_insn. */
4552 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
4553 INSN_UID (twin), rec->index);
4555 else
4557 ORIG_PAT (check) = ORIG_PAT (insn);
4558 HAS_INTERNAL_DEP (check) = 1;
4559 twin = check;
4560 /* ??? We probably should change all OUTPUT dependencies to
4561 (TRUE | OUTPUT). */
4564 /* Copy all resolved back dependencies of INSN to TWIN. This will
4565 provide correct value for INSN_TICK (TWIN). */
4566 sd_copy_back_deps (twin, insn, true);
4568 if (rec != EXIT_BLOCK_PTR)
4569 /* In case of branchy check, fix CFG. */
4571 basic_block first_bb, second_bb;
4572 rtx jump;
4574 first_bb = BLOCK_FOR_INSN (check);
4575 second_bb = sched_split_block (first_bb, check);
4577 sched_create_recovery_edges (first_bb, rec, second_bb);
4579 sched_init_only_bb (second_bb, first_bb);
4580 sched_init_only_bb (rec, EXIT_BLOCK_PTR);
4582 jump = BB_END (rec);
4583 haifa_init_insn (jump);
4586 /* Move backward dependences from INSN to CHECK and
4587 move forward dependences from INSN to TWIN. */
4589 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
4590 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4592 rtx pro = DEP_PRO (dep);
4593 ds_t ds;
4595 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
4596 check --TRUE--> producer ??? or ANTI ???
4597 twin --TRUE--> producer
4598 twin --ANTI--> check
4600 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
4601 check --ANTI--> producer
4602 twin --ANTI--> producer
4603 twin --ANTI--> check
4605 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
4606 check ~~TRUE~~> producer
4607 twin ~~TRUE~~> producer
4608 twin --ANTI--> check */
4610 ds = DEP_STATUS (dep);
4612 if (ds & BEGIN_SPEC)
4614 gcc_assert (!mutate_p);
4615 ds &= ~BEGIN_SPEC;
4618 init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
4619 sd_add_dep (new_dep, false);
4621 if (rec != EXIT_BLOCK_PTR)
4623 DEP_CON (new_dep) = twin;
4624 sd_add_dep (new_dep, false);
4628 /* Second, remove backward dependencies of INSN. */
4629 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4630 sd_iterator_cond (&sd_it, &dep);)
4632 if ((DEP_STATUS (dep) & BEGIN_SPEC)
4633 || mutate_p)
4634 /* We can delete this dep because we overcome it with
4635 BEGIN_SPECULATION. */
4636 sd_delete_dep (sd_it);
4637 else
4638 sd_iterator_next (&sd_it);
4641 /* Future Speculations. Determine what BE_IN speculations will be like. */
4642 fs = 0;
4644 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
4645 here. */
4647 gcc_assert (!DONE_SPEC (insn));
4649 if (!mutate_p)
4651 ds_t ts = TODO_SPEC (insn);
4653 DONE_SPEC (insn) = ts & BEGIN_SPEC;
4654 CHECK_SPEC (check) = ts & BEGIN_SPEC;
4656 /* The success of future speculations depends solely on the initial
4657 BEGIN speculation. */
4658 if (ts & BEGIN_DATA)
4659 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
4660 if (ts & BEGIN_CONTROL)
4661 fs = set_dep_weak (fs, BE_IN_CONTROL,
4662 get_dep_weak (ts, BEGIN_CONTROL));
4664 else
4665 CHECK_SPEC (check) = CHECK_SPEC (insn);
4667 /* Future speculations: call the helper. */
4668 process_insn_forw_deps_be_in_spec (insn, twin, fs);
4670 if (rec != EXIT_BLOCK_PTR)
4672 /* Which types of dependencies to use here is, generally,
4673 a machine-dependent question... But, for now,
4674 it is not. */
4676 if (!mutate_p)
4678 init_dep (new_dep, insn, check, REG_DEP_TRUE);
4679 sd_add_dep (new_dep, false);
4681 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
4682 sd_add_dep (new_dep, false);
4684 else
4686 if (spec_info->dump)
4687 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
4688 (*current_sched_info->print_insn) (insn, 0));
4690 /* Remove all dependencies of the INSN. */
4692 sd_it = sd_iterator_start (insn, (SD_LIST_FORW
4693 | SD_LIST_BACK
4694 | SD_LIST_RES_BACK));
4695 while (sd_iterator_cond (&sd_it, &dep))
4696 sd_delete_dep (sd_it);
4699 /* If the former check (INSN) was already moved to the ready (or queue)
4700 list, add the new check (CHECK) there too. */
4701 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
4702 try_ready (check);
4704 /* Remove old check from instruction stream and free its
4705 data. */
4706 sched_remove_insn (insn);
4709 init_dep (new_dep, check, twin, REG_DEP_ANTI);
4710 sd_add_dep (new_dep, false);
4712 else
4714 init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
4715 sd_add_dep (new_dep, false);
4718 if (!mutate_p)
4719 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
4720 because it'll be done later in add_to_speculative_block. */
4722 rtx_vec_t priorities_roots = NULL;
4724 clear_priorities (twin, &priorities_roots);
4725 calc_priorities (priorities_roots);
4726 VEC_free (rtx, heap, priorities_roots);
4730 /* Removes dependencies between instructions in the recovery block REC
4731 and ordinary region instructions. Inner dependences are kept so
4732 they need not be recomputed. */
4733 static void
4734 fix_recovery_deps (basic_block rec)
4736 rtx note, insn, jump, ready_list = 0;
4737 bitmap_head in_ready;
4738 rtx link;
4740 bitmap_initialize (&in_ready, 0);
4742 /* NOTE - a basic block note. */
4743 note = NEXT_INSN (BB_HEAD (rec));
4744 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4745 insn = BB_END (rec);
4746 gcc_assert (JUMP_P (insn));
4747 insn = PREV_INSN (insn);
4751 sd_iterator_def sd_it;
4752 dep_t dep;
4754 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4755 sd_iterator_cond (&sd_it, &dep);)
4757 rtx consumer = DEP_CON (dep);
4759 if (BLOCK_FOR_INSN (consumer) != rec)
4761 sd_delete_dep (sd_it);
4763 if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
4765 ready_list = alloc_INSN_LIST (consumer, ready_list);
4766 bitmap_set_bit (&in_ready, INSN_LUID (consumer));
4769 else
4771 gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
4773 sd_iterator_next (&sd_it);
4777 insn = PREV_INSN (insn);
4779 while (insn != note);
4781 bitmap_clear (&in_ready);
4783 /* Try to add instructions to the ready or queue list. */
4784 for (link = ready_list; link; link = XEXP (link, 1))
4785 try_ready (XEXP (link, 0));
4786 free_INSN_LIST_list (&ready_list);
4788 /* Fix the jump's dependences. */
4789 insn = BB_HEAD (rec);
4790 jump = BB_END (rec);
4792 gcc_assert (LABEL_P (insn));
4793 insn = NEXT_INSN (insn);
4795 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4796 add_jump_dependencies (insn, jump);
4799 /* Change pattern of INSN to NEW_PAT. */
4800 void
4801 sched_change_pattern (rtx insn, rtx new_pat)
4803 int t;
4805 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
4806 gcc_assert (t);
4807 dfa_clear_single_insn_cache (insn);
4810 /* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
4811 instruction data. */
4812 static void
4813 haifa_change_pattern (rtx insn, rtx new_pat)
4815 sched_change_pattern (insn, new_pat);
4817 /* Invalidate INSN_COST, so it'll be recalculated. */
4818 INSN_COST (insn) = -1;
4819 /* Invalidate INSN_TICK, so it'll be recalculated. */
4820 INSN_TICK (insn) = INVALID_TICK;
4823 /* -1 - can't speculate,
4824 0 - for speculation with REQUEST mode it is OK to use
4825 current instruction pattern,
4826 1 - need to change pattern for *NEW_PAT to be speculative. */
4827 int
4828 sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4830 gcc_assert (current_sched_info->flags & DO_SPECULATION
4831 && (request & SPECULATIVE)
4832 && sched_insn_is_legitimate_for_speculation_p (insn, request));
4834 if ((request & spec_info->mask) != request)
4835 return -1;
4837 if (request & BE_IN_SPEC
4838 && !(request & BEGIN_SPEC))
4839 return 0;
4841 return targetm.sched.speculate_insn (insn, request, new_pat);
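/* Caller sketch (hypothetical) of the -1/0/1 protocol documented above.  */
#if 0
  rtx new_pat;
  switch (sched_speculate_insn (insn, BEGIN_DATA, &new_pat))
    {
    case -1: /* Can't speculate at all.  */
      break;
    case 0:  /* The current pattern already satisfies the request.  */
      break;
    case 1:  /* Must switch to the speculative pattern.  */
      haifa_change_pattern (insn, new_pat);
      break;
    }
#endif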
4844 static int
4845 haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4847 gcc_assert (sched_deps_info->generate_spec_deps
4848 && !IS_SPECULATION_CHECK_P (insn));
4850 if (HAS_INTERNAL_DEP (insn)
4851 || SCHED_GROUP_P (insn))
4852 return -1;
4854 return sched_speculate_insn (insn, request, new_pat);
4857 /* Print some information about block BB, which starts with HEAD and
4858 ends with TAIL, before scheduling it.
4859 I is zero if the scheduler is about to start with a fresh ebb. */
4860 static void
4861 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4863 if (!i)
4864 fprintf (sched_dump,
4865 ";; ======================================================\n");
4866 else
4867 fprintf (sched_dump,
4868 ";; =====================ADVANCING TO=====================\n");
4869 fprintf (sched_dump,
4870 ";; -- basic block %d from %d to %d -- %s reload\n",
4871 bb->index, INSN_UID (head), INSN_UID (tail),
4872 (reload_completed ? "after" : "before"));
4873 fprintf (sched_dump,
4874 ";; ======================================================\n");
4875 fprintf (sched_dump, "\n");
4878 /* Unlink basic block notes and labels and save them, so they
4879 can be easily restored. We unlink basic block notes in EBB to
4880 provide back-compatibility with the previous code, as target backends
4881 assume that there will be only instructions between
4882 current_sched_info->{head and tail}. We restore these notes as soon
4883 as we can.
4884 FIRST (LAST) is the first (last) basic block in the ebb.
4885 NB: In usual case (FIRST == LAST) nothing is really done. */
4886 void
4887 unlink_bb_notes (basic_block first, basic_block last)
4889 /* We DON'T unlink basic block notes of the first block in the ebb. */
4890 if (first == last)
4891 return;
4893 bb_header = XNEWVEC (rtx, last_basic_block);
4895 /* Make a sentinel. */
4896 if (last->next_bb != EXIT_BLOCK_PTR)
4897 bb_header[last->next_bb->index] = 0;
4899 first = first->next_bb;
4902 rtx prev, label, note, next;
4904 label = BB_HEAD (last);
4905 if (LABEL_P (label))
4906 note = NEXT_INSN (label);
4907 else
4908 note = label;
4909 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4911 prev = PREV_INSN (label);
4912 next = NEXT_INSN (note);
4913 gcc_assert (prev && next);
4915 NEXT_INSN (prev) = next;
4916 PREV_INSN (next) = prev;
4918 bb_header[last->index] = label;
4920 if (last == first)
4921 break;
4923 last = last->prev_bb;
4925 while (1);
4928 /* Restore basic block notes.
4929 FIRST is the first basic block in the ebb. */
4930 static void
4931 restore_bb_notes (basic_block first)
4933 if (!bb_header)
4934 return;
4936 /* We DON'T unlink basic block notes of the first block in the ebb. */
4937 first = first->next_bb;
4938 /* Remember: FIRST is actually the second basic block in the ebb. */
4940 while (first != EXIT_BLOCK_PTR
4941 && bb_header[first->index])
4943 rtx prev, label, note, next;
4945 label = bb_header[first->index];
4946 prev = PREV_INSN (label);
4947 next = NEXT_INSN (prev);
4949 if (LABEL_P (label))
4950 note = NEXT_INSN (label);
4951 else
4952 note = label;
4953 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4955 bb_header[first->index] = 0;
4957 NEXT_INSN (prev) = label;
4958 NEXT_INSN (note) = next;
4959 PREV_INSN (next) = note;
4961 first = first->next_bb;
4964 free (bb_header);
4965 bb_header = 0;
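/* Note: unlink_bb_notes and restore_bb_notes bracket the scheduling of a
   multi-block ebb; bb_header doubles as the "notes are unlinked" flag, so
   an extra call to restore_bb_notes is harmless.  */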
4968 /* Helper function.
4969 Fix CFG after both in- and inter-block movement of
4970 control_flow_insn_p JUMP. */
4971 static void
4972 fix_jump_move (rtx jump)
4974 basic_block bb, jump_bb, jump_bb_next;
4976 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4977 jump_bb = BLOCK_FOR_INSN (jump);
4978 jump_bb_next = jump_bb->next_bb;
4980 gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
4981 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
4983 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
4984 /* if jump_bb_next is not empty. */
4985 BB_END (jump_bb) = BB_END (jump_bb_next);
4987 if (BB_END (bb) != PREV_INSN (jump))
4988 /* Then there are instructions after jump that should be moved
4989 to jump_bb_next. */
4990 BB_END (jump_bb_next) = BB_END (bb);
4991 else
4992 /* Otherwise jump_bb_next is empty. */
4993 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
4995 /* To make assertion in move_insn happy. */
4996 BB_END (bb) = PREV_INSN (jump);
4998 update_bb_for_insn (jump_bb_next);
5001 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
5002 static void
5003 move_block_after_check (rtx jump)
5005 basic_block bb, jump_bb, jump_bb_next;
5006 VEC(edge,gc) *t;
5008 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
5009 jump_bb = BLOCK_FOR_INSN (jump);
5010 jump_bb_next = jump_bb->next_bb;
5012 update_bb_for_insn (jump_bb);
5014 gcc_assert (IS_SPECULATION_CHECK_P (jump)
5015 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
5017 unlink_block (jump_bb_next);
5018 link_block (jump_bb_next, bb);
5020 t = bb->succs;
5021 bb->succs = 0;
5022 move_succs (&(jump_bb->succs), bb);
5023 move_succs (&(jump_bb_next->succs), jump_bb);
5024 move_succs (&t, jump_bb_next);
5026 df_mark_solutions_dirty ();
5028 common_sched_info->fix_recovery_cfg
5029 (bb->index, jump_bb->index, jump_bb_next->index);
5032 /* Helper function for move_block_after_check.
5033 This function attaches the edge vector pointed to by SUCCSP to
5034 block TO. */
5035 static void
5036 move_succs (VEC(edge,gc) **succsp, basic_block to)
5038 edge e;
5039 edge_iterator ei;
5041 gcc_assert (to->succs == 0);
5043 to->succs = *succsp;
5045 FOR_EACH_EDGE (e, ei, to->succs)
5046 e->src = to;
5048 *succsp = 0;
5051 /* Remove INSN from the instruction stream.
5052 INSN should not have any dependencies. */
5053 static void
5054 sched_remove_insn (rtx insn)
5056 sd_finish_insn (insn);
5058 change_queue_index (insn, QUEUE_NOWHERE);
5059 current_sched_info->add_remove_insn (insn, 1);
5060 remove_insn (insn);
5063 /* Clear the priorities of all instructions that are forward dependent on INSN.
5064 Store in vector pointed to by ROOTS_PTR insns on which priority () should
5065 be invoked to initialize all cleared priorities. */
5066 static void
5067 clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
5069 sd_iterator_def sd_it;
5070 dep_t dep;
5071 bool insn_is_root_p = true;
5073 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
5075 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
5077 rtx pro = DEP_PRO (dep);
5079 if (INSN_PRIORITY_STATUS (pro) >= 0
5080 && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
5082 /* If DEP doesn't contribute to priority then INSN itself should
5083 be added to priority roots. */
5084 if (contributes_to_priority_p (dep))
5085 insn_is_root_p = false;
5087 INSN_PRIORITY_STATUS (pro) = -1;
5088 clear_priorities (pro, roots_ptr);
5092 if (insn_is_root_p)
5093 VEC_safe_push (rtx, heap, *roots_ptr, insn);
5096 /* Recompute the priorities of instructions whose priorities might have been
5097 changed. ROOTS is a vector of instructions whose priority computation will
5098 trigger initialization of all cleared priorities. */
5099 static void
5100 calc_priorities (rtx_vec_t roots)
5102 int i;
5103 rtx insn;
5105 for (i = 0; VEC_iterate (rtx, roots, i, insn); i++)
5106 priority (insn);
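/* Typical pairing of the two functions above, as used in
   create_check_block_twin and add_to_speculative_block.  */
#if 0
  rtx_vec_t roots = NULL;
  clear_priorities (insn, &roots);
  calc_priorities (roots);
  VEC_free (rtx, heap, roots);
#endif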
5110 /* Add dependences between JUMP and other instructions in the recovery
5111 block. INSN is the first insn in the recovery block. */
5112 static void
5113 add_jump_dependencies (rtx insn, rtx jump)
5117 insn = NEXT_INSN (insn);
5118 if (insn == jump)
5119 break;
5121 if (dep_list_size (insn) == 0)
5123 dep_def _new_dep, *new_dep = &_new_dep;
5125 init_dep (new_dep, insn, jump, REG_DEP_ANTI);
5126 sd_add_dep (new_dep, false);
5129 while (1);
5131 gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
5134 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
5135 rtx
5136 bb_note (basic_block bb)
5138 rtx note;
5140 note = BB_HEAD (bb);
5141 if (LABEL_P (note))
5142 note = NEXT_INSN (note);
5144 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5145 return note;
5148 #ifdef ENABLE_CHECKING
5149 /* Helper function for check_cfg.
5150 Return nonzero if the edge vector pointed to by EL has an edge with TYPE in
5151 its flags. */
5152 static int
5153 has_edge_p (VEC(edge,gc) *el, int type)
5155 edge e;
5156 edge_iterator ei;
5158 FOR_EACH_EDGE (e, ei, el)
5159 if (e->flags & type)
5160 return 1;
5161 return 0;
5164 /* Search back, starting at INSN, for an insn that is not a
5165 NOTE_INSN_VAR_LOCATION. Don't search beyond HEAD, and return it if
5166 no such insn can be found. */
5167 static inline rtx
5168 prev_non_location_insn (rtx insn, rtx head)
5170 while (insn != head && NOTE_P (insn)
5171 && NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION)
5172 insn = PREV_INSN (insn);
5174 return insn;
5177 /* Check a few properties of the CFG between HEAD and TAIL.
5178 If HEAD (TAIL) is NULL check from the beginning (till the end) of the
5179 instruction stream. */
5180 static void
5181 check_cfg (rtx head, rtx tail)
5183 rtx next_tail;
5184 basic_block bb = 0;
5185 int not_first = 0, not_last;
5187 if (head == NULL)
5188 head = get_insns ();
5189 if (tail == NULL)
5190 tail = get_last_insn ();
5191 next_tail = NEXT_INSN (tail);
5195 not_last = head != tail;
5197 if (not_first)
5198 gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
5199 if (not_last)
5200 gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
5202 if (LABEL_P (head)
5203 || (NOTE_INSN_BASIC_BLOCK_P (head)
5204 && (!not_first
5205 || (not_first && !LABEL_P (PREV_INSN (head))))))
5207 gcc_assert (bb == 0);
5208 bb = BLOCK_FOR_INSN (head);
5209 if (bb != 0)
5210 gcc_assert (BB_HEAD (bb) == head);
5211 else
5212 /* This is the case of jump table. See inside_basic_block_p (). */
5213 gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
5216 if (bb == 0)
5218 gcc_assert (!inside_basic_block_p (head));
5219 head = NEXT_INSN (head);
5221 else
5223 gcc_assert (inside_basic_block_p (head)
5224 || NOTE_P (head));
5225 gcc_assert (BLOCK_FOR_INSN (head) == bb);
5227 if (LABEL_P (head))
5229 head = NEXT_INSN (head);
5230 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
5232 else
5234 if (control_flow_insn_p (head))
5236 gcc_assert (prev_non_location_insn (BB_END (bb), head)
5237 == head);
5239 if (any_uncondjump_p (head))
5240 gcc_assert (EDGE_COUNT (bb->succs) == 1
5241 && BARRIER_P (NEXT_INSN (head)));
5242 else if (any_condjump_p (head))
5243 gcc_assert (/* Usual case. */
5244 (EDGE_COUNT (bb->succs) > 1
5245 && !BARRIER_P (NEXT_INSN (head)))
5246 /* Or jump to the next instruction. */
5247 || (EDGE_COUNT (bb->succs) == 1
5248 && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
5249 == JUMP_LABEL (head))));
5251 if (BB_END (bb) == head)
5253 if (EDGE_COUNT (bb->succs) > 1)
5254 gcc_assert (control_flow_insn_p (prev_non_location_insn
5255 (head, BB_HEAD (bb)))
5256 || has_edge_p (bb->succs, EDGE_COMPLEX));
5257 bb = 0;
5260 head = NEXT_INSN (head);
5264 not_first = 1;
5266 while (head != next_tail);
5268 gcc_assert (bb == 0);
5271 #endif /* ENABLE_CHECKING */
5273 /* Extend per basic block data structures. */
5274 static void
5275 extend_bb (void)
5277 if (sched_scan_info->extend_bb)
5278 sched_scan_info->extend_bb ();
5281 /* Init data for BB. */
5282 static void
5283 init_bb (basic_block bb)
5285 if (sched_scan_info->init_bb)
5286 sched_scan_info->init_bb (bb);
5289 /* Extend per insn data structures. */
5290 static void
5291 extend_insn (void)
5293 if (sched_scan_info->extend_insn)
5294 sched_scan_info->extend_insn ();
5297 /* Init data structures for INSN. */
5298 static void
5299 init_insn (rtx insn)
5301 if (sched_scan_info->init_insn)
5302 sched_scan_info->init_insn (insn);
5305 /* Init all insns in BB. */
5306 static void
5307 init_insns_in_bb (basic_block bb)
5309 rtx insn;
5311 FOR_BB_INSNS (bb, insn)
5312 init_insn (insn);
5315 /* A driver function to add a set of basic blocks (BBS),
5316 a single basic block (BB), a set of insns (INSNS) or a single insn (INSN)
5317 to the scheduling region. */
5318 void
5319 sched_scan (const struct sched_scan_info_def *ssi,
5320 bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5322 sched_scan_info = ssi;
5324 if (bbs != NULL || bb != NULL)
5326 extend_bb ();
5328 if (bbs != NULL)
5330 unsigned i;
5331 basic_block x;
5333 for (i = 0; VEC_iterate (basic_block, bbs, i, x); i++)
5334 init_bb (x);
5337 if (bb != NULL)
5338 init_bb (bb);
5341 extend_insn ();
5343 if (bbs != NULL)
5345 unsigned i;
5346 basic_block x;
5348 for (i = 0; VEC_iterate (basic_block, bbs, i, x); i++)
5349 init_insns_in_bb (x);
5352 if (bb != NULL)
5353 init_insns_in_bb (bb);
5355 if (insns != NULL)
5357 unsigned i;
5358 rtx x;
5360 for (i = 0; VEC_iterate (rtx, insns, i, x); i++)
5361 init_insn (x);
5364 if (insn != NULL)
5365 init_insn (insn);
5369 /* Extend data structures for logical insn UID. */
5370 static void
5371 luids_extend_insn (void)
5373 int new_luids_max_uid = get_max_uid () + 1;
5375 VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid);
5378 /* Initialize LUID for INSN. */
5379 static void
5380 luids_init_insn (rtx insn)
5382 int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
5383 int luid;
5385 if (i >= 0)
5387 luid = sched_max_luid;
5388 sched_max_luid += i;
5390 else
5391 luid = -1;
5393 SET_INSN_LUID (insn, luid);
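/* Consequence of the code above: with haifa_luid_for_non_insn returning 0,
   notes and labels take the current sched_max_luid without consuming a new
   one, i.e. they share the luid of the next real insn; a negative return
   from the hook yields luid -1.  */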
5396 /* Initialize luids for BBS, BB, INSNS and INSN.
5397 The hook common_sched_info->luid_for_non_insn () is used to determine
5398 if notes, labels, etc. need luids. */
5399 void
5400 sched_init_luids (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5402 const struct sched_scan_info_def ssi =
5404 NULL, /* extend_bb */
5405 NULL, /* init_bb */
5406 luids_extend_insn, /* extend_insn */
5407 luids_init_insn /* init_insn */
5410 sched_scan (&ssi, bbs, bb, insns, insn);
5413 /* Free LUIDs. */
5414 void
5415 sched_finish_luids (void)
5417 VEC_free (int, heap, sched_luids);
5418 sched_max_luid = 1;
5421 /* Return logical uid of INSN. Helpful while debugging. */
5422 int
5423 insn_luid (rtx insn)
5425 return INSN_LUID (insn);
5428 /* Extend per insn data in the target. */
5429 void
5430 sched_extend_target (void)
5432 if (targetm.sched.h_i_d_extended)
5433 targetm.sched.h_i_d_extended ();
5436 /* Extend global scheduler structures (those that live across calls to
5437 schedule_block) to include information about just emitted INSN. */
5438 static void
5439 extend_h_i_d (void)
5441 int reserve = (get_max_uid () + 1
5442 - VEC_length (haifa_insn_data_def, h_i_d));
5443 if (reserve > 0
5444 && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
5446 VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
5447 3 * get_max_uid () / 2);
5448 sched_extend_target ();
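/* Growing to 3 * get_max_uid () / 2 rather than to the exact size
   amortizes repeated reallocations while new insns (e.g. recovery checks
   and twins) keep being emitted.  */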
5452 /* Initialize h_i_d entry of the INSN with default values.
5453 Values that are not explicitly initialized here hold zero. */
5454 static void
5455 init_h_i_d (rtx insn)
5457 if (INSN_LUID (insn) > 0)
5459 INSN_COST (insn) = -1;
5460 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
5461 INSN_TICK (insn) = INVALID_TICK;
5462 INTER_TICK (insn) = INVALID_TICK;
5463 TODO_SPEC (insn) = HARD_DEP;
5467 /* Initialize haifa_insn_data for BBS, BB, INSNS and INSN. */
5468 void
5469 haifa_init_h_i_d (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
5471 const struct sched_scan_info_def ssi =
5473 NULL, /* extend_bb */
5474 NULL, /* init_bb */
5475 extend_h_i_d, /* extend_insn */
5476 init_h_i_d /* init_insn */
5479 sched_scan (&ssi, bbs, bb, insns, insn);
5482 /* Finalize haifa_insn_data. */
5483 void
5484 haifa_finish_h_i_d (void)
5486 int i;
5487 haifa_insn_data_t data;
5488 struct reg_use_data *use, *next;
5490 for (i = 0; VEC_iterate (haifa_insn_data_def, h_i_d, i, data); i++)
5492 if (data->reg_pressure != NULL)
5493 free (data->reg_pressure);
5494 for (use = data->reg_use_list; use != NULL; use = next)
5496 next = use->next_insn_use;
5497 free (use);
5500 VEC_free (haifa_insn_data_def, heap, h_i_d);
5503 /* Init data for the new insn INSN. */
5504 static void
5505 haifa_init_insn (rtx insn)
5507 gcc_assert (insn != NULL);
5509 sched_init_luids (NULL, NULL, NULL, insn);
5510 sched_extend_target ();
5511 sched_deps_init (false);
5512 haifa_init_h_i_d (NULL, NULL, NULL, insn);
5514 if (adding_bb_to_current_region_p)
5516 sd_init_insn (insn);
5518 /* Extend dependency caches by one element. */
5519 extend_dependency_caches (1, false);
5523 /* Init data for the new basic block BB which comes after AFTER. */
5524 static void
5525 haifa_init_only_bb (basic_block bb, basic_block after)
5527 gcc_assert (bb != NULL);
5529 sched_init_bbs ();
5531 if (common_sched_info->add_block)
5532 /* This changes only data structures of the front-end. */
5533 common_sched_info->add_block (bb, after);
5536 /* A generic version of sched_split_block (). */
5537 basic_block
5538 sched_split_block_1 (basic_block first_bb, rtx after)
5540 edge e;
5542 e = split_block (first_bb, after);
5543 gcc_assert (e->src == first_bb);
5545 /* sched_split_block emits note if *check == BB_END. Probably it
5546 is better to rip that note off. */
5548 return e->dest;
5551 /* A generic version of sched_create_empty_bb (). */
5552 basic_block
5553 sched_create_empty_bb_1 (basic_block after)
5555 return create_empty_bb (after);
5558 /* Insert PAT as an INSN into the schedule and update the necessary data
5559 structures to account for it. */
5560 rtx
5561 sched_emit_insn (rtx pat)
5563 rtx insn = emit_insn_after (pat, last_scheduled_insn);
5564 last_scheduled_insn = insn;
5565 haifa_init_insn (insn);
5566 return insn;
5569 #endif /* INSN_SCHEDULING */