gcc/haifa-sched.c
1 /* Instruction scheduling pass.
2 Copyright (C) 1992-2014 Free Software Foundation, Inc.
3 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
4 and currently maintained by, Jim Wilson (wilson@cygnus.com)
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* Instruction scheduling pass. This file, along with sched-deps.c,
23 contains the generic parts. The actual entry point for
24 the normal instruction scheduling pass is found in sched-rgn.c.
26 We compute insn priorities based on data dependencies. Flow
27 analysis only creates a fraction of the data-dependencies we must
28 observe: namely, only those dependencies which the combiner can be
29 expected to use. For this pass, we must therefore create the
30 remaining dependencies we need to observe: register dependencies,
31 memory dependencies, dependencies to keep function calls in order,
32 and the dependence between a conditional branch and the setting of
33 condition codes; all of these are dealt with here.
35 The scheduler first traverses the data flow graph, starting with
36 the last instruction, and proceeding to the first, assigning values
37 to insn_priority as it goes. This sorts the instructions
38 topologically by data dependence.
40 Once priorities have been established, we order the insns using
41 list scheduling. This works as follows: starting with a list of
42 all the ready insns, and sorted according to priority number, we
43 schedule the insn from the end of the list by placing its
44 predecessors in the list according to their priority order. We
45 consider this insn scheduled by setting the pointer to the "end" of
46 the list to point to the previous insn. When an insn has no
47 predecessors, we either queue it until sufficient time has elapsed
48 or add it to the ready list. As the instructions are scheduled or
49 when stalls are introduced, the queue advances and dumps insns into
50 the ready list. When all insns down to the lowest priority have
51 been scheduled, the critical path of the basic block has been made
52 as short as possible. The remaining insns are then scheduled in
53 remaining slots.
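As a rough illustrative sketch only (the names below are descriptive,
not the identifiers used later in this file), each cycle of that loop
looks like:
  advance the clock and the queue, moving insns whose stall has
    elapsed onto the ready list;
  while the machine can still issue insns this cycle:
    sort the ready list by the tie-breaking rules below;
    remove the best insn from the ready list and schedule it;
    move its newly satisfied successors to the ready list or queue.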
55 The following list shows the order in which we want to break ties
56 among insns in the ready list:
58 1. choose insn with the longest path to end of bb, ties
59 broken by
60 2. choose insn with least contribution to register pressure,
61 ties broken by
62 3. prefer in-block over interblock motion, ties broken by
63 4. prefer useful over speculative motion, ties broken by
64 5. choose insn with largest control flow probability, ties
65 broken by
66 6. choose insn with the least dependences upon the previously
67 scheduled insn, ties broken by
68 7. choose the insn which has the most insns dependent on it, or finally
69 8. choose insn with lowest UID.
71 Memory references complicate matters. Only if we can be certain
72 that memory references are not part of the data dependency graph
73 (via true, anti, or output dependence), can we move operations past
74 memory references. To a first approximation, reads can be done
75 independently, while writes introduce dependencies. Better
76 approximations will yield fewer dependencies.
78 Before reload, an extended analysis of interblock data dependences
79 is required for interblock scheduling. This is performed in
80 compute_block_dependences ().
82 Dependencies set up by memory references are treated in exactly the
83 same way as other dependencies, by using insn backward dependences
84 INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences
85 INSN_FORW_DEPS for the purpose of forward list scheduling.
87 Having optimized the critical path, we may have also unduly
88 extended the lifetimes of some registers. If an operation requires
89 that constants be loaded into registers, it is certainly desirable
90 to load those constants as early as necessary, but no earlier.
91 I.e., it will not do to load up a bunch of registers at the
92 beginning of a basic block only to use them at the end, if they
93 could be loaded later, since this may result in excessive register
94 utilization.
96 Note that since branches are never in basic blocks, but only end
97 basic blocks, this pass will not move branches. But that is ok,
98 since we can use GNU's delayed branch scheduling pass to take care
99 of this case.
101 Also note that no further optimizations based on algebraic
102 identities are performed, so this pass would be a good one to
103 perform instruction splitting, such as breaking up a multiply
104 instruction into shifts and adds where that is profitable.
106 Given the memory aliasing analysis that this pass should perform,
107 it should be possible to remove redundant stores to memory, and to
108 load values from registers instead of hitting memory.
110 Before reload, speculative insns are moved only if a 'proof' exists
111 that no exception will be caused by this, and if no live registers
112 exist that inhibit the motion (live registers constraints are not
113 represented by data dependence edges).
115 This pass must update information that subsequent passes expect to
116 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
117 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
119 The information in the line number notes is carefully retained by
120 this pass. Notes that refer to the starting and ending of
121 exception regions are also carefully retained by this pass. All
122 other NOTE insns are grouped in their same relative order at the
123 beginning of basic blocks and regions that have been scheduled. */
125 #include "config.h"
126 #include "system.h"
127 #include "coretypes.h"
128 #include "tm.h"
129 #include "diagnostic-core.h"
130 #include "hard-reg-set.h"
131 #include "rtl.h"
132 #include "tm_p.h"
133 #include "regs.h"
134 #include "function.h"
135 #include "flags.h"
136 #include "insn-config.h"
137 #include "insn-attr.h"
138 #include "except.h"
139 #include "recog.h"
140 #include "sched-int.h"
141 #include "target.h"
142 #include "common/common-target.h"
143 #include "params.h"
144 #include "dbgcnt.h"
145 #include "cfgloop.h"
146 #include "ira.h"
147 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
148 #include "hash-table.h"
149 #include "dumpfile.h"
151 #ifdef INSN_SCHEDULING
153 /* True if we do register pressure relief through live-range
154 shrinkage. */
155 static bool live_range_shrinkage_p;
157 /* Switch on live range shrinkage. */
158 void
159 initialize_live_range_shrinkage (void)
161 live_range_shrinkage_p = true;
164 /* Switch off live range shrinkage. */
165 void
166 finish_live_range_shrinkage (void)
168 live_range_shrinkage_p = false;
171 /* issue_rate is the number of insns that can be scheduled in the same
172 machine cycle. It can be defined in the config/mach/mach.h file;
173 otherwise we set it to 1. */
175 int issue_rate;
177 /* This can be set to true by a backend if the scheduler should not
178 enable a DCE pass. */
179 bool sched_no_dce;
181 /* The current initiation interval used when modulo scheduling. */
182 static int modulo_ii;
184 /* The maximum number of stages we are prepared to handle. */
185 static int modulo_max_stages;
187 /* The number of insns that exist in each iteration of the loop. We use this
188 to detect when we've scheduled all insns from the first iteration. */
189 static int modulo_n_insns;
191 /* The current count of insns in the first iteration of the loop that have
192 already been scheduled. */
193 static int modulo_insns_scheduled;
195 /* The maximum uid of insns from the first iteration of the loop. */
196 static int modulo_iter0_max_uid;
198 /* The number of times we should attempt to backtrack when modulo scheduling.
199 Decreased each time we have to backtrack. */
200 static int modulo_backtracks_left;
202 /* The stage in which the last insn from the original loop was
203 scheduled. */
204 static int modulo_last_stage;
206 /* sched-verbose controls the amount of debugging output the
207 scheduler prints. It is controlled by -fsched-verbose=N:
208 N>0 and no -dSR : the output is directed to stderr.
209 N>=10 will direct the printouts to stderr (regardless of -dSR).
210 N=1: same as -dSR.
211 N=2: bb's probabilities, detailed ready list info, unit/insn info.
212 N=3: rtl at abort point, control-flow, regions info.
213 N=5: dependences info. */
215 int sched_verbose = 0;
217 /* Debugging file. All printouts are sent to sched_dump, which is always
218 set, either to stderr, or to the dump listing file (-dSR). */
219 FILE *sched_dump = 0;
221 /* This is a placeholder for the scheduler parameters common
222 to all schedulers. */
223 struct common_sched_info_def *common_sched_info;
225 #define INSN_TICK(INSN) (HID (INSN)->tick)
226 #define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
227 #define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
228 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
229 #define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
230 #define SHADOW_P(INSN) (HID (INSN)->shadow_p)
231 #define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
232 /* Cached cost of the instruction. Use insn_cost to get cost of the
233 insn. -1 here means that the field is not initialized. */
234 #define INSN_COST(INSN) (HID (INSN)->cost)
236 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
237 then it should be recalculated from scratch. */
238 #define INVALID_TICK (-(max_insn_queue_index + 1))
239 /* The minimal value of the INSN_TICK of an instruction. */
240 #define MIN_TICK (-max_insn_queue_index)
242 /* List of important notes we must keep around. This is a pointer to the
243 last element in the list. */
244 rtx note_list;
246 static struct spec_info_def spec_info_var;
247 /* Description of the speculative part of the scheduling.
248 If NULL - no speculation. */
249 spec_info_t spec_info = NULL;
251 /* True if a recovery block was added during scheduling of the current block.
252 Used to determine whether we need to fix INSN_TICKs. */
253 static bool haifa_recovery_bb_recently_added_p;
255 /* True if a recovery block was added during this scheduling pass.
256 Used to determine whether we should have empty memory pools of dependencies
257 after finishing the current region. */
258 bool haifa_recovery_bb_ever_added_p;
260 /* Counters of different types of speculative instructions. */
261 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
263 /* Array used in {unlink, restore}_bb_notes. */
264 static rtx *bb_header = 0;
266 /* Basic block after which recovery blocks will be created. */
267 static basic_block before_recovery;
269 /* Basic block just before the EXIT_BLOCK and after recovery, if we have
270 created it. */
271 basic_block after_recovery;
273 /* FALSE if we add bb to another region, so we don't need to initialize it. */
274 bool adding_bb_to_current_region_p = true;
276 /* Queues, etc. */
278 /* An instruction is ready to be scheduled when all insns preceding it
279 have already been scheduled. It is important to ensure that all
280 insns which use its result will not be executed until its result
281 has been computed. An insn is maintained in one of four structures:
283 (P) the "Pending" set of insns which cannot be scheduled until
284 their dependencies have been satisfied.
285 (Q) the "Queued" set of insns that can be scheduled when sufficient
286 time has passed.
287 (R) the "Ready" list of unscheduled, uncommitted insns.
288 (S) the "Scheduled" list of insns.
290 Initially, all insns are either "Pending" or "Ready" depending on
291 whether their dependencies are satisfied.
293 Insns move from the "Ready" list to the "Scheduled" list as they
294 are committed to the schedule. As this occurs, the insns in the
295 "Pending" list have their dependencies satisfied and move to either
296 the "Ready" list or the "Queued" set depending on whether
297 sufficient time has passed to make them ready. As time passes,
298 insns move from the "Queued" set to the "Ready" list.
300 The "Pending" list (P) consists of the insns in the INSN_FORW_DEPS of the
301 unscheduled insns, i.e., those that are ready, queued, and pending.
302 The "Queued" set (Q) is implemented by the variable `insn_queue'.
303 The "Ready" list (R) is implemented by the variables `ready' and
304 `n_ready'.
305 The "Scheduled" list (S) is the new insn chain built by this pass.
307 The transition (R->S) is implemented in the scheduling loop in
308 `schedule_block' when the best insn to schedule is chosen.
309 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
310 insns move from the ready list to the scheduled list.
311 The transition (Q->R) is implemented in 'queue_to_ready' as time
312 passes or stalls are introduced. */
314 /* Implement a circular buffer to delay instructions until sufficient
315 time has passed. For the new pipeline description interface,
316 MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
317 than the maximal instruction execution time computed by genattr.c from
318 the maximal time of functional unit reservations needed to get a
319 result. This is the longest time an insn may be queued. */
321 static rtx *insn_queue;
322 static int q_ptr = 0;
323 static int q_size = 0;
324 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
325 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
327 #define QUEUE_SCHEDULED (-3)
328 #define QUEUE_NOWHERE (-2)
329 #define QUEUE_READY (-1)
330 /* QUEUE_SCHEDULED - INSN is scheduled.
331 QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in
332 queue or ready list.
333 QUEUE_READY - INSN is in ready list.
334 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
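/* An illustrative example with hypothetical values: if
   max_insn_queue_index == 63 and q_ptr == 60, an insn that must wait
   another 5 cycles is placed in insn_queue[NEXT_Q_AFTER (q_ptr, 5)],
   i.e. insn_queue[(60 + 5) & 63] == insn_queue[1], and its QUEUE_INDEX
   becomes 1.  Each cycle q_ptr advances to NEXT_Q (q_ptr), so the
   wrap-around is harmless as long as no insn is queued for more than
   max_insn_queue_index cycles.  */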
336 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
338 /* The following variable value refers to all current and future
339 reservations of the processor units. */
340 state_t curr_state;
342 /* The following variable value is the size of memory representing all
343 current and future reservations of the processor units. */
344 size_t dfa_state_size;
346 /* The following array is used to find the best insn from ready when
347 the automaton pipeline interface is used. */
348 signed char *ready_try = NULL;
350 /* The ready list. */
351 struct ready_list ready = {NULL, 0, 0, 0, 0};
353 /* The pointer to the ready list (to be removed). */
354 static struct ready_list *readyp = &ready;
356 /* Scheduling clock. */
357 static int clock_var;
359 /* Clock at which the previous instruction was issued. */
360 static int last_clock_var;
362 /* Set to true if, when queuing a shadow insn, we discover that it would be
363 scheduled too late. */
364 static bool must_backtrack;
366 /* The following variable value is the number of essential insns issued on
367 the current cycle. An insn is an essential one if it changes the
368 processor's state. */
369 int cycle_issued_insns;
371 /* This records the actual schedule. It is built up during the main phase
372 of schedule_block, and afterwards used to reorder the insns in the RTL. */
373 static vec<rtx> scheduled_insns;
375 static int may_trap_exp (const_rtx, int);
377 /* Nonzero iff the address is composed of at most 1 register. */
378 #define CONST_BASED_ADDRESS_P(x) \
379 (REG_P (x) \
380 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
381 || (GET_CODE (x) == LO_SUM)) \
382 && (CONSTANT_P (XEXP (x, 0)) \
383 || CONSTANT_P (XEXP (x, 1)))))
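/* For example (illustrative RTL only): (reg R), (plus (reg R)
   (const_int 4)) and (lo_sum (reg R) (symbol_ref "x")) all satisfy
   CONST_BASED_ADDRESS_P, whereas (plus (reg R1) (reg R2)) does not,
   since neither operand of the PLUS is constant.  */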
385 /* Returns a class that an insn with GET_DEST(insn)=x may belong to,
386 as found by analyzing insn's expression. */
389 static int haifa_luid_for_non_insn (rtx x);
391 /* Haifa version of sched_info hooks common to all headers. */
392 const struct common_sched_info_def haifa_common_sched_info =
394 NULL, /* fix_recovery_cfg */
395 NULL, /* add_block */
396 NULL, /* estimate_number_of_insns */
397 haifa_luid_for_non_insn, /* luid_for_non_insn */
398 SCHED_PASS_UNKNOWN /* sched_pass_id */
401 /* Mapping from instruction UID to its Logical UID. */
402 vec<int> sched_luids = vNULL;
404 /* Next LUID to assign to an instruction. */
405 int sched_max_luid = 1;
407 /* Haifa Instruction Data. */
408 vec<haifa_insn_data_def> h_i_d = vNULL;
410 void (* sched_init_only_bb) (basic_block, basic_block);
412 /* Split block function. Different schedulers might use different functions
413 to keep their internal data consistent. */
414 basic_block (* sched_split_block) (basic_block, rtx);
416 /* Create empty basic block after the specified block. */
417 basic_block (* sched_create_empty_bb) (basic_block);
419 /* Return the number of cycles until INSN is expected to be ready.
420 Return zero if it already is. */
421 static int
422 insn_delay (rtx insn)
424 return MAX (INSN_TICK (insn) - clock_var, 0);
427 static int
428 may_trap_exp (const_rtx x, int is_store)
430 enum rtx_code code;
432 if (x == 0)
433 return TRAP_FREE;
434 code = GET_CODE (x);
435 if (is_store)
437 if (code == MEM && may_trap_p (x))
438 return TRAP_RISKY;
439 else
440 return TRAP_FREE;
442 if (code == MEM)
444 /* The insn uses memory: a volatile load. */
445 if (MEM_VOLATILE_P (x))
446 return IRISKY;
447 /* An exception-free load. */
448 if (!may_trap_p (x))
449 return IFREE;
450 /* A load with 1 base register, to be further checked. */
451 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
452 return PFREE_CANDIDATE;
453 /* No info on the load, to be further checked. */
454 return PRISKY_CANDIDATE;
456 else
458 const char *fmt;
459 int i, insn_class = TRAP_FREE;
461 /* Neither store nor load, check if it may cause a trap. */
462 if (may_trap_p (x))
463 return TRAP_RISKY;
464 /* Recursive step: walk the insn... */
465 fmt = GET_RTX_FORMAT (code);
466 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
468 if (fmt[i] == 'e')
470 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
471 insn_class = WORST_CLASS (insn_class, tmp_class);
473 else if (fmt[i] == 'E')
475 int j;
476 for (j = 0; j < XVECLEN (x, i); j++)
478 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
479 insn_class = WORST_CLASS (insn_class, tmp_class);
480 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
481 break;
484 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
485 break;
487 return insn_class;
491 /* Classifies rtx X of an insn for the purpose of verifying that X can be
492 executed speculatively (and consequently the insn can be moved
493 speculatively), by examining X, returning:
494 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
495 TRAP_FREE: non-load insn.
496 IFREE: load from a globally safe location.
497 IRISKY: volatile load.
498 PFREE_CANDIDATE, PRISKY_CANDIDATE: load that needs to be checked for
499 being either PFREE or PRISKY. */
501 static int
502 haifa_classify_rtx (const_rtx x)
504 int tmp_class = TRAP_FREE;
505 int insn_class = TRAP_FREE;
506 enum rtx_code code;
508 if (GET_CODE (x) == PARALLEL)
510 int i, len = XVECLEN (x, 0);
512 for (i = len - 1; i >= 0; i--)
514 tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
515 insn_class = WORST_CLASS (insn_class, tmp_class);
516 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
517 break;
520 else
522 code = GET_CODE (x);
523 switch (code)
525 case CLOBBER:
526 /* Test if it is a 'store'. */
527 tmp_class = may_trap_exp (XEXP (x, 0), 1);
528 break;
529 case SET:
530 /* Test if it is a store. */
531 tmp_class = may_trap_exp (SET_DEST (x), 1);
532 if (tmp_class == TRAP_RISKY)
533 break;
534 /* Test if it is a load. */
535 tmp_class =
536 WORST_CLASS (tmp_class,
537 may_trap_exp (SET_SRC (x), 0));
538 break;
539 case COND_EXEC:
540 tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
541 if (tmp_class == TRAP_RISKY)
542 break;
543 tmp_class = WORST_CLASS (tmp_class,
544 may_trap_exp (COND_EXEC_TEST (x), 0));
545 break;
546 case TRAP_IF:
547 tmp_class = TRAP_RISKY;
548 break;
549 default:;
551 insn_class = tmp_class;
554 return insn_class;
558 haifa_classify_insn (const_rtx insn)
560 return haifa_classify_rtx (PATTERN (insn));
563 /* After the scheduler initialization function has been called, this function
564 can be called to enable modulo scheduling. II is the initiation interval
565 we should use; it affects the delays for delay_pairs that were recorded as
566 separated by a given number of stages.
568 MAX_STAGES provides us with a limit
569 after which we give up scheduling; the caller must have unrolled at least
570 as many copies of the loop body and recorded delay_pairs for them.
572 INSNS is the number of real (non-debug) insns in one iteration of
573 the loop. MAX_UID can be used to test whether an insn belongs to
574 the first iteration of the loop; all of them have a uid lower than
575 MAX_UID. */
576 void
577 set_modulo_params (int ii, int max_stages, int insns, int max_uid)
579 modulo_ii = ii;
580 modulo_max_stages = max_stages;
581 modulo_n_insns = insns;
582 modulo_iter0_max_uid = max_uid;
583 modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS);
586 /* A structure to record a pair of insns where the first one is a real
587 insn that has delay slots, and the second is its delayed shadow.
588 I1 is scheduled normally and will emit an assembly instruction,
589 while I2 describes the side effect that takes place at the
590 transition between cycles CYCLES and (CYCLES + 1) after I1. */
591 struct delay_pair
593 struct delay_pair *next_same_i1;
594 rtx i1, i2;
595 int cycles;
596 /* When doing modulo scheduling, a delay_pair can also be used to
597 show that I1 and I2 are the same insn in a different stage. If that
598 is the case, STAGES will be nonzero. */
599 int stages;
602 /* Helpers for delay hashing. */
604 struct delay_i1_hasher : typed_noop_remove <delay_pair>
606 typedef delay_pair value_type;
607 typedef void compare_type;
608 static inline hashval_t hash (const value_type *);
609 static inline bool equal (const value_type *, const compare_type *);
612 /* Returns a hash value for X, based on hashing just I1. */
614 inline hashval_t
615 delay_i1_hasher::hash (const value_type *x)
617 return htab_hash_pointer (x->i1);
620 /* Return true if I1 of pair X is the same as that of pair Y. */
622 inline bool
623 delay_i1_hasher::equal (const value_type *x, const compare_type *y)
625 return x->i1 == y;
628 struct delay_i2_hasher : typed_free_remove <delay_pair>
630 typedef delay_pair value_type;
631 typedef void compare_type;
632 static inline hashval_t hash (const value_type *);
633 static inline bool equal (const value_type *, const compare_type *);
636 /* Returns a hash value for X, based on hashing just I2. */
638 inline hashval_t
639 delay_i2_hasher::hash (const value_type *x)
641 return htab_hash_pointer (x->i2);
644 /* Return true if I2 of pair X is the same as that of pair Y. */
646 inline bool
647 delay_i2_hasher::equal (const value_type *x, const compare_type *y)
649 return x->i2 == y;
652 /* Two hash tables to record delay_pairs, one indexed by I1 and the other
653 indexed by I2. */
654 static hash_table <delay_i1_hasher> delay_htab;
655 static hash_table <delay_i2_hasher> delay_htab_i2;
657 /* Called through htab_traverse. Walk the hashtable using I2 as
658 index, and delete all elements involving an UID higher than
659 that pointed to by *DATA. */
661 haifa_htab_i2_traverse (delay_pair **slot, int *data)
663 int maxuid = *data;
664 struct delay_pair *p = *slot;
665 if (INSN_UID (p->i2) >= maxuid || INSN_UID (p->i1) >= maxuid)
667 delay_htab_i2.clear_slot (slot);
669 return 1;
672 /* Called through htab_traverse. Walk the hashtable using I1 as
673 index, and delete all elements involving an UID higher than
674 that pointed to by *DATA. */
676 haifa_htab_i1_traverse (delay_pair **pslot, int *data)
678 int maxuid = *data;
679 struct delay_pair *p, *first, **pprev;
681 if (INSN_UID ((*pslot)->i1) >= maxuid)
683 delay_htab.clear_slot (pslot);
684 return 1;
686 pprev = &first;
687 for (p = *pslot; p; p = p->next_same_i1)
689 if (INSN_UID (p->i2) < maxuid)
691 *pprev = p;
692 pprev = &p->next_same_i1;
695 *pprev = NULL;
696 if (first == NULL)
697 delay_htab.clear_slot (pslot);
698 else
699 *pslot = first;
700 return 1;
703 /* Discard all delay pairs which involve an insn with an UID higher
704 than MAX_UID. */
705 void
706 discard_delay_pairs_above (int max_uid)
708 delay_htab.traverse <int *, haifa_htab_i1_traverse> (&max_uid);
709 delay_htab_i2.traverse <int *, haifa_htab_i2_traverse> (&max_uid);
712 /* This function can be called by a port just before it starts the final
713 scheduling pass. It records the fact that an instruction with delay
714 slots has been split into two insns, I1 and I2. The first one will be
715 scheduled normally and initiates the operation. The second one is a
716 shadow which must follow a specific number of cycles after I1; its only
717 purpose is to show the side effect that occurs at that cycle in the RTL.
718 If a JUMP_INSN or a CALL_INSN has been split, I1 should be a normal INSN,
719 while I2 retains the original insn type.
721 There are two ways in which the number of cycles can be specified,
722 involving the CYCLES and STAGES arguments to this function. If STAGES
723 is zero, we just use the value of CYCLES. Otherwise, STAGES is a factor
724 which is multiplied by MODULO_II to give the number of cycles. This is
725 only useful if the caller also calls set_modulo_params to enable modulo
726 scheduling. */
728 void
729 record_delay_slot_pair (rtx i1, rtx i2, int cycles, int stages)
731 struct delay_pair *p = XNEW (struct delay_pair);
732 struct delay_pair **slot;
734 p->i1 = i1;
735 p->i2 = i2;
736 p->cycles = cycles;
737 p->stages = stages;
739 if (!delay_htab.is_created ())
741 delay_htab.create (10);
742 delay_htab_i2.create (10);
744 slot = delay_htab.find_slot_with_hash (i1, htab_hash_pointer (i1), INSERT);
745 p->next_same_i1 = *slot;
746 *slot = p;
747 slot = delay_htab_i2.find_slot_with_hash (i2, htab_hash_pointer (i2), INSERT);
748 *slot = p;
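/* Illustrative use from a hypothetical port (not code in this file):
   after splitting a delayed-effect insn into a normal insn I1 and a
   shadow I2 whose side effect happens three cycles later, the port
   would call

     record_delay_slot_pair (i1, i2, 3, 0);

   For modulo scheduling, a copy of an insn that belongs two stages
   later would instead be recorded with STAGES == 2, in which case the
   delay is STAGES * modulo_ii rather than CYCLES, provided
   set_modulo_params has been called.  */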
751 /* Examine the delay pair hashtable to see if INSN is a shadow for another,
752 and return the other insn if so. Return NULL otherwise. */
754 real_insn_for_shadow (rtx insn)
756 struct delay_pair *pair;
758 if (!delay_htab.is_created ())
759 return NULL_RTX;
761 pair = delay_htab_i2.find_with_hash (insn, htab_hash_pointer (insn));
762 if (!pair || pair->stages > 0)
763 return NULL_RTX;
764 return pair->i1;
767 /* For a pair P of insns, return the fixed distance in cycles from the first
768 insn after which the second must be scheduled. */
769 static int
770 pair_delay (struct delay_pair *p)
772 if (p->stages == 0)
773 return p->cycles;
774 else
775 return p->stages * modulo_ii;
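/* For example, a pair recorded with CYCLES == 3 and STAGES == 0 has a
   delay of 3 cycles, while a modulo-scheduling pair with STAGES == 2
   and modulo_ii == 8 has a delay of 2 * 8 == 16 cycles.  */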
778 /* Given an insn INSN, add a dependence on its delayed shadow if it
779 has one. Also try to find situations where shadows depend on each other
780 and add dependencies to the real insns to limit the amount of backtracking
781 needed. */
782 void
783 add_delay_dependencies (rtx insn)
785 struct delay_pair *pair;
786 sd_iterator_def sd_it;
787 dep_t dep;
789 if (!delay_htab.is_created ())
790 return;
792 pair = delay_htab_i2.find_with_hash (insn, htab_hash_pointer (insn));
793 if (!pair)
794 return;
795 add_dependence (insn, pair->i1, REG_DEP_ANTI);
796 if (pair->stages)
797 return;
799 FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep)
801 rtx pro = DEP_PRO (dep);
802 struct delay_pair *other_pair
803 = delay_htab_i2.find_with_hash (pro, htab_hash_pointer (pro));
804 if (!other_pair || other_pair->stages)
805 continue;
806 if (pair_delay (other_pair) >= pair_delay (pair))
808 if (sched_verbose >= 4)
810 fprintf (sched_dump, ";;\tadding dependence %d <- %d\n",
811 INSN_UID (other_pair->i1),
812 INSN_UID (pair->i1));
813 fprintf (sched_dump, ";;\tpair1 %d <- %d, cost %d\n",
814 INSN_UID (pair->i1),
815 INSN_UID (pair->i2),
816 pair_delay (pair));
817 fprintf (sched_dump, ";;\tpair2 %d <- %d, cost %d\n",
818 INSN_UID (other_pair->i1),
819 INSN_UID (other_pair->i2),
820 pair_delay (other_pair));
822 add_dependence (pair->i1, other_pair->i1, REG_DEP_ANTI);
827 /* Forward declarations. */
829 static int priority (rtx);
830 static int rank_for_schedule (const void *, const void *);
831 static void swap_sort (rtx *, int);
832 static void queue_insn (rtx, int, const char *);
833 static int schedule_insn (rtx);
834 static void adjust_priority (rtx);
835 static void advance_one_cycle (void);
836 static void extend_h_i_d (void);
839 /* Notes handling mechanism:
840 =========================
841 Generally, NOTES are saved before scheduling and restored after scheduling.
842 The scheduler distinguishes between two types of notes:
844 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
845 Before scheduling a region, a pointer to the note is added to the insn
846 that follows or precedes it. (This happens as part of the data dependence
847 computation). After scheduling an insn, the pointer contained in it is
848 used for regenerating the corresponding note (in reemit_notes).
850 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
851 these notes are put in a list (in rm_other_notes() and
852 unlink_other_notes ()). After scheduling the block, these notes are
853 inserted at the beginning of the block (in schedule_block()). */
855 static void ready_add (struct ready_list *, rtx, bool);
856 static rtx ready_remove_first (struct ready_list *);
857 static rtx ready_remove_first_dispatch (struct ready_list *ready);
859 static void queue_to_ready (struct ready_list *);
860 static int early_queue_to_ready (state_t, struct ready_list *);
862 /* The following functions are used to implement multi-pass scheduling
863 on the first cycle. */
864 static rtx ready_remove (struct ready_list *, int);
865 static void ready_remove_insn (rtx);
867 static void fix_inter_tick (rtx, rtx);
868 static int fix_tick_ready (rtx);
869 static void change_queue_index (rtx, int);
871 /* The following functions are used to implement scheduling of data/control
872 speculative instructions. */
874 static void extend_h_i_d (void);
875 static void init_h_i_d (rtx);
876 static int haifa_speculate_insn (rtx, ds_t, rtx *);
877 static void generate_recovery_code (rtx);
878 static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
879 static void begin_speculative_block (rtx);
880 static void add_to_speculative_block (rtx);
881 static void init_before_recovery (basic_block *);
882 static void create_check_block_twin (rtx, bool);
883 static void fix_recovery_deps (basic_block);
884 static bool haifa_change_pattern (rtx, rtx);
885 static void dump_new_block_header (int, basic_block, rtx, rtx);
886 static void restore_bb_notes (basic_block);
887 static void fix_jump_move (rtx);
888 static void move_block_after_check (rtx);
889 static void move_succs (vec<edge, va_gc> **, basic_block);
890 static void sched_remove_insn (rtx);
891 static void clear_priorities (rtx, rtx_vec_t *);
892 static void calc_priorities (rtx_vec_t);
893 static void add_jump_dependencies (rtx, rtx);
895 #endif /* INSN_SCHEDULING */
897 /* Point to state used for the current scheduling pass. */
898 struct haifa_sched_info *current_sched_info;
900 #ifndef INSN_SCHEDULING
901 void
902 schedule_insns (void)
905 #else
907 /* Do register pressure sensitive insn scheduling if the flag is set
908 up. */
909 enum sched_pressure_algorithm sched_pressure;
911 /* Map regno -> its pressure class. The map is defined only when
912 SCHED_PRESSURE != SCHED_PRESSURE_NONE. */
913 enum reg_class *sched_regno_pressure_class;
915 /* The current register pressure. Only elements corresponding to pressure
916 classes are defined. */
917 static int curr_reg_pressure[N_REG_CLASSES];
919 /* Saved value of the previous array. */
920 static int saved_reg_pressure[N_REG_CLASSES];
922 /* Registers live at the given scheduling point. */
923 static bitmap curr_reg_live;
925 /* Saved value of the previous array. */
926 static bitmap saved_reg_live;
928 /* Registers mentioned in the current region. */
929 static bitmap region_ref_regs;
931 /* Initiate register pressure related info for scheduling the current
932 region. Currently it only clears the registers mentioned in the
933 current region. */
934 void
935 sched_init_region_reg_pressure_info (void)
937 bitmap_clear (region_ref_regs);
940 /* PRESSURE[CL] describes the pressure on register class CL. Update it
941 for the birth (if BIRTH_P) or death (if !BIRTH_P) of register REGNO.
942 LIVE tracks the set of live registers; if it is null, assume that
943 every birth or death is genuine. */
944 static inline void
945 mark_regno_birth_or_death (bitmap live, int *pressure, int regno, bool birth_p)
947 enum reg_class pressure_class;
949 pressure_class = sched_regno_pressure_class[regno];
950 if (regno >= FIRST_PSEUDO_REGISTER)
952 if (pressure_class != NO_REGS)
954 if (birth_p)
956 if (!live || bitmap_set_bit (live, regno))
957 pressure[pressure_class]
958 += (ira_reg_class_max_nregs
959 [pressure_class][PSEUDO_REGNO_MODE (regno)]);
961 else
963 if (!live || bitmap_clear_bit (live, regno))
964 pressure[pressure_class]
965 -= (ira_reg_class_max_nregs
966 [pressure_class][PSEUDO_REGNO_MODE (regno)]);
970 else if (pressure_class != NO_REGS
971 && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
973 if (birth_p)
975 if (!live || bitmap_set_bit (live, regno))
976 pressure[pressure_class]++;
978 else
980 if (!live || bitmap_clear_bit (live, regno))
981 pressure[pressure_class]--;
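/* As an illustration (hypothetical numbers): the birth of a pseudo whose
   mode needs two hard registers of its pressure class (as given by
   ira_reg_class_max_nregs) adds 2 to pressure[class]; the birth of a
   single allocatable hard register adds 1.  Deaths subtract the same
   amounts, and nothing is counted when LIVE is supplied and the bit for
   REGNO was already in the requested state.  */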
986 /* Initiate current register pressure related info from living
987 registers given by LIVE. */
988 static void
989 initiate_reg_pressure_info (bitmap live)
991 int i;
992 unsigned int j;
993 bitmap_iterator bi;
995 for (i = 0; i < ira_pressure_classes_num; i++)
996 curr_reg_pressure[ira_pressure_classes[i]] = 0;
997 bitmap_clear (curr_reg_live);
998 EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
999 if (sched_pressure == SCHED_PRESSURE_MODEL
1000 || current_nr_blocks == 1
1001 || bitmap_bit_p (region_ref_regs, j))
1002 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure, j, true);
1005 /* Mark registers in X as mentioned in the current region. */
1006 static void
1007 setup_ref_regs (rtx x)
1009 int i, j, regno;
1010 const RTX_CODE code = GET_CODE (x);
1011 const char *fmt;
1013 if (REG_P (x))
1015 regno = REGNO (x);
1016 if (HARD_REGISTER_NUM_P (regno))
1017 bitmap_set_range (region_ref_regs, regno,
1018 hard_regno_nregs[regno][GET_MODE (x)]);
1019 else
1020 bitmap_set_bit (region_ref_regs, REGNO (x));
1021 return;
1023 fmt = GET_RTX_FORMAT (code);
1024 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1025 if (fmt[i] == 'e')
1026 setup_ref_regs (XEXP (x, i));
1027 else if (fmt[i] == 'E')
1029 for (j = 0; j < XVECLEN (x, i); j++)
1030 setup_ref_regs (XVECEXP (x, i, j));
1034 /* Initiate current register pressure related info at the start of
1035 basic block BB. */
1036 static void
1037 initiate_bb_reg_pressure_info (basic_block bb)
1039 unsigned int i ATTRIBUTE_UNUSED;
1040 rtx insn;
1042 if (current_nr_blocks > 1)
1043 FOR_BB_INSNS (bb, insn)
1044 if (NONDEBUG_INSN_P (insn))
1045 setup_ref_regs (PATTERN (insn));
1046 initiate_reg_pressure_info (df_get_live_in (bb));
1047 #ifdef EH_RETURN_DATA_REGNO
1048 if (bb_has_eh_pred (bb))
1049 for (i = 0; ; ++i)
1051 unsigned int regno = EH_RETURN_DATA_REGNO (i);
1053 if (regno == INVALID_REGNUM)
1054 break;
1055 if (! bitmap_bit_p (df_get_live_in (bb), regno))
1056 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
1057 regno, true);
1059 #endif
1062 /* Save current register pressure related info. */
1063 static void
1064 save_reg_pressure (void)
1066 int i;
1068 for (i = 0; i < ira_pressure_classes_num; i++)
1069 saved_reg_pressure[ira_pressure_classes[i]]
1070 = curr_reg_pressure[ira_pressure_classes[i]];
1071 bitmap_copy (saved_reg_live, curr_reg_live);
1074 /* Restore saved register pressure related info. */
1075 static void
1076 restore_reg_pressure (void)
1078 int i;
1080 for (i = 0; i < ira_pressure_classes_num; i++)
1081 curr_reg_pressure[ira_pressure_classes[i]]
1082 = saved_reg_pressure[ira_pressure_classes[i]];
1083 bitmap_copy (curr_reg_live, saved_reg_live);
1086 /* Return TRUE if the register is dying after its USE. */
1087 static bool
1088 dying_use_p (struct reg_use_data *use)
1090 struct reg_use_data *next;
1092 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
1093 if (NONDEBUG_INSN_P (next->insn)
1094 && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
1095 return false;
1096 return true;
1099 /* Print info about the current register pressure and its excess for
1100 each pressure class. */
1101 static void
1102 print_curr_reg_pressure (void)
1104 int i;
1105 enum reg_class cl;
1107 fprintf (sched_dump, ";;\t");
1108 for (i = 0; i < ira_pressure_classes_num; i++)
1110 cl = ira_pressure_classes[i];
1111 gcc_assert (curr_reg_pressure[cl] >= 0);
1112 fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl],
1113 curr_reg_pressure[cl],
1114 curr_reg_pressure[cl] - ira_class_hard_regs_num[cl]);
1116 fprintf (sched_dump, "\n");
1119 /* Determine if INSN has a condition that is clobbered if a register
1120 in SET_REGS is modified. */
1121 static bool
1122 cond_clobbered_p (rtx insn, HARD_REG_SET set_regs)
1124 rtx pat = PATTERN (insn);
1125 gcc_assert (GET_CODE (pat) == COND_EXEC);
1126 if (TEST_HARD_REG_BIT (set_regs, REGNO (XEXP (COND_EXEC_TEST (pat), 0))))
1128 sd_iterator_def sd_it;
1129 dep_t dep;
1130 haifa_change_pattern (insn, ORIG_PAT (insn));
1131 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1132 DEP_STATUS (dep) &= ~DEP_CANCELLED;
1133 TODO_SPEC (insn) = HARD_DEP;
1134 if (sched_verbose >= 2)
1135 fprintf (sched_dump,
1136 ";;\t\tdequeue insn %s because of clobbered condition\n",
1137 (*current_sched_info->print_insn) (insn, 0));
1138 return true;
1141 return false;
1144 /* This function should be called after modifying the pattern of INSN,
1145 to update scheduler data structures as needed. */
1146 static void
1147 update_insn_after_change (rtx insn)
1149 sd_iterator_def sd_it;
1150 dep_t dep;
1152 dfa_clear_single_insn_cache (insn);
1154 sd_it = sd_iterator_start (insn,
1155 SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK);
1156 while (sd_iterator_cond (&sd_it, &dep))
1158 DEP_COST (dep) = UNKNOWN_DEP_COST;
1159 sd_iterator_next (&sd_it);
1162 /* Invalidate INSN_COST, so it'll be recalculated. */
1163 INSN_COST (insn) = -1;
1164 /* Invalidate INSN_TICK, so it'll be recalculated. */
1165 INSN_TICK (insn) = INVALID_TICK;
1169 /* Two VECs, one to hold dependencies for which pattern replacements
1170 need to be applied or restored at the start of the next cycle, and
1171 another to hold an integer that is either one, to apply the
1172 corresponding replacement, or zero to restore it. */
1173 static vec<dep_t> next_cycle_replace_deps;
1174 static vec<int> next_cycle_apply;
1176 static void apply_replacement (dep_t, bool);
1177 static void restore_pattern (dep_t, bool);
1179 /* Look at the remaining dependencies for insn NEXT, and compute and return
1180 the TODO_SPEC value we should use for it. This is called after one of
1181 NEXT's dependencies has been resolved.
1182 We also perform pattern replacements for predication, and for broken
1183 replacement dependencies. The latter is only done if FOR_BACKTRACK is
1184 false. */
1186 static ds_t
1187 recompute_todo_spec (rtx next, bool for_backtrack)
1189 ds_t new_ds;
1190 sd_iterator_def sd_it;
1191 dep_t dep, modify_dep = NULL;
1192 int n_spec = 0;
1193 int n_control = 0;
1194 int n_replace = 0;
1195 bool first_p = true;
1197 if (sd_lists_empty_p (next, SD_LIST_BACK))
1198 /* NEXT has all its dependencies resolved. */
1199 return 0;
1201 if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
1202 return HARD_DEP;
1204 /* Now we've got NEXT with speculative deps only.
1205 1. Look at the deps to see what we have to do.
1206 2. Check if we can do 'todo'. */
1207 new_ds = 0;
1209 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
1211 rtx pro = DEP_PRO (dep);
1212 ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
1214 if (DEBUG_INSN_P (pro) && !DEBUG_INSN_P (next))
1215 continue;
1217 if (ds)
1219 n_spec++;
1220 if (first_p)
1222 first_p = false;
1224 new_ds = ds;
1226 else
1227 new_ds = ds_merge (new_ds, ds);
1229 else if (DEP_TYPE (dep) == REG_DEP_CONTROL)
1231 if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
1233 n_control++;
1234 modify_dep = dep;
1236 DEP_STATUS (dep) &= ~DEP_CANCELLED;
1238 else if (DEP_REPLACE (dep) != NULL)
1240 if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
1242 n_replace++;
1243 modify_dep = dep;
1245 DEP_STATUS (dep) &= ~DEP_CANCELLED;
1249 if (n_replace > 0 && n_control == 0 && n_spec == 0)
1251 if (!dbg_cnt (sched_breakdep))
1252 return HARD_DEP;
1253 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
1255 struct dep_replacement *desc = DEP_REPLACE (dep);
1256 if (desc != NULL)
1258 if (desc->insn == next && !for_backtrack)
1260 gcc_assert (n_replace == 1);
1261 apply_replacement (dep, true);
1263 DEP_STATUS (dep) |= DEP_CANCELLED;
1266 return 0;
1269 else if (n_control == 1 && n_replace == 0 && n_spec == 0)
1271 rtx pro, other, new_pat;
1272 rtx cond = NULL_RTX;
1273 bool success;
1274 rtx prev = NULL_RTX;
1275 int i;
1276 unsigned regno;
1278 if ((current_sched_info->flags & DO_PREDICATION) == 0
1279 || (ORIG_PAT (next) != NULL_RTX
1280 && PREDICATED_PAT (next) == NULL_RTX))
1281 return HARD_DEP;
1283 pro = DEP_PRO (modify_dep);
1284 other = real_insn_for_shadow (pro);
1285 if (other != NULL_RTX)
1286 pro = other;
1288 cond = sched_get_reverse_condition_uncached (pro);
1289 regno = REGNO (XEXP (cond, 0));
1291 /* Find the last scheduled insn that modifies the condition register.
1292 We can stop looking once we find the insn we depend on through the
1293 REG_DEP_CONTROL; if the condition register isn't modified after it,
1294 we know that it still has the right value. */
1295 if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
1296 FOR_EACH_VEC_ELT_REVERSE (scheduled_insns, i, prev)
1298 HARD_REG_SET t;
1300 find_all_hard_reg_sets (prev, &t, true);
1301 if (TEST_HARD_REG_BIT (t, regno))
1302 return HARD_DEP;
1303 if (prev == pro)
1304 break;
1306 if (ORIG_PAT (next) == NULL_RTX)
1308 ORIG_PAT (next) = PATTERN (next);
1310 new_pat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (next));
1311 success = haifa_change_pattern (next, new_pat);
1312 if (!success)
1313 return HARD_DEP;
1314 PREDICATED_PAT (next) = new_pat;
1316 else if (PATTERN (next) != PREDICATED_PAT (next))
1318 bool success = haifa_change_pattern (next,
1319 PREDICATED_PAT (next));
1320 gcc_assert (success);
1322 DEP_STATUS (modify_dep) |= DEP_CANCELLED;
1323 return DEP_CONTROL;
1326 if (PREDICATED_PAT (next) != NULL_RTX)
1328 int tick = INSN_TICK (next);
1329 bool success = haifa_change_pattern (next,
1330 ORIG_PAT (next));
1331 INSN_TICK (next) = tick;
1332 gcc_assert (success);
1335 /* We can't handle the case where there are both speculative and control
1336 dependencies, so we return HARD_DEP in such a case. Also fail if
1337 we have speculative dependencies with not enough points, or more than
1338 one control dependency. */
1339 if ((n_spec > 0 && (n_control > 0 || n_replace > 0))
1340 || (n_spec > 0
1341 /* Too few points? */
1342 && ds_weak (new_ds) < spec_info->data_weakness_cutoff)
1343 || n_control > 0
1344 || n_replace > 0)
1345 return HARD_DEP;
1347 return new_ds;
1350 /* Pointer to the last instruction scheduled. */
1351 static rtx last_scheduled_insn;
1353 /* Pointer to the last nondebug instruction scheduled within the
1354 block, or the prev_head of the scheduling block. Used by
1355 rank_for_schedule, so that insns independent of the last scheduled
1356 insn will be preferred over dependent instructions. */
1357 static rtx last_nondebug_scheduled_insn;
1359 /* Pointer that iterates through the list of unscheduled insns if we
1360 have a dbg_cnt enabled. It always points at an insn prior to the
1361 first unscheduled one. */
1362 static rtx nonscheduled_insns_begin;
1364 /* Compute cost of executing INSN.
1365 This is the number of cycles between instruction issue and
1366 instruction results. */
1368 insn_cost (rtx insn)
1370 int cost;
1372 if (sel_sched_p ())
1374 if (recog_memoized (insn) < 0)
1375 return 0;
1377 cost = insn_default_latency (insn);
1378 if (cost < 0)
1379 cost = 0;
1381 return cost;
1384 cost = INSN_COST (insn);
1386 if (cost < 0)
1388 /* A USE insn, or something else we don't need to
1389 understand. We can't pass these directly to
1390 result_ready_cost or insn_default_latency because it will
1391 trigger a fatal error for unrecognizable insns. */
1392 if (recog_memoized (insn) < 0)
1394 INSN_COST (insn) = 0;
1395 return 0;
1397 else
1399 cost = insn_default_latency (insn);
1400 if (cost < 0)
1401 cost = 0;
1403 INSN_COST (insn) = cost;
1407 return cost;
1410 /* Compute cost of dependence LINK.
1411 This is the number of cycles between instruction issue and
1412 instruction results.
1413 ??? We also use this function to call recog_memoized on all insns. */
1415 dep_cost_1 (dep_t link, dw_t dw)
1417 rtx insn = DEP_PRO (link);
1418 rtx used = DEP_CON (link);
1419 int cost;
1421 if (DEP_COST (link) != UNKNOWN_DEP_COST)
1422 return DEP_COST (link);
1424 if (delay_htab.is_created ())
1426 struct delay_pair *delay_entry;
1427 delay_entry
1428 = delay_htab_i2.find_with_hash (used, htab_hash_pointer (used));
1429 if (delay_entry)
1431 if (delay_entry->i1 == insn)
1433 DEP_COST (link) = pair_delay (delay_entry);
1434 return DEP_COST (link);
1439 /* A USE insn should never require the value used to be computed.
1440 This allows the computation of a function's result and parameter
1441 values to overlap the return and call. We don't care about the
1442 dependence cost when only decreasing register pressure. */
1443 if (recog_memoized (used) < 0)
1445 cost = 0;
1446 recog_memoized (insn);
1448 else
1450 enum reg_note dep_type = DEP_TYPE (link);
1452 cost = insn_cost (insn);
1454 if (INSN_CODE (insn) >= 0)
1456 if (dep_type == REG_DEP_ANTI)
1457 cost = 0;
1458 else if (dep_type == REG_DEP_OUTPUT)
1460 cost = (insn_default_latency (insn)
1461 - insn_default_latency (used));
1462 if (cost <= 0)
1463 cost = 1;
1465 else if (bypass_p (insn))
1466 cost = insn_latency (insn, used);
1470 if (targetm.sched.adjust_cost_2)
1471 cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
1472 dw);
1473 else if (targetm.sched.adjust_cost != NULL)
1475 /* This variable is used for backward compatibility with the
1476 targets. */
1477 rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);
1479 /* Make it self-cycled, so that if someone tries to walk over this
1480 incomplete list he/she will be caught in an endless loop. */
1481 XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
1483 /* Targets use only REG_NOTE_KIND of the link. */
1484 PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
1486 cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
1487 insn, cost);
1489 free_INSN_LIST_node (dep_cost_rtx_link);
1492 if (cost < 0)
1493 cost = 0;
1496 DEP_COST (link) = cost;
1497 return cost;
1500 /* Compute cost of dependence LINK.
1501 This is the number of cycles between instruction issue and
1502 instruction results. */
1504 dep_cost (dep_t link)
1506 return dep_cost_1 (link, 0);
1509 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
1510 INSN_PRIORITY explicitly. */
1511 void
1512 increase_insn_priority (rtx insn, int amount)
1514 if (!sel_sched_p ())
1516 /* We're dealing with haifa-sched.c INSN_PRIORITY. */
1517 if (INSN_PRIORITY_KNOWN (insn))
1518 INSN_PRIORITY (insn) += amount;
1520 else
1522 /* In sel-sched.c INSN_PRIORITY is not kept up to date.
1523 Use EXPR_PRIORITY instead. */
1524 sel_add_to_insn_priority (insn, amount);
1528 /* Return 'true' if DEP should be included in priority calculations. */
1529 static bool
1530 contributes_to_priority_p (dep_t dep)
1532 if (DEBUG_INSN_P (DEP_CON (dep))
1533 || DEBUG_INSN_P (DEP_PRO (dep)))
1534 return false;
1536 /* Critical path is meaningful in block boundaries only. */
1537 if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
1538 DEP_PRO (dep)))
1539 return false;
1541 if (DEP_REPLACE (dep) != NULL)
1542 return false;
1544 /* If the flag COUNT_SPEC_IN_CRITICAL_PATH is set,
1545 then speculative instructions will be less likely to be
1546 scheduled. That is because the priority of
1547 their producers will increase, and, thus, the
1548 producers will be more likely to be scheduled, thus
1549 resolving the dependence. */
1550 if (sched_deps_info->generate_spec_deps
1551 && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
1552 && (DEP_STATUS (dep) & SPECULATIVE))
1553 return false;
1555 return true;
1558 /* Compute the number of nondebug deps in list LIST for INSN. */
1560 static int
1561 dep_list_size (rtx insn, sd_list_types_def list)
1563 sd_iterator_def sd_it;
1564 dep_t dep;
1565 int dbgcount = 0, nodbgcount = 0;
1567 if (!MAY_HAVE_DEBUG_INSNS)
1568 return sd_lists_size (insn, list);
1570 FOR_EACH_DEP (insn, list, sd_it, dep)
1572 if (DEBUG_INSN_P (DEP_CON (dep)))
1573 dbgcount++;
1574 else if (!DEBUG_INSN_P (DEP_PRO (dep)))
1575 nodbgcount++;
1578 gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, list));
1580 return nodbgcount;
1583 /* Compute the priority number for INSN. */
1584 static int
1585 priority (rtx insn)
1587 if (! INSN_P (insn))
1588 return 0;
1590 /* We should not be interested in priority of an already scheduled insn. */
1591 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
1593 if (!INSN_PRIORITY_KNOWN (insn))
1595 int this_priority = -1;
1597 if (dep_list_size (insn, SD_LIST_FORW) == 0)
1598 /* ??? We should set INSN_PRIORITY to insn_cost when an insn has
1599 some forward deps but all of them are ignored by
1600 contributes_to_priority hook. At the moment we set priority of
1601 such insn to 0. */
1602 this_priority = insn_cost (insn);
1603 else
1605 rtx prev_first, twin;
1606 basic_block rec;
1608 /* For recovery check instructions we calculate priority slightly
1609 differently from that of normal instructions. Instead of walking
1610 through INSN_FORW_DEPS (check) list, we walk through
1611 INSN_FORW_DEPS list of each instruction in the corresponding
1612 recovery block. */
1614 /* Selective scheduling does not define RECOVERY_BLOCK macro. */
1615 rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
1616 if (!rec || rec == EXIT_BLOCK_PTR_FOR_FN (cfun))
1618 prev_first = PREV_INSN (insn);
1619 twin = insn;
1621 else
1623 prev_first = NEXT_INSN (BB_HEAD (rec));
1624 twin = PREV_INSN (BB_END (rec));
1629 sd_iterator_def sd_it;
1630 dep_t dep;
1632 FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
1634 rtx next;
1635 int next_priority;
1637 next = DEP_CON (dep);
1639 if (BLOCK_FOR_INSN (next) != rec)
1641 int cost;
1643 if (!contributes_to_priority_p (dep))
1644 continue;
1646 if (twin == insn)
1647 cost = dep_cost (dep);
1648 else
1650 struct _dep _dep1, *dep1 = &_dep1;
1652 init_dep (dep1, insn, next, REG_DEP_ANTI);
1654 cost = dep_cost (dep1);
1657 next_priority = cost + priority (next);
1659 if (next_priority > this_priority)
1660 this_priority = next_priority;
1664 twin = PREV_INSN (twin);
1666 while (twin != prev_first);
1669 if (this_priority < 0)
1671 gcc_assert (this_priority == -1);
1673 this_priority = insn_cost (insn);
1676 INSN_PRIORITY (insn) = this_priority;
1677 INSN_PRIORITY_STATUS (insn) = 1;
1680 return INSN_PRIORITY (insn);
1683 /* Macros and functions for keeping the priority queue sorted, and
1684 dealing with queuing and dequeuing of instructions. */
1686 #define SCHED_SORT(READY, N_READY) \
1687 do { if ((N_READY) == 2) \
1688 swap_sort (READY, N_READY); \
1689 else if ((N_READY) > 2) \
1690 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
1691 while (0)
1693 /* For each pressure class CL, set DEATH[CL] to the number of registers
1694 in that class that die in INSN. */
1696 static void
1697 calculate_reg_deaths (rtx insn, int *death)
1699 int i;
1700 struct reg_use_data *use;
1702 for (i = 0; i < ira_pressure_classes_num; i++)
1703 death[ira_pressure_classes[i]] = 0;
1704 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1705 if (dying_use_p (use))
1706 mark_regno_birth_or_death (0, death, use->regno, true);
1709 /* Setup info about the current register pressure impact of scheduling
1710 INSN at the current scheduling point. */
1711 static void
1712 setup_insn_reg_pressure_info (rtx insn)
1714 int i, change, before, after, hard_regno;
1715 int excess_cost_change;
1716 enum machine_mode mode;
1717 enum reg_class cl;
1718 struct reg_pressure_data *pressure_info;
1719 int *max_reg_pressure;
1720 static int death[N_REG_CLASSES];
1722 gcc_checking_assert (!DEBUG_INSN_P (insn));
1724 excess_cost_change = 0;
1725 calculate_reg_deaths (insn, death);
1726 pressure_info = INSN_REG_PRESSURE (insn);
1727 max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
1728 gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
1729 for (i = 0; i < ira_pressure_classes_num; i++)
1731 cl = ira_pressure_classes[i];
1732 gcc_assert (curr_reg_pressure[cl] >= 0);
1733 change = (int) pressure_info[i].set_increase - death[cl];
1734 before = MAX (0, max_reg_pressure[i] - ira_class_hard_regs_num[cl]);
1735 after = MAX (0, max_reg_pressure[i] + change
1736 - ira_class_hard_regs_num[cl]);
1737 hard_regno = ira_class_hard_regs[cl][0];
1738 gcc_assert (hard_regno >= 0);
1739 mode = reg_raw_mode[hard_regno];
1740 excess_cost_change += ((after - before)
1741 * (ira_memory_move_cost[mode][cl][0]
1742 + ira_memory_move_cost[mode][cl][1]));
1744 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
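/* A worked example with hypothetical numbers: suppose a pressure class
   has ira_class_hard_regs_num == 16, the recorded maximum pressure for
   INSN is also 16, and INSN creates one value of that class
   (set_increase == 1) while killing none (death == 0).  Then
   change == 1, before == MAX (0, 16 - 16) == 0,
   after == MAX (0, 17 - 16) == 1, and the excess cost change grows by
   1 * (the class's memory move costs in both directions), i.e. roughly
   the cost of spilling one more register.  */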
1747 /* This is the first page of code related to SCHED_PRESSURE_MODEL.
1748 It tries to make the scheduler take register pressure into account
1749 without introducing too many unnecessary stalls. It hooks into the
1750 main scheduling algorithm at several points:
1752 - Before scheduling starts, model_start_schedule constructs a
1753 "model schedule" for the current block. This model schedule is
1754 chosen solely to keep register pressure down. It does not take the
1755 target's pipeline or the original instruction order into account,
1756 except as a tie-breaker. It also doesn't work to a particular
1757 pressure limit.
1759 This model schedule gives us an idea of what pressure can be
1760 achieved for the block and gives us an example of a schedule that
1761 keeps to that pressure. It also makes the final schedule less
1762 dependent on the original instruction order. This is important
1763 because the original order can either be "wide" (many values live
1764 at once, such as in user-scheduled code) or "narrow" (few values
1765 live at once, such as after loop unrolling, where several
1766 iterations are executed sequentially).
1768 We do not apply this model schedule to the rtx stream. We simply
1769 record it in model_schedule. We also compute the maximum pressure,
1770 MP, that was seen during this schedule.
1772 - Instructions are added to the ready queue even if they require
1773 a stall. The length of the stall is instead computed as:
1775 MAX (INSN_TICK (INSN) - clock_var, 0)
1777 (= insn_delay). This allows rank_for_schedule to choose between
1778 introducing a deliberate stall or increasing pressure.
1780 - Before sorting the ready queue, model_set_excess_costs assigns
1781 a pressure-based cost to each ready instruction in the queue.
1782 This is the instruction's INSN_REG_PRESSURE_EXCESS_COST_CHANGE
1783 (ECC for short) and is effectively measured in cycles.
1785 - rank_for_schedule ranks instructions based on:
1787 ECC (insn) + insn_delay (insn)
1789 then as:
1791 insn_delay (insn)
1793 So, for example, an instruction X1 with an ECC of 1 that can issue
1794 now will win over an instruction X0 with an ECC of zero that would
1795 introduce a stall of one cycle. However, an instruction X2 with an
1796 ECC of 2 that can issue now will lose to both X0 and X1.
1798 - When an instruction is scheduled, model_recompute updates the model
1799 schedule with the new pressures (some of which might now exceed the
1800 original maximum pressure MP). model_update_limit_points then searches
1801 for the new point of maximum pressure, if not already known. */
1803 /* Used to separate high-verbosity debug information for SCHED_PRESSURE_MODEL
1804 from surrounding debug information. */
1805 #define MODEL_BAR \
1806 ";;\t\t+------------------------------------------------------\n"
1808 /* Information about the pressure on a particular register class at a
1809 particular point of the model schedule. */
1810 struct model_pressure_data {
1811 /* The pressure at this point of the model schedule, or -1 if the
1812 point is associated with an instruction that has already been
1813 scheduled. */
1814 int ref_pressure;
1816 /* The maximum pressure during or after this point of the model schedule. */
1817 int max_pressure;
1820 /* Per-instruction information that is used while building the model
1821 schedule. Here, "schedule" refers to the model schedule rather
1822 than the main schedule. */
1823 struct model_insn_info {
1824 /* The instruction itself. */
1825 rtx insn;
1827 /* If this instruction is in model_worklist, these fields link to the
1828 previous (higher-priority) and next (lower-priority) instructions
1829 in the list. */
1830 struct model_insn_info *prev;
1831 struct model_insn_info *next;
1833 /* While constructing the schedule, QUEUE_INDEX describes whether an
1834 instruction has already been added to the schedule (QUEUE_SCHEDULED),
1835 is in model_worklist (QUEUE_READY), or neither (QUEUE_NOWHERE).
1836 old_queue records the value that QUEUE_INDEX had before scheduling
1837 started, so that we can restore it once the schedule is complete. */
1838 int old_queue;
1840 /* The relative importance of an unscheduled instruction. Higher
1841 values indicate greater importance. */
1842 unsigned int model_priority;
1844 /* The length of the longest path of satisfied true dependencies
1845 that leads to this instruction. */
1846 unsigned int depth;
1848 /* The length of the longest path of dependencies of any kind
1849 that leads from this instruction. */
1850 unsigned int alap;
1852 /* The number of predecessor nodes that must still be scheduled. */
1853 int unscheduled_preds;
1856 /* Information about the pressure limit for a particular register class.
1857 This structure is used when applying a model schedule to the main
1858 schedule. */
1859 struct model_pressure_limit {
1860 /* The maximum register pressure seen in the original model schedule. */
1861 int orig_pressure;
1863 /* The maximum register pressure seen in the current model schedule
1864 (which excludes instructions that have already been scheduled). */
1865 int pressure;
1867 /* The point of the current model schedule at which PRESSURE is first
1868 reached. It is set to -1 if the value needs to be recomputed. */
1869 int point;
1872 /* Describes a particular way of measuring register pressure. */
1873 struct model_pressure_group {
1874 /* Index PCI describes the maximum pressure on ira_pressure_classes[PCI]. */
1875 struct model_pressure_limit limits[N_REG_CLASSES];
1877   /* Index (POINT * ira_pressure_classes_num + PCI) describes the pressure
1878 on register class ira_pressure_classes[PCI] at point POINT of the
1879 current model schedule. A POINT of model_num_insns describes the
1880 pressure at the end of the schedule. */
1881 struct model_pressure_data *model;
1884 /* Index POINT gives the instruction at point POINT of the model schedule.
1885 This array doesn't change during main scheduling. */
1886 static vec<rtx> model_schedule;
1888 /* The list of instructions in the model worklist, sorted in order of
1889 decreasing priority. */
1890 static struct model_insn_info *model_worklist;
1892 /* Index I describes the instruction with INSN_LUID I. */
1893 static struct model_insn_info *model_insns;
1895 /* The number of instructions in the model schedule. */
1896 static int model_num_insns;
1898 /* The index of the first instruction in model_schedule that hasn't yet been
1899 added to the main schedule, or model_num_insns if all of them have. */
1900 static int model_curr_point;
1902 /* Describes the pressure before each instruction in the model schedule. */
1903 static struct model_pressure_group model_before_pressure;
1905 /* The first unused model_priority value (as used in model_insn_info). */
1906 static unsigned int model_next_priority;
1909 /* The model_pressure_data for ira_pressure_classes[PCI] in GROUP
1910 at point POINT of the model schedule. */
1911 #define MODEL_PRESSURE_DATA(GROUP, POINT, PCI) \
1912 (&(GROUP)->model[(POINT) * ira_pressure_classes_num + (PCI)])
1914 /* The maximum pressure on ira_pressure_classes[PCI] in GROUP at or
1915 after point POINT of the model schedule. */
1916 #define MODEL_MAX_PRESSURE(GROUP, POINT, PCI) \
1917 (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->max_pressure)
1919 /* The pressure on ira_pressure_classes[PCI] in GROUP at point POINT
1920 of the model schedule. */
1921 #define MODEL_REF_PRESSURE(GROUP, POINT, PCI) \
1922 (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->ref_pressure)
1924 /* Information about INSN that is used when creating the model schedule. */
1925 #define MODEL_INSN_INFO(INSN) \
1926 (&model_insns[INSN_LUID (INSN)])
1928 /* The instruction at point POINT of the model schedule. */
1929 #define MODEL_INSN(POINT) \
1930 (model_schedule[POINT])
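/* Illustrative sketch (not part of GCC): the flat (point, class) layout
   assumed by MODEL_PRESSURE_DATA above, using a hypothetical two-class,
   two-instruction schedule.  The cell index is point * num_classes + pci,
   with one extra row for the end of the schedule, as with the
   (model_num_insns + 1) rows allocated in the real code.  */
#include <assert.h>

struct toy_pressure_data { int ref_pressure; int max_pressure; };

static struct toy_pressure_data *
toy_pressure_cell (struct toy_pressure_data *model,
		   int num_classes, int point, int pci)
{
  return &model[point * num_classes + pci];
}

static void
toy_layout_example (void)
{
  /* Two pressure classes, two instructions plus one end-of-schedule row.  */
  struct toy_pressure_data model[3 * 2] = { { 0, 0 } };
  toy_pressure_cell (model, 2, 1, 1)->ref_pressure = 5;
  assert (model[1 * 2 + 1].ref_pressure == 5);
}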
1933 /* Return INSN's index in the model schedule, or model_num_insns if it
1934 doesn't belong to that schedule. */
1936 static int
1937 model_index (rtx insn)
1939 if (INSN_MODEL_INDEX (insn) == 0)
1940 return model_num_insns;
1941 return INSN_MODEL_INDEX (insn) - 1;
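/* Illustrative sketch (not part of GCC): the "+ 1" bias stored by
   model_add_to_schedule and undone above.  A stored index of 0 means
   "not in the model schedule", so insns from other blocks naturally map
   to model_num_insns.  The values here are hypothetical.  */
static int toy_model_num_insns = 10;

static int
toy_model_index (int stored_index)
{
  if (stored_index == 0)
    return toy_model_num_insns;
  return stored_index - 1;
}

/* toy_model_index (0) == 10, i.e. "not in the schedule"; an insn added
   at point 3 stores 4, and toy_model_index (4) == 3.  */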
1944 /* Make sure that GROUP->limits is up-to-date for the current point
1945 of the model schedule. */
1947 static void
1948 model_update_limit_points_in_group (struct model_pressure_group *group)
1950 int pci, max_pressure, point;
1952 for (pci = 0; pci < ira_pressure_classes_num; pci++)
1954 /* We may have passed the final point at which the pressure in
1955 group->limits[pci].pressure was reached. Update the limit if so. */
1956 max_pressure = MODEL_MAX_PRESSURE (group, model_curr_point, pci);
1957 group->limits[pci].pressure = max_pressure;
1959 /* Find the point at which MAX_PRESSURE is first reached. We need
1960 to search in three cases:
1962 - We've already moved past the previous pressure point.
1963 In this case we search forward from model_curr_point.
1965 - We scheduled the previous point of maximum pressure ahead of
1966 its position in the model schedule, but doing so didn't bring
1967 the pressure point earlier. In this case we search forward
1968 from that previous pressure point.
1970 - Scheduling an instruction early caused the maximum pressure
1971 to decrease. In this case we will have set the pressure
1972 point to -1, and we search forward from model_curr_point. */
1973 point = MAX (group->limits[pci].point, model_curr_point);
1974 while (point < model_num_insns
1975 && MODEL_REF_PRESSURE (group, point, pci) < max_pressure)
1976 point++;
1977 group->limits[pci].point = point;
1979 gcc_assert (MODEL_REF_PRESSURE (group, point, pci) == max_pressure);
1980 gcc_assert (MODEL_MAX_PRESSURE (group, point, pci) == max_pressure);
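/* Illustrative sketch (not part of GCC): the forward search above,
   reduced to a plain int array of per-point pressures for one class.
   Starting from the later of the previous limit point and the current
   point, scan forward to the first point whose pressure equals the
   already-known maximum.  */
#include <assert.h>

static int
toy_find_limit_point (const int *ref, int num_points, int start,
		      int max_pressure)
{
  int point = start;
  while (point < num_points && ref[point] < max_pressure)
    point++;
  return point;
}

static void
toy_limit_point_example (void)
{
  int ref[] = { 3, 4, 2, 4, 1 };
  assert (toy_find_limit_point (ref, 5, 0, 4) == 1);
  assert (toy_find_limit_point (ref, 5, 2, 4) == 3);
}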
1984 /* Make sure that all register-pressure limits are up-to-date for the
1985 current position in the model schedule. */
1987 static void
1988 model_update_limit_points (void)
1990 model_update_limit_points_in_group (&model_before_pressure);
1993 /* Return the model_index of the last unscheduled use in chain USE
1994 outside of USE's instruction. Return -1 if there are no other uses,
1995 or model_num_insns if the register is live at the end of the block. */
1997 static int
1998 model_last_use_except (struct reg_use_data *use)
2000 struct reg_use_data *next;
2001 int last, index;
2003 last = -1;
2004 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
2005 if (NONDEBUG_INSN_P (next->insn)
2006 && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
2008 index = model_index (next->insn);
2009 if (index == model_num_insns)
2010 return model_num_insns;
2011 if (last < index)
2012 last = index;
2014 return last;
2017 /* An instruction with model_index POINT has just been scheduled, and it
2018 adds DELTA to the pressure on ira_pressure_classes[PCI] after POINT - 1.
2019 Update MODEL_REF_PRESSURE (GROUP, POINT, PCI) and
2020 MODEL_MAX_PRESSURE (GROUP, POINT, PCI) accordingly. */
2022 static void
2023 model_start_update_pressure (struct model_pressure_group *group,
2024 int point, int pci, int delta)
2026 int next_max_pressure;
2028 if (point == model_num_insns)
2030 /* The instruction wasn't part of the model schedule; it was moved
2031 from a different block. Update the pressure for the end of
2032 the model schedule. */
2033 MODEL_REF_PRESSURE (group, point, pci) += delta;
2034 MODEL_MAX_PRESSURE (group, point, pci) += delta;
2036 else
2038 /* Record that this instruction has been scheduled. Nothing now
2039 changes between POINT and POINT + 1, so get the maximum pressure
2040 from the latter. If the maximum pressure decreases, the new
2041 pressure point may be before POINT. */
2042 MODEL_REF_PRESSURE (group, point, pci) = -1;
2043 next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2044 if (MODEL_MAX_PRESSURE (group, point, pci) > next_max_pressure)
2046 MODEL_MAX_PRESSURE (group, point, pci) = next_max_pressure;
2047 if (group->limits[pci].point == point)
2048 group->limits[pci].point = -1;
2053 /* Record that scheduling a later instruction has changed the pressure
2054 at point POINT of the model schedule by DELTA (which might be 0).
2055 Update GROUP accordingly. Return nonzero if these changes might
2056 trigger changes to previous points as well. */
2058 static int
2059 model_update_pressure (struct model_pressure_group *group,
2060 int point, int pci, int delta)
2062 int ref_pressure, max_pressure, next_max_pressure;
2064 /* If POINT hasn't yet been scheduled, update its pressure. */
2065 ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
2066 if (ref_pressure >= 0 && delta != 0)
2068 ref_pressure += delta;
2069 MODEL_REF_PRESSURE (group, point, pci) = ref_pressure;
2071 /* Check whether the maximum pressure in the overall schedule
2072 has increased. (This means that the MODEL_MAX_PRESSURE of
2073 	 every point <= POINT will need to increase too; see below.)  */
2074 if (group->limits[pci].pressure < ref_pressure)
2075 group->limits[pci].pressure = ref_pressure;
2077 /* If we are at maximum pressure, and the maximum pressure
2078 point was previously unknown or later than POINT,
2079 bring it forward. */
2080 if (group->limits[pci].pressure == ref_pressure
2081 && !IN_RANGE (group->limits[pci].point, 0, point))
2082 group->limits[pci].point = point;
2084 /* If POINT used to be the point of maximum pressure, but isn't
2085 any longer, we need to recalculate it using a forward walk. */
2086 if (group->limits[pci].pressure > ref_pressure
2087 && group->limits[pci].point == point)
2088 group->limits[pci].point = -1;
2091 /* Update the maximum pressure at POINT. Changes here might also
2092 affect the maximum pressure at POINT - 1. */
2093 next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2094 max_pressure = MAX (ref_pressure, next_max_pressure);
2095 if (MODEL_MAX_PRESSURE (group, point, pci) != max_pressure)
2097 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
2098 return 1;
2100 return 0;
2103 /* INSN has just been scheduled. Update the model schedule accordingly. */
2105 static void
2106 model_recompute (rtx insn)
2108 struct {
2109 int last_use;
2110 int regno;
2111 } uses[FIRST_PSEUDO_REGISTER + MAX_RECOG_OPERANDS];
2112 struct reg_use_data *use;
2113 struct reg_pressure_data *reg_pressure;
2114 int delta[N_REG_CLASSES];
2115 int pci, point, mix, new_last, cl, ref_pressure, queue;
2116 unsigned int i, num_uses, num_pending_births;
2117 bool print_p;
2119 /* The destinations of INSN were previously live from POINT onwards, but are
2120 now live from model_curr_point onwards. Set up DELTA accordingly. */
2121 point = model_index (insn);
2122 reg_pressure = INSN_REG_PRESSURE (insn);
2123 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2125 cl = ira_pressure_classes[pci];
2126 delta[cl] = reg_pressure[pci].set_increase;
2129 /* Record which registers previously died at POINT, but which now die
2130 before POINT. Adjust DELTA so that it represents the effect of
2131 this change after POINT - 1. Set NUM_PENDING_BIRTHS to the number of
2132 registers that will be born in the range [model_curr_point, POINT). */
2133 num_uses = 0;
2134 num_pending_births = 0;
2135 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2137 new_last = model_last_use_except (use);
2138 if (new_last < point)
2140 gcc_assert (num_uses < ARRAY_SIZE (uses));
2141 uses[num_uses].last_use = new_last;
2142 uses[num_uses].regno = use->regno;
2143 /* This register is no longer live after POINT - 1. */
2144 mark_regno_birth_or_death (NULL, delta, use->regno, false);
2145 num_uses++;
2146 if (new_last >= 0)
2147 num_pending_births++;
2151 /* Update the MODEL_REF_PRESSURE and MODEL_MAX_PRESSURE for POINT.
2152 Also set each group pressure limit for POINT. */
2153 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2155 cl = ira_pressure_classes[pci];
2156 model_start_update_pressure (&model_before_pressure,
2157 point, pci, delta[cl]);
2160 /* Walk the model schedule backwards, starting immediately before POINT. */
2161 print_p = false;
2162 if (point != model_curr_point)
2165 point--;
2166 insn = MODEL_INSN (point);
2167 queue = QUEUE_INDEX (insn);
2169 if (queue != QUEUE_SCHEDULED)
2171 /* DELTA describes the effect of the move on the register pressure
2172 after POINT. Make it describe the effect on the pressure
2173 before POINT. */
2174 i = 0;
2175 while (i < num_uses)
2177 if (uses[i].last_use == point)
2179 /* This register is now live again. */
2180 mark_regno_birth_or_death (NULL, delta,
2181 uses[i].regno, true);
2183 /* Remove this use from the array. */
2184 uses[i] = uses[num_uses - 1];
2185 num_uses--;
2186 num_pending_births--;
2188 else
2189 i++;
2192 if (sched_verbose >= 5)
2194 if (!print_p)
2196 fprintf (sched_dump, MODEL_BAR);
2197 fprintf (sched_dump, ";;\t\t| New pressure for model"
2198 " schedule\n");
2199 fprintf (sched_dump, MODEL_BAR);
2200 print_p = true;
2203 fprintf (sched_dump, ";;\t\t| %3d %4d %-30s ",
2204 point, INSN_UID (insn),
2205 str_pattern_slim (PATTERN (insn)));
2206 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2208 cl = ira_pressure_classes[pci];
2209 ref_pressure = MODEL_REF_PRESSURE (&model_before_pressure,
2210 point, pci);
2211 fprintf (sched_dump, " %s:[%d->%d]",
2212 reg_class_names[ira_pressure_classes[pci]],
2213 ref_pressure, ref_pressure + delta[cl]);
2215 fprintf (sched_dump, "\n");
2219 /* Adjust the pressure at POINT. Set MIX to nonzero if POINT - 1
2220 might have changed as well. */
2221 mix = num_pending_births;
2222 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2224 cl = ira_pressure_classes[pci];
2225 mix |= delta[cl];
2226 mix |= model_update_pressure (&model_before_pressure,
2227 point, pci, delta[cl]);
2230 while (mix && point > model_curr_point);
2232 if (print_p)
2233 fprintf (sched_dump, MODEL_BAR);
2236 /* After DEP, which was cancelled, has been resolved for insn NEXT,
2237    return true if the insn's pattern needs restoring.  */
2238 static bool
2239 must_restore_pattern_p (rtx next, dep_t dep)
2241 if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
2242 return false;
2244 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
2246 gcc_assert (ORIG_PAT (next) != NULL_RTX);
2247 gcc_assert (next == DEP_CON (dep));
2249 else
2251 struct dep_replacement *desc = DEP_REPLACE (dep);
2252 if (desc->insn != next)
2254 gcc_assert (*desc->loc == desc->orig);
2255 return false;
2258 return true;
2261 /* model_spill_cost (CL, P, P') returns the cost of increasing the
2262 pressure on CL from P to P'. We use this to calculate a "base ECC",
2263 baseECC (CL, X), for each pressure class CL and each instruction X.
2264 Supposing X changes the pressure on CL from P to P', and that the
2265 maximum pressure on CL in the current model schedule is MP', then:
2267 * if X occurs before or at the next point of maximum pressure in
2268 the model schedule and P' > MP', then:
2270 baseECC (CL, X) = model_spill_cost (CL, MP, P')
2272 The idea is that the pressure after scheduling a fixed set of
2273 instructions -- in this case, the set up to and including the
2274 next maximum pressure point -- is going to be the same regardless
2275 of the order; we simply want to keep the intermediate pressure
2276 under control. Thus X has a cost of zero unless scheduling it
2277 now would exceed MP'.
2279 If all increases in the set are by the same amount, no zero-cost
2280 instruction will ever cause the pressure to exceed MP'. However,
2281 if X is instead moved past an instruction X' with pressure in the
2282 range (MP' - (P' - P), MP'), the pressure at X' will increase
2283 beyond MP'. Since baseECC is very much a heuristic anyway,
2284 it doesn't seem worth the overhead of tracking cases like these.
2286 The cost of exceeding MP' is always based on the original maximum
2287 pressure MP. This is so that going 2 registers over the original
2288 limit has the same cost regardless of whether it comes from two
2289 separate +1 deltas or from a single +2 delta.
2291 * if X occurs after the next point of maximum pressure in the model
2292 schedule and P' > P, then:
2294 baseECC (CL, X) = model_spill_cost (CL, MP, MP' + (P' - P))
2296 That is, if we move X forward across a point of maximum pressure,
2297 and if X increases the pressure by P' - P, then we conservatively
2298 assume that scheduling X next would increase the maximum pressure
2299 by P' - P. Again, the cost of doing this is based on the original
2300 maximum pressure MP, for the same reason as above.
2302 * if P' < P, P > MP, and X occurs at or after the next point of
2303 maximum pressure, then:
2305 baseECC (CL, X) = -model_spill_cost (CL, MAX (MP, P'), P)
2307 That is, if we have already exceeded the original maximum pressure MP,
2308 and if X might reduce the maximum pressure again -- or at least push
2309 it further back, and thus allow more scheduling freedom -- it is given
2310 a negative cost to reflect the improvement.
2312 * otherwise,
2314 baseECC (CL, X) = 0
2316 In this case, X is not expected to affect the maximum pressure MP',
2317 so it has zero cost.
2319 We then create a combined value baseECC (X) that is the sum of
2320 baseECC (CL, X) for each pressure class CL.
2322 baseECC (X) could itself be used as the ECC value described above.
2323 However, this is often too conservative, in the sense that it
2324 tends to make high-priority instructions that increase pressure
2325 wait too long in cases where introducing a spill would be better.
2326 For this reason the final ECC is a priority-adjusted form of
2327 baseECC (X). Specifically, we calculate:
2329 P (X) = INSN_PRIORITY (X) - insn_delay (X) - baseECC (X)
2330 baseP = MAX { P (X) | baseECC (X) <= 0 }
2332 Then:
2334 ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0)
2336 Thus an instruction's effect on pressure is ignored if it has a high
2337 enough priority relative to the ones that don't increase pressure.
2338 Negative values of baseECC (X) do not increase the priority of X
2339 itself, but they do make it harder for other instructions to
2340 increase the pressure further.
2342 This pressure cost is deliberately timid. The intention has been
2343 to choose a heuristic that rarely interferes with the normal list
2344 scheduler in cases where that scheduler would produce good code.
2345 We simply want to curb some of its worst excesses. */
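/* Illustrative sketch (not part of GCC): the priority adjustment
   ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0) from the comment
   above, evaluated for hypothetical priority, delay and baseECC values.  */
#include <assert.h>

struct toy_ecc_insn { int priority; int delay; int base_ecc; };

static int
toy_p (const struct toy_ecc_insn *x)
{
  return x->priority - x->delay - x->base_ecc;
}

static int
toy_ecc (const struct toy_ecc_insn *x, int base_p)
{
  int ecc = base_p - toy_p (x);
  if (ecc > x->base_ecc)
    ecc = x->base_ecc;
  return ecc > 0 ? ecc : 0;
}

static void
toy_ecc_example (void)
{
  /* A "cheap" insn (baseECC <= 0) that defines baseP, and a
     higher-priority insn that increases pressure.  */
  struct toy_ecc_insn cheap = { 4, 0, 0 };
  struct toy_ecc_insn expensive = { 10, 0, 3 };
  int base_p = toy_p (&cheap);		/* 4 */

  /* P (expensive) = 10 - 0 - 3 = 7, so baseP - P (X) = -3 and the final
     ECC is 0: the pressure cost is waived because the insn's priority is
     high enough relative to the cheap instructions.  */
  assert (toy_ecc (&expensive, base_p) == 0);

  /* A low-priority insn with the same baseECC keeps its full cost:
     P = -1, baseP - P = 5, MIN (5, 3) = 3.  */
  struct toy_ecc_insn lowprio = { 2, 0, 3 };
  assert (toy_ecc (&lowprio, base_p) == 3);
}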
2347 /* Return the cost of increasing the pressure in class CL from FROM to TO.
2349 Here we use the very simplistic cost model that every register above
2350 ira_class_hard_regs_num[CL] has a spill cost of 1. We could use other
2351 measures instead, such as one based on MEMORY_MOVE_COST. However:
2353 (1) In order for an instruction to be scheduled, the higher cost
2354 would need to be justified in a single saving of that many stalls.
2355 This is overly pessimistic, because the benefit of spilling is
2356 often to avoid a sequence of several short stalls rather than
2357 a single long one.
2359 (2) The cost is still arbitrary. Because we are not allocating
2360 registers during scheduling, we have no way of knowing for
2361 sure how many memory accesses will be required by each spill,
2362 where the spills will be placed within the block, or even
2363 which block(s) will contain the spills.
2365 So a higher cost than 1 is often too conservative in practice,
2366 forcing blocks to contain unnecessary stalls instead of spill code.
2367 The simple cost below seems to be the best compromise. It reduces
2368 the interference with the normal list scheduler, which helps make
2369 it more suitable for a default-on option. */
2371 static int
2372 model_spill_cost (int cl, int from, int to)
2374 from = MAX (from, ira_class_hard_regs_num[cl]);
2375 return MAX (to, from) - from;
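/* Illustrative sketch (not part of GCC): model_spill_cost for a
   hypothetical class with 8 allocatable registers.  Only the registers
   pushed above the class size count towards the cost.  */
static int
toy_spill_cost (int class_size, int from, int to)
{
  if (from < class_size)
    from = class_size;
  return (to > from ? to : from) - from;
}

/* toy_spill_cost (8, 6, 10) == 2: raising the pressure from 6 to 10
   pushes two registers beyond the 8 available.
   toy_spill_cost (8, 9, 11) == 2: already one over; two more spills.
   toy_spill_cost (8, 6, 7)  == 0: still within the class.  */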
2378 /* Return baseECC (ira_pressure_classes[PCI], POINT), given that
2379 P = curr_reg_pressure[ira_pressure_classes[PCI]] and that
2380 P' = P + DELTA. */
2382 static int
2383 model_excess_group_cost (struct model_pressure_group *group,
2384 int point, int pci, int delta)
2386 int pressure, cl;
2388 cl = ira_pressure_classes[pci];
2389 if (delta < 0 && point >= group->limits[pci].point)
2391 pressure = MAX (group->limits[pci].orig_pressure,
2392 curr_reg_pressure[cl] + delta);
2393 return -model_spill_cost (cl, pressure, curr_reg_pressure[cl]);
2396 if (delta > 0)
2398 if (point > group->limits[pci].point)
2399 pressure = group->limits[pci].pressure + delta;
2400 else
2401 pressure = curr_reg_pressure[cl] + delta;
2403 if (pressure > group->limits[pci].pressure)
2404 return model_spill_cost (cl, group->limits[pci].orig_pressure,
2405 pressure);
2408 return 0;
2411 /* Return baseECC (MODEL_INSN (INSN)). Dump the costs to sched_dump
2412 if PRINT_P. */
2414 static int
2415 model_excess_cost (rtx insn, bool print_p)
2417 int point, pci, cl, cost, this_cost, delta;
2418 struct reg_pressure_data *insn_reg_pressure;
2419 int insn_death[N_REG_CLASSES];
2421 calculate_reg_deaths (insn, insn_death);
2422 point = model_index (insn);
2423 insn_reg_pressure = INSN_REG_PRESSURE (insn);
2424 cost = 0;
2426 if (print_p)
2427 fprintf (sched_dump, ";;\t\t| %3d %4d | %4d %+3d |", point,
2428 INSN_UID (insn), INSN_PRIORITY (insn), insn_delay (insn));
2430 /* Sum up the individual costs for each register class. */
2431 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2433 cl = ira_pressure_classes[pci];
2434 delta = insn_reg_pressure[pci].set_increase - insn_death[cl];
2435 this_cost = model_excess_group_cost (&model_before_pressure,
2436 point, pci, delta);
2437 cost += this_cost;
2438 if (print_p)
2439 fprintf (sched_dump, " %s:[%d base cost %d]",
2440 reg_class_names[cl], delta, this_cost);
2443 if (print_p)
2444 fprintf (sched_dump, "\n");
2446 return cost;
2449 /* Dump the next points of maximum pressure for GROUP. */
2451 static void
2452 model_dump_pressure_points (struct model_pressure_group *group)
2454 int pci, cl;
2456 fprintf (sched_dump, ";;\t\t| pressure points");
2457 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2459 cl = ira_pressure_classes[pci];
2460 fprintf (sched_dump, " %s:[%d->%d at ", reg_class_names[cl],
2461 curr_reg_pressure[cl], group->limits[pci].pressure);
2462 if (group->limits[pci].point < model_num_insns)
2463 fprintf (sched_dump, "%d:%d]", group->limits[pci].point,
2464 INSN_UID (MODEL_INSN (group->limits[pci].point)));
2465 else
2466 fprintf (sched_dump, "end]");
2468 fprintf (sched_dump, "\n");
2471 /* Set INSN_REG_PRESSURE_EXCESS_COST_CHANGE for INSNS[0...COUNT-1]. */
2473 static void
2474 model_set_excess_costs (rtx *insns, int count)
2476 int i, cost, priority_base, priority;
2477 bool print_p;
2479 /* Record the baseECC value for each instruction in the model schedule,
2480      except that negative costs are converted to zero ones now rather than
2481 later. Do not assign a cost to debug instructions, since they must
2482 not change code-generation decisions. Experiments suggest we also
2483 get better results by not assigning a cost to instructions from
2484 a different block.
2486 Set PRIORITY_BASE to baseP in the block comment above. This is the
2487 maximum priority of the "cheap" instructions, which should always
2488 include the next model instruction. */
2489 priority_base = 0;
2490 print_p = false;
2491 for (i = 0; i < count; i++)
2492 if (INSN_MODEL_INDEX (insns[i]))
2494 if (sched_verbose >= 6 && !print_p)
2496 fprintf (sched_dump, MODEL_BAR);
2497 fprintf (sched_dump, ";;\t\t| Pressure costs for ready queue\n");
2498 model_dump_pressure_points (&model_before_pressure);
2499 fprintf (sched_dump, MODEL_BAR);
2500 print_p = true;
2502 cost = model_excess_cost (insns[i], print_p);
2503 if (cost <= 0)
2505 priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]) - cost;
2506 priority_base = MAX (priority_base, priority);
2507 cost = 0;
2509 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = cost;
2511 if (print_p)
2512 fprintf (sched_dump, MODEL_BAR);
2514   /* Use MAX (baseECC, 0) and baseP to calculate ECC for each
2515 instruction. */
2516 for (i = 0; i < count; i++)
2518 cost = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]);
2519 priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]);
2520 if (cost > 0 && priority > priority_base)
2522 cost += priority_base - priority;
2523 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = MAX (cost, 0);
2528 /* Returns a positive value if x is preferred; returns a negative value if
2529 y is preferred. Should never return 0, since that will make the sort
2530 unstable. */
2532 static int
2533 rank_for_schedule (const void *x, const void *y)
2535 rtx tmp = *(const rtx *) y;
2536 rtx tmp2 = *(const rtx *) x;
2537 int tmp_class, tmp2_class;
2538 int val, priority_val, info_val, diff;
2540 if (MAY_HAVE_DEBUG_INSNS)
2542 /* Schedule debug insns as early as possible. */
2543 if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
2544 return -1;
2545 else if (!DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2546 return 1;
2547 else if (DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2548 return INSN_LUID (tmp) - INSN_LUID (tmp2);
2551 if (live_range_shrinkage_p)
2553 /* Don't use SCHED_PRESSURE_MODEL -- it results in much worse
2554 code. */
2555 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
2556 if ((INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp) < 0
2557 || INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2) < 0)
2558 && (diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
2559 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2))) != 0)
2560 return diff;
2561 /* Sort by INSN_LUID (original insn order), so that we make the
2562 sort stable. This minimizes instruction movement, thus
2563 minimizing sched's effect on debugging and cross-jumping. */
2564 return INSN_LUID (tmp) - INSN_LUID (tmp2);
2567   /* The insn in a schedule group should be issued first.  */
2568 if (flag_sched_group_heuristic &&
2569 SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
2570 return SCHED_GROUP_P (tmp2) ? 1 : -1;
2572 /* Make sure that priority of TMP and TMP2 are initialized. */
2573 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
2575 if (sched_pressure != SCHED_PRESSURE_NONE)
2577 /* Prefer insn whose scheduling results in the smallest register
2578 pressure excess. */
2579 if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
2580 + insn_delay (tmp)
2581 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
2582 - insn_delay (tmp2))))
2583 return diff;
2586 if (sched_pressure != SCHED_PRESSURE_NONE
2587 && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var))
2589 if (INSN_TICK (tmp) <= clock_var)
2590 return -1;
2591 else if (INSN_TICK (tmp2) <= clock_var)
2592 return 1;
2593 else
2594 return INSN_TICK (tmp) - INSN_TICK (tmp2);
2597 /* If we are doing backtracking in this schedule, prefer insns that
2598 have forward dependencies with negative cost against an insn that
2599 was already scheduled. */
2600 if (current_sched_info->flags & DO_BACKTRACKING)
2602 priority_val = FEEDS_BACKTRACK_INSN (tmp2) - FEEDS_BACKTRACK_INSN (tmp);
2603 if (priority_val)
2604 return priority_val;
2607 /* Prefer insn with higher priority. */
2608 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
2610 if (flag_sched_critical_path_heuristic && priority_val)
2611 return priority_val;
2613 /* Prefer speculative insn with greater dependencies weakness. */
2614 if (flag_sched_spec_insn_heuristic && spec_info)
2616 ds_t ds1, ds2;
2617 dw_t dw1, dw2;
2618 int dw;
2620 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
2621 if (ds1)
2622 dw1 = ds_weak (ds1);
2623 else
2624 dw1 = NO_DEP_WEAK;
2626 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
2627 if (ds2)
2628 dw2 = ds_weak (ds2);
2629 else
2630 dw2 = NO_DEP_WEAK;
2632 dw = dw2 - dw1;
2633 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
2634 return dw;
2637 info_val = (*current_sched_info->rank) (tmp, tmp2);
2638 if (flag_sched_rank_heuristic && info_val)
2639 return info_val;
2641 /* Compare insns based on their relation to the last scheduled
2642 non-debug insn. */
2643 if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
2645 dep_t dep1;
2646 dep_t dep2;
2647 rtx last = last_nondebug_scheduled_insn;
2649 /* Classify the instructions into three classes:
2650 	 1) Data dependent on last scheduled insn.
2651 2) Anti/Output dependent on last scheduled insn.
2652 3) Independent of last scheduled insn, or has latency of one.
2653 Choose the insn from the highest numbered class if different. */
2654 dep1 = sd_find_dep_between (last, tmp, true);
2656 if (dep1 == NULL || dep_cost (dep1) == 1)
2657 tmp_class = 3;
2658 else if (/* Data dependence. */
2659 DEP_TYPE (dep1) == REG_DEP_TRUE)
2660 tmp_class = 1;
2661 else
2662 tmp_class = 2;
2664 dep2 = sd_find_dep_between (last, tmp2, true);
2666 if (dep2 == NULL || dep_cost (dep2) == 1)
2667 tmp2_class = 3;
2668 else if (/* Data dependence. */
2669 DEP_TYPE (dep2) == REG_DEP_TRUE)
2670 tmp2_class = 1;
2671 else
2672 tmp2_class = 2;
2674 if ((val = tmp2_class - tmp_class))
2675 return val;
2678 /* Prefer instructions that occur earlier in the model schedule. */
2679 if (sched_pressure == SCHED_PRESSURE_MODEL)
2681 int diff;
2683 diff = model_index (tmp) - model_index (tmp2);
2684 if (diff != 0)
2685 return diff;
2688 /* Prefer the insn which has more later insns that depend on it.
2689 This gives the scheduler more freedom when scheduling later
2690 instructions at the expense of added register pressure. */
2692 val = (dep_list_size (tmp2, SD_LIST_FORW)
2693 - dep_list_size (tmp, SD_LIST_FORW));
2695 if (flag_sched_dep_count_heuristic && val != 0)
2696 return val;
2698 /* If insns are equally good, sort by INSN_LUID (original insn order),
2699 so that we make the sort stable. This minimizes instruction movement,
2700 thus minimizing sched's effect on debugging and cross-jumping. */
2701 return INSN_LUID (tmp) - INSN_LUID (tmp2);
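/* Illustrative sketch (not part of GCC): a rank_for_schedule-style
   comparator over standalone toy elements.  Ties fall through to a
   unique luid, so the comparator never returns 0 and qsort cannot
   reorder "equal" elements, as the comment above requires.  */
#include <assert.h>
#include <stdlib.h>

struct toy_ready_insn { int luid; int priority; };

static int
toy_rank (const void *x, const void *y)
{
  const struct toy_ready_insn *a = (const struct toy_ready_insn *) x;
  const struct toy_ready_insn *b = (const struct toy_ready_insn *) y;

  /* Prefer the higher priority.  */
  if (a->priority != b->priority)
    return b->priority - a->priority;

  /* Equally good: keep the original (luid) order so the sort is stable.  */
  return a->luid - b->luid;
}

static void
toy_rank_example (void)
{
  struct toy_ready_insn insns[] = { { 1, 5 }, { 2, 7 }, { 3, 5 } };
  qsort (insns, 3, sizeof insns[0], toy_rank);
  assert (insns[0].luid == 2);			/* highest priority first */
  assert (insns[1].luid == 1 && insns[2].luid == 3); /* luid breaks the tie */
}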
2704 /* Resort the array A of length N, in which only the last element may be out of order.  */
2706 HAIFA_INLINE static void
2707 swap_sort (rtx *a, int n)
2709 rtx insn = a[n - 1];
2710 int i = n - 2;
2712 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
2714 a[i + 1] = a[i];
2715 i -= 1;
2717 a[i + 1] = insn;
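/* Illustrative sketch (not part of GCC): swap_sort's single-element
   insertion over a plain int array ordered with the best element last,
   using a hypothetical "ranks worse than X" comparison in place of
   rank_for_schedule.  Only the final element may be out of place.  */
#include <assert.h>

static void
toy_swap_sort (int *a, int n)
{
  int x = a[n - 1];
  int i = n - 2;

  while (i >= 0 && a[i] > x)
    {
      a[i + 1] = a[i];
      i--;
    }
  a[i + 1] = x;
}

static void
toy_swap_sort_example (void)
{
  int a[] = { 1, 4, 7, 9, 3 };	/* only the trailing 3 is out of order */
  toy_swap_sort (a, 5);
  assert (a[1] == 3 && a[4] == 9);
}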
2720 /* Add INSN to the insn queue so that it can be executed at least
2721 N_CYCLES after the currently executing insn. Preserve insns
2722 chain for debugging purposes. REASON will be printed in debugging
2723 output. */
2725 HAIFA_INLINE static void
2726 queue_insn (rtx insn, int n_cycles, const char *reason)
2728 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
2729 rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
2730 int new_tick;
2732 gcc_assert (n_cycles <= max_insn_queue_index);
2733 gcc_assert (!DEBUG_INSN_P (insn));
2735 insn_queue[next_q] = link;
2736 q_size += 1;
2738 if (sched_verbose >= 2)
2740 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
2741 (*current_sched_info->print_insn) (insn, 0));
2743 fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
2746 QUEUE_INDEX (insn) = next_q;
2748 if (current_sched_info->flags & DO_BACKTRACKING)
2750 new_tick = clock_var + n_cycles;
2751 if (INSN_TICK (insn) == INVALID_TICK || INSN_TICK (insn) < new_tick)
2752 INSN_TICK (insn) = new_tick;
2754 if (INSN_EXACT_TICK (insn) != INVALID_TICK
2755 && INSN_EXACT_TICK (insn) < clock_var + n_cycles)
2757 must_backtrack = true;
2758 if (sched_verbose >= 2)
2759 fprintf (sched_dump, ";;\t\tcausing a backtrack.\n");
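/* Illustrative sketch (not part of GCC): the kind of circular insn queue
   that queue_insn indexes with NEXT_Q_AFTER, assuming a hypothetical
   queue whose length is a power of two so that wrapping can use a
   bitmask.  Each slot stands for the insns that become ready that many
   cycles from now; here a slot just holds a count.  */
#include <assert.h>

#define TOY_QUEUE_LEN 8				/* must be a power of two */
#define TOY_NEXT_Q_AFTER(q, n) (((q) + (n)) & (TOY_QUEUE_LEN - 1))

static void
toy_queue_example (void)
{
  int queue[TOY_QUEUE_LEN] = { 0 };
  int q_ptr = 6;				/* slot for the current cycle */

  /* Queue an insn three cycles ahead: the index wraps around.  */
  int slot = TOY_NEXT_Q_AFTER (q_ptr, 3);
  queue[slot]++;
  assert (slot == 1);

  /* Advancing a cycle just moves q_ptr forward one slot.  */
  q_ptr = TOY_NEXT_Q_AFTER (q_ptr, 1);
  assert (q_ptr == 7);
}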
2764 /* Remove INSN from queue. */
2765 static void
2766 queue_remove (rtx insn)
2768 gcc_assert (QUEUE_INDEX (insn) >= 0);
2769 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
2770 q_size--;
2771 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
2774 /* Return a pointer to the bottom of the ready list, i.e. the insn
2775 with the lowest priority. */
2777 rtx *
2778 ready_lastpos (struct ready_list *ready)
2780 gcc_assert (ready->n_ready >= 1);
2781 return ready->vec + ready->first - ready->n_ready + 1;
2784 /* Add an element INSN to the ready list so that it ends up with the
2785 lowest/highest priority depending on FIRST_P. */
2787 HAIFA_INLINE static void
2788 ready_add (struct ready_list *ready, rtx insn, bool first_p)
2790 if (!first_p)
2792 if (ready->first == ready->n_ready)
2794 memmove (ready->vec + ready->veclen - ready->n_ready,
2795 ready_lastpos (ready),
2796 ready->n_ready * sizeof (rtx));
2797 ready->first = ready->veclen - 1;
2799 ready->vec[ready->first - ready->n_ready] = insn;
2801 else
2803 if (ready->first == ready->veclen - 1)
2805 if (ready->n_ready)
2806 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
2807 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
2808 ready_lastpos (ready),
2809 ready->n_ready * sizeof (rtx));
2810 ready->first = ready->veclen - 2;
2812 ready->vec[++(ready->first)] = insn;
2815 ready->n_ready++;
2816 if (DEBUG_INSN_P (insn))
2817 ready->n_debug++;
2819 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
2820 QUEUE_INDEX (insn) = QUEUE_READY;
2822 if (INSN_EXACT_TICK (insn) != INVALID_TICK
2823 && INSN_EXACT_TICK (insn) < clock_var)
2825 must_backtrack = true;
2829 /* Remove the element with the highest priority from the ready list and
2830 return it. */
2832 HAIFA_INLINE static rtx
2833 ready_remove_first (struct ready_list *ready)
2835 rtx t;
2837 gcc_assert (ready->n_ready);
2838 t = ready->vec[ready->first--];
2839 ready->n_ready--;
2840 if (DEBUG_INSN_P (t))
2841 ready->n_debug--;
2842 /* If the queue becomes empty, reset it. */
2843 if (ready->n_ready == 0)
2844 ready->first = ready->veclen - 1;
2846 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
2847 QUEUE_INDEX (t) = QUEUE_NOWHERE;
2849 return t;
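/* Illustrative sketch (not part of GCC): the ready-list layout assumed
   by ready_lastpos, ready_add and ready_remove_first above, using ints
   instead of insns.  The highest-priority element lives at vec[first]
   and the lowest at vec[first - n_ready + 1].  The "add with lowest
   priority" helper assumes there is still room below the existing
   elements, which the real ready_add guarantees with a memmove.  */
#include <assert.h>

struct toy_ready { int vec[8]; int veclen; int first; int n_ready; };

static void
toy_ready_add_low (struct toy_ready *r, int x)
{
  r->vec[r->first - r->n_ready] = x;
  r->n_ready++;
}

static int
toy_ready_remove_first (struct toy_ready *r)
{
  int x = r->vec[r->first--];
  r->n_ready--;
  return x;
}

static void
toy_ready_example (void)
{
  struct toy_ready r = { { 0 }, 8, 7, 0 };
  toy_ready_add_low (&r, 30);			/* goes to vec[7] */
  toy_ready_add_low (&r, 20);			/* goes to vec[6] */
  toy_ready_add_low (&r, 10);			/* goes to vec[5] */
  assert (toy_ready_remove_first (&r) == 30);
  /* What ready_lastpos would return: the lowest-priority element.  */
  assert (&r.vec[r.first - r.n_ready + 1] == &r.vec[5]);
}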
2852 /* The following code implements multi-pass scheduling for the first
2853    cycle.  In other words, we try to choose the ready insn that
2854    permits the maximum number of insns to start on the same cycle.  */
2856 /* Return a pointer to element INDEX of the ready list.  The insn
2857    with the highest priority has INDEX 0, and the insn with the
2858    lowest priority has INDEX N_READY - 1.  */
2861 ready_element (struct ready_list *ready, int index)
2863 gcc_assert (ready->n_ready && index < ready->n_ready);
2865 return ready->vec[ready->first - index];
2868 /* Remove element INDEX from the ready list and return it.  The insn
2869    with the highest priority has INDEX 0, and the insn with the
2870    lowest priority has INDEX N_READY - 1.  */
2872 HAIFA_INLINE static rtx
2873 ready_remove (struct ready_list *ready, int index)
2875 rtx t;
2876 int i;
2878 if (index == 0)
2879 return ready_remove_first (ready);
2880 gcc_assert (ready->n_ready && index < ready->n_ready);
2881 t = ready->vec[ready->first - index];
2882 ready->n_ready--;
2883 if (DEBUG_INSN_P (t))
2884 ready->n_debug--;
2885 for (i = index; i < ready->n_ready; i++)
2886 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
2887 QUEUE_INDEX (t) = QUEUE_NOWHERE;
2888 return t;
2891 /* Remove INSN from the ready list. */
2892 static void
2893 ready_remove_insn (rtx insn)
2895 int i;
2897 for (i = 0; i < readyp->n_ready; i++)
2898 if (ready_element (readyp, i) == insn)
2900 ready_remove (readyp, i);
2901 return;
2903 gcc_unreachable ();
2906 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
2907 macro. */
2909 void
2910 ready_sort (struct ready_list *ready)
2912 int i;
2913 rtx *first = ready_lastpos (ready);
2915 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2917 for (i = 0; i < ready->n_ready; i++)
2918 if (!DEBUG_INSN_P (first[i]))
2919 setup_insn_reg_pressure_info (first[i]);
2921 if (sched_pressure == SCHED_PRESSURE_MODEL
2922 && model_curr_point < model_num_insns)
2923 model_set_excess_costs (first, ready->n_ready);
2924 SCHED_SORT (first, ready->n_ready);
2927 /* PREV is an insn that is ready to execute. Adjust its priority if that
2928 will help shorten or lengthen register lifetimes as appropriate. Also
2929 provide a hook for the target to tweak itself. */
2931 HAIFA_INLINE static void
2932 adjust_priority (rtx prev)
2934 /* ??? There used to be code here to try and estimate how an insn
2935 affected register lifetimes, but it did it by looking at REG_DEAD
2936 notes, which we removed in schedule_region. Nor did it try to
2937 take into account register pressure or anything useful like that.
2939 Revisit when we have a machine model to work with and not before. */
2941 if (targetm.sched.adjust_priority)
2942 INSN_PRIORITY (prev) =
2943 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
2946 /* Advance DFA state STATE by one cycle.  */
2947 void
2948 advance_state (state_t state)
2950 if (targetm.sched.dfa_pre_advance_cycle)
2951 targetm.sched.dfa_pre_advance_cycle ();
2953 if (targetm.sched.dfa_pre_cycle_insn)
2954 state_transition (state,
2955 targetm.sched.dfa_pre_cycle_insn ());
2957 state_transition (state, NULL);
2959 if (targetm.sched.dfa_post_cycle_insn)
2960 state_transition (state,
2961 targetm.sched.dfa_post_cycle_insn ());
2963 if (targetm.sched.dfa_post_advance_cycle)
2964 targetm.sched.dfa_post_advance_cycle ();
2967 /* Advance time by one cycle.  */
2968 HAIFA_INLINE static void
2969 advance_one_cycle (void)
2971 advance_state (curr_state);
2972 if (sched_verbose >= 4)
2973 fprintf (sched_dump, ";;\tAdvanced a state.\n");
2976 /* Update register pressure after scheduling INSN. */
2977 static void
2978 update_register_pressure (rtx insn)
2980 struct reg_use_data *use;
2981 struct reg_set_data *set;
2983 gcc_checking_assert (!DEBUG_INSN_P (insn));
2985 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2986 if (dying_use_p (use))
2987 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
2988 use->regno, false);
2989 for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
2990 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
2991 set->regno, true);
2994 /* Set up or update (if UPDATE_P) max register pressure (see its
2995 meaning in sched-int.h::_haifa_insn_data) for all current BB insns
2996 after insn AFTER. */
2997 static void
2998 setup_insn_max_reg_pressure (rtx after, bool update_p)
3000 int i, p;
3001 bool eq_p;
3002 rtx insn;
3003 static int max_reg_pressure[N_REG_CLASSES];
3005 save_reg_pressure ();
3006 for (i = 0; i < ira_pressure_classes_num; i++)
3007 max_reg_pressure[ira_pressure_classes[i]]
3008 = curr_reg_pressure[ira_pressure_classes[i]];
3009 for (insn = NEXT_INSN (after);
3010 insn != NULL_RTX && ! BARRIER_P (insn)
3011 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
3012 insn = NEXT_INSN (insn))
3013 if (NONDEBUG_INSN_P (insn))
3015 eq_p = true;
3016 for (i = 0; i < ira_pressure_classes_num; i++)
3018 p = max_reg_pressure[ira_pressure_classes[i]];
3019 if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
3021 eq_p = false;
3022 INSN_MAX_REG_PRESSURE (insn)[i]
3023 = max_reg_pressure[ira_pressure_classes[i]];
3026 if (update_p && eq_p)
3027 break;
3028 update_register_pressure (insn);
3029 for (i = 0; i < ira_pressure_classes_num; i++)
3030 if (max_reg_pressure[ira_pressure_classes[i]]
3031 < curr_reg_pressure[ira_pressure_classes[i]])
3032 max_reg_pressure[ira_pressure_classes[i]]
3033 = curr_reg_pressure[ira_pressure_classes[i]];
3035 restore_reg_pressure ();
3038 /* Update the current register pressure after scheduling INSN. Update
3039 also max register pressure for unscheduled insns of the current
3040 BB. */
3041 static void
3042 update_reg_and_insn_max_reg_pressure (rtx insn)
3044 int i;
3045 int before[N_REG_CLASSES];
3047 for (i = 0; i < ira_pressure_classes_num; i++)
3048 before[i] = curr_reg_pressure[ira_pressure_classes[i]];
3049 update_register_pressure (insn);
3050 for (i = 0; i < ira_pressure_classes_num; i++)
3051 if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
3052 break;
3053 if (i < ira_pressure_classes_num)
3054 setup_insn_max_reg_pressure (insn, true);
3057 /* Set up register pressure at the beginning of basic block BB whose
3058    insns start after insn AFTER.  Also set up the max register pressure
3059 for all insns of the basic block. */
3060 void
3061 sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
3063 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
3064 initiate_bb_reg_pressure_info (bb);
3065 setup_insn_max_reg_pressure (after, false);
3068 /* If doing predication while scheduling, verify whether INSN, which
3069 has just been scheduled, clobbers the conditions of any
3070 instructions that must be predicated in order to break their
3071 dependencies. If so, remove them from the queues so that they will
3072 only be scheduled once their control dependency is resolved. */
3074 static void
3075 check_clobbered_conditions (rtx insn)
3077 HARD_REG_SET t;
3078 int i;
3080 if ((current_sched_info->flags & DO_PREDICATION) == 0)
3081 return;
3083 find_all_hard_reg_sets (insn, &t, true);
3085 restart:
3086 for (i = 0; i < ready.n_ready; i++)
3088 rtx x = ready_element (&ready, i);
3089 if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
3091 ready_remove_insn (x);
3092 goto restart;
3095 for (i = 0; i <= max_insn_queue_index; i++)
3097 rtx link;
3098 int q = NEXT_Q_AFTER (q_ptr, i);
3100 restart_queue:
3101 for (link = insn_queue[q]; link; link = XEXP (link, 1))
3103 rtx x = XEXP (link, 0);
3104 if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
3106 queue_remove (x);
3107 goto restart_queue;
3113 /* Return (in order):
3115 - positive if INSN adversely affects the pressure on one
3116 register class
3118 - negative if INSN reduces the pressure on one register class
3120 - 0 if INSN doesn't affect the pressure on any register class. */
3122 static int
3123 model_classify_pressure (struct model_insn_info *insn)
3125 struct reg_pressure_data *reg_pressure;
3126 int death[N_REG_CLASSES];
3127 int pci, cl, sum;
3129 calculate_reg_deaths (insn->insn, death);
3130 reg_pressure = INSN_REG_PRESSURE (insn->insn);
3131 sum = 0;
3132 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3134 cl = ira_pressure_classes[pci];
3135 if (death[cl] < reg_pressure[pci].set_increase)
3136 return 1;
3137 sum += reg_pressure[pci].set_increase - death[cl];
3139 return sum;
3142 /* Return true if INSN1 should come before INSN2 in the model schedule. */
3144 static int
3145 model_order_p (struct model_insn_info *insn1, struct model_insn_info *insn2)
3147 unsigned int height1, height2;
3148 unsigned int priority1, priority2;
3150 /* Prefer instructions with a higher model priority. */
3151 if (insn1->model_priority != insn2->model_priority)
3152 return insn1->model_priority > insn2->model_priority;
3154 /* Combine the length of the longest path of satisfied true dependencies
3155 that leads to each instruction (depth) with the length of the longest
3156 path of any dependencies that leads from the instruction (alap).
3157 Prefer instructions with the greatest combined length. If the combined
3158 lengths are equal, prefer instructions with the greatest depth.
3160 The idea is that, if we have a set S of "equal" instructions that each
3161 have ALAP value X, and we pick one such instruction I, any true-dependent
3162 successors of I that have ALAP value X - 1 should be preferred over S.
3163 This encourages the schedule to be "narrow" rather than "wide".
3164 However, if I is a low-priority instruction that we decided to
3165 schedule because of its model_classify_pressure, and if there
3166 is a set of higher-priority instructions T, the aforementioned
3167 successors of I should not have the edge over T. */
3168 height1 = insn1->depth + insn1->alap;
3169 height2 = insn2->depth + insn2->alap;
3170 if (height1 != height2)
3171 return height1 > height2;
3172 if (insn1->depth != insn2->depth)
3173 return insn1->depth > insn2->depth;
3175   /* We have no real preference between INSN1 and INSN2 as far as attempts
3176 to reduce pressure go. Prefer instructions with higher priorities. */
3177 priority1 = INSN_PRIORITY (insn1->insn);
3178 priority2 = INSN_PRIORITY (insn2->insn);
3179 if (priority1 != priority2)
3180 return priority1 > priority2;
3182 /* Use the original rtl sequence as a tie-breaker. */
3183 return insn1 < insn2;
3186 /* Add INSN to the model worklist immediately after PREV. Add it to the
3187 beginning of the list if PREV is null. */
3189 static void
3190 model_add_to_worklist_at (struct model_insn_info *insn,
3191 struct model_insn_info *prev)
3193 gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_NOWHERE);
3194 QUEUE_INDEX (insn->insn) = QUEUE_READY;
3196 insn->prev = prev;
3197 if (prev)
3199 insn->next = prev->next;
3200 prev->next = insn;
3202 else
3204 insn->next = model_worklist;
3205 model_worklist = insn;
3207 if (insn->next)
3208 insn->next->prev = insn;
3211 /* Remove INSN from the model worklist. */
3213 static void
3214 model_remove_from_worklist (struct model_insn_info *insn)
3216 gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_READY);
3217 QUEUE_INDEX (insn->insn) = QUEUE_NOWHERE;
3219 if (insn->prev)
3220 insn->prev->next = insn->next;
3221 else
3222 model_worklist = insn->next;
3223 if (insn->next)
3224 insn->next->prev = insn->prev;
3227 /* Add INSN to the model worklist. Start looking for a suitable position
3228 between neighbors PREV and NEXT, testing at most MAX_SCHED_READY_INSNS
3229 insns either side. A null PREV indicates the beginning of the list and
3230 a null NEXT indicates the end. */
3232 static void
3233 model_add_to_worklist (struct model_insn_info *insn,
3234 struct model_insn_info *prev,
3235 struct model_insn_info *next)
3237 int count;
3239 count = MAX_SCHED_READY_INSNS;
3240 if (count > 0 && prev && model_order_p (insn, prev))
3243 count--;
3244 prev = prev->prev;
3246 while (count > 0 && prev && model_order_p (insn, prev));
3247 else
3248 while (count > 0 && next && model_order_p (next, insn))
3250 count--;
3251 prev = next;
3252 next = next->next;
3254 model_add_to_worklist_at (insn, prev);
3257 /* INSN may now have a higher priority (in the model_order_p sense)
3258 than before. Move it up the worklist if necessary. */
3260 static void
3261 model_promote_insn (struct model_insn_info *insn)
3263 struct model_insn_info *prev;
3264 int count;
3266 prev = insn->prev;
3267 count = MAX_SCHED_READY_INSNS;
3268 while (count > 0 && prev && model_order_p (insn, prev))
3270 count--;
3271 prev = prev->prev;
3273 if (prev != insn->prev)
3275 model_remove_from_worklist (insn);
3276 model_add_to_worklist_at (insn, prev);
3280 /* Add INSN to the end of the model schedule. */
3282 static void
3283 model_add_to_schedule (rtx insn)
3285 unsigned int point;
3287 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
3288 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
3290 point = model_schedule.length ();
3291 model_schedule.quick_push (insn);
3292 INSN_MODEL_INDEX (insn) = point + 1;
3295 /* Analyze the instructions that are to be scheduled, setting up
3296 MODEL_INSN_INFO (...) and model_num_insns accordingly. Add ready
3297 instructions to model_worklist. */
3299 static void
3300 model_analyze_insns (void)
3302 rtx start, end, iter;
3303 sd_iterator_def sd_it;
3304 dep_t dep;
3305 struct model_insn_info *insn, *con;
3307 model_num_insns = 0;
3308 start = PREV_INSN (current_sched_info->next_tail);
3309 end = current_sched_info->prev_head;
3310 for (iter = start; iter != end; iter = PREV_INSN (iter))
3311 if (NONDEBUG_INSN_P (iter))
3313 insn = MODEL_INSN_INFO (iter);
3314 insn->insn = iter;
3315 FOR_EACH_DEP (iter, SD_LIST_FORW, sd_it, dep)
3317 con = MODEL_INSN_INFO (DEP_CON (dep));
3318 if (con->insn && insn->alap < con->alap + 1)
3319 insn->alap = con->alap + 1;
3322 insn->old_queue = QUEUE_INDEX (iter);
3323 QUEUE_INDEX (iter) = QUEUE_NOWHERE;
3325 insn->unscheduled_preds = dep_list_size (iter, SD_LIST_HARD_BACK);
3326 if (insn->unscheduled_preds == 0)
3327 model_add_to_worklist (insn, NULL, model_worklist);
3329 model_num_insns++;
3333 /* The global state describes the register pressure at the start of the
3334 model schedule. Initialize GROUP accordingly. */
3336 static void
3337 model_init_pressure_group (struct model_pressure_group *group)
3339 int pci, cl;
3341 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3343 cl = ira_pressure_classes[pci];
3344 group->limits[pci].pressure = curr_reg_pressure[cl];
3345 group->limits[pci].point = 0;
3347 /* Use index model_num_insns to record the state after the last
3348 instruction in the model schedule. */
3349 group->model = XNEWVEC (struct model_pressure_data,
3350 (model_num_insns + 1) * ira_pressure_classes_num);
3353 /* Record that MODEL_REF_PRESSURE (GROUP, POINT, PCI) is PRESSURE.
3354 Update the maximum pressure for the whole schedule. */
3356 static void
3357 model_record_pressure (struct model_pressure_group *group,
3358 int point, int pci, int pressure)
3360 MODEL_REF_PRESSURE (group, point, pci) = pressure;
3361 if (group->limits[pci].pressure < pressure)
3363 group->limits[pci].pressure = pressure;
3364 group->limits[pci].point = point;
3368 /* INSN has just been added to the end of the model schedule. Record its
3369 register-pressure information. */
3371 static void
3372 model_record_pressures (struct model_insn_info *insn)
3374 struct reg_pressure_data *reg_pressure;
3375 int point, pci, cl, delta;
3376 int death[N_REG_CLASSES];
3378 point = model_index (insn->insn);
3379 if (sched_verbose >= 2)
3381 if (point == 0)
3383 fprintf (sched_dump, "\n;;\tModel schedule:\n;;\n");
3384 fprintf (sched_dump, ";;\t| idx insn | mpri hght dpth prio |\n");
3386 fprintf (sched_dump, ";;\t| %3d %4d | %4d %4d %4d %4d | %-30s ",
3387 point, INSN_UID (insn->insn), insn->model_priority,
3388 insn->depth + insn->alap, insn->depth,
3389 INSN_PRIORITY (insn->insn),
3390 str_pattern_slim (PATTERN (insn->insn)));
3392 calculate_reg_deaths (insn->insn, death);
3393 reg_pressure = INSN_REG_PRESSURE (insn->insn);
3394 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3396 cl = ira_pressure_classes[pci];
3397 delta = reg_pressure[pci].set_increase - death[cl];
3398 if (sched_verbose >= 2)
3399 fprintf (sched_dump, " %s:[%d,%+d]", reg_class_names[cl],
3400 curr_reg_pressure[cl], delta);
3401 model_record_pressure (&model_before_pressure, point, pci,
3402 curr_reg_pressure[cl]);
3404 if (sched_verbose >= 2)
3405 fprintf (sched_dump, "\n");
3408 /* All instructions have been added to the model schedule. Record the
3409 final register pressure in GROUP and set up all MODEL_MAX_PRESSUREs. */
3411 static void
3412 model_record_final_pressures (struct model_pressure_group *group)
3414 int point, pci, max_pressure, ref_pressure, cl;
3416 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3418 /* Record the final pressure for this class. */
3419 cl = ira_pressure_classes[pci];
3420 point = model_num_insns;
3421 ref_pressure = curr_reg_pressure[cl];
3422 model_record_pressure (group, point, pci, ref_pressure);
3424 /* Record the original maximum pressure. */
3425 group->limits[pci].orig_pressure = group->limits[pci].pressure;
3427 /* Update the MODEL_MAX_PRESSURE for every point of the schedule. */
3428 max_pressure = ref_pressure;
3429 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
3430 while (point > 0)
3432 point--;
3433 ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
3434 max_pressure = MAX (max_pressure, ref_pressure);
3435 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
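/* Illustrative sketch (not part of GCC): the backward suffix-maximum
   walk performed by model_record_final_pressures, reduced to a plain
   int array of per-point pressures for one hypothetical class.  */
#include <assert.h>

static void
toy_fill_max_pressure (const int *ref, int *max, int num_points)
{
  int running = ref[num_points];	/* pressure at the end of the schedule */
  max[num_points] = running;
  for (int point = num_points - 1; point >= 0; point--)
    {
      if (ref[point] > running)
	running = ref[point];
      max[point] = running;
    }
}

static void
toy_max_pressure_example (void)
{
  int ref[] = { 2, 5, 3, 4, 1 };	/* index 4 = end of schedule */
  int max[5];
  toy_fill_max_pressure (ref, max, 4);
  assert (max[0] == 5 && max[2] == 4 && max[4] == 1);
}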
3440 /* Update all successors of INSN, given that INSN has just been scheduled. */
3442 static void
3443 model_add_successors_to_worklist (struct model_insn_info *insn)
3445 sd_iterator_def sd_it;
3446 struct model_insn_info *con;
3447 dep_t dep;
3449 FOR_EACH_DEP (insn->insn, SD_LIST_FORW, sd_it, dep)
3451 con = MODEL_INSN_INFO (DEP_CON (dep));
3452 /* Ignore debug instructions, and instructions from other blocks. */
3453 if (con->insn)
3455 con->unscheduled_preds--;
3457 /* Update the depth field of each true-dependent successor.
3458 Increasing the depth gives them a higher priority than
3459 before. */
3460 if (DEP_TYPE (dep) == REG_DEP_TRUE && con->depth < insn->depth + 1)
3462 con->depth = insn->depth + 1;
3463 if (QUEUE_INDEX (con->insn) == QUEUE_READY)
3464 model_promote_insn (con);
3467 /* If this is a true dependency, or if there are no remaining
3468 dependencies for CON (meaning that CON only had non-true
3469 dependencies), make sure that CON is on the worklist.
3470 We don't bother otherwise because it would tend to fill the
3471 worklist with a lot of low-priority instructions that are not
3472 yet ready to issue. */
3473 if ((con->depth > 0 || con->unscheduled_preds == 0)
3474 && QUEUE_INDEX (con->insn) == QUEUE_NOWHERE)
3475 model_add_to_worklist (con, insn, insn->next);
3480 /* Give INSN a higher priority than any current instruction, then give
3481 unscheduled predecessors of INSN a higher priority still. If any of
3482    those predecessors are not on the model worklist, do the same for their
3483 predecessors, and so on. */
3485 static void
3486 model_promote_predecessors (struct model_insn_info *insn)
3488 struct model_insn_info *pro, *first;
3489 sd_iterator_def sd_it;
3490 dep_t dep;
3492 if (sched_verbose >= 7)
3493 fprintf (sched_dump, ";;\t+--- priority of %d = %d, priority of",
3494 INSN_UID (insn->insn), model_next_priority);
3495 insn->model_priority = model_next_priority++;
3496 model_remove_from_worklist (insn);
3497 model_add_to_worklist_at (insn, NULL);
3499 first = NULL;
3500 for (;;)
3502 FOR_EACH_DEP (insn->insn, SD_LIST_HARD_BACK, sd_it, dep)
3504 pro = MODEL_INSN_INFO (DEP_PRO (dep));
3505 /* The first test is to ignore debug instructions, and instructions
3506 from other blocks. */
3507 if (pro->insn
3508 && pro->model_priority != model_next_priority
3509 && QUEUE_INDEX (pro->insn) != QUEUE_SCHEDULED)
3511 pro->model_priority = model_next_priority;
3512 if (sched_verbose >= 7)
3513 fprintf (sched_dump, " %d", INSN_UID (pro->insn));
3514 if (QUEUE_INDEX (pro->insn) == QUEUE_READY)
3516 /* PRO is already in the worklist, but it now has
3517 		     a higher priority than before.  Move it to the
3518 appropriate place. */
3519 model_remove_from_worklist (pro);
3520 model_add_to_worklist (pro, NULL, model_worklist);
3522 else
3524 /* PRO isn't in the worklist. Recursively process
3525 its predecessors until we find one that is. */
3526 pro->next = first;
3527 first = pro;
3531 if (!first)
3532 break;
3533 insn = first;
3534 first = insn->next;
3536 if (sched_verbose >= 7)
3537 fprintf (sched_dump, " = %d\n", model_next_priority);
3538 model_next_priority++;
3541 /* Pick one instruction from model_worklist and process it. */
3543 static void
3544 model_choose_insn (void)
3546 struct model_insn_info *insn, *fallback;
3547 int count;
3549 if (sched_verbose >= 7)
3551 fprintf (sched_dump, ";;\t+--- worklist:\n");
3552 insn = model_worklist;
3553 count = MAX_SCHED_READY_INSNS;
3554 while (count > 0 && insn)
3556 fprintf (sched_dump, ";;\t+--- %d [%d, %d, %d, %d]\n",
3557 INSN_UID (insn->insn), insn->model_priority,
3558 insn->depth + insn->alap, insn->depth,
3559 INSN_PRIORITY (insn->insn));
3560 count--;
3561 insn = insn->next;
3565   /* Look for a ready instruction whose model_classify_pressure is zero
3566 or negative, picking the highest-priority one. Adding such an
3567 instruction to the schedule now should do no harm, and may actually
3568 do some good.
3570 Failing that, see whether there is an instruction with the highest
3571 extant model_priority that is not yet ready, but which would reduce
3572 pressure if it became ready. This is designed to catch cases like:
3574 (set (mem (reg R1)) (reg R2))
3576 where the instruction is the last remaining use of R1 and where the
3577 value of R2 is not yet available (or vice versa). The death of R1
3578 means that this instruction already reduces pressure. It is of
3579 course possible that the computation of R2 involves other registers
3580 that are hard to kill, but such cases are rare enough for this
3581 heuristic to be a win in general.
3583 Failing that, just pick the highest-priority instruction in the
3584 worklist. */
3585 count = MAX_SCHED_READY_INSNS;
3586 insn = model_worklist;
3587 fallback = 0;
3588 for (;;)
3590 if (count == 0 || !insn)
3592 insn = fallback ? fallback : model_worklist;
3593 break;
3595 if (insn->unscheduled_preds)
3597 if (model_worklist->model_priority == insn->model_priority
3598 && !fallback
3599 && model_classify_pressure (insn) < 0)
3600 fallback = insn;
3602 else
3604 if (model_classify_pressure (insn) <= 0)
3605 break;
3607 count--;
3608 insn = insn->next;
3611 if (sched_verbose >= 7 && insn != model_worklist)
3613 if (insn->unscheduled_preds)
3614 fprintf (sched_dump, ";;\t+--- promoting insn %d, with dependencies\n",
3615 INSN_UID (insn->insn));
3616 else
3617 fprintf (sched_dump, ";;\t+--- promoting insn %d, which is ready\n",
3618 INSN_UID (insn->insn));
3620 if (insn->unscheduled_preds)
3621 /* INSN isn't yet ready to issue. Give all its predecessors the
3622 highest priority. */
3623 model_promote_predecessors (insn);
3624 else
3626 /* INSN is ready. Add it to the end of model_schedule and
3627 process its successors. */
3628 model_add_successors_to_worklist (insn);
3629 model_remove_from_worklist (insn);
3630 model_add_to_schedule (insn->insn);
3631 model_record_pressures (insn);
3632 update_register_pressure (insn->insn);
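/* The selection logic above, condensed into a sketch (simplified, not a
   drop-in replacement for the loop it summarizes):

     for the first MAX_SCHED_READY_INSNS worklist entries, in order:
       if the entry is ready and model_classify_pressure () <= 0,
         pick it;                          -- harmless or helpful now
       else if it is not ready, has the same model_priority as the
         worklist head, and model_classify_pressure () < 0,
         remember the first such entry as a fallback;  -- would cut pressure
     pick the fallback if one was found, otherwise the worklist head.  */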
3636 /* Restore all QUEUE_INDEXs to the values that they had before
3637 model_start_schedule was called. */
3639 static void
3640 model_reset_queue_indices (void)
3642 unsigned int i;
3643 rtx insn;
3645 FOR_EACH_VEC_ELT (model_schedule, i, insn)
3646 QUEUE_INDEX (insn) = MODEL_INSN_INFO (insn)->old_queue;
3649 /* We have calculated the model schedule and spill costs. Print a summary
3650 to sched_dump. */
3652 static void
3653 model_dump_pressure_summary (void)
3655 int pci, cl;
3657 fprintf (sched_dump, ";; Pressure summary:");
3658 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3660 cl = ira_pressure_classes[pci];
3661 fprintf (sched_dump, " %s:%d", reg_class_names[cl],
3662 model_before_pressure.limits[pci].pressure);
3664 fprintf (sched_dump, "\n\n");
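/* With scheduling dumps enabled the summary printed above might look like
   (the class names are target-dependent; these are only illustrative):

     ;; Pressure summary: GENERAL_REGS:14 FP_REGS:6

   i.e. roughly the highest pressure recorded for each IRA pressure class
   over the model schedule.  */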
3667 /* Initialize the SCHED_PRESSURE_MODEL information for the current
3668 scheduling region. */
3670 static void
3671 model_start_schedule (void)
3673 basic_block bb;
3675 model_next_priority = 1;
3676 model_schedule.create (sched_max_luid);
3677 model_insns = XCNEWVEC (struct model_insn_info, sched_max_luid);
3679 bb = BLOCK_FOR_INSN (NEXT_INSN (current_sched_info->prev_head));
3680 initiate_reg_pressure_info (df_get_live_in (bb));
3682 model_analyze_insns ();
3683 model_init_pressure_group (&model_before_pressure);
3684 while (model_worklist)
3685 model_choose_insn ();
3686 gcc_assert (model_num_insns == (int) model_schedule.length ());
3687 if (sched_verbose >= 2)
3688 fprintf (sched_dump, "\n");
3690 model_record_final_pressures (&model_before_pressure);
3691 model_reset_queue_indices ();
3693 XDELETEVEC (model_insns);
3695 model_curr_point = 0;
3696 initiate_reg_pressure_info (df_get_live_in (bb));
3697 if (sched_verbose >= 1)
3698 model_dump_pressure_summary ();
3701 /* Free the information associated with GROUP. */
3703 static void
3704 model_finalize_pressure_group (struct model_pressure_group *group)
3706 XDELETEVEC (group->model);
3709 /* Free the information created by model_start_schedule. */
3711 static void
3712 model_end_schedule (void)
3714 model_finalize_pressure_group (&model_before_pressure);
3715 model_schedule.release ();
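/* A sketch of the expected pairing (the real calls are made elsewhere,
   when sched_pressure == SCHED_PRESSURE_MODEL):

     model_start_schedule ();
     ... schedule the region, consulting model_curr_point ...
     model_end_schedule ();
   */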
3718 /* A structure that holds local state for the loop in schedule_block. */
3719 struct sched_block_state
3721 /* True if no real insns have been scheduled in the current cycle. */
3722 bool first_cycle_insn_p;
3723 /* True if a shadow insn has been scheduled in the current cycle, which
3724 means that no more normal insns can be issued. */
3725 bool shadows_only_p;
3726 /* True if we're winding down a modulo schedule, which means that we only
3727 issue insns with INSN_EXACT_TICK set. */
3728 bool modulo_epilogue;
3729 /* Initialized with the machine's issue rate every cycle, and updated
3730 by calls to the variable_issue hook. */
3731 int can_issue_more;
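/* A hedged sketch of how these fields are typically reset at the start of
   each cycle by the main scheduling loop (details vary; see
   schedule_block):

     ls.first_cycle_insn_p = true;
     ls.shadows_only_p = false;
     ls.can_issue_more = issue_rate;
   */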
3734 /* INSN is the "currently executing insn". Launch each insn which was
3735 waiting on INSN; such insns may become ready to fire or be queued
3736 for a later cycle. The function
3737 returns the necessary cycle advance after issuing the insn (it is not
3738 zero for insns in a schedule group). */
3740 static int
3741 schedule_insn (rtx insn)
3743 sd_iterator_def sd_it;
3744 dep_t dep;
3745 int i;
3746 int advance = 0;
3748 if (sched_verbose >= 1)
3750 struct reg_pressure_data *pressure_info;
3751 fprintf (sched_dump, ";;\t%3i--> %s %-40s:",
3752 clock_var, (*current_sched_info->print_insn) (insn, 1),
3753 str_pattern_slim (PATTERN (insn)));
3755 if (recog_memoized (insn) < 0)
3756 fprintf (sched_dump, "nothing");
3757 else
3758 print_reservation (sched_dump, insn);
3759 pressure_info = INSN_REG_PRESSURE (insn);
3760 if (pressure_info != NULL)
3762 fputc (':', sched_dump);
3763 for (i = 0; i < ira_pressure_classes_num; i++)
3764 fprintf (sched_dump, "%s%s%+d(%d)",
3765 scheduled_insns.length () > 1
3766 && INSN_LUID (insn)
3767 < INSN_LUID (scheduled_insns[scheduled_insns.length () - 2]) ? "@" : "",
3768 reg_class_names[ira_pressure_classes[i]],
3769 pressure_info[i].set_increase, pressure_info[i].change);
3771 if (sched_pressure == SCHED_PRESSURE_MODEL
3772 && model_curr_point < model_num_insns
3773 && model_index (insn) == model_curr_point)
3774 fprintf (sched_dump, ":model %d", model_curr_point);
3775 fputc ('\n', sched_dump);
3778 if (sched_pressure == SCHED_PRESSURE_WEIGHTED && !DEBUG_INSN_P (insn))
3779 update_reg_and_insn_max_reg_pressure (insn);
3781 /* The instruction being scheduled should have all its dependencies
3782 resolved and should have been removed from the ready list. */
3783 gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
3785 /* Reset debug insns invalidated by moving this insn. */
3786 if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
3787 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
3788 sd_iterator_cond (&sd_it, &dep);)
3790 rtx dbg = DEP_PRO (dep);
3791 struct reg_use_data *use, *next;
3793 if (DEP_STATUS (dep) & DEP_CANCELLED)
3795 sd_iterator_next (&sd_it);
3796 continue;
3799 gcc_assert (DEBUG_INSN_P (dbg));
3801 if (sched_verbose >= 6)
3802 fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
3803 INSN_UID (dbg));
3805 /* ??? Rather than resetting the debug insn, we might be able
3806 to emit a debug temp before the just-scheduled insn, but
3807 this would involve checking that the expression at the
3808 point of the debug insn is equivalent to the expression
3809 before the just-scheduled insn. They might not be: the
3810 expression in the debug insn may depend on other insns not
3811 yet scheduled that set MEMs, REGs or even other debug
3812 insns. It's not clear that attempting to preserve debug
3813 information in these cases is worth the effort, given how
3814 uncommon these resets are and the likelihood that the debug
3815 temps introduced won't survive the schedule change. */
3816 INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
3817 df_insn_rescan (dbg);
3819 /* Unknown location doesn't use any registers. */
3820 for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
3822 struct reg_use_data *prev = use;
3824 /* Remove use from the cyclic next_regno_use chain first. */
3825 while (prev->next_regno_use != use)
3826 prev = prev->next_regno_use;
3827 prev->next_regno_use = use->next_regno_use;
3828 next = use->next_insn_use;
3829 free (use);
3831 INSN_REG_USE_LIST (dbg) = NULL;
3833 /* We delete rather than resolve these deps, otherwise we
3834 crash in sched_free_deps(), because forward deps are
3835 expected to be released before backward deps. */
3836 sd_delete_dep (sd_it);
3839 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
3840 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
3842 if (sched_pressure == SCHED_PRESSURE_MODEL
3843 && model_curr_point < model_num_insns
3844 && NONDEBUG_INSN_P (insn))
3846 if (model_index (insn) == model_curr_point)
3848 model_curr_point++;
3849 while (model_curr_point < model_num_insns
3850 && (QUEUE_INDEX (MODEL_INSN (model_curr_point))
3851 == QUEUE_SCHEDULED));
3852 else
3853 model_recompute (insn);
3854 model_update_limit_points ();
3855 update_register_pressure (insn);
3856 if (sched_verbose >= 2)
3857 print_curr_reg_pressure ();
3860 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
3861 if (INSN_TICK (insn) > clock_var)
3862 /* INSN has been prematurely moved from the queue to the ready list.
3863 This is possible only if the following flag is set. */
3864 gcc_assert (flag_sched_stalled_insns);
3866 /* ??? Probably, if INSN is scheduled prematurely, we should leave
3867 INSN_TICK untouched. This is a machine-dependent issue, actually. */
3868 INSN_TICK (insn) = clock_var;
3870 check_clobbered_conditions (insn);
3872 /* Update dependent instructions. First, see if by scheduling this insn
3873 now we broke a dependence in a way that requires us to change another
3874 insn. */
3875 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3876 sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
3878 struct dep_replacement *desc = DEP_REPLACE (dep);
3879 rtx pro = DEP_PRO (dep);
3880 if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED
3881 && desc != NULL && desc->insn == pro)
3882 apply_replacement (dep, false);
3885 /* Go through and resolve forward dependencies. */
3886 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
3887 sd_iterator_cond (&sd_it, &dep);)
3889 rtx next = DEP_CON (dep);
3890 bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
3892 /* Resolve the dependence between INSN and NEXT.
3893 sd_resolve_dep () moves current dep to another list thus
3894 advancing the iterator. */
3895 sd_resolve_dep (sd_it);
3897 if (cancelled)
3899 if (must_restore_pattern_p (next, dep))
3900 restore_pattern (dep, false);
3901 continue;
3904 /* Don't bother trying to mark next as ready if insn is a debug
3905 insn. If insn is the last hard dependency, it will have
3906 already been discounted. */
3907 if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
3908 continue;
3910 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
3912 int effective_cost;
3914 effective_cost = try_ready (next);
3916 if (effective_cost >= 0
3917 && SCHED_GROUP_P (next)
3918 && advance < effective_cost)
3919 advance = effective_cost;
3921 else
3922 /* A check insn always has only one forward dependence (to the first insn in
3923 the recovery block), so this will be executed only once. */
3925 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
3926 fix_recovery_deps (RECOVERY_BLOCK (insn));
3930 /* Annotate the instruction with issue information -- TImode
3931 indicates that the instruction is expected not to be able
3932 to issue on the same cycle as the previous insn. A machine
3933 may use this information to decide how the instruction should
3934 be aligned. */
3935 if (issue_rate > 1
3936 && GET_CODE (PATTERN (insn)) != USE
3937 && GET_CODE (PATTERN (insn)) != CLOBBER
3938 && !DEBUG_INSN_P (insn))
3940 if (reload_completed)
3941 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
3942 last_clock_var = clock_var;
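/* A hypothetical consumer of this annotation (not part of this file): a
   backend can later test the mode to find expected cycle boundaries, e.g.

     if (GET_MODE (insn) == TImode)
       ... insn is expected to start a new cycle / dispatch group ...
   */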
3945 if (nonscheduled_insns_begin != NULL_RTX)
3946 /* Indicate to debug counters that INSN is scheduled. */
3947 nonscheduled_insns_begin = insn;
3949 return advance;
3952 /* Functions for handling of notes. */
3954 /* Add the note list that ends at FROM_END to the end of TO_ENDP. */
3955 void
3956 concat_note_lists (rtx from_end, rtx *to_endp)
3958 rtx from_start;
3960 /* It's easy when we have nothing to concat. */
3961 if (from_end == NULL)
3962 return;
3964 /* It's also easy when the destination is empty. */
3965 if (*to_endp == NULL)
3967 *to_endp = from_end;
3968 return;
3971 from_start = from_end;
3972 while (PREV_INSN (from_start) != NULL)
3973 from_start = PREV_INSN (from_start);
3975 PREV_INSN (from_start) = *to_endp;
3976 NEXT_INSN (*to_endp) = from_start;
3977 *to_endp = from_end;
3980 /* Delete notes between HEAD and TAIL and put them in the chain
3981 of notes ended by NOTE_LIST. */
3982 void
3983 remove_notes (rtx head, rtx tail)
3985 rtx next_tail, insn, next;
3987 note_list = 0;
3988 if (head == tail && !INSN_P (head))
3989 return;
3991 next_tail = NEXT_INSN (tail);
3992 for (insn = head; insn != next_tail; insn = next)
3994 next = NEXT_INSN (insn);
3995 if (!NOTE_P (insn))
3996 continue;
3998 switch (NOTE_KIND (insn))
4000 case NOTE_INSN_BASIC_BLOCK:
4001 continue;
4003 case NOTE_INSN_EPILOGUE_BEG:
4004 if (insn != tail)
4006 remove_insn (insn);
4007 add_reg_note (next, REG_SAVE_NOTE,
4008 GEN_INT (NOTE_INSN_EPILOGUE_BEG));
4009 break;
4011 /* FALLTHRU */
4013 default:
4014 remove_insn (insn);
4016 /* Add the note to the list that ends at NOTE_LIST. */
4017 PREV_INSN (insn) = note_list;
4018 NEXT_INSN (insn) = NULL_RTX;
4019 if (note_list)
4020 NEXT_INSN (note_list) = insn;
4021 note_list = insn;
4022 break;
4025 gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
4029 /* A structure to record enough data to allow us to backtrack the scheduler to
4030 a previous state. */
4031 struct haifa_saved_data
4033 /* Next entry on the list. */
4034 struct haifa_saved_data *next;
4036 /* Backtracking is associated with scheduling insns that have delay slots.
4037 DELAY_PAIR points to the structure that contains the insns involved, and
4038 the number of cycles between them. */
4039 struct delay_pair *delay_pair;
4041 /* Data used by the frontend (e.g. sched-ebb or sched-rgn). */
4042 void *fe_saved_data;
4043 /* Data used by the backend. */
4044 void *be_saved_data;
4046 /* Copies of global state. */
4047 int clock_var, last_clock_var;
4048 struct ready_list ready;
4049 state_t curr_state;
4051 rtx last_scheduled_insn;
4052 rtx last_nondebug_scheduled_insn;
4053 rtx nonscheduled_insns_begin;
4054 int cycle_issued_insns;
4056 /* Copies of state used in the inner loop of schedule_block. */
4057 struct sched_block_state sched_block;
4059 /* We don't need to save q_ptr, as its value is arbitrary and we can set it
4060 to 0 when restoring. */
4061 int q_size;
4062 rtx *insn_queue;
4064 /* Describe pattern replacements that occurred since this backtrack point
4065 was queued. */
4066 vec<dep_t> replacement_deps;
4067 vec<int> replace_apply;
4069 /* A copy of the next-cycle replacement vectors at the time of the backtrack
4070 point. */
4071 vec<dep_t> next_cycle_deps;
4072 vec<int> next_cycle_apply;
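/* The saved states form a simple LIFO.  Roughly (see the push in
   save_backtrack_point and the pop in restore_last_backtrack_point and
   free_topmost_backtrack_point):

     push:  save->next = backtrack_queue;  backtrack_queue = save;
     pop:   save = backtrack_queue;        backtrack_queue = save->next;

   so the scheduler always unwinds to the most recently saved state
   first.  */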
4075 /* A record, in reverse order, of all scheduled insns which have delay slots
4076 and may require backtracking. */
4077 static struct haifa_saved_data *backtrack_queue;
4079 /* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
4080 to SET_P. */
4081 static void
4082 mark_backtrack_feeds (rtx insn, int set_p)
4084 sd_iterator_def sd_it;
4085 dep_t dep;
4086 FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
4088 FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
4092 /* Save the current scheduler state so that we can backtrack to it
4093 later if necessary. PAIR gives the insns that make it necessary to
4094 save this point. SCHED_BLOCK is the local state of schedule_block
4095 that needs to be saved. */
4096 static void
4097 save_backtrack_point (struct delay_pair *pair,
4098 struct sched_block_state sched_block)
4100 int i;
4101 struct haifa_saved_data *save = XNEW (struct haifa_saved_data);
4103 save->curr_state = xmalloc (dfa_state_size);
4104 memcpy (save->curr_state, curr_state, dfa_state_size);
4106 save->ready.first = ready.first;
4107 save->ready.n_ready = ready.n_ready;
4108 save->ready.n_debug = ready.n_debug;
4109 save->ready.veclen = ready.veclen;
4110 save->ready.vec = XNEWVEC (rtx, ready.veclen);
4111 memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));
4113 save->insn_queue = XNEWVEC (rtx, max_insn_queue_index + 1);
4114 save->q_size = q_size;
4115 for (i = 0; i <= max_insn_queue_index; i++)
4117 int q = NEXT_Q_AFTER (q_ptr, i);
4118 save->insn_queue[i] = copy_INSN_LIST (insn_queue[q]);
4121 save->clock_var = clock_var;
4122 save->last_clock_var = last_clock_var;
4123 save->cycle_issued_insns = cycle_issued_insns;
4124 save->last_scheduled_insn = last_scheduled_insn;
4125 save->last_nondebug_scheduled_insn = last_nondebug_scheduled_insn;
4126 save->nonscheduled_insns_begin = nonscheduled_insns_begin;
4128 save->sched_block = sched_block;
4130 save->replacement_deps.create (0);
4131 save->replace_apply.create (0);
4132 save->next_cycle_deps = next_cycle_replace_deps.copy ();
4133 save->next_cycle_apply = next_cycle_apply.copy ();
4135 if (current_sched_info->save_state)
4136 save->fe_saved_data = (*current_sched_info->save_state) ();
4138 if (targetm.sched.alloc_sched_context)
4140 save->be_saved_data = targetm.sched.alloc_sched_context ();
4141 targetm.sched.init_sched_context (save->be_saved_data, false);
4143 else
4144 save->be_saved_data = NULL;
4146 save->delay_pair = pair;
4148 save->next = backtrack_queue;
4149 backtrack_queue = save;
4151 while (pair)
4153 mark_backtrack_feeds (pair->i2, 1);
4154 INSN_TICK (pair->i2) = INVALID_TICK;
4155 INSN_EXACT_TICK (pair->i2) = clock_var + pair_delay (pair);
4156 SHADOW_P (pair->i2) = pair->stages == 0;
4157 pair = pair->next_same_i1;
4161 /* Walk the ready list and all queues. If any insns have unresolved backwards
4162 dependencies, these must be cancelled deps, broken by predication. Set or
4163 clear (depending on SET) the DEP_CANCELLED bit in DEP_STATUS. */
4165 static void
4166 toggle_cancelled_flags (bool set)
4168 int i;
4169 sd_iterator_def sd_it;
4170 dep_t dep;
4172 if (ready.n_ready > 0)
4174 rtx *first = ready_lastpos (&ready);
4175 for (i = 0; i < ready.n_ready; i++)
4176 FOR_EACH_DEP (first[i], SD_LIST_BACK, sd_it, dep)
4177 if (!DEBUG_INSN_P (DEP_PRO (dep)))
4179 if (set)
4180 DEP_STATUS (dep) |= DEP_CANCELLED;
4181 else
4182 DEP_STATUS (dep) &= ~DEP_CANCELLED;
4185 for (i = 0; i <= max_insn_queue_index; i++)
4187 int q = NEXT_Q_AFTER (q_ptr, i);
4188 rtx link;
4189 for (link = insn_queue[q]; link; link = XEXP (link, 1))
4191 rtx insn = XEXP (link, 0);
4192 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4193 if (!DEBUG_INSN_P (DEP_PRO (dep)))
4195 if (set)
4196 DEP_STATUS (dep) |= DEP_CANCELLED;
4197 else
4198 DEP_STATUS (dep) &= ~DEP_CANCELLED;
4204 /* Undo the replacements that have occurred after backtrack point SAVE
4205 was placed. */
4206 static void
4207 undo_replacements_for_backtrack (struct haifa_saved_data *save)
4209 while (!save->replacement_deps.is_empty ())
4211 dep_t dep = save->replacement_deps.pop ();
4212 int apply_p = save->replace_apply.pop ();
4214 if (apply_p)
4215 restore_pattern (dep, true);
4216 else
4217 apply_replacement (dep, true);
4219 save->replacement_deps.release ();
4220 save->replace_apply.release ();
4223 /* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
4224 Restore their dependencies to an unresolved state, and mark them as
4225 queued nowhere. */
4227 static void
4228 unschedule_insns_until (rtx insn)
4230 auto_vec<rtx> recompute_vec;
4232 /* Make two passes over the insns to be unscheduled. First, we clear out
4233 dependencies and other trivial bookkeeping. */
4234 for (;;)
4236 rtx last;
4237 sd_iterator_def sd_it;
4238 dep_t dep;
4240 last = scheduled_insns.pop ();
4242 /* This will be changed by restore_backtrack_point if the insn is in
4243 any queue. */
4244 QUEUE_INDEX (last) = QUEUE_NOWHERE;
4245 if (last != insn)
4246 INSN_TICK (last) = INVALID_TICK;
4248 if (modulo_ii > 0 && INSN_UID (last) < modulo_iter0_max_uid)
4249 modulo_insns_scheduled--;
4251 for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
4252 sd_iterator_cond (&sd_it, &dep);)
4254 rtx con = DEP_CON (dep);
4255 sd_unresolve_dep (sd_it);
4256 if (!MUST_RECOMPUTE_SPEC_P (con))
4258 MUST_RECOMPUTE_SPEC_P (con) = 1;
4259 recompute_vec.safe_push (con);
4263 if (last == insn)
4264 break;
4267 /* A second pass, to update ready and speculation status for insns
4268 depending on the unscheduled ones. The first pass must have
4269 popped the scheduled_insns vector up to the point where we
4270 restart scheduling, as recompute_todo_spec requires it to be
4271 up-to-date. */
4272 while (!recompute_vec.is_empty ())
4274 rtx con;
4276 con = recompute_vec.pop ();
4277 MUST_RECOMPUTE_SPEC_P (con) = 0;
4278 if (!sd_lists_empty_p (con, SD_LIST_HARD_BACK))
4280 TODO_SPEC (con) = HARD_DEP;
4281 INSN_TICK (con) = INVALID_TICK;
4282 if (PREDICATED_PAT (con) != NULL_RTX)
4283 haifa_change_pattern (con, ORIG_PAT (con));
4285 else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
4286 TODO_SPEC (con) = recompute_todo_spec (con, true);
4290 /* Restore scheduler state from the topmost entry on the backtracking queue.
4291 PSCHED_BLOCK_P points to the local data of schedule_block that we must
4292 overwrite with the saved data.
4293 The caller must already have called unschedule_insns_until. */
4295 static void
4296 restore_last_backtrack_point (struct sched_block_state *psched_block)
4298 rtx link;
4299 int i;
4300 struct haifa_saved_data *save = backtrack_queue;
4302 backtrack_queue = save->next;
4304 if (current_sched_info->restore_state)
4305 (*current_sched_info->restore_state) (save->fe_saved_data);
4307 if (targetm.sched.alloc_sched_context)
4309 targetm.sched.set_sched_context (save->be_saved_data);
4310 targetm.sched.free_sched_context (save->be_saved_data);
4313 /* Do this first since it clobbers INSN_TICK of the involved
4314 instructions. */
4315 undo_replacements_for_backtrack (save);
4317 /* Clear the QUEUE_INDEX of everything in the ready list or one
4318 of the queues. */
4319 if (ready.n_ready > 0)
4321 rtx *first = ready_lastpos (&ready);
4322 for (i = 0; i < ready.n_ready; i++)
4324 rtx insn = first[i];
4325 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
4326 INSN_TICK (insn) = INVALID_TICK;
4329 for (i = 0; i <= max_insn_queue_index; i++)
4331 int q = NEXT_Q_AFTER (q_ptr, i);
4333 for (link = insn_queue[q]; link; link = XEXP (link, 1))
4335 rtx x = XEXP (link, 0);
4336 QUEUE_INDEX (x) = QUEUE_NOWHERE;
4337 INSN_TICK (x) = INVALID_TICK;
4339 free_INSN_LIST_list (&insn_queue[q]);
4342 free (ready.vec);
4343 ready = save->ready;
4345 if (ready.n_ready > 0)
4347 rtx *first = ready_lastpos (&ready);
4348 for (i = 0; i < ready.n_ready; i++)
4350 rtx insn = first[i];
4351 QUEUE_INDEX (insn) = QUEUE_READY;
4352 TODO_SPEC (insn) = recompute_todo_spec (insn, true);
4353 INSN_TICK (insn) = save->clock_var;
4357 q_ptr = 0;
4358 q_size = save->q_size;
4359 for (i = 0; i <= max_insn_queue_index; i++)
4361 int q = NEXT_Q_AFTER (q_ptr, i);
4363 insn_queue[q] = save->insn_queue[q];
4365 for (link = insn_queue[q]; link; link = XEXP (link, 1))
4367 rtx x = XEXP (link, 0);
4368 QUEUE_INDEX (x) = i;
4369 TODO_SPEC (x) = recompute_todo_spec (x, true);
4370 INSN_TICK (x) = save->clock_var + i;
4373 free (save->insn_queue);
4375 toggle_cancelled_flags (true);
4377 clock_var = save->clock_var;
4378 last_clock_var = save->last_clock_var;
4379 cycle_issued_insns = save->cycle_issued_insns;
4380 last_scheduled_insn = save->last_scheduled_insn;
4381 last_nondebug_scheduled_insn = save->last_nondebug_scheduled_insn;
4382 nonscheduled_insns_begin = save->nonscheduled_insns_begin;
4384 *psched_block = save->sched_block;
4386 memcpy (curr_state, save->curr_state, dfa_state_size);
4387 free (save->curr_state);
4389 mark_backtrack_feeds (save->delay_pair->i2, 0);
4391 gcc_assert (next_cycle_replace_deps.is_empty ());
4392 next_cycle_replace_deps = save->next_cycle_deps.copy ();
4393 next_cycle_apply = save->next_cycle_apply.copy ();
4395 free (save);
4397 for (save = backtrack_queue; save; save = save->next)
4399 mark_backtrack_feeds (save->delay_pair->i2, 1);
4403 /* Discard all data associated with the topmost entry in the backtrack
4404 queue. If RESET_TICK is false, we just want to free the data. If true,
4405 we are doing this because we discovered a reason to backtrack. In the
4406 latter case, also reset the INSN_TICK for the shadow insn. */
4407 static void
4408 free_topmost_backtrack_point (bool reset_tick)
4410 struct haifa_saved_data *save = backtrack_queue;
4411 int i;
4413 backtrack_queue = save->next;
4415 if (reset_tick)
4417 struct delay_pair *pair = save->delay_pair;
4418 while (pair)
4420 INSN_TICK (pair->i2) = INVALID_TICK;
4421 INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
4422 pair = pair->next_same_i1;
4424 undo_replacements_for_backtrack (save);
4426 else
4428 save->replacement_deps.release ();
4429 save->replace_apply.release ();
4432 if (targetm.sched.free_sched_context)
4433 targetm.sched.free_sched_context (save->be_saved_data);
4434 if (current_sched_info->restore_state)
4435 free (save->fe_saved_data);
4436 for (i = 0; i <= max_insn_queue_index; i++)
4437 free_INSN_LIST_list (&save->insn_queue[i]);
4438 free (save->insn_queue);
4439 free (save->curr_state);
4440 free (save->ready.vec);
4441 free (save);
4444 /* Free the entire backtrack queue. */
4445 static void
4446 free_backtrack_queue (void)
4448 while (backtrack_queue)
4449 free_topmost_backtrack_point (false);
4452 /* Apply a replacement described by DESC. If IMMEDIATELY is false, we
4453 may have to postpone the replacement until the start of the next cycle,
4454 at which point we will be called again with IMMEDIATELY true. This is
4455 only done for machines which have instruction packets with explicit
4456 parallelism, however. */
4457 static void
4458 apply_replacement (dep_t dep, bool immediately)
4460 struct dep_replacement *desc = DEP_REPLACE (dep);
4461 if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4463 next_cycle_replace_deps.safe_push (dep);
4464 next_cycle_apply.safe_push (1);
4466 else
4468 bool success;
4470 if (QUEUE_INDEX (desc->insn) == QUEUE_SCHEDULED)
4471 return;
4473 if (sched_verbose >= 5)
4474 fprintf (sched_dump, "applying replacement for insn %d\n",
4475 INSN_UID (desc->insn));
4477 success = validate_change (desc->insn, desc->loc, desc->newval, 0);
4478 gcc_assert (success);
4480 update_insn_after_change (desc->insn);
4481 if ((TODO_SPEC (desc->insn) & (HARD_DEP | DEP_POSTPONED)) == 0)
4482 fix_tick_ready (desc->insn);
4484 if (backtrack_queue != NULL)
4486 backtrack_queue->replacement_deps.safe_push (dep);
4487 backtrack_queue->replace_apply.safe_push (1);
4492 /* We have determined that a pattern involved in DEP must be restored.
4493 If IMMEDIATELY is false, we may have to postpone the replacement
4494 until the start of the next cycle, at which point we will be called
4495 again with IMMEDIATELY true. */
4496 static void
4497 restore_pattern (dep_t dep, bool immediately)
4499 rtx next = DEP_CON (dep);
4500 int tick = INSN_TICK (next);
4502 /* If we already scheduled the insn, the modified version is
4503 correct. */
4504 if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
4505 return;
4507 if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4509 next_cycle_replace_deps.safe_push (dep);
4510 next_cycle_apply.safe_push (0);
4511 return;
4515 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
4517 if (sched_verbose >= 5)
4518 fprintf (sched_dump, "restoring pattern for insn %d\n",
4519 INSN_UID (next));
4520 haifa_change_pattern (next, ORIG_PAT (next));
4522 else
4524 struct dep_replacement *desc = DEP_REPLACE (dep);
4525 bool success;
4527 if (sched_verbose >= 5)
4528 fprintf (sched_dump, "restoring pattern for insn %d\n",
4529 INSN_UID (desc->insn));
4530 tick = INSN_TICK (desc->insn);
4532 success = validate_change (desc->insn, desc->loc, desc->orig, 0);
4533 gcc_assert (success);
4534 update_insn_after_change (desc->insn);
4535 if (backtrack_queue != NULL)
4537 backtrack_queue->replacement_deps.safe_push (dep);
4538 backtrack_queue->replace_apply.safe_push (0);
4541 INSN_TICK (next) = tick;
4542 if (TODO_SPEC (next) == DEP_POSTPONED)
4543 return;
4545 if (sd_lists_empty_p (next, SD_LIST_BACK))
4546 TODO_SPEC (next) = 0;
4547 else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
4548 TODO_SPEC (next) = HARD_DEP;
4551 /* Perform pattern replacements that were queued up until the next
4552 cycle. */
4553 static void
4554 perform_replacements_new_cycle (void)
4556 int i;
4557 dep_t dep;
4558 FOR_EACH_VEC_ELT (next_cycle_replace_deps, i, dep)
4560 int apply_p = next_cycle_apply[i];
4561 if (apply_p)
4562 apply_replacement (dep, true);
4563 else
4564 restore_pattern (dep, true);
4566 next_cycle_replace_deps.truncate (0);
4567 next_cycle_apply.truncate (0);
4570 /* Compute INSN_TICK_ESTIMATE for INSN. PROCESSED is a bitmap of
4571 instructions we've previously encountered; a set bit prevents
4572 recursion. BUDGET is a limit on how far ahead we look; it is
4573 reduced on recursive calls. Return true if we produced a good
4574 estimate, or false if we exceeded the budget. */
4575 static bool
4576 estimate_insn_tick (bitmap processed, rtx insn, int budget)
4578 sd_iterator_def sd_it;
4579 dep_t dep;
4580 int earliest = INSN_TICK (insn);
4582 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4584 rtx pro = DEP_PRO (dep);
4585 int t;
4587 if (DEP_STATUS (dep) & DEP_CANCELLED)
4588 continue;
4590 if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
4591 gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
4592 else
4594 int cost = dep_cost (dep);
4595 if (cost >= budget)
4596 return false;
4597 if (!bitmap_bit_p (processed, INSN_LUID (pro)))
4599 if (!estimate_insn_tick (processed, pro, budget - cost))
4600 return false;
4602 gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
4603 t = INSN_TICK_ESTIMATE (pro) + cost;
4604 if (earliest == INVALID_TICK || t > earliest)
4605 earliest = t;
4608 bitmap_set_bit (processed, INSN_LUID (insn));
4609 INSN_TICK_ESTIMATE (insn) = earliest;
4610 return true;
4613 /* Examine the pair of insns in P, and estimate (optimistically, assuming
4614 infinite resources) the cycle in which the delayed shadow can be issued.
4615 Return the number of cycles that must pass before the real insn can be
4616 issued in order to meet this constraint. */
4617 static int
4618 estimate_shadow_tick (struct delay_pair *p)
4620 bitmap_head processed;
4621 int t;
4622 bool cutoff;
4623 bitmap_initialize (&processed, 0);
4625 cutoff = !estimate_insn_tick (&processed, p->i2,
4626 max_insn_queue_index + pair_delay (p));
4627 bitmap_clear (&processed);
4628 if (cutoff)
4629 return max_insn_queue_index;
4630 t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
4631 if (t > 0)
4632 return t;
4633 return 0;
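/* A worked example of the computation above (made-up numbers): if
   INSN_TICK_ESTIMATE (p->i2) is 12, clock_var is 8 and pair_delay (p) is
   3, then t = 12 - (8 + 3 + 1) = 0 and no extra delay is requested; had
   the estimate been 15, the function would ask for 3 more cycles.  */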
4636 /* If INSN has no unresolved backwards dependencies, add it to the schedule and
4637 recursively resolve all its forward dependencies. */
4638 static void
4639 resolve_dependencies (rtx insn)
4641 sd_iterator_def sd_it;
4642 dep_t dep;
4644 /* Don't use sd_lists_empty_p; it ignores debug insns. */
4645 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (insn)) != NULL
4646 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (insn)) != NULL)
4647 return;
4649 if (sched_verbose >= 4)
4650 fprintf (sched_dump, ";;\tquickly resolving %d\n", INSN_UID (insn));
4652 if (QUEUE_INDEX (insn) >= 0)
4653 queue_remove (insn);
4655 scheduled_insns.safe_push (insn);
4657 /* Update dependent instructions. */
4658 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4659 sd_iterator_cond (&sd_it, &dep);)
4661 rtx next = DEP_CON (dep);
4663 if (sched_verbose >= 4)
4664 fprintf (sched_dump, ";;\t\tdep %d against %d\n", INSN_UID (insn),
4665 INSN_UID (next));
4667 /* Resolve the dependence between INSN and NEXT.
4668 sd_resolve_dep () moves current dep to another list thus
4669 advancing the iterator. */
4670 sd_resolve_dep (sd_it);
4672 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4674 resolve_dependencies (next);
4676 else
4677 /* A check insn always has only one forward dependence (to the first insn in
4678 the recovery block), so this will be executed only once. */
4680 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
4686 /* Return the head and tail pointers of ebb starting at BEG and ending
4687 at END. */
4688 void
4689 get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
4691 rtx beg_head = BB_HEAD (beg);
4692 rtx beg_tail = BB_END (beg);
4693 rtx end_head = BB_HEAD (end);
4694 rtx end_tail = BB_END (end);
4696 /* Don't include any notes or labels at the beginning of the BEG
4697 basic block, or notes at the end of the END basic block. */
4699 if (LABEL_P (beg_head))
4700 beg_head = NEXT_INSN (beg_head);
4702 while (beg_head != beg_tail)
4703 if (NOTE_P (beg_head))
4704 beg_head = NEXT_INSN (beg_head);
4705 else if (DEBUG_INSN_P (beg_head))
4707 rtx note, next;
4709 for (note = NEXT_INSN (beg_head);
4710 note != beg_tail;
4711 note = next)
4713 next = NEXT_INSN (note);
4714 if (NOTE_P (note))
4716 if (sched_verbose >= 9)
4717 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
4719 reorder_insns_nobb (note, note, PREV_INSN (beg_head));
4721 if (BLOCK_FOR_INSN (note) != beg)
4722 df_insn_change_bb (note, beg);
4724 else if (!DEBUG_INSN_P (note))
4725 break;
4728 break;
4730 else
4731 break;
4733 *headp = beg_head;
4735 if (beg == end)
4736 end_head = beg_head;
4737 else if (LABEL_P (end_head))
4738 end_head = NEXT_INSN (end_head);
4740 while (end_head != end_tail)
4741 if (NOTE_P (end_tail))
4742 end_tail = PREV_INSN (end_tail);
4743 else if (DEBUG_INSN_P (end_tail))
4745 rtx note, prev;
4747 for (note = PREV_INSN (end_tail);
4748 note != end_head;
4749 note = prev)
4751 prev = PREV_INSN (note);
4752 if (NOTE_P (note))
4754 if (sched_verbose >= 9)
4755 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
4757 reorder_insns_nobb (note, note, end_tail);
4759 if (end_tail == BB_END (end))
4760 BB_END (end) = note;
4762 if (BLOCK_FOR_INSN (note) != end)
4763 df_insn_change_bb (note, end);
4765 else if (!DEBUG_INSN_P (note))
4766 break;
4769 break;
4771 else
4772 break;
4774 *tailp = end_tail;
4777 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
4780 no_real_insns_p (const_rtx head, const_rtx tail)
4782 while (head != NEXT_INSN (tail))
4784 if (!NOTE_P (head) && !LABEL_P (head))
4785 return 0;
4786 head = NEXT_INSN (head);
4788 return 1;
4791 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
4792 previously found among the insns. Insert them just before HEAD. */
4794 restore_other_notes (rtx head, basic_block head_bb)
4796 if (note_list != 0)
4798 rtx note_head = note_list;
4800 if (head)
4801 head_bb = BLOCK_FOR_INSN (head);
4802 else
4803 head = NEXT_INSN (bb_note (head_bb));
4805 while (PREV_INSN (note_head))
4807 set_block_for_insn (note_head, head_bb);
4808 note_head = PREV_INSN (note_head);
4810 /* In the above loop we've missed this note. */
4811 set_block_for_insn (note_head, head_bb);
4813 PREV_INSN (note_head) = PREV_INSN (head);
4814 NEXT_INSN (PREV_INSN (head)) = note_head;
4815 PREV_INSN (head) = note_list;
4816 NEXT_INSN (note_list) = head;
4818 if (BLOCK_FOR_INSN (head) != head_bb)
4819 BB_END (head_bb) = note_list;
4821 head = note_head;
4824 return head;
4827 /* When we know we are going to discard the schedule due to a failed attempt
4828 at modulo scheduling, undo all replacements. */
4829 static void
4830 undo_all_replacements (void)
4832 rtx insn;
4833 int i;
4835 FOR_EACH_VEC_ELT (scheduled_insns, i, insn)
4837 sd_iterator_def sd_it;
4838 dep_t dep;
4840 /* See if we must undo a replacement. */
4841 for (sd_it = sd_iterator_start (insn, SD_LIST_RES_FORW);
4842 sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
4844 struct dep_replacement *desc = DEP_REPLACE (dep);
4845 if (desc != NULL)
4846 validate_change (desc->insn, desc->loc, desc->orig, 0);
4851 /* Return the first non-scheduled insn in the current scheduling block.
4852 This is mostly used for debug-counter purposes. */
4853 static rtx
4854 first_nonscheduled_insn (void)
4856 rtx insn = (nonscheduled_insns_begin != NULL_RTX
4857 ? nonscheduled_insns_begin
4858 : current_sched_info->prev_head);
4862 insn = next_nonnote_nondebug_insn (insn);
4864 while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);
4866 return insn;
4869 /* Move insns that became ready to fire from the queue to the ready list. */
4871 static void
4872 queue_to_ready (struct ready_list *ready)
4874 rtx insn;
4875 rtx link;
4876 rtx skip_insn;
4878 q_ptr = NEXT_Q (q_ptr);
4880 if (dbg_cnt (sched_insn) == false)
4881 /* If the debug counter is activated, do not requeue the first
4882 nonscheduled insn. */
4883 skip_insn = first_nonscheduled_insn ();
4884 else
4885 skip_insn = NULL_RTX;
4887 /* Add all pending insns that can be scheduled without stalls to the
4888 ready list. */
4889 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
4891 insn = XEXP (link, 0);
4892 q_size -= 1;
4894 if (sched_verbose >= 2)
4895 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
4896 (*current_sched_info->print_insn) (insn, 0));
4898 /* If the ready list is full, delay the insn for 1 cycle.
4899 See the comment in schedule_block for the rationale. */
4900 if (!reload_completed
4901 && (ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
4902 || (sched_pressure == SCHED_PRESSURE_MODEL
4903 /* Limit pressure recalculations to MAX_SCHED_READY_INSNS
4904 instructions too. */
4905 && model_index (insn) > (model_curr_point
4906 + MAX_SCHED_READY_INSNS)))
4907 && !(sched_pressure == SCHED_PRESSURE_MODEL
4908 && model_curr_point < model_num_insns
4909 /* Always allow the next model instruction to issue. */
4910 && model_index (insn) == model_curr_point)
4911 && !SCHED_GROUP_P (insn)
4912 && insn != skip_insn)
4914 if (sched_verbose >= 2)
4915 fprintf (sched_dump, "keeping in queue, ready full\n");
4916 queue_insn (insn, 1, "ready full");
4918 else
4920 ready_add (ready, insn, false);
4921 if (sched_verbose >= 2)
4922 fprintf (sched_dump, "moving to ready without stalls\n");
4925 free_INSN_LIST_list (&insn_queue[q_ptr]);
4927 /* If there are no ready insns, stall until one is ready and add all
4928 of the pending insns at that point to the ready list. */
4929 if (ready->n_ready == 0)
4931 int stalls;
4933 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
4935 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
4937 for (; link; link = XEXP (link, 1))
4939 insn = XEXP (link, 0);
4940 q_size -= 1;
4942 if (sched_verbose >= 2)
4943 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
4944 (*current_sched_info->print_insn) (insn, 0));
4946 ready_add (ready, insn, false);
4947 if (sched_verbose >= 2)
4948 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
4950 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
4952 advance_one_cycle ();
4954 break;
4957 advance_one_cycle ();
4960 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
4961 clock_var += stalls;
4962 if (sched_verbose >= 2)
4963 fprintf (sched_dump, ";;\tAdvancing clock by %d cycle[s] to %d\n",
4964 stalls, clock_var);
4968 /* Used by early_queue_to_ready. Determines whether it is "ok" to
4969 prematurely move INSN from the queue to the ready list. Currently,
4970 if a target defines the hook 'is_costly_dependence', this function
4971 uses the hook to check whether there exist any dependences which are
4972 considered costly by the target, between INSN and other insns that
4973 have already been scheduled. Dependences are checked up to Y cycles
4974 back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
4975 controlling this value.
4976 (Other considerations could be taken into account instead (or in
4977 addition) depending on user flags and target hooks.) */
4979 static bool
4980 ok_for_early_queue_removal (rtx insn)
4982 if (targetm.sched.is_costly_dependence)
4984 rtx prev_insn;
4985 int n_cycles;
4986 int i = scheduled_insns.length ();
4987 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
4989 while (i-- > 0)
4991 int cost;
4993 prev_insn = scheduled_insns[i];
4995 if (!NOTE_P (prev_insn))
4997 dep_t dep;
4999 dep = sd_find_dep_between (prev_insn, insn, true);
5001 if (dep != NULL)
5003 cost = dep_cost (dep);
5005 if (targetm.sched.is_costly_dependence (dep, cost,
5006 flag_sched_stalled_insns_dep - n_cycles))
5007 return false;
5011 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
5012 break;
5015 if (i == 0)
5016 break;
5020 return true;
5024 /* Remove insns from the queue, before they become "ready" with respect
5025 to FU latency considerations. */
5027 static int
5028 early_queue_to_ready (state_t state, struct ready_list *ready)
5030 rtx insn;
5031 rtx link;
5032 rtx next_link;
5033 rtx prev_link;
5034 bool move_to_ready;
5035 int cost;
5036 state_t temp_state = alloca (dfa_state_size);
5037 int stalls;
5038 int insns_removed = 0;
5041 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
5042 function:
5044 X == 0: There is no limit on how many queued insns can be removed
5045 prematurely. (flag_sched_stalled_insns = -1).
5047 X >= 1: Only X queued insns can be removed prematurely in each
5048 invocation. (flag_sched_stalled_insns = X).
5050 Otherwise: Early queue removal is disabled.
5051 (flag_sched_stalled_insns = 0)
5054 if (! flag_sched_stalled_insns)
5055 return 0;
5057 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
5059 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5061 if (sched_verbose > 6)
5062 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
5064 prev_link = 0;
5065 while (link)
5067 next_link = XEXP (link, 1);
5068 insn = XEXP (link, 0);
5069 if (insn && sched_verbose > 6)
5070 print_rtl_single (sched_dump, insn);
5072 memcpy (temp_state, state, dfa_state_size);
5073 if (recog_memoized (insn) < 0)
5074 /* Use a non-negative cost to indicate that it's not ready,
5075 to avoid an infinite Q->R->Q->R... cycle. */
5076 cost = 0;
5077 else
5078 cost = state_transition (temp_state, insn);
5080 if (sched_verbose >= 6)
5081 fprintf (sched_dump, "transition cost = %d\n", cost);
5083 move_to_ready = false;
5084 if (cost < 0)
5086 move_to_ready = ok_for_early_queue_removal (insn);
5087 if (move_to_ready == true)
5089 /* move from Q to R */
5090 q_size -= 1;
5091 ready_add (ready, insn, false);
5093 if (prev_link)
5094 XEXP (prev_link, 1) = next_link;
5095 else
5096 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
5098 free_INSN_LIST_node (link);
5100 if (sched_verbose >= 2)
5101 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
5102 (*current_sched_info->print_insn) (insn, 0));
5104 insns_removed++;
5105 if (insns_removed == flag_sched_stalled_insns)
5106 /* Remove no more than flag_sched_stalled_insns insns
5107 from Q at a time. */
5108 return insns_removed;
5112 if (move_to_ready == false)
5113 prev_link = link;
5115 link = next_link;
5116 } /* while link */
5117 } /* if link */
5119 } /* for stalls.. */
5121 return insns_removed;
5125 /* Print the ready list for debugging purposes.
5126 If READY_TRY is non-zero then only print insns that max_issue
5127 will consider. */
5128 static void
5129 debug_ready_list_1 (struct ready_list *ready, signed char *ready_try)
5131 rtx *p;
5132 int i;
5134 if (ready->n_ready == 0)
5136 fprintf (sched_dump, "\n");
5137 return;
5140 p = ready_lastpos (ready);
5141 for (i = 0; i < ready->n_ready; i++)
5143 if (ready_try != NULL && ready_try[ready->n_ready - i - 1])
5144 continue;
5146 fprintf (sched_dump, " %s:%d",
5147 (*current_sched_info->print_insn) (p[i], 0),
5148 INSN_LUID (p[i]));
5149 if (sched_pressure != SCHED_PRESSURE_NONE)
5150 fprintf (sched_dump, "(cost=%d",
5151 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
5152 fprintf (sched_dump, ":prio=%d", INSN_PRIORITY (p[i]));
5153 if (INSN_TICK (p[i]) > clock_var)
5154 fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
5155 if (sched_pressure != SCHED_PRESSURE_NONE)
5156 fprintf (sched_dump, ")");
5158 fprintf (sched_dump, "\n");
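/* With register-pressure scheduling enabled a dumped entry looks roughly
   like (names and numbers are illustrative only):

     insn_42:17(cost=0:prio=5:delay=1)

   where the delay field appears only when the insn's tick is still in the
   future, and the cost parentheses only when sched_pressure is active.  */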
5161 /* Print the ready list. Callable from debugger. */
5162 static void
5163 debug_ready_list (struct ready_list *ready)
5165 debug_ready_list_1 (ready, NULL);
5168 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
5169 NOTEs. This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
5170 replaces the epilogue note in the correct basic block. */
5171 void
5172 reemit_notes (rtx insn)
5174 rtx note, last = insn;
5176 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
5178 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
5180 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
5182 last = emit_note_before (note_type, last);
5183 remove_note (insn, note);
5188 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
5189 static void
5190 move_insn (rtx insn, rtx last, rtx nt)
5192 if (PREV_INSN (insn) != last)
5194 basic_block bb;
5195 rtx note;
5196 int jump_p = 0;
5198 bb = BLOCK_FOR_INSN (insn);
5200 /* BB_HEAD is either LABEL or NOTE. */
5201 gcc_assert (BB_HEAD (bb) != insn);
5203 if (BB_END (bb) == insn)
5204 /* If this is the last instruction in BB, move the end marker one
5205 instruction up. */
5207 /* Jumps are always placed at the end of basic block. */
5208 jump_p = control_flow_insn_p (insn);
5210 gcc_assert (!jump_p
5211 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
5212 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
5213 || (common_sched_info->sched_pass_id
5214 == SCHED_EBB_PASS));
5216 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
5218 BB_END (bb) = PREV_INSN (insn);
5221 gcc_assert (BB_END (bb) != last);
5223 if (jump_p)
5224 /* We move the block note along with the jump. */
5226 gcc_assert (nt);
5228 note = NEXT_INSN (insn);
5229 while (NOTE_NOT_BB_P (note) && note != nt)
5230 note = NEXT_INSN (note);
5232 if (note != nt
5233 && (LABEL_P (note)
5234 || BARRIER_P (note)))
5235 note = NEXT_INSN (note);
5237 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5239 else
5240 note = insn;
5242 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
5243 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
5245 NEXT_INSN (note) = NEXT_INSN (last);
5246 PREV_INSN (NEXT_INSN (last)) = note;
5248 NEXT_INSN (last) = insn;
5249 PREV_INSN (insn) = last;
5251 bb = BLOCK_FOR_INSN (last);
5253 if (jump_p)
5255 fix_jump_move (insn);
5257 if (BLOCK_FOR_INSN (insn) != bb)
5258 move_block_after_check (insn);
5260 gcc_assert (BB_END (bb) == last);
5263 df_insn_change_bb (insn, bb);
5265 /* Update BB_END, if needed. */
5266 if (BB_END (bb) == last)
5267 BB_END (bb) = insn;
5270 SCHED_GROUP_P (insn) = 0;
5273 /* Return true if scheduling INSN will finish current clock cycle. */
5274 static bool
5275 insn_finishes_cycle_p (rtx insn)
5277 if (SCHED_GROUP_P (insn))
5278 /* After issuing INSN, the rest of the sched_group will be forced to issue
5279 in order. Don't make any plans for the rest of cycle. */
5280 return true;
5282 /* Finishing the block will, apparently, finish the cycle. */
5283 if (current_sched_info->insn_finishes_block_p
5284 && current_sched_info->insn_finishes_block_p (insn))
5285 return true;
5287 return false;
5290 /* Define type for target data used in multipass scheduling. */
5291 #ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
5292 # define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
5293 #endif
5294 typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;
5296 /* The following structure describes an entry of the stack of choices. */
5297 struct choice_entry
5299 /* Ordinal number of the issued insn in the ready queue. */
5300 int index;
5301 /* The number of remaining insns whose issue we should still try. */
5302 int rest;
5303 /* The number of issued essential insns. */
5304 int n;
5305 /* State after issuing the insn. */
5306 state_t state;
5307 /* Target-specific data. */
5308 first_cycle_multipass_data_t target_data;
5311 /* The following array is used to implement a stack of choices used in
5312 function max_issue. */
5313 static struct choice_entry *choice_stack;
5315 /* This holds the value of the target dfa_lookahead hook. */
5316 int dfa_lookahead;
5318 /* The following variable holds the maximal number of tries of issuing
5319 insns for the first cycle multipass insn scheduling. We define
5320 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
5321 need this constraint if all real insns (with non-negative codes)
5322 had reservations, because in this case the algorithm complexity is
5323 O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the dfa descriptions
5324 might be incomplete and such insns might occur. For such
5325 descriptions, the complexity of the algorithm (without the constraint)
5326 could reach DFA_LOOKAHEAD ** N, where N is the queue length. */
5327 static int max_lookahead_tries;
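/* For example (illustrative numbers only): with dfa_lookahead == 4 and
   issue_rate == 2, the initialization in max_issue gives
   max_lookahead_tries = 100 * 4 * 4 = 1600, i.e. at most 1600 candidate
   issue attempts before the search gives up.  */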
5329 /* The following value is the value of the hook
5330 `first_cycle_multipass_dfa_lookahead' at the last call of
5331 `max_issue'. */
5332 static int cached_first_cycle_multipass_dfa_lookahead = 0;
5334 /* The following value is the value of `issue_rate' at the last call of
5335 `sched_init'. */
5336 static int cached_issue_rate = 0;
5338 /* The following function returns the maximal (or close to maximal) number
5339 of insns which can be issued on the same cycle, one of which
5340 is the insn with the best rank (the first insn in READY). To do
5341 this, the function tries different samples of ready insns. READY
5342 is the current queue `ready'. The global array READY_TRY reflects which
5343 insns are already issued in this try. The function stops immediately
5344 if it reaches a solution in which all instructions can be issued.
5345 INDEX will contain the index of the best insn in READY. This
5346 function is used only for first cycle multipass scheduling.
5348 PRIVILEGED_N >= 0
5350 This function expects recognized insns only. All USEs,
5351 CLOBBERs, etc must be filtered elsewhere. */
5353 max_issue (struct ready_list *ready, int privileged_n, state_t state,
5354 bool first_cycle_insn_p, int *index)
5356 int n, i, all, n_ready, best, delay, tries_num;
5357 int more_issue;
5358 struct choice_entry *top;
5359 rtx insn;
5361 n_ready = ready->n_ready;
5362 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
5363 && privileged_n <= n_ready);
5365 /* Init MAX_LOOKAHEAD_TRIES. */
5366 if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
5368 cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
5369 max_lookahead_tries = 100;
5370 for (i = 0; i < issue_rate; i++)
5371 max_lookahead_tries *= dfa_lookahead;
5374 /* Init max_points. */
5375 more_issue = issue_rate - cycle_issued_insns;
5376 gcc_assert (more_issue >= 0);
5378 /* The number of the issued insns in the best solution. */
5379 best = 0;
5381 top = choice_stack;
5383 /* Set initial state of the search. */
5384 memcpy (top->state, state, dfa_state_size);
5385 top->rest = dfa_lookahead;
5386 top->n = 0;
5387 if (targetm.sched.first_cycle_multipass_begin)
5388 targetm.sched.first_cycle_multipass_begin (&top->target_data,
5389 ready_try, n_ready,
5390 first_cycle_insn_p);
5392 /* Count the number of insns to search among. */
5393 for (all = i = 0; i < n_ready; i++)
5394 if (!ready_try [i])
5395 all++;
5397 if (sched_verbose >= 2)
5399 fprintf (sched_dump, ";;\t\tmax_issue among %d insns:", all);
5400 debug_ready_list_1 (ready, ready_try);
5403 /* I is the index of the insn to try next. */
5404 i = 0;
5405 tries_num = 0;
5406 for (;;)
5408 if (/* If we've reached a dead end or searched enough of what we have
5409 been asked... */
5410 top->rest == 0
5411 /* or have nothing else to try... */
5412 || i >= n_ready
5413 /* or should not issue more. */
5414 || top->n >= more_issue)
5416 /* ??? (... || i == n_ready). */
5417 gcc_assert (i <= n_ready);
5419 /* We should not issue more than issue_rate instructions. */
5420 gcc_assert (top->n <= more_issue);
5422 if (top == choice_stack)
5423 break;
5425 if (best < top - choice_stack)
5427 if (privileged_n)
5429 n = privileged_n;
5430 /* Try to find issued privileged insn. */
5431 while (n && !ready_try[--n])
5435 if (/* If all insns are equally good... */
5436 privileged_n == 0
5437 /* Or a privileged insn will be issued. */
5438 || ready_try[n])
5439 /* Then we have a solution. */
5441 best = top - choice_stack;
5442 /* This is the index of the insn issued first in this
5443 solution. */
5444 *index = choice_stack [1].index;
5445 if (top->n == more_issue || best == all)
5446 break;
5450 /* Set ready-list index to point to the last insn
5451 ('i++' below will advance it to the next insn). */
5452 i = top->index;
5454 /* Backtrack. */
5455 ready_try [i] = 0;
5457 if (targetm.sched.first_cycle_multipass_backtrack)
5458 targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
5459 ready_try, n_ready);
5461 top--;
5462 memcpy (state, top->state, dfa_state_size);
5464 else if (!ready_try [i])
5466 tries_num++;
5467 if (tries_num > max_lookahead_tries)
5468 break;
5469 insn = ready_element (ready, i);
5470 delay = state_transition (state, insn);
5471 if (delay < 0)
5473 if (state_dead_lock_p (state)
5474 || insn_finishes_cycle_p (insn))
5475 /* We won't issue any more instructions in the next
5476 choice_state. */
5477 top->rest = 0;
5478 else
5479 top->rest--;
5481 n = top->n;
5482 if (memcmp (top->state, state, dfa_state_size) != 0)
5483 n++;
5485 /* Advance to the next choice_entry. */
5486 top++;
5487 /* Initialize it. */
5488 top->rest = dfa_lookahead;
5489 top->index = i;
5490 top->n = n;
5491 memcpy (top->state, state, dfa_state_size);
5492 ready_try [i] = 1;
5494 if (targetm.sched.first_cycle_multipass_issue)
5495 targetm.sched.first_cycle_multipass_issue (&top->target_data,
5496 ready_try, n_ready,
5497 insn,
5498 &((top - 1)
5499 ->target_data));
5501 i = -1;
5505 /* Increase ready-list index. */
5506 i++;
5509 if (targetm.sched.first_cycle_multipass_end)
5510 targetm.sched.first_cycle_multipass_end (best != 0
5511 ? &choice_stack[1].target_data
5512 : NULL);
5514 /* Restore the original state of the DFA. */
5515 memcpy (state, choice_stack->state, dfa_state_size);
5517 return best;
5520 /* The following function chooses an insn from READY and modifies
5521 READY. It is used only for first
5522 cycle multipass scheduling.
5523 Return:
5524 -1 if cycle should be advanced,
5525 0 if INSN_PTR is set to point to the desirable insn,
5526 1 if choose_ready () should be restarted without advancing the cycle. */
5527 static int
5528 choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
5529 rtx *insn_ptr)
5531 int lookahead;
5533 if (dbg_cnt (sched_insn) == false)
5535 if (nonscheduled_insns_begin == NULL_RTX)
5536 nonscheduled_insns_begin = current_sched_info->prev_head;
5538 rtx insn = first_nonscheduled_insn ();
5540 if (QUEUE_INDEX (insn) == QUEUE_READY)
5541 /* INSN is in the ready_list. */
5543 ready_remove_insn (insn);
5544 *insn_ptr = insn;
5545 return 0;
5548 /* INSN is in the queue. Advance cycle to move it to the ready list. */
5549 gcc_assert (QUEUE_INDEX (insn) >= 0);
5550 return -1;
5553 lookahead = 0;
5555 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
5556 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
5557 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
5558 || DEBUG_INSN_P (ready_element (ready, 0)))
5560 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
5561 *insn_ptr = ready_remove_first_dispatch (ready);
5562 else
5563 *insn_ptr = ready_remove_first (ready);
5565 return 0;
5567 else
5569 /* Try to choose the best insn. */
5570 int index = 0, i;
5571 rtx insn;
5573 insn = ready_element (ready, 0);
5574 if (INSN_CODE (insn) < 0)
5576 *insn_ptr = ready_remove_first (ready);
5577 return 0;
5580 /* Filter the search space. */
5581 for (i = 0; i < ready->n_ready; i++)
5583 ready_try[i] = 0;
5585 insn = ready_element (ready, i);
5587 /* If this insn is recognizable we should have already
5588 recognized it earlier.
5589 ??? Not very clear where this is supposed to be done.
5590 See dep_cost_1. */
5591 gcc_checking_assert (INSN_CODE (insn) >= 0
5592 || recog_memoized (insn) < 0);
5593 if (INSN_CODE (insn) < 0)
5595 /* Non-recognized insns at position 0 are handled above. */
5596 gcc_assert (i > 0);
5597 ready_try[i] = 1;
5598 continue;
5601 if (targetm.sched.first_cycle_multipass_dfa_lookahead_guard)
5603 ready_try[i]
5604 = (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
5605 (insn, i));
5607 if (ready_try[i] < 0)
5608 /* Queue instruction for several cycles.
5609 We need to restart choose_ready as we have changed
5610 the ready list. */
5612 change_queue_index (insn, -ready_try[i]);
5613 return 1;
5616 /* Make sure that we didn't end up with 0'th insn filtered out.
5617 Don't be tempted to make life easier for backends and just
5618 requeue 0'th insn if (ready_try[0] == 0) and restart
5619 choose_ready. Backends should be very considerate about
5620 requeueing instructions -- especially the highest priority
5621 one at position 0. */
5622 gcc_assert (ready_try[i] == 0 || i > 0);
5623 if (ready_try[i])
5624 continue;
5627 gcc_assert (ready_try[i] == 0);
5628 /* INSN made it through the scrutiny of filters! */
5631 if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
5633 *insn_ptr = ready_remove_first (ready);
5634 if (sched_verbose >= 4)
5635 fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
5636 (*current_sched_info->print_insn) (*insn_ptr, 0));
5637 return 0;
5639 else
5641 if (sched_verbose >= 4)
5642 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
5643 (*current_sched_info->print_insn)
5644 (ready_element (ready, index), 0));
5646 *insn_ptr = ready_remove (ready, index);
5647 return 0;
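/* Illustrative sketch: one way a caller could consume the -1/0/1 protocol
   documented above choose_ready.  The wrapper name pick_next_insn is
   hypothetical (not a real function); schedule_block below does the
   equivalent inline.  Kept under #if 0.  */
#if 0
static rtx
pick_next_insn (struct ready_list *ready, bool first_cycle_insn_p)
{
  rtx insn = NULL_RTX;
  for (;;)
    {
      int res = choose_ready (ready, first_cycle_insn_p, &insn);
      if (res < 0)
	return NULL_RTX;	/* Caller should advance the cycle.  */
      if (res == 0)
	return insn;		/* INSN was removed from READY; issue it.  */
      /* res > 0: the ready list changed; retry without advancing.  */
    }
}
#endif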
5652 /* This function is called when we have successfully scheduled a
5653 block. It uses the schedule stored in the scheduled_insns vector
5654 to rearrange the RTL. PREV_HEAD is used as the anchor to which we
5655 append the scheduled insns; TAIL is the insn after the scheduled
5656 block. TARGET_BB is the argument passed to schedule_block. */
5658 static void
5659 commit_schedule (rtx prev_head, rtx tail, basic_block *target_bb)
5661 unsigned int i;
5662 rtx insn;
5664 last_scheduled_insn = prev_head;
5665 for (i = 0;
5666 scheduled_insns.iterate (i, &insn);
5667 i++)
5669 if (control_flow_insn_p (last_scheduled_insn)
5670 || current_sched_info->advance_target_bb (*target_bb, insn))
5672 *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);
5674 if (sched_verbose)
5676 rtx x;
5678 x = next_real_insn (last_scheduled_insn);
5679 gcc_assert (x);
5680 dump_new_block_header (1, *target_bb, x, tail);
5683 last_scheduled_insn = bb_note (*target_bb);
5686 if (current_sched_info->begin_move_insn)
5687 (*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
5688 move_insn (insn, last_scheduled_insn,
5689 current_sched_info->next_tail);
5690 if (!DEBUG_INSN_P (insn))
5691 reemit_notes (insn);
5692 last_scheduled_insn = insn;
5695 scheduled_insns.truncate (0);
5698 /* Examine all insns on the ready list and queue those which can't be
5699 issued in this cycle. TEMP_STATE is temporary scheduler state we
5700 can use as scratch space. If FIRST_CYCLE_INSN_P is true, no insns
5701 have been issued for the current cycle, which means it is valid to
5702 issue an asm statement.
5704 If SHADOWS_ONLY_P is true, we eliminate all real insns and only
5705 leave those for which SHADOW_P is true. If MODULO_EPILOGUE is true,
5706 we only leave insns which have an INSN_EXACT_TICK. */
5708 static void
5709 prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
5710 bool shadows_only_p, bool modulo_epilogue_p)
5712 int i, pass;
5713 bool sched_group_found = false;
5714 int min_cost_group = 1;
5716 for (i = 0; i < ready.n_ready; i++)
5718 rtx insn = ready_element (&ready, i);
5719 if (SCHED_GROUP_P (insn))
5721 sched_group_found = true;
5722 break;
5726 /* Make two passes if there's a SCHED_GROUP_P insn; make sure to handle
5727 such an insn first and note its cost, then schedule all other insns
5728 for one cycle later. */
5729 for (pass = sched_group_found ? 0 : 1; pass < 2; )
5731 int n = ready.n_ready;
5732 for (i = 0; i < n; i++)
5734 rtx insn = ready_element (&ready, i);
5735 int cost = 0;
5736 const char *reason = "resource conflict";
5738 if (DEBUG_INSN_P (insn))
5739 continue;
5741 if (sched_group_found && !SCHED_GROUP_P (insn))
5743 if (pass == 0)
5744 continue;
5745 cost = min_cost_group;
5746 reason = "not in sched group";
5748 else if (modulo_epilogue_p
5749 && INSN_EXACT_TICK (insn) == INVALID_TICK)
5751 cost = max_insn_queue_index;
5752 reason = "not an epilogue insn";
5754 else if (shadows_only_p && !SHADOW_P (insn))
5756 cost = 1;
5757 reason = "not a shadow";
5759 else if (recog_memoized (insn) < 0)
5761 if (!first_cycle_insn_p
5762 && (GET_CODE (PATTERN (insn)) == ASM_INPUT
5763 || asm_noperands (PATTERN (insn)) >= 0))
5764 cost = 1;
5765 reason = "asm";
5767 else if (sched_pressure != SCHED_PRESSURE_NONE)
5769 if (sched_pressure == SCHED_PRESSURE_MODEL
5770 && INSN_TICK (insn) <= clock_var)
5772 memcpy (temp_state, curr_state, dfa_state_size);
5773 if (state_transition (temp_state, insn) >= 0)
5774 INSN_TICK (insn) = clock_var + 1;
5776 cost = 0;
5778 else
5780 int delay_cost = 0;
5782 if (delay_htab.is_created ())
5784 struct delay_pair *delay_entry;
5785 delay_entry
5786 = delay_htab.find_with_hash (insn,
5787 htab_hash_pointer (insn));
5788 while (delay_entry && delay_cost == 0)
5790 delay_cost = estimate_shadow_tick (delay_entry);
5791 if (delay_cost > max_insn_queue_index)
5792 delay_cost = max_insn_queue_index;
5793 delay_entry = delay_entry->next_same_i1;
5797 memcpy (temp_state, curr_state, dfa_state_size);
5798 cost = state_transition (temp_state, insn);
5799 if (cost < 0)
5800 cost = 0;
5801 else if (cost == 0)
5802 cost = 1;
5803 if (cost < delay_cost)
5805 cost = delay_cost;
5806 reason = "shadow tick";
5809 if (cost >= 1)
5811 if (SCHED_GROUP_P (insn) && cost > min_cost_group)
5812 min_cost_group = cost;
5813 ready_remove (&ready, i);
5814 queue_insn (insn, cost, reason);
5815 if (i + 1 < n)
5816 break;
5819 if (i == n)
5820 pass++;
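/* Worked example (with made-up numbers): suppose state_transition on the
   scratch TEMP_STATE reports that an insn cannot issue for two more cycles
   and no delay pair imposes a larger shadow cost.  Then COST ends up as 2,
   the insn is removed from READY, and queue_insn (insn, 2, "resource
   conflict") parks it; queue_to_ready returns it to the ready list once
   clock_var has advanced by two cycles.  */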
5824 /* Called when we detect that the schedule is impossible. We examine the
5825 backtrack queue to find the earliest insn that caused this condition. */
5827 static struct haifa_saved_data *
5828 verify_shadows (void)
5830 struct haifa_saved_data *save, *earliest_fail = NULL;
5831 for (save = backtrack_queue; save; save = save->next)
5833 int t;
5834 struct delay_pair *pair = save->delay_pair;
5835 rtx i1 = pair->i1;
5837 for (; pair; pair = pair->next_same_i1)
5839 rtx i2 = pair->i2;
5841 if (QUEUE_INDEX (i2) == QUEUE_SCHEDULED)
5842 continue;
5844 t = INSN_TICK (i1) + pair_delay (pair);
5845 if (t < clock_var)
5847 if (sched_verbose >= 2)
5848 fprintf (sched_dump,
5849 ";;\t\tfailed delay requirements for %d/%d (%d->%d)"
5850 ", not ready\n",
5851 INSN_UID (pair->i1), INSN_UID (pair->i2),
5852 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
5853 earliest_fail = save;
5854 break;
5856 if (QUEUE_INDEX (i2) >= 0)
5858 int queued_for = INSN_TICK (i2);
5860 if (t < queued_for)
5862 if (sched_verbose >= 2)
5863 fprintf (sched_dump,
5864 ";;\t\tfailed delay requirements for %d/%d"
5865 " (%d->%d), queued too late\n",
5866 INSN_UID (pair->i1), INSN_UID (pair->i2),
5867 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
5868 earliest_fail = save;
5869 break;
5875 return earliest_fail;
5878 /* Print instructions together with useful scheduling information between
5879 HEAD and TAIL (inclusive). */
5880 static void
5881 dump_insn_stream (rtx head, rtx tail)
5883 fprintf (sched_dump, ";;\t| insn | prio |\n");
5885 rtx next_tail = NEXT_INSN (tail);
5886 for (rtx insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5888 int priority = NOTE_P (insn) ? 0 : INSN_PRIORITY (insn);
5889 const char *pattern = (NOTE_P (insn)
5890 ? "note"
5891 : str_pattern_slim (PATTERN (insn)));
5893 fprintf (sched_dump, ";;\t| %4d | %4d | %-30s ",
5894 INSN_UID (insn), priority, pattern);
5896 if (sched_verbose >= 4)
5898 if (NOTE_P (insn) || recog_memoized (insn) < 0)
5899 fprintf (sched_dump, "nothing");
5900 else
5901 print_reservation (sched_dump, insn);
5903 fprintf (sched_dump, "\n");
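/* For reference, the format strings above produce dump lines of this shape;
   the UID and priority values here are invented and the last column comes
   from str_pattern_slim (or "note" for notes):

   ;;	| insn | prio |
   ;;	|   42 |    3 | <slim RTL pattern>
   ;;	|   43 |    0 | note
*/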
5907 /* Use forward list scheduling to rearrange insns of block pointed to by
5908 TARGET_BB, possibly bringing insns from subsequent blocks in the same
5909 region. */
5911 bool
5912 schedule_block (basic_block *target_bb, state_t init_state)
5914 int i;
5915 bool success = modulo_ii == 0;
5916 struct sched_block_state ls;
5917 state_t temp_state = NULL; /* It is used for multipass scheduling. */
5918 int sort_p, advance, start_clock_var;
5920 /* Head/tail info for this block. */
5921 rtx prev_head = current_sched_info->prev_head;
5922 rtx next_tail = current_sched_info->next_tail;
5923 rtx head = NEXT_INSN (prev_head);
5924 rtx tail = PREV_INSN (next_tail);
5926 if ((current_sched_info->flags & DONT_BREAK_DEPENDENCIES) == 0
5927 && sched_pressure != SCHED_PRESSURE_MODEL)
5928 find_modifiable_mems (head, tail);
5930 /* We used to have code to avoid getting parameters moved from hard
5931 argument registers into pseudos.
5933 However, it was removed when it proved to be of marginal benefit
5934 and caused problems because schedule_block and compute_forward_dependences
5935 had different notions of what the "head" insn was. */
5937 gcc_assert (head != tail || INSN_P (head));
5939 haifa_recovery_bb_recently_added_p = false;
5941 backtrack_queue = NULL;
5943 /* Debug info. */
5944 if (sched_verbose)
5946 dump_new_block_header (0, *target_bb, head, tail);
5948 if (sched_verbose >= 2)
5949 dump_insn_stream (head, tail);
5952 if (init_state == NULL)
5953 state_reset (curr_state);
5954 else
5955 memcpy (curr_state, init_state, dfa_state_size);
5957 /* Clear the ready list. */
5958 ready.first = ready.veclen - 1;
5959 ready.n_ready = 0;
5960 ready.n_debug = 0;
5962 /* It is used for first cycle multipass scheduling. */
5963 temp_state = alloca (dfa_state_size);
5965 if (targetm.sched.init)
5966 targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
5968 /* We start inserting insns after PREV_HEAD. */
5969 last_scheduled_insn = prev_head;
5970 last_nondebug_scheduled_insn = NULL_RTX;
5971 nonscheduled_insns_begin = NULL_RTX;
5973 gcc_assert ((NOTE_P (last_scheduled_insn)
5974 || DEBUG_INSN_P (last_scheduled_insn))
5975 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
5977 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
5978 queue. */
5979 q_ptr = 0;
5980 q_size = 0;
5982 insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
5983 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
5985 /* Start just before the beginning of time. */
5986 clock_var = -1;
5988 /* We need the queue and ready lists and clock_var to be initialized
5989 in try_ready () (which is called through init_ready_list ()). */
5990 (*current_sched_info->init_ready_list) ();
5992 if (sched_pressure == SCHED_PRESSURE_MODEL)
5993 model_start_schedule ();
5995 /* The algorithm is O(n^2) in the number of ready insns at any given
5996 time in the worst case. Before reload we are more likely to have
5997 big lists so truncate them to a reasonable size. */
5998 if (!reload_completed
5999 && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
6001 ready_sort (&ready);
6003 /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
6004 If there are debug insns, we know they're first. */
6005 for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
6006 if (!SCHED_GROUP_P (ready_element (&ready, i)))
6007 break;
6009 if (sched_verbose >= 2)
6011 fprintf (sched_dump,
6012 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
6013 fprintf (sched_dump,
6014 ";;\t\t before reload => truncated to %d insns\n", i);
6017 /* Delay all insns past it for 1 cycle. If debug counter is
6018 activated make an exception for the insn right after
6019 nonscheduled_insns_begin. */
6021 rtx skip_insn;
6023 if (dbg_cnt (sched_insn) == false)
6024 skip_insn = first_nonscheduled_insn ();
6025 else
6026 skip_insn = NULL_RTX;
6028 while (i < ready.n_ready)
6030 rtx insn;
6032 insn = ready_remove (&ready, i);
6034 if (insn != skip_insn)
6035 queue_insn (insn, 1, "list truncated");
6037 if (skip_insn)
6038 ready_add (&ready, skip_insn, true);
6042 /* Now we can restore basic block notes and maintain precise cfg. */
6043 restore_bb_notes (*target_bb);
6045 last_clock_var = -1;
6047 advance = 0;
6049 gcc_assert (scheduled_insns.length () == 0);
6050 sort_p = TRUE;
6051 must_backtrack = false;
6052 modulo_insns_scheduled = 0;
6054 ls.modulo_epilogue = false;
6056 /* Loop until all the insns in BB are scheduled. */
6057 while ((*current_sched_info->schedule_more_p) ())
6059 perform_replacements_new_cycle ();
6062 start_clock_var = clock_var;
6064 clock_var++;
6066 advance_one_cycle ();
6068 /* Add to the ready list all pending insns that can be issued now.
6069 If there are no ready insns, increment clock until one
6070 is ready and add all pending insns at that point to the ready
6071 list. */
6072 queue_to_ready (&ready);
6074 gcc_assert (ready.n_ready);
6076 if (sched_verbose >= 2)
6078 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready:");
6079 debug_ready_list (&ready);
6081 advance -= clock_var - start_clock_var;
6083 while (advance > 0);
6085 if (ls.modulo_epilogue)
6087 int stage = clock_var / modulo_ii;
6088 if (stage > modulo_last_stage * 2 + 2)
6090 if (sched_verbose >= 2)
6091 fprintf (sched_dump,
6092 ";;\t\tmodulo scheduled succeeded at II %d\n",
6093 modulo_ii);
6094 success = true;
6095 goto end_schedule;
6098 else if (modulo_ii > 0)
6100 int stage = clock_var / modulo_ii;
6101 if (stage > modulo_max_stages)
6103 if (sched_verbose >= 2)
6104 fprintf (sched_dump,
6105 ";;\t\tfailing schedule due to excessive stages\n");
6106 goto end_schedule;
6108 if (modulo_n_insns == modulo_insns_scheduled
6109 && stage > modulo_last_stage)
6111 if (sched_verbose >= 2)
6112 fprintf (sched_dump,
6113 ";;\t\tfound kernel after %d stages, II %d\n",
6114 stage, modulo_ii);
6115 ls.modulo_epilogue = true;
6119 prune_ready_list (temp_state, true, false, ls.modulo_epilogue);
6120 if (ready.n_ready == 0)
6121 continue;
6122 if (must_backtrack)
6123 goto do_backtrack;
6125 ls.first_cycle_insn_p = true;
6126 ls.shadows_only_p = false;
6127 cycle_issued_insns = 0;
6128 ls.can_issue_more = issue_rate;
6129 for (;;)
6131 rtx insn;
6132 int cost;
6133 bool asm_p;
6135 if (sort_p && ready.n_ready > 0)
6137 /* Sort the ready list based on priority. This must be
6138 done every iteration through the loop, as schedule_insn
6139 may have readied additional insns that will not be
6140 sorted correctly. */
6141 ready_sort (&ready);
6143 if (sched_verbose >= 2)
6145 fprintf (sched_dump,
6146 ";;\t\tReady list after ready_sort: ");
6147 debug_ready_list (&ready);
6151 /* We don't want md sched reorder to even see debug insns, so put
6152 them out right away. */
6153 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
6154 && (*current_sched_info->schedule_more_p) ())
6156 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
6158 rtx insn = ready_remove_first (&ready);
6159 gcc_assert (DEBUG_INSN_P (insn));
6160 (*current_sched_info->begin_schedule_ready) (insn);
6161 scheduled_insns.safe_push (insn);
6162 last_scheduled_insn = insn;
6163 advance = schedule_insn (insn);
6164 gcc_assert (advance == 0);
6165 if (ready.n_ready > 0)
6166 ready_sort (&ready);
6170 if (ls.first_cycle_insn_p && !ready.n_ready)
6171 break;
6173 resume_after_backtrack:
6174 /* Allow the target to reorder the list, typically for
6175 better instruction bundling. */
6176 if (sort_p
6177 && (ready.n_ready == 0
6178 || !SCHED_GROUP_P (ready_element (&ready, 0))))
6180 if (ls.first_cycle_insn_p && targetm.sched.reorder)
6181 ls.can_issue_more
6182 = targetm.sched.reorder (sched_dump, sched_verbose,
6183 ready_lastpos (&ready),
6184 &ready.n_ready, clock_var);
6185 else if (!ls.first_cycle_insn_p && targetm.sched.reorder2)
6186 ls.can_issue_more
6187 = targetm.sched.reorder2 (sched_dump, sched_verbose,
6188 ready.n_ready
6189 ? ready_lastpos (&ready) : NULL,
6190 &ready.n_ready, clock_var);
6193 restart_choose_ready:
6194 if (sched_verbose >= 2)
6196 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
6197 clock_var);
6198 debug_ready_list (&ready);
6199 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
6200 print_curr_reg_pressure ();
6203 if (ready.n_ready == 0
6204 && ls.can_issue_more
6205 && reload_completed)
6207 /* Allow scheduling insns directly from the queue in case
6208 there's nothing better to do (ready list is empty) but
6209 there are still vacant dispatch slots in the current cycle. */
6210 if (sched_verbose >= 6)
6211 fprintf (sched_dump,";;\t\tSecond chance\n");
6212 memcpy (temp_state, curr_state, dfa_state_size);
6213 if (early_queue_to_ready (temp_state, &ready))
6214 ready_sort (&ready);
6217 if (ready.n_ready == 0
6218 || !ls.can_issue_more
6219 || state_dead_lock_p (curr_state)
6220 || !(*current_sched_info->schedule_more_p) ())
6221 break;
6223 /* Select and remove the insn from the ready list. */
6224 if (sort_p)
6226 int res;
6228 insn = NULL_RTX;
6229 res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);
6231 if (res < 0)
6232 /* Finish cycle. */
6233 break;
6234 if (res > 0)
6235 goto restart_choose_ready;
6237 gcc_assert (insn != NULL_RTX);
6239 else
6240 insn = ready_remove_first (&ready);
6242 if (sched_pressure != SCHED_PRESSURE_NONE
6243 && INSN_TICK (insn) > clock_var)
6245 ready_add (&ready, insn, true);
6246 advance = 1;
6247 break;
6250 if (targetm.sched.dfa_new_cycle
6251 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
6252 insn, last_clock_var,
6253 clock_var, &sort_p))
6254 /* SORT_P is used by the target to override sorting
6255 of the ready list. This is needed when the target
6256 has modified its internal structures expecting that
6257 the insn will be issued next. As we need the insn
6258 to have the highest priority (so it will be returned by
6259 the ready_remove_first call above), we invoke
6260 ready_add (&ready, insn, true).
6261 But, still, there is one issue: INSN can later be
6262 discarded by the scheduler's front end through
6263 current_sched_info->can_schedule_ready_p, and hence won't
6264 be issued next. */
6266 ready_add (&ready, insn, true);
6267 break;
6270 sort_p = TRUE;
6272 if (current_sched_info->can_schedule_ready_p
6273 && ! (*current_sched_info->can_schedule_ready_p) (insn))
6274 /* We normally get here only if we don't want to move
6275 insn from the split block. */
6277 TODO_SPEC (insn) = DEP_POSTPONED;
6278 goto restart_choose_ready;
6281 if (delay_htab.is_created ())
6283 /* If this insn is the first part of a delay-slot pair, record a
6284 backtrack point. */
6285 struct delay_pair *delay_entry;
6286 delay_entry
6287 = delay_htab.find_with_hash (insn, htab_hash_pointer (insn));
6288 if (delay_entry)
6290 save_backtrack_point (delay_entry, ls);
6291 if (sched_verbose >= 2)
6292 fprintf (sched_dump, ";;\t\tsaving backtrack point\n");
6296 /* DECISION is made. */
6298 if (modulo_ii > 0 && INSN_UID (insn) < modulo_iter0_max_uid)
6300 modulo_insns_scheduled++;
6301 modulo_last_stage = clock_var / modulo_ii;
6303 if (TODO_SPEC (insn) & SPECULATIVE)
6304 generate_recovery_code (insn);
6306 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
6307 targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
6309 /* Update counters, etc in the scheduler's front end. */
6310 (*current_sched_info->begin_schedule_ready) (insn);
6311 scheduled_insns.safe_push (insn);
6312 gcc_assert (NONDEBUG_INSN_P (insn));
6313 last_nondebug_scheduled_insn = last_scheduled_insn = insn;
6315 if (recog_memoized (insn) >= 0)
6317 memcpy (temp_state, curr_state, dfa_state_size);
6318 cost = state_transition (curr_state, insn);
6319 if (sched_pressure != SCHED_PRESSURE_WEIGHTED)
6320 gcc_assert (cost < 0);
6321 if (memcmp (temp_state, curr_state, dfa_state_size) != 0)
6322 cycle_issued_insns++;
6323 asm_p = false;
6325 else
6326 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
6327 || asm_noperands (PATTERN (insn)) >= 0);
6329 if (targetm.sched.variable_issue)
6330 ls.can_issue_more =
6331 targetm.sched.variable_issue (sched_dump, sched_verbose,
6332 insn, ls.can_issue_more);
6333 /* A naked CLOBBER or USE generates no instruction, so do
6334 not count them against the issue rate. */
6335 else if (GET_CODE (PATTERN (insn)) != USE
6336 && GET_CODE (PATTERN (insn)) != CLOBBER)
6337 ls.can_issue_more--;
6338 advance = schedule_insn (insn);
6340 if (SHADOW_P (insn))
6341 ls.shadows_only_p = true;
6343 /* After issuing an asm insn we should start a new cycle. */
6344 if (advance == 0 && asm_p)
6345 advance = 1;
6347 if (must_backtrack)
6348 break;
6350 if (advance != 0)
6351 break;
6353 ls.first_cycle_insn_p = false;
6354 if (ready.n_ready > 0)
6355 prune_ready_list (temp_state, false, ls.shadows_only_p,
6356 ls.modulo_epilogue);
6359 do_backtrack:
6360 if (!must_backtrack)
6361 for (i = 0; i < ready.n_ready; i++)
6363 rtx insn = ready_element (&ready, i);
6364 if (INSN_EXACT_TICK (insn) == clock_var)
6366 must_backtrack = true;
6367 clock_var++;
6368 break;
6371 if (must_backtrack && modulo_ii > 0)
6373 if (modulo_backtracks_left == 0)
6374 goto end_schedule;
6375 modulo_backtracks_left--;
6377 while (must_backtrack)
6379 struct haifa_saved_data *failed;
6380 rtx failed_insn;
6382 must_backtrack = false;
6383 failed = verify_shadows ();
6384 gcc_assert (failed);
6386 failed_insn = failed->delay_pair->i1;
6387 /* Clear these queues. */
6388 perform_replacements_new_cycle ();
6389 toggle_cancelled_flags (false);
6390 unschedule_insns_until (failed_insn);
6391 while (failed != backtrack_queue)
6392 free_topmost_backtrack_point (true);
6393 restore_last_backtrack_point (&ls);
6394 if (sched_verbose >= 2)
6395 fprintf (sched_dump, ";;\t\trewind to cycle %d\n", clock_var);
6396 /* Delay by at least a cycle. This could cause additional
6397 backtracking. */
6398 queue_insn (failed_insn, 1, "backtracked");
6399 advance = 0;
6400 if (must_backtrack)
6401 continue;
6402 if (ready.n_ready > 0)
6403 goto resume_after_backtrack;
6404 else
6406 if (clock_var == 0 && ls.first_cycle_insn_p)
6407 goto end_schedule;
6408 advance = 1;
6409 break;
6413 if (ls.modulo_epilogue)
6414 success = true;
6415 end_schedule:
6416 advance_one_cycle ();
6417 perform_replacements_new_cycle ();
6418 if (modulo_ii > 0)
6420 /* Once again, debug insn suckiness: they can be on the ready list
6421 even if they have unresolved dependencies. To make our view
6422 of the world consistent, remove such "ready" insns. */
6423 restart_debug_insn_loop:
6424 for (i = ready.n_ready - 1; i >= 0; i--)
6426 rtx x;
6428 x = ready_element (&ready, i);
6429 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (x)) != NULL
6430 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (x)) != NULL)
6432 ready_remove (&ready, i);
6433 goto restart_debug_insn_loop;
6436 for (i = ready.n_ready - 1; i >= 0; i--)
6438 rtx x;
6440 x = ready_element (&ready, i);
6441 resolve_dependencies (x);
6443 for (i = 0; i <= max_insn_queue_index; i++)
6445 rtx link;
6446 while ((link = insn_queue[i]) != NULL)
6448 rtx x = XEXP (link, 0);
6449 insn_queue[i] = XEXP (link, 1);
6450 QUEUE_INDEX (x) = QUEUE_NOWHERE;
6451 free_INSN_LIST_node (link);
6452 resolve_dependencies (x);
6457 if (!success)
6458 undo_all_replacements ();
6460 /* Debug info. */
6461 if (sched_verbose)
6463 fprintf (sched_dump, ";;\tReady list (final): ");
6464 debug_ready_list (&ready);
6467 if (modulo_ii == 0 && current_sched_info->queue_must_finish_empty)
6468 /* Sanity check -- queue must be empty now. Meaningless if region has
6469 multiple bbs. */
6470 gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
6471 else if (modulo_ii == 0)
6473 /* We must maintain QUEUE_INDEX between blocks in region. */
6474 for (i = ready.n_ready - 1; i >= 0; i--)
6476 rtx x;
6478 x = ready_element (&ready, i);
6479 QUEUE_INDEX (x) = QUEUE_NOWHERE;
6480 TODO_SPEC (x) = HARD_DEP;
6483 if (q_size)
6484 for (i = 0; i <= max_insn_queue_index; i++)
6486 rtx link;
6487 for (link = insn_queue[i]; link; link = XEXP (link, 1))
6489 rtx x;
6491 x = XEXP (link, 0);
6492 QUEUE_INDEX (x) = QUEUE_NOWHERE;
6493 TODO_SPEC (x) = HARD_DEP;
6495 free_INSN_LIST_list (&insn_queue[i]);
6499 if (sched_pressure == SCHED_PRESSURE_MODEL)
6500 model_end_schedule ();
6502 if (success)
6504 commit_schedule (prev_head, tail, target_bb);
6505 if (sched_verbose)
6506 fprintf (sched_dump, ";; total time = %d\n", clock_var);
6508 else
6509 last_scheduled_insn = tail;
6511 scheduled_insns.truncate (0);
6513 if (!current_sched_info->queue_must_finish_empty
6514 || haifa_recovery_bb_recently_added_p)
6516 /* INSN_TICK (the minimum clock tick at which an insn becomes
6517 ready) may not be correct for insns in the subsequent
6518 blocks of the region. We should either use a correct value of
6519 `clock_var' or modify INSN_TICK. It is better to keep
6520 the clock_var value equal to 0 at the start of a basic block.
6521 Therefore we modify INSN_TICK here. */
6522 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
6525 if (targetm.sched.finish)
6527 targetm.sched.finish (sched_dump, sched_verbose);
6528 /* Target might have added some instructions to the scheduled block
6529 in its md_finish () hook. These new insns don't have any data
6530 initialized and to identify them we extend h_i_d so that they'll
6531 get zero luids. */
6532 sched_extend_luids ();
6535 /* Update head/tail boundaries. */
6536 head = NEXT_INSN (prev_head);
6537 tail = last_scheduled_insn;
6539 if (sched_verbose)
6541 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n",
6542 INSN_UID (head), INSN_UID (tail));
6544 if (sched_verbose >= 2)
6545 dump_insn_stream (head, tail);
6547 fprintf (sched_dump, "\n");
6550 head = restore_other_notes (head, NULL);
6552 current_sched_info->head = head;
6553 current_sched_info->tail = tail;
6555 free_backtrack_queue ();
6557 return success;
6560 /* Set_priorities: compute priority of each insn in the block. */
6562 int
6563 set_priorities (rtx head, rtx tail)
6565 rtx insn;
6566 int n_insn;
6567 int sched_max_insns_priority =
6568 current_sched_info->sched_max_insns_priority;
6569 rtx prev_head;
6571 if (head == tail && ! INSN_P (head))
6572 gcc_unreachable ();
6574 n_insn = 0;
6576 prev_head = PREV_INSN (head);
6577 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
6579 if (!INSN_P (insn))
6580 continue;
6582 n_insn++;
6583 (void) priority (insn);
6585 gcc_assert (INSN_PRIORITY_KNOWN (insn));
6587 sched_max_insns_priority = MAX (sched_max_insns_priority,
6588 INSN_PRIORITY (insn));
6591 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
6593 return n_insn;
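/* Illustrative sketch: roughly how a scheduler front end such as
   sched-rgn.c or sched-ebb.c drives these entry points for one block.  The
   steps in angle brackets stand for front-end work omitted here and are not
   real function names.  Kept under #if 0.  */
#if 0
  /* <compute dependencies and set up current_sched_info for the block> */
  set_priorities (head, tail);		  /* rank insns along the critical path */
  success = schedule_block (&bb, NULL);	  /* list-schedule and commit the block */
  /* <finalize dependencies and free per-block data> */
#endif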
6596 /* Set dump and sched_verbose for the desired debugging output. If no
6597 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
6598 For -fsched-verbose=N, N>=10, print everything to stderr. */
6599 void
6600 setup_sched_dump (void)
6602 sched_verbose = sched_verbose_param;
6603 if (sched_verbose_param == 0 && dump_file)
6604 sched_verbose = 1;
6605 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
6606 ? stderr : dump_file);
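/* Summary of the logic above:

     sched_verbose_param   dump_file?   sched_dump   sched_verbose
     0                     yes          dump_file    1
     1 .. 9                yes          dump_file    N
     >= 10                 yes          stderr       N
     any N                 no           stderr       N            */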
6609 /* Allocate data for register pressure sensitive scheduling. */
6610 static void
6611 alloc_global_sched_pressure_data (void)
6613 if (sched_pressure != SCHED_PRESSURE_NONE)
6615 int i, max_regno = max_reg_num ();
6617 if (sched_dump != NULL)
6618 /* We need info about pseudos for rtl dumps about pseudo
6619 classes and costs. */
6620 regstat_init_n_sets_and_refs ();
6621 ira_set_pseudo_classes (true, sched_verbose ? sched_dump : NULL);
6622 sched_regno_pressure_class
6623 = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
6624 for (i = 0; i < max_regno; i++)
6625 sched_regno_pressure_class[i]
6626 = (i < FIRST_PSEUDO_REGISTER
6627 ? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
6628 : ira_pressure_class_translate[reg_allocno_class (i)]);
6629 curr_reg_live = BITMAP_ALLOC (NULL);
6630 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
6632 saved_reg_live = BITMAP_ALLOC (NULL);
6633 region_ref_regs = BITMAP_ALLOC (NULL);
6638 /* Free data for register pressure sensitive scheduling. Also called
6639 from schedule_region when stopping sched-pressure early. */
6640 void
6641 free_global_sched_pressure_data (void)
6643 if (sched_pressure != SCHED_PRESSURE_NONE)
6645 if (regstat_n_sets_and_refs != NULL)
6646 regstat_free_n_sets_and_refs ();
6647 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
6649 BITMAP_FREE (region_ref_regs);
6650 BITMAP_FREE (saved_reg_live);
6652 BITMAP_FREE (curr_reg_live);
6653 free (sched_regno_pressure_class);
6657 /* Initialize some global state for the scheduler. This function works
6658 with the common data shared between all the schedulers. It is called
6659 from the scheduler specific initialization routine. */
6661 void
6662 sched_init (void)
6664 /* Disable speculative loads if cc0 is defined. */
6665 #ifdef HAVE_cc0
6666 flag_schedule_speculative_load = 0;
6667 #endif
6669 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
6670 targetm.sched.dispatch_do (NULL_RTX, DISPATCH_INIT);
6672 if (live_range_shrinkage_p)
6673 sched_pressure = SCHED_PRESSURE_WEIGHTED;
6674 else if (flag_sched_pressure
6675 && !reload_completed
6676 && common_sched_info->sched_pass_id == SCHED_RGN_PASS)
6677 sched_pressure = ((enum sched_pressure_algorithm)
6678 PARAM_VALUE (PARAM_SCHED_PRESSURE_ALGORITHM));
6679 else
6680 sched_pressure = SCHED_PRESSURE_NONE;
6682 if (sched_pressure != SCHED_PRESSURE_NONE)
6683 ira_setup_eliminable_regset ();
6685 /* Initialize SPEC_INFO. */
6686 if (targetm.sched.set_sched_flags)
6688 spec_info = &spec_info_var;
6689 targetm.sched.set_sched_flags (spec_info);
6691 if (spec_info->mask != 0)
6693 spec_info->data_weakness_cutoff =
6694 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
6695 spec_info->control_weakness_cutoff =
6696 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
6697 * REG_BR_PROB_BASE) / 100;
6699 else
6700 /* So we won't read anything accidentally. */
6701 spec_info = NULL;
6704 else
6705 /* So we won't read anything accidentally. */
6706 spec_info = 0;
6708 /* Initialize issue_rate. */
6709 if (targetm.sched.issue_rate)
6710 issue_rate = targetm.sched.issue_rate ();
6711 else
6712 issue_rate = 1;
6714 if (cached_issue_rate != issue_rate)
6716 cached_issue_rate = issue_rate;
6717 /* To invalidate max_lookahead_tries: */
6718 cached_first_cycle_multipass_dfa_lookahead = 0;
6721 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
6722 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
6723 else
6724 dfa_lookahead = 0;
6726 if (targetm.sched.init_dfa_pre_cycle_insn)
6727 targetm.sched.init_dfa_pre_cycle_insn ();
6729 if (targetm.sched.init_dfa_post_cycle_insn)
6730 targetm.sched.init_dfa_post_cycle_insn ();
6732 dfa_start ();
6733 dfa_state_size = state_size ();
6735 init_alias_analysis ();
6737 if (!sched_no_dce)
6738 df_set_flags (DF_LR_RUN_DCE);
6739 df_note_add_problem ();
6741 /* More problems needed for interloop dep calculation in SMS. */
6742 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
6744 df_rd_add_problem ();
6745 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
6748 df_analyze ();
6750 /* Do not run DCE after reload, as this can kill nops inserted
6751 by bundling. */
6752 if (reload_completed)
6753 df_clear_flags (DF_LR_RUN_DCE);
6755 regstat_compute_calls_crossed ();
6757 if (targetm.sched.init_global)
6758 targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
6760 alloc_global_sched_pressure_data ();
6762 curr_state = xmalloc (dfa_state_size);
6765 static void haifa_init_only_bb (basic_block, basic_block);
6767 /* Initialize data structures specific to the Haifa scheduler. */
6768 void
6769 haifa_sched_init (void)
6771 setup_sched_dump ();
6772 sched_init ();
6774 scheduled_insns.create (0);
6776 if (spec_info != NULL)
6778 sched_deps_info->use_deps_list = 1;
6779 sched_deps_info->generate_spec_deps = 1;
6782 /* Initialize luids, dependency caches, target and h_i_d for the
6783 whole function. */
6785 bb_vec_t bbs;
6786 bbs.create (n_basic_blocks_for_fn (cfun));
6787 basic_block bb;
6789 sched_init_bbs ();
6791 FOR_EACH_BB_FN (bb, cfun)
6792 bbs.quick_push (bb);
6793 sched_init_luids (bbs);
6794 sched_deps_init (true);
6795 sched_extend_target ();
6796 haifa_init_h_i_d (bbs);
6798 bbs.release ();
6801 sched_init_only_bb = haifa_init_only_bb;
6802 sched_split_block = sched_split_block_1;
6803 sched_create_empty_bb = sched_create_empty_bb_1;
6804 haifa_recovery_bb_ever_added_p = false;
6806 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
6807 before_recovery = 0;
6808 after_recovery = 0;
6810 modulo_ii = 0;
6813 /* Finish work with the data specific to the Haifa scheduler. */
6814 void
6815 haifa_sched_finish (void)
6817 sched_create_empty_bb = NULL;
6818 sched_split_block = NULL;
6819 sched_init_only_bb = NULL;
6821 if (spec_info && spec_info->dump)
6823 char c = reload_completed ? 'a' : 'b';
6825 fprintf (spec_info->dump,
6826 ";; %s:\n", current_function_name ());
6828 fprintf (spec_info->dump,
6829 ";; Procedure %cr-begin-data-spec motions == %d\n",
6830 c, nr_begin_data);
6831 fprintf (spec_info->dump,
6832 ";; Procedure %cr-be-in-data-spec motions == %d\n",
6833 c, nr_be_in_data);
6834 fprintf (spec_info->dump,
6835 ";; Procedure %cr-begin-control-spec motions == %d\n",
6836 c, nr_begin_control);
6837 fprintf (spec_info->dump,
6838 ";; Procedure %cr-be-in-control-spec motions == %d\n",
6839 c, nr_be_in_control);
6842 scheduled_insns.release ();
6844 /* Finalize h_i_d, dependency caches, and luids for the whole
6845 function. Target will be finalized in md_global_finish (). */
6846 sched_deps_finish ();
6847 sched_finish_luids ();
6848 current_sched_info = NULL;
6849 sched_finish ();
6852 /* Free global data used during insn scheduling. This function works with
6853 the common data shared between the schedulers. */
6855 void
6856 sched_finish (void)
6858 haifa_finish_h_i_d ();
6859 free_global_sched_pressure_data ();
6860 free (curr_state);
6862 if (targetm.sched.finish_global)
6863 targetm.sched.finish_global (sched_dump, sched_verbose);
6865 end_alias_analysis ();
6867 regstat_free_calls_crossed ();
6869 dfa_finish ();
6872 /* Free all delay_pair structures that were recorded. */
6873 void
6874 free_delay_pairs (void)
6876 if (delay_htab.is_created ())
6878 delay_htab.empty ();
6879 delay_htab_i2.empty ();
6883 /* Fix INSN_TICKs of the instructions in the current block as well as
6884 INSN_TICKs of their dependents.
6885 HEAD and TAIL are the begin and the end of the current scheduled block. */
6886 static void
6887 fix_inter_tick (rtx head, rtx tail)
6889 /* Set of instructions with corrected INSN_TICK. */
6890 bitmap_head processed;
6891 /* ??? It is doubtful whether we should assume that cycle advance happens on
6892 basic block boundaries. Basically, insns that are unconditionally ready
6893 at the start of the block are preferable to those which have
6894 a one cycle dependency on an insn from the previous block. */
6895 int next_clock = clock_var + 1;
6897 bitmap_initialize (&processed, 0);
6899 /* Iterate over the scheduled instructions and fix their INSN_TICKs and
6900 the INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
6901 across different blocks. */
6902 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
6904 if (INSN_P (head))
6906 int tick;
6907 sd_iterator_def sd_it;
6908 dep_t dep;
6910 tick = INSN_TICK (head);
6911 gcc_assert (tick >= MIN_TICK);
6913 /* Fix INSN_TICK of instruction from just scheduled block. */
6914 if (bitmap_set_bit (&processed, INSN_LUID (head)))
6916 tick -= next_clock;
6918 if (tick < MIN_TICK)
6919 tick = MIN_TICK;
6921 INSN_TICK (head) = tick;
6924 if (DEBUG_INSN_P (head))
6925 continue;
6927 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
6929 rtx next;
6931 next = DEP_CON (dep);
6932 tick = INSN_TICK (next);
6934 if (tick != INVALID_TICK
6935 /* If NEXT has its INSN_TICK calculated, fix it.
6936 If not - it will be properly calculated from
6937 scratch later in fix_tick_ready. */
6938 && bitmap_set_bit (&processed, INSN_LUID (next)))
6940 tick -= next_clock;
6942 if (tick < MIN_TICK)
6943 tick = MIN_TICK;
6945 if (tick > INTER_TICK (next))
6946 INTER_TICK (next) = tick;
6947 else
6948 tick = INTER_TICK (next);
6950 INSN_TICK (next) = tick;
6955 bitmap_clear (&processed);
6958 /* Check if NEXT is ready to be added to the ready or queue list.
6959 If "yes", add it to the proper list.
6960 Returns:
6961 -1 - is not ready yet,
6962 0 - added to the ready list,
6963 0 < N - queued for N cycles. */
6964 int
6965 try_ready (rtx next)
6967 ds_t old_ts, new_ts;
6969 old_ts = TODO_SPEC (next);
6971 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP | DEP_CONTROL | DEP_POSTPONED))
6972 && (old_ts == HARD_DEP
6973 || old_ts == DEP_POSTPONED
6974 || (old_ts & SPECULATIVE)
6975 || old_ts == DEP_CONTROL));
6977 new_ts = recompute_todo_spec (next, false);
6979 if (new_ts & (HARD_DEP | DEP_POSTPONED))
6980 gcc_assert (new_ts == old_ts
6981 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
6982 else if (current_sched_info->new_ready)
6983 new_ts = current_sched_info->new_ready (next, new_ts);
6985 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
6986 have its original pattern or changed (speculative) one. This is due
6987 to changing ebb in region scheduling.
6988 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
6989 has speculative pattern.
6991 We can't assert (!(new_ts & HARD_DEP) || new_ts == old_ts) here because
6992 control-speculative NEXT could have been discarded by sched-rgn.c
6993 (the same case as when discarded by can_schedule_ready_p ()). */
6995 if ((new_ts & SPECULATIVE)
6996 /* If (old_ts == new_ts), then (old_ts & SPECULATIVE) and we don't
6997 need to change anything. */
6998 && new_ts != old_ts)
7000 int res;
7001 rtx new_pat;
7003 gcc_assert ((new_ts & SPECULATIVE) && !(new_ts & ~SPECULATIVE));
7005 res = haifa_speculate_insn (next, new_ts, &new_pat);
7007 switch (res)
7009 case -1:
7010 /* It would be nice to change DEP_STATUS of all dependences,
7011 which have ((DEP_STATUS & SPECULATIVE) == new_ts) to HARD_DEP,
7012 so we won't reanalyze anything. */
7013 new_ts = HARD_DEP;
7014 break;
7016 case 0:
7017 /* We follow the rule, that every speculative insn
7018 has non-null ORIG_PAT. */
7019 if (!ORIG_PAT (next))
7020 ORIG_PAT (next) = PATTERN (next);
7021 break;
7023 case 1:
7024 if (!ORIG_PAT (next))
7025 /* If we are going to overwrite the original pattern of the insn,
7026 save it. */
7027 ORIG_PAT (next) = PATTERN (next);
7029 res = haifa_change_pattern (next, new_pat);
7030 gcc_assert (res);
7031 break;
7033 default:
7034 gcc_unreachable ();
7038 /* We need to restore pattern only if (new_ts == 0), because otherwise it is
7039 either correct (new_ts & SPECULATIVE),
7040 or we simply don't care (new_ts & HARD_DEP). */
7042 gcc_assert (!ORIG_PAT (next)
7043 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
7045 TODO_SPEC (next) = new_ts;
7047 if (new_ts & (HARD_DEP | DEP_POSTPONED))
7049 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
7050 control-speculative NEXT could have been discarded by sched-rgn.c
7051 (the same case as when discarded by can_schedule_ready_p ()). */
7052 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
7054 change_queue_index (next, QUEUE_NOWHERE);
7056 return -1;
7058 else if (!(new_ts & BEGIN_SPEC)
7059 && ORIG_PAT (next) && PREDICATED_PAT (next) == NULL_RTX
7060 && !IS_SPECULATION_CHECK_P (next))
7061 /* We should change the pattern of every previously speculative
7062 instruction - and we determine whether NEXT was speculative by using
7063 the ORIG_PAT field. There is one exception: speculation checks have an
7064 ORIG_PAT too, so skip them. */
7066 bool success = haifa_change_pattern (next, ORIG_PAT (next));
7067 gcc_assert (success);
7068 ORIG_PAT (next) = 0;
7071 if (sched_verbose >= 2)
7073 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
7074 (*current_sched_info->print_insn) (next, 0));
7076 if (spec_info && spec_info->dump)
7078 if (new_ts & BEGIN_DATA)
7079 fprintf (spec_info->dump, "; data-spec;");
7080 if (new_ts & BEGIN_CONTROL)
7081 fprintf (spec_info->dump, "; control-spec;");
7082 if (new_ts & BE_IN_CONTROL)
7083 fprintf (spec_info->dump, "; in-control-spec;");
7085 if (TODO_SPEC (next) & DEP_CONTROL)
7086 fprintf (sched_dump, " predicated");
7087 fprintf (sched_dump, "\n");
7090 adjust_priority (next);
7092 return fix_tick_ready (next);
7095 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
7096 static int
7097 fix_tick_ready (rtx next)
7099 int tick, delay;
7101 if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
7103 int full_p;
7104 sd_iterator_def sd_it;
7105 dep_t dep;
7107 tick = INSN_TICK (next);
7108 /* if tick is not equal to INVALID_TICK, then update
7109 INSN_TICK of NEXT with the most recent resolved dependence
7110 cost. Otherwise, recalculate from scratch. */
7111 full_p = (tick == INVALID_TICK);
7113 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
7115 rtx pro = DEP_PRO (dep);
7116 int tick1;
7118 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
7120 tick1 = INSN_TICK (pro) + dep_cost (dep);
7121 if (tick1 > tick)
7122 tick = tick1;
7124 if (!full_p)
7125 break;
7128 else
7129 tick = -1;
7131 INSN_TICK (next) = tick;
7133 delay = tick - clock_var;
7134 if (delay <= 0 || sched_pressure != SCHED_PRESSURE_NONE)
7135 delay = QUEUE_READY;
7137 change_queue_index (next, delay);
7139 return delay;
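/* Worked example (with made-up numbers): suppose NEXT's tick is recomputed
   from scratch and it has two resolved back dependences, one on a producer
   with INSN_TICK 4 and dep_cost 2 and one on a producer with INSN_TICK 5
   and dep_cost 0.  Then INSN_TICK (NEXT) = MAX (4 + 2, 5 + 0) = 6.  With
   clock_var == 5 the delay is 1, so NEXT is queued for one cycle; a delay
   <= 0 (or any register-pressure-sensitive mode) would have sent it
   straight to the ready list instead.  */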
7142 /* Move NEXT to the proper queue list with (DELAY >= 1),
7143 or add it to the ready list (DELAY == QUEUE_READY),
7144 or remove it from both the ready and queue lists (DELAY == QUEUE_NOWHERE). */
7145 static void
7146 change_queue_index (rtx next, int delay)
7148 int i = QUEUE_INDEX (next);
7150 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
7151 && delay != 0);
7152 gcc_assert (i != QUEUE_SCHEDULED);
7154 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
7155 || (delay < 0 && delay == i))
7156 /* We have nothing to do. */
7157 return;
7159 /* Remove NEXT from wherever it is now. */
7160 if (i == QUEUE_READY)
7161 ready_remove_insn (next);
7162 else if (i >= 0)
7163 queue_remove (next);
7165 /* Add it to the proper place. */
7166 if (delay == QUEUE_READY)
7167 ready_add (readyp, next, false);
7168 else if (delay >= 1)
7169 queue_insn (next, delay, "change queue index");
7171 if (sched_verbose >= 2)
7173 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
7174 (*current_sched_info->print_insn) (next, 0));
7176 if (delay == QUEUE_READY)
7177 fprintf (sched_dump, " into ready\n");
7178 else if (delay >= 1)
7179 fprintf (sched_dump, " into queue with cost=%d\n", delay);
7180 else
7181 fprintf (sched_dump, " removed from ready or queue lists\n");
7185 static int sched_ready_n_insns = -1;
7187 /* Initialize per region data structures. */
7188 void
7189 sched_extend_ready_list (int new_sched_ready_n_insns)
7191 int i;
7193 if (sched_ready_n_insns == -1)
7194 /* At the first call we need to initialize one more choice_stack
7195 entry. */
7197 i = 0;
7198 sched_ready_n_insns = 0;
7199 scheduled_insns.reserve (new_sched_ready_n_insns);
7201 else
7202 i = sched_ready_n_insns + 1;
7204 ready.veclen = new_sched_ready_n_insns + issue_rate;
7205 ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);
7207 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
7209 ready_try = (signed char *) xrecalloc (ready_try, new_sched_ready_n_insns,
7210 sched_ready_n_insns,
7211 sizeof (*ready_try));
7213 /* We allocate +1 element to save initial state in the choice_stack[0]
7214 entry. */
7215 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
7216 new_sched_ready_n_insns + 1);
7218 for (; i <= new_sched_ready_n_insns; i++)
7220 choice_stack[i].state = xmalloc (dfa_state_size);
7222 if (targetm.sched.first_cycle_multipass_init)
7223 targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
7224 .target_data));
7227 sched_ready_n_insns = new_sched_ready_n_insns;
7230 /* Free per region data structures. */
7231 void
7232 sched_finish_ready_list (void)
7234 int i;
7236 free (ready.vec);
7237 ready.vec = NULL;
7238 ready.veclen = 0;
7240 free (ready_try);
7241 ready_try = NULL;
7243 for (i = 0; i <= sched_ready_n_insns; i++)
7245 if (targetm.sched.first_cycle_multipass_fini)
7246 targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
7247 .target_data));
7249 free (choice_stack [i].state);
7251 free (choice_stack);
7252 choice_stack = NULL;
7254 sched_ready_n_insns = -1;
7257 static int
7258 haifa_luid_for_non_insn (rtx x)
7260 gcc_assert (NOTE_P (x) || LABEL_P (x));
7262 return 0;
7265 /* Generates recovery code for INSN. */
7266 static void
7267 generate_recovery_code (rtx insn)
7269 if (TODO_SPEC (insn) & BEGIN_SPEC)
7270 begin_speculative_block (insn);
7272 /* Here we have an insn with no dependencies on
7273 instructions other than CHECK_SPEC ones. */
7275 if (TODO_SPEC (insn) & BE_IN_SPEC)
7276 add_to_speculative_block (insn);
7279 /* Helper function.
7280 Tries to add speculative dependencies of type FS between instructions
7281 in deps_list L and TWIN. */
7282 static void
7283 process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
7285 sd_iterator_def sd_it;
7286 dep_t dep;
7288 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
7290 ds_t ds;
7291 rtx consumer;
7293 consumer = DEP_CON (dep);
7295 ds = DEP_STATUS (dep);
7297 if (/* If we want to create speculative dep. */
7299 /* And we can do that because this is a true dep. */
7300 && (ds & DEP_TYPES) == DEP_TRUE)
7302 gcc_assert (!(ds & BE_IN_SPEC));
7304 if (/* If this dep can be overcome with 'begin speculation'. */
7305 ds & BEGIN_SPEC)
7306 /* Then we have a choice: keep the dep 'begin speculative'
7307 or transform it into 'be in speculative'. */
7309 if (/* In try_ready we assert that once an insn has become ready
7310 it can be removed from the ready (or queue) list only
7311 due to a backend decision. Hence we can't let the
7312 probability of the speculative dep decrease. */
7313 ds_weak (ds) <= ds_weak (fs))
7315 ds_t new_ds;
7317 new_ds = (ds & ~BEGIN_SPEC) | fs;
7319 if (/* consumer can 'be in speculative'. */
7320 sched_insn_is_legitimate_for_speculation_p (consumer,
7321 new_ds))
7322 /* Transform it to be in speculative. */
7323 ds = new_ds;
7326 else
7327 /* Mark the dep as 'be in speculative'. */
7328 ds |= fs;
7332 dep_def _new_dep, *new_dep = &_new_dep;
7334 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
7335 sd_add_dep (new_dep, false);
7340 /* Generates recovery code for BEGIN speculative INSN. */
7341 static void
7342 begin_speculative_block (rtx insn)
7344 if (TODO_SPEC (insn) & BEGIN_DATA)
7345 nr_begin_data++;
7346 if (TODO_SPEC (insn) & BEGIN_CONTROL)
7347 nr_begin_control++;
7349 create_check_block_twin (insn, false);
7351 TODO_SPEC (insn) &= ~BEGIN_SPEC;
7354 static void haifa_init_insn (rtx);
7356 /* Generates recovery code for BE_IN speculative INSN. */
7357 static void
7358 add_to_speculative_block (rtx insn)
7360 ds_t ts;
7361 sd_iterator_def sd_it;
7362 dep_t dep;
7363 rtx twins = NULL;
7364 rtx_vec_t priorities_roots;
7366 ts = TODO_SPEC (insn);
7367 gcc_assert (!(ts & ~BE_IN_SPEC));
7369 if (ts & BE_IN_DATA)
7370 nr_be_in_data++;
7371 if (ts & BE_IN_CONTROL)
7372 nr_be_in_control++;
7374 TODO_SPEC (insn) &= ~BE_IN_SPEC;
7375 gcc_assert (!TODO_SPEC (insn));
7377 DONE_SPEC (insn) |= ts;
7379 /* First we convert all simple checks to branchy. */
7380 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7381 sd_iterator_cond (&sd_it, &dep);)
7383 rtx check = DEP_PRO (dep);
7385 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
7387 create_check_block_twin (check, true);
7389 /* Restart search. */
7390 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7392 else
7393 /* Continue search. */
7394 sd_iterator_next (&sd_it);
7397 priorities_roots.create (0);
7398 clear_priorities (insn, &priorities_roots);
7400 while (1)
7402 rtx check, twin;
7403 basic_block rec;
7405 /* Get the first backward dependency of INSN. */
7406 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7407 if (!sd_iterator_cond (&sd_it, &dep))
7408 /* INSN has no backward dependencies left. */
7409 break;
7411 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
7412 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
7413 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
7415 check = DEP_PRO (dep);
7417 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
7418 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
7420 rec = BLOCK_FOR_INSN (check);
7422 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
7423 haifa_init_insn (twin);
7425 sd_copy_back_deps (twin, insn, true);
7427 if (sched_verbose && spec_info->dump)
7428 /* INSN_BB (insn) isn't determined for twin insns yet.
7429 So we can't use current_sched_info->print_insn. */
7430 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
7431 INSN_UID (twin), rec->index);
7433 twins = alloc_INSN_LIST (twin, twins);
7435 /* Add dependences between TWIN and all appropriate
7436 instructions from REC. */
7437 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
7439 rtx pro = DEP_PRO (dep);
7441 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
7443 /* INSN might have dependencies on instructions from
7444 several recovery blocks. At this iteration we process those
7445 producers that reside in REC. */
7446 if (BLOCK_FOR_INSN (pro) == rec)
7448 dep_def _new_dep, *new_dep = &_new_dep;
7450 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
7451 sd_add_dep (new_dep, false);
7455 process_insn_forw_deps_be_in_spec (insn, twin, ts);
7457 /* Remove all dependencies between INSN and insns in REC. */
7458 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7459 sd_iterator_cond (&sd_it, &dep);)
7461 rtx pro = DEP_PRO (dep);
7463 if (BLOCK_FOR_INSN (pro) == rec)
7464 sd_delete_dep (sd_it);
7465 else
7466 sd_iterator_next (&sd_it);
7470 /* We couldn't have added the dependencies between INSN and TWINS earlier
7471 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
7472 while (twins)
7474 rtx twin;
7476 twin = XEXP (twins, 0);
7479 dep_def _new_dep, *new_dep = &_new_dep;
7481 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
7482 sd_add_dep (new_dep, false);
7485 twin = XEXP (twins, 1);
7486 free_INSN_LIST_node (twins);
7487 twins = twin;
7490 calc_priorities (priorities_roots);
7491 priorities_roots.release ();
7494 /* Extend the array pointed to by P and zero only the newly added part. */
7495 void *
7496 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
7498 gcc_assert (new_nmemb >= old_nmemb);
7499 p = XRESIZEVAR (void, p, new_nmemb * size);
7500 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
7501 return p;
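/* Usage sketch: this mirrors how sched_extend_ready_list above grows the
   zero-initialized READY_TRY array.  Kept under #if 0.  */
#if 0
  ready_try = (signed char *) xrecalloc (ready_try, new_sched_ready_n_insns,
					 sched_ready_n_insns,
					 sizeof (*ready_try));
#endif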
7504 /* Helper function.
7505 Find fallthru edge from PRED. */
7506 edge
7507 find_fallthru_edge_from (basic_block pred)
7509 edge e;
7510 basic_block succ;
7512 succ = pred->next_bb;
7513 gcc_assert (succ->prev_bb == pred);
7515 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
7517 e = find_fallthru_edge (pred->succs);
7519 if (e)
7521 gcc_assert (e->dest == succ);
7522 return e;
7525 else
7527 e = find_fallthru_edge (succ->preds);
7529 if (e)
7531 gcc_assert (e->src == pred);
7532 return e;
7536 return NULL;
7539 /* Extend per basic block data structures. */
7540 static void
7541 sched_extend_bb (void)
7543 /* The following is done to keep current_sched_info->next_tail non null. */
7544 rtx end = BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
7545 rtx insn = DEBUG_INSN_P (end) ? prev_nondebug_insn (end) : end;
7546 if (NEXT_INSN (end) == 0
7547 || (!NOTE_P (insn)
7548 && !LABEL_P (insn)
7549 /* Don't emit a NOTE if it would end up before a BARRIER. */
7550 && !BARRIER_P (NEXT_INSN (end))))
7552 rtx note = emit_note_after (NOTE_INSN_DELETED, end);
7553 /* Make note appear outside BB. */
7554 set_block_for_insn (note, NULL);
7555 BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb) = end;
7559 /* Init per basic block data structures. */
7560 void
7561 sched_init_bbs (void)
7563 sched_extend_bb ();
7566 /* Initialize BEFORE_RECOVERY variable. */
7567 static void
7568 init_before_recovery (basic_block *before_recovery_ptr)
7570 basic_block last;
7571 edge e;
7573 last = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
7574 e = find_fallthru_edge_from (last);
7576 if (e)
7578 /* We create two basic blocks:
7579 1. A single-instruction block inserted right after E->SRC,
7580 which jumps to
7581 2. An empty block right before EXIT_BLOCK.
7582 Recovery blocks will be emitted between these two blocks. */
7584 basic_block single, empty;
7585 rtx x, label;
7587 /* If the fallthrough edge to exit we've found is from the block we've
7588 created before, don't do anything more. */
7589 if (last == after_recovery)
7590 return;
7592 adding_bb_to_current_region_p = false;
7594 single = sched_create_empty_bb (last);
7595 empty = sched_create_empty_bb (single);
7597 /* Add new blocks to the root loop. */
7598 if (current_loops != NULL)
7600 add_bb_to_loop (single, (*current_loops->larray)[0]);
7601 add_bb_to_loop (empty, (*current_loops->larray)[0]);
7604 single->count = last->count;
7605 empty->count = last->count;
7606 single->frequency = last->frequency;
7607 empty->frequency = last->frequency;
7608 BB_COPY_PARTITION (single, last);
7609 BB_COPY_PARTITION (empty, last);
7611 redirect_edge_succ (e, single);
7612 make_single_succ_edge (single, empty, 0);
7613 make_single_succ_edge (empty, EXIT_BLOCK_PTR_FOR_FN (cfun),
7614 EDGE_FALLTHRU);
7616 label = block_label (empty);
7617 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
7618 JUMP_LABEL (x) = label;
7619 LABEL_NUSES (label)++;
7620 haifa_init_insn (x);
7622 emit_barrier_after (x);
7624 sched_init_only_bb (empty, NULL);
7625 sched_init_only_bb (single, NULL);
7626 sched_extend_bb ();
7628 adding_bb_to_current_region_p = true;
7629 before_recovery = single;
7630 after_recovery = empty;
7632 if (before_recovery_ptr)
7633 *before_recovery_ptr = before_recovery;
7635 if (sched_verbose >= 2 && spec_info->dump)
7636 fprintf (spec_info->dump,
7637 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
7638 last->index, single->index, empty->index);
7640 else
7641 before_recovery = last;
7644 /* Returns new recovery block. */
7645 basic_block
7646 sched_create_recovery_block (basic_block *before_recovery_ptr)
7648 rtx label;
7649 rtx barrier;
7650 basic_block rec;
7652 haifa_recovery_bb_recently_added_p = true;
7653 haifa_recovery_bb_ever_added_p = true;
7655 init_before_recovery (before_recovery_ptr);
7657 barrier = get_last_bb_insn (before_recovery);
7658 gcc_assert (BARRIER_P (barrier));
7660 label = emit_label_after (gen_label_rtx (), barrier);
7662 rec = create_basic_block (label, label, before_recovery);
7664 /* A recovery block always ends with an unconditional jump. */
7665 emit_barrier_after (BB_END (rec));
7667 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
7668 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
7670 if (sched_verbose && spec_info->dump)
7671 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
7672 rec->index);
7674 return rec;
7677 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
7678 and emit necessary jumps. */
7679 void
7680 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
7681 basic_block second_bb)
7683 rtx label;
7684 rtx jump;
7685 int edge_flags;
7687 /* Fix the incoming edge. */
7688 /* ??? Which other flags should be specified? */
7689 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
7690 /* Partition type is the same, if it is "unpartitioned". */
7691 edge_flags = EDGE_CROSSING;
7692 else
7693 edge_flags = 0;
7695 make_edge (first_bb, rec, edge_flags);
7696 label = block_label (second_bb);
7697 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
7698 JUMP_LABEL (jump) = label;
7699 LABEL_NUSES (label)++;
7701 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
7702 /* Partition type is the same, if it is "unpartitioned". */
7704 /* Rewritten from cfgrtl.c. */
7705 if (flag_reorder_blocks_and_partition
7706 && targetm_common.have_named_sections)
7708 /* We don't need the same note for the check because
7709 any_condjump_p (check) == true. */
7710 CROSSING_JUMP_P (jump) = 1;
7712 edge_flags = EDGE_CROSSING;
7714 else
7715 edge_flags = 0;
7717 make_single_succ_edge (rec, second_bb, edge_flags);
7718 if (dom_info_available_p (CDI_DOMINATORS))
7719 set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
7722 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
7723 INSN is a simple check that should be converted to a branchy one. */
7724 static void
7725 create_check_block_twin (rtx insn, bool mutate_p)
7727 basic_block rec;
7728 rtx label, check, twin;
7729 ds_t fs;
7730 sd_iterator_def sd_it;
7731 dep_t dep;
7732 dep_def _new_dep, *new_dep = &_new_dep;
7733 ds_t todo_spec;
7735 gcc_assert (ORIG_PAT (insn) != NULL_RTX);
7737 if (!mutate_p)
7738 todo_spec = TODO_SPEC (insn);
7739 else
7741 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
7742 && (TODO_SPEC (insn) & SPECULATIVE) == 0);
7744 todo_spec = CHECK_SPEC (insn);
7747 todo_spec &= SPECULATIVE;
7749 /* Create recovery block. */
7750 if (mutate_p || targetm.sched.needs_block_p (todo_spec))
7752 rec = sched_create_recovery_block (NULL);
7753 label = BB_HEAD (rec);
7755 else
7757 rec = EXIT_BLOCK_PTR_FOR_FN (cfun);
7758 label = NULL_RTX;
7761 /* Emit CHECK. */
7762 check = targetm.sched.gen_spec_check (insn, label, todo_spec);
7764 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
7766 /* To have mem_reg alive at the beginning of second_bb,
7767 we emit the check BEFORE insn, so that after splitting,
7768 insn will be at the beginning of second_bb, which will
7769 provide us with the correct life information. */
7770 check = emit_jump_insn_before (check, insn);
7771 JUMP_LABEL (check) = label;
7772 LABEL_NUSES (label)++;
7774 else
7775 check = emit_insn_before (check, insn);
7777 /* Extend data structures. */
7778 haifa_init_insn (check);
7780 /* CHECK is being added to current region. Extend ready list. */
7781 gcc_assert (sched_ready_n_insns != -1);
7782 sched_extend_ready_list (sched_ready_n_insns + 1);
7784 if (current_sched_info->add_remove_insn)
7785 current_sched_info->add_remove_insn (insn, 0);
7787 RECOVERY_BLOCK (check) = rec;
7789 if (sched_verbose && spec_info->dump)
7790 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
7791 (*current_sched_info->print_insn) (check, 0));
7793 gcc_assert (ORIG_PAT (insn));
7795 /* Initialize TWIN (twin is a duplicate of the original instruction
7796 in the recovery block).  */
7797 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
7799 sd_iterator_def sd_it;
7800 dep_t dep;
7802 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
7803 if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
7805 struct _dep _dep2, *dep2 = &_dep2;
7807 init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
7809 sd_add_dep (dep2, true);
7812 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
7813 haifa_init_insn (twin);
7815 if (sched_verbose && spec_info->dump)
7816 /* INSN_BB (insn) isn't determined for twin insns yet.
7817 So we can't use current_sched_info->print_insn. */
7818 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
7819 INSN_UID (twin), rec->index);
7821 else
7823 ORIG_PAT (check) = ORIG_PAT (insn);
7824 HAS_INTERNAL_DEP (check) = 1;
7825 twin = check;
7826 /* ??? We probably should change all OUTPUT dependencies to
7827 (TRUE | OUTPUT). */
7830 /* Copy all resolved back dependencies of INSN to TWIN. This will
7831 provide correct value for INSN_TICK (TWIN). */
7832 sd_copy_back_deps (twin, insn, true);
7834 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
7835 /* In the case of a branchy check, fix the CFG.  */
7837 basic_block first_bb, second_bb;
7838 rtx jump;
7840 first_bb = BLOCK_FOR_INSN (check);
7841 second_bb = sched_split_block (first_bb, check);
7843 sched_create_recovery_edges (first_bb, rec, second_bb);
7845 sched_init_only_bb (second_bb, first_bb);
7846 sched_init_only_bb (rec, EXIT_BLOCK_PTR_FOR_FN (cfun));
7848 jump = BB_END (rec);
7849 haifa_init_insn (jump);
7852 /* Move backward dependences from INSN to CHECK and
7853 move forward dependences from INSN to TWIN. */
7855 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
7856 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
7858 rtx pro = DEP_PRO (dep);
7859 ds_t ds;
7861 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
7862 check --TRUE--> producer ??? or ANTI ???
7863 twin --TRUE--> producer
7864 twin --ANTI--> check
7866 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
7867 check --ANTI--> producer
7868 twin --ANTI--> producer
7869 twin --ANTI--> check
7871 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
7872 check ~~TRUE~~> producer
7873 twin ~~TRUE~~> producer
7874 twin --ANTI--> check */
7876 ds = DEP_STATUS (dep);
7878 if (ds & BEGIN_SPEC)
7880 gcc_assert (!mutate_p);
7881 ds &= ~BEGIN_SPEC;
7884 init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
7885 sd_add_dep (new_dep, false);
7887 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
7889 DEP_CON (new_dep) = twin;
7890 sd_add_dep (new_dep, false);
7894 /* Second, remove backward dependencies of INSN. */
7895 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7896 sd_iterator_cond (&sd_it, &dep);)
7898 if ((DEP_STATUS (dep) & BEGIN_SPEC)
7899 || mutate_p)
7900 /* We can delete this dep because we overcome it with
7901 BEGIN_SPECULATION. */
7902 sd_delete_dep (sd_it);
7903 else
7904 sd_iterator_next (&sd_it);
7907 /* Future Speculations. Determine what BE_IN speculations will be like. */
7908 fs = 0;
7910 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
7911 here. */
7913 gcc_assert (!DONE_SPEC (insn));
7915 if (!mutate_p)
7917 ds_t ts = TODO_SPEC (insn);
7919 DONE_SPEC (insn) = ts & BEGIN_SPEC;
7920 CHECK_SPEC (check) = ts & BEGIN_SPEC;
7922 /* The likelihood of future speculations depends solely on the initial
7923 BEGIN speculation.  */
7924 if (ts & BEGIN_DATA)
7925 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
7926 if (ts & BEGIN_CONTROL)
7927 fs = set_dep_weak (fs, BE_IN_CONTROL,
7928 get_dep_weak (ts, BEGIN_CONTROL));
7930 else
7931 CHECK_SPEC (check) = CHECK_SPEC (insn);
7933 /* Future speculations: call the helper. */
7934 process_insn_forw_deps_be_in_spec (insn, twin, fs);
7936 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
7938 /* Which types of dependencies to use here is, in general,
7939 a machine-dependent question...  But, for now,
7940 it is not.  */
7942 if (!mutate_p)
7944 init_dep (new_dep, insn, check, REG_DEP_TRUE);
7945 sd_add_dep (new_dep, false);
7947 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
7948 sd_add_dep (new_dep, false);
7950 else
7952 if (spec_info->dump)
7953 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
7954 (*current_sched_info->print_insn) (insn, 0));
7956 /* Remove all dependencies of the INSN. */
7958 sd_it = sd_iterator_start (insn, (SD_LIST_FORW
7959 | SD_LIST_BACK
7960 | SD_LIST_RES_BACK));
7961 while (sd_iterator_cond (&sd_it, &dep))
7962 sd_delete_dep (sd_it);
7965 /* If the former check (INSN) was already moved to the ready (or queue)
7966 list, add the new check (CHECK) there too.  */
7967 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
7968 try_ready (check);
7970 /* Remove old check from instruction stream and free its
7971 data. */
7972 sched_remove_insn (insn);
7975 init_dep (new_dep, check, twin, REG_DEP_ANTI);
7976 sd_add_dep (new_dep, false);
7978 else
7980 init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
7981 sd_add_dep (new_dep, false);
7984 if (!mutate_p)
7985 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
7986 because it'll be done later in add_to_speculative_block. */
7988 rtx_vec_t priorities_roots = rtx_vec_t ();
7990 clear_priorities (twin, &priorities_roots);
7991 calc_priorities (priorities_roots);
7992 priorities_roots.release ();
7996 /* Removes dependencies between instructions in the recovery block REC
7997 and the usual region instructions.  It keeps the inner dependences
7998 so that it won't be necessary to recompute them.  */
7999 static void
8000 fix_recovery_deps (basic_block rec)
8002 rtx note, insn, jump, ready_list = 0;
8003 bitmap_head in_ready;
8004 rtx link;
8006 bitmap_initialize (&in_ready, 0);
8008 /* NOTE - a basic block note. */
8009 note = NEXT_INSN (BB_HEAD (rec));
8010 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
8011 insn = BB_END (rec);
8012 gcc_assert (JUMP_P (insn));
8013 insn = PREV_INSN (insn);
8017 sd_iterator_def sd_it;
8018 dep_t dep;
8020 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
8021 sd_iterator_cond (&sd_it, &dep);)
8023 rtx consumer = DEP_CON (dep);
8025 if (BLOCK_FOR_INSN (consumer) != rec)
8027 sd_delete_dep (sd_it);
8029 if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
8030 ready_list = alloc_INSN_LIST (consumer, ready_list);
8032 else
8034 gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
8036 sd_iterator_next (&sd_it);
8040 insn = PREV_INSN (insn);
8042 while (insn != note);
8044 bitmap_clear (&in_ready);
8046 /* Try to add instructions to the ready or queue list. */
8047 for (link = ready_list; link; link = XEXP (link, 1))
8048 try_ready (XEXP (link, 0));
8049 free_INSN_LIST_list (&ready_list);
8051 /* Fix the jump's dependences.  */
8052 insn = BB_HEAD (rec);
8053 jump = BB_END (rec);
8055 gcc_assert (LABEL_P (insn));
8056 insn = NEXT_INSN (insn);
8058 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
8059 add_jump_dependencies (insn, jump);
8062 /* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
8063 instruction data. */
8064 static bool
8065 haifa_change_pattern (rtx insn, rtx new_pat)
8067 int t;
8069 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
8070 if (!t)
8071 return false;
8073 update_insn_after_change (insn);
8074 return true;
8077 /* Return -1 if we can't speculate,
8078 0 if, for speculation with mode REQUEST, it is OK to use the
8079 current instruction pattern, or
8080 1 if the pattern must be changed: *NEW_PAT is set to the speculative form.  */
8081 int
8082 sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
8084 gcc_assert (current_sched_info->flags & DO_SPECULATION
8085 && (request & SPECULATIVE)
8086 && sched_insn_is_legitimate_for_speculation_p (insn, request));
8088 if ((request & spec_info->mask) != request)
8089 return -1;
8091 if (request & BE_IN_SPEC
8092 && !(request & BEGIN_SPEC))
8093 return 0;
8095 return targetm.sched.speculate_insn (insn, request, new_pat);
8098 static int
8099 haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
8101 gcc_assert (sched_deps_info->generate_spec_deps
8102 && !IS_SPECULATION_CHECK_P (insn));
8104 if (HAS_INTERNAL_DEP (insn)
8105 || SCHED_GROUP_P (insn))
8106 return -1;
8108 return sched_speculate_insn (insn, request, new_pat);
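/* Illustrative sketch (hypothetical caller, not part of this file): the
   usual way the -1/0/1 result above is consumed together with
   haifa_change_pattern.  */
#if 0
static bool
example_try_speculate (rtx insn, ds_t request)
{
  rtx new_pat;
  int res = haifa_speculate_insn (insn, request, &new_pat);

  if (res < 0)
    return false;                       /* Cannot speculate at all.  */
  if (res > 0                           /* Pattern must be changed ...  */
      && !haifa_change_pattern (insn, new_pat))
    return false;                       /* ... but validate_change refused.  */
  return true;                          /* res == 0: current pattern is OK.  */
}
#endif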
8111 /* Print some information about block BB, which starts with HEAD and
8112 ends with TAIL, before scheduling it.
8113 I is zero if the scheduler is about to start with a fresh ebb.  */
8114 static void
8115 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
8117 if (!i)
8118 fprintf (sched_dump,
8119 ";; ======================================================\n");
8120 else
8121 fprintf (sched_dump,
8122 ";; =====================ADVANCING TO=====================\n");
8123 fprintf (sched_dump,
8124 ";; -- basic block %d from %d to %d -- %s reload\n",
8125 bb->index, INSN_UID (head), INSN_UID (tail),
8126 (reload_completed ? "after" : "before"));
8127 fprintf (sched_dump,
8128 ";; ======================================================\n");
8129 fprintf (sched_dump, "\n");
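/* For instance (the block and insn numbers here are hypothetical), for the
   first block after register allocation the function above prints:

   ;; ======================================================
   ;; -- basic block 3 from 25 to 40 -- after reload
   ;; ======================================================
*/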
8132 /* Unlink basic block notes and labels and save them, so they
8133 can be easily restored.  We unlink basic block notes in the EBB to
8134 provide backward compatibility with the previous code, as target backends
8135 assume that there'll be only instructions between
8136 current_sched_info->{head and tail}.  We restore these notes as soon
8137 as we can.
8138 FIRST (LAST) is the first (last) basic block in the ebb.
8139 NB: In usual case (FIRST == LAST) nothing is really done. */
8140 void
8141 unlink_bb_notes (basic_block first, basic_block last)
8143 /* We DON'T unlink basic block notes of the first block in the ebb. */
8144 if (first == last)
8145 return;
8147 bb_header = XNEWVEC (rtx, last_basic_block_for_fn (cfun));
8149 /* Make a sentinel. */
8150 if (last->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
8151 bb_header[last->next_bb->index] = 0;
8153 first = first->next_bb;
8156 rtx prev, label, note, next;
8158 label = BB_HEAD (last);
8159 if (LABEL_P (label))
8160 note = NEXT_INSN (label);
8161 else
8162 note = label;
8163 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
8165 prev = PREV_INSN (label);
8166 next = NEXT_INSN (note);
8167 gcc_assert (prev && next);
8169 NEXT_INSN (prev) = next;
8170 PREV_INSN (next) = prev;
8172 bb_header[last->index] = label;
8174 if (last == first)
8175 break;
8177 last = last->prev_bb;
8179 while (1);
8182 /* Restore basic block notes.
8183 FIRST is the first basic block in the ebb. */
8184 static void
8185 restore_bb_notes (basic_block first)
8187 if (!bb_header)
8188 return;
8190 /* We DON'T unlink basic block notes of the first block in the ebb. */
8191 first = first->next_bb;
8192 /* Remember: FIRST is actually the second basic block in the ebb.  */
8194 while (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
8195 && bb_header[first->index])
8197 rtx prev, label, note, next;
8199 label = bb_header[first->index];
8200 prev = PREV_INSN (label);
8201 next = NEXT_INSN (prev);
8203 if (LABEL_P (label))
8204 note = NEXT_INSN (label);
8205 else
8206 note = label;
8207 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
8209 bb_header[first->index] = 0;
8211 NEXT_INSN (prev) = label;
8212 NEXT_INSN (note) = next;
8213 PREV_INSN (next) = note;
8215 first = first->next_bb;
8218 free (bb_header);
8219 bb_header = 0;
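/* Illustrative sketch (schematic, not part of this file): the pair of
   helpers above brackets scheduling of an extended basic block:

     unlink_bb_notes (first_bb, last_bb);
     ... schedule the insns between current_sched_info->head and tail ...
     restore_bb_notes (first_bb);

   FIRST_BB/LAST_BB delimit the ebb; when FIRST_BB == LAST_BB both calls
   are effectively no-ops.  */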
8222 /* Helper function.
8223 Fix the CFG after both intra- and inter-block movement of a
8224 control_flow_insn_p JUMP.  */
8225 static void
8226 fix_jump_move (rtx jump)
8228 basic_block bb, jump_bb, jump_bb_next;
8230 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
8231 jump_bb = BLOCK_FOR_INSN (jump);
8232 jump_bb_next = jump_bb->next_bb;
8234 gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
8235 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
8237 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
8238 /* If jump_bb_next is not empty.  */
8239 BB_END (jump_bb) = BB_END (jump_bb_next);
8241 if (BB_END (bb) != PREV_INSN (jump))
8242 /* Then there are instructions after jump that should be placed
8243 in jump_bb_next.  */
8244 BB_END (jump_bb_next) = BB_END (bb);
8245 else
8246 /* Otherwise jump_bb_next is empty. */
8247 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
8249 /* To keep the assertion in move_insn happy.  */
8250 BB_END (bb) = PREV_INSN (jump);
8252 update_bb_for_insn (jump_bb_next);
8255 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
8256 static void
8257 move_block_after_check (rtx jump)
8259 basic_block bb, jump_bb, jump_bb_next;
8260 vec<edge, va_gc> *t;
8262 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
8263 jump_bb = BLOCK_FOR_INSN (jump);
8264 jump_bb_next = jump_bb->next_bb;
8266 update_bb_for_insn (jump_bb);
8268 gcc_assert (IS_SPECULATION_CHECK_P (jump)
8269 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
8271 unlink_block (jump_bb_next);
8272 link_block (jump_bb_next, bb);
8274 t = bb->succs;
8275 bb->succs = 0;
8276 move_succs (&(jump_bb->succs), bb);
8277 move_succs (&(jump_bb_next->succs), jump_bb);
8278 move_succs (&t, jump_bb_next);
8280 df_mark_solutions_dirty ();
8282 common_sched_info->fix_recovery_cfg
8283 (bb->index, jump_bb->index, jump_bb_next->index);
8286 /* Helper function for move_block_after_check.
8287 This function attaches the edge vector pointed to by SUCCSP to
8288 block TO.  */
8289 static void
8290 move_succs (vec<edge, va_gc> **succsp, basic_block to)
8292 edge e;
8293 edge_iterator ei;
8295 gcc_assert (to->succs == 0);
8297 to->succs = *succsp;
8299 FOR_EACH_EDGE (e, ei, to->succs)
8300 e->src = to;
8302 *succsp = 0;
8305 /* Remove INSN from the instruction stream.
8306 INSN should not have any dependencies.  */
8307 static void
8308 sched_remove_insn (rtx insn)
8310 sd_finish_insn (insn);
8312 change_queue_index (insn, QUEUE_NOWHERE);
8313 current_sched_info->add_remove_insn (insn, 1);
8314 delete_insn (insn);
8317 /* Clear priorities of all instructions that are forward dependent on INSN.
8318 Store in the vector pointed to by ROOTS_PTR the insns on which priority ()
8319 should be invoked to initialize all cleared priorities.  */
8320 static void
8321 clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
8323 sd_iterator_def sd_it;
8324 dep_t dep;
8325 bool insn_is_root_p = true;
8327 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
8329 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
8331 rtx pro = DEP_PRO (dep);
8333 if (INSN_PRIORITY_STATUS (pro) >= 0
8334 && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
8336 /* If DEP doesn't contribute to priority then INSN itself should
8337 be added to priority roots. */
8338 if (contributes_to_priority_p (dep))
8339 insn_is_root_p = false;
8341 INSN_PRIORITY_STATUS (pro) = -1;
8342 clear_priorities (pro, roots_ptr);
8346 if (insn_is_root_p)
8347 roots_ptr->safe_push (insn);
8350 /* Recompute priorities of instructions whose priorities might have been
8351 changed.  ROOTS is a vector of instructions whose priority computation will
8352 trigger initialization of all cleared priorities. */
8353 static void
8354 calc_priorities (rtx_vec_t roots)
8356 int i;
8357 rtx insn;
8359 FOR_EACH_VEC_ELT (roots, i, insn)
8360 priority (insn);
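/* Illustrative sketch (not part of this file): the canonical use of the two
   functions above, as in create_check_block_twin earlier in this file --
   collect the roots, recompute from them, then release the vector.  */
#if 0
static void
example_recompute_priorities (rtx insn)
{
  rtx_vec_t roots = rtx_vec_t ();

  clear_priorities (insn, &roots);
  calc_priorities (roots);
  roots.release ();
}
#endif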
8364 /* Add dependences between JUMP and other instructions in the recovery
8365 block.  INSN is the first insn in the recovery block.  */
8366 static void
8367 add_jump_dependencies (rtx insn, rtx jump)
8371 insn = NEXT_INSN (insn);
8372 if (insn == jump)
8373 break;
8375 if (dep_list_size (insn, SD_LIST_FORW) == 0)
8377 dep_def _new_dep, *new_dep = &_new_dep;
8379 init_dep (new_dep, insn, jump, REG_DEP_ANTI);
8380 sd_add_dep (new_dep, false);
8383 while (1);
8385 gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
8388 /* Extend data structures for logical insn UID. */
8389 void
8390 sched_extend_luids (void)
8392 int new_luids_max_uid = get_max_uid () + 1;
8394 sched_luids.safe_grow_cleared (new_luids_max_uid);
8397 /* Initialize LUID for INSN. */
8398 void
8399 sched_init_insn_luid (rtx insn)
8401 int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
8402 int luid;
8404 if (i >= 0)
8406 luid = sched_max_luid;
8407 sched_max_luid += i;
8409 else
8410 luid = -1;
8412 SET_INSN_LUID (insn, luid);
8415 /* Initialize luids for BBS.
8416 The hook common_sched_info->luid_for_non_insn () is used to determine
8417 if notes, labels, etc. need luids. */
8418 void
8419 sched_init_luids (bb_vec_t bbs)
8421 int i;
8422 basic_block bb;
8424 sched_extend_luids ();
8425 FOR_EACH_VEC_ELT (bbs, i, bb)
8427 rtx insn;
8429 FOR_BB_INSNS (bb, insn)
8430 sched_init_insn_luid (insn);
8434 /* Free LUIDs. */
8435 void
8436 sched_finish_luids (void)
8438 sched_luids.release ();
8439 sched_max_luid = 1;
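/* Illustrative sketch (hypothetical, not part of this file): the LUID
   lifecycle provided by the functions above.  BBS is the vector of blocks
   being scheduled.  */
#if 0
static void
example_luid_lifecycle (bb_vec_t bbs)
{
  sched_init_luids (bbs);     /* Assign luids to all insns in BBS.  */
  /* ... the scheduler queries INSN_LUID (insn) here ...  */
  sched_finish_luids ();      /* Release the table and reset sched_max_luid.  */
}
#endif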
8442 /* Return the logical uid of INSN.  Helpful while debugging.  */
8443 int
8444 insn_luid (rtx insn)
8446 return INSN_LUID (insn);
8449 /* Extend per insn data in the target. */
8450 void
8451 sched_extend_target (void)
8453 if (targetm.sched.h_i_d_extended)
8454 targetm.sched.h_i_d_extended ();
8457 /* Extend global scheduler structures (those that live across calls to
8458 schedule_block) to include information about just-emitted insns.  */
8459 static void
8460 extend_h_i_d (void)
8462 int reserve = (get_max_uid () + 1 - h_i_d.length ());
8463 if (reserve > 0
8464 && ! h_i_d.space (reserve))
8466 h_i_d.safe_grow_cleared (3 * get_max_uid () / 2);
8467 sched_extend_target ();
8471 /* Initialize the h_i_d entry of INSN with default values.
8472 Values that are not explicitly initialized here hold zero.  */
8473 static void
8474 init_h_i_d (rtx insn)
8476 if (INSN_LUID (insn) > 0)
8478 INSN_COST (insn) = -1;
8479 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
8480 INSN_TICK (insn) = INVALID_TICK;
8481 INSN_EXACT_TICK (insn) = INVALID_TICK;
8482 INTER_TICK (insn) = INVALID_TICK;
8483 TODO_SPEC (insn) = HARD_DEP;
8487 /* Initialize haifa_insn_data for BBS. */
8488 void
8489 haifa_init_h_i_d (bb_vec_t bbs)
8491 int i;
8492 basic_block bb;
8494 extend_h_i_d ();
8495 FOR_EACH_VEC_ELT (bbs, i, bb)
8497 rtx insn;
8499 FOR_BB_INSNS (bb, insn)
8500 init_h_i_d (insn);
8504 /* Finalize haifa_insn_data. */
8505 void
8506 haifa_finish_h_i_d (void)
8508 int i;
8509 haifa_insn_data_t data;
8510 struct reg_use_data *use, *next;
8512 FOR_EACH_VEC_ELT (h_i_d, i, data)
8514 free (data->max_reg_pressure);
8515 free (data->reg_pressure);
8516 for (use = data->reg_use_list; use != NULL; use = next)
8518 next = use->next_insn_use;
8519 free (use);
8522 h_i_d.release ();
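/* Illustrative sketch (hypothetical, not part of this file): like the luid
   functions, haifa_init_h_i_d and haifa_finish_h_i_d bracket a scheduling
   pass.  */
#if 0
static void
example_h_i_d_lifecycle (bb_vec_t bbs)
{
  haifa_init_h_i_d (bbs);   /* Allocate per-insn data (ticks, queue index, ...).  */
  /* ... run the scheduler ...  */
  haifa_finish_h_i_d ();    /* Free register-pressure data and the vector.  */
}
#endif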
8525 /* Init data for the new insn INSN. */
8526 static void
8527 haifa_init_insn (rtx insn)
8529 gcc_assert (insn != NULL);
8531 sched_extend_luids ();
8532 sched_init_insn_luid (insn);
8533 sched_extend_target ();
8534 sched_deps_init (false);
8535 extend_h_i_d ();
8536 init_h_i_d (insn);
8538 if (adding_bb_to_current_region_p)
8540 sd_init_insn (insn);
8542 /* Extend dependency caches by one element. */
8543 extend_dependency_caches (1, false);
8545 if (sched_pressure != SCHED_PRESSURE_NONE)
8546 init_insn_reg_pressure_info (insn);
8549 /* Init data for the new basic block BB which comes after AFTER. */
8550 static void
8551 haifa_init_only_bb (basic_block bb, basic_block after)
8553 gcc_assert (bb != NULL);
8555 sched_init_bbs ();
8557 if (common_sched_info->add_block)
8558 /* This changes only data structures of the front-end. */
8559 common_sched_info->add_block (bb, after);
8562 /* A generic version of sched_split_block (). */
8563 basic_block
8564 sched_split_block_1 (basic_block first_bb, rtx after)
8566 edge e;
8568 e = split_block (first_bb, after);
8569 gcc_assert (e->src == first_bb);
8571 /* sched_split_block emits a note if *check == BB_END.  Probably it
8572 is better to rip that note off.  */
8574 return e->dest;
8577 /* A generic version of sched_create_empty_bb (). */
8578 basic_block
8579 sched_create_empty_bb_1 (basic_block after)
8581 return create_empty_bb (after);
8584 /* Insert PAT as an INSN into the schedule and update the necessary data
8585 structures to account for it. */
8586 rtx
8587 sched_emit_insn (rtx pat)
8589 rtx insn = emit_insn_before (pat, first_nonscheduled_insn ());
8590 haifa_init_insn (insn);
8592 if (current_sched_info->add_remove_insn)
8593 current_sched_info->add_remove_insn (insn, 0);
8595 (*current_sched_info->begin_schedule_ready) (insn);
8596 scheduled_insns.safe_push (insn);
8598 last_scheduled_insn = insn;
8599 return insn;
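/* Illustrative sketch (hypothetical helper, not part of this file): emitting
   a new instruction into the current schedule with the routine above.  Any
   valid insn pattern for the target could be used in place of the SET.  */
#if 0
static rtx
example_emit_scheduled_move (rtx dest, rtx src)
{
  rtx pat = gen_rtx_SET (VOIDmode, dest, src);  /* The pattern to schedule.  */
  return sched_emit_insn (pat);                 /* Becomes last_scheduled_insn.  */
}
#endif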
8602 /* This function returns a candidate satisfying dispatch constraints from
8603 the ready list. */
8605 static rtx
8606 ready_remove_first_dispatch (struct ready_list *ready)
8608 int i;
8609 rtx insn = ready_element (ready, 0);
8611 if (ready->n_ready == 1
8612 || !INSN_P (insn)
8613 || INSN_CODE (insn) < 0
8614 || !active_insn_p (insn)
8615 || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
8616 return ready_remove_first (ready);
8618 for (i = 1; i < ready->n_ready; i++)
8620 insn = ready_element (ready, i);
8622 if (!INSN_P (insn)
8623 || INSN_CODE (insn) < 0
8624 || !active_insn_p (insn))
8625 continue;
8627 if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
8629 /* Return the i-th element of ready.  */
8630 insn = ready_remove (ready, i);
8631 return insn;
8635 if (targetm.sched.dispatch (NULL_RTX, DISPATCH_VIOLATION))
8636 return ready_remove_first (ready);
8638 for (i = 1; i < ready->n_ready; i++)
8640 insn = ready_element (ready, i);
8642 if (!INSN_P (insn)
8643 || INSN_CODE (insn) < 0
8644 || !active_insn_p (insn))
8645 continue;
8647 /* Return i-th element of ready. */
8648 if (targetm.sched.dispatch (insn, IS_CMP))
8649 return ready_remove (ready, i);
8652 return ready_remove_first (ready);
8655 /* Get the number of ready insns in the ready list.  */
8657 int
8658 number_in_ready (void)
8660 return ready.n_ready;
8663 /* Get the I-th insn from the ready list.  */
8665 rtx
8666 get_ready_element (int i)
8668 return ready_element (&ready, i);
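/* Illustrative sketch (hypothetical debugging helper, not part of this file):
   walking the ready list with the two accessors above.  */
#if 0
static void
example_dump_ready_uids (FILE *f)
{
  int i;

  for (i = 0; i < number_in_ready (); i++)
    fprintf (f, " %d", INSN_UID (get_ready_element (i)));
  fprintf (f, "\n");
}
#endif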
8671 #endif /* INSN_SCHEDULING */