1 /* Instruction scheduling pass.
2 Copyright (C) 1992-2014 Free Software Foundation, Inc.
3 Contributed by Michael Tiemann (tiemann@cygnus.com). Enhanced by,
4 and currently maintained by, Jim Wilson (wilson@cygnus.com)
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* Instruction scheduling pass. This file, along with sched-deps.c,
23 contains the generic parts. The actual entry point for
24 the normal instruction scheduling pass is found in sched-rgn.c.
26 We compute insn priorities based on data dependencies. Flow
27 analysis only creates a fraction of the data-dependencies we must
28 observe: namely, only those dependencies which the combiner can be
29 expected to use. For this pass, we must therefore create the
30 remaining dependencies we need to observe: register dependencies,
31 memory dependencies, dependencies to keep function calls in order,
32 and the dependence between a conditional branch and the setting of
33 condition codes are all dealt with here.
35 The scheduler first traverses the data flow graph, starting with
36 the last instruction, and proceeding to the first, assigning values
37 to insn_priority as it goes. This sorts the instructions
38 topologically by data dependence.
40 Once priorities have been established, we order the insns using
41 list scheduling. This works as follows: starting with a list of
42 all the ready insns, and sorted according to priority number, we
43 schedule the insn from the end of the list by placing its
44 predecessors in the list according to their priority order. We
45 consider this insn scheduled by setting the pointer to the "end" of
46 the list to point to the previous insn. When an insn has no
47 predecessors, we either queue it until sufficient time has elapsed
48 or add it to the ready list. As the instructions are scheduled or
49 when stalls are introduced, the queue advances and dumps insns into
50 the ready list. When all insns down to the lowest priority have
51 been scheduled, the critical path of the basic block has been made
52 as short as possible. The remaining insns are then scheduled in
53 remaining slots.
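
   As a rough sketch (simplified and illustrative only; the real control
   flow lives in schedule_block and the helpers declared below), one
   iteration of the list-scheduling loop looks like:

      queue_to_ready (&ready);             - move Q -> R once stalls elapse
      ready_sort (&ready);                 - apply the tie-break rules below
      insn = ready_remove_first (&ready);  - best remaining insn
      schedule_insn (insn);                - R -> S; dependents go P -> R/Q
      advance_one_cycle ();                - advance the clock by one cycle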
55 The following list shows the order in which we want to break ties
56 among insns in the ready list:
58 1. choose insn with the longest path to end of bb, ties
59 broken by
60 2. choose insn with least contribution to register pressure,
61 ties broken by
62 3. prefer in-block over interblock motion, ties broken by
63 4. prefer useful over speculative motion, ties broken by
64 5. choose insn with largest control flow probability, ties
65 broken by
66 6. choose insn with the least dependences upon the previously
67 scheduled insn, or finally
68 7. choose the insn which has the most insns dependent on it.
69 8. choose insn with lowest UID.
71 Memory references complicate matters. Only if we can be certain
72 that memory references are not part of the data dependency graph
73 (via true, anti, or output dependence), can we move operations past
74 memory references. To a first approximation, reads can be done
75 independently, while writes introduce dependencies. Better
76 approximations will yield fewer dependencies.
78 Before reload, an extended analysis of interblock data dependences
79 is required for interblock scheduling. This is performed in
80 compute_block_dependences ().
82 Dependencies set up by memory references are treated in exactly the
83 same way as other dependencies, by using insn backward dependences
84 INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences
85 INSN_FORW_DEPS for the purpose of forward list scheduling.
87 Having optimized the critical path, we may have also unduly
88 extended the lifetimes of some registers. If an operation requires
89 that constants be loaded into registers, it is certainly desirable
90 to load those constants as early as necessary, but no earlier.
91 I.e., it will not do to load up a bunch of registers at the
92 beginning of a basic block only to use them at the end, if they
93 could be loaded later, since this may result in excessive register
94 utilization.
96 Note that since branches are never in basic blocks, but only end
97 basic blocks, this pass will not move branches. But that is ok,
98 since we can use GNU's delayed branch scheduling pass to take care
99 of this case.
101 Also note that no further optimizations based on algebraic
102 identities are performed, so this pass would be a good one to
103 perform instruction splitting, such as breaking up a multiply
104 instruction into shifts and adds where that is profitable.
106 Given the memory aliasing analysis that this pass should perform,
107 it should be possible to remove redundant stores to memory, and to
108 load values from registers instead of hitting memory.
110 Before reload, speculative insns are moved only if a 'proof' exists
111 that no exception will be caused by this, and if no live registers
112 exist that inhibit the motion (live registers constraints are not
113 represented by data dependence edges).
115 This pass must update information that subsequent passes expect to
116 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
117 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
119 The information in the line number notes is carefully retained by
120 this pass. Notes that refer to the starting and ending of
121 exception regions are also carefully retained by this pass. All
122 other NOTE insns are grouped in their same relative order at the
123 beginning of basic blocks and regions that have been scheduled. */
125 #include "config.h"
126 #include "system.h"
127 #include "coretypes.h"
128 #include "tm.h"
129 #include "diagnostic-core.h"
130 #include "hard-reg-set.h"
131 #include "rtl.h"
132 #include "tm_p.h"
133 #include "regs.h"
134 #include "hashtab.h"
135 #include "hash-set.h"
136 #include "vec.h"
137 #include "machmode.h"
138 #include "input.h"
139 #include "function.h"
140 #include "flags.h"
141 #include "insn-config.h"
142 #include "insn-attr.h"
143 #include "except.h"
144 #include "recog.h"
145 #include "dominance.h"
146 #include "cfg.h"
147 #include "cfgrtl.h"
148 #include "cfgbuild.h"
149 #include "predict.h"
150 #include "basic-block.h"
151 #include "sched-int.h"
152 #include "target.h"
153 #include "common/common-target.h"
154 #include "params.h"
155 #include "dbgcnt.h"
156 #include "cfgloop.h"
157 #include "ira.h"
158 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
159 #include "hash-table.h"
160 #include "dumpfile.h"
162 #ifdef INSN_SCHEDULING
164 /* True if we do register pressure relief through live-range
165 shrinkage. */
166 static bool live_range_shrinkage_p;
168 /* Switch on live range shrinkage. */
169 void
170 initialize_live_range_shrinkage (void)
172 live_range_shrinkage_p = true;
175 /* Switch off live range shrinkage. */
176 void
177 finish_live_range_shrinkage (void)
179 live_range_shrinkage_p = false;
182 /* issue_rate is the number of insns that can be scheduled in the same
183 machine cycle. It can be defined in the config/mach/mach.h file,
184 otherwise we set it to 1. */
186 int issue_rate;
188 /* This can be set to true by a backend if the scheduler should not
189 enable a DCE pass. */
190 bool sched_no_dce;
192 /* The current initiation interval used when modulo scheduling. */
193 static int modulo_ii;
195 /* The maximum number of stages we are prepared to handle. */
196 static int modulo_max_stages;
198 /* The number of insns that exist in each iteration of the loop. We use this
199 to detect when we've scheduled all insns from the first iteration. */
200 static int modulo_n_insns;
202 /* The current count of insns in the first iteration of the loop that have
203 already been scheduled. */
204 static int modulo_insns_scheduled;
206 /* The maximum uid of insns from the first iteration of the loop. */
207 static int modulo_iter0_max_uid;
209 /* The number of times we should attempt to backtrack when modulo scheduling.
210 Decreased each time we have to backtrack. */
211 static int modulo_backtracks_left;
213 /* The stage in which the last insn from the original loop was
214 scheduled. */
215 static int modulo_last_stage;
217 /* sched-verbose controls the amount of debugging output the
218 scheduler prints. It is controlled by -fsched-verbose=N:
219 N>0 and no -DSR : the output is directed to stderr.
220 N>=10 will direct the printouts to stderr (regardless of -dSR).
221 N=1: same as -dSR.
222 N=2: bb's probabilities, detailed ready list info, unit/insn info.
223 N=3: rtl at abort point, control-flow, regions info.
224 N=5: dependences info. */
226 int sched_verbose = 0;
228 /* Debugging file. All printouts are sent to dump, which is always set,
229 either to stderr, or to the dump listing file (-dRS). */
230 FILE *sched_dump = 0;
232 /* This is a placeholder for the scheduler parameters common
233 to all schedulers. */
234 struct common_sched_info_def *common_sched_info;
236 #define INSN_TICK(INSN) (HID (INSN)->tick)
237 #define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
238 #define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
239 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
240 #define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
241 #define SHADOW_P(INSN) (HID (INSN)->shadow_p)
242 #define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
243 /* Cached cost of the instruction. Use insn_cost to get cost of the
244 insn. -1 here means that the field is not initialized. */
245 #define INSN_COST(INSN) (HID (INSN)->cost)
247 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
248 then it should be recalculated from scratch. */
249 #define INVALID_TICK (-(max_insn_queue_index + 1))
250 /* The minimal value of the INSN_TICK of an instruction. */
251 #define MIN_TICK (-max_insn_queue_index)
253 /* The deciding reason for INSN's place in the ready list. */
254 #define INSN_LAST_RFS_WIN(INSN) (HID (INSN)->last_rfs_win)
256 /* List of important notes we must keep around. This is a pointer to the
257 last element in the list. */
258 rtx_insn *note_list;
260 static struct spec_info_def spec_info_var;
261 /* Description of the speculative part of the scheduling.
262 If NULL - no speculation. */
263 spec_info_t spec_info = NULL;
265 /* True if a recovery block was added during scheduling of the current block.
266 Used to determine if we need to fix INSN_TICKs. */
267 static bool haifa_recovery_bb_recently_added_p;
269 /* True if a recovery block was added during this scheduling pass.
270 Used to determine if we should have empty memory pools of dependencies
271 after finishing current region. */
272 bool haifa_recovery_bb_ever_added_p;
274 /* Counters of different types of speculative instructions. */
275 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
277 /* Array used in {unlink, restore}_bb_notes. */
278 static rtx_insn **bb_header = 0;
280 /* Basic block after which recovery blocks will be created. */
281 static basic_block before_recovery;
283 /* Basic block just before the EXIT_BLOCK and after recovery, if we have
284 created it. */
285 basic_block after_recovery;
287 /* FALSE if we add bb to another region, so we don't need to initialize it. */
288 bool adding_bb_to_current_region_p = true;
290 /* Queues, etc. */
292 /* An instruction is ready to be scheduled when all insns preceding it
293 have already been scheduled. It is important to ensure that all
294 insns which use its result will not be executed until its result
295 has been computed. An insn is maintained in one of four structures:
297 (P) the "Pending" set of insns which cannot be scheduled until
298 their dependencies have been satisfied.
299 (Q) the "Queued" set of insns that can be scheduled when sufficient
300 time has passed.
301 (R) the "Ready" list of unscheduled, uncommitted insns.
302 (S) the "Scheduled" list of insns.
304 Initially, all insns are either "Pending" or "Ready" depending on
305 whether their dependencies are satisfied.
307 Insns move from the "Ready" list to the "Scheduled" list as they
308 are committed to the schedule. As this occurs, the insns in the
309 "Pending" list have their dependencies satisfied and move to either
310 the "Ready" list or the "Queued" set depending on whether
311 sufficient time has passed to make them ready. As time passes,
312 insns move from the "Queued" set to the "Ready" list.
314 The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
315 unscheduled insns, i.e., those that are ready, queued, and pending.
316 The "Queued" set (Q) is implemented by the variable `insn_queue'.
317 The "Ready" list (R) is implemented by the variables `ready' and
318 `n_ready'.
319 The "Scheduled" list (S) is the new insn chain built by this pass.
321 The transition (R->S) is implemented in the scheduling loop in
322 `schedule_block' when the best insn to schedule is chosen.
323 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
324 insns move from the ready list to the scheduled list.
325 The transition (Q->R) is implemented in 'queue_to_insn' as time
326 passes or stalls are introduced. */
328 /* Implement a circular buffer to delay instructions until sufficient
329 time has passed. For the new pipeline description interface,
330 MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
331 than the maximal time of instruction execution computed by genattr.c,
332 based on the maximal time of functional unit reservations needed to
333 produce a result. This is the longest time an insn may be queued. */
335 static rtx_insn_list **insn_queue;
336 static int q_ptr = 0;
337 static int q_size = 0;
338 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
339 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
341 #define QUEUE_SCHEDULED (-3)
342 #define QUEUE_NOWHERE (-2)
343 #define QUEUE_READY (-1)
344 /* QUEUE_SCHEDULED - INSN is scheduled.
345 QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in
346 queue or ready list.
347 QUEUE_READY - INSN is in ready list.
348 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
350 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
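/* An illustrative sketch (condensed from queue_insn, defined further below):
   delaying INSN for N_CYCLES amounts to

       int slot = NEXT_Q_AFTER (q_ptr, n_cycles);
       insn_queue[slot] = alloc_INSN_LIST (insn, insn_queue[slot]);
       QUEUE_INDEX (insn) = slot;
       q_size += 1;

   As the clock advances, q_ptr moves forward with NEXT_Q and queue_to_ready
   drains insn_queue[q_ptr] into the ready list.  */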
352 /* The following variable value describes all current and future
353 reservations of the processor units. */
354 state_t curr_state;
356 /* The following variable value is the size of the memory representing all
357 current and future reservations of the processor units. */
358 size_t dfa_state_size;
360 /* The following array is used to find the best insn from ready when
361 the automaton pipeline interface is used. */
362 signed char *ready_try = NULL;
364 /* The ready list. */
365 struct ready_list ready = {NULL, 0, 0, 0, 0};
367 /* The pointer to the ready list (to be removed). */
368 static struct ready_list *readyp = &ready;
370 /* Scheduling clock. */
371 static int clock_var;
373 /* Clock at which the previous instruction was issued. */
374 static int last_clock_var;
376 /* Set to true if, when queuing a shadow insn, we discover that it would be
377 scheduled too late. */
378 static bool must_backtrack;
380 /* The following variable value is the number of essential insns issued on
381 the current cycle. An insn is essential if it changes the
382 processor's state. */
383 int cycle_issued_insns;
385 /* This records the actual schedule. It is built up during the main phase
386 of schedule_block, and afterwards used to reorder the insns in the RTL. */
387 static vec<rtx_insn *> scheduled_insns;
389 static int may_trap_exp (const_rtx, int);
391 /* Nonzero iff the address is composed of at most one register. */
392 #define CONST_BASED_ADDRESS_P(x) \
393 (REG_P (x) \
394 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
395 || (GET_CODE (x) == LO_SUM)) \
396 && (CONSTANT_P (XEXP (x, 0)) \
397 || CONSTANT_P (XEXP (x, 1)))))
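/* For example (illustrative RTL): (reg R) and (plus (reg R) (const_int 4))
   satisfy CONST_BASED_ADDRESS_P, while (plus (reg R1) (reg R2)) does not,
   because neither operand of the PLUS is constant.  */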
399 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
400 as found by analyzing insn's expression. */
403 static int haifa_luid_for_non_insn (rtx x);
405 /* Haifa version of sched_info hooks common to all headers. */
406 const struct common_sched_info_def haifa_common_sched_info =
408 NULL, /* fix_recovery_cfg */
409 NULL, /* add_block */
410 NULL, /* estimate_number_of_insns */
411 haifa_luid_for_non_insn, /* luid_for_non_insn */
412 SCHED_PASS_UNKNOWN /* sched_pass_id */
415 /* Mapping from instruction UID to its Logical UID. */
416 vec<int> sched_luids = vNULL;
418 /* Next LUID to assign to an instruction. */
419 int sched_max_luid = 1;
421 /* Haifa Instruction Data. */
422 vec<haifa_insn_data_def> h_i_d = vNULL;
424 void (* sched_init_only_bb) (basic_block, basic_block);
426 /* Split block function. Different schedulers might use different functions
427 to keep their internal data consistent. */
428 basic_block (* sched_split_block) (basic_block, rtx);
430 /* Create empty basic block after the specified block. */
431 basic_block (* sched_create_empty_bb) (basic_block);
433 /* Return the number of cycles until INSN is expected to be ready.
434 Return zero if it already is. */
435 static int
436 insn_delay (rtx_insn *insn)
438 return MAX (INSN_TICK (insn) - clock_var, 0);
441 static int
442 may_trap_exp (const_rtx x, int is_store)
444 enum rtx_code code;
446 if (x == 0)
447 return TRAP_FREE;
448 code = GET_CODE (x);
449 if (is_store)
451 if (code == MEM && may_trap_p (x))
452 return TRAP_RISKY;
453 else
454 return TRAP_FREE;
456 if (code == MEM)
458 /* The insn uses memory: a volatile load. */
459 if (MEM_VOLATILE_P (x))
460 return IRISKY;
461 /* An exception-free load. */
462 if (!may_trap_p (x))
463 return IFREE;
464 /* A load with 1 base register, to be further checked. */
465 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
466 return PFREE_CANDIDATE;
467 /* No info on the load, to be further checked. */
468 return PRISKY_CANDIDATE;
470 else
472 const char *fmt;
473 int i, insn_class = TRAP_FREE;
475 /* Neither store nor load, check if it may cause a trap. */
476 if (may_trap_p (x))
477 return TRAP_RISKY;
478 /* Recursive step: walk the insn... */
479 fmt = GET_RTX_FORMAT (code);
480 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
482 if (fmt[i] == 'e')
484 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
485 insn_class = WORST_CLASS (insn_class, tmp_class);
487 else if (fmt[i] == 'E')
489 int j;
490 for (j = 0; j < XVECLEN (x, i); j++)
492 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
493 insn_class = WORST_CLASS (insn_class, tmp_class);
494 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
495 break;
498 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
499 break;
501 return insn_class;
505 /* Classifies rtx X of an insn for the purpose of verifying that X can be
506 executed speculatively (and consequently the insn can be moved
507 speculatively), by examining X, returning:
508 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
509 TRAP_FREE: non-load insn.
510 IFREE: load from a globally safe location.
511 IRISKY: volatile load.
512 PFREE_CANDIDATE, PRISKY_CANDIDATE: load that needs to be checked for
513 being either PFREE or PRISKY. */
515 static int
516 haifa_classify_rtx (const_rtx x)
518 int tmp_class = TRAP_FREE;
519 int insn_class = TRAP_FREE;
520 enum rtx_code code;
522 if (GET_CODE (x) == PARALLEL)
524 int i, len = XVECLEN (x, 0);
526 for (i = len - 1; i >= 0; i--)
528 tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
529 insn_class = WORST_CLASS (insn_class, tmp_class);
530 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
531 break;
534 else
536 code = GET_CODE (x);
537 switch (code)
539 case CLOBBER:
540 /* Test if it is a 'store'. */
541 tmp_class = may_trap_exp (XEXP (x, 0), 1);
542 break;
543 case SET:
544 /* Test if it is a store. */
545 tmp_class = may_trap_exp (SET_DEST (x), 1);
546 if (tmp_class == TRAP_RISKY)
547 break;
548 /* Test if it is a load. */
549 tmp_class =
550 WORST_CLASS (tmp_class,
551 may_trap_exp (SET_SRC (x), 0));
552 break;
553 case COND_EXEC:
554 tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
555 if (tmp_class == TRAP_RISKY)
556 break;
557 tmp_class = WORST_CLASS (tmp_class,
558 may_trap_exp (COND_EXEC_TEST (x), 0));
559 break;
560 case TRAP_IF:
561 tmp_class = TRAP_RISKY;
562 break;
563 default:;
565 insn_class = tmp_class;
568 return insn_class;
572 haifa_classify_insn (const_rtx insn)
574 return haifa_classify_rtx (PATTERN (insn));
577 /* After the scheduler initialization function has been called, this function
578 can be called to enable modulo scheduling. II is the initiation interval
579 we should use, it affects the delays for delay_pairs that were recorded as
580 separated by a given number of stages.
582 MAX_STAGES provides us with a limit
583 after which we give up scheduling; the caller must have unrolled at least
584 as many copies of the loop body and recorded delay_pairs for them.
586 INSNS is the number of real (non-debug) insns in one iteration of
587 the loop. MAX_UID can be used to test whether an insn belongs to
588 the first iteration of the loop; all of them have a uid lower than
589 MAX_UID. */
590 void
591 set_modulo_params (int ii, int max_stages, int insns, int max_uid)
593 modulo_ii = ii;
594 modulo_max_stages = max_stages;
595 modulo_n_insns = insns;
596 modulo_iter0_max_uid = max_uid;
597 modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS);
600 /* A structure to record a pair of insns where the first one is a real
601 insn that has delay slots, and the second is its delayed shadow.
602 I1 is scheduled normally and will emit an assembly instruction,
603 while I2 describes the side effect that takes place at the
604 transition between cycles CYCLES and (CYCLES + 1) after I1. */
605 struct delay_pair
607 struct delay_pair *next_same_i1;
608 rtx_insn *i1, *i2;
609 int cycles;
610 /* When doing modulo scheduling, a delay_pair can also be used to
611 show that I1 and I2 are the same insn in a different stage. If that
612 is the case, STAGES will be nonzero. */
613 int stages;
616 /* Helpers for delay hashing. */
618 struct delay_i1_hasher : typed_noop_remove <delay_pair>
620 typedef delay_pair value_type;
621 typedef void compare_type;
622 static inline hashval_t hash (const value_type *);
623 static inline bool equal (const value_type *, const compare_type *);
626 /* Returns a hash value for X, based on hashing just I1. */
628 inline hashval_t
629 delay_i1_hasher::hash (const value_type *x)
631 return htab_hash_pointer (x->i1);
634 /* Return true if I1 of pair X is the same as that of pair Y. */
636 inline bool
637 delay_i1_hasher::equal (const value_type *x, const compare_type *y)
639 return x->i1 == y;
642 struct delay_i2_hasher : typed_free_remove <delay_pair>
644 typedef delay_pair value_type;
645 typedef void compare_type;
646 static inline hashval_t hash (const value_type *);
647 static inline bool equal (const value_type *, const compare_type *);
650 /* Returns a hash value for X, based on hashing just I2. */
652 inline hashval_t
653 delay_i2_hasher::hash (const value_type *x)
655 return htab_hash_pointer (x->i2);
658 /* Return true if I2 of pair X is the same as that of pair Y. */
660 inline bool
661 delay_i2_hasher::equal (const value_type *x, const compare_type *y)
663 return x->i2 == y;
666 /* Two hash tables to record delay_pairs, one indexed by I1 and the other
667 indexed by I2. */
668 static hash_table<delay_i1_hasher> *delay_htab;
669 static hash_table<delay_i2_hasher> *delay_htab_i2;
671 /* Called through htab_traverse. Walk the hashtable using I2 as
672 index, and delete all elements involving an UID higher than
673 that pointed to by *DATA. */
675 haifa_htab_i2_traverse (delay_pair **slot, int *data)
677 int maxuid = *data;
678 struct delay_pair *p = *slot;
679 if (INSN_UID (p->i2) >= maxuid || INSN_UID (p->i1) >= maxuid)
681 delay_htab_i2->clear_slot (slot);
683 return 1;
686 /* Called through htab_traverse. Walk the hashtable using I1 as
687 index, and delete all elements involving an UID higher than
688 that pointed to by *DATA. */
690 haifa_htab_i1_traverse (delay_pair **pslot, int *data)
692 int maxuid = *data;
693 struct delay_pair *p, *first, **pprev;
695 if (INSN_UID ((*pslot)->i1) >= maxuid)
697 delay_htab->clear_slot (pslot);
698 return 1;
700 pprev = &first;
701 for (p = *pslot; p; p = p->next_same_i1)
703 if (INSN_UID (p->i2) < maxuid)
705 *pprev = p;
706 pprev = &p->next_same_i1;
709 *pprev = NULL;
710 if (first == NULL)
711 delay_htab->clear_slot (pslot);
712 else
713 *pslot = first;
714 return 1;
717 /* Discard all delay pairs which involve an insn with an UID higher
718 than MAX_UID. */
719 void
720 discard_delay_pairs_above (int max_uid)
722 delay_htab->traverse <int *, haifa_htab_i1_traverse> (&max_uid);
723 delay_htab_i2->traverse <int *, haifa_htab_i2_traverse> (&max_uid);
726 /* This function can be called by a port just before it starts the final
727 scheduling pass. It records the fact that an instruction with delay
728 slots has been split into two insns, I1 and I2. The first one will be
729 scheduled normally and initiates the operation. The second one is a
730 shadow which must follow a specific number of cycles after I1; its only
731 purpose is to show the side effect that occurs at that cycle in the RTL.
732 If a JUMP_INSN or a CALL_INSN has been split, I1 should be a normal INSN,
733 while I2 retains the original insn type.
735 There are two ways in which the number of cycles can be specified,
736 involving the CYCLES and STAGES arguments to this function. If STAGES
737 is zero, we just use the value of CYCLES. Otherwise, STAGES is a factor
738 which is multiplied by MODULO_II to give the number of cycles. This is
739 only useful if the caller also calls set_modulo_params to enable modulo
740 scheduling. */
742 void
743 record_delay_slot_pair (rtx_insn *i1, rtx_insn *i2, int cycles, int stages)
745 struct delay_pair *p = XNEW (struct delay_pair);
746 struct delay_pair **slot;
748 p->i1 = i1;
749 p->i2 = i2;
750 p->cycles = cycles;
751 p->stages = stages;
753 if (!delay_htab)
755 delay_htab = new hash_table<delay_i1_hasher> (10);
756 delay_htab_i2 = new hash_table<delay_i2_hasher> (10);
758 slot = delay_htab->find_slot_with_hash (i1, htab_hash_pointer (i1), INSERT);
759 p->next_same_i1 = *slot;
760 *slot = p;
761 slot = delay_htab_i2->find_slot (p, INSERT);
762 *slot = p;
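/* A hypothetical usage example (the insns and cycle count are invented for
   illustration): a port that splits a two-cycle delayed load into I1 (the
   issue) and I2 (the shadow performing the write-back) would call

       record_delay_slot_pair (i1, i2, 2, 0);

   so that I2 is kept exactly two cycles after I1.  If modulo scheduling has
   been enabled with set_modulo_params, passing STAGES == 1 instead makes the
   required distance modulo_ii cycles (see pair_delay below).  */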
765 /* Examine the delay pair hashtable to see if INSN is a shadow for another,
766 and return the other insn if so. Return NULL otherwise. */
767 rtx_insn *
768 real_insn_for_shadow (rtx_insn *insn)
770 struct delay_pair *pair;
772 if (!delay_htab)
773 return NULL;
775 pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
776 if (!pair || pair->stages > 0)
777 return NULL;
778 return pair->i1;
781 /* For a pair P of insns, return the fixed distance in cycles from the first
782 insn after which the second must be scheduled. */
783 static int
784 pair_delay (struct delay_pair *p)
786 if (p->stages == 0)
787 return p->cycles;
788 else
789 return p->stages * modulo_ii;
792 /* Given an insn INSN, add a dependence on its delayed shadow if it
793 has one. Also try to find situations where shadows depend on each other
794 and add dependencies to the real insns to limit the amount of backtracking
795 needed. */
796 void
797 add_delay_dependencies (rtx_insn *insn)
799 struct delay_pair *pair;
800 sd_iterator_def sd_it;
801 dep_t dep;
803 if (!delay_htab)
804 return;
806 pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
807 if (!pair)
808 return;
809 add_dependence (insn, pair->i1, REG_DEP_ANTI);
810 if (pair->stages)
811 return;
813 FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep)
815 rtx_insn *pro = DEP_PRO (dep);
816 struct delay_pair *other_pair
817 = delay_htab_i2->find_with_hash (pro, htab_hash_pointer (pro));
818 if (!other_pair || other_pair->stages)
819 continue;
820 if (pair_delay (other_pair) >= pair_delay (pair))
822 if (sched_verbose >= 4)
824 fprintf (sched_dump, ";;\tadding dependence %d <- %d\n",
825 INSN_UID (other_pair->i1),
826 INSN_UID (pair->i1));
827 fprintf (sched_dump, ";;\tpair1 %d <- %d, cost %d\n",
828 INSN_UID (pair->i1),
829 INSN_UID (pair->i2),
830 pair_delay (pair));
831 fprintf (sched_dump, ";;\tpair2 %d <- %d, cost %d\n",
832 INSN_UID (other_pair->i1),
833 INSN_UID (other_pair->i2),
834 pair_delay (other_pair));
836 add_dependence (pair->i1, other_pair->i1, REG_DEP_ANTI);
841 /* Forward declarations. */
843 static int priority (rtx_insn *);
844 static int rank_for_schedule (const void *, const void *);
845 static void swap_sort (rtx_insn **, int);
846 static void queue_insn (rtx_insn *, int, const char *);
847 static int schedule_insn (rtx_insn *);
848 static void adjust_priority (rtx_insn *);
849 static void advance_one_cycle (void);
850 static void extend_h_i_d (void);
853 /* Notes handling mechanism:
854 =========================
855 Generally, NOTES are saved before scheduling and restored after scheduling.
856 The scheduler distinguishes between two types of notes:
858 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
859 Before scheduling a region, a pointer to the note is added to the insn
860 that follows or precedes it. (This happens as part of the data dependence
861 computation). After scheduling an insn, the pointer contained in it is
862 used for regenerating the corresponding note (in reemit_notes).
864 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
865 these notes are put in a list (in rm_other_notes() and
866 unlink_other_notes ()). After scheduling the block, these notes are
867 inserted at the beginning of the block (in schedule_block()). */
869 static void ready_add (struct ready_list *, rtx_insn *, bool);
870 static rtx_insn *ready_remove_first (struct ready_list *);
871 static rtx_insn *ready_remove_first_dispatch (struct ready_list *ready);
873 static void queue_to_ready (struct ready_list *);
874 static int early_queue_to_ready (state_t, struct ready_list *);
876 /* The following functions are used to implement multi-pass scheduling
877 on the first cycle. */
878 static rtx_insn *ready_remove (struct ready_list *, int);
879 static void ready_remove_insn (rtx);
881 static void fix_inter_tick (rtx_insn *, rtx_insn *);
882 static int fix_tick_ready (rtx_insn *);
883 static void change_queue_index (rtx_insn *, int);
885 /* The following functions are used to implement scheduling of data/control
886 speculative instructions. */
888 static void extend_h_i_d (void);
889 static void init_h_i_d (rtx_insn *);
890 static int haifa_speculate_insn (rtx_insn *, ds_t, rtx *);
891 static void generate_recovery_code (rtx_insn *);
892 static void process_insn_forw_deps_be_in_spec (rtx, rtx_insn *, ds_t);
893 static void begin_speculative_block (rtx_insn *);
894 static void add_to_speculative_block (rtx_insn *);
895 static void init_before_recovery (basic_block *);
896 static void create_check_block_twin (rtx_insn *, bool);
897 static void fix_recovery_deps (basic_block);
898 static bool haifa_change_pattern (rtx_insn *, rtx);
899 static void dump_new_block_header (int, basic_block, rtx_insn *, rtx_insn *);
900 static void restore_bb_notes (basic_block);
901 static void fix_jump_move (rtx_insn *);
902 static void move_block_after_check (rtx_insn *);
903 static void move_succs (vec<edge, va_gc> **, basic_block);
904 static void sched_remove_insn (rtx_insn *);
905 static void clear_priorities (rtx_insn *, rtx_vec_t *);
906 static void calc_priorities (rtx_vec_t);
907 static void add_jump_dependencies (rtx_insn *, rtx_insn *);
909 #endif /* INSN_SCHEDULING */
911 /* Point to state used for the current scheduling pass. */
912 struct haifa_sched_info *current_sched_info;
914 #ifndef INSN_SCHEDULING
915 void
916 schedule_insns (void)
919 #else
921 /* Do register pressure sensitive insn scheduling if the flag is set
922 up. */
923 enum sched_pressure_algorithm sched_pressure;
925 /* Map regno -> its pressure class. The map defined only when
926 SCHED_PRESSURE != SCHED_PRESSURE_NONE. */
927 enum reg_class *sched_regno_pressure_class;
929 /* The current register pressure. Only elements corresponding to pressure
930 classes are defined. */
931 static int curr_reg_pressure[N_REG_CLASSES];
933 /* Saved value of the previous array. */
934 static int saved_reg_pressure[N_REG_CLASSES];
936 /* Registers live at the given scheduling point. */
937 static bitmap curr_reg_live;
939 /* Saved value of the previous array. */
940 static bitmap saved_reg_live;
942 /* Registers mentioned in the current region. */
943 static bitmap region_ref_regs;
945 /* Effective number of available registers of a given class (see comment
946 in sched_pressure_start_bb). */
947 static int sched_class_regs_num[N_REG_CLASSES];
948 /* Number of call_used_regs. This is a helper for calculating
949 sched_class_regs_num. */
950 static int call_used_regs_num[N_REG_CLASSES];
952 /* Initiate register pressure related info for scheduling the current
953 region. Currently it only clears registers mentioned in the
954 current region. */
955 void
956 sched_init_region_reg_pressure_info (void)
958 bitmap_clear (region_ref_regs);
961 /* PRESSURE[CL] describes the pressure on register class CL. Update it
962 for the birth (if BIRTH_P) or death (if !BIRTH_P) of register REGNO.
963 LIVE tracks the set of live registers; if it is null, assume that
964 every birth or death is genuine. */
965 static inline void
966 mark_regno_birth_or_death (bitmap live, int *pressure, int regno, bool birth_p)
968 enum reg_class pressure_class;
970 pressure_class = sched_regno_pressure_class[regno];
971 if (regno >= FIRST_PSEUDO_REGISTER)
973 if (pressure_class != NO_REGS)
975 if (birth_p)
977 if (!live || bitmap_set_bit (live, regno))
978 pressure[pressure_class]
979 += (ira_reg_class_max_nregs
980 [pressure_class][PSEUDO_REGNO_MODE (regno)]);
982 else
984 if (!live || bitmap_clear_bit (live, regno))
985 pressure[pressure_class]
986 -= (ira_reg_class_max_nregs
987 [pressure_class][PSEUDO_REGNO_MODE (regno)]);
991 else if (pressure_class != NO_REGS
992 && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
994 if (birth_p)
996 if (!live || bitmap_set_bit (live, regno))
997 pressure[pressure_class]++;
999 else
1001 if (!live || bitmap_clear_bit (live, regno))
1002 pressure[pressure_class]--;
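/* A worked example (numbers illustrative only): if pseudo REGNO belongs to
   pressure class GENERAL_REGS and PSEUDO_REGNO_MODE (REGNO) needs two hard
   registers of that class (ira_reg_class_max_nregs == 2), then a birth adds
   2 to pressure[GENERAL_REGS] and a death subtracts 2.  A hard register
   contributes exactly 1 either way, and only if it is allocatable, i.e. not
   in ira_no_alloc_regs.  */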
1007 /* Initiate current register pressure related info from living
1008 registers given by LIVE. */
1009 static void
1010 initiate_reg_pressure_info (bitmap live)
1012 int i;
1013 unsigned int j;
1014 bitmap_iterator bi;
1016 for (i = 0; i < ira_pressure_classes_num; i++)
1017 curr_reg_pressure[ira_pressure_classes[i]] = 0;
1018 bitmap_clear (curr_reg_live);
1019 EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
1020 if (sched_pressure == SCHED_PRESSURE_MODEL
1021 || current_nr_blocks == 1
1022 || bitmap_bit_p (region_ref_regs, j))
1023 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure, j, true);
1026 /* Mark registers in X as mentioned in the current region. */
1027 static void
1028 setup_ref_regs (rtx x)
1030 int i, j, regno;
1031 const RTX_CODE code = GET_CODE (x);
1032 const char *fmt;
1034 if (REG_P (x))
1036 regno = REGNO (x);
1037 if (HARD_REGISTER_NUM_P (regno))
1038 bitmap_set_range (region_ref_regs, regno,
1039 hard_regno_nregs[regno][GET_MODE (x)]);
1040 else
1041 bitmap_set_bit (region_ref_regs, REGNO (x));
1042 return;
1044 fmt = GET_RTX_FORMAT (code);
1045 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1046 if (fmt[i] == 'e')
1047 setup_ref_regs (XEXP (x, i));
1048 else if (fmt[i] == 'E')
1050 for (j = 0; j < XVECLEN (x, i); j++)
1051 setup_ref_regs (XVECEXP (x, i, j));
1055 /* Initiate current register pressure related info at the start of
1056 basic block BB. */
1057 static void
1058 initiate_bb_reg_pressure_info (basic_block bb)
1060 unsigned int i ATTRIBUTE_UNUSED;
1061 rtx_insn *insn;
1063 if (current_nr_blocks > 1)
1064 FOR_BB_INSNS (bb, insn)
1065 if (NONDEBUG_INSN_P (insn))
1066 setup_ref_regs (PATTERN (insn));
1067 initiate_reg_pressure_info (df_get_live_in (bb));
1068 #ifdef EH_RETURN_DATA_REGNO
1069 if (bb_has_eh_pred (bb))
1070 for (i = 0; ; ++i)
1072 unsigned int regno = EH_RETURN_DATA_REGNO (i);
1074 if (regno == INVALID_REGNUM)
1075 break;
1076 if (! bitmap_bit_p (df_get_live_in (bb), regno))
1077 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
1078 regno, true);
1080 #endif
1083 /* Save current register pressure related info. */
1084 static void
1085 save_reg_pressure (void)
1087 int i;
1089 for (i = 0; i < ira_pressure_classes_num; i++)
1090 saved_reg_pressure[ira_pressure_classes[i]]
1091 = curr_reg_pressure[ira_pressure_classes[i]];
1092 bitmap_copy (saved_reg_live, curr_reg_live);
1095 /* Restore saved register pressure related info. */
1096 static void
1097 restore_reg_pressure (void)
1099 int i;
1101 for (i = 0; i < ira_pressure_classes_num; i++)
1102 curr_reg_pressure[ira_pressure_classes[i]]
1103 = saved_reg_pressure[ira_pressure_classes[i]];
1104 bitmap_copy (curr_reg_live, saved_reg_live);
1107 /* Return TRUE if the register is dying after its USE. */
1108 static bool
1109 dying_use_p (struct reg_use_data *use)
1111 struct reg_use_data *next;
1113 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
1114 if (NONDEBUG_INSN_P (next->insn)
1115 && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
1116 return false;
1117 return true;
1120 /* Print info about the current register pressure and its excess for
1121 each pressure class. */
1122 static void
1123 print_curr_reg_pressure (void)
1125 int i;
1126 enum reg_class cl;
1128 fprintf (sched_dump, ";;\t");
1129 for (i = 0; i < ira_pressure_classes_num; i++)
1131 cl = ira_pressure_classes[i];
1132 gcc_assert (curr_reg_pressure[cl] >= 0);
1133 fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl],
1134 curr_reg_pressure[cl],
1135 curr_reg_pressure[cl] - sched_class_regs_num[cl]);
1137 fprintf (sched_dump, "\n");
1140 /* Determine if INSN has a condition that is clobbered if a register
1141 in SET_REGS is modified. */
1142 static bool
1143 cond_clobbered_p (rtx_insn *insn, HARD_REG_SET set_regs)
1145 rtx pat = PATTERN (insn);
1146 gcc_assert (GET_CODE (pat) == COND_EXEC);
1147 if (TEST_HARD_REG_BIT (set_regs, REGNO (XEXP (COND_EXEC_TEST (pat), 0))))
1149 sd_iterator_def sd_it;
1150 dep_t dep;
1151 haifa_change_pattern (insn, ORIG_PAT (insn));
1152 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1153 DEP_STATUS (dep) &= ~DEP_CANCELLED;
1154 TODO_SPEC (insn) = HARD_DEP;
1155 if (sched_verbose >= 2)
1156 fprintf (sched_dump,
1157 ";;\t\tdequeue insn %s because of clobbered condition\n",
1158 (*current_sched_info->print_insn) (insn, 0));
1159 return true;
1162 return false;
1165 /* This function should be called after modifying the pattern of INSN,
1166 to update scheduler data structures as needed. */
1167 static void
1168 update_insn_after_change (rtx_insn *insn)
1170 sd_iterator_def sd_it;
1171 dep_t dep;
1173 dfa_clear_single_insn_cache (insn);
1175 sd_it = sd_iterator_start (insn,
1176 SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK);
1177 while (sd_iterator_cond (&sd_it, &dep))
1179 DEP_COST (dep) = UNKNOWN_DEP_COST;
1180 sd_iterator_next (&sd_it);
1183 /* Invalidate INSN_COST, so it'll be recalculated. */
1184 INSN_COST (insn) = -1;
1185 /* Invalidate INSN_TICK, so it'll be recalculated. */
1186 INSN_TICK (insn) = INVALID_TICK;
1190 /* Two VECs, one to hold dependencies for which pattern replacements
1191 need to be applied or restored at the start of the next cycle, and
1192 another to hold an integer that is either one, to apply the
1193 corresponding replacement, or zero to restore it. */
1194 static vec<dep_t> next_cycle_replace_deps;
1195 static vec<int> next_cycle_apply;
1197 static void apply_replacement (dep_t, bool);
1198 static void restore_pattern (dep_t, bool);
1200 /* Look at the remaining dependencies for insn NEXT, and compute and return
1201 the TODO_SPEC value we should use for it. This is called after one of
1202 NEXT's dependencies has been resolved.
1203 We also perform pattern replacements for predication, and for broken
1204 replacement dependencies. The latter is only done if FOR_BACKTRACK is
1205 false. */
1207 static ds_t
1208 recompute_todo_spec (rtx_insn *next, bool for_backtrack)
1210 ds_t new_ds;
1211 sd_iterator_def sd_it;
1212 dep_t dep, modify_dep = NULL;
1213 int n_spec = 0;
1214 int n_control = 0;
1215 int n_replace = 0;
1216 bool first_p = true;
1218 if (sd_lists_empty_p (next, SD_LIST_BACK))
1219 /* NEXT has all its dependencies resolved. */
1220 return 0;
1222 if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
1223 return HARD_DEP;
1225 /* Now we've got NEXT with speculative deps only.
1226 1. Look at the deps to see what we have to do.
1227 2. Check if we can do 'todo'. */
1228 new_ds = 0;
1230 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
1232 rtx_insn *pro = DEP_PRO (dep);
1233 ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
1235 if (DEBUG_INSN_P (pro) && !DEBUG_INSN_P (next))
1236 continue;
1238 if (ds)
1240 n_spec++;
1241 if (first_p)
1243 first_p = false;
1245 new_ds = ds;
1247 else
1248 new_ds = ds_merge (new_ds, ds);
1250 else if (DEP_TYPE (dep) == REG_DEP_CONTROL)
1252 if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
1254 n_control++;
1255 modify_dep = dep;
1257 DEP_STATUS (dep) &= ~DEP_CANCELLED;
1259 else if (DEP_REPLACE (dep) != NULL)
1261 if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
1263 n_replace++;
1264 modify_dep = dep;
1266 DEP_STATUS (dep) &= ~DEP_CANCELLED;
1270 if (n_replace > 0 && n_control == 0 && n_spec == 0)
1272 if (!dbg_cnt (sched_breakdep))
1273 return HARD_DEP;
1274 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
1276 struct dep_replacement *desc = DEP_REPLACE (dep);
1277 if (desc != NULL)
1279 if (desc->insn == next && !for_backtrack)
1281 gcc_assert (n_replace == 1);
1282 apply_replacement (dep, true);
1284 DEP_STATUS (dep) |= DEP_CANCELLED;
1287 return 0;
1290 else if (n_control == 1 && n_replace == 0 && n_spec == 0)
1292 rtx_insn *pro, *other;
1293 rtx new_pat;
1294 rtx cond = NULL_RTX;
1295 bool success;
1296 rtx_insn *prev = NULL;
1297 int i;
1298 unsigned regno;
1300 if ((current_sched_info->flags & DO_PREDICATION) == 0
1301 || (ORIG_PAT (next) != NULL_RTX
1302 && PREDICATED_PAT (next) == NULL_RTX))
1303 return HARD_DEP;
1305 pro = DEP_PRO (modify_dep);
1306 other = real_insn_for_shadow (pro);
1307 if (other != NULL_RTX)
1308 pro = other;
1310 cond = sched_get_reverse_condition_uncached (pro);
1311 regno = REGNO (XEXP (cond, 0));
1313 /* Find the last scheduled insn that modifies the condition register.
1314 We can stop looking once we find the insn we depend on through the
1315 REG_DEP_CONTROL; if the condition register isn't modified after it,
1316 we know that it still has the right value. */
1317 if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
1318 FOR_EACH_VEC_ELT_REVERSE (scheduled_insns, i, prev)
1320 HARD_REG_SET t;
1322 find_all_hard_reg_sets (prev, &t, true);
1323 if (TEST_HARD_REG_BIT (t, regno))
1324 return HARD_DEP;
1325 if (prev == pro)
1326 break;
1328 if (ORIG_PAT (next) == NULL_RTX)
1330 ORIG_PAT (next) = PATTERN (next);
1332 new_pat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (next));
1333 success = haifa_change_pattern (next, new_pat);
1334 if (!success)
1335 return HARD_DEP;
1336 PREDICATED_PAT (next) = new_pat;
1338 else if (PATTERN (next) != PREDICATED_PAT (next))
1340 bool success = haifa_change_pattern (next,
1341 PREDICATED_PAT (next));
1342 gcc_assert (success);
1344 DEP_STATUS (modify_dep) |= DEP_CANCELLED;
1345 return DEP_CONTROL;
1348 if (PREDICATED_PAT (next) != NULL_RTX)
1350 int tick = INSN_TICK (next);
1351 bool success = haifa_change_pattern (next,
1352 ORIG_PAT (next));
1353 INSN_TICK (next) = tick;
1354 gcc_assert (success);
1357 /* We can't handle the case where there are both speculative and control
1358 dependencies, so we return HARD_DEP in such a case. Also fail if
1359 we have speculative dependencies with not enough points, or more than
1360 one control dependency. */
1361 if ((n_spec > 0 && (n_control > 0 || n_replace > 0))
1362 || (n_spec > 0
1363 /* Too few points? */
1364 && ds_weak (new_ds) < spec_info->data_weakness_cutoff)
1365 || n_control > 0
1366 || n_replace > 0)
1367 return HARD_DEP;
1369 return new_ds;
1372 /* Pointer to the last instruction scheduled. */
1373 static rtx_insn *last_scheduled_insn;
1375 /* Pointer to the last nondebug instruction scheduled within the
1376 block, or the prev_head of the scheduling block. Used by
1377 rank_for_schedule, so that insns independent of the last scheduled
1378 insn will be preferred over dependent instructions. */
1379 static rtx last_nondebug_scheduled_insn;
1381 /* Pointer that iterates through the list of unscheduled insns if we
1382 have a dbg_cnt enabled. It always points at an insn prior to the
1383 first unscheduled one. */
1384 static rtx_insn *nonscheduled_insns_begin;
1386 /* Compute cost of executing INSN.
1387 This is the number of cycles between instruction issue and
1388 instruction results. */
1390 insn_cost (rtx_insn *insn)
1392 int cost;
1394 if (sel_sched_p ())
1396 if (recog_memoized (insn) < 0)
1397 return 0;
1399 cost = insn_default_latency (insn);
1400 if (cost < 0)
1401 cost = 0;
1403 return cost;
1406 cost = INSN_COST (insn);
1408 if (cost < 0)
1410 /* A USE insn, or something else we don't need to
1411 understand. We can't pass these directly to
1412 result_ready_cost or insn_default_latency because it will
1413 trigger a fatal error for unrecognizable insns. */
1414 if (recog_memoized (insn) < 0)
1416 INSN_COST (insn) = 0;
1417 return 0;
1419 else
1421 cost = insn_default_latency (insn);
1422 if (cost < 0)
1423 cost = 0;
1425 INSN_COST (insn) = cost;
1429 return cost;
1432 /* Compute cost of dependence LINK.
1433 This is the number of cycles between instruction issue and
1434 instruction results.
1435 ??? We also use this function to call recog_memoized on all insns. */
1437 dep_cost_1 (dep_t link, dw_t dw)
1439 rtx_insn *insn = DEP_PRO (link);
1440 rtx_insn *used = DEP_CON (link);
1441 int cost;
1443 if (DEP_COST (link) != UNKNOWN_DEP_COST)
1444 return DEP_COST (link);
1446 if (delay_htab)
1448 struct delay_pair *delay_entry;
1449 delay_entry
1450 = delay_htab_i2->find_with_hash (used, htab_hash_pointer (used));
1451 if (delay_entry)
1453 if (delay_entry->i1 == insn)
1455 DEP_COST (link) = pair_delay (delay_entry);
1456 return DEP_COST (link);
1461 /* A USE insn should never require the value used to be computed.
1462 This allows the computation of a function's result and parameter
1463 values to overlap the return and call. We don't care about the
1464 dependence cost when only decreasing register pressure. */
1465 if (recog_memoized (used) < 0)
1467 cost = 0;
1468 recog_memoized (insn);
1470 else
1472 enum reg_note dep_type = DEP_TYPE (link);
1474 cost = insn_cost (insn);
1476 if (INSN_CODE (insn) >= 0)
1478 if (dep_type == REG_DEP_ANTI)
1479 cost = 0;
1480 else if (dep_type == REG_DEP_OUTPUT)
1482 cost = (insn_default_latency (insn)
1483 - insn_default_latency (used));
1484 if (cost <= 0)
1485 cost = 1;
1487 else if (bypass_p (insn))
1488 cost = insn_latency (insn, used);
1492 if (targetm.sched.adjust_cost_2)
1493 cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
1494 dw);
1495 else if (targetm.sched.adjust_cost != NULL)
1497 /* This variable is used for backward compatibility with the
1498 targets. */
1499 rtx_insn_list *dep_cost_rtx_link =
1500 alloc_INSN_LIST (NULL_RTX, NULL);
1502 /* Make it self-cycled, so that if someone tries to walk over this
1503 incomplete list he/she will be caught in an endless loop. */
1504 XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
1506 /* Targets use only REG_NOTE_KIND of the link. */
1507 PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
1509 cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
1510 insn, cost);
1512 free_INSN_LIST_node (dep_cost_rtx_link);
1515 if (cost < 0)
1516 cost = 0;
1519 DEP_COST (link) = cost;
1520 return cost;
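/* To illustrate the rules above with invented latencies: a true data
   dependence on a producer whose default latency is 3 costs 3 cycles; an
   output dependence between that producer and a consumer of default latency
   1 costs 3 - 1 = 2 (never less than 1); an anti dependence costs 0.  A
   target adjust_cost hook may then modify the value, and negative results
   are clamped to 0.  */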
1523 /* Compute cost of dependence LINK.
1524 This is the number of cycles between instruction issue and
1525 instruction results. */
1527 dep_cost (dep_t link)
1529 return dep_cost_1 (link, 0);
1532 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
1533 INSN_PRIORITY explicitly. */
1534 void
1535 increase_insn_priority (rtx_insn *insn, int amount)
1537 if (!sel_sched_p ())
1539 /* We're dealing with haifa-sched.c INSN_PRIORITY. */
1540 if (INSN_PRIORITY_KNOWN (insn))
1541 INSN_PRIORITY (insn) += amount;
1543 else
1545 /* In sel-sched.c INSN_PRIORITY is not kept up to date.
1546 Use EXPR_PRIORITY instead. */
1547 sel_add_to_insn_priority (insn, amount);
1551 /* Return 'true' if DEP should be included in priority calculations. */
1552 static bool
1553 contributes_to_priority_p (dep_t dep)
1555 if (DEBUG_INSN_P (DEP_CON (dep))
1556 || DEBUG_INSN_P (DEP_PRO (dep)))
1557 return false;
1559 /* Critical path is meaningful in block boundaries only. */
1560 if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
1561 DEP_PRO (dep)))
1562 return false;
1564 if (DEP_REPLACE (dep) != NULL)
1565 return false;
1567 /* If the flag COUNT_SPEC_IN_CRITICAL_PATH is set,
1568 then speculative instructions will be less likely to be
1569 scheduled. That is because the priority of
1570 their producers will increase, and, thus, the
1571 producers will be more likely to be scheduled, thus
1572 resolving the dependence. */
1573 if (sched_deps_info->generate_spec_deps
1574 && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
1575 && (DEP_STATUS (dep) & SPECULATIVE))
1576 return false;
1578 return true;
1581 /* Compute the number of nondebug deps in list LIST for INSN. */
1583 static int
1584 dep_list_size (rtx insn, sd_list_types_def list)
1586 sd_iterator_def sd_it;
1587 dep_t dep;
1588 int dbgcount = 0, nodbgcount = 0;
1590 if (!MAY_HAVE_DEBUG_INSNS)
1591 return sd_lists_size (insn, list);
1593 FOR_EACH_DEP (insn, list, sd_it, dep)
1595 if (DEBUG_INSN_P (DEP_CON (dep)))
1596 dbgcount++;
1597 else if (!DEBUG_INSN_P (DEP_PRO (dep)))
1598 nodbgcount++;
1601 gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, list));
1603 return nodbgcount;
1606 /* Compute the priority number for INSN. */
1607 static int
1608 priority (rtx_insn *insn)
1610 if (! INSN_P (insn))
1611 return 0;
1613 /* We should not be interested in priority of an already scheduled insn. */
1614 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
1616 if (!INSN_PRIORITY_KNOWN (insn))
1618 int this_priority = -1;
1620 if (dep_list_size (insn, SD_LIST_FORW) == 0)
1621 /* ??? We should set INSN_PRIORITY to insn_cost when an insn has
1622 some forward deps but all of them are ignored by the
1623 contributes_to_priority hook. At the moment we set the priority of
1624 such an insn to 0. */
1625 this_priority = insn_cost (insn);
1626 else
1628 rtx_insn *prev_first, *twin;
1629 basic_block rec;
1631 /* For recovery check instructions we calculate priority slightly
1632 differently than for normal instructions. Instead of walking
1633 through the INSN_FORW_DEPS (check) list, we walk through the
1634 INSN_FORW_DEPS list of each instruction in the corresponding
1635 recovery block. */
1637 /* Selective scheduling does not define RECOVERY_BLOCK macro. */
1638 rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
1639 if (!rec || rec == EXIT_BLOCK_PTR_FOR_FN (cfun))
1641 prev_first = PREV_INSN (insn);
1642 twin = insn;
1644 else
1646 prev_first = NEXT_INSN (BB_HEAD (rec));
1647 twin = PREV_INSN (BB_END (rec));
1652 sd_iterator_def sd_it;
1653 dep_t dep;
1655 FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
1657 rtx_insn *next;
1658 int next_priority;
1660 next = DEP_CON (dep);
1662 if (BLOCK_FOR_INSN (next) != rec)
1664 int cost;
1666 if (!contributes_to_priority_p (dep))
1667 continue;
1669 if (twin == insn)
1670 cost = dep_cost (dep);
1671 else
1673 struct _dep _dep1, *dep1 = &_dep1;
1675 init_dep (dep1, insn, next, REG_DEP_ANTI);
1677 cost = dep_cost (dep1);
1680 next_priority = cost + priority (next);
1682 if (next_priority > this_priority)
1683 this_priority = next_priority;
1687 twin = PREV_INSN (twin);
1689 while (twin != prev_first);
1692 if (this_priority < 0)
1694 gcc_assert (this_priority == -1);
1696 this_priority = insn_cost (insn);
1699 INSN_PRIORITY (insn) = this_priority;
1700 INSN_PRIORITY_STATUS (insn) = 1;
1703 return INSN_PRIORITY (insn);
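/* A small worked example (costs invented for illustration): in a dependence
   chain A -> B -> C where dep_cost is 2 for A->B and 3 for B->C and C has no
   forward deps, priority (C) = insn_cost (C), priority (B) = 3 + priority (C)
   and priority (A) = 2 + priority (B); i.e. an insn's priority is the length
   of the longest cost-weighted dependence path from it to the end of the
   block.  */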
1706 /* Macros and functions for keeping the priority queue sorted, and
1707 dealing with queuing and dequeuing of instructions. */
1709 /* For each pressure class CL, set DEATH[CL] to the number of registers
1710 in that class that die in INSN. */
1712 static void
1713 calculate_reg_deaths (rtx_insn *insn, int *death)
1715 int i;
1716 struct reg_use_data *use;
1718 for (i = 0; i < ira_pressure_classes_num; i++)
1719 death[ira_pressure_classes[i]] = 0;
1720 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1721 if (dying_use_p (use))
1722 mark_regno_birth_or_death (0, death, use->regno, true);
1725 /* Setup info about the current register pressure impact of scheduling
1726 INSN at the current scheduling point. */
1727 static void
1728 setup_insn_reg_pressure_info (rtx_insn *insn)
1730 int i, change, before, after, hard_regno;
1731 int excess_cost_change;
1732 machine_mode mode;
1733 enum reg_class cl;
1734 struct reg_pressure_data *pressure_info;
1735 int *max_reg_pressure;
1736 static int death[N_REG_CLASSES];
1738 gcc_checking_assert (!DEBUG_INSN_P (insn));
1740 excess_cost_change = 0;
1741 calculate_reg_deaths (insn, death);
1742 pressure_info = INSN_REG_PRESSURE (insn);
1743 max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
1744 gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
1745 for (i = 0; i < ira_pressure_classes_num; i++)
1747 cl = ira_pressure_classes[i];
1748 gcc_assert (curr_reg_pressure[cl] >= 0);
1749 change = (int) pressure_info[i].set_increase - death[cl];
1750 before = MAX (0, max_reg_pressure[i] - sched_class_regs_num[cl]);
1751 after = MAX (0, max_reg_pressure[i] + change
1752 - sched_class_regs_num[cl]);
1753 hard_regno = ira_class_hard_regs[cl][0];
1754 gcc_assert (hard_regno >= 0);
1755 mode = reg_raw_mode[hard_regno];
1756 excess_cost_change += ((after - before)
1757 * (ira_memory_move_cost[mode][cl][0]
1758 + ira_memory_move_cost[mode][cl][1]));
1760 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
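/* For instance (illustrative numbers): if scheduling INSN pushes the maximal
   pressure in class CL from exactly at the limit (BEFORE == 0) to one
   register over it (AFTER == 1), the excess cost change grows by the cost of
   spilling and reloading one register of that class,
   ira_memory_move_cost[mode][CL][0] + ira_memory_move_cost[mode][CL][1].  */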
1763 /* This is the first page of code related to SCHED_PRESSURE_MODEL.
1764 It tries to make the scheduler take register pressure into account
1765 without introducing too many unnecessary stalls. It hooks into the
1766 main scheduling algorithm at several points:
1768 - Before scheduling starts, model_start_schedule constructs a
1769 "model schedule" for the current block. This model schedule is
1770 chosen solely to keep register pressure down. It does not take the
1771 target's pipeline or the original instruction order into account,
1772 except as a tie-breaker. It also doesn't work to a particular
1773 pressure limit.
1775 This model schedule gives us an idea of what pressure can be
1776 achieved for the block and gives us an example of a schedule that
1777 keeps to that pressure. It also makes the final schedule less
1778 dependent on the original instruction order. This is important
1779 because the original order can either be "wide" (many values live
1780 at once, such as in user-scheduled code) or "narrow" (few values
1781 live at once, such as after loop unrolling, where several
1782 iterations are executed sequentially).
1784 We do not apply this model schedule to the rtx stream. We simply
1785 record it in model_schedule. We also compute the maximum pressure,
1786 MP, that was seen during this schedule.
1788 - Instructions are added to the ready queue even if they require
1789 a stall. The length of the stall is instead computed as:
1791 MAX (INSN_TICK (INSN) - clock_var, 0)
1793 (= insn_delay). This allows rank_for_schedule to choose between
1794 introducing a deliberate stall or increasing pressure.
1796 - Before sorting the ready queue, model_set_excess_costs assigns
1797 a pressure-based cost to each ready instruction in the queue.
1798 This is the instruction's INSN_REG_PRESSURE_EXCESS_COST_CHANGE
1799 (ECC for short) and is effectively measured in cycles.
1801 - rank_for_schedule ranks instructions first by:
1803 ECC (insn) + insn_delay (insn)
1805 and then, to break ties, by:
1807 insn_delay (insn)
1809 So, for example, an instruction X1 with an ECC of 1 that can issue
1810 now will win over an instruction X0 with an ECC of zero that would
1811 introduce a stall of one cycle. However, an instruction X2 with an
1812 ECC of 2 that can issue now will lose to both X0 and X1.
1814 - When an instruction is scheduled, model_recompute updates the model
1815 schedule with the new pressures (some of which might now exceed the
1816 original maximum pressure MP). model_update_limit_points then searches
1817 for the new point of maximum pressure, if not already known. */
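/* For illustration only, the comparison described above boils down to
   something like the following hypothetical helper (not part of this
   file):

     static int
     pressure_rank_sketch (rtx_insn *a, rtx_insn *b)
     {
       int ka = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (a) + insn_delay (a);
       int kb = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (b) + insn_delay (b);
       if (ka != kb)
         return ka - kb;
       return insn_delay (a) - insn_delay (b);
     }

   where the smaller value is preferred.  In the example above, X1 (ECC 1,
   delay 0) ties with X0 (ECC 0, delay 1) on the first key and wins on the
   second, while X2 (ECC 2, delay 0) loses to both on the first key.  */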
1819 /* Used to separate high-verbosity debug information for SCHED_PRESSURE_MODEL
1820 from surrounding debug information. */
1821 #define MODEL_BAR \
1822 ";;\t\t+------------------------------------------------------\n"
1824 /* Information about the pressure on a particular register class at a
1825 particular point of the model schedule. */
1826 struct model_pressure_data {
1827 /* The pressure at this point of the model schedule, or -1 if the
1828 point is associated with an instruction that has already been
1829 scheduled. */
1830 int ref_pressure;
1832 /* The maximum pressure during or after this point of the model schedule. */
1833 int max_pressure;
1834 };
1836 /* Per-instruction information that is used while building the model
1837 schedule. Here, "schedule" refers to the model schedule rather
1838 than the main schedule. */
1839 struct model_insn_info {
1840 /* The instruction itself. */
1841 rtx_insn *insn;
1843 /* If this instruction is in model_worklist, these fields link to the
1844 previous (higher-priority) and next (lower-priority) instructions
1845 in the list. */
1846 struct model_insn_info *prev;
1847 struct model_insn_info *next;
1849 /* While constructing the schedule, QUEUE_INDEX describes whether an
1850 instruction has already been added to the schedule (QUEUE_SCHEDULED),
1851 is in model_worklist (QUEUE_READY), or neither (QUEUE_NOWHERE).
1852 old_queue records the value that QUEUE_INDEX had before scheduling
1853 started, so that we can restore it once the schedule is complete. */
1854 int old_queue;
1856 /* The relative importance of an unscheduled instruction. Higher
1857 values indicate greater importance. */
1858 unsigned int model_priority;
1860 /* The length of the longest path of satisfied true dependencies
1861 that leads to this instruction. */
1862 unsigned int depth;
1864 /* The length of the longest path of dependencies of any kind
1865 that leads from this instruction. */
1866 unsigned int alap;
1868 /* The number of predecessor nodes that must still be scheduled. */
1869 int unscheduled_preds;
1870 };
1872 /* Information about the pressure limit for a particular register class.
1873 This structure is used when applying a model schedule to the main
1874 schedule. */
1875 struct model_pressure_limit {
1876 /* The maximum register pressure seen in the original model schedule. */
1877 int orig_pressure;
1879 /* The maximum register pressure seen in the current model schedule
1880 (which excludes instructions that have already been scheduled). */
1881 int pressure;
1883 /* The point of the current model schedule at which PRESSURE is first
1884 reached. It is set to -1 if the value needs to be recomputed. */
1885 int point;
1886 };
1888 /* Describes a particular way of measuring register pressure. */
1889 struct model_pressure_group {
1890 /* Index PCI describes the maximum pressure on ira_pressure_classes[PCI]. */
1891 struct model_pressure_limit limits[N_REG_CLASSES];
1893 /* Index (POINT * ira_pressure_classes_num + PCI) describes the pressure
1894 on register class ira_pressure_classes[PCI] at point POINT of the
1895 current model schedule. A POINT of model_num_insns describes the
1896 pressure at the end of the schedule. */
1897 struct model_pressure_data *model;
1898 };
1900 /* Index POINT gives the instruction at point POINT of the model schedule.
1901 This array doesn't change during main scheduling. */
1902 static vec<rtx_insn *> model_schedule;
1904 /* The list of instructions in the model worklist, sorted in order of
1905 decreasing priority. */
1906 static struct model_insn_info *model_worklist;
1908 /* Index I describes the instruction with INSN_LUID I. */
1909 static struct model_insn_info *model_insns;
1911 /* The number of instructions in the model schedule. */
1912 static int model_num_insns;
1914 /* The index of the first instruction in model_schedule that hasn't yet been
1915 added to the main schedule, or model_num_insns if all of them have. */
1916 static int model_curr_point;
1918 /* Describes the pressure before each instruction in the model schedule. */
1919 static struct model_pressure_group model_before_pressure;
1921 /* The first unused model_priority value (as used in model_insn_info). */
1922 static unsigned int model_next_priority;
1925 /* The model_pressure_data for ira_pressure_classes[PCI] in GROUP
1926 at point POINT of the model schedule. */
1927 #define MODEL_PRESSURE_DATA(GROUP, POINT, PCI) \
1928 (&(GROUP)->model[(POINT) * ira_pressure_classes_num + (PCI)])
1930 /* The maximum pressure on ira_pressure_classes[PCI] in GROUP at or
1931 after point POINT of the model schedule. */
1932 #define MODEL_MAX_PRESSURE(GROUP, POINT, PCI) \
1933 (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->max_pressure)
1935 /* The pressure on ira_pressure_classes[PCI] in GROUP at point POINT
1936 of the model schedule. */
1937 #define MODEL_REF_PRESSURE(GROUP, POINT, PCI) \
1938 (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->ref_pressure)
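/* As an illustration of the layout (assumed values, for exposition only):
   with ira_pressure_classes_num == 2, GROUP->model is a flat, point-major
   array, so MODEL_PRESSURE_DATA (GROUP, 3, 1) accesses GROUP->model[3 * 2 + 1].
   A POINT equal to model_num_insns addresses the trailing row that records
   the pressure at the end of the schedule, which is why the array is
   allocated with model_num_insns + 1 rows.  */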
1940 /* Information about INSN that is used when creating the model schedule. */
1941 #define MODEL_INSN_INFO(INSN) \
1942 (&model_insns[INSN_LUID (INSN)])
1944 /* The instruction at point POINT of the model schedule. */
1945 #define MODEL_INSN(POINT) \
1946 (model_schedule[POINT])
1949 /* Return INSN's index in the model schedule, or model_num_insns if it
1950 doesn't belong to that schedule. */
1952 static int
1953 model_index (rtx_insn *insn)
1955 if (INSN_MODEL_INDEX (insn) == 0)
1956 return model_num_insns;
1957 return INSN_MODEL_INDEX (insn) - 1;
1960 /* Make sure that GROUP->limits is up-to-date for the current point
1961 of the model schedule. */
1963 static void
1964 model_update_limit_points_in_group (struct model_pressure_group *group)
1966 int pci, max_pressure, point;
1968 for (pci = 0; pci < ira_pressure_classes_num; pci++)
1970 /* We may have passed the final point at which the pressure in
1971 group->limits[pci].pressure was reached. Update the limit if so. */
1972 max_pressure = MODEL_MAX_PRESSURE (group, model_curr_point, pci);
1973 group->limits[pci].pressure = max_pressure;
1975 /* Find the point at which MAX_PRESSURE is first reached. We need
1976 to search in three cases:
1978 - We've already moved past the previous pressure point.
1979 In this case we search forward from model_curr_point.
1981 - We scheduled the previous point of maximum pressure ahead of
1982 its position in the model schedule, but doing so didn't bring
1983 the pressure point earlier. In this case we search forward
1984 from that previous pressure point.
1986 - Scheduling an instruction early caused the maximum pressure
1987 to decrease. In this case we will have set the pressure
1988 point to -1, and we search forward from model_curr_point. */
1989 point = MAX (group->limits[pci].point, model_curr_point);
1990 while (point < model_num_insns
1991 && MODEL_REF_PRESSURE (group, point, pci) < max_pressure)
1992 point++;
1993 group->limits[pci].point = point;
1995 gcc_assert (MODEL_REF_PRESSURE (group, point, pci) == max_pressure);
1996 gcc_assert (MODEL_MAX_PRESSURE (group, point, pci) == max_pressure);
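/* A small worked example with made-up numbers: suppose the reference
   pressures for some class from model_curr_point to the end of the model
   schedule are 4, 7, 7, 5, so MODEL_MAX_PRESSURE at model_curr_point is 7.
   If the previously recorded limit point is not later than
   model_curr_point, the loop above starts at model_curr_point and stops at
   the first of the two points whose reference pressure is 7, which becomes
   the new group->limits[pci].point.  */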
2000 /* Make sure that all register-pressure limits are up-to-date for the
2001 current position in the model schedule. */
2003 static void
2004 model_update_limit_points (void)
2006 model_update_limit_points_in_group (&model_before_pressure);
2009 /* Return the model_index of the last unscheduled use in chain USE
2010 outside of USE's instruction. Return -1 if there are no other uses,
2011 or model_num_insns if the register is live at the end of the block. */
2013 static int
2014 model_last_use_except (struct reg_use_data *use)
2016 struct reg_use_data *next;
2017 int last, index;
2019 last = -1;
2020 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
2021 if (NONDEBUG_INSN_P (next->insn)
2022 && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
2024 index = model_index (next->insn);
2025 if (index == model_num_insns)
2026 return model_num_insns;
2027 if (last < index)
2028 last = index;
2030 return last;
2033 /* An instruction with model_index POINT has just been scheduled, and it
2034 adds DELTA to the pressure on ira_pressure_classes[PCI] after POINT - 1.
2035 Update MODEL_REF_PRESSURE (GROUP, POINT, PCI) and
2036 MODEL_MAX_PRESSURE (GROUP, POINT, PCI) accordingly. */
2038 static void
2039 model_start_update_pressure (struct model_pressure_group *group,
2040 int point, int pci, int delta)
2042 int next_max_pressure;
2044 if (point == model_num_insns)
2046 /* The instruction wasn't part of the model schedule; it was moved
2047 from a different block. Update the pressure for the end of
2048 the model schedule. */
2049 MODEL_REF_PRESSURE (group, point, pci) += delta;
2050 MODEL_MAX_PRESSURE (group, point, pci) += delta;
2052 else
2054 /* Record that this instruction has been scheduled. Nothing now
2055 changes between POINT and POINT + 1, so get the maximum pressure
2056 from the latter. If the maximum pressure decreases, the new
2057 pressure point may be before POINT. */
2058 MODEL_REF_PRESSURE (group, point, pci) = -1;
2059 next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2060 if (MODEL_MAX_PRESSURE (group, point, pci) > next_max_pressure)
2062 MODEL_MAX_PRESSURE (group, point, pci) = next_max_pressure;
2063 if (group->limits[pci].point == point)
2064 group->limits[pci].point = -1;
2069 /* Record that scheduling a later instruction has changed the pressure
2070 at point POINT of the model schedule by DELTA (which might be 0).
2071 Update GROUP accordingly. Return nonzero if these changes might
2072 trigger changes to previous points as well. */
2074 static int
2075 model_update_pressure (struct model_pressure_group *group,
2076 int point, int pci, int delta)
2078 int ref_pressure, max_pressure, next_max_pressure;
2080 /* If POINT hasn't yet been scheduled, update its pressure. */
2081 ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
2082 if (ref_pressure >= 0 && delta != 0)
2084 ref_pressure += delta;
2085 MODEL_REF_PRESSURE (group, point, pci) = ref_pressure;
2087 /* Check whether the maximum pressure in the overall schedule
2088 has increased. (This means that the MODEL_MAX_PRESSURE of
2089 every point <= POINT will need to increase too; see below.) */
2090 if (group->limits[pci].pressure < ref_pressure)
2091 group->limits[pci].pressure = ref_pressure;
2093 /* If we are at maximum pressure, and the maximum pressure
2094 point was previously unknown or later than POINT,
2095 bring it forward. */
2096 if (group->limits[pci].pressure == ref_pressure
2097 && !IN_RANGE (group->limits[pci].point, 0, point))
2098 group->limits[pci].point = point;
2100 /* If POINT used to be the point of maximum pressure, but isn't
2101 any longer, we need to recalculate it using a forward walk. */
2102 if (group->limits[pci].pressure > ref_pressure
2103 && group->limits[pci].point == point)
2104 group->limits[pci].point = -1;
2107 /* Update the maximum pressure at POINT. Changes here might also
2108 affect the maximum pressure at POINT - 1. */
2109 next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2110 max_pressure = MAX (ref_pressure, next_max_pressure);
2111 if (MODEL_MAX_PRESSURE (group, point, pci) != max_pressure)
2113 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
2114 return 1;
2116 return 0;
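/* Illustrative example with invented numbers: suppose DELTA is +1 at
   POINT, so the reference pressure there goes from 6 to 7, and suppose
   MODEL_MAX_PRESSURE at POINT + 1 is 6.  The new maximum at POINT is
   MAX (7, 6) == 7, which differs from the stored value of 6, so the
   function returns 1 and the caller keeps walking backwards to update
   earlier points as well.  */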
2119 /* INSN has just been scheduled. Update the model schedule accordingly. */
2121 static void
2122 model_recompute (rtx_insn *insn)
2124 struct {
2125 int last_use;
2126 int regno;
2127 } uses[FIRST_PSEUDO_REGISTER + MAX_RECOG_OPERANDS];
2128 struct reg_use_data *use;
2129 struct reg_pressure_data *reg_pressure;
2130 int delta[N_REG_CLASSES];
2131 int pci, point, mix, new_last, cl, ref_pressure, queue;
2132 unsigned int i, num_uses, num_pending_births;
2133 bool print_p;
2135 /* The destinations of INSN were previously live from POINT onwards, but are
2136 now live from model_curr_point onwards. Set up DELTA accordingly. */
2137 point = model_index (insn);
2138 reg_pressure = INSN_REG_PRESSURE (insn);
2139 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2141 cl = ira_pressure_classes[pci];
2142 delta[cl] = reg_pressure[pci].set_increase;
2145 /* Record which registers previously died at POINT, but which now die
2146 before POINT. Adjust DELTA so that it represents the effect of
2147 this change after POINT - 1. Set NUM_PENDING_BIRTHS to the number of
2148 registers that will be born in the range [model_curr_point, POINT). */
2149 num_uses = 0;
2150 num_pending_births = 0;
2151 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2153 new_last = model_last_use_except (use);
2154 if (new_last < point)
2156 gcc_assert (num_uses < ARRAY_SIZE (uses));
2157 uses[num_uses].last_use = new_last;
2158 uses[num_uses].regno = use->regno;
2159 /* This register is no longer live after POINT - 1. */
2160 mark_regno_birth_or_death (NULL, delta, use->regno, false);
2161 num_uses++;
2162 if (new_last >= 0)
2163 num_pending_births++;
2167 /* Update the MODEL_REF_PRESSURE and MODEL_MAX_PRESSURE for POINT.
2168 Also set each group pressure limit for POINT. */
2169 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2171 cl = ira_pressure_classes[pci];
2172 model_start_update_pressure (&model_before_pressure,
2173 point, pci, delta[cl]);
2176 /* Walk the model schedule backwards, starting immediately before POINT. */
2177 print_p = false;
2178 if (point != model_curr_point)
2179 do
2181 point--;
2182 insn = MODEL_INSN (point);
2183 queue = QUEUE_INDEX (insn);
2185 if (queue != QUEUE_SCHEDULED)
2187 /* DELTA describes the effect of the move on the register pressure
2188 after POINT. Make it describe the effect on the pressure
2189 before POINT. */
2190 i = 0;
2191 while (i < num_uses)
2193 if (uses[i].last_use == point)
2195 /* This register is now live again. */
2196 mark_regno_birth_or_death (NULL, delta,
2197 uses[i].regno, true);
2199 /* Remove this use from the array. */
2200 uses[i] = uses[num_uses - 1];
2201 num_uses--;
2202 num_pending_births--;
2204 else
2205 i++;
2208 if (sched_verbose >= 5)
2210 if (!print_p)
2212 fprintf (sched_dump, MODEL_BAR);
2213 fprintf (sched_dump, ";;\t\t| New pressure for model"
2214 " schedule\n");
2215 fprintf (sched_dump, MODEL_BAR);
2216 print_p = true;
2219 fprintf (sched_dump, ";;\t\t| %3d %4d %-30s ",
2220 point, INSN_UID (insn),
2221 str_pattern_slim (PATTERN (insn)));
2222 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2224 cl = ira_pressure_classes[pci];
2225 ref_pressure = MODEL_REF_PRESSURE (&model_before_pressure,
2226 point, pci);
2227 fprintf (sched_dump, " %s:[%d->%d]",
2228 reg_class_names[ira_pressure_classes[pci]],
2229 ref_pressure, ref_pressure + delta[cl]);
2231 fprintf (sched_dump, "\n");
2235 /* Adjust the pressure at POINT. Set MIX to nonzero if POINT - 1
2236 might have changed as well. */
2237 mix = num_pending_births;
2238 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2240 cl = ira_pressure_classes[pci];
2241 mix |= delta[cl];
2242 mix |= model_update_pressure (&model_before_pressure,
2243 point, pci, delta[cl]);
2246 while (mix && point > model_curr_point);
2248 if (print_p)
2249 fprintf (sched_dump, MODEL_BAR);
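/* Sketch of the bookkeeping above, with invented values: if INSN sat at
   model point 10 but has just been scheduled while model_curr_point is 4,
   its results become live from point 4 instead of point 10, and any
   register whose last remaining use is earlier than point 10 stops being
   live between that use and point 10.  DELTA initially describes the
   effect after point 9; as the loop walks back from point 9 towards point
   4, each such register is added back into DELTA at the point of its last
   use, so that DELTA always describes the pressure change before the point
   currently being updated.  */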
2252 /* After DEP, which was cancelled, has been resolved for insn NEXT,
2253 check whether the insn's pattern needs restoring. */
2254 static bool
2255 must_restore_pattern_p (rtx_insn *next, dep_t dep)
2257 if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
2258 return false;
2260 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
2262 gcc_assert (ORIG_PAT (next) != NULL_RTX);
2263 gcc_assert (next == DEP_CON (dep));
2265 else
2267 struct dep_replacement *desc = DEP_REPLACE (dep);
2268 if (desc->insn != next)
2270 gcc_assert (*desc->loc == desc->orig);
2271 return false;
2274 return true;
2277 /* model_spill_cost (CL, P, P') returns the cost of increasing the
2278 pressure on CL from P to P'. We use this to calculate a "base ECC",
2279 baseECC (CL, X), for each pressure class CL and each instruction X.
2280 Supposing X changes the pressure on CL from P to P', and that the
2281 maximum pressure on CL in the current model schedule is MP', then:
2283 * if X occurs before or at the next point of maximum pressure in
2284 the model schedule and P' > MP', then:
2286 baseECC (CL, X) = model_spill_cost (CL, MP, P')
2288 The idea is that the pressure after scheduling a fixed set of
2289 instructions -- in this case, the set up to and including the
2290 next maximum pressure point -- is going to be the same regardless
2291 of the order; we simply want to keep the intermediate pressure
2292 under control. Thus X has a cost of zero unless scheduling it
2293 now would exceed MP'.
2295 If all increases in the set are by the same amount, no zero-cost
2296 instruction will ever cause the pressure to exceed MP'. However,
2297 if X is instead moved past an instruction X' with pressure in the
2298 range (MP' - (P' - P), MP'), the pressure at X' will increase
2299 beyond MP'. Since baseECC is very much a heuristic anyway,
2300 it doesn't seem worth the overhead of tracking cases like these.
2302 The cost of exceeding MP' is always based on the original maximum
2303 pressure MP. This is so that going 2 registers over the original
2304 limit has the same cost regardless of whether it comes from two
2305 separate +1 deltas or from a single +2 delta.
2307 * if X occurs after the next point of maximum pressure in the model
2308 schedule and P' > P, then:
2310 baseECC (CL, X) = model_spill_cost (CL, MP, MP' + (P' - P))
2312 That is, if we move X forward across a point of maximum pressure,
2313 and if X increases the pressure by P' - P, then we conservatively
2314 assume that scheduling X next would increase the maximum pressure
2315 by P' - P. Again, the cost of doing this is based on the original
2316 maximum pressure MP, for the same reason as above.
2318 * if P' < P, P > MP, and X occurs at or after the next point of
2319 maximum pressure, then:
2321 baseECC (CL, X) = -model_spill_cost (CL, MAX (MP, P'), P)
2323 That is, if we have already exceeded the original maximum pressure MP,
2324 and if X might reduce the maximum pressure again -- or at least push
2325 it further back, and thus allow more scheduling freedom -- it is given
2326 a negative cost to reflect the improvement.
2328 * otherwise,
2330 baseECC (CL, X) = 0
2332 In this case, X is not expected to affect the maximum pressure MP',
2333 so it has zero cost.
2335 We then create a combined value baseECC (X) that is the sum of
2336 baseECC (CL, X) for each pressure class CL.
2338 baseECC (X) could itself be used as the ECC value described above.
2339 However, this is often too conservative, in the sense that it
2340 tends to make high-priority instructions that increase pressure
2341 wait too long in cases where introducing a spill would be better.
2342 For this reason the final ECC is a priority-adjusted form of
2343 baseECC (X). Specifically, we calculate:
2345 P (X) = INSN_PRIORITY (X) - insn_delay (X) - baseECC (X)
2346 baseP = MAX { P (X) | baseECC (X) <= 0 }
2348 Then:
2350 ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0)
2352 Thus an instruction's effect on pressure is ignored if it has a high
2353 enough priority relative to the ones that don't increase pressure.
2354 Negative values of baseECC (X) do not increase the priority of X
2355 itself, but they do make it harder for other instructions to
2356 increase the pressure further.
2358 This pressure cost is deliberately timid. The intention has been
2359 to choose a heuristic that rarely interferes with the normal list
2360 scheduler in cases where that scheduler would produce good code.
2361 We simply want to curb some of its worst excesses. */
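/* A worked example of the formulas above, with invented numbers: suppose X
   has INSN_PRIORITY 10, insn_delay 0 and baseECC 3, so P (X) == 7, and
   suppose the best "cheap" instruction gives baseP == 8.  Then
   ECC (X) == MAX (MIN (8 - 7, 3), 0) == 1: X pays only one cycle of
   pressure cost because its priority is almost as good as that of the
   cheap alternatives.  If instead X had INSN_PRIORITY 20, then
   P (X) == 17 > baseP, MIN (8 - 17, 3) is negative, and ECC (X) == 0,
   i.e. its effect on pressure is ignored entirely.  */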
2363 /* Return the cost of increasing the pressure in class CL from FROM to TO.
2365 Here we use the very simplistic cost model that every register above
2366 sched_class_regs_num[CL] has a spill cost of 1. We could use other
2367 measures instead, such as one based on MEMORY_MOVE_COST. However:
2369 (1) In order for an instruction to be scheduled, the higher cost
2370 would need to be justified in a single saving of that many stalls.
2371 This is overly pessimistic, because the benefit of spilling is
2372 often to avoid a sequence of several short stalls rather than
2373 a single long one.
2375 (2) The cost is still arbitrary. Because we are not allocating
2376 registers during scheduling, we have no way of knowing for
2377 sure how many memory accesses will be required by each spill,
2378 where the spills will be placed within the block, or even
2379 which block(s) will contain the spills.
2381 So a higher cost than 1 is often too conservative in practice,
2382 forcing blocks to contain unnecessary stalls instead of spill code.
2383 The simple cost below seems to be the best compromise. It reduces
2384 the interference with the normal list scheduler, which helps make
2385 it more suitable for a default-on option. */
2387 static int
2388 model_spill_cost (int cl, int from, int to)
2390 from = MAX (from, sched_class_regs_num[cl]);
2391 return MAX (to, from) - from;
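/* For example, with the invented value sched_class_regs_num[CL] == 16:
   model_spill_cost (CL, 14, 19) == MAX (19, 16) - 16 == 3, i.e. only the
   registers beyond the class size are charged, while
   model_spill_cost (CL, 14, 15) == 0 because the pressure never exceeds
   the number of available registers.  */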
2394 /* Return baseECC (ira_pressure_classes[PCI], POINT), given that
2395 P = curr_reg_pressure[ira_pressure_classes[PCI]] and that
2396 P' = P + DELTA. */
2398 static int
2399 model_excess_group_cost (struct model_pressure_group *group,
2400 int point, int pci, int delta)
2402 int pressure, cl;
2404 cl = ira_pressure_classes[pci];
2405 if (delta < 0 && point >= group->limits[pci].point)
2407 pressure = MAX (group->limits[pci].orig_pressure,
2408 curr_reg_pressure[cl] + delta);
2409 return -model_spill_cost (cl, pressure, curr_reg_pressure[cl]);
2412 if (delta > 0)
2414 if (point > group->limits[pci].point)
2415 pressure = group->limits[pci].pressure + delta;
2416 else
2417 pressure = curr_reg_pressure[cl] + delta;
2419 if (pressure > group->limits[pci].pressure)
2420 return model_spill_cost (cl, group->limits[pci].orig_pressure,
2421 pressure);
2424 return 0;
2427 /* Return baseECC (MODEL_INSN (INSN)). Dump the costs to sched_dump
2428 if PRINT_P. */
2430 static int
2431 model_excess_cost (rtx_insn *insn, bool print_p)
2433 int point, pci, cl, cost, this_cost, delta;
2434 struct reg_pressure_data *insn_reg_pressure;
2435 int insn_death[N_REG_CLASSES];
2437 calculate_reg_deaths (insn, insn_death);
2438 point = model_index (insn);
2439 insn_reg_pressure = INSN_REG_PRESSURE (insn);
2440 cost = 0;
2442 if (print_p)
2443 fprintf (sched_dump, ";;\t\t| %3d %4d | %4d %+3d |", point,
2444 INSN_UID (insn), INSN_PRIORITY (insn), insn_delay (insn));
2446 /* Sum up the individual costs for each register class. */
2447 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2449 cl = ira_pressure_classes[pci];
2450 delta = insn_reg_pressure[pci].set_increase - insn_death[cl];
2451 this_cost = model_excess_group_cost (&model_before_pressure,
2452 point, pci, delta);
2453 cost += this_cost;
2454 if (print_p)
2455 fprintf (sched_dump, " %s:[%d base cost %d]",
2456 reg_class_names[cl], delta, this_cost);
2459 if (print_p)
2460 fprintf (sched_dump, "\n");
2462 return cost;
2465 /* Dump the next points of maximum pressure for GROUP. */
2467 static void
2468 model_dump_pressure_points (struct model_pressure_group *group)
2470 int pci, cl;
2472 fprintf (sched_dump, ";;\t\t| pressure points");
2473 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2475 cl = ira_pressure_classes[pci];
2476 fprintf (sched_dump, " %s:[%d->%d at ", reg_class_names[cl],
2477 curr_reg_pressure[cl], group->limits[pci].pressure);
2478 if (group->limits[pci].point < model_num_insns)
2479 fprintf (sched_dump, "%d:%d]", group->limits[pci].point,
2480 INSN_UID (MODEL_INSN (group->limits[pci].point)));
2481 else
2482 fprintf (sched_dump, "end]");
2484 fprintf (sched_dump, "\n");
2487 /* Set INSN_REG_PRESSURE_EXCESS_COST_CHANGE for INSNS[0...COUNT-1]. */
2489 static void
2490 model_set_excess_costs (rtx_insn **insns, int count)
2492 int i, cost, priority_base, priority;
2493 bool print_p;
2495 /* Record the baseECC value for each instruction in the model schedule,
2496 except that negative costs are converted to zero ones now rather than
2497 later. Do not assign a cost to debug instructions, since they must
2498 not change code-generation decisions. Experiments suggest we also
2499 get better results by not assigning a cost to instructions from
2500 a different block.
2502 Set PRIORITY_BASE to baseP in the block comment above. This is the
2503 maximum priority of the "cheap" instructions, which should always
2504 include the next model instruction. */
2505 priority_base = 0;
2506 print_p = false;
2507 for (i = 0; i < count; i++)
2508 if (INSN_MODEL_INDEX (insns[i]))
2510 if (sched_verbose >= 6 && !print_p)
2512 fprintf (sched_dump, MODEL_BAR);
2513 fprintf (sched_dump, ";;\t\t| Pressure costs for ready queue\n");
2514 model_dump_pressure_points (&model_before_pressure);
2515 fprintf (sched_dump, MODEL_BAR);
2516 print_p = true;
2518 cost = model_excess_cost (insns[i], print_p);
2519 if (cost <= 0)
2521 priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]) - cost;
2522 priority_base = MAX (priority_base, priority);
2523 cost = 0;
2525 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = cost;
2527 if (print_p)
2528 fprintf (sched_dump, MODEL_BAR);
2530 /* Use MAX (baseECC, 0) and baseP to calculate ECC for each
2531 instruction. */
2532 for (i = 0; i < count; i++)
2534 cost = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]);
2535 priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]);
2536 if (cost > 0 && priority > priority_base)
2538 cost += priority_base - priority;
2539 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = MAX (cost, 0);
2545 /* Enum of rank_for_schedule heuristic decisions. */
2546 enum rfs_decision {
2547 RFS_DEBUG, RFS_LIVE_RANGE_SHRINK1, RFS_LIVE_RANGE_SHRINK2,
2548 RFS_SCHED_GROUP, RFS_PRESSURE_DELAY, RFS_PRESSURE_TICK,
2549 RFS_FEEDS_BACKTRACK_INSN, RFS_PRIORITY, RFS_SPECULATION,
2550 RFS_SCHED_RANK, RFS_LAST_INSN, RFS_PRESSURE_INDEX,
2551 RFS_DEP_COUNT, RFS_TIE, RFS_N };
2553 /* Corresponding strings for print outs. */
2554 static const char *rfs_str[RFS_N] = {
2555 "RFS_DEBUG", "RFS_LIVE_RANGE_SHRINK1", "RFS_LIVE_RANGE_SHRINK2",
2556 "RFS_SCHED_GROUP", "RFS_PRESSURE_DELAY", "RFS_PRESSURE_TICK",
2557 "RFS_FEEDS_BACKTRACK_INSN", "RFS_PRIORITY", "RFS_SPECULATION",
2558 "RFS_SCHED_RANK", "RFS_LAST_INSN", "RFS_PRESSURE_INDEX",
2559 "RFS_DEP_COUNT", "RFS_TIE" };
2561 /* Statistical breakdown of rank_for_schedule decisions. */
2562 typedef struct { unsigned stats[RFS_N]; } rank_for_schedule_stats_t;
2563 static rank_for_schedule_stats_t rank_for_schedule_stats;
2565 /* Return the result of comparing insns TMP and TMP2 and update
2566 rank_for_schedule statistics. */
2567 static int
2568 rfs_result (enum rfs_decision decision, int result, rtx tmp, rtx tmp2)
2570 ++rank_for_schedule_stats.stats[decision];
2571 if (result < 0)
2572 INSN_LAST_RFS_WIN (tmp) = decision;
2573 else if (result > 0)
2574 INSN_LAST_RFS_WIN (tmp2) = decision;
2575 else
2576 gcc_unreachable ();
2577 return result;
2580 /* Returns a positive value if x is preferred; returns a negative value if
2581 y is preferred. Should never return 0, since that will make the sort
2582 unstable. */
2584 static int
2585 rank_for_schedule (const void *x, const void *y)
2587 rtx_insn *tmp = *(rtx_insn * const *) y;
2588 rtx_insn *tmp2 = *(rtx_insn * const *) x;
2589 int tmp_class, tmp2_class;
2590 int val, priority_val, info_val, diff;
2592 if (MAY_HAVE_DEBUG_INSNS)
2594 /* Schedule debug insns as early as possible. */
2595 if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
2596 return rfs_result (RFS_DEBUG, -1, tmp, tmp2);
2597 else if (!DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2598 return rfs_result (RFS_DEBUG, 1, tmp, tmp2);
2599 else if (DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2600 return rfs_result (RFS_DEBUG, INSN_LUID (tmp) - INSN_LUID (tmp2),
2601 tmp, tmp2);
2604 if (live_range_shrinkage_p)
2606 /* Don't use SCHED_PRESSURE_MODEL -- it results in much worse
2607 code. */
2608 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
2609 if ((INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp) < 0
2610 || INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2) < 0)
2611 && (diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
2612 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2))) != 0)
2613 return rfs_result (RFS_LIVE_RANGE_SHRINK1, diff, tmp, tmp2);
2614 /* Sort by INSN_LUID (original insn order), so that we make the
2615 sort stable. This minimizes instruction movement, thus
2616 minimizing sched's effect on debugging and cross-jumping. */
2617 return rfs_result (RFS_LIVE_RANGE_SHRINK2,
2618 INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2621 /* The insn in a schedule group should be issued first. */
2622 if (flag_sched_group_heuristic &&
2623 SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
2624 return rfs_result (RFS_SCHED_GROUP, SCHED_GROUP_P (tmp2) ? 1 : -1,
2625 tmp, tmp2);
2627 /* Make sure that the priorities of TMP and TMP2 are initialized. */
2628 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
2630 if (sched_pressure != SCHED_PRESSURE_NONE)
2632 /* Prefer insn whose scheduling results in the smallest register
2633 pressure excess. */
2634 if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
2635 + insn_delay (tmp)
2636 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
2637 - insn_delay (tmp2))))
2638 return rfs_result (RFS_PRESSURE_DELAY, diff, tmp, tmp2);
2641 if (sched_pressure != SCHED_PRESSURE_NONE
2642 && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var)
2643 && INSN_TICK (tmp2) != INSN_TICK (tmp))
2645 diff = INSN_TICK (tmp) - INSN_TICK (tmp2);
2646 return rfs_result (RFS_PRESSURE_TICK, diff, tmp, tmp2);
2649 /* If we are doing backtracking in this schedule, prefer insns that
2650 have forward dependencies with negative cost against an insn that
2651 was already scheduled. */
2652 if (current_sched_info->flags & DO_BACKTRACKING)
2654 priority_val = FEEDS_BACKTRACK_INSN (tmp2) - FEEDS_BACKTRACK_INSN (tmp);
2655 if (priority_val)
2656 return rfs_result (RFS_FEEDS_BACKTRACK_INSN, priority_val, tmp, tmp2);
2659 /* Prefer insn with higher priority. */
2660 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
2662 if (flag_sched_critical_path_heuristic && priority_val)
2663 return rfs_result (RFS_PRIORITY, priority_val, tmp, tmp2);
2665 /* Prefer the speculative insn with greater dependency weakness. */
2666 if (flag_sched_spec_insn_heuristic && spec_info)
2668 ds_t ds1, ds2;
2669 dw_t dw1, dw2;
2670 int dw;
2672 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
2673 if (ds1)
2674 dw1 = ds_weak (ds1);
2675 else
2676 dw1 = NO_DEP_WEAK;
2678 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
2679 if (ds2)
2680 dw2 = ds_weak (ds2);
2681 else
2682 dw2 = NO_DEP_WEAK;
2684 dw = dw2 - dw1;
2685 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
2686 return rfs_result (RFS_SPECULATION, dw, tmp, tmp2);
2689 info_val = (*current_sched_info->rank) (tmp, tmp2);
2690 if (flag_sched_rank_heuristic && info_val)
2691 return rfs_result (RFS_SCHED_RANK, info_val, tmp, tmp2);
2693 /* Compare insns based on their relation to the last scheduled
2694 non-debug insn. */
2695 if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
2697 dep_t dep1;
2698 dep_t dep2;
2699 rtx last = last_nondebug_scheduled_insn;
2701 /* Classify the instructions into three classes:
2702 1) Data dependent on last scheduled insn.
2703 2) Anti/Output dependent on last scheduled insn.
2704 3) Independent of last scheduled insn, or has latency of one.
2705 Choose the insn from the highest numbered class if different. */
2706 dep1 = sd_find_dep_between (last, tmp, true);
2708 if (dep1 == NULL || dep_cost (dep1) == 1)
2709 tmp_class = 3;
2710 else if (/* Data dependence. */
2711 DEP_TYPE (dep1) == REG_DEP_TRUE)
2712 tmp_class = 1;
2713 else
2714 tmp_class = 2;
2716 dep2 = sd_find_dep_between (last, tmp2, true);
2718 if (dep2 == NULL || dep_cost (dep2) == 1)
2719 tmp2_class = 3;
2720 else if (/* Data dependence. */
2721 DEP_TYPE (dep2) == REG_DEP_TRUE)
2722 tmp2_class = 1;
2723 else
2724 tmp2_class = 2;
2726 if ((val = tmp2_class - tmp_class))
2727 return rfs_result (RFS_LAST_INSN, val, tmp, tmp2);
2730 /* Prefer instructions that occur earlier in the model schedule. */
2731 if (sched_pressure == SCHED_PRESSURE_MODEL
2732 && INSN_BB (tmp) == target_bb && INSN_BB (tmp2) == target_bb)
2734 diff = model_index (tmp) - model_index (tmp2);
2735 gcc_assert (diff != 0);
2736 return rfs_result (RFS_PRESSURE_INDEX, diff, tmp, tmp2);
2739 /* Prefer the insn which has more later insns that depend on it.
2740 This gives the scheduler more freedom when scheduling later
2741 instructions at the expense of added register pressure. */
2743 val = (dep_list_size (tmp2, SD_LIST_FORW)
2744 - dep_list_size (tmp, SD_LIST_FORW));
2746 if (flag_sched_dep_count_heuristic && val != 0)
2747 return rfs_result (RFS_DEP_COUNT, val, tmp, tmp2);
2749 /* If insns are equally good, sort by INSN_LUID (original insn order),
2750 so that we make the sort stable. This minimizes instruction movement,
2751 thus minimizing sched's effect on debugging and cross-jumping. */
2752 return rfs_result (RFS_TIE, INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2755 /* Resort the N-element array A in which only the last element may be out of order. */
2757 HAIFA_INLINE static void
2758 swap_sort (rtx_insn **a, int n)
2760 rtx_insn *insn = a[n - 1];
2761 int i = n - 2;
2763 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
2765 a[i + 1] = a[i];
2766 i -= 1;
2768 a[i + 1] = insn;
2771 /* Add INSN to the insn queue so that it can be executed at least
2772 N_CYCLES after the currently executing insn. Preserve insns
2773 chain for debugging purposes. REASON will be printed in debugging
2774 output. */
2776 HAIFA_INLINE static void
2777 queue_insn (rtx_insn *insn, int n_cycles, const char *reason)
2779 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
2780 rtx_insn_list *link = alloc_INSN_LIST (insn, insn_queue[next_q]);
2781 int new_tick;
2783 gcc_assert (n_cycles <= max_insn_queue_index);
2784 gcc_assert (!DEBUG_INSN_P (insn));
2786 insn_queue[next_q] = link;
2787 q_size += 1;
2789 if (sched_verbose >= 2)
2791 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
2792 (*current_sched_info->print_insn) (insn, 0));
2794 fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
2797 QUEUE_INDEX (insn) = next_q;
2799 if (current_sched_info->flags & DO_BACKTRACKING)
2801 new_tick = clock_var + n_cycles;
2802 if (INSN_TICK (insn) == INVALID_TICK || INSN_TICK (insn) < new_tick)
2803 INSN_TICK (insn) = new_tick;
2805 if (INSN_EXACT_TICK (insn) != INVALID_TICK
2806 && INSN_EXACT_TICK (insn) < clock_var + n_cycles)
2808 must_backtrack = true;
2809 if (sched_verbose >= 2)
2810 fprintf (sched_dump, ";;\t\tcausing a backtrack.\n");
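/* Conceptually (the NEXT_Q_AFTER macro is defined elsewhere in this file),
   the insn queue is a circular array of lists: an insn queued for N_CYCLES
   cycles is added to the list in the slot that the queue head will reach
   after N_CYCLES advances, which is why N_CYCLES may not exceed
   max_insn_queue_index.  A purely illustrative call would be
   queue_insn (insn, 2, "resource conflict"), which parks INSN two cycles
   away and, when sched_verbose >= 2, prints the reason string in the
   scheduling dump.  */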
2815 /* Remove INSN from queue. */
2816 static void
2817 queue_remove (rtx_insn *insn)
2819 gcc_assert (QUEUE_INDEX (insn) >= 0);
2820 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
2821 q_size--;
2822 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
2825 /* Return a pointer to the bottom of the ready list, i.e. the insn
2826 with the lowest priority. */
2828 rtx_insn **
2829 ready_lastpos (struct ready_list *ready)
2831 gcc_assert (ready->n_ready >= 1);
2832 return ready->vec + ready->first - ready->n_ready + 1;
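/* To illustrate the layout with invented numbers: with veclen == 8,
   first == 6 and n_ready == 3, the ready insns occupy vec[4..6], the
   highest-priority insn is vec[first] == vec[6], and ready_lastpos returns
   &vec[4], the lowest-priority entry.  ready_element below simply reads
   vec[first - index].  */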
2835 /* Add an element INSN to the ready list so that it ends up with the
2836 lowest/highest priority depending on FIRST_P. */
2838 HAIFA_INLINE static void
2839 ready_add (struct ready_list *ready, rtx_insn *insn, bool first_p)
2841 if (!first_p)
2843 if (ready->first == ready->n_ready)
2845 memmove (ready->vec + ready->veclen - ready->n_ready,
2846 ready_lastpos (ready),
2847 ready->n_ready * sizeof (rtx));
2848 ready->first = ready->veclen - 1;
2850 ready->vec[ready->first - ready->n_ready] = insn;
2852 else
2854 if (ready->first == ready->veclen - 1)
2856 if (ready->n_ready)
2857 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
2858 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
2859 ready_lastpos (ready),
2860 ready->n_ready * sizeof (rtx));
2861 ready->first = ready->veclen - 2;
2863 ready->vec[++(ready->first)] = insn;
2866 ready->n_ready++;
2867 if (DEBUG_INSN_P (insn))
2868 ready->n_debug++;
2870 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
2871 QUEUE_INDEX (insn) = QUEUE_READY;
2873 if (INSN_EXACT_TICK (insn) != INVALID_TICK
2874 && INSN_EXACT_TICK (insn) < clock_var)
2876 must_backtrack = true;
2880 /* Remove the element with the highest priority from the ready list and
2881 return it. */
2883 HAIFA_INLINE static rtx_insn *
2884 ready_remove_first (struct ready_list *ready)
2886 rtx_insn *t;
2888 gcc_assert (ready->n_ready);
2889 t = ready->vec[ready->first--];
2890 ready->n_ready--;
2891 if (DEBUG_INSN_P (t))
2892 ready->n_debug--;
2893 /* If the queue becomes empty, reset it. */
2894 if (ready->n_ready == 0)
2895 ready->first = ready->veclen - 1;
2897 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
2898 QUEUE_INDEX (t) = QUEUE_NOWHERE;
2900 return t;
2903 /* The following code implements multi-pass scheduling for the first
2904 cycle. In other words, we will try to choose the ready insn that
2905 permits starting the maximum number of insns on the same cycle. */
2907 /* Return a pointer to element INDEX of the ready list. The insn with
2908 the highest priority has INDEX 0, and the insn with the lowest
2909 priority has INDEX N_READY - 1. */
2911 rtx_insn *
2912 ready_element (struct ready_list *ready, int index)
2914 gcc_assert (ready->n_ready && index < ready->n_ready);
2916 return ready->vec[ready->first - index];
2919 /* Remove element INDEX from the ready list and return it. The insn
2920 with the highest priority has INDEX 0, and the insn with the lowest
2921 priority has INDEX N_READY - 1. */
2923 HAIFA_INLINE static rtx_insn *
2924 ready_remove (struct ready_list *ready, int index)
2926 rtx_insn *t;
2927 int i;
2929 if (index == 0)
2930 return ready_remove_first (ready);
2931 gcc_assert (ready->n_ready && index < ready->n_ready);
2932 t = ready->vec[ready->first - index];
2933 ready->n_ready--;
2934 if (DEBUG_INSN_P (t))
2935 ready->n_debug--;
2936 for (i = index; i < ready->n_ready; i++)
2937 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
2938 QUEUE_INDEX (t) = QUEUE_NOWHERE;
2939 return t;
2942 /* Remove INSN from the ready list. */
2943 static void
2944 ready_remove_insn (rtx insn)
2946 int i;
2948 for (i = 0; i < readyp->n_ready; i++)
2949 if (ready_element (readyp, i) == insn)
2951 ready_remove (readyp, i);
2952 return;
2954 gcc_unreachable ();
2957 /* Calculate the difference between the two statistics sets WAS and NOW.
2958 The result is returned in WAS. */
2959 static void
2960 rank_for_schedule_stats_diff (rank_for_schedule_stats_t *was,
2961 const rank_for_schedule_stats_t *now)
2963 for (int i = 0; i < RFS_N; ++i)
2964 was->stats[i] = now->stats[i] - was->stats[i];
2967 /* Print rank_for_schedule statistics. */
2968 static void
2969 print_rank_for_schedule_stats (const char *prefix,
2970 const rank_for_schedule_stats_t *stats,
2971 struct ready_list *ready)
2973 for (int i = 0; i < RFS_N; ++i)
2974 if (stats->stats[i])
2976 fprintf (sched_dump, "%s%20s: %u", prefix, rfs_str[i], stats->stats[i]);
2978 if (ready != NULL)
2979 /* Print out insns that won due to RFS_<I>. */
2981 rtx_insn **p = ready_lastpos (ready);
2983 fprintf (sched_dump, ":");
2984 /* Start with 1 since least-priority insn didn't have any wins. */
2985 for (int j = 1; j < ready->n_ready; ++j)
2986 if (INSN_LAST_RFS_WIN (p[j]) == i)
2987 fprintf (sched_dump, " %s",
2988 (*current_sched_info->print_insn) (p[j], 0));
2990 fprintf (sched_dump, "\n");
2994 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
2995 macro. */
2997 void
2998 ready_sort (struct ready_list *ready)
3000 int i;
3001 rtx_insn **first = ready_lastpos (ready);
3003 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
3005 for (i = 0; i < ready->n_ready; i++)
3006 if (!DEBUG_INSN_P (first[i]))
3007 setup_insn_reg_pressure_info (first[i]);
3009 if (sched_pressure == SCHED_PRESSURE_MODEL
3010 && model_curr_point < model_num_insns)
3011 model_set_excess_costs (first, ready->n_ready);
3013 rank_for_schedule_stats_t stats1;
3014 if (sched_verbose >= 4)
3015 stats1 = rank_for_schedule_stats;
3017 if (ready->n_ready == 2)
3018 swap_sort (first, ready->n_ready);
3019 else if (ready->n_ready > 2)
3020 qsort (first, ready->n_ready, sizeof (rtx), rank_for_schedule);
3022 if (sched_verbose >= 4)
3024 rank_for_schedule_stats_diff (&stats1, &rank_for_schedule_stats);
3025 print_rank_for_schedule_stats (";;\t\t", &stats1, ready);
3029 /* PREV is an insn that is ready to execute. Adjust its priority if that
3030 will help shorten or lengthen register lifetimes as appropriate. Also
3031 provide a hook for the target to tweak itself. */
3033 HAIFA_INLINE static void
3034 adjust_priority (rtx_insn *prev)
3036 /* ??? There used to be code here to try and estimate how an insn
3037 affected register lifetimes, but it did it by looking at REG_DEAD
3038 notes, which we removed in schedule_region. Nor did it try to
3039 take into account register pressure or anything useful like that.
3041 Revisit when we have a machine model to work with and not before. */
3043 if (targetm.sched.adjust_priority)
3044 INSN_PRIORITY (prev) =
3045 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
3048 /* Advance DFA state STATE on one cycle. */
3049 void
3050 advance_state (state_t state)
3052 if (targetm.sched.dfa_pre_advance_cycle)
3053 targetm.sched.dfa_pre_advance_cycle ();
3055 if (targetm.sched.dfa_pre_cycle_insn)
3056 state_transition (state,
3057 targetm.sched.dfa_pre_cycle_insn ());
3059 state_transition (state, NULL);
3061 if (targetm.sched.dfa_post_cycle_insn)
3062 state_transition (state,
3063 targetm.sched.dfa_post_cycle_insn ());
3065 if (targetm.sched.dfa_post_advance_cycle)
3066 targetm.sched.dfa_post_advance_cycle ();
3069 /* Advance time on one cycle. */
3070 HAIFA_INLINE static void
3071 advance_one_cycle (void)
3073 advance_state (curr_state);
3074 if (sched_verbose >= 4)
3075 fprintf (sched_dump, ";;\tAdvance the current state.\n");
3078 /* Update register pressure after scheduling INSN. */
3079 static void
3080 update_register_pressure (rtx_insn *insn)
3082 struct reg_use_data *use;
3083 struct reg_set_data *set;
3085 gcc_checking_assert (!DEBUG_INSN_P (insn));
3087 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
3088 if (dying_use_p (use))
3089 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
3090 use->regno, false);
3091 for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
3092 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
3093 set->regno, true);
3096 /* Set up or update (if UPDATE_P) max register pressure (see its
3097 meaning in sched-int.h::_haifa_insn_data) for all current BB insns
3098 after insn AFTER. */
3099 static void
3100 setup_insn_max_reg_pressure (rtx_insn *after, bool update_p)
3102 int i, p;
3103 bool eq_p;
3104 rtx_insn *insn;
3105 static int max_reg_pressure[N_REG_CLASSES];
3107 save_reg_pressure ();
3108 for (i = 0; i < ira_pressure_classes_num; i++)
3109 max_reg_pressure[ira_pressure_classes[i]]
3110 = curr_reg_pressure[ira_pressure_classes[i]];
3111 for (insn = NEXT_INSN (after);
3112 insn != NULL_RTX && ! BARRIER_P (insn)
3113 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
3114 insn = NEXT_INSN (insn))
3115 if (NONDEBUG_INSN_P (insn))
3117 eq_p = true;
3118 for (i = 0; i < ira_pressure_classes_num; i++)
3120 p = max_reg_pressure[ira_pressure_classes[i]];
3121 if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
3123 eq_p = false;
3124 INSN_MAX_REG_PRESSURE (insn)[i]
3125 = max_reg_pressure[ira_pressure_classes[i]];
3128 if (update_p && eq_p)
3129 break;
3130 update_register_pressure (insn);
3131 for (i = 0; i < ira_pressure_classes_num; i++)
3132 if (max_reg_pressure[ira_pressure_classes[i]]
3133 < curr_reg_pressure[ira_pressure_classes[i]])
3134 max_reg_pressure[ira_pressure_classes[i]]
3135 = curr_reg_pressure[ira_pressure_classes[i]];
3137 restore_reg_pressure ();
3140 /* Update the current register pressure after scheduling INSN. Also
3141 update the max register pressure for the unscheduled insns of the
3142 current BB. */
3143 static void
3144 update_reg_and_insn_max_reg_pressure (rtx_insn *insn)
3146 int i;
3147 int before[N_REG_CLASSES];
3149 for (i = 0; i < ira_pressure_classes_num; i++)
3150 before[i] = curr_reg_pressure[ira_pressure_classes[i]];
3151 update_register_pressure (insn);
3152 for (i = 0; i < ira_pressure_classes_num; i++)
3153 if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
3154 break;
3155 if (i < ira_pressure_classes_num)
3156 setup_insn_max_reg_pressure (insn, true);
3159 /* Set up register pressure at the beginning of basic block BB whose
3160 insns start after insn AFTER. Also set up the max register pressure
3161 for all insns of the basic block. */
3162 void
3163 sched_setup_bb_reg_pressure_info (basic_block bb, rtx_insn *after)
3165 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
3166 initiate_bb_reg_pressure_info (bb);
3167 setup_insn_max_reg_pressure (after, false);
3170 /* If doing predication while scheduling, verify whether INSN, which
3171 has just been scheduled, clobbers the conditions of any
3172 instructions that must be predicated in order to break their
3173 dependencies. If so, remove them from the queues so that they will
3174 only be scheduled once their control dependency is resolved. */
3176 static void
3177 check_clobbered_conditions (rtx insn)
3179 HARD_REG_SET t;
3180 int i;
3182 if ((current_sched_info->flags & DO_PREDICATION) == 0)
3183 return;
3185 find_all_hard_reg_sets (insn, &t, true);
3187 restart:
3188 for (i = 0; i < ready.n_ready; i++)
3190 rtx_insn *x = ready_element (&ready, i);
3191 if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
3193 ready_remove_insn (x);
3194 goto restart;
3197 for (i = 0; i <= max_insn_queue_index; i++)
3199 rtx_insn_list *link;
3200 int q = NEXT_Q_AFTER (q_ptr, i);
3202 restart_queue:
3203 for (link = insn_queue[q]; link; link = link->next ())
3205 rtx_insn *x = link->insn ();
3206 if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
3208 queue_remove (x);
3209 goto restart_queue;
3215 /* Return (in order):
3217 - positive if INSN adversely affects the pressure on one
3218 register class
3220 - negative if INSN reduces the pressure on one register class
3222 - 0 if INSN doesn't affect the pressure on any register class. */
3224 static int
3225 model_classify_pressure (struct model_insn_info *insn)
3227 struct reg_pressure_data *reg_pressure;
3228 int death[N_REG_CLASSES];
3229 int pci, cl, sum;
3231 calculate_reg_deaths (insn->insn, death);
3232 reg_pressure = INSN_REG_PRESSURE (insn->insn);
3233 sum = 0;
3234 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3236 cl = ira_pressure_classes[pci];
3237 if (death[cl] < reg_pressure[pci].set_increase)
3238 return 1;
3239 sum += reg_pressure[pci].set_increase - death[cl];
3241 return sum;
3244 /* Return true if INSN1 should come before INSN2 in the model schedule. */
3246 static int
3247 model_order_p (struct model_insn_info *insn1, struct model_insn_info *insn2)
3249 unsigned int height1, height2;
3250 unsigned int priority1, priority2;
3252 /* Prefer instructions with a higher model priority. */
3253 if (insn1->model_priority != insn2->model_priority)
3254 return insn1->model_priority > insn2->model_priority;
3256 /* Combine the length of the longest path of satisfied true dependencies
3257 that leads to each instruction (depth) with the length of the longest
3258 path of any dependencies that leads from the instruction (alap).
3259 Prefer instructions with the greatest combined length. If the combined
3260 lengths are equal, prefer instructions with the greatest depth.
3262 The idea is that, if we have a set S of "equal" instructions that each
3263 have ALAP value X, and we pick one such instruction I, any true-dependent
3264 successors of I that have ALAP value X - 1 should be preferred over S.
3265 This encourages the schedule to be "narrow" rather than "wide".
3266 However, if I is a low-priority instruction that we decided to
3267 schedule because of its model_classify_pressure, and if there
3268 is a set of higher-priority instructions T, the aforementioned
3269 successors of I should not have the edge over T. */
3270 height1 = insn1->depth + insn1->alap;
3271 height2 = insn2->depth + insn2->alap;
3272 if (height1 != height2)
3273 return height1 > height2;
3274 if (insn1->depth != insn2->depth)
3275 return insn1->depth > insn2->depth;
3277 /* We have no real preference between INSN1 and INSN2 as far as attempts
3278 to reduce pressure go. Prefer instructions with higher priorities. */
3279 priority1 = INSN_PRIORITY (insn1->insn);
3280 priority2 = INSN_PRIORITY (insn2->insn);
3281 if (priority1 != priority2)
3282 return priority1 > priority2;
3284 /* Use the original rtl sequence as a tie-breaker. */
3285 return insn1 < insn2;
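/* Example with invented values: if INSN1 has depth 3 and alap 5 (combined
   height 8) and INSN2 has depth 6 and alap 2 (also height 8), the heights
   tie and INSN1 loses on the depth comparison.  Only when both the heights
   and the depths tie do the INSN_PRIORITY values, and finally the original
   order, decide.  */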
3288 /* Add INSN to the model worklist immediately after PREV. Add it to the
3289 beginning of the list if PREV is null. */
3291 static void
3292 model_add_to_worklist_at (struct model_insn_info *insn,
3293 struct model_insn_info *prev)
3295 gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_NOWHERE);
3296 QUEUE_INDEX (insn->insn) = QUEUE_READY;
3298 insn->prev = prev;
3299 if (prev)
3301 insn->next = prev->next;
3302 prev->next = insn;
3304 else
3306 insn->next = model_worklist;
3307 model_worklist = insn;
3309 if (insn->next)
3310 insn->next->prev = insn;
3313 /* Remove INSN from the model worklist. */
3315 static void
3316 model_remove_from_worklist (struct model_insn_info *insn)
3318 gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_READY);
3319 QUEUE_INDEX (insn->insn) = QUEUE_NOWHERE;
3321 if (insn->prev)
3322 insn->prev->next = insn->next;
3323 else
3324 model_worklist = insn->next;
3325 if (insn->next)
3326 insn->next->prev = insn->prev;
3329 /* Add INSN to the model worklist. Start looking for a suitable position
3330 between neighbors PREV and NEXT, testing at most MAX_SCHED_READY_INSNS
3331 insns either side. A null PREV indicates the beginning of the list and
3332 a null NEXT indicates the end. */
3334 static void
3335 model_add_to_worklist (struct model_insn_info *insn,
3336 struct model_insn_info *prev,
3337 struct model_insn_info *next)
3339 int count;
3341 count = MAX_SCHED_READY_INSNS;
3342 if (count > 0 && prev && model_order_p (insn, prev))
3343 do
3345 count--;
3346 prev = prev->prev;
3348 while (count > 0 && prev && model_order_p (insn, prev));
3349 else
3350 while (count > 0 && next && model_order_p (next, insn))
3352 count--;
3353 prev = next;
3354 next = next->next;
3356 model_add_to_worklist_at (insn, prev);
3359 /* INSN may now have a higher priority (in the model_order_p sense)
3360 than before. Move it up the worklist if necessary. */
3362 static void
3363 model_promote_insn (struct model_insn_info *insn)
3365 struct model_insn_info *prev;
3366 int count;
3368 prev = insn->prev;
3369 count = MAX_SCHED_READY_INSNS;
3370 while (count > 0 && prev && model_order_p (insn, prev))
3372 count--;
3373 prev = prev->prev;
3375 if (prev != insn->prev)
3377 model_remove_from_worklist (insn);
3378 model_add_to_worklist_at (insn, prev);
3382 /* Add INSN to the end of the model schedule. */
3384 static void
3385 model_add_to_schedule (rtx_insn *insn)
3387 unsigned int point;
3389 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
3390 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
3392 point = model_schedule.length ();
3393 model_schedule.quick_push (insn);
3394 INSN_MODEL_INDEX (insn) = point + 1;
3397 /* Analyze the instructions that are to be scheduled, setting up
3398 MODEL_INSN_INFO (...) and model_num_insns accordingly. Add ready
3399 instructions to model_worklist. */
3401 static void
3402 model_analyze_insns (void)
3404 rtx_insn *start, *end, *iter;
3405 sd_iterator_def sd_it;
3406 dep_t dep;
3407 struct model_insn_info *insn, *con;
3409 model_num_insns = 0;
3410 start = PREV_INSN (current_sched_info->next_tail);
3411 end = current_sched_info->prev_head;
3412 for (iter = start; iter != end; iter = PREV_INSN (iter))
3413 if (NONDEBUG_INSN_P (iter))
3415 insn = MODEL_INSN_INFO (iter);
3416 insn->insn = iter;
3417 FOR_EACH_DEP (iter, SD_LIST_FORW, sd_it, dep)
3419 con = MODEL_INSN_INFO (DEP_CON (dep));
3420 if (con->insn && insn->alap < con->alap + 1)
3421 insn->alap = con->alap + 1;
3424 insn->old_queue = QUEUE_INDEX (iter);
3425 QUEUE_INDEX (iter) = QUEUE_NOWHERE;
3427 insn->unscheduled_preds = dep_list_size (iter, SD_LIST_HARD_BACK);
3428 if (insn->unscheduled_preds == 0)
3429 model_add_to_worklist (insn, NULL, model_worklist);
3431 model_num_insns++;
3435 /* The global state describes the register pressure at the start of the
3436 model schedule. Initialize GROUP accordingly. */
3438 static void
3439 model_init_pressure_group (struct model_pressure_group *group)
3441 int pci, cl;
3443 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3445 cl = ira_pressure_classes[pci];
3446 group->limits[pci].pressure = curr_reg_pressure[cl];
3447 group->limits[pci].point = 0;
3449 /* Use index model_num_insns to record the state after the last
3450 instruction in the model schedule. */
3451 group->model = XNEWVEC (struct model_pressure_data,
3452 (model_num_insns + 1) * ira_pressure_classes_num);
3455 /* Record that MODEL_REF_PRESSURE (GROUP, POINT, PCI) is PRESSURE.
3456 Update the maximum pressure for the whole schedule. */
3458 static void
3459 model_record_pressure (struct model_pressure_group *group,
3460 int point, int pci, int pressure)
3462 MODEL_REF_PRESSURE (group, point, pci) = pressure;
3463 if (group->limits[pci].pressure < pressure)
3465 group->limits[pci].pressure = pressure;
3466 group->limits[pci].point = point;
3470 /* INSN has just been added to the end of the model schedule. Record its
3471 register-pressure information. */
3473 static void
3474 model_record_pressures (struct model_insn_info *insn)
3476 struct reg_pressure_data *reg_pressure;
3477 int point, pci, cl, delta;
3478 int death[N_REG_CLASSES];
3480 point = model_index (insn->insn);
3481 if (sched_verbose >= 2)
3483 if (point == 0)
3485 fprintf (sched_dump, "\n;;\tModel schedule:\n;;\n");
3486 fprintf (sched_dump, ";;\t| idx insn | mpri hght dpth prio |\n");
3488 fprintf (sched_dump, ";;\t| %3d %4d | %4d %4d %4d %4d | %-30s ",
3489 point, INSN_UID (insn->insn), insn->model_priority,
3490 insn->depth + insn->alap, insn->depth,
3491 INSN_PRIORITY (insn->insn),
3492 str_pattern_slim (PATTERN (insn->insn)));
3494 calculate_reg_deaths (insn->insn, death);
3495 reg_pressure = INSN_REG_PRESSURE (insn->insn);
3496 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3498 cl = ira_pressure_classes[pci];
3499 delta = reg_pressure[pci].set_increase - death[cl];
3500 if (sched_verbose >= 2)
3501 fprintf (sched_dump, " %s:[%d,%+d]", reg_class_names[cl],
3502 curr_reg_pressure[cl], delta);
3503 model_record_pressure (&model_before_pressure, point, pci,
3504 curr_reg_pressure[cl]);
3506 if (sched_verbose >= 2)
3507 fprintf (sched_dump, "\n");
3510 /* All instructions have been added to the model schedule. Record the
3511 final register pressure in GROUP and set up all MODEL_MAX_PRESSUREs. */
3513 static void
3514 model_record_final_pressures (struct model_pressure_group *group)
3516 int point, pci, max_pressure, ref_pressure, cl;
3518 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3520 /* Record the final pressure for this class. */
3521 cl = ira_pressure_classes[pci];
3522 point = model_num_insns;
3523 ref_pressure = curr_reg_pressure[cl];
3524 model_record_pressure (group, point, pci, ref_pressure);
3526 /* Record the original maximum pressure. */
3527 group->limits[pci].orig_pressure = group->limits[pci].pressure;
3529 /* Update the MODEL_MAX_PRESSURE for every point of the schedule. */
3530 max_pressure = ref_pressure;
3531 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
3532 while (point > 0)
3534 point--;
3535 ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
3536 max_pressure = MAX (max_pressure, ref_pressure);
3537 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
3542 /* Update all successors of INSN, given that INSN has just been scheduled. */
3544 static void
3545 model_add_successors_to_worklist (struct model_insn_info *insn)
3547 sd_iterator_def sd_it;
3548 struct model_insn_info *con;
3549 dep_t dep;
3551 FOR_EACH_DEP (insn->insn, SD_LIST_FORW, sd_it, dep)
3553 con = MODEL_INSN_INFO (DEP_CON (dep));
3554 /* Ignore debug instructions, and instructions from other blocks. */
3555 if (con->insn)
3557 con->unscheduled_preds--;
3559 /* Update the depth field of each true-dependent successor.
3560 Increasing the depth gives them a higher priority than
3561 before. */
3562 if (DEP_TYPE (dep) == REG_DEP_TRUE && con->depth < insn->depth + 1)
3564 con->depth = insn->depth + 1;
3565 if (QUEUE_INDEX (con->insn) == QUEUE_READY)
3566 model_promote_insn (con);
3569 /* If this is a true dependency, or if there are no remaining
3570 dependencies for CON (meaning that CON only had non-true
3571 dependencies), make sure that CON is on the worklist.
3572 We don't bother otherwise because it would tend to fill the
3573 worklist with a lot of low-priority instructions that are not
3574 yet ready to issue. */
3575 if ((con->depth > 0 || con->unscheduled_preds == 0)
3576 && QUEUE_INDEX (con->insn) == QUEUE_NOWHERE)
3577 model_add_to_worklist (con, insn, insn->next);
3582 /* Give INSN a higher priority than any current instruction, then give
3583 unscheduled predecessors of INSN a higher priority still. If any of
3584 those predecessors are not on the model worklist, do the same for their
3585 predecessors, and so on. */
3587 static void
3588 model_promote_predecessors (struct model_insn_info *insn)
3590 struct model_insn_info *pro, *first;
3591 sd_iterator_def sd_it;
3592 dep_t dep;
3594 if (sched_verbose >= 7)
3595 fprintf (sched_dump, ";;\t+--- priority of %d = %d, priority of",
3596 INSN_UID (insn->insn), model_next_priority);
3597 insn->model_priority = model_next_priority++;
3598 model_remove_from_worklist (insn);
3599 model_add_to_worklist_at (insn, NULL);
3601 first = NULL;
3602 for (;;)
3604 FOR_EACH_DEP (insn->insn, SD_LIST_HARD_BACK, sd_it, dep)
3606 pro = MODEL_INSN_INFO (DEP_PRO (dep));
3607 /* The first test is to ignore debug instructions, and instructions
3608 from other blocks. */
3609 if (pro->insn
3610 && pro->model_priority != model_next_priority
3611 && QUEUE_INDEX (pro->insn) != QUEUE_SCHEDULED)
3613 pro->model_priority = model_next_priority;
3614 if (sched_verbose >= 7)
3615 fprintf (sched_dump, " %d", INSN_UID (pro->insn));
3616 if (QUEUE_INDEX (pro->insn) == QUEUE_READY)
3618 /* PRO is already in the worklist, but it now has
3619 a higher priority than before. Move it to the
3620 appropriate place. */
3621 model_remove_from_worklist (pro);
3622 model_add_to_worklist (pro, NULL, model_worklist);
3624 else
3626 /* PRO isn't in the worklist. Recursively process
3627 its predecessors until we find one that is. */
3628 pro->next = first;
3629 first = pro;
3633 if (!first)
3634 break;
3635 insn = first;
3636 first = insn->next;
3638 if (sched_verbose >= 7)
3639 fprintf (sched_dump, " = %d\n", model_next_priority);
3640 model_next_priority++;
3643 /* Pick one instruction from model_worklist and process it. */
3645 static void
3646 model_choose_insn (void)
3648 struct model_insn_info *insn, *fallback;
3649 int count;
3651 if (sched_verbose >= 7)
3653 fprintf (sched_dump, ";;\t+--- worklist:\n");
3654 insn = model_worklist;
3655 count = MAX_SCHED_READY_INSNS;
3656 while (count > 0 && insn)
3658 fprintf (sched_dump, ";;\t+--- %d [%d, %d, %d, %d]\n",
3659 INSN_UID (insn->insn), insn->model_priority,
3660 insn->depth + insn->alap, insn->depth,
3661 INSN_PRIORITY (insn->insn));
3662 count--;
3663 insn = insn->next;
3667 /* Look for a ready instruction whose model_classify_priority is zero
3668 or negative, picking the highest-priority one. Adding such an
3669 instruction to the schedule now should do no harm, and may actually
3670 do some good.
3672 Failing that, see whether there is an instruction with the highest
3673 extant model_priority that is not yet ready, but which would reduce
3674 pressure if it became ready. This is designed to catch cases like:
3676 (set (mem (reg R1)) (reg R2))
3678 where the instruction is the last remaining use of R1 and where the
3679 value of R2 is not yet available (or vice versa). The death of R1
3680 means that this instruction already reduces pressure. It is of
3681 course possible that the computation of R2 involves other registers
3682 that are hard to kill, but such cases are rare enough for this
3683 heuristic to be a win in general.
3685 Failing that, just pick the highest-priority instruction in the
3686 worklist. */
3687 count = MAX_SCHED_READY_INSNS;
3688 insn = model_worklist;
3689 fallback = 0;
3690 for (;;)
3692 if (count == 0 || !insn)
3694 insn = fallback ? fallback : model_worklist;
3695 break;
3697 if (insn->unscheduled_preds)
3699 if (model_worklist->model_priority == insn->model_priority
3700 && !fallback
3701 && model_classify_pressure (insn) < 0)
3702 fallback = insn;
3704 else
3706 if (model_classify_pressure (insn) <= 0)
3707 break;
3709 count--;
3710 insn = insn->next;
3713 if (sched_verbose >= 7 && insn != model_worklist)
3715 if (insn->unscheduled_preds)
3716 fprintf (sched_dump, ";;\t+--- promoting insn %d, with dependencies\n",
3717 INSN_UID (insn->insn));
3718 else
3719 fprintf (sched_dump, ";;\t+--- promoting insn %d, which is ready\n",
3720 INSN_UID (insn->insn));
3722 if (insn->unscheduled_preds)
3723 /* INSN isn't yet ready to issue. Give all its predecessors the
3724 highest priority. */
3725 model_promote_predecessors (insn);
3726 else
3728 /* INSN is ready. Add it to the end of model_schedule and
3729 process its successors. */
3730 model_add_successors_to_worklist (insn);
3731 model_remove_from_worklist (insn);
3732 model_add_to_schedule (insn->insn);
3733 model_record_pressures (insn);
3734 update_register_pressure (insn->insn);
3738 /* Restore all QUEUE_INDEXs to the values that they had before
3739 model_start_schedule was called. */
3741 static void
3742 model_reset_queue_indices (void)
3744 unsigned int i;
3745 rtx_insn *insn;
3747 FOR_EACH_VEC_ELT (model_schedule, i, insn)
3748 QUEUE_INDEX (insn) = MODEL_INSN_INFO (insn)->old_queue;
3751 /* We have calculated the model schedule and spill costs. Print a summary
3752 to sched_dump. */
3754 static void
3755 model_dump_pressure_summary (void)
3757 int pci, cl;
3759 fprintf (sched_dump, ";; Pressure summary:");
3760 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3762 cl = ira_pressure_classes[pci];
3763 fprintf (sched_dump, " %s:%d", reg_class_names[cl],
3764 model_before_pressure.limits[pci].pressure);
3766 fprintf (sched_dump, "\n\n");
3769 /* Initialize the SCHED_PRESSURE_MODEL information for the current
3770 scheduling region. */
3772 static void
3773 model_start_schedule (basic_block bb)
3775 model_next_priority = 1;
3776 model_schedule.create (sched_max_luid);
3777 model_insns = XCNEWVEC (struct model_insn_info, sched_max_luid);
3779 gcc_assert (bb == BLOCK_FOR_INSN (NEXT_INSN (current_sched_info->prev_head)));
3780 initiate_reg_pressure_info (df_get_live_in (bb));
3782 model_analyze_insns ();
3783 model_init_pressure_group (&model_before_pressure);
3784 while (model_worklist)
3785 model_choose_insn ();
3786 gcc_assert (model_num_insns == (int) model_schedule.length ());
3787 if (sched_verbose >= 2)
3788 fprintf (sched_dump, "\n");
3790 model_record_final_pressures (&model_before_pressure);
3791 model_reset_queue_indices ();
3793 XDELETEVEC (model_insns);
3795 model_curr_point = 0;
3796 initiate_reg_pressure_info (df_get_live_in (bb));
3797 if (sched_verbose >= 1)
3798 model_dump_pressure_summary ();
3801 /* Free the information associated with GROUP. */
3803 static void
3804 model_finalize_pressure_group (struct model_pressure_group *group)
3806 XDELETEVEC (group->model);
3809 /* Free the information created by model_start_schedule. */
3811 static void
3812 model_end_schedule (void)
3814 model_finalize_pressure_group (&model_before_pressure);
3815 model_schedule.release ();
3818 /* Prepare reg pressure scheduling for basic block BB. */
3819 static void
3820 sched_pressure_start_bb (basic_block bb)
3822 /* Set the number of available registers for each class taking into account
3823 relative probability of current basic block versus function prologue and
3824 epilogue.
3825 * If the basic block executes much more often than the prologue/epilogue
3826 (e.g., inside a hot loop), then cost of spill in the prologue is close to
3827 nil, so the effective number of available registers is
3828 (ira_class_hard_regs_num[cl] - 0).
3829 * If the basic block executes as often as the prologue/epilogue,
3830 then spill in the block is as costly as in the prologue, so the effective
3831 number of available registers is
3832 (ira_class_hard_regs_num[cl] - call_used_regs_num[cl]).
3833 Note that, all else being equal, we prefer to spill in the prologue, since that
3834 allows "extra" registers for other basic blocks of the function.
3835 * If the basic block is on the cold path of the function and executes
3836 rarely, then we should always prefer to spill in the block, rather than
3837 in the prologue/epilogue. The effective number of available registers is
3838 (ira_class_hard_regs_num[cl] - call_used_regs_num[cl]). */
3840 int i;
3841 int entry_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
3842 int bb_freq = bb->frequency;
3844 if (bb_freq == 0)
3846 if (entry_freq == 0)
3847 entry_freq = bb_freq = 1;
3849 if (bb_freq < entry_freq)
3850 bb_freq = entry_freq;
3852 for (i = 0; i < ira_pressure_classes_num; ++i)
3854 enum reg_class cl = ira_pressure_classes[i];
3855 sched_class_regs_num[cl] = ira_class_hard_regs_num[cl];
3856 sched_class_regs_num[cl]
3857 -= (call_used_regs_num[cl] * entry_freq) / bb_freq;
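   /* For example, with ira_class_hard_regs_num[cl] == 16,
      call_used_regs_num[cl] == 8, entry_freq == 1000 and bb_freq == 4000
      (the block is four times hotter than the prologue), the effective
      number of registers is 16 - (8 * 1000) / 4000 == 14.  Because
      bb_freq is clamped to at least entry_freq above, the subtraction
      never exceeds call_used_regs_num[cl].  */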
3861 if (sched_pressure == SCHED_PRESSURE_MODEL)
3862 model_start_schedule (bb);
3865 /* A structure that holds local state for the loop in schedule_block. */
3866 struct sched_block_state
3868 /* True if no real insns have been scheduled in the current cycle. */
3869 bool first_cycle_insn_p;
3870 /* True if a shadow insn has been scheduled in the current cycle, which
3871 means that no more normal insns can be issued. */
3872 bool shadows_only_p;
3873 /* True if we're winding down a modulo schedule, which means that we only
3874 issue insns with INSN_EXACT_TICK set. */
3875 bool modulo_epilogue;
3876 /* Initialized with the machine's issue rate every cycle, and updated
3877 by calls to the variable_issue hook. */
3878 int can_issue_more;
3881 /* INSN is the "currently executing insn". Launch each insn which was
3882 waiting on INSN, adding it to the ready list or the queue as
3883 appropriate. The function returns the necessary cycle advance
3884 after issuing the insn (it is not zero for insns in a schedule
3885 group). */
3887 static int
3888 schedule_insn (rtx_insn *insn)
3890 sd_iterator_def sd_it;
3891 dep_t dep;
3892 int i;
3893 int advance = 0;
3895 if (sched_verbose >= 1)
3897 struct reg_pressure_data *pressure_info;
3898 fprintf (sched_dump, ";;\t%3i--> %s %-40s:",
3899 clock_var, (*current_sched_info->print_insn) (insn, 1),
3900 str_pattern_slim (PATTERN (insn)));
3902 if (recog_memoized (insn) < 0)
3903 fprintf (sched_dump, "nothing");
3904 else
3905 print_reservation (sched_dump, insn);
3906 pressure_info = INSN_REG_PRESSURE (insn);
3907 if (pressure_info != NULL)
3909 fputc (':', sched_dump);
3910 for (i = 0; i < ira_pressure_classes_num; i++)
3911 fprintf (sched_dump, "%s%s%+d(%d)",
3912 scheduled_insns.length () > 1
3913 && INSN_LUID (insn)
3914 < INSN_LUID (scheduled_insns[scheduled_insns.length () - 2]) ? "@" : "",
3915 reg_class_names[ira_pressure_classes[i]],
3916 pressure_info[i].set_increase, pressure_info[i].change);
3918 if (sched_pressure == SCHED_PRESSURE_MODEL
3919 && model_curr_point < model_num_insns
3920 && model_index (insn) == model_curr_point)
3921 fprintf (sched_dump, ":model %d", model_curr_point);
3922 fputc ('\n', sched_dump);
3925 if (sched_pressure == SCHED_PRESSURE_WEIGHTED && !DEBUG_INSN_P (insn))
3926 update_reg_and_insn_max_reg_pressure (insn);
3928 /* The instruction being scheduled should have all its dependencies
3929 resolved, and should have been removed from the ready list. */
3930 gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
3932 /* Reset debug insns invalidated by moving this insn. */
3933 if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
3934 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
3935 sd_iterator_cond (&sd_it, &dep);)
3937 rtx_insn *dbg = DEP_PRO (dep);
3938 struct reg_use_data *use, *next;
3940 if (DEP_STATUS (dep) & DEP_CANCELLED)
3942 sd_iterator_next (&sd_it);
3943 continue;
3946 gcc_assert (DEBUG_INSN_P (dbg));
3948 if (sched_verbose >= 6)
3949 fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
3950 INSN_UID (dbg));
3952 /* ??? Rather than resetting the debug insn, we might be able
3953 to emit a debug temp before the just-scheduled insn, but
3954 this would involve checking that the expression at the
3955 point of the debug insn is equivalent to the expression
3956 before the just-scheduled insn. They might not be: the
3957 expression in the debug insn may depend on other insns not
3958 yet scheduled that set MEMs, REGs or even other debug
3959 insns. It's not clear that attempting to preserve debug
3960 information in these cases is worth the effort, given how
3961 uncommon these resets are and the likelihood that the debug
3962 temps introduced won't survive the schedule change. */
3963 INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
3964 df_insn_rescan (dbg);
3966 /* Unknown location doesn't use any registers. */
3967 for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
3969 struct reg_use_data *prev = use;
3971 /* Remove use from the cyclic next_regno_use chain first. */
3972 while (prev->next_regno_use != use)
3973 prev = prev->next_regno_use;
3974 prev->next_regno_use = use->next_regno_use;
3975 next = use->next_insn_use;
3976 free (use);
3978 INSN_REG_USE_LIST (dbg) = NULL;
3980 /* We delete rather than resolve these deps, otherwise we
3981 crash in sched_free_deps(), because forward deps are
3982 expected to be released before backward deps. */
3983 sd_delete_dep (sd_it);
3986 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
3987 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
3989 if (sched_pressure == SCHED_PRESSURE_MODEL
3990 && model_curr_point < model_num_insns
3991 && NONDEBUG_INSN_P (insn))
3993 if (model_index (insn) == model_curr_point)
3995 model_curr_point++;
3996 while (model_curr_point < model_num_insns
3997 && (QUEUE_INDEX (MODEL_INSN (model_curr_point))
3998 == QUEUE_SCHEDULED));
3999 else
4000 model_recompute (insn);
4001 model_update_limit_points ();
4002 update_register_pressure (insn);
4003 if (sched_verbose >= 2)
4004 print_curr_reg_pressure ();
4007 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
4008 if (INSN_TICK (insn) > clock_var)
4009 /* INSN has been prematurely moved from the queue to the ready list.
4010 This is possible only if the following flag is set. */
4011 gcc_assert (flag_sched_stalled_insns);
4013 /* ??? Probably, if INSN is scheduled prematurely, we should leave
4014 INSN_TICK untouched. This is a machine-dependent issue, actually. */
4015 INSN_TICK (insn) = clock_var;
4017 check_clobbered_conditions (insn);
4019 /* Update dependent instructions. First, see if by scheduling this insn
4020 now we broke a dependence in a way that requires us to change another
4021 insn. */
4022 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4023 sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
4025 struct dep_replacement *desc = DEP_REPLACE (dep);
4026 rtx_insn *pro = DEP_PRO (dep);
4027 if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED
4028 && desc != NULL && desc->insn == pro)
4029 apply_replacement (dep, false);
4032 /* Go through and resolve forward dependencies. */
4033 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4034 sd_iterator_cond (&sd_it, &dep);)
4036 rtx_insn *next = DEP_CON (dep);
4037 bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
4039 /* Resolve the dependence between INSN and NEXT.
4040 sd_resolve_dep () moves current dep to another list thus
4041 advancing the iterator. */
4042 sd_resolve_dep (sd_it);
4044 if (cancelled)
4046 if (must_restore_pattern_p (next, dep))
4047 restore_pattern (dep, false);
4048 continue;
4051 /* Don't bother trying to mark next as ready if insn is a debug
4052 insn. If insn is the last hard dependency, it will have
4053 already been discounted. */
4054 if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
4055 continue;
4057 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4059 int effective_cost;
4061 effective_cost = try_ready (next);
4063 if (effective_cost >= 0
4064 && SCHED_GROUP_P (next)
4065 && advance < effective_cost)
4066 advance = effective_cost;
4068 else
4069 /* A check insn always has only one forward dependence (to the first insn in
4070 the recovery block); therefore, this will be executed only once. */
4072 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
4073 fix_recovery_deps (RECOVERY_BLOCK (insn));
4077 /* Annotate the instruction with issue information -- TImode
4078 indicates that the instruction is expected not to be able
4079 to issue on the same cycle as the previous insn. A machine
4080 may use this information to decide how the instruction should
4081 be aligned. */
4082 if (issue_rate > 1
4083 && GET_CODE (PATTERN (insn)) != USE
4084 && GET_CODE (PATTERN (insn)) != CLOBBER
4085 && !DEBUG_INSN_P (insn))
4087 if (reload_completed)
4088 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
4089 last_clock_var = clock_var;
4092 if (nonscheduled_insns_begin != NULL_RTX)
4093 /* Indicate to debug counters that INSN is scheduled. */
4094 nonscheduled_insns_begin = insn;
4096 return advance;
4099 /* Functions for handling of notes. */
4101 /* Add note list that ends on FROM_END to the end of TO_ENDP. */
4102 void
4103 concat_note_lists (rtx_insn *from_end, rtx_insn **to_endp)
4105 rtx_insn *from_start;
4107 /* It's easy when we have nothing to concat. */
4108 if (from_end == NULL)
4109 return;
4111 /* It's also easy when the destination is empty. */
4112 if (*to_endp == NULL)
4114 *to_endp = from_end;
4115 return;
4118 from_start = from_end;
4119 while (PREV_INSN (from_start) != NULL)
4120 from_start = PREV_INSN (from_start);
4122 SET_PREV_INSN (from_start) = *to_endp;
4123 SET_NEXT_INSN (*to_endp) = from_start;
4124 *to_endp = from_end;
4127 /* Delete notes between HEAD and TAIL and put them in the chain
4128 of notes ended by NOTE_LIST. */
4129 void
4130 remove_notes (rtx_insn *head, rtx_insn *tail)
4132 rtx_insn *next_tail, *insn, *next;
4134 note_list = 0;
4135 if (head == tail && !INSN_P (head))
4136 return;
4138 next_tail = NEXT_INSN (tail);
4139 for (insn = head; insn != next_tail; insn = next)
4141 next = NEXT_INSN (insn);
4142 if (!NOTE_P (insn))
4143 continue;
4145 switch (NOTE_KIND (insn))
4147 case NOTE_INSN_BASIC_BLOCK:
4148 continue;
4150 case NOTE_INSN_EPILOGUE_BEG:
4151 if (insn != tail)
4153 remove_insn (insn);
4154 add_reg_note (next, REG_SAVE_NOTE,
4155 GEN_INT (NOTE_INSN_EPILOGUE_BEG));
4156 break;
4158 /* FALLTHRU */
4160 default:
4161 remove_insn (insn);
4163 /* Add the note to list that ends at NOTE_LIST. */
4164 SET_PREV_INSN (insn) = note_list;
4165 SET_NEXT_INSN (insn) = NULL_RTX;
4166 if (note_list)
4167 SET_NEXT_INSN (note_list) = insn;
4168 note_list = insn;
4169 break;
4172 gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
4176 /* A structure to record enough data to allow us to backtrack the scheduler to
4177 a previous state. */
4178 struct haifa_saved_data
4180 /* Next entry on the list. */
4181 struct haifa_saved_data *next;
4183 /* Backtracking is associated with scheduling insns that have delay slots.
4184 DELAY_PAIR points to the structure that contains the insns involved, and
4185 the number of cycles between them. */
4186 struct delay_pair *delay_pair;
4188 /* Data used by the frontend (e.g. sched-ebb or sched-rgn). */
4189 void *fe_saved_data;
4190 /* Data used by the backend. */
4191 void *be_saved_data;
4193 /* Copies of global state. */
4194 int clock_var, last_clock_var;
4195 struct ready_list ready;
4196 state_t curr_state;
4198 rtx_insn *last_scheduled_insn;
4199 rtx last_nondebug_scheduled_insn;
4200 rtx_insn *nonscheduled_insns_begin;
4201 int cycle_issued_insns;
4203 /* Copies of state used in the inner loop of schedule_block. */
4204 struct sched_block_state sched_block;
4206 /* We don't need to save q_ptr, as its value is arbitrary and we can set it
4207 to 0 when restoring. */
4208 int q_size;
4209 rtx_insn_list **insn_queue;
4211 /* Describe pattern replacements that occurred since this backtrack point
4212 was queued. */
4213 vec<dep_t> replacement_deps;
4214 vec<int> replace_apply;
4216 /* A copy of the next-cycle replacement vectors at the time of the backtrack
4217 point. */
4218 vec<dep_t> next_cycle_deps;
4219 vec<int> next_cycle_apply;
4222 /* A record, in reverse order, of all scheduled insns which have delay slots
4223 and may require backtracking. */
4224 static struct haifa_saved_data *backtrack_queue;
4226 /* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
4227 to SET_P. */
4228 static void
4229 mark_backtrack_feeds (rtx insn, int set_p)
4231 sd_iterator_def sd_it;
4232 dep_t dep;
4233 FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
4235 FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
4239 /* Save the current scheduler state so that we can backtrack to it
4240 later if necessary. PAIR gives the insns that make it necessary to
4241 save this point. SCHED_BLOCK is the local state of schedule_block
4242 that needs to be saved. */
4243 static void
4244 save_backtrack_point (struct delay_pair *pair,
4245 struct sched_block_state sched_block)
4247 int i;
4248 struct haifa_saved_data *save = XNEW (struct haifa_saved_data);
4250 save->curr_state = xmalloc (dfa_state_size);
4251 memcpy (save->curr_state, curr_state, dfa_state_size);
4253 save->ready.first = ready.first;
4254 save->ready.n_ready = ready.n_ready;
4255 save->ready.n_debug = ready.n_debug;
4256 save->ready.veclen = ready.veclen;
4257 save->ready.vec = XNEWVEC (rtx_insn *, ready.veclen);
4258 memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));
4260 save->insn_queue = XNEWVEC (rtx_insn_list *, max_insn_queue_index + 1);
4261 save->q_size = q_size;
4262 for (i = 0; i <= max_insn_queue_index; i++)
4264 int q = NEXT_Q_AFTER (q_ptr, i);
4265 save->insn_queue[i] = copy_INSN_LIST (insn_queue[q]);
4268 save->clock_var = clock_var;
4269 save->last_clock_var = last_clock_var;
4270 save->cycle_issued_insns = cycle_issued_insns;
4271 save->last_scheduled_insn = last_scheduled_insn;
4272 save->last_nondebug_scheduled_insn = last_nondebug_scheduled_insn;
4273 save->nonscheduled_insns_begin = nonscheduled_insns_begin;
4275 save->sched_block = sched_block;
4277 save->replacement_deps.create (0);
4278 save->replace_apply.create (0);
4279 save->next_cycle_deps = next_cycle_replace_deps.copy ();
4280 save->next_cycle_apply = next_cycle_apply.copy ();
4282 if (current_sched_info->save_state)
4283 save->fe_saved_data = (*current_sched_info->save_state) ();
4285 if (targetm.sched.alloc_sched_context)
4287 save->be_saved_data = targetm.sched.alloc_sched_context ();
4288 targetm.sched.init_sched_context (save->be_saved_data, false);
4290 else
4291 save->be_saved_data = NULL;
4293 save->delay_pair = pair;
4295 save->next = backtrack_queue;
4296 backtrack_queue = save;
4298 while (pair)
4300 mark_backtrack_feeds (pair->i2, 1);
4301 INSN_TICK (pair->i2) = INVALID_TICK;
4302 INSN_EXACT_TICK (pair->i2) = clock_var + pair_delay (pair);
4303 SHADOW_P (pair->i2) = pair->stages == 0;
4304 pair = pair->next_same_i1;
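   /* For example, a delay pair whose i2 must issue pair_delay == 5 cycles
      after i1, saved when clock_var == 7, gets INSN_EXACT_TICK == 12; if
      we later discover that i2 cannot issue at tick 12, this backtrack
      point lets us undo the schedule back to this state.  */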
4308 /* Walk the ready list and all queues. If any insns have unresolved backwards
4309 dependencies, these must be cancelled deps, broken by predication. Set or
4310 clear (depending on SET) the DEP_CANCELLED bit in DEP_STATUS. */
4312 static void
4313 toggle_cancelled_flags (bool set)
4315 int i;
4316 sd_iterator_def sd_it;
4317 dep_t dep;
4319 if (ready.n_ready > 0)
4321 rtx_insn **first = ready_lastpos (&ready);
4322 for (i = 0; i < ready.n_ready; i++)
4323 FOR_EACH_DEP (first[i], SD_LIST_BACK, sd_it, dep)
4324 if (!DEBUG_INSN_P (DEP_PRO (dep)))
4326 if (set)
4327 DEP_STATUS (dep) |= DEP_CANCELLED;
4328 else
4329 DEP_STATUS (dep) &= ~DEP_CANCELLED;
4332 for (i = 0; i <= max_insn_queue_index; i++)
4334 int q = NEXT_Q_AFTER (q_ptr, i);
4335 rtx_insn_list *link;
4336 for (link = insn_queue[q]; link; link = link->next ())
4338 rtx_insn *insn = link->insn ();
4339 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4340 if (!DEBUG_INSN_P (DEP_PRO (dep)))
4342 if (set)
4343 DEP_STATUS (dep) |= DEP_CANCELLED;
4344 else
4345 DEP_STATUS (dep) &= ~DEP_CANCELLED;
4351 /* Undo the replacements that have occurred after backtrack point SAVE
4352 was placed. */
4353 static void
4354 undo_replacements_for_backtrack (struct haifa_saved_data *save)
4356 while (!save->replacement_deps.is_empty ())
4358 dep_t dep = save->replacement_deps.pop ();
4359 int apply_p = save->replace_apply.pop ();
4361 if (apply_p)
4362 restore_pattern (dep, true);
4363 else
4364 apply_replacement (dep, true);
4366 save->replacement_deps.release ();
4367 save->replace_apply.release ();
4370 /* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
4371 Restore their dependencies to an unresolved state, and mark them as
4372 queued nowhere. */
4374 static void
4375 unschedule_insns_until (rtx insn)
4377 auto_vec<rtx_insn *> recompute_vec;
4379 /* Make two passes over the insns to be unscheduled. First, we clear out
4380 dependencies and other trivial bookkeeping. */
4381 for (;;)
4383 rtx_insn *last;
4384 sd_iterator_def sd_it;
4385 dep_t dep;
4387 last = scheduled_insns.pop ();
4389 /* This will be changed by restore_backtrack_point if the insn is in
4390 any queue. */
4391 QUEUE_INDEX (last) = QUEUE_NOWHERE;
4392 if (last != insn)
4393 INSN_TICK (last) = INVALID_TICK;
4395 if (modulo_ii > 0 && INSN_UID (last) < modulo_iter0_max_uid)
4396 modulo_insns_scheduled--;
4398 for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
4399 sd_iterator_cond (&sd_it, &dep);)
4401 rtx_insn *con = DEP_CON (dep);
4402 sd_unresolve_dep (sd_it);
4403 if (!MUST_RECOMPUTE_SPEC_P (con))
4405 MUST_RECOMPUTE_SPEC_P (con) = 1;
4406 recompute_vec.safe_push (con);
4410 if (last == insn)
4411 break;
4414 /* A second pass, to update ready and speculation status for insns
4415 depending on the unscheduled ones. The first pass must have
4416 popped the scheduled_insns vector up to the point where we
4417 restart scheduling, as recompute_todo_spec requires it to be
4418 up-to-date. */
4419 while (!recompute_vec.is_empty ())
4421 rtx_insn *con;
4423 con = recompute_vec.pop ();
4424 MUST_RECOMPUTE_SPEC_P (con) = 0;
4425 if (!sd_lists_empty_p (con, SD_LIST_HARD_BACK))
4427 TODO_SPEC (con) = HARD_DEP;
4428 INSN_TICK (con) = INVALID_TICK;
4429 if (PREDICATED_PAT (con) != NULL_RTX)
4430 haifa_change_pattern (con, ORIG_PAT (con));
4432 else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
4433 TODO_SPEC (con) = recompute_todo_spec (con, true);
4437 /* Restore scheduler state from the topmost entry on the backtracking queue.
4438 PSCHED_BLOCK_P points to the local data of schedule_block that we must
4439 overwrite with the saved data.
4440 The caller must already have called unschedule_insns_until. */
4442 static void
4443 restore_last_backtrack_point (struct sched_block_state *psched_block)
4445 int i;
4446 struct haifa_saved_data *save = backtrack_queue;
4448 backtrack_queue = save->next;
4450 if (current_sched_info->restore_state)
4451 (*current_sched_info->restore_state) (save->fe_saved_data);
4453 if (targetm.sched.alloc_sched_context)
4455 targetm.sched.set_sched_context (save->be_saved_data);
4456 targetm.sched.free_sched_context (save->be_saved_data);
4459 /* Do this first since it clobbers INSN_TICK of the involved
4460 instructions. */
4461 undo_replacements_for_backtrack (save);
4463 /* Clear the QUEUE_INDEX of everything in the ready list or one
4464 of the queues. */
4465 if (ready.n_ready > 0)
4467 rtx_insn **first = ready_lastpos (&ready);
4468 for (i = 0; i < ready.n_ready; i++)
4470 rtx_insn *insn = first[i];
4471 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
4472 INSN_TICK (insn) = INVALID_TICK;
4475 for (i = 0; i <= max_insn_queue_index; i++)
4477 int q = NEXT_Q_AFTER (q_ptr, i);
4479 for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
4481 rtx_insn *x = link->insn ();
4482 QUEUE_INDEX (x) = QUEUE_NOWHERE;
4483 INSN_TICK (x) = INVALID_TICK;
4485 free_INSN_LIST_list (&insn_queue[q]);
4488 free (ready.vec);
4489 ready = save->ready;
4491 if (ready.n_ready > 0)
4493 rtx_insn **first = ready_lastpos (&ready);
4494 for (i = 0; i < ready.n_ready; i++)
4496 rtx_insn *insn = first[i];
4497 QUEUE_INDEX (insn) = QUEUE_READY;
4498 TODO_SPEC (insn) = recompute_todo_spec (insn, true);
4499 INSN_TICK (insn) = save->clock_var;
4503 q_ptr = 0;
4504 q_size = save->q_size;
4505 for (i = 0; i <= max_insn_queue_index; i++)
4507 int q = NEXT_Q_AFTER (q_ptr, i);
4509 insn_queue[q] = save->insn_queue[q];
4511 for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
4513 rtx_insn *x = link->insn ();
4514 QUEUE_INDEX (x) = i;
4515 TODO_SPEC (x) = recompute_todo_spec (x, true);
4516 INSN_TICK (x) = save->clock_var + i;
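   /* For example, an insn restored into queue slot i == 2 when the saved
      clock_var is 15 becomes ready again at tick 17, i.e. two cycles
      after the restored cycle, as it was when the backtrack point was
      taken.  */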
4519 free (save->insn_queue);
4521 toggle_cancelled_flags (true);
4523 clock_var = save->clock_var;
4524 last_clock_var = save->last_clock_var;
4525 cycle_issued_insns = save->cycle_issued_insns;
4526 last_scheduled_insn = save->last_scheduled_insn;
4527 last_nondebug_scheduled_insn = save->last_nondebug_scheduled_insn;
4528 nonscheduled_insns_begin = save->nonscheduled_insns_begin;
4530 *psched_block = save->sched_block;
4532 memcpy (curr_state, save->curr_state, dfa_state_size);
4533 free (save->curr_state);
4535 mark_backtrack_feeds (save->delay_pair->i2, 0);
4537 gcc_assert (next_cycle_replace_deps.is_empty ());
4538 next_cycle_replace_deps = save->next_cycle_deps.copy ();
4539 next_cycle_apply = save->next_cycle_apply.copy ();
4541 free (save);
4543 for (save = backtrack_queue; save; save = save->next)
4545 mark_backtrack_feeds (save->delay_pair->i2, 1);
4549 /* Discard all data associated with the topmost entry in the backtrack
4550 queue. If RESET_TICK is false, we just want to free the data. If true,
4551 we are doing this because we discovered a reason to backtrack. In the
4552 latter case, also reset the INSN_TICK for the shadow insn. */
4553 static void
4554 free_topmost_backtrack_point (bool reset_tick)
4556 struct haifa_saved_data *save = backtrack_queue;
4557 int i;
4559 backtrack_queue = save->next;
4561 if (reset_tick)
4563 struct delay_pair *pair = save->delay_pair;
4564 while (pair)
4566 INSN_TICK (pair->i2) = INVALID_TICK;
4567 INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
4568 pair = pair->next_same_i1;
4570 undo_replacements_for_backtrack (save);
4572 else
4574 save->replacement_deps.release ();
4575 save->replace_apply.release ();
4578 if (targetm.sched.free_sched_context)
4579 targetm.sched.free_sched_context (save->be_saved_data);
4580 if (current_sched_info->restore_state)
4581 free (save->fe_saved_data);
4582 for (i = 0; i <= max_insn_queue_index; i++)
4583 free_INSN_LIST_list (&save->insn_queue[i]);
4584 free (save->insn_queue);
4585 free (save->curr_state);
4586 free (save->ready.vec);
4587 free (save);
4590 /* Free the entire backtrack queue. */
4591 static void
4592 free_backtrack_queue (void)
4594 while (backtrack_queue)
4595 free_topmost_backtrack_point (false);
4598 /* Apply a replacement described by DESC. If IMMEDIATELY is false, we
4599 may have to postpone the replacement until the start of the next cycle,
4600 at which point we will be called again with IMMEDIATELY true. This is
4601 only done, however, for machines which have instruction packets with
4602 explicit parallelism. */
4603 static void
4604 apply_replacement (dep_t dep, bool immediately)
4606 struct dep_replacement *desc = DEP_REPLACE (dep);
4607 if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4609 next_cycle_replace_deps.safe_push (dep);
4610 next_cycle_apply.safe_push (1);
4612 else
4614 bool success;
4616 if (QUEUE_INDEX (desc->insn) == QUEUE_SCHEDULED)
4617 return;
4619 if (sched_verbose >= 5)
4620 fprintf (sched_dump, "applying replacement for insn %d\n",
4621 INSN_UID (desc->insn));
4623 success = validate_change (desc->insn, desc->loc, desc->newval, 0);
4624 gcc_assert (success);
4626 update_insn_after_change (desc->insn);
4627 if ((TODO_SPEC (desc->insn) & (HARD_DEP | DEP_POSTPONED)) == 0)
4628 fix_tick_ready (desc->insn);
4630 if (backtrack_queue != NULL)
4632 backtrack_queue->replacement_deps.safe_push (dep);
4633 backtrack_queue->replace_apply.safe_push (1);
4638 /* We have determined that a pattern involved in DEP must be restored.
4639 If IMMEDIATELY is false, we may have to postpone the replacement
4640 until the start of the next cycle, at which point we will be called
4641 again with IMMEDIATELY true. */
4642 static void
4643 restore_pattern (dep_t dep, bool immediately)
4645 rtx_insn *next = DEP_CON (dep);
4646 int tick = INSN_TICK (next);
4648 /* If we already scheduled the insn, the modified version is
4649 correct. */
4650 if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
4651 return;
4653 if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4655 next_cycle_replace_deps.safe_push (dep);
4656 next_cycle_apply.safe_push (0);
4657 return;
4661 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
4663 if (sched_verbose >= 5)
4664 fprintf (sched_dump, "restoring pattern for insn %d\n",
4665 INSN_UID (next));
4666 haifa_change_pattern (next, ORIG_PAT (next));
4668 else
4670 struct dep_replacement *desc = DEP_REPLACE (dep);
4671 bool success;
4673 if (sched_verbose >= 5)
4674 fprintf (sched_dump, "restoring pattern for insn %d\n",
4675 INSN_UID (desc->insn));
4676 tick = INSN_TICK (desc->insn);
4678 success = validate_change (desc->insn, desc->loc, desc->orig, 0);
4679 gcc_assert (success);
4680 update_insn_after_change (desc->insn);
4681 if (backtrack_queue != NULL)
4683 backtrack_queue->replacement_deps.safe_push (dep);
4684 backtrack_queue->replace_apply.safe_push (0);
4687 INSN_TICK (next) = tick;
4688 if (TODO_SPEC (next) == DEP_POSTPONED)
4689 return;
4691 if (sd_lists_empty_p (next, SD_LIST_BACK))
4692 TODO_SPEC (next) = 0;
4693 else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
4694 TODO_SPEC (next) = HARD_DEP;
4697 /* Perform pattern replacements that were queued up until the next
4698 cycle. */
4699 static void
4700 perform_replacements_new_cycle (void)
4702 int i;
4703 dep_t dep;
4704 FOR_EACH_VEC_ELT (next_cycle_replace_deps, i, dep)
4706 int apply_p = next_cycle_apply[i];
4707 if (apply_p)
4708 apply_replacement (dep, true);
4709 else
4710 restore_pattern (dep, true);
4712 next_cycle_replace_deps.truncate (0);
4713 next_cycle_apply.truncate (0);
4716 /* Compute INSN_TICK_ESTIMATE for INSN. PROCESSED is a bitmap of
4717 instructions we've previously encountered; a set bit prevents
4718 recursion. BUDGET is a limit on how far ahead we look, it is
4719 reduced on recursive calls. Return true if we produced a good
4720 estimate, or false if we exceeded the budget. */
4721 static bool
4722 estimate_insn_tick (bitmap processed, rtx_insn *insn, int budget)
4724 sd_iterator_def sd_it;
4725 dep_t dep;
4726 int earliest = INSN_TICK (insn);
4728 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4730 rtx_insn *pro = DEP_PRO (dep);
4731 int t;
4733 if (DEP_STATUS (dep) & DEP_CANCELLED)
4734 continue;
4736 if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
4737 gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
4738 else
4740 int cost = dep_cost (dep);
4741 if (cost >= budget)
4742 return false;
4743 if (!bitmap_bit_p (processed, INSN_LUID (pro)))
4745 if (!estimate_insn_tick (processed, pro, budget - cost))
4746 return false;
4748 gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
4749 t = INSN_TICK_ESTIMATE (pro) + cost;
4750 if (earliest == INVALID_TICK || t > earliest)
4751 earliest = t;
4754 bitmap_set_bit (processed, INSN_LUID (insn));
4755 INSN_TICK_ESTIMATE (insn) = earliest;
4756 return true;
4759 /* Examine the pair of insns in P, and estimate (optimistically, assuming
4760 infinite resources) the cycle in which the delayed shadow can be issued.
4761 Return the number of cycles that must pass before the real insn can be
4762 issued in order to meet this constraint. */
4763 static int
4764 estimate_shadow_tick (struct delay_pair *p)
4766 bitmap_head processed;
4767 int t;
4768 bool cutoff;
4769 bitmap_initialize (&processed, 0);
4771 cutoff = !estimate_insn_tick (&processed, p->i2,
4772 max_insn_queue_index + pair_delay (p));
4773 bitmap_clear (&processed);
4774 if (cutoff)
4775 return max_insn_queue_index;
4776 t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
4777 if (t > 0)
4778 return t;
4779 return 0;
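   /* For example, if clock_var == 10, pair_delay (p) == 3 and the
      optimistic estimate for the shadow is tick 16, then
      t == 16 - (10 + 3 + 1) == 2, so the real insn must wait two more
      cycles; an estimate of 14 or less gives t <= 0 and we return 0.  */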
4782 /* If INSN has no unresolved backwards dependencies, add it to the schedule and
4783 recursively resolve all its forward dependencies. */
4784 static void
4785 resolve_dependencies (rtx_insn *insn)
4787 sd_iterator_def sd_it;
4788 dep_t dep;
4790 /* Don't use sd_lists_empty_p; it ignores debug insns. */
4791 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (insn)) != NULL
4792 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (insn)) != NULL)
4793 return;
4795 if (sched_verbose >= 4)
4796 fprintf (sched_dump, ";;\tquickly resolving %d\n", INSN_UID (insn));
4798 if (QUEUE_INDEX (insn) >= 0)
4799 queue_remove (insn);
4801 scheduled_insns.safe_push (insn);
4803 /* Update dependent instructions. */
4804 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4805 sd_iterator_cond (&sd_it, &dep);)
4807 rtx_insn *next = DEP_CON (dep);
4809 if (sched_verbose >= 4)
4810 fprintf (sched_dump, ";;\t\tdep %d against %d\n", INSN_UID (insn),
4811 INSN_UID (next));
4813 /* Resolve the dependence between INSN and NEXT.
4814 sd_resolve_dep () moves current dep to another list thus
4815 advancing the iterator. */
4816 sd_resolve_dep (sd_it);
4818 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4820 resolve_dependencies (next);
4822 else
4823 /* A check insn always has only one forward dependence (to the first insn in
4824 the recovery block); therefore, this will be executed only once. */
4826 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
4832 /* Return the head and tail pointers of the ebb starting at BEG and ending
4833 at END. */
4834 void
4835 get_ebb_head_tail (basic_block beg, basic_block end,
4836 rtx_insn **headp, rtx_insn **tailp)
4838 rtx_insn *beg_head = BB_HEAD (beg);
4839 rtx_insn * beg_tail = BB_END (beg);
4840 rtx_insn * end_head = BB_HEAD (end);
4841 rtx_insn * end_tail = BB_END (end);
4843 /* Don't include any notes or labels at the beginning of the BEG
4844 basic block, or notes at the end of the END basic block. */
4846 if (LABEL_P (beg_head))
4847 beg_head = NEXT_INSN (beg_head);
4849 while (beg_head != beg_tail)
4850 if (NOTE_P (beg_head))
4851 beg_head = NEXT_INSN (beg_head);
4852 else if (DEBUG_INSN_P (beg_head))
4854 rtx_insn * note, *next;
4856 for (note = NEXT_INSN (beg_head);
4857 note != beg_tail;
4858 note = next)
4860 next = NEXT_INSN (note);
4861 if (NOTE_P (note))
4863 if (sched_verbose >= 9)
4864 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
4866 reorder_insns_nobb (note, note, PREV_INSN (beg_head));
4868 if (BLOCK_FOR_INSN (note) != beg)
4869 df_insn_change_bb (note, beg);
4871 else if (!DEBUG_INSN_P (note))
4872 break;
4875 break;
4877 else
4878 break;
4880 *headp = beg_head;
4882 if (beg == end)
4883 end_head = beg_head;
4884 else if (LABEL_P (end_head))
4885 end_head = NEXT_INSN (end_head);
4887 while (end_head != end_tail)
4888 if (NOTE_P (end_tail))
4889 end_tail = PREV_INSN (end_tail);
4890 else if (DEBUG_INSN_P (end_tail))
4892 rtx_insn * note, *prev;
4894 for (note = PREV_INSN (end_tail);
4895 note != end_head;
4896 note = prev)
4898 prev = PREV_INSN (note);
4899 if (NOTE_P (note))
4901 if (sched_verbose >= 9)
4902 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
4904 reorder_insns_nobb (note, note, end_tail);
4906 if (end_tail == BB_END (end))
4907 BB_END (end) = note;
4909 if (BLOCK_FOR_INSN (note) != end)
4910 df_insn_change_bb (note, end);
4912 else if (!DEBUG_INSN_P (note))
4913 break;
4916 break;
4918 else
4919 break;
4921 *tailp = end_tail;
4924 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
4926 int
4927 no_real_insns_p (const rtx_insn *head, const rtx_insn *tail)
4929 while (head != NEXT_INSN (tail))
4931 if (!NOTE_P (head) && !LABEL_P (head))
4932 return 0;
4933 head = NEXT_INSN (head);
4935 return 1;
4938 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
4939 previously found among the insns. Insert them just before HEAD. */
4940 rtx_insn *
4941 restore_other_notes (rtx_insn *head, basic_block head_bb)
4943 if (note_list != 0)
4945 rtx_insn *note_head = note_list;
4947 if (head)
4948 head_bb = BLOCK_FOR_INSN (head);
4949 else
4950 head = NEXT_INSN (bb_note (head_bb));
4952 while (PREV_INSN (note_head))
4954 set_block_for_insn (note_head, head_bb);
4955 note_head = PREV_INSN (note_head);
4957 /* In the above loop we've missed this note. */
4958 set_block_for_insn (note_head, head_bb);
4960 SET_PREV_INSN (note_head) = PREV_INSN (head);
4961 SET_NEXT_INSN (PREV_INSN (head)) = note_head;
4962 SET_PREV_INSN (head) = note_list;
4963 SET_NEXT_INSN (note_list) = head;
4965 if (BLOCK_FOR_INSN (head) != head_bb)
4966 BB_END (head_bb) = note_list;
4968 head = note_head;
4971 return head;
4974 /* When we know we are going to discard the schedule due to a failed attempt
4975 at modulo scheduling, undo all replacements. */
4976 static void
4977 undo_all_replacements (void)
4979 rtx_insn *insn;
4980 int i;
4982 FOR_EACH_VEC_ELT (scheduled_insns, i, insn)
4984 sd_iterator_def sd_it;
4985 dep_t dep;
4987 /* See if we must undo a replacement. */
4988 for (sd_it = sd_iterator_start (insn, SD_LIST_RES_FORW);
4989 sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
4991 struct dep_replacement *desc = DEP_REPLACE (dep);
4992 if (desc != NULL)
4993 validate_change (desc->insn, desc->loc, desc->orig, 0);
4998 /* Return first non-scheduled insn in the current scheduling block.
4999 This is mostly used for debug-counter purposes. */
5000 static rtx_insn *
5001 first_nonscheduled_insn (void)
5003 rtx_insn *insn = (nonscheduled_insns_begin != NULL_RTX
5004 ? nonscheduled_insns_begin
5005 : current_sched_info->prev_head);
5009 insn = next_nonnote_nondebug_insn (insn);
5011 while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);
5013 return insn;
5016 /* Move insns that became ready to fire from queue to ready list. */
5018 static void
5019 queue_to_ready (struct ready_list *ready)
5021 rtx_insn *insn;
5022 rtx_insn_list *link;
5023 rtx skip_insn;
5025 q_ptr = NEXT_Q (q_ptr);
5027 if (dbg_cnt (sched_insn) == false)
5028 /* If the debug counter is activated, do not requeue the first
5029 nonscheduled insn. */
5030 skip_insn = first_nonscheduled_insn ();
5031 else
5032 skip_insn = NULL_RTX;
5034 /* Add all pending insns that can be scheduled without stalls to the
5035 ready list. */
5036 for (link = insn_queue[q_ptr]; link; link = link->next ())
5038 insn = link->insn ();
5039 q_size -= 1;
5041 if (sched_verbose >= 2)
5042 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
5043 (*current_sched_info->print_insn) (insn, 0));
5045 /* If the ready list is full, delay the insn for 1 cycle.
5046 See the comment in schedule_block for the rationale. */
5047 if (!reload_completed
5048 && (ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
5049 || (sched_pressure == SCHED_PRESSURE_MODEL
5050 /* Limit pressure recalculations to MAX_SCHED_READY_INSNS
5051 instructions too. */
5052 && model_index (insn) > (model_curr_point
5053 + MAX_SCHED_READY_INSNS)))
5054 && !(sched_pressure == SCHED_PRESSURE_MODEL
5055 && model_curr_point < model_num_insns
5056 /* Always allow the next model instruction to issue. */
5057 && model_index (insn) == model_curr_point)
5058 && !SCHED_GROUP_P (insn)
5059 && insn != skip_insn)
5061 if (sched_verbose >= 2)
5062 fprintf (sched_dump, "keeping in queue, ready full\n");
5063 queue_insn (insn, 1, "ready full");
5065 else
5067 ready_add (ready, insn, false);
5068 if (sched_verbose >= 2)
5069 fprintf (sched_dump, "moving to ready without stalls\n");
5072 free_INSN_LIST_list (&insn_queue[q_ptr]);
5074 /* If there are no ready insns, stall until one is ready and add all
5075 of the pending insns at that point to the ready list. */
5076 if (ready->n_ready == 0)
5078 int stalls;
5080 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
5082 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5084 for (; link; link = link->next ())
5086 insn = link->insn ();
5087 q_size -= 1;
5089 if (sched_verbose >= 2)
5090 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
5091 (*current_sched_info->print_insn) (insn, 0));
5093 ready_add (ready, insn, false);
5094 if (sched_verbose >= 2)
5095 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
5097 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
5099 advance_one_cycle ();
5101 break;
5104 advance_one_cycle ();
5107 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
5108 clock_var += stalls;
5109 if (sched_verbose >= 2)
5110 fprintf (sched_dump, ";;\tAdvancing clock by %d cycle[s] to %d\n",
5111 stalls, clock_var);
5115 /* Used by early_queue_to_ready. Determines whether it is "ok" to
5116 prematurely move INSN from the queue to the ready list. Currently,
5117 if a target defines the hook 'is_costly_dependence', this function
5118 uses the hook to check whether any dependences between INSN and other
5119 insns that have already been scheduled are considered costly by the
5120 target. Dependences are checked up to Y cycles back, with default
5121 Y=1; the flag -fsched-stalled-insns-dep=Y allows controlling this
5122 value.
5123 (Other considerations could be taken into account instead (or in
5124 addition) depending on user flags and target hooks.) */
5126 static bool
5127 ok_for_early_queue_removal (rtx insn)
5129 if (targetm.sched.is_costly_dependence)
5131 rtx prev_insn;
5132 int n_cycles;
5133 int i = scheduled_insns.length ();
5134 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
5136 while (i-- > 0)
5138 int cost;
5140 prev_insn = scheduled_insns[i];
5142 if (!NOTE_P (prev_insn))
5144 dep_t dep;
5146 dep = sd_find_dep_between (prev_insn, insn, true);
5148 if (dep != NULL)
5150 cost = dep_cost (dep);
5152 if (targetm.sched.is_costly_dependence (dep, cost,
5153 flag_sched_stalled_insns_dep - n_cycles))
5154 return false;
5158 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
5159 break;
5162 if (i == 0)
5163 break;
5167 return true;
5171 /* Remove insns from the queue, before they become "ready" with respect
5172 to FU latency considerations. */
5174 static int
5175 early_queue_to_ready (state_t state, struct ready_list *ready)
5177 rtx_insn *insn;
5178 rtx_insn_list *link;
5179 rtx_insn_list *next_link;
5180 rtx_insn_list *prev_link;
5181 bool move_to_ready;
5182 int cost;
5183 state_t temp_state = alloca (dfa_state_size);
5184 int stalls;
5185 int insns_removed = 0;
5188 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
5189 function:
5191 X == 0: There is no limit on how many queued insns can be removed
5192 prematurely. (flag_sched_stalled_insns = -1).
5194 X >= 1: Only X queued insns can be removed prematurely in each
5195 invocation. (flag_sched_stalled_insns = X).
5197 Otherwise: Early queue removal is disabled.
5198 (flag_sched_stalled_insns = 0)
5201 if (! flag_sched_stalled_insns)
5202 return 0;
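   /* For example, with -fsched-stalled-insns=3 at most three queued insns
      are moved to the ready list prematurely per invocation of this
      function, while X == 0 above places no limit on the number of
      removals.  */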
5204 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
5206 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5208 if (sched_verbose > 6)
5209 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
5211 prev_link = 0;
5212 while (link)
5214 next_link = link->next ();
5215 insn = link->insn ();
5216 if (insn && sched_verbose > 6)
5217 print_rtl_single (sched_dump, insn);
5219 memcpy (temp_state, state, dfa_state_size);
5220 if (recog_memoized (insn) < 0)
5221 /* Use a non-negative cost to indicate that the insn is not ready,
5222 which avoids an infinite Q->R->Q->R... loop. */
5223 cost = 0;
5224 else
5225 cost = state_transition (temp_state, insn);
5227 if (sched_verbose >= 6)
5228 fprintf (sched_dump, "transition cost = %d\n", cost);
5230 move_to_ready = false;
5231 if (cost < 0)
5233 move_to_ready = ok_for_early_queue_removal (insn);
5234 if (move_to_ready == true)
5236 /* move from Q to R */
5237 q_size -= 1;
5238 ready_add (ready, insn, false);
5240 if (prev_link)
5241 XEXP (prev_link, 1) = next_link;
5242 else
5243 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
5245 free_INSN_LIST_node (link);
5247 if (sched_verbose >= 2)
5248 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
5249 (*current_sched_info->print_insn) (insn, 0));
5251 insns_removed++;
5252 if (insns_removed == flag_sched_stalled_insns)
5253 /* Remove no more than flag_sched_stalled_insns insns
5254 from Q at a time. */
5255 return insns_removed;
5259 if (move_to_ready == false)
5260 prev_link = link;
5262 link = next_link;
5263 } /* while link */
5264 } /* if link */
5266 } /* for stalls.. */
5268 return insns_removed;
5272 /* Print the ready list for debugging purposes.
5273 If READY_TRY is non-zero then only print insns that max_issue
5274 will consider. */
5275 static void
5276 debug_ready_list_1 (struct ready_list *ready, signed char *ready_try)
5278 rtx_insn **p;
5279 int i;
5281 if (ready->n_ready == 0)
5283 fprintf (sched_dump, "\n");
5284 return;
5287 p = ready_lastpos (ready);
5288 for (i = 0; i < ready->n_ready; i++)
5290 if (ready_try != NULL && ready_try[ready->n_ready - i - 1])
5291 continue;
5293 fprintf (sched_dump, " %s:%d",
5294 (*current_sched_info->print_insn) (p[i], 0),
5295 INSN_LUID (p[i]));
5296 if (sched_pressure != SCHED_PRESSURE_NONE)
5297 fprintf (sched_dump, "(cost=%d",
5298 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
5299 fprintf (sched_dump, ":prio=%d", INSN_PRIORITY (p[i]));
5300 if (INSN_TICK (p[i]) > clock_var)
5301 fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
5302 if (sched_pressure == SCHED_PRESSURE_MODEL)
5303 fprintf (sched_dump, ":idx=%d",
5304 model_index (p[i]));
5305 if (sched_pressure != SCHED_PRESSURE_NONE)
5306 fprintf (sched_dump, ")");
5308 fprintf (sched_dump, "\n");
5311 /* Print the ready list. Callable from debugger. */
5312 static void
5313 debug_ready_list (struct ready_list *ready)
5315 debug_ready_list_1 (ready, NULL);
5318 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
5319 NOTEs. This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
5320 replaces the epilogue note in the correct basic block. */
5321 void
5322 reemit_notes (rtx_insn *insn)
5324 rtx note;
5325 rtx_insn *last = insn;
5327 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
5329 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
5331 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
5333 last = emit_note_before (note_type, last);
5334 remove_note (insn, note);
5339 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
5340 static void
5341 move_insn (rtx_insn *insn, rtx_insn *last, rtx nt)
5343 if (PREV_INSN (insn) != last)
5345 basic_block bb;
5346 rtx_insn *note;
5347 int jump_p = 0;
5349 bb = BLOCK_FOR_INSN (insn);
5351 /* BB_HEAD is either LABEL or NOTE. */
5352 gcc_assert (BB_HEAD (bb) != insn);
5354 if (BB_END (bb) == insn)
5355 /* If this is the last instruction in BB, move the end marker one
5356 instruction up. */
5358 /* Jumps are always placed at the end of basic block. */
5359 jump_p = control_flow_insn_p (insn);
5361 gcc_assert (!jump_p
5362 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
5363 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
5364 || (common_sched_info->sched_pass_id
5365 == SCHED_EBB_PASS));
5367 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
5369 BB_END (bb) = PREV_INSN (insn);
5372 gcc_assert (BB_END (bb) != last);
5374 if (jump_p)
5375 /* We move the block note along with the jump. */
5377 gcc_assert (nt);
5379 note = NEXT_INSN (insn);
5380 while (NOTE_NOT_BB_P (note) && note != nt)
5381 note = NEXT_INSN (note);
5383 if (note != nt
5384 && (LABEL_P (note)
5385 || BARRIER_P (note)))
5386 note = NEXT_INSN (note);
5388 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5390 else
5391 note = insn;
5393 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
5394 SET_PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
5396 SET_NEXT_INSN (note) = NEXT_INSN (last);
5397 SET_PREV_INSN (NEXT_INSN (last)) = note;
5399 SET_NEXT_INSN (last) = insn;
5400 SET_PREV_INSN (insn) = last;
5402 bb = BLOCK_FOR_INSN (last);
5404 if (jump_p)
5406 fix_jump_move (insn);
5408 if (BLOCK_FOR_INSN (insn) != bb)
5409 move_block_after_check (insn);
5411 gcc_assert (BB_END (bb) == last);
5414 df_insn_change_bb (insn, bb);
5416 /* Update BB_END, if needed. */
5417 if (BB_END (bb) == last)
5418 BB_END (bb) = insn;
5421 SCHED_GROUP_P (insn) = 0;
5424 /* Return true if scheduling INSN will finish current clock cycle. */
5425 static bool
5426 insn_finishes_cycle_p (rtx_insn *insn)
5428 if (SCHED_GROUP_P (insn))
5429 /* After issuing INSN, the rest of the sched_group will be forced to issue
5430 in order. Don't make any plans for the rest of cycle. */
5431 return true;
5433 /* Finishing the block will, apparently, finish the cycle. */
5434 if (current_sched_info->insn_finishes_block_p
5435 && current_sched_info->insn_finishes_block_p (insn))
5436 return true;
5438 return false;
5441 /* Define type for target data used in multipass scheduling. */
5442 #ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
5443 # define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
5444 #endif
5445 typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;
5447 /* The following structure describes an entry in the stack of choices. */
5448 struct choice_entry
5450 /* Ordinal number of the issued insn in the ready queue. */
5451 int index;
5452 /* The number of remaining insns whose issue we should still try. */
5453 int rest;
5454 /* The number of issued essential insns. */
5455 int n;
5456 /* State after issuing the insn. */
5457 state_t state;
5458 /* Target-specific data. */
5459 first_cycle_multipass_data_t target_data;
5462 /* The following array is used to implement a stack of choices used in
5463 function max_issue. */
5464 static struct choice_entry *choice_stack;
5466 /* This holds the value of the target dfa_lookahead hook. */
5467 int dfa_lookahead;
5469 /* The following variable holds the maximal number of tries of issuing
5470 insns for the first-cycle multipass insn scheduling. We define
5471 this value as constant * (DFA_LOOKAHEAD ** ISSUE_RATE). We would not
5472 need this constraint if all real insns (with non-negative codes)
5473 had reservations, because in that case the algorithm complexity is
5474 O(DFA_LOOKAHEAD ** ISSUE_RATE). Unfortunately, the DFA descriptions
5475 might be incomplete and such an insn might occur. For such
5476 descriptions, the complexity of the algorithm (without the constraint)
5477 could reach DFA_LOOKAHEAD ** N, where N is the queue length. */
5478 static int max_lookahead_tries;
5480 /* The following function returns the maximal (or close to maximal)
5481 number of insns which can be issued on the same cycle, one of which
5482 is the insn with the best rank (the first insn in READY). To do
5483 this, the function tries different samples of ready insns. READY
5484 is the current queue `ready'. The global array READY_TRY reflects
5485 which insns are already issued in this try. The function stops
5486 immediately if it has found a solution in which all insns can be issued.
5487 INDEX will contain the index of the best insn in READY. The
5488 function is used only for first-cycle multipass scheduling.
5490 PRIVILEGED_N >= 0
5492 This function expects recognized insns only. All USEs,
5493 CLOBBERs, etc must be filtered elsewhere. */
5494 int
5495 max_issue (struct ready_list *ready, int privileged_n, state_t state,
5496 bool first_cycle_insn_p, int *index)
5498 int n, i, all, n_ready, best, delay, tries_num;
5499 int more_issue;
5500 struct choice_entry *top;
5501 rtx_insn *insn;
5503 n_ready = ready->n_ready;
5504 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
5505 && privileged_n <= n_ready);
5507 /* Init MAX_LOOKAHEAD_TRIES. */
5508 if (max_lookahead_tries == 0)
5510 max_lookahead_tries = 100;
5511 for (i = 0; i < issue_rate; i++)
5512 max_lookahead_tries *= dfa_lookahead;
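   /* For example, with issue_rate == 2 and dfa_lookahead == 4 this gives
      max_lookahead_tries == 100 * 4 * 4 == 1600 candidate issue orders
      before the search below gives up.  */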
5515 /* Init max_points. */
5516 more_issue = issue_rate - cycle_issued_insns;
5517 gcc_assert (more_issue >= 0);
5519 /* The number of the issued insns in the best solution. */
5520 best = 0;
5522 top = choice_stack;
5524 /* Set initial state of the search. */
5525 memcpy (top->state, state, dfa_state_size);
5526 top->rest = dfa_lookahead;
5527 top->n = 0;
5528 if (targetm.sched.first_cycle_multipass_begin)
5529 targetm.sched.first_cycle_multipass_begin (&top->target_data,
5530 ready_try, n_ready,
5531 first_cycle_insn_p);
5533 /* Count the number of the insns to search among. */
5534 for (all = i = 0; i < n_ready; i++)
5535 if (!ready_try [i])
5536 all++;
5538 if (sched_verbose >= 2)
5540 fprintf (sched_dump, ";;\t\tmax_issue among %d insns:", all);
5541 debug_ready_list_1 (ready, ready_try);
5544 /* I is the index of the insn to try next. */
5545 i = 0;
5546 tries_num = 0;
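/* The loop below performs a depth-first search over the ready list:
issuing an insn (a successful state_transition) pushes a new
choice_entry, while a dead end (nothing left to try, or no more issue
slots) backtracks by popping the stack and restoring the saved DFA
state. The search is cut off once TRIES_NUM exceeds
MAX_LOOKAHEAD_TRIES. */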
5547 for (;;)
5549 if (/* If we've reached a dead end or searched enough of what we have
5550 been asked... */
5551 top->rest == 0
5552 /* or have nothing else to try... */
5553 || i >= n_ready
5554 /* or should not issue more. */
5555 || top->n >= more_issue)
5557 /* ??? (... || i == n_ready). */
5558 gcc_assert (i <= n_ready);
5560 /* We should not issue more than issue_rate instructions. */
5561 gcc_assert (top->n <= more_issue);
5563 if (top == choice_stack)
5564 break;
5566 if (best < top - choice_stack)
5568 if (privileged_n)
5570 n = privileged_n;
5571 /* Try to find issued privileged insn. */
5572 while (n && !ready_try[--n])
5573 ;
5576 if (/* If all insns are equally good... */
5577 privileged_n == 0
5578 /* Or a privileged insn will be issued. */
5579 || ready_try[n])
5580 /* Then we have a solution. */
5582 best = top - choice_stack;
5583 /* This is the index of the insn issued first in this
5584 solution. */
5585 *index = choice_stack [1].index;
5586 if (top->n == more_issue || best == all)
5587 break;
5591 /* Set ready-list index to point to the last insn
5592 ('i++' below will advance it to the next insn). */
5593 i = top->index;
5595 /* Backtrack. */
5596 ready_try [i] = 0;
5598 if (targetm.sched.first_cycle_multipass_backtrack)
5599 targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
5600 ready_try, n_ready);
5602 top--;
5603 memcpy (state, top->state, dfa_state_size);
5605 else if (!ready_try [i])
5607 tries_num++;
5608 if (tries_num > max_lookahead_tries)
5609 break;
5610 insn = ready_element (ready, i);
5611 delay = state_transition (state, insn);
5612 if (delay < 0)
5614 if (state_dead_lock_p (state)
5615 || insn_finishes_cycle_p (insn))
5616 /* We won't issue any more instructions in the next
5617 choice_state. */
5618 top->rest = 0;
5619 else
5620 top->rest--;
5622 n = top->n;
5623 if (memcmp (top->state, state, dfa_state_size) != 0)
5624 n++;
5626 /* Advance to the next choice_entry. */
5627 top++;
5628 /* Initialize it. */
5629 top->rest = dfa_lookahead;
5630 top->index = i;
5631 top->n = n;
5632 memcpy (top->state, state, dfa_state_size);
5633 ready_try [i] = 1;
5635 if (targetm.sched.first_cycle_multipass_issue)
5636 targetm.sched.first_cycle_multipass_issue (&top->target_data,
5637 ready_try, n_ready,
5638 insn,
5639 &((top - 1)
5640 ->target_data));
5642 i = -1;
5646 /* Increase ready-list index. */
5647 i++;
5650 if (targetm.sched.first_cycle_multipass_end)
5651 targetm.sched.first_cycle_multipass_end (best != 0
5652 ? &choice_stack[1].target_data
5653 : NULL);
5655 /* Restore the original state of the DFA. */
5656 memcpy (state, choice_stack->state, dfa_state_size);
5658 return best;
5661 /* The following function chooses an insn from READY and modifies
5662 READY. It is used only for first
5663 cycle multipass scheduling.
5664 Return:
5665 -1 if cycle should be advanced,
5666 0 if INSN_PTR is set to point to the desirable insn,
5667 1 if choose_ready () should be restarted without advancing the cycle. */
5668 static int
5669 choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
5670 rtx_insn **insn_ptr)
5672 if (dbg_cnt (sched_insn) == false)
5674 if (nonscheduled_insns_begin == NULL_RTX)
5675 nonscheduled_insns_begin = current_sched_info->prev_head;
5677 rtx_insn *insn = first_nonscheduled_insn ();
5679 if (QUEUE_INDEX (insn) == QUEUE_READY)
5680 /* INSN is in the ready_list. */
5682 ready_remove_insn (insn);
5683 *insn_ptr = insn;
5684 return 0;
5687 /* INSN is in the queue. Advance cycle to move it to the ready list. */
5688 gcc_assert (QUEUE_INDEX (insn) >= 0);
5689 return -1;
5692 if (dfa_lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
5693 || DEBUG_INSN_P (ready_element (ready, 0)))
5695 if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
5696 *insn_ptr = ready_remove_first_dispatch (ready);
5697 else
5698 *insn_ptr = ready_remove_first (ready);
5700 return 0;
5702 else
5704 /* Try to choose the best insn. */
5705 int index = 0, i;
5706 rtx_insn *insn;
5708 insn = ready_element (ready, 0);
5709 if (INSN_CODE (insn) < 0)
5711 *insn_ptr = ready_remove_first (ready);
5712 return 0;
5715 /* Filter the search space. */
5716 for (i = 0; i < ready->n_ready; i++)
5718 ready_try[i] = 0;
5720 insn = ready_element (ready, i);
5722 /* If this insn is recognizable we should have already
5723 recognized it earlier.
5724 ??? Not very clear where this is supposed to be done.
5725 See dep_cost_1. */
5726 gcc_checking_assert (INSN_CODE (insn) >= 0
5727 || recog_memoized (insn) < 0);
5728 if (INSN_CODE (insn) < 0)
5730 /* Non-recognized insns at position 0 are handled above. */
5731 gcc_assert (i > 0);
5732 ready_try[i] = 1;
5733 continue;
5736 if (targetm.sched.first_cycle_multipass_dfa_lookahead_guard)
5738 ready_try[i]
5739 = (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
5740 (insn, i));
5742 if (ready_try[i] < 0)
5743 /* Queue instruction for several cycles.
5744 We need to restart choose_ready as we have changed
5745 the ready list. */
5747 change_queue_index (insn, -ready_try[i]);
5748 return 1;
5751 /* Make sure that we didn't end up with 0'th insn filtered out.
5752 Don't be tempted to make life easier for backends and just
5753 requeue 0'th insn if (ready_try[0] == 0) and restart
5754 choose_ready. Backends should be very considerate about
5755 requeueing instructions -- especially the highest priority
5756 one at position 0. */
5757 gcc_assert (ready_try[i] == 0 || i > 0);
5758 if (ready_try[i])
5759 continue;
5762 gcc_assert (ready_try[i] == 0);
5763 /* INSN made it through the scrutiny of filters! */
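/* max_issue is called with PRIVILEGED_N == 1, so only solutions that
issue the highest-priority insn (ready[0]) are accepted. A return
value of 0 means no such solution was found within the lookahead;
the top-priority insn is then picked anyway (see the dump message
below). */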
5766 if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
5768 *insn_ptr = ready_remove_first (ready);
5769 if (sched_verbose >= 4)
5770 fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
5771 (*current_sched_info->print_insn) (*insn_ptr, 0));
5772 return 0;
5774 else
5776 if (sched_verbose >= 4)
5777 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
5778 (*current_sched_info->print_insn)
5779 (ready_element (ready, index), 0));
5781 *insn_ptr = ready_remove (ready, index);
5782 return 0;
5787 /* This function is called when we have successfully scheduled a
5788 block. It uses the schedule stored in the scheduled_insns vector
5789 to rearrange the RTL. PREV_HEAD is used as the anchor to which we
5790 append the scheduled insns; TAIL is the insn after the scheduled
5791 block. TARGET_BB is the argument passed to schedule_block. */
5793 static void
5794 commit_schedule (rtx_insn *prev_head, rtx_insn *tail, basic_block *target_bb)
5796 unsigned int i;
5797 rtx_insn *insn;
5799 last_scheduled_insn = prev_head;
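/* Walk the scheduled_insns vector in the order the insns were scheduled,
moving each insn into place after the previously emitted one and
advancing *TARGET_BB whenever a block boundary is crossed. */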
5800 for (i = 0;
5801 scheduled_insns.iterate (i, &insn);
5802 i++)
5804 if (control_flow_insn_p (last_scheduled_insn)
5805 || current_sched_info->advance_target_bb (*target_bb, insn))
5807 *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);
5809 if (sched_verbose)
5811 rtx_insn *x;
5813 x = next_real_insn (last_scheduled_insn);
5814 gcc_assert (x);
5815 dump_new_block_header (1, *target_bb, x, tail);
5818 last_scheduled_insn = bb_note (*target_bb);
5821 if (current_sched_info->begin_move_insn)
5822 (*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
5823 move_insn (insn, last_scheduled_insn,
5824 current_sched_info->next_tail);
5825 if (!DEBUG_INSN_P (insn))
5826 reemit_notes (insn);
5827 last_scheduled_insn = insn;
5830 scheduled_insns.truncate (0);
5833 /* Examine all insns on the ready list and queue those which can't be
5834 issued in this cycle. TEMP_STATE is temporary scheduler state we
5835 can use as scratch space. If FIRST_CYCLE_INSN_P is true, no insns
5836 have been issued for the current cycle, which means it is valid to
5837 issue an asm statement.
5839 If SHADOWS_ONLY_P is true, we eliminate all real insns and only
5840 leave those for which SHADOW_P is true. If MODULO_EPILOGUE is true,
5841 we only leave insns which have an INSN_EXACT_TICK. */
5843 static void
5844 prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
5845 bool shadows_only_p, bool modulo_epilogue_p)
5847 int i, pass;
5848 bool sched_group_found = false;
5849 int min_cost_group = 1;
5851 for (i = 0; i < ready.n_ready; i++)
5853 rtx_insn *insn = ready_element (&ready, i);
5854 if (SCHED_GROUP_P (insn))
5856 sched_group_found = true;
5857 break;
5861 /* Make two passes if there's a SCHED_GROUP_P insn; make sure to handle
5862 such an insn first and note its cost, then schedule all other insns
5863 for one cycle later. */
5864 for (pass = sched_group_found ? 0 : 1; pass < 2; )
5866 int n = ready.n_ready;
5867 for (i = 0; i < n; i++)
5869 rtx_insn *insn = ready_element (&ready, i);
5870 int cost = 0;
5871 const char *reason = "resource conflict";
5873 if (DEBUG_INSN_P (insn))
5874 continue;
5876 if (sched_group_found && !SCHED_GROUP_P (insn))
5878 if (pass == 0)
5879 continue;
5880 cost = min_cost_group;
5881 reason = "not in sched group";
5883 else if (modulo_epilogue_p
5884 && INSN_EXACT_TICK (insn) == INVALID_TICK)
5886 cost = max_insn_queue_index;
5887 reason = "not an epilogue insn";
5889 else if (shadows_only_p && !SHADOW_P (insn))
5891 cost = 1;
5892 reason = "not a shadow";
5894 else if (recog_memoized (insn) < 0)
5896 if (!first_cycle_insn_p
5897 && (GET_CODE (PATTERN (insn)) == ASM_INPUT
5898 || asm_noperands (PATTERN (insn)) >= 0))
5899 cost = 1;
5900 reason = "asm";
5902 else if (sched_pressure != SCHED_PRESSURE_NONE)
5904 if (sched_pressure == SCHED_PRESSURE_MODEL
5905 && INSN_TICK (insn) <= clock_var)
5907 memcpy (temp_state, curr_state, dfa_state_size);
5908 if (state_transition (temp_state, insn) >= 0)
5909 INSN_TICK (insn) = clock_var + 1;
5911 cost = 0;
5913 else
5915 int delay_cost = 0;
5917 if (delay_htab)
5919 struct delay_pair *delay_entry;
5920 delay_entry
5921 = delay_htab->find_with_hash (insn,
5922 htab_hash_pointer (insn));
5923 while (delay_entry && delay_cost == 0)
5925 delay_cost = estimate_shadow_tick (delay_entry);
5926 if (delay_cost > max_insn_queue_index)
5927 delay_cost = max_insn_queue_index;
5928 delay_entry = delay_entry->next_same_i1;
5932 memcpy (temp_state, curr_state, dfa_state_size);
5933 cost = state_transition (temp_state, insn);
5934 if (cost < 0)
5935 cost = 0;
5936 else if (cost == 0)
5937 cost = 1;
5938 if (cost < delay_cost)
5940 cost = delay_cost;
5941 reason = "shadow tick";
5944 if (cost >= 1)
5946 if (SCHED_GROUP_P (insn) && cost > min_cost_group)
5947 min_cost_group = cost;
5948 ready_remove (&ready, i);
5949 queue_insn (insn, cost, reason);
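/* ready_remove above shifts the remaining elements, so continuing this
scan with the old indices could skip insns. Unless INSN was the last
element, break out and rescan from the start (PASS is not advanced
because I != N). */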
5950 if (i + 1 < n)
5951 break;
5954 if (i == n)
5955 pass++;
5959 /* Called when we detect that the schedule is impossible. We examine the
5960 backtrack queue to find the earliest insn that caused this condition. */
5962 static struct haifa_saved_data *
5963 verify_shadows (void)
5965 struct haifa_saved_data *save, *earliest_fail = NULL;
5966 for (save = backtrack_queue; save; save = save->next)
5968 int t;
5969 struct delay_pair *pair = save->delay_pair;
5970 rtx_insn *i1 = pair->i1;
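/* Every delay pair recorded at this backtrack point requires its shadow
I2 to be issued no later than INSN_TICK (I1) + pair_delay (pair). The
checks below catch two failure modes: the required tick already lies in
the past while I2 is not yet scheduled, and I2 being queued for a later
tick. */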
5972 for (; pair; pair = pair->next_same_i1)
5974 rtx_insn *i2 = pair->i2;
5976 if (QUEUE_INDEX (i2) == QUEUE_SCHEDULED)
5977 continue;
5979 t = INSN_TICK (i1) + pair_delay (pair);
5980 if (t < clock_var)
5982 if (sched_verbose >= 2)
5983 fprintf (sched_dump,
5984 ";;\t\tfailed delay requirements for %d/%d (%d->%d)"
5985 ", not ready\n",
5986 INSN_UID (pair->i1), INSN_UID (pair->i2),
5987 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
5988 earliest_fail = save;
5989 break;
5991 if (QUEUE_INDEX (i2) >= 0)
5993 int queued_for = INSN_TICK (i2);
5995 if (t < queued_for)
5997 if (sched_verbose >= 2)
5998 fprintf (sched_dump,
5999 ";;\t\tfailed delay requirements for %d/%d"
6000 " (%d->%d), queued too late\n",
6001 INSN_UID (pair->i1), INSN_UID (pair->i2),
6002 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
6003 earliest_fail = save;
6004 break;
6010 return earliest_fail;
6013 /* Print instructions together with useful scheduling information between
6014 HEAD and TAIL (inclusive). */
6015 static void
6016 dump_insn_stream (rtx_insn *head, rtx_insn *tail)
6018 fprintf (sched_dump, ";;\t| insn | prio |\n");
6020 rtx_insn *next_tail = NEXT_INSN (tail);
6021 for (rtx_insn *insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6023 int priority = NOTE_P (insn) ? 0 : INSN_PRIORITY (insn);
6024 const char *pattern = (NOTE_P (insn)
6025 ? "note"
6026 : str_pattern_slim (PATTERN (insn)));
6028 fprintf (sched_dump, ";;\t| %4d | %4d | %-30s ",
6029 INSN_UID (insn), priority, pattern);
6031 if (sched_verbose >= 4)
6033 if (NOTE_P (insn) || recog_memoized (insn) < 0)
6034 fprintf (sched_dump, "nothing");
6035 else
6036 print_reservation (sched_dump, insn);
6038 fprintf (sched_dump, "\n");
6042 /* Use forward list scheduling to rearrange insns of block pointed to by
6043 TARGET_BB, possibly bringing insns from subsequent blocks in the same
6044 region. */
6046 bool
6047 schedule_block (basic_block *target_bb, state_t init_state)
6049 int i;
6050 bool success = modulo_ii == 0;
6051 struct sched_block_state ls;
6052 state_t temp_state = NULL; /* It is used for multipass scheduling. */
6053 int sort_p, advance, start_clock_var;
6055 /* Head/tail info for this block. */
6056 rtx_insn *prev_head = current_sched_info->prev_head;
6057 rtx_insn *next_tail = current_sched_info->next_tail;
6058 rtx_insn *head = NEXT_INSN (prev_head);
6059 rtx_insn *tail = PREV_INSN (next_tail);
6061 if ((current_sched_info->flags & DONT_BREAK_DEPENDENCIES) == 0
6062 && sched_pressure != SCHED_PRESSURE_MODEL)
6063 find_modifiable_mems (head, tail);
6065 /* We used to have code to avoid getting parameters moved from hard
6066 argument registers into pseudos.
6068 However, it was removed when it proved to be of marginal benefit
6069 and caused problems because schedule_block and compute_forward_dependences
6070 had different notions of what the "head" insn was. */
6072 gcc_assert (head != tail || INSN_P (head));
6074 haifa_recovery_bb_recently_added_p = false;
6076 backtrack_queue = NULL;
6078 /* Debug info. */
6079 if (sched_verbose)
6081 dump_new_block_header (0, *target_bb, head, tail);
6083 if (sched_verbose >= 2)
6085 dump_insn_stream (head, tail);
6086 memset (&rank_for_schedule_stats, 0,
6087 sizeof (rank_for_schedule_stats));
6091 if (init_state == NULL)
6092 state_reset (curr_state);
6093 else
6094 memcpy (curr_state, init_state, dfa_state_size);
6096 /* Clear the ready list. */
6097 ready.first = ready.veclen - 1;
6098 ready.n_ready = 0;
6099 ready.n_debug = 0;
6101 /* It is used for first cycle multipass scheduling. */
6102 temp_state = alloca (dfa_state_size);
6104 if (targetm.sched.init)
6105 targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
6107 /* We start inserting insns after PREV_HEAD. */
6108 last_scheduled_insn = prev_head;
6109 last_nondebug_scheduled_insn = NULL_RTX;
6110 nonscheduled_insns_begin = NULL;
6112 gcc_assert ((NOTE_P (last_scheduled_insn)
6113 || DEBUG_INSN_P (last_scheduled_insn))
6114 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
6116 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
6117 queue. */
6118 q_ptr = 0;
6119 q_size = 0;
6121 insn_queue = XALLOCAVEC (rtx_insn_list *, max_insn_queue_index + 1);
6122 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
6124 /* Start just before the beginning of time. */
6125 clock_var = -1;
6127 /* We need the queue, the ready list and clock_var to be initialized
6128 in try_ready () (which is called through init_ready_list ()). */
6129 (*current_sched_info->init_ready_list) ();
6131 if (sched_pressure)
6132 sched_pressure_start_bb (*target_bb);
6134 /* The algorithm is O(n^2) in the number of ready insns at any given
6135 time in the worst case. Before reload we are more likely to have
6136 big lists so truncate them to a reasonable size. */
6137 if (!reload_completed
6138 && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
6140 ready_sort (&ready);
6142 /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
6143 If there are debug insns, we know they're first. */
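/* Stopping only at a non-SCHED_GROUP_P insn keeps the truncation point
from landing in the middle of a run of sched-group insns. */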
6144 for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
6145 if (!SCHED_GROUP_P (ready_element (&ready, i)))
6146 break;
6148 if (sched_verbose >= 2)
6150 fprintf (sched_dump,
6151 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
6152 fprintf (sched_dump,
6153 ";;\t\t before reload => truncated to %d insns\n", i);
6156 /* Delay all insns past it for 1 cycle. If debug counter is
6157 activated make an exception for the insn right after
6158 nonscheduled_insns_begin. */
6160 rtx_insn *skip_insn;
6162 if (dbg_cnt (sched_insn) == false)
6163 skip_insn = first_nonscheduled_insn ();
6164 else
6165 skip_insn = NULL;
6167 while (i < ready.n_ready)
6169 rtx_insn *insn;
6171 insn = ready_remove (&ready, i);
6173 if (insn != skip_insn)
6174 queue_insn (insn, 1, "list truncated");
6176 if (skip_insn)
6177 ready_add (&ready, skip_insn, true);
6181 /* Now we can restore basic block notes and maintain precise cfg. */
6182 restore_bb_notes (*target_bb);
6184 last_clock_var = -1;
6186 advance = 0;
6188 gcc_assert (scheduled_insns.length () == 0);
6189 sort_p = TRUE;
6190 must_backtrack = false;
6191 modulo_insns_scheduled = 0;
6193 ls.modulo_epilogue = false;
6194 ls.first_cycle_insn_p = true;
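/* Each iteration of the loop below handles one clock cycle: advance the
cycle and move newly ready insns from the queue (queue_to_ready), prune
insns that cannot issue yet, then repeatedly pick an insn via
choose_ready and issue it with schedule_insn until no more insns can be
issued this cycle; if a delay-pair requirement turns out to be violated,
the backtracking code below rewinds to a saved state and retries. */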
6196 /* Loop until all the insns in BB are scheduled. */
6197 while ((*current_sched_info->schedule_more_p) ())
6199 perform_replacements_new_cycle ();
6200 do
6202 start_clock_var = clock_var;
6204 clock_var++;
6206 advance_one_cycle ();
6208 /* Add to the ready list all pending insns that can be issued now.
6209 If there are no ready insns, increment clock until one
6210 is ready and add all pending insns at that point to the ready
6211 list. */
6212 queue_to_ready (&ready);
6214 gcc_assert (ready.n_ready);
6216 if (sched_verbose >= 2)
6218 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready:");
6219 debug_ready_list (&ready);
6221 advance -= clock_var - start_clock_var;
6223 while (advance > 0);
6225 if (ls.modulo_epilogue)
6227 int stage = clock_var / modulo_ii;
6228 if (stage > modulo_last_stage * 2 + 2)
6230 if (sched_verbose >= 2)
6231 fprintf (sched_dump,
6232 ";;\t\tmodulo scheduled succeeded at II %d\n",
6233 modulo_ii);
6234 success = true;
6235 goto end_schedule;
6238 else if (modulo_ii > 0)
6240 int stage = clock_var / modulo_ii;
6241 if (stage > modulo_max_stages)
6243 if (sched_verbose >= 2)
6244 fprintf (sched_dump,
6245 ";;\t\tfailing schedule due to excessive stages\n");
6246 goto end_schedule;
6248 if (modulo_n_insns == modulo_insns_scheduled
6249 && stage > modulo_last_stage)
6251 if (sched_verbose >= 2)
6252 fprintf (sched_dump,
6253 ";;\t\tfound kernel after %d stages, II %d\n",
6254 stage, modulo_ii);
6255 ls.modulo_epilogue = true;
6259 prune_ready_list (temp_state, true, false, ls.modulo_epilogue);
6260 if (ready.n_ready == 0)
6261 continue;
6262 if (must_backtrack)
6263 goto do_backtrack;
6265 ls.shadows_only_p = false;
6266 cycle_issued_insns = 0;
6267 ls.can_issue_more = issue_rate;
6268 for (;;)
6270 rtx_insn *insn;
6271 int cost;
6272 bool asm_p;
6274 if (sort_p && ready.n_ready > 0)
6276 /* Sort the ready list based on priority. This must be
6277 done every iteration through the loop, as schedule_insn
6278 may have readied additional insns that will not be
6279 sorted correctly. */
6280 ready_sort (&ready);
6282 if (sched_verbose >= 2)
6284 fprintf (sched_dump,
6285 ";;\t\tReady list after ready_sort: ");
6286 debug_ready_list (&ready);
6290 /* We don't want md sched reorder to even see debug insns, so put
6291 them out right away. */
6292 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
6293 && (*current_sched_info->schedule_more_p) ())
6295 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
6297 rtx_insn *insn = ready_remove_first (&ready);
6298 gcc_assert (DEBUG_INSN_P (insn));
6299 (*current_sched_info->begin_schedule_ready) (insn);
6300 scheduled_insns.safe_push (insn);
6301 last_scheduled_insn = insn;
6302 advance = schedule_insn (insn);
6303 gcc_assert (advance == 0);
6304 if (ready.n_ready > 0)
6305 ready_sort (&ready);
6309 if (ls.first_cycle_insn_p && !ready.n_ready)
6310 break;
6312 resume_after_backtrack:
6313 /* Allow the target to reorder the list, typically for
6314 better instruction bundling. */
6315 if (sort_p
6316 && (ready.n_ready == 0
6317 || !SCHED_GROUP_P (ready_element (&ready, 0))))
6319 if (ls.first_cycle_insn_p && targetm.sched.reorder)
6320 ls.can_issue_more
6321 = targetm.sched.reorder (sched_dump, sched_verbose,
6322 ready_lastpos (&ready),
6323 &ready.n_ready, clock_var);
6324 else if (!ls.first_cycle_insn_p && targetm.sched.reorder2)
6325 ls.can_issue_more
6326 = targetm.sched.reorder2 (sched_dump, sched_verbose,
6327 ready.n_ready
6328 ? ready_lastpos (&ready) : NULL,
6329 &ready.n_ready, clock_var);
6332 restart_choose_ready:
6333 if (sched_verbose >= 2)
6335 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
6336 clock_var);
6337 debug_ready_list (&ready);
6338 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
6339 print_curr_reg_pressure ();
6342 if (ready.n_ready == 0
6343 && ls.can_issue_more
6344 && reload_completed)
6346 /* Allow scheduling insns directly from the queue in case
6347 there's nothing better to do (ready list is empty) but
6348 there are still vacant dispatch slots in the current cycle. */
6349 if (sched_verbose >= 6)
6350 fprintf (sched_dump,";;\t\tSecond chance\n");
6351 memcpy (temp_state, curr_state, dfa_state_size);
6352 if (early_queue_to_ready (temp_state, &ready))
6353 ready_sort (&ready);
6356 if (ready.n_ready == 0
6357 || !ls.can_issue_more
6358 || state_dead_lock_p (curr_state)
6359 || !(*current_sched_info->schedule_more_p) ())
6360 break;
6362 /* Select and remove the insn from the ready list. */
6363 if (sort_p)
6365 int res;
6367 insn = NULL;
6368 res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);
6370 if (res < 0)
6371 /* Finish cycle. */
6372 break;
6373 if (res > 0)
6374 goto restart_choose_ready;
6376 gcc_assert (insn != NULL_RTX);
6378 else
6379 insn = ready_remove_first (&ready);
6381 if (sched_pressure != SCHED_PRESSURE_NONE
6382 && INSN_TICK (insn) > clock_var)
6384 ready_add (&ready, insn, true);
6385 advance = 1;
6386 break;
6389 if (targetm.sched.dfa_new_cycle
6390 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
6391 insn, last_clock_var,
6392 clock_var, &sort_p))
6393 /* SORT_P is used by the target to override sorting
6394 of the ready list. This is needed when the target
6395 has modified its internal structures expecting that
6396 the insn will be issued next. As we need the insn
6397 to have the highest priority (so it will be returned by
6398 the ready_remove_first call above), we invoke
6399 ready_add (&ready, insn, true).
6400 But, still, there is one issue: INSN can be later
6401 discarded by scheduler's front end through
6402 current_sched_info->can_schedule_ready_p, hence, won't
6403 be issued next. */
6405 ready_add (&ready, insn, true);
6406 break;
6409 sort_p = TRUE;
6411 if (current_sched_info->can_schedule_ready_p
6412 && ! (*current_sched_info->can_schedule_ready_p) (insn))
6413 /* We normally get here only if we don't want to move
6414 insn from the split block. */
6416 TODO_SPEC (insn) = DEP_POSTPONED;
6417 goto restart_choose_ready;
6420 if (delay_htab)
6422 /* If this insn is the first part of a delay-slot pair, record a
6423 backtrack point. */
6424 struct delay_pair *delay_entry;
6425 delay_entry
6426 = delay_htab->find_with_hash (insn, htab_hash_pointer (insn));
6427 if (delay_entry)
6429 save_backtrack_point (delay_entry, ls);
6430 if (sched_verbose >= 2)
6431 fprintf (sched_dump, ";;\t\tsaving backtrack point\n");
6435 /* DECISION is made. */
6437 if (modulo_ii > 0 && INSN_UID (insn) < modulo_iter0_max_uid)
6439 modulo_insns_scheduled++;
6440 modulo_last_stage = clock_var / modulo_ii;
6442 if (TODO_SPEC (insn) & SPECULATIVE)
6443 generate_recovery_code (insn);
6445 if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
6446 targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
6448 /* Update counters, etc in the scheduler's front end. */
6449 (*current_sched_info->begin_schedule_ready) (insn);
6450 scheduled_insns.safe_push (insn);
6451 gcc_assert (NONDEBUG_INSN_P (insn));
6452 last_nondebug_scheduled_insn = last_scheduled_insn = insn;
6454 if (recog_memoized (insn) >= 0)
6456 memcpy (temp_state, curr_state, dfa_state_size);
6457 cost = state_transition (curr_state, insn);
6458 if (sched_pressure != SCHED_PRESSURE_WEIGHTED)
6459 gcc_assert (cost < 0);
6460 if (memcmp (temp_state, curr_state, dfa_state_size) != 0)
6461 cycle_issued_insns++;
6462 asm_p = false;
6464 else
6465 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
6466 || asm_noperands (PATTERN (insn)) >= 0);
6468 if (targetm.sched.variable_issue)
6469 ls.can_issue_more =
6470 targetm.sched.variable_issue (sched_dump, sched_verbose,
6471 insn, ls.can_issue_more);
6472 /* A naked CLOBBER or USE generates no instruction, so do
6473 not count them against the issue rate. */
6474 else if (GET_CODE (PATTERN (insn)) != USE
6475 && GET_CODE (PATTERN (insn)) != CLOBBER)
6476 ls.can_issue_more--;
6477 advance = schedule_insn (insn);
6479 if (SHADOW_P (insn))
6480 ls.shadows_only_p = true;
6482 /* After issuing an asm insn we should start a new cycle. */
6483 if (advance == 0 && asm_p)
6484 advance = 1;
6486 if (must_backtrack)
6487 break;
6489 if (advance != 0)
6490 break;
6492 ls.first_cycle_insn_p = false;
6493 if (ready.n_ready > 0)
6494 prune_ready_list (temp_state, false, ls.shadows_only_p,
6495 ls.modulo_epilogue);
6498 do_backtrack:
6499 if (!must_backtrack)
6500 for (i = 0; i < ready.n_ready; i++)
6502 rtx_insn *insn = ready_element (&ready, i);
6503 if (INSN_EXACT_TICK (insn) == clock_var)
6505 must_backtrack = true;
6506 clock_var++;
6507 break;
6510 if (must_backtrack && modulo_ii > 0)
6512 if (modulo_backtracks_left == 0)
6513 goto end_schedule;
6514 modulo_backtracks_left--;
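/* The backtracking loop below finds the earliest violated delay pair
(verify_shadows), unschedules everything back to the insn that caused
it, restores the scheduler state saved at that backtrack point, and
requeues the failing insn with at least one cycle of delay before
resuming. */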
6516 while (must_backtrack)
6518 struct haifa_saved_data *failed;
6519 rtx_insn *failed_insn;
6521 must_backtrack = false;
6522 failed = verify_shadows ();
6523 gcc_assert (failed);
6525 failed_insn = failed->delay_pair->i1;
6526 /* Clear these queues. */
6527 perform_replacements_new_cycle ();
6528 toggle_cancelled_flags (false);
6529 unschedule_insns_until (failed_insn);
6530 while (failed != backtrack_queue)
6531 free_topmost_backtrack_point (true);
6532 restore_last_backtrack_point (&ls);
6533 if (sched_verbose >= 2)
6534 fprintf (sched_dump, ";;\t\trewind to cycle %d\n", clock_var);
6535 /* Delay by at least a cycle. This could cause additional
6536 backtracking. */
6537 queue_insn (failed_insn, 1, "backtracked");
6538 advance = 0;
6539 if (must_backtrack)
6540 continue;
6541 if (ready.n_ready > 0)
6542 goto resume_after_backtrack;
6543 else
6545 if (clock_var == 0 && ls.first_cycle_insn_p)
6546 goto end_schedule;
6547 advance = 1;
6548 break;
6551 ls.first_cycle_insn_p = true;
6553 if (ls.modulo_epilogue)
6554 success = true;
6555 end_schedule:
6556 if (!ls.first_cycle_insn_p || advance)
6557 advance_one_cycle ();
6558 perform_replacements_new_cycle ();
6559 if (modulo_ii > 0)
6561 /* Once again, debug insn suckiness: they can be on the ready list
6562 even if they have unresolved dependencies. To make our view
6563 of the world consistent, remove such "ready" insns. */
6564 restart_debug_insn_loop:
6565 for (i = ready.n_ready - 1; i >= 0; i--)
6567 rtx_insn *x;
6569 x = ready_element (&ready, i);
6570 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (x)) != NULL
6571 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (x)) != NULL)
6573 ready_remove (&ready, i);
6574 goto restart_debug_insn_loop;
6577 for (i = ready.n_ready - 1; i >= 0; i--)
6579 rtx_insn *x;
6581 x = ready_element (&ready, i);
6582 resolve_dependencies (x);
6584 for (i = 0; i <= max_insn_queue_index; i++)
6586 rtx_insn_list *link;
6587 while ((link = insn_queue[i]) != NULL)
6589 rtx_insn *x = link->insn ();
6590 insn_queue[i] = link->next ();
6591 QUEUE_INDEX (x) = QUEUE_NOWHERE;
6592 free_INSN_LIST_node (link);
6593 resolve_dependencies (x);
6598 if (!success)
6599 undo_all_replacements ();
6601 /* Debug info. */
6602 if (sched_verbose)
6604 fprintf (sched_dump, ";;\tReady list (final): ");
6605 debug_ready_list (&ready);
6608 if (modulo_ii == 0 && current_sched_info->queue_must_finish_empty)
6609 /* Sanity check -- queue must be empty now. Meaningless if region has
6610 multiple bbs. */
6611 gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
6612 else if (modulo_ii == 0)
6614 /* We must maintain QUEUE_INDEX between blocks in region. */
6615 for (i = ready.n_ready - 1; i >= 0; i--)
6617 rtx_insn *x;
6619 x = ready_element (&ready, i);
6620 QUEUE_INDEX (x) = QUEUE_NOWHERE;
6621 TODO_SPEC (x) = HARD_DEP;
6624 if (q_size)
6625 for (i = 0; i <= max_insn_queue_index; i++)
6627 rtx_insn_list *link;
6628 for (link = insn_queue[i]; link; link = link->next ())
6630 rtx_insn *x;
6632 x = link->insn ();
6633 QUEUE_INDEX (x) = QUEUE_NOWHERE;
6634 TODO_SPEC (x) = HARD_DEP;
6636 free_INSN_LIST_list (&insn_queue[i]);
6640 if (sched_pressure == SCHED_PRESSURE_MODEL)
6641 model_end_schedule ();
6643 if (success)
6645 commit_schedule (prev_head, tail, target_bb);
6646 if (sched_verbose)
6647 fprintf (sched_dump, ";; total time = %d\n", clock_var);
6649 else
6650 last_scheduled_insn = tail;
6652 scheduled_insns.truncate (0);
6654 if (!current_sched_info->queue_must_finish_empty
6655 || haifa_recovery_bb_recently_added_p)
6657 /* INSN_TICK (minimum clock tick at which the insn becomes
6658 ready) may not be correct for insns in the subsequent
6659 blocks of the region. We should use a correct value of
6660 `clock_var' or modify INSN_TICK. It is better to keep
6661 clock_var value equal to 0 at the start of a basic block.
6662 Therefore we modify INSN_TICK here. */
6663 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
6666 if (targetm.sched.finish)
6668 targetm.sched.finish (sched_dump, sched_verbose);
6669 /* Target might have added some instructions to the scheduled block
6670 in its md_finish () hook. These new insns don't have any data
6671 initialized and to identify them we extend h_i_d so that they'll
6672 get zero luids. */
6673 sched_extend_luids ();
6676 /* Update head/tail boundaries. */
6677 head = NEXT_INSN (prev_head);
6678 tail = last_scheduled_insn;
6680 if (sched_verbose)
6682 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n",
6683 INSN_UID (head), INSN_UID (tail));
6685 if (sched_verbose >= 2)
6687 dump_insn_stream (head, tail);
6688 print_rank_for_schedule_stats (";; TOTAL ", &rank_for_schedule_stats,
6689 NULL);
6692 fprintf (sched_dump, "\n");
6695 head = restore_other_notes (head, NULL);
6697 current_sched_info->head = head;
6698 current_sched_info->tail = tail;
6700 free_backtrack_queue ();
6702 return success;
6705 /* Set_priorities: compute priority of each insn in the block. */
6707 int
6708 set_priorities (rtx_insn *head, rtx_insn *tail)
6710 rtx_insn *insn;
6711 int n_insn;
6712 int sched_max_insns_priority =
6713 current_sched_info->sched_max_insns_priority;
6714 rtx_insn *prev_head;
6716 if (head == tail && ! INSN_P (head))
6717 gcc_unreachable ();
6719 n_insn = 0;
6721 prev_head = PREV_INSN (head);
6722 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
6724 if (!INSN_P (insn))
6725 continue;
6727 n_insn++;
6728 (void) priority (insn);
6730 gcc_assert (INSN_PRIORITY_KNOWN (insn));
6732 sched_max_insns_priority = MAX (sched_max_insns_priority,
6733 INSN_PRIORITY (insn));
6736 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
6738 return n_insn;
6741 /* Set dump and sched_verbose for the desired debugging output. If no
6742 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
6743 For -fsched-verbose=N, N>=10, print everything to stderr. */
6744 void
6745 setup_sched_dump (void)
6747 sched_verbose = sched_verbose_param;
6748 if (sched_verbose_param == 0 && dump_file)
6749 sched_verbose = 1;
6750 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
6751 ? stderr : dump_file);
6754 /* Allocate data for register pressure sensitive scheduling. */
6755 static void
6756 alloc_global_sched_pressure_data (void)
6758 if (sched_pressure != SCHED_PRESSURE_NONE)
6760 int i, max_regno = max_reg_num ();
6762 if (sched_dump != NULL)
6763 /* We need info about pseudos for rtl dumps about pseudo
6764 classes and costs. */
6765 regstat_init_n_sets_and_refs ();
6766 ira_set_pseudo_classes (true, sched_verbose ? sched_dump : NULL);
6767 sched_regno_pressure_class
6768 = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
6769 for (i = 0; i < max_regno; i++)
6770 sched_regno_pressure_class[i]
6771 = (i < FIRST_PSEUDO_REGISTER
6772 ? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
6773 : ira_pressure_class_translate[reg_allocno_class (i)]);
6774 curr_reg_live = BITMAP_ALLOC (NULL);
6775 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
6777 saved_reg_live = BITMAP_ALLOC (NULL);
6778 region_ref_regs = BITMAP_ALLOC (NULL);
6781 /* Calculate the number of CALL_USED_REGS in the register classes for
6782 which we calculate register pressure. */
6783 for (int c = 0; c < ira_pressure_classes_num; ++c)
6785 enum reg_class cl = ira_pressure_classes[c];
6787 call_used_regs_num[cl] = 0;
6789 for (int i = 0; i < ira_class_hard_regs_num[cl]; ++i)
6790 if (call_used_regs[ira_class_hard_regs[cl][i]])
6791 ++call_used_regs_num[cl];
6796 /* Free data for register pressure sensitive scheduling. Also called
6797 from schedule_region when stopping sched-pressure early. */
6798 void
6799 free_global_sched_pressure_data (void)
6801 if (sched_pressure != SCHED_PRESSURE_NONE)
6803 if (regstat_n_sets_and_refs != NULL)
6804 regstat_free_n_sets_and_refs ();
6805 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
6807 BITMAP_FREE (region_ref_regs);
6808 BITMAP_FREE (saved_reg_live);
6810 BITMAP_FREE (curr_reg_live);
6811 free (sched_regno_pressure_class);
6815 /* Initialize some global state for the scheduler. This function works
6816 with the common data shared between all the schedulers. It is called
6817 from the scheduler specific initialization routine. */
6819 void
6820 sched_init (void)
6822 /* Disable speculative loads if cc0 is defined. */
6823 #ifdef HAVE_cc0
6824 flag_schedule_speculative_load = 0;
6825 #endif
6827 if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
6828 targetm.sched.dispatch_do (NULL, DISPATCH_INIT);
6830 if (live_range_shrinkage_p)
6831 sched_pressure = SCHED_PRESSURE_WEIGHTED;
6832 else if (flag_sched_pressure
6833 && !reload_completed
6834 && common_sched_info->sched_pass_id == SCHED_RGN_PASS)
6835 sched_pressure = ((enum sched_pressure_algorithm)
6836 PARAM_VALUE (PARAM_SCHED_PRESSURE_ALGORITHM));
6837 else
6838 sched_pressure = SCHED_PRESSURE_NONE;
6840 if (sched_pressure != SCHED_PRESSURE_NONE)
6841 ira_setup_eliminable_regset ();
6843 /* Initialize SPEC_INFO. */
6844 if (targetm.sched.set_sched_flags)
6846 spec_info = &spec_info_var;
6847 targetm.sched.set_sched_flags (spec_info);
6849 if (spec_info->mask != 0)
6851 spec_info->data_weakness_cutoff =
6852 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
6853 spec_info->control_weakness_cutoff =
6854 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
6855 * REG_BR_PROB_BASE) / 100;
6857 else
6858 /* So we won't read anything accidentally. */
6859 spec_info = NULL;
6862 else
6863 /* So we won't read anything accidentally. */
6864 spec_info = 0;
6866 /* Initialize issue_rate. */
6867 if (targetm.sched.issue_rate)
6868 issue_rate = targetm.sched.issue_rate ();
6869 else
6870 issue_rate = 1;
6872 if (targetm.sched.first_cycle_multipass_dfa_lookahead
6873 /* Don't use max_issue with reg_pressure scheduling. Multipass
6874 scheduling and reg_pressure scheduling undo each other's decisions. */
6875 && sched_pressure == SCHED_PRESSURE_NONE)
6876 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
6877 else
6878 dfa_lookahead = 0;
6880 /* Set to "0" so that we recalculate. */
6881 max_lookahead_tries = 0;
6883 if (targetm.sched.init_dfa_pre_cycle_insn)
6884 targetm.sched.init_dfa_pre_cycle_insn ();
6886 if (targetm.sched.init_dfa_post_cycle_insn)
6887 targetm.sched.init_dfa_post_cycle_insn ();
6889 dfa_start ();
6890 dfa_state_size = state_size ();
6892 init_alias_analysis ();
6894 if (!sched_no_dce)
6895 df_set_flags (DF_LR_RUN_DCE);
6896 df_note_add_problem ();
6898 /* More problems needed for interloop dep calculation in SMS. */
6899 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
6901 df_rd_add_problem ();
6902 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
6905 df_analyze ();
6907 /* Do not run DCE after reload, as this can kill nops inserted
6908 by bundling. */
6909 if (reload_completed)
6910 df_clear_flags (DF_LR_RUN_DCE);
6912 regstat_compute_calls_crossed ();
6914 if (targetm.sched.init_global)
6915 targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
6917 alloc_global_sched_pressure_data ();
6919 curr_state = xmalloc (dfa_state_size);
6922 static void haifa_init_only_bb (basic_block, basic_block);
6924 /* Initialize data structures specific to the Haifa scheduler. */
6925 void
6926 haifa_sched_init (void)
6928 setup_sched_dump ();
6929 sched_init ();
6931 scheduled_insns.create (0);
6933 if (spec_info != NULL)
6935 sched_deps_info->use_deps_list = 1;
6936 sched_deps_info->generate_spec_deps = 1;
6939 /* Initialize luids, dependency caches, target and h_i_d for the
6940 whole function. */
6942 bb_vec_t bbs;
6943 bbs.create (n_basic_blocks_for_fn (cfun));
6944 basic_block bb;
6946 sched_init_bbs ();
6948 FOR_EACH_BB_FN (bb, cfun)
6949 bbs.quick_push (bb);
6950 sched_init_luids (bbs);
6951 sched_deps_init (true);
6952 sched_extend_target ();
6953 haifa_init_h_i_d (bbs);
6955 bbs.release ();
6958 sched_init_only_bb = haifa_init_only_bb;
6959 sched_split_block = sched_split_block_1;
6960 sched_create_empty_bb = sched_create_empty_bb_1;
6961 haifa_recovery_bb_ever_added_p = false;
6963 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
6964 before_recovery = 0;
6965 after_recovery = 0;
6967 modulo_ii = 0;
6970 /* Finish work with the data specific to the Haifa scheduler. */
6971 void
6972 haifa_sched_finish (void)
6974 sched_create_empty_bb = NULL;
6975 sched_split_block = NULL;
6976 sched_init_only_bb = NULL;
6978 if (spec_info && spec_info->dump)
6980 char c = reload_completed ? 'a' : 'b';
6982 fprintf (spec_info->dump,
6983 ";; %s:\n", current_function_name ());
6985 fprintf (spec_info->dump,
6986 ";; Procedure %cr-begin-data-spec motions == %d\n",
6987 c, nr_begin_data);
6988 fprintf (spec_info->dump,
6989 ";; Procedure %cr-be-in-data-spec motions == %d\n",
6990 c, nr_be_in_data);
6991 fprintf (spec_info->dump,
6992 ";; Procedure %cr-begin-control-spec motions == %d\n",
6993 c, nr_begin_control);
6994 fprintf (spec_info->dump,
6995 ";; Procedure %cr-be-in-control-spec motions == %d\n",
6996 c, nr_be_in_control);
6999 scheduled_insns.release ();
7001 /* Finalize h_i_d, dependency caches, and luids for the whole
7002 function. Target will be finalized in md_global_finish (). */
7003 sched_deps_finish ();
7004 sched_finish_luids ();
7005 current_sched_info = NULL;
7006 sched_finish ();
7009 /* Free global data used during insn scheduling. This function works with
7010 the common data shared between the schedulers. */
7012 void
7013 sched_finish (void)
7015 haifa_finish_h_i_d ();
7016 free_global_sched_pressure_data ();
7017 free (curr_state);
7019 if (targetm.sched.finish_global)
7020 targetm.sched.finish_global (sched_dump, sched_verbose);
7022 end_alias_analysis ();
7024 regstat_free_calls_crossed ();
7026 dfa_finish ();
7029 /* Free all delay_pair structures that were recorded. */
7030 void
7031 free_delay_pairs (void)
7033 if (delay_htab)
7035 delay_htab->empty ();
7036 delay_htab_i2->empty ();
7040 /* Fix INSN_TICKs of the instructions in the current block as well as
7041 INSN_TICKs of their dependents.
7042 HEAD and TAIL are the beginning and the end of the current scheduled block. */
7043 static void
7044 fix_inter_tick (rtx_insn *head, rtx_insn *tail)
7046 /* Set of instructions with corrected INSN_TICK. */
7047 bitmap_head processed;
7048 /* ??? It is doubtful if we should assume that cycle advance happens on
7049 basic block boundaries. Basically insns that are unconditionally ready
7050 at the start of the block are preferable to those which have
7051 a one cycle dependency on an insn from the previous block. */
7052 int next_clock = clock_var + 1;
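/* Ticks are shifted down by NEXT_CLOCK so that the next block can start
counting from clock 0 while the relative distances between insns are
preserved. */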
7054 bitmap_initialize (&processed, 0);
7056 /* Iterate over the scheduled instructions and fix their INSN_TICKs and
7057 INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
7058 across different blocks. */
7059 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
7061 if (INSN_P (head))
7063 int tick;
7064 sd_iterator_def sd_it;
7065 dep_t dep;
7067 tick = INSN_TICK (head);
7068 gcc_assert (tick >= MIN_TICK);
7070 /* Fix INSN_TICK of an instruction from the just-scheduled block. */
7071 if (bitmap_set_bit (&processed, INSN_LUID (head)))
7073 tick -= next_clock;
7075 if (tick < MIN_TICK)
7076 tick = MIN_TICK;
7078 INSN_TICK (head) = tick;
7081 if (DEBUG_INSN_P (head))
7082 continue;
7084 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
7086 rtx_insn *next;
7088 next = DEP_CON (dep);
7089 tick = INSN_TICK (next);
7091 if (tick != INVALID_TICK
7092 /* If NEXT has its INSN_TICK calculated, fix it.
7093 If not - it will be properly calculated from
7094 scratch later in fix_tick_ready. */
7095 && bitmap_set_bit (&processed, INSN_LUID (next)))
7097 tick -= next_clock;
7099 if (tick < MIN_TICK)
7100 tick = MIN_TICK;
7102 if (tick > INTER_TICK (next))
7103 INTER_TICK (next) = tick;
7104 else
7105 tick = INTER_TICK (next);
7107 INSN_TICK (next) = tick;
7112 bitmap_clear (&processed);
7115 /* Check if NEXT is ready to be added to the ready or queue list.
7116 If "yes", add it to the proper list.
7117 Returns:
7118 -1 - is not ready yet,
7119 0 - added to the ready list,
7120 0 < N - queued for N cycles. */
7121 int
7122 try_ready (rtx_insn *next)
7124 ds_t old_ts, new_ts;
7126 old_ts = TODO_SPEC (next);
7128 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP | DEP_CONTROL | DEP_POSTPONED))
7129 && (old_ts == HARD_DEP
7130 || old_ts == DEP_POSTPONED
7131 || (old_ts & SPECULATIVE)
7132 || old_ts == DEP_CONTROL));
7134 new_ts = recompute_todo_spec (next, false);
7136 if (new_ts & (HARD_DEP | DEP_POSTPONED))
7137 gcc_assert (new_ts == old_ts
7138 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
7139 else if (current_sched_info->new_ready)
7140 new_ts = current_sched_info->new_ready (next, new_ts);
7142 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
7143 have its original pattern or a changed (speculative) one. This is due
7144 to changing ebb in region scheduling.
7145 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
7146 has a speculative pattern.
7148 We can't assert (!(new_ts & HARD_DEP) || new_ts == old_ts) here because
7149 control-speculative NEXT could have been discarded by sched-rgn.c
7150 (the same case as when discarded by can_schedule_ready_p ()). */
7152 if ((new_ts & SPECULATIVE)
7153 /* If (old_ts == new_ts), then (old_ts & SPECULATIVE) and we don't
7154 need to change anything. */
7155 && new_ts != old_ts)
7157 int res;
7158 rtx new_pat;
7160 gcc_assert ((new_ts & SPECULATIVE) && !(new_ts & ~SPECULATIVE));
7162 res = haifa_speculate_insn (next, new_ts, &new_pat);
7164 switch (res)
7166 case -1:
7167 /* It would be nice to change DEP_STATUS of all dependences,
7168 which have ((DEP_STATUS & SPECULATIVE) == new_ts) to HARD_DEP,
7169 so we won't reanalyze anything. */
7170 new_ts = HARD_DEP;
7171 break;
7173 case 0:
7174 /* We follow the rule that every speculative insn
7175 has a non-null ORIG_PAT. */
7176 if (!ORIG_PAT (next))
7177 ORIG_PAT (next) = PATTERN (next);
7178 break;
7180 case 1:
7181 if (!ORIG_PAT (next))
7182 /* If we are going to overwrite the original pattern of insn,
7183 save it. */
7184 ORIG_PAT (next) = PATTERN (next);
7186 res = haifa_change_pattern (next, new_pat);
7187 gcc_assert (res);
7188 break;
7190 default:
7191 gcc_unreachable ();
7195 /* We need to restore pattern only if (new_ts == 0), because otherwise it is
7196 either correct (new_ts & SPECULATIVE),
7197 or we simply don't care (new_ts & HARD_DEP). */
7199 gcc_assert (!ORIG_PAT (next)
7200 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
7202 TODO_SPEC (next) = new_ts;
7204 if (new_ts & (HARD_DEP | DEP_POSTPONED))
7206 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
7207 control-speculative NEXT could have been discarded by sched-rgn.c
7208 (the same case as when discarded by can_schedule_ready_p ()). */
7209 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
7211 change_queue_index (next, QUEUE_NOWHERE);
7213 return -1;
7215 else if (!(new_ts & BEGIN_SPEC)
7216 && ORIG_PAT (next) && PREDICATED_PAT (next) == NULL_RTX
7217 && !IS_SPECULATION_CHECK_P (next))
7218 /* We should change the pattern of every previously speculative
7219 instruction - and we determine if NEXT was speculative by using
7220 the ORIG_PAT field. Except in one case - speculation checks have
7221 ORIG_PAT set too, so skip them. */
7223 bool success = haifa_change_pattern (next, ORIG_PAT (next));
7224 gcc_assert (success);
7225 ORIG_PAT (next) = 0;
7228 if (sched_verbose >= 2)
7230 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
7231 (*current_sched_info->print_insn) (next, 0));
7233 if (spec_info && spec_info->dump)
7235 if (new_ts & BEGIN_DATA)
7236 fprintf (spec_info->dump, "; data-spec;");
7237 if (new_ts & BEGIN_CONTROL)
7238 fprintf (spec_info->dump, "; control-spec;");
7239 if (new_ts & BE_IN_CONTROL)
7240 fprintf (spec_info->dump, "; in-control-spec;");
7242 if (TODO_SPEC (next) & DEP_CONTROL)
7243 fprintf (sched_dump, " predicated");
7244 fprintf (sched_dump, "\n");
7247 adjust_priority (next);
7249 return fix_tick_ready (next);
7252 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
7253 static int
7254 fix_tick_ready (rtx_insn *next)
7256 int tick, delay;
7258 if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
7260 int full_p;
7261 sd_iterator_def sd_it;
7262 dep_t dep;
7264 tick = INSN_TICK (next);
7265 /* If tick is not equal to INVALID_TICK, then update
7266 INSN_TICK of NEXT with the most recent resolved dependence
7267 cost. Otherwise, recalculate from scratch. */
7268 full_p = (tick == INVALID_TICK);
7270 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
7272 rtx_insn *pro = DEP_PRO (dep);
7273 int tick1;
7275 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
7277 tick1 = INSN_TICK (pro) + dep_cost (dep);
7278 if (tick1 > tick)
7279 tick = tick1;
7281 if (!full_p)
7282 break;
7285 else
7286 tick = -1;
7288 INSN_TICK (next) = tick;
7290 delay = tick - clock_var;
7291 if (delay <= 0 || sched_pressure != SCHED_PRESSURE_NONE)
7292 delay = QUEUE_READY;
7294 change_queue_index (next, delay);
7296 return delay;
7299 /* Move NEXT to the proper queue list with (DELAY >= 1),
7300 or add it to the ready list (DELAY == QUEUE_READY),
7301 or remove it from both the ready and queue lists (DELAY == QUEUE_NOWHERE). */
7302 static void
7303 change_queue_index (rtx_insn *next, int delay)
7305 int i = QUEUE_INDEX (next);
7307 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
7308 && delay != 0);
7309 gcc_assert (i != QUEUE_SCHEDULED);
7311 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
7312 || (delay < 0 && delay == i))
7313 /* We have nothing to do. */
7314 return;
7316 /* Remove NEXT from wherever it is now. */
7317 if (i == QUEUE_READY)
7318 ready_remove_insn (next);
7319 else if (i >= 0)
7320 queue_remove (next);
7322 /* Add it to the proper place. */
7323 if (delay == QUEUE_READY)
7324 ready_add (readyp, next, false);
7325 else if (delay >= 1)
7326 queue_insn (next, delay, "change queue index");
7328 if (sched_verbose >= 2)
7330 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
7331 (*current_sched_info->print_insn) (next, 0));
7333 if (delay == QUEUE_READY)
7334 fprintf (sched_dump, " into ready\n");
7335 else if (delay >= 1)
7336 fprintf (sched_dump, " into queue with cost=%d\n", delay);
7337 else
7338 fprintf (sched_dump, " removed from ready or queue lists\n");
7342 static int sched_ready_n_insns = -1;
7344 /* Initialize per region data structures. */
7345 void
7346 sched_extend_ready_list (int new_sched_ready_n_insns)
7348 int i;
7350 if (sched_ready_n_insns == -1)
7351 /* At the first call we need to initialize one more choice_stack
7352 entry. */
7354 i = 0;
7355 sched_ready_n_insns = 0;
7356 scheduled_insns.reserve (new_sched_ready_n_insns);
7358 else
7359 i = sched_ready_n_insns + 1;
7361 ready.veclen = new_sched_ready_n_insns + issue_rate;
7362 ready.vec = XRESIZEVEC (rtx_insn *, ready.vec, ready.veclen);
7364 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
7366 ready_try = (signed char *) xrecalloc (ready_try, new_sched_ready_n_insns,
7367 sched_ready_n_insns,
7368 sizeof (*ready_try));
7370 /* We allocate +1 element to save initial state in the choice_stack[0]
7371 entry. */
7372 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
7373 new_sched_ready_n_insns + 1);
7375 for (; i <= new_sched_ready_n_insns; i++)
7377 choice_stack[i].state = xmalloc (dfa_state_size);
7379 if (targetm.sched.first_cycle_multipass_init)
7380 targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
7381 .target_data));
7384 sched_ready_n_insns = new_sched_ready_n_insns;
7387 /* Free per region data structures. */
7388 void
7389 sched_finish_ready_list (void)
7391 int i;
7393 free (ready.vec);
7394 ready.vec = NULL;
7395 ready.veclen = 0;
7397 free (ready_try);
7398 ready_try = NULL;
7400 for (i = 0; i <= sched_ready_n_insns; i++)
7402 if (targetm.sched.first_cycle_multipass_fini)
7403 targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
7404 .target_data));
7406 free (choice_stack [i].state);
7408 free (choice_stack);
7409 choice_stack = NULL;
7411 sched_ready_n_insns = -1;
7414 static int
7415 haifa_luid_for_non_insn (rtx x)
7417 gcc_assert (NOTE_P (x) || LABEL_P (x));
7419 return 0;
7422 /* Generates recovery code for INSN. */
7423 static void
7424 generate_recovery_code (rtx_insn *insn)
7426 if (TODO_SPEC (insn) & BEGIN_SPEC)
7427 begin_speculative_block (insn);
7429 /* Here we have insn with no dependencies to
7430 instructions other than CHECK_SPEC ones. */
7432 if (TODO_SPEC (insn) & BE_IN_SPEC)
7433 add_to_speculative_block (insn);
7436 /* Helper function.
7437 Tries to add speculative dependencies of type FS between TWIN
7438 and the consumers of INSN's forward dependencies. */
7439 static void
7440 process_insn_forw_deps_be_in_spec (rtx insn, rtx_insn *twin, ds_t fs)
7442 sd_iterator_def sd_it;
7443 dep_t dep;
7445 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
7447 ds_t ds;
7448 rtx_insn *consumer;
7450 consumer = DEP_CON (dep);
7452 ds = DEP_STATUS (dep);
7454 if (/* If we want to create speculative dep. */
7455 fs
7456 /* And we can do that because this is a true dep. */
7457 && (ds & DEP_TYPES) == DEP_TRUE)
7459 gcc_assert (!(ds & BE_IN_SPEC));
7461 if (/* If this dep can be overcome with 'begin speculation'. */
7462 ds & BEGIN_SPEC)
7463 /* Then we have a choice: keep the dep 'begin speculative'
7464 or transform it into 'be in speculative'. */
7466 if (/* In try_ready we assert that once an insn has become ready
7467 it can be removed from the ready (or queue) list only
7468 due to a backend decision. Hence we can't let the
7469 probability of the speculative dep decrease. */
7470 ds_weak (ds) <= ds_weak (fs))
7472 ds_t new_ds;
7474 new_ds = (ds & ~BEGIN_SPEC) | fs;
7476 if (/* consumer can 'be in speculative'. */
7477 sched_insn_is_legitimate_for_speculation_p (consumer,
7478 new_ds))
7479 /* Transform it to be in speculative. */
7480 ds = new_ds;
7483 else
7484 /* Mark the dep as 'be in speculative'. */
7485 ds |= fs;
7489 dep_def _new_dep, *new_dep = &_new_dep;
7491 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
7492 sd_add_dep (new_dep, false);
7497 /* Generates recovery code for BEGIN speculative INSN. */
7498 static void
7499 begin_speculative_block (rtx_insn *insn)
7501 if (TODO_SPEC (insn) & BEGIN_DATA)
7502 nr_begin_data++;
7503 if (TODO_SPEC (insn) & BEGIN_CONTROL)
7504 nr_begin_control++;
7506 create_check_block_twin (insn, false);
7508 TODO_SPEC (insn) &= ~BEGIN_SPEC;
7511 static void haifa_init_insn (rtx_insn *);
7513 /* Generates recovery code for BE_IN speculative INSN. */
7514 static void
7515 add_to_speculative_block (rtx_insn *insn)
7517 ds_t ts;
7518 sd_iterator_def sd_it;
7519 dep_t dep;
7520 rtx_insn_list *twins = NULL;
7521 rtx_vec_t priorities_roots;
7523 ts = TODO_SPEC (insn);
7524 gcc_assert (!(ts & ~BE_IN_SPEC));
7526 if (ts & BE_IN_DATA)
7527 nr_be_in_data++;
7528 if (ts & BE_IN_CONTROL)
7529 nr_be_in_control++;
7531 TODO_SPEC (insn) &= ~BE_IN_SPEC;
7532 gcc_assert (!TODO_SPEC (insn));
7534 DONE_SPEC (insn) |= ts;
7536 /* First we convert all simple checks to branchy. */
7537 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7538 sd_iterator_cond (&sd_it, &dep);)
7540 rtx_insn *check = DEP_PRO (dep);
7542 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
7544 create_check_block_twin (check, true);
7546 /* Restart search. */
7547 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7549 else
7550 /* Continue search. */
7551 sd_iterator_next (&sd_it);
7554 priorities_roots.create (0);
7555 clear_priorities (insn, &priorities_roots);
7557 while (1)
7559 rtx_insn *check, *twin;
7560 basic_block rec;
7562 /* Get the first backward dependency of INSN. */
7563 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7564 if (!sd_iterator_cond (&sd_it, &dep))
7565 /* INSN has no backward dependencies left. */
7566 break;
7568 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
7569 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
7570 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
7572 check = DEP_PRO (dep);
7574 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
7575 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
7577 rec = BLOCK_FOR_INSN (check);
7579 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
7580 haifa_init_insn (twin);
7582 sd_copy_back_deps (twin, insn, true);
7584 if (sched_verbose && spec_info->dump)
7585 /* INSN_BB (insn) isn't determined for twin insns yet.
7586 So we can't use current_sched_info->print_insn. */
7587 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
7588 INSN_UID (twin), rec->index);
7590 twins = alloc_INSN_LIST (twin, twins);
7592 /* Add dependences between TWIN and all appropriate
7593 instructions from REC. */
7594 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
7596 rtx_insn *pro = DEP_PRO (dep);
7598 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
7600 /* INSN might have dependencies from the instructions from
7601 several recovery blocks. At this iteration we process those
7602 producers that reside in REC. */
7603 if (BLOCK_FOR_INSN (pro) == rec)
7605 dep_def _new_dep, *new_dep = &_new_dep;
7607 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
7608 sd_add_dep (new_dep, false);
7612 process_insn_forw_deps_be_in_spec (insn, twin, ts);
7614 /* Remove all dependencies between INSN and insns in REC. */
7615 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7616 sd_iterator_cond (&sd_it, &dep);)
7618 rtx_insn *pro = DEP_PRO (dep);
7620 if (BLOCK_FOR_INSN (pro) == rec)
7621 sd_delete_dep (sd_it);
7622 else
7623 sd_iterator_next (&sd_it);
7627 /* We couldn't have added the dependencies between INSN and TWINS earlier
7628 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
7629 while (twins)
7631 rtx_insn *twin;
7632 rtx_insn_list *next_node;
7634 twin = twins->insn ();
7637 dep_def _new_dep, *new_dep = &_new_dep;
7639 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
7640 sd_add_dep (new_dep, false);
7643 next_node = twins->next ();
7644 free_INSN_LIST_node (twins);
7645 twins = next_node;
7648 calc_priorities (priorities_roots);
7649 priorities_roots.release ();
7652 /* Extend the array pointed to by P and zero-fill only the new part. */
7653 void *
7654 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
7656 gcc_assert (new_nmemb >= old_nmemb);
7657 p = XRESIZEVAR (void, p, new_nmemb * size);
7658 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
7659 return p;
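/* A minimal usage sketch (hypothetical caller, not taken from this file):
   growing a zero-filled array of ints from OLD_N to NEW_N elements might
   look like

     int *p = XCNEWVEC (int, old_n);
     ...
     p = (int *) xrecalloc (p, new_n, old_n, sizeof (int));

   after which p[old_n] .. p[new_n - 1] are guaranteed to be zero.  */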
7662 /* Helper function.
7663 Find fallthru edge from PRED. */
7664 edge
7665 find_fallthru_edge_from (basic_block pred)
7667 edge e;
7668 basic_block succ;
7670 succ = pred->next_bb;
7671 gcc_assert (succ->prev_bb == pred);
7673 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
7675 e = find_fallthru_edge (pred->succs);
7677 if (e)
7679 gcc_assert (e->dest == succ);
7680 return e;
7683 else
7685 e = find_fallthru_edge (succ->preds);
7687 if (e)
7689 gcc_assert (e->src == pred);
7690 return e;
7694 return NULL;
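/* A minimal usage sketch (mirroring init_before_recovery below): to look for
   the fallthru edge out of the last real block one might write

     basic_block last = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
     edge e = find_fallthru_edge_from (last);

   If E is non-null, then e->src == last and e->dest == last->next_bb.
   Only the edge between PRED and its layout successor PRED->next_bb is
   considered, and the shorter of the two edge lists is scanned.  */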
7697 /* Extend per basic block data structures. */
7698 static void
7699 sched_extend_bb (void)
7701 /* The following is done to keep current_sched_info->next_tail non-null. */
7702 rtx_insn *end = BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
7703 rtx_insn *insn = DEBUG_INSN_P (end) ? prev_nondebug_insn (end) : end;
7704 if (NEXT_INSN (end) == 0
7705 || (!NOTE_P (insn)
7706 && !LABEL_P (insn)
7707 /* Don't emit a NOTE if it would end up before a BARRIER. */
7708 && !BARRIER_P (NEXT_INSN (end))))
7710 rtx_note *note = emit_note_after (NOTE_INSN_DELETED, end);
7711 /* Make note appear outside BB. */
7712 set_block_for_insn (note, NULL);
7713 BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb) = end;
7717 /* Init per basic block data structures. */
7718 void
7719 sched_init_bbs (void)
7721 sched_extend_bb ();
7724 /* Initialize BEFORE_RECOVERY variable. */
7725 static void
7726 init_before_recovery (basic_block *before_recovery_ptr)
7728 basic_block last;
7729 edge e;
7731 last = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
7732 e = find_fallthru_edge_from (last);
7734 if (e)
7736 /* We create two basic blocks:
7737 1. A single-instruction block that is inserted right after E->SRC
7738 and jumps to
7739 2. An empty block placed right before EXIT_BLOCK.
7740 Recovery blocks will be emitted between these two blocks. */
7742 basic_block single, empty;
7743 rtx_insn *x;
7744 rtx label;
7746 /* If the fallthrough edge to exit we've found is from the block we've
7747 created before, don't do anything more. */
7748 if (last == after_recovery)
7749 return;
7751 adding_bb_to_current_region_p = false;
7753 single = sched_create_empty_bb (last);
7754 empty = sched_create_empty_bb (single);
7756 /* Add new blocks to the root loop. */
7757 if (current_loops != NULL)
7759 add_bb_to_loop (single, (*current_loops->larray)[0]);
7760 add_bb_to_loop (empty, (*current_loops->larray)[0]);
7763 single->count = last->count;
7764 empty->count = last->count;
7765 single->frequency = last->frequency;
7766 empty->frequency = last->frequency;
7767 BB_COPY_PARTITION (single, last);
7768 BB_COPY_PARTITION (empty, last);
7770 redirect_edge_succ (e, single);
7771 make_single_succ_edge (single, empty, 0);
7772 make_single_succ_edge (empty, EXIT_BLOCK_PTR_FOR_FN (cfun),
7773 EDGE_FALLTHRU);
7775 label = block_label (empty);
7776 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
7777 JUMP_LABEL (x) = label;
7778 LABEL_NUSES (label)++;
7779 haifa_init_insn (x);
7781 emit_barrier_after (x);
7783 sched_init_only_bb (empty, NULL);
7784 sched_init_only_bb (single, NULL);
7785 sched_extend_bb ();
7787 adding_bb_to_current_region_p = true;
7788 before_recovery = single;
7789 after_recovery = empty;
7791 if (before_recovery_ptr)
7792 *before_recovery_ptr = before_recovery;
7794 if (sched_verbose >= 2 && spec_info->dump)
7795 fprintf (spec_info->dump,
7796 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
7797 last->index, single->index, empty->index);
7799 else
7800 before_recovery = last;
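/* A sketch of the resulting layout when a fallthru edge to EXIT was found
   (names as in the code above):

     last  ->  single  ->  empty  ->  EXIT_BLOCK

   SINGLE ends in an unconditional jump to EMPTY, recovery blocks will later
   be emitted between SINGLE and EMPTY, and BEFORE_RECOVERY / AFTER_RECOVERY
   are set to SINGLE and EMPTY respectively.  */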
7803 /* Returns new recovery block. */
7804 basic_block
7805 sched_create_recovery_block (basic_block *before_recovery_ptr)
7807 rtx label;
7808 rtx_insn *barrier;
7809 basic_block rec;
7811 haifa_recovery_bb_recently_added_p = true;
7812 haifa_recovery_bb_ever_added_p = true;
7814 init_before_recovery (before_recovery_ptr);
7816 barrier = get_last_bb_insn (before_recovery);
7817 gcc_assert (BARRIER_P (barrier));
7819 label = emit_label_after (gen_label_rtx (), barrier);
7821 rec = create_basic_block (label, label, before_recovery);
7823 /* A recovery block always ends with an unconditional jump. */
7824 emit_barrier_after (BB_END (rec));
7826 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
7827 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
7829 if (sched_verbose && spec_info->dump)
7830 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
7831 rec->index);
7833 return rec;
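/* A minimal sketch of the intended pairing (this is how create_check_block_twin
   below uses it): create the recovery block, split the original block at the
   check insn, then wire up the edges:

     basic_block rec = sched_create_recovery_block (NULL);
     ...
     basic_block second_bb = sched_split_block (first_bb, check);
     sched_create_recovery_edges (first_bb, rec, second_bb);

   FIRST_BB, CHECK and SECOND_BB follow the names used in
   create_check_block_twin.  */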
7836 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
7837 and emit necessary jumps. */
7838 void
7839 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
7840 basic_block second_bb)
7842 rtx label;
7843 rtx jump;
7844 int edge_flags;
7846 /* Fix the incoming edge. */
7847 /* ??? Which other flags should be specified? */
7848 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
7849 /* Partition type is the same, if it is "unpartitioned". */
7850 edge_flags = EDGE_CROSSING;
7851 else
7852 edge_flags = 0;
7854 make_edge (first_bb, rec, edge_flags);
7855 label = block_label (second_bb);
7856 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
7857 JUMP_LABEL (jump) = label;
7858 LABEL_NUSES (label)++;
7860 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
7861 /* Partition type is the same, if it is "unpartitioned". */
7863 /* Rewritten from cfgrtl.c. */
7864 if (flag_reorder_blocks_and_partition
7865 && targetm_common.have_named_sections)
7867 /* We don't need the same note for the check because
7868 any_condjump_p (check) == true. */
7869 CROSSING_JUMP_P (jump) = 1;
7871 edge_flags = EDGE_CROSSING;
7873 else
7874 edge_flags = 0;
7876 make_single_succ_edge (rec, second_bb, edge_flags);
7877 if (dom_info_available_p (CDI_DOMINATORS))
7878 set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
7881 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
7882 INSN is a simple check that should be converted to a branchy one. */
7883 static void
7884 create_check_block_twin (rtx_insn *insn, bool mutate_p)
7886 basic_block rec;
7887 rtx_insn *label, *check, *twin;
7888 rtx check_pat;
7889 ds_t fs;
7890 sd_iterator_def sd_it;
7891 dep_t dep;
7892 dep_def _new_dep, *new_dep = &_new_dep;
7893 ds_t todo_spec;
7895 gcc_assert (ORIG_PAT (insn) != NULL_RTX);
7897 if (!mutate_p)
7898 todo_spec = TODO_SPEC (insn);
7899 else
7901 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
7902 && (TODO_SPEC (insn) & SPECULATIVE) == 0);
7904 todo_spec = CHECK_SPEC (insn);
7907 todo_spec &= SPECULATIVE;
7909 /* Create recovery block. */
7910 if (mutate_p || targetm.sched.needs_block_p (todo_spec))
7912 rec = sched_create_recovery_block (NULL);
7913 label = BB_HEAD (rec);
7915 else
7917 rec = EXIT_BLOCK_PTR_FOR_FN (cfun);
7918 label = NULL;
7921 /* Emit CHECK. */
7922 check_pat = targetm.sched.gen_spec_check (insn, label, todo_spec);
7924 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
7926 /* To have mem_reg alive at the beginning of second_bb,
7927 we emit the check BEFORE insn, so that insn, after the block
7928 is split, ends up at the beginning of second_bb, which
7929 provides us with the correct life information. */
7930 check = emit_jump_insn_before (check_pat, insn);
7931 JUMP_LABEL (check) = label;
7932 LABEL_NUSES (label)++;
7934 else
7935 check = emit_insn_before (check_pat, insn);
7937 /* Extend data structures. */
7938 haifa_init_insn (check);
7940 /* CHECK is being added to current region. Extend ready list. */
7941 gcc_assert (sched_ready_n_insns != -1);
7942 sched_extend_ready_list (sched_ready_n_insns + 1);
7944 if (current_sched_info->add_remove_insn)
7945 current_sched_info->add_remove_insn (insn, 0);
7947 RECOVERY_BLOCK (check) = rec;
7949 if (sched_verbose && spec_info->dump)
7950 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
7951 (*current_sched_info->print_insn) (check, 0));
7953 gcc_assert (ORIG_PAT (insn));
7955 /* Initialize TWIN (twin is a duplicate of original instruction
7956 in the recovery block). */
7957 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
7959 sd_iterator_def sd_it;
7960 dep_t dep;
7962 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
7963 if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
7965 struct _dep _dep2, *dep2 = &_dep2;
7967 init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
7969 sd_add_dep (dep2, true);
7972 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
7973 haifa_init_insn (twin);
7975 if (sched_verbose && spec_info->dump)
7976 /* INSN_BB (insn) isn't determined for twin insns yet.
7977 So we can't use current_sched_info->print_insn. */
7978 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
7979 INSN_UID (twin), rec->index);
7981 else
7983 ORIG_PAT (check) = ORIG_PAT (insn);
7984 HAS_INTERNAL_DEP (check) = 1;
7985 twin = check;
7986 /* ??? We probably should change all OUTPUT dependencies to
7987 (TRUE | OUTPUT). */
7990 /* Copy all resolved back dependencies of INSN to TWIN. This will
7991 provide correct value for INSN_TICK (TWIN). */
7992 sd_copy_back_deps (twin, insn, true);
7994 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
7995 /* In case of branchy check, fix CFG. */
7997 basic_block first_bb, second_bb;
7998 rtx_insn *jump;
8000 first_bb = BLOCK_FOR_INSN (check);
8001 second_bb = sched_split_block (first_bb, check);
8003 sched_create_recovery_edges (first_bb, rec, second_bb);
8005 sched_init_only_bb (second_bb, first_bb);
8006 sched_init_only_bb (rec, EXIT_BLOCK_PTR_FOR_FN (cfun));
8008 jump = BB_END (rec);
8009 haifa_init_insn (jump);
8012 /* Move backward dependences from INSN to CHECK and
8013 move forward dependences from INSN to TWIN. */
8015 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
8016 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
8018 rtx_insn *pro = DEP_PRO (dep);
8019 ds_t ds;
8021 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
8022 check --TRUE--> producer ??? or ANTI ???
8023 twin --TRUE--> producer
8024 twin --ANTI--> check
8026 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
8027 check --ANTI--> producer
8028 twin --ANTI--> producer
8029 twin --ANTI--> check
8031 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
8032 check ~~TRUE~~> producer
8033 twin ~~TRUE~~> producer
8034 twin --ANTI--> check */
8036 ds = DEP_STATUS (dep);
8038 if (ds & BEGIN_SPEC)
8040 gcc_assert (!mutate_p);
8041 ds &= ~BEGIN_SPEC;
8044 init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
8045 sd_add_dep (new_dep, false);
8047 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8049 DEP_CON (new_dep) = twin;
8050 sd_add_dep (new_dep, false);
8054 /* Second, remove backward dependencies of INSN. */
8055 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
8056 sd_iterator_cond (&sd_it, &dep);)
8058 if ((DEP_STATUS (dep) & BEGIN_SPEC)
8059 || mutate_p)
8060 /* We can delete this dep because we overcome it with
8061 BEGIN_SPECULATION. */
8062 sd_delete_dep (sd_it);
8063 else
8064 sd_iterator_next (&sd_it);
8067 /* Future Speculations. Determine what BE_IN speculations will be like. */
8068 fs = 0;
8070 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
8071 here. */
8073 gcc_assert (!DONE_SPEC (insn));
8075 if (!mutate_p)
8077 ds_t ts = TODO_SPEC (insn);
8079 DONE_SPEC (insn) = ts & BEGIN_SPEC;
8080 CHECK_SPEC (check) = ts & BEGIN_SPEC;
8082 /* Luckiness of future speculations solely depends upon initial
8083 BEGIN speculation. */
8084 if (ts & BEGIN_DATA)
8085 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
8086 if (ts & BEGIN_CONTROL)
8087 fs = set_dep_weak (fs, BE_IN_CONTROL,
8088 get_dep_weak (ts, BEGIN_CONTROL));
8090 else
8091 CHECK_SPEC (check) = CHECK_SPEC (insn);
8093 /* Future speculations: call the helper. */
8094 process_insn_forw_deps_be_in_spec (insn, twin, fs);
8096 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8098 /* Which types of dependencies we should use here is, in
8099 general, a machine-dependent question... But, for now,
8100 it is not. */
8102 if (!mutate_p)
8104 init_dep (new_dep, insn, check, REG_DEP_TRUE);
8105 sd_add_dep (new_dep, false);
8107 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
8108 sd_add_dep (new_dep, false);
8110 else
8112 if (spec_info->dump)
8113 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
8114 (*current_sched_info->print_insn) (insn, 0));
8116 /* Remove all dependencies of the INSN. */
8118 sd_it = sd_iterator_start (insn, (SD_LIST_FORW
8119 | SD_LIST_BACK
8120 | SD_LIST_RES_BACK));
8121 while (sd_iterator_cond (&sd_it, &dep))
8122 sd_delete_dep (sd_it);
8125 /* If the former check (INSN) was already moved to the ready (or queue)
8126 list, add the new check (CHECK) there too. */
8127 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
8128 try_ready (check);
8130 /* Remove old check from instruction stream and free its
8131 data. */
8132 sched_remove_insn (insn);
8135 init_dep (new_dep, check, twin, REG_DEP_ANTI);
8136 sd_add_dep (new_dep, false);
8138 else
8140 init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
8141 sd_add_dep (new_dep, false);
8144 if (!mutate_p)
8145 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
8146 because it'll be done later in add_to_speculative_block. */
8148 rtx_vec_t priorities_roots = rtx_vec_t ();
8150 clear_priorities (twin, &priorities_roots);
8151 calc_priorities (priorities_roots);
8152 priorities_roots.release ();
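/* To summarize the branchy (non-mutating) case built above, the resulting
   layout is roughly:

     first_bb:   ... check (a conditional jump to rec)
     second_bb:  insn ...            (first_bb split right after check)
     rec:        twin (a copy of insn) followed by a jump back to second_bb

   where TWIN is anti-dependent on CHECK, as set up near the end of this
   function.  */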
8156 /* Remove dependencies between instructions in the recovery block REC
8157 and the usual region instructions. Inner dependencies are kept so
8158 that they do not need to be recomputed. */
8159 static void
8160 fix_recovery_deps (basic_block rec)
8162 rtx_insn *note, *insn, *jump;
8163 rtx_insn_list *ready_list = 0;
8164 bitmap_head in_ready;
8165 rtx_insn_list *link;
8167 bitmap_initialize (&in_ready, 0);
8169 /* NOTE - a basic block note. */
8170 note = NEXT_INSN (BB_HEAD (rec));
8171 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
8172 insn = BB_END (rec);
8173 gcc_assert (JUMP_P (insn));
8174 insn = PREV_INSN (insn);
8178 sd_iterator_def sd_it;
8179 dep_t dep;
8181 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
8182 sd_iterator_cond (&sd_it, &dep);)
8184 rtx_insn *consumer = DEP_CON (dep);
8186 if (BLOCK_FOR_INSN (consumer) != rec)
8188 sd_delete_dep (sd_it);
8190 if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
8191 ready_list = alloc_INSN_LIST (consumer, ready_list);
8193 else
8195 gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
8197 sd_iterator_next (&sd_it);
8201 insn = PREV_INSN (insn);
8203 while (insn != note);
8205 bitmap_clear (&in_ready);
8207 /* Try to add instructions to the ready or queue list. */
8208 for (link = ready_list; link; link = link->next ())
8209 try_ready (link->insn ());
8210 free_INSN_LIST_list (&ready_list);
8212 /* Fixing jump's dependences. */
8213 insn = BB_HEAD (rec);
8214 jump = BB_END (rec);
8216 gcc_assert (LABEL_P (insn));
8217 insn = NEXT_INSN (insn);
8219 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
8220 add_jump_dependencies (insn, jump);
8223 /* Change the pattern of INSN to NEW_PAT and invalidate cached haifa
8224 instruction data. Return false if validate_change rejects NEW_PAT. */
8225 static bool
8226 haifa_change_pattern (rtx_insn *insn, rtx new_pat)
8228 int t;
8230 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
8231 if (!t)
8232 return false;
8234 update_insn_after_change (insn);
8235 return true;
8238 /* -1 - can't speculate,
8239 0 - for speculation with REQUEST mode it is OK to use
8240 current instruction pattern,
8241 1 - need to change pattern for *NEW_PAT to be speculative. */
8242 int
8243 sched_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
8245 gcc_assert (current_sched_info->flags & DO_SPECULATION
8246 && (request & SPECULATIVE)
8247 && sched_insn_is_legitimate_for_speculation_p (insn, request));
8249 if ((request & spec_info->mask) != request)
8250 return -1;
8252 if (request & BE_IN_SPEC
8253 && !(request & BEGIN_SPEC))
8254 return 0;
8256 return targetm.sched.speculate_insn (insn, request, new_pat);
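/* A minimal sketch of interpreting the return value (hypothetical caller,
   not taken from this file):

     rtx new_pat;
     switch (sched_speculate_insn (insn, request, &new_pat))
       {
       case -1:
         ... INSN cannot be speculated for REQUEST ...
         break;
       case 0:
         ... the current pattern of INSN is already usable ...
         break;
       case 1:
         ... NEW_PAT holds the speculative pattern; it could be installed
             with haifa_change_pattern (insn, new_pat) ...
         break;
       }
*/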
8259 static int
8260 haifa_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
8262 gcc_assert (sched_deps_info->generate_spec_deps
8263 && !IS_SPECULATION_CHECK_P (insn));
8265 if (HAS_INTERNAL_DEP (insn)
8266 || SCHED_GROUP_P (insn))
8267 return -1;
8269 return sched_speculate_insn (insn, request, new_pat);
8272 /* Print some information about block BB, which starts with HEAD and
8273 ends with TAIL, before scheduling it.
8274 I is zero if the scheduler is about to start with a fresh ebb. */
8275 static void
8276 dump_new_block_header (int i, basic_block bb, rtx_insn *head, rtx_insn *tail)
8278 if (!i)
8279 fprintf (sched_dump,
8280 ";; ======================================================\n");
8281 else
8282 fprintf (sched_dump,
8283 ";; =====================ADVANCING TO=====================\n");
8284 fprintf (sched_dump,
8285 ";; -- basic block %d from %d to %d -- %s reload\n",
8286 bb->index, INSN_UID (head), INSN_UID (tail),
8287 (reload_completed ? "after" : "before"));
8288 fprintf (sched_dump,
8289 ";; ======================================================\n");
8290 fprintf (sched_dump, "\n");
8293 /* Unlink basic block notes and labels and save them, so that they
8294 can be easily restored. We unlink basic block notes in the EBB to
8295 provide back-compatibility with the previous code, as target backends
8296 assume that there will be only instructions between
8297 current_sched_info->{head and tail}. We restore these notes as soon
8298 as we can.
8299 FIRST (LAST) is the first (last) basic block in the ebb.
8300 NB: In the usual case (FIRST == LAST) nothing is really done. */
8301 void
8302 unlink_bb_notes (basic_block first, basic_block last)
8304 /* We DON'T unlink basic block notes of the first block in the ebb. */
8305 if (first == last)
8306 return;
8308 bb_header = XNEWVEC (rtx_insn *, last_basic_block_for_fn (cfun));
8310 /* Make a sentinel. */
8311 if (last->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
8312 bb_header[last->next_bb->index] = 0;
8314 first = first->next_bb;
8317 rtx_insn *prev, *label, *note, *next;
8319 label = BB_HEAD (last);
8320 if (LABEL_P (label))
8321 note = NEXT_INSN (label);
8322 else
8323 note = label;
8324 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
8326 prev = PREV_INSN (label);
8327 next = NEXT_INSN (note);
8328 gcc_assert (prev && next);
8330 SET_NEXT_INSN (prev) = next;
8331 SET_PREV_INSN (next) = prev;
8333 bb_header[last->index] = label;
8335 if (last == first)
8336 break;
8338 last = last->prev_bb;
8340 while (1);
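/* A minimal sketch of the intended pairing (hypothetical caller, following
   the comment above): unlink the notes of the ebb before scheduling it and
   restore them once scheduling is done:

     unlink_bb_notes (first_bb, last_bb);
     ... schedule the ebb ...
     restore_bb_notes (first_bb);

   FIRST_BB and LAST_BB are the first and last blocks of the ebb; when they
   are the same block, nothing is unlinked.  */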
8343 /* Restore basic block notes.
8344 FIRST is the first basic block in the ebb. */
8345 static void
8346 restore_bb_notes (basic_block first)
8348 if (!bb_header)
8349 return;
8351 /* We DON'T unlink basic block notes of the first block in the ebb. */
8352 first = first->next_bb;
8353 /* Remember: FIRST is actually the second basic block in the ebb. */
8355 while (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
8356 && bb_header[first->index])
8358 rtx_insn *prev, *label, *note, *next;
8360 label = bb_header[first->index];
8361 prev = PREV_INSN (label);
8362 next = NEXT_INSN (prev);
8364 if (LABEL_P (label))
8365 note = NEXT_INSN (label);
8366 else
8367 note = label;
8368 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
8370 bb_header[first->index] = 0;
8372 SET_NEXT_INSN (prev) = label;
8373 SET_NEXT_INSN (note) = next;
8374 SET_PREV_INSN (next) = note;
8376 first = first->next_bb;
8379 free (bb_header);
8380 bb_header = 0;
8383 /* Helper function.
8384 Fix CFG after both in- and inter-block movement of
8385 control_flow_insn_p JUMP. */
8386 static void
8387 fix_jump_move (rtx_insn *jump)
8389 basic_block bb, jump_bb, jump_bb_next;
8391 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
8392 jump_bb = BLOCK_FOR_INSN (jump);
8393 jump_bb_next = jump_bb->next_bb;
8395 gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
8396 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
8398 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
8399 /* if jump_bb_next is not empty. */
8400 BB_END (jump_bb) = BB_END (jump_bb_next);
8402 if (BB_END (bb) != PREV_INSN (jump))
8403 /* Then there are instructions after jump that should be placed
8404 in jump_bb_next. */
8405 BB_END (jump_bb_next) = BB_END (bb);
8406 else
8407 /* Otherwise jump_bb_next is empty. */
8408 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
8410 /* To make assertion in move_insn happy. */
8411 BB_END (bb) = PREV_INSN (jump);
8413 update_bb_for_insn (jump_bb_next);
8416 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
8417 static void
8418 move_block_after_check (rtx_insn *jump)
8420 basic_block bb, jump_bb, jump_bb_next;
8421 vec<edge, va_gc> *t;
8423 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
8424 jump_bb = BLOCK_FOR_INSN (jump);
8425 jump_bb_next = jump_bb->next_bb;
8427 update_bb_for_insn (jump_bb);
8429 gcc_assert (IS_SPECULATION_CHECK_P (jump)
8430 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
8432 unlink_block (jump_bb_next);
8433 link_block (jump_bb_next, bb);
8435 t = bb->succs;
8436 bb->succs = 0;
8437 move_succs (&(jump_bb->succs), bb);
8438 move_succs (&(jump_bb_next->succs), jump_bb);
8439 move_succs (&t, jump_bb_next);
8441 df_mark_solutions_dirty ();
8443 common_sched_info->fix_recovery_cfg
8444 (bb->index, jump_bb->index, jump_bb_next->index);
8447 /* Helper function for move_block_after_check.
8448 This function attaches the edge vector pointed to by SUCCSP to
8449 block TO. */
8450 static void
8451 move_succs (vec<edge, va_gc> **succsp, basic_block to)
8453 edge e;
8454 edge_iterator ei;
8456 gcc_assert (to->succs == 0);
8458 to->succs = *succsp;
8460 FOR_EACH_EDGE (e, ei, to->succs)
8461 e->src = to;
8463 *succsp = 0;
8466 /* Remove INSN from the instruction stream.
8467 INSN should not have any dependencies. */
8468 static void
8469 sched_remove_insn (rtx_insn *insn)
8471 sd_finish_insn (insn);
8473 change_queue_index (insn, QUEUE_NOWHERE);
8474 current_sched_info->add_remove_insn (insn, 1);
8475 delete_insn (insn);
8478 /* Clear priorities of all instructions that are forward dependent on INSN.
8479 Store in the vector pointed to by ROOTS_PTR the insns on which priority ()
8480 should be invoked to initialize all cleared priorities. */
8481 static void
8482 clear_priorities (rtx_insn *insn, rtx_vec_t *roots_ptr)
8484 sd_iterator_def sd_it;
8485 dep_t dep;
8486 bool insn_is_root_p = true;
8488 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
8490 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
8492 rtx_insn *pro = DEP_PRO (dep);
8494 if (INSN_PRIORITY_STATUS (pro) >= 0
8495 && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
8497 /* If DEP doesn't contribute to priority then INSN itself should
8498 be added to priority roots. */
8499 if (contributes_to_priority_p (dep))
8500 insn_is_root_p = false;
8502 INSN_PRIORITY_STATUS (pro) = -1;
8503 clear_priorities (pro, roots_ptr);
8507 if (insn_is_root_p)
8508 roots_ptr->safe_push (insn);
8511 /* Recompute priorities of instructions whose priorities might have been
8512 changed. ROOTS is a vector of instructions whose priority computation will
8513 trigger initialization of all cleared priorities. */
8514 static void
8515 calc_priorities (rtx_vec_t roots)
8517 int i;
8518 rtx_insn *insn;
8520 FOR_EACH_VEC_ELT (roots, i, insn)
8521 priority (insn);
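/* A minimal sketch of the usual sequence (as used by add_to_speculative_block
   and create_check_block_twin above): clear the priorities affected by INSN,
   then recompute them starting from the collected roots:

     rtx_vec_t roots = rtx_vec_t ();
     clear_priorities (insn, &roots);
     calc_priorities (roots);
     roots.release ();
*/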
8525 /* Add dependences between JUMP and other instructions in the recovery
8526 block. INSN is the first insn in the recovery block. */
8527 static void
8528 add_jump_dependencies (rtx_insn *insn, rtx_insn *jump)
8532 insn = NEXT_INSN (insn);
8533 if (insn == jump)
8534 break;
8536 if (dep_list_size (insn, SD_LIST_FORW) == 0)
8538 dep_def _new_dep, *new_dep = &_new_dep;
8540 init_dep (new_dep, insn, jump, REG_DEP_ANTI);
8541 sd_add_dep (new_dep, false);
8544 while (1);
8546 gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
8549 /* Extend data structures for logical insn UID. */
8550 void
8551 sched_extend_luids (void)
8553 int new_luids_max_uid = get_max_uid () + 1;
8555 sched_luids.safe_grow_cleared (new_luids_max_uid);
8558 /* Initialize LUID for INSN. */
8559 void
8560 sched_init_insn_luid (rtx_insn *insn)
8562 int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
8563 int luid;
8565 if (i >= 0)
8567 luid = sched_max_luid;
8568 sched_max_luid += i;
8570 else
8571 luid = -1;
8573 SET_INSN_LUID (insn, luid);
8576 /* Initialize luids for BBS.
8577 The hook common_sched_info->luid_for_non_insn () is used to determine
8578 if notes, labels, etc. need luids. */
8579 void
8580 sched_init_luids (bb_vec_t bbs)
8582 int i;
8583 basic_block bb;
8585 sched_extend_luids ();
8586 FOR_EACH_VEC_ELT (bbs, i, bb)
8588 rtx_insn *insn;
8590 FOR_BB_INSNS (bb, insn)
8591 sched_init_insn_luid (insn);
8595 /* Free LUIDs. */
8596 void
8597 sched_finish_luids (void)
8599 sched_luids.release ();
8600 sched_max_luid = 1;
8603 /* Return logical uid of INSN. Helpful while debugging. */
8604 int
8605 insn_luid (rtx_insn *insn)
8607 return INSN_LUID (insn);
8610 /* Extend per insn data in the target. */
8611 void
8612 sched_extend_target (void)
8614 if (targetm.sched.h_i_d_extended)
8615 targetm.sched.h_i_d_extended ();
8618 /* Extend global scheduler structures (those that live across calls to
8619 schedule_block) to include information about the just-emitted INSN. */
8620 static void
8621 extend_h_i_d (void)
8623 int reserve = (get_max_uid () + 1 - h_i_d.length ());
8624 if (reserve > 0
8625 && ! h_i_d.space (reserve))
8627 h_i_d.safe_grow_cleared (3 * get_max_uid () / 2);
8628 sched_extend_target ();
8632 /* Initialize the h_i_d entry of INSN with default values.
8633 Values that are not explicitly initialized here hold zero. */
8634 static void
8635 init_h_i_d (rtx_insn *insn)
8637 if (INSN_LUID (insn) > 0)
8639 INSN_COST (insn) = -1;
8640 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
8641 INSN_TICK (insn) = INVALID_TICK;
8642 INSN_EXACT_TICK (insn) = INVALID_TICK;
8643 INTER_TICK (insn) = INVALID_TICK;
8644 TODO_SPEC (insn) = HARD_DEP;
8648 /* Initialize haifa_insn_data for BBS. */
8649 void
8650 haifa_init_h_i_d (bb_vec_t bbs)
8652 int i;
8653 basic_block bb;
8655 extend_h_i_d ();
8656 FOR_EACH_VEC_ELT (bbs, i, bb)
8658 rtx_insn *insn;
8660 FOR_BB_INSNS (bb, insn)
8661 init_h_i_d (insn);
8665 /* Finalize haifa_insn_data. */
8666 void
8667 haifa_finish_h_i_d (void)
8669 int i;
8670 haifa_insn_data_t data;
8671 struct reg_use_data *use, *next;
8673 FOR_EACH_VEC_ELT (h_i_d, i, data)
8675 free (data->max_reg_pressure);
8676 free (data->reg_pressure);
8677 for (use = data->reg_use_list; use != NULL; use = next)
8679 next = use->next_insn_use;
8680 free (use);
8683 h_i_d.release ();
8686 /* Init data for the new insn INSN. */
8687 static void
8688 haifa_init_insn (rtx_insn *insn)
8690 gcc_assert (insn != NULL);
8692 sched_extend_luids ();
8693 sched_init_insn_luid (insn);
8694 sched_extend_target ();
8695 sched_deps_init (false);
8696 extend_h_i_d ();
8697 init_h_i_d (insn);
8699 if (adding_bb_to_current_region_p)
8701 sd_init_insn (insn);
8703 /* Extend dependency caches by one element. */
8704 extend_dependency_caches (1, false);
8706 if (sched_pressure != SCHED_PRESSURE_NONE)
8707 init_insn_reg_pressure_info (insn);
8710 /* Init data for the new basic block BB which comes after AFTER. */
8711 static void
8712 haifa_init_only_bb (basic_block bb, basic_block after)
8714 gcc_assert (bb != NULL);
8716 sched_init_bbs ();
8718 if (common_sched_info->add_block)
8719 /* This changes only data structures of the front-end. */
8720 common_sched_info->add_block (bb, after);
8723 /* A generic version of sched_split_block (). */
8724 basic_block
8725 sched_split_block_1 (basic_block first_bb, rtx after)
8727 edge e;
8729 e = split_block (first_bb, after);
8730 gcc_assert (e->src == first_bb);
8732 /* sched_split_block emits note if *check == BB_END. Probably it
8733 is better to rip that note off. */
8735 return e->dest;
8738 /* A generic version of sched_create_empty_bb (). */
8739 basic_block
8740 sched_create_empty_bb_1 (basic_block after)
8742 return create_empty_bb (after);
8745 /* Insert PAT as an INSN into the schedule and update the necessary data
8746 structures to account for it. */
8747 rtx_insn *
8748 sched_emit_insn (rtx pat)
8750 rtx_insn *insn = emit_insn_before (pat, first_nonscheduled_insn ());
8751 haifa_init_insn (insn);
8753 if (current_sched_info->add_remove_insn)
8754 current_sched_info->add_remove_insn (insn, 0);
8756 (*current_sched_info->begin_schedule_ready) (insn);
8757 scheduled_insns.safe_push (insn);
8759 last_scheduled_insn = insn;
8760 return insn;
8763 /* This function returns a candidate satisfying dispatch constraints from
8764 the ready list. */
8766 static rtx_insn *
8767 ready_remove_first_dispatch (struct ready_list *ready)
8769 int i;
8770 rtx_insn *insn = ready_element (ready, 0);
8772 if (ready->n_ready == 1
8773 || !INSN_P (insn)
8774 || INSN_CODE (insn) < 0
8775 || !active_insn_p (insn)
8776 || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
8777 return ready_remove_first (ready);
8779 for (i = 1; i < ready->n_ready; i++)
8781 insn = ready_element (ready, i);
8783 if (!INSN_P (insn)
8784 || INSN_CODE (insn) < 0
8785 || !active_insn_p (insn))
8786 continue;
8788 if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
8790 /* Return ith element of ready. */
8791 insn = ready_remove (ready, i);
8792 return insn;
8796 if (targetm.sched.dispatch (NULL, DISPATCH_VIOLATION))
8797 return ready_remove_first (ready);
8799 for (i = 1; i < ready->n_ready; i++)
8801 insn = ready_element (ready, i);
8803 if (!INSN_P (insn)
8804 || INSN_CODE (insn) < 0
8805 || !active_insn_p (insn))
8806 continue;
8808 /* Return i-th element of ready. */
8809 if (targetm.sched.dispatch (insn, IS_CMP))
8810 return ready_remove (ready, i);
8813 return ready_remove_first (ready);
8816 /* Get the number of ready insns in the ready list. */
8818 int
8819 number_in_ready (void)
8821 return ready.n_ready;
8824 /* Return the Ith element of the ready list. */
8826 rtx_insn *
8827 get_ready_element (int i)
8829 return ready_element (&ready, i);
8832 #endif /* INSN_SCHEDULING */