1 /* Instruction scheduling pass.
2 Copyright (C) 1992-2013 Free Software Foundation, Inc.
3 Contributed by Michael Tiemann (tiemann@cygnus.com). Enhanced by,
4 and currently maintained by, Jim Wilson (wilson@cygnus.com)
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* Instruction scheduling pass. This file, along with sched-deps.c,
23 contains the generic parts. The actual entry point for the normal
24 instruction scheduling pass is found in sched-rgn.c.
26 We compute insn priorities based on data dependencies. Flow
27 analysis only creates a fraction of the data-dependencies we must
28 observe: namely, only those dependencies which the combiner can be
29 expected to use. For this pass, we must therefore create the
30 remaining dependencies we need to observe: register dependencies,
31 memory dependencies, dependencies to keep function calls in order,
32 and the dependence between a conditional branch and the setting of
33 condition codes; all of these are dealt with here.
35 The scheduler first traverses the data flow graph, starting with
36 the last instruction, and proceeding to the first, assigning values
37 to insn_priority as it goes. This sorts the instructions
38 topologically by data dependence.
40 Once priorities have been established, we order the insns using
41 list scheduling. This works as follows: starting with a list of
42 all the ready insns, and sorted according to priority number, we
43 schedule the insn from the end of the list by placing its
44 predecessors in the list according to their priority order. We
45 consider this insn scheduled by setting the pointer to the "end" of
46 the list to point to the previous insn. When an insn has no
47 predecessors, we either queue it until sufficient time has elapsed
48 or add it to the ready list. As the instructions are scheduled or
49 when stalls are introduced, the queue advances and dumps insns into
50 the ready list. When all insns down to the lowest priority have
51 been scheduled, the critical path of the basic block has been made
52 as short as possible. The remaining insns are then scheduled in
53 remaining slots.
55 The following list shows the order in which we want to break ties
56 among insns in the ready list:
58 1. choose insn with the longest path to end of bb, ties
59 broken by
60 2. choose insn with least contribution to register pressure,
61 ties broken by
62 3. prefer in-block over interblock motion, ties broken by
63 4. prefer useful over speculative motion, ties broken by
64 5. choose insn with largest control flow probability, ties
65 broken by
66 6. choose insn with the least dependences upon the previously
67 scheduled insn, ties broken by
68 7. choose the insn which has the most insns dependent on it, or finally
69 8. choose the insn with the lowest UID.
71 Memory references complicate matters. Only if we can be certain
72 that memory references are not part of the data dependency graph
73 (via true, anti, or output dependence), can we move operations past
74 memory references. To first approximation, reads can be done
75 independently, while writes introduce dependencies. Better
76 approximations will yield fewer dependencies.
78 Before reload, an extended analysis of interblock data dependences
79 is required for interblock scheduling. This is performed in
80 compute_block_backward_dependences ().
82 Dependencies set up by memory references are treated in exactly the
83 same way as other dependencies, by using insn backward dependences
84 INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences
85 INSN_FORW_DEPS for the purpose of forward list scheduling.
87 Having optimized the critical path, we may have also unduly
88 extended the lifetimes of some registers. If an operation requires
89 that constants be loaded into registers, it is certainly desirable
90 to load those constants as early as necessary, but no earlier.
91 I.e., it will not do to load up a bunch of registers at the
92 beginning of a basic block only to use them at the end, if they
93 could be loaded later, since this may result in excessive register
94 utilization.
96 Note that since branches are never in basic blocks, but only end
97 basic blocks, this pass will not move branches. But that is ok,
98 since we can use GNU's delayed branch scheduling pass to take care
99 of this case.
101 Also note that no further optimizations based on algebraic
102 identities are performed, so this pass would be a good one to
103 perform instruction splitting, such as breaking up a multiply
104 instruction into shifts and adds where that is profitable.
106 Given the memory aliasing analysis that this pass should perform,
107 it should be possible to remove redundant stores to memory, and to
108 load values from registers instead of hitting memory.
110 Before reload, speculative insns are moved only if a 'proof' exists
111 that no exception will be caused by this, and if no live registers
112 exist that inhibit the motion (live registers constraints are not
113 represented by data dependence edges).
115 This pass must update information that subsequent passes expect to
116 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
117 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
119 The information in the line number notes is carefully retained by
120 this pass. Notes that refer to the starting and ending of
121 exception regions are also carefully retained by this pass. All
122 other NOTE insns are grouped in their same relative order at the
123 beginning of basic blocks and regions that have been scheduled. */
125 #include "config.h"
126 #include "system.h"
127 #include "coretypes.h"
128 #include "tm.h"
129 #include "diagnostic-core.h"
130 #include "hard-reg-set.h"
131 #include "rtl.h"
132 #include "tm_p.h"
133 #include "regs.h"
134 #include "function.h"
135 #include "flags.h"
136 #include "insn-config.h"
137 #include "insn-attr.h"
138 #include "except.h"
139 #include "recog.h"
140 #include "sched-int.h"
141 #include "target.h"
142 #include "common/common-target.h"
143 #include "params.h"
144 #include "dbgcnt.h"
145 #include "cfgloop.h"
146 #include "ira.h"
147 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
148 #include "hashtab.h"
149 #include "dumpfile.h"
151 #ifdef INSN_SCHEDULING
153 /* issue_rate is the number of insns that can be scheduled in the same
154 machine cycle. It can be defined in the config/mach/mach.h file,
155 otherwise we set it to 1. */
157 int issue_rate;
159 /* This can be set to true by a backend if the scheduler should not
160 enable a DCE pass. */
161 bool sched_no_dce;
163 /* The current initiation interval used when modulo scheduling. */
164 static int modulo_ii;
166 /* The maximum number of stages we are prepared to handle. */
167 static int modulo_max_stages;
169 /* The number of insns that exist in each iteration of the loop. We use this
170 to detect when we've scheduled all insns from the first iteration. */
171 static int modulo_n_insns;
173 /* The current count of insns in the first iteration of the loop that have
174 already been scheduled. */
175 static int modulo_insns_scheduled;
177 /* The maximum uid of insns from the first iteration of the loop. */
178 static int modulo_iter0_max_uid;
180 /* The number of times we should attempt to backtrack when modulo scheduling.
181 Decreased each time we have to backtrack. */
182 static int modulo_backtracks_left;
184 /* The stage in which the last insn from the original loop was
185 scheduled. */
186 static int modulo_last_stage;
188 /* sched-verbose controls the amount of debugging output the
189 scheduler prints. It is controlled by -fsched-verbose=N:
190 N>0 and no -DSR : the output is directed to stderr.
191 N>=10 will direct the printouts to stderr (regardless of -dSR).
192 N=1: same as -dSR.
193 N=2: bb's probabilities, detailed ready list info, unit/insn info.
194 N=3: rtl at abort point, control-flow, regions info.
195 N=5: dependences info. */
197 int sched_verbose = 0;
199 /* Debugging file. All printouts are sent to dump, which is always set,
200 either to stderr, or to the dump listing file (-dRS). */
201 FILE *sched_dump = 0;
203 /* This is a placeholder for the scheduler parameters common
204 to all schedulers. */
205 struct common_sched_info_def *common_sched_info;
207 #define INSN_TICK(INSN) (HID (INSN)->tick)
208 #define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
209 #define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
210 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
211 #define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
212 #define SHADOW_P(INSN) (HID (INSN)->shadow_p)
213 #define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
214 /* Cached cost of the instruction. Use insn_cost to get cost of the
215 insn. -1 here means that the field is not initialized. */
216 #define INSN_COST(INSN) (HID (INSN)->cost)
218 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
219 then it should be recalculated from scratch. */
220 #define INVALID_TICK (-(max_insn_queue_index + 1))
221 /* The minimal value of the INSN_TICK of an instruction. */
222 #define MIN_TICK (-max_insn_queue_index)
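/* For instance, with a hypothetical max_insn_queue_index of 63, INVALID_TICK
   would be -64 and MIN_TICK -63, so every valid INSN_TICK compares strictly
   greater than INVALID_TICK.  (Illustrative value only; the real bound comes
   from the DFA description.)  */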
224 /* List of important notes we must keep around. This is a pointer to the
225 last element in the list. */
226 rtx note_list;
228 static struct spec_info_def spec_info_var;
229 /* Description of the speculative part of the scheduling.
230 If NULL - no speculation. */
231 spec_info_t spec_info = NULL;
233 /* True if a recovery block was added during scheduling of the current block.
234 Used to determine whether we need to fix INSN_TICKs. */
235 static bool haifa_recovery_bb_recently_added_p;
237 /* True if a recovery block was added during this scheduling pass.
238 Used to determine if we should have empty memory pools of dependencies
239 after finishing the current region. */
240 bool haifa_recovery_bb_ever_added_p;
242 /* Counters of different types of speculative instructions. */
243 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
245 /* Array used in {unlink, restore}_bb_notes. */
246 static rtx *bb_header = 0;
248 /* Basic block after which recovery blocks will be created. */
249 static basic_block before_recovery;
251 /* Basic block just before the EXIT_BLOCK and after recovery, if we have
252 created it. */
253 basic_block after_recovery;
255 /* FALSE if we add bb to another region, so we don't need to initialize it. */
256 bool adding_bb_to_current_region_p = true;
258 /* Queues, etc. */
260 /* An instruction is ready to be scheduled when all insns preceding it
261 have already been scheduled. It is important to ensure that all
262 insns which use its result will not be executed until its result
263 has been computed. An insn is maintained in one of four structures:
265 (P) the "Pending" set of insns which cannot be scheduled until
266 their dependencies have been satisfied.
267 (Q) the "Queued" set of insns that can be scheduled when sufficient
268 time has passed.
269 (R) the "Ready" list of unscheduled, uncommitted insns.
270 (S) the "Scheduled" list of insns.
272 Initially, all insns are either "Pending" or "Ready" depending on
273 whether their dependencies are satisfied.
275 Insns move from the "Ready" list to the "Scheduled" list as they
276 are committed to the schedule. As this occurs, the insns in the
277 "Pending" list have their dependencies satisfied and move to either
278 the "Ready" list or the "Queued" set depending on whether
279 sufficient time has passed to make them ready. As time passes,
280 insns move from the "Queued" set to the "Ready" list.
282 The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
283 unscheduled insns, i.e., those that are ready, queued, and pending.
284 The "Queued" set (Q) is implemented by the variable `insn_queue'.
285 The "Ready" list (R) is implemented by the variables `ready' and
286 `n_ready'.
287 The "Scheduled" list (S) is the new insn chain built by this pass.
289 The transition (R->S) is implemented in the scheduling loop in
290 `schedule_block' when the best insn to schedule is chosen.
291 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
292 insns move from the ready list to the scheduled list.
293 The transition (Q->R) is implemented in 'queue_to_ready' as time
294 passes or stalls are introduced. */
296 /* Implement a circular buffer to delay instructions until sufficient
297 time has passed. For the new pipeline description interface,
298 MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
299 than the maximal time of instruction execution computed by genattr.c,
300 based on the maximal time of functional unit reservations and of
301 getting a result. This is the longest time an insn may be queued. */
303 static rtx *insn_queue;
304 static int q_ptr = 0;
305 static int q_size = 0;
306 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
307 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
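/* A small sketch of the wrap-around behaviour, assuming an illustrative
   max_insn_queue_index of 63 (a power of two minus one): NEXT_Q (63) yields 0
   and NEXT_Q_AFTER (60, 6) yields 2, so queue slots always stay within
   [0, max_insn_queue_index].  */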
309 #define QUEUE_SCHEDULED (-3)
310 #define QUEUE_NOWHERE (-2)
311 #define QUEUE_READY (-1)
312 /* QUEUE_SCHEDULED - INSN is scheduled.
313 QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in
314 queue or ready list.
315 QUEUE_READY - INSN is in ready list.
316 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
318 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
320 /* The following variable value refers to all current and future
321 reservations of the processor units. */
322 state_t curr_state;
324 /* The following variable value is size of memory representing all
325 current and future reservations of the processor units. */
326 size_t dfa_state_size;
328 /* The following array is used to find the best insn from ready when
329 the automaton pipeline interface is used. */
330 char *ready_try = NULL;
332 /* The ready list. */
333 struct ready_list ready = {NULL, 0, 0, 0, 0};
335 /* The pointer to the ready list (to be removed). */
336 static struct ready_list *readyp = &ready;
338 /* Scheduling clock. */
339 static int clock_var;
341 /* Clock at which the previous instruction was issued. */
342 static int last_clock_var;
344 /* Set to true if, when queuing a shadow insn, we discover that it would be
345 scheduled too late. */
346 static bool must_backtrack;
348 /* The following variable value is the number of essential insns issued on
349 the current cycle. An insn is an essential one if it changes the
350 processor's state. */
351 int cycle_issued_insns;
353 /* This records the actual schedule. It is built up during the main phase
354 of schedule_block, and afterwards used to reorder the insns in the RTL. */
355 static vec<rtx> scheduled_insns;
357 static int may_trap_exp (const_rtx, int);
359 /* Nonzero iff the address is composed of at most 1 register. */
360 #define CONST_BASED_ADDRESS_P(x) \
361 (REG_P (x) \
362 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
363 || (GET_CODE (x) == LO_SUM)) \
364 && (CONSTANT_P (XEXP (x, 0)) \
365 || CONSTANT_P (XEXP (x, 1)))))
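/* For example, an address such as (plus (reg) (const_int 8)) or a bare (reg)
   satisfies CONST_BASED_ADDRESS_P, while (plus (reg) (reg)) does not, since
   neither operand of the PLUS is constant.  */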
367 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
368 as found by analyzing insn's expression. */
371 static int haifa_luid_for_non_insn (rtx x);
373 /* Haifa version of sched_info hooks common to all headers. */
374 const struct common_sched_info_def haifa_common_sched_info =
376 NULL, /* fix_recovery_cfg */
377 NULL, /* add_block */
378 NULL, /* estimate_number_of_insns */
379 haifa_luid_for_non_insn, /* luid_for_non_insn */
380 SCHED_PASS_UNKNOWN /* sched_pass_id */
383 /* Mapping from instruction UID to its Logical UID. */
384 vec<int> sched_luids = vNULL;
386 /* Next LUID to assign to an instruction. */
387 int sched_max_luid = 1;
389 /* Haifa Instruction Data. */
390 vec<haifa_insn_data_def> h_i_d = vNULL;
392 void (* sched_init_only_bb) (basic_block, basic_block);
394 /* Split block function. Different schedulers might use different functions
395 to keep their internal data consistent. */
396 basic_block (* sched_split_block) (basic_block, rtx);
398 /* Create empty basic block after the specified block. */
399 basic_block (* sched_create_empty_bb) (basic_block);
401 /* Return the number of cycles until INSN is expected to be ready.
402 Return zero if it already is. */
403 static int
404 insn_delay (rtx insn)
406 return MAX (INSN_TICK (insn) - clock_var, 0);
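/* E.g. an insn whose INSN_TICK is 7 while clock_var is 5 is expected to
   become ready in 2 cycles; if its tick is already in the past, the delay
   is 0.  */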
409 static int
410 may_trap_exp (const_rtx x, int is_store)
412 enum rtx_code code;
414 if (x == 0)
415 return TRAP_FREE;
416 code = GET_CODE (x);
417 if (is_store)
419 if (code == MEM && may_trap_p (x))
420 return TRAP_RISKY;
421 else
422 return TRAP_FREE;
424 if (code == MEM)
426 /* The insn uses memory: a volatile load. */
427 if (MEM_VOLATILE_P (x))
428 return IRISKY;
429 /* An exception-free load. */
430 if (!may_trap_p (x))
431 return IFREE;
432 /* A load with 1 base register, to be further checked. */
433 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
434 return PFREE_CANDIDATE;
435 /* No info on the load, to be further checked. */
436 return PRISKY_CANDIDATE;
438 else
440 const char *fmt;
441 int i, insn_class = TRAP_FREE;
443 /* Neither store nor load, check if it may cause a trap. */
444 if (may_trap_p (x))
445 return TRAP_RISKY;
446 /* Recursive step: walk the insn... */
447 fmt = GET_RTX_FORMAT (code);
448 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
450 if (fmt[i] == 'e')
452 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
453 insn_class = WORST_CLASS (insn_class, tmp_class);
455 else if (fmt[i] == 'E')
457 int j;
458 for (j = 0; j < XVECLEN (x, i); j++)
460 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
461 insn_class = WORST_CLASS (insn_class, tmp_class);
462 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
463 break;
466 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
467 break;
469 return insn_class;
473 /* Classifies rtx X of an insn for the purpose of verifying that X can be
474 executed speculatively (and consequently the insn can be moved
475 speculatively), by examining X, returning:
476 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
477 TRAP_FREE: non-load insn.
478 IFREE: load from a globally safe location.
479 IRISKY: volatile load.
480 PFREE_CANDIDATE, PRISKY_CANDIDATE: a load that needs to be checked for
481 being either PFREE or PRISKY. */
483 static int
484 haifa_classify_rtx (const_rtx x)
486 int tmp_class = TRAP_FREE;
487 int insn_class = TRAP_FREE;
488 enum rtx_code code;
490 if (GET_CODE (x) == PARALLEL)
492 int i, len = XVECLEN (x, 0);
494 for (i = len - 1; i >= 0; i--)
496 tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
497 insn_class = WORST_CLASS (insn_class, tmp_class);
498 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
499 break;
502 else
504 code = GET_CODE (x);
505 switch (code)
507 case CLOBBER:
508 /* Test if it is a 'store'. */
509 tmp_class = may_trap_exp (XEXP (x, 0), 1);
510 break;
511 case SET:
512 /* Test if it is a store. */
513 tmp_class = may_trap_exp (SET_DEST (x), 1);
514 if (tmp_class == TRAP_RISKY)
515 break;
516 /* Test if it is a load. */
517 tmp_class =
518 WORST_CLASS (tmp_class,
519 may_trap_exp (SET_SRC (x), 0));
520 break;
521 case COND_EXEC:
522 tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
523 if (tmp_class == TRAP_RISKY)
524 break;
525 tmp_class = WORST_CLASS (tmp_class,
526 may_trap_exp (COND_EXEC_TEST (x), 0));
527 break;
528 case TRAP_IF:
529 tmp_class = TRAP_RISKY;
530 break;
531 default:;
533 insn_class = tmp_class;
536 return insn_class;
540 haifa_classify_insn (const_rtx insn)
542 return haifa_classify_rtx (PATTERN (insn));
545 /* After the scheduler initialization function has been called, this function
546 can be called to enable modulo scheduling. II is the initiation interval
547 we should use; it affects the delays for delay_pairs that were recorded as
548 separated by a given number of stages.
550 MAX_STAGES provides us with a limit
551 after which we give up scheduling; the caller must have unrolled at least
552 as many copies of the loop body and recorded delay_pairs for them.
554 INSNS is the number of real (non-debug) insns in one iteration of
555 the loop. MAX_UID can be used to test whether an insn belongs to
556 the first iteration of the loop; all of them have a uid lower than
557 MAX_UID. */
558 void
559 set_modulo_params (int ii, int max_stages, int insns, int max_uid)
561 modulo_ii = ii;
562 modulo_max_stages = max_stages;
563 modulo_n_insns = insns;
564 modulo_iter0_max_uid = max_uid;
565 modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS);
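/* A hypothetical caller doing software pipelining with an initiation interval
   of 4, prepared to handle at most 3 stages, with 10 insns per iteration and
   first-iteration UIDs below 200, might therefore call
   set_modulo_params (4, 3, 10, 200) before invoking the scheduler.  (The
   numbers are for illustration only.)  */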
568 /* A structure to record a pair of insns where the first one is a real
569 insn that has delay slots, and the second is its delayed shadow.
570 I1 is scheduled normally and will emit an assembly instruction,
571 while I2 describes the side effect that takes place at the
572 transition between cycles CYCLES and (CYCLES + 1) after I1. */
573 struct delay_pair
575 struct delay_pair *next_same_i1;
576 rtx i1, i2;
577 int cycles;
578 /* When doing modulo scheduling, a delay_pair can also be used to
579 show that I1 and I2 are the same insn in a different stage. If that
580 is the case, STAGES will be nonzero. */
581 int stages;
584 /* Two hash tables to record delay_pairs, one indexed by I1 and the other
585 indexed by I2. */
586 static htab_t delay_htab;
587 static htab_t delay_htab_i2;
589 /* Called through htab_traverse. Walk the hashtable using I2 as
590 index, and delete all elements involving an UID higher than
591 that pointed to by *DATA. */
592 static int
593 htab_i2_traverse (void **slot, void *data)
595 int maxuid = *(int *)data;
596 struct delay_pair *p = *(struct delay_pair **)slot;
597 if (INSN_UID (p->i2) >= maxuid || INSN_UID (p->i1) >= maxuid)
599 htab_clear_slot (delay_htab_i2, slot);
601 return 1;
604 /* Called through htab_traverse. Walk the hashtable using I1 as
605 index, and delete all elements involving an UID higher than
606 that pointed to by *DATA. */
607 static int
608 htab_i1_traverse (void **slot, void *data)
610 int maxuid = *(int *)data;
611 struct delay_pair **pslot = (struct delay_pair **)slot;
612 struct delay_pair *p, *first, **pprev;
614 if (INSN_UID ((*pslot)->i1) >= maxuid)
616 htab_clear_slot (delay_htab, slot);
617 return 1;
619 pprev = &first;
620 for (p = *pslot; p; p = p->next_same_i1)
622 if (INSN_UID (p->i2) < maxuid)
624 *pprev = p;
625 pprev = &p->next_same_i1;
628 *pprev = NULL;
629 if (first == NULL)
630 htab_clear_slot (delay_htab, slot);
631 else
632 *pslot = first;
633 return 1;
636 /* Discard all delay pairs which involve an insn with an UID higher
637 than MAX_UID. */
638 void
639 discard_delay_pairs_above (int max_uid)
641 htab_traverse (delay_htab, htab_i1_traverse, &max_uid);
642 htab_traverse (delay_htab_i2, htab_i2_traverse, &max_uid);
645 /* Returns a hash value for X (which really is a delay_pair), based on
646 hashing just I1. */
647 static hashval_t
648 delay_hash_i1 (const void *x)
650 return htab_hash_pointer (((const struct delay_pair *) x)->i1);
653 /* Returns a hash value for X (which really is a delay_pair), based on
654 hashing just I2. */
655 static hashval_t
656 delay_hash_i2 (const void *x)
658 return htab_hash_pointer (((const struct delay_pair *) x)->i2);
661 /* Return nonzero if I1 of pair X is the same as insn Y. */
662 static int
663 delay_i1_eq (const void *x, const void *y)
665 return ((const struct delay_pair *) x)->i1 == y;
668 /* Return nonzero if I2 of pair X is the same as insn Y. */
669 static int
670 delay_i2_eq (const void *x, const void *y)
672 return ((const struct delay_pair *) x)->i2 == y;
675 /* This function can be called by a port just before it starts the final
676 scheduling pass. It records the fact that an instruction with delay
677 slots has been split into two insns, I1 and I2. The first one will be
678 scheduled normally and initiates the operation. The second one is a
679 shadow which must follow a specific number of cycles after I1; its only
680 purpose is to show the side effect that occurs at that cycle in the RTL.
681 If a JUMP_INSN or a CALL_INSN has been split, I1 should be a normal INSN,
682 while I2 retains the original insn type.
684 There are two ways in which the number of cycles can be specified,
685 involving the CYCLES and STAGES arguments to this function. If STAGES
686 is zero, we just use the value of CYCLES. Otherwise, STAGES is a factor
687 which is multiplied by MODULO_II to give the number of cycles. This is
688 only useful if the caller also calls set_modulo_params to enable modulo
689 scheduling. */
691 void
692 record_delay_slot_pair (rtx i1, rtx i2, int cycles, int stages)
694 struct delay_pair *p = XNEW (struct delay_pair);
695 struct delay_pair **slot;
697 p->i1 = i1;
698 p->i2 = i2;
699 p->cycles = cycles;
700 p->stages = stages;
702 if (!delay_htab)
704 delay_htab = htab_create (10, delay_hash_i1, delay_i1_eq, NULL);
705 delay_htab_i2 = htab_create (10, delay_hash_i2, delay_i2_eq, free);
707 slot = ((struct delay_pair **)
708 htab_find_slot_with_hash (delay_htab, i1, htab_hash_pointer (i1),
709 INSERT));
710 p->next_same_i1 = *slot;
711 *slot = p;
712 slot = ((struct delay_pair **)
713 htab_find_slot_with_hash (delay_htab_i2, i2, htab_hash_pointer (i2),
714 INSERT));
715 *slot = p;
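/* As an illustration (not taken from any particular port): a target whose
   delayed branch has been split into a normal insn I1 and a shadow I2 that
   takes effect two cycles later could record the pair with
   record_delay_slot_pair (i1, i2, 2, 0); passing a nonzero STAGES instead
   would express the delay as a multiple of MODULO_II, as described above.  */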
718 /* Examine the delay pair hashtable to see if INSN is a shadow for another,
719 and return the other insn if so. Return NULL otherwise. */
721 real_insn_for_shadow (rtx insn)
723 struct delay_pair *pair;
725 if (delay_htab == NULL)
726 return NULL_RTX;
728 pair
729 = (struct delay_pair *)htab_find_with_hash (delay_htab_i2, insn,
730 htab_hash_pointer (insn));
731 if (!pair || pair->stages > 0)
732 return NULL_RTX;
733 return pair->i1;
736 /* For a pair P of insns, return the fixed distance in cycles from the first
737 insn after which the second must be scheduled. */
738 static int
739 pair_delay (struct delay_pair *p)
741 if (p->stages == 0)
742 return p->cycles;
743 else
744 return p->stages * modulo_ii;
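/* For instance, a pair recorded with STAGES == 2 while modulo_ii is 4 has a
   fixed delay of 8 cycles, whereas a non-modulo pair simply uses its CYCLES
   field.  (Illustrative numbers.)  */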
747 /* Given an insn INSN, add a dependence on its delayed shadow if it
748 has one. Also try to find situations where shadows depend on each other
749 and add dependencies to the real insns to limit the amount of backtracking
750 needed. */
751 void
752 add_delay_dependencies (rtx insn)
754 struct delay_pair *pair;
755 sd_iterator_def sd_it;
756 dep_t dep;
758 if (!delay_htab)
759 return;
761 pair
762 = (struct delay_pair *)htab_find_with_hash (delay_htab_i2, insn,
763 htab_hash_pointer (insn));
764 if (!pair)
765 return;
766 add_dependence (insn, pair->i1, REG_DEP_ANTI);
767 if (pair->stages)
768 return;
770 FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep)
772 rtx pro = DEP_PRO (dep);
773 struct delay_pair *other_pair
774 = (struct delay_pair *)htab_find_with_hash (delay_htab_i2, pro,
775 htab_hash_pointer (pro));
776 if (!other_pair || other_pair->stages)
777 continue;
778 if (pair_delay (other_pair) >= pair_delay (pair))
780 if (sched_verbose >= 4)
782 fprintf (sched_dump, ";;\tadding dependence %d <- %d\n",
783 INSN_UID (other_pair->i1),
784 INSN_UID (pair->i1));
785 fprintf (sched_dump, ";;\tpair1 %d <- %d, cost %d\n",
786 INSN_UID (pair->i1),
787 INSN_UID (pair->i2),
788 pair_delay (pair));
789 fprintf (sched_dump, ";;\tpair2 %d <- %d, cost %d\n",
790 INSN_UID (other_pair->i1),
791 INSN_UID (other_pair->i2),
792 pair_delay (other_pair));
794 add_dependence (pair->i1, other_pair->i1, REG_DEP_ANTI);
799 /* Forward declarations. */
801 static int priority (rtx);
802 static int rank_for_schedule (const void *, const void *);
803 static void swap_sort (rtx *, int);
804 static void queue_insn (rtx, int, const char *);
805 static int schedule_insn (rtx);
806 static void adjust_priority (rtx);
807 static void advance_one_cycle (void);
808 static void extend_h_i_d (void);
811 /* Notes handling mechanism:
812 =========================
813 Generally, NOTES are saved before scheduling and restored after scheduling.
814 The scheduler distinguishes between two types of notes:
816 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
817 Before scheduling a region, a pointer to the note is added to the insn
818 that follows or precedes it. (This happens as part of the data dependence
819 computation). After scheduling an insn, the pointer contained in it is
820 used for regenerating the corresponding note (in reemit_notes).
822 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
823 these notes are put in a list (in rm_other_notes() and
824 unlink_other_notes ()). After scheduling the block, these notes are
825 inserted at the beginning of the block (in schedule_block()). */
827 static void ready_add (struct ready_list *, rtx, bool);
828 static rtx ready_remove_first (struct ready_list *);
829 static rtx ready_remove_first_dispatch (struct ready_list *ready);
831 static void queue_to_ready (struct ready_list *);
832 static int early_queue_to_ready (state_t, struct ready_list *);
834 static void debug_ready_list (struct ready_list *);
836 /* The following functions are used to implement multi-pass scheduling
837 on the first cycle. */
838 static rtx ready_remove (struct ready_list *, int);
839 static void ready_remove_insn (rtx);
841 static void fix_inter_tick (rtx, rtx);
842 static int fix_tick_ready (rtx);
843 static void change_queue_index (rtx, int);
845 /* The following functions are used to implement scheduling of data/control
846 speculative instructions. */
848 static void extend_h_i_d (void);
849 static void init_h_i_d (rtx);
850 static int haifa_speculate_insn (rtx, ds_t, rtx *);
851 static void generate_recovery_code (rtx);
852 static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
853 static void begin_speculative_block (rtx);
854 static void add_to_speculative_block (rtx);
855 static void init_before_recovery (basic_block *);
856 static void create_check_block_twin (rtx, bool);
857 static void fix_recovery_deps (basic_block);
858 static bool haifa_change_pattern (rtx, rtx);
859 static void dump_new_block_header (int, basic_block, rtx, rtx);
860 static void restore_bb_notes (basic_block);
861 static void fix_jump_move (rtx);
862 static void move_block_after_check (rtx);
863 static void move_succs (vec<edge, va_gc> **, basic_block);
864 static void sched_remove_insn (rtx);
865 static void clear_priorities (rtx, rtx_vec_t *);
866 static void calc_priorities (rtx_vec_t);
867 static void add_jump_dependencies (rtx, rtx);
869 #endif /* INSN_SCHEDULING */
871 /* Point to state used for the current scheduling pass. */
872 struct haifa_sched_info *current_sched_info;
874 #ifndef INSN_SCHEDULING
875 void
876 schedule_insns (void)
879 #else
881 /* Do register-pressure-sensitive insn scheduling if the flag is
882 set. */
883 enum sched_pressure_algorithm sched_pressure;
885 /* Map regno -> its pressure class. The map defined only when
886 SCHED_PRESSURE != SCHED_PRESSURE_NONE. */
887 enum reg_class *sched_regno_pressure_class;
889 /* The current register pressure. Only elements corresponding to pressure
890 classes are defined. */
891 static int curr_reg_pressure[N_REG_CLASSES];
893 /* Saved value of the previous array. */
894 static int saved_reg_pressure[N_REG_CLASSES];
896 /* Registers live at the given scheduling point. */
897 static bitmap curr_reg_live;
899 /* Saved value of the previous array. */
900 static bitmap saved_reg_live;
902 /* Registers mentioned in the current region. */
903 static bitmap region_ref_regs;
905 /* Initiate register pressure related info for scheduling the current
906 region. Currently it only clears the registers mentioned in the
907 current region. */
908 void
909 sched_init_region_reg_pressure_info (void)
911 bitmap_clear (region_ref_regs);
914 /* PRESSURE[CL] describes the pressure on register class CL. Update it
915 for the birth (if BIRTH_P) or death (if !BIRTH_P) of register REGNO.
916 LIVE tracks the set of live registers; if it is null, assume that
917 every birth or death is genuine. */
918 static inline void
919 mark_regno_birth_or_death (bitmap live, int *pressure, int regno, bool birth_p)
921 enum reg_class pressure_class;
923 pressure_class = sched_regno_pressure_class[regno];
924 if (regno >= FIRST_PSEUDO_REGISTER)
926 if (pressure_class != NO_REGS)
928 if (birth_p)
930 if (!live || bitmap_set_bit (live, regno))
931 pressure[pressure_class]
932 += (ira_reg_class_max_nregs
933 [pressure_class][PSEUDO_REGNO_MODE (regno)]);
935 else
937 if (!live || bitmap_clear_bit (live, regno))
938 pressure[pressure_class]
939 -= (ira_reg_class_max_nregs
940 [pressure_class][PSEUDO_REGNO_MODE (regno)]);
944 else if (pressure_class != NO_REGS
945 && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
947 if (birth_p)
949 if (!live || bitmap_set_bit (live, regno))
950 pressure[pressure_class]++;
952 else
954 if (!live || bitmap_clear_bit (live, regno))
955 pressure[pressure_class]--;
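/* As a rough illustration: assuming a 32-bit target where a DImode pseudo
   needs two hard registers of its pressure class, the birth of such a pseudo
   adds ira_reg_class_max_nregs[cl][DImode] == 2 to pressure[cl], while the
   birth of a single allocatable hard register always adds 1.  */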
960 /* Initiate current register pressure related info from living
961 registers given by LIVE. */
962 static void
963 initiate_reg_pressure_info (bitmap live)
965 int i;
966 unsigned int j;
967 bitmap_iterator bi;
969 for (i = 0; i < ira_pressure_classes_num; i++)
970 curr_reg_pressure[ira_pressure_classes[i]] = 0;
971 bitmap_clear (curr_reg_live);
972 EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
973 if (sched_pressure == SCHED_PRESSURE_MODEL
974 || current_nr_blocks == 1
975 || bitmap_bit_p (region_ref_regs, j))
976 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure, j, true);
979 /* Mark registers in X as mentioned in the current region. */
980 static void
981 setup_ref_regs (rtx x)
983 int i, j, regno;
984 const RTX_CODE code = GET_CODE (x);
985 const char *fmt;
987 if (REG_P (x))
989 regno = REGNO (x);
990 if (HARD_REGISTER_NUM_P (regno))
991 bitmap_set_range (region_ref_regs, regno,
992 hard_regno_nregs[regno][GET_MODE (x)]);
993 else
994 bitmap_set_bit (region_ref_regs, REGNO (x));
995 return;
997 fmt = GET_RTX_FORMAT (code);
998 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
999 if (fmt[i] == 'e')
1000 setup_ref_regs (XEXP (x, i));
1001 else if (fmt[i] == 'E')
1003 for (j = 0; j < XVECLEN (x, i); j++)
1004 setup_ref_regs (XVECEXP (x, i, j));
1008 /* Initiate current register pressure related info at the start of
1009 basic block BB. */
1010 static void
1011 initiate_bb_reg_pressure_info (basic_block bb)
1013 unsigned int i ATTRIBUTE_UNUSED;
1014 rtx insn;
1016 if (current_nr_blocks > 1)
1017 FOR_BB_INSNS (bb, insn)
1018 if (NONDEBUG_INSN_P (insn))
1019 setup_ref_regs (PATTERN (insn));
1020 initiate_reg_pressure_info (df_get_live_in (bb));
1021 #ifdef EH_RETURN_DATA_REGNO
1022 if (bb_has_eh_pred (bb))
1023 for (i = 0; ; ++i)
1025 unsigned int regno = EH_RETURN_DATA_REGNO (i);
1027 if (regno == INVALID_REGNUM)
1028 break;
1029 if (! bitmap_bit_p (df_get_live_in (bb), regno))
1030 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
1031 regno, true);
1033 #endif
1036 /* Save current register pressure related info. */
1037 static void
1038 save_reg_pressure (void)
1040 int i;
1042 for (i = 0; i < ira_pressure_classes_num; i++)
1043 saved_reg_pressure[ira_pressure_classes[i]]
1044 = curr_reg_pressure[ira_pressure_classes[i]];
1045 bitmap_copy (saved_reg_live, curr_reg_live);
1048 /* Restore saved register pressure related info. */
1049 static void
1050 restore_reg_pressure (void)
1052 int i;
1054 for (i = 0; i < ira_pressure_classes_num; i++)
1055 curr_reg_pressure[ira_pressure_classes[i]]
1056 = saved_reg_pressure[ira_pressure_classes[i]];
1057 bitmap_copy (curr_reg_live, saved_reg_live);
1060 /* Return TRUE if the register is dying after its USE. */
1061 static bool
1062 dying_use_p (struct reg_use_data *use)
1064 struct reg_use_data *next;
1066 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
1067 if (NONDEBUG_INSN_P (next->insn)
1068 && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
1069 return false;
1070 return true;
1073 /* Print info about the current register pressure and its excess for
1074 each pressure class. */
1075 static void
1076 print_curr_reg_pressure (void)
1078 int i;
1079 enum reg_class cl;
1081 fprintf (sched_dump, ";;\t");
1082 for (i = 0; i < ira_pressure_classes_num; i++)
1084 cl = ira_pressure_classes[i];
1085 gcc_assert (curr_reg_pressure[cl] >= 0);
1086 fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl],
1087 curr_reg_pressure[cl],
1088 curr_reg_pressure[cl] - ira_class_hard_regs_num[cl]);
1090 fprintf (sched_dump, "\n");
1093 /* Determine if INSN has a condition that is clobbered if a register
1094 in SET_REGS is modified. */
1095 static bool
1096 cond_clobbered_p (rtx insn, HARD_REG_SET set_regs)
1098 rtx pat = PATTERN (insn);
1099 gcc_assert (GET_CODE (pat) == COND_EXEC);
1100 if (TEST_HARD_REG_BIT (set_regs, REGNO (XEXP (COND_EXEC_TEST (pat), 0))))
1102 sd_iterator_def sd_it;
1103 dep_t dep;
1104 haifa_change_pattern (insn, ORIG_PAT (insn));
1105 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1106 DEP_STATUS (dep) &= ~DEP_CANCELLED;
1107 TODO_SPEC (insn) = HARD_DEP;
1108 if (sched_verbose >= 2)
1109 fprintf (sched_dump,
1110 ";;\t\tdequeue insn %s because of clobbered condition\n",
1111 (*current_sched_info->print_insn) (insn, 0));
1112 return true;
1115 return false;
1118 /* This function should be called after modifying the pattern of INSN,
1119 to update scheduler data structures as needed. */
1120 static void
1121 update_insn_after_change (rtx insn)
1123 sd_iterator_def sd_it;
1124 dep_t dep;
1126 dfa_clear_single_insn_cache (insn);
1128 sd_it = sd_iterator_start (insn,
1129 SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK);
1130 while (sd_iterator_cond (&sd_it, &dep))
1132 DEP_COST (dep) = UNKNOWN_DEP_COST;
1133 sd_iterator_next (&sd_it);
1136 /* Invalidate INSN_COST, so it'll be recalculated. */
1137 INSN_COST (insn) = -1;
1138 /* Invalidate INSN_TICK, so it'll be recalculated. */
1139 INSN_TICK (insn) = INVALID_TICK;
1143 /* Two VECs, one to hold dependencies for which pattern replacements
1144 need to be applied or restored at the start of the next cycle, and
1145 another to hold an integer that is either one, to apply the
1146 corresponding replacement, or zero to restore it. */
1147 static vec<dep_t> next_cycle_replace_deps;
1148 static vec<int> next_cycle_apply;
1150 static void apply_replacement (dep_t, bool);
1151 static void restore_pattern (dep_t, bool);
1153 /* Look at the remaining dependencies for insn NEXT, and compute and return
1154 the TODO_SPEC value we should use for it. This is called after one of
1155 NEXT's dependencies has been resolved.
1156 We also perform pattern replacements for predication, and for broken
1157 replacement dependencies. The latter is only done if FOR_BACKTRACK is
1158 false. */
1160 static ds_t
1161 recompute_todo_spec (rtx next, bool for_backtrack)
1163 ds_t new_ds;
1164 sd_iterator_def sd_it;
1165 dep_t dep, modify_dep = NULL;
1166 int n_spec = 0;
1167 int n_control = 0;
1168 int n_replace = 0;
1169 bool first_p = true;
1171 if (sd_lists_empty_p (next, SD_LIST_BACK))
1172 /* NEXT has all its dependencies resolved. */
1173 return 0;
1175 if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
1176 return HARD_DEP;
1178 /* Now we've got NEXT with speculative deps only.
1179 1. Look at the deps to see what we have to do.
1180 2. Check if we can do 'todo'. */
1181 new_ds = 0;
1183 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
1185 rtx pro = DEP_PRO (dep);
1186 ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
1188 if (DEBUG_INSN_P (pro) && !DEBUG_INSN_P (next))
1189 continue;
1191 if (ds)
1193 n_spec++;
1194 if (first_p)
1196 first_p = false;
1198 new_ds = ds;
1200 else
1201 new_ds = ds_merge (new_ds, ds);
1203 else if (DEP_TYPE (dep) == REG_DEP_CONTROL)
1205 if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
1207 n_control++;
1208 modify_dep = dep;
1210 DEP_STATUS (dep) &= ~DEP_CANCELLED;
1212 else if (DEP_REPLACE (dep) != NULL)
1214 if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
1216 n_replace++;
1217 modify_dep = dep;
1219 DEP_STATUS (dep) &= ~DEP_CANCELLED;
1223 if (n_replace > 0 && n_control == 0 && n_spec == 0)
1225 if (!dbg_cnt (sched_breakdep))
1226 return HARD_DEP;
1227 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
1229 struct dep_replacement *desc = DEP_REPLACE (dep);
1230 if (desc != NULL)
1232 if (desc->insn == next && !for_backtrack)
1234 gcc_assert (n_replace == 1);
1235 apply_replacement (dep, true);
1237 DEP_STATUS (dep) |= DEP_CANCELLED;
1240 return 0;
1243 else if (n_control == 1 && n_replace == 0 && n_spec == 0)
1245 rtx pro, other, new_pat;
1246 rtx cond = NULL_RTX;
1247 bool success;
1248 rtx prev = NULL_RTX;
1249 int i;
1250 unsigned regno;
1252 if ((current_sched_info->flags & DO_PREDICATION) == 0
1253 || (ORIG_PAT (next) != NULL_RTX
1254 && PREDICATED_PAT (next) == NULL_RTX))
1255 return HARD_DEP;
1257 pro = DEP_PRO (modify_dep);
1258 other = real_insn_for_shadow (pro);
1259 if (other != NULL_RTX)
1260 pro = other;
1262 cond = sched_get_reverse_condition_uncached (pro);
1263 regno = REGNO (XEXP (cond, 0));
1265 /* Find the last scheduled insn that modifies the condition register.
1266 We can stop looking once we find the insn we depend on through the
1267 REG_DEP_CONTROL; if the condition register isn't modified after it,
1268 we know that it still has the right value. */
1269 if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
1270 FOR_EACH_VEC_ELT_REVERSE (scheduled_insns, i, prev)
1272 HARD_REG_SET t;
1274 find_all_hard_reg_sets (prev, &t);
1275 if (TEST_HARD_REG_BIT (t, regno))
1276 return HARD_DEP;
1277 if (prev == pro)
1278 break;
1280 if (ORIG_PAT (next) == NULL_RTX)
1282 ORIG_PAT (next) = PATTERN (next);
1284 new_pat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (next));
1285 success = haifa_change_pattern (next, new_pat);
1286 if (!success)
1287 return HARD_DEP;
1288 PREDICATED_PAT (next) = new_pat;
1290 else if (PATTERN (next) != PREDICATED_PAT (next))
1292 bool success = haifa_change_pattern (next,
1293 PREDICATED_PAT (next));
1294 gcc_assert (success);
1296 DEP_STATUS (modify_dep) |= DEP_CANCELLED;
1297 return DEP_CONTROL;
1300 if (PREDICATED_PAT (next) != NULL_RTX)
1302 int tick = INSN_TICK (next);
1303 bool success = haifa_change_pattern (next,
1304 ORIG_PAT (next));
1305 INSN_TICK (next) = tick;
1306 gcc_assert (success);
1309 /* We can't handle the case where there are both speculative and control
1310 dependencies, so we return HARD_DEP in such a case. Also fail if
1311 we have speculative dependencies with not enough points, or more than
1312 one control dependency. */
1313 if ((n_spec > 0 && (n_control > 0 || n_replace > 0))
1314 || (n_spec > 0
1315 /* Too few points? */
1316 && ds_weak (new_ds) < spec_info->data_weakness_cutoff)
1317 || n_control > 0
1318 || n_replace > 0)
1319 return HARD_DEP;
1321 return new_ds;
1324 /* Pointer to the last instruction scheduled. */
1325 static rtx last_scheduled_insn;
1327 /* Pointer to the last nondebug instruction scheduled within the
1328 block, or the prev_head of the scheduling block. Used by
1329 rank_for_schedule, so that insns independent of the last scheduled
1330 insn will be preferred over dependent instructions. */
1331 static rtx last_nondebug_scheduled_insn;
1333 /* Pointer that iterates through the list of unscheduled insns if we
1334 have a dbg_cnt enabled. It always points at an insn prior to the
1335 first unscheduled one. */
1336 static rtx nonscheduled_insns_begin;
1338 /* Compute cost of executing INSN.
1339 This is the number of cycles between instruction issue and
1340 instruction results. */
1342 insn_cost (rtx insn)
1344 int cost;
1346 if (sel_sched_p ())
1348 if (recog_memoized (insn) < 0)
1349 return 0;
1351 cost = insn_default_latency (insn);
1352 if (cost < 0)
1353 cost = 0;
1355 return cost;
1358 cost = INSN_COST (insn);
1360 if (cost < 0)
1362 /* A USE insn, or something else we don't need to
1363 understand. We can't pass these directly to
1364 result_ready_cost or insn_default_latency because it will
1365 trigger a fatal error for unrecognizable insns. */
1366 if (recog_memoized (insn) < 0)
1368 INSN_COST (insn) = 0;
1369 return 0;
1371 else
1373 cost = insn_default_latency (insn);
1374 if (cost < 0)
1375 cost = 0;
1377 INSN_COST (insn) = cost;
1381 return cost;
1384 /* Compute cost of dependence LINK.
1385 This is the number of cycles between instruction issue and
1386 instruction results.
1387 ??? We also use this function to call recog_memoized on all insns. */
1389 dep_cost_1 (dep_t link, dw_t dw)
1391 rtx insn = DEP_PRO (link);
1392 rtx used = DEP_CON (link);
1393 int cost;
1395 if (DEP_COST (link) != UNKNOWN_DEP_COST)
1396 return DEP_COST (link);
1398 if (delay_htab)
1400 struct delay_pair *delay_entry;
1401 delay_entry
1402 = (struct delay_pair *)htab_find_with_hash (delay_htab_i2, used,
1403 htab_hash_pointer (used));
1404 if (delay_entry)
1406 if (delay_entry->i1 == insn)
1408 DEP_COST (link) = pair_delay (delay_entry);
1409 return DEP_COST (link);
1414 /* A USE insn should never require the value used to be computed.
1415 This allows the computation of a function's result and parameter
1416 values to overlap the return and call. We don't care about the
1417 dependence cost when only decreasing register pressure. */
1418 if (recog_memoized (used) < 0)
1420 cost = 0;
1421 recog_memoized (insn);
1423 else
1425 enum reg_note dep_type = DEP_TYPE (link);
1427 cost = insn_cost (insn);
1429 if (INSN_CODE (insn) >= 0)
1431 if (dep_type == REG_DEP_ANTI)
1432 cost = 0;
1433 else if (dep_type == REG_DEP_OUTPUT)
1435 cost = (insn_default_latency (insn)
1436 - insn_default_latency (used));
1437 if (cost <= 0)
1438 cost = 1;
1440 else if (bypass_p (insn))
1441 cost = insn_latency (insn, used);
1445 if (targetm.sched.adjust_cost_2)
1446 cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
1447 dw);
1448 else if (targetm.sched.adjust_cost != NULL)
1450 /* This variable is used for backward compatibility with the
1451 targets. */
1452 rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);
1454 /* Make it self-cycled, so that if someone tries to walk over this
1455 incomplete list they will be caught in an endless loop. */
1456 XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
1458 /* Targets use only REG_NOTE_KIND of the link. */
1459 PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
1461 cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
1462 insn, cost);
1464 free_INSN_LIST_node (dep_cost_rtx_link);
1467 if (cost < 0)
1468 cost = 0;
1471 DEP_COST (link) = cost;
1472 return cost;
1475 /* Compute cost of dependence LINK.
1476 This is the number of cycles between instruction issue and
1477 instruction results. */
1479 dep_cost (dep_t link)
1481 return dep_cost_1 (link, 0);
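/* A small worked example of the rules in dep_cost_1, with hypothetical
   latencies and assuming no bypass: if INSN has a default latency of 3 and
   USED a default latency of 1, then a true dependence costs 3 cycles, an
   anti dependence costs 0, and an output dependence costs 3 - 1 == 2 (never
   less than 1), all before any target adjust_cost hooks are applied.  */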
1484 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
1485 INSN_PRIORITY explicitly. */
1486 void
1487 increase_insn_priority (rtx insn, int amount)
1489 if (!sel_sched_p ())
1491 /* We're dealing with haifa-sched.c INSN_PRIORITY. */
1492 if (INSN_PRIORITY_KNOWN (insn))
1493 INSN_PRIORITY (insn) += amount;
1495 else
1497 /* In sel-sched.c INSN_PRIORITY is not kept up to date.
1498 Use EXPR_PRIORITY instead. */
1499 sel_add_to_insn_priority (insn, amount);
1503 /* Return 'true' if DEP should be included in priority calculations. */
1504 static bool
1505 contributes_to_priority_p (dep_t dep)
1507 if (DEBUG_INSN_P (DEP_CON (dep))
1508 || DEBUG_INSN_P (DEP_PRO (dep)))
1509 return false;
1511 /* Critical path is meaningful in block boundaries only. */
1512 if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
1513 DEP_PRO (dep)))
1514 return false;
1516 if (DEP_REPLACE (dep) != NULL)
1517 return false;
1519 /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
1520 then speculative instructions will be less likely to be
1521 scheduled. That is because the priority of
1522 their producers will increase, and, thus, the
1523 producers will be more likely to be scheduled, thus
1524 resolving the dependence. */
1525 if (sched_deps_info->generate_spec_deps
1526 && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
1527 && (DEP_STATUS (dep) & SPECULATIVE))
1528 return false;
1530 return true;
1533 /* Compute the number of nondebug deps in list LIST for INSN. */
1535 static int
1536 dep_list_size (rtx insn, sd_list_types_def list)
1538 sd_iterator_def sd_it;
1539 dep_t dep;
1540 int dbgcount = 0, nodbgcount = 0;
1542 if (!MAY_HAVE_DEBUG_INSNS)
1543 return sd_lists_size (insn, list);
1545 FOR_EACH_DEP (insn, list, sd_it, dep)
1547 if (DEBUG_INSN_P (DEP_CON (dep)))
1548 dbgcount++;
1549 else if (!DEBUG_INSN_P (DEP_PRO (dep)))
1550 nodbgcount++;
1553 gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, list));
1555 return nodbgcount;
1558 /* Compute the priority number for INSN. */
1559 static int
1560 priority (rtx insn)
1562 if (! INSN_P (insn))
1563 return 0;
1565 /* We should not be interested in priority of an already scheduled insn. */
1566 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
1568 if (!INSN_PRIORITY_KNOWN (insn))
1570 int this_priority = -1;
1572 if (dep_list_size (insn, SD_LIST_FORW) == 0)
1573 /* ??? We should set INSN_PRIORITY to insn_cost when an insn has
1574 some forward deps but all of them are ignored by
1575 contributes_to_priority hook. At the moment we set priority of
1576 such insn to 0. */
1577 this_priority = insn_cost (insn);
1578 else
1580 rtx prev_first, twin;
1581 basic_block rec;
1583 /* For recovery check instructions we calculate priority slightly
1584 differently than for normal instructions. Instead of walking
1585 through INSN_FORW_DEPS (check) list, we walk through
1586 INSN_FORW_DEPS list of each instruction in the corresponding
1587 recovery block. */
1589 /* Selective scheduling does not define RECOVERY_BLOCK macro. */
1590 rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
1591 if (!rec || rec == EXIT_BLOCK_PTR)
1593 prev_first = PREV_INSN (insn);
1594 twin = insn;
1596 else
1598 prev_first = NEXT_INSN (BB_HEAD (rec));
1599 twin = PREV_INSN (BB_END (rec));
1604 sd_iterator_def sd_it;
1605 dep_t dep;
1607 FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
1609 rtx next;
1610 int next_priority;
1612 next = DEP_CON (dep);
1614 if (BLOCK_FOR_INSN (next) != rec)
1616 int cost;
1618 if (!contributes_to_priority_p (dep))
1619 continue;
1621 if (twin == insn)
1622 cost = dep_cost (dep);
1623 else
1625 struct _dep _dep1, *dep1 = &_dep1;
1627 init_dep (dep1, insn, next, REG_DEP_ANTI);
1629 cost = dep_cost (dep1);
1632 next_priority = cost + priority (next);
1634 if (next_priority > this_priority)
1635 this_priority = next_priority;
1639 twin = PREV_INSN (twin);
1641 while (twin != prev_first);
1644 if (this_priority < 0)
1646 gcc_assert (this_priority == -1);
1648 this_priority = insn_cost (insn);
1651 INSN_PRIORITY (insn) = this_priority;
1652 INSN_PRIORITY_STATUS (insn) = 1;
1655 return INSN_PRIORITY (insn);
1658 /* Macros and functions for keeping the priority queue sorted, and
1659 dealing with queuing and dequeuing of instructions. */
1661 #define SCHED_SORT(READY, N_READY) \
1662 do { if ((N_READY) == 2) \
1663 swap_sort (READY, N_READY); \
1664 else if ((N_READY) > 2) \
1665 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
1666 while (0)
1668 /* For each pressure class CL, set DEATH[CL] to the number of registers
1669 in that class that die in INSN. */
1671 static void
1672 calculate_reg_deaths (rtx insn, int *death)
1674 int i;
1675 struct reg_use_data *use;
1677 for (i = 0; i < ira_pressure_classes_num; i++)
1678 death[ira_pressure_classes[i]] = 0;
1679 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1680 if (dying_use_p (use))
1681 mark_regno_birth_or_death (0, death, use->regno, true);
1684 /* Setup info about the current register pressure impact of scheduling
1685 INSN at the current scheduling point. */
1686 static void
1687 setup_insn_reg_pressure_info (rtx insn)
1689 int i, change, before, after, hard_regno;
1690 int excess_cost_change;
1691 enum machine_mode mode;
1692 enum reg_class cl;
1693 struct reg_pressure_data *pressure_info;
1694 int *max_reg_pressure;
1695 static int death[N_REG_CLASSES];
1697 gcc_checking_assert (!DEBUG_INSN_P (insn));
1699 excess_cost_change = 0;
1700 calculate_reg_deaths (insn, death);
1701 pressure_info = INSN_REG_PRESSURE (insn);
1702 max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
1703 gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
1704 for (i = 0; i < ira_pressure_classes_num; i++)
1706 cl = ira_pressure_classes[i];
1707 gcc_assert (curr_reg_pressure[cl] >= 0);
1708 change = (int) pressure_info[i].set_increase - death[cl];
1709 before = MAX (0, max_reg_pressure[i] - ira_class_hard_regs_num[cl]);
1710 after = MAX (0, max_reg_pressure[i] + change
1711 - ira_class_hard_regs_num[cl]);
1712 hard_regno = ira_class_hard_regs[cl][0];
1713 gcc_assert (hard_regno >= 0);
1714 mode = reg_raw_mode[hard_regno];
1715 excess_cost_change += ((after - before)
1716 * (ira_memory_move_cost[mode][cl][0]
1717 + ira_memory_move_cost[mode][cl][1]));
1719 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
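/* To make the computation above concrete with invented numbers: if scheduling
   INSN raises the excess of a class's pressure over ira_class_hard_regs_num
   from 0 to 1, and a memory move in the relevant mode costs 4 to store plus 4
   to load, then INSN_REG_PRESSURE_EXCESS_COST_CHANGE grows by
   (1 - 0) * (4 + 4) == 8.  */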
1722 /* This is the first page of code related to SCHED_PRESSURE_MODEL.
1723 It tries to make the scheduler take register pressure into account
1724 without introducing too many unnecessary stalls. It hooks into the
1725 main scheduling algorithm at several points:
1727 - Before scheduling starts, model_start_schedule constructs a
1728 "model schedule" for the current block. This model schedule is
1729 chosen solely to keep register pressure down. It does not take the
1730 target's pipeline or the original instruction order into account,
1731 except as a tie-breaker. It also doesn't work to a particular
1732 pressure limit.
1734 This model schedule gives us an idea of what pressure can be
1735 achieved for the block and gives us an example of a schedule that
1736 keeps to that pressure. It also makes the final schedule less
1737 dependent on the original instruction order. This is important
1738 because the original order can either be "wide" (many values live
1739 at once, such as in user-scheduled code) or "narrow" (few values
1740 live at once, such as after loop unrolling, where several
1741 iterations are executed sequentially).
1743 We do not apply this model schedule to the rtx stream. We simply
1744 record it in model_schedule. We also compute the maximum pressure,
1745 MP, that was seen during this schedule.
1747 - Instructions are added to the ready queue even if they require
1748 a stall. The length of the stall is instead computed as:
1750 MAX (INSN_TICK (INSN) - clock_var, 0)
1752 (= insn_delay). This allows rank_for_schedule to choose between
1753 introducing a deliberate stall or increasing pressure.
1755 - Before sorting the ready queue, model_set_excess_costs assigns
1756 a pressure-based cost to each ready instruction in the queue.
1757 This is the instruction's INSN_REG_PRESSURE_EXCESS_COST_CHANGE
1758 (ECC for short) and is effectively measured in cycles.
1760 - rank_for_schedule ranks instructions based on:
1762 ECC (insn) + insn_delay (insn)
1764 then as:
1766 insn_delay (insn)
1768 So, for example, an instruction X1 with an ECC of 1 that can issue
1769 now will win over an instruction X0 with an ECC of zero that would
1770 introduce a stall of one cycle. However, an instruction X2 with an
1771 ECC of 2 that can issue now will lose to both X0 and X1.
1773 - When an instruction is scheduled, model_recompute updates the model
1774 schedule with the new pressures (some of which might now exceed the
1775 original maximum pressure MP). model_update_limit_points then searches
1776 for the new point of maximum pressure, if not already known. */
1778 /* Used to separate high-verbosity debug information for SCHED_PRESSURE_MODEL
1779 from surrounding debug information. */
1780 #define MODEL_BAR \
1781 ";;\t\t+------------------------------------------------------\n"
1783 /* Information about the pressure on a particular register class at a
1784 particular point of the model schedule. */
1785 struct model_pressure_data {
1786 /* The pressure at this point of the model schedule, or -1 if the
1787 point is associated with an instruction that has already been
1788 scheduled. */
1789 int ref_pressure;
1791 /* The maximum pressure during or after this point of the model schedule. */
1792 int max_pressure;
1795 /* Per-instruction information that is used while building the model
1796 schedule. Here, "schedule" refers to the model schedule rather
1797 than the main schedule. */
1798 struct model_insn_info {
1799 /* The instruction itself. */
1800 rtx insn;
1802 /* If this instruction is in model_worklist, these fields link to the
1803 previous (higher-priority) and next (lower-priority) instructions
1804 in the list. */
1805 struct model_insn_info *prev;
1806 struct model_insn_info *next;
1808 /* While constructing the schedule, QUEUE_INDEX describes whether an
1809 instruction has already been added to the schedule (QUEUE_SCHEDULED),
1810 is in model_worklist (QUEUE_READY), or neither (QUEUE_NOWHERE).
1811 old_queue records the value that QUEUE_INDEX had before scheduling
1812 started, so that we can restore it once the schedule is complete. */
1813 int old_queue;
1815 /* The relative importance of an unscheduled instruction. Higher
1816 values indicate greater importance. */
1817 unsigned int model_priority;
1819 /* The length of the longest path of satisfied true dependencies
1820 that leads to this instruction. */
1821 unsigned int depth;
1823 /* The length of the longest path of dependencies of any kind
1824 that leads from this instruction. */
1825 unsigned int alap;
1827 /* The number of predecessor nodes that must still be scheduled. */
1828 int unscheduled_preds;
1831 /* Information about the pressure limit for a particular register class.
1832 This structure is used when applying a model schedule to the main
1833 schedule. */
1834 struct model_pressure_limit {
1835 /* The maximum register pressure seen in the original model schedule. */
1836 int orig_pressure;
1838 /* The maximum register pressure seen in the current model schedule
1839 (which excludes instructions that have already been scheduled). */
1840 int pressure;
1842 /* The point of the current model schedule at which PRESSURE is first
1843 reached. It is set to -1 if the value needs to be recomputed. */
1844 int point;
1847 /* Describes a particular way of measuring register pressure. */
1848 struct model_pressure_group {
1849 /* Index PCI describes the maximum pressure on ira_pressure_classes[PCI]. */
1850 struct model_pressure_limit limits[N_REG_CLASSES];
1852 /* Index (POINT * ira_num_pressure_classes + PCI) describes the pressure
1853 on register class ira_pressure_classes[PCI] at point POINT of the
1854 current model schedule. A POINT of model_num_insns describes the
1855 pressure at the end of the schedule. */
1856 struct model_pressure_data *model;
1859 /* Index POINT gives the instruction at point POINT of the model schedule.
1860 This array doesn't change during main scheduling. */
1861 static vec<rtx> model_schedule;
1863 /* The list of instructions in the model worklist, sorted in order of
1864 decreasing priority. */
1865 static struct model_insn_info *model_worklist;
1867 /* Index I describes the instruction with INSN_LUID I. */
1868 static struct model_insn_info *model_insns;
1870 /* The number of instructions in the model schedule. */
1871 static int model_num_insns;
1873 /* The index of the first instruction in model_schedule that hasn't yet been
1874 added to the main schedule, or model_num_insns if all of them have. */
1875 static int model_curr_point;
1877 /* Describes the pressure before each instruction in the model schedule. */
1878 static struct model_pressure_group model_before_pressure;
1880 /* The first unused model_priority value (as used in model_insn_info). */
1881 static unsigned int model_next_priority;
1884 /* The model_pressure_data for ira_pressure_classes[PCI] in GROUP
1885 at point POINT of the model schedule. */
1886 #define MODEL_PRESSURE_DATA(GROUP, POINT, PCI) \
1887 (&(GROUP)->model[(POINT) * ira_pressure_classes_num + (PCI)])
1889 /* The maximum pressure on ira_pressure_classes[PCI] in GROUP at or
1890 after point POINT of the model schedule. */
1891 #define MODEL_MAX_PRESSURE(GROUP, POINT, PCI) \
1892 (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->max_pressure)
1894 /* The pressure on ira_pressure_classes[PCI] in GROUP at point POINT
1895 of the model schedule. */
1896 #define MODEL_REF_PRESSURE(GROUP, POINT, PCI) \
1897 (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->ref_pressure)
1899 /* Information about INSN that is used when creating the model schedule. */
1900 #define MODEL_INSN_INFO(INSN) \
1901 (&model_insns[INSN_LUID (INSN)])
1903 /* The instruction at point POINT of the model schedule. */
1904 #define MODEL_INSN(POINT) \
1905 (model_schedule[POINT])
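/* A small indexing sketch, with a purely hypothetical configuration of
   two pressure classes (ira_pressure_classes_num == 2): the data for
   POINT 3 and PCI 1 lives at

     &GROUP->model[3 * 2 + 1]    i.e. GROUP->model[7]

   and because POINT ranges over 0 ... model_num_insns inclusive, the
   array needs (model_num_insns + 1) * ira_pressure_classes_num entries,
   which is how model_init_pressure_group allocates it.  */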
1908 /* Return INSN's index in the model schedule, or model_num_insns if it
1909 doesn't belong to that schedule. */
1911 static int
1912 model_index (rtx insn)
1914 if (INSN_MODEL_INDEX (insn) == 0)
1915 return model_num_insns;
1916 return INSN_MODEL_INDEX (insn) - 1;
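/* Illustration of the off-by-one encoding assumed here: the insn added
   first by model_add_to_schedule is given INSN_MODEL_INDEX 1, so
   model_index returns 0 for it; an insn that was never added to the
   model schedule (for example one moved in from another block) keeps
   INSN_MODEL_INDEX 0 and model_index returns model_num_insns.  */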
1919 /* Make sure that GROUP->limits is up-to-date for the current point
1920 of the model schedule. */
1922 static void
1923 model_update_limit_points_in_group (struct model_pressure_group *group)
1925 int pci, max_pressure, point;
1927 for (pci = 0; pci < ira_pressure_classes_num; pci++)
1929 /* We may have passed the final point at which the pressure in
1930 group->limits[pci].pressure was reached. Update the limit if so. */
1931 max_pressure = MODEL_MAX_PRESSURE (group, model_curr_point, pci);
1932 group->limits[pci].pressure = max_pressure;
1934 /* Find the point at which MAX_PRESSURE is first reached. We need
1935 to search in three cases:
1937 - We've already moved past the previous pressure point.
1938 In this case we search forward from model_curr_point.
1940 - We scheduled the previous point of maximum pressure ahead of
1941 its position in the model schedule, but doing so didn't bring
1942 the pressure point earlier. In this case we search forward
1943 from that previous pressure point.
1945 - Scheduling an instruction early caused the maximum pressure
1946 to decrease. In this case we will have set the pressure
1947 point to -1, and we search forward from model_curr_point. */
1948 point = MAX (group->limits[pci].point, model_curr_point);
1949 while (point < model_num_insns
1950 && MODEL_REF_PRESSURE (group, point, pci) < max_pressure)
1951 point++;
1952 group->limits[pci].point = point;
1954 gcc_assert (MODEL_REF_PRESSURE (group, point, pci) == max_pressure);
1955 gcc_assert (MODEL_MAX_PRESSURE (group, point, pci) == max_pressure);
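/* A small example with invented pressures: if MAX_PRESSURE is 5 and the
   ref pressures from the starting point onwards are 3, 4, 5 (a -1 for an
   already-scheduled point also compares below 5), the loop stops two
   points later and records that point in group->limits[pci].point, where
   both of the asserts above hold.  */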
1959 /* Make sure that all register-pressure limits are up-to-date for the
1960 current position in the model schedule. */
1962 static void
1963 model_update_limit_points (void)
1965 model_update_limit_points_in_group (&model_before_pressure);
1968 /* Return the model_index of the last unscheduled use in chain USE
1969 outside of USE's instruction. Return -1 if there are no other uses,
1970 or model_num_insns if the register is live at the end of the block. */
1972 static int
1973 model_last_use_except (struct reg_use_data *use)
1975 struct reg_use_data *next;
1976 int last, index;
1978 last = -1;
1979 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
1980 if (NONDEBUG_INSN_P (next->insn)
1981 && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
1983 index = model_index (next->insn);
1984 if (index == model_num_insns)
1985 return model_num_insns;
1986 if (last < index)
1987 last = index;
1989 return last;
1992 /* An instruction with model_index POINT has just been scheduled, and it
1993 adds DELTA to the pressure on ira_pressure_classes[PCI] after POINT - 1.
1994 Update MODEL_REF_PRESSURE (GROUP, POINT, PCI) and
1995 MODEL_MAX_PRESSURE (GROUP, POINT, PCI) accordingly. */
1997 static void
1998 model_start_update_pressure (struct model_pressure_group *group,
1999 int point, int pci, int delta)
2001 int next_max_pressure;
2003 if (point == model_num_insns)
2005 /* The instruction wasn't part of the model schedule; it was moved
2006 from a different block. Update the pressure for the end of
2007 the model schedule. */
2008 MODEL_REF_PRESSURE (group, point, pci) += delta;
2009 MODEL_MAX_PRESSURE (group, point, pci) += delta;
2011 else
2013 /* Record that this instruction has been scheduled. Nothing now
2014 changes between POINT and POINT + 1, so get the maximum pressure
2015 from the latter. If the maximum pressure decreases, the new
2016 pressure point may be before POINT. */
2017 MODEL_REF_PRESSURE (group, point, pci) = -1;
2018 next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2019 if (MODEL_MAX_PRESSURE (group, point, pci) > next_max_pressure)
2021 MODEL_MAX_PRESSURE (group, point, pci) = next_max_pressure;
2022 if (group->limits[pci].point == point)
2023 group->limits[pci].point = -1;
2028 /* Record that scheduling a later instruction has changed the pressure
2029 at point POINT of the model schedule by DELTA (which might be 0).
2030 Update GROUP accordingly. Return nonzero if these changes might
2031 trigger changes to previous points as well. */
2033 static int
2034 model_update_pressure (struct model_pressure_group *group,
2035 int point, int pci, int delta)
2037 int ref_pressure, max_pressure, next_max_pressure;
2039 /* If POINT hasn't yet been scheduled, update its pressure. */
2040 ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
2041 if (ref_pressure >= 0 && delta != 0)
2043 ref_pressure += delta;
2044 MODEL_REF_PRESSURE (group, point, pci) = ref_pressure;
2046 /* Check whether the maximum pressure in the overall schedule
2047 has increased. (This means that the MODEL_MAX_PRESSURE of
2048 every point <= POINT will need to increase too; see below.) */
2049 if (group->limits[pci].pressure < ref_pressure)
2050 group->limits[pci].pressure = ref_pressure;
2052 /* If we are at maximum pressure, and the maximum pressure
2053 point was previously unknown or later than POINT,
2054 bring it forward. */
2055 if (group->limits[pci].pressure == ref_pressure
2056 && !IN_RANGE (group->limits[pci].point, 0, point))
2057 group->limits[pci].point = point;
2059 /* If POINT used to be the point of maximum pressure, but isn't
2060 any longer, we need to recalculate it using a forward walk. */
2061 if (group->limits[pci].pressure > ref_pressure
2062 && group->limits[pci].point == point)
2063 group->limits[pci].point = -1;
2066 /* Update the maximum pressure at POINT. Changes here might also
2067 affect the maximum pressure at POINT - 1. */
2068 next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2069 max_pressure = MAX (ref_pressure, next_max_pressure);
2070 if (MODEL_MAX_PRESSURE (group, point, pci) != max_pressure)
2072 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
2073 return 1;
2075 return 0;
2078 /* INSN has just been scheduled. Update the model schedule accordingly. */
2080 static void
2081 model_recompute (rtx insn)
2083 struct {
2084 int last_use;
2085 int regno;
2086 } uses[FIRST_PSEUDO_REGISTER + MAX_RECOG_OPERANDS];
2087 struct reg_use_data *use;
2088 struct reg_pressure_data *reg_pressure;
2089 int delta[N_REG_CLASSES];
2090 int pci, point, mix, new_last, cl, ref_pressure, queue;
2091 unsigned int i, num_uses, num_pending_births;
2092 bool print_p;
2094 /* The destinations of INSN were previously live from POINT onwards, but are
2095 now live from model_curr_point onwards. Set up DELTA accordingly. */
2096 point = model_index (insn);
2097 reg_pressure = INSN_REG_PRESSURE (insn);
2098 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2100 cl = ira_pressure_classes[pci];
2101 delta[cl] = reg_pressure[pci].set_increase;
2104 /* Record which registers previously died at POINT, but which now die
2105 before POINT. Adjust DELTA so that it represents the effect of
2106 this change after POINT - 1. Set NUM_PENDING_BIRTHS to the number of
2107 registers that will be born in the range [model_curr_point, POINT). */
2108 num_uses = 0;
2109 num_pending_births = 0;
2110 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2112 new_last = model_last_use_except (use);
2113 if (new_last < point)
2115 gcc_assert (num_uses < ARRAY_SIZE (uses));
2116 uses[num_uses].last_use = new_last;
2117 uses[num_uses].regno = use->regno;
2118 /* This register is no longer live after POINT - 1. */
2119 mark_regno_birth_or_death (NULL, delta, use->regno, false);
2120 num_uses++;
2121 if (new_last >= 0)
2122 num_pending_births++;
2126 /* Update the MODEL_REF_PRESSURE and MODEL_MAX_PRESSURE for POINT.
2127 Also set each group pressure limit for POINT. */
2128 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2130 cl = ira_pressure_classes[pci];
2131 model_start_update_pressure (&model_before_pressure,
2132 point, pci, delta[cl]);
2135 /* Walk the model schedule backwards, starting immediately before POINT. */
2136 print_p = false;
2137 if (point != model_curr_point)
2140 point--;
2141 insn = MODEL_INSN (point);
2142 queue = QUEUE_INDEX (insn);
2144 if (queue != QUEUE_SCHEDULED)
2146 /* DELTA describes the effect of the move on the register pressure
2147 after POINT. Make it describe the effect on the pressure
2148 before POINT. */
2149 i = 0;
2150 while (i < num_uses)
2152 if (uses[i].last_use == point)
2154 /* This register is now live again. */
2155 mark_regno_birth_or_death (NULL, delta,
2156 uses[i].regno, true);
2158 /* Remove this use from the array. */
2159 uses[i] = uses[num_uses - 1];
2160 num_uses--;
2161 num_pending_births--;
2163 else
2164 i++;
2167 if (sched_verbose >= 5)
2169 if (!print_p)
2171 fprintf (sched_dump, MODEL_BAR);
2172 fprintf (sched_dump, ";;\t\t| New pressure for model"
2173 " schedule\n");
2174 fprintf (sched_dump, MODEL_BAR);
2175 print_p = true;
2178 fprintf (sched_dump, ";;\t\t| %3d %4d %-30s ",
2179 point, INSN_UID (insn),
2180 str_pattern_slim (PATTERN (insn)));
2181 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2183 cl = ira_pressure_classes[pci];
2184 ref_pressure = MODEL_REF_PRESSURE (&model_before_pressure,
2185 point, pci);
2186 fprintf (sched_dump, " %s:[%d->%d]",
2187 reg_class_names[ira_pressure_classes[pci]],
2188 ref_pressure, ref_pressure + delta[cl]);
2190 fprintf (sched_dump, "\n");
2194 /* Adjust the pressure at POINT. Set MIX to nonzero if POINT - 1
2195 might have changed as well. */
2196 mix = num_pending_births;
2197 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2199 cl = ira_pressure_classes[pci];
2200 mix |= delta[cl];
2201 mix |= model_update_pressure (&model_before_pressure,
2202 point, pci, delta[cl]);
2205 while (mix && point > model_curr_point);
2207 if (print_p)
2208 fprintf (sched_dump, MODEL_BAR);
2211 /* After DEP, which was cancelled, has been resolved for insn NEXT,
2212 check whether the insn's pattern needs restoring. */
2213 static bool
2214 must_restore_pattern_p (rtx next, dep_t dep)
2216 if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
2217 return false;
2219 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
2221 gcc_assert (ORIG_PAT (next) != NULL_RTX);
2222 gcc_assert (next == DEP_CON (dep));
2224 else
2226 struct dep_replacement *desc = DEP_REPLACE (dep);
2227 if (desc->insn != next)
2229 gcc_assert (*desc->loc == desc->orig);
2230 return false;
2233 return true;
2236 /* model_spill_cost (CL, P, P') returns the cost of increasing the
2237 pressure on CL from P to P'. We use this to calculate a "base ECC",
2238 baseECC (CL, X), for each pressure class CL and each instruction X.
2239 Supposing X changes the pressure on CL from P to P', and that the
2240 maximum pressure on CL in the current model schedule is MP', then:
2242 * if X occurs before or at the next point of maximum pressure in
2243 the model schedule and P' > MP', then:
2245 baseECC (CL, X) = model_spill_cost (CL, MP, P')
2247 The idea is that the pressure after scheduling a fixed set of
2248 instructions -- in this case, the set up to and including the
2249 next maximum pressure point -- is going to be the same regardless
2250 of the order; we simply want to keep the intermediate pressure
2251 under control. Thus X has a cost of zero unless scheduling it
2252 now would exceed MP'.
2254 If all increases in the set are by the same amount, no zero-cost
2255 instruction will ever cause the pressure to exceed MP'. However,
2256 if X is instead moved past an instruction X' with pressure in the
2257 range (MP' - (P' - P), MP'), the pressure at X' will increase
2258 beyond MP'. Since baseECC is very much a heuristic anyway,
2259 it doesn't seem worth the overhead of tracking cases like these.
2261 The cost of exceeding MP' is always based on the original maximum
2262 pressure MP. This is so that going 2 registers over the original
2263 limit has the same cost regardless of whether it comes from two
2264 separate +1 deltas or from a single +2 delta.
2266 * if X occurs after the next point of maximum pressure in the model
2267 schedule and P' > P, then:
2269 baseECC (CL, X) = model_spill_cost (CL, MP, MP' + (P' - P))
2271 That is, if we move X forward across a point of maximum pressure,
2272 and if X increases the pressure by P' - P, then we conservatively
2273 assume that scheduling X next would increase the maximum pressure
2274 by P' - P. Again, the cost of doing this is based on the original
2275 maximum pressure MP, for the same reason as above.
2277 * if P' < P, P > MP, and X occurs at or after the next point of
2278 maximum pressure, then:
2280 baseECC (CL, X) = -model_spill_cost (CL, MAX (MP, P'), P)
2282 That is, if we have already exceeded the original maximum pressure MP,
2283 and if X might reduce the maximum pressure again -- or at least push
2284 it further back, and thus allow more scheduling freedom -- it is given
2285 a negative cost to reflect the improvement.
2287 * otherwise,
2289 baseECC (CL, X) = 0
2291 In this case, X is not expected to affect the maximum pressure MP',
2292 so it has zero cost.
2294 We then create a combined value baseECC (X) that is the sum of
2295 baseECC (CL, X) for each pressure class CL.
2297 baseECC (X) could itself be used as the ECC value described above.
2298 However, this is often too conservative, in the sense that it
2299 tends to make high-priority instructions that increase pressure
2300 wait too long in cases where introducing a spill would be better.
2301 For this reason the final ECC is a priority-adjusted form of
2302 baseECC (X). Specifically, we calculate:
2304 P (X) = INSN_PRIORITY (X) - insn_delay (X) - baseECC (X)
2305 baseP = MAX { P (X) | baseECC (X) <= 0 }
2307 Then:
2309 ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0)
2311 Thus an instruction's effect on pressure is ignored if it has a high
2312 enough priority relative to the ones that don't increase pressure.
2313 Negative values of baseECC (X) do not increase the priority of X
2314 itself, but they do make it harder for other instructions to
2315 increase the pressure further.
2317 This pressure cost is deliberately timid. The intention has been
2318 to choose a heuristic that rarely interferes with the normal list
2319 scheduler in cases where that scheduler would produce good code.
2320 We simply want to curb some of its worst excesses. */
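/* A purely numerical illustration of the priority adjustment (all of the
   priorities and costs below are invented): suppose the ready queue holds
   A with INSN_PRIORITY 10, insn_delay 0 and baseECC 0, and B with
   INSN_PRIORITY 6, insn_delay 0 and baseECC -1.  Then P (A) = 10,
   P (B) = 7 and baseP = 10.  An insn C with INSN_PRIORITY 12, insn_delay 0
   and baseECC 3 has P (C) = 12 - 0 - 3 = 9, so
   ECC (C) = MAX (MIN (10 - 9, 3), 0) = 1: most of its pressure cost is
   forgiven because of its high priority.  A low-priority insn D with
   INSN_PRIORITY 5, insn_delay 0 and baseECC 3 has P (D) = 2 and
   ECC (D) = MAX (MIN (8, 3), 0) = 3, i.e. it pays the full cost.  */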
2322 /* Return the cost of increasing the pressure in class CL from FROM to TO.
2324 Here we use the very simplistic cost model that every register above
2325 ira_class_hard_regs_num[CL] has a spill cost of 1. We could use other
2326 measures instead, such as one based on MEMORY_MOVE_COST. However:
2328 (1) In order for an instruction to be scheduled, the higher cost
2329 would need to be justified in a single saving of that many stalls.
2330 This is overly pessimistic, because the benefit of spilling is
2331 often to avoid a sequence of several short stalls rather than
2332 a single long one.
2334 (2) The cost is still arbitrary. Because we are not allocating
2335 registers during scheduling, we have no way of knowing for
2336 sure how many memory accesses will be required by each spill,
2337 where the spills will be placed within the block, or even
2338 which block(s) will contain the spills.
2340 So a higher cost than 1 is often too conservative in practice,
2341 forcing blocks to contain unnecessary stalls instead of spill code.
2342 The simple cost below seems to be the best compromise. It reduces
2343 the interference with the normal list scheduler, which helps make
2344 it more suitable for a default-on option. */
2346 static int
2347 model_spill_cost (int cl, int from, int to)
2349 from = MAX (from, ira_class_hard_regs_num[cl]);
2350 return MAX (to, from) - from;
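/* For instance, if ira_class_hard_regs_num[CL] were 4 (a hypothetical
   value), then:

     model_spill_cost (CL, 3, 6) == MAX (6, 4) - 4 == 2
     model_spill_cost (CL, 5, 6) == MAX (6, 5) - 5 == 1
     model_spill_cost (CL, 6, 5) == MAX (5, 6) - 6 == 0

   i.e. only the registers above the class limit are charged for, and the
   cost is never negative.  */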
2353 /* Return baseECC (ira_pressure_classes[PCI], POINT), given that
2354 P = curr_reg_pressure[ira_pressure_classes[PCI]] and that
2355 P' = P + DELTA. */
2357 static int
2358 model_excess_group_cost (struct model_pressure_group *group,
2359 int point, int pci, int delta)
2361 int pressure, cl;
2363 cl = ira_pressure_classes[pci];
2364 if (delta < 0 && point >= group->limits[pci].point)
2366 pressure = MAX (group->limits[pci].orig_pressure,
2367 curr_reg_pressure[cl] + delta);
2368 return -model_spill_cost (cl, pressure, curr_reg_pressure[cl]);
2371 if (delta > 0)
2373 if (point > group->limits[pci].point)
2374 pressure = group->limits[pci].pressure + delta;
2375 else
2376 pressure = curr_reg_pressure[cl] + delta;
2378 if (pressure > group->limits[pci].pressure)
2379 return model_spill_cost (cl, group->limits[pci].orig_pressure,
2380 pressure);
2383 return 0;
2386 /* Return baseECC (MODEL_INSN (INSN)). Dump the costs to sched_dump
2387 if PRINT_P. */
2389 static int
2390 model_excess_cost (rtx insn, bool print_p)
2392 int point, pci, cl, cost, this_cost, delta;
2393 struct reg_pressure_data *insn_reg_pressure;
2394 int insn_death[N_REG_CLASSES];
2396 calculate_reg_deaths (insn, insn_death);
2397 point = model_index (insn);
2398 insn_reg_pressure = INSN_REG_PRESSURE (insn);
2399 cost = 0;
2401 if (print_p)
2402 fprintf (sched_dump, ";;\t\t| %3d %4d | %4d %+3d |", point,
2403 INSN_UID (insn), INSN_PRIORITY (insn), insn_delay (insn));
2405 /* Sum up the individual costs for each register class. */
2406 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2408 cl = ira_pressure_classes[pci];
2409 delta = insn_reg_pressure[pci].set_increase - insn_death[cl];
2410 this_cost = model_excess_group_cost (&model_before_pressure,
2411 point, pci, delta);
2412 cost += this_cost;
2413 if (print_p)
2414 fprintf (sched_dump, " %s:[%d base cost %d]",
2415 reg_class_names[cl], delta, this_cost);
2418 if (print_p)
2419 fprintf (sched_dump, "\n");
2421 return cost;
2424 /* Dump the next points of maximum pressure for GROUP. */
2426 static void
2427 model_dump_pressure_points (struct model_pressure_group *group)
2429 int pci, cl;
2431 fprintf (sched_dump, ";;\t\t| pressure points");
2432 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2434 cl = ira_pressure_classes[pci];
2435 fprintf (sched_dump, " %s:[%d->%d at ", reg_class_names[cl],
2436 curr_reg_pressure[cl], group->limits[pci].pressure);
2437 if (group->limits[pci].point < model_num_insns)
2438 fprintf (sched_dump, "%d:%d]", group->limits[pci].point,
2439 INSN_UID (MODEL_INSN (group->limits[pci].point)));
2440 else
2441 fprintf (sched_dump, "end]");
2443 fprintf (sched_dump, "\n");
2446 /* Set INSN_REG_PRESSURE_EXCESS_COST_CHANGE for INSNS[0...COUNT-1]. */
2448 static void
2449 model_set_excess_costs (rtx *insns, int count)
2451 int i, cost, priority_base, priority;
2452 bool print_p;
2454 /* Record the baseECC value for each instruction in the model schedule,
2455 except that negative costs are converted to zero ones now rather than
2456 later. Do not assign a cost to debug instructions, since they must
2457 not change code-generation decisions. Experiments suggest we also
2458 get better results by not assigning a cost to instructions from
2459 a different block.
2461 Set PRIORITY_BASE to baseP in the block comment above. This is the
2462 maximum priority of the "cheap" instructions, which should always
2463 include the next model instruction. */
2464 priority_base = 0;
2465 print_p = false;
2466 for (i = 0; i < count; i++)
2467 if (INSN_MODEL_INDEX (insns[i]))
2469 if (sched_verbose >= 6 && !print_p)
2471 fprintf (sched_dump, MODEL_BAR);
2472 fprintf (sched_dump, ";;\t\t| Pressure costs for ready queue\n");
2473 model_dump_pressure_points (&model_before_pressure);
2474 fprintf (sched_dump, MODEL_BAR);
2475 print_p = true;
2477 cost = model_excess_cost (insns[i], print_p);
2478 if (cost <= 0)
2480 priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]) - cost;
2481 priority_base = MAX (priority_base, priority);
2482 cost = 0;
2484 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = cost;
2486 if (print_p)
2487 fprintf (sched_dump, MODEL_BAR);
2489 /* Use MAX (baseECC, 0) and baseP to calculate ECC for each
2490 instruction. */
2491 for (i = 0; i < count; i++)
2493 cost = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]);
2494 priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]);
2495 if (cost > 0 && priority > priority_base)
2497 cost += priority_base - priority;
2498 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = MAX (cost, 0);
2503 /* Returns a positive value if x is preferred; returns a negative value if
2504 y is preferred. Should never return 0, since that will make the sort
2505 unstable. */
2507 static int
2508 rank_for_schedule (const void *x, const void *y)
2510 rtx tmp = *(const rtx *) y;
2511 rtx tmp2 = *(const rtx *) x;
2512 int tmp_class, tmp2_class;
2513 int val, priority_val, info_val;
2515 if (MAY_HAVE_DEBUG_INSNS)
2517 /* Schedule debug insns as early as possible. */
2518 if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
2519 return -1;
2520 else if (!DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2521 return 1;
2522 else if (DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2523 return INSN_LUID (tmp) - INSN_LUID (tmp2);
2526 /* The insn in a schedule group should be issued first. */
2527 if (flag_sched_group_heuristic &&
2528 SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
2529 return SCHED_GROUP_P (tmp2) ? 1 : -1;
2531 /* Make sure that the priorities of TMP and TMP2 are initialized. */
2532 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
2534 if (sched_pressure != SCHED_PRESSURE_NONE)
2536 int diff;
2538 /* Prefer insn whose scheduling results in the smallest register
2539 pressure excess. */
2540 if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
2541 + insn_delay (tmp)
2542 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
2543 - insn_delay (tmp2))))
2544 return diff;
2547 if (sched_pressure != SCHED_PRESSURE_NONE
2548 && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var))
2550 if (INSN_TICK (tmp) <= clock_var)
2551 return -1;
2552 else if (INSN_TICK (tmp2) <= clock_var)
2553 return 1;
2554 else
2555 return INSN_TICK (tmp) - INSN_TICK (tmp2);
2558 /* If we are doing backtracking in this schedule, prefer insns that
2559 have forward dependencies with negative cost against an insn that
2560 was already scheduled. */
2561 if (current_sched_info->flags & DO_BACKTRACKING)
2563 priority_val = FEEDS_BACKTRACK_INSN (tmp2) - FEEDS_BACKTRACK_INSN (tmp);
2564 if (priority_val)
2565 return priority_val;
2568 /* Prefer insn with higher priority. */
2569 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
2571 if (flag_sched_critical_path_heuristic && priority_val)
2572 return priority_val;
2574 /* Prefer the speculative insn with the greater dependency weakness. */
2575 if (flag_sched_spec_insn_heuristic && spec_info)
2577 ds_t ds1, ds2;
2578 dw_t dw1, dw2;
2579 int dw;
2581 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
2582 if (ds1)
2583 dw1 = ds_weak (ds1);
2584 else
2585 dw1 = NO_DEP_WEAK;
2587 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
2588 if (ds2)
2589 dw2 = ds_weak (ds2);
2590 else
2591 dw2 = NO_DEP_WEAK;
2593 dw = dw2 - dw1;
2594 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
2595 return dw;
2598 info_val = (*current_sched_info->rank) (tmp, tmp2);
2599 if (flag_sched_rank_heuristic && info_val)
2600 return info_val;
2602 /* Compare insns based on their relation to the last scheduled
2603 non-debug insn. */
2604 if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
2606 dep_t dep1;
2607 dep_t dep2;
2608 rtx last = last_nondebug_scheduled_insn;
2610 /* Classify the instructions into three classes:
2611 1) Data dependent on last scheduled insn.
2612 2) Anti/Output dependent on last scheduled insn.
2613 3) Independent of last scheduled insn, or has latency of one.
2614 Choose the insn from the highest numbered class if different. */
2615 dep1 = sd_find_dep_between (last, tmp, true);
2617 if (dep1 == NULL || dep_cost (dep1) == 1)
2618 tmp_class = 3;
2619 else if (/* Data dependence. */
2620 DEP_TYPE (dep1) == REG_DEP_TRUE)
2621 tmp_class = 1;
2622 else
2623 tmp_class = 2;
2625 dep2 = sd_find_dep_between (last, tmp2, true);
2627 if (dep2 == NULL || dep_cost (dep2) == 1)
2628 tmp2_class = 3;
2629 else if (/* Data dependence. */
2630 DEP_TYPE (dep2) == REG_DEP_TRUE)
2631 tmp2_class = 1;
2632 else
2633 tmp2_class = 2;
2635 if ((val = tmp2_class - tmp_class))
2636 return val;
2639 /* Prefer instructions that occur earlier in the model schedule. */
2640 if (sched_pressure == SCHED_PRESSURE_MODEL)
2642 int diff;
2644 diff = model_index (tmp) - model_index (tmp2);
2645 if (diff != 0)
2646 return diff;
2649 /* Prefer the insn which has more later insns that depend on it.
2650 This gives the scheduler more freedom when scheduling later
2651 instructions at the expense of added register pressure. */
2653 val = (dep_list_size (tmp2, SD_LIST_FORW)
2654 - dep_list_size (tmp, SD_LIST_FORW));
2656 if (flag_sched_dep_count_heuristic && val != 0)
2657 return val;
2659 /* If insns are equally good, sort by INSN_LUID (original insn order),
2660 so that we make the sort stable. This minimizes instruction movement,
2661 thus minimizing sched's effect on debugging and cross-jumping. */
2662 return INSN_LUID (tmp) - INSN_LUID (tmp2);
2665 /* Resort the array A, in which only the element at index N may be out of order. */
2667 HAIFA_INLINE static void
2668 swap_sort (rtx *a, int n)
2670 rtx insn = a[n - 1];
2671 int i = n - 2;
2673 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
2675 a[i + 1] = a[i];
2676 i -= 1;
2678 a[i + 1] = insn;
2681 /* Add INSN to the insn queue so that it can be executed at least
2682 N_CYCLES after the currently executing insn. Preserve insns
2683 chain for debugging purposes. REASON will be printed in debugging
2684 output. */
2686 HAIFA_INLINE static void
2687 queue_insn (rtx insn, int n_cycles, const char *reason)
2689 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
2690 rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
2691 int new_tick;
2693 gcc_assert (n_cycles <= max_insn_queue_index);
2694 gcc_assert (!DEBUG_INSN_P (insn));
2696 insn_queue[next_q] = link;
2697 q_size += 1;
2699 if (sched_verbose >= 2)
2701 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
2702 (*current_sched_info->print_insn) (insn, 0));
2704 fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
2707 QUEUE_INDEX (insn) = next_q;
2709 if (current_sched_info->flags & DO_BACKTRACKING)
2711 new_tick = clock_var + n_cycles;
2712 if (INSN_TICK (insn) == INVALID_TICK || INSN_TICK (insn) < new_tick)
2713 INSN_TICK (insn) = new_tick;
2715 if (INSN_EXACT_TICK (insn) != INVALID_TICK
2716 && INSN_EXACT_TICK (insn) < clock_var + n_cycles)
2718 must_backtrack = true;
2719 if (sched_verbose >= 2)
2720 fprintf (sched_dump, ";;\t\tcausing a backtrack.\n");
2725 /* Remove INSN from queue. */
2726 static void
2727 queue_remove (rtx insn)
2729 gcc_assert (QUEUE_INDEX (insn) >= 0);
2730 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
2731 q_size--;
2732 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
2735 /* Return a pointer to the bottom of the ready list, i.e. the insn
2736 with the lowest priority. */
2738 rtx *
2739 ready_lastpos (struct ready_list *ready)
2741 gcc_assert (ready->n_ready >= 1);
2742 return ready->vec + ready->first - ready->n_ready + 1;
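/* As an illustration with made-up values: if ready->first == 9 and
   ready->n_ready == 3, the ready insns occupy vec[7], vec[8] and vec[9].
   The highest-priority insn sits at vec[9] (the slot ready_remove_first
   pops) and the lowest-priority one at vec[7], which is the pointer that
   ready_lastpos returns.  */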
2745 /* Add an element INSN to the ready list so that it ends up with the
2746 lowest/highest priority depending on FIRST_P. */
2748 HAIFA_INLINE static void
2749 ready_add (struct ready_list *ready, rtx insn, bool first_p)
2751 if (!first_p)
2753 if (ready->first == ready->n_ready)
2755 memmove (ready->vec + ready->veclen - ready->n_ready,
2756 ready_lastpos (ready),
2757 ready->n_ready * sizeof (rtx));
2758 ready->first = ready->veclen - 1;
2760 ready->vec[ready->first - ready->n_ready] = insn;
2762 else
2764 if (ready->first == ready->veclen - 1)
2766 if (ready->n_ready)
2767 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
2768 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
2769 ready_lastpos (ready),
2770 ready->n_ready * sizeof (rtx));
2771 ready->first = ready->veclen - 2;
2773 ready->vec[++(ready->first)] = insn;
2776 ready->n_ready++;
2777 if (DEBUG_INSN_P (insn))
2778 ready->n_debug++;
2780 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
2781 QUEUE_INDEX (insn) = QUEUE_READY;
2783 if (INSN_EXACT_TICK (insn) != INVALID_TICK
2784 && INSN_EXACT_TICK (insn) < clock_var)
2786 must_backtrack = true;
2790 /* Remove the element with the highest priority from the ready list and
2791 return it. */
2793 HAIFA_INLINE static rtx
2794 ready_remove_first (struct ready_list *ready)
2796 rtx t;
2798 gcc_assert (ready->n_ready);
2799 t = ready->vec[ready->first--];
2800 ready->n_ready--;
2801 if (DEBUG_INSN_P (t))
2802 ready->n_debug--;
2803 /* If the queue becomes empty, reset it. */
2804 if (ready->n_ready == 0)
2805 ready->first = ready->veclen - 1;
2807 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
2808 QUEUE_INDEX (t) = QUEUE_NOWHERE;
2810 return t;
2813 /* The following code implements multi-pass scheduling for the first
2814 cycle. In other words, we try to choose the ready insn that
2815 allows the maximum number of insns to be issued on the same cycle. */
2817 /* Return a pointer to element INDEX of the ready list. The insn with
2818 the highest priority has INDEX 0, and the insn with the lowest
2819 priority has INDEX N_READY - 1. */
2821 rtx
2822 ready_element (struct ready_list *ready, int index)
2824 gcc_assert (ready->n_ready && index < ready->n_ready);
2826 return ready->vec[ready->first - index];
2829 /* Remove the element INDEX from the ready list and return it. INDEX
2830 for insn with the highest priority is 0, and the lowest priority
2831 has N_READY - 1. */
2833 HAIFA_INLINE static rtx
2834 ready_remove (struct ready_list *ready, int index)
2836 rtx t;
2837 int i;
2839 if (index == 0)
2840 return ready_remove_first (ready);
2841 gcc_assert (ready->n_ready && index < ready->n_ready);
2842 t = ready->vec[ready->first - index];
2843 ready->n_ready--;
2844 if (DEBUG_INSN_P (t))
2845 ready->n_debug--;
2846 for (i = index; i < ready->n_ready; i++)
2847 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
2848 QUEUE_INDEX (t) = QUEUE_NOWHERE;
2849 return t;
2852 /* Remove INSN from the ready list. */
2853 static void
2854 ready_remove_insn (rtx insn)
2856 int i;
2858 for (i = 0; i < readyp->n_ready; i++)
2859 if (ready_element (readyp, i) == insn)
2861 ready_remove (readyp, i);
2862 return;
2864 gcc_unreachable ();
2867 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
2868 macro. */
2870 void
2871 ready_sort (struct ready_list *ready)
2873 int i;
2874 rtx *first = ready_lastpos (ready);
2876 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2878 for (i = 0; i < ready->n_ready; i++)
2879 if (!DEBUG_INSN_P (first[i]))
2880 setup_insn_reg_pressure_info (first[i]);
2882 if (sched_pressure == SCHED_PRESSURE_MODEL
2883 && model_curr_point < model_num_insns)
2884 model_set_excess_costs (first, ready->n_ready);
2885 SCHED_SORT (first, ready->n_ready);
2888 /* PREV is an insn that is ready to execute. Adjust its priority if that
2889 will help shorten or lengthen register lifetimes as appropriate. Also
2890 provide a hook for the target to tweak itself. */
2892 HAIFA_INLINE static void
2893 adjust_priority (rtx prev)
2895 /* ??? There used to be code here to try and estimate how an insn
2896 affected register lifetimes, but it did it by looking at REG_DEAD
2897 notes, which we removed in schedule_region. Nor did it try to
2898 take into account register pressure or anything useful like that.
2900 Revisit when we have a machine model to work with and not before. */
2902 if (targetm.sched.adjust_priority)
2903 INSN_PRIORITY (prev) =
2904 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
2908 /* Advance DFA state STATE by one cycle. */
2908 void
2909 advance_state (state_t state)
2911 if (targetm.sched.dfa_pre_advance_cycle)
2912 targetm.sched.dfa_pre_advance_cycle ();
2914 if (targetm.sched.dfa_pre_cycle_insn)
2915 state_transition (state,
2916 targetm.sched.dfa_pre_cycle_insn ());
2918 state_transition (state, NULL);
2920 if (targetm.sched.dfa_post_cycle_insn)
2921 state_transition (state,
2922 targetm.sched.dfa_post_cycle_insn ());
2924 if (targetm.sched.dfa_post_advance_cycle)
2925 targetm.sched.dfa_post_advance_cycle ();
2929 /* Advance time by one cycle. */
2929 HAIFA_INLINE static void
2930 advance_one_cycle (void)
2932 advance_state (curr_state);
2933 if (sched_verbose >= 6)
2934 fprintf (sched_dump, ";;\tAdvanced a state.\n");
2937 /* Update register pressure after scheduling INSN. */
2938 static void
2939 update_register_pressure (rtx insn)
2941 struct reg_use_data *use;
2942 struct reg_set_data *set;
2944 gcc_checking_assert (!DEBUG_INSN_P (insn));
2946 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2947 if (dying_use_p (use))
2948 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
2949 use->regno, false);
2950 for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
2951 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
2952 set->regno, true);
2955 /* Set up or update (if UPDATE_P) max register pressure (see its
2956 meaning in sched-int.h::_haifa_insn_data) for all current BB insns
2957 after insn AFTER. */
2958 static void
2959 setup_insn_max_reg_pressure (rtx after, bool update_p)
2961 int i, p;
2962 bool eq_p;
2963 rtx insn;
2964 static int max_reg_pressure[N_REG_CLASSES];
2966 save_reg_pressure ();
2967 for (i = 0; i < ira_pressure_classes_num; i++)
2968 max_reg_pressure[ira_pressure_classes[i]]
2969 = curr_reg_pressure[ira_pressure_classes[i]];
2970 for (insn = NEXT_INSN (after);
2971 insn != NULL_RTX && ! BARRIER_P (insn)
2972 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
2973 insn = NEXT_INSN (insn))
2974 if (NONDEBUG_INSN_P (insn))
2976 eq_p = true;
2977 for (i = 0; i < ira_pressure_classes_num; i++)
2979 p = max_reg_pressure[ira_pressure_classes[i]];
2980 if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
2982 eq_p = false;
2983 INSN_MAX_REG_PRESSURE (insn)[i]
2984 = max_reg_pressure[ira_pressure_classes[i]];
2987 if (update_p && eq_p)
2988 break;
2989 update_register_pressure (insn);
2990 for (i = 0; i < ira_pressure_classes_num; i++)
2991 if (max_reg_pressure[ira_pressure_classes[i]]
2992 < curr_reg_pressure[ira_pressure_classes[i]])
2993 max_reg_pressure[ira_pressure_classes[i]]
2994 = curr_reg_pressure[ira_pressure_classes[i]];
2996 restore_reg_pressure ();
2999 /* Update the current register pressure after scheduling INSN. Also
3000 update the max register pressure for unscheduled insns of the current
3001 BB. */
3002 static void
3003 update_reg_and_insn_max_reg_pressure (rtx insn)
3005 int i;
3006 int before[N_REG_CLASSES];
3008 for (i = 0; i < ira_pressure_classes_num; i++)
3009 before[i] = curr_reg_pressure[ira_pressure_classes[i]];
3010 update_register_pressure (insn);
3011 for (i = 0; i < ira_pressure_classes_num; i++)
3012 if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
3013 break;
3014 if (i < ira_pressure_classes_num)
3015 setup_insn_max_reg_pressure (insn, true);
3018 /* Set up register pressure at the beginning of basic block BB whose
3019 insns start after insn AFTER. Also set up the max register pressure
3020 for all insns of the basic block. */
3021 void
3022 sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
3024 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
3025 initiate_bb_reg_pressure_info (bb);
3026 setup_insn_max_reg_pressure (after, false);
3029 /* If doing predication while scheduling, verify whether INSN, which
3030 has just been scheduled, clobbers the conditions of any
3031 instructions that must be predicated in order to break their
3032 dependencies. If so, remove them from the queues so that they will
3033 only be scheduled once their control dependency is resolved. */
3035 static void
3036 check_clobbered_conditions (rtx insn)
3038 HARD_REG_SET t;
3039 int i;
3041 if ((current_sched_info->flags & DO_PREDICATION) == 0)
3042 return;
3044 find_all_hard_reg_sets (insn, &t);
3046 restart:
3047 for (i = 0; i < ready.n_ready; i++)
3049 rtx x = ready_element (&ready, i);
3050 if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
3052 ready_remove_insn (x);
3053 goto restart;
3056 for (i = 0; i <= max_insn_queue_index; i++)
3058 rtx link;
3059 int q = NEXT_Q_AFTER (q_ptr, i);
3061 restart_queue:
3062 for (link = insn_queue[q]; link; link = XEXP (link, 1))
3064 rtx x = XEXP (link, 0);
3065 if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
3067 queue_remove (x);
3068 goto restart_queue;
3074 /* Return (in order):
3076 - positive if INSN adversely affects the pressure on one
3077 register class
3079 - negative if INSN reduces the pressure on one register class
3081 - 0 if INSN doesn't affect the pressure on any register class. */
3083 static int
3084 model_classify_pressure (struct model_insn_info *insn)
3086 struct reg_pressure_data *reg_pressure;
3087 int death[N_REG_CLASSES];
3088 int pci, cl, sum;
3090 calculate_reg_deaths (insn->insn, death);
3091 reg_pressure = INSN_REG_PRESSURE (insn->insn);
3092 sum = 0;
3093 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3095 cl = ira_pressure_classes[pci];
3096 if (death[cl] < reg_pressure[pci].set_increase)
3097 return 1;
3098 sum += reg_pressure[pci].set_increase - death[cl];
3100 return sum;
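/* For example, with two hypothetical pressure classes C1 and C2: an insn
   whose set_increase for C1 is 1 while death[C1] is 0 makes the function
   return 1 immediately (it raises the pressure on C1).  If instead
   death[C1] is 2 and C2 is untouched, the sum is (1 - 2) + (0 - 0) = -1,
   so the insn reduces pressure overall.  */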
3103 /* Return true if INSN1 should come before INSN2 in the model schedule. */
3105 static int
3106 model_order_p (struct model_insn_info *insn1, struct model_insn_info *insn2)
3108 unsigned int height1, height2;
3109 unsigned int priority1, priority2;
3111 /* Prefer instructions with a higher model priority. */
3112 if (insn1->model_priority != insn2->model_priority)
3113 return insn1->model_priority > insn2->model_priority;
3115 /* Combine the length of the longest path of satisfied true dependencies
3116 that leads to each instruction (depth) with the length of the longest
3117 path of any dependencies that leads from the instruction (alap).
3118 Prefer instructions with the greatest combined length. If the combined
3119 lengths are equal, prefer instructions with the greatest depth.
3121 The idea is that, if we have a set S of "equal" instructions that each
3122 have ALAP value X, and we pick one such instruction I, any true-dependent
3123 successors of I that have ALAP value X - 1 should be preferred over S.
3124 This encourages the schedule to be "narrow" rather than "wide".
3125 However, if I is a low-priority instruction that we decided to
3126 schedule because of its model_classify_pressure, and if there
3127 is a set of higher-priority instructions T, the aforementioned
3128 successors of I should not have the edge over T. */
3129 height1 = insn1->depth + insn1->alap;
3130 height2 = insn2->depth + insn2->alap;
3131 if (height1 != height2)
3132 return height1 > height2;
3133 if (insn1->depth != insn2->depth)
3134 return insn1->depth > insn2->depth;
3136 /* We have no real preference between INSN1 and INSN2 as far as attempts
3137 to reduce pressure go. Prefer instructions with higher priorities. */
3138 priority1 = INSN_PRIORITY (insn1->insn);
3139 priority2 = INSN_PRIORITY (insn2->insn);
3140 if (priority1 != priority2)
3141 return priority1 > priority2;
3143 /* Use the original rtl sequence as a tie-breaker. */
3144 return insn1 < insn2;
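/* For instance, with equal model_priority values: an insn A with depth 3
   and alap 4 (combined length 7) is ordered before an insn B with depth 2
   and alap 4 (combined length 6).  If B instead had alap 5, the combined
   lengths would tie at 7 and A would still win on its greater depth,
   which is what keeps the schedule "narrow".  */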
3147 /* Add INSN to the model worklist immediately after PREV. Add it to the
3148 beginning of the list if PREV is null. */
3150 static void
3151 model_add_to_worklist_at (struct model_insn_info *insn,
3152 struct model_insn_info *prev)
3154 gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_NOWHERE);
3155 QUEUE_INDEX (insn->insn) = QUEUE_READY;
3157 insn->prev = prev;
3158 if (prev)
3160 insn->next = prev->next;
3161 prev->next = insn;
3163 else
3165 insn->next = model_worklist;
3166 model_worklist = insn;
3168 if (insn->next)
3169 insn->next->prev = insn;
3172 /* Remove INSN from the model worklist. */
3174 static void
3175 model_remove_from_worklist (struct model_insn_info *insn)
3177 gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_READY);
3178 QUEUE_INDEX (insn->insn) = QUEUE_NOWHERE;
3180 if (insn->prev)
3181 insn->prev->next = insn->next;
3182 else
3183 model_worklist = insn->next;
3184 if (insn->next)
3185 insn->next->prev = insn->prev;
3188 /* Add INSN to the model worklist. Start looking for a suitable position
3189 between neighbors PREV and NEXT, testing at most MAX_SCHED_READY_INSNS
3190 insns either side. A null PREV indicates the beginning of the list and
3191 a null NEXT indicates the end. */
3193 static void
3194 model_add_to_worklist (struct model_insn_info *insn,
3195 struct model_insn_info *prev,
3196 struct model_insn_info *next)
3198 int count;
3200 count = MAX_SCHED_READY_INSNS;
3201 if (count > 0 && prev && model_order_p (insn, prev))
3204 count--;
3205 prev = prev->prev;
3207 while (count > 0 && prev && model_order_p (insn, prev));
3208 else
3209 while (count > 0 && next && model_order_p (next, insn))
3211 count--;
3212 prev = next;
3213 next = next->next;
3215 model_add_to_worklist_at (insn, prev);
3218 /* INSN may now have a higher priority (in the model_order_p sense)
3219 than before. Move it up the worklist if necessary. */
3221 static void
3222 model_promote_insn (struct model_insn_info *insn)
3224 struct model_insn_info *prev;
3225 int count;
3227 prev = insn->prev;
3228 count = MAX_SCHED_READY_INSNS;
3229 while (count > 0 && prev && model_order_p (insn, prev))
3231 count--;
3232 prev = prev->prev;
3234 if (prev != insn->prev)
3236 model_remove_from_worklist (insn);
3237 model_add_to_worklist_at (insn, prev);
3241 /* Add INSN to the end of the model schedule. */
3243 static void
3244 model_add_to_schedule (rtx insn)
3246 unsigned int point;
3248 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
3249 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
3251 point = model_schedule.length ();
3252 model_schedule.quick_push (insn);
3253 INSN_MODEL_INDEX (insn) = point + 1;
3256 /* Analyze the instructions that are to be scheduled, setting up
3257 MODEL_INSN_INFO (...) and model_num_insns accordingly. Add ready
3258 instructions to model_worklist. */
3260 static void
3261 model_analyze_insns (void)
3263 rtx start, end, iter;
3264 sd_iterator_def sd_it;
3265 dep_t dep;
3266 struct model_insn_info *insn, *con;
3268 model_num_insns = 0;
3269 start = PREV_INSN (current_sched_info->next_tail);
3270 end = current_sched_info->prev_head;
3271 for (iter = start; iter != end; iter = PREV_INSN (iter))
3272 if (NONDEBUG_INSN_P (iter))
3274 insn = MODEL_INSN_INFO (iter);
3275 insn->insn = iter;
3276 FOR_EACH_DEP (iter, SD_LIST_FORW, sd_it, dep)
3278 con = MODEL_INSN_INFO (DEP_CON (dep));
3279 if (con->insn && insn->alap < con->alap + 1)
3280 insn->alap = con->alap + 1;
3283 insn->old_queue = QUEUE_INDEX (iter);
3284 QUEUE_INDEX (iter) = QUEUE_NOWHERE;
3286 insn->unscheduled_preds = dep_list_size (iter, SD_LIST_HARD_BACK);
3287 if (insn->unscheduled_preds == 0)
3288 model_add_to_worklist (insn, NULL, model_worklist);
3290 model_num_insns++;
3294 /* The global state describes the register pressure at the start of the
3295 model schedule. Initialize GROUP accordingly. */
3297 static void
3298 model_init_pressure_group (struct model_pressure_group *group)
3300 int pci, cl;
3302 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3304 cl = ira_pressure_classes[pci];
3305 group->limits[pci].pressure = curr_reg_pressure[cl];
3306 group->limits[pci].point = 0;
3308 /* Use index model_num_insns to record the state after the last
3309 instruction in the model schedule. */
3310 group->model = XNEWVEC (struct model_pressure_data,
3311 (model_num_insns + 1) * ira_pressure_classes_num);
3314 /* Record that MODEL_REF_PRESSURE (GROUP, POINT, PCI) is PRESSURE.
3315 Update the maximum pressure for the whole schedule. */
3317 static void
3318 model_record_pressure (struct model_pressure_group *group,
3319 int point, int pci, int pressure)
3321 MODEL_REF_PRESSURE (group, point, pci) = pressure;
3322 if (group->limits[pci].pressure < pressure)
3324 group->limits[pci].pressure = pressure;
3325 group->limits[pci].point = point;
3329 /* INSN has just been added to the end of the model schedule. Record its
3330 register-pressure information. */
3332 static void
3333 model_record_pressures (struct model_insn_info *insn)
3335 struct reg_pressure_data *reg_pressure;
3336 int point, pci, cl, delta;
3337 int death[N_REG_CLASSES];
3339 point = model_index (insn->insn);
3340 if (sched_verbose >= 2)
3342 if (point == 0)
3344 fprintf (sched_dump, "\n;;\tModel schedule:\n;;\n");
3345 fprintf (sched_dump, ";;\t| idx insn | mpri hght dpth prio |\n");
3347 fprintf (sched_dump, ";;\t| %3d %4d | %4d %4d %4d %4d | %-30s ",
3348 point, INSN_UID (insn->insn), insn->model_priority,
3349 insn->depth + insn->alap, insn->depth,
3350 INSN_PRIORITY (insn->insn),
3351 str_pattern_slim (PATTERN (insn->insn)));
3353 calculate_reg_deaths (insn->insn, death);
3354 reg_pressure = INSN_REG_PRESSURE (insn->insn);
3355 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3357 cl = ira_pressure_classes[pci];
3358 delta = reg_pressure[pci].set_increase - death[cl];
3359 if (sched_verbose >= 2)
3360 fprintf (sched_dump, " %s:[%d,%+d]", reg_class_names[cl],
3361 curr_reg_pressure[cl], delta);
3362 model_record_pressure (&model_before_pressure, point, pci,
3363 curr_reg_pressure[cl]);
3365 if (sched_verbose >= 2)
3366 fprintf (sched_dump, "\n");
3369 /* All instructions have been added to the model schedule. Record the
3370 final register pressure in GROUP and set up all MODEL_MAX_PRESSUREs. */
3372 static void
3373 model_record_final_pressures (struct model_pressure_group *group)
3375 int point, pci, max_pressure, ref_pressure, cl;
3377 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3379 /* Record the final pressure for this class. */
3380 cl = ira_pressure_classes[pci];
3381 point = model_num_insns;
3382 ref_pressure = curr_reg_pressure[cl];
3383 model_record_pressure (group, point, pci, ref_pressure);
3385 /* Record the original maximum pressure. */
3386 group->limits[pci].orig_pressure = group->limits[pci].pressure;
3388 /* Update the MODEL_MAX_PRESSURE for every point of the schedule. */
3389 max_pressure = ref_pressure;
3390 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
3391 while (point > 0)
3393 point--;
3394 ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
3395 max_pressure = MAX (max_pressure, ref_pressure);
3396 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
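/* A small worked example with invented pressures: if the recorded ref
   pressures for one class are 2, 5 and 3 at points 0, 1 and 2, and the
   final pressure at point model_num_insns == 3 is 4, the backward walk
   sets MODEL_MAX_PRESSURE to 4 at point 3, MAX (3, 4) = 4 at point 2,
   MAX (5, 4) = 5 at point 1 and MAX (2, 5) = 5 at point 0.  */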
3401 /* Update all successors of INSN, given that INSN has just been scheduled. */
3403 static void
3404 model_add_successors_to_worklist (struct model_insn_info *insn)
3406 sd_iterator_def sd_it;
3407 struct model_insn_info *con;
3408 dep_t dep;
3410 FOR_EACH_DEP (insn->insn, SD_LIST_FORW, sd_it, dep)
3412 con = MODEL_INSN_INFO (DEP_CON (dep));
3413 /* Ignore debug instructions, and instructions from other blocks. */
3414 if (con->insn)
3416 con->unscheduled_preds--;
3418 /* Update the depth field of each true-dependent successor.
3419 Increasing the depth gives them a higher priority than
3420 before. */
3421 if (DEP_TYPE (dep) == REG_DEP_TRUE && con->depth < insn->depth + 1)
3423 con->depth = insn->depth + 1;
3424 if (QUEUE_INDEX (con->insn) == QUEUE_READY)
3425 model_promote_insn (con);
3428 /* If this is a true dependency, or if there are no remaining
3429 dependencies for CON (meaning that CON only had non-true
3430 dependencies), make sure that CON is on the worklist.
3431 We don't bother otherwise because it would tend to fill the
3432 worklist with a lot of low-priority instructions that are not
3433 yet ready to issue. */
3434 if ((con->depth > 0 || con->unscheduled_preds == 0)
3435 && QUEUE_INDEX (con->insn) == QUEUE_NOWHERE)
3436 model_add_to_worklist (con, insn, insn->next);
3441 /* Give INSN a higher priority than any current instruction, then give
3442 unscheduled predecessors of INSN a higher priority still. If any of
3443 those predecessors are not on the model worklist, do the same for its
3444 predecessors, and so on. */
3446 static void
3447 model_promote_predecessors (struct model_insn_info *insn)
3449 struct model_insn_info *pro, *first;
3450 sd_iterator_def sd_it;
3451 dep_t dep;
3453 if (sched_verbose >= 7)
3454 fprintf (sched_dump, ";;\t+--- priority of %d = %d, priority of",
3455 INSN_UID (insn->insn), model_next_priority);
3456 insn->model_priority = model_next_priority++;
3457 model_remove_from_worklist (insn);
3458 model_add_to_worklist_at (insn, NULL);
3460 first = NULL;
3461 for (;;)
3463 FOR_EACH_DEP (insn->insn, SD_LIST_HARD_BACK, sd_it, dep)
3465 pro = MODEL_INSN_INFO (DEP_PRO (dep));
3466 /* The first test is to ignore debug instructions, and instructions
3467 from other blocks. */
3468 if (pro->insn
3469 && pro->model_priority != model_next_priority
3470 && QUEUE_INDEX (pro->insn) != QUEUE_SCHEDULED)
3472 pro->model_priority = model_next_priority;
3473 if (sched_verbose >= 7)
3474 fprintf (sched_dump, " %d", INSN_UID (pro->insn));
3475 if (QUEUE_INDEX (pro->insn) == QUEUE_READY)
3477 /* PRO is already in the worklist, but it now has
3478 a higher priority than before. Move it to the
3479 appropriate place. */
3480 model_remove_from_worklist (pro);
3481 model_add_to_worklist (pro, NULL, model_worklist);
3483 else
3485 /* PRO isn't in the worklist. Recursively process
3486 its predecessors until we find one that is. */
3487 pro->next = first;
3488 first = pro;
3492 if (!first)
3493 break;
3494 insn = first;
3495 first = insn->next;
3497 if (sched_verbose >= 7)
3498 fprintf (sched_dump, " = %d\n", model_next_priority);
3499 model_next_priority++;
3502 /* Pick one instruction from model_worklist and process it. */
3504 static void
3505 model_choose_insn (void)
3507 struct model_insn_info *insn, *fallback;
3508 int count;
3510 if (sched_verbose >= 7)
3512 fprintf (sched_dump, ";;\t+--- worklist:\n");
3513 insn = model_worklist;
3514 count = MAX_SCHED_READY_INSNS;
3515 while (count > 0 && insn)
3517 fprintf (sched_dump, ";;\t+--- %d [%d, %d, %d, %d]\n",
3518 INSN_UID (insn->insn), insn->model_priority,
3519 insn->depth + insn->alap, insn->depth,
3520 INSN_PRIORITY (insn->insn));
3521 count--;
3522 insn = insn->next;
3526 /* Look for a ready instruction whose model_classify_pressure is zero
3527 or negative, picking the highest-priority one. Adding such an
3528 instruction to the schedule now should do no harm, and may actually
3529 do some good.
3531 Failing that, see whether there is an instruction with the highest
3532 extant model_priority that is not yet ready, but which would reduce
3533 pressure if it became ready. This is designed to catch cases like:
3535 (set (mem (reg R1)) (reg R2))
3537 where the instruction is the last remaining use of R1 and where the
3538 value of R2 is not yet available (or vice versa). The death of R1
3539 means that this instruction already reduces pressure. It is of
3540 course possible that the computation of R2 involves other registers
3541 that are hard to kill, but such cases are rare enough for this
3542 heuristic to be a win in general.
3544 Failing that, just pick the highest-priority instruction in the
3545 worklist. */
3546 count = MAX_SCHED_READY_INSNS;
3547 insn = model_worklist;
3548 fallback = 0;
3549 for (;;)
3551 if (count == 0 || !insn)
3553 insn = fallback ? fallback : model_worklist;
3554 break;
3556 if (insn->unscheduled_preds)
3558 if (model_worklist->model_priority == insn->model_priority
3559 && !fallback
3560 && model_classify_pressure (insn) < 0)
3561 fallback = insn;
3563 else
3565 if (model_classify_pressure (insn) <= 0)
3566 break;
3568 count--;
3569 insn = insn->next;
3572 if (sched_verbose >= 7 && insn != model_worklist)
3574 if (insn->unscheduled_preds)
3575 fprintf (sched_dump, ";;\t+--- promoting insn %d, with dependencies\n",
3576 INSN_UID (insn->insn));
3577 else
3578 fprintf (sched_dump, ";;\t+--- promoting insn %d, which is ready\n",
3579 INSN_UID (insn->insn));
3581 if (insn->unscheduled_preds)
3582 /* INSN isn't yet ready to issue. Give all its predecessors the
3583 highest priority. */
3584 model_promote_predecessors (insn);
3585 else
3587 /* INSN is ready. Add it to the end of model_schedule and
3588 process its successors. */
3589 model_add_successors_to_worklist (insn);
3590 model_remove_from_worklist (insn);
3591 model_add_to_schedule (insn->insn);
3592 model_record_pressures (insn);
3593 update_register_pressure (insn->insn);
3597 /* Restore all QUEUE_INDEXs to the values that they had before
3598 model_start_schedule was called. */
3600 static void
3601 model_reset_queue_indices (void)
3603 unsigned int i;
3604 rtx insn;
3606 FOR_EACH_VEC_ELT (model_schedule, i, insn)
3607 QUEUE_INDEX (insn) = MODEL_INSN_INFO (insn)->old_queue;
3610 /* We have calculated the model schedule and spill costs. Print a summary
3611 to sched_dump. */
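/* On a hypothetical target whose pressure classes are GENERAL_REGS and
   FLOAT_REGS, the function below would print a single line such as:

     ;; Pressure summary: GENERAL_REGS:24 FLOAT_REGS:7

   (class names and numbers invented), one entry per pressure class with
   the pressure value recorded for it by the model schedule. */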
3613 static void
3614 model_dump_pressure_summary (void)
3616 int pci, cl;
3618 fprintf (sched_dump, ";; Pressure summary:");
3619 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3621 cl = ira_pressure_classes[pci];
3622 fprintf (sched_dump, " %s:%d", reg_class_names[cl],
3623 model_before_pressure.limits[pci].pressure);
3625 fprintf (sched_dump, "\n\n");
3628 /* Initialize the SCHED_PRESSURE_MODEL information for the current
3629 scheduling region. */
3631 static void
3632 model_start_schedule (void)
3634 basic_block bb;
3636 model_next_priority = 1;
3637 model_schedule.create (sched_max_luid);
3638 model_insns = XCNEWVEC (struct model_insn_info, sched_max_luid);
3640 bb = BLOCK_FOR_INSN (NEXT_INSN (current_sched_info->prev_head));
3641 initiate_reg_pressure_info (df_get_live_in (bb));
3643 model_analyze_insns ();
3644 model_init_pressure_group (&model_before_pressure);
3645 while (model_worklist)
3646 model_choose_insn ();
3647 gcc_assert (model_num_insns == (int) model_schedule.length ());
3648 if (sched_verbose >= 2)
3649 fprintf (sched_dump, "\n");
3651 model_record_final_pressures (&model_before_pressure);
3652 model_reset_queue_indices ();
3654 XDELETEVEC (model_insns);
3656 model_curr_point = 0;
3657 initiate_reg_pressure_info (df_get_live_in (bb));
3658 if (sched_verbose >= 1)
3659 model_dump_pressure_summary ();
3662 /* Free the information associated with GROUP. */
3664 static void
3665 model_finalize_pressure_group (struct model_pressure_group *group)
3667 XDELETEVEC (group->model);
3670 /* Free the information created by model_start_schedule. */
3672 static void
3673 model_end_schedule (void)
3675 model_finalize_pressure_group (&model_before_pressure);
3676 model_schedule.release ();
3679 /* A structure that holds local state for the loop in schedule_block. */
3680 struct sched_block_state
3682 /* True if no real insns have been scheduled in the current cycle. */
3683 bool first_cycle_insn_p;
3684 /* True if a shadow insn has been scheduled in the current cycle, which
3685 means that no more normal insns can be issued. */
3686 bool shadows_only_p;
3687 /* True if we're winding down a modulo schedule, which means that we only
3688 issue insns with INSN_EXACT_TICK set. */
3689 bool modulo_epilogue;
3690 /* Initialized with the machine's issue rate every cycle, and updated
3691 by calls to the variable_issue hook. */
3692 int can_issue_more;
3695 /* INSN is the "currently executing insn". Launch each insn which was
3696 waiting on INSN. READY is the ready list which contains the insns
3697 that are ready to fire. CLOCK is the current cycle. The function
3698 returns necessary cycle advance after issuing the insn (it is not
3699 zero for insns in a schedule group). */
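/* An invented illustration of the return value: if INSN is followed in a
   SCHED_GROUP by a consumer NEXT and try_ready reports an effective cost
   of 2 cycles for NEXT, the loop over forward dependencies below sets
   ADVANCE to 2, telling the caller to advance the clock by two cycles
   before the rest of the group can issue. */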
3701 static int
3702 schedule_insn (rtx insn)
3704 sd_iterator_def sd_it;
3705 dep_t dep;
3706 int i;
3707 int advance = 0;
3709 if (sched_verbose >= 1)
3711 struct reg_pressure_data *pressure_info;
3712 fprintf (sched_dump, ";;\t%3i--> %s%-40s:",
3713 clock_var, (*current_sched_info->print_insn) (insn, 1),
3714 str_pattern_slim (PATTERN (insn)));
3716 if (recog_memoized (insn) < 0)
3717 fprintf (sched_dump, "nothing");
3718 else
3719 print_reservation (sched_dump, insn);
3720 pressure_info = INSN_REG_PRESSURE (insn);
3721 if (pressure_info != NULL)
3723 fputc (':', sched_dump);
3724 for (i = 0; i < ira_pressure_classes_num; i++)
3725 fprintf (sched_dump, "%s%+d(%d)",
3726 reg_class_names[ira_pressure_classes[i]],
3727 pressure_info[i].set_increase, pressure_info[i].change);
3729 if (sched_pressure == SCHED_PRESSURE_MODEL
3730 && model_curr_point < model_num_insns
3731 && model_index (insn) == model_curr_point)
3732 fprintf (sched_dump, ":model %d", model_curr_point);
3733 fputc ('\n', sched_dump);
3736 if (sched_pressure == SCHED_PRESSURE_WEIGHTED && !DEBUG_INSN_P (insn))
3737 update_reg_and_insn_max_reg_pressure (insn);
3739 /* Scheduling instruction should have all its dependencies resolved and
3740 should have been removed from the ready list. */
3741 gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
3743 /* Reset debug insns invalidated by moving this insn. */
3744 if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
3745 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
3746 sd_iterator_cond (&sd_it, &dep);)
3748 rtx dbg = DEP_PRO (dep);
3749 struct reg_use_data *use, *next;
3751 if (DEP_STATUS (dep) & DEP_CANCELLED)
3753 sd_iterator_next (&sd_it);
3754 continue;
3757 gcc_assert (DEBUG_INSN_P (dbg));
3759 if (sched_verbose >= 6)
3760 fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
3761 INSN_UID (dbg));
3763 /* ??? Rather than resetting the debug insn, we might be able
3764 to emit a debug temp before the just-scheduled insn, but
3765 this would involve checking that the expression at the
3766 point of the debug insn is equivalent to the expression
3767 before the just-scheduled insn. They might not be: the
3768 expression in the debug insn may depend on other insns not
3769 yet scheduled that set MEMs, REGs or even other debug
3770 insns. It's not clear that attempting to preserve debug
3771 information in these cases is worth the effort, given how
3772 uncommon these resets are and the likelihood that the debug
3773 temps introduced won't survive the schedule change. */
3774 INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
3775 df_insn_rescan (dbg);
3777 /* Unknown location doesn't use any registers. */
3778 for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
3780 struct reg_use_data *prev = use;
3782 /* Remove use from the cyclic next_regno_use chain first. */
3783 while (prev->next_regno_use != use)
3784 prev = prev->next_regno_use;
3785 prev->next_regno_use = use->next_regno_use;
3786 next = use->next_insn_use;
3787 free (use);
3789 INSN_REG_USE_LIST (dbg) = NULL;
3791 /* We delete rather than resolve these deps, otherwise we
3792 crash in sched_free_deps(), because forward deps are
3793 expected to be released before backward deps. */
3794 sd_delete_dep (sd_it);
3797 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
3798 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
3800 if (sched_pressure == SCHED_PRESSURE_MODEL
3801 && model_curr_point < model_num_insns
3802 && NONDEBUG_INSN_P (insn))
3804 if (model_index (insn) == model_curr_point)
3806 model_curr_point++;
3807 while (model_curr_point < model_num_insns
3808 && (QUEUE_INDEX (MODEL_INSN (model_curr_point))
3809 == QUEUE_SCHEDULED));
3810 else
3811 model_recompute (insn);
3812 model_update_limit_points ();
3813 update_register_pressure (insn);
3814 if (sched_verbose >= 2)
3815 print_curr_reg_pressure ();
3818 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
3819 if (INSN_TICK (insn) > clock_var)
3820 /* INSN has been prematurely moved from the queue to the ready list.
3821 This is possible only if the following flag is set. */
3822 gcc_assert (flag_sched_stalled_insns);
3824 /* ??? Probably, if INSN is scheduled prematurely, we should leave
3825 INSN_TICK untouched. This is a machine-dependent issue, actually. */
3826 INSN_TICK (insn) = clock_var;
3828 check_clobbered_conditions (insn);
3830 /* Update dependent instructions. First, see if by scheduling this insn
3831 now we broke a dependence in a way that requires us to change another
3832 insn. */
3833 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3834 sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
3836 struct dep_replacement *desc = DEP_REPLACE (dep);
3837 rtx pro = DEP_PRO (dep);
3838 if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED
3839 && desc != NULL && desc->insn == pro)
3840 apply_replacement (dep, false);
3843 /* Go through and resolve forward dependencies. */
3844 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
3845 sd_iterator_cond (&sd_it, &dep);)
3847 rtx next = DEP_CON (dep);
3848 bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
3850 /* Resolve the dependence between INSN and NEXT.
3851 sd_resolve_dep () moves current dep to another list thus
3852 advancing the iterator. */
3853 sd_resolve_dep (sd_it);
3855 if (cancelled)
3857 if (must_restore_pattern_p (next, dep))
3858 restore_pattern (dep, false);
3859 continue;
3862 /* Don't bother trying to mark next as ready if insn is a debug
3863 insn. If insn is the last hard dependency, it will have
3864 already been discounted. */
3865 if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
3866 continue;
3868 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
3870 int effective_cost;
3872 effective_cost = try_ready (next);
3874 if (effective_cost >= 0
3875 && SCHED_GROUP_P (next)
3876 && advance < effective_cost)
3877 advance = effective_cost;
3879 else
3880 /* A check insn always has only one forward dependence (to the first insn in
3881 the recovery block), so this will be executed only once. */
3883 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
3884 fix_recovery_deps (RECOVERY_BLOCK (insn));
3888 /* Annotate the instruction with issue information -- TImode
3889 indicates that the instruction is expected not to be able
3890 to issue on the same cycle as the previous insn. A machine
3891 may use this information to decide how the instruction should
3892 be aligned. */
3893 if (issue_rate > 1
3894 && GET_CODE (PATTERN (insn)) != USE
3895 && GET_CODE (PATTERN (insn)) != CLOBBER
3896 && !DEBUG_INSN_P (insn))
3898 if (reload_completed)
3899 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
3900 last_clock_var = clock_var;
3903 return advance;
3906 /* Functions for handling of notes. */
3908 /* Add the note list that ends at FROM_END to the end of the list in TO_ENDP. */
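/* For example, if *TO_ENDP currently ends the chain A <-> B and FROM_END
   ends the chain C <-> D, the walk below finds C, links B to C, and
   leaves *TO_ENDP pointing at D, giving A <-> B <-> C <-> D. */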
3909 void
3910 concat_note_lists (rtx from_end, rtx *to_endp)
3912 rtx from_start;
3914 /* It's easy when we have nothing to concat. */
3915 if (from_end == NULL)
3916 return;
3918 /* It's also easy when destination is empty. */
3919 if (*to_endp == NULL)
3921 *to_endp = from_end;
3922 return;
3925 from_start = from_end;
3926 while (PREV_INSN (from_start) != NULL)
3927 from_start = PREV_INSN (from_start);
3929 PREV_INSN (from_start) = *to_endp;
3930 NEXT_INSN (*to_endp) = from_start;
3931 *to_endp = from_end;
3934 /* Delete notes between HEAD and TAIL and put them in the chain
3935 of notes ended by NOTE_LIST. */
3936 void
3937 remove_notes (rtx head, rtx tail)
3939 rtx next_tail, insn, next;
3941 note_list = 0;
3942 if (head == tail && !INSN_P (head))
3943 return;
3945 next_tail = NEXT_INSN (tail);
3946 for (insn = head; insn != next_tail; insn = next)
3948 next = NEXT_INSN (insn);
3949 if (!NOTE_P (insn))
3950 continue;
3952 switch (NOTE_KIND (insn))
3954 case NOTE_INSN_BASIC_BLOCK:
3955 continue;
3957 case NOTE_INSN_EPILOGUE_BEG:
3958 if (insn != tail)
3960 remove_insn (insn);
3961 add_reg_note (next, REG_SAVE_NOTE,
3962 GEN_INT (NOTE_INSN_EPILOGUE_BEG));
3963 break;
3965 /* FALLTHRU */
3967 default:
3968 remove_insn (insn);
3970 /* Add the note to list that ends at NOTE_LIST. */
3971 PREV_INSN (insn) = note_list;
3972 NEXT_INSN (insn) = NULL_RTX;
3973 if (note_list)
3974 NEXT_INSN (note_list) = insn;
3975 note_list = insn;
3976 break;
3979 gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
3983 /* A structure to record enough data to allow us to backtrack the scheduler to
3984 a previous state. */
3985 struct haifa_saved_data
3987 /* Next entry on the list. */
3988 struct haifa_saved_data *next;
3990 /* Backtracking is associated with scheduling insns that have delay slots.
3991 DELAY_PAIR points to the structure that contains the insns involved, and
3992 the number of cycles between them. */
3993 struct delay_pair *delay_pair;
3995 /* Data used by the frontend (e.g. sched-ebb or sched-rgn). */
3996 void *fe_saved_data;
3997 /* Data used by the backend. */
3998 void *be_saved_data;
4000 /* Copies of global state. */
4001 int clock_var, last_clock_var;
4002 struct ready_list ready;
4003 state_t curr_state;
4005 rtx last_scheduled_insn;
4006 rtx last_nondebug_scheduled_insn;
4007 int cycle_issued_insns;
4009 /* Copies of state used in the inner loop of schedule_block. */
4010 struct sched_block_state sched_block;
4012 /* We don't need to save q_ptr, as its value is arbitrary and we can set it
4013 to 0 when restoring. */
4014 int q_size;
4015 rtx *insn_queue;
4017 /* Describe pattern replacements that occurred since this backtrack point
4018 was queued. */
4019 vec<dep_t> replacement_deps;
4020 vec<int> replace_apply;
4022 /* A copy of the next-cycle replacement vectors at the time of the backtrack
4023 point. */
4024 vec<dep_t> next_cycle_deps;
4025 vec<int> next_cycle_apply;
4028 /* A record, in reverse order, of all scheduled insns which have delay slots
4029 and may require backtracking. */
4030 static struct haifa_saved_data *backtrack_queue;
4032 /* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
4033 to SET_P. */
4034 static void
4035 mark_backtrack_feeds (rtx insn, int set_p)
4037 sd_iterator_def sd_it;
4038 dep_t dep;
4039 FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
4041 FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
4045 /* Save the current scheduler state so that we can backtrack to it
4046 later if necessary. PAIR gives the insns that make it necessary to
4047 save this point. SCHED_BLOCK is the local state of schedule_block
4048 that needs to be saved. */
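/* An invented illustration: when a delay pair (I1, I2) with a pair_delay
   of 3 is involved, scheduling I1 saves a backtrack point here; the loop
   below then gives I2 an INSN_EXACT_TICK of clock_var + 3, and if I2
   cannot issue on exactly that cycle the scheduler can restore this
   saved state and try a different schedule. */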
4049 static void
4050 save_backtrack_point (struct delay_pair *pair,
4051 struct sched_block_state sched_block)
4053 int i;
4054 struct haifa_saved_data *save = XNEW (struct haifa_saved_data);
4056 save->curr_state = xmalloc (dfa_state_size);
4057 memcpy (save->curr_state, curr_state, dfa_state_size);
4059 save->ready.first = ready.first;
4060 save->ready.n_ready = ready.n_ready;
4061 save->ready.n_debug = ready.n_debug;
4062 save->ready.veclen = ready.veclen;
4063 save->ready.vec = XNEWVEC (rtx, ready.veclen);
4064 memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));
4066 save->insn_queue = XNEWVEC (rtx, max_insn_queue_index + 1);
4067 save->q_size = q_size;
4068 for (i = 0; i <= max_insn_queue_index; i++)
4070 int q = NEXT_Q_AFTER (q_ptr, i);
4071 save->insn_queue[i] = copy_INSN_LIST (insn_queue[q]);
4074 save->clock_var = clock_var;
4075 save->last_clock_var = last_clock_var;
4076 save->cycle_issued_insns = cycle_issued_insns;
4077 save->last_scheduled_insn = last_scheduled_insn;
4078 save->last_nondebug_scheduled_insn = last_nondebug_scheduled_insn;
4080 save->sched_block = sched_block;
4082 save->replacement_deps.create (0);
4083 save->replace_apply.create (0);
4084 save->next_cycle_deps = next_cycle_replace_deps.copy ();
4085 save->next_cycle_apply = next_cycle_apply.copy ();
4087 if (current_sched_info->save_state)
4088 save->fe_saved_data = (*current_sched_info->save_state) ();
4090 if (targetm.sched.alloc_sched_context)
4092 save->be_saved_data = targetm.sched.alloc_sched_context ();
4093 targetm.sched.init_sched_context (save->be_saved_data, false);
4095 else
4096 save->be_saved_data = NULL;
4098 save->delay_pair = pair;
4100 save->next = backtrack_queue;
4101 backtrack_queue = save;
4103 while (pair)
4105 mark_backtrack_feeds (pair->i2, 1);
4106 INSN_TICK (pair->i2) = INVALID_TICK;
4107 INSN_EXACT_TICK (pair->i2) = clock_var + pair_delay (pair);
4108 SHADOW_P (pair->i2) = pair->stages == 0;
4109 pair = pair->next_same_i1;
4113 /* Walk the ready list and all queues. If any insns have unresolved backwards
4114 dependencies, these must be cancelled deps, broken by predication. Set or
4115 clear (depending on SET) the DEP_CANCELLED bit in DEP_STATUS. */
4117 static void
4118 toggle_cancelled_flags (bool set)
4120 int i;
4121 sd_iterator_def sd_it;
4122 dep_t dep;
4124 if (ready.n_ready > 0)
4126 rtx *first = ready_lastpos (&ready);
4127 for (i = 0; i < ready.n_ready; i++)
4128 FOR_EACH_DEP (first[i], SD_LIST_BACK, sd_it, dep)
4129 if (!DEBUG_INSN_P (DEP_PRO (dep)))
4131 if (set)
4132 DEP_STATUS (dep) |= DEP_CANCELLED;
4133 else
4134 DEP_STATUS (dep) &= ~DEP_CANCELLED;
4137 for (i = 0; i <= max_insn_queue_index; i++)
4139 int q = NEXT_Q_AFTER (q_ptr, i);
4140 rtx link;
4141 for (link = insn_queue[q]; link; link = XEXP (link, 1))
4143 rtx insn = XEXP (link, 0);
4144 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4145 if (!DEBUG_INSN_P (DEP_PRO (dep)))
4147 if (set)
4148 DEP_STATUS (dep) |= DEP_CANCELLED;
4149 else
4150 DEP_STATUS (dep) &= ~DEP_CANCELLED;
4156 /* Undo the replacements that have occurred after backtrack point SAVE
4157 was placed. */
4158 static void
4159 undo_replacements_for_backtrack (struct haifa_saved_data *save)
4161 while (!save->replacement_deps.is_empty ())
4163 dep_t dep = save->replacement_deps.pop ();
4164 int apply_p = save->replace_apply.pop ();
4166 if (apply_p)
4167 restore_pattern (dep, true);
4168 else
4169 apply_replacement (dep, true);
4171 save->replacement_deps.release ();
4172 save->replace_apply.release ();
4175 /* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
4176 Restore their dependencies to an unresolved state, and mark them as
4177 queued nowhere. */
4179 static void
4180 unschedule_insns_until (rtx insn)
4182 vec<rtx> recompute_vec = vNULL;
4184 /* Make two passes over the insns to be unscheduled. First, we clear out
4185 dependencies and other trivial bookkeeping. */
4186 for (;;)
4188 rtx last;
4189 sd_iterator_def sd_it;
4190 dep_t dep;
4192 last = scheduled_insns.pop ();
4194 /* This will be changed by restore_backtrack_point if the insn is in
4195 any queue. */
4196 QUEUE_INDEX (last) = QUEUE_NOWHERE;
4197 if (last != insn)
4198 INSN_TICK (last) = INVALID_TICK;
4200 if (modulo_ii > 0 && INSN_UID (last) < modulo_iter0_max_uid)
4201 modulo_insns_scheduled--;
4203 for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
4204 sd_iterator_cond (&sd_it, &dep);)
4206 rtx con = DEP_CON (dep);
4207 sd_unresolve_dep (sd_it);
4208 if (!MUST_RECOMPUTE_SPEC_P (con))
4210 MUST_RECOMPUTE_SPEC_P (con) = 1;
4211 recompute_vec.safe_push (con);
4215 if (last == insn)
4216 break;
4219 /* A second pass, to update ready and speculation status for insns
4220 depending on the unscheduled ones. The first pass must have
4221 popped the scheduled_insns vector up to the point where we
4222 restart scheduling, as recompute_todo_spec requires it to be
4223 up-to-date. */
4224 while (!recompute_vec.is_empty ())
4226 rtx con;
4228 con = recompute_vec.pop ();
4229 MUST_RECOMPUTE_SPEC_P (con) = 0;
4230 if (!sd_lists_empty_p (con, SD_LIST_HARD_BACK))
4232 TODO_SPEC (con) = HARD_DEP;
4233 INSN_TICK (con) = INVALID_TICK;
4234 if (PREDICATED_PAT (con) != NULL_RTX)
4235 haifa_change_pattern (con, ORIG_PAT (con));
4237 else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
4238 TODO_SPEC (con) = recompute_todo_spec (con, true);
4240 recompute_vec.release ();
4243 /* Restore scheduler state from the topmost entry on the backtracking queue.
4244 PSCHED_BLOCK_P points to the local data of schedule_block that we must
4245 overwrite with the saved data.
4246 The caller must already have called unschedule_insns_until. */
4248 static void
4249 restore_last_backtrack_point (struct sched_block_state *psched_block)
4251 rtx link;
4252 int i;
4253 struct haifa_saved_data *save = backtrack_queue;
4255 backtrack_queue = save->next;
4257 if (current_sched_info->restore_state)
4258 (*current_sched_info->restore_state) (save->fe_saved_data);
4260 if (targetm.sched.alloc_sched_context)
4262 targetm.sched.set_sched_context (save->be_saved_data);
4263 targetm.sched.free_sched_context (save->be_saved_data);
4266 /* Do this first since it clobbers INSN_TICK of the involved
4267 instructions. */
4268 undo_replacements_for_backtrack (save);
4270 /* Clear the QUEUE_INDEX of everything in the ready list or one
4271 of the queues. */
4272 if (ready.n_ready > 0)
4274 rtx *first = ready_lastpos (&ready);
4275 for (i = 0; i < ready.n_ready; i++)
4277 rtx insn = first[i];
4278 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
4279 INSN_TICK (insn) = INVALID_TICK;
4282 for (i = 0; i <= max_insn_queue_index; i++)
4284 int q = NEXT_Q_AFTER (q_ptr, i);
4286 for (link = insn_queue[q]; link; link = XEXP (link, 1))
4288 rtx x = XEXP (link, 0);
4289 QUEUE_INDEX (x) = QUEUE_NOWHERE;
4290 INSN_TICK (x) = INVALID_TICK;
4292 free_INSN_LIST_list (&insn_queue[q]);
4295 free (ready.vec);
4296 ready = save->ready;
4298 if (ready.n_ready > 0)
4300 rtx *first = ready_lastpos (&ready);
4301 for (i = 0; i < ready.n_ready; i++)
4303 rtx insn = first[i];
4304 QUEUE_INDEX (insn) = QUEUE_READY;
4305 TODO_SPEC (insn) = recompute_todo_spec (insn, true);
4306 INSN_TICK (insn) = save->clock_var;
4310 q_ptr = 0;
4311 q_size = save->q_size;
4312 for (i = 0; i <= max_insn_queue_index; i++)
4314 int q = NEXT_Q_AFTER (q_ptr, i);
4316 insn_queue[q] = save->insn_queue[q];
4318 for (link = insn_queue[q]; link; link = XEXP (link, 1))
4320 rtx x = XEXP (link, 0);
4321 QUEUE_INDEX (x) = i;
4322 TODO_SPEC (x) = recompute_todo_spec (x, true);
4323 INSN_TICK (x) = save->clock_var + i;
4326 free (save->insn_queue);
4328 toggle_cancelled_flags (true);
4330 clock_var = save->clock_var;
4331 last_clock_var = save->last_clock_var;
4332 cycle_issued_insns = save->cycle_issued_insns;
4333 last_scheduled_insn = save->last_scheduled_insn;
4334 last_nondebug_scheduled_insn = save->last_nondebug_scheduled_insn;
4336 *psched_block = save->sched_block;
4338 memcpy (curr_state, save->curr_state, dfa_state_size);
4339 free (save->curr_state);
4341 mark_backtrack_feeds (save->delay_pair->i2, 0);
4343 gcc_assert (next_cycle_replace_deps.is_empty ());
4344 next_cycle_replace_deps = save->next_cycle_deps.copy ();
4345 next_cycle_apply = save->next_cycle_apply.copy ();
4347 free (save);
4349 for (save = backtrack_queue; save; save = save->next)
4351 mark_backtrack_feeds (save->delay_pair->i2, 1);
4355 /* Discard all data associated with the topmost entry in the backtrack
4356 queue. If RESET_TICK is false, we just want to free the data. If true,
4357 we are doing this because we discovered a reason to backtrack. In the
4358 latter case, also reset the INSN_TICK for the shadow insn. */
4359 static void
4360 free_topmost_backtrack_point (bool reset_tick)
4362 struct haifa_saved_data *save = backtrack_queue;
4363 int i;
4365 backtrack_queue = save->next;
4367 if (reset_tick)
4369 struct delay_pair *pair = save->delay_pair;
4370 while (pair)
4372 INSN_TICK (pair->i2) = INVALID_TICK;
4373 INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
4374 pair = pair->next_same_i1;
4376 undo_replacements_for_backtrack (save);
4378 else
4380 save->replacement_deps.release ();
4381 save->replace_apply.release ();
4384 if (targetm.sched.free_sched_context)
4385 targetm.sched.free_sched_context (save->be_saved_data);
4386 if (current_sched_info->restore_state)
4387 free (save->fe_saved_data);
4388 for (i = 0; i <= max_insn_queue_index; i++)
4389 free_INSN_LIST_list (&save->insn_queue[i]);
4390 free (save->insn_queue);
4391 free (save->curr_state);
4392 free (save->ready.vec);
4393 free (save);
4396 /* Free the entire backtrack queue. */
4397 static void
4398 free_backtrack_queue (void)
4400 while (backtrack_queue)
4401 free_topmost_backtrack_point (false);
4404 /* Apply a replacement described by DESC. If IMMEDIATELY is false, we
4405 may have to postpone the replacement until the start of the next cycle,
4406 at which point we will be called again with IMMEDIATELY true. This
4407 postponement is only done for machines which have instruction packets
4408 with explicit parallelism. */
4409 static void
4410 apply_replacement (dep_t dep, bool immediately)
4412 struct dep_replacement *desc = DEP_REPLACE (dep);
4413 if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4415 next_cycle_replace_deps.safe_push (dep);
4416 next_cycle_apply.safe_push (1);
4418 else
4420 bool success;
4422 if (QUEUE_INDEX (desc->insn) == QUEUE_SCHEDULED)
4423 return;
4425 if (sched_verbose >= 5)
4426 fprintf (sched_dump, "applying replacement for insn %d\n",
4427 INSN_UID (desc->insn));
4429 success = validate_change (desc->insn, desc->loc, desc->newval, 0);
4430 gcc_assert (success);
4432 update_insn_after_change (desc->insn);
4433 if ((TODO_SPEC (desc->insn) & (HARD_DEP | DEP_POSTPONED)) == 0)
4434 fix_tick_ready (desc->insn);
4436 if (backtrack_queue != NULL)
4438 backtrack_queue->replacement_deps.safe_push (dep);
4439 backtrack_queue->replace_apply.safe_push (1);
4444 /* We have determined that a pattern involved in DEP must be restored.
4445 If IMMEDIATELY is false, we may have to postpone the replacement
4446 until the start of the next cycle, at which point we will be called
4447 again with IMMEDIATELY true. */
4448 static void
4449 restore_pattern (dep_t dep, bool immediately)
4451 rtx next = DEP_CON (dep);
4452 int tick = INSN_TICK (next);
4454 /* If we already scheduled the insn, the modified version is
4455 correct. */
4456 if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
4457 return;
4459 if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4461 next_cycle_replace_deps.safe_push (dep);
4462 next_cycle_apply.safe_push (0);
4463 return;
4467 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
4469 if (sched_verbose >= 5)
4470 fprintf (sched_dump, "restoring pattern for insn %d\n",
4471 INSN_UID (next));
4472 haifa_change_pattern (next, ORIG_PAT (next));
4474 else
4476 struct dep_replacement *desc = DEP_REPLACE (dep);
4477 bool success;
4479 if (sched_verbose >= 5)
4480 fprintf (sched_dump, "restoring pattern for insn %d\n",
4481 INSN_UID (desc->insn));
4482 tick = INSN_TICK (desc->insn);
4484 success = validate_change (desc->insn, desc->loc, desc->orig, 0);
4485 gcc_assert (success);
4486 update_insn_after_change (desc->insn);
4487 if (backtrack_queue != NULL)
4489 backtrack_queue->replacement_deps.safe_push (dep);
4490 backtrack_queue->replace_apply.safe_push (0);
4493 INSN_TICK (next) = tick;
4494 if (TODO_SPEC (next) == DEP_POSTPONED)
4495 return;
4497 if (sd_lists_empty_p (next, SD_LIST_BACK))
4498 TODO_SPEC (next) = 0;
4499 else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
4500 TODO_SPEC (next) = HARD_DEP;
4503 /* Perform pattern replacements that were queued up until the next
4504 cycle. */
4505 static void
4506 perform_replacements_new_cycle (void)
4508 int i;
4509 dep_t dep;
4510 FOR_EACH_VEC_ELT (next_cycle_replace_deps, i, dep)
4512 int apply_p = next_cycle_apply[i];
4513 if (apply_p)
4514 apply_replacement (dep, true);
4515 else
4516 restore_pattern (dep, true);
4518 next_cycle_replace_deps.truncate (0);
4519 next_cycle_apply.truncate (0);
4522 /* Compute INSN_TICK_ESTIMATE for INSN. PROCESSED is a bitmap of
4523 instructions we've previously encountered; a set bit prevents
4524 recursion. BUDGET is a limit on how far ahead we look; it is
4525 reduced on recursive calls. Return true if we produced a good
4526 estimate, or false if we exceeded the budget. */
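/* A small invented example: with BUDGET 4, if INSN depends on PRO with a
   dep_cost of 3, we recurse on PRO with a budget of 1; should PRO in turn
   have an unscheduled predecessor whose dep_cost is 1 or more, that cost
   is not below the remaining budget and the function gives up, returning
   false. */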
4527 static bool
4528 estimate_insn_tick (bitmap processed, rtx insn, int budget)
4530 sd_iterator_def sd_it;
4531 dep_t dep;
4532 int earliest = INSN_TICK (insn);
4534 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4536 rtx pro = DEP_PRO (dep);
4537 int t;
4539 if (DEP_STATUS (dep) & DEP_CANCELLED)
4540 continue;
4542 if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
4543 gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
4544 else
4546 int cost = dep_cost (dep);
4547 if (cost >= budget)
4548 return false;
4549 if (!bitmap_bit_p (processed, INSN_LUID (pro)))
4551 if (!estimate_insn_tick (processed, pro, budget - cost))
4552 return false;
4554 gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
4555 t = INSN_TICK_ESTIMATE (pro) + cost;
4556 if (earliest == INVALID_TICK || t > earliest)
4557 earliest = t;
4560 bitmap_set_bit (processed, INSN_LUID (insn));
4561 INSN_TICK_ESTIMATE (insn) = earliest;
4562 return true;
4565 /* Examine the pair of insns in P, and estimate (optimistically, assuming
4566 infinite resources) the cycle in which the delayed shadow can be issued.
4567 Return the number of cycles that must pass before the real insn can be
4568 issued in order to meet this constraint. */
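/* Worked example with invented numbers: if the optimistic estimate for
   the shadow P->i2 is tick 12, clock_var is 6 and pair_delay (P) is 3,
   then t = 12 - (6 + 3 + 1) = 2, so the real insn should be held back
   for two more cycles; a zero or negative t means it may issue now. */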
4569 static int
4570 estimate_shadow_tick (struct delay_pair *p)
4572 bitmap_head processed;
4573 int t;
4574 bool cutoff;
4575 bitmap_initialize (&processed, 0);
4577 cutoff = !estimate_insn_tick (&processed, p->i2,
4578 max_insn_queue_index + pair_delay (p));
4579 bitmap_clear (&processed);
4580 if (cutoff)
4581 return max_insn_queue_index;
4582 t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
4583 if (t > 0)
4584 return t;
4585 return 0;
4588 /* If INSN has no unresolved backwards dependencies, add it to the schedule and
4589 recursively resolve all its forward dependencies. */
4590 static void
4591 resolve_dependencies (rtx insn)
4593 sd_iterator_def sd_it;
4594 dep_t dep;
4596 /* Don't use sd_lists_empty_p; it ignores debug insns. */
4597 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (insn)) != NULL
4598 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (insn)) != NULL)
4599 return;
4601 if (sched_verbose >= 4)
4602 fprintf (sched_dump, ";;\tquickly resolving %d\n", INSN_UID (insn));
4604 if (QUEUE_INDEX (insn) >= 0)
4605 queue_remove (insn);
4607 scheduled_insns.safe_push (insn);
4609 /* Update dependent instructions. */
4610 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4611 sd_iterator_cond (&sd_it, &dep);)
4613 rtx next = DEP_CON (dep);
4615 if (sched_verbose >= 4)
4616 fprintf (sched_dump, ";;\t\tdep %d against %d\n", INSN_UID (insn),
4617 INSN_UID (next));
4619 /* Resolve the dependence between INSN and NEXT.
4620 sd_resolve_dep () moves current dep to another list thus
4621 advancing the iterator. */
4622 sd_resolve_dep (sd_it);
4624 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4626 resolve_dependencies (next);
4628 else
4629 /* A check insn always has only one forward dependence (to the first insn in
4630 the recovery block), so this will be executed only once. */
4632 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
4638 /* Return the head and tail pointers of ebb starting at BEG and ending
4639 at END. */
4640 void
4641 get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
4643 rtx beg_head = BB_HEAD (beg);
4644 rtx beg_tail = BB_END (beg);
4645 rtx end_head = BB_HEAD (end);
4646 rtx end_tail = BB_END (end);
4648 /* Don't include any notes or labels at the beginning of the BEG
4649 basic block, or notes at the end of the END basic block. */
4651 if (LABEL_P (beg_head))
4652 beg_head = NEXT_INSN (beg_head);
4654 while (beg_head != beg_tail)
4655 if (NOTE_P (beg_head))
4656 beg_head = NEXT_INSN (beg_head);
4657 else if (DEBUG_INSN_P (beg_head))
4659 rtx note, next;
4661 for (note = NEXT_INSN (beg_head);
4662 note != beg_tail;
4663 note = next)
4665 next = NEXT_INSN (note);
4666 if (NOTE_P (note))
4668 if (sched_verbose >= 9)
4669 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
4671 reorder_insns_nobb (note, note, PREV_INSN (beg_head));
4673 if (BLOCK_FOR_INSN (note) != beg)
4674 df_insn_change_bb (note, beg);
4676 else if (!DEBUG_INSN_P (note))
4677 break;
4680 break;
4682 else
4683 break;
4685 *headp = beg_head;
4687 if (beg == end)
4688 end_head = beg_head;
4689 else if (LABEL_P (end_head))
4690 end_head = NEXT_INSN (end_head);
4692 while (end_head != end_tail)
4693 if (NOTE_P (end_tail))
4694 end_tail = PREV_INSN (end_tail);
4695 else if (DEBUG_INSN_P (end_tail))
4697 rtx note, prev;
4699 for (note = PREV_INSN (end_tail);
4700 note != end_head;
4701 note = prev)
4703 prev = PREV_INSN (note);
4704 if (NOTE_P (note))
4706 if (sched_verbose >= 9)
4707 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
4709 reorder_insns_nobb (note, note, end_tail);
4711 if (end_tail == BB_END (end))
4712 BB_END (end) = note;
4714 if (BLOCK_FOR_INSN (note) != end)
4715 df_insn_change_bb (note, end);
4717 else if (!DEBUG_INSN_P (note))
4718 break;
4721 break;
4723 else
4724 break;
4726 *tailp = end_tail;
4729 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
4732 no_real_insns_p (const_rtx head, const_rtx tail)
4734 while (head != NEXT_INSN (tail))
4736 if (!NOTE_P (head) && !LABEL_P (head))
4737 return 0;
4738 head = NEXT_INSN (head);
4740 return 1;
4743 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
4744 previously found among the insns. Insert them just before HEAD. */
4746 restore_other_notes (rtx head, basic_block head_bb)
4748 if (note_list != 0)
4750 rtx note_head = note_list;
4752 if (head)
4753 head_bb = BLOCK_FOR_INSN (head);
4754 else
4755 head = NEXT_INSN (bb_note (head_bb));
4757 while (PREV_INSN (note_head))
4759 set_block_for_insn (note_head, head_bb);
4760 note_head = PREV_INSN (note_head);
4762 /* The loop above exits before handling this first note. */
4763 set_block_for_insn (note_head, head_bb);
4765 PREV_INSN (note_head) = PREV_INSN (head);
4766 NEXT_INSN (PREV_INSN (head)) = note_head;
4767 PREV_INSN (head) = note_list;
4768 NEXT_INSN (note_list) = head;
4770 if (BLOCK_FOR_INSN (head) != head_bb)
4771 BB_END (head_bb) = note_list;
4773 head = note_head;
4776 return head;
4779 /* When we know we are going to discard the schedule due to a failed attempt
4780 at modulo scheduling, undo all replacements. */
4781 static void
4782 undo_all_replacements (void)
4784 rtx insn;
4785 int i;
4787 FOR_EACH_VEC_ELT (scheduled_insns, i, insn)
4789 sd_iterator_def sd_it;
4790 dep_t dep;
4792 /* See if we must undo a replacement. */
4793 for (sd_it = sd_iterator_start (insn, SD_LIST_RES_FORW);
4794 sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
4796 struct dep_replacement *desc = DEP_REPLACE (dep);
4797 if (desc != NULL)
4798 validate_change (desc->insn, desc->loc, desc->orig, 0);
4803 /* Move insns that became ready to fire from queue to ready list. */
4805 static void
4806 queue_to_ready (struct ready_list *ready)
4808 rtx insn;
4809 rtx link;
4810 rtx skip_insn;
4812 q_ptr = NEXT_Q (q_ptr);
4814 if (dbg_cnt (sched_insn) == false)
4816 /* If the debug counter is activated, do not requeue the first
4817 nonscheduled insn. */
4818 skip_insn = nonscheduled_insns_begin;
4821 skip_insn = next_nonnote_nondebug_insn (skip_insn);
4823 while (QUEUE_INDEX (skip_insn) == QUEUE_SCHEDULED);
4825 else
4826 skip_insn = NULL_RTX;
4828 /* Add all pending insns that can be scheduled without stalls to the
4829 ready list. */
4830 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
4832 insn = XEXP (link, 0);
4833 q_size -= 1;
4835 if (sched_verbose >= 2)
4836 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
4837 (*current_sched_info->print_insn) (insn, 0));
4839 /* If the ready list is full, delay the insn for 1 cycle.
4840 See the comment in schedule_block for the rationale. */
4841 if (!reload_completed
4842 && (ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
4843 || (sched_pressure == SCHED_PRESSURE_MODEL
4844 /* Limit pressure recalculations to MAX_SCHED_READY_INSNS
4845 instructions too. */
4846 && model_index (insn) > (model_curr_point
4847 + MAX_SCHED_READY_INSNS)))
4848 && !(sched_pressure == SCHED_PRESSURE_MODEL
4849 && model_curr_point < model_num_insns
4850 /* Always allow the next model instruction to issue. */
4851 && model_index (insn) == model_curr_point)
4852 && !SCHED_GROUP_P (insn)
4853 && insn != skip_insn)
4854 queue_insn (insn, 1, "ready full");
4855 else
4857 ready_add (ready, insn, false);
4858 if (sched_verbose >= 2)
4859 fprintf (sched_dump, "moving to ready without stalls\n");
4862 free_INSN_LIST_list (&insn_queue[q_ptr]);
4864 /* If there are no ready insns, stall until one is ready and add all
4865 of the pending insns at that point to the ready list. */
4866 if (ready->n_ready == 0)
4868 int stalls;
4870 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
4872 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
4874 for (; link; link = XEXP (link, 1))
4876 insn = XEXP (link, 0);
4877 q_size -= 1;
4879 if (sched_verbose >= 2)
4880 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
4881 (*current_sched_info->print_insn) (insn, 0));
4883 ready_add (ready, insn, false);
4884 if (sched_verbose >= 2)
4885 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
4887 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
4889 advance_one_cycle ();
4891 break;
4894 advance_one_cycle ();
4897 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
4898 clock_var += stalls;
4902 /* Used by early_queue_to_ready. Determines whether it is "ok" to
4903 prematurely move INSN from the queue to the ready list. Currently,
4904 if a target defines the hook 'is_costly_dependence', this function
4905 uses the hook to check whether there exist any dependences which are
4906 considered costly by the target, between INSN and other insns that
4907 have already been scheduled. Dependences are checked up to Y cycles
4908 back, with a default of Y=1; the flag -fsched-stalled-insns-dep=Y
4909 controls this value.
4910 (Other considerations could be taken into account instead, or in
4911 addition, depending on user flags and target hooks.) */
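/* For example, with -fsched-stalled-insns-dep=2 the walk below inspects
   already-scheduled insns going back over the last two dispatch groups
   (delimited by insns whose mode is TImode) and refuses the early move
   if the target reports any dependence against them as costly. */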
4913 static bool
4914 ok_for_early_queue_removal (rtx insn)
4916 if (targetm.sched.is_costly_dependence)
4918 rtx prev_insn;
4919 int n_cycles;
4920 int i = scheduled_insns.length ();
4921 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
4923 while (i-- > 0)
4925 int cost;
4927 prev_insn = scheduled_insns[i];
4929 if (!NOTE_P (prev_insn))
4931 dep_t dep;
4933 dep = sd_find_dep_between (prev_insn, insn, true);
4935 if (dep != NULL)
4937 cost = dep_cost (dep);
4939 if (targetm.sched.is_costly_dependence (dep, cost,
4940 flag_sched_stalled_insns_dep - n_cycles))
4941 return false;
4945 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
4946 break;
4949 if (i == 0)
4950 break;
4954 return true;
4958 /* Remove insns from the queue, before they become "ready" with respect
4959 to FU latency considerations. */
4961 static int
4962 early_queue_to_ready (state_t state, struct ready_list *ready)
4964 rtx insn;
4965 rtx link;
4966 rtx next_link;
4967 rtx prev_link;
4968 bool move_to_ready;
4969 int cost;
4970 state_t temp_state = alloca (dfa_state_size);
4971 int stalls;
4972 int insns_removed = 0;
4975 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
4976 function:
4978 X == 0: There is no limit on how many queued insns can be removed
4979 prematurely. (flag_sched_stalled_insns = -1).
4981 X >= 1: Only X queued insns can be removed prematurely in each
4982 invocation. (flag_sched_stalled_insns = X).
4984 Otherwise: Early queue removal is disabled.
4985 (flag_sched_stalled_insns = 0)
4988 if (! flag_sched_stalled_insns)
4989 return 0;
4991 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
4993 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
4995 if (sched_verbose > 6)
4996 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
4998 prev_link = 0;
4999 while (link)
5001 next_link = XEXP (link, 1);
5002 insn = XEXP (link, 0);
5003 if (insn && sched_verbose > 6)
5004 print_rtl_single (sched_dump, insn);
5006 memcpy (temp_state, state, dfa_state_size);
5007 if (recog_memoized (insn) < 0)
5008 /* Use a non-negative cost to indicate that the insn is not ready,
5009 avoiding an infinite Q->R->Q->R... cycle. */
5010 cost = 0;
5011 else
5012 cost = state_transition (temp_state, insn);
5014 if (sched_verbose >= 6)
5015 fprintf (sched_dump, "transition cost = %d\n", cost);
5017 move_to_ready = false;
5018 if (cost < 0)
5020 move_to_ready = ok_for_early_queue_removal (insn);
5021 if (move_to_ready == true)
5023 /* move from Q to R */
5024 q_size -= 1;
5025 ready_add (ready, insn, false);
5027 if (prev_link)
5028 XEXP (prev_link, 1) = next_link;
5029 else
5030 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
5032 free_INSN_LIST_node (link);
5034 if (sched_verbose >= 2)
5035 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
5036 (*current_sched_info->print_insn) (insn, 0));
5038 insns_removed++;
5039 if (insns_removed == flag_sched_stalled_insns)
5040 /* Remove no more than flag_sched_stalled_insns insns
5041 from Q at a time. */
5042 return insns_removed;
5046 if (move_to_ready == false)
5047 prev_link = link;
5049 link = next_link;
5050 } /* while link */
5051 } /* if link */
5053 } /* for stalls.. */
5055 return insns_removed;
5059 /* Print the ready list for debugging purposes. Callable from debugger. */
5061 static void
5062 debug_ready_list (struct ready_list *ready)
5064 rtx *p;
5065 int i;
5067 if (ready->n_ready == 0)
5069 fprintf (sched_dump, "\n");
5070 return;
5073 p = ready_lastpos (ready);
5074 for (i = 0; i < ready->n_ready; i++)
5076 fprintf (sched_dump, " %s:%d",
5077 (*current_sched_info->print_insn) (p[i], 0),
5078 INSN_LUID (p[i]));
5079 if (sched_pressure != SCHED_PRESSURE_NONE)
5080 fprintf (sched_dump, "(cost=%d",
5081 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
5082 if (INSN_TICK (p[i]) > clock_var)
5083 fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
5084 if (sched_pressure != SCHED_PRESSURE_NONE)
5085 fprintf (sched_dump, ")");
5087 fprintf (sched_dump, "\n");
5090 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
5091 NOTEs. This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
5092 replaces the epilogue note in the correct basic block. */
5093 void
5094 reemit_notes (rtx insn)
5096 rtx note, last = insn;
5098 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
5100 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
5102 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
5104 last = emit_note_before (note_type, last);
5105 remove_note (insn, note);
5110 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
5111 static void
5112 move_insn (rtx insn, rtx last, rtx nt)
5114 if (PREV_INSN (insn) != last)
5116 basic_block bb;
5117 rtx note;
5118 int jump_p = 0;
5120 bb = BLOCK_FOR_INSN (insn);
5122 /* BB_HEAD is either LABEL or NOTE. */
5123 gcc_assert (BB_HEAD (bb) != insn);
5125 if (BB_END (bb) == insn)
5126 /* If this is the last instruction in BB, move the end marker one
5127 instruction up. */
5129 /* Jumps are always placed at the end of a basic block. */
5130 jump_p = control_flow_insn_p (insn);
5132 gcc_assert (!jump_p
5133 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
5134 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
5135 || (common_sched_info->sched_pass_id
5136 == SCHED_EBB_PASS));
5138 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
5140 BB_END (bb) = PREV_INSN (insn);
5143 gcc_assert (BB_END (bb) != last);
5145 if (jump_p)
5146 /* We move the block note along with jump. */
5148 gcc_assert (nt);
5150 note = NEXT_INSN (insn);
5151 while (NOTE_NOT_BB_P (note) && note != nt)
5152 note = NEXT_INSN (note);
5154 if (note != nt
5155 && (LABEL_P (note)
5156 || BARRIER_P (note)))
5157 note = NEXT_INSN (note);
5159 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5161 else
5162 note = insn;
5164 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
5165 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
5167 NEXT_INSN (note) = NEXT_INSN (last);
5168 PREV_INSN (NEXT_INSN (last)) = note;
5170 NEXT_INSN (last) = insn;
5171 PREV_INSN (insn) = last;
5173 bb = BLOCK_FOR_INSN (last);
5175 if (jump_p)
5177 fix_jump_move (insn);
5179 if (BLOCK_FOR_INSN (insn) != bb)
5180 move_block_after_check (insn);
5182 gcc_assert (BB_END (bb) == last);
5185 df_insn_change_bb (insn, bb);
5187 /* Update BB_END, if needed. */
5188 if (BB_END (bb) == last)
5189 BB_END (bb) = insn;
5192 SCHED_GROUP_P (insn) = 0;
5195 /* Return true if scheduling INSN will finish current clock cycle. */
5196 static bool
5197 insn_finishes_cycle_p (rtx insn)
5199 if (SCHED_GROUP_P (insn))
5200 /* After issuing INSN, the rest of the sched_group will be forced to issue
5201 in order. Don't make any plans for the rest of the cycle. */
5202 return true;
5204 /* Finishing the block will, apparently, finish the cycle. */
5205 if (current_sched_info->insn_finishes_block_p
5206 && current_sched_info->insn_finishes_block_p (insn))
5207 return true;
5209 return false;
5212 /* Define type for target data used in multipass scheduling. */
5213 #ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
5214 # define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
5215 #endif
5216 typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;
5218 /* The following structure describes an entry of the stack of choices. */
5219 struct choice_entry
5221 /* Ordinal number of the issued insn in the ready queue. */
5222 int index;
5223 /* The number of remaining insns whose issue we should still try. */
5224 int rest;
5225 /* The number of issued essential insns. */
5226 int n;
5227 /* State after issuing the insn. */
5228 state_t state;
5229 /* Target-specific data. */
5230 first_cycle_multipass_data_t target_data;
5233 /* The following array is used to implement a stack of choices used in
5234 function max_issue. */
5235 static struct choice_entry *choice_stack;
5237 /* This holds the value of the target dfa_lookahead hook. */
5238 int dfa_lookahead;
5240 /* The following variable is the maximal number of tries of issuing
5241 insns for the first-cycle multipass insn scheduling. We define
5242 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
5243 need this constraint if all real insns (with non-negative codes)
5244 had reservations, because in that case the algorithm complexity is
5245 O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the dfa descriptions
5246 might be incomplete and such insns might occur. For such
5247 descriptions, the complexity of the algorithm (without the constraint)
5248 could reach DFA_LOOKAHEAD ** N, where N is the queue length. */
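/* With invented numbers: for a dfa_lookahead of 4 and an issue_rate of 2,
   the initialization in max_issue below gives
   max_lookahead_tries = 100 * 4 * 4 = 1600, i.e. at most 1600 trial
   issues per call to max_issue before the search gives up. */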
5249 static int max_lookahead_tries;
5251 /* The following value is the value of the hook
5252 `first_cycle_multipass_dfa_lookahead' at the last call of
5253 `max_issue'. */
5254 static int cached_first_cycle_multipass_dfa_lookahead = 0;
5256 /* The following value is the value of `issue_rate' at the last call of
5257 `sched_init'. */
5258 static int cached_issue_rate = 0;
5260 /* The following function returns the maximal (or close to maximal) number
5261 of insns which can be issued on the same cycle, one of which is the
5262 insn with the best rank (the first insn in READY). To do this, the
5263 function tries different samples of ready insns. READY is the
5264 current queue `ready'. The global array READY_TRY reflects which
5265 insns have already been issued in this try. The function stops
5266 immediately if it finds a solution in which all instructions can be issued.
5267 INDEX will contain the index of the best insn in READY. This
5268 function is used only for first-cycle multipass scheduling.
5270 PRIVILEGED_N >= 0
5272 This function expects recognized insns only. All USEs,
5273 CLOBBERs, etc must be filtered elsewhere. */
5275 max_issue (struct ready_list *ready, int privileged_n, state_t state,
5276 bool first_cycle_insn_p, int *index)
5278 int n, i, all, n_ready, best, delay, tries_num;
5279 int more_issue;
5280 struct choice_entry *top;
5281 rtx insn;
5283 n_ready = ready->n_ready;
5284 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
5285 && privileged_n <= n_ready);
5287 /* Init MAX_LOOKAHEAD_TRIES. */
5288 if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
5290 cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
5291 max_lookahead_tries = 100;
5292 for (i = 0; i < issue_rate; i++)
5293 max_lookahead_tries *= dfa_lookahead;
5296 /* Init MORE_ISSUE. */
5297 more_issue = issue_rate - cycle_issued_insns;
5298 gcc_assert (more_issue >= 0);
5300 /* The number of the issued insns in the best solution. */
5301 best = 0;
5303 top = choice_stack;
5305 /* Set initial state of the search. */
5306 memcpy (top->state, state, dfa_state_size);
5307 top->rest = dfa_lookahead;
5308 top->n = 0;
5309 if (targetm.sched.first_cycle_multipass_begin)
5310 targetm.sched.first_cycle_multipass_begin (&top->target_data,
5311 ready_try, n_ready,
5312 first_cycle_insn_p);
5314 /* Count the number of the insns to search among. */
5315 for (all = i = 0; i < n_ready; i++)
5316 if (!ready_try [i])
5317 all++;
5319 /* I is the index of the insn to try next. */
5320 i = 0;
5321 tries_num = 0;
5322 for (;;)
5324 if (/* If we've reached a dead end or searched enough of what we have
5325 been asked... */
5326 top->rest == 0
5327 /* or have nothing else to try... */
5328 || i >= n_ready
5329 /* or should not issue more. */
5330 || top->n >= more_issue)
5332 /* ??? (... || i == n_ready). */
5333 gcc_assert (i <= n_ready);
5335 /* We should not issue more than issue_rate instructions. */
5336 gcc_assert (top->n <= more_issue);
5338 if (top == choice_stack)
5339 break;
5341 if (best < top - choice_stack)
5343 if (privileged_n)
5345 n = privileged_n;
5346 /* Try to find issued privileged insn. */
5347 while (n && !ready_try[--n])
5351 if (/* If all insns are equally good... */
5352 privileged_n == 0
5353 /* Or a privileged insn will be issued. */
5354 || ready_try[n])
5355 /* Then we have a solution. */
5357 best = top - choice_stack;
5358 /* This is the index of the insn issued first in this
5359 solution. */
5360 *index = choice_stack [1].index;
5361 if (top->n == more_issue || best == all)
5362 break;
5366 /* Set ready-list index to point to the last insn
5367 ('i++' below will advance it to the next insn). */
5368 i = top->index;
5370 /* Backtrack. */
5371 ready_try [i] = 0;
5373 if (targetm.sched.first_cycle_multipass_backtrack)
5374 targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
5375 ready_try, n_ready);
5377 top--;
5378 memcpy (state, top->state, dfa_state_size);
5380 else if (!ready_try [i])
5382 tries_num++;
5383 if (tries_num > max_lookahead_tries)
5384 break;
5385 insn = ready_element (ready, i);
5386 delay = state_transition (state, insn);
5387 if (delay < 0)
5389 if (state_dead_lock_p (state)
5390 || insn_finishes_cycle_p (insn))
5391 /* We won't issue any more instructions in the next
5392 choice_state. */
5393 top->rest = 0;
5394 else
5395 top->rest--;
5397 n = top->n;
5398 if (memcmp (top->state, state, dfa_state_size) != 0)
5399 n++;
5401 /* Advance to the next choice_entry. */
5402 top++;
5403 /* Initialize it. */
5404 top->rest = dfa_lookahead;
5405 top->index = i;
5406 top->n = n;
5407 memcpy (top->state, state, dfa_state_size);
5408 ready_try [i] = 1;
5410 if (targetm.sched.first_cycle_multipass_issue)
5411 targetm.sched.first_cycle_multipass_issue (&top->target_data,
5412 ready_try, n_ready,
5413 insn,
5414 &((top - 1)
5415 ->target_data));
5417 i = -1;
5421 /* Increase ready-list index. */
5422 i++;
5425 if (targetm.sched.first_cycle_multipass_end)
5426 targetm.sched.first_cycle_multipass_end (best != 0
5427 ? &choice_stack[1].target_data
5428 : NULL);
5430 /* Restore the original state of the DFA. */
5431 memcpy (state, choice_stack->state, dfa_state_size);
5433 return best;
5436 /* The following function chooses an insn from READY and modifies
5437 READY. It is used only for first-cycle multipass
5438 scheduling.
5439 Return:
5440 -1 if cycle should be advanced,
5441 0 if INSN_PTR is set to point to the desirable insn,
5442 1 if choose_ready () should be restarted without advancing the cycle. */
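/* A minimal caller sketch (hypothetical; the real loop in schedule_block
   interleaves this with much more bookkeeping):

     rtx insn;
     int res = choose_ready (&ready, first_cycle_insn_p, &insn);
     if (res < 0)
       advance_one_cycle ();   (then refill the ready list and retry)
     else if (res == 0)
       schedule_insn (insn);   (INSN was chosen for this cycle)
     (res > 0 means: call choose_ready again without advancing.) */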
5443 static int
5444 choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
5445 rtx *insn_ptr)
5447 int lookahead;
5449 if (dbg_cnt (sched_insn) == false)
5451 rtx insn = nonscheduled_insns_begin;
5454 insn = next_nonnote_insn (insn);
5456 while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);
5458 if (QUEUE_INDEX (insn) == QUEUE_READY)
5459 /* INSN is in the ready_list. */
5461 nonscheduled_insns_begin = insn;
5462 ready_remove_insn (insn);
5463 *insn_ptr = insn;
5464 return 0;
5467 /* INSN is in the queue. Advance cycle to move it to the ready list. */
5468 return -1;
5471 lookahead = 0;
5473 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
5474 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
5475 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
5476 || DEBUG_INSN_P (ready_element (ready, 0)))
5478 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
5479 *insn_ptr = ready_remove_first_dispatch (ready);
5480 else
5481 *insn_ptr = ready_remove_first (ready);
5483 return 0;
5485 else
5487 /* Try to choose the better insn. */
5488 int index = 0, i, n;
5489 rtx insn;
5490 int try_data = 1, try_control = 1;
5491 ds_t ts;
5493 insn = ready_element (ready, 0);
5494 if (INSN_CODE (insn) < 0)
5496 *insn_ptr = ready_remove_first (ready);
5497 return 0;
5500 if (spec_info
5501 && spec_info->flags & (PREFER_NON_DATA_SPEC
5502 | PREFER_NON_CONTROL_SPEC))
5504 for (i = 0, n = ready->n_ready; i < n; i++)
5506 rtx x;
5507 ds_t s;
5509 x = ready_element (ready, i);
5510 s = TODO_SPEC (x);
5512 if (spec_info->flags & PREFER_NON_DATA_SPEC
5513 && !(s & DATA_SPEC))
5515 try_data = 0;
5516 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
5517 || !try_control)
5518 break;
5521 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
5522 && !(s & CONTROL_SPEC))
5524 try_control = 0;
5525 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
5526 break;
5531 ts = TODO_SPEC (insn);
5532 if ((ts & SPECULATIVE)
5533 && (((!try_data && (ts & DATA_SPEC))
5534 || (!try_control && (ts & CONTROL_SPEC)))
5535 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
5536 && !targetm.sched
5537 .first_cycle_multipass_dfa_lookahead_guard_spec (insn))))
5538 /* Discard speculative instruction that stands first in the ready
5539 list. */
5541 change_queue_index (insn, 1);
5542 return 1;
5545 ready_try[0] = 0;
5547 for (i = 1; i < ready->n_ready; i++)
5549 insn = ready_element (ready, i);
5551 ready_try [i]
5552 = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
5553 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC)));
5556 /* Let the target filter the search space. */
5557 for (i = 1; i < ready->n_ready; i++)
5558 if (!ready_try[i])
5560 insn = ready_element (ready, i);
5562 /* If this insn is recognizable we should have already
5563 recognized it earlier.
5564 ??? Not very clear where this is supposed to be done.
5565 See dep_cost_1. */
5566 gcc_checking_assert (INSN_CODE (insn) >= 0
5567 || recog_memoized (insn) < 0);
5569 ready_try [i]
5570 = (/* INSN_CODE check can be omitted here as it is also done later
5571 in max_issue (). */
5572 INSN_CODE (insn) < 0
5573 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
5574 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
5575 (insn)));
5578 if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
5580 *insn_ptr = ready_remove_first (ready);
5581 if (sched_verbose >= 4)
5582 fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
5583 (*current_sched_info->print_insn) (*insn_ptr, 0));
5584 return 0;
5586 else
5588 if (sched_verbose >= 4)
5589 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
5590 (*current_sched_info->print_insn)
5591 (ready_element (ready, index), 0));
5593 *insn_ptr = ready_remove (ready, index);
5594 return 0;
5599 /* This function is called when we have successfully scheduled a
5600 block. It uses the schedule stored in the scheduled_insns vector
5601 to rearrange the RTL. PREV_HEAD is used as the anchor to which we
5602 append the scheduled insns; TAIL is the insn after the scheduled
5603 block. TARGET_BB is the argument passed to schedule_block. */
5605 static void
5606 commit_schedule (rtx prev_head, rtx tail, basic_block *target_bb)
5608 unsigned int i;
5609 rtx insn;
5611 last_scheduled_insn = prev_head;
5612 for (i = 0;
5613 scheduled_insns.iterate (i, &insn);
5614 i++)
5616 if (control_flow_insn_p (last_scheduled_insn)
5617 || current_sched_info->advance_target_bb (*target_bb, insn))
5619 *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);
5621 if (sched_verbose)
5623 rtx x;
5625 x = next_real_insn (last_scheduled_insn);
5626 gcc_assert (x);
5627 dump_new_block_header (1, *target_bb, x, tail);
5630 last_scheduled_insn = bb_note (*target_bb);
5633 if (current_sched_info->begin_move_insn)
5634 (*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
5635 move_insn (insn, last_scheduled_insn,
5636 current_sched_info->next_tail);
5637 if (!DEBUG_INSN_P (insn))
5638 reemit_notes (insn);
5639 last_scheduled_insn = insn;
5642 scheduled_insns.truncate (0);
5645 /* Examine all insns on the ready list and queue those which can't be
5646 issued in this cycle. TEMP_STATE is temporary scheduler state we
5647 can use as scratch space. If FIRST_CYCLE_INSN_P is true, no insns
5648 have been issued for the current cycle, which means it is valid to
5649 issue an asm statement.
5651 If SHADOWS_ONLY_P is true, we eliminate all real insns and only
5652 leave those for which SHADOW_P is true. If MODULO_EPILOGUE is true,
5653 we only leave insns which have an INSN_EXACT_TICK. */
5655 static void
5656 prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
5657 bool shadows_only_p, bool modulo_epilogue_p)
5659 int i, pass;
5660 bool sched_group_found = false;
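  /* Minimum delay, in cycles, charged to insns outside a SCHED_GROUP so that
     the whole group can issue first; raised below whenever a group insn
     itself has to be delayed.  */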
5661 int min_cost_group = 1;
5663 for (i = 0; i < ready.n_ready; i++)
5665 rtx insn = ready_element (&ready, i);
5666 if (SCHED_GROUP_P (insn))
5668 sched_group_found = true;
5669 break;
5673 /* Make two passes if there's a SCHED_GROUP_P insn; make sure to handle
5674 such an insn first and note its cost, then schedule all other insns
5675 for one cycle later. */
5676 for (pass = sched_group_found ? 0 : 1; pass < 2; )
5678 int n = ready.n_ready;
5679 for (i = 0; i < n; i++)
5681 rtx insn = ready_element (&ready, i);
5682 int cost = 0;
5683 const char *reason = "resource conflict";
5685 if (DEBUG_INSN_P (insn))
5686 continue;
5688 if (sched_group_found && !SCHED_GROUP_P (insn))
5690 if (pass == 0)
5691 continue;
5692 cost = min_cost_group;
5693 reason = "not in sched group";
5695 else if (modulo_epilogue_p
5696 && INSN_EXACT_TICK (insn) == INVALID_TICK)
5698 cost = max_insn_queue_index;
5699 reason = "not an epilogue insn";
5701 else if (shadows_only_p && !SHADOW_P (insn))
5703 cost = 1;
5704 reason = "not a shadow";
5706 else if (recog_memoized (insn) < 0)
5708 if (!first_cycle_insn_p
5709 && (GET_CODE (PATTERN (insn)) == ASM_INPUT
5710 || asm_noperands (PATTERN (insn)) >= 0))
5711 cost = 1;
5712 reason = "asm";
5714 else if (sched_pressure != SCHED_PRESSURE_NONE)
5716 if (sched_pressure == SCHED_PRESSURE_MODEL
5717 && INSN_TICK (insn) <= clock_var)
5719 memcpy (temp_state, curr_state, dfa_state_size);
5720 if (state_transition (temp_state, insn) >= 0)
5721 INSN_TICK (insn) = clock_var + 1;
5723 cost = 0;
5725 else
5727 int delay_cost = 0;
5729 if (delay_htab)
5731 struct delay_pair *delay_entry;
5732 delay_entry
5733 = (struct delay_pair *)htab_find_with_hash (delay_htab, insn,
5734 htab_hash_pointer (insn));
5735 while (delay_entry && delay_cost == 0)
5737 delay_cost = estimate_shadow_tick (delay_entry);
5738 if (delay_cost > max_insn_queue_index)
5739 delay_cost = max_insn_queue_index;
5740 delay_entry = delay_entry->next_same_i1;
5744 memcpy (temp_state, curr_state, dfa_state_size);
5745 cost = state_transition (temp_state, insn);
5746 if (cost < 0)
5747 cost = 0;
5748 else if (cost == 0)
5749 cost = 1;
5750 if (cost < delay_cost)
5752 cost = delay_cost;
5753 reason = "shadow tick";
5756 if (cost >= 1)
5758 if (SCHED_GROUP_P (insn) && cost > min_cost_group)
5759 min_cost_group = cost;
5760 ready_remove (&ready, i);
5761 queue_insn (insn, cost, reason);
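          /* Removing an element renumbers the rest of the ready list, so
             restart the scan unless this was the last insn we had to
             examine.  */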
5762 if (i + 1 < n)
5763 break;
5766 if (i == n)
5767 pass++;
5771 /* Called when we detect that the schedule is impossible. We examine the
5772 backtrack queue to find the earliest insn that caused this condition. */
5774 static struct haifa_saved_data *
5775 verify_shadows (void)
5777 struct haifa_saved_data *save, *earliest_fail = NULL;
5778 for (save = backtrack_queue; save; save = save->next)
5780 int t;
5781 struct delay_pair *pair = save->delay_pair;
5782 rtx i1 = pair->i1;
5784 for (; pair; pair = pair->next_same_i1)
5786 rtx i2 = pair->i2;
5788 if (QUEUE_INDEX (i2) == QUEUE_SCHEDULED)
5789 continue;
5791 t = INSN_TICK (i1) + pair_delay (pair);
5792 if (t < clock_var)
5794 if (sched_verbose >= 2)
5795 fprintf (sched_dump,
5796 ";;\t\tfailed delay requirements for %d/%d (%d->%d)"
5797 ", not ready\n",
5798 INSN_UID (pair->i1), INSN_UID (pair->i2),
5799 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
5800 earliest_fail = save;
5801 break;
5803 if (QUEUE_INDEX (i2) >= 0)
5805 int queued_for = INSN_TICK (i2);
5807 if (t < queued_for)
5809 if (sched_verbose >= 2)
5810 fprintf (sched_dump,
5811 ";;\t\tfailed delay requirements for %d/%d"
5812 " (%d->%d), queued too late\n",
5813 INSN_UID (pair->i1), INSN_UID (pair->i2),
5814 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
5815 earliest_fail = save;
5816 break;
5822 return earliest_fail;
5825 /* Use forward list scheduling to rearrange insns of block pointed to by
5826 TARGET_BB, possibly bringing insns from subsequent blocks in the same
5827 region. */
5829 bool
5830 schedule_block (basic_block *target_bb, state_t init_state)
5832 int i;
5833 bool success = modulo_ii == 0;
5834 struct sched_block_state ls;
5835 state_t temp_state = NULL; /* It is used for multipass scheduling. */
5836 int sort_p, advance, start_clock_var;
5838 /* Head/tail info for this block. */
5839 rtx prev_head = current_sched_info->prev_head;
5840 rtx next_tail = current_sched_info->next_tail;
5841 rtx head = NEXT_INSN (prev_head);
5842 rtx tail = PREV_INSN (next_tail);
5844 if ((current_sched_info->flags & DONT_BREAK_DEPENDENCIES) == 0
5845 && sched_pressure != SCHED_PRESSURE_MODEL)
5846 find_modifiable_mems (head, tail);
5848 /* We used to have code to avoid getting parameters moved from hard
5849 argument registers into pseudos.
5851 However, it was removed when it proved to be of marginal benefit
5852 and caused problems because schedule_block and compute_forward_dependences
5853 had different notions of what the "head" insn was. */
5855 gcc_assert (head != tail || INSN_P (head));
5857 haifa_recovery_bb_recently_added_p = false;
5859 backtrack_queue = NULL;
5861 /* Debug info. */
5862 if (sched_verbose)
5863 dump_new_block_header (0, *target_bb, head, tail);
5865 if (init_state == NULL)
5866 state_reset (curr_state);
5867 else
5868 memcpy (curr_state, init_state, dfa_state_size);
5870 /* Clear the ready list. */
5871 ready.first = ready.veclen - 1;
5872 ready.n_ready = 0;
5873 ready.n_debug = 0;
5875 /* It is used for first cycle multipass scheduling. */
5876 temp_state = alloca (dfa_state_size);
5878 if (targetm.sched.init)
5879 targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
5881 /* We start inserting insns after PREV_HEAD. */
5882 last_scheduled_insn = nonscheduled_insns_begin = prev_head;
5883 last_nondebug_scheduled_insn = NULL_RTX;
5885 gcc_assert ((NOTE_P (last_scheduled_insn)
5886 || DEBUG_INSN_P (last_scheduled_insn))
5887 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
5889 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
5890 queue. */
5891 q_ptr = 0;
5892 q_size = 0;
5894 insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
5895 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
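  /* The queue is used as a circular buffer: an insn delayed by N cycles is
     linked into the slot N positions after Q_PTR, modulo
     max_insn_queue_index + 1 (see change_queue_index () below).  */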
5897 /* Start just before the beginning of time. */
5898 clock_var = -1;
5900 /* We need the queue and ready lists and clock_var to be initialized
5901 in try_ready () (which is called through init_ready_list ()). */
5902 (*current_sched_info->init_ready_list) ();
5904 if (sched_pressure == SCHED_PRESSURE_MODEL)
5905 model_start_schedule ();
5907 /* The algorithm is O(n^2) in the number of ready insns at any given
5908 time in the worst case. Before reload we are more likely to have
5909 big lists so truncate them to a reasonable size. */
5910 if (!reload_completed
5911 && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
5913 ready_sort (&ready);
5915 /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
5916 If there are debug insns, we know they're first. */
5917 for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
5918 if (!SCHED_GROUP_P (ready_element (&ready, i)))
5919 break;
5921 if (sched_verbose >= 2)
5923 fprintf (sched_dump,
5924 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
5925 fprintf (sched_dump,
5926 ";;\t\t before reload => truncated to %d insns\n", i);
5929 /* Delay all insns past it for 1 cycle. If debug counter is
5930 activated make an exception for the insn right after
5931 nonscheduled_insns_begin. */
5933 rtx skip_insn;
5935 if (dbg_cnt (sched_insn) == false)
5936 skip_insn = next_nonnote_insn (nonscheduled_insns_begin);
5937 else
5938 skip_insn = NULL_RTX;
5940 while (i < ready.n_ready)
5942 rtx insn;
5944 insn = ready_remove (&ready, i);
5946 if (insn != skip_insn)
5947 queue_insn (insn, 1, "list truncated");
5949 if (skip_insn)
5950 ready_add (&ready, skip_insn, true);
5954 /* Now we can restore basic block notes and maintain precise cfg. */
5955 restore_bb_notes (*target_bb);
5957 last_clock_var = -1;
5959 advance = 0;
5961 gcc_assert (scheduled_insns.length () == 0);
5962 sort_p = TRUE;
5963 must_backtrack = false;
5964 modulo_insns_scheduled = 0;
5966 ls.modulo_epilogue = false;
5968 /* Loop until all the insns in BB are scheduled. */
5969 while ((*current_sched_info->schedule_more_p) ())
5971 perform_replacements_new_cycle ();
5974 start_clock_var = clock_var;
5976 clock_var++;
5978 advance_one_cycle ();
5980 /* Add to the ready list all pending insns that can be issued now.
5981 If there are no ready insns, increment clock until one
5982 is ready and add all pending insns at that point to the ready
5983 list. */
5984 queue_to_ready (&ready);
5986 gcc_assert (ready.n_ready);
5988 if (sched_verbose >= 2)
5990 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
5991 debug_ready_list (&ready);
5993 advance -= clock_var - start_clock_var;
5995 while (advance > 0);
5997 if (ls.modulo_epilogue)
5999 int stage = clock_var / modulo_ii;
6000 if (stage > modulo_last_stage * 2 + 2)
6002 if (sched_verbose >= 2)
6003 fprintf (sched_dump,
6004 ";;\t\tmodulo scheduled succeeded at II %d\n",
6005 modulo_ii);
6006 success = true;
6007 goto end_schedule;
6010 else if (modulo_ii > 0)
6012 int stage = clock_var / modulo_ii;
6013 if (stage > modulo_max_stages)
6015 if (sched_verbose >= 2)
6016 fprintf (sched_dump,
6017 ";;\t\tfailing schedule due to excessive stages\n");
6018 goto end_schedule;
6020 if (modulo_n_insns == modulo_insns_scheduled
6021 && stage > modulo_last_stage)
6023 if (sched_verbose >= 2)
6024 fprintf (sched_dump,
6025 ";;\t\tfound kernel after %d stages, II %d\n",
6026 stage, modulo_ii);
6027 ls.modulo_epilogue = true;
6031 prune_ready_list (temp_state, true, false, ls.modulo_epilogue);
6032 if (ready.n_ready == 0)
6033 continue;
6034 if (must_backtrack)
6035 goto do_backtrack;
6037 ls.first_cycle_insn_p = true;
6038 ls.shadows_only_p = false;
6039 cycle_issued_insns = 0;
6040 ls.can_issue_more = issue_rate;
6041 for (;;)
6043 rtx insn;
6044 int cost;
6045 bool asm_p;
6047 if (sort_p && ready.n_ready > 0)
6049 /* Sort the ready list based on priority. This must be
6050 done every iteration through the loop, as schedule_insn
6051 may have readied additional insns that will not be
6052 sorted correctly. */
6053 ready_sort (&ready);
6055 if (sched_verbose >= 2)
6057 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
6058 debug_ready_list (&ready);
6062 /* We don't want md sched reorder to even see debug insns, so put
6063 them out right away. */
6064 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
6065 && (*current_sched_info->schedule_more_p) ())
6067 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
6069 rtx insn = ready_remove_first (&ready);
6070 gcc_assert (DEBUG_INSN_P (insn));
6071 (*current_sched_info->begin_schedule_ready) (insn);
6072 scheduled_insns.safe_push (insn);
6073 last_scheduled_insn = insn;
6074 advance = schedule_insn (insn);
6075 gcc_assert (advance == 0);
6076 if (ready.n_ready > 0)
6077 ready_sort (&ready);
6081 if (ls.first_cycle_insn_p && !ready.n_ready)
6082 break;
6084 resume_after_backtrack:
6085 /* Allow the target to reorder the list, typically for
6086 better instruction bundling. */
6087 if (sort_p
6088 && (ready.n_ready == 0
6089 || !SCHED_GROUP_P (ready_element (&ready, 0))))
6091 if (ls.first_cycle_insn_p && targetm.sched.reorder)
6092 ls.can_issue_more
6093 = targetm.sched.reorder (sched_dump, sched_verbose,
6094 ready_lastpos (&ready),
6095 &ready.n_ready, clock_var);
6096 else if (!ls.first_cycle_insn_p && targetm.sched.reorder2)
6097 ls.can_issue_more
6098 = targetm.sched.reorder2 (sched_dump, sched_verbose,
6099 ready.n_ready
6100 ? ready_lastpos (&ready) : NULL,
6101 &ready.n_ready, clock_var);
6104 restart_choose_ready:
6105 if (sched_verbose >= 2)
6107 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
6108 clock_var);
6109 debug_ready_list (&ready);
6110 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
6111 print_curr_reg_pressure ();
6114 if (ready.n_ready == 0
6115 && ls.can_issue_more
6116 && reload_completed)
6118 /* Allow scheduling insns directly from the queue in case
6119 there's nothing better to do (ready list is empty) but
6120 there are still vacant dispatch slots in the current cycle. */
6121 if (sched_verbose >= 6)
6122 fprintf (sched_dump,";;\t\tSecond chance\n");
6123 memcpy (temp_state, curr_state, dfa_state_size);
6124 if (early_queue_to_ready (temp_state, &ready))
6125 ready_sort (&ready);
6128 if (ready.n_ready == 0
6129 || !ls.can_issue_more
6130 || state_dead_lock_p (curr_state)
6131 || !(*current_sched_info->schedule_more_p) ())
6132 break;
6134 /* Select and remove the insn from the ready list. */
6135 if (sort_p)
6137 int res;
6139 insn = NULL_RTX;
6140 res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);
6142 if (res < 0)
6143 /* Finish cycle. */
6144 break;
6145 if (res > 0)
6146 goto restart_choose_ready;
6148 gcc_assert (insn != NULL_RTX);
6150 else
6151 insn = ready_remove_first (&ready);
6153 if (sched_pressure != SCHED_PRESSURE_NONE
6154 && INSN_TICK (insn) > clock_var)
6156 ready_add (&ready, insn, true);
6157 advance = 1;
6158 break;
6161 if (targetm.sched.dfa_new_cycle
6162 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
6163 insn, last_clock_var,
6164 clock_var, &sort_p))
6165 /* SORT_P is used by the target to override sorting
6166 of the ready list. This is needed when the target
6167 has modified its internal structures expecting that
6168 the insn will be issued next. As we need the insn
6169 to have the highest priority (so it will be returned by
6170 the ready_remove_first call above), we invoke
6171 ready_add (&ready, insn, true).
6172 But, still, there is one issue: INSN can later be
6173 discarded by the scheduler's front end through
6174 current_sched_info->can_schedule_ready_p and hence won't
6175 be issued next. */
6177 ready_add (&ready, insn, true);
6178 break;
6181 sort_p = TRUE;
6183 if (current_sched_info->can_schedule_ready_p
6184 && ! (*current_sched_info->can_schedule_ready_p) (insn))
6185 /* We normally get here only if we don't want to move
6186 insn from the split block. */
6188 TODO_SPEC (insn) = DEP_POSTPONED;
6189 goto restart_choose_ready;
6192 if (delay_htab)
6194 /* If this insn is the first part of a delay-slot pair, record a
6195 backtrack point. */
6196 struct delay_pair *delay_entry;
6197 delay_entry
6198 = (struct delay_pair *)htab_find_with_hash (delay_htab, insn,
6199 htab_hash_pointer (insn));
6200 if (delay_entry)
6202 save_backtrack_point (delay_entry, ls);
6203 if (sched_verbose >= 2)
6204 fprintf (sched_dump, ";;\t\tsaving backtrack point\n");
6208 /* DECISION is made. */
6210 if (modulo_ii > 0 && INSN_UID (insn) < modulo_iter0_max_uid)
6212 modulo_insns_scheduled++;
6213 modulo_last_stage = clock_var / modulo_ii;
6215 if (TODO_SPEC (insn) & SPECULATIVE)
6216 generate_recovery_code (insn);
6218 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
6219 targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
6221 /* Update counters, etc in the scheduler's front end. */
6222 (*current_sched_info->begin_schedule_ready) (insn);
6223 scheduled_insns.safe_push (insn);
6224 gcc_assert (NONDEBUG_INSN_P (insn));
6225 last_nondebug_scheduled_insn = last_scheduled_insn = insn;
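      /* For a recognized insn, state_transition () returns a negative value
         when the insn can be issued in the current DFA state; if issuing it
         actually changed the automaton state, count it against this cycle's
         issued insns.  */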
6227 if (recog_memoized (insn) >= 0)
6229 memcpy (temp_state, curr_state, dfa_state_size);
6230 cost = state_transition (curr_state, insn);
6231 if (sched_pressure != SCHED_PRESSURE_WEIGHTED)
6232 gcc_assert (cost < 0);
6233 if (memcmp (temp_state, curr_state, dfa_state_size) != 0)
6234 cycle_issued_insns++;
6235 asm_p = false;
6237 else
6238 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
6239 || asm_noperands (PATTERN (insn)) >= 0);
6241 if (targetm.sched.variable_issue)
6242 ls.can_issue_more =
6243 targetm.sched.variable_issue (sched_dump, sched_verbose,
6244 insn, ls.can_issue_more);
6245 /* A naked CLOBBER or USE generates no instruction, so do
6246 not count them against the issue rate. */
6247 else if (GET_CODE (PATTERN (insn)) != USE
6248 && GET_CODE (PATTERN (insn)) != CLOBBER)
6249 ls.can_issue_more--;
6250 advance = schedule_insn (insn);
6252 if (SHADOW_P (insn))
6253 ls.shadows_only_p = true;
6255 /* After issuing an asm insn we should start a new cycle. */
6256 if (advance == 0 && asm_p)
6257 advance = 1;
6259 if (must_backtrack)
6260 break;
6262 if (advance != 0)
6263 break;
6265 ls.first_cycle_insn_p = false;
6266 if (ready.n_ready > 0)
6267 prune_ready_list (temp_state, false, ls.shadows_only_p,
6268 ls.modulo_epilogue);
6271 do_backtrack:
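      /* If an insn still on the ready list was required to issue exactly on
         this cycle and was not scheduled, force a backtrack.  */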
6272 if (!must_backtrack)
6273 for (i = 0; i < ready.n_ready; i++)
6275 rtx insn = ready_element (&ready, i);
6276 if (INSN_EXACT_TICK (insn) == clock_var)
6278 must_backtrack = true;
6279 clock_var++;
6280 break;
6283 if (must_backtrack && modulo_ii > 0)
6285 if (modulo_backtracks_left == 0)
6286 goto end_schedule;
6287 modulo_backtracks_left--;
6289 while (must_backtrack)
6291 struct haifa_saved_data *failed;
6292 rtx failed_insn;
6294 must_backtrack = false;
6295 failed = verify_shadows ();
6296 gcc_assert (failed);
6298 failed_insn = failed->delay_pair->i1;
6299 /* Clear these queues. */
6300 perform_replacements_new_cycle ();
6301 toggle_cancelled_flags (false);
6302 unschedule_insns_until (failed_insn);
6303 while (failed != backtrack_queue)
6304 free_topmost_backtrack_point (true);
6305 restore_last_backtrack_point (&ls);
6306 if (sched_verbose >= 2)
6307 fprintf (sched_dump, ";;\t\trewind to cycle %d\n", clock_var);
6308 /* Delay by at least a cycle. This could cause additional
6309 backtracking. */
6310 queue_insn (failed_insn, 1, "backtracked");
6311 advance = 0;
6312 if (must_backtrack)
6313 continue;
6314 if (ready.n_ready > 0)
6315 goto resume_after_backtrack;
6316 else
6318 if (clock_var == 0 && ls.first_cycle_insn_p)
6319 goto end_schedule;
6320 advance = 1;
6321 break;
6325 if (ls.modulo_epilogue)
6326 success = true;
6327 end_schedule:
6328 advance_one_cycle ();
6329 perform_replacements_new_cycle ();
6330 if (modulo_ii > 0)
6332 /* Once again, debug insn suckiness: they can be on the ready list
6333 even if they have unresolved dependencies. To make our view
6334 of the world consistent, remove such "ready" insns. */
6335 restart_debug_insn_loop:
6336 for (i = ready.n_ready - 1; i >= 0; i--)
6338 rtx x;
6340 x = ready_element (&ready, i);
6341 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (x)) != NULL
6342 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (x)) != NULL)
6344 ready_remove (&ready, i);
6345 goto restart_debug_insn_loop;
6348 for (i = ready.n_ready - 1; i >= 0; i--)
6350 rtx x;
6352 x = ready_element (&ready, i);
6353 resolve_dependencies (x);
6355 for (i = 0; i <= max_insn_queue_index; i++)
6357 rtx link;
6358 while ((link = insn_queue[i]) != NULL)
6360 rtx x = XEXP (link, 0);
6361 insn_queue[i] = XEXP (link, 1);
6362 QUEUE_INDEX (x) = QUEUE_NOWHERE;
6363 free_INSN_LIST_node (link);
6364 resolve_dependencies (x);
6369 if (!success)
6370 undo_all_replacements ();
6372 /* Debug info. */
6373 if (sched_verbose)
6375 fprintf (sched_dump, ";;\tReady list (final): ");
6376 debug_ready_list (&ready);
6379 if (modulo_ii == 0 && current_sched_info->queue_must_finish_empty)
6380 /* Sanity check -- queue must be empty now. Meaningless if region has
6381 multiple bbs. */
6382 gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
6383 else if (modulo_ii == 0)
6385 /* We must maintain QUEUE_INDEX between blocks in region. */
6386 for (i = ready.n_ready - 1; i >= 0; i--)
6388 rtx x;
6390 x = ready_element (&ready, i);
6391 QUEUE_INDEX (x) = QUEUE_NOWHERE;
6392 TODO_SPEC (x) = HARD_DEP;
6395 if (q_size)
6396 for (i = 0; i <= max_insn_queue_index; i++)
6398 rtx link;
6399 for (link = insn_queue[i]; link; link = XEXP (link, 1))
6401 rtx x;
6403 x = XEXP (link, 0);
6404 QUEUE_INDEX (x) = QUEUE_NOWHERE;
6405 TODO_SPEC (x) = HARD_DEP;
6407 free_INSN_LIST_list (&insn_queue[i]);
6411 if (sched_pressure == SCHED_PRESSURE_MODEL)
6412 model_end_schedule ();
6414 if (success)
6416 commit_schedule (prev_head, tail, target_bb);
6417 if (sched_verbose)
6418 fprintf (sched_dump, ";; total time = %d\n", clock_var);
6420 else
6421 last_scheduled_insn = tail;
6423 scheduled_insns.truncate (0);
6425 if (!current_sched_info->queue_must_finish_empty
6426 || haifa_recovery_bb_recently_added_p)
6428 /* INSN_TICK (the minimum clock tick at which an insn becomes
6429 ready) may not be correct for insns in the subsequent
6430 blocks of the region. We should use a correct value of
6431 `clock_var' or modify INSN_TICK. It is better to keep
6432 the clock_var value equal to 0 at the start of a basic block.
6433 Therefore we modify INSN_TICK here. */
6434 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
6437 if (targetm.sched.finish)
6439 targetm.sched.finish (sched_dump, sched_verbose);
6440 /* Target might have added some instructions to the scheduled block
6441 in its md_finish () hook. These new insns don't have any data
6442 initialized and to identify them we extend h_i_d so that they'll
6443 get zero luids. */
6444 sched_extend_luids ();
6447 if (sched_verbose)
6448 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n\n",
6449 INSN_UID (head), INSN_UID (tail));
6451 /* Update head/tail boundaries. */
6452 head = NEXT_INSN (prev_head);
6453 tail = last_scheduled_insn;
6455 head = restore_other_notes (head, NULL);
6457 current_sched_info->head = head;
6458 current_sched_info->tail = tail;
6460 free_backtrack_queue ();
6462 return success;
6465 /* Set_priorities: compute priority of each insn in the block. */
6467 int
6468 set_priorities (rtx head, rtx tail)
6470 rtx insn;
6471 int n_insn;
6472 int sched_max_insns_priority =
6473 current_sched_info->sched_max_insns_priority;
6474 rtx prev_head;
6476 if (head == tail && ! INSN_P (head))
6477 gcc_unreachable ();
6479 n_insn = 0;
6481 prev_head = PREV_INSN (head);
6482 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
6484 if (!INSN_P (insn))
6485 continue;
6487 n_insn++;
6488 (void) priority (insn);
6490 gcc_assert (INSN_PRIORITY_KNOWN (insn));
6492 sched_max_insns_priority = MAX (sched_max_insns_priority,
6493 INSN_PRIORITY (insn));
6496 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
6498 return n_insn;
6501 /* Set dump and sched_verbose for the desired debugging output. If no
6502 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
6503 For -fsched-verbose=N, N>=10, print everything to stderr. */
6504 void
6505 setup_sched_dump (void)
6507 sched_verbose = sched_verbose_param;
6508 if (sched_verbose_param == 0 && dump_file)
6509 sched_verbose = 1;
6510 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
6511 ? stderr : dump_file);
6514 /* Initialize some global state for the scheduler. This function works
6515 with the common data shared between all the schedulers. It is called
6516 from the scheduler specific initialization routine. */
6518 void
6519 sched_init (void)
6521 /* Disable speculative loads in the presence of cc0. */
6522 #ifdef HAVE_cc0
6523 flag_schedule_speculative_load = 0;
6524 #endif
6526 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
6527 targetm.sched.dispatch_do (NULL_RTX, DISPATCH_INIT);
6529 if (flag_sched_pressure
6530 && !reload_completed
6531 && common_sched_info->sched_pass_id == SCHED_RGN_PASS)
6532 sched_pressure = ((enum sched_pressure_algorithm)
6533 PARAM_VALUE (PARAM_SCHED_PRESSURE_ALGORITHM));
6534 else
6535 sched_pressure = SCHED_PRESSURE_NONE;
6537 if (sched_pressure != SCHED_PRESSURE_NONE)
6538 ira_setup_eliminable_regset (false);
6540 /* Initialize SPEC_INFO. */
6541 if (targetm.sched.set_sched_flags)
6543 spec_info = &spec_info_var;
6544 targetm.sched.set_sched_flags (spec_info);
6546 if (spec_info->mask != 0)
6548 spec_info->data_weakness_cutoff =
6549 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
6550 spec_info->control_weakness_cutoff =
6551 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
6552 * REG_BR_PROB_BASE) / 100;
6554 else
6555 /* So we won't read anything accidentally. */
6556 spec_info = NULL;
6559 else
6560 /* So we won't read anything accidentally. */
6561 spec_info = 0;
6563 /* Initialize issue_rate. */
6564 if (targetm.sched.issue_rate)
6565 issue_rate = targetm.sched.issue_rate ();
6566 else
6567 issue_rate = 1;
6569 if (cached_issue_rate != issue_rate)
6571 cached_issue_rate = issue_rate;
6572 /* To invalidate max_lookahead_tries: */
6573 cached_first_cycle_multipass_dfa_lookahead = 0;
6576 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
6577 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
6578 else
6579 dfa_lookahead = 0;
6581 if (targetm.sched.init_dfa_pre_cycle_insn)
6582 targetm.sched.init_dfa_pre_cycle_insn ();
6584 if (targetm.sched.init_dfa_post_cycle_insn)
6585 targetm.sched.init_dfa_post_cycle_insn ();
6587 dfa_start ();
6588 dfa_state_size = state_size ();
6590 init_alias_analysis ();
6592 if (!sched_no_dce)
6593 df_set_flags (DF_LR_RUN_DCE);
6594 df_note_add_problem ();
6596 /* More problems needed for interloop dep calculation in SMS. */
6597 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
6599 df_rd_add_problem ();
6600 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
6603 df_analyze ();
6605 /* Do not run DCE after reload, as this can kill nops inserted
6606 by bundling. */
6607 if (reload_completed)
6608 df_clear_flags (DF_LR_RUN_DCE);
6610 regstat_compute_calls_crossed ();
6612 if (targetm.sched.init_global)
6613 targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
6615 if (sched_pressure != SCHED_PRESSURE_NONE)
6617 int i, max_regno = max_reg_num ();
6619 if (sched_dump != NULL)
6620 /* We need info about pseudos for rtl dumps about pseudo
6621 classes and costs. */
6622 regstat_init_n_sets_and_refs ();
6623 ira_set_pseudo_classes (true, sched_verbose ? sched_dump : NULL);
6624 sched_regno_pressure_class
6625 = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
6626 for (i = 0; i < max_regno; i++)
6627 sched_regno_pressure_class[i]
6628 = (i < FIRST_PSEUDO_REGISTER
6629 ? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
6630 : ira_pressure_class_translate[reg_allocno_class (i)]);
6631 curr_reg_live = BITMAP_ALLOC (NULL);
6632 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
6634 saved_reg_live = BITMAP_ALLOC (NULL);
6635 region_ref_regs = BITMAP_ALLOC (NULL);
6639 curr_state = xmalloc (dfa_state_size);
6642 static void haifa_init_only_bb (basic_block, basic_block);
6644 /* Initialize data structures specific to the Haifa scheduler. */
6645 void
6646 haifa_sched_init (void)
6648 setup_sched_dump ();
6649 sched_init ();
6651 scheduled_insns.create (0);
6653 if (spec_info != NULL)
6655 sched_deps_info->use_deps_list = 1;
6656 sched_deps_info->generate_spec_deps = 1;
6659 /* Initialize luids, dependency caches, target and h_i_d for the
6660 whole function. */
6662 bb_vec_t bbs;
6663 bbs.create (n_basic_blocks);
6664 basic_block bb;
6666 sched_init_bbs ();
6668 FOR_EACH_BB (bb)
6669 bbs.quick_push (bb);
6670 sched_init_luids (bbs);
6671 sched_deps_init (true);
6672 sched_extend_target ();
6673 haifa_init_h_i_d (bbs);
6675 bbs.release ();
6678 sched_init_only_bb = haifa_init_only_bb;
6679 sched_split_block = sched_split_block_1;
6680 sched_create_empty_bb = sched_create_empty_bb_1;
6681 haifa_recovery_bb_ever_added_p = false;
6683 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
6684 before_recovery = 0;
6685 after_recovery = 0;
6687 modulo_ii = 0;
6690 /* Finish work with the data specific to the Haifa scheduler. */
6691 void
6692 haifa_sched_finish (void)
6694 sched_create_empty_bb = NULL;
6695 sched_split_block = NULL;
6696 sched_init_only_bb = NULL;
6698 if (spec_info && spec_info->dump)
6700 char c = reload_completed ? 'a' : 'b';
6702 fprintf (spec_info->dump,
6703 ";; %s:\n", current_function_name ());
6705 fprintf (spec_info->dump,
6706 ";; Procedure %cr-begin-data-spec motions == %d\n",
6707 c, nr_begin_data);
6708 fprintf (spec_info->dump,
6709 ";; Procedure %cr-be-in-data-spec motions == %d\n",
6710 c, nr_be_in_data);
6711 fprintf (spec_info->dump,
6712 ";; Procedure %cr-begin-control-spec motions == %d\n",
6713 c, nr_begin_control);
6714 fprintf (spec_info->dump,
6715 ";; Procedure %cr-be-in-control-spec motions == %d\n",
6716 c, nr_be_in_control);
6719 scheduled_insns.release ();
6721 /* Finalize h_i_d, dependency caches, and luids for the whole
6722 function. Target will be finalized in md_global_finish (). */
6723 sched_deps_finish ();
6724 sched_finish_luids ();
6725 current_sched_info = NULL;
6726 sched_finish ();
6729 /* Free global data used during insn scheduling. This function works with
6730 the common data shared between the schedulers. */
6732 void
6733 sched_finish (void)
6735 haifa_finish_h_i_d ();
6736 if (sched_pressure != SCHED_PRESSURE_NONE)
6738 if (regstat_n_sets_and_refs != NULL)
6739 regstat_free_n_sets_and_refs ();
6740 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
6742 BITMAP_FREE (region_ref_regs);
6743 BITMAP_FREE (saved_reg_live);
6745 BITMAP_FREE (curr_reg_live);
6746 free (sched_regno_pressure_class);
6748 free (curr_state);
6750 if (targetm.sched.finish_global)
6751 targetm.sched.finish_global (sched_dump, sched_verbose);
6753 end_alias_analysis ();
6755 regstat_free_calls_crossed ();
6757 dfa_finish ();
6760 /* Free all delay_pair structures that were recorded. */
6761 void
6762 free_delay_pairs (void)
6764 if (delay_htab)
6766 htab_empty (delay_htab);
6767 htab_empty (delay_htab_i2);
6771 /* Fix INSN_TICKs of the instructions in the current block as well as
6772 INSN_TICKs of their dependents.
6773 HEAD and TAIL are the beginning and the end of the current scheduled block. */
6774 static void
6775 fix_inter_tick (rtx head, rtx tail)
6777 /* Set of instructions with corrected INSN_TICK. */
6778 bitmap_head processed;
6779 /* ??? It is doubtful whether we should assume that cycle advance happens on
6780 basic block boundaries. Basically insns that are unconditionally ready
6781 on the start of the block are preferable to those which have
6782 a one-cycle dependency on an insn from the previous block. */
6783 int next_clock = clock_var + 1;
6785 bitmap_initialize (&processed, 0);
6787 /* Iterate over the scheduled instructions and fix their INSN_TICKs and the
6788 INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
6789 across different blocks. */
6790 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
6792 if (INSN_P (head))
6794 int tick;
6795 sd_iterator_def sd_it;
6796 dep_t dep;
6798 tick = INSN_TICK (head);
6799 gcc_assert (tick >= MIN_TICK);
6801 /* Fix INSN_TICK of instruction from just scheduled block. */
6802 if (bitmap_set_bit (&processed, INSN_LUID (head)))
6804 tick -= next_clock;
6806 if (tick < MIN_TICK)
6807 tick = MIN_TICK;
6809 INSN_TICK (head) = tick;
6812 if (DEBUG_INSN_P (head))
6813 continue;
6815 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
6817 rtx next;
6819 next = DEP_CON (dep);
6820 tick = INSN_TICK (next);
6822 if (tick != INVALID_TICK
6823 /* If NEXT has its INSN_TICK calculated, fix it.
6824 If not - it will be properly calculated from
6825 scratch later in fix_tick_ready. */
6826 && bitmap_set_bit (&processed, INSN_LUID (next)))
6828 tick -= next_clock;
6830 if (tick < MIN_TICK)
6831 tick = MIN_TICK;
6833 if (tick > INTER_TICK (next))
6834 INTER_TICK (next) = tick;
6835 else
6836 tick = INTER_TICK (next);
6838 INSN_TICK (next) = tick;
6843 bitmap_clear (&processed);
6846 /* Check if NEXT is ready to be added to the ready or queue list.
6847 If "yes", add it to the proper list.
6848 Returns:
6849 -1 - is not ready yet,
6850 0 - added to the ready list,
6851 0 < N - queued for N cycles. */
6852 int
6853 try_ready (rtx next)
6855 ds_t old_ts, new_ts;
6857 old_ts = TODO_SPEC (next);
6859 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP | DEP_CONTROL | DEP_POSTPONED))
6860 && (old_ts == HARD_DEP
6861 || old_ts == DEP_POSTPONED
6862 || (old_ts & SPECULATIVE)
6863 || old_ts == DEP_CONTROL));
6865 new_ts = recompute_todo_spec (next, false);
6867 if (new_ts & (HARD_DEP | DEP_POSTPONED))
6868 gcc_assert (new_ts == old_ts
6869 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
6870 else if (current_sched_info->new_ready)
6871 new_ts = current_sched_info->new_ready (next, new_ts);
6873 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
6874 have its original pattern or changed (speculative) one. This is due
6875 to changing ebb in region scheduling.
6876 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
6877 has speculative pattern.
6879 We can't assert (!(new_ts & HARD_DEP) || new_ts == old_ts) here because
6880 control-speculative NEXT could have been discarded by sched-rgn.c
6881 (the same case as when discarded by can_schedule_ready_p ()). */
6883 if ((new_ts & SPECULATIVE)
6884 /* If (old_ts == new_ts), then (old_ts & SPECULATIVE) and we don't
6885 need to change anything. */
6886 && new_ts != old_ts)
6888 int res;
6889 rtx new_pat;
6891 gcc_assert ((new_ts & SPECULATIVE) && !(new_ts & ~SPECULATIVE));
6893 res = haifa_speculate_insn (next, new_ts, &new_pat);
6895 switch (res)
6897 case -1:
6898 /* It would be nice to change DEP_STATUS of all dependences,
6899 which have ((DEP_STATUS & SPECULATIVE) == new_ts) to HARD_DEP,
6900 so we won't reanalyze anything. */
6901 new_ts = HARD_DEP;
6902 break;
6904 case 0:
6905 /* We follow the rule, that every speculative insn
6906 has non-null ORIG_PAT. */
6907 if (!ORIG_PAT (next))
6908 ORIG_PAT (next) = PATTERN (next);
6909 break;
6911 case 1:
6912 if (!ORIG_PAT (next))
6913 /* If we are going to overwrite the original pattern of insn,
6914 save it. */
6915 ORIG_PAT (next) = PATTERN (next);
6917 res = haifa_change_pattern (next, new_pat);
6918 gcc_assert (res);
6919 break;
6921 default:
6922 gcc_unreachable ();
6926 /* We need to restore pattern only if (new_ts == 0), because otherwise it is
6927 either correct (new_ts & SPECULATIVE),
6928 or we simply don't care (new_ts & HARD_DEP). */
6930 gcc_assert (!ORIG_PAT (next)
6931 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
6933 TODO_SPEC (next) = new_ts;
6935 if (new_ts & (HARD_DEP | DEP_POSTPONED))
6937 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
6938 control-speculative NEXT could have been discarded by sched-rgn.c
6939 (the same case as when discarded by can_schedule_ready_p ()). */
6940 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
6942 change_queue_index (next, QUEUE_NOWHERE);
6944 return -1;
6946 else if (!(new_ts & BEGIN_SPEC)
6947 && ORIG_PAT (next) && PREDICATED_PAT (next) == NULL_RTX
6948 && !IS_SPECULATION_CHECK_P (next))
6949 /* We should change the pattern of every previously speculative
6950 instruction - and we determine whether NEXT was speculative by using
6951 the ORIG_PAT field. Except one case - speculation checks have ORIG_PAT
6952 set too, so skip them. */
6954 bool success = haifa_change_pattern (next, ORIG_PAT (next));
6955 gcc_assert (success);
6956 ORIG_PAT (next) = 0;
6959 if (sched_verbose >= 2)
6961 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
6962 (*current_sched_info->print_insn) (next, 0));
6964 if (spec_info && spec_info->dump)
6966 if (new_ts & BEGIN_DATA)
6967 fprintf (spec_info->dump, "; data-spec;");
6968 if (new_ts & BEGIN_CONTROL)
6969 fprintf (spec_info->dump, "; control-spec;");
6970 if (new_ts & BE_IN_CONTROL)
6971 fprintf (spec_info->dump, "; in-control-spec;");
6973 if (TODO_SPEC (next) & DEP_CONTROL)
6974 fprintf (sched_dump, " predicated");
6975 fprintf (sched_dump, "\n");
6978 adjust_priority (next);
6980 return fix_tick_ready (next);
6983 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
6984 static int
6985 fix_tick_ready (rtx next)
6987 int tick, delay;
6989 if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
6991 int full_p;
6992 sd_iterator_def sd_it;
6993 dep_t dep;
6995 tick = INSN_TICK (next);
6996 /* if tick is not equal to INVALID_TICK, then update
6997 INSN_TICK of NEXT with the most recent resolved dependence
6998 cost. Otherwise, recalculate from scratch. */
6999 full_p = (tick == INVALID_TICK);
7001 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
7003 rtx pro = DEP_PRO (dep);
7004 int tick1;
7006 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
7008 tick1 = INSN_TICK (pro) + dep_cost (dep);
7009 if (tick1 > tick)
7010 tick = tick1;
7012 if (!full_p)
7013 break;
7016 else
7017 tick = -1;
7019 INSN_TICK (next) = tick;
7021 delay = tick - clock_var;
7022 if (delay <= 0 || sched_pressure != SCHED_PRESSURE_NONE)
7023 delay = QUEUE_READY;
7025 change_queue_index (next, delay);
7027 return delay;
7030 /* Move NEXT to the proper queue list with (DELAY >= 1),
7031 or add it to the ready list (DELAY == QUEUE_READY),
7032 or remove it from the ready and queue lists altogether (DELAY == QUEUE_NOWHERE). */
7033 static void
7034 change_queue_index (rtx next, int delay)
7036 int i = QUEUE_INDEX (next);
7038 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
7039 && delay != 0);
7040 gcc_assert (i != QUEUE_SCHEDULED);
7042 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
7043 || (delay < 0 && delay == i))
7044 /* We have nothing to do. */
7045 return;
7047 /* Remove NEXT from wherever it is now. */
7048 if (i == QUEUE_READY)
7049 ready_remove_insn (next);
7050 else if (i >= 0)
7051 queue_remove (next);
7053 /* Add it to the proper place. */
7054 if (delay == QUEUE_READY)
7055 ready_add (readyp, next, false);
7056 else if (delay >= 1)
7057 queue_insn (next, delay, "change queue index");
7059 if (sched_verbose >= 2)
7061 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
7062 (*current_sched_info->print_insn) (next, 0));
7064 if (delay == QUEUE_READY)
7065 fprintf (sched_dump, " into ready\n");
7066 else if (delay >= 1)
7067 fprintf (sched_dump, " into queue with cost=%d\n", delay);
7068 else
7069 fprintf (sched_dump, " removed from ready or queue lists\n");
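  /* Callers in this file exercise all three cases: choose_ready () delays a
     rejected speculative insn by one cycle, try_ready () passes QUEUE_NOWHERE
     to drop an insn with a hard or postponed dependency from both lists, and
     fix_tick_ready () passes either QUEUE_READY or a positive delay computed
     from INSN_TICK.  */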
7073 static int sched_ready_n_insns = -1;
7075 /* Initialize per region data structures. */
7076 void
7077 sched_extend_ready_list (int new_sched_ready_n_insns)
7079 int i;
7081 if (sched_ready_n_insns == -1)
7082 /* At the first call we need to initialize one more choice_stack
7083 entry. */
7085 i = 0;
7086 sched_ready_n_insns = 0;
7087 scheduled_insns.reserve (new_sched_ready_n_insns);
7089 else
7090 i = sched_ready_n_insns + 1;
7092 ready.veclen = new_sched_ready_n_insns + issue_rate;
7093 ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);
7095 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
7097 ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
7098 sched_ready_n_insns, sizeof (*ready_try));
7100 /* We allocate +1 element to save initial state in the choice_stack[0]
7101 entry. */
7102 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
7103 new_sched_ready_n_insns + 1);
7105 for (; i <= new_sched_ready_n_insns; i++)
7107 choice_stack[i].state = xmalloc (dfa_state_size);
7109 if (targetm.sched.first_cycle_multipass_init)
7110 targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
7111 .target_data));
7114 sched_ready_n_insns = new_sched_ready_n_insns;
7117 /* Free per region data structures. */
7118 void
7119 sched_finish_ready_list (void)
7121 int i;
7123 free (ready.vec);
7124 ready.vec = NULL;
7125 ready.veclen = 0;
7127 free (ready_try);
7128 ready_try = NULL;
7130 for (i = 0; i <= sched_ready_n_insns; i++)
7132 if (targetm.sched.first_cycle_multipass_fini)
7133 targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
7134 .target_data));
7136 free (choice_stack [i].state);
7138 free (choice_stack);
7139 choice_stack = NULL;
7141 sched_ready_n_insns = -1;
7144 static int
7145 haifa_luid_for_non_insn (rtx x)
7147 gcc_assert (NOTE_P (x) || LABEL_P (x));
7149 return 0;
7152 /* Generates recovery code for INSN. */
7153 static void
7154 generate_recovery_code (rtx insn)
7156 if (TODO_SPEC (insn) & BEGIN_SPEC)
7157 begin_speculative_block (insn);
7159 /* Here we have an insn with no dependencies on
7160 instructions other than CHECK_SPEC ones. */
7162 if (TODO_SPEC (insn) & BE_IN_SPEC)
7163 add_to_speculative_block (insn);
7166 /* Helper function.
7167 Tries to add speculative dependencies of type FS between TWIN and the
7168 consumers of INSN's forward dependencies. */
7169 static void
7170 process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
7172 sd_iterator_def sd_it;
7173 dep_t dep;
7175 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
7177 ds_t ds;
7178 rtx consumer;
7180 consumer = DEP_CON (dep);
7182 ds = DEP_STATUS (dep);
7184 if (/* If we want to create a speculative dep. */
7185 fs
7186 /* And we can do that because this is a true dep. */
7187 && (ds & DEP_TYPES) == DEP_TRUE)
7189 gcc_assert (!(ds & BE_IN_SPEC));
7191 if (/* If this dep can be overcome with 'begin speculation'. */
7192 ds & BEGIN_SPEC)
7193 /* Then we have a choice: keep the dep 'begin speculative'
7194 or transform it into 'be in speculative'. */
7196 if (/* In try_ready we assert that once an insn has become ready
7197 it can be removed from the ready (or queue) list only
7198 due to a backend decision. Hence we can't let the
7199 probability of the speculative dep decrease. */
7200 ds_weak (ds) <= ds_weak (fs))
7202 ds_t new_ds;
7204 new_ds = (ds & ~BEGIN_SPEC) | fs;
7206 if (/* consumer can 'be in speculative'. */
7207 sched_insn_is_legitimate_for_speculation_p (consumer,
7208 new_ds))
7209 /* Transform it to be in speculative. */
7210 ds = new_ds;
7213 else
7214 /* Mark the dep as 'be in speculative'. */
7215 ds |= fs;
7219 dep_def _new_dep, *new_dep = &_new_dep;
7221 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
7222 sd_add_dep (new_dep, false);
7227 /* Generates recovery code for BEGIN speculative INSN. */
7228 static void
7229 begin_speculative_block (rtx insn)
7231 if (TODO_SPEC (insn) & BEGIN_DATA)
7232 nr_begin_data++;
7233 if (TODO_SPEC (insn) & BEGIN_CONTROL)
7234 nr_begin_control++;
7236 create_check_block_twin (insn, false);
7238 TODO_SPEC (insn) &= ~BEGIN_SPEC;
7241 static void haifa_init_insn (rtx);
7243 /* Generates recovery code for BE_IN speculative INSN. */
7244 static void
7245 add_to_speculative_block (rtx insn)
7247 ds_t ts;
7248 sd_iterator_def sd_it;
7249 dep_t dep;
7250 rtx twins = NULL;
7251 rtx_vec_t priorities_roots;
7253 ts = TODO_SPEC (insn);
7254 gcc_assert (!(ts & ~BE_IN_SPEC));
7256 if (ts & BE_IN_DATA)
7257 nr_be_in_data++;
7258 if (ts & BE_IN_CONTROL)
7259 nr_be_in_control++;
7261 TODO_SPEC (insn) &= ~BE_IN_SPEC;
7262 gcc_assert (!TODO_SPEC (insn));
7264 DONE_SPEC (insn) |= ts;
7266 /* First we convert all simple checks to branchy ones. */
7267 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7268 sd_iterator_cond (&sd_it, &dep);)
7270 rtx check = DEP_PRO (dep);
7272 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
7274 create_check_block_twin (check, true);
7276 /* Restart search. */
7277 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7279 else
7280 /* Continue search. */
7281 sd_iterator_next (&sd_it);
7284 priorities_roots.create (0);
7285 clear_priorities (insn, &priorities_roots);
7287 while (1)
7289 rtx check, twin;
7290 basic_block rec;
7292 /* Get the first backward dependency of INSN. */
7293 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7294 if (!sd_iterator_cond (&sd_it, &dep))
7295 /* INSN has no backward dependencies left. */
7296 break;
7298 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
7299 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
7300 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
7302 check = DEP_PRO (dep);
7304 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
7305 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
7307 rec = BLOCK_FOR_INSN (check);
7309 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
7310 haifa_init_insn (twin);
7312 sd_copy_back_deps (twin, insn, true);
7314 if (sched_verbose && spec_info->dump)
7315 /* INSN_BB (insn) isn't determined for twin insns yet.
7316 So we can't use current_sched_info->print_insn. */
7317 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
7318 INSN_UID (twin), rec->index);
7320 twins = alloc_INSN_LIST (twin, twins);
7322 /* Add dependences between TWIN and all appropriate
7323 instructions from REC. */
7324 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
7326 rtx pro = DEP_PRO (dep);
7328 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
7330 /* INSN might have dependencies on instructions from
7331 several recovery blocks. At this iteration we process those
7332 producers that reside in REC. */
7333 if (BLOCK_FOR_INSN (pro) == rec)
7335 dep_def _new_dep, *new_dep = &_new_dep;
7337 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
7338 sd_add_dep (new_dep, false);
7342 process_insn_forw_deps_be_in_spec (insn, twin, ts);
7344 /* Remove all dependencies between INSN and insns in REC. */
7345 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7346 sd_iterator_cond (&sd_it, &dep);)
7348 rtx pro = DEP_PRO (dep);
7350 if (BLOCK_FOR_INSN (pro) == rec)
7351 sd_delete_dep (sd_it);
7352 else
7353 sd_iterator_next (&sd_it);
7357 /* We couldn't have added the dependencies between INSN and TWINS earlier
7358 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
7359 while (twins)
7361 rtx twin;
7363 twin = XEXP (twins, 0);
7366 dep_def _new_dep, *new_dep = &_new_dep;
7368 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
7369 sd_add_dep (new_dep, false);
7372 twin = XEXP (twins, 1);
7373 free_INSN_LIST_node (twins);
7374 twins = twin;
7377 calc_priorities (priorities_roots);
7378 priorities_roots.release ();
7381 /* Extend the array pointed to by P and zero-fill only the newly added part. */
7382 void *
7383 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
7385 gcc_assert (new_nmemb >= old_nmemb);
7386 p = XRESIZEVAR (void, p, new_nmemb * size);
7387 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
7388 return p;
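/* Usage sketch with a hypothetical buffer BUF: grow it from 4 to 8 elements
   and have the new tail zero-initialized,

     buf = (int *) xrecalloc (buf, 8, 4, sizeof (int));

   ready_try is resized this way in sched_extend_ready_list () above.  */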
7391 /* Helper function.
7392 Find fallthru edge from PRED. */
7393 edge
7394 find_fallthru_edge_from (basic_block pred)
7396 edge e;
7397 basic_block succ;
7399 succ = pred->next_bb;
7400 gcc_assert (succ->prev_bb == pred);
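  /* Look for the fallthru edge on whichever of the two edge lists is
     shorter.  */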
7402 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
7404 e = find_fallthru_edge (pred->succs);
7406 if (e)
7408 gcc_assert (e->dest == succ);
7409 return e;
7412 else
7414 e = find_fallthru_edge (succ->preds);
7416 if (e)
7418 gcc_assert (e->src == pred);
7419 return e;
7423 return NULL;
7426 /* Extend per basic block data structures. */
7427 static void
7428 sched_extend_bb (void)
7430 rtx insn;
7432 /* The following is done to keep current_sched_info->next_tail non null. */
7433 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
7434 if (NEXT_INSN (insn) == 0
7435 || (!NOTE_P (insn)
7436 && !LABEL_P (insn)
7437 /* Don't emit a NOTE if it would end up before a BARRIER. */
7438 && !BARRIER_P (NEXT_INSN (insn))))
7440 rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
7441 /* Make the note appear outside any BB. */
7442 set_block_for_insn (note, NULL);
7443 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
7447 /* Init per basic block data structures. */
7448 void
7449 sched_init_bbs (void)
7451 sched_extend_bb ();
7454 /* Initialize BEFORE_RECOVERY variable. */
7455 static void
7456 init_before_recovery (basic_block *before_recovery_ptr)
7458 basic_block last;
7459 edge e;
7461 last = EXIT_BLOCK_PTR->prev_bb;
7462 e = find_fallthru_edge_from (last);
7464 if (e)
7466 /* We create two basic blocks:
7467 1. A single-instruction block that is inserted right after E->SRC
7468 and jumps to
7469 2. An empty block right before EXIT_BLOCK.
7470 Recovery blocks will be emitted between these two blocks. */
7472 basic_block single, empty;
7473 rtx x, label;
7475 /* If the fallthrough edge to exit we've found is from the block we've
7476 created before, don't do anything more. */
7477 if (last == after_recovery)
7478 return;
7480 adding_bb_to_current_region_p = false;
7482 single = sched_create_empty_bb (last);
7483 empty = sched_create_empty_bb (single);
7485 /* Add new blocks to the root loop. */
7486 if (current_loops != NULL)
7488 add_bb_to_loop (single, (*current_loops->larray)[0]);
7489 add_bb_to_loop (empty, (*current_loops->larray)[0]);
7492 single->count = last->count;
7493 empty->count = last->count;
7494 single->frequency = last->frequency;
7495 empty->frequency = last->frequency;
7496 BB_COPY_PARTITION (single, last);
7497 BB_COPY_PARTITION (empty, last);
7499 redirect_edge_succ (e, single);
7500 make_single_succ_edge (single, empty, 0);
7501 make_single_succ_edge (empty, EXIT_BLOCK_PTR, EDGE_FALLTHRU);
7503 label = block_label (empty);
7504 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
7505 JUMP_LABEL (x) = label;
7506 LABEL_NUSES (label)++;
7507 haifa_init_insn (x);
7509 emit_barrier_after (x);
7511 sched_init_only_bb (empty, NULL);
7512 sched_init_only_bb (single, NULL);
7513 sched_extend_bb ();
7515 adding_bb_to_current_region_p = true;
7516 before_recovery = single;
7517 after_recovery = empty;
7519 if (before_recovery_ptr)
7520 *before_recovery_ptr = before_recovery;
7522 if (sched_verbose >= 2 && spec_info->dump)
7523 fprintf (spec_info->dump,
7524 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
7525 last->index, single->index, empty->index);
7527 else
7528 before_recovery = last;
7531 /* Returns new recovery block. */
7532 basic_block
7533 sched_create_recovery_block (basic_block *before_recovery_ptr)
7535 rtx label;
7536 rtx barrier;
7537 basic_block rec;
7539 haifa_recovery_bb_recently_added_p = true;
7540 haifa_recovery_bb_ever_added_p = true;
7542 init_before_recovery (before_recovery_ptr);
7544 barrier = get_last_bb_insn (before_recovery);
7545 gcc_assert (BARRIER_P (barrier));
7547 label = emit_label_after (gen_label_rtx (), barrier);
7549 rec = create_basic_block (label, label, before_recovery);
7551 /* A recovery block always ends with an unconditional jump. */
7552 emit_barrier_after (BB_END (rec));
7554 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
7555 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
7557 if (sched_verbose && spec_info->dump)
7558 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
7559 rec->index);
7561 return rec;
7564 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
7565 and emit necessary jumps. */
7566 void
7567 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
7568 basic_block second_bb)
7570 rtx label;
7571 rtx jump;
7572 int edge_flags;
7574 /* This is fixing of incoming edge. */
7575 /* ??? Which other flags should be specified? */
7576 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
7577 /* Partition type is the same, if it is "unpartitioned". */
7578 edge_flags = EDGE_CROSSING;
7579 else
7580 edge_flags = 0;
7582 make_edge (first_bb, rec, edge_flags);
7583 label = block_label (second_bb);
7584 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
7585 JUMP_LABEL (jump) = label;
7586 LABEL_NUSES (label)++;
7588 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
7589 /* Partition type is the same, if it is "unpartitioned". */
7591 /* Rewritten from cfgrtl.c. */
7592 if (flag_reorder_blocks_and_partition
7593 && targetm_common.have_named_sections)
7595 /* We don't need the same note for the check because
7596 any_condjump_p (check) == true. */
7597 add_reg_note (jump, REG_CROSSING_JUMP, NULL_RTX);
7599 edge_flags = EDGE_CROSSING;
7601 else
7602 edge_flags = 0;
7604 make_single_succ_edge (rec, second_bb, edge_flags);
7605 if (dom_info_available_p (CDI_DOMINATORS))
7606 set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
7609 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
7610 INSN is a simple check, that should be converted to branchy one. */
7611 static void
7612 create_check_block_twin (rtx insn, bool mutate_p)
7614 basic_block rec;
7615 rtx label, check, twin;
7616 ds_t fs;
7617 sd_iterator_def sd_it;
7618 dep_t dep;
7619 dep_def _new_dep, *new_dep = &_new_dep;
7620 ds_t todo_spec;
7622 gcc_assert (ORIG_PAT (insn) != NULL_RTX);
7624 if (!mutate_p)
7625 todo_spec = TODO_SPEC (insn);
7626 else
7628 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
7629 && (TODO_SPEC (insn) & SPECULATIVE) == 0);
7631 todo_spec = CHECK_SPEC (insn);
7634 todo_spec &= SPECULATIVE;
7636 /* Create recovery block. */
7637 if (mutate_p || targetm.sched.needs_block_p (todo_spec))
7639 rec = sched_create_recovery_block (NULL);
7640 label = BB_HEAD (rec);
7642 else
7644 rec = EXIT_BLOCK_PTR;
7645 label = NULL_RTX;
7648 /* Emit CHECK. */
7649 check = targetm.sched.gen_spec_check (insn, label, todo_spec);
7651 if (rec != EXIT_BLOCK_PTR)
7653 /* To have mem_reg alive at the beginning of second_bb,
7654 we emit the check BEFORE insn, so that after splitting,
7655 insn will be at the beginning of second_bb, which will
7656 provide us with the correct life information. */
7657 check = emit_jump_insn_before (check, insn);
7658 JUMP_LABEL (check) = label;
7659 LABEL_NUSES (label)++;
7661 else
7662 check = emit_insn_before (check, insn);
7664 /* Extend data structures. */
7665 haifa_init_insn (check);
7667 /* CHECK is being added to current region. Extend ready list. */
7668 gcc_assert (sched_ready_n_insns != -1);
7669 sched_extend_ready_list (sched_ready_n_insns + 1);
7671 if (current_sched_info->add_remove_insn)
7672 current_sched_info->add_remove_insn (insn, 0);
7674 RECOVERY_BLOCK (check) = rec;
7676 if (sched_verbose && spec_info->dump)
7677 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
7678 (*current_sched_info->print_insn) (check, 0));
7680 gcc_assert (ORIG_PAT (insn));
7682 /* Initialize TWIN (twin is a duplicate of original instruction
7683 in the recovery block). */
7684 if (rec != EXIT_BLOCK_PTR)
7686 sd_iterator_def sd_it;
7687 dep_t dep;
7689 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
7690 if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
7692 struct _dep _dep2, *dep2 = &_dep2;
7694 init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
7696 sd_add_dep (dep2, true);
7699 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
7700 haifa_init_insn (twin);
7702 if (sched_verbose && spec_info->dump)
7703 /* INSN_BB (insn) isn't determined for twin insns yet.
7704 So we can't use current_sched_info->print_insn. */
7705 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
7706 INSN_UID (twin), rec->index);
7708 else
7710 ORIG_PAT (check) = ORIG_PAT (insn);
7711 HAS_INTERNAL_DEP (check) = 1;
7712 twin = check;
7713 /* ??? We probably should change all OUTPUT dependencies to
7714 (TRUE | OUTPUT). */
7717 /* Copy all resolved back dependencies of INSN to TWIN. This will
7718 provide correct value for INSN_TICK (TWIN). */
7719 sd_copy_back_deps (twin, insn, true);
7721 if (rec != EXIT_BLOCK_PTR)
7722 /* In case of branchy check, fix CFG. */
7724 basic_block first_bb, second_bb;
7725 rtx jump;
7727 first_bb = BLOCK_FOR_INSN (check);
7728 second_bb = sched_split_block (first_bb, check);
7730 sched_create_recovery_edges (first_bb, rec, second_bb);
7732 sched_init_only_bb (second_bb, first_bb);
7733 sched_init_only_bb (rec, EXIT_BLOCK_PTR);
7735 jump = BB_END (rec);
7736 haifa_init_insn (jump);
7739 /* Move backward dependences from INSN to CHECK and
7740 move forward dependences from INSN to TWIN. */
7742 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
7743 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
7745 rtx pro = DEP_PRO (dep);
7746 ds_t ds;
7748 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
7749 check --TRUE--> producer ??? or ANTI ???
7750 twin --TRUE--> producer
7751 twin --ANTI--> check
7753 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
7754 check --ANTI--> producer
7755 twin --ANTI--> producer
7756 twin --ANTI--> check
7758 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
7759 check ~~TRUE~~> producer
7760 twin ~~TRUE~~> producer
7761 twin --ANTI--> check */
7763 ds = DEP_STATUS (dep);
7765 if (ds & BEGIN_SPEC)
7767 gcc_assert (!mutate_p);
7768 ds &= ~BEGIN_SPEC;
7771 init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
7772 sd_add_dep (new_dep, false);
7774 if (rec != EXIT_BLOCK_PTR)
7776 DEP_CON (new_dep) = twin;
7777 sd_add_dep (new_dep, false);
7781 /* Second, remove backward dependencies of INSN. */
7782 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7783 sd_iterator_cond (&sd_it, &dep);)
7785 if ((DEP_STATUS (dep) & BEGIN_SPEC)
7786 || mutate_p)
7787 /* We can delete this dep because we overcome it with
7788 BEGIN_SPECULATION. */
7789 sd_delete_dep (sd_it);
7790 else
7791 sd_iterator_next (&sd_it);
7794 /* Future Speculations. Determine what BE_IN speculations will be like. */
7795 fs = 0;
7797 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
7798 here. */
7800 gcc_assert (!DONE_SPEC (insn));
7802 if (!mutate_p)
7804 ds_t ts = TODO_SPEC (insn);
7806 DONE_SPEC (insn) = ts & BEGIN_SPEC;
7807 CHECK_SPEC (check) = ts & BEGIN_SPEC;
7809 /* Luckiness of future speculations solely depends upon initial
7810 BEGIN speculation. */
7811 if (ts & BEGIN_DATA)
7812 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
7813 if (ts & BEGIN_CONTROL)
7814 fs = set_dep_weak (fs, BE_IN_CONTROL,
7815 get_dep_weak (ts, BEGIN_CONTROL));
7817 else
7818 CHECK_SPEC (check) = CHECK_SPEC (insn);
7820 /* Future speculations: call the helper. */
7821 process_insn_forw_deps_be_in_spec (insn, twin, fs);
7823 if (rec != EXIT_BLOCK_PTR)
7825 /* Which types of dependencies we should use here is,
7826 generally, a machine-dependent question... But, for now,
7827 it is not. */
7829 if (!mutate_p)
7831 init_dep (new_dep, insn, check, REG_DEP_TRUE);
7832 sd_add_dep (new_dep, false);
7834 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
7835 sd_add_dep (new_dep, false);
7837 else
7839 if (spec_info->dump)
7840 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
7841 (*current_sched_info->print_insn) (insn, 0));
7843 /* Remove all dependencies of the INSN. */
7845 sd_it = sd_iterator_start (insn, (SD_LIST_FORW
7846 | SD_LIST_BACK
7847 | SD_LIST_RES_BACK));
7848 while (sd_iterator_cond (&sd_it, &dep))
7849 sd_delete_dep (sd_it);
7852 /* If the former check (INSN) has already been moved to the ready (or queue)
7853 list, add the new check (CHECK) there too. */
7854 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
7855 try_ready (check);
7857 /* Remove old check from instruction stream and free its
7858 data. */
7859 sched_remove_insn (insn);
7862 init_dep (new_dep, check, twin, REG_DEP_ANTI);
7863 sd_add_dep (new_dep, false);
7865 else
7867 init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
7868 sd_add_dep (new_dep, false);
7871 if (!mutate_p)
7872 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
7873 because it'll be done later in add_to_speculative_block. */
7875 rtx_vec_t priorities_roots = rtx_vec_t();
7877 clear_priorities (twin, &priorities_roots);
7878 calc_priorities (priorities_roots);
7879 priorities_roots.release ();
7883 /* Remove dependencies between instructions in the recovery block REC
7884 and the usual region instructions. Inner dependences are kept, so it
7885 won't be necessary to recompute them. */
7886 static void
7887 fix_recovery_deps (basic_block rec)
7889 rtx note, insn, jump, ready_list = 0;
7890 bitmap_head in_ready;
7891 rtx link;
7893 bitmap_initialize (&in_ready, 0);
7895 /* NOTE - a basic block note. */
7896 note = NEXT_INSN (BB_HEAD (rec));
7897 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
7898 insn = BB_END (rec);
7899 gcc_assert (JUMP_P (insn));
7900 insn = PREV_INSN (insn);
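/* Walk the recovery block backwards, from the insn before the jump down to
   the basic block note. Dependencies whose consumer lies outside REC are
   deleted, and those consumers are remembered so they can be re-evaluated
   for readiness below. */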
7904 sd_iterator_def sd_it;
7905 dep_t dep;
7907 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
7908 sd_iterator_cond (&sd_it, &dep);)
7910 rtx consumer = DEP_CON (dep);
7912 if (BLOCK_FOR_INSN (consumer) != rec)
7914 sd_delete_dep (sd_it);
7916 if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
7917 ready_list = alloc_INSN_LIST (consumer, ready_list);
7919 else
7921 gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
7923 sd_iterator_next (&sd_it);
7927 insn = PREV_INSN (insn);
7929 while (insn != note);
7931 bitmap_clear (&in_ready);
7933 /* Try to add instructions to the ready or queue list. */
7934 for (link = ready_list; link; link = XEXP (link, 1))
7935 try_ready (XEXP (link, 0));
7936 free_INSN_LIST_list (&ready_list);
7938 /* Fixing jump's dependences. */
7939 insn = BB_HEAD (rec);
7940 jump = BB_END (rec);
7942 gcc_assert (LABEL_P (insn));
7943 insn = NEXT_INSN (insn);
7945 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
7946 add_jump_dependencies (insn, jump);
7949 /* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
7950 instruction data. */
7951 static bool
7952 haifa_change_pattern (rtx insn, rtx new_pat)
7954 int t;
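/* Try to replace the pattern; validate_change with in_group == 0 applies the
   change immediately and fails if the resulting insn is not recognized. */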
7956 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
7957 if (!t)
7958 return false;
7960 update_insn_after_change (insn);
7961 return true;
7964 /* -1 - can't speculate,
7965 0 - for speculation with REQUEST mode it is OK to use
7966 current instruction pattern,
7967 1 - need to change pattern for *NEW_PAT to be speculative. */
7968 int
7969 sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
7971 gcc_assert (current_sched_info->flags & DO_SPECULATION
7972 && (request & SPECULATIVE)
7973 && sched_insn_is_legitimate_for_speculation_p (insn, request));
7975 if ((request & spec_info->mask) != request)
7976 return -1;
7978 if (request & BE_IN_SPEC
7979 && !(request & BEGIN_SPEC))
7980 return 0;
7982 return targetm.sched.speculate_insn (insn, request, new_pat);
7985 static int
7986 haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
7988 gcc_assert (sched_deps_info->generate_spec_deps
7989 && !IS_SPECULATION_CHECK_P (insn));
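/* Simple checks (which carry an internal dependency) and insns that are part
   of a scheduling group cannot be speculated. */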
7991 if (HAS_INTERNAL_DEP (insn)
7992 || SCHED_GROUP_P (insn))
7993 return -1;
7995 return sched_speculate_insn (insn, request, new_pat);
7998 /* Print some information about block BB, which starts with HEAD and
7999 ends with TAIL, before scheduling it.
8000 I is zero if the scheduler is about to start with a fresh ebb. */
8001 static void
8002 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
8004 if (!i)
8005 fprintf (sched_dump,
8006 ";; ======================================================\n");
8007 else
8008 fprintf (sched_dump,
8009 ";; =====================ADVANCING TO=====================\n");
8010 fprintf (sched_dump,
8011 ";; -- basic block %d from %d to %d -- %s reload\n",
8012 bb->index, INSN_UID (head), INSN_UID (tail),
8013 (reload_completed ? "after" : "before"));
8014 fprintf (sched_dump,
8015 ";; ======================================================\n");
8016 fprintf (sched_dump, "\n");
8019 /* Unlink basic block notes and labels and save them, so they
8020 can be easily restored. We unlink basic block notes in the EBB to
8021 provide backward compatibility with the previous code, as target backends
8022 assume that there will be only instructions between
8023 current_sched_info->{head and tail}. We restore these notes as soon
8024 as we can.
8025 FIRST (LAST) is the first (last) basic block in the ebb.
8026 NB: In usual case (FIRST == LAST) nothing is really done. */
8027 void
8028 unlink_bb_notes (basic_block first, basic_block last)
8030 /* We DON'T unlink basic block notes of the first block in the ebb. */
8031 if (first == last)
8032 return;
8034 bb_header = XNEWVEC (rtx, last_basic_block);
8036 /* Make a sentinel. */
8037 if (last->next_bb != EXIT_BLOCK_PTR)
8038 bb_header[last->next_bb->index] = 0;
8040 first = first->next_bb;
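/* Walk the blocks of the ebb backwards, splicing each block's label and
   basic block note out of the insn chain; the label is remembered in
   bb_header so restore_bb_notes can put everything back. */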
8043 rtx prev, label, note, next;
8045 label = BB_HEAD (last);
8046 if (LABEL_P (label))
8047 note = NEXT_INSN (label);
8048 else
8049 note = label;
8050 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
8052 prev = PREV_INSN (label);
8053 next = NEXT_INSN (note);
8054 gcc_assert (prev && next);
8056 NEXT_INSN (prev) = next;
8057 PREV_INSN (next) = prev;
8059 bb_header[last->index] = label;
8061 if (last == first)
8062 break;
8064 last = last->prev_bb;
8066 while (1);
8069 /* Restore basic block notes.
8070 FIRST is the first basic block in the ebb. */
8071 static void
8072 restore_bb_notes (basic_block first)
8074 if (!bb_header)
8075 return;
8077 /* We DON'T unlink basic block notes of the first block in the ebb. */
8078 first = first->next_bb;
8079 /* Remember: FIRST is actually the second basic block in the ebb. */
8081 while (first != EXIT_BLOCK_PTR
8082 && bb_header[first->index])
8084 rtx prev, label, note, next;
8086 label = bb_header[first->index];
8087 prev = PREV_INSN (label);
8088 next = NEXT_INSN (prev);
8090 if (LABEL_P (label))
8091 note = NEXT_INSN (label);
8092 else
8093 note = label;
8094 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
8096 bb_header[first->index] = 0;
8098 NEXT_INSN (prev) = label;
8099 NEXT_INSN (note) = next;
8100 PREV_INSN (next) = note;
8102 first = first->next_bb;
8105 free (bb_header);
8106 bb_header = 0;
8109 /* Helper function.
8110 Fix CFG after both in- and inter-block movement of
8111 control_flow_insn_p JUMP. */
8112 static void
8113 fix_jump_move (rtx jump)
8115 basic_block bb, jump_bb, jump_bb_next;
8117 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
8118 jump_bb = BLOCK_FOR_INSN (jump);
8119 jump_bb_next = jump_bb->next_bb;
8121 gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
8122 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
8124 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
8125 /* if jump_bb_next is not empty. */
8126 BB_END (jump_bb) = BB_END (jump_bb_next);
8128 if (BB_END (bb) != PREV_INSN (jump))
8129 /* Then there are instructions after jump that should be moved
8130 to jump_bb_next. */
8131 BB_END (jump_bb_next) = BB_END (bb);
8132 else
8133 /* Otherwise jump_bb_next is empty. */
8134 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
8136 /* To make assertion in move_insn happy. */
8137 BB_END (bb) = PREV_INSN (jump);
8139 update_bb_for_insn (jump_bb_next);
8142 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
8143 static void
8144 move_block_after_check (rtx jump)
8146 basic_block bb, jump_bb, jump_bb_next;
8147 vec<edge, va_gc> *t;
8149 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
8150 jump_bb = BLOCK_FOR_INSN (jump);
8151 jump_bb_next = jump_bb->next_bb;
8153 update_bb_for_insn (jump_bb);
8155 gcc_assert (IS_SPECULATION_CHECK_P (jump)
8156 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
8158 unlink_block (jump_bb_next);
8159 link_block (jump_bb_next, bb);
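/* Rotate the successor edge vectors: BB takes over JUMP_BB's successors,
   JUMP_BB takes JUMP_BB_NEXT's, and JUMP_BB_NEXT receives BB's original
   ones. */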
8161 t = bb->succs;
8162 bb->succs = 0;
8163 move_succs (&(jump_bb->succs), bb);
8164 move_succs (&(jump_bb_next->succs), jump_bb);
8165 move_succs (&t, jump_bb_next);
8167 df_mark_solutions_dirty ();
8169 common_sched_info->fix_recovery_cfg
8170 (bb->index, jump_bb->index, jump_bb_next->index);
8173 /* Helper function for move_block_after_check.
8174 This function attaches the edge vector pointed to by SUCCSP to
8175 block TO. */
8176 static void
8177 move_succs (vec<edge, va_gc> **succsp, basic_block to)
8179 edge e;
8180 edge_iterator ei;
8182 gcc_assert (to->succs == 0);
8184 to->succs = *succsp;
8186 FOR_EACH_EDGE (e, ei, to->succs)
8187 e->src = to;
8189 *succsp = 0;
8192 /* Remove INSN from the instruction stream.
8193 INSN should not have any dependencies. */
8194 static void
8195 sched_remove_insn (rtx insn)
8197 sd_finish_insn (insn);
8199 change_queue_index (insn, QUEUE_NOWHERE);
8200 current_sched_info->add_remove_insn (insn, 1);
8201 remove_insn (insn);
8204 /* Clear priorities of all instructions that are forward dependent on INSN.
8205 Store in the vector pointed to by ROOTS_PTR the insns on which priority ()
8206 should be invoked to initialize all cleared priorities. */
8207 static void
8208 clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
8210 sd_iterator_def sd_it;
8211 dep_t dep;
8212 bool insn_is_root_p = true;
8214 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
8216 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
8218 rtx pro = DEP_PRO (dep);
8220 if (INSN_PRIORITY_STATUS (pro) >= 0
8221 && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
8223 /* If DEP doesn't contribute to priority then INSN itself should
8224 be added to priority roots. */
8225 if (contributes_to_priority_p (dep))
8226 insn_is_root_p = false;
8228 INSN_PRIORITY_STATUS (pro) = -1;
8229 clear_priorities (pro, roots_ptr);
8233 if (insn_is_root_p)
8234 roots_ptr->safe_push (insn);
8237 /* Recompute priorities of instructions whose priorities might have
8238 changed. ROOTS is a vector of instructions whose priority computation will
8239 trigger initialization of all cleared priorities. */
8240 static void
8241 calc_priorities (rtx_vec_t roots)
8243 int i;
8244 rtx insn;
8246 FOR_EACH_VEC_ELT (roots, i, insn)
8247 priority (insn);
8251 /* Add dependences between JUMP and other instructions in the recovery
8252 block. INSN is the first insn in the recovery block. */
8253 static void
8254 add_jump_dependencies (rtx insn, rtx jump)
8258 insn = NEXT_INSN (insn);
8259 if (insn == jump)
8260 break;
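/* Only insns with no forward dependencies need an explicit anti dependency
   on the jump; the remaining insns are ordered before the jump through
   their consumers. */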
8262 if (dep_list_size (insn, SD_LIST_FORW) == 0)
8264 dep_def _new_dep, *new_dep = &_new_dep;
8266 init_dep (new_dep, insn, jump, REG_DEP_ANTI);
8267 sd_add_dep (new_dep, false);
8270 while (1);
8272 gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
8275 /* Extend data structures for logical insn UID. */
8276 void
8277 sched_extend_luids (void)
8279 int new_luids_max_uid = get_max_uid () + 1;
8281 sched_luids.safe_grow_cleared (new_luids_max_uid);
8284 /* Initialize LUID for INSN. */
8285 void
8286 sched_init_insn_luid (rtx insn)
8288 int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
8289 int luid;
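/* A negative value from luid_for_non_insn means this rtx needs no luid. */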
8291 if (i >= 0)
8293 luid = sched_max_luid;
8294 sched_max_luid += i;
8296 else
8297 luid = -1;
8299 SET_INSN_LUID (insn, luid);
8302 /* Initialize luids for BBS.
8303 The hook common_sched_info->luid_for_non_insn () is used to determine
8304 if notes, labels, etc. need luids. */
8305 void
8306 sched_init_luids (bb_vec_t bbs)
8308 int i;
8309 basic_block bb;
8311 sched_extend_luids ();
8312 FOR_EACH_VEC_ELT (bbs, i, bb)
8314 rtx insn;
8316 FOR_BB_INSNS (bb, insn)
8317 sched_init_insn_luid (insn);
8321 /* Free LUIDs. */
8322 void
8323 sched_finish_luids (void)
8325 sched_luids.release ();
8326 sched_max_luid = 1;
8329 /* Return logical uid of INSN. Helpful while debugging. */
8330 int
8331 insn_luid (rtx insn)
8333 return INSN_LUID (insn);
8336 /* Extend per insn data in the target. */
8337 void
8338 sched_extend_target (void)
8340 if (targetm.sched.h_i_d_extended)
8341 targetm.sched.h_i_d_extended ();
8344 /* Extend global scheduler structures (those that live across calls to
8345 schedule_block) to include information about the just emitted INSN. */
8346 static void
8347 extend_h_i_d (void)
8349 int reserve = (get_max_uid () + 1 - h_i_d.length ());
8350 if (reserve > 0
8351 && ! h_i_d.space (reserve))
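/* Grow to 1.5 times the current maximal UID so that repeatedly emitting
   insns does not reallocate the array every time. */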
8353 h_i_d.safe_grow_cleared (3 * get_max_uid () / 2);
8354 sched_extend_target ();
8358 /* Initialize the h_i_d entry of INSN with default values.
8359 Values that are not explicitly initialized here hold zero. */
8360 static void
8361 init_h_i_d (rtx insn)
8363 if (INSN_LUID (insn) > 0)
8365 INSN_COST (insn) = -1;
8366 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
8367 INSN_TICK (insn) = INVALID_TICK;
8368 INSN_EXACT_TICK (insn) = INVALID_TICK;
8369 INTER_TICK (insn) = INVALID_TICK;
8370 TODO_SPEC (insn) = HARD_DEP;
8374 /* Initialize haifa_insn_data for BBS. */
8375 void
8376 haifa_init_h_i_d (bb_vec_t bbs)
8378 int i;
8379 basic_block bb;
8381 extend_h_i_d ();
8382 FOR_EACH_VEC_ELT (bbs, i, bb)
8384 rtx insn;
8386 FOR_BB_INSNS (bb, insn)
8387 init_h_i_d (insn);
8391 /* Finalize haifa_insn_data. */
8392 void
8393 haifa_finish_h_i_d (void)
8395 int i;
8396 haifa_insn_data_t data;
8397 struct reg_use_data *use, *next;
8399 FOR_EACH_VEC_ELT (h_i_d, i, data)
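/* Free the register pressure and register use records attached to each
   insn. */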
8401 free (data->max_reg_pressure);
8402 free (data->reg_pressure);
8403 for (use = data->reg_use_list; use != NULL; use = next)
8405 next = use->next_insn_use;
8406 free (use);
8409 h_i_d.release ();
8412 /* Init data for the new insn INSN. */
8413 static void
8414 haifa_init_insn (rtx insn)
8416 gcc_assert (insn != NULL);
8418 sched_extend_luids ();
8419 sched_init_insn_luid (insn);
8420 sched_extend_target ();
8421 sched_deps_init (false);
8422 extend_h_i_d ();
8423 init_h_i_d (insn);
8425 if (adding_bb_to_current_region_p)
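/* Create empty dependence lists for the new insn. */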
8427 sd_init_insn (insn);
8429 /* Extend dependency caches by one element. */
8430 extend_dependency_caches (1, false);
8432 if (sched_pressure != SCHED_PRESSURE_NONE)
8433 init_insn_reg_pressure_info (insn);
8436 /* Init data for the new basic block BB which comes after AFTER. */
8437 static void
8438 haifa_init_only_bb (basic_block bb, basic_block after)
8440 gcc_assert (bb != NULL);
8442 sched_init_bbs ();
8444 if (common_sched_info->add_block)
8445 /* This changes only data structures of the front-end. */
8446 common_sched_info->add_block (bb, after);
8449 /* A generic version of sched_split_block (). */
8450 basic_block
8451 sched_split_block_1 (basic_block first_bb, rtx after)
8453 edge e;
8455 e = split_block (first_bb, after);
8456 gcc_assert (e->src == first_bb);
8458 /* sched_split_block emits note if *check == BB_END. Probably it
8459 is better to rip that note off. */
8461 return e->dest;
8464 /* A generic version of sched_create_empty_bb (). */
8465 basic_block
8466 sched_create_empty_bb_1 (basic_block after)
8468 return create_empty_bb (after);
8471 /* Insert PAT as an INSN into the schedule and update the necessary data
8472 structures to account for it. */
8473 rtx
8474 sched_emit_insn (rtx pat)
8476 rtx insn = emit_insn_before (pat, nonscheduled_insns_begin);
8477 haifa_init_insn (insn);
8479 if (current_sched_info->add_remove_insn)
8480 current_sched_info->add_remove_insn (insn, 0);
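/* Issue the new insn right away and record it as the last scheduled insn. */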
8482 (*current_sched_info->begin_schedule_ready) (insn);
8483 scheduled_insns.safe_push (insn);
8485 last_scheduled_insn = insn;
8486 return insn;
8489 /* This function returns a candidate satisfying dispatch constraints from
8490 the ready list. */
8492 static rtx
8493 ready_remove_first_dispatch (struct ready_list *ready)
8495 int i;
8496 rtx insn = ready_element (ready, 0);
8498 if (ready->n_ready == 1
8499 || INSN_CODE (insn) < 0
8500 || !INSN_P (insn)
8501 || !active_insn_p (insn)
8502 || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
8503 return ready_remove_first (ready);
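/* First, look for a ready insn that fits into the current dispatch window. */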
8505 for (i = 1; i < ready->n_ready; i++)
8507 insn = ready_element (ready, i);
8509 if (INSN_CODE (insn) < 0
8510 || !INSN_P (insn)
8511 || !active_insn_p (insn))
8512 continue;
8514 if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
8516 /* Return the i-th element of ready. */
8517 insn = ready_remove (ready, i);
8518 return insn;
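/* No ready insn fits the dispatch window. If a dispatch violation has
   already been signalled, just take the first ready insn; otherwise prefer
   a compare insn if the target identifies one. */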
8522 if (targetm.sched.dispatch (NULL_RTX, DISPATCH_VIOLATION))
8523 return ready_remove_first (ready);
8525 for (i = 1; i < ready->n_ready; i++)
8527 insn = ready_element (ready, i);
8529 if (INSN_CODE (insn) < 0
8530 || !INSN_P (insn)
8531 || !active_insn_p (insn))
8532 continue;
8534 /* Return i-th element of ready. */
8535 if (targetm.sched.dispatch (insn, IS_CMP))
8536 return ready_remove (ready, i);
8539 return ready_remove_first (ready);
8542 /* Get the number of ready insns in the ready list. */
8544 int
8545 number_in_ready (void)
8547 return ready.n_ready;
8550 /* Get the I-th insn from the ready list. */
8552 rtx
8553 get_ready_element (int i)
8555 return ready_element (&ready, i);
8558 #endif /* INSN_SCHEDULING */