1 /* Instruction scheduling pass.
2 Copyright (C) 1992-2013 Free Software Foundation, Inc.
3 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
4 and currently maintained by, Jim Wilson (wilson@cygnus.com)
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* Instruction scheduling pass. This file, along with sched-deps.c,
23 contains the generic parts. The actual entry point for
24 the normal instruction scheduling pass is found in sched-rgn.c.
26 We compute insn priorities based on data dependencies. Flow
27 analysis only creates a fraction of the data-dependencies we must
28 observe: namely, only those dependencies which the combiner can be
29 expected to use. For this pass, we must therefore create the
30 remaining dependencies we need to observe: register dependencies,
31 memory dependencies, dependencies to keep function calls in order,
32 and the dependence between a conditional branch and the setting of
33 condition codes; all of these are dealt with here.
35 The scheduler first traverses the data flow graph, starting with
36 the last instruction, and proceeding to the first, assigning values
37 to insn_priority as it goes. This sorts the instructions
38 topologically by data dependence.
40 Once priorities have been established, we order the insns using
41 list scheduling. This works as follows: starting with a list of
42 all the ready insns, and sorted according to priority number, we
43 schedule the insn from the end of the list by placing its
44 predecessors in the list according to their priority order. We
45 consider this insn scheduled by setting the pointer to the "end" of
46 the list to point to the previous insn. When an insn has no
47 predecessors, we either queue it until sufficient time has elapsed
48 or add it to the ready list. As the instructions are scheduled or
49 when stalls are introduced, the queue advances and dumps insns into
50 the ready list. When all insns down to the lowest priority have
51 been scheduled, the critical path of the basic block has been made
52 as short as possible. The remaining insns are then scheduled in
53 remaining slots.
55 The following list shows the order in which we want to break ties
56 among insns in the ready list:
58 1. choose insn with the longest path to end of bb, ties
59 broken by
60 2. choose insn with least contribution to register pressure,
61 ties broken by
62 3. prefer in-block upon interblock motion, ties broken by
63 4. prefer useful upon speculative motion, ties broken by
64 5. choose insn with largest control flow probability, ties
65 broken by
66 6. choose insn with the least dependences upon the previously
67 scheduled insn, or finally
68 7. choose the insn which has the most insns dependent on it.
69 8. choose insn with lowest UID.
71 Memory references complicate matters. Only if we can be certain
72 that memory references are not part of the data dependency graph
73 (via true, anti, or output dependence), can we move operations past
74 memory references. To first approximation, reads can be done
75 independently, while writes introduce dependencies. Better
76 approximations will yield fewer dependencies.
78 Before reload, an extended analysis of interblock data dependences
79 is required for interblock scheduling. This is performed in
80 compute_block_backward_dependences ().
82 Dependencies set up by memory references are treated in exactly the
83 same way as other dependencies, by using insn backward dependences
84 INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences
85 INSN_FORW_DEPS for the purpose of forward list scheduling.
87 Having optimized the critical path, we may have also unduly
88 extended the lifetimes of some registers. If an operation requires
89 that constants be loaded into registers, it is certainly desirable
90 to load those constants as early as necessary, but no earlier.
91 I.e., it will not do to load up a bunch of registers at the
92 beginning of a basic block only to use them at the end, if they
93 could be loaded later, since this may result in excessive register
94 lifetimes.
96 Note that since branches are never in basic blocks, but only end
97 basic blocks, this pass will not move branches. But that is ok,
98 since we can use GNU's delayed branch scheduling pass to take care
99 of this case.
101 Also note that no further optimizations based on algebraic
102 identities are performed, so this pass would be a good one to
103 perform instruction splitting, such as breaking up a multiply
104 instruction into shifts and adds where that is profitable.
106 Given the memory aliasing analysis that this pass should perform,
107 it should be possible to remove redundant stores to memory, and to
108 load values from registers instead of hitting memory.
110 Before reload, speculative insns are moved only if a 'proof' exists
111 that no exception will be caused by this, and if no live registers
112 exist that inhibit the motion (live registers constraints are not
113 represented by data dependence edges).
115 This pass must update information that subsequent passes expect to
116 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
117 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
119 The information in the line number notes is carefully retained by
120 this pass. Notes that refer to the starting and ending of
121 exception regions are also carefully retained by this pass. All
122 other NOTE insns are grouped in their same relative order at the
123 beginning of basic blocks and regions that have been scheduled. */
127 #include "coretypes.h"
129 #include "diagnostic-core.h"
130 #include "hard-reg-set.h"
134 #include "function.h"
136 #include "insn-config.h"
137 #include "insn-attr.h"
140 #include "sched-int.h"
142 #include "common/common-target.h"
147 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
149 #include "dumpfile.h"
151 #ifdef INSN_SCHEDULING
153 /* issue_rate is the number of insns that can be scheduled in the same
154 machine cycle. It can be defined in the config/mach/mach.h file,
155 otherwise we set it to 1. */
156 int issue_rate;
159 /* This can be set to true by a backend if the scheduler should not
160 enable a DCE pass. */
161 bool sched_no_dce;
163 /* The current initiation interval used when modulo scheduling. */
164 static int modulo_ii;
166 /* The maximum number of stages we are prepared to handle. */
167 static int modulo_max_stages;
169 /* The number of insns that exist in each iteration of the loop. We use this
170 to detect when we've scheduled all insns from the first iteration. */
171 static int modulo_n_insns;
173 /* The current count of insns in the first iteration of the loop that have
174 already been scheduled. */
175 static int modulo_insns_scheduled;
177 /* The maximum uid of insns from the first iteration of the loop. */
178 static int modulo_iter0_max_uid;
180 /* The number of times we should attempt to backtrack when modulo scheduling.
181 Decreased each time we have to backtrack. */
182 static int modulo_backtracks_left;
184 /* The stage in which the last insn from the original loop was
185 scheduled. */
186 static int modulo_last_stage;
188 /* sched-verbose controls the amount of debugging output the
189 scheduler prints. It is controlled by -fsched-verbose=N:
190 N>0 and no -DSR : the output is directed to stderr.
191 N>=10 will direct the printouts to stderr (regardless of -dSR).
192 N=1: same as -dSR.
193 N=2: bb's probabilities, detailed ready list info, unit/insn info.
194 N=3: rtl at abort point, control-flow, regions info.
195 N=5: dependences info. */
197 int sched_verbose = 0;
199 /* Debugging file. All printouts are sent to dump, which is always set,
200 either to stderr, or to the dump listing file (-dRS). */
201 FILE *sched_dump = 0;
203 /* This is a placeholder for the scheduler parameters common
204 to all schedulers. */
205 struct common_sched_info_def *common_sched_info;
207 #define INSN_TICK(INSN) (HID (INSN)->tick)
208 #define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
209 #define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
210 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
211 #define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
212 #define SHADOW_P(INSN) (HID (INSN)->shadow_p)
213 #define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
214 /* Cached cost of the instruction. Use insn_cost to get cost of the
215 insn. -1 here means that the field is not initialized. */
216 #define INSN_COST(INSN) (HID (INSN)->cost)
218 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
219 then it should be recalculated from scratch. */
220 #define INVALID_TICK (-(max_insn_queue_index + 1))
221 /* The minimal value of the INSN_TICK of an instruction. */
222 #define MIN_TICK (-max_insn_queue_index)
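/* Illustrative example (not part of the original sources): if the target's
   automaton yields max_insn_queue_index == 63, then INVALID_TICK == -64 and
   MIN_TICK == -63, so every valid INSN_TICK is strictly greater than the
   "recompute from scratch" marker. */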
224 /* List of important notes we must keep around. This is a pointer to the
225 last element in the list. */
226 static rtx note_list;
228 static struct spec_info_def spec_info_var;
229 /* Description of the speculative part of the scheduling.
230 If NULL - no speculation. */
231 spec_info_t spec_info = NULL;
233 /* True, if recovery block was added during scheduling of current block.
234 Used to determine, if we need to fix INSN_TICKs. */
235 static bool haifa_recovery_bb_recently_added_p;
237 /* True, if recovery block was added during this scheduling pass.
238 Used to determine if we should have empty memory pools of dependencies
239 after finishing current region. */
240 bool haifa_recovery_bb_ever_added_p;
242 /* Counters of different types of speculative instructions. */
243 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
245 /* Array used in {unlink, restore}_bb_notes. */
246 static rtx *bb_header = 0;
248 /* Basic block after which recovery blocks will be created. */
249 static basic_block before_recovery;
251 /* Basic block just before the EXIT_BLOCK and after recovery, if we have
252 created it. */
253 basic_block after_recovery;
255 /* FALSE if we add bb to another region, so we don't need to initialize it. */
256 bool adding_bb_to_current_region_p = true;
260 /* An instruction is ready to be scheduled when all insns preceding it
261 have already been scheduled. It is important to ensure that all
262 insns which use its result will not be executed until its result
263 has been computed. An insn is maintained in one of four structures:
265 (P) the "Pending" set of insns which cannot be scheduled until
266 their dependencies have been satisfied.
267 (Q) the "Queued" set of insns that can be scheduled when sufficient
269 (R) the "Ready" list of unscheduled, uncommitted insns.
270 (S) the "Scheduled" list of insns.
272 Initially, all insns are either "Pending" or "Ready" depending on
273 whether their dependencies are satisfied.
275 Insns move from the "Ready" list to the "Scheduled" list as they
276 are committed to the schedule. As this occurs, the insns in the
277 "Pending" list have their dependencies satisfied and move to either
278 the "Ready" list or the "Queued" set depending on whether
279 sufficient time has passed to make them ready. As time passes,
280 insns move from the "Queued" set to the "Ready" list.
282 The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
283 unscheduled insns, i.e., those that are ready, queued, and pending.
284 The "Queued" set (Q) is implemented by the variable `insn_queue'.
285 The "Ready" list (R) is implemented by the variables `ready' and
287 The "Scheduled" list (S) is the new insn chain built by this pass.
289 The transition (R->S) is implemented in the scheduling loop in
290 `schedule_block' when the best insn to schedule is chosen.
291 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
292 insns move from the ready list to the scheduled list.
293 The transition (Q->R) is implemented in 'queue_to_ready' as time
294 passes or stalls are introduced. */
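/* Illustrative summary (not part of the original sources) of the state
   transitions described above:

   P -> R   last backward dependence resolved, no delay remaining
   P -> Q   last backward dependence resolved, but delay cycles remain
   Q -> R   queue_to_ready, once enough cycles have passed
   R -> S   schedule_block picks the insn for the current cycle.  */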
296 /* Implement a circular buffer to delay instructions until sufficient
297 time has passed. For the new pipeline description interface,
298 MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
299 than maximal time of instruction execution computed by genattr.c on
300 the base maximal time of functional unit reservations and getting a
301 result. This is the longest time an insn may be queued. */
303 static rtx *insn_queue;
304 static int q_ptr = 0;
305 static int q_size = 0;
306 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
307 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
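/* Illustrative example (not part of the original sources): because
   max_insn_queue_index is a power of two minus one, the macros above reduce
   the modular index arithmetic to a mask. E.g. with max_insn_queue_index
   == 63 and q_ptr == 61, an insn delayed by 5 cycles lands in slot
   NEXT_Q_AFTER (61, 5) == (61 + 5) & 63 == 2, wrapping around the buffer. */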
309 #define QUEUE_SCHEDULED (-3)
310 #define QUEUE_NOWHERE (-2)
311 #define QUEUE_READY (-1)
312 /* QUEUE_SCHEDULED - INSN is scheduled.
313 QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in
314 queue, nor in ready list.
315 QUEUE_READY - INSN is in ready list.
316 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
318 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
320 /* The following variable value refers to all current and future
321 reservations of the processor units. */
322 state_t curr_state;
324 /* The following variable value is the size of memory representing all
325 current and future reservations of the processor units. */
326 size_t dfa_state_size;
328 /* The following array is used to find the best insn from ready when
329 the automaton pipeline interface is used. */
330 char *ready_try = NULL;
332 /* The ready list. */
333 struct ready_list ready = {NULL, 0, 0, 0, 0};
335 /* The pointer to the ready list (to be removed). */
336 static struct ready_list *readyp = &ready;
338 /* Scheduling clock. */
339 static int clock_var;
341 /* Clock at which the previous instruction was issued. */
342 static int last_clock_var;
344 /* Set to true if, when queuing a shadow insn, we discover that it would be
345 scheduled too late. */
346 static bool must_backtrack;
348 /* The following variable value is the number of essential insns issued on
349 the current cycle. An insn is an essential one if it changes the
350 processors state. */
351 int cycle_issued_insns;
353 /* This records the actual schedule. It is built up during the main phase
354 of schedule_block, and afterwards used to reorder the insns in the RTL. */
355 static vec<rtx> scheduled_insns;
357 static int may_trap_exp (const_rtx, int);
359 /* Nonzero iff the address is comprised from at most 1 register. */
360 #define CONST_BASED_ADDRESS_P(x) \
361 (REG_P (x) \
362 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
363 || (GET_CODE (x) == LO_SUM)) \
364 && (CONSTANT_P (XEXP (x, 0)) \
365 || CONSTANT_P (XEXP (x, 1)))))
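/* Illustrative examples (not from the original sources): a plain (reg) or
   an address such as (plus (reg) (const_int 8)) satisfies
   CONST_BASED_ADDRESS_P, while (plus (reg) (reg)) does not, since neither
   operand of the PLUS is CONSTANT_P. */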
367 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
368 as found by analyzing insn's expression. */
371 static int haifa_luid_for_non_insn (rtx x);
373 /* Haifa version of sched_info hooks common to all headers. */
374 const struct common_sched_info_def haifa_common_sched_info =
375 {
376 NULL, /* fix_recovery_cfg */
377 NULL, /* add_block */
378 NULL, /* estimate_number_of_insns */
379 haifa_luid_for_non_insn, /* luid_for_non_insn */
380 SCHED_PASS_UNKNOWN /* sched_pass_id */
381 };
383 /* Mapping from instruction UID to its Logical UID. */
384 vec<int> sched_luids = vNULL;
386 /* Next LUID to assign to an instruction. */
387 int sched_max_luid = 1;
389 /* Haifa Instruction Data. */
390 vec<haifa_insn_data_def> h_i_d = vNULL;
392 void (* sched_init_only_bb) (basic_block, basic_block);
394 /* Split block function. Different schedulers might use different functions
395 to keep their internal data consistent. */
396 basic_block (* sched_split_block) (basic_block, rtx);
398 /* Create empty basic block after the specified block. */
399 basic_block (* sched_create_empty_bb) (basic_block);
401 /* Return the number of cycles until INSN is expected to be ready.
402 Return zero if it already is. */
403 static int
404 insn_delay (rtx insn)
405 {
406 return MAX (INSN_TICK (insn) - clock_var, 0);
407 }
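/* Worked example (illustrative only): if INSN_TICK (insn) == 14 and
   clock_var == 11, insn_delay returns 3, i.e. the insn still needs three
   cycles before it can issue; once clock_var reaches 14 the delay is 0. */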
410 may_trap_exp (const_rtx x
, int is_store
)
419 if (code
== MEM
&& may_trap_p (x
))
426 /* The insn uses memory: a volatile load. */
427 if (MEM_VOLATILE_P (x
))
429 /* An exception-free load. */
432 /* A load with 1 base register, to be further checked. */
433 if (CONST_BASED_ADDRESS_P (XEXP (x
, 0)))
434 return PFREE_CANDIDATE
;
435 /* No info on the load, to be further checked. */
436 return PRISKY_CANDIDATE
;
441 int i
, insn_class
= TRAP_FREE
;
443 /* Neither store nor load, check if it may cause a trap. */
446 /* Recursive step: walk the insn... */
447 fmt
= GET_RTX_FORMAT (code
);
448 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
452 int tmp_class
= may_trap_exp (XEXP (x
, i
), is_store
);
453 insn_class
= WORST_CLASS (insn_class
, tmp_class
);
455 else if (fmt
[i
] == 'E')
458 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
460 int tmp_class
= may_trap_exp (XVECEXP (x
, i
, j
), is_store
);
461 insn_class
= WORST_CLASS (insn_class
, tmp_class
);
462 if (insn_class
== TRAP_RISKY
|| insn_class
== IRISKY
)
466 if (insn_class
== TRAP_RISKY
|| insn_class
== IRISKY
)
473 /* Classifies rtx X of an insn for the purpose of verifying that X can be
474 executed speculatively (and consequently the insn can be moved
475 speculatively), by examining X, returning:
476 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
477 TRAP_FREE: non-load insn.
478 IFREE: load from a globally safe location.
479 IRISKY: volatile load.
480 PFREE_CANDIDATE, PRISKY_CANDIDATE: load that needs to be checked for
481 being either PFREE or PRISKY. */
484 haifa_classify_rtx (const_rtx x
)
486 int tmp_class
= TRAP_FREE
;
487 int insn_class
= TRAP_FREE
;
490 if (GET_CODE (x
) == PARALLEL
)
492 int i
, len
= XVECLEN (x
, 0);
494 for (i
= len
- 1; i
>= 0; i
--)
496 tmp_class
= haifa_classify_rtx (XVECEXP (x
, 0, i
));
497 insn_class
= WORST_CLASS (insn_class
, tmp_class
);
498 if (insn_class
== TRAP_RISKY
|| insn_class
== IRISKY
)
508 /* Test if it is a 'store'. */
509 tmp_class
= may_trap_exp (XEXP (x
, 0), 1);
512 /* Test if it is a store. */
513 tmp_class
= may_trap_exp (SET_DEST (x
), 1);
514 if (tmp_class
== TRAP_RISKY
)
516 /* Test if it is a load. */
518 WORST_CLASS (tmp_class
,
519 may_trap_exp (SET_SRC (x
), 0));
522 tmp_class
= haifa_classify_rtx (COND_EXEC_CODE (x
));
523 if (tmp_class
== TRAP_RISKY
)
525 tmp_class
= WORST_CLASS (tmp_class
,
526 may_trap_exp (COND_EXEC_TEST (x
), 0));
529 tmp_class
= TRAP_RISKY
;
533 insn_class
= tmp_class
;
540 haifa_classify_insn (const_rtx insn
)
542 return haifa_classify_rtx (PATTERN (insn
));
545 /* After the scheduler initialization function has been called, this function
546 can be called to enable modulo scheduling. II is the initiation interval
547 we should use, it affects the delays for delay_pairs that were recorded as
548 separated by a given number of stages.
550 MAX_STAGES provides us with a limit
551 after which we give up scheduling; the caller must have unrolled at least
552 as many copies of the loop body and recorded delay_pairs for them.
554 INSNS is the number of real (non-debug) insns in one iteration of
555 the loop. MAX_UID can be used to test whether an insn belongs to
556 the first iteration of the loop; all of them have a uid lower than
557 MAX_UID. */
558 void
559 set_modulo_params (int ii, int max_stages, int insns, int max_uid)
560 {
561 modulo_ii = ii;
562 modulo_max_stages = max_stages;
563 modulo_n_insns = insns;
564 modulo_iter0_max_uid = max_uid;
565 modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS);
566 }
568 /* A structure to record a pair of insns where the first one is a real
569 insn that has delay slots, and the second is its delayed shadow.
570 I1 is scheduled normally and will emit an assembly instruction,
571 while I2 describes the side effect that takes place at the
572 transition between cycles CYCLES and (CYCLES + 1) after I1. */
573 struct delay_pair
574 {
575 struct delay_pair *next_same_i1;
576 rtx i1, i2;
577 int cycles;
578 /* When doing modulo scheduling, a delay_pair can also be used to
579 show that I1 and I2 are the same insn in a different stage. If that
580 is the case, STAGES will be nonzero. */
581 int stages;
582 };
584 /* Two hash tables to record delay_pairs, one indexed by I1 and the other
585 indexed by I2. */
586 static htab_t delay_htab;
587 static htab_t delay_htab_i2;
589 /* Called through htab_traverse. Walk the hashtable using I2 as
590 index, and delete all elements involving an UID higher than
591 that pointed to by *DATA. */
593 htab_i2_traverse (void **slot
, void *data
)
595 int maxuid
= *(int *)data
;
596 struct delay_pair
*p
= *(struct delay_pair
**)slot
;
597 if (INSN_UID (p
->i2
) >= maxuid
|| INSN_UID (p
->i1
) >= maxuid
)
599 htab_clear_slot (delay_htab_i2
, slot
);
604 /* Called through htab_traverse. Walk the hashtable using I1 as
605 index, and delete all elements involving an UID higher than
606 that pointed to by *DATA. */
608 htab_i1_traverse (void **slot
, void *data
)
610 int maxuid
= *(int *)data
;
611 struct delay_pair
**pslot
= (struct delay_pair
**)slot
;
612 struct delay_pair
*p
, *first
, **pprev
;
614 if (INSN_UID ((*pslot
)->i1
) >= maxuid
)
616 htab_clear_slot (delay_htab
, slot
);
620 for (p
= *pslot
; p
; p
= p
->next_same_i1
)
622 if (INSN_UID (p
->i2
) < maxuid
)
625 pprev
= &p
->next_same_i1
;
630 htab_clear_slot (delay_htab
, slot
);
636 /* Discard all delay pairs which involve an insn with an UID higher
637 than MAX_UID. */
638 void
639 discard_delay_pairs_above (int max_uid)
640 {
641 htab_traverse (delay_htab, htab_i1_traverse, &max_uid);
642 htab_traverse (delay_htab_i2, htab_i2_traverse, &max_uid);
643 }
645 /* Returns a hash value for X (which really is a delay_pair), based on
646 hashing just I1. */
647 static hashval_t
648 delay_hash_i1 (const void *x)
649 {
650 return htab_hash_pointer (((const struct delay_pair *) x)->i1);
651 }
653 /* Returns a hash value for X (which really is a delay_pair), based on
654 hashing just I2. */
655 static hashval_t
656 delay_hash_i2 (const void *x)
657 {
658 return htab_hash_pointer (((const struct delay_pair *) x)->i2);
659 }
661 /* Return nonzero if I1 of pair X is the same as that of pair Y. */
662 static int
663 delay_i1_eq (const void *x, const void *y)
664 {
665 return ((const struct delay_pair *) x)->i1 == y;
666 }
668 /* Return nonzero if I2 of pair X is the same as that of pair Y. */
669 static int
670 delay_i2_eq (const void *x, const void *y)
671 {
672 return ((const struct delay_pair *) x)->i2 == y;
673 }
675 /* This function can be called by a port just before it starts the final
676 scheduling pass. It records the fact that an instruction with delay
677 slots has been split into two insns, I1 and I2. The first one will be
678 scheduled normally and initiates the operation. The second one is a
679 shadow which must follow a specific number of cycles after I1; its only
680 purpose is to show the side effect that occurs at that cycle in the RTL.
681 If a JUMP_INSN or a CALL_INSN has been split, I1 should be a normal INSN,
682 while I2 retains the original insn type.
684 There are two ways in which the number of cycles can be specified,
685 involving the CYCLES and STAGES arguments to this function. If STAGES
686 is zero, we just use the value of CYCLES. Otherwise, STAGES is a factor
687 which is multiplied by MODULO_II to give the number of cycles. This is
688 only useful if the caller also calls set_modulo_params to enable modulo
689 scheduling. */
691 void
692 record_delay_slot_pair (rtx i1
, rtx i2
, int cycles
, int stages
)
694 struct delay_pair
*p
= XNEW (struct delay_pair
);
695 struct delay_pair
**slot
;
704 delay_htab
= htab_create (10, delay_hash_i1
, delay_i1_eq
, NULL
);
705 delay_htab_i2
= htab_create (10, delay_hash_i2
, delay_i2_eq
, free
);
707 slot
= ((struct delay_pair
**)
708 htab_find_slot_with_hash (delay_htab
, i1
, htab_hash_pointer (i1
),
710 p
->next_same_i1
= *slot
;
712 slot
= ((struct delay_pair
**)
713 htab_find_slot_with_hash (delay_htab_i2
, i2
, htab_hash_pointer (i2
),
718 /* Examine the delay pair hashtable to see if INSN is a shadow for another,
719 and return the other insn if so. Return NULL otherwise. */
721 real_insn_for_shadow (rtx insn
)
723 struct delay_pair
*pair
;
725 if (delay_htab
== NULL
)
729 = (struct delay_pair
*)htab_find_with_hash (delay_htab_i2
, insn
,
730 htab_hash_pointer (insn
));
731 if (!pair
|| pair
->stages
> 0)
736 /* For a pair P of insns, return the fixed distance in cycles from the first
737 insn after which the second must be scheduled. */
738 static int
739 pair_delay (struct delay_pair *p)
740 {
741 if (p->stages == 0)
742 return p->cycles;
743 else
744 return p->stages * modulo_ii;
745 }
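/* Worked example (illustrative only): a pair recorded with cycles == 3 and
   stages == 0 gives a delay of 3 cycles between I1 and its shadow I2, while
   a pair recorded with stages == 2 during modulo scheduling with
   modulo_ii == 8 gives a delay of 16 cycles. */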
747 /* Given an insn INSN, add a dependence on its delayed shadow if it
748 has one. Also try to find situations where shadows depend on each other
749 and add dependencies to the real insns to limit the amount of backtracking
752 add_delay_dependencies (rtx insn
)
754 struct delay_pair
*pair
;
755 sd_iterator_def sd_it
;
762 = (struct delay_pair
*)htab_find_with_hash (delay_htab_i2
, insn
,
763 htab_hash_pointer (insn
));
766 add_dependence (insn
, pair
->i1
, REG_DEP_ANTI
);
770 FOR_EACH_DEP (pair
->i2
, SD_LIST_BACK
, sd_it
, dep
)
772 rtx pro
= DEP_PRO (dep
);
773 struct delay_pair
*other_pair
774 = (struct delay_pair
*)htab_find_with_hash (delay_htab_i2
, pro
,
775 htab_hash_pointer (pro
));
776 if (!other_pair
|| other_pair
->stages
)
778 if (pair_delay (other_pair
) >= pair_delay (pair
))
780 if (sched_verbose
>= 4)
782 fprintf (sched_dump
, ";;\tadding dependence %d <- %d\n",
783 INSN_UID (other_pair
->i1
),
784 INSN_UID (pair
->i1
));
785 fprintf (sched_dump
, ";;\tpair1 %d <- %d, cost %d\n",
789 fprintf (sched_dump
, ";;\tpair2 %d <- %d, cost %d\n",
790 INSN_UID (other_pair
->i1
),
791 INSN_UID (other_pair
->i2
),
792 pair_delay (other_pair
));
794 add_dependence (pair
->i1
, other_pair
->i1
, REG_DEP_ANTI
);
799 /* Forward declarations. */
801 static int priority (rtx);
802 static int rank_for_schedule (const void *, const void *);
803 static void swap_sort (rtx *, int);
804 static void queue_insn (rtx, int, const char *);
805 static int schedule_insn (rtx);
806 static void adjust_priority (rtx);
807 static void advance_one_cycle (void);
808 static void extend_h_i_d (void);
811 /* Notes handling mechanism:
812 =========================
813 Generally, NOTES are saved before scheduling and restored after scheduling.
814 The scheduler distinguishes between two types of notes:
816 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
817 Before scheduling a region, a pointer to the note is added to the insn
818 that follows or precedes it. (This happens as part of the data dependence
819 computation). After scheduling an insn, the pointer contained in it is
820 used for regenerating the corresponding note (in reemit_notes).
822 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
823 these notes are put in a list (in rm_other_notes() and
824 unlink_other_notes ()). After scheduling the block, these notes are
825 inserted at the beginning of the block (in schedule_block()). */
827 static void ready_add (struct ready_list *, rtx, bool);
828 static rtx ready_remove_first (struct ready_list *);
829 static rtx ready_remove_first_dispatch (struct ready_list *ready);
831 static void queue_to_ready (struct ready_list *);
832 static int early_queue_to_ready (state_t, struct ready_list *);
834 static void debug_ready_list (struct ready_list *);
836 /* The following functions are used to implement multi-pass scheduling
837 on the first cycle. */
838 static rtx ready_remove (struct ready_list *, int);
839 static void ready_remove_insn (rtx);
841 static void fix_inter_tick (rtx, rtx);
842 static int fix_tick_ready (rtx);
843 static void change_queue_index (rtx, int);
845 /* The following functions are used to implement scheduling of data/control
846 speculative instructions. */
848 static void extend_h_i_d (void);
849 static void init_h_i_d (rtx);
850 static int haifa_speculate_insn (rtx, ds_t, rtx *);
851 static void generate_recovery_code (rtx);
852 static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
853 static void begin_speculative_block (rtx);
854 static void add_to_speculative_block (rtx);
855 static void init_before_recovery (basic_block *);
856 static void create_check_block_twin (rtx, bool);
857 static void fix_recovery_deps (basic_block);
858 static bool haifa_change_pattern (rtx, rtx);
859 static void dump_new_block_header (int, basic_block, rtx, rtx);
860 static void restore_bb_notes (basic_block);
861 static void fix_jump_move (rtx);
862 static void move_block_after_check (rtx);
863 static void move_succs (vec<edge, va_gc> **, basic_block);
864 static void sched_remove_insn (rtx);
865 static void clear_priorities (rtx, rtx_vec_t *);
866 static void calc_priorities (rtx_vec_t);
867 static void add_jump_dependencies (rtx, rtx);
869 #endif /* INSN_SCHEDULING */
871 /* Point to state used for the current scheduling pass. */
872 struct haifa_sched_info *current_sched_info;
874 #ifndef INSN_SCHEDULING
876 schedule_insns (void)
881 /* Do register pressure sensitive insn scheduling if the flag is set
882 up. */
883 enum sched_pressure_algorithm sched_pressure;
885 /* Map regno -> its pressure class. The map defined only when
886 SCHED_PRESSURE != SCHED_PRESSURE_NONE. */
887 enum reg_class *sched_regno_pressure_class;
889 /* The current register pressure. Only elements corresponding to pressure
890 classes are defined. */
891 static int curr_reg_pressure[N_REG_CLASSES];
893 /* Saved value of the previous array. */
894 static int saved_reg_pressure[N_REG_CLASSES];
896 /* Register living at given scheduling point. */
897 static bitmap curr_reg_live;
899 /* Saved value of the previous array. */
900 static bitmap saved_reg_live;
902 /* Registers mentioned in the current region. */
903 static bitmap region_ref_regs;
905 /* Initiate register pressure relative info for scheduling the current
906 region. Currently it is only clearing register mentioned in the
907 current region. */
908 void
909 sched_init_region_reg_pressure_info (void)
910 {
911 bitmap_clear (region_ref_regs);
912 }
914 /* PRESSURE[CL] describes the pressure on register class CL. Update it
915 for the birth (if BIRTH_P) or death (if !BIRTH_P) of register REGNO.
916 LIVE tracks the set of live registers; if it is null, assume that
917 every birth or death is genuine. */
919 mark_regno_birth_or_death (bitmap live
, int *pressure
, int regno
, bool birth_p
)
921 enum reg_class pressure_class
;
923 pressure_class
= sched_regno_pressure_class
[regno
];
924 if (regno
>= FIRST_PSEUDO_REGISTER
)
926 if (pressure_class
!= NO_REGS
)
930 if (!live
|| bitmap_set_bit (live
, regno
))
931 pressure
[pressure_class
]
932 += (ira_reg_class_max_nregs
933 [pressure_class
][PSEUDO_REGNO_MODE (regno
)]);
937 if (!live
|| bitmap_clear_bit (live
, regno
))
938 pressure
[pressure_class
]
939 -= (ira_reg_class_max_nregs
940 [pressure_class
][PSEUDO_REGNO_MODE (regno
)]);
944 else if (pressure_class
!= NO_REGS
945 && ! TEST_HARD_REG_BIT (ira_no_alloc_regs
, regno
))
949 if (!live
|| bitmap_set_bit (live
, regno
))
950 pressure
[pressure_class
]++;
954 if (!live
|| bitmap_clear_bit (live
, regno
))
955 pressure
[pressure_class
]--;
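/* Illustrative example (not from the original sources): for the birth of a
   pseudo whose mode needs two hard registers of its pressure class
   (ira_reg_class_max_nregs == 2), the code above raises
   pressure[pressure_class] by 2; for a single allocatable hard register the
   pressure simply goes up or down by 1. */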
960 /* Initiate current register pressure related info from living
961 registers given by LIVE. */
963 initiate_reg_pressure_info (bitmap live
)
969 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
970 curr_reg_pressure
[ira_pressure_classes
[i
]] = 0;
971 bitmap_clear (curr_reg_live
);
972 EXECUTE_IF_SET_IN_BITMAP (live
, 0, j
, bi
)
973 if (sched_pressure
== SCHED_PRESSURE_MODEL
974 || current_nr_blocks
== 1
975 || bitmap_bit_p (region_ref_regs
, j
))
976 mark_regno_birth_or_death (curr_reg_live
, curr_reg_pressure
, j
, true);
979 /* Mark registers in X as mentioned in the current region. */
981 setup_ref_regs (rtx x
)
984 const RTX_CODE code
= GET_CODE (x
);
990 if (HARD_REGISTER_NUM_P (regno
))
991 bitmap_set_range (region_ref_regs
, regno
,
992 hard_regno_nregs
[regno
][GET_MODE (x
)]);
994 bitmap_set_bit (region_ref_regs
, REGNO (x
));
997 fmt
= GET_RTX_FORMAT (code
);
998 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
1000 setup_ref_regs (XEXP (x
, i
));
1001 else if (fmt
[i
] == 'E')
1003 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
1004 setup_ref_regs (XVECEXP (x
, i
, j
));
1008 /* Initiate current register pressure related info at the start of
1009 basic block BB. */
1010 static void
1011 initiate_bb_reg_pressure_info (basic_block bb)
1013 unsigned int i ATTRIBUTE_UNUSED
;
1016 if (current_nr_blocks
> 1)
1017 FOR_BB_INSNS (bb
, insn
)
1018 if (NONDEBUG_INSN_P (insn
))
1019 setup_ref_regs (PATTERN (insn
));
1020 initiate_reg_pressure_info (df_get_live_in (bb
));
1021 #ifdef EH_RETURN_DATA_REGNO
1022 if (bb_has_eh_pred (bb
))
1025 unsigned int regno
= EH_RETURN_DATA_REGNO (i
);
1027 if (regno
== INVALID_REGNUM
)
1029 if (! bitmap_bit_p (df_get_live_in (bb
), regno
))
1030 mark_regno_birth_or_death (curr_reg_live
, curr_reg_pressure
,
1036 /* Save current register pressure related info. */
1038 save_reg_pressure (void)
1042 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
1043 saved_reg_pressure
[ira_pressure_classes
[i
]]
1044 = curr_reg_pressure
[ira_pressure_classes
[i
]];
1045 bitmap_copy (saved_reg_live
, curr_reg_live
);
1048 /* Restore saved register pressure related info. */
1050 restore_reg_pressure (void)
1054 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
1055 curr_reg_pressure
[ira_pressure_classes
[i
]]
1056 = saved_reg_pressure
[ira_pressure_classes
[i
]];
1057 bitmap_copy (curr_reg_live
, saved_reg_live
);
1060 /* Return TRUE if the register is dying after its USE. */
1062 dying_use_p (struct reg_use_data
*use
)
1064 struct reg_use_data
*next
;
1066 for (next
= use
->next_regno_use
; next
!= use
; next
= next
->next_regno_use
)
1067 if (NONDEBUG_INSN_P (next
->insn
)
1068 && QUEUE_INDEX (next
->insn
) != QUEUE_SCHEDULED
)
1073 /* Print info about the current register pressure and its excess for
1074 each pressure class. */
1076 print_curr_reg_pressure (void)
1081 fprintf (sched_dump
, ";;\t");
1082 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
1084 cl
= ira_pressure_classes
[i
];
1085 gcc_assert (curr_reg_pressure
[cl
] >= 0);
1086 fprintf (sched_dump
, " %s:%d(%d)", reg_class_names
[cl
],
1087 curr_reg_pressure
[cl
],
1088 curr_reg_pressure
[cl
] - ira_class_hard_regs_num
[cl
]);
1090 fprintf (sched_dump
, "\n");
1093 /* Determine if INSN has a condition that is clobbered if a register
1094 in SET_REGS is modified. */
1096 cond_clobbered_p (rtx insn
, HARD_REG_SET set_regs
)
1098 rtx pat
= PATTERN (insn
);
1099 gcc_assert (GET_CODE (pat
) == COND_EXEC
);
1100 if (TEST_HARD_REG_BIT (set_regs
, REGNO (XEXP (COND_EXEC_TEST (pat
), 0))))
1102 sd_iterator_def sd_it
;
1104 haifa_change_pattern (insn
, ORIG_PAT (insn
));
1105 FOR_EACH_DEP (insn
, SD_LIST_BACK
, sd_it
, dep
)
1106 DEP_STATUS (dep
) &= ~DEP_CANCELLED
;
1107 TODO_SPEC (insn
) = HARD_DEP
;
1108 if (sched_verbose
>= 2)
1109 fprintf (sched_dump
,
1110 ";;\t\tdequeue insn %s because of clobbered condition\n",
1111 (*current_sched_info
->print_insn
) (insn
, 0));
1118 /* This function should be called after modifying the pattern of INSN,
1119 to update scheduler data structures as needed. */
1121 update_insn_after_change (rtx insn
)
1123 sd_iterator_def sd_it
;
1126 dfa_clear_single_insn_cache (insn
);
1128 sd_it
= sd_iterator_start (insn
,
1129 SD_LIST_FORW
| SD_LIST_BACK
| SD_LIST_RES_BACK
);
1130 while (sd_iterator_cond (&sd_it
, &dep
))
1132 DEP_COST (dep
) = UNKNOWN_DEP_COST
;
1133 sd_iterator_next (&sd_it
);
1136 /* Invalidate INSN_COST, so it'll be recalculated. */
1137 INSN_COST (insn
) = -1;
1138 /* Invalidate INSN_TICK, so it'll be recalculated. */
1139 INSN_TICK (insn
) = INVALID_TICK
;
1143 /* Two VECs, one to hold dependencies for which pattern replacements
1144 need to be applied or restored at the start of the next cycle, and
1145 another to hold an integer that is either one, to apply the
1146 corresponding replacement, or zero to restore it. */
1147 static vec
<dep_t
> next_cycle_replace_deps
;
1148 static vec
<int> next_cycle_apply
;
1150 static void apply_replacement (dep_t
, bool);
1151 static void restore_pattern (dep_t
, bool);
1153 /* Look at the remaining dependencies for insn NEXT, and compute and return
1154 the TODO_SPEC value we should use for it. This is called after one of
1155 NEXT's dependencies has been resolved.
1156 We also perform pattern replacements for predication, and for broken
1157 replacement dependencies. The latter is only done if FOR_BACKTRACK is
1158 false. */
1161 recompute_todo_spec (rtx next
, bool for_backtrack
)
1164 sd_iterator_def sd_it
;
1165 dep_t dep
, modify_dep
= NULL
;
1169 bool first_p
= true;
1171 if (sd_lists_empty_p (next
, SD_LIST_BACK
))
1172 /* NEXT has all its dependencies resolved. */
1175 if (!sd_lists_empty_p (next
, SD_LIST_HARD_BACK
))
1178 /* Now we've got NEXT with speculative deps only.
1179 1. Look at the deps to see what we have to do.
1180 2. Check if we can do 'todo'. */
1183 FOR_EACH_DEP (next
, SD_LIST_BACK
, sd_it
, dep
)
1185 rtx pro
= DEP_PRO (dep
);
1186 ds_t ds
= DEP_STATUS (dep
) & SPECULATIVE
;
1188 if (DEBUG_INSN_P (pro
) && !DEBUG_INSN_P (next
))
1201 new_ds
= ds_merge (new_ds
, ds
);
1203 else if (DEP_TYPE (dep
) == REG_DEP_CONTROL
)
1205 if (QUEUE_INDEX (pro
) != QUEUE_SCHEDULED
)
1210 DEP_STATUS (dep
) &= ~DEP_CANCELLED
;
1212 else if (DEP_REPLACE (dep
) != NULL
)
1214 if (QUEUE_INDEX (pro
) != QUEUE_SCHEDULED
)
1219 DEP_STATUS (dep
) &= ~DEP_CANCELLED
;
1223 if (n_replace
> 0 && n_control
== 0 && n_spec
== 0)
1225 if (!dbg_cnt (sched_breakdep
))
1227 FOR_EACH_DEP (next
, SD_LIST_BACK
, sd_it
, dep
)
1229 struct dep_replacement
*desc
= DEP_REPLACE (dep
);
1232 if (desc
->insn
== next
&& !for_backtrack
)
1234 gcc_assert (n_replace
== 1);
1235 apply_replacement (dep
, true);
1237 DEP_STATUS (dep
) |= DEP_CANCELLED
;
1243 else if (n_control
== 1 && n_replace
== 0 && n_spec
== 0)
1245 rtx pro
, other
, new_pat
;
1246 rtx cond
= NULL_RTX
;
1248 rtx prev
= NULL_RTX
;
1252 if ((current_sched_info
->flags
& DO_PREDICATION
) == 0
1253 || (ORIG_PAT (next
) != NULL_RTX
1254 && PREDICATED_PAT (next
) == NULL_RTX
))
1257 pro
= DEP_PRO (modify_dep
);
1258 other
= real_insn_for_shadow (pro
);
1259 if (other
!= NULL_RTX
)
1262 cond
= sched_get_reverse_condition_uncached (pro
);
1263 regno
= REGNO (XEXP (cond
, 0));
1265 /* Find the last scheduled insn that modifies the condition register.
1266 We can stop looking once we find the insn we depend on through the
1267 REG_DEP_CONTROL; if the condition register isn't modified after it,
1268 we know that it still has the right value. */
1269 if (QUEUE_INDEX (pro
) == QUEUE_SCHEDULED
)
1270 FOR_EACH_VEC_ELT_REVERSE (scheduled_insns
, i
, prev
)
1274 find_all_hard_reg_sets (prev
, &t
);
1275 if (TEST_HARD_REG_BIT (t
, regno
))
1280 if (ORIG_PAT (next
) == NULL_RTX
)
1282 ORIG_PAT (next
) = PATTERN (next
);
1284 new_pat
= gen_rtx_COND_EXEC (VOIDmode
, cond
, PATTERN (next
));
1285 success
= haifa_change_pattern (next
, new_pat
);
1288 PREDICATED_PAT (next
) = new_pat
;
1290 else if (PATTERN (next
) != PREDICATED_PAT (next
))
1292 bool success
= haifa_change_pattern (next
,
1293 PREDICATED_PAT (next
));
1294 gcc_assert (success
);
1296 DEP_STATUS (modify_dep
) |= DEP_CANCELLED
;
1300 if (PREDICATED_PAT (next
) != NULL_RTX
)
1302 int tick
= INSN_TICK (next
);
1303 bool success
= haifa_change_pattern (next
,
1305 INSN_TICK (next
) = tick
;
1306 gcc_assert (success
);
1309 /* We can't handle the case where there are both speculative and control
1310 dependencies, so we return HARD_DEP in such a case. Also fail if
1311 we have speculative dependencies with not enough points, or more than
1312 one control dependency. */
1313 if ((n_spec
> 0 && (n_control
> 0 || n_replace
> 0))
1315 /* Too few points? */
1316 && ds_weak (new_ds
) < spec_info
->data_weakness_cutoff
)
1324 /* Pointer to the last instruction scheduled. */
1325 static rtx last_scheduled_insn;
1327 /* Pointer to the last nondebug instruction scheduled within the
1328 block, or the prev_head of the scheduling block. Used by
1329 rank_for_schedule, so that insns independent of the last scheduled
1330 insn will be preferred over dependent instructions. */
1331 static rtx last_nondebug_scheduled_insn;
1333 /* Pointer that iterates through the list of unscheduled insns if we
1334 have a dbg_cnt enabled. It always points at an insn prior to the
1335 first unscheduled one. */
1336 static rtx nonscheduled_insns_begin;
1338 /* Compute cost of executing INSN.
1339 This is the number of cycles between instruction issue and
1340 instruction results. */
1342 insn_cost (rtx insn
)
1348 if (recog_memoized (insn
) < 0)
1351 cost
= insn_default_latency (insn
);
1358 cost
= INSN_COST (insn
);
1362 /* A USE insn, or something else we don't need to
1363 understand. We can't pass these directly to
1364 result_ready_cost or insn_default_latency because it will
1365 trigger a fatal error for unrecognizable insns. */
1366 if (recog_memoized (insn
) < 0)
1368 INSN_COST (insn
) = 0;
1373 cost
= insn_default_latency (insn
);
1377 INSN_COST (insn
) = cost
;
1384 /* Compute cost of dependence LINK.
1385 This is the number of cycles between instruction issue and
1386 instruction results.
1387 ??? We also use this function to call recog_memoized on all insns. */
1389 dep_cost_1 (dep_t link
, dw_t dw
)
1391 rtx insn
= DEP_PRO (link
);
1392 rtx used
= DEP_CON (link
);
1395 if (DEP_COST (link
) != UNKNOWN_DEP_COST
)
1396 return DEP_COST (link
);
1400 struct delay_pair
*delay_entry
;
1402 = (struct delay_pair
*)htab_find_with_hash (delay_htab_i2
, used
,
1403 htab_hash_pointer (used
));
1406 if (delay_entry
->i1
== insn
)
1408 DEP_COST (link
) = pair_delay (delay_entry
);
1409 return DEP_COST (link
);
1414 /* A USE insn should never require the value used to be computed.
1415 This allows the computation of a function's result and parameter
1416 values to overlap the return and call. We don't care about the
1417 dependence cost when only decreasing register pressure. */
1418 if (recog_memoized (used
) < 0)
1421 recog_memoized (insn
);
1425 enum reg_note dep_type
= DEP_TYPE (link
);
1427 cost
= insn_cost (insn
);
1429 if (INSN_CODE (insn
) >= 0)
1431 if (dep_type
== REG_DEP_ANTI
)
1433 else if (dep_type
== REG_DEP_OUTPUT
)
1435 cost
= (insn_default_latency (insn
)
1436 - insn_default_latency (used
));
1440 else if (bypass_p (insn
))
1441 cost
= insn_latency (insn
, used
);
1445 if (targetm
.sched
.adjust_cost_2
)
1446 cost
= targetm
.sched
.adjust_cost_2 (used
, (int) dep_type
, insn
, cost
,
1448 else if (targetm
.sched
.adjust_cost
!= NULL
)
1450 /* This variable is used for backward compatibility with the
1451 targets. */
1452 rtx dep_cost_rtx_link
= alloc_INSN_LIST (NULL_RTX
, NULL_RTX
);
1454 /* Make it self-cycled, so that if someone tries to walk over this
1455 incomplete list he/she will be caught in an endless loop. */
1456 XEXP (dep_cost_rtx_link
, 1) = dep_cost_rtx_link
;
1458 /* Targets use only REG_NOTE_KIND of the link. */
1459 PUT_REG_NOTE_KIND (dep_cost_rtx_link
, DEP_TYPE (link
));
1461 cost
= targetm
.sched
.adjust_cost (used
, dep_cost_rtx_link
,
1464 free_INSN_LIST_node (dep_cost_rtx_link
);
1471 DEP_COST (link
) = cost
;
1475 /* Compute cost of dependence LINK.
1476 This is the number of cycles between instruction issue and
1477 instruction results. */
1479 dep_cost (dep_t link
)
1481 return dep_cost_1 (link
, 0);
1484 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
1485 INSN_PRIORITY explicitly. */
1487 increase_insn_priority (rtx insn
, int amount
)
1489 if (!sel_sched_p ())
1491 /* We're dealing with haifa-sched.c INSN_PRIORITY. */
1492 if (INSN_PRIORITY_KNOWN (insn
))
1493 INSN_PRIORITY (insn
) += amount
;
1497 /* In sel-sched.c INSN_PRIORITY is not kept up to date.
1498 Use EXPR_PRIORITY instead. */
1499 sel_add_to_insn_priority (insn
, amount
);
1503 /* Return 'true' if DEP should be included in priority calculations. */
1505 contributes_to_priority_p (dep_t dep
)
1507 if (DEBUG_INSN_P (DEP_CON (dep
))
1508 || DEBUG_INSN_P (DEP_PRO (dep
)))
1511 /* Critical path is meaningful in block boundaries only. */
1512 if (!current_sched_info
->contributes_to_priority (DEP_CON (dep
),
1516 if (DEP_REPLACE (dep
) != NULL
)
1519 /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
1520 then speculative instructions will less likely be
1521 scheduled. That is because the priority of
1522 their producers will increase, and, thus, the
1523 producers will more likely be scheduled, thus,
1524 resolving the dependence. */
1525 if (sched_deps_info
->generate_spec_deps
1526 && !(spec_info
->flags
& COUNT_SPEC_IN_CRITICAL_PATH
)
1527 && (DEP_STATUS (dep
) & SPECULATIVE
))
1533 /* Compute the number of nondebug deps in list LIST for INSN. */
1536 dep_list_size (rtx insn
, sd_list_types_def list
)
1538 sd_iterator_def sd_it
;
1540 int dbgcount
= 0, nodbgcount
= 0;
1542 if (!MAY_HAVE_DEBUG_INSNS
)
1543 return sd_lists_size (insn
, list
);
1545 FOR_EACH_DEP (insn
, list
, sd_it
, dep
)
1547 if (DEBUG_INSN_P (DEP_CON (dep
)))
1549 else if (!DEBUG_INSN_P (DEP_PRO (dep
)))
1553 gcc_assert (dbgcount
+ nodbgcount
== sd_lists_size (insn
, list
));
1558 /* Compute the priority number for INSN. */
1562 if (! INSN_P (insn
))
1565 /* We should not be interested in priority of an already scheduled insn. */
1566 gcc_assert (QUEUE_INDEX (insn
) != QUEUE_SCHEDULED
);
1568 if (!INSN_PRIORITY_KNOWN (insn
))
1570 int this_priority
= -1;
1572 if (dep_list_size (insn
, SD_LIST_FORW
) == 0)
1573 /* ??? We should set INSN_PRIORITY to insn_cost when an insn has
1574 some forward deps but all of them are ignored by
1575 contributes_to_priority hook. At the moment we set priority of
1576 such insns to 0. */
1577 this_priority
= insn_cost (insn
);
1580 rtx prev_first
, twin
;
1583 /* For recovery check instructions we calculate priority slightly
1584 different than that of normal instructions. Instead of walking
1585 through INSN_FORW_DEPS (check) list, we walk through
1586 INSN_FORW_DEPS list of each instruction in the corresponding
1589 /* Selective scheduling does not define RECOVERY_BLOCK macro. */
1590 rec
= sel_sched_p () ? NULL
: RECOVERY_BLOCK (insn
);
1591 if (!rec
|| rec
== EXIT_BLOCK_PTR
)
1593 prev_first
= PREV_INSN (insn
);
1598 prev_first
= NEXT_INSN (BB_HEAD (rec
));
1599 twin
= PREV_INSN (BB_END (rec
));
1604 sd_iterator_def sd_it
;
1607 FOR_EACH_DEP (twin
, SD_LIST_FORW
, sd_it
, dep
)
1612 next
= DEP_CON (dep
);
1614 if (BLOCK_FOR_INSN (next
) != rec
)
1618 if (!contributes_to_priority_p (dep
))
1622 cost
= dep_cost (dep
);
1625 struct _dep _dep1
, *dep1
= &_dep1
;
1627 init_dep (dep1
, insn
, next
, REG_DEP_ANTI
);
1629 cost
= dep_cost (dep1
);
1632 next_priority
= cost
+ priority (next
);
1634 if (next_priority
> this_priority
)
1635 this_priority
= next_priority
;
1639 twin
= PREV_INSN (twin
);
1641 while (twin
!= prev_first
);
1644 if (this_priority
< 0)
1646 gcc_assert (this_priority
== -1);
1648 this_priority
= insn_cost (insn
);
1651 INSN_PRIORITY (insn
) = this_priority
;
1652 INSN_PRIORITY_STATUS (insn
) = 1;
1655 return INSN_PRIORITY (insn
);
1658 /* Macros and functions for keeping the priority queue sorted, and
1659 dealing with queuing and dequeuing of instructions. */
1661 #define SCHED_SORT(READY, N_READY) \
1662 do { if ((N_READY) == 2) \
1663 swap_sort (READY, N_READY); \
1664 else if ((N_READY) > 2) \
1665 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
1666 while (0)
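/* Illustrative note (not part of the original sources): a two-element ready
   list takes the cheap swap_sort path, while longer lists fall back to
   qsort with rank_for_schedule as the comparator, so the ready list always
   ends up ordered according to the heuristics described above. */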
1668 /* For each pressure class CL, set DEATH[CL] to the number of registers
1669 in that class that die in INSN. */
1672 calculate_reg_deaths (rtx insn
, int *death
)
1675 struct reg_use_data
*use
;
1677 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
1678 death
[ira_pressure_classes
[i
]] = 0;
1679 for (use
= INSN_REG_USE_LIST (insn
); use
!= NULL
; use
= use
->next_insn_use
)
1680 if (dying_use_p (use
))
1681 mark_regno_birth_or_death (0, death
, use
->regno
, true);
1684 /* Setup info about the current register pressure impact of scheduling
1685 INSN at the current scheduling point. */
1687 setup_insn_reg_pressure_info (rtx insn
)
1689 int i
, change
, before
, after
, hard_regno
;
1690 int excess_cost_change
;
1691 enum machine_mode mode
;
1693 struct reg_pressure_data
*pressure_info
;
1694 int *max_reg_pressure
;
1695 static int death
[N_REG_CLASSES
];
1697 gcc_checking_assert (!DEBUG_INSN_P (insn
));
1699 excess_cost_change
= 0;
1700 calculate_reg_deaths (insn
, death
);
1701 pressure_info
= INSN_REG_PRESSURE (insn
);
1702 max_reg_pressure
= INSN_MAX_REG_PRESSURE (insn
);
1703 gcc_assert (pressure_info
!= NULL
&& max_reg_pressure
!= NULL
);
1704 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
1706 cl
= ira_pressure_classes
[i
];
1707 gcc_assert (curr_reg_pressure
[cl
] >= 0);
1708 change
= (int) pressure_info
[i
].set_increase
- death
[cl
];
1709 before
= MAX (0, max_reg_pressure
[i
] - ira_class_hard_regs_num
[cl
]);
1710 after
= MAX (0, max_reg_pressure
[i
] + change
1711 - ira_class_hard_regs_num
[cl
]);
1712 hard_regno
= ira_class_hard_regs
[cl
][0];
1713 gcc_assert (hard_regno
>= 0);
1714 mode
= reg_raw_mode
[hard_regno
];
1715 excess_cost_change
+= ((after
- before
)
1716 * (ira_memory_move_cost
[mode
][cl
][0]
1717 + ira_memory_move_cost
[mode
][cl
][1]));
1719 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn
) = excess_cost_change
;
1722 /* This is the first page of code related to SCHED_PRESSURE_MODEL.
1723 It tries to make the scheduler take register pressure into account
1724 without introducing too many unnecessary stalls. It hooks into the
1725 main scheduling algorithm at several points:
1727 - Before scheduling starts, model_start_schedule constructs a
1728 "model schedule" for the current block. This model schedule is
1729 chosen solely to keep register pressure down. It does not take the
1730 target's pipeline or the original instruction order into account,
1731 except as a tie-breaker. It also doesn't work to a particular
1732 pressure limit.
1734 This model schedule gives us an idea of what pressure can be
1735 achieved for the block and gives us an example of a schedule that
1736 keeps to that pressure. It also makes the final schedule less
1737 dependent on the original instruction order. This is important
1738 because the original order can either be "wide" (many values live
1739 at once, such as in user-scheduled code) or "narrow" (few values
1740 live at once, such as after loop unrolling, where several
1741 iterations are executed sequentially).
1743 We do not apply this model schedule to the rtx stream. We simply
1744 record it in model_schedule. We also compute the maximum pressure,
1745 MP, that was seen during this schedule.
1747 - Instructions are added to the ready queue even if they require
1748 a stall. The length of the stall is instead computed as:
1750 MAX (INSN_TICK (INSN) - clock_var, 0)
1752 (= insn_delay). This allows rank_for_schedule to choose between
1753 introducing a deliberate stall or increasing pressure.
1755 - Before sorting the ready queue, model_set_excess_costs assigns
1756 a pressure-based cost to each ready instruction in the queue.
1757 This is the instruction's INSN_REG_PRESSURE_EXCESS_COST_CHANGE
1758 (ECC for short) and is effectively measured in cycles.
1760 - rank_for_schedule ranks instructions based on:
1762 ECC (insn) + insn_delay (insn)
1768 So, for example, an instruction X1 with an ECC of 1 that can issue
1769 now will win over an instruction X0 with an ECC of zero that would
1770 introduce a stall of one cycle. However, an instruction X2 with an
1771 ECC of 2 that can issue now will lose to both X0 and X1.
1773 - When an instruction is scheduled, model_recompute updates the model
1774 schedule with the new pressures (some of which might now exceed the
1775 original maximum pressure MP). model_update_limit_points then searches
1776 for the new point of maximum pressure, if not already known. */
1778 /* Used to separate high-verbosity debug information for SCHED_PRESSURE_MODEL
1779 from surrounding debug information. */
1781 ";;\t\t+------------------------------------------------------\n"
1783 /* Information about the pressure on a particular register class at a
1784 particular point of the model schedule. */
1785 struct model_pressure_data
{
1786 /* The pressure at this point of the model schedule, or -1 if the
1787 point is associated with an instruction that has already been
1788 scheduled. */
1789 int ref_pressure;
1791 /* The maximum pressure during or after this point of the model schedule. */
1792 int max_pressure;
1793 };
1795 /* Per-instruction information that is used while building the model
1796 schedule. Here, "schedule" refers to the model schedule rather
1797 than the main schedule. */
1798 struct model_insn_info
{
1799 /* The instruction itself. */
1800 rtx insn;
1802 /* If this instruction is in model_worklist, these fields link to the
1803 previous (higher-priority) and next (lower-priority) instructions
1804 in the list. */
1805 struct model_insn_info *prev;
1806 struct model_insn_info *next;
1808 /* While constructing the schedule, QUEUE_INDEX describes whether an
1809 instruction has already been added to the schedule (QUEUE_SCHEDULED),
1810 is in model_worklist (QUEUE_READY), or neither (QUEUE_NOWHERE).
1811 old_queue records the value that QUEUE_INDEX had before scheduling
1812 started, so that we can restore it once the schedule is complete. */
1813 int old_queue;
1815 /* The relative importance of an unscheduled instruction. Higher
1816 values indicate greater importance. */
1817 unsigned int model_priority
;
1819 /* The length of the longest path of satisfied true dependencies
1820 that leads to this instruction. */
1823 /* The length of the longest path of dependencies of any kind
1824 that leads from this instruction. */
1827 /* The number of predecessor nodes that must still be scheduled. */
1828 int unscheduled_preds;
1829 };
1831 /* Information about the pressure limit for a particular register class.
1832 This structure is used when applying a model schedule to the main
1834 struct model_pressure_limit
{
1835 /* The maximum register pressure seen in the original model schedule. */
1836 int orig_pressure;
1838 /* The maximum register pressure seen in the current model schedule
1839 (which excludes instructions that have already been scheduled). */
1840 int pressure;
1842 /* The point of the current model schedule at which PRESSURE is first
1843 reached. It is set to -1 if the value needs to be recomputed. */
1844 int point;
1845 };
1847 /* Describes a particular way of measuring register pressure. */
1848 struct model_pressure_group
{
1849 /* Index PCI describes the maximum pressure on ira_pressure_classes[PCI]. */
1850 struct model_pressure_limit limits[N_REG_CLASSES];
1852 /* Index (POINT * ira_num_pressure_classes + PCI) describes the pressure
1853 on register class ira_pressure_classes[PCI] at point POINT of the
1854 current model schedule. A POINT of model_num_insns describes the
1855 pressure at the end of the schedule. */
1856 struct model_pressure_data *model;
1857 };
1859 /* Index POINT gives the instruction at point POINT of the model schedule.
1860 This array doesn't change during main scheduling. */
1861 static vec<rtx> model_schedule;
1863 /* The list of instructions in the model worklist, sorted in order of
1864 decreasing priority. */
1865 static struct model_insn_info *model_worklist;
1867 /* Index I describes the instruction with INSN_LUID I. */
1868 static struct model_insn_info *model_insns;
1870 /* The number of instructions in the model schedule. */
1871 static int model_num_insns;
1873 /* The index of the first instruction in model_schedule that hasn't yet been
1874 added to the main schedule, or model_num_insns if all of them have. */
1875 static int model_curr_point;
1877 /* Describes the pressure before each instruction in the model schedule. */
1878 static struct model_pressure_group model_before_pressure;
1880 /* The first unused model_priority value (as used in model_insn_info). */
1881 static unsigned int model_next_priority;
1884 /* The model_pressure_data for ira_pressure_classes[PCI] in GROUP
1885 at point POINT of the model schedule. */
1886 #define MODEL_PRESSURE_DATA(GROUP, POINT, PCI) \
1887 (&(GROUP)->model[(POINT) * ira_pressure_classes_num + (PCI)])
1889 /* The maximum pressure on ira_pressure_classes[PCI] in GROUP at or
1890 after point POINT of the model schedule. */
1891 #define MODEL_MAX_PRESSURE(GROUP, POINT, PCI) \
1892 (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->max_pressure)
1894 /* The pressure on ira_pressure_classes[PCI] in GROUP at point POINT
1895 of the model schedule. */
1896 #define MODEL_REF_PRESSURE(GROUP, POINT, PCI) \
1897 (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->ref_pressure)
1899 /* Information about INSN that is used when creating the model schedule. */
1900 #define MODEL_INSN_INFO(INSN) \
1901 (&model_insns[INSN_LUID (INSN)])
1903 /* The instruction at point POINT of the model schedule. */
1904 #define MODEL_INSN(POINT) \
1905 (model_schedule[POINT])
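/* Illustrative example (not part of the original sources): with two
   pressure classes (ira_pressure_classes_num == 2), the data for point 3 of
   the model schedule and pressure-class index 1 lives at
   GROUP->model[3 * 2 + 1], which is what MODEL_PRESSURE_DATA (GROUP, 3, 1)
   computes. */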
1908 /* Return INSN's index in the model schedule, or model_num_insns if it
1909 doesn't belong to that schedule. */
1911 static int
1912 model_index (rtx insn)
1913 {
1914 if (INSN_MODEL_INDEX (insn) == 0)
1915 return model_num_insns;
1916 return INSN_MODEL_INDEX (insn) - 1;
1917 }
/* Make sure that GROUP->limits is up-to-date for the current point
   of the model schedule.  */

static void
model_update_limit_points_in_group (struct model_pressure_group *group)
{
  int pci, max_pressure, point;

  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      /* We may have passed the final point at which the pressure in
         group->limits[pci].pressure was reached.  Update the limit if so.  */
      max_pressure = MODEL_MAX_PRESSURE (group, model_curr_point, pci);
      group->limits[pci].pressure = max_pressure;

      /* Find the point at which MAX_PRESSURE is first reached.  We need
         to search in three cases:

         - We've already moved past the previous pressure point.
           In this case we search forward from model_curr_point.

         - We scheduled the previous point of maximum pressure ahead of
           its position in the model schedule, but doing so didn't bring
           the pressure point earlier.  In this case we search forward
           from that previous pressure point.

         - Scheduling an instruction early caused the maximum pressure
           to decrease.  In this case we will have set the pressure
           point to -1, and we search forward from model_curr_point.  */
      point = MAX (group->limits[pci].point, model_curr_point);
      while (point < model_num_insns
             && MODEL_REF_PRESSURE (group, point, pci) < max_pressure)
        point++;
      group->limits[pci].point = point;

      gcc_assert (MODEL_REF_PRESSURE (group, point, pci) == max_pressure);
      gcc_assert (MODEL_MAX_PRESSURE (group, point, pci) == max_pressure);
    }
}

/* Make sure that all register-pressure limits are up-to-date for the
   current position in the model schedule.  */

static void
model_update_limit_points (void)
{
  model_update_limit_points_in_group (&model_before_pressure);
}
/* Return the model_index of the last unscheduled use in chain USE
   outside of USE's instruction.  Return -1 if there are no other uses,
   or model_num_insns if the register is live at the end of the block.  */

static int
model_last_use_except (struct reg_use_data *use)
{
  struct reg_use_data *next;
  int last, index;

  last = -1;
  for (next = use->next_regno_use; next != use; next = next->next_regno_use)
    if (NONDEBUG_INSN_P (next->insn)
        && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
      {
        index = model_index (next->insn);
        if (index == model_num_insns)
          return model_num_insns;
        if (last < index)
          last = index;
      }
  return last;
}
/* An instruction with model_index POINT has just been scheduled, and it
   adds DELTA to the pressure on ira_pressure_classes[PCI] after POINT - 1.
   Update MODEL_REF_PRESSURE (GROUP, POINT, PCI) and
   MODEL_MAX_PRESSURE (GROUP, POINT, PCI) accordingly.  */

static void
model_start_update_pressure (struct model_pressure_group *group,
                             int point, int pci, int delta)
{
  int next_max_pressure;

  if (point == model_num_insns)
    {
      /* The instruction wasn't part of the model schedule; it was moved
         from a different block.  Update the pressure for the end of
         the model schedule.  */
      MODEL_REF_PRESSURE (group, point, pci) += delta;
      MODEL_MAX_PRESSURE (group, point, pci) += delta;
    }
  else
    {
      /* Record that this instruction has been scheduled.  Nothing now
         changes between POINT and POINT + 1, so get the maximum pressure
         from the latter.  If the maximum pressure decreases, the new
         pressure point may be before POINT.  */
      MODEL_REF_PRESSURE (group, point, pci) = -1;
      next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
      if (MODEL_MAX_PRESSURE (group, point, pci) > next_max_pressure)
        {
          MODEL_MAX_PRESSURE (group, point, pci) = next_max_pressure;
          if (group->limits[pci].point == point)
            group->limits[pci].point = -1;
        }
    }
}
/* Record that scheduling a later instruction has changed the pressure
   at point POINT of the model schedule by DELTA (which might be 0).
   Update GROUP accordingly.  Return nonzero if these changes might
   trigger changes to previous points as well.  */

static bool
model_update_pressure (struct model_pressure_group *group,
                       int point, int pci, int delta)
{
  int ref_pressure, max_pressure, next_max_pressure;

  /* If POINT hasn't yet been scheduled, update its pressure.  */
  ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
  if (ref_pressure >= 0 && delta != 0)
    {
      ref_pressure += delta;
      MODEL_REF_PRESSURE (group, point, pci) = ref_pressure;

      /* Check whether the maximum pressure in the overall schedule
         has increased.  (This means that the MODEL_MAX_PRESSURE of
         every point <= POINT will need to increase too; see below.)  */
      if (group->limits[pci].pressure < ref_pressure)
        group->limits[pci].pressure = ref_pressure;

      /* If we are at maximum pressure, and the maximum pressure
         point was previously unknown or later than POINT,
         bring it forward.  */
      if (group->limits[pci].pressure == ref_pressure
          && !IN_RANGE (group->limits[pci].point, 0, point))
        group->limits[pci].point = point;

      /* If POINT used to be the point of maximum pressure, but isn't
         any longer, we need to recalculate it using a forward walk.  */
      if (group->limits[pci].pressure > ref_pressure
          && group->limits[pci].point == point)
        group->limits[pci].point = -1;
    }

  /* Update the maximum pressure at POINT.  Changes here might also
     affect the maximum pressure at POINT - 1.  */
  next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
  max_pressure = MAX (ref_pressure, next_max_pressure);
  if (MODEL_MAX_PRESSURE (group, point, pci) != max_pressure)
    {
      MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
      return true;
    }
  return false;
}
2078 /* INSN has just been scheduled. Update the model schedule accordingly. */
2081 model_recompute (rtx insn
)
2086 } uses
[FIRST_PSEUDO_REGISTER
+ MAX_RECOG_OPERANDS
];
2087 struct reg_use_data
*use
;
2088 struct reg_pressure_data
*reg_pressure
;
2089 int delta
[N_REG_CLASSES
];
2090 int pci
, point
, mix
, new_last
, cl
, ref_pressure
, queue
;
2091 unsigned int i
, num_uses
, num_pending_births
;
2094 /* The destinations of INSN were previously live from POINT onwards, but are
2095 now live from model_curr_point onwards. Set up DELTA accordingly. */
2096 point
= model_index (insn
);
2097 reg_pressure
= INSN_REG_PRESSURE (insn
);
2098 for (pci
= 0; pci
< ira_pressure_classes_num
; pci
++)
2100 cl
= ira_pressure_classes
[pci
];
2101 delta
[cl
] = reg_pressure
[pci
].set_increase
;
2104 /* Record which registers previously died at POINT, but which now die
2105 before POINT. Adjust DELTA so that it represents the effect of
2106 this change after POINT - 1. Set NUM_PENDING_BIRTHS to the number of
2107 registers that will be born in the range [model_curr_point, POINT). */
2109 num_pending_births
= 0;
2110 for (use
= INSN_REG_USE_LIST (insn
); use
!= NULL
; use
= use
->next_insn_use
)
2112 new_last
= model_last_use_except (use
);
2113 if (new_last
< point
)
2115 gcc_assert (num_uses
< ARRAY_SIZE (uses
));
2116 uses
[num_uses
].last_use
= new_last
;
2117 uses
[num_uses
].regno
= use
->regno
;
2118 /* This register is no longer live after POINT - 1. */
2119 mark_regno_birth_or_death (NULL
, delta
, use
->regno
, false);
2122 num_pending_births
++;
2126 /* Update the MODEL_REF_PRESSURE and MODEL_MAX_PRESSURE for POINT.
2127 Also set each group pressure limit for POINT. */
2128 for (pci
= 0; pci
< ira_pressure_classes_num
; pci
++)
2130 cl
= ira_pressure_classes
[pci
];
2131 model_start_update_pressure (&model_before_pressure
,
2132 point
, pci
, delta
[cl
]);
2135 /* Walk the model schedule backwards, starting immediately before POINT. */
2137 if (point
!= model_curr_point
)
2141 insn
= MODEL_INSN (point
);
2142 queue
= QUEUE_INDEX (insn
);
2144 if (queue
!= QUEUE_SCHEDULED
)
2146 /* DELTA describes the effect of the move on the register pressure
2147 after POINT. Make it describe the effect on the pressure before POINT. */
2150 while (i
< num_uses
)
2152 if (uses
[i
].last_use
== point
)
2154 /* This register is now live again. */
2155 mark_regno_birth_or_death (NULL
, delta
,
2156 uses
[i
].regno
, true);
2158 /* Remove this use from the array. */
2159 uses
[i
] = uses
[num_uses
- 1];
2161 num_pending_births
--;
2167 if (sched_verbose
>= 5)
2171 fprintf (sched_dump
, MODEL_BAR
);
2172 fprintf (sched_dump
, ";;\t\t| New pressure for model"
2174 fprintf (sched_dump
, MODEL_BAR
);
2178 fprintf (sched_dump
, ";;\t\t| %3d %4d %-30s ",
2179 point
, INSN_UID (insn
),
2180 str_pattern_slim (PATTERN (insn
)));
2181 for (pci
= 0; pci
< ira_pressure_classes_num
; pci
++)
2183 cl
= ira_pressure_classes
[pci
];
2184 ref_pressure
= MODEL_REF_PRESSURE (&model_before_pressure
,
2186 fprintf (sched_dump
, " %s:[%d->%d]",
2187 reg_class_names
[ira_pressure_classes
[pci
]],
2188 ref_pressure
, ref_pressure
+ delta
[cl
]);
2190 fprintf (sched_dump
, "\n");
2194 /* Adjust the pressure at POINT. Set MIX to nonzero if POINT - 1
2195 might have changed as well. */
2196 mix
= num_pending_births
;
2197 for (pci
= 0; pci
< ira_pressure_classes_num
; pci
++)
2199 cl
= ira_pressure_classes
[pci
];
2201 mix
|= model_update_pressure (&model_before_pressure
,
2202 point
, pci
, delta
[cl
]);
2205 while (mix
&& point
> model_curr_point
);
2208 fprintf (sched_dump
, MODEL_BAR
);
2211 /* After DEP, which was cancelled, has been resolved for insn NEXT,
2212 check whether the insn's pattern needs restoring. */
2214 must_restore_pattern_p (rtx next
, dep_t dep
)
2216 if (QUEUE_INDEX (next
) == QUEUE_SCHEDULED
)
2219 if (DEP_TYPE (dep
) == REG_DEP_CONTROL
)
2221 gcc_assert (ORIG_PAT (next
) != NULL_RTX
);
2222 gcc_assert (next
== DEP_CON (dep
));
2226 struct dep_replacement
*desc
= DEP_REPLACE (dep
);
2227 if (desc
->insn
!= next
)
2229 gcc_assert (*desc
->loc
== desc
->orig
);
2236 /* model_spill_cost (CL, P, P') returns the cost of increasing the
2237 pressure on CL from P to P'. We use this to calculate a "base ECC",
2238 baseECC (CL, X), for each pressure class CL and each instruction X.
2239 Supposing X changes the pressure on CL from P to P', and that the
2240 maximum pressure on CL in the current model schedule is MP', then:
2242 * if X occurs before or at the next point of maximum pressure in
2243 the model schedule and P' > MP', then:
2245 baseECC (CL, X) = model_spill_cost (CL, MP, P')
2247 The idea is that the pressure after scheduling a fixed set of
2248 instructions -- in this case, the set up to and including the
2249 next maximum pressure point -- is going to be the same regardless
2250 of the order; we simply want to keep the intermediate pressure
2251 under control. Thus X has a cost of zero unless scheduling it
2252 now would exceed MP'.
2254 If all increases in the set are by the same amount, no zero-cost
2255 instruction will ever cause the pressure to exceed MP'. However,
2256 if X is instead moved past an instruction X' with pressure in the
2257 range (MP' - (P' - P), MP'), the pressure at X' will increase
2258 beyond MP'. Since baseECC is very much a heuristic anyway,
2259 it doesn't seem worth the overhead of tracking cases like these.
2261 The cost of exceeding MP' is always based on the original maximum
2262 pressure MP. This is so that going 2 registers over the original
2263 limit has the same cost regardless of whether it comes from two
2264 separate +1 deltas or from a single +2 delta.
2266 * if X occurs after the next point of maximum pressure in the model
2267 schedule and P' > P, then:
2269 baseECC (CL, X) = model_spill_cost (CL, MP, MP' + (P' - P))
2271 That is, if we move X forward across a point of maximum pressure,
2272 and if X increases the pressure by P' - P, then we conservatively
2273 assume that scheduling X next would increase the maximum pressure
2274 by P' - P. Again, the cost of doing this is based on the original
2275 maximum pressure MP, for the same reason as above.
2277 * if P' < P, P > MP, and X occurs at or after the next point of
2278 maximum pressure, then:
2280 baseECC (CL, X) = -model_spill_cost (CL, MAX (MP, P'), P)
2282 That is, if we have already exceeded the original maximum pressure MP,
2283 and if X might reduce the maximum pressure again -- or at least push
2284 it further back, and thus allow more scheduling freedom -- it is given
2285 a negative cost to reflect the improvement.

   * otherwise,

       baseECC (CL, X) = 0

2291 In this case, X is not expected to affect the maximum pressure MP',
2292 so it has zero cost.
2294 We then create a combined value baseECC (X) that is the sum of
2295 baseECC (CL, X) for each pressure class CL.
2297 baseECC (X) could itself be used as the ECC value described above.
2298 However, this is often too conservative, in the sense that it
2299 tends to make high-priority instructions that increase pressure
2300 wait too long in cases where introducing a spill would be better.
2301 For this reason the final ECC is a priority-adjusted form of
2302 baseECC (X). Specifically, we calculate:
2304 P (X) = INSN_PRIORITY (X) - insn_delay (X) - baseECC (X)
2305 baseP = MAX { P (X) | baseECC (X) <= 0 }
2309 ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0)
2311 Thus an instruction's effect on pressure is ignored if it has a high
2312 enough priority relative to the ones that don't increase pressure.
2313 Negative values of baseECC (X) do not increase the priority of X
2314 itself, but they do make it harder for other instructions to
2315 increase the pressure further.
2317 This pressure cost is deliberately timid. The intention has been
2318 to choose a heuristic that rarely interferes with the normal list
2319 scheduler in cases where that scheduler would produce good code.
2320 We simply want to curb some of its worst excesses. */
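/* A worked example with made-up numbers: suppose INSN_PRIORITY (X) = 10,
   insn_delay (X) = 1 and baseECC (X) = 3, so P (X) = 10 - 1 - 3 = 6.
   If the best zero-cost instruction has P = 8, then baseP = 8 and
   ECC (X) = MAX (MIN (8 - 6, 3), 0) = 2: X is charged only the part of its
   pressure cost that its priority advantage does not already cover.  */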
2322 /* Return the cost of increasing the pressure in class CL from FROM to TO.
2324 Here we use the very simplistic cost model that every register above
2325 ira_class_hard_regs_num[CL] has a spill cost of 1. We could use other
2326 measures instead, such as one based on MEMORY_MOVE_COST. However:
2328 (1) In order for an instruction to be scheduled, the higher cost
2329 would need to be justified in a single saving of that many stalls.
2330 This is overly pessimistic, because the benefit of spilling is
2331 often to avoid a sequence of several short stalls rather than a single long one.
2334 (2) The cost is still arbitrary. Because we are not allocating
2335 registers during scheduling, we have no way of knowing for
2336 sure how many memory accesses will be required by each spill,
2337 where the spills will be placed within the block, or even
2338 which block(s) will contain the spills.
2340 So a higher cost than 1 is often too conservative in practice,
2341 forcing blocks to contain unnecessary stalls instead of spill code.
2342 The simple cost below seems to be the best compromise. It reduces
2343 the interference with the normal list scheduler, which helps make
2344 it more suitable for a default-on option. */
static int
model_spill_cost (int cl, int from, int to)
{
  from = MAX (from, ira_class_hard_regs_num[cl]);
  return MAX (to, from) - from;
}
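/* For example (hypothetical class limit): if ira_class_hard_regs_num[CL]
   is 8, then model_spill_cost (CL, 6, 11) = MAX (11, 8) - 8 = 3; only the
   registers beyond the class limit are charged, and increases that stay at
   or below the limit cost nothing.  */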
2353 /* Return baseECC (ira_pressure_classes[PCI], POINT), given that
2354 P = curr_reg_pressure[ira_pressure_classes[PCI]] and that P' = P + DELTA. */
2358 model_excess_group_cost (struct model_pressure_group
*group
,
2359 int point
, int pci
, int delta
)
2363 cl
= ira_pressure_classes
[pci
];
2364 if (delta
< 0 && point
>= group
->limits
[pci
].point
)
2366 pressure
= MAX (group
->limits
[pci
].orig_pressure
,
2367 curr_reg_pressure
[cl
] + delta
);
2368 return -model_spill_cost (cl
, pressure
, curr_reg_pressure
[cl
]);
2373 if (point
> group
->limits
[pci
].point
)
2374 pressure
= group
->limits
[pci
].pressure
+ delta
;
2376 pressure
= curr_reg_pressure
[cl
] + delta
;
2378 if (pressure
> group
->limits
[pci
].pressure
)
2379 return model_spill_cost (cl
, group
->limits
[pci
].orig_pressure
,
2386 /* Return baseECC (MODEL_INSN (INSN)). Dump the costs to sched_dump if PRINT_P. */
2390 model_excess_cost (rtx insn
, bool print_p
)
2392 int point
, pci
, cl
, cost
, this_cost
, delta
;
2393 struct reg_pressure_data
*insn_reg_pressure
;
2394 int insn_death
[N_REG_CLASSES
];
2396 calculate_reg_deaths (insn
, insn_death
);
2397 point
= model_index (insn
);
2398 insn_reg_pressure
= INSN_REG_PRESSURE (insn
);
2402 fprintf (sched_dump
, ";;\t\t| %3d %4d | %4d %+3d |", point
,
2403 INSN_UID (insn
), INSN_PRIORITY (insn
), insn_delay (insn
));
2405 /* Sum up the individual costs for each register class. */
2406 for (pci
= 0; pci
< ira_pressure_classes_num
; pci
++)
2408 cl
= ira_pressure_classes
[pci
];
2409 delta
= insn_reg_pressure
[pci
].set_increase
- insn_death
[cl
];
2410 this_cost
= model_excess_group_cost (&model_before_pressure
,
2414 fprintf (sched_dump
, " %s:[%d base cost %d]",
2415 reg_class_names
[cl
], delta
, this_cost
);
2419 fprintf (sched_dump
, "\n");
2424 /* Dump the next points of maximum pressure for GROUP. */
2427 model_dump_pressure_points (struct model_pressure_group
*group
)
2431 fprintf (sched_dump
, ";;\t\t| pressure points");
2432 for (pci
= 0; pci
< ira_pressure_classes_num
; pci
++)
2434 cl
= ira_pressure_classes
[pci
];
2435 fprintf (sched_dump
, " %s:[%d->%d at ", reg_class_names
[cl
],
2436 curr_reg_pressure
[cl
], group
->limits
[pci
].pressure
);
2437 if (group
->limits
[pci
].point
< model_num_insns
)
2438 fprintf (sched_dump
, "%d:%d]", group
->limits
[pci
].point
,
2439 INSN_UID (MODEL_INSN (group
->limits
[pci
].point
)));
2441 fprintf (sched_dump
, "end]");
2443 fprintf (sched_dump
, "\n");
2446 /* Set INSN_REG_PRESSURE_EXCESS_COST_CHANGE for INSNS[0...COUNT-1]. */
2449 model_set_excess_costs (rtx
*insns
, int count
)
2451 int i
, cost
, priority_base
, priority
;
2454 /* Record the baseECC value for each instruction in the model schedule,
2455 except that negative costs are converted to zero ones now rather than
2456 later. Do not assign a cost to debug instructions, since they must
2457 not change code-generation decisions. Experiments suggest we also
2458 get better results by not assigning a cost to instructions from
2461 Set PRIORITY_BASE to baseP in the block comment above. This is the
2462 maximum priority of the "cheap" instructions, which should always
2463 include the next model instruction. */
2466 for (i
= 0; i
< count
; i
++)
2467 if (INSN_MODEL_INDEX (insns
[i
]))
2469 if (sched_verbose
>= 6 && !print_p
)
2471 fprintf (sched_dump
, MODEL_BAR
);
2472 fprintf (sched_dump
, ";;\t\t| Pressure costs for ready queue\n");
2473 model_dump_pressure_points (&model_before_pressure
);
2474 fprintf (sched_dump
, MODEL_BAR
);
2477 cost
= model_excess_cost (insns
[i
], print_p
);
2480 priority
= INSN_PRIORITY (insns
[i
]) - insn_delay (insns
[i
]) - cost
;
2481 priority_base
= MAX (priority_base
, priority
);
2484 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns
[i
]) = cost
;
2487 fprintf (sched_dump
, MODEL_BAR
);
2489 /* Use MAX (baseECC, 0) and baseP to calculate ECC for each instruction. */
2491 for (i
= 0; i
< count
; i
++)
2493 cost
= INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns
[i
]);
2494 priority
= INSN_PRIORITY (insns
[i
]) - insn_delay (insns
[i
]);
2495 if (cost
> 0 && priority
> priority_base
)
2497 cost
+= priority_base
- priority
;
2498 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns
[i
]) = MAX (cost
, 0);
2503 /* Returns a positive value if x is preferred; returns a negative value if
2504 y is preferred. Should never return 0, since that will make the sort unstable. */
2508 rank_for_schedule (const void *x
, const void *y
)
2510 rtx tmp
= *(const rtx
*) y
;
2511 rtx tmp2
= *(const rtx
*) x
;
2512 int tmp_class
, tmp2_class
;
2513 int val
, priority_val
, info_val
;
2515 if (MAY_HAVE_DEBUG_INSNS
)
2517 /* Schedule debug insns as early as possible. */
2518 if (DEBUG_INSN_P (tmp
) && !DEBUG_INSN_P (tmp2
))
2520 else if (!DEBUG_INSN_P (tmp
) && DEBUG_INSN_P (tmp2
))
2522 else if (DEBUG_INSN_P (tmp
) && DEBUG_INSN_P (tmp2
))
2523 return INSN_LUID (tmp
) - INSN_LUID (tmp2
);
2526 /* The insn in a schedule group should be issued first. */
2527 if (flag_sched_group_heuristic
&&
2528 SCHED_GROUP_P (tmp
) != SCHED_GROUP_P (tmp2
))
2529 return SCHED_GROUP_P (tmp2
) ? 1 : -1;
2531 /* Make sure that priority of TMP and TMP2 are initialized. */
2532 gcc_assert (INSN_PRIORITY_KNOWN (tmp
) && INSN_PRIORITY_KNOWN (tmp2
));
2534 if (sched_pressure
!= SCHED_PRESSURE_NONE
)
2538 /* Prefer insn whose scheduling results in the smallest register
2540 if ((diff
= (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp
)
2542 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2
)
2543 - insn_delay (tmp2
))))
2547 if (sched_pressure
!= SCHED_PRESSURE_NONE
2548 && (INSN_TICK (tmp2
) > clock_var
|| INSN_TICK (tmp
) > clock_var
))
2550 if (INSN_TICK (tmp
) <= clock_var
)
2552 else if (INSN_TICK (tmp2
) <= clock_var
)
2555 return INSN_TICK (tmp
) - INSN_TICK (tmp2
);
2558 /* If we are doing backtracking in this schedule, prefer insns that
2559 have forward dependencies with negative cost against an insn that
2560 was already scheduled. */
2561 if (current_sched_info
->flags
& DO_BACKTRACKING
)
2563 priority_val
= FEEDS_BACKTRACK_INSN (tmp2
) - FEEDS_BACKTRACK_INSN (tmp
);
2565 return priority_val
;
2568 /* Prefer insn with higher priority. */
2569 priority_val
= INSN_PRIORITY (tmp2
) - INSN_PRIORITY (tmp
);
2571 if (flag_sched_critical_path_heuristic
&& priority_val
)
2572 return priority_val
;
2574 /* Prefer speculative insn with greater dependencies weakness. */
2575 if (flag_sched_spec_insn_heuristic
&& spec_info
)
2581 ds1
= TODO_SPEC (tmp
) & SPECULATIVE
;
2583 dw1
= ds_weak (ds1
);
2587 ds2
= TODO_SPEC (tmp2
) & SPECULATIVE
;
2589 dw2
= ds_weak (ds2
);
2594 if (dw
> (NO_DEP_WEAK
/ 8) || dw
< -(NO_DEP_WEAK
/ 8))
2598 info_val
= (*current_sched_info
->rank
) (tmp
, tmp2
);
2599 if (flag_sched_rank_heuristic
&& info_val
)
2602 /* Compare insns based on their relation to the last scheduled
2604 if (flag_sched_last_insn_heuristic
&& last_nondebug_scheduled_insn
)
2608 rtx last
= last_nondebug_scheduled_insn
;
2610 /* Classify the instructions into three classes:
2611 1) Data dependent on last scheduled insn.
2612 2) Anti/Output dependent on last scheduled insn.
2613 3) Independent of last scheduled insn, or has latency of one.
2614 Choose the insn from the highest numbered class if different. */
2615 dep1
= sd_find_dep_between (last
, tmp
, true);
2617 if (dep1
== NULL
|| dep_cost (dep1
) == 1)
2619 else if (/* Data dependence. */
2620 DEP_TYPE (dep1
) == REG_DEP_TRUE
)
2625 dep2
= sd_find_dep_between (last
, tmp2
, true);
2627 if (dep2
== NULL
|| dep_cost (dep2
) == 1)
2629 else if (/* Data dependence. */
2630 DEP_TYPE (dep2
) == REG_DEP_TRUE
)
2635 if ((val
= tmp2_class
- tmp_class
))
2639 /* Prefer instructions that occur earlier in the model schedule. */
2640 if (sched_pressure
== SCHED_PRESSURE_MODEL
)
2644 diff
= model_index (tmp
) - model_index (tmp2
);
2649 /* Prefer the insn which has more later insns that depend on it.
2650 This gives the scheduler more freedom when scheduling later
2651 instructions at the expense of added register pressure. */
2653 val
= (dep_list_size (tmp2
, SD_LIST_FORW
)
2654 - dep_list_size (tmp
, SD_LIST_FORW
));
2656 if (flag_sched_dep_count_heuristic
&& val
!= 0)
2659 /* If insns are equally good, sort by INSN_LUID (original insn order),
2660 so that we make the sort stable. This minimizes instruction movement,
2661 thus minimizing sched's effect on debugging and cross-jumping. */
2662 return INSN_LUID (tmp
) - INSN_LUID (tmp2
);
2665 /* Resort the array A in which only element at index N may be out of order. */
2667 HAIFA_INLINE
static void
2668 swap_sort (rtx
*a
, int n
)
2670 rtx insn
= a
[n
- 1];
2673 while (i
>= 0 && rank_for_schedule (a
+ i
, &insn
) >= 0)
2681 /* Add INSN to the insn queue so that it can be executed at least
2682 N_CYCLES after the currently executing insn. Preserve insns
2683 chain for debugging purposes. REASON will be printed in debugging output. */
2686 HAIFA_INLINE
static void
2687 queue_insn (rtx insn
, int n_cycles
, const char *reason
)
2689 int next_q
= NEXT_Q_AFTER (q_ptr
, n_cycles
);
2690 rtx link
= alloc_INSN_LIST (insn
, insn_queue
[next_q
]);
2693 gcc_assert (n_cycles
<= max_insn_queue_index
);
2694 gcc_assert (!DEBUG_INSN_P (insn
));
2696 insn_queue
[next_q
] = link
;
2699 if (sched_verbose
>= 2)
2701 fprintf (sched_dump
, ";;\t\tReady-->Q: insn %s: ",
2702 (*current_sched_info
->print_insn
) (insn
, 0));
2704 fprintf (sched_dump
, "queued for %d cycles (%s).\n", n_cycles
, reason
);
2707 QUEUE_INDEX (insn
) = next_q
;
2709 if (current_sched_info
->flags
& DO_BACKTRACKING
)
2711 new_tick
= clock_var
+ n_cycles
;
2712 if (INSN_TICK (insn
) == INVALID_TICK
|| INSN_TICK (insn
) < new_tick
)
2713 INSN_TICK (insn
) = new_tick
;
2715 if (INSN_EXACT_TICK (insn
) != INVALID_TICK
2716 && INSN_EXACT_TICK (insn
) < clock_var
+ n_cycles
)
2718 must_backtrack
= true;
2719 if (sched_verbose
>= 2)
2720 fprintf (sched_dump
, ";;\t\tcausing a backtrack.\n");
2725 /* Remove INSN from queue. */
2727 queue_remove (rtx insn
)
2729 gcc_assert (QUEUE_INDEX (insn
) >= 0);
2730 remove_free_INSN_LIST_elem (insn
, &insn_queue
[QUEUE_INDEX (insn
)]);
2732 QUEUE_INDEX (insn
) = QUEUE_NOWHERE
;
/* Return a pointer to the bottom of the ready list, i.e. the insn
   with the lowest priority.  */

rtx *
ready_lastpos (struct ready_list *ready)
{
  gcc_assert (ready->n_ready >= 1);
  return ready->vec + ready->first - ready->n_ready + 1;
}
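/* Layout note (added for clarity): the ready list occupies a contiguous
   block of ready->vec ending at index ready->first, with the highest
   priority insn at ready->first and the lowest priority insn at
   ready->first - ready->n_ready + 1, which is the element returned here.  */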
2745 /* Add an element INSN to the ready list so that it ends up with the
2746 lowest/highest priority depending on FIRST_P. */
2748 HAIFA_INLINE
static void
2749 ready_add (struct ready_list
*ready
, rtx insn
, bool first_p
)
2753 if (ready
->first
== ready
->n_ready
)
2755 memmove (ready
->vec
+ ready
->veclen
- ready
->n_ready
,
2756 ready_lastpos (ready
),
2757 ready
->n_ready
* sizeof (rtx
));
2758 ready
->first
= ready
->veclen
- 1;
2760 ready
->vec
[ready
->first
- ready
->n_ready
] = insn
;
2764 if (ready
->first
== ready
->veclen
- 1)
2767 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
2768 memmove (ready
->vec
+ ready
->veclen
- ready
->n_ready
- 1,
2769 ready_lastpos (ready
),
2770 ready
->n_ready
* sizeof (rtx
));
2771 ready
->first
= ready
->veclen
- 2;
2773 ready
->vec
[++(ready
->first
)] = insn
;
2777 if (DEBUG_INSN_P (insn
))
2780 gcc_assert (QUEUE_INDEX (insn
) != QUEUE_READY
);
2781 QUEUE_INDEX (insn
) = QUEUE_READY
;
2783 if (INSN_EXACT_TICK (insn
) != INVALID_TICK
2784 && INSN_EXACT_TICK (insn
) < clock_var
)
2786 must_backtrack
= true;
2790 /* Remove the element with the highest priority from the ready list and return it. */
2793 HAIFA_INLINE
static rtx
2794 ready_remove_first (struct ready_list
*ready
)
2798 gcc_assert (ready
->n_ready
);
2799 t
= ready
->vec
[ready
->first
--];
2801 if (DEBUG_INSN_P (t
))
2803 /* If the queue becomes empty, reset it. */
2804 if (ready
->n_ready
== 0)
2805 ready
->first
= ready
->veclen
- 1;
2807 gcc_assert (QUEUE_INDEX (t
) == QUEUE_READY
);
2808 QUEUE_INDEX (t
) = QUEUE_NOWHERE
;
2813 /* The following code implements multi-pass scheduling for the first
2814 cycle. In other words, we will try to choose a ready insn which
2815 permits starting the maximum number of insns on the same cycle. */
/* Return a pointer to the element INDEX from the ready.  INDEX for
   insn with the highest priority is 0, and the lowest priority has
   N_READY - 1.  */

rtx
ready_element (struct ready_list *ready, int index)
{
  gcc_assert (ready->n_ready && index < ready->n_ready);

  return ready->vec[ready->first - index];
}
2829 /* Remove the element INDEX from the ready list and return it. INDEX
2830 for insn with the highest priority is 0, and the lowest priority has N_READY - 1. */
2833 HAIFA_INLINE
static rtx
2834 ready_remove (struct ready_list
*ready
, int index
)
2840 return ready_remove_first (ready
);
2841 gcc_assert (ready
->n_ready
&& index
< ready
->n_ready
);
2842 t
= ready
->vec
[ready
->first
- index
];
2844 if (DEBUG_INSN_P (t
))
2846 for (i
= index
; i
< ready
->n_ready
; i
++)
2847 ready
->vec
[ready
->first
- i
] = ready
->vec
[ready
->first
- i
- 1];
2848 QUEUE_INDEX (t
) = QUEUE_NOWHERE
;
2852 /* Remove INSN from the ready list. */
2854 ready_remove_insn (rtx insn
)
2858 for (i
= 0; i
< readyp
->n_ready
; i
++)
2859 if (ready_element (readyp
, i
) == insn
)
2861 ready_remove (readyp
, i
);
2867 /* Sort the ready list READY by ascending priority, using the SCHED_SORT macro. */
2871 ready_sort (struct ready_list
*ready
)
2874 rtx
*first
= ready_lastpos (ready
);
2876 if (sched_pressure
== SCHED_PRESSURE_WEIGHTED
)
2878 for (i
= 0; i
< ready
->n_ready
; i
++)
2879 if (!DEBUG_INSN_P (first
[i
]))
2880 setup_insn_reg_pressure_info (first
[i
]);
2882 if (sched_pressure
== SCHED_PRESSURE_MODEL
2883 && model_curr_point
< model_num_insns
)
2884 model_set_excess_costs (first
, ready
->n_ready
);
2885 SCHED_SORT (first
, ready
->n_ready
);
2888 /* PREV is an insn that is ready to execute. Adjust its priority if that
2889 will help shorten or lengthen register lifetimes as appropriate. Also
2890 provide a hook for the target to tweak itself. */
2892 HAIFA_INLINE
static void
2893 adjust_priority (rtx prev
)
2895 /* ??? There used to be code here to try and estimate how an insn
2896 affected register lifetimes, but it did it by looking at REG_DEAD
2897 notes, which we removed in schedule_region. Nor did it try to
2898 take into account register pressure or anything useful like that.
2900 Revisit when we have a machine model to work with and not before. */
2902 if (targetm
.sched
.adjust_priority
)
2903 INSN_PRIORITY (prev
) =
2904 targetm
.sched
.adjust_priority (prev
, INSN_PRIORITY (prev
));
/* Advance DFA state STATE on one cycle.  */

void
advance_state (state_t state)
{
  if (targetm.sched.dfa_pre_advance_cycle)
    targetm.sched.dfa_pre_advance_cycle ();

  if (targetm.sched.dfa_pre_cycle_insn)
    state_transition (state,
                      targetm.sched.dfa_pre_cycle_insn ());

  state_transition (state, NULL);

  if (targetm.sched.dfa_post_cycle_insn)
    state_transition (state,
                      targetm.sched.dfa_post_cycle_insn ());

  if (targetm.sched.dfa_post_advance_cycle)
    targetm.sched.dfa_post_advance_cycle ();
}
/* Advance time on one cycle.  */
HAIFA_INLINE static void
advance_one_cycle (void)
{
  advance_state (curr_state);
  if (sched_verbose >= 6)
    fprintf (sched_dump, ";;\tAdvance the current state.\n");
}
2937 /* Update register pressure after scheduling INSN. */
2939 update_register_pressure (rtx insn
)
2941 struct reg_use_data
*use
;
2942 struct reg_set_data
*set
;
2944 gcc_checking_assert (!DEBUG_INSN_P (insn
));
2946 for (use
= INSN_REG_USE_LIST (insn
); use
!= NULL
; use
= use
->next_insn_use
)
2947 if (dying_use_p (use
))
2948 mark_regno_birth_or_death (curr_reg_live
, curr_reg_pressure
,
2950 for (set
= INSN_REG_SET_LIST (insn
); set
!= NULL
; set
= set
->next_insn_set
)
2951 mark_regno_birth_or_death (curr_reg_live
, curr_reg_pressure
,
2955 /* Set up or update (if UPDATE_P) max register pressure (see its
2956 meaning in sched-int.h::_haifa_insn_data) for all current BB insns
2957 after insn AFTER. */
2959 setup_insn_max_reg_pressure (rtx after
, bool update_p
)
2964 static int max_reg_pressure
[N_REG_CLASSES
];
2966 save_reg_pressure ();
2967 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
2968 max_reg_pressure
[ira_pressure_classes
[i
]]
2969 = curr_reg_pressure
[ira_pressure_classes
[i
]];
2970 for (insn
= NEXT_INSN (after
);
2971 insn
!= NULL_RTX
&& ! BARRIER_P (insn
)
2972 && BLOCK_FOR_INSN (insn
) == BLOCK_FOR_INSN (after
);
2973 insn
= NEXT_INSN (insn
))
2974 if (NONDEBUG_INSN_P (insn
))
2977 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
2979 p
= max_reg_pressure
[ira_pressure_classes
[i
]];
2980 if (INSN_MAX_REG_PRESSURE (insn
)[i
] != p
)
2983 INSN_MAX_REG_PRESSURE (insn
)[i
]
2984 = max_reg_pressure
[ira_pressure_classes
[i
]];
2987 if (update_p
&& eq_p
)
2989 update_register_pressure (insn
);
2990 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
2991 if (max_reg_pressure
[ira_pressure_classes
[i
]]
2992 < curr_reg_pressure
[ira_pressure_classes
[i
]])
2993 max_reg_pressure
[ira_pressure_classes
[i
]]
2994 = curr_reg_pressure
[ira_pressure_classes
[i
]];
2996 restore_reg_pressure ();
2999 /* Update the current register pressure after scheduling INSN. Also update
3000 the max register pressure for unscheduled insns of the current BB. */
3003 update_reg_and_insn_max_reg_pressure (rtx insn
)
3006 int before
[N_REG_CLASSES
];
3008 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
3009 before
[i
] = curr_reg_pressure
[ira_pressure_classes
[i
]];
3010 update_register_pressure (insn
);
3011 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
3012 if (curr_reg_pressure
[ira_pressure_classes
[i
]] != before
[i
])
3014 if (i
< ira_pressure_classes_num
)
3015 setup_insn_max_reg_pressure (insn
, true);
3018 /* Set up register pressure at the beginning of basic block BB whose
3019 insns start after insn AFTER. Also set up max register pressure
3020 for all insns of the basic block. */
3022 sched_setup_bb_reg_pressure_info (basic_block bb
, rtx after
)
3024 gcc_assert (sched_pressure
== SCHED_PRESSURE_WEIGHTED
);
3025 initiate_bb_reg_pressure_info (bb
);
3026 setup_insn_max_reg_pressure (after
, false);
3029 /* If doing predication while scheduling, verify whether INSN, which
3030 has just been scheduled, clobbers the conditions of any
3031 instructions that must be predicated in order to break their
3032 dependencies. If so, remove them from the queues so that they will
3033 only be scheduled once their control dependency is resolved. */
3036 check_clobbered_conditions (rtx insn
)
3041 if ((current_sched_info
->flags
& DO_PREDICATION
) == 0)
3044 find_all_hard_reg_sets (insn
, &t
);
3047 for (i
= 0; i
< ready
.n_ready
; i
++)
3049 rtx x
= ready_element (&ready
, i
);
3050 if (TODO_SPEC (x
) == DEP_CONTROL
&& cond_clobbered_p (x
, t
))
3052 ready_remove_insn (x
);
3056 for (i
= 0; i
<= max_insn_queue_index
; i
++)
3059 int q
= NEXT_Q_AFTER (q_ptr
, i
);
3062 for (link
= insn_queue
[q
]; link
; link
= XEXP (link
, 1))
3064 rtx x
= XEXP (link
, 0);
3065 if (TODO_SPEC (x
) == DEP_CONTROL
&& cond_clobbered_p (x
, t
))
3074 /* Return (in order):
3076 - positive if INSN adversely affects the pressure on one register class,
3079 - negative if INSN reduces the pressure on one register class
3081 - 0 if INSN doesn't affect the pressure on any register class. */
3084 model_classify_pressure (struct model_insn_info
*insn
)
3086 struct reg_pressure_data
*reg_pressure
;
3087 int death
[N_REG_CLASSES
];
3090 calculate_reg_deaths (insn
->insn
, death
);
3091 reg_pressure
= INSN_REG_PRESSURE (insn
->insn
);
3093 for (pci
= 0; pci
< ira_pressure_classes_num
; pci
++)
3095 cl
= ira_pressure_classes
[pci
];
3096 if (death
[cl
] < reg_pressure
[pci
].set_increase
)
3098 sum
+= reg_pressure
[pci
].set_increase
- death
[cl
];
3103 /* Return true if INSN1 should come before INSN2 in the model schedule. */
3106 model_order_p (struct model_insn_info
*insn1
, struct model_insn_info
*insn2
)
3108 unsigned int height1
, height2
;
3109 unsigned int priority1
, priority2
;
3111 /* Prefer instructions with a higher model priority. */
3112 if (insn1
->model_priority
!= insn2
->model_priority
)
3113 return insn1
->model_priority
> insn2
->model_priority
;
3115 /* Combine the length of the longest path of satisfied true dependencies
3116 that leads to each instruction (depth) with the length of the longest
3117 path of any dependencies that leads from the instruction (alap).
3118 Prefer instructions with the greatest combined length. If the combined
3119 lengths are equal, prefer instructions with the greatest depth.
3121 The idea is that, if we have a set S of "equal" instructions that each
3122 have ALAP value X, and we pick one such instruction I, any true-dependent
3123 successors of I that have ALAP value X - 1 should be preferred over S.
3124 This encourages the schedule to be "narrow" rather than "wide".
3125 However, if I is a low-priority instruction that we decided to
3126 schedule because of its model_classify_pressure, and if there
3127 is a set of higher-priority instructions T, the aforementioned
3128 successors of I should not have the edge over T. */
3129 height1
= insn1
->depth
+ insn1
->alap
;
3130 height2
= insn2
->depth
+ insn2
->alap
;
3131 if (height1
!= height2
)
3132 return height1
> height2
;
3133 if (insn1
->depth
!= insn2
->depth
)
3134 return insn1
->depth
> insn2
->depth
;
3136 /* We have no real preference between INSN1 and INSN2 as far as attempts
3137 to reduce pressure go. Prefer instructions with higher priorities. */
3138 priority1
= INSN_PRIORITY (insn1
->insn
);
3139 priority2
= INSN_PRIORITY (insn2
->insn
);
3140 if (priority1
!= priority2
)
3141 return priority1
> priority2
;
3143 /* Use the original rtl sequence as a tie-breaker. */
3144 return insn1
< insn2
;
3147 /* Add INSN to the model worklist immediately after PREV. Add it to the
3148 beginning of the list if PREV is null. */
3151 model_add_to_worklist_at (struct model_insn_info
*insn
,
3152 struct model_insn_info
*prev
)
3154 gcc_assert (QUEUE_INDEX (insn
->insn
) == QUEUE_NOWHERE
);
3155 QUEUE_INDEX (insn
->insn
) = QUEUE_READY
;
3160 insn
->next
= prev
->next
;
3165 insn
->next
= model_worklist
;
3166 model_worklist
= insn
;
3169 insn
->next
->prev
= insn
;
3172 /* Remove INSN from the model worklist. */
3175 model_remove_from_worklist (struct model_insn_info
*insn
)
3177 gcc_assert (QUEUE_INDEX (insn
->insn
) == QUEUE_READY
);
3178 QUEUE_INDEX (insn
->insn
) = QUEUE_NOWHERE
;
3181 insn
->prev
->next
= insn
->next
;
3183 model_worklist
= insn
->next
;
3185 insn
->next
->prev
= insn
->prev
;
3188 /* Add INSN to the model worklist. Start looking for a suitable position
3189 between neighbors PREV and NEXT, testing at most MAX_SCHED_READY_INSNS
3190 insns either side. A null PREV indicates the beginning of the list and
3191 a null NEXT indicates the end. */
3194 model_add_to_worklist (struct model_insn_info
*insn
,
3195 struct model_insn_info
*prev
,
3196 struct model_insn_info
*next
)
3200 count
= MAX_SCHED_READY_INSNS
;
3201 if (count
> 0 && prev
&& model_order_p (insn
, prev
))
3207 while (count
> 0 && prev
&& model_order_p (insn
, prev
));
3209 while (count
> 0 && next
&& model_order_p (next
, insn
))
3215 model_add_to_worklist_at (insn
, prev
);
3218 /* INSN may now have a higher priority (in the model_order_p sense)
3219 than before. Move it up the worklist if necessary. */
3222 model_promote_insn (struct model_insn_info
*insn
)
3224 struct model_insn_info
*prev
;
3228 count
= MAX_SCHED_READY_INSNS
;
3229 while (count
> 0 && prev
&& model_order_p (insn
, prev
))
3234 if (prev
!= insn
->prev
)
3236 model_remove_from_worklist (insn
);
3237 model_add_to_worklist_at (insn
, prev
);
/* Add INSN to the end of the model schedule.  */

static void
model_add_to_schedule (rtx insn)
{
  unsigned int point;

  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  point = model_schedule.length ();
  model_schedule.quick_push (insn);
  INSN_MODEL_INDEX (insn) = point + 1;
}
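/* Note: INSN_MODEL_INDEX is stored with a bias of 1 so that a value of 0
   means "not in the model schedule"; model_index above subtracts the bias
   and returns model_num_insns for such insns.  */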
3256 /* Analyze the instructions that are to be scheduled, setting up
3257 MODEL_INSN_INFO (...) and model_num_insns accordingly. Add ready
3258 instructions to model_worklist. */
3261 model_analyze_insns (void)
3263 rtx start
, end
, iter
;
3264 sd_iterator_def sd_it
;
3266 struct model_insn_info
*insn
, *con
;
3268 model_num_insns
= 0;
3269 start
= PREV_INSN (current_sched_info
->next_tail
);
3270 end
= current_sched_info
->prev_head
;
3271 for (iter
= start
; iter
!= end
; iter
= PREV_INSN (iter
))
3272 if (NONDEBUG_INSN_P (iter
))
3274 insn
= MODEL_INSN_INFO (iter
);
3276 FOR_EACH_DEP (iter
, SD_LIST_FORW
, sd_it
, dep
)
3278 con
= MODEL_INSN_INFO (DEP_CON (dep
));
3279 if (con
->insn
&& insn
->alap
< con
->alap
+ 1)
3280 insn
->alap
= con
->alap
+ 1;
3283 insn
->old_queue
= QUEUE_INDEX (iter
);
3284 QUEUE_INDEX (iter
) = QUEUE_NOWHERE
;
3286 insn
->unscheduled_preds
= dep_list_size (iter
, SD_LIST_HARD_BACK
);
3287 if (insn
->unscheduled_preds
== 0)
3288 model_add_to_worklist (insn
, NULL
, model_worklist
);
3294 /* The global state describes the register pressure at the start of the
3295 model schedule. Initialize GROUP accordingly. */
3298 model_init_pressure_group (struct model_pressure_group
*group
)
3302 for (pci
= 0; pci
< ira_pressure_classes_num
; pci
++)
3304 cl
= ira_pressure_classes
[pci
];
3305 group
->limits
[pci
].pressure
= curr_reg_pressure
[cl
];
3306 group
->limits
[pci
].point
= 0;
3308 /* Use index model_num_insns to record the state after the last
3309 instruction in the model schedule. */
3310 group
->model
= XNEWVEC (struct model_pressure_data
,
3311 (model_num_insns
+ 1) * ira_pressure_classes_num
);
3314 /* Record that MODEL_REF_PRESSURE (GROUP, POINT, PCI) is PRESSURE.
3315 Update the maximum pressure for the whole schedule. */
3318 model_record_pressure (struct model_pressure_group
*group
,
3319 int point
, int pci
, int pressure
)
3321 MODEL_REF_PRESSURE (group
, point
, pci
) = pressure
;
3322 if (group
->limits
[pci
].pressure
< pressure
)
3324 group
->limits
[pci
].pressure
= pressure
;
3325 group
->limits
[pci
].point
= point
;
3329 /* INSN has just been added to the end of the model schedule. Record its
3330 register-pressure information. */
3333 model_record_pressures (struct model_insn_info
*insn
)
3335 struct reg_pressure_data
*reg_pressure
;
3336 int point
, pci
, cl
, delta
;
3337 int death
[N_REG_CLASSES
];
3339 point
= model_index (insn
->insn
);
3340 if (sched_verbose
>= 2)
3344 fprintf (sched_dump
, "\n;;\tModel schedule:\n;;\n");
3345 fprintf (sched_dump
, ";;\t| idx insn | mpri hght dpth prio |\n");
3347 fprintf (sched_dump
, ";;\t| %3d %4d | %4d %4d %4d %4d | %-30s ",
3348 point
, INSN_UID (insn
->insn
), insn
->model_priority
,
3349 insn
->depth
+ insn
->alap
, insn
->depth
,
3350 INSN_PRIORITY (insn
->insn
),
3351 str_pattern_slim (PATTERN (insn
->insn
)));
3353 calculate_reg_deaths (insn
->insn
, death
);
3354 reg_pressure
= INSN_REG_PRESSURE (insn
->insn
);
3355 for (pci
= 0; pci
< ira_pressure_classes_num
; pci
++)
3357 cl
= ira_pressure_classes
[pci
];
3358 delta
= reg_pressure
[pci
].set_increase
- death
[cl
];
3359 if (sched_verbose
>= 2)
3360 fprintf (sched_dump
, " %s:[%d,%+d]", reg_class_names
[cl
],
3361 curr_reg_pressure
[cl
], delta
);
3362 model_record_pressure (&model_before_pressure
, point
, pci
,
3363 curr_reg_pressure
[cl
]);
3365 if (sched_verbose
>= 2)
3366 fprintf (sched_dump
, "\n");
3369 /* All instructions have been added to the model schedule. Record the
3370 final register pressure in GROUP and set up all MODEL_MAX_PRESSUREs. */
3373 model_record_final_pressures (struct model_pressure_group
*group
)
3375 int point
, pci
, max_pressure
, ref_pressure
, cl
;
3377 for (pci
= 0; pci
< ira_pressure_classes_num
; pci
++)
3379 /* Record the final pressure for this class. */
3380 cl
= ira_pressure_classes
[pci
];
3381 point
= model_num_insns
;
3382 ref_pressure
= curr_reg_pressure
[cl
];
3383 model_record_pressure (group
, point
, pci
, ref_pressure
);
3385 /* Record the original maximum pressure. */
3386 group
->limits
[pci
].orig_pressure
= group
->limits
[pci
].pressure
;
3388 /* Update the MODEL_MAX_PRESSURE for every point of the schedule. */
3389 max_pressure
= ref_pressure
;
3390 MODEL_MAX_PRESSURE (group
, point
, pci
) = max_pressure
;
3394 ref_pressure
= MODEL_REF_PRESSURE (group
, point
, pci
);
3395 max_pressure
= MAX (max_pressure
, ref_pressure
);
3396 MODEL_MAX_PRESSURE (group
, point
, pci
) = max_pressure
;
3401 /* Update all successors of INSN, given that INSN has just been scheduled. */
3404 model_add_successors_to_worklist (struct model_insn_info
*insn
)
3406 sd_iterator_def sd_it
;
3407 struct model_insn_info
*con
;
3410 FOR_EACH_DEP (insn
->insn
, SD_LIST_FORW
, sd_it
, dep
)
3412 con
= MODEL_INSN_INFO (DEP_CON (dep
));
3413 /* Ignore debug instructions, and instructions from other blocks. */
3416 con
->unscheduled_preds
--;
3418 /* Update the depth field of each true-dependent successor.
3419 Increasing the depth gives them a higher priority than
3421 if (DEP_TYPE (dep
) == REG_DEP_TRUE
&& con
->depth
< insn
->depth
+ 1)
3423 con
->depth
= insn
->depth
+ 1;
3424 if (QUEUE_INDEX (con
->insn
) == QUEUE_READY
)
3425 model_promote_insn (con
);
3428 /* If this is a true dependency, or if there are no remaining
3429 dependencies for CON (meaning that CON only had non-true
3430 dependencies), make sure that CON is on the worklist.
3431 We don't bother otherwise because it would tend to fill the
3432 worklist with a lot of low-priority instructions that are not
3433 yet ready to issue. */
3434 if ((con
->depth
> 0 || con
->unscheduled_preds
== 0)
3435 && QUEUE_INDEX (con
->insn
) == QUEUE_NOWHERE
)
3436 model_add_to_worklist (con
, insn
, insn
->next
);
3441 /* Give INSN a higher priority than any current instruction, then give
3442 unscheduled predecessors of INSN a higher priority still. If any of
3443 those predecessors are not on the model worklist, do the same for its
3444 predecessors, and so on. */
3447 model_promote_predecessors (struct model_insn_info
*insn
)
3449 struct model_insn_info
*pro
, *first
;
3450 sd_iterator_def sd_it
;
3453 if (sched_verbose
>= 7)
3454 fprintf (sched_dump
, ";;\t+--- priority of %d = %d, priority of",
3455 INSN_UID (insn
->insn
), model_next_priority
);
3456 insn
->model_priority
= model_next_priority
++;
3457 model_remove_from_worklist (insn
);
3458 model_add_to_worklist_at (insn
, NULL
);
3463 FOR_EACH_DEP (insn
->insn
, SD_LIST_HARD_BACK
, sd_it
, dep
)
3465 pro
= MODEL_INSN_INFO (DEP_PRO (dep
));
3466 /* The first test is to ignore debug instructions, and instructions
3467 from other blocks. */
3469 && pro
->model_priority
!= model_next_priority
3470 && QUEUE_INDEX (pro
->insn
) != QUEUE_SCHEDULED
)
3472 pro
->model_priority
= model_next_priority
;
3473 if (sched_verbose
>= 7)
3474 fprintf (sched_dump
, " %d", INSN_UID (pro
->insn
));
3475 if (QUEUE_INDEX (pro
->insn
) == QUEUE_READY
)
3477 /* PRO is already in the worklist, but it now has
3478 a higher priority than before. Move it to the
3479 appropriate place. */
3480 model_remove_from_worklist (pro
);
3481 model_add_to_worklist (pro
, NULL
, model_worklist
);
3485 /* PRO isn't in the worklist. Recursively process
3486 its predecessors until we find one that is. */
3497 if (sched_verbose
>= 7)
3498 fprintf (sched_dump
, " = %d\n", model_next_priority
);
3499 model_next_priority
++;
3502 /* Pick one instruction from model_worklist and process it. */
3505 model_choose_insn (void)
3507 struct model_insn_info
*insn
, *fallback
;
3510 if (sched_verbose
>= 7)
3512 fprintf (sched_dump
, ";;\t+--- worklist:\n");
3513 insn
= model_worklist
;
3514 count
= MAX_SCHED_READY_INSNS
;
3515 while (count
> 0 && insn
)
3517 fprintf (sched_dump
, ";;\t+--- %d [%d, %d, %d, %d]\n",
3518 INSN_UID (insn
->insn
), insn
->model_priority
,
3519 insn
->depth
+ insn
->alap
, insn
->depth
,
3520 INSN_PRIORITY (insn
->insn
));
3526 /* Look for a ready instruction whose model_classify_priority is zero
3527 or negative, picking the highest-priority one. Adding such an
3528 instruction to the schedule now should do no harm, and may actually
3531 Failing that, see whether there is an instruction with the highest
3532 extant model_priority that is not yet ready, but which would reduce
3533 pressure if it became ready. This is designed to catch cases like:
3535 (set (mem (reg R1)) (reg R2))
3537 where the instruction is the last remaining use of R1 and where the
3538 value of R2 is not yet available (or vice versa). The death of R1
3539 means that this instruction already reduces pressure. It is of
3540 course possible that the computation of R2 involves other registers
3541 that are hard to kill, but such cases are rare enough for this
3542 heuristic to be a win in general.
3544 Failing that, just pick the highest-priority instruction in the model worklist. */
3546 count
= MAX_SCHED_READY_INSNS
;
3547 insn
= model_worklist
;
3551 if (count
== 0 || !insn
)
3553 insn
= fallback
? fallback
: model_worklist
;
3556 if (insn
->unscheduled_preds
)
3558 if (model_worklist
->model_priority
== insn
->model_priority
3560 && model_classify_pressure (insn
) < 0)
3565 if (model_classify_pressure (insn
) <= 0)
3572 if (sched_verbose
>= 7 && insn
!= model_worklist
)
3574 if (insn
->unscheduled_preds
)
3575 fprintf (sched_dump
, ";;\t+--- promoting insn %d, with dependencies\n",
3576 INSN_UID (insn
->insn
));
3578 fprintf (sched_dump
, ";;\t+--- promoting insn %d, which is ready\n",
3579 INSN_UID (insn
->insn
));
3581 if (insn
->unscheduled_preds
)
3582 /* INSN isn't yet ready to issue. Give all its predecessors the
3583 highest priority. */
3584 model_promote_predecessors (insn
);
3587 /* INSN is ready. Add it to the end of model_schedule and
3588 process its successors. */
3589 model_add_successors_to_worklist (insn
);
3590 model_remove_from_worklist (insn
);
3591 model_add_to_schedule (insn
->insn
);
3592 model_record_pressures (insn
);
3593 update_register_pressure (insn
->insn
);
3597 /* Restore all QUEUE_INDEXs to the values that they had before
3598 model_start_schedule was called. */
3601 model_reset_queue_indices (void)
3606 FOR_EACH_VEC_ELT (model_schedule
, i
, insn
)
3607 QUEUE_INDEX (insn
) = MODEL_INSN_INFO (insn
)->old_queue
;
3610 /* We have calculated the model schedule and spill costs. Print a summary
3614 model_dump_pressure_summary (void)
3618 fprintf (sched_dump
, ";; Pressure summary:");
3619 for (pci
= 0; pci
< ira_pressure_classes_num
; pci
++)
3621 cl
= ira_pressure_classes
[pci
];
3622 fprintf (sched_dump
, " %s:%d", reg_class_names
[cl
],
3623 model_before_pressure
.limits
[pci
].pressure
);
3625 fprintf (sched_dump
, "\n\n");
3628 /* Initialize the SCHED_PRESSURE_MODEL information for the current
3629 scheduling region. */
3632 model_start_schedule (void)
3636 model_next_priority
= 1;
3637 model_schedule
.create (sched_max_luid
);
3638 model_insns
= XCNEWVEC (struct model_insn_info
, sched_max_luid
);
3640 bb
= BLOCK_FOR_INSN (NEXT_INSN (current_sched_info
->prev_head
));
3641 initiate_reg_pressure_info (df_get_live_in (bb
));
3643 model_analyze_insns ();
3644 model_init_pressure_group (&model_before_pressure
);
3645 while (model_worklist
)
3646 model_choose_insn ();
3647 gcc_assert (model_num_insns
== (int) model_schedule
.length ());
3648 if (sched_verbose
>= 2)
3649 fprintf (sched_dump
, "\n");
3651 model_record_final_pressures (&model_before_pressure
);
3652 model_reset_queue_indices ();
3654 XDELETEVEC (model_insns
);
3656 model_curr_point
= 0;
3657 initiate_reg_pressure_info (df_get_live_in (bb
));
3658 if (sched_verbose
>= 1)
3659 model_dump_pressure_summary ();
/* Free the information associated with GROUP.  */

static void
model_finalize_pressure_group (struct model_pressure_group *group)
{
  XDELETEVEC (group->model);
}

/* Free the information created by model_start_schedule.  */

static void
model_end_schedule (void)
{
  model_finalize_pressure_group (&model_before_pressure);
  model_schedule.release ();
}
/* A structure that holds local state for the loop in schedule_block.  */
struct sched_block_state
{
  /* True if no real insns have been scheduled in the current cycle.  */
  bool first_cycle_insn_p;
  /* True if a shadow insn has been scheduled in the current cycle, which
     means that no more normal insns can be issued.  */
  bool shadows_only_p;
  /* True if we're winding down a modulo schedule, which means that we only
     issue insns with INSN_EXACT_TICK set.  */
  bool modulo_epilogue;
  /* Initialized with the machine's issue rate every cycle, and updated
     by calls to the variable_issue hook.  */
  int can_issue_more;
};
3695 /* INSN is the "currently executing insn". Launch each insn which was
3696 waiting on INSN. READY is the ready list which contains the insns
3697 that are ready to fire. CLOCK is the current cycle. The function
3698 returns necessary cycle advance after issuing the insn (it is not
3699 zero for insns in a schedule group). */
3702 schedule_insn (rtx insn
)
3704 sd_iterator_def sd_it
;
3709 if (sched_verbose
>= 1)
3711 struct reg_pressure_data
*pressure_info
;
3712 fprintf (sched_dump
, ";;\t%3i--> %s%-40s:",
3713 clock_var
, (*current_sched_info
->print_insn
) (insn
, 1),
3714 str_pattern_slim (PATTERN (insn
)));
3716 if (recog_memoized (insn
) < 0)
3717 fprintf (sched_dump
, "nothing");
3719 print_reservation (sched_dump
, insn
);
3720 pressure_info
= INSN_REG_PRESSURE (insn
);
3721 if (pressure_info
!= NULL
)
3723 fputc (':', sched_dump
);
3724 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
3725 fprintf (sched_dump
, "%s%+d(%d)",
3726 reg_class_names
[ira_pressure_classes
[i
]],
3727 pressure_info
[i
].set_increase
, pressure_info
[i
].change
);
3729 if (sched_pressure
== SCHED_PRESSURE_MODEL
3730 && model_curr_point
< model_num_insns
3731 && model_index (insn
) == model_curr_point
)
3732 fprintf (sched_dump
, ":model %d", model_curr_point
);
3733 fputc ('\n', sched_dump
);
3736 if (sched_pressure
== SCHED_PRESSURE_WEIGHTED
&& !DEBUG_INSN_P (insn
))
3737 update_reg_and_insn_max_reg_pressure (insn
);
3739 /* Scheduling instruction should have all its dependencies resolved and
3740 should have been removed from the ready list. */
3741 gcc_assert (sd_lists_empty_p (insn
, SD_LIST_HARD_BACK
));
3743 /* Reset debug insns invalidated by moving this insn. */
3744 if (MAY_HAVE_DEBUG_INSNS
&& !DEBUG_INSN_P (insn
))
3745 for (sd_it
= sd_iterator_start (insn
, SD_LIST_BACK
);
3746 sd_iterator_cond (&sd_it
, &dep
);)
3748 rtx dbg
= DEP_PRO (dep
);
3749 struct reg_use_data
*use
, *next
;
3751 if (DEP_STATUS (dep
) & DEP_CANCELLED
)
3753 sd_iterator_next (&sd_it
);
3757 gcc_assert (DEBUG_INSN_P (dbg
));
3759 if (sched_verbose
>= 6)
3760 fprintf (sched_dump
, ";;\t\tresetting: debug insn %d\n",
3763 /* ??? Rather than resetting the debug insn, we might be able
3764 to emit a debug temp before the just-scheduled insn, but
3765 this would involve checking that the expression at the
3766 point of the debug insn is equivalent to the expression
3767 before the just-scheduled insn. They might not be: the
3768 expression in the debug insn may depend on other insns not
3769 yet scheduled that set MEMs, REGs or even other debug
3770 insns. It's not clear that attempting to preserve debug
3771 information in these cases is worth the effort, given how
3772 uncommon these resets are and the likelihood that the debug
3773 temps introduced won't survive the schedule change. */
3774 INSN_VAR_LOCATION_LOC (dbg
) = gen_rtx_UNKNOWN_VAR_LOC ();
3775 df_insn_rescan (dbg
);
3777 /* Unknown location doesn't use any registers. */
3778 for (use
= INSN_REG_USE_LIST (dbg
); use
!= NULL
; use
= next
)
3780 struct reg_use_data
*prev
= use
;
3782 /* Remove use from the cyclic next_regno_use chain first. */
3783 while (prev
->next_regno_use
!= use
)
3784 prev
= prev
->next_regno_use
;
3785 prev
->next_regno_use
= use
->next_regno_use
;
3786 next
= use
->next_insn_use
;
3789 INSN_REG_USE_LIST (dbg
) = NULL
;
3791 /* We delete rather than resolve these deps, otherwise we
3792 crash in sched_free_deps(), because forward deps are
3793 expected to be released before backward deps. */
3794 sd_delete_dep (sd_it
);
3797 gcc_assert (QUEUE_INDEX (insn
) == QUEUE_NOWHERE
);
3798 QUEUE_INDEX (insn
) = QUEUE_SCHEDULED
;
3800 if (sched_pressure
== SCHED_PRESSURE_MODEL
3801 && model_curr_point
< model_num_insns
3802 && NONDEBUG_INSN_P (insn
))
3804 if (model_index (insn
) == model_curr_point
)
3807 while (model_curr_point
< model_num_insns
3808 && (QUEUE_INDEX (MODEL_INSN (model_curr_point
))
3809 == QUEUE_SCHEDULED
));
3811 model_recompute (insn
);
3812 model_update_limit_points ();
3813 update_register_pressure (insn
);
3814 if (sched_verbose
>= 2)
3815 print_curr_reg_pressure ();
3818 gcc_assert (INSN_TICK (insn
) >= MIN_TICK
);
3819 if (INSN_TICK (insn
) > clock_var
)
3820 /* INSN has been prematurely moved from the queue to the ready list.
3821 This is possible only if following flag is set. */
3822 gcc_assert (flag_sched_stalled_insns
);
3824 /* ??? Probably, if INSN is scheduled prematurely, we should leave
3825 INSN_TICK untouched. This is a machine-dependent issue, actually. */
3826 INSN_TICK (insn
) = clock_var
;
3828 check_clobbered_conditions (insn
);
  /* Update dependent instructions.  First, see if by scheduling this insn
     now we broke a dependence in a way that requires us to change another
     insn.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
       sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
    {
      struct dep_replacement *desc = DEP_REPLACE (dep);
      rtx pro = DEP_PRO (dep);
      if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED
          && desc != NULL && desc->insn == pro)
        apply_replacement (dep, false);
    }

  /* Go through and resolve forward dependencies.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
       sd_iterator_cond (&sd_it, &dep);)
    {
      rtx next = DEP_CON (dep);
      bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;

      /* Resolve the dependence between INSN and NEXT.
         sd_resolve_dep () moves current dep to another list thus
         advancing the iterator.  */
      sd_resolve_dep (sd_it);

      if (cancelled)
        {
          if (must_restore_pattern_p (next, dep))
            restore_pattern (dep, false);
          continue;
        }

      /* Don't bother trying to mark next as ready if insn is a debug
         insn.  If insn is the last hard dependency, it will have
         already been discounted.  */
      if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
        continue;

      if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
        {
          int effective_cost;

          effective_cost = try_ready (next);

          if (effective_cost >= 0
              && SCHED_GROUP_P (next)
              && advance < effective_cost)
            advance = effective_cost;
        }
      else
        /* Check always has only one forward dependence (to the first insn in
           the recovery block), therefore, this will be executed only once.  */
        {
          gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
          fix_recovery_deps (RECOVERY_BLOCK (insn));
        }
    }

  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (issue_rate > 1
      && GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER
      && !DEBUG_INSN_P (insn))
    {
      if (reload_completed)
        PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);

      last_clock_var = clock_var;
    }

  return advance;
}
/* Functions for handling of notes.  */

/* Add note list that ends on FROM_END to the end of TO_ENDP.  */
void
concat_note_lists (rtx from_end, rtx *to_endp)
{
  rtx from_start;

  /* It's easy when have nothing to concat.  */
  if (from_end == NULL)
    return;

  /* It's also easy when destination is empty.  */
  if (*to_endp == NULL)
    {
      *to_endp = from_end;
      return;
    }

  from_start = from_end;
  while (PREV_INSN (from_start) != NULL)
    from_start = PREV_INSN (from_start);

  PREV_INSN (from_start) = *to_endp;
  NEXT_INSN (*to_endp) = from_start;
  *to_endp = from_end;
}

/* Delete notes between HEAD and TAIL and put them in the chain
   of notes ended by NOTE_LIST.  */
void
remove_notes (rtx head, rtx tail)
{
  rtx next_tail, insn, next;

  note_list = 0;
  if (head == tail && !INSN_P (head))
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = next)
    {
      next = NEXT_INSN (insn);
      if (!NOTE_P (insn))
        continue;

      switch (NOTE_KIND (insn))
        {
        case NOTE_INSN_BASIC_BLOCK:
          continue;

        case NOTE_INSN_EPILOGUE_BEG:
          if (insn != tail)
            {
              remove_insn (insn);
              add_reg_note (next, REG_SAVE_NOTE,
                            GEN_INT (NOTE_INSN_EPILOGUE_BEG));
              break;
            }
          /* FALLTHRU */

        default:
          remove_insn (insn);

          /* Add the note to list that ends at NOTE_LIST.  */
          PREV_INSN (insn) = note_list;
          NEXT_INSN (insn) = NULL_RTX;
          if (note_list)
            NEXT_INSN (note_list) = insn;
          note_list = insn;
          break;
        }

      gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
    }
}
/* A structure to record enough data to allow us to backtrack the scheduler to
   a previous state.  */
struct haifa_saved_data
{
  /* Next entry on the list.  */
  struct haifa_saved_data *next;

  /* Backtracking is associated with scheduling insns that have delay slots.
     DELAY_PAIR points to the structure that contains the insns involved, and
     the number of cycles between them.  */
  struct delay_pair *delay_pair;

  /* Data used by the frontend (e.g. sched-ebb or sched-rgn).  */
  void *fe_saved_data;
  /* Data used by the backend.  */
  void *be_saved_data;

  /* Copies of global state.  */
  int clock_var, last_clock_var;
  struct ready_list ready;
  state_t curr_state;

  rtx last_scheduled_insn;
  rtx last_nondebug_scheduled_insn;
  int cycle_issued_insns;

  /* Copies of state used in the inner loop of schedule_block.  */
  struct sched_block_state sched_block;

  /* We don't need to save q_ptr, as its value is arbitrary and we can set it
     to 0 when restoring.  */
  int q_size;
  rtx *insn_queue;

  /* Describe pattern replacements that occurred since this backtrack point
     was queued.  */
  vec<dep_t> replacement_deps;
  vec<int> replace_apply;

  /* A copy of the next-cycle replacement vectors at the time of the backtrack
     point.  */
  vec<dep_t> next_cycle_deps;
  vec<int> next_cycle_apply;
};

/* A record, in reverse order, of all scheduled insns which have delay slots
   and may require backtracking.  */
static struct haifa_saved_data *backtrack_queue;
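
/* Illustrative sketch only: the queue above is a simple LIFO, so the most
   recently created save point is always inspected (and restored or
   discarded) first.  Counting the live entries, for example, is just a
   walk along NEXT:

     int n_saves = 0;
     struct haifa_saved_data *s;
     for (s = backtrack_queue; s != NULL; s = s->next)
       n_saves++;

   Nothing in the scheduler needs such a count; it is shown only to make
   the list discipline explicit.  */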
/* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
   to SET_P.  */
static void
mark_backtrack_feeds (rtx insn, int set_p)
{
  sd_iterator_def sd_it;
  dep_t dep;
  FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
    {
      FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
    }
}

/* Save the current scheduler state so that we can backtrack to it
   later if necessary.  PAIR gives the insns that make it necessary to
   save this point.  SCHED_BLOCK is the local state of schedule_block
   that need to be saved.  */
static void
save_backtrack_point (struct delay_pair *pair,
                      struct sched_block_state sched_block)
{
  int i;
  struct haifa_saved_data *save = XNEW (struct haifa_saved_data);

  save->curr_state = xmalloc (dfa_state_size);
  memcpy (save->curr_state, curr_state, dfa_state_size);

  save->ready.first = ready.first;
  save->ready.n_ready = ready.n_ready;
  save->ready.n_debug = ready.n_debug;
  save->ready.veclen = ready.veclen;
  save->ready.vec = XNEWVEC (rtx, ready.veclen);
  memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));

  save->insn_queue = XNEWVEC (rtx, max_insn_queue_index + 1);
  save->q_size = q_size;
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      int q = NEXT_Q_AFTER (q_ptr, i);
      save->insn_queue[i] = copy_INSN_LIST (insn_queue[q]);
    }

  save->clock_var = clock_var;
  save->last_clock_var = last_clock_var;
  save->cycle_issued_insns = cycle_issued_insns;
  save->last_scheduled_insn = last_scheduled_insn;
  save->last_nondebug_scheduled_insn = last_nondebug_scheduled_insn;

  save->sched_block = sched_block;

  save->replacement_deps.create (0);
  save->replace_apply.create (0);
  save->next_cycle_deps = next_cycle_replace_deps.copy ();
  save->next_cycle_apply = next_cycle_apply.copy ();

  if (current_sched_info->save_state)
    save->fe_saved_data = (*current_sched_info->save_state) ();

  if (targetm.sched.alloc_sched_context)
    {
      save->be_saved_data = targetm.sched.alloc_sched_context ();
      targetm.sched.init_sched_context (save->be_saved_data, false);
    }
  else
    save->be_saved_data = NULL;

  save->delay_pair = pair;

  save->next = backtrack_queue;
  backtrack_queue = save;

  while (pair)
    {
      mark_backtrack_feeds (pair->i2, 1);
      INSN_TICK (pair->i2) = INVALID_TICK;
      INSN_EXACT_TICK (pair->i2) = clock_var + pair_delay (pair);
      SHADOW_P (pair->i2) = pair->stages == 0;
      pair = pair->next_same_i1;
    }
}
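
/* Hedged usage sketch: save_backtrack_point is only reached for insns that
   were registered as the I1 of a delay pair.  A back end with, say, a
   two-cycle load shadow would record the pair before scheduling roughly as

     record_delay_slot_pair (load_insn, shadow_insn, 2, 0);

   where the insn variables are hypothetical and the cycles/stages arguments
   mirror the delay_pair fields used above.  Scheduling LOAD_INSN then
   creates a save point, and INSN_EXACT_TICK of SHADOW_INSN is pinned to
   clock_var + pair_delay.  */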
/* Walk the ready list and all queues.  If any insns have unresolved backwards
   dependencies, these must be cancelled deps, broken by predication.  Set or
   clear (depending on SET) the DEP_CANCELLED bit in DEP_STATUS.  */

static void
toggle_cancelled_flags (bool set)
{
  int i;
  sd_iterator_def sd_it;
  dep_t dep;
  rtx link;

  if (ready.n_ready > 0)
    {
      rtx *first = ready_lastpos (&ready);
      for (i = 0; i < ready.n_ready; i++)
        FOR_EACH_DEP (first[i], SD_LIST_BACK, sd_it, dep)
          if (!DEBUG_INSN_P (DEP_PRO (dep)))
            {
              if (set)
                DEP_STATUS (dep) |= DEP_CANCELLED;
              else
                DEP_STATUS (dep) &= ~DEP_CANCELLED;
            }
    }
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      int q = NEXT_Q_AFTER (q_ptr, i);

      for (link = insn_queue[q]; link; link = XEXP (link, 1))
        {
          rtx insn = XEXP (link, 0);
          FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
            if (!DEBUG_INSN_P (DEP_PRO (dep)))
              {
                if (set)
                  DEP_STATUS (dep) |= DEP_CANCELLED;
                else
                  DEP_STATUS (dep) &= ~DEP_CANCELLED;
              }
        }
    }
}
/* Undo the replacements that have occurred after backtrack point SAVE
   was placed.  */
static void
undo_replacements_for_backtrack (struct haifa_saved_data *save)
{
  while (!save->replacement_deps.is_empty ())
    {
      dep_t dep = save->replacement_deps.pop ();
      int apply_p = save->replace_apply.pop ();

      if (apply_p)
        restore_pattern (dep, true);
      else
        apply_replacement (dep, true);
    }
  save->replacement_deps.release ();
  save->replace_apply.release ();
}
/* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
   Restore their dependencies to an unresolved state, and mark them as
   queued nowhere.  */

static void
unschedule_insns_until (rtx insn)
{
  vec<rtx> recompute_vec = vNULL;

  /* Make two passes over the insns to be unscheduled.  First, we clear out
     dependencies and other trivial bookkeeping.  */
  for (;;)
    {
      rtx last;
      sd_iterator_def sd_it;
      dep_t dep;

      last = scheduled_insns.pop ();

      /* This will be changed by restore_backtrack_point if the insn is in
         any queue.  */
      QUEUE_INDEX (last) = QUEUE_NOWHERE;
      INSN_TICK (last) = INVALID_TICK;

      if (modulo_ii > 0 && INSN_UID (last) < modulo_iter0_max_uid)
        modulo_insns_scheduled--;

      for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
           sd_iterator_cond (&sd_it, &dep);)
        {
          rtx con = DEP_CON (dep);
          sd_unresolve_dep (sd_it);
          if (!MUST_RECOMPUTE_SPEC_P (con))
            {
              MUST_RECOMPUTE_SPEC_P (con) = 1;
              recompute_vec.safe_push (con);
            }
        }

      if (last == insn)
        break;
    }

  /* A second pass, to update ready and speculation status for insns
     depending on the unscheduled ones.  The first pass must have
     popped the scheduled_insns vector up to the point where we
     restart scheduling, as recompute_todo_spec requires it to be
     up-to-date.  */
  while (!recompute_vec.is_empty ())
    {
      rtx con;

      con = recompute_vec.pop ();
      MUST_RECOMPUTE_SPEC_P (con) = 0;
      if (!sd_lists_empty_p (con, SD_LIST_HARD_BACK))
        {
          TODO_SPEC (con) = HARD_DEP;
          INSN_TICK (con) = INVALID_TICK;
          if (PREDICATED_PAT (con) != NULL_RTX)
            haifa_change_pattern (con, ORIG_PAT (con));
        }
      else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
        TODO_SPEC (con) = recompute_todo_spec (con, true);
    }
  recompute_vec.release ();
}
/* Restore scheduler state from the topmost entry on the backtracking queue.
   PSCHED_BLOCK_P points to the local data of schedule_block that we must
   overwrite with the saved data.
   The caller must already have called unschedule_insns_until.  */

static void
restore_last_backtrack_point (struct sched_block_state *psched_block)
{
  rtx link;
  int i;
  struct haifa_saved_data *save = backtrack_queue;

  backtrack_queue = save->next;

  if (current_sched_info->restore_state)
    (*current_sched_info->restore_state) (save->fe_saved_data);

  if (targetm.sched.alloc_sched_context)
    {
      targetm.sched.set_sched_context (save->be_saved_data);
      targetm.sched.free_sched_context (save->be_saved_data);
    }

  /* Do this first since it clobbers INSN_TICK of the involved
     instructions.  */
  undo_replacements_for_backtrack (save);

  /* Clear the QUEUE_INDEX of everything in the ready list or one
     of the queues.  */
  if (ready.n_ready > 0)
    {
      rtx *first = ready_lastpos (&ready);
      for (i = 0; i < ready.n_ready; i++)
        {
          rtx insn = first[i];
          QUEUE_INDEX (insn) = QUEUE_NOWHERE;
          INSN_TICK (insn) = INVALID_TICK;
        }
    }
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      int q = NEXT_Q_AFTER (q_ptr, i);

      for (link = insn_queue[q]; link; link = XEXP (link, 1))
        {
          rtx x = XEXP (link, 0);
          QUEUE_INDEX (x) = QUEUE_NOWHERE;
          INSN_TICK (x) = INVALID_TICK;
        }
      free_INSN_LIST_list (&insn_queue[q]);
    }

  free (ready.vec);
  ready = save->ready;

  if (ready.n_ready > 0)
    {
      rtx *first = ready_lastpos (&ready);
      for (i = 0; i < ready.n_ready; i++)
        {
          rtx insn = first[i];
          QUEUE_INDEX (insn) = QUEUE_READY;
          TODO_SPEC (insn) = recompute_todo_spec (insn, true);
          INSN_TICK (insn) = save->clock_var;
        }
    }

  q_ptr = 0;
  q_size = save->q_size;
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      int q = NEXT_Q_AFTER (q_ptr, i);

      insn_queue[q] = save->insn_queue[q];

      for (link = insn_queue[q]; link; link = XEXP (link, 1))
        {
          rtx x = XEXP (link, 0);
          QUEUE_INDEX (x) = i;
          TODO_SPEC (x) = recompute_todo_spec (x, true);
          INSN_TICK (x) = save->clock_var + i;
        }
    }
  free (save->insn_queue);

  toggle_cancelled_flags (true);

  clock_var = save->clock_var;
  last_clock_var = save->last_clock_var;
  cycle_issued_insns = save->cycle_issued_insns;
  last_scheduled_insn = save->last_scheduled_insn;
  last_nondebug_scheduled_insn = save->last_nondebug_scheduled_insn;

  *psched_block = save->sched_block;

  memcpy (curr_state, save->curr_state, dfa_state_size);
  free (save->curr_state);

  mark_backtrack_feeds (save->delay_pair->i2, 0);

  gcc_assert (next_cycle_replace_deps.is_empty ());
  next_cycle_replace_deps = save->next_cycle_deps.copy ();
  next_cycle_apply = save->next_cycle_apply.copy ();

  free (save);

  for (save = backtrack_queue; save; save = save->next)
    {
      mark_backtrack_feeds (save->delay_pair->i2, 1);
    }
}
/* Discard all data associated with the topmost entry in the backtrack
   queue.  If RESET_TICK is false, we just want to free the data.  If true,
   we are doing this because we discovered a reason to backtrack.  In the
   latter case, also reset the INSN_TICK for the shadow insn.  */
static void
free_topmost_backtrack_point (bool reset_tick)
{
  struct haifa_saved_data *save = backtrack_queue;
  int i;

  backtrack_queue = save->next;

  if (reset_tick)
    {
      struct delay_pair *pair = save->delay_pair;
      while (pair)
        {
          INSN_TICK (pair->i2) = INVALID_TICK;
          INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
          pair = pair->next_same_i1;
        }
      undo_replacements_for_backtrack (save);
    }
  else
    {
      save->replacement_deps.release ();
      save->replace_apply.release ();
    }

  if (targetm.sched.free_sched_context)
    targetm.sched.free_sched_context (save->be_saved_data);
  if (current_sched_info->restore_state)
    free (save->fe_saved_data);
  for (i = 0; i <= max_insn_queue_index; i++)
    free_INSN_LIST_list (&save->insn_queue[i]);
  free (save->insn_queue);
  free (save->curr_state);
  free (save->ready.vec);
  free (save);
}

/* Free the entire backtrack queue.  */
static void
free_backtrack_queue (void)
{
  while (backtrack_queue)
    free_topmost_backtrack_point (false);
}
/* Apply a replacement described by DESC.  If IMMEDIATELY is false, we
   may have to postpone the replacement until the start of the next cycle,
   at which point we will be called again with IMMEDIATELY true.  This is
   only done for machines which have instruction packets with explicit
   parallelism however.  */
static void
apply_replacement (dep_t dep, bool immediately)
{
  struct dep_replacement *desc = DEP_REPLACE (dep);
  if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
    {
      next_cycle_replace_deps.safe_push (dep);
      next_cycle_apply.safe_push (1);
    }
  else
    {
      bool success;

      if (QUEUE_INDEX (desc->insn) == QUEUE_SCHEDULED)
        return;

      if (sched_verbose >= 5)
        fprintf (sched_dump, "applying replacement for insn %d\n",
                 INSN_UID (desc->insn));

      success = validate_change (desc->insn, desc->loc, desc->newval, 0);
      gcc_assert (success);

      update_insn_after_change (desc->insn);
      if ((TODO_SPEC (desc->insn) & (HARD_DEP | DEP_POSTPONED)) == 0)
        fix_tick_ready (desc->insn);

      if (backtrack_queue != NULL)
        {
          backtrack_queue->replacement_deps.safe_push (dep);
          backtrack_queue->replace_apply.safe_push (1);
        }
    }
}
/* We have determined that a pattern involved in DEP must be restored.
   If IMMEDIATELY is false, we may have to postpone the replacement
   until the start of the next cycle, at which point we will be called
   again with IMMEDIATELY true.  */
static void
restore_pattern (dep_t dep, bool immediately)
{
  rtx next = DEP_CON (dep);
  int tick = INSN_TICK (next);

  /* If we already scheduled the insn, the modified version is
     correct.  */
  if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
    return;

  if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
    {
      next_cycle_replace_deps.safe_push (dep);
      next_cycle_apply.safe_push (0);
      return;
    }

  if (DEP_TYPE (dep) == REG_DEP_CONTROL)
    {
      if (sched_verbose >= 5)
        fprintf (sched_dump, "restoring pattern for insn %d\n",
                 INSN_UID (next));
      haifa_change_pattern (next, ORIG_PAT (next));
    }
  else
    {
      struct dep_replacement *desc = DEP_REPLACE (dep);
      bool success;

      if (sched_verbose >= 5)
        fprintf (sched_dump, "restoring pattern for insn %d\n",
                 INSN_UID (desc->insn));
      tick = INSN_TICK (desc->insn);

      success = validate_change (desc->insn, desc->loc, desc->orig, 0);
      gcc_assert (success);
      update_insn_after_change (desc->insn);
      if (backtrack_queue != NULL)
        {
          backtrack_queue->replacement_deps.safe_push (dep);
          backtrack_queue->replace_apply.safe_push (0);
        }
    }
  INSN_TICK (next) = tick;
  if (TODO_SPEC (next) == DEP_POSTPONED)
    {
      if (sd_lists_empty_p (next, SD_LIST_BACK))
        TODO_SPEC (next) = 0;
      else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
        TODO_SPEC (next) = HARD_DEP;
    }
}
/* Perform pattern replacements that were queued up until the next
   cycle.  */
static void
perform_replacements_new_cycle (void)
{
  int i;
  dep_t dep;
  FOR_EACH_VEC_ELT (next_cycle_replace_deps, i, dep)
    {
      int apply_p = next_cycle_apply[i];
      if (apply_p)
        apply_replacement (dep, true);
      else
        restore_pattern (dep, true);
    }
  next_cycle_replace_deps.truncate (0);
  next_cycle_apply.truncate (0);
}
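
/* The two vectors drained above stay index-aligned: next_cycle_replace_deps[i]
   is acted upon according to next_cycle_apply[i] (nonzero = apply the
   replacement, zero = restore the original pattern).  A minimal sketch of
   the invariant, for illustration only:

     gcc_checking_assert (next_cycle_replace_deps.length ()
                          == next_cycle_apply.length ());
*/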
/* Compute INSN_TICK_ESTIMATE for INSN.  PROCESSED is a bitmap of
   instructions we've previously encountered, a set bit prevents
   recursion.  BUDGET is a limit on how far ahead we look, it is
   reduced on recursive calls.  Return true if we produced a good
   estimate, or false if we exceeded the budget.  */
static bool
estimate_insn_tick (bitmap processed, rtx insn, int budget)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int earliest = INSN_TICK (insn);

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx pro = DEP_PRO (dep);
      int t;

      if (DEP_STATUS (dep) & DEP_CANCELLED)
        continue;

      if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
        gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
      else
        {
          int cost = dep_cost (dep);
          if (cost >= budget)
            return false;
          if (!bitmap_bit_p (processed, INSN_LUID (pro)))
            {
              if (!estimate_insn_tick (processed, pro, budget - cost))
                return false;
            }
          gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
          t = INSN_TICK_ESTIMATE (pro) + cost;
          if (earliest == INVALID_TICK || t > earliest)
            earliest = t;
        }
    }
  bitmap_set_bit (processed, INSN_LUID (insn));
  INSN_TICK_ESTIMATE (insn) = earliest;
  return true;
}
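
/* Worked example with made-up numbers: if INSN depends on PRO through a
   dependence of cost 3 and PRO's own estimate is cycle 10, the loop above
   raises EARLIEST to at least 13; the recursive call sees BUDGET - 3, so a
   dependence chain whose accumulated costs exceed the caller's budget makes
   the function give up and return false instead of walking arbitrarily far
   back through the dependence graph.  */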
/* Examine the pair of insns in P, and estimate (optimistically, assuming
   infinite resources) the cycle in which the delayed shadow can be issued.
   Return the number of cycles that must pass before the real insn can be
   issued in order to meet this constraint.  */
static int
estimate_shadow_tick (struct delay_pair *p)
{
  bitmap_head processed;
  int t;
  bool cutoff;
  bitmap_initialize (&processed, 0);

  cutoff = !estimate_insn_tick (&processed, p->i2,
                                max_insn_queue_index + pair_delay (p));
  bitmap_clear (&processed);
  if (cutoff)
    return max_insn_queue_index;
  t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
  if (t > 0)
    return t;
  return 0;
}
/* If INSN has no unresolved backwards dependencies, add it to the schedule and
   recursively resolve all its forward dependencies.  */
static void
resolve_dependencies (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* Don't use sd_lists_empty_p; it ignores debug insns.  */
  if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (insn)) != NULL
      || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (insn)) != NULL)
    return;

  if (sched_verbose >= 4)
    fprintf (sched_dump, ";;\tquickly resolving %d\n", INSN_UID (insn));

  if (QUEUE_INDEX (insn) >= 0)
    queue_remove (insn);

  scheduled_insns.safe_push (insn);

  /* Update dependent instructions.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
       sd_iterator_cond (&sd_it, &dep);)
    {
      rtx next = DEP_CON (dep);

      if (sched_verbose >= 4)
        fprintf (sched_dump, ";;\t\tdep %d against %d\n", INSN_UID (insn),
                 INSN_UID (next));

      /* Resolve the dependence between INSN and NEXT.
         sd_resolve_dep () moves current dep to another list thus
         advancing the iterator.  */
      sd_resolve_dep (sd_it);

      if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
        resolve_dependencies (next);
      else
        /* Check always has only one forward dependence (to the first insn in
           the recovery block), therefore, this will be executed only once.  */
        gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
    }
}
/* Return the head and tail pointers of ebb starting at BEG and ending
   at END.  */
void
get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
{
  rtx beg_head = BB_HEAD (beg);
  rtx beg_tail = BB_END (beg);
  rtx end_head = BB_HEAD (end);
  rtx end_tail = BB_END (end);

  /* Don't include any notes or labels at the beginning of the BEG
     basic block, or notes at the end of the END basic blocks.  */

  if (LABEL_P (beg_head))
    beg_head = NEXT_INSN (beg_head);

  while (beg_head != beg_tail)
    if (NOTE_P (beg_head))
      beg_head = NEXT_INSN (beg_head);
    else if (DEBUG_INSN_P (beg_head))
      {
        rtx note, next;

        for (note = NEXT_INSN (beg_head);
             note != beg_tail;
             note = next)
          {
            next = NEXT_INSN (note);
            if (NOTE_P (note))
              {
                if (sched_verbose >= 9)
                  fprintf (sched_dump, "reorder %i\n", INSN_UID (note));

                reorder_insns_nobb (note, note, PREV_INSN (beg_head));

                if (BLOCK_FOR_INSN (note) != beg)
                  df_insn_change_bb (note, beg);
              }
            else if (!DEBUG_INSN_P (note))
              break;
          }

        break;
      }
    else
      break;

  *headp = beg_head;

  if (beg == end)
    end_head = beg_head;
  else if (LABEL_P (end_head))
    end_head = NEXT_INSN (end_head);

  while (end_head != end_tail)
    if (NOTE_P (end_tail))
      end_tail = PREV_INSN (end_tail);
    else if (DEBUG_INSN_P (end_tail))
      {
        rtx note, prev;

        for (note = PREV_INSN (end_tail);
             note != end_head;
             note = prev)
          {
            prev = PREV_INSN (note);
            if (NOTE_P (note))
              {
                if (sched_verbose >= 9)
                  fprintf (sched_dump, "reorder %i\n", INSN_UID (note));

                reorder_insns_nobb (note, note, end_tail);

                if (end_tail == BB_END (end))
                  BB_END (end) = note;

                if (BLOCK_FOR_INSN (note) != end)
                  df_insn_change_bb (note, end);
              }
            else if (!DEBUG_INSN_P (note))
              break;
          }

        break;
      }
    else
      break;

  *tailp = end_tail;
}
/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */

int
no_real_insns_p (const_rtx head, const_rtx tail)
{
  while (head != NEXT_INSN (tail))
    {
      if (!NOTE_P (head) && !LABEL_P (head))
        return 0;
      head = NEXT_INSN (head);
    }
  return 1;
}
/* Restore-other-notes: NOTE_LIST is the end of a chain of notes
   previously found among the insns.  Insert them just before HEAD.  */
rtx
restore_other_notes (rtx head, basic_block head_bb)
{
  if (note_list != 0)
    {
      rtx note_head = note_list;

      if (head)
        head_bb = BLOCK_FOR_INSN (head);
      else
        head = NEXT_INSN (bb_note (head_bb));

      while (PREV_INSN (note_head))
        {
          set_block_for_insn (note_head, head_bb);
          note_head = PREV_INSN (note_head);
        }
      /* In the above cycle we've missed this note.  */
      set_block_for_insn (note_head, head_bb);

      PREV_INSN (note_head) = PREV_INSN (head);
      NEXT_INSN (PREV_INSN (head)) = note_head;
      PREV_INSN (head) = note_list;
      NEXT_INSN (note_list) = head;

      if (BLOCK_FOR_INSN (head) != head_bb)
        BB_END (head_bb) = note_list;

      head = note_head;
    }

  return head;
}
/* When we know we are going to discard the schedule due to a failed attempt
   at modulo scheduling, undo all replacements.  */
static void
undo_all_replacements (void)
{
  rtx insn;
  int i;

  FOR_EACH_VEC_ELT (scheduled_insns, i, insn)
    {
      sd_iterator_def sd_it;
      dep_t dep;

      /* See if we must undo a replacement.  */
      for (sd_it = sd_iterator_start (insn, SD_LIST_RES_FORW);
           sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
        {
          struct dep_replacement *desc = DEP_REPLACE (dep);
          if (desc != NULL)
            validate_change (desc->insn, desc->loc, desc->orig, 0);
        }
    }
}
/* Move insns that became ready to fire from queue to ready list.  */

static void
queue_to_ready (struct ready_list *ready)
{
  rtx insn;
  rtx link;
  rtx skip_insn;

  q_ptr = NEXT_Q (q_ptr);

  if (dbg_cnt (sched_insn) == false)
    {
      /* If debug counter is activated do not requeue the first
         nonscheduled insn.  */
      skip_insn = nonscheduled_insns_begin;
      do
        {
          skip_insn = next_nonnote_nondebug_insn (skip_insn);
        }
      while (QUEUE_INDEX (skip_insn) == QUEUE_SCHEDULED);
    }
  else
    skip_insn = NULL_RTX;

  /* Add all pending insns that can be scheduled without stalls to the
     ready list.  */
  for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
    {
      insn = XEXP (link, 0);
      q_size -= 1;

      if (sched_verbose >= 2)
        fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
                 (*current_sched_info->print_insn) (insn, 0));

      /* If the ready list is full, delay the insn for 1 cycle.
         See the comment in schedule_block for the rationale.  */
      if (!reload_completed
          && (ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
              || (sched_pressure == SCHED_PRESSURE_MODEL
                  /* Limit pressure recalculations to MAX_SCHED_READY_INSNS
                     instructions too.  */
                  && model_index (insn) > (model_curr_point
                                           + MAX_SCHED_READY_INSNS)))
          && !(sched_pressure == SCHED_PRESSURE_MODEL
               && model_curr_point < model_num_insns
               /* Always allow the next model instruction to issue.  */
               && model_index (insn) == model_curr_point)
          && !SCHED_GROUP_P (insn)
          && insn != skip_insn)
        queue_insn (insn, 1, "ready full");
      else
        {
          ready_add (ready, insn, false);
          if (sched_verbose >= 2)
            fprintf (sched_dump, "moving to ready without stalls\n");
        }
    }
  free_INSN_LIST_list (&insn_queue[q_ptr]);

  /* If there are no ready insns, stall until one is ready and add all
     of the pending insns at that point to the ready list.  */
  if (ready->n_ready == 0)
    {
      int stalls;

      for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
        {
          if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
            {
              for (; link; link = XEXP (link, 1))
                {
                  insn = XEXP (link, 0);
                  q_size -= 1;

                  if (sched_verbose >= 2)
                    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
                             (*current_sched_info->print_insn) (insn, 0));

                  ready_add (ready, insn, false);
                  if (sched_verbose >= 2)
                    fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
                }
              free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);

              advance_one_cycle ();

              break;
            }

          advance_one_cycle ();
        }

      q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
      clock_var += stalls;
    }
}
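
/* The insn queue behaves as a circular buffer of max_insn_queue_index + 1
   cycle buckets: NEXT_Q (q_ptr) advances it by one cycle and
   NEXT_Q_AFTER (q_ptr, n) peeks n cycles ahead, wrapping around.  A trivial
   illustrative consequence (not a check the scheduler needs to make):

     gcc_checking_assert (NEXT_Q_AFTER (q_ptr, 0) == q_ptr);
*/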
/* Used by early_queue_to_ready.  Determines whether it is "ok" to
   prematurely move INSN from the queue to the ready list.  Currently,
   if a target defines the hook 'is_costly_dependence', this function
   uses the hook to check whether there exist any dependences which are
   considered costly by the target, between INSN and other insns that
   have already been scheduled.  Dependences are checked up to Y cycles
   back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows
   controlling this value.
   (Other considerations could be taken into account instead (or in
   addition) depending on user flags and target hooks.  */

static bool
ok_for_early_queue_removal (rtx insn)
{
  if (targetm.sched.is_costly_dependence)
    {
      rtx prev_insn;
      int n_cycles;
      int i = scheduled_insns.length ();
      for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
        {
          while (i-- > 0)
            {
              int cost;

              prev_insn = scheduled_insns[i];

              if (!NOTE_P (prev_insn))
                {
                  dep_t dep;

                  dep = sd_find_dep_between (prev_insn, insn, true);

                  if (dep != NULL)
                    {
                      cost = dep_cost (dep);

                      if (targetm.sched.is_costly_dependence (dep, cost,
                                flag_sched_stalled_insns_dep - n_cycles))
                        return false;
                    }
                }

              if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
                break;
            }

          if (i == 0)
            break;
        }
    }

  return true;
}
/* Remove insns from the queue, before they become "ready" with respect
   to FU latency considerations.  */

static int
early_queue_to_ready (state_t state, struct ready_list *ready)
{
  rtx insn;
  rtx link;
  rtx next_link;
  rtx prev_link;
  bool move_to_ready;
  int cost;
  state_t temp_state = alloca (dfa_state_size);
  int stalls;
  int insns_removed = 0;

  /*
     Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
     function:

     X == 0: There is no limit on how many queued insns can be removed
             prematurely.  (flag_sched_stalled_insns = -1).

     X >= 1: Only X queued insns can be removed prematurely in each
             invocation.  (flag_sched_stalled_insns = X).

     Otherwise: Early queue removal is disabled.
         (flag_sched_stalled_insns = 0)
  */

  if (! flag_sched_stalled_insns)
    return 0;

  for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
    {
      if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
        {
          if (sched_verbose > 6)
            fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);

          prev_link = 0;
          while (link)
            {
              next_link = XEXP (link, 1);
              insn = XEXP (link, 0);
              if (insn && sched_verbose > 6)
                print_rtl_single (sched_dump, insn);

              memcpy (temp_state, state, dfa_state_size);
              if (recog_memoized (insn) < 0)
                /* non-negative to indicate that it's not ready
                   to avoid infinite Q->R->Q->R... */
                cost = 0;
              else
                cost = state_transition (temp_state, insn);

              if (sched_verbose >= 6)
                fprintf (sched_dump, "transition cost = %d\n", cost);

              move_to_ready = false;
              if (cost < 0)
                {
                  move_to_ready = ok_for_early_queue_removal (insn);
                  if (move_to_ready == true)
                    {
                      /* move from Q to R */
                      q_size -= 1;
                      ready_add (ready, insn, false);

                      if (prev_link)
                        XEXP (prev_link, 1) = next_link;
                      else
                        insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;

                      free_INSN_LIST_node (link);

                      if (sched_verbose >= 2)
                        fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
                                 (*current_sched_info->print_insn) (insn, 0));

                      insns_removed++;
                      if (insns_removed == flag_sched_stalled_insns)
                        /* Remove no more than flag_sched_stalled_insns insns
                           from Q at a time.  */
                        return insns_removed;
                    }
                }

              if (move_to_ready == false)
                prev_link = link;

              link = next_link;
            } /* while link */
        } /* if link */

    } /* for stalls.. */

  return insns_removed;
}
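
/* For example, compiling with -fsched-stalled-insns=2 and
   -fsched-stalled-insns-dep=3 lets at most two queued insns be promoted per
   invocation of this function, after checking dependences against insns
   scheduled up to three dispatch groups back.  The particular numbers are
   only an illustration of the X/Y flags documented above.  */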
/* Print the ready list for debugging purposes.  Callable from debugger.  */

void
debug_ready_list (struct ready_list *ready)
{
  rtx *p;
  int i;

  if (ready->n_ready == 0)
    {
      fprintf (sched_dump, "\n");
      return;
    }

  p = ready_lastpos (ready);
  for (i = 0; i < ready->n_ready; i++)
    {
      fprintf (sched_dump, " %s:%d",
               (*current_sched_info->print_insn) (p[i], 0),
               INSN_LUID (p[i]));
      if (sched_pressure != SCHED_PRESSURE_NONE)
        fprintf (sched_dump, "(cost=%d",
                 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
      if (INSN_TICK (p[i]) > clock_var)
        fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
      if (sched_pressure != SCHED_PRESSURE_NONE)
        fprintf (sched_dump, ")");
    }
  fprintf (sched_dump, "\n");
}
/* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
   NOTEs.  This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
   replaces the epilogue note in the correct basic block.  */
static void
reemit_notes (rtx insn)
{
  rtx note, last = insn;

  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    {
      if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
        {
          enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));

          last = emit_note_before (note_type, last);
          remove_note (insn, note);
        }
    }
}
/* Move INSN.  Reemit notes if needed.  Update CFG, if needed.  */
static void
move_insn (rtx insn, rtx last, rtx nt)
{
  if (PREV_INSN (insn) != last)
    {
      basic_block bb;
      rtx note;
      int jump_p = 0;

      bb = BLOCK_FOR_INSN (insn);

      /* BB_HEAD is either LABEL or NOTE.  */
      gcc_assert (BB_HEAD (bb) != insn);

      if (BB_END (bb) == insn)
        /* If this is last instruction in BB, move end marker one
           instruction up.  */
        {
          /* Jumps are always placed at the end of basic block.  */
          jump_p = control_flow_insn_p (insn);

          gcc_assert (!jump_p
                      || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
                          && IS_SPECULATION_BRANCHY_CHECK_P (insn))
                      || (common_sched_info->sched_pass_id
                          == SCHED_EBB_PASS));

          gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);

          BB_END (bb) = PREV_INSN (insn);
        }

      gcc_assert (BB_END (bb) != last);

      if (jump_p)
        /* We move the block note along with jump.  */
        {
          gcc_assert (nt);

          note = NEXT_INSN (insn);
          while (NOTE_NOT_BB_P (note) && note != nt)
            note = NEXT_INSN (note);

          if (note != nt
              && (LABEL_P (note)
                  || BARRIER_P (note)))
            note = NEXT_INSN (note);

          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
        }
      else
        note = insn;

      NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
      PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);

      NEXT_INSN (note) = NEXT_INSN (last);
      PREV_INSN (NEXT_INSN (last)) = note;

      NEXT_INSN (last) = insn;
      PREV_INSN (insn) = last;

      bb = BLOCK_FOR_INSN (last);

      if (jump_p)
        {
          fix_jump_move (insn);

          if (BLOCK_FOR_INSN (insn) != bb)
            move_block_after_check (insn);

          gcc_assert (BB_END (bb) == last);
        }

      df_insn_change_bb (insn, bb);

      /* Update BB_END, if needed.  */
      if (BB_END (bb) == last)
        BB_END (bb) = insn;
    }

  SCHED_GROUP_P (insn) = 0;
}
/* Return true if scheduling INSN will finish current clock cycle.  */
static bool
insn_finishes_cycle_p (rtx insn)
{
  if (SCHED_GROUP_P (insn))
    /* After issuing INSN, rest of the sched_group will be forced to issue
       in order.  Don't make any plans for the rest of cycle.  */
    return true;

  /* Finishing the block will, apparently, finish the cycle.  */
  if (current_sched_info->insn_finishes_block_p
      && current_sched_info->insn_finishes_block_p (insn))
    return true;

  return false;
}
/* Define type for target data used in multipass scheduling.  */
#ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
# define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
#endif
typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;

/* The following structure describe an entry of the stack of choices.  */
struct choice_entry
{
  /* Ordinal number of the issued insn in the ready queue.  */
  int index;
  /* The number of the rest insns whose issues we should try.  */
  int rest;
  /* The number of issued essential insns.  */
  int n;
  /* State after issuing the insn.  */
  state_t state;
  /* Target-specific data.  */
  first_cycle_multipass_data_t target_data;
};

/* The following array is used to implement a stack of choices used in
   function max_issue.  */
static struct choice_entry *choice_stack;

/* This holds the value of the target dfa_lookahead hook.  */
int dfa_lookahead;

/* The following variable value is maximal number of tries of issuing
   insns for the first cycle multipass insn scheduling.  We define
   this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
   need this constraint if all real insns (with non-negative codes)
   had reservations because in this case the algorithm complexity is
   O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
   might be incomplete and such insn might occur.  For such
   descriptions, the complexity of algorithm (without the constraint)
   could achieve DFA_LOOKAHEAD ** N, where N is the queue length.  */
static int max_lookahead_tries;

/* The following value is value of hook
   `first_cycle_multipass_dfa_lookahead' at the last call of
   `max_issue'.  */
static int cached_first_cycle_multipass_dfa_lookahead = 0;

/* The following value is value of `issue_rate' at the last call of
   `sched_init'.  */
static int cached_issue_rate = 0;
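
/* Worked example with hypothetical parameters: for a target where
   dfa_lookahead == 4 and issue_rate == 2, max_issue below caps itself at
   100 * 4 * 4 == 1600 state transitions per call, which keeps the search
   bounded even when unrecognized insns would otherwise expose the
   DFA_LOOKAHEAD ** N worst case described above.  */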
/* The following function returns maximal (or close to maximal) number
   of insns which can be issued on the same cycle and one of which
   insns is insns with the best rank (the first insn in READY).  To
   make this function tries different samples of ready insns.  READY
   is current queue `ready'.  Global array READY_TRY reflects what
   insns are already issued in this try.  The function stops immediately,
   if it reached the such a solution, that all instruction can be issued.
   INDEX will contain index of the best insn in READY.  The following
   function is used only for first cycle multipass scheduling.

   PRIVILEGED_N >= 0

   This function expects recognized insns only.  All USEs,
   CLOBBERs, etc must be filtered elsewhere.  */
int
max_issue (struct ready_list *ready, int privileged_n, state_t state,
           bool first_cycle_insn_p, int *index)
{
  int n, i, all, n_ready, best, delay, tries_num;
  int more_issue;
  struct choice_entry *top;
  rtx insn;

  n_ready = ready->n_ready;
  gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
              && privileged_n <= n_ready);

  /* Init MAX_LOOKAHEAD_TRIES.  */
  if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
    {
      cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
      max_lookahead_tries = 100;
      for (i = 0; i < issue_rate; i++)
        max_lookahead_tries *= dfa_lookahead;
    }

  /* Init max_points.  */
  more_issue = issue_rate - cycle_issued_insns;
  gcc_assert (more_issue >= 0);

  /* The number of the issued insns in the best solution.  */
  best = 0;

  top = choice_stack;

  /* Set initial state of the search.  */
  memcpy (top->state, state, dfa_state_size);
  top->rest = dfa_lookahead;
  top->n = 0;
  if (targetm.sched.first_cycle_multipass_begin)
    targetm.sched.first_cycle_multipass_begin (&top->target_data,
                                               ready_try, n_ready,
                                               first_cycle_insn_p);

  /* Count the number of the insns to search among.  */
  for (all = i = 0; i < n_ready; i++)
    if (!ready_try[i])
      all++;

  /* I is the index of the insn to try next.  */
  i = 0;
  tries_num = 0;
  for (;;)
    {
      if (/* If we've reached a dead end or searched enough of what we have
             been asked...  */
          top->rest == 0
          /* or have nothing else to try...  */
          || i >= n_ready
          /* or should not issue more.  */
          || top->n >= more_issue)
        {
          /* ??? (... || i == n_ready).  */
          gcc_assert (i <= n_ready);

          /* We should not issue more than issue_rate instructions.  */
          gcc_assert (top->n <= more_issue);

          if (top == choice_stack)
            break;

          if (best < top - choice_stack)
            {
              if (privileged_n)
                {
                  n = privileged_n;
                  /* Try to find issued privileged insn.  */
                  while (n && !ready_try[--n])
                    ;
                }

              if (/* If all insns are equally good...  */
                  privileged_n == 0
                  /* Or a privileged insn will be issued.  */
                  || ready_try[n])
                {
                  /* Then we have a solution.  */
                  best = top - choice_stack;
                  /* This is the index of the insn issued first in this
                     solution.  */
                  *index = choice_stack[1].index;
                  if (top->n == more_issue || best == all)
                    break;
                }
            }

          /* Set ready-list index to point to the last insn
             ('i++' below will advance it to the next insn).  */
          i = top->index;

          /* Backtrack.  */
          ready_try[i] = 0;

          if (targetm.sched.first_cycle_multipass_backtrack)
            targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
                                                           ready_try, n_ready);

          top--;
          memcpy (state, top->state, dfa_state_size);
        }
      else if (!ready_try[i])
        {
          tries_num++;
          if (tries_num > max_lookahead_tries)
            break;
          insn = ready_element (ready, i);
          delay = state_transition (state, insn);
          if (delay < 0)
            {
              if (state_dead_lock_p (state)
                  || insn_finishes_cycle_p (insn))
                /* We won't issue any more instructions in the next
                   choice_state.  */
                top->rest = 0;
              else
                top->rest--;

              n = top->n;
              if (memcmp (top->state, state, dfa_state_size) != 0)
                n++;

              /* Advance to the next choice_entry.  */
              top++;
              /* Initialize it.  */
              top->rest = dfa_lookahead;
              top->index = i;
              top->n = n;
              memcpy (top->state, state, dfa_state_size);
              ready_try[i] = 1;

              if (targetm.sched.first_cycle_multipass_issue)
                targetm.sched.first_cycle_multipass_issue (&top->target_data,
                                                           ready_try, n_ready,
                                                           insn,
                                                           &((top - 1)
                                                             ->target_data));

              i = -1;
            }
        }

      /* Increase ready-list index.  */
      i++;
    }

  if (targetm.sched.first_cycle_multipass_end)
    targetm.sched.first_cycle_multipass_end (best != 0
                                             ? &choice_stack[1].target_data
                                             : NULL);

  /* Restore the original state of the DFA.  */
  memcpy (state, choice_stack->state, dfa_state_size);

  return best;
}
/* The following function chooses insn from READY and modifies
   READY.  The following function is used only for first
   cycle multipass scheduling.
   Return:
   -1 if cycle should be advanced,
   0 if INSN_PTR is set to point to the desirable insn,
   1 if choose_ready () should be restarted without advancing the cycle.  */
static int
choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
              rtx *insn_ptr)
{
  int lookahead;

  if (dbg_cnt (sched_insn) == false)
    {
      rtx insn = nonscheduled_insns_begin;
      do
        {
          insn = next_nonnote_insn (insn);
        }
      while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);

      if (QUEUE_INDEX (insn) == QUEUE_READY)
        /* INSN is in the ready_list.  */
        {
          nonscheduled_insns_begin = insn;
          ready_remove_insn (insn);
          *insn_ptr = insn;
          return 0;
        }

      /* INSN is in the queue.  Advance cycle to move it to the ready list.  */
      return -1;
    }

  lookahead = 0;

  if (targetm.sched.first_cycle_multipass_dfa_lookahead)
    lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
  if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
      || DEBUG_INSN_P (ready_element (ready, 0)))
    {
      if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
        *insn_ptr = ready_remove_first_dispatch (ready);
      else
        *insn_ptr = ready_remove_first (ready);

      return 0;
    }
  else
    {
      /* Try to choose the better insn.  */
      int index = 0, i, n;
      rtx insn;
      int try_data = 1, try_control = 1;
      ds_t ts;

      insn = ready_element (ready, 0);
      if (INSN_CODE (insn) < 0)
        {
          *insn_ptr = ready_remove_first (ready);
          return 0;
        }

      if (spec_info
          && spec_info->flags & (PREFER_NON_DATA_SPEC
                                 | PREFER_NON_CONTROL_SPEC))
        {
          for (i = 0, n = ready->n_ready; i < n; i++)
            {
              rtx x;
              ds_t s;

              x = ready_element (ready, i);
              s = TODO_SPEC (x);

              if (spec_info->flags & PREFER_NON_DATA_SPEC
                  && !(s & DATA_SPEC))
                {
                  try_data = 0;

                  if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
                      || !try_control)
                    break;
                }

              if (spec_info->flags & PREFER_NON_CONTROL_SPEC
                  && !(s & CONTROL_SPEC))
                {
                  try_control = 0;

                  if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
                    break;
                }
            }
        }

      ts = TODO_SPEC (insn);
      if ((ts & SPECULATIVE)
          && (((!try_data && (ts & DATA_SPEC))
               || (!try_control && (ts & CONTROL_SPEC)))
              || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
                  && !targetm.sched
                  .first_cycle_multipass_dfa_lookahead_guard_spec (insn))))
        /* Discard speculative instruction that stands first in the ready
           list.  */
        {
          change_queue_index (insn, 1);
          return 1;
        }

      ready_try[0] = 0;

      for (i = 1; i < ready->n_ready; i++)
        {
          insn = ready_element (ready, i);

          ready_try[i]
            = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
               || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC)));
        }

      /* Let the target filter the search space.  */
      for (i = 1; i < ready->n_ready; i++)
        if (!ready_try[i])
          {
            insn = ready_element (ready, i);

            /* If this insn is recognizable we should have already
               recognized it earlier.
               ??? Not very clear where this is supposed to be done.
               See dep_cost_1.  */
            gcc_checking_assert (INSN_CODE (insn) >= 0
                                 || recog_memoized (insn) < 0);

            ready_try[i]
              = (/* INSN_CODE check can be omitted here as it is also done later
                    in max_issue ().  */
                 INSN_CODE (insn) < 0
                 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
                     && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
                        (insn)));
          }

      if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
        {
          *insn_ptr = ready_remove_first (ready);
          if (sched_verbose >= 4)
            fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
                     (*current_sched_info->print_insn) (*insn_ptr, 0));
          return 0;
        }
      else
        {
          if (sched_verbose >= 4)
            fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
                     (*current_sched_info->print_insn)
                     (ready_element (ready, index), 0));

          *insn_ptr = ready_remove (ready, index);
          return 0;
        }
    }
}
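
/* Hedged caller sketch mirroring how schedule_block consumes the return
   convention documented above (names are illustrative only):

     rtx insn = NULL_RTX;
     int res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);
     if (res < 0)
       finish_cycle ();
     else if (res > 0)
       restart_without_advance ();
     else
       gcc_assert (insn != NULL_RTX);

   finish_cycle and restart_without_advance stand in for the break and the
   goto restart_choose_ready paths in schedule_block; they are not real
   functions.  */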
/* This function is called when we have successfully scheduled a
   block.  It uses the schedule stored in the scheduled_insns vector
   to rearrange the RTL.  PREV_HEAD is used as the anchor to which we
   append the scheduled insns; TAIL is the insn after the scheduled
   block.  TARGET_BB is the argument passed to schedule_block.  */

static void
commit_schedule (rtx prev_head, rtx tail, basic_block *target_bb)
{
  unsigned int i;
  rtx insn;

  last_scheduled_insn = prev_head;
  for (i = 0;
       scheduled_insns.iterate (i, &insn);
       i++)
    {
      if (control_flow_insn_p (last_scheduled_insn)
          || current_sched_info->advance_target_bb (*target_bb, insn))
        {
          *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);

          if (sched_verbose)
            {
              rtx x;

              x = next_real_insn (last_scheduled_insn);
              gcc_assert (x);
              dump_new_block_header (1, *target_bb, x, tail);
            }

          last_scheduled_insn = bb_note (*target_bb);
        }

      if (current_sched_info->begin_move_insn)
        (*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
      move_insn (insn, last_scheduled_insn,
                 current_sched_info->next_tail);
      if (!DEBUG_INSN_P (insn))
        reemit_notes (insn);
      last_scheduled_insn = insn;
    }

  scheduled_insns.truncate (0);
}
/* Examine all insns on the ready list and queue those which can't be
   issued in this cycle.  TEMP_STATE is temporary scheduler state we
   can use as scratch space.  If FIRST_CYCLE_INSN_P is true, no insns
   have been issued for the current cycle, which means it is valid to
   issue an asm statement.

   If SHADOWS_ONLY_P is true, we eliminate all real insns and only
   leave those for which SHADOW_P is true.  If MODULO_EPILOGUE is true,
   we only leave insns which have an INSN_EXACT_TICK.  */

static void
prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
                  bool shadows_only_p, bool modulo_epilogue_p)
{
  int i, pass;
  bool sched_group_found = false;
  int min_cost_group = 1;

  for (i = 0; i < ready.n_ready; i++)
    {
      rtx insn = ready_element (&ready, i);
      if (SCHED_GROUP_P (insn))
        {
          sched_group_found = true;
          break;
        }
    }

  /* Make two passes if there's a SCHED_GROUP_P insn; make sure to handle
     such an insn first and note its cost, then schedule all other insns
     for one cycle later.  */
  for (pass = sched_group_found ? 0 : 1; pass < 2; )
    {
      int n = ready.n_ready;
      for (i = 0; i < n; i++)
        {
          rtx insn = ready_element (&ready, i);
          int cost = 0;
          const char *reason = "resource conflict";

          if (DEBUG_INSN_P (insn))
            continue;

          if (sched_group_found && !SCHED_GROUP_P (insn))
            {
              if (pass == 0)
                continue;
              cost = min_cost_group;
              reason = "not in sched group";
            }
          else if (modulo_epilogue_p
                   && INSN_EXACT_TICK (insn) == INVALID_TICK)
            {
              cost = max_insn_queue_index;
              reason = "not an epilogue insn";
            }
          else if (shadows_only_p && !SHADOW_P (insn))
            {
              cost = 1;
              reason = "not a shadow";
            }
          else if (recog_memoized (insn) < 0)
            {
              if (!first_cycle_insn_p
                  && (GET_CODE (PATTERN (insn)) == ASM_INPUT
                      || asm_noperands (PATTERN (insn)) >= 0))
                cost = 1;
              reason = "asm";
            }
          else if (sched_pressure != SCHED_PRESSURE_NONE)
            {
              if (sched_pressure == SCHED_PRESSURE_MODEL
                  && INSN_TICK (insn) <= clock_var)
                {
                  memcpy (temp_state, curr_state, dfa_state_size);
                  if (state_transition (temp_state, insn) >= 0)
                    INSN_TICK (insn) = clock_var + 1;
                }
              cost = 0;
            }
          else
            {
              int delay_cost = 0;

              if (delay_htab)
                {
                  struct delay_pair *delay_entry;
                  delay_entry
                    = (struct delay_pair *)htab_find_with_hash (delay_htab, insn,
                                                                htab_hash_pointer (insn));
                  while (delay_entry && delay_cost == 0)
                    {
                      delay_cost = estimate_shadow_tick (delay_entry);
                      if (delay_cost > max_insn_queue_index)
                        delay_cost = max_insn_queue_index;
                      delay_entry = delay_entry->next_same_i1;
                    }
                }

              memcpy (temp_state, curr_state, dfa_state_size);
              cost = state_transition (temp_state, insn);
              if (cost < 0)
                cost = 0;
              else if (cost == 0)
                cost = 1;
              if (cost < delay_cost)
                {
                  cost = delay_cost;
                  reason = "shadow tick";
                }
            }
          if (cost >= 1)
            {
              if (SCHED_GROUP_P (insn) && cost > min_cost_group)
                min_cost_group = cost;
              ready_remove (&ready, i);
              queue_insn (insn, cost, reason);
              if (i + 1 < n)
                break;
            }
        }
      if (i == n)
        pass++;
    }
}
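
/* Example of the two-pass SCHED_GROUP_P handling above, with made-up costs:
   if the ready list holds a sched-group insn that the DFA cannot issue for
   two more cycles plus two ordinary insns, pass 0 looks only at the group
   insn and records min_cost_group = 2; pass 1 then requeues the ordinary
   insns with that cost ("not in sched group"), so the whole group is
   completed before anything else is considered.  */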
/* Called when we detect that the schedule is impossible.  We examine the
   backtrack queue to find the earliest insn that caused this condition.  */

static struct haifa_saved_data *
verify_shadows (void)
{
  struct haifa_saved_data *save, *earliest_fail = NULL;
  for (save = backtrack_queue; save; save = save->next)
    {
      int t;
      struct delay_pair *pair = save->delay_pair;
      rtx i1 = pair->i1;

      for (; pair; pair = pair->next_same_i1)
        {
          rtx i2 = pair->i2;

          if (QUEUE_INDEX (i2) == QUEUE_SCHEDULED)
            continue;

          t = INSN_TICK (i1) + pair_delay (pair);
          if (t < clock_var)
            {
              if (sched_verbose >= 2)
                fprintf (sched_dump,
                         ";;\t\tfailed delay requirements for %d/%d (%d->%d)"
                         ", not ready\n",
                         INSN_UID (pair->i1), INSN_UID (pair->i2),
                         INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
              earliest_fail = save;
              break;
            }
          if (QUEUE_INDEX (i2) >= 0)
            {
              int queued_for = INSN_TICK (i2);

              if (t < queued_for)
                {
                  if (sched_verbose >= 2)
                    fprintf (sched_dump,
                             ";;\t\tfailed delay requirements for %d/%d"
                             " (%d->%d), queued too late\n",
                             INSN_UID (pair->i1), INSN_UID (pair->i2),
                             INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
                  earliest_fail = save;
                  break;
                }
            }
        }
    }

  return earliest_fail;
}
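
/* Worked example with made-up ticks: if pair->i1 was placed at tick 7 and
   pair_delay is 3, the shadow pair->i2 is due at tick 10.  If i2 is still
   unscheduled once the clock has passed tick 10, or it sits in the queue
   for a later tick, the save point that created the pair is reported as
   the earliest failure so that schedule_block can backtrack to it.  */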
/* Use forward list scheduling to rearrange insns of block pointed to by
   TARGET_BB, possibly bringing insns from subsequent blocks in the same
   region.  */

bool
schedule_block (basic_block *target_bb, state_t init_state)
{
  int i;
  bool success = modulo_ii == 0;
  struct sched_block_state ls;
  state_t temp_state = NULL;  /* It is used for multipass scheduling.  */
  int sort_p, advance, start_clock_var;

  /* Head/tail info for this block.  */
  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  rtx head = NEXT_INSN (prev_head);
  rtx tail = PREV_INSN (next_tail);

  if ((current_sched_info->flags & DONT_BREAK_DEPENDENCIES) == 0
      && sched_pressure != SCHED_PRESSURE_MODEL)
    find_modifiable_mems (head, tail);

  /* We used to have code to avoid getting parameters moved from hard
     argument registers into pseudos.

     However, it was removed when it proved to be of marginal benefit
     and caused problems because schedule_block and compute_forward_dependences
     had different notions of what the "head" insn was.  */

  gcc_assert (head != tail || INSN_P (head));

  haifa_recovery_bb_recently_added_p = false;

  backtrack_queue = NULL;

  /* Debug info.  */
  if (sched_verbose)
    dump_new_block_header (0, *target_bb, head, tail);

  if (init_state == NULL)
    state_reset (curr_state);
  else
    memcpy (curr_state, init_state, dfa_state_size);

  /* Clear the ready list.  */
  ready.first = ready.veclen - 1;
  ready.n_ready = 0;
  ready.n_debug = 0;

  /* It is used for first cycle multipass scheduling.  */
  temp_state = alloca (dfa_state_size);

  if (targetm.sched.init)
    targetm.sched.init (sched_dump, sched_verbose, ready.veclen);

  /* We start inserting insns after PREV_HEAD.  */
  last_scheduled_insn = nonscheduled_insns_begin = prev_head;
  last_nondebug_scheduled_insn = NULL_RTX;

  gcc_assert ((NOTE_P (last_scheduled_insn)
               || DEBUG_INSN_P (last_scheduled_insn))
              && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);

  /* Initialize INSN_QUEUE.  Q_SIZE is the total number of insns in the
     queue.  */
  q_ptr = 0;
  q_size = 0;

  insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
  memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));

  /* Start just before the beginning of time.  */
  clock_var = -1;

  /* We need queue and ready lists and clock_var be initialized
     in try_ready () (which is called through init_ready_list ()).  */
  (*current_sched_info->init_ready_list) ();

  if (sched_pressure == SCHED_PRESSURE_MODEL)
    model_start_schedule ();

  /* The algorithm is O(n^2) in the number of ready insns at any given
     time in the worst case.  Before reload we are more likely to have
     big lists so truncate them to a reasonable size.  */
  if (!reload_completed
      && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
    {
      ready_sort (&ready);

      /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
         If there are debug insns, we know they're first.  */
      for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
        if (!SCHED_GROUP_P (ready_element (&ready, i)))
          break;

      if (sched_verbose >= 2)
        {
          fprintf (sched_dump,
                   ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
          fprintf (sched_dump,
                   ";;\t\t before reload => truncated to %d insns\n", i);
        }

      /* Delay all insns past it for 1 cycle.  If debug counter is
         activated make an exception for the insn right after
         nonscheduled_insns_begin.  */
      {
        rtx skip_insn;

        if (dbg_cnt (sched_insn) == false)
          skip_insn = next_nonnote_insn (nonscheduled_insns_begin);
        else
          skip_insn = NULL_RTX;

        while (i < ready.n_ready)
          {
            rtx insn;

            insn = ready_remove (&ready, i);

            if (insn != skip_insn)
              queue_insn (insn, 1, "list truncated");
          }
        if (skip_insn)
          ready_add (&ready, skip_insn, true);
      }
    }

  /* Now we can restore basic block notes and maintain precise cfg.  */
  restore_bb_notes (*target_bb);

  last_clock_var = -1;

  advance = 0;

  gcc_assert (scheduled_insns.length () == 0);
  sort_p = TRUE;
  must_backtrack = false;
  modulo_insns_scheduled = 0;

  ls.modulo_epilogue = false;
  ls.first_cycle_insn_p = true;
  /* Loop until all the insns in BB are scheduled.  */
  while ((*current_sched_info->schedule_more_p) ())
    {
      perform_replacements_new_cycle ();
      do
        {
          start_clock_var = clock_var;

          clock_var++;

          advance_one_cycle ();

          /* Add to the ready list all pending insns that can be issued now.
             If there are no ready insns, increment clock until one
             is ready and add all pending insns at that point to the ready
             list.  */
          queue_to_ready (&ready);

          gcc_assert (ready.n_ready);

          if (sched_verbose >= 2)
            {
              fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
              debug_ready_list (&ready);
            }
          advance -= clock_var - start_clock_var;
        }
      while (advance > 0);

      if (ls.modulo_epilogue)
        {
          int stage = clock_var / modulo_ii;
          if (stage > modulo_last_stage * 2 + 2)
            {
              if (sched_verbose >= 2)
                fprintf (sched_dump,
                         ";;\t\tmodulo scheduled succeeded at II %d\n",
                         modulo_ii);
              success = true;
              goto end_schedule;
            }
        }
      else if (modulo_ii > 0)
        {
          int stage = clock_var / modulo_ii;
          if (stage > modulo_max_stages)
            {
              if (sched_verbose >= 2)
                fprintf (sched_dump,
                         ";;\t\tfailing schedule due to excessive stages\n");
              goto end_schedule;
            }
          if (modulo_n_insns == modulo_insns_scheduled
              && stage > modulo_last_stage)
            {
              if (sched_verbose >= 2)
                fprintf (sched_dump,
                         ";;\t\tfound kernel after %d stages, II %d\n",
                         stage, modulo_ii);
              ls.modulo_epilogue = true;
            }
        }

      prune_ready_list (temp_state, true, false, ls.modulo_epilogue);
      if (ready.n_ready == 0)
        continue;
      if (must_backtrack)
        goto do_backtrack;

      ls.shadows_only_p = false;
      cycle_issued_insns = 0;
      ls.can_issue_more = issue_rate;
      for (;;)
        {
          rtx insn;
          int cost;
          bool asm_p;

          if (sort_p && ready.n_ready > 0)
            {
              /* Sort the ready list based on priority.  This must be
                 done every iteration through the loop, as schedule_insn
                 may have readied additional insns that will not be
                 sorted correctly.  */
              ready_sort (&ready);

              if (sched_verbose >= 2)
                {
                  fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
                  debug_ready_list (&ready);
                }
            }

          /* We don't want md sched reorder to even see debug isns, so put
             them out right away.  */
          if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
              && (*current_sched_info->schedule_more_p) ())
            {
              while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
                {
                  rtx insn = ready_remove_first (&ready);
                  gcc_assert (DEBUG_INSN_P (insn));
                  (*current_sched_info->begin_schedule_ready) (insn);
                  scheduled_insns.safe_push (insn);
                  last_scheduled_insn = insn;
                  advance = schedule_insn (insn);
                  gcc_assert (advance == 0);
                  if (ready.n_ready > 0)
                    ready_sort (&ready);
                }
            }

          if (ls.first_cycle_insn_p && !ready.n_ready)
            break;

        resume_after_backtrack:
          /* Allow the target to reorder the list, typically for
             better instruction bundling.  */
          if (sort_p
              && (ready.n_ready == 0
                  || !SCHED_GROUP_P (ready_element (&ready, 0))))
            {
              if (ls.first_cycle_insn_p && targetm.sched.reorder)
                ls.can_issue_more
                  = targetm.sched.reorder (sched_dump, sched_verbose,
                                           ready_lastpos (&ready),
                                           &ready.n_ready, clock_var);
              else if (!ls.first_cycle_insn_p && targetm.sched.reorder2)
                ls.can_issue_more
                  = targetm.sched.reorder2 (sched_dump, sched_verbose,
                                            ready.n_ready
                                            ? ready_lastpos (&ready) : NULL,
                                            &ready.n_ready, clock_var);
            }
6104 restart_choose_ready
:
6105 if (sched_verbose
>= 2)
6107 fprintf (sched_dump
, ";;\tReady list (t = %3d): ",
6109 debug_ready_list (&ready
);
6110 if (sched_pressure
== SCHED_PRESSURE_WEIGHTED
)
6111 print_curr_reg_pressure ();
6114 if (ready
.n_ready
== 0
6115 && ls
.can_issue_more
6116 && reload_completed
)
6118 /* Allow scheduling insns directly from the queue in case
6119 there's nothing better to do (ready list is empty) but
6120 there are still vacant dispatch slots in the current cycle. */
6121 if (sched_verbose
>= 6)
6122 fprintf (sched_dump
,";;\t\tSecond chance\n");
6123 memcpy (temp_state
, curr_state
, dfa_state_size
);
6124 if (early_queue_to_ready (temp_state
, &ready
))
6125 ready_sort (&ready
);
6128 if (ready
.n_ready
== 0
6129 || !ls
.can_issue_more
6130 || state_dead_lock_p (curr_state
)
6131 || !(*current_sched_info
->schedule_more_p
) ())
6134 /* Select and remove the insn from the ready list. */
6140 res
= choose_ready (&ready
, ls
.first_cycle_insn_p
, &insn
);
6146 goto restart_choose_ready
;
6148 gcc_assert (insn
!= NULL_RTX
);
6151 insn
= ready_remove_first (&ready
);
6153 if (sched_pressure
!= SCHED_PRESSURE_NONE
6154 && INSN_TICK (insn
) > clock_var
)
6156 ready_add (&ready
, insn
, true);
6161 if (targetm
.sched
.dfa_new_cycle
6162 && targetm
.sched
.dfa_new_cycle (sched_dump
, sched_verbose
,
6163 insn
, last_clock_var
,
6164 clock_var
, &sort_p
))
	{
	  /* SORT_P is used by the target to override sorting
	     of the ready list.  This is needed when the target
	     has modified its internal structures expecting that
	     the insn will be issued next.  As we need the insn
	     to have the highest priority (so it will be returned by
	     the ready_remove_first call above), we invoke
	     ready_add (&ready, insn, true).
	     But, still, there is one issue: INSN can be later
	     discarded by scheduler's front end through
	     current_sched_info->can_schedule_ready_p, hence, won't
	     be issued next.  */
	  ready_add (&ready, insn, true);
	  /* ... */
	}
      if (current_sched_info->can_schedule_ready_p
	  && ! (*current_sched_info->can_schedule_ready_p) (insn))
	/* We normally get here only if we don't want to move
	   insn from the split block.  */
	{
	  TODO_SPEC (insn) = DEP_POSTPONED;
	  goto restart_choose_ready;
	}

      if (delay_htab)
	{
	  /* If this insn is the first part of a delay-slot pair, record a
	     backtrack point.  */
	  struct delay_pair *delay_entry;
	  delay_entry
	    = (struct delay_pair *) htab_find_with_hash (delay_htab, insn,
							 htab_hash_pointer (insn));
	  if (delay_entry)
	    {
	      save_backtrack_point (delay_entry, ls);
	      if (sched_verbose >= 2)
		fprintf (sched_dump, ";;\t\tsaving backtrack point\n");
	    }
	}
      /* DECISION is made.  */

      if (modulo_ii > 0 && INSN_UID (insn) < modulo_iter0_max_uid)
	{
	  modulo_insns_scheduled++;
	  modulo_last_stage = clock_var / modulo_ii;
	}
      if (TODO_SPEC (insn) & SPECULATIVE)
	generate_recovery_code (insn);

      if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
	targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);

      /* Update counters, etc in the scheduler's front end.  */
      (*current_sched_info->begin_schedule_ready) (insn);
      scheduled_insns.safe_push (insn);
      gcc_assert (NONDEBUG_INSN_P (insn));
      last_nondebug_scheduled_insn = last_scheduled_insn = insn;
      if (recog_memoized (insn) >= 0)
	{
	  memcpy (temp_state, curr_state, dfa_state_size);
	  cost = state_transition (curr_state, insn);
	  if (sched_pressure != SCHED_PRESSURE_WEIGHTED)
	    gcc_assert (cost < 0);
	  if (memcmp (temp_state, curr_state, dfa_state_size) != 0)
	    cycle_issued_insns++;
	}

      asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
	       || asm_noperands (PATTERN (insn)) >= 0);

      if (targetm.sched.variable_issue)
	ls.can_issue_more =
	  targetm.sched.variable_issue (sched_dump, sched_verbose,
					insn, ls.can_issue_more);
      /* A naked CLOBBER or USE generates no instruction, so do
	 not count them against the issue rate.  */
      else if (GET_CODE (PATTERN (insn)) != USE
	       && GET_CODE (PATTERN (insn)) != CLOBBER)
	ls.can_issue_more--;
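/* The issue-rate bookkeeping above skips naked USE and CLOBBER patterns.
   A minimal standalone sketch of that rule (not GCC code; the enum and
   toy_* names are hypothetical):  */
#if 0 /* Illustrative sketch only.  */
enum toy_code { TOY_INSN, TOY_USE, TOY_CLOBBER };

static int
toy_count_against_issue_rate (enum toy_code code, int can_issue_more)
{
  /* USE and CLOBBER generate no machine instruction, so they do not
     consume an issue slot in the current cycle.  */
  if (code != TOY_USE && code != TOY_CLOBBER)
    can_issue_more--;
  return can_issue_more;
}
#endif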
      advance = schedule_insn (insn);

      if (SHADOW_P (insn))
	ls.shadows_only_p = true;

      /* After issuing an asm insn we should start a new cycle.  */
      if (advance == 0 && asm_p)
	/* ... */

      ls.first_cycle_insn_p = false;
      if (ready.n_ready > 0)
	prune_ready_list (temp_state, false, ls.shadows_only_p,
			  ls.modulo_epilogue);
      if (!must_backtrack)
	for (i = 0; i < ready.n_ready; i++)
	  {
	    rtx insn = ready_element (&ready, i);
	    if (INSN_EXACT_TICK (insn) == clock_var)
	      {
		must_backtrack = true;
		/* ... */
	      }
	  }

      if (must_backtrack && modulo_ii > 0)
	{
	  if (modulo_backtracks_left == 0)
	    /* ... */
	  modulo_backtracks_left--;
	}
      while (must_backtrack)
	{
	  struct haifa_saved_data *failed;
	  rtx failed_insn;

	  must_backtrack = false;
	  failed = verify_shadows ();
	  gcc_assert (failed);

	  failed_insn = failed->delay_pair->i1;
	  /* Clear these queues.  */
	  perform_replacements_new_cycle ();
	  toggle_cancelled_flags (false);
	  unschedule_insns_until (failed_insn);
	  while (failed != backtrack_queue)
	    free_topmost_backtrack_point (true);
	  restore_last_backtrack_point (&ls);
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, ";;\t\trewind to cycle %d\n", clock_var);
	  /* Delay by at least a cycle.  This could cause additional
	     backtracking.  */
	  queue_insn (failed_insn, 1, "backtracked");
	  /* ... */
	  if (ready.n_ready > 0)
	    goto resume_after_backtrack;
	}
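/* Backtracking relies on a stack of saved scheduler states: when a shadow
   insn misses its exact tick, the most recent checkpoint is restored and
   the offending insn is re-queued.  A standalone sketch of such a
   checkpoint stack (not GCC code; toy_* names are hypothetical and the
   real saved data is far richer):  */
#if 0 /* Illustrative sketch only.  */
struct toy_checkpoint { int clock; int n_scheduled; };

static struct toy_checkpoint toy_stack[64];
static int toy_stack_top;

static void
toy_save_point (int clock, int n_scheduled)
{
  toy_stack[toy_stack_top].clock = clock;
  toy_stack[toy_stack_top].n_scheduled = n_scheduled;
  toy_stack_top++;
}

/* Pop and return the most recent checkpoint.  */
static struct toy_checkpoint
toy_restore_last_point (void)
{
  return toy_stack[--toy_stack_top];
}
#endif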
      if (clock_var == 0 && ls.first_cycle_insn_p)
	/* ... */

      ls.first_cycle_insn_p = true;

  if (ls.modulo_epilogue)
    /* ... */

  if (!ls.first_cycle_insn_p)
    advance_one_cycle ();
  perform_replacements_new_cycle ();
  /* Once again, debug insn suckiness: they can be on the ready list
     even if they have unresolved dependencies.  To make our view
     of the world consistent, remove such "ready" insns.  */
 restart_debug_insn_loop:
  for (i = ready.n_ready - 1; i >= 0; i--)
    {
      x = ready_element (&ready, i);
      if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (x)) != NULL
	  || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (x)) != NULL)
	{
	  ready_remove (&ready, i);
	  goto restart_debug_insn_loop;
	}
    }
  for (i = ready.n_ready - 1; i >= 0; i--)
    {
      x = ready_element (&ready, i);
      resolve_dependencies (x);
    }

  for (i = 0; i <= max_insn_queue_index; i++)
    {
      while ((link = insn_queue[i]) != NULL)
	{
	  rtx x = XEXP (link, 0);
	  insn_queue[i] = XEXP (link, 1);
	  QUEUE_INDEX (x) = QUEUE_NOWHERE;
	  free_INSN_LIST_node (link);
	  resolve_dependencies (x);
	}
    }
  undo_all_replacements ();

  if (sched_verbose)
    {
      fprintf (sched_dump, ";;\tReady list (final):  ");
      debug_ready_list (&ready);
    }

  if (modulo_ii == 0 && current_sched_info->queue_must_finish_empty)
    /* Sanity check -- queue must be empty now.  Meaningless if region has
       multiple bbs.  */
    gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
  else if (modulo_ii == 0)
    {
      /* We must maintain QUEUE_INDEX between blocks in region.  */
      for (i = ready.n_ready - 1; i >= 0; i--)
	{
	  x = ready_element (&ready, i);
	  QUEUE_INDEX (x) = QUEUE_NOWHERE;
	  TODO_SPEC (x) = HARD_DEP;
	}

      for (i = 0; i <= max_insn_queue_index; i++)
	{
	  for (link = insn_queue[i]; link; link = XEXP (link, 1))
	    {
	      /* ... */
	      QUEUE_INDEX (x) = QUEUE_NOWHERE;
	      TODO_SPEC (x) = HARD_DEP;
	    }
	  free_INSN_LIST_list (&insn_queue[i]);
	}
    }
  if (sched_pressure == SCHED_PRESSURE_MODEL)
    model_end_schedule ();

  commit_schedule (prev_head, tail, target_bb);
  fprintf (sched_dump, ";;   total time = %d\n", clock_var);
  /* ... */
  last_scheduled_insn = tail;

  scheduled_insns.truncate (0);

  if (!current_sched_info->queue_must_finish_empty
      || haifa_recovery_bb_recently_added_p)
    {
      /* INSN_TICK (minimum clock tick at which the insn becomes
	 ready) may not be correct for the insn in the subsequent
	 blocks of the region.  We should use a correct value of
	 `clock_var' or modify INSN_TICK.  It is better to keep
	 clock_var value equal to 0 at the start of a basic block.
	 Therefore we modify INSN_TICK here.  */
      fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
    }
  if (targetm.sched.finish)
    {
      targetm.sched.finish (sched_dump, sched_verbose);
      /* Target might have added some instructions to the scheduled block
	 in its md_finish () hook.  These new insns don't have any data
	 initialized and to identify them we extend h_i_d so that they'll
	 get zero luids.  */
      sched_extend_luids ();
    }

  if (sched_verbose)
    fprintf (sched_dump, ";;   new head = %d\n;;   new tail = %d\n\n",
	     INSN_UID (head), INSN_UID (tail));
  /* Update head/tail boundaries.  */
  head = NEXT_INSN (prev_head);
  tail = last_scheduled_insn;

  head = restore_other_notes (head, NULL);

  current_sched_info->head = head;
  current_sched_info->tail = tail;

  free_backtrack_queue ();
/* Set_priorities: compute priority of each insn in the block.  */

set_priorities (rtx head, rtx tail)
  int sched_max_insns_priority =
    current_sched_info->sched_max_insns_priority;

  if (head == tail && ! INSN_P (head))
    /* ... */

  prev_head = PREV_INSN (head);
  for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
    {
      (void) priority (insn);

      gcc_assert (INSN_PRIORITY_KNOWN (insn));

      sched_max_insns_priority = MAX (sched_max_insns_priority,
				      INSN_PRIORITY (insn));
    }

  current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
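/* set_priorities () above walks the block backwards so that priority ()
   can compute, for each insn, the length of the longest dependence chain
   from it to the end of the block.  A standalone sketch of that recurrence
   over an acyclic toy dependence graph (not GCC code; toy_* names are
   hypothetical):  */
#if 0 /* Illustrative sketch only.  */
struct toy_node
{
  int latency;                /* cost of this insn */
  int n_succ;                 /* number of dependent insns */
  struct toy_node *succ[4];   /* the dependent insns */
  int prio;                   /* cached priority */
  int known;                  /* nonzero once PRIO is valid */
};

/* Priority = own latency + maximum priority among consumers.  */
static int
toy_priority (struct toy_node *n)
{
  int i, best = 0;

  if (n->known)
    return n->prio;
  for (i = 0; i < n->n_succ; i++)
    {
      int p = toy_priority (n->succ[i]);
      if (p > best)
	best = p;
    }
  n->prio = n->latency + best;
  n->known = 1;
  return n->prio;
}
#endif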
/* Set dump and sched_verbose for the desired debugging output.  If no
   dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
   For -fsched-verbose=N, N>=10, print everything to stderr.  */
setup_sched_dump (void)
  sched_verbose = sched_verbose_param;
  if (sched_verbose_param == 0 && dump_file)
    sched_verbose = 1;
  sched_dump = ((sched_verbose_param >= 10 || !dump_file)
		? stderr : dump_file);
6516 /* Initialize some global state for the scheduler. This function works
6517 with the common data shared between all the schedulers. It is called
6518 from the scheduler specific initialization routine. */
6523 /* Disable speculative loads in their presence if cc0 defined. */
6525 flag_schedule_speculative_load
= 0;
6528 if (targetm
.sched
.dispatch (NULL_RTX
, IS_DISPATCH_ON
))
6529 targetm
.sched
.dispatch_do (NULL_RTX
, DISPATCH_INIT
);
6531 if (flag_sched_pressure
6532 && !reload_completed
6533 && common_sched_info
->sched_pass_id
== SCHED_RGN_PASS
)
6534 sched_pressure
= ((enum sched_pressure_algorithm
)
6535 PARAM_VALUE (PARAM_SCHED_PRESSURE_ALGORITHM
));
6537 sched_pressure
= SCHED_PRESSURE_NONE
;
6539 if (sched_pressure
!= SCHED_PRESSURE_NONE
)
6540 ira_setup_eliminable_regset (false);
6542 /* Initialize SPEC_INFO. */
6543 if (targetm
.sched
.set_sched_flags
)
6545 spec_info
= &spec_info_var
;
6546 targetm
.sched
.set_sched_flags (spec_info
);
6548 if (spec_info
->mask
!= 0)
6550 spec_info
->data_weakness_cutoff
=
6551 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF
) * MAX_DEP_WEAK
) / 100;
6552 spec_info
->control_weakness_cutoff
=
6553 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF
)
6554 * REG_BR_PROB_BASE
) / 100;
6557 /* So we won't read anything accidentally. */
6562 /* So we won't read anything accidentally. */
6565 /* Initialize issue_rate. */
6566 if (targetm
.sched
.issue_rate
)
6567 issue_rate
= targetm
.sched
.issue_rate ();
6571 if (cached_issue_rate
!= issue_rate
)
6573 cached_issue_rate
= issue_rate
;
6574 /* To invalidate max_lookahead_tries: */
6575 cached_first_cycle_multipass_dfa_lookahead
= 0;
6578 if (targetm
.sched
.first_cycle_multipass_dfa_lookahead
)
6579 dfa_lookahead
= targetm
.sched
.first_cycle_multipass_dfa_lookahead ();
6583 if (targetm
.sched
.init_dfa_pre_cycle_insn
)
6584 targetm
.sched
.init_dfa_pre_cycle_insn ();
6586 if (targetm
.sched
.init_dfa_post_cycle_insn
)
6587 targetm
.sched
.init_dfa_post_cycle_insn ();
6590 dfa_state_size
= state_size ();
6592 init_alias_analysis ();
6595 df_set_flags (DF_LR_RUN_DCE
);
6596 df_note_add_problem ();
6598 /* More problems needed for interloop dep calculation in SMS. */
6599 if (common_sched_info
->sched_pass_id
== SCHED_SMS_PASS
)
6601 df_rd_add_problem ();
6602 df_chain_add_problem (DF_DU_CHAIN
+ DF_UD_CHAIN
);
6607 /* Do not run DCE after reload, as this can kill nops inserted
6609 if (reload_completed
)
6610 df_clear_flags (DF_LR_RUN_DCE
);
6612 regstat_compute_calls_crossed ();
6614 if (targetm
.sched
.init_global
)
6615 targetm
.sched
.init_global (sched_dump
, sched_verbose
, get_max_uid () + 1);
6617 if (sched_pressure
!= SCHED_PRESSURE_NONE
)
6619 int i
, max_regno
= max_reg_num ();
6621 if (sched_dump
!= NULL
)
6622 /* We need info about pseudos for rtl dumps about pseudo
6623 classes and costs. */
6624 regstat_init_n_sets_and_refs ();
6625 ira_set_pseudo_classes (true, sched_verbose
? sched_dump
: NULL
);
6626 sched_regno_pressure_class
6627 = (enum reg_class
*) xmalloc (max_regno
* sizeof (enum reg_class
));
6628 for (i
= 0; i
< max_regno
; i
++)
6629 sched_regno_pressure_class
[i
]
6630 = (i
< FIRST_PSEUDO_REGISTER
6631 ? ira_pressure_class_translate
[REGNO_REG_CLASS (i
)]
6632 : ira_pressure_class_translate
[reg_allocno_class (i
)]);
6633 curr_reg_live
= BITMAP_ALLOC (NULL
);
6634 if (sched_pressure
== SCHED_PRESSURE_WEIGHTED
)
6636 saved_reg_live
= BITMAP_ALLOC (NULL
);
6637 region_ref_regs
= BITMAP_ALLOC (NULL
);
6641 curr_state
= xmalloc (dfa_state_size
);
6644 static void haifa_init_only_bb (basic_block
, basic_block
);
6646 /* Initialize data structures specific to the Haifa scheduler. */
6648 haifa_sched_init (void)
6650 setup_sched_dump ();
6653 scheduled_insns
.create (0);
6655 if (spec_info
!= NULL
)
6657 sched_deps_info
->use_deps_list
= 1;
6658 sched_deps_info
->generate_spec_deps
= 1;
6661 /* Initialize luids, dependency caches, target and h_i_d for the
6665 bbs
.create (n_basic_blocks
);
6671 bbs
.quick_push (bb
);
6672 sched_init_luids (bbs
);
6673 sched_deps_init (true);
6674 sched_extend_target ();
6675 haifa_init_h_i_d (bbs
);
6680 sched_init_only_bb
= haifa_init_only_bb
;
6681 sched_split_block
= sched_split_block_1
;
6682 sched_create_empty_bb
= sched_create_empty_bb_1
;
6683 haifa_recovery_bb_ever_added_p
= false;
6685 nr_begin_data
= nr_begin_control
= nr_be_in_data
= nr_be_in_control
= 0;
6686 before_recovery
= 0;
6692 /* Finish work with the data specific to the Haifa scheduler. */
6694 haifa_sched_finish (void)
6696 sched_create_empty_bb
= NULL
;
6697 sched_split_block
= NULL
;
6698 sched_init_only_bb
= NULL
;
6700 if (spec_info
&& spec_info
->dump
)
6702 char c
= reload_completed
? 'a' : 'b';
6704 fprintf (spec_info
->dump
,
6705 ";; %s:\n", current_function_name ());
6707 fprintf (spec_info
->dump
,
6708 ";; Procedure %cr-begin-data-spec motions == %d\n",
6710 fprintf (spec_info
->dump
,
6711 ";; Procedure %cr-be-in-data-spec motions == %d\n",
6713 fprintf (spec_info
->dump
,
6714 ";; Procedure %cr-begin-control-spec motions == %d\n",
6715 c
, nr_begin_control
);
6716 fprintf (spec_info
->dump
,
6717 ";; Procedure %cr-be-in-control-spec motions == %d\n",
6718 c
, nr_be_in_control
);
6721 scheduled_insns
.release ();
6723 /* Finalize h_i_d, dependency caches, and luids for the whole
6724 function. Target will be finalized in md_global_finish (). */
6725 sched_deps_finish ();
6726 sched_finish_luids ();
6727 current_sched_info
= NULL
;
6731 /* Free global data used during insn scheduling. This function works with
6732 the common data shared between the schedulers. */
6737 haifa_finish_h_i_d ();
6738 if (sched_pressure
!= SCHED_PRESSURE_NONE
)
6740 if (regstat_n_sets_and_refs
!= NULL
)
6741 regstat_free_n_sets_and_refs ();
6742 if (sched_pressure
== SCHED_PRESSURE_WEIGHTED
)
6744 BITMAP_FREE (region_ref_regs
);
6745 BITMAP_FREE (saved_reg_live
);
6747 BITMAP_FREE (curr_reg_live
);
6748 free (sched_regno_pressure_class
);
6752 if (targetm
.sched
.finish_global
)
6753 targetm
.sched
.finish_global (sched_dump
, sched_verbose
);
6755 end_alias_analysis ();
6757 regstat_free_calls_crossed ();
6762 /* Free all delay_pair structures that were recorded. */
6764 free_delay_pairs (void)
6768 htab_empty (delay_htab
);
6769 htab_empty (delay_htab_i2
);
6773 /* Fix INSN_TICKs of the instructions in the current block as well as
6774 INSN_TICKs of their dependents.
6775 HEAD and TAIL are the begin and the end of the current scheduled block. */
6777 fix_inter_tick (rtx head
, rtx tail
)
6779 /* Set of instructions with corrected INSN_TICK. */
6780 bitmap_head processed
;
  /* ??? It is doubtful if we should assume that cycle advance happens on
     basic block boundaries.  Basically insns that are unconditionally ready
     on the start of the block are more preferable than those which have
     a one cycle dependency over insn from the previous block.  */
6785 int next_clock
= clock_var
+ 1;
6787 bitmap_initialize (&processed
, 0);
6789 /* Iterates over scheduled instructions and fix their INSN_TICKs and
6790 INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
6791 across different blocks. */
6792 for (tail
= NEXT_INSN (tail
); head
!= tail
; head
= NEXT_INSN (head
))
6797 sd_iterator_def sd_it
;
6800 tick
= INSN_TICK (head
);
6801 gcc_assert (tick
>= MIN_TICK
);
6803 /* Fix INSN_TICK of instruction from just scheduled block. */
6804 if (bitmap_set_bit (&processed
, INSN_LUID (head
)))
6808 if (tick
< MIN_TICK
)
6811 INSN_TICK (head
) = tick
;
6814 if (DEBUG_INSN_P (head
))
6817 FOR_EACH_DEP (head
, SD_LIST_RES_FORW
, sd_it
, dep
)
6821 next
= DEP_CON (dep
);
6822 tick
= INSN_TICK (next
);
6824 if (tick
!= INVALID_TICK
6825 /* If NEXT has its INSN_TICK calculated, fix it.
6826 If not - it will be properly calculated from
6827 scratch later in fix_tick_ready. */
6828 && bitmap_set_bit (&processed
, INSN_LUID (next
)))
6832 if (tick
< MIN_TICK
)
6835 if (tick
> INTER_TICK (next
))
6836 INTER_TICK (next
) = tick
;
6838 tick
= INTER_TICK (next
);
6840 INSN_TICK (next
) = tick
;
6845 bitmap_clear (&processed
);
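/* fix_inter_tick () above shifts INSN_TICKs so that the next block can
   start counting cycles from zero again.  A standalone sketch of the
   rebasing step (not GCC code; toy_* names are hypothetical and the real
   code also propagates the shift to dependent insns):  */
#if 0 /* Illustrative sketch only.  */
static void
toy_rebase_ticks (int *tick, int n, int next_clock, int min_tick)
{
  int i;

  /* Shift every tick down by the clock value the previous block ended on,
     clamping at MIN_TICK.  */
  for (i = 0; i < n; i++)
    {
      tick[i] -= next_clock;
      if (tick[i] < min_tick)
	tick[i] = min_tick;
    }
}
#endif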
/* Check if NEXT is ready to be added to the ready or queue list.
   If "yes", add it to the proper list.
   Returns:
      -1 - is not ready yet,
       0 - added to the ready list,
   0 < N - queued for N cycles.  */
6855 try_ready (rtx next
)
6857 ds_t old_ts
, new_ts
;
6859 old_ts
= TODO_SPEC (next
);
6861 gcc_assert (!(old_ts
& ~(SPECULATIVE
| HARD_DEP
| DEP_CONTROL
| DEP_POSTPONED
))
6862 && (old_ts
== HARD_DEP
6863 || old_ts
== DEP_POSTPONED
6864 || (old_ts
& SPECULATIVE
)
6865 || old_ts
== DEP_CONTROL
));
6867 new_ts
= recompute_todo_spec (next
, false);
6869 if (new_ts
& (HARD_DEP
| DEP_POSTPONED
))
6870 gcc_assert (new_ts
== old_ts
6871 && QUEUE_INDEX (next
) == QUEUE_NOWHERE
);
6872 else if (current_sched_info
->new_ready
)
6873 new_ts
= current_sched_info
->new_ready (next
, new_ts
);
6875 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
6876 have its original pattern or changed (speculative) one. This is due
6877 to changing ebb in region scheduling.
6878 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
6879 has speculative pattern.
6881 We can't assert (!(new_ts & HARD_DEP) || new_ts == old_ts) here because
6882 control-speculative NEXT could have been discarded by sched-rgn.c
6883 (the same case as when discarded by can_schedule_ready_p ()). */
6885 if ((new_ts
& SPECULATIVE
)
6886 /* If (old_ts == new_ts), then (old_ts & SPECULATIVE) and we don't
6887 need to change anything. */
6888 && new_ts
!= old_ts
)
6893 gcc_assert ((new_ts
& SPECULATIVE
) && !(new_ts
& ~SPECULATIVE
));
6895 res
= haifa_speculate_insn (next
, new_ts
, &new_pat
);
6900 /* It would be nice to change DEP_STATUS of all dependences,
6901 which have ((DEP_STATUS & SPECULATIVE) == new_ts) to HARD_DEP,
6902 so we won't reanalyze anything. */
6907 /* We follow the rule, that every speculative insn
6908 has non-null ORIG_PAT. */
6909 if (!ORIG_PAT (next
))
6910 ORIG_PAT (next
) = PATTERN (next
);
	      if (!ORIG_PAT (next))
		/* If we're going to overwrite the original pattern of insn,
		   save it.  */
		ORIG_PAT (next) = PATTERN (next);
6919 res
= haifa_change_pattern (next
, new_pat
);
6928 /* We need to restore pattern only if (new_ts == 0), because otherwise it is
6929 either correct (new_ts & SPECULATIVE),
6930 or we simply don't care (new_ts & HARD_DEP). */
6932 gcc_assert (!ORIG_PAT (next
)
6933 || !IS_SPECULATION_BRANCHY_CHECK_P (next
));
6935 TODO_SPEC (next
) = new_ts
;
6937 if (new_ts
& (HARD_DEP
| DEP_POSTPONED
))
6939 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
6940 control-speculative NEXT could have been discarded by sched-rgn.c
6941 (the same case as when discarded by can_schedule_ready_p ()). */
6942 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
6944 change_queue_index (next
, QUEUE_NOWHERE
);
6948 else if (!(new_ts
& BEGIN_SPEC
)
6949 && ORIG_PAT (next
) && PREDICATED_PAT (next
) == NULL_RTX
6950 && !IS_SPECULATION_CHECK_P (next
))
6951 /* We should change pattern of every previously speculative
6952 instruction - and we determine if NEXT was speculative by using
6953 ORIG_PAT field. Except one case - speculation checks have ORIG_PAT
6954 pat too, so skip them. */
6956 bool success
= haifa_change_pattern (next
, ORIG_PAT (next
));
6957 gcc_assert (success
);
6958 ORIG_PAT (next
) = 0;
6961 if (sched_verbose
>= 2)
6963 fprintf (sched_dump
, ";;\t\tdependencies resolved: insn %s",
6964 (*current_sched_info
->print_insn
) (next
, 0));
6966 if (spec_info
&& spec_info
->dump
)
6968 if (new_ts
& BEGIN_DATA
)
6969 fprintf (spec_info
->dump
, "; data-spec;");
6970 if (new_ts
& BEGIN_CONTROL
)
6971 fprintf (spec_info
->dump
, "; control-spec;");
6972 if (new_ts
& BE_IN_CONTROL
)
6973 fprintf (spec_info
->dump
, "; in-control-spec;");
6975 if (TODO_SPEC (next
) & DEP_CONTROL
)
6976 fprintf (sched_dump
, " predicated");
6977 fprintf (sched_dump
, "\n");
6980 adjust_priority (next
);
6982 return fix_tick_ready (next
);
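/* try_ready () above drives a small state machine on the TODO_SPEC status
   word, where independent facts (hard dependence, data speculation,
   control speculation, ...) are packed into disjoint bit groups.  A
   standalone sketch of such a status word (not GCC code; the TOY_* bit
   names and values are hypothetical):  */
#if 0 /* Illustrative sketch only.  */
#define TOY_HARD_DEP    (1 << 0)
#define TOY_BEGIN_DATA  (1 << 1)
#define TOY_BEGIN_CTRL  (1 << 2)
#define TOY_SPECULATIVE (TOY_BEGIN_DATA | TOY_BEGIN_CTRL)

/* An insn is treated as speculative only if some speculation bit is set
   and it is not blocked by a hard dependence.  */
static int
toy_is_speculative (int ts)
{
  return (ts & TOY_SPECULATIVE) != 0 && (ts & TOY_HARD_DEP) == 0;
}
#endif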
6985 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
6987 fix_tick_ready (rtx next
)
6991 if (!DEBUG_INSN_P (next
) && !sd_lists_empty_p (next
, SD_LIST_RES_BACK
))
6994 sd_iterator_def sd_it
;
6997 tick
= INSN_TICK (next
);
6998 /* if tick is not equal to INVALID_TICK, then update
6999 INSN_TICK of NEXT with the most recent resolved dependence
7000 cost. Otherwise, recalculate from scratch. */
7001 full_p
= (tick
== INVALID_TICK
);
7003 FOR_EACH_DEP (next
, SD_LIST_RES_BACK
, sd_it
, dep
)
7005 rtx pro
= DEP_PRO (dep
);
7008 gcc_assert (INSN_TICK (pro
) >= MIN_TICK
);
7010 tick1
= INSN_TICK (pro
) + dep_cost (dep
);
7021 INSN_TICK (next
) = tick
;
7023 delay
= tick
- clock_var
;
7024 if (delay
<= 0 || sched_pressure
!= SCHED_PRESSURE_NONE
)
7025 delay
= QUEUE_READY
;
7027 change_queue_index (next
, delay
);
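/* fix_tick_ready () above derives the earliest cycle at which an insn may
   issue from its resolved producers, then either readies or queues it.  A
   standalone sketch of that computation (not GCC code; toy_* names are
   hypothetical):  */
#if 0 /* Illustrative sketch only.  */
/* Earliest tick = max over producers of (producer tick + dependence cost).  */
static int
toy_earliest_tick (const int *pro_tick, const int *dep_cost, int n_deps)
{
  int i, tick = 0;

  for (i = 0; i < n_deps; i++)
    if (pro_tick[i] + dep_cost[i] > tick)
      tick = pro_tick[i] + dep_cost[i];
  return tick;
}

/* A non-positive delay means "ready now"; otherwise queue for DELAY cycles.  */
static int
toy_delay (int tick, int clock)
{
  int delay = tick - clock;
  return delay <= 0 ? 0 : delay;
}
#endif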
/* Move NEXT to the proper queue list with (DELAY >= 1),
   or add it to the ready list (DELAY == QUEUE_READY),
   or remove it from the ready and queue lists entirely (DELAY == QUEUE_NOWHERE).  */
change_queue_index (rtx next, int delay)
7038 int i
= QUEUE_INDEX (next
);
7040 gcc_assert (QUEUE_NOWHERE
<= delay
&& delay
<= max_insn_queue_index
7042 gcc_assert (i
!= QUEUE_SCHEDULED
);
7044 if ((delay
> 0 && NEXT_Q_AFTER (q_ptr
, delay
) == i
)
7045 || (delay
< 0 && delay
== i
))
7046 /* We have nothing to do. */
7049 /* Remove NEXT from wherever it is now. */
7050 if (i
== QUEUE_READY
)
7051 ready_remove_insn (next
);
7053 queue_remove (next
);
7055 /* Add it to the proper place. */
7056 if (delay
== QUEUE_READY
)
7057 ready_add (readyp
, next
, false);
7058 else if (delay
>= 1)
7059 queue_insn (next
, delay
, "change queue index");
7061 if (sched_verbose
>= 2)
7063 fprintf (sched_dump
, ";;\t\ttick updated: insn %s",
7064 (*current_sched_info
->print_insn
) (next
, 0));
7066 if (delay
== QUEUE_READY
)
7067 fprintf (sched_dump
, " into ready\n");
7068 else if (delay
>= 1)
7069 fprintf (sched_dump
, " into queue with cost=%d\n", delay
);
7071 fprintf (sched_dump
, " removed from ready or queue lists\n");
7075 static int sched_ready_n_insns
= -1;
7077 /* Initialize per region data structures. */
7079 sched_extend_ready_list (int new_sched_ready_n_insns
)
7083 if (sched_ready_n_insns
== -1)
7084 /* At the first call we need to initialize one more choice_stack
7088 sched_ready_n_insns
= 0;
7089 scheduled_insns
.reserve (new_sched_ready_n_insns
);
7092 i
= sched_ready_n_insns
+ 1;
7094 ready
.veclen
= new_sched_ready_n_insns
+ issue_rate
;
7095 ready
.vec
= XRESIZEVEC (rtx
, ready
.vec
, ready
.veclen
);
7097 gcc_assert (new_sched_ready_n_insns
>= sched_ready_n_insns
);
7099 ready_try
= (char *) xrecalloc (ready_try
, new_sched_ready_n_insns
,
7100 sched_ready_n_insns
, sizeof (*ready_try
));
7102 /* We allocate +1 element to save initial state in the choice_stack[0]
7104 choice_stack
= XRESIZEVEC (struct choice_entry
, choice_stack
,
7105 new_sched_ready_n_insns
+ 1);
7107 for (; i
<= new_sched_ready_n_insns
; i
++)
7109 choice_stack
[i
].state
= xmalloc (dfa_state_size
);
7111 if (targetm
.sched
.first_cycle_multipass_init
)
7112 targetm
.sched
.first_cycle_multipass_init (&(choice_stack
[i
]
7116 sched_ready_n_insns
= new_sched_ready_n_insns
;
7119 /* Free per region data structures. */
7121 sched_finish_ready_list (void)
7132 for (i
= 0; i
<= sched_ready_n_insns
; i
++)
7134 if (targetm
.sched
.first_cycle_multipass_fini
)
7135 targetm
.sched
.first_cycle_multipass_fini (&(choice_stack
[i
]
7138 free (choice_stack
[i
].state
);
7140 free (choice_stack
);
7141 choice_stack
= NULL
;
7143 sched_ready_n_insns
= -1;
7147 haifa_luid_for_non_insn (rtx x
)
7149 gcc_assert (NOTE_P (x
) || LABEL_P (x
));
7154 /* Generates recovery code for INSN. */
7156 generate_recovery_code (rtx insn
)
7158 if (TODO_SPEC (insn
) & BEGIN_SPEC
)
7159 begin_speculative_block (insn
);
  /* Here we have insn with no dependencies to
     instructions other than CHECK_SPEC ones.  */
7164 if (TODO_SPEC (insn
) & BE_IN_SPEC
)
7165 add_to_speculative_block (insn
);
7169 Tries to add speculative dependencies of type FS between instructions
7170 in deps_list L and TWIN. */
7172 process_insn_forw_deps_be_in_spec (rtx insn
, rtx twin
, ds_t fs
)
7174 sd_iterator_def sd_it
;
7177 FOR_EACH_DEP (insn
, SD_LIST_FORW
, sd_it
, dep
)
7182 consumer
= DEP_CON (dep
);
7184 ds
= DEP_STATUS (dep
);
7186 if (/* If we want to create speculative dep. */
7188 /* And we can do that because this is a true dep. */
7189 && (ds
& DEP_TYPES
) == DEP_TRUE
)
7191 gcc_assert (!(ds
& BE_IN_SPEC
));
7193 if (/* If this dep can be overcome with 'begin speculation'. */
7195 /* Then we have a choice: keep the dep 'begin speculative'
7196 or transform it into 'be in speculative'. */
7198 if (/* In try_ready we assert that if insn once became ready
7199 it can be removed from the ready (or queue) list only
7200 due to backend decision. Hence we can't let the
7201 probability of the speculative dep to decrease. */
7202 ds_weak (ds
) <= ds_weak (fs
))
7206 new_ds
= (ds
& ~BEGIN_SPEC
) | fs
;
7208 if (/* consumer can 'be in speculative'. */
7209 sched_insn_is_legitimate_for_speculation_p (consumer
,
7211 /* Transform it to be in speculative. */
7216 /* Mark the dep as 'be in speculative'. */
7221 dep_def _new_dep
, *new_dep
= &_new_dep
;
7223 init_dep_1 (new_dep
, twin
, consumer
, DEP_TYPE (dep
), ds
);
7224 sd_add_dep (new_dep
, false);
7229 /* Generates recovery code for BEGIN speculative INSN. */
7231 begin_speculative_block (rtx insn
)
7233 if (TODO_SPEC (insn
) & BEGIN_DATA
)
7235 if (TODO_SPEC (insn
) & BEGIN_CONTROL
)
7238 create_check_block_twin (insn
, false);
7240 TODO_SPEC (insn
) &= ~BEGIN_SPEC
;
7243 static void haifa_init_insn (rtx
);
7245 /* Generates recovery code for BE_IN speculative INSN. */
7247 add_to_speculative_block (rtx insn
)
7250 sd_iterator_def sd_it
;
7253 rtx_vec_t priorities_roots
;
7255 ts
= TODO_SPEC (insn
);
7256 gcc_assert (!(ts
& ~BE_IN_SPEC
));
7258 if (ts
& BE_IN_DATA
)
7260 if (ts
& BE_IN_CONTROL
)
7263 TODO_SPEC (insn
) &= ~BE_IN_SPEC
;
7264 gcc_assert (!TODO_SPEC (insn
));
7266 DONE_SPEC (insn
) |= ts
;
7268 /* First we convert all simple checks to branchy. */
7269 for (sd_it
= sd_iterator_start (insn
, SD_LIST_SPEC_BACK
);
7270 sd_iterator_cond (&sd_it
, &dep
);)
7272 rtx check
= DEP_PRO (dep
);
7274 if (IS_SPECULATION_SIMPLE_CHECK_P (check
))
7276 create_check_block_twin (check
, true);
7278 /* Restart search. */
7279 sd_it
= sd_iterator_start (insn
, SD_LIST_SPEC_BACK
);
7282 /* Continue search. */
7283 sd_iterator_next (&sd_it
);
7286 priorities_roots
.create (0);
7287 clear_priorities (insn
, &priorities_roots
);
7294 /* Get the first backward dependency of INSN. */
7295 sd_it
= sd_iterator_start (insn
, SD_LIST_SPEC_BACK
);
7296 if (!sd_iterator_cond (&sd_it
, &dep
))
7297 /* INSN has no backward dependencies left. */
7300 gcc_assert ((DEP_STATUS (dep
) & BEGIN_SPEC
) == 0
7301 && (DEP_STATUS (dep
) & BE_IN_SPEC
) != 0
7302 && (DEP_STATUS (dep
) & DEP_TYPES
) == DEP_TRUE
);
7304 check
= DEP_PRO (dep
);
7306 gcc_assert (!IS_SPECULATION_CHECK_P (check
) && !ORIG_PAT (check
)
7307 && QUEUE_INDEX (check
) == QUEUE_NOWHERE
);
7309 rec
= BLOCK_FOR_INSN (check
);
7311 twin
= emit_insn_before (copy_insn (PATTERN (insn
)), BB_END (rec
));
7312 haifa_init_insn (twin
);
7314 sd_copy_back_deps (twin
, insn
, true);
7316 if (sched_verbose
&& spec_info
->dump
)
7317 /* INSN_BB (insn) isn't determined for twin insns yet.
7318 So we can't use current_sched_info->print_insn. */
7319 fprintf (spec_info
->dump
, ";;\t\tGenerated twin insn : %d/rec%d\n",
7320 INSN_UID (twin
), rec
->index
);
7322 twins
= alloc_INSN_LIST (twin
, twins
);
7324 /* Add dependences between TWIN and all appropriate
7325 instructions from REC. */
7326 FOR_EACH_DEP (insn
, SD_LIST_SPEC_BACK
, sd_it
, dep
)
7328 rtx pro
= DEP_PRO (dep
);
7330 gcc_assert (DEP_TYPE (dep
) == REG_DEP_TRUE
);
7332 /* INSN might have dependencies from the instructions from
7333 several recovery blocks. At this iteration we process those
7334 producers that reside in REC. */
7335 if (BLOCK_FOR_INSN (pro
) == rec
)
7337 dep_def _new_dep
, *new_dep
= &_new_dep
;
7339 init_dep (new_dep
, pro
, twin
, REG_DEP_TRUE
);
7340 sd_add_dep (new_dep
, false);
7344 process_insn_forw_deps_be_in_spec (insn
, twin
, ts
);
7346 /* Remove all dependencies between INSN and insns in REC. */
7347 for (sd_it
= sd_iterator_start (insn
, SD_LIST_SPEC_BACK
);
7348 sd_iterator_cond (&sd_it
, &dep
);)
7350 rtx pro
= DEP_PRO (dep
);
7352 if (BLOCK_FOR_INSN (pro
) == rec
)
7353 sd_delete_dep (sd_it
);
7355 sd_iterator_next (&sd_it
);
7359 /* We couldn't have added the dependencies between INSN and TWINS earlier
7360 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
7365 twin
= XEXP (twins
, 0);
7368 dep_def _new_dep
, *new_dep
= &_new_dep
;
7370 init_dep (new_dep
, insn
, twin
, REG_DEP_OUTPUT
);
7371 sd_add_dep (new_dep
, false);
7374 twin
= XEXP (twins
, 1);
7375 free_INSN_LIST_node (twins
);
7379 calc_priorities (priorities_roots
);
7380 priorities_roots
.release ();
7383 /* Extends and fills with zeros (only the new part) array pointed to by P. */
7385 xrecalloc (void *p
, size_t new_nmemb
, size_t old_nmemb
, size_t size
)
7387 gcc_assert (new_nmemb
>= old_nmemb
);
7388 p
= XRESIZEVAR (void, p
, new_nmemb
* size
);
7389 memset (((char *) p
) + old_nmemb
* size
, 0, (new_nmemb
- old_nmemb
) * size
);
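/* A typical use of xrecalloc () above is growing a per-insn array while
   keeping its old contents and zeroing only the newly added tail.  A small
   usage sketch (the toy_* names are hypothetical):  */
#if 0 /* Illustrative sketch only.  */
static char *toy_flags;
static size_t toy_n;

static void
toy_grow_flags (size_t new_n)
{
  toy_flags = (char *) xrecalloc (toy_flags, new_n, toy_n,
				  sizeof (*toy_flags));
  toy_n = new_n;
}
#endif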
7394 Find fallthru edge from PRED. */
7396 find_fallthru_edge_from (basic_block pred
)
7401 succ
= pred
->next_bb
;
7402 gcc_assert (succ
->prev_bb
== pred
);
7404 if (EDGE_COUNT (pred
->succs
) <= EDGE_COUNT (succ
->preds
))
7406 e
= find_fallthru_edge (pred
->succs
);
7410 gcc_assert (e
->dest
== succ
);
7416 e
= find_fallthru_edge (succ
->preds
);
7420 gcc_assert (e
->src
== pred
);
7428 /* Extend per basic block data structures. */
7430 sched_extend_bb (void)
7434 /* The following is done to keep current_sched_info->next_tail non null. */
7435 insn
= BB_END (EXIT_BLOCK_PTR
->prev_bb
);
7436 if (NEXT_INSN (insn
) == 0
7439 /* Don't emit a NOTE if it would end up before a BARRIER. */
7440 && !BARRIER_P (NEXT_INSN (insn
))))
7442 rtx note
= emit_note_after (NOTE_INSN_DELETED
, insn
);
7443 /* Make insn appear outside BB. */
7444 set_block_for_insn (note
, NULL
);
7445 BB_END (EXIT_BLOCK_PTR
->prev_bb
) = insn
;
7449 /* Init per basic block data structures. */
7451 sched_init_bbs (void)
7456 /* Initialize BEFORE_RECOVERY variable. */
7458 init_before_recovery (basic_block
*before_recovery_ptr
)
7463 last
= EXIT_BLOCK_PTR
->prev_bb
;
7464 e
= find_fallthru_edge_from (last
);
7468 /* We create two basic blocks:
7469 1. Single instruction block is inserted right after E->SRC
7471 2. Empty block right before EXIT_BLOCK.
7472 Between these two blocks recovery blocks will be emitted. */
7474 basic_block single
, empty
;
7477 /* If the fallthrough edge to exit we've found is from the block we've
7478 created before, don't do anything more. */
7479 if (last
== after_recovery
)
7482 adding_bb_to_current_region_p
= false;
7484 single
= sched_create_empty_bb (last
);
7485 empty
= sched_create_empty_bb (single
);
7487 /* Add new blocks to the root loop. */
7488 if (current_loops
!= NULL
)
7490 add_bb_to_loop (single
, (*current_loops
->larray
)[0]);
7491 add_bb_to_loop (empty
, (*current_loops
->larray
)[0]);
7494 single
->count
= last
->count
;
7495 empty
->count
= last
->count
;
7496 single
->frequency
= last
->frequency
;
7497 empty
->frequency
= last
->frequency
;
7498 BB_COPY_PARTITION (single
, last
);
7499 BB_COPY_PARTITION (empty
, last
);
7501 redirect_edge_succ (e
, single
);
7502 make_single_succ_edge (single
, empty
, 0);
7503 make_single_succ_edge (empty
, EXIT_BLOCK_PTR
, EDGE_FALLTHRU
);
7505 label
= block_label (empty
);
7506 x
= emit_jump_insn_after (gen_jump (label
), BB_END (single
));
7507 JUMP_LABEL (x
) = label
;
7508 LABEL_NUSES (label
)++;
7509 haifa_init_insn (x
);
7511 emit_barrier_after (x
);
7513 sched_init_only_bb (empty
, NULL
);
7514 sched_init_only_bb (single
, NULL
);
7517 adding_bb_to_current_region_p
= true;
7518 before_recovery
= single
;
7519 after_recovery
= empty
;
7521 if (before_recovery_ptr
)
7522 *before_recovery_ptr
= before_recovery
;
7524 if (sched_verbose
>= 2 && spec_info
->dump
)
7525 fprintf (spec_info
->dump
,
7526 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
7527 last
->index
, single
->index
, empty
->index
);
7530 before_recovery
= last
;
7533 /* Returns new recovery block. */
7535 sched_create_recovery_block (basic_block
*before_recovery_ptr
)
7541 haifa_recovery_bb_recently_added_p
= true;
7542 haifa_recovery_bb_ever_added_p
= true;
7544 init_before_recovery (before_recovery_ptr
);
7546 barrier
= get_last_bb_insn (before_recovery
);
7547 gcc_assert (BARRIER_P (barrier
));
7549 label
= emit_label_after (gen_label_rtx (), barrier
);
7551 rec
= create_basic_block (label
, label
, before_recovery
);
7553 /* A recovery block always ends with an unconditional jump. */
7554 emit_barrier_after (BB_END (rec
));
7556 if (BB_PARTITION (before_recovery
) != BB_UNPARTITIONED
)
7557 BB_SET_PARTITION (rec
, BB_COLD_PARTITION
);
7559 if (sched_verbose
&& spec_info
->dump
)
7560 fprintf (spec_info
->dump
, ";;\t\tGenerated recovery block rec%d\n",
7566 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
7567 and emit necessary jumps. */
7569 sched_create_recovery_edges (basic_block first_bb
, basic_block rec
,
7570 basic_block second_bb
)
7576 /* This is fixing of incoming edge. */
7577 /* ??? Which other flags should be specified? */
7578 if (BB_PARTITION (first_bb
) != BB_PARTITION (rec
))
7579 /* Partition type is the same, if it is "unpartitioned". */
7580 edge_flags
= EDGE_CROSSING
;
7584 make_edge (first_bb
, rec
, edge_flags
);
7585 label
= block_label (second_bb
);
7586 jump
= emit_jump_insn_after (gen_jump (label
), BB_END (rec
));
7587 JUMP_LABEL (jump
) = label
;
7588 LABEL_NUSES (label
)++;
7590 if (BB_PARTITION (second_bb
) != BB_PARTITION (rec
))
7591 /* Partition type is the same, if it is "unpartitioned". */
7593 /* Rewritten from cfgrtl.c. */
7594 if (flag_reorder_blocks_and_partition
7595 && targetm_common
.have_named_sections
)
7597 /* We don't need the same note for the check because
7598 any_condjump_p (check) == true. */
7599 add_reg_note (jump
, REG_CROSSING_JUMP
, NULL_RTX
);
7601 edge_flags
= EDGE_CROSSING
;
7606 make_single_succ_edge (rec
, second_bb
, edge_flags
);
7607 if (dom_info_available_p (CDI_DOMINATORS
))
7608 set_immediate_dominator (CDI_DOMINATORS
, rec
, first_bb
);
7611 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
7612 INSN is a simple check, that should be converted to branchy one. */
7614 create_check_block_twin (rtx insn
, bool mutate_p
)
7617 rtx label
, check
, twin
;
7619 sd_iterator_def sd_it
;
7621 dep_def _new_dep
, *new_dep
= &_new_dep
;
7624 gcc_assert (ORIG_PAT (insn
) != NULL_RTX
);
7627 todo_spec
= TODO_SPEC (insn
);
7630 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn
)
7631 && (TODO_SPEC (insn
) & SPECULATIVE
) == 0);
7633 todo_spec
= CHECK_SPEC (insn
);
7636 todo_spec
&= SPECULATIVE
;
7638 /* Create recovery block. */
7639 if (mutate_p
|| targetm
.sched
.needs_block_p (todo_spec
))
7641 rec
= sched_create_recovery_block (NULL
);
7642 label
= BB_HEAD (rec
);
7646 rec
= EXIT_BLOCK_PTR
;
7651 check
= targetm
.sched
.gen_spec_check (insn
, label
, todo_spec
);
7653 if (rec
!= EXIT_BLOCK_PTR
)
7655 /* To have mem_reg alive at the beginning of second_bb,
7656 we emit check BEFORE insn, so insn after splitting
7657 insn will be at the beginning of second_bb, which will
7658 provide us with the correct life information. */
7659 check
= emit_jump_insn_before (check
, insn
);
7660 JUMP_LABEL (check
) = label
;
7661 LABEL_NUSES (label
)++;
7664 check
= emit_insn_before (check
, insn
);
7666 /* Extend data structures. */
7667 haifa_init_insn (check
);
7669 /* CHECK is being added to current region. Extend ready list. */
7670 gcc_assert (sched_ready_n_insns
!= -1);
7671 sched_extend_ready_list (sched_ready_n_insns
+ 1);
7673 if (current_sched_info
->add_remove_insn
)
7674 current_sched_info
->add_remove_insn (insn
, 0);
7676 RECOVERY_BLOCK (check
) = rec
;
7678 if (sched_verbose
&& spec_info
->dump
)
7679 fprintf (spec_info
->dump
, ";;\t\tGenerated check insn : %s\n",
7680 (*current_sched_info
->print_insn
) (check
, 0));
7682 gcc_assert (ORIG_PAT (insn
));
7684 /* Initialize TWIN (twin is a duplicate of original instruction
7685 in the recovery block). */
7686 if (rec
!= EXIT_BLOCK_PTR
)
7688 sd_iterator_def sd_it
;
7691 FOR_EACH_DEP (insn
, SD_LIST_RES_BACK
, sd_it
, dep
)
7692 if ((DEP_STATUS (dep
) & DEP_OUTPUT
) != 0)
7694 struct _dep _dep2
, *dep2
= &_dep2
;
7696 init_dep (dep2
, DEP_PRO (dep
), check
, REG_DEP_TRUE
);
7698 sd_add_dep (dep2
, true);
7701 twin
= emit_insn_after (ORIG_PAT (insn
), BB_END (rec
));
7702 haifa_init_insn (twin
);
7704 if (sched_verbose
&& spec_info
->dump
)
7705 /* INSN_BB (insn) isn't determined for twin insns yet.
7706 So we can't use current_sched_info->print_insn. */
7707 fprintf (spec_info
->dump
, ";;\t\tGenerated twin insn : %d/rec%d\n",
7708 INSN_UID (twin
), rec
->index
);
7712 ORIG_PAT (check
) = ORIG_PAT (insn
);
7713 HAS_INTERNAL_DEP (check
) = 1;
7715 /* ??? We probably should change all OUTPUT dependencies to
7719 /* Copy all resolved back dependencies of INSN to TWIN. This will
7720 provide correct value for INSN_TICK (TWIN). */
7721 sd_copy_back_deps (twin
, insn
, true);
7723 if (rec
!= EXIT_BLOCK_PTR
)
7724 /* In case of branchy check, fix CFG. */
7726 basic_block first_bb
, second_bb
;
7729 first_bb
= BLOCK_FOR_INSN (check
);
7730 second_bb
= sched_split_block (first_bb
, check
);
7732 sched_create_recovery_edges (first_bb
, rec
, second_bb
);
7734 sched_init_only_bb (second_bb
, first_bb
);
7735 sched_init_only_bb (rec
, EXIT_BLOCK_PTR
);
7737 jump
= BB_END (rec
);
7738 haifa_init_insn (jump
);
7741 /* Move backward dependences from INSN to CHECK and
7742 move forward dependences from INSN to TWIN. */
7744 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
7745 FOR_EACH_DEP (insn
, SD_LIST_BACK
, sd_it
, dep
)
7747 rtx pro
= DEP_PRO (dep
);
7750 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
7751 check --TRUE--> producer ??? or ANTI ???
7752 twin --TRUE--> producer
7753 twin --ANTI--> check
7755 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
7756 check --ANTI--> producer
7757 twin --ANTI--> producer
7758 twin --ANTI--> check
7760 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
7761 check ~~TRUE~~> producer
7762 twin ~~TRUE~~> producer
7763 twin --ANTI--> check */
7765 ds
= DEP_STATUS (dep
);
7767 if (ds
& BEGIN_SPEC
)
7769 gcc_assert (!mutate_p
);
7773 init_dep_1 (new_dep
, pro
, check
, DEP_TYPE (dep
), ds
);
7774 sd_add_dep (new_dep
, false);
7776 if (rec
!= EXIT_BLOCK_PTR
)
7778 DEP_CON (new_dep
) = twin
;
7779 sd_add_dep (new_dep
, false);
7783 /* Second, remove backward dependencies of INSN. */
7784 for (sd_it
= sd_iterator_start (insn
, SD_LIST_SPEC_BACK
);
7785 sd_iterator_cond (&sd_it
, &dep
);)
7787 if ((DEP_STATUS (dep
) & BEGIN_SPEC
)
7789 /* We can delete this dep because we overcome it with
7790 BEGIN_SPECULATION. */
7791 sd_delete_dep (sd_it
);
7793 sd_iterator_next (&sd_it
);
7796 /* Future Speculations. Determine what BE_IN speculations will be like. */
7799 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
7802 gcc_assert (!DONE_SPEC (insn
));
7806 ds_t ts
= TODO_SPEC (insn
);
7808 DONE_SPEC (insn
) = ts
& BEGIN_SPEC
;
7809 CHECK_SPEC (check
) = ts
& BEGIN_SPEC
;
7811 /* Luckiness of future speculations solely depends upon initial
7812 BEGIN speculation. */
7813 if (ts
& BEGIN_DATA
)
7814 fs
= set_dep_weak (fs
, BE_IN_DATA
, get_dep_weak (ts
, BEGIN_DATA
));
7815 if (ts
& BEGIN_CONTROL
)
7816 fs
= set_dep_weak (fs
, BE_IN_CONTROL
,
7817 get_dep_weak (ts
, BEGIN_CONTROL
));
7820 CHECK_SPEC (check
) = CHECK_SPEC (insn
);
7822 /* Future speculations: call the helper. */
7823 process_insn_forw_deps_be_in_spec (insn
, twin
, fs
);
7825 if (rec
!= EXIT_BLOCK_PTR
)
	  /* Which types of dependencies we should use here is, generally, a
	     machine-dependent question...  But, for now,
7833 init_dep (new_dep
, insn
, check
, REG_DEP_TRUE
);
7834 sd_add_dep (new_dep
, false);
7836 init_dep (new_dep
, insn
, twin
, REG_DEP_OUTPUT
);
7837 sd_add_dep (new_dep
, false);
7841 if (spec_info
->dump
)
7842 fprintf (spec_info
->dump
, ";;\t\tRemoved simple check : %s\n",
7843 (*current_sched_info
->print_insn
) (insn
, 0));
7845 /* Remove all dependencies of the INSN. */
7847 sd_it
= sd_iterator_start (insn
, (SD_LIST_FORW
7849 | SD_LIST_RES_BACK
));
7850 while (sd_iterator_cond (&sd_it
, &dep
))
7851 sd_delete_dep (sd_it
);
7854 /* If former check (INSN) already was moved to the ready (or queue)
7855 list, add new check (CHECK) there too. */
7856 if (QUEUE_INDEX (insn
) != QUEUE_NOWHERE
)
7859 /* Remove old check from instruction stream and free its
7861 sched_remove_insn (insn
);
7864 init_dep (new_dep
, check
, twin
, REG_DEP_ANTI
);
7865 sd_add_dep (new_dep
, false);
7869 init_dep_1 (new_dep
, insn
, check
, REG_DEP_TRUE
, DEP_TRUE
| DEP_OUTPUT
);
7870 sd_add_dep (new_dep
, false);
7874 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
7875 because it'll be done later in add_to_speculative_block. */
7877 rtx_vec_t priorities_roots
= rtx_vec_t();
7879 clear_priorities (twin
, &priorities_roots
);
7880 calc_priorities (priorities_roots
);
7881 priorities_roots
.release ();
7885 /* Removes dependency between instructions in the recovery block REC
7886 and usual region instructions. It keeps inner dependences so it
7887 won't be necessary to recompute them. */
7889 fix_recovery_deps (basic_block rec
)
7891 rtx note
, insn
, jump
, ready_list
= 0;
7892 bitmap_head in_ready
;
7895 bitmap_initialize (&in_ready
, 0);
7897 /* NOTE - a basic block note. */
7898 note
= NEXT_INSN (BB_HEAD (rec
));
7899 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note
));
7900 insn
= BB_END (rec
);
7901 gcc_assert (JUMP_P (insn
));
7902 insn
= PREV_INSN (insn
);
7906 sd_iterator_def sd_it
;
7909 for (sd_it
= sd_iterator_start (insn
, SD_LIST_FORW
);
7910 sd_iterator_cond (&sd_it
, &dep
);)
7912 rtx consumer
= DEP_CON (dep
);
7914 if (BLOCK_FOR_INSN (consumer
) != rec
)
7916 sd_delete_dep (sd_it
);
7918 if (bitmap_set_bit (&in_ready
, INSN_LUID (consumer
)))
7919 ready_list
= alloc_INSN_LIST (consumer
, ready_list
);
7923 gcc_assert ((DEP_STATUS (dep
) & DEP_TYPES
) == DEP_TRUE
);
7925 sd_iterator_next (&sd_it
);
7929 insn
= PREV_INSN (insn
);
7931 while (insn
!= note
);
7933 bitmap_clear (&in_ready
);
7935 /* Try to add instructions to the ready or queue list. */
7936 for (link
= ready_list
; link
; link
= XEXP (link
, 1))
7937 try_ready (XEXP (link
, 0));
7938 free_INSN_LIST_list (&ready_list
);
7940 /* Fixing jump's dependences. */
7941 insn
= BB_HEAD (rec
);
7942 jump
= BB_END (rec
);
7944 gcc_assert (LABEL_P (insn
));
7945 insn
= NEXT_INSN (insn
);
7947 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn
));
7948 add_jump_dependencies (insn
, jump
);
7951 /* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
7952 instruction data. */
7954 haifa_change_pattern (rtx insn
, rtx new_pat
)
7958 t
= validate_change (insn
, &PATTERN (insn
), new_pat
, 0);
7962 update_insn_after_change (insn
);
7966 /* -1 - can't speculate,
7967 0 - for speculation with REQUEST mode it is OK to use
7968 current instruction pattern,
7969 1 - need to change pattern for *NEW_PAT to be speculative. */
7971 sched_speculate_insn (rtx insn
, ds_t request
, rtx
*new_pat
)
7973 gcc_assert (current_sched_info
->flags
& DO_SPECULATION
7974 && (request
& SPECULATIVE
)
7975 && sched_insn_is_legitimate_for_speculation_p (insn
, request
));
7977 if ((request
& spec_info
->mask
) != request
)
7980 if (request
& BE_IN_SPEC
7981 && !(request
& BEGIN_SPEC
))
7984 return targetm
.sched
.speculate_insn (insn
, request
, new_pat
);
7988 haifa_speculate_insn (rtx insn
, ds_t request
, rtx
*new_pat
)
7990 gcc_assert (sched_deps_info
->generate_spec_deps
7991 && !IS_SPECULATION_CHECK_P (insn
));
7993 if (HAS_INTERNAL_DEP (insn
)
7994 || SCHED_GROUP_P (insn
))
7997 return sched_speculate_insn (insn
, request
, new_pat
);
8000 /* Print some information about block BB, which starts with HEAD and
8001 ends with TAIL, before scheduling it.
8002 I is zero, if scheduler is about to start with the fresh ebb. */
8004 dump_new_block_header (int i
, basic_block bb
, rtx head
, rtx tail
)
8007 fprintf (sched_dump
,
8008 ";; ======================================================\n");
8010 fprintf (sched_dump
,
8011 ";; =====================ADVANCING TO=====================\n");
8012 fprintf (sched_dump
,
8013 ";; -- basic block %d from %d to %d -- %s reload\n",
8014 bb
->index
, INSN_UID (head
), INSN_UID (tail
),
8015 (reload_completed
? "after" : "before"));
8016 fprintf (sched_dump
,
8017 ";; ======================================================\n");
8018 fprintf (sched_dump
, "\n");
/* Unlink basic block notes and labels and save them, so they
   can be easily restored.  We unlink basic block notes in EBB to
   provide back-compatibility with the previous code, as target backends
   assume that there'll be only instructions between
   current_sched_info->{head and tail}.  We restore these notes as soon
   as possible.
   FIRST (LAST) is the first (last) basic block in the ebb.
   NB: In usual case (FIRST == LAST) nothing is really done.  */
unlink_bb_notes (basic_block first, basic_block last)
8032 /* We DON'T unlink basic block notes of the first block in the ebb. */
8036 bb_header
= XNEWVEC (rtx
, last_basic_block
);
8038 /* Make a sentinel. */
8039 if (last
->next_bb
!= EXIT_BLOCK_PTR
)
8040 bb_header
[last
->next_bb
->index
] = 0;
8042 first
= first
->next_bb
;
8045 rtx prev
, label
, note
, next
;
8047 label
= BB_HEAD (last
);
8048 if (LABEL_P (label
))
8049 note
= NEXT_INSN (label
);
8052 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note
));
8054 prev
= PREV_INSN (label
);
8055 next
= NEXT_INSN (note
);
8056 gcc_assert (prev
&& next
);
8058 NEXT_INSN (prev
) = next
;
8059 PREV_INSN (next
) = prev
;
8061 bb_header
[last
->index
] = label
;
8066 last
= last
->prev_bb
;
8071 /* Restore basic block notes.
8072 FIRST is the first basic block in the ebb. */
8074 restore_bb_notes (basic_block first
)
8079 /* We DON'T unlink basic block notes of the first block in the ebb. */
8080 first
= first
->next_bb
;
8081 /* Remember: FIRST is actually a second basic block in the ebb. */
8083 while (first
!= EXIT_BLOCK_PTR
8084 && bb_header
[first
->index
])
8086 rtx prev
, label
, note
, next
;
8088 label
= bb_header
[first
->index
];
8089 prev
= PREV_INSN (label
);
8090 next
= NEXT_INSN (prev
);
8092 if (LABEL_P (label
))
8093 note
= NEXT_INSN (label
);
8096 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note
));
8098 bb_header
[first
->index
] = 0;
8100 NEXT_INSN (prev
) = label
;
8101 NEXT_INSN (note
) = next
;
8102 PREV_INSN (next
) = note
;
8104 first
= first
->next_bb
;
8112 Fix CFG after both in- and inter-block movement of
8113 control_flow_insn_p JUMP. */
8115 fix_jump_move (rtx jump
)
8117 basic_block bb
, jump_bb
, jump_bb_next
;
8119 bb
= BLOCK_FOR_INSN (PREV_INSN (jump
));
8120 jump_bb
= BLOCK_FOR_INSN (jump
);
8121 jump_bb_next
= jump_bb
->next_bb
;
8123 gcc_assert (common_sched_info
->sched_pass_id
== SCHED_EBB_PASS
8124 || IS_SPECULATION_BRANCHY_CHECK_P (jump
));
8126 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next
)))
8127 /* if jump_bb_next is not empty. */
8128 BB_END (jump_bb
) = BB_END (jump_bb_next
);
8130 if (BB_END (bb
) != PREV_INSN (jump
))
8131 /* Then there are instructions after jump that should be placed
8133 BB_END (jump_bb_next
) = BB_END (bb
);
8135 /* Otherwise jump_bb_next is empty. */
8136 BB_END (jump_bb_next
) = NEXT_INSN (BB_HEAD (jump_bb_next
));
8138 /* To make assertion in move_insn happy. */
8139 BB_END (bb
) = PREV_INSN (jump
);
8141 update_bb_for_insn (jump_bb_next
);
8144 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
8146 move_block_after_check (rtx jump
)
8148 basic_block bb
, jump_bb
, jump_bb_next
;
8149 vec
<edge
, va_gc
> *t
;
8151 bb
= BLOCK_FOR_INSN (PREV_INSN (jump
));
8152 jump_bb
= BLOCK_FOR_INSN (jump
);
8153 jump_bb_next
= jump_bb
->next_bb
;
8155 update_bb_for_insn (jump_bb
);
8157 gcc_assert (IS_SPECULATION_CHECK_P (jump
)
8158 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next
)));
8160 unlink_block (jump_bb_next
);
8161 link_block (jump_bb_next
, bb
);
8165 move_succs (&(jump_bb
->succs
), bb
);
8166 move_succs (&(jump_bb_next
->succs
), jump_bb
);
8167 move_succs (&t
, jump_bb_next
);
8169 df_mark_solutions_dirty ();
8171 common_sched_info
->fix_recovery_cfg
8172 (bb
->index
, jump_bb
->index
, jump_bb_next
->index
);
8175 /* Helper function for move_block_after_check.
8176 This functions attaches edge vector pointed to by SUCCSP to
8179 move_succs (vec
<edge
, va_gc
> **succsp
, basic_block to
)
8184 gcc_assert (to
->succs
== 0);
8186 to
->succs
= *succsp
;
8188 FOR_EACH_EDGE (e
, ei
, to
->succs
)
/* Remove INSN from the instruction stream.
   INSN should not have any dependencies.  */
sched_remove_insn (rtx insn)
8199 sd_finish_insn (insn
);
8201 change_queue_index (insn
, QUEUE_NOWHERE
);
8202 current_sched_info
->add_remove_insn (insn
, 1);
8206 /* Clear priorities of all instructions, that are forward dependent on INSN.
8207 Store in vector pointed to by ROOTS_PTR insns on which priority () should
8208 be invoked to initialize all cleared priorities. */
8210 clear_priorities (rtx insn
, rtx_vec_t
*roots_ptr
)
8212 sd_iterator_def sd_it
;
8214 bool insn_is_root_p
= true;
8216 gcc_assert (QUEUE_INDEX (insn
) != QUEUE_SCHEDULED
);
8218 FOR_EACH_DEP (insn
, SD_LIST_BACK
, sd_it
, dep
)
8220 rtx pro
= DEP_PRO (dep
);
8222 if (INSN_PRIORITY_STATUS (pro
) >= 0
8223 && QUEUE_INDEX (insn
) != QUEUE_SCHEDULED
)
8225 /* If DEP doesn't contribute to priority then INSN itself should
8226 be added to priority roots. */
8227 if (contributes_to_priority_p (dep
))
8228 insn_is_root_p
= false;
8230 INSN_PRIORITY_STATUS (pro
) = -1;
8231 clear_priorities (pro
, roots_ptr
);
8236 roots_ptr
->safe_push (insn
);
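/* clear_priorities () above walks backwards along dependences, invalidating
   cached priorities of producers and collecting "root" insns from which
   calc_priorities () later recomputes everything lazily.  A standalone
   sketch of the invalidation walk (not GCC code; toy_* names are
   hypothetical and root collection is omitted):  */
#if 0 /* Illustrative sketch only.  */
struct toy_node
{
  int n_pred;                 /* number of producers */
  struct toy_node *pred[4];   /* the producers */
  int prio_known;             /* nonzero while the cached priority is valid */
};

static void
toy_clear_priorities (struct toy_node *n)
{
  int i;

  for (i = 0; i < n->n_pred; i++)
    if (n->pred[i]->prio_known)
      {
	n->pred[i]->prio_known = 0;
	toy_clear_priorities (n->pred[i]);
      }
}
#endif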
8239 /* Recompute priorities of instructions, whose priorities might have been
8240 changed. ROOTS is a vector of instructions whose priority computation will
8241 trigger initialization of all cleared priorities. */
8243 calc_priorities (rtx_vec_t roots
)
8248 FOR_EACH_VEC_ELT (roots
, i
, insn
)
/* Add dependences between JUMP and other instructions in the recovery
   block.  INSN is the first insn in the recovery block.  */
add_jump_dependencies (rtx insn, rtx jump)
8260 insn
= NEXT_INSN (insn
);
8264 if (dep_list_size (insn
, SD_LIST_FORW
) == 0)
8266 dep_def _new_dep
, *new_dep
= &_new_dep
;
8268 init_dep (new_dep
, insn
, jump
, REG_DEP_ANTI
);
8269 sd_add_dep (new_dep
, false);
8274 gcc_assert (!sd_lists_empty_p (jump
, SD_LIST_BACK
));
8277 /* Extend data structures for logical insn UID. */
8279 sched_extend_luids (void)
8281 int new_luids_max_uid
= get_max_uid () + 1;
8283 sched_luids
.safe_grow_cleared (new_luids_max_uid
);
8286 /* Initialize LUID for INSN. */
8288 sched_init_insn_luid (rtx insn
)
8290 int i
= INSN_P (insn
) ? 1 : common_sched_info
->luid_for_non_insn (insn
);
8295 luid
= sched_max_luid
;
8296 sched_max_luid
+= i
;
8301 SET_INSN_LUID (insn
, luid
);
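/* sched_init_insn_luid () above hands out logical uids sequentially; an
   object that does not need its own luid does not advance the counter.  A
   standalone sketch of that allocation policy (not GCC code; toy_* names
   are hypothetical):  */
#if 0 /* Illustrative sketch only.  */
static int toy_max_luid = 1;

/* WIDTH is 1 for a real insn and 0 for a note that does not need its own
   luid; the current counter value is returned either way, but only real
   insns advance it.  */
static int
toy_assign_luid (int width)
{
  int luid = toy_max_luid;
  toy_max_luid += width;
  return luid;
}
#endif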
8304 /* Initialize luids for BBS.
8305 The hook common_sched_info->luid_for_non_insn () is used to determine
8306 if notes, labels, etc. need luids. */
8308 sched_init_luids (bb_vec_t bbs
)
8313 sched_extend_luids ();
8314 FOR_EACH_VEC_ELT (bbs
, i
, bb
)
8318 FOR_BB_INSNS (bb
, insn
)
8319 sched_init_insn_luid (insn
);
8325 sched_finish_luids (void)
8327 sched_luids
.release ();
8331 /* Return logical uid of INSN. Helpful while debugging. */
8333 insn_luid (rtx insn
)
8335 return INSN_LUID (insn
);
8338 /* Extend per insn data in the target. */
8340 sched_extend_target (void)
8342 if (targetm
.sched
.h_i_d_extended
)
8343 targetm
.sched
.h_i_d_extended ();
8346 /* Extend global scheduler structures (those, that live across calls to
8347 schedule_block) to include information about just emitted INSN. */
8351 int reserve
= (get_max_uid () + 1 - h_i_d
.length ());
8353 && ! h_i_d
.space (reserve
))
8355 h_i_d
.safe_grow_cleared (3 * get_max_uid () / 2);
8356 sched_extend_target ();
8360 /* Initialize h_i_d entry of the INSN with default values.
8361 Values, that are not explicitly initialized here, hold zero. */
8363 init_h_i_d (rtx insn
)
8365 if (INSN_LUID (insn
) > 0)
8367 INSN_COST (insn
) = -1;
8368 QUEUE_INDEX (insn
) = QUEUE_NOWHERE
;
8369 INSN_TICK (insn
) = INVALID_TICK
;
8370 INSN_EXACT_TICK (insn
) = INVALID_TICK
;
8371 INTER_TICK (insn
) = INVALID_TICK
;
8372 TODO_SPEC (insn
) = HARD_DEP
;
8376 /* Initialize haifa_insn_data for BBS. */
8378 haifa_init_h_i_d (bb_vec_t bbs
)
8384 FOR_EACH_VEC_ELT (bbs
, i
, bb
)
8388 FOR_BB_INSNS (bb
, insn
)
/* Finalize haifa_insn_data.  */
haifa_finish_h_i_d (void)
  haifa_insn_data_t data;
  struct reg_use_data *use, *next;

  FOR_EACH_VEC_ELT (h_i_d, i, data)
    {
      free (data->max_reg_pressure);
      free (data->reg_pressure);
      for (use = data->reg_use_list; use != NULL; use = next)
	{
	  next = use->next_insn_use;
	  free (use);
	}
    }
8414 /* Init data for the new insn INSN. */
8416 haifa_init_insn (rtx insn
)
8418 gcc_assert (insn
!= NULL
);
8420 sched_extend_luids ();
8421 sched_init_insn_luid (insn
);
8422 sched_extend_target ();
8423 sched_deps_init (false);
8427 if (adding_bb_to_current_region_p
)
8429 sd_init_insn (insn
);
8431 /* Extend dependency caches by one element. */
8432 extend_dependency_caches (1, false);
8434 if (sched_pressure
!= SCHED_PRESSURE_NONE
)
8435 init_insn_reg_pressure_info (insn
);
8438 /* Init data for the new basic block BB which comes after AFTER. */
8440 haifa_init_only_bb (basic_block bb
, basic_block after
)
8442 gcc_assert (bb
!= NULL
);
8446 if (common_sched_info
->add_block
)
8447 /* This changes only data structures of the front-end. */
8448 common_sched_info
->add_block (bb
, after
);
8451 /* A generic version of sched_split_block (). */
8453 sched_split_block_1 (basic_block first_bb
, rtx after
)
8457 e
= split_block (first_bb
, after
);
8458 gcc_assert (e
->src
== first_bb
);
8460 /* sched_split_block emits note if *check == BB_END. Probably it
8461 is better to rip that note off. */
8466 /* A generic version of sched_create_empty_bb (). */
8468 sched_create_empty_bb_1 (basic_block after
)
8470 return create_empty_bb (after
);
8473 /* Insert PAT as an INSN into the schedule and update the necessary data
8474 structures to account for it. */
8476 sched_emit_insn (rtx pat
)
8478 rtx insn
= emit_insn_before (pat
, nonscheduled_insns_begin
);
8479 haifa_init_insn (insn
);
8481 if (current_sched_info
->add_remove_insn
)
8482 current_sched_info
->add_remove_insn (insn
, 0);
8484 (*current_sched_info
->begin_schedule_ready
) (insn
);
8485 scheduled_insns
.safe_push (insn
);
8487 last_scheduled_insn
= insn
;
8491 /* This function returns a candidate satisfying dispatch constraints from
8495 ready_remove_first_dispatch (struct ready_list
*ready
)
8498 rtx insn
= ready_element (ready
, 0);
8500 if (ready
->n_ready
== 1
8501 || INSN_CODE (insn
) < 0
8503 || !active_insn_p (insn
)
8504 || targetm
.sched
.dispatch (insn
, FITS_DISPATCH_WINDOW
))
8505 return ready_remove_first (ready
);
8507 for (i
= 1; i
< ready
->n_ready
; i
++)
8509 insn
= ready_element (ready
, i
);
8511 if (INSN_CODE (insn
) < 0
8513 || !active_insn_p (insn
))
8516 if (targetm
.sched
.dispatch (insn
, FITS_DISPATCH_WINDOW
))
8518 /* Return ith element of ready. */
8519 insn
= ready_remove (ready
, i
);
8524 if (targetm
.sched
.dispatch (NULL_RTX
, DISPATCH_VIOLATION
))
8525 return ready_remove_first (ready
);
8527 for (i
= 1; i
< ready
->n_ready
; i
++)
8529 insn
= ready_element (ready
, i
);
8531 if (INSN_CODE (insn
) < 0
8533 || !active_insn_p (insn
))
8536 /* Return i-th element of ready. */
8537 if (targetm
.sched
.dispatch (insn
, IS_CMP
))
8538 return ready_remove (ready
, i
);
8541 return ready_remove_first (ready
);
/* Get the number of ready insns in the ready list.  */
int
number_in_ready (void)
{
  return ready.n_ready;
}
/* Get the I-th element of the ready list.  */
get_ready_element (int i)
8557 return ready_element (&ready
, i
);
8560 #endif /* INSN_SCHEDULING */