/* Instruction scheduling pass.
   Copyright (C) 1992-2015 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for
   the normal instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.
   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.
   The following list shows the order in which we want to break ties
   among insns in the ready list (an illustrative, stand-alone
   comparator sketch follows this comment block):

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block upon interblock motion, ties broken by
   4.  prefer useful upon speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, or finally
   7.  choose the insn which has the most insns dependent on it.
   8.  choose insn with lowest UID.
   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using insn backward dependences
   INSN_BACK_DEPS.  INSN_BACK_DEPS are translated into forward dependences
   INSN_FORW_DEPS for the purpose of forward list scheduling.
   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   lifetimes.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.
   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live registers constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */
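/* The following is a minimal, self-contained sketch of the kind of
   tie-breaking comparator described in the comment above.  It is kept out
   of the build with #if 0; the struct fields (path_to_end, pressure_delta,
   uid) and the function names are hypothetical and only mirror rules 1, 2
   and 8 above -- they do not correspond to the real rank_for_schedule,
   which works on rtx_insn * and uses many more criteria.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct toy_insn
{
  int path_to_end;      /* Longest path to the end of the bb (rule 1).  */
  int pressure_delta;   /* Contribution to register pressure (rule 2).  */
  int uid;              /* Final tie-breaker (rule 8).  */
};

/* qsort comparator: "better" insns sort toward the front.  */
static int
toy_rank_for_schedule (const void *a, const void *b)
{
  const struct toy_insn *x = (const struct toy_insn *) a;
  const struct toy_insn *y = (const struct toy_insn *) b;

  if (x->path_to_end != y->path_to_end)
    return y->path_to_end - x->path_to_end;        /* longer path wins */
  if (x->pressure_delta != y->pressure_delta)
    return x->pressure_delta - y->pressure_delta;  /* lower pressure wins */
  return x->uid - y->uid;                          /* lowest UID wins */
}

int
main (void)
{
  struct toy_insn ready[3] = { { 4, 1, 7 }, { 6, 0, 3 }, { 6, 2, 1 } };
  qsort (ready, 3, sizeof ready[0], toy_rank_for_schedule);
  for (int i = 0; i < 3; i++)
    printf ("uid %d\n", ready[i].uid);  /* prints 3, 1, 7 */
  return 0;
}
#endif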
#include "coretypes.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "hash-set.h"
#include "machmode.h"
#include "function.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "dominance.h"
#include "cfgbuild.h"
#include "basic-block.h"
#include "sched-int.h"
#include "common/common-target.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "hash-table.h"
#include "dumpfile.h"
#ifdef INSN_SCHEDULING

/* True if we do register pressure relief through live-range
   shrinkage.  */
static bool live_range_shrinkage_p;

/* Switch on live range shrinkage.  */
void
initialize_live_range_shrinkage (void)
{
  live_range_shrinkage_p = true;
}

/* Switch off live range shrinkage.  */
void
finish_live_range_shrinkage (void)
{
  live_range_shrinkage_p = false;
}
/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */
int issue_rate;

/* This can be set to true by a backend if the scheduler should not
   enable a DCE pass.  */
bool sched_no_dce;

/* The current initiation interval used when modulo scheduling.  */
static int modulo_ii;

/* The maximum number of stages we are prepared to handle.  */
static int modulo_max_stages;

/* The number of insns that exist in each iteration of the loop.  We use this
   to detect when we've scheduled all insns from the first iteration.  */
static int modulo_n_insns;

/* The current count of insns in the first iteration of the loop that have
   already been scheduled.  */
static int modulo_insns_scheduled;

/* The maximum uid of insns from the first iteration of the loop.  */
static int modulo_iter0_max_uid;

/* The number of times we should attempt to backtrack when modulo scheduling.
   Decreased each time we have to backtrack.  */
static int modulo_backtracks_left;

/* The stage in which the last insn from the original loop was
   scheduled.  */
static int modulo_last_stage;
/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N>0 and no -DSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */
int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dRS).  */
FILE *sched_dump = 0;

/* This is a placeholder for the scheduler parameters common
   to all schedulers.  */
struct common_sched_info_def *common_sched_info;
#define INSN_TICK(INSN) (HID (INSN)->tick)
#define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
#define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
#define INTER_TICK(INSN) (HID (INSN)->inter_tick)
#define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
#define SHADOW_P(INSN) (HID (INSN)->shadow_p)
#define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
/* Cached cost of the instruction.  Use insn_cost to get cost of the
   insn.  -1 here means that the field is not initialized.  */
#define INSN_COST(INSN) (HID (INSN)->cost)

/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)
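/* Worked example of the tick sentinels above, kept out of the build with
   #if 0.  The queue index value 63 is invented for illustration; the real
   max_insn_queue_index depends on the target's pipeline description.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int toy_max_insn_queue_index = 63;

  /* INVALID_TICK lies just outside the valid range, one below MIN_TICK,
     so a tick equal to it unambiguously means "recompute from scratch".  */
  printf ("INVALID_TICK = %d\n", -(toy_max_insn_queue_index + 1)); /* -64 */
  printf ("MIN_TICK     = %d\n", -toy_max_insn_queue_index);       /* -63 */
  return 0;
}
#endif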
/* Original order of insns in the ready list.
   Used to keep order of normal insns while separating DEBUG_INSNs.  */
#define INSN_RFS_DEBUG_ORIG_ORDER(INSN) (HID (INSN)->rfs_debug_orig_order)

/* The deciding reason for INSN's place in the ready list.  */
#define INSN_LAST_RFS_WIN(INSN) (HID (INSN)->last_rfs_win)
/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
rtx_insn *note_list;

static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
spec_info_t spec_info = NULL;
/* True if a recovery block was added during scheduling of the current block.
   Used to determine if we need to fix INSN_TICKs.  */
static bool haifa_recovery_bb_recently_added_p;

/* True if a recovery block was added during this scheduling pass.
   Used to determine if we should have empty memory pools of dependencies
   after finishing the current region.  */
bool haifa_recovery_bb_ever_added_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx_insn **bb_header = 0;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;

/* Basic block just before the EXIT_BLOCK and after recovery, if we have
   created a recovery block.  */
basic_block after_recovery;

/* FALSE if we add bb to another region, so we don't need to initialize it.  */
bool adding_bb_to_current_region_p = true;
/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
   unscheduled insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in 'queue_to_insn' as time
   passes or stalls are introduced.  */
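/* A minimal, self-contained sketch of the Pending/Queued/Ready/Scheduled
   state machine described above, kept out of the build with #if 0.  The
   types and the function below are purely illustrative; the real
   transitions are implemented by schedule_block, schedule_insn and
   queue_to_ready.  */
#if 0
#include <stdio.h>

enum toy_state { TOY_PENDING, TOY_QUEUED, TOY_READY, TOY_SCHEDULED };

struct toy_insn
{
  enum toy_state state;
  int ready_tick;	/* Cycle at which a queued insn becomes ready.  */
};

/* Advance one cycle: queued insns whose time has come become ready.  */
static void
toy_advance_cycle (struct toy_insn *insns, int n, int clock)
{
  for (int i = 0; i < n; i++)
    if (insns[i].state == TOY_QUEUED && insns[i].ready_tick <= clock)
      insns[i].state = TOY_READY;
}

int
main (void)
{
  struct toy_insn insns[2] = { { TOY_QUEUED, 2 }, { TOY_READY, 0 } };
  for (int clock = 0; clock < 3; clock++)
    toy_advance_cycle (insns, 2, clock);
  printf ("%d %d\n", insns[0].state, insns[1].state); /* both now TOY_READY */
  return 0;
}
#endif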
/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than maximal time of instruction execution computed by genattr.c on
   the base maximal time of functional unit reservations and getting a
   result.  This is the longest time an insn may be queued.  */

static rtx_insn_list **insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
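/* Minimal demonstration of the wrap-around arithmetic used by NEXT_Q and
   NEXT_Q_AFTER above, kept out of the build with #if 0.  The value 63 for
   the queue index mask is chosen purely for illustration; the real
   max_insn_queue_index comes from the target's pipeline description.  */
#if 0
#include <stdio.h>

int
main (void)
{
  const int toy_max_index = 63;	/* a power of two minus one */
  int slot = 62;

  /* Masking with a power-of-two-minus-one value makes the index wrap:
     62 -> 63 -> 0 -> 1 ...  */
  for (int i = 0; i < 4; i++)
    {
      printf ("%d ", slot);
      slot = (slot + 1) & toy_max_index;
    }
  printf ("\n");		/* prints: 62 63 0 1 */
  return 0;
}
#endif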
#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in
   queue, nor ready list.
   QUEUE_READY     - INSN is in ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */

#define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
/* The following variable value refers to all current and future
   reservations of the processor units.  */
state_t curr_state;

/* The following variable value is the size of memory representing all
   current and future reservations of the processor units.  */
size_t dfa_state_size;

/* The following array is used to find the best insn from ready when
   the automaton pipeline interface is used.  */
signed char *ready_try = NULL;

/* The ready list.  */
struct ready_list ready = {NULL, 0, 0, 0, 0};

/* The pointer to the ready list (to be removed).  */
static struct ready_list *readyp = &ready;
/* Scheduling clock.  */
static int clock_var;

/* Clock at which the previous instruction was issued.  */
static int last_clock_var;

/* Set to true if, when queuing a shadow insn, we discover that it would be
   scheduled too late.  */
static bool must_backtrack;

/* The following variable value is the number of essential insns issued on
   the current cycle.  An insn is essential if it changes the processor's
   state.  */
int cycle_issued_insns;

/* This records the actual schedule.  It is built up during the main phase
   of schedule_block, and afterwards used to reorder the insns in the RTL.  */
static vec<rtx_insn *> scheduled_insns;
static int may_trap_exp (const_rtx, int);

/* Nonzero iff the address is comprised from at most 1 register.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)						\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))

/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  */
static int haifa_classify_insn (const_rtx);

static int haifa_luid_for_non_insn (rtx x);
/* Haifa version of sched_info hooks common to all headers.  */
const struct common_sched_info_def haifa_common_sched_info =
  {
    NULL, /* fix_recovery_cfg */
    NULL, /* add_block */
    NULL, /* estimate_number_of_insns */
    haifa_luid_for_non_insn, /* luid_for_non_insn */
    SCHED_PASS_UNKNOWN /* sched_pass_id */
  };
/* Mapping from instruction UID to its Logical UID.  */
vec<int> sched_luids = vNULL;

/* Next LUID to assign to an instruction.  */
int sched_max_luid = 1;

/* Haifa Instruction Data.  */
vec<haifa_insn_data_def> h_i_d = vNULL;

void (* sched_init_only_bb) (basic_block, basic_block);

/* Split block function.  Different schedulers might use different functions
   to keep their internal data consistent.  */
basic_block (* sched_split_block) (basic_block, rtx);

/* Create empty basic block after the specified block.  */
basic_block (* sched_create_empty_bb) (basic_block);
/* Return the number of cycles until INSN is expected to be ready.
   Return zero if it already is.  */
static int
insn_delay (rtx_insn *insn)
{
  return MAX (INSN_TICK (insn) - clock_var, 0);
}
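/* Worked example of the delay computation above, kept out of the build
   with #if 0.  The tick and clock values are invented for illustration.  */
#if 0
#include <stdio.h>

#define TOY_MAX(a, b) ((a) > (b) ? (a) : (b))

int
main (void)
{
  int insn_tick = 12;	/* cycle at which the insn's operands are ready */
  int clock = 10;	/* current scheduling clock */

  /* Two more cycles are needed; a tick in the past would give 0.  */
  printf ("%d\n", TOY_MAX (insn_tick - clock, 0));	/* prints 2 */
  return 0;
}
#endif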
446 may_trap_exp (const_rtx x
, int is_store
)
455 if (code
== MEM
&& may_trap_p (x
))
462 /* The insn uses memory: a volatile load. */
463 if (MEM_VOLATILE_P (x
))
465 /* An exception-free load. */
468 /* A load with 1 base register, to be further checked. */
469 if (CONST_BASED_ADDRESS_P (XEXP (x
, 0)))
470 return PFREE_CANDIDATE
;
471 /* No info on the load, to be further checked. */
472 return PRISKY_CANDIDATE
;
477 int i
, insn_class
= TRAP_FREE
;
479 /* Neither store nor load, check if it may cause a trap. */
482 /* Recursive step: walk the insn... */
483 fmt
= GET_RTX_FORMAT (code
);
484 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
488 int tmp_class
= may_trap_exp (XEXP (x
, i
), is_store
);
489 insn_class
= WORST_CLASS (insn_class
, tmp_class
);
491 else if (fmt
[i
] == 'E')
494 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
496 int tmp_class
= may_trap_exp (XVECEXP (x
, i
, j
), is_store
);
497 insn_class
= WORST_CLASS (insn_class
, tmp_class
);
498 if (insn_class
== TRAP_RISKY
|| insn_class
== IRISKY
)
502 if (insn_class
== TRAP_RISKY
|| insn_class
== IRISKY
)
509 /* Classifies rtx X of an insn for the purpose of verifying that X can be
510 executed speculatively (and consequently the insn can be moved
511 speculatively), by examining X, returning:
512 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
513 TRAP_FREE: non-load insn.
514 IFREE: load from a globally safe location.
515 IRISKY: volatile load.
516 PFREE_CANDIDATE, PRISKY_CANDIDATE: load that need to be checked for
517 being either PFREE or PRISKY. */
520 haifa_classify_rtx (const_rtx x
)
522 int tmp_class
= TRAP_FREE
;
523 int insn_class
= TRAP_FREE
;
526 if (GET_CODE (x
) == PARALLEL
)
528 int i
, len
= XVECLEN (x
, 0);
530 for (i
= len
- 1; i
>= 0; i
--)
532 tmp_class
= haifa_classify_rtx (XVECEXP (x
, 0, i
));
533 insn_class
= WORST_CLASS (insn_class
, tmp_class
);
534 if (insn_class
== TRAP_RISKY
|| insn_class
== IRISKY
)
544 /* Test if it is a 'store'. */
545 tmp_class
= may_trap_exp (XEXP (x
, 0), 1);
548 /* Test if it is a store. */
549 tmp_class
= may_trap_exp (SET_DEST (x
), 1);
550 if (tmp_class
== TRAP_RISKY
)
552 /* Test if it is a load. */
554 WORST_CLASS (tmp_class
,
555 may_trap_exp (SET_SRC (x
), 0));
558 tmp_class
= haifa_classify_rtx (COND_EXEC_CODE (x
));
559 if (tmp_class
== TRAP_RISKY
)
561 tmp_class
= WORST_CLASS (tmp_class
,
562 may_trap_exp (COND_EXEC_TEST (x
), 0));
565 tmp_class
= TRAP_RISKY
;
569 insn_class
= tmp_class
;
576 haifa_classify_insn (const_rtx insn
)
578 return haifa_classify_rtx (PATTERN (insn
));
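/* Illustrative sketch of how the classification above combines the classes
   of sub-expressions: classes are assumed to be ordered from least to most
   risky, so that taking the maximum (as the WORST_CLASS macro from
   sched-int.h does) yields the riskiest class seen.  The enum values and
   names below are toy stand-ins, kept out of the build with #if 0.  */
#if 0
#include <stdio.h>

enum toy_trap_class
{
  TOY_TRAP_FREE = 0,
  TOY_LOAD_CANDIDATE = 1,
  TOY_RISKY = 2
};

#define TOY_WORST_CLASS(a, b) ((a) > (b) ? (a) : (b))

int
main (void)
{
  enum toy_trap_class insn_class = TOY_TRAP_FREE;

  /* Merging in a candidate load, then a risky sub-expression, can only
     move the classification toward "riskier".  */
  insn_class = TOY_WORST_CLASS (insn_class, TOY_LOAD_CANDIDATE);
  insn_class = TOY_WORST_CLASS (insn_class, TOY_RISKY);
  printf ("%d\n", (int) insn_class);	/* prints 2 */
  return 0;
}
#endif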
/* After the scheduler initialization function has been called, this function
   can be called to enable modulo scheduling.  II is the initiation interval
   we should use; it affects the delays for delay_pairs that were recorded as
   separated by a given number of stages.

   MAX_STAGES provides us with a limit
   after which we give up scheduling; the caller must have unrolled at least
   as many copies of the loop body and recorded delay_pairs for them.

   INSNS is the number of real (non-debug) insns in one iteration of
   the loop.  MAX_UID can be used to test whether an insn belongs to
   the first iteration of the loop; all of them have a uid lower than
   MAX_UID.  */
void
set_modulo_params (int ii, int max_stages, int insns, int max_uid)
{
  modulo_ii = ii;
  modulo_max_stages = max_stages;
  modulo_n_insns = insns;
  modulo_iter0_max_uid = max_uid;
  modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS);
}
/* A structure to record a pair of insns where the first one is a real
   insn that has delay slots, and the second is its delayed shadow.
   I1 is scheduled normally and will emit an assembly instruction,
   while I2 describes the side effect that takes place at the
   transition between cycles CYCLES and (CYCLES + 1) after I1.  */
struct delay_pair
{
  struct delay_pair *next_same_i1;
  rtx_insn *i1, *i2;
  int cycles;
  /* When doing modulo scheduling, a delay_pair can also be used to
     show that I1 and I2 are the same insn in a different stage.  If that
     is the case, STAGES will be nonzero.  */
  int stages;
};
620 /* Helpers for delay hashing. */
622 struct delay_i1_hasher
: typed_noop_remove
<delay_pair
>
624 typedef delay_pair value_type
;
625 typedef void compare_type
;
626 static inline hashval_t
hash (const value_type
*);
627 static inline bool equal (const value_type
*, const compare_type
*);
630 /* Returns a hash value for X, based on hashing just I1. */
633 delay_i1_hasher::hash (const value_type
*x
)
635 return htab_hash_pointer (x
->i1
);
638 /* Return true if I1 of pair X is the same as that of pair Y. */
641 delay_i1_hasher::equal (const value_type
*x
, const compare_type
*y
)
646 struct delay_i2_hasher
: typed_free_remove
<delay_pair
>
648 typedef delay_pair value_type
;
649 typedef void compare_type
;
650 static inline hashval_t
hash (const value_type
*);
651 static inline bool equal (const value_type
*, const compare_type
*);
654 /* Returns a hash value for X, based on hashing just I2. */
657 delay_i2_hasher::hash (const value_type
*x
)
659 return htab_hash_pointer (x
->i2
);
662 /* Return true if I2 of pair X is the same as that of pair Y. */
665 delay_i2_hasher::equal (const value_type
*x
, const compare_type
*y
)
670 /* Two hash tables to record delay_pairs, one indexed by I1 and the other
672 static hash_table
<delay_i1_hasher
> *delay_htab
;
673 static hash_table
<delay_i2_hasher
> *delay_htab_i2
;
675 /* Called through htab_traverse. Walk the hashtable using I2 as
676 index, and delete all elements involving an UID higher than
677 that pointed to by *DATA. */
679 haifa_htab_i2_traverse (delay_pair
**slot
, int *data
)
682 struct delay_pair
*p
= *slot
;
683 if (INSN_UID (p
->i2
) >= maxuid
|| INSN_UID (p
->i1
) >= maxuid
)
685 delay_htab_i2
->clear_slot (slot
);
690 /* Called through htab_traverse. Walk the hashtable using I2 as
691 index, and delete all elements involving an UID higher than
692 that pointed to by *DATA. */
694 haifa_htab_i1_traverse (delay_pair
**pslot
, int *data
)
697 struct delay_pair
*p
, *first
, **pprev
;
699 if (INSN_UID ((*pslot
)->i1
) >= maxuid
)
701 delay_htab
->clear_slot (pslot
);
705 for (p
= *pslot
; p
; p
= p
->next_same_i1
)
707 if (INSN_UID (p
->i2
) < maxuid
)
710 pprev
= &p
->next_same_i1
;
715 delay_htab
->clear_slot (pslot
);
721 /* Discard all delay pairs which involve an insn with an UID higher
724 discard_delay_pairs_above (int max_uid
)
726 delay_htab
->traverse
<int *, haifa_htab_i1_traverse
> (&max_uid
);
727 delay_htab_i2
->traverse
<int *, haifa_htab_i2_traverse
> (&max_uid
);
/* This function can be called by a port just before it starts the final
   scheduling pass.  It records the fact that an instruction with delay
   slots has been split into two insns, I1 and I2.  The first one will be
   scheduled normally and initiates the operation.  The second one is a
   shadow which must follow a specific number of cycles after I1; its only
   purpose is to show the side effect that occurs at that cycle in the RTL.
   If a JUMP_INSN or a CALL_INSN has been split, I1 should be a normal INSN,
   while I2 retains the original insn type.

   There are two ways in which the number of cycles can be specified,
   involving the CYCLES and STAGES arguments to this function.  If STAGES
   is zero, we just use the value of CYCLES.  Otherwise, STAGES is a factor
   which is multiplied by MODULO_II to give the number of cycles.  This is
   only useful if the caller also calls set_modulo_params to enable modulo
   scheduling.  */
747 record_delay_slot_pair (rtx_insn
*i1
, rtx_insn
*i2
, int cycles
, int stages
)
749 struct delay_pair
*p
= XNEW (struct delay_pair
);
750 struct delay_pair
**slot
;
759 delay_htab
= new hash_table
<delay_i1_hasher
> (10);
760 delay_htab_i2
= new hash_table
<delay_i2_hasher
> (10);
762 slot
= delay_htab
->find_slot_with_hash (i1
, htab_hash_pointer (i1
), INSERT
);
763 p
->next_same_i1
= *slot
;
765 slot
= delay_htab_i2
->find_slot (p
, INSERT
);
769 /* Examine the delay pair hashtable to see if INSN is a shadow for another,
770 and return the other insn if so. Return NULL otherwise. */
772 real_insn_for_shadow (rtx_insn
*insn
)
774 struct delay_pair
*pair
;
779 pair
= delay_htab_i2
->find_with_hash (insn
, htab_hash_pointer (insn
));
780 if (!pair
|| pair
->stages
> 0)
/* For a pair P of insns, return the fixed distance in cycles from the first
   insn after which the second must be scheduled.  */
static int
pair_delay (struct delay_pair *p)
{
  if (p->stages == 0)
    return p->cycles;
  else
    return p->stages * modulo_ii;
}
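/* Worked example of the delay computation above, kept out of the build
   with #if 0.  The numbers are invented: with an initiation interval of
   4 cycles, a pair recorded as two stages apart must be separated by
   2 * 4 = 8 cycles, while a non-modulo pair simply uses its recorded
   cycle count.  */
#if 0
#include <stdio.h>

static int
toy_pair_delay (int cycles, int stages, int ii)
{
  return stages == 0 ? cycles : stages * ii;
}

int
main (void)
{
  printf ("%d\n", toy_pair_delay (3, 0, 4));	/* prints 3 */
  printf ("%d\n", toy_pair_delay (0, 2, 4));	/* prints 8 */
  return 0;
}
#endif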
796 /* Given an insn INSN, add a dependence on its delayed shadow if it
797 has one. Also try to find situations where shadows depend on each other
798 and add dependencies to the real insns to limit the amount of backtracking
801 add_delay_dependencies (rtx_insn
*insn
)
803 struct delay_pair
*pair
;
804 sd_iterator_def sd_it
;
810 pair
= delay_htab_i2
->find_with_hash (insn
, htab_hash_pointer (insn
));
813 add_dependence (insn
, pair
->i1
, REG_DEP_ANTI
);
817 FOR_EACH_DEP (pair
->i2
, SD_LIST_BACK
, sd_it
, dep
)
819 rtx_insn
*pro
= DEP_PRO (dep
);
820 struct delay_pair
*other_pair
821 = delay_htab_i2
->find_with_hash (pro
, htab_hash_pointer (pro
));
822 if (!other_pair
|| other_pair
->stages
)
824 if (pair_delay (other_pair
) >= pair_delay (pair
))
826 if (sched_verbose
>= 4)
828 fprintf (sched_dump
, ";;\tadding dependence %d <- %d\n",
829 INSN_UID (other_pair
->i1
),
830 INSN_UID (pair
->i1
));
831 fprintf (sched_dump
, ";;\tpair1 %d <- %d, cost %d\n",
835 fprintf (sched_dump
, ";;\tpair2 %d <- %d, cost %d\n",
836 INSN_UID (other_pair
->i1
),
837 INSN_UID (other_pair
->i2
),
838 pair_delay (other_pair
));
840 add_dependence (pair
->i1
, other_pair
->i1
, REG_DEP_ANTI
);
/* Forward declarations.  */

static int priority (rtx_insn *);
static int autopref_rank_for_schedule (const rtx_insn *, const rtx_insn *);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx_insn **, int);
static void queue_insn (rtx_insn *, int, const char *);
static int schedule_insn (rtx_insn *);
static void adjust_priority (rtx_insn *);
static void advance_one_cycle (void);
static void extend_h_i_d (void);
/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between two types of notes:

   (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (2) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */
874 static void ready_add (struct ready_list
*, rtx_insn
*, bool);
875 static rtx_insn
*ready_remove_first (struct ready_list
*);
876 static rtx_insn
*ready_remove_first_dispatch (struct ready_list
*ready
);
878 static void queue_to_ready (struct ready_list
*);
879 static int early_queue_to_ready (state_t
, struct ready_list
*);
881 /* The following functions are used to implement multi-pass scheduling
882 on the first cycle. */
883 static rtx_insn
*ready_remove (struct ready_list
*, int);
884 static void ready_remove_insn (rtx
);
886 static void fix_inter_tick (rtx_insn
*, rtx_insn
*);
887 static int fix_tick_ready (rtx_insn
*);
888 static void change_queue_index (rtx_insn
*, int);
890 /* The following functions are used to implement scheduling of data/control
891 speculative instructions. */
893 static void extend_h_i_d (void);
894 static void init_h_i_d (rtx_insn
*);
895 static int haifa_speculate_insn (rtx_insn
*, ds_t
, rtx
*);
896 static void generate_recovery_code (rtx_insn
*);
897 static void process_insn_forw_deps_be_in_spec (rtx
, rtx_insn
*, ds_t
);
898 static void begin_speculative_block (rtx_insn
*);
899 static void add_to_speculative_block (rtx_insn
*);
900 static void init_before_recovery (basic_block
*);
901 static void create_check_block_twin (rtx_insn
*, bool);
902 static void fix_recovery_deps (basic_block
);
903 static bool haifa_change_pattern (rtx_insn
*, rtx
);
904 static void dump_new_block_header (int, basic_block
, rtx_insn
*, rtx_insn
*);
905 static void restore_bb_notes (basic_block
);
906 static void fix_jump_move (rtx_insn
*);
907 static void move_block_after_check (rtx_insn
*);
908 static void move_succs (vec
<edge
, va_gc
> **, basic_block
);
909 static void sched_remove_insn (rtx_insn
*);
910 static void clear_priorities (rtx_insn
*, rtx_vec_t
*);
911 static void calc_priorities (rtx_vec_t
);
912 static void add_jump_dependencies (rtx_insn
*, rtx_insn
*);
914 #endif /* INSN_SCHEDULING */
916 /* Point to state used for the current scheduling pass. */
917 struct haifa_sched_info
*current_sched_info
;
919 #ifndef INSN_SCHEDULING
921 schedule_insns (void)
926 /* Do register pressure sensitive insn scheduling if the flag is set
928 enum sched_pressure_algorithm sched_pressure
;
930 /* Map regno -> its pressure class. The map defined only when
931 SCHED_PRESSURE != SCHED_PRESSURE_NONE. */
932 enum reg_class
*sched_regno_pressure_class
;
934 /* The current register pressure. Only elements corresponding pressure
935 classes are defined. */
936 static int curr_reg_pressure
[N_REG_CLASSES
];
938 /* Saved value of the previous array. */
939 static int saved_reg_pressure
[N_REG_CLASSES
];
941 /* Register living at given scheduling point. */
942 static bitmap curr_reg_live
;
944 /* Saved value of the previous array. */
945 static bitmap saved_reg_live
;
947 /* Registers mentioned in the current region. */
948 static bitmap region_ref_regs
;
950 /* Effective number of available registers of a given class (see comment
951 in sched_pressure_start_bb). */
952 static int sched_class_regs_num
[N_REG_CLASSES
];
953 /* Number of call_used_regs. This is a helper for calculating of
954 sched_class_regs_num. */
955 static int call_used_regs_num
[N_REG_CLASSES
];
957 /* Initiate register pressure relative info for scheduling the current
958 region. Currently it is only clearing register mentioned in the
961 sched_init_region_reg_pressure_info (void)
963 bitmap_clear (region_ref_regs
);
966 /* PRESSURE[CL] describes the pressure on register class CL. Update it
967 for the birth (if BIRTH_P) or death (if !BIRTH_P) of register REGNO.
968 LIVE tracks the set of live registers; if it is null, assume that
969 every birth or death is genuine. */
971 mark_regno_birth_or_death (bitmap live
, int *pressure
, int regno
, bool birth_p
)
973 enum reg_class pressure_class
;
975 pressure_class
= sched_regno_pressure_class
[regno
];
976 if (regno
>= FIRST_PSEUDO_REGISTER
)
978 if (pressure_class
!= NO_REGS
)
982 if (!live
|| bitmap_set_bit (live
, regno
))
983 pressure
[pressure_class
]
984 += (ira_reg_class_max_nregs
985 [pressure_class
][PSEUDO_REGNO_MODE (regno
)]);
989 if (!live
|| bitmap_clear_bit (live
, regno
))
990 pressure
[pressure_class
]
991 -= (ira_reg_class_max_nregs
992 [pressure_class
][PSEUDO_REGNO_MODE (regno
)]);
996 else if (pressure_class
!= NO_REGS
997 && ! TEST_HARD_REG_BIT (ira_no_alloc_regs
, regno
))
1001 if (!live
|| bitmap_set_bit (live
, regno
))
1002 pressure
[pressure_class
]++;
1006 if (!live
|| bitmap_clear_bit (live
, regno
))
1007 pressure
[pressure_class
]--;
1012 /* Initiate current register pressure related info from living
1013 registers given by LIVE. */
1015 initiate_reg_pressure_info (bitmap live
)
1021 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
1022 curr_reg_pressure
[ira_pressure_classes
[i
]] = 0;
1023 bitmap_clear (curr_reg_live
);
1024 EXECUTE_IF_SET_IN_BITMAP (live
, 0, j
, bi
)
1025 if (sched_pressure
== SCHED_PRESSURE_MODEL
1026 || current_nr_blocks
== 1
1027 || bitmap_bit_p (region_ref_regs
, j
))
1028 mark_regno_birth_or_death (curr_reg_live
, curr_reg_pressure
, j
, true);
1031 /* Mark registers in X as mentioned in the current region. */
1033 setup_ref_regs (rtx x
)
1036 const RTX_CODE code
= GET_CODE (x
);
1042 if (HARD_REGISTER_NUM_P (regno
))
1043 bitmap_set_range (region_ref_regs
, regno
,
1044 hard_regno_nregs
[regno
][GET_MODE (x
)]);
1046 bitmap_set_bit (region_ref_regs
, REGNO (x
));
1049 fmt
= GET_RTX_FORMAT (code
);
1050 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
1052 setup_ref_regs (XEXP (x
, i
));
1053 else if (fmt
[i
] == 'E')
1055 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
1056 setup_ref_regs (XVECEXP (x
, i
, j
));
1060 /* Initiate current register pressure related info at the start of
1063 initiate_bb_reg_pressure_info (basic_block bb
)
1065 unsigned int i ATTRIBUTE_UNUSED
;
1068 if (current_nr_blocks
> 1)
1069 FOR_BB_INSNS (bb
, insn
)
1070 if (NONDEBUG_INSN_P (insn
))
1071 setup_ref_regs (PATTERN (insn
));
1072 initiate_reg_pressure_info (df_get_live_in (bb
));
1073 #ifdef EH_RETURN_DATA_REGNO
1074 if (bb_has_eh_pred (bb
))
1077 unsigned int regno
= EH_RETURN_DATA_REGNO (i
);
1079 if (regno
== INVALID_REGNUM
)
1081 if (! bitmap_bit_p (df_get_live_in (bb
), regno
))
1082 mark_regno_birth_or_death (curr_reg_live
, curr_reg_pressure
,
1088 /* Save current register pressure related info. */
1090 save_reg_pressure (void)
1094 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
1095 saved_reg_pressure
[ira_pressure_classes
[i
]]
1096 = curr_reg_pressure
[ira_pressure_classes
[i
]];
1097 bitmap_copy (saved_reg_live
, curr_reg_live
);
1100 /* Restore saved register pressure related info. */
1102 restore_reg_pressure (void)
1106 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
1107 curr_reg_pressure
[ira_pressure_classes
[i
]]
1108 = saved_reg_pressure
[ira_pressure_classes
[i
]];
1109 bitmap_copy (curr_reg_live
, saved_reg_live
);
1112 /* Return TRUE if the register is dying after its USE. */
1114 dying_use_p (struct reg_use_data
*use
)
1116 struct reg_use_data
*next
;
1118 for (next
= use
->next_regno_use
; next
!= use
; next
= next
->next_regno_use
)
1119 if (NONDEBUG_INSN_P (next
->insn
)
1120 && QUEUE_INDEX (next
->insn
) != QUEUE_SCHEDULED
)
1125 /* Print info about the current register pressure and its excess for
1126 each pressure class. */
1128 print_curr_reg_pressure (void)
1133 fprintf (sched_dump
, ";;\t");
1134 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
1136 cl
= ira_pressure_classes
[i
];
1137 gcc_assert (curr_reg_pressure
[cl
] >= 0);
1138 fprintf (sched_dump
, " %s:%d(%d)", reg_class_names
[cl
],
1139 curr_reg_pressure
[cl
],
1140 curr_reg_pressure
[cl
] - sched_class_regs_num
[cl
]);
1142 fprintf (sched_dump
, "\n");
1145 /* Determine if INSN has a condition that is clobbered if a register
1146 in SET_REGS is modified. */
1148 cond_clobbered_p (rtx_insn
*insn
, HARD_REG_SET set_regs
)
1150 rtx pat
= PATTERN (insn
);
1151 gcc_assert (GET_CODE (pat
) == COND_EXEC
);
1152 if (TEST_HARD_REG_BIT (set_regs
, REGNO (XEXP (COND_EXEC_TEST (pat
), 0))))
1154 sd_iterator_def sd_it
;
1156 haifa_change_pattern (insn
, ORIG_PAT (insn
));
1157 FOR_EACH_DEP (insn
, SD_LIST_BACK
, sd_it
, dep
)
1158 DEP_STATUS (dep
) &= ~DEP_CANCELLED
;
1159 TODO_SPEC (insn
) = HARD_DEP
;
1160 if (sched_verbose
>= 2)
1161 fprintf (sched_dump
,
1162 ";;\t\tdequeue insn %s because of clobbered condition\n",
1163 (*current_sched_info
->print_insn
) (insn
, 0));
1170 /* This function should be called after modifying the pattern of INSN,
1171 to update scheduler data structures as needed. */
1173 update_insn_after_change (rtx_insn
*insn
)
1175 sd_iterator_def sd_it
;
1178 dfa_clear_single_insn_cache (insn
);
1180 sd_it
= sd_iterator_start (insn
,
1181 SD_LIST_FORW
| SD_LIST_BACK
| SD_LIST_RES_BACK
);
1182 while (sd_iterator_cond (&sd_it
, &dep
))
1184 DEP_COST (dep
) = UNKNOWN_DEP_COST
;
1185 sd_iterator_next (&sd_it
);
1188 /* Invalidate INSN_COST, so it'll be recalculated. */
1189 INSN_COST (insn
) = -1;
1190 /* Invalidate INSN_TICK, so it'll be recalculated. */
1191 INSN_TICK (insn
) = INVALID_TICK
;
1193 /* Invalidate autoprefetch data entry. */
1194 INSN_AUTOPREF_MULTIPASS_DATA (insn
)[0].status
1195 = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED
;
1196 INSN_AUTOPREF_MULTIPASS_DATA (insn
)[1].status
1197 = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED
;
1201 /* Two VECs, one to hold dependencies for which pattern replacements
1202 need to be applied or restored at the start of the next cycle, and
1203 another to hold an integer that is either one, to apply the
1204 corresponding replacement, or zero to restore it. */
1205 static vec
<dep_t
> next_cycle_replace_deps
;
1206 static vec
<int> next_cycle_apply
;
1208 static void apply_replacement (dep_t
, bool);
1209 static void restore_pattern (dep_t
, bool);
1211 /* Look at the remaining dependencies for insn NEXT, and compute and return
1212 the TODO_SPEC value we should use for it. This is called after one of
1213 NEXT's dependencies has been resolved.
1214 We also perform pattern replacements for predication, and for broken
1215 replacement dependencies. The latter is only done if FOR_BACKTRACK is
1219 recompute_todo_spec (rtx_insn
*next
, bool for_backtrack
)
1222 sd_iterator_def sd_it
;
1223 dep_t dep
, modify_dep
= NULL
;
1227 bool first_p
= true;
1229 if (sd_lists_empty_p (next
, SD_LIST_BACK
))
1230 /* NEXT has all its dependencies resolved. */
1233 if (!sd_lists_empty_p (next
, SD_LIST_HARD_BACK
))
1236 /* If NEXT is intended to sit adjacent to this instruction, we don't
1237 want to try to break any dependencies. Treat it as a HARD_DEP. */
1238 if (SCHED_GROUP_P (next
))
1241 /* Now we've got NEXT with speculative deps only.
1242 1. Look at the deps to see what we have to do.
1243 2. Check if we can do 'todo'. */
1246 FOR_EACH_DEP (next
, SD_LIST_BACK
, sd_it
, dep
)
1248 rtx_insn
*pro
= DEP_PRO (dep
);
1249 ds_t ds
= DEP_STATUS (dep
) & SPECULATIVE
;
1251 if (DEBUG_INSN_P (pro
) && !DEBUG_INSN_P (next
))
1264 new_ds
= ds_merge (new_ds
, ds
);
1266 else if (DEP_TYPE (dep
) == REG_DEP_CONTROL
)
1268 if (QUEUE_INDEX (pro
) != QUEUE_SCHEDULED
)
1273 DEP_STATUS (dep
) &= ~DEP_CANCELLED
;
1275 else if (DEP_REPLACE (dep
) != NULL
)
1277 if (QUEUE_INDEX (pro
) != QUEUE_SCHEDULED
)
1282 DEP_STATUS (dep
) &= ~DEP_CANCELLED
;
1286 if (n_replace
> 0 && n_control
== 0 && n_spec
== 0)
1288 if (!dbg_cnt (sched_breakdep
))
1290 FOR_EACH_DEP (next
, SD_LIST_BACK
, sd_it
, dep
)
1292 struct dep_replacement
*desc
= DEP_REPLACE (dep
);
1295 if (desc
->insn
== next
&& !for_backtrack
)
1297 gcc_assert (n_replace
== 1);
1298 apply_replacement (dep
, true);
1300 DEP_STATUS (dep
) |= DEP_CANCELLED
;
1306 else if (n_control
== 1 && n_replace
== 0 && n_spec
== 0)
1308 rtx_insn
*pro
, *other
;
1310 rtx cond
= NULL_RTX
;
1312 rtx_insn
*prev
= NULL
;
1316 if ((current_sched_info
->flags
& DO_PREDICATION
) == 0
1317 || (ORIG_PAT (next
) != NULL_RTX
1318 && PREDICATED_PAT (next
) == NULL_RTX
))
1321 pro
= DEP_PRO (modify_dep
);
1322 other
= real_insn_for_shadow (pro
);
1323 if (other
!= NULL_RTX
)
1326 cond
= sched_get_reverse_condition_uncached (pro
);
1327 regno
= REGNO (XEXP (cond
, 0));
1329 /* Find the last scheduled insn that modifies the condition register.
1330 We can stop looking once we find the insn we depend on through the
1331 REG_DEP_CONTROL; if the condition register isn't modified after it,
1332 we know that it still has the right value. */
1333 if (QUEUE_INDEX (pro
) == QUEUE_SCHEDULED
)
1334 FOR_EACH_VEC_ELT_REVERSE (scheduled_insns
, i
, prev
)
1338 find_all_hard_reg_sets (prev
, &t
, true);
1339 if (TEST_HARD_REG_BIT (t
, regno
))
1344 if (ORIG_PAT (next
) == NULL_RTX
)
1346 ORIG_PAT (next
) = PATTERN (next
);
1348 new_pat
= gen_rtx_COND_EXEC (VOIDmode
, cond
, PATTERN (next
));
1349 success
= haifa_change_pattern (next
, new_pat
);
1352 PREDICATED_PAT (next
) = new_pat
;
1354 else if (PATTERN (next
) != PREDICATED_PAT (next
))
1356 bool success
= haifa_change_pattern (next
,
1357 PREDICATED_PAT (next
));
1358 gcc_assert (success
);
1360 DEP_STATUS (modify_dep
) |= DEP_CANCELLED
;
1364 if (PREDICATED_PAT (next
) != NULL_RTX
)
1366 int tick
= INSN_TICK (next
);
1367 bool success
= haifa_change_pattern (next
,
1369 INSN_TICK (next
) = tick
;
1370 gcc_assert (success
);
1373 /* We can't handle the case where there are both speculative and control
1374 dependencies, so we return HARD_DEP in such a case. Also fail if
1375 we have speculative dependencies with not enough points, or more than
1376 one control dependency. */
1377 if ((n_spec
> 0 && (n_control
> 0 || n_replace
> 0))
1379 /* Too few points? */
1380 && ds_weak (new_ds
) < spec_info
->data_weakness_cutoff
)
/* Pointer to the last instruction scheduled.  */
static rtx_insn *last_scheduled_insn;

/* Pointer to the last nondebug instruction scheduled within the
   block, or the prev_head of the scheduling block.  Used by
   rank_for_schedule, so that insns independent of the last scheduled
   insn will be preferred over dependent instructions.  */
static rtx last_nondebug_scheduled_insn;

/* Pointer that iterates through the list of unscheduled insns if we
   have a dbg_cnt enabled.  It always points at an insn prior to the
   first unscheduled one.  */
static rtx_insn *nonscheduled_insns_begin;
1402 /* Compute cost of executing INSN.
1403 This is the number of cycles between instruction issue and
1404 instruction results. */
1406 insn_cost (rtx_insn
*insn
)
1415 if (recog_memoized (insn
) < 0)
1418 cost
= insn_default_latency (insn
);
1425 cost
= INSN_COST (insn
);
1429 /* A USE insn, or something else we don't need to
1430 understand. We can't pass these directly to
1431 result_ready_cost or insn_default_latency because it will
1432 trigger a fatal error for unrecognizable insns. */
1433 if (recog_memoized (insn
) < 0)
1435 INSN_COST (insn
) = 0;
1440 cost
= insn_default_latency (insn
);
1444 INSN_COST (insn
) = cost
;
1451 /* Compute cost of dependence LINK.
1452 This is the number of cycles between instruction issue and
1453 instruction results.
1454 ??? We also use this function to call recog_memoized on all insns. */
1456 dep_cost_1 (dep_t link
, dw_t dw
)
1458 rtx_insn
*insn
= DEP_PRO (link
);
1459 rtx_insn
*used
= DEP_CON (link
);
1462 if (DEP_COST (link
) != UNKNOWN_DEP_COST
)
1463 return DEP_COST (link
);
1467 struct delay_pair
*delay_entry
;
1469 = delay_htab_i2
->find_with_hash (used
, htab_hash_pointer (used
));
1472 if (delay_entry
->i1
== insn
)
1474 DEP_COST (link
) = pair_delay (delay_entry
);
1475 return DEP_COST (link
);
1480 /* A USE insn should never require the value used to be computed.
1481 This allows the computation of a function's result and parameter
1482 values to overlap the return and call. We don't care about the
1483 dependence cost when only decreasing register pressure. */
1484 if (recog_memoized (used
) < 0)
1487 recog_memoized (insn
);
1491 enum reg_note dep_type
= DEP_TYPE (link
);
1493 cost
= insn_cost (insn
);
1495 if (INSN_CODE (insn
) >= 0)
1497 if (dep_type
== REG_DEP_ANTI
)
1499 else if (dep_type
== REG_DEP_OUTPUT
)
1501 cost
= (insn_default_latency (insn
)
1502 - insn_default_latency (used
));
1506 else if (bypass_p (insn
))
1507 cost
= insn_latency (insn
, used
);
1511 if (targetm
.sched
.adjust_cost_2
)
1512 cost
= targetm
.sched
.adjust_cost_2 (used
, (int) dep_type
, insn
, cost
,
1514 else if (targetm
.sched
.adjust_cost
!= NULL
)
1516 /* This variable is used for backward compatibility with the
1518 rtx_insn_list
*dep_cost_rtx_link
=
1519 alloc_INSN_LIST (NULL_RTX
, NULL
);
1521 /* Make it self-cycled, so that if some tries to walk over this
1522 incomplete list he/she will be caught in an endless loop. */
1523 XEXP (dep_cost_rtx_link
, 1) = dep_cost_rtx_link
;
1525 /* Targets use only REG_NOTE_KIND of the link. */
1526 PUT_REG_NOTE_KIND (dep_cost_rtx_link
, DEP_TYPE (link
));
1528 cost
= targetm
.sched
.adjust_cost (used
, dep_cost_rtx_link
,
1531 free_INSN_LIST_node (dep_cost_rtx_link
);
1538 DEP_COST (link
) = cost
;
1542 /* Compute cost of dependence LINK.
1543 This is the number of cycles between instruction issue and
1544 instruction results. */
1546 dep_cost (dep_t link
)
1548 return dep_cost_1 (link
, 0);
1551 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
1552 INSN_PRIORITY explicitly. */
1554 increase_insn_priority (rtx_insn
*insn
, int amount
)
1556 if (!sel_sched_p ())
1558 /* We're dealing with haifa-sched.c INSN_PRIORITY. */
1559 if (INSN_PRIORITY_KNOWN (insn
))
1560 INSN_PRIORITY (insn
) += amount
;
1564 /* In sel-sched.c INSN_PRIORITY is not kept up to date.
1565 Use EXPR_PRIORITY instead. */
1566 sel_add_to_insn_priority (insn
, amount
);
1570 /* Return 'true' if DEP should be included in priority calculations. */
1572 contributes_to_priority_p (dep_t dep
)
1574 if (DEBUG_INSN_P (DEP_CON (dep
))
1575 || DEBUG_INSN_P (DEP_PRO (dep
)))
1578 /* Critical path is meaningful in block boundaries only. */
1579 if (!current_sched_info
->contributes_to_priority (DEP_CON (dep
),
1583 if (DEP_REPLACE (dep
) != NULL
)
1586 /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
1587 then speculative instructions will less likely be
1588 scheduled. That is because the priority of
1589 their producers will increase, and, thus, the
1590 producers will more likely be scheduled, thus,
1591 resolving the dependence. */
1592 if (sched_deps_info
->generate_spec_deps
1593 && !(spec_info
->flags
& COUNT_SPEC_IN_CRITICAL_PATH
)
1594 && (DEP_STATUS (dep
) & SPECULATIVE
))
1600 /* Compute the number of nondebug deps in list LIST for INSN. */
1603 dep_list_size (rtx insn
, sd_list_types_def list
)
1605 sd_iterator_def sd_it
;
1607 int dbgcount
= 0, nodbgcount
= 0;
1609 if (!MAY_HAVE_DEBUG_INSNS
)
1610 return sd_lists_size (insn
, list
);
1612 FOR_EACH_DEP (insn
, list
, sd_it
, dep
)
1614 if (DEBUG_INSN_P (DEP_CON (dep
)))
1616 else if (!DEBUG_INSN_P (DEP_PRO (dep
)))
1620 gcc_assert (dbgcount
+ nodbgcount
== sd_lists_size (insn
, list
));
1627 /* Compute the priority number for INSN. */
1629 priority (rtx_insn
*insn
)
1631 if (! INSN_P (insn
))
1634 /* We should not be interested in priority of an already scheduled insn. */
1635 gcc_assert (QUEUE_INDEX (insn
) != QUEUE_SCHEDULED
);
1637 if (!INSN_PRIORITY_KNOWN (insn
))
1639 int this_priority
= -1;
1643 int this_fusion_priority
;
1645 targetm
.sched
.fusion_priority (insn
, FUSION_MAX_PRIORITY
,
1646 &this_fusion_priority
, &this_priority
);
1647 INSN_FUSION_PRIORITY (insn
) = this_fusion_priority
;
1649 else if (dep_list_size (insn
, SD_LIST_FORW
) == 0)
1650 /* ??? We should set INSN_PRIORITY to insn_cost when and insn has
1651 some forward deps but all of them are ignored by
1652 contributes_to_priority hook. At the moment we set priority of
1654 this_priority
= insn_cost (insn
);
1657 rtx_insn
*prev_first
, *twin
;
1660 /* For recovery check instructions we calculate priority slightly
1661 different than that of normal instructions. Instead of walking
1662 through INSN_FORW_DEPS (check) list, we walk through
1663 INSN_FORW_DEPS list of each instruction in the corresponding
1666 /* Selective scheduling does not define RECOVERY_BLOCK macro. */
1667 rec
= sel_sched_p () ? NULL
: RECOVERY_BLOCK (insn
);
1668 if (!rec
|| rec
== EXIT_BLOCK_PTR_FOR_FN (cfun
))
1670 prev_first
= PREV_INSN (insn
);
1675 prev_first
= NEXT_INSN (BB_HEAD (rec
));
1676 twin
= PREV_INSN (BB_END (rec
));
1681 sd_iterator_def sd_it
;
1684 FOR_EACH_DEP (twin
, SD_LIST_FORW
, sd_it
, dep
)
1689 next
= DEP_CON (dep
);
1691 if (BLOCK_FOR_INSN (next
) != rec
)
1695 if (!contributes_to_priority_p (dep
))
1699 cost
= dep_cost (dep
);
1702 struct _dep _dep1
, *dep1
= &_dep1
;
1704 init_dep (dep1
, insn
, next
, REG_DEP_ANTI
);
1706 cost
= dep_cost (dep1
);
1709 next_priority
= cost
+ priority (next
);
1711 if (next_priority
> this_priority
)
1712 this_priority
= next_priority
;
1716 twin
= PREV_INSN (twin
);
1718 while (twin
!= prev_first
);
1721 if (this_priority
< 0)
1723 gcc_assert (this_priority
== -1);
1725 this_priority
= insn_cost (insn
);
1728 INSN_PRIORITY (insn
) = this_priority
;
1729 INSN_PRIORITY_STATUS (insn
) = 1;
1732 return INSN_PRIORITY (insn
);
1735 /* Macros and functions for keeping the priority queue sorted, and
1736 dealing with queuing and dequeuing of instructions. */
1738 /* For each pressure class CL, set DEATH[CL] to the number of registers
1739 in that class that die in INSN. */
1742 calculate_reg_deaths (rtx_insn
*insn
, int *death
)
1745 struct reg_use_data
*use
;
1747 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
1748 death
[ira_pressure_classes
[i
]] = 0;
1749 for (use
= INSN_REG_USE_LIST (insn
); use
!= NULL
; use
= use
->next_insn_use
)
1750 if (dying_use_p (use
))
1751 mark_regno_birth_or_death (0, death
, use
->regno
, true);
1754 /* Setup info about the current register pressure impact of scheduling
1755 INSN at the current scheduling point. */
1757 setup_insn_reg_pressure_info (rtx_insn
*insn
)
1759 int i
, change
, before
, after
, hard_regno
;
1760 int excess_cost_change
;
1763 struct reg_pressure_data
*pressure_info
;
1764 int *max_reg_pressure
;
1765 static int death
[N_REG_CLASSES
];
1767 gcc_checking_assert (!DEBUG_INSN_P (insn
));
1769 excess_cost_change
= 0;
1770 calculate_reg_deaths (insn
, death
);
1771 pressure_info
= INSN_REG_PRESSURE (insn
);
1772 max_reg_pressure
= INSN_MAX_REG_PRESSURE (insn
);
1773 gcc_assert (pressure_info
!= NULL
&& max_reg_pressure
!= NULL
);
1774 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
1776 cl
= ira_pressure_classes
[i
];
1777 gcc_assert (curr_reg_pressure
[cl
] >= 0);
1778 change
= (int) pressure_info
[i
].set_increase
- death
[cl
];
1779 before
= MAX (0, max_reg_pressure
[i
] - sched_class_regs_num
[cl
]);
1780 after
= MAX (0, max_reg_pressure
[i
] + change
1781 - sched_class_regs_num
[cl
]);
1782 hard_regno
= ira_class_hard_regs
[cl
][0];
1783 gcc_assert (hard_regno
>= 0);
1784 mode
= reg_raw_mode
[hard_regno
];
1785 excess_cost_change
+= ((after
- before
)
1786 * (ira_memory_move_cost
[mode
][cl
][0]
1787 + ira_memory_move_cost
[mode
][cl
][1]));
1789 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn
) = excess_cost_change
;
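/* Worked example of the excess-cost computation above, kept out of the
   build with #if 0.  All numbers are invented: a pressure class with 4
   available registers, a running maximum pressure of 5, an insn that
   increases pressure by 2, and memory move costs of 4 (load) + 4 (store).  */
#if 0
#include <stdio.h>

#define TOY_MAX(a, b) ((a) > (b) ? (a) : (b))

int
main (void)
{
  int regs_num = 4;		/* registers available in the class */
  int max_pressure = 5;		/* maximum pressure already seen */
  int change = 2;		/* extra pressure caused by this insn */
  int load_cost = 4, store_cost = 4;

  int before = TOY_MAX (0, max_pressure - regs_num);		/* 1 */
  int after = TOY_MAX (0, max_pressure + change - regs_num);	/* 3 */
  int ecc = (after - before) * (load_cost + store_cost);	/* 16 */

  printf ("%d\n", ecc);
  return 0;
}
#endif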
/* This is the first page of code related to SCHED_PRESSURE_MODEL.
   It tries to make the scheduler take register pressure into account
   without introducing too many unnecessary stalls.  It hooks into the
   main scheduling algorithm at several points:

   - Before scheduling starts, model_start_schedule constructs a
     "model schedule" for the current block.  This model schedule is
     chosen solely to keep register pressure down.  It does not take the
     target's pipeline or the original instruction order into account,
     except as a tie-breaker.  It also doesn't work to a particular
     pressure limit.

     This model schedule gives us an idea of what pressure can be
     achieved for the block and gives us an example of a schedule that
     keeps to that pressure.  It also makes the final schedule less
     dependent on the original instruction order.  This is important
     because the original order can either be "wide" (many values live
     at once, such as in user-scheduled code) or "narrow" (few values
     live at once, such as after loop unrolling, where several
     iterations are executed sequentially).

     We do not apply this model schedule to the rtx stream.  We simply
     record it in model_schedule.  We also compute the maximum pressure,
     MP, that was seen during this schedule.

   - Instructions are added to the ready queue even if they require
     a stall.  The length of the stall is instead computed as:

	MAX (INSN_TICK (INSN) - clock_var, 0)

     (= insn_delay).  This allows rank_for_schedule to choose between
     introducing a deliberate stall or increasing pressure.

   - Before sorting the ready queue, model_set_excess_costs assigns
     a pressure-based cost to each ready instruction in the queue.
     This is the instruction's INSN_REG_PRESSURE_EXCESS_COST_CHANGE
     (ECC for short) and is effectively measured in cycles.

   - rank_for_schedule ranks instructions based on:

	ECC (insn) + insn_delay (insn)

     So, for example, an instruction X1 with an ECC of 1 that can issue
     now will win over an instruction X0 with an ECC of zero that would
     introduce a stall of one cycle.  However, an instruction X2 with an
     ECC of 2 that can issue now will lose to both X0 and X1.

   - When an instruction is scheduled, model_recompute updates the model
     schedule with the new pressures (some of which might now exceed the
     original maximum pressure MP).  model_update_limit_points then searches
     for the new point of maximum pressure, if not already known.  */
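/* A small, self-contained illustration of the "ECC + delay" ranking rule
   described above, kept out of the build with #if 0.  The numbers repeat
   the X0/X1/X2 example from the comment; the helper below is hypothetical
   and only mirrors the combined cost, not the full rank_for_schedule
   logic.  */
#if 0
#include <stdio.h>

/* Combined pressure/stall cost of issuing an insn now.  */
static int
toy_pressure_cost (int ecc, int delay)
{
  return ecc + delay;
}

int
main (void)
{
  int x0 = toy_pressure_cost (0, 1);	/* ECC 0, stalls one cycle -> 1 */
  int x1 = toy_pressure_cost (1, 0);	/* ECC 1, can issue now    -> 1 */
  int x2 = toy_pressure_cost (2, 0);	/* ECC 2, can issue now    -> 2 */

  printf ("%d %d %d\n", x0, x1, x2);	/* prints 1 1 2: X2 loses to both */
  return 0;
}
#endif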
/* Used to separate high-verbosity debug information for SCHED_PRESSURE_MODEL
   from surrounding debug information.  */
#define MODEL_BAR \
  ";;\t\t+------------------------------------------------------\n"

/* Information about the pressure on a particular register class at a
   particular point of the model schedule.  */
struct model_pressure_data {
  /* The pressure at this point of the model schedule, or -1 if the
     point is associated with an instruction that has already been
     scheduled.  */
  int ref_pressure;

  /* The maximum pressure during or after this point of the model schedule.  */
  int max_pressure;
};

/* Per-instruction information that is used while building the model
   schedule.  Here, "schedule" refers to the model schedule rather
   than the main schedule.  */
struct model_insn_info {
  /* The instruction itself.  */
  rtx_insn *insn;

  /* If this instruction is in model_worklist, these fields link to the
     previous (higher-priority) and next (lower-priority) instructions
     in the list.  */
  struct model_insn_info *prev;
  struct model_insn_info *next;

  /* While constructing the schedule, QUEUE_INDEX describes whether an
     instruction has already been added to the schedule (QUEUE_SCHEDULED),
     is in model_worklist (QUEUE_READY), or neither (QUEUE_NOWHERE).
     old_queue records the value that QUEUE_INDEX had before scheduling
     started, so that we can restore it once the schedule is complete.  */
  int old_queue;

  /* The relative importance of an unscheduled instruction.  Higher
     values indicate greater importance.  */
  unsigned int model_priority;

  /* The length of the longest path of satisfied true dependencies
     that leads to this instruction.  */
  unsigned int depth;

  /* The length of the longest path of dependencies of any kind
     that leads from this instruction.  */
  unsigned int alap;

  /* The number of predecessor nodes that must still be scheduled.  */
  int unscheduled_preds;
};

/* Information about the pressure limit for a particular register class.
   This structure is used when applying a model schedule to the main
   schedule.  */
struct model_pressure_limit {
  /* The maximum register pressure seen in the original model schedule.  */
  int orig_pressure;

  /* The maximum register pressure seen in the current model schedule
     (which excludes instructions that have already been scheduled).  */
  int pressure;

  /* The point of the current model schedule at which PRESSURE is first
     reached.  It is set to -1 if the value needs to be recomputed.  */
  int point;
};

/* Describes a particular way of measuring register pressure.  */
struct model_pressure_group {
  /* Index PCI describes the maximum pressure on ira_pressure_classes[PCI].  */
  struct model_pressure_limit limits[N_REG_CLASSES];

  /* Index (POINT * ira_pressure_classes_num + PCI) describes the pressure
     on register class ira_pressure_classes[PCI] at point POINT of the
     current model schedule.  A POINT of model_num_insns describes the
     pressure at the end of the schedule.  */
  struct model_pressure_data *model;
};

/* Index POINT gives the instruction at point POINT of the model schedule.
   This array doesn't change during main scheduling.  */
static vec<rtx_insn *> model_schedule;

/* The list of instructions in the model worklist, sorted in order of
   decreasing priority.  */
static struct model_insn_info *model_worklist;

/* Index I describes the instruction with INSN_LUID I.  */
static struct model_insn_info *model_insns;

/* The number of instructions in the model schedule.  */
static int model_num_insns;

/* The index of the first instruction in model_schedule that hasn't yet been
   added to the main schedule, or model_num_insns if all of them have.  */
static int model_curr_point;

/* Describes the pressure before each instruction in the model schedule.  */
static struct model_pressure_group model_before_pressure;

/* The first unused model_priority value (as used in model_insn_info).  */
static unsigned int model_next_priority;


/* The model_pressure_data for ira_pressure_classes[PCI] in GROUP
   at point POINT of the model schedule.  */
#define MODEL_PRESSURE_DATA(GROUP, POINT, PCI) \
  (&(GROUP)->model[(POINT) * ira_pressure_classes_num + (PCI)])

/* The maximum pressure on ira_pressure_classes[PCI] in GROUP at or
   after point POINT of the model schedule.  */
#define MODEL_MAX_PRESSURE(GROUP, POINT, PCI) \
  (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->max_pressure)

/* The pressure on ira_pressure_classes[PCI] in GROUP at point POINT
   of the model schedule.  */
#define MODEL_REF_PRESSURE(GROUP, POINT, PCI) \
  (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->ref_pressure)

/* Information about INSN that is used when creating the model schedule.  */
#define MODEL_INSN_INFO(INSN) \
  (&model_insns[INSN_LUID (INSN)])

/* The instruction at point POINT of the model schedule.  */
#define MODEL_INSN(POINT) \
  (model_schedule[POINT])
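/* Illustrative note (not used by the pass itself): the GROUP->model array
   accessed by the macros above is a flat, row-major matrix with one row per
   model point plus one extra row for the end of the schedule.  A hypothetical
   standalone sketch of the same layout:

     struct model_pressure_data model[(NUM_POINTS + 1) * NUM_CLASSES];
     #define DATA(POINT, PCI) (&model[(POINT) * NUM_CLASSES + (PCI)])

   where NUM_POINTS stands in for model_num_insns and NUM_CLASSES for
   ira_pressure_classes_num.  */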
/* Return INSN's index in the model schedule, or model_num_insns if it
   doesn't belong to that schedule.  */

static int
model_index (rtx_insn *insn)
{
  if (INSN_MODEL_INDEX (insn) == 0)
    return model_num_insns;
  return INSN_MODEL_INDEX (insn) - 1;
}

/* Make sure that GROUP->limits is up-to-date for the current point
   of the model schedule.  */

static void
model_update_limit_points_in_group (struct model_pressure_group *group)
{
  int pci, max_pressure, point;

  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      /* We may have passed the final point at which the pressure in
         group->limits[pci].pressure was reached.  Update the limit if so.  */
      max_pressure = MODEL_MAX_PRESSURE (group, model_curr_point, pci);
      group->limits[pci].pressure = max_pressure;

      /* Find the point at which MAX_PRESSURE is first reached.  We need
         to search in three cases:

         - We've already moved past the previous pressure point.
           In this case we search forward from model_curr_point.

         - We scheduled the previous point of maximum pressure ahead of
           its position in the model schedule, but doing so didn't bring
           the pressure point earlier.  In this case we search forward
           from that previous pressure point.

         - Scheduling an instruction early caused the maximum pressure
           to decrease.  In this case we will have set the pressure
           point to -1, and we search forward from model_curr_point.  */
      point = MAX (group->limits[pci].point, model_curr_point);
      while (point < model_num_insns
             && MODEL_REF_PRESSURE (group, point, pci) < max_pressure)
        point++;
      group->limits[pci].point = point;

      gcc_assert (MODEL_REF_PRESSURE (group, point, pci) == max_pressure);
      gcc_assert (MODEL_MAX_PRESSURE (group, point, pci) == max_pressure);
    }
}

/* Make sure that all register-pressure limits are up-to-date for the
   current position in the model schedule.  */

static void
model_update_limit_points (void)
{
  model_update_limit_points_in_group (&model_before_pressure);
}
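/* Worked example for the limit-point search above (hypothetical numbers):
   if ira_pressure_classes[0] has MODEL_REF_PRESSURE values { 2, 5, 3, 5, 4 }
   at points 0..4 and model_curr_point is 2, then MODEL_MAX_PRESSURE at
   point 2 is 5, so limits[0].pressure becomes 5 and the forward walk
   (which starts no earlier than model_curr_point) stops at point 3, the
   first remaining point at which that maximum is reached.  */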
/* Return the model_index of the last unscheduled use in chain USE
   outside of USE's instruction.  Return -1 if there are no other uses,
   or model_num_insns if the register is live at the end of the block.  */

static int
model_last_use_except (struct reg_use_data *use)
{
  struct reg_use_data *next;
  int last, index;

  last = -1;
  for (next = use->next_regno_use; next != use; next = next->next_regno_use)
    if (NONDEBUG_INSN_P (next->insn)
        && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
      {
        index = model_index (next->insn);
        if (index == model_num_insns)
          return model_num_insns;
        if (last < index)
          last = index;
      }
  return last;
}
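/* Illustrative example (hypothetical): if the register in USE is also used
   by unscheduled instructions at model points 5 and 9, the function returns
   9.  If instead one of those uses belongs to an instruction that is not in
   the model schedule at all, it returns model_num_insns, which callers treat
   as "live beyond the end of the block".  */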
/* An instruction with model_index POINT has just been scheduled, and it
   adds DELTA to the pressure on ira_pressure_classes[PCI] after POINT - 1.
   Update MODEL_REF_PRESSURE (GROUP, POINT, PCI) and
   MODEL_MAX_PRESSURE (GROUP, POINT, PCI) accordingly.  */

static void
model_start_update_pressure (struct model_pressure_group *group,
                             int point, int pci, int delta)
{
  int next_max_pressure;

  if (point == model_num_insns)
    {
      /* The instruction wasn't part of the model schedule; it was moved
         from a different block.  Update the pressure for the end of
         the model schedule.  */
      MODEL_REF_PRESSURE (group, point, pci) += delta;
      MODEL_MAX_PRESSURE (group, point, pci) += delta;
    }
  else
    {
      /* Record that this instruction has been scheduled.  Nothing now
         changes between POINT and POINT + 1, so get the maximum pressure
         from the latter.  If the maximum pressure decreases, the new
         pressure point may be before POINT.  */
      MODEL_REF_PRESSURE (group, point, pci) = -1;
      next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
      if (MODEL_MAX_PRESSURE (group, point, pci) > next_max_pressure)
        {
          MODEL_MAX_PRESSURE (group, point, pci) = next_max_pressure;
          if (group->limits[pci].point == point)
            group->limits[pci].point = -1;
        }
    }
}
/* Record that scheduling a later instruction has changed the pressure
   at point POINT of the model schedule by DELTA (which might be 0).
   Update GROUP accordingly.  Return nonzero if these changes might
   trigger changes to previous points as well.  */

static int
model_update_pressure (struct model_pressure_group *group,
                       int point, int pci, int delta)
{
  int ref_pressure, max_pressure, next_max_pressure;

  /* If POINT hasn't yet been scheduled, update its pressure.  */
  ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
  if (ref_pressure >= 0 && delta != 0)
    {
      ref_pressure += delta;
      MODEL_REF_PRESSURE (group, point, pci) = ref_pressure;

      /* Check whether the maximum pressure in the overall schedule
         has increased.  (This means that the MODEL_MAX_PRESSURE of
         every point <= POINT will need to increase too; see below.)  */
      if (group->limits[pci].pressure < ref_pressure)
        group->limits[pci].pressure = ref_pressure;

      /* If we are at maximum pressure, and the maximum pressure
         point was previously unknown or later than POINT,
         bring it forward.  */
      if (group->limits[pci].pressure == ref_pressure
          && !IN_RANGE (group->limits[pci].point, 0, point))
        group->limits[pci].point = point;

      /* If POINT used to be the point of maximum pressure, but isn't
         any longer, we need to recalculate it using a forward walk.  */
      if (group->limits[pci].pressure > ref_pressure
          && group->limits[pci].point == point)
        group->limits[pci].point = -1;
    }

  /* Update the maximum pressure at POINT.  Changes here might also
     affect the maximum pressure at POINT - 1.  */
  next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
  max_pressure = MAX (ref_pressure, next_max_pressure);
  if (MODEL_MAX_PRESSURE (group, point, pci) != max_pressure)
    {
      MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
      return 1;
    }
  return 0;
}
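/* Worked example (hypothetical numbers): suppose point 4 is still unscheduled
   with MODEL_REF_PRESSURE 6, MODEL_MAX_PRESSURE at point 5 is 7, and a later
   instruction has just been scheduled with a delta of +2.  The reference
   pressure at point 4 becomes 8, the group limit rises to 8 if it was lower,
   and MODEL_MAX_PRESSURE at point 4 becomes MAX (8, 7) = 8.  Because the
   maximum changed, the function returns 1 and the caller keeps walking
   backwards so that earlier points see the new maximum.  */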
/* INSN has just been scheduled.  Update the model schedule accordingly.  */

static void
model_recompute (rtx_insn *insn)
{
  struct
  {
    int last_use;
    int regno;
  } uses[FIRST_PSEUDO_REGISTER + MAX_RECOG_OPERANDS];
  struct reg_use_data *use;
  struct reg_pressure_data *reg_pressure;
  int delta[N_REG_CLASSES];
  int pci, point, mix, new_last, cl, ref_pressure, queue;
  unsigned int i, num_uses, num_pending_births;
  bool print_p;

  /* The destinations of INSN were previously live from POINT onwards, but are
     now live from model_curr_point onwards.  Set up DELTA accordingly.  */
  point = model_index (insn);
  reg_pressure = INSN_REG_PRESSURE (insn);
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      delta[cl] = reg_pressure[pci].set_increase;
    }

  /* Record which registers previously died at POINT, but which now die
     before POINT.  Adjust DELTA so that it represents the effect of
     this change after POINT - 1.  Set NUM_PENDING_BIRTHS to the number of
     registers that will be born in the range [model_curr_point, POINT).  */
  num_uses = 0;
  num_pending_births = 0;
  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    {
      new_last = model_last_use_except (use);
      if (new_last < point)
        {
          gcc_assert (num_uses < ARRAY_SIZE (uses));
          uses[num_uses].last_use = new_last;
          uses[num_uses].regno = use->regno;
          /* This register is no longer live after POINT - 1.  */
          mark_regno_birth_or_death (NULL, delta, use->regno, false);
          num_uses++;
          if (new_last >= 0)
            num_pending_births++;
        }
    }

  /* Update the MODEL_REF_PRESSURE and MODEL_MAX_PRESSURE for POINT.
     Also set each group pressure limit for POINT.  */
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      model_start_update_pressure (&model_before_pressure,
                                   point, pci, delta[cl]);
    }

  /* Walk the model schedule backwards, starting immediately before POINT.  */
  print_p = false;
  if (point != model_curr_point)
    do
      {
        point--;
        insn = MODEL_INSN (point);
        queue = QUEUE_INDEX (insn);

        if (queue != QUEUE_SCHEDULED)
          {
            /* DELTA describes the effect of the move on the register pressure
               after POINT.  Make it describe the effect on the pressure
               before POINT.  */
            i = 0;
            while (i < num_uses)
              {
                if (uses[i].last_use == point)
                  {
                    /* This register is now live again.  */
                    mark_regno_birth_or_death (NULL, delta,
                                               uses[i].regno, true);

                    /* Remove this use from the array.  */
                    uses[i] = uses[num_uses - 1];
                    num_uses--;
                    num_pending_births--;
                  }
                else
                  i++;
              }

            if (sched_verbose >= 5)
              {
                if (!print_p)
                  {
                    fprintf (sched_dump, MODEL_BAR);
                    fprintf (sched_dump, ";;\t\t| New pressure for model"
                             " schedule\n");
                    fprintf (sched_dump, MODEL_BAR);
                    print_p = true;
                  }

                fprintf (sched_dump, ";;\t\t| %3d %4d %-30s ",
                         point, INSN_UID (insn),
                         str_pattern_slim (PATTERN (insn)));
                for (pci = 0; pci < ira_pressure_classes_num; pci++)
                  {
                    cl = ira_pressure_classes[pci];
                    ref_pressure = MODEL_REF_PRESSURE (&model_before_pressure,
                                                       point, pci);
                    fprintf (sched_dump, " %s:[%d->%d]",
                             reg_class_names[ira_pressure_classes[pci]],
                             ref_pressure, ref_pressure + delta[cl]);
                  }
                fprintf (sched_dump, "\n");
              }
          }

        /* Adjust the pressure at POINT.  Set MIX to nonzero if POINT - 1
           might have changed as well.  */
        mix = num_pending_births;
        for (pci = 0; pci < ira_pressure_classes_num; pci++)
          {
            cl = ira_pressure_classes[pci];
            mix |= delta[cl];
            mix |= model_update_pressure (&model_before_pressure,
                                          point, pci, delta[cl]);
          }
      }
    while (mix && point > model_curr_point);

  if (print_p)
    fprintf (sched_dump, MODEL_BAR);
}
/* After DEP, which was cancelled, has been resolved for insn NEXT,
   check whether the insn's pattern needs restoring.  */
static bool
must_restore_pattern_p (rtx_insn *next, dep_t dep)
{
  if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
    return false;

  if (DEP_TYPE (dep) == REG_DEP_CONTROL)
    {
      gcc_assert (ORIG_PAT (next) != NULL_RTX);
      gcc_assert (next == DEP_CON (dep));
    }
  else
    {
      struct dep_replacement *desc = DEP_REPLACE (dep);
      if (desc->insn != next)
        {
          gcc_assert (*desc->loc == desc->orig);
          return false;
        }
    }
  return true;
}
/* model_spill_cost (CL, P, P') returns the cost of increasing the
   pressure on CL from P to P'.  We use this to calculate a "base ECC",
   baseECC (CL, X), for each pressure class CL and each instruction X.
   Supposing X changes the pressure on CL from P to P', and that the
   maximum pressure on CL in the current model schedule is MP', then:

   * if X occurs before or at the next point of maximum pressure in
     the model schedule and P' > MP', then:

       baseECC (CL, X) = model_spill_cost (CL, MP, P')

     The idea is that the pressure after scheduling a fixed set of
     instructions -- in this case, the set up to and including the
     next maximum pressure point -- is going to be the same regardless
     of the order; we simply want to keep the intermediate pressure
     under control.  Thus X has a cost of zero unless scheduling it
     now would exceed MP'.

     If all increases in the set are by the same amount, no zero-cost
     instruction will ever cause the pressure to exceed MP'.  However,
     if X is instead moved past an instruction X' with pressure in the
     range (MP' - (P' - P), MP'), the pressure at X' will increase
     beyond MP'.  Since baseECC is very much a heuristic anyway,
     it doesn't seem worth the overhead of tracking cases like these.

     The cost of exceeding MP' is always based on the original maximum
     pressure MP.  This is so that going 2 registers over the original
     limit has the same cost regardless of whether it comes from two
     separate +1 deltas or from a single +2 delta.

   * if X occurs after the next point of maximum pressure in the model
     schedule and P' > P, then:

       baseECC (CL, X) = model_spill_cost (CL, MP, MP' + (P' - P))

     That is, if we move X forward across a point of maximum pressure,
     and if X increases the pressure by P' - P, then we conservatively
     assume that scheduling X next would increase the maximum pressure
     by P' - P.  Again, the cost of doing this is based on the original
     maximum pressure MP, for the same reason as above.

   * if P' < P, P > MP, and X occurs at or after the next point of
     maximum pressure, then:

       baseECC (CL, X) = -model_spill_cost (CL, MAX (MP, P'), P)

     That is, if we have already exceeded the original maximum pressure MP,
     and if X might reduce the maximum pressure again -- or at least push
     it further back, and thus allow more scheduling freedom -- it is given
     a negative cost to reflect the improvement.

   * otherwise,

       baseECC (CL, X) = 0

     In this case, X is not expected to affect the maximum pressure MP',
     so it has zero cost.

   We then create a combined value baseECC (X) that is the sum of
   baseECC (CL, X) for each pressure class CL.

   baseECC (X) could itself be used as the ECC value described above.
   However, this is often too conservative, in the sense that it
   tends to make high-priority instructions that increase pressure
   wait too long in cases where introducing a spill would be better.
   For this reason the final ECC is a priority-adjusted form of
   baseECC (X).  Specifically, we calculate:

     P (X) = INSN_PRIORITY (X) - insn_delay (X) - baseECC (X)
     baseP = MAX { P (X) | baseECC (X) <= 0 }

   Then:

     ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0)

   Thus an instruction's effect on pressure is ignored if it has a high
   enough priority relative to the ones that don't increase pressure.
   Negative values of baseECC (X) do not increase the priority of X
   itself, but they do make it harder for other instructions to
   increase the pressure further.

   This pressure cost is deliberately timid.  The intention has been
   to choose a heuristic that rarely interferes with the normal list
   scheduler in cases where that scheduler would produce good code.
   We simply want to curb some of its worst excesses.  */
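/* Worked example of the first case above (hypothetical numbers): suppose
   sched_class_regs_num[CL] is 4, the original maximum pressure MP is 5, the
   current maximum MP' is 6, and X would raise the pressure from P = 4 to
   P' = 8 at or before the next maximum-pressure point.  Since P' > MP',
   baseECC (CL, X) = model_spill_cost (CL, 5, 8) = 8 - 5 = 3.  An instruction
   that only raised the pressure to 6 would instead get a baseECC of zero,
   because it does not push the pressure beyond MP'.  */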
/* Return the cost of increasing the pressure in class CL from FROM to TO.

   Here we use the very simplistic cost model that every register above
   sched_class_regs_num[CL] has a spill cost of 1.  We could use other
   measures instead, such as one based on MEMORY_MOVE_COST.  However:

   (1) In order for an instruction to be scheduled, the higher cost
       would need to be justified in a single saving of that many stalls.
       This is overly pessimistic, because the benefit of spilling is
       often to avoid a sequence of several short stalls rather than
       a single long one.

   (2) The cost is still arbitrary.  Because we are not allocating
       registers during scheduling, we have no way of knowing for
       sure how many memory accesses will be required by each spill,
       where the spills will be placed within the block, or even
       which block(s) will contain the spills.

   So a higher cost than 1 is often too conservative in practice,
   forcing blocks to contain unnecessary stalls instead of spill code.
   The simple cost below seems to be the best compromise.  It reduces
   the interference with the normal list scheduler, which helps make
   it more suitable for a default-on option.  */

static int
model_spill_cost (int cl, int from, int to)
{
  from = MAX (from, sched_class_regs_num[cl]);
  return MAX (to, from) - from;
}
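/* For example (hypothetical class size): with sched_class_regs_num[cl] == 4,
   model_spill_cost (cl, 3, 6) is MAX (6, 4) - 4 = 2, since only the registers
   beyond the four available ones are charged, while
   model_spill_cost (cl, 5, 7) is also 2 because the excess grows from one
   register to three.  */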
/* Return baseECC (ira_pressure_classes[PCI], POINT), given that
   P = curr_reg_pressure[ira_pressure_classes[PCI]] and that
   P' = P + DELTA.  */

static int
model_excess_group_cost (struct model_pressure_group *group,
                         int point, int pci, int delta)
{
  int pressure, cl;

  cl = ira_pressure_classes[pci];
  if (delta < 0 && point >= group->limits[pci].point)
    {
      pressure = MAX (group->limits[pci].orig_pressure,
                      curr_reg_pressure[cl] + delta);
      return -model_spill_cost (cl, pressure, curr_reg_pressure[cl]);
    }

  if (delta > 0)
    {
      if (point > group->limits[pci].point)
        pressure = group->limits[pci].pressure + delta;
      else
        pressure = curr_reg_pressure[cl] + delta;

      if (pressure > group->limits[pci].pressure)
        return model_spill_cost (cl, group->limits[pci].orig_pressure,
                                 pressure);
    }

  return 0;
}
/* Return baseECC (MODEL_INSN (INSN)).  Dump the costs to sched_dump
   if PRINT_P.  */

static int
model_excess_cost (rtx_insn *insn, bool print_p)
{
  int point, pci, cl, cost, this_cost, delta;
  struct reg_pressure_data *insn_reg_pressure;
  int insn_death[N_REG_CLASSES];

  calculate_reg_deaths (insn, insn_death);
  point = model_index (insn);
  insn_reg_pressure = INSN_REG_PRESSURE (insn);
  cost = 0;

  if (print_p)
    fprintf (sched_dump, ";;\t\t| %3d %4d | %4d %+3d |", point,
             INSN_UID (insn), INSN_PRIORITY (insn), insn_delay (insn));

  /* Sum up the individual costs for each register class.  */
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      delta = insn_reg_pressure[pci].set_increase - insn_death[cl];
      this_cost = model_excess_group_cost (&model_before_pressure,
                                           point, pci, delta);
      cost += this_cost;
      if (print_p)
        fprintf (sched_dump, " %s:[%d base cost %d]",
                 reg_class_names[cl], delta, this_cost);
    }

  if (print_p)
    fprintf (sched_dump, "\n");

  return cost;
}
/* Dump the next points of maximum pressure for GROUP.  */

static void
model_dump_pressure_points (struct model_pressure_group *group)
{
  int pci, cl;

  fprintf (sched_dump, ";;\t\t| pressure points");
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      fprintf (sched_dump, " %s:[%d->%d at ", reg_class_names[cl],
               curr_reg_pressure[cl], group->limits[pci].pressure);
      if (group->limits[pci].point < model_num_insns)
        fprintf (sched_dump, "%d:%d]", group->limits[pci].point,
                 INSN_UID (MODEL_INSN (group->limits[pci].point)));
      else
        fprintf (sched_dump, "end]");
    }
  fprintf (sched_dump, "\n");
}
/* Set INSN_REG_PRESSURE_EXCESS_COST_CHANGE for INSNS[0...COUNT-1].  */

static void
model_set_excess_costs (rtx_insn **insns, int count)
{
  int i, cost, priority_base, priority;
  bool print_p;

  /* Record the baseECC value for each instruction in the model schedule,
     except that negative costs are converted to zero ones now rather than
     later.  Do not assign a cost to debug instructions, since they must
     not change code-generation decisions.  Experiments suggest we also
     get better results by not assigning a cost to instructions from
     another block.

     Set PRIORITY_BASE to baseP in the block comment above.  This is the
     maximum priority of the "cheap" instructions, which should always
     include the next model instruction.  */
  priority_base = 0;
  print_p = false;
  for (i = 0; i < count; i++)
    if (INSN_MODEL_INDEX (insns[i]))
      {
        if (sched_verbose >= 6 && !print_p)
          {
            fprintf (sched_dump, MODEL_BAR);
            fprintf (sched_dump, ";;\t\t| Pressure costs for ready queue\n");
            model_dump_pressure_points (&model_before_pressure);
            fprintf (sched_dump, MODEL_BAR);
            print_p = true;
          }
        cost = model_excess_cost (insns[i], print_p);
        if (cost <= 0)
          {
            priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]) - cost;
            priority_base = MAX (priority_base, priority);
            cost = 0;
          }
        INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = cost;
      }
  if (print_p)
    fprintf (sched_dump, MODEL_BAR);

  /* Use MAX (baseECC, 0) and baseP to calculate ECC for each
     instruction.  */
  for (i = 0; i < count; i++)
    {
      cost = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]);
      priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]);
      if (cost > 0 && priority > priority_base)
        {
          cost += priority_base - priority;
          INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = MAX (cost, 0);
        }
    }
}
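/* Worked example of the ECC adjustment above (hypothetical numbers): suppose
   the "cheap" instructions give priority_base (baseP) = 10 and instruction X
   has INSN_PRIORITY 14, insn_delay 0 and baseECC 6.  Then
   P (X) = 14 - 0 - 6 = 8 and ECC (X) = MAX (MIN (10 - 8, 6), 0) = 2, which is
   what the second loop computes as 6 + (10 - 14) = 2.  With a priority of 20
   the same instruction would get an ECC of 0: its pressure cost is ignored
   because it is sufficiently more important than the instructions that do
   not increase pressure.  */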
/* Enum of rank_for_schedule heuristic decisions.  */
enum rfs_decision {
  RFS_LIVE_RANGE_SHRINK1, RFS_LIVE_RANGE_SHRINK2,
  RFS_SCHED_GROUP, RFS_PRESSURE_DELAY, RFS_PRESSURE_TICK,
  RFS_FEEDS_BACKTRACK_INSN, RFS_PRIORITY, RFS_SPECULATION,
  RFS_SCHED_RANK, RFS_LAST_INSN, RFS_PRESSURE_INDEX,
  RFS_DEP_COUNT, RFS_TIE, RFS_FUSION, RFS_N };

/* Corresponding strings for print outs.  */
static const char *rfs_str[RFS_N] = {
  "RFS_LIVE_RANGE_SHRINK1", "RFS_LIVE_RANGE_SHRINK2",
  "RFS_SCHED_GROUP", "RFS_PRESSURE_DELAY", "RFS_PRESSURE_TICK",
  "RFS_FEEDS_BACKTRACK_INSN", "RFS_PRIORITY", "RFS_SPECULATION",
  "RFS_SCHED_RANK", "RFS_LAST_INSN", "RFS_PRESSURE_INDEX",
  "RFS_DEP_COUNT", "RFS_TIE", "RFS_FUSION" };

/* Statistical breakdown of rank_for_schedule decisions.  */
typedef struct { unsigned stats[RFS_N]; } rank_for_schedule_stats_t;
static rank_for_schedule_stats_t rank_for_schedule_stats;
/* Return the result of comparing insns TMP and TMP2 and update
   rank_for_schedule statistics.  */
static int
rfs_result (enum rfs_decision decision, int result, rtx tmp, rtx tmp2)
{
  ++rank_for_schedule_stats.stats[decision];
  if (result < 0)
    INSN_LAST_RFS_WIN (tmp) = decision;
  else if (result > 0)
    INSN_LAST_RFS_WIN (tmp2) = decision;
  else
    gcc_unreachable ();
  return result;
}
/* Sorting predicate to move DEBUG_INSNs to the top of ready list, while
   keeping normal insns in original order.  */

static int
rank_for_schedule_debug (const void *x, const void *y)
{
  rtx_insn *tmp = *(rtx_insn * const *) y;
  rtx_insn *tmp2 = *(rtx_insn * const *) x;

  /* Schedule debug insns as early as possible.  */
  if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
    return -1;
  else if (!DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
    return 1;
  else if (DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
    return INSN_LUID (tmp) - INSN_LUID (tmp2);
  else
    return INSN_RFS_DEBUG_ORIG_ORDER (tmp2) - INSN_RFS_DEBUG_ORIG_ORDER (tmp);
}
2629 /* Returns a positive value if x is preferred; returns a negative value if
2630 y is preferred. Should never return 0, since that will make the sort
2634 rank_for_schedule (const void *x
, const void *y
)
2636 rtx_insn
*tmp
= *(rtx_insn
* const *) y
;
2637 rtx_insn
*tmp2
= *(rtx_insn
* const *) x
;
2638 int tmp_class
, tmp2_class
;
2639 int val
, priority_val
, info_val
, diff
;
2641 if (live_range_shrinkage_p
)
2643 /* Don't use SCHED_PRESSURE_MODEL -- it results in much worse
2645 gcc_assert (sched_pressure
== SCHED_PRESSURE_WEIGHTED
);
2646 if ((INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp
) < 0
2647 || INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2
) < 0)
2648 && (diff
= (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp
)
2649 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2
))) != 0)
2650 return rfs_result (RFS_LIVE_RANGE_SHRINK1
, diff
, tmp
, tmp2
);
2651 /* Sort by INSN_LUID (original insn order), so that we make the
2652 sort stable. This minimizes instruction movement, thus
2653 minimizing sched's effect on debugging and cross-jumping. */
2654 return rfs_result (RFS_LIVE_RANGE_SHRINK2
,
2655 INSN_LUID (tmp
) - INSN_LUID (tmp2
), tmp
, tmp2
);
  /* The insn in a schedule group should be issued first.  */
2659 if (flag_sched_group_heuristic
&&
2660 SCHED_GROUP_P (tmp
) != SCHED_GROUP_P (tmp2
))
2661 return rfs_result (RFS_SCHED_GROUP
, SCHED_GROUP_P (tmp2
) ? 1 : -1,
2664 /* Make sure that priority of TMP and TMP2 are initialized. */
2665 gcc_assert (INSN_PRIORITY_KNOWN (tmp
) && INSN_PRIORITY_KNOWN (tmp2
));
2669 /* The instruction that has the same fusion priority as the last
2670 instruction is the instruction we picked next. If that is not
2671 the case, we sort ready list firstly by fusion priority, then
2672 by priority, and at last by INSN_LUID. */
2673 int a
= INSN_FUSION_PRIORITY (tmp
);
2674 int b
= INSN_FUSION_PRIORITY (tmp2
);
2677 if (last_nondebug_scheduled_insn
2678 && !NOTE_P (last_nondebug_scheduled_insn
)
2679 && BLOCK_FOR_INSN (tmp
)
2680 == BLOCK_FOR_INSN (last_nondebug_scheduled_insn
))
2681 last
= INSN_FUSION_PRIORITY (last_nondebug_scheduled_insn
);
2683 if (a
!= last
&& b
!= last
)
2687 a
= INSN_PRIORITY (tmp
);
2688 b
= INSN_PRIORITY (tmp2
);
2691 return rfs_result (RFS_FUSION
, b
- a
, tmp
, tmp2
);
2693 return rfs_result (RFS_FUSION
,
2694 INSN_LUID (tmp
) - INSN_LUID (tmp2
), tmp
, tmp2
);
2698 gcc_assert (last_nondebug_scheduled_insn
2699 && !NOTE_P (last_nondebug_scheduled_insn
));
2700 last
= INSN_PRIORITY (last_nondebug_scheduled_insn
);
2702 a
= abs (INSN_PRIORITY (tmp
) - last
);
2703 b
= abs (INSN_PRIORITY (tmp2
) - last
);
2705 return rfs_result (RFS_FUSION
, a
- b
, tmp
, tmp2
);
2707 return rfs_result (RFS_FUSION
,
2708 INSN_LUID (tmp
) - INSN_LUID (tmp2
), tmp
, tmp2
);
2711 return rfs_result (RFS_FUSION
, -1, tmp
, tmp2
);
2713 return rfs_result (RFS_FUSION
, 1, tmp
, tmp2
);
2716 if (sched_pressure
!= SCHED_PRESSURE_NONE
)
2718 /* Prefer insn whose scheduling results in the smallest register
2720 if ((diff
= (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp
)
2722 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2
)
2723 - insn_delay (tmp2
))))
2724 return rfs_result (RFS_PRESSURE_DELAY
, diff
, tmp
, tmp2
);
2727 if (sched_pressure
!= SCHED_PRESSURE_NONE
2728 && (INSN_TICK (tmp2
) > clock_var
|| INSN_TICK (tmp
) > clock_var
)
2729 && INSN_TICK (tmp2
) != INSN_TICK (tmp
))
2731 diff
= INSN_TICK (tmp
) - INSN_TICK (tmp2
);
2732 return rfs_result (RFS_PRESSURE_TICK
, diff
, tmp
, tmp2
);
2735 /* If we are doing backtracking in this schedule, prefer insns that
2736 have forward dependencies with negative cost against an insn that
2737 was already scheduled. */
2738 if (current_sched_info
->flags
& DO_BACKTRACKING
)
2740 priority_val
= FEEDS_BACKTRACK_INSN (tmp2
) - FEEDS_BACKTRACK_INSN (tmp
);
2742 return rfs_result (RFS_FEEDS_BACKTRACK_INSN
, priority_val
, tmp
, tmp2
);
2745 /* Prefer insn with higher priority. */
2746 priority_val
= INSN_PRIORITY (tmp2
) - INSN_PRIORITY (tmp
);
2748 if (flag_sched_critical_path_heuristic
&& priority_val
)
2749 return rfs_result (RFS_PRIORITY
, priority_val
, tmp
, tmp2
);
2751 if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH
) >= 0)
2753 int autopref
= autopref_rank_for_schedule (tmp
, tmp2
);
2758 /* Prefer speculative insn with greater dependencies weakness. */
2759 if (flag_sched_spec_insn_heuristic
&& spec_info
)
2765 ds1
= TODO_SPEC (tmp
) & SPECULATIVE
;
2767 dw1
= ds_weak (ds1
);
2771 ds2
= TODO_SPEC (tmp2
) & SPECULATIVE
;
2773 dw2
= ds_weak (ds2
);
2778 if (dw
> (NO_DEP_WEAK
/ 8) || dw
< -(NO_DEP_WEAK
/ 8))
2779 return rfs_result (RFS_SPECULATION
, dw
, tmp
, tmp2
);
2782 info_val
= (*current_sched_info
->rank
) (tmp
, tmp2
);
2783 if (flag_sched_rank_heuristic
&& info_val
)
2784 return rfs_result (RFS_SCHED_RANK
, info_val
, tmp
, tmp2
);
2786 /* Compare insns based on their relation to the last scheduled
2788 if (flag_sched_last_insn_heuristic
&& last_nondebug_scheduled_insn
)
2792 rtx last
= last_nondebug_scheduled_insn
;
      /* Classify the instructions into three classes:
         1) Data dependent on last scheduled insn.
         2) Anti/Output dependent on last scheduled insn.
         3) Independent of last scheduled insn, or has latency of one.
         Choose the insn from the highest numbered class if different.  */
2799 dep1
= sd_find_dep_between (last
, tmp
, true);
2801 if (dep1
== NULL
|| dep_cost (dep1
) == 1)
2803 else if (/* Data dependence. */
2804 DEP_TYPE (dep1
) == REG_DEP_TRUE
)
2809 dep2
= sd_find_dep_between (last
, tmp2
, true);
2811 if (dep2
== NULL
|| dep_cost (dep2
) == 1)
2813 else if (/* Data dependence. */
2814 DEP_TYPE (dep2
) == REG_DEP_TRUE
)
2819 if ((val
= tmp2_class
- tmp_class
))
2820 return rfs_result (RFS_LAST_INSN
, val
, tmp
, tmp2
);
2823 /* Prefer instructions that occur earlier in the model schedule. */
2824 if (sched_pressure
== SCHED_PRESSURE_MODEL
2825 && INSN_BB (tmp
) == target_bb
&& INSN_BB (tmp2
) == target_bb
)
2827 diff
= model_index (tmp
) - model_index (tmp2
);
2828 gcc_assert (diff
!= 0);
2829 return rfs_result (RFS_PRESSURE_INDEX
, diff
, tmp
, tmp2
);
2832 /* Prefer the insn which has more later insns that depend on it.
2833 This gives the scheduler more freedom when scheduling later
2834 instructions at the expense of added register pressure. */
2836 val
= (dep_list_size (tmp2
, SD_LIST_FORW
)
2837 - dep_list_size (tmp
, SD_LIST_FORW
));
2839 if (flag_sched_dep_count_heuristic
&& val
!= 0)
2840 return rfs_result (RFS_DEP_COUNT
, val
, tmp
, tmp2
);
2842 /* If insns are equally good, sort by INSN_LUID (original insn order),
2843 so that we make the sort stable. This minimizes instruction movement,
2844 thus minimizing sched's effect on debugging and cross-jumping. */
2845 return rfs_result (RFS_TIE
, INSN_LUID (tmp
) - INSN_LUID (tmp2
), tmp
, tmp2
);
2848 /* Resort the array A in which only element at index N may be out of order. */
2850 HAIFA_INLINE
static void
2851 swap_sort (rtx_insn
**a
, int n
)
2853 rtx_insn
*insn
= a
[n
- 1];
2856 while (i
>= 0 && rank_for_schedule (a
+ i
, &insn
) >= 0)
2864 /* Add INSN to the insn queue so that it can be executed at least
2865 N_CYCLES after the currently executing insn. Preserve insns
2866 chain for debugging purposes. REASON will be printed in debugging
2869 HAIFA_INLINE
static void
2870 queue_insn (rtx_insn
*insn
, int n_cycles
, const char *reason
)
2872 int next_q
= NEXT_Q_AFTER (q_ptr
, n_cycles
);
2873 rtx_insn_list
*link
= alloc_INSN_LIST (insn
, insn_queue
[next_q
]);
2876 gcc_assert (n_cycles
<= max_insn_queue_index
);
2877 gcc_assert (!DEBUG_INSN_P (insn
));
2879 insn_queue
[next_q
] = link
;
2882 if (sched_verbose
>= 2)
2884 fprintf (sched_dump
, ";;\t\tReady-->Q: insn %s: ",
2885 (*current_sched_info
->print_insn
) (insn
, 0));
2887 fprintf (sched_dump
, "queued for %d cycles (%s).\n", n_cycles
, reason
);
2890 QUEUE_INDEX (insn
) = next_q
;
2892 if (current_sched_info
->flags
& DO_BACKTRACKING
)
2894 new_tick
= clock_var
+ n_cycles
;
2895 if (INSN_TICK (insn
) == INVALID_TICK
|| INSN_TICK (insn
) < new_tick
)
2896 INSN_TICK (insn
) = new_tick
;
2898 if (INSN_EXACT_TICK (insn
) != INVALID_TICK
2899 && INSN_EXACT_TICK (insn
) < clock_var
+ n_cycles
)
2901 must_backtrack
= true;
2902 if (sched_verbose
>= 2)
2903 fprintf (sched_dump
, ";;\t\tcausing a backtrack.\n");
2908 /* Remove INSN from queue. */
2910 queue_remove (rtx_insn
*insn
)
2912 gcc_assert (QUEUE_INDEX (insn
) >= 0);
2913 remove_free_INSN_LIST_elem (insn
, &insn_queue
[QUEUE_INDEX (insn
)]);
2915 QUEUE_INDEX (insn
) = QUEUE_NOWHERE
;
2918 /* Return a pointer to the bottom of the ready list, i.e. the insn
2919 with the lowest priority. */
2922 ready_lastpos (struct ready_list
*ready
)
2924 gcc_assert (ready
->n_ready
>= 1);
2925 return ready
->vec
+ ready
->first
- ready
->n_ready
+ 1;
2928 /* Add an element INSN to the ready list so that it ends up with the
2929 lowest/highest priority depending on FIRST_P. */
2931 HAIFA_INLINE
static void
2932 ready_add (struct ready_list
*ready
, rtx_insn
*insn
, bool first_p
)
2936 if (ready
->first
== ready
->n_ready
)
2938 memmove (ready
->vec
+ ready
->veclen
- ready
->n_ready
,
2939 ready_lastpos (ready
),
2940 ready
->n_ready
* sizeof (rtx
));
2941 ready
->first
= ready
->veclen
- 1;
2943 ready
->vec
[ready
->first
- ready
->n_ready
] = insn
;
2947 if (ready
->first
== ready
->veclen
- 1)
2950 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
2951 memmove (ready
->vec
+ ready
->veclen
- ready
->n_ready
- 1,
2952 ready_lastpos (ready
),
2953 ready
->n_ready
* sizeof (rtx
));
2954 ready
->first
= ready
->veclen
- 2;
2956 ready
->vec
[++(ready
->first
)] = insn
;
2960 if (DEBUG_INSN_P (insn
))
2963 gcc_assert (QUEUE_INDEX (insn
) != QUEUE_READY
);
2964 QUEUE_INDEX (insn
) = QUEUE_READY
;
2966 if (INSN_EXACT_TICK (insn
) != INVALID_TICK
2967 && INSN_EXACT_TICK (insn
) < clock_var
)
2969 must_backtrack
= true;
2973 /* Remove the element with the highest priority from the ready list and
2976 HAIFA_INLINE
static rtx_insn
*
2977 ready_remove_first (struct ready_list
*ready
)
2981 gcc_assert (ready
->n_ready
);
2982 t
= ready
->vec
[ready
->first
--];
2984 if (DEBUG_INSN_P (t
))
2986 /* If the queue becomes empty, reset it. */
2987 if (ready
->n_ready
== 0)
2988 ready
->first
= ready
->veclen
- 1;
2990 gcc_assert (QUEUE_INDEX (t
) == QUEUE_READY
);
2991 QUEUE_INDEX (t
) = QUEUE_NOWHERE
;
2996 /* The following code implements multi-pass scheduling for the first
2997 cycle. In other words, we will try to choose ready insn which
2998 permits to start maximum number of insns on the same cycle. */
3000 /* Return a pointer to the element INDEX from the ready. INDEX for
3001 insn with the highest priority is 0, and the lowest priority has
3005 ready_element (struct ready_list
*ready
, int index
)
3007 gcc_assert (ready
->n_ready
&& index
< ready
->n_ready
);
3009 return ready
->vec
[ready
->first
- index
];
3012 /* Remove the element INDEX from the ready list and return it. INDEX
3013 for insn with the highest priority is 0, and the lowest priority
3016 HAIFA_INLINE
static rtx_insn
*
3017 ready_remove (struct ready_list
*ready
, int index
)
3023 return ready_remove_first (ready
);
3024 gcc_assert (ready
->n_ready
&& index
< ready
->n_ready
);
3025 t
= ready
->vec
[ready
->first
- index
];
3027 if (DEBUG_INSN_P (t
))
3029 for (i
= index
; i
< ready
->n_ready
; i
++)
3030 ready
->vec
[ready
->first
- i
] = ready
->vec
[ready
->first
- i
- 1];
3031 QUEUE_INDEX (t
) = QUEUE_NOWHERE
;
3035 /* Remove INSN from the ready list. */
3037 ready_remove_insn (rtx insn
)
3041 for (i
= 0; i
< readyp
->n_ready
; i
++)
3042 if (ready_element (readyp
, i
) == insn
)
3044 ready_remove (readyp
, i
);
/* Calculate the difference of two statistics sets WAS and NOW.
   Result returned in WAS.  */
3053 rank_for_schedule_stats_diff (rank_for_schedule_stats_t
*was
,
3054 const rank_for_schedule_stats_t
*now
)
3056 for (int i
= 0; i
< RFS_N
; ++i
)
3057 was
->stats
[i
] = now
->stats
[i
] - was
->stats
[i
];
3060 /* Print rank_for_schedule statistics. */
3062 print_rank_for_schedule_stats (const char *prefix
,
3063 const rank_for_schedule_stats_t
*stats
,
3064 struct ready_list
*ready
)
3066 for (int i
= 0; i
< RFS_N
; ++i
)
3067 if (stats
->stats
[i
])
3069 fprintf (sched_dump
, "%s%20s: %u", prefix
, rfs_str
[i
], stats
->stats
[i
]);
3072 /* Print out insns that won due to RFS_<I>. */
3074 rtx_insn
**p
= ready_lastpos (ready
);
3076 fprintf (sched_dump
, ":");
3077 /* Start with 1 since least-priority insn didn't have any wins. */
3078 for (int j
= 1; j
< ready
->n_ready
; ++j
)
3079 if (INSN_LAST_RFS_WIN (p
[j
]) == i
)
3080 fprintf (sched_dump
, " %s",
3081 (*current_sched_info
->print_insn
) (p
[j
], 0));
3083 fprintf (sched_dump
, "\n");
3087 /* Separate DEBUG_INSNS from normal insns. DEBUG_INSNs go to the end
3090 ready_sort_debug (struct ready_list
*ready
)
3093 rtx_insn
**first
= ready_lastpos (ready
);
3095 for (i
= 0; i
< ready
->n_ready
; ++i
)
3096 if (!DEBUG_INSN_P (first
[i
]))
3097 INSN_RFS_DEBUG_ORIG_ORDER (first
[i
]) = i
;
3099 qsort (first
, ready
->n_ready
, sizeof (rtx
), rank_for_schedule_debug
);
3102 /* Sort non-debug insns in the ready list READY by ascending priority.
3103 Assumes that all debug insns are separated from the real insns. */
3105 ready_sort_real (struct ready_list
*ready
)
3108 rtx_insn
**first
= ready_lastpos (ready
);
3109 int n_ready_real
= ready
->n_ready
- ready
->n_debug
;
3111 if (sched_pressure
== SCHED_PRESSURE_WEIGHTED
)
3112 for (i
= 0; i
< n_ready_real
; ++i
)
3113 setup_insn_reg_pressure_info (first
[i
]);
3114 else if (sched_pressure
== SCHED_PRESSURE_MODEL
3115 && model_curr_point
< model_num_insns
)
3116 model_set_excess_costs (first
, n_ready_real
);
3118 rank_for_schedule_stats_t stats1
;
3119 if (sched_verbose
>= 4)
3120 stats1
= rank_for_schedule_stats
;
3122 if (n_ready_real
== 2)
3123 swap_sort (first
, n_ready_real
);
3124 else if (n_ready_real
> 2)
3125 qsort (first
, n_ready_real
, sizeof (rtx
), rank_for_schedule
);
3127 if (sched_verbose
>= 4)
3129 rank_for_schedule_stats_diff (&stats1
, &rank_for_schedule_stats
);
3130 print_rank_for_schedule_stats (";;\t\t", &stats1
, ready
);
3134 /* Sort the ready list READY by ascending priority. */
3136 ready_sort (struct ready_list
*ready
)
3138 if (ready
->n_debug
> 0)
3139 ready_sort_debug (ready
);
3141 ready_sort_real (ready
);
3144 /* PREV is an insn that is ready to execute. Adjust its priority if that
3145 will help shorten or lengthen register lifetimes as appropriate. Also
3146 provide a hook for the target to tweak itself. */
3148 HAIFA_INLINE
static void
3149 adjust_priority (rtx_insn
*prev
)
3151 /* ??? There used to be code here to try and estimate how an insn
3152 affected register lifetimes, but it did it by looking at REG_DEAD
3153 notes, which we removed in schedule_region. Nor did it try to
3154 take into account register pressure or anything useful like that.
3156 Revisit when we have a machine model to work with and not before. */
3158 if (targetm
.sched
.adjust_priority
)
3159 INSN_PRIORITY (prev
) =
3160 targetm
.sched
.adjust_priority (prev
, INSN_PRIORITY (prev
));
3163 /* Advance DFA state STATE on one cycle. */
3165 advance_state (state_t state
)
3167 if (targetm
.sched
.dfa_pre_advance_cycle
)
3168 targetm
.sched
.dfa_pre_advance_cycle ();
3170 if (targetm
.sched
.dfa_pre_cycle_insn
)
3171 state_transition (state
,
3172 targetm
.sched
.dfa_pre_cycle_insn ());
3174 state_transition (state
, NULL
);
3176 if (targetm
.sched
.dfa_post_cycle_insn
)
3177 state_transition (state
,
3178 targetm
.sched
.dfa_post_cycle_insn ());
3180 if (targetm
.sched
.dfa_post_advance_cycle
)
3181 targetm
.sched
.dfa_post_advance_cycle ();
3184 /* Advance time on one cycle. */
3185 HAIFA_INLINE
static void
3186 advance_one_cycle (void)
3188 advance_state (curr_state
);
3189 if (sched_verbose
>= 4)
3190 fprintf (sched_dump
, ";;\tAdvance the current state.\n");
3193 /* Update register pressure after scheduling INSN. */
3195 update_register_pressure (rtx_insn
*insn
)
3197 struct reg_use_data
*use
;
3198 struct reg_set_data
*set
;
3200 gcc_checking_assert (!DEBUG_INSN_P (insn
));
3202 for (use
= INSN_REG_USE_LIST (insn
); use
!= NULL
; use
= use
->next_insn_use
)
3203 if (dying_use_p (use
))
3204 mark_regno_birth_or_death (curr_reg_live
, curr_reg_pressure
,
3206 for (set
= INSN_REG_SET_LIST (insn
); set
!= NULL
; set
= set
->next_insn_set
)
3207 mark_regno_birth_or_death (curr_reg_live
, curr_reg_pressure
,
3211 /* Set up or update (if UPDATE_P) max register pressure (see its
3212 meaning in sched-int.h::_haifa_insn_data) for all current BB insns
3213 after insn AFTER. */
3215 setup_insn_max_reg_pressure (rtx_insn
*after
, bool update_p
)
3220 static int max_reg_pressure
[N_REG_CLASSES
];
3222 save_reg_pressure ();
3223 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
3224 max_reg_pressure
[ira_pressure_classes
[i
]]
3225 = curr_reg_pressure
[ira_pressure_classes
[i
]];
3226 for (insn
= NEXT_INSN (after
);
3227 insn
!= NULL_RTX
&& ! BARRIER_P (insn
)
3228 && BLOCK_FOR_INSN (insn
) == BLOCK_FOR_INSN (after
);
3229 insn
= NEXT_INSN (insn
))
3230 if (NONDEBUG_INSN_P (insn
))
3233 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
3235 p
= max_reg_pressure
[ira_pressure_classes
[i
]];
3236 if (INSN_MAX_REG_PRESSURE (insn
)[i
] != p
)
3239 INSN_MAX_REG_PRESSURE (insn
)[i
]
3240 = max_reg_pressure
[ira_pressure_classes
[i
]];
3243 if (update_p
&& eq_p
)
3245 update_register_pressure (insn
);
3246 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
3247 if (max_reg_pressure
[ira_pressure_classes
[i
]]
3248 < curr_reg_pressure
[ira_pressure_classes
[i
]])
3249 max_reg_pressure
[ira_pressure_classes
[i
]]
3250 = curr_reg_pressure
[ira_pressure_classes
[i
]];
3252 restore_reg_pressure ();
3255 /* Update the current register pressure after scheduling INSN. Update
3256 also max register pressure for unscheduled insns of the current
3259 update_reg_and_insn_max_reg_pressure (rtx_insn
*insn
)
3262 int before
[N_REG_CLASSES
];
3264 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
3265 before
[i
] = curr_reg_pressure
[ira_pressure_classes
[i
]];
3266 update_register_pressure (insn
);
3267 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
3268 if (curr_reg_pressure
[ira_pressure_classes
[i
]] != before
[i
])
3270 if (i
< ira_pressure_classes_num
)
3271 setup_insn_max_reg_pressure (insn
, true);
/* Set up register pressure at the beginning of basic block BB, whose
   insns start after insn AFTER.  Also set up max register pressure
   for all insns of the basic block.  */
3278 sched_setup_bb_reg_pressure_info (basic_block bb
, rtx_insn
*after
)
3280 gcc_assert (sched_pressure
== SCHED_PRESSURE_WEIGHTED
);
3281 initiate_bb_reg_pressure_info (bb
);
3282 setup_insn_max_reg_pressure (after
, false);
3285 /* If doing predication while scheduling, verify whether INSN, which
3286 has just been scheduled, clobbers the conditions of any
3287 instructions that must be predicated in order to break their
3288 dependencies. If so, remove them from the queues so that they will
3289 only be scheduled once their control dependency is resolved. */
3292 check_clobbered_conditions (rtx insn
)
3297 if ((current_sched_info
->flags
& DO_PREDICATION
) == 0)
3300 find_all_hard_reg_sets (insn
, &t
, true);
3303 for (i
= 0; i
< ready
.n_ready
; i
++)
3305 rtx_insn
*x
= ready_element (&ready
, i
);
3306 if (TODO_SPEC (x
) == DEP_CONTROL
&& cond_clobbered_p (x
, t
))
3308 ready_remove_insn (x
);
3312 for (i
= 0; i
<= max_insn_queue_index
; i
++)
3314 rtx_insn_list
*link
;
3315 int q
= NEXT_Q_AFTER (q_ptr
, i
);
3318 for (link
= insn_queue
[q
]; link
; link
= link
->next ())
3320 rtx_insn
*x
= link
->insn ();
3321 if (TODO_SPEC (x
) == DEP_CONTROL
&& cond_clobbered_p (x
, t
))
3330 /* Return (in order):
3332 - positive if INSN adversely affects the pressure on one
3335 - negative if INSN reduces the pressure on one register class
3337 - 0 if INSN doesn't affect the pressure on any register class. */
3340 model_classify_pressure (struct model_insn_info
*insn
)
3342 struct reg_pressure_data
*reg_pressure
;
3343 int death
[N_REG_CLASSES
];
3346 calculate_reg_deaths (insn
->insn
, death
);
3347 reg_pressure
= INSN_REG_PRESSURE (insn
->insn
);
3349 for (pci
= 0; pci
< ira_pressure_classes_num
; pci
++)
3351 cl
= ira_pressure_classes
[pci
];
3352 if (death
[cl
] < reg_pressure
[pci
].set_increase
)
3354 sum
+= reg_pressure
[pci
].set_increase
- death
[cl
];
3359 /* Return true if INSN1 should come before INSN2 in the model schedule. */
3362 model_order_p (struct model_insn_info
*insn1
, struct model_insn_info
*insn2
)
3364 unsigned int height1
, height2
;
3365 unsigned int priority1
, priority2
;
3367 /* Prefer instructions with a higher model priority. */
3368 if (insn1
->model_priority
!= insn2
->model_priority
)
3369 return insn1
->model_priority
> insn2
->model_priority
;
  /* Combine the length of the longest path of satisfied true dependencies
     that leads to each instruction (depth) with the length of the longest
     path of any dependencies that leads from the instruction (alap).
     Prefer instructions with the greatest combined length.  If the combined
     lengths are equal, prefer instructions with the greatest depth.

     The idea is that, if we have a set S of "equal" instructions that each
     have ALAP value X, and we pick one such instruction I, any true-dependent
     successors of I that have ALAP value X - 1 should be preferred over S.
     This encourages the schedule to be "narrow" rather than "wide".
     However, if I is a low-priority instruction that we decided to
     schedule because of its model_classify_pressure, and if there
     is a set of higher-priority instructions T, the aforementioned
     successors of I should not have the edge over T.  */
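  /* Illustrative example (hypothetical): for a chain of true dependencies
     A -> B -> C with no other constraints, A has alap 2, B alap 1 and C
     alap 0, and every depth starts at 0.  Scheduling A raises B's depth
     to 1, so B's combined depth + alap (2) matches that of A's former
     peers, and the depth tie-breaker then favours B over them, keeping
     the model schedule "narrow" as described above.  */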
3385 height1
= insn1
->depth
+ insn1
->alap
;
3386 height2
= insn2
->depth
+ insn2
->alap
;
3387 if (height1
!= height2
)
3388 return height1
> height2
;
3389 if (insn1
->depth
!= insn2
->depth
)
3390 return insn1
->depth
> insn2
->depth
;
  /* We have no real preference between INSN1 and INSN2 as far as attempts
     to reduce pressure go.  Prefer instructions with higher priorities.  */
3394 priority1
= INSN_PRIORITY (insn1
->insn
);
3395 priority2
= INSN_PRIORITY (insn2
->insn
);
3396 if (priority1
!= priority2
)
3397 return priority1
> priority2
;
3399 /* Use the original rtl sequence as a tie-breaker. */
3400 return insn1
< insn2
;
3403 /* Add INSN to the model worklist immediately after PREV. Add it to the
3404 beginning of the list if PREV is null. */
3407 model_add_to_worklist_at (struct model_insn_info
*insn
,
3408 struct model_insn_info
*prev
)
3410 gcc_assert (QUEUE_INDEX (insn
->insn
) == QUEUE_NOWHERE
);
3411 QUEUE_INDEX (insn
->insn
) = QUEUE_READY
;
3416 insn
->next
= prev
->next
;
3421 insn
->next
= model_worklist
;
3422 model_worklist
= insn
;
3425 insn
->next
->prev
= insn
;
3428 /* Remove INSN from the model worklist. */
3431 model_remove_from_worklist (struct model_insn_info
*insn
)
3433 gcc_assert (QUEUE_INDEX (insn
->insn
) == QUEUE_READY
);
3434 QUEUE_INDEX (insn
->insn
) = QUEUE_NOWHERE
;
3437 insn
->prev
->next
= insn
->next
;
3439 model_worklist
= insn
->next
;
3441 insn
->next
->prev
= insn
->prev
;
3444 /* Add INSN to the model worklist. Start looking for a suitable position
3445 between neighbors PREV and NEXT, testing at most MAX_SCHED_READY_INSNS
3446 insns either side. A null PREV indicates the beginning of the list and
3447 a null NEXT indicates the end. */
3450 model_add_to_worklist (struct model_insn_info
*insn
,
3451 struct model_insn_info
*prev
,
3452 struct model_insn_info
*next
)
3456 count
= MAX_SCHED_READY_INSNS
;
3457 if (count
> 0 && prev
&& model_order_p (insn
, prev
))
3463 while (count
> 0 && prev
&& model_order_p (insn
, prev
));
3465 while (count
> 0 && next
&& model_order_p (next
, insn
))
3471 model_add_to_worklist_at (insn
, prev
);
3474 /* INSN may now have a higher priority (in the model_order_p sense)
3475 than before. Move it up the worklist if necessary. */
3478 model_promote_insn (struct model_insn_info
*insn
)
3480 struct model_insn_info
*prev
;
3484 count
= MAX_SCHED_READY_INSNS
;
3485 while (count
> 0 && prev
&& model_order_p (insn
, prev
))
3490 if (prev
!= insn
->prev
)
3492 model_remove_from_worklist (insn
);
3493 model_add_to_worklist_at (insn
, prev
);
3497 /* Add INSN to the end of the model schedule. */
3500 model_add_to_schedule (rtx_insn
*insn
)
3504 gcc_assert (QUEUE_INDEX (insn
) == QUEUE_NOWHERE
);
3505 QUEUE_INDEX (insn
) = QUEUE_SCHEDULED
;
3507 point
= model_schedule
.length ();
3508 model_schedule
.quick_push (insn
);
3509 INSN_MODEL_INDEX (insn
) = point
+ 1;
3512 /* Analyze the instructions that are to be scheduled, setting up
3513 MODEL_INSN_INFO (...) and model_num_insns accordingly. Add ready
3514 instructions to model_worklist. */
3517 model_analyze_insns (void)
3519 rtx_insn
*start
, *end
, *iter
;
3520 sd_iterator_def sd_it
;
3522 struct model_insn_info
*insn
, *con
;
3524 model_num_insns
= 0;
3525 start
= PREV_INSN (current_sched_info
->next_tail
);
3526 end
= current_sched_info
->prev_head
;
3527 for (iter
= start
; iter
!= end
; iter
= PREV_INSN (iter
))
3528 if (NONDEBUG_INSN_P (iter
))
3530 insn
= MODEL_INSN_INFO (iter
);
3532 FOR_EACH_DEP (iter
, SD_LIST_FORW
, sd_it
, dep
)
3534 con
= MODEL_INSN_INFO (DEP_CON (dep
));
3535 if (con
->insn
&& insn
->alap
< con
->alap
+ 1)
3536 insn
->alap
= con
->alap
+ 1;
3539 insn
->old_queue
= QUEUE_INDEX (iter
);
3540 QUEUE_INDEX (iter
) = QUEUE_NOWHERE
;
3542 insn
->unscheduled_preds
= dep_list_size (iter
, SD_LIST_HARD_BACK
);
3543 if (insn
->unscheduled_preds
== 0)
3544 model_add_to_worklist (insn
, NULL
, model_worklist
);
3550 /* The global state describes the register pressure at the start of the
3551 model schedule. Initialize GROUP accordingly. */
3554 model_init_pressure_group (struct model_pressure_group
*group
)
3558 for (pci
= 0; pci
< ira_pressure_classes_num
; pci
++)
3560 cl
= ira_pressure_classes
[pci
];
3561 group
->limits
[pci
].pressure
= curr_reg_pressure
[cl
];
3562 group
->limits
[pci
].point
= 0;
3564 /* Use index model_num_insns to record the state after the last
3565 instruction in the model schedule. */
3566 group
->model
= XNEWVEC (struct model_pressure_data
,
3567 (model_num_insns
+ 1) * ira_pressure_classes_num
);
3570 /* Record that MODEL_REF_PRESSURE (GROUP, POINT, PCI) is PRESSURE.
3571 Update the maximum pressure for the whole schedule. */
3574 model_record_pressure (struct model_pressure_group
*group
,
3575 int point
, int pci
, int pressure
)
3577 MODEL_REF_PRESSURE (group
, point
, pci
) = pressure
;
3578 if (group
->limits
[pci
].pressure
< pressure
)
3580 group
->limits
[pci
].pressure
= pressure
;
3581 group
->limits
[pci
].point
= point
;
3585 /* INSN has just been added to the end of the model schedule. Record its
3586 register-pressure information. */
3589 model_record_pressures (struct model_insn_info
*insn
)
3591 struct reg_pressure_data
*reg_pressure
;
3592 int point
, pci
, cl
, delta
;
3593 int death
[N_REG_CLASSES
];
3595 point
= model_index (insn
->insn
);
3596 if (sched_verbose
>= 2)
3600 fprintf (sched_dump
, "\n;;\tModel schedule:\n;;\n");
3601 fprintf (sched_dump
, ";;\t| idx insn | mpri hght dpth prio |\n");
3603 fprintf (sched_dump
, ";;\t| %3d %4d | %4d %4d %4d %4d | %-30s ",
3604 point
, INSN_UID (insn
->insn
), insn
->model_priority
,
3605 insn
->depth
+ insn
->alap
, insn
->depth
,
3606 INSN_PRIORITY (insn
->insn
),
3607 str_pattern_slim (PATTERN (insn
->insn
)));
3609 calculate_reg_deaths (insn
->insn
, death
);
3610 reg_pressure
= INSN_REG_PRESSURE (insn
->insn
);
3611 for (pci
= 0; pci
< ira_pressure_classes_num
; pci
++)
3613 cl
= ira_pressure_classes
[pci
];
3614 delta
= reg_pressure
[pci
].set_increase
- death
[cl
];
3615 if (sched_verbose
>= 2)
3616 fprintf (sched_dump
, " %s:[%d,%+d]", reg_class_names
[cl
],
3617 curr_reg_pressure
[cl
], delta
);
3618 model_record_pressure (&model_before_pressure
, point
, pci
,
3619 curr_reg_pressure
[cl
]);
3621 if (sched_verbose
>= 2)
3622 fprintf (sched_dump
, "\n");
3625 /* All instructions have been added to the model schedule. Record the
3626 final register pressure in GROUP and set up all MODEL_MAX_PRESSUREs. */
3629 model_record_final_pressures (struct model_pressure_group
*group
)
3631 int point
, pci
, max_pressure
, ref_pressure
, cl
;
3633 for (pci
= 0; pci
< ira_pressure_classes_num
; pci
++)
3635 /* Record the final pressure for this class. */
3636 cl
= ira_pressure_classes
[pci
];
3637 point
= model_num_insns
;
3638 ref_pressure
= curr_reg_pressure
[cl
];
3639 model_record_pressure (group
, point
, pci
, ref_pressure
);
3641 /* Record the original maximum pressure. */
3642 group
->limits
[pci
].orig_pressure
= group
->limits
[pci
].pressure
;
3644 /* Update the MODEL_MAX_PRESSURE for every point of the schedule. */
3645 max_pressure
= ref_pressure
;
3646 MODEL_MAX_PRESSURE (group
, point
, pci
) = max_pressure
;
3650 ref_pressure
= MODEL_REF_PRESSURE (group
, point
, pci
);
3651 max_pressure
= MAX (max_pressure
, ref_pressure
);
3652 MODEL_MAX_PRESSURE (group
, point
, pci
) = max_pressure
;
3657 /* Update all successors of INSN, given that INSN has just been scheduled. */
3660 model_add_successors_to_worklist (struct model_insn_info
*insn
)
3662 sd_iterator_def sd_it
;
3663 struct model_insn_info
*con
;
3666 FOR_EACH_DEP (insn
->insn
, SD_LIST_FORW
, sd_it
, dep
)
3668 con
= MODEL_INSN_INFO (DEP_CON (dep
));
3669 /* Ignore debug instructions, and instructions from other blocks. */
3672 con
->unscheduled_preds
--;
3674 /* Update the depth field of each true-dependent successor.
3675 Increasing the depth gives them a higher priority than
3677 if (DEP_TYPE (dep
) == REG_DEP_TRUE
&& con
->depth
< insn
->depth
+ 1)
3679 con
->depth
= insn
->depth
+ 1;
3680 if (QUEUE_INDEX (con
->insn
) == QUEUE_READY
)
3681 model_promote_insn (con
);
3684 /* If this is a true dependency, or if there are no remaining
3685 dependencies for CON (meaning that CON only had non-true
3686 dependencies), make sure that CON is on the worklist.
3687 We don't bother otherwise because it would tend to fill the
3688 worklist with a lot of low-priority instructions that are not
3689 yet ready to issue. */
3690 if ((con
->depth
> 0 || con
->unscheduled_preds
== 0)
3691 && QUEUE_INDEX (con
->insn
) == QUEUE_NOWHERE
)
3692 model_add_to_worklist (con
, insn
, insn
->next
);
3697 /* Give INSN a higher priority than any current instruction, then give
3698 unscheduled predecessors of INSN a higher priority still. If any of
3699 those predecessors are not on the model worklist, do the same for its
3700 predecessors, and so on. */
3703 model_promote_predecessors (struct model_insn_info
*insn
)
3705 struct model_insn_info
*pro
, *first
;
3706 sd_iterator_def sd_it
;
3709 if (sched_verbose
>= 7)
3710 fprintf (sched_dump
, ";;\t+--- priority of %d = %d, priority of",
3711 INSN_UID (insn
->insn
), model_next_priority
);
3712 insn
->model_priority
= model_next_priority
++;
3713 model_remove_from_worklist (insn
);
3714 model_add_to_worklist_at (insn
, NULL
);
3719 FOR_EACH_DEP (insn
->insn
, SD_LIST_HARD_BACK
, sd_it
, dep
)
3721 pro
= MODEL_INSN_INFO (DEP_PRO (dep
));
3722 /* The first test is to ignore debug instructions, and instructions
3723 from other blocks. */
3725 && pro
->model_priority
!= model_next_priority
3726 && QUEUE_INDEX (pro
->insn
) != QUEUE_SCHEDULED
)
3728 pro
->model_priority
= model_next_priority
;
3729 if (sched_verbose
>= 7)
3730 fprintf (sched_dump
, " %d", INSN_UID (pro
->insn
));
3731 if (QUEUE_INDEX (pro
->insn
) == QUEUE_READY
)
3733 /* PRO is already in the worklist, but it now has
3734 a higher priority than before. Move it at the
3735 appropriate place. */
3736 model_remove_from_worklist (pro
);
3737 model_add_to_worklist (pro
, NULL
, model_worklist
);
3741 /* PRO isn't in the worklist. Recursively process
3742 its predecessors until we find one that is. */
3753 if (sched_verbose
>= 7)
3754 fprintf (sched_dump
, " = %d\n", model_next_priority
);
3755 model_next_priority
++;
3758 /* Pick one instruction from model_worklist and process it. */
3761 model_choose_insn (void)
3763 struct model_insn_info
*insn
, *fallback
;
3766 if (sched_verbose
>= 7)
3768 fprintf (sched_dump
, ";;\t+--- worklist:\n");
3769 insn
= model_worklist
;
3770 count
= MAX_SCHED_READY_INSNS
;
3771 while (count
> 0 && insn
)
3773 fprintf (sched_dump
, ";;\t+--- %d [%d, %d, %d, %d]\n",
3774 INSN_UID (insn
->insn
), insn
->model_priority
,
3775 insn
->depth
+ insn
->alap
, insn
->depth
,
3776 INSN_PRIORITY (insn
->insn
));
  /* Look for a ready instruction whose model_classify_pressure is zero
     or negative, picking the highest-priority one.  Adding such an
     instruction to the schedule now should do no harm, and may actually
     help.

     Failing that, see whether there is an instruction with the highest
     extant model_priority that is not yet ready, but which would reduce
     pressure if it became ready.  This is designed to catch cases like:

       (set (mem (reg R1)) (reg R2))

     where the instruction is the last remaining use of R1 and where the
     value of R2 is not yet available (or vice versa).  The death of R1
     means that this instruction already reduces pressure.  It is of
     course possible that the computation of R2 involves other registers
     that are hard to kill, but such cases are rare enough for this
     heuristic to be a win in general.

     Failing that, just pick the highest-priority instruction in the
     worklist.  */
3802 count
= MAX_SCHED_READY_INSNS
;
3803 insn
= model_worklist
;
3807 if (count
== 0 || !insn
)
3809 insn
= fallback
? fallback
: model_worklist
;
3812 if (insn
->unscheduled_preds
)
3814 if (model_worklist
->model_priority
== insn
->model_priority
3816 && model_classify_pressure (insn
) < 0)
3821 if (model_classify_pressure (insn
) <= 0)
3828 if (sched_verbose
>= 7 && insn
!= model_worklist
)
3830 if (insn
->unscheduled_preds
)
3831 fprintf (sched_dump
, ";;\t+--- promoting insn %d, with dependencies\n",
3832 INSN_UID (insn
->insn
));
3834 fprintf (sched_dump
, ";;\t+--- promoting insn %d, which is ready\n",
3835 INSN_UID (insn
->insn
));
3837 if (insn
->unscheduled_preds
)
3838 /* INSN isn't yet ready to issue. Give all its predecessors the
3839 highest priority. */
3840 model_promote_predecessors (insn
);
3843 /* INSN is ready. Add it to the end of model_schedule and
3844 process its successors. */
3845 model_add_successors_to_worklist (insn
);
3846 model_remove_from_worklist (insn
);
3847 model_add_to_schedule (insn
->insn
);
3848 model_record_pressures (insn
);
3849 update_register_pressure (insn
->insn
);
/* Restore all QUEUE_INDEXs to the values that they had before
   model_start_schedule was called.  */
static void
model_reset_queue_indices (void)
{
  unsigned int i;
  rtx_insn *insn;

  FOR_EACH_VEC_ELT (model_schedule, i, insn)
    QUEUE_INDEX (insn) = MODEL_INSN_INFO (insn)->old_queue;
}

/* We have calculated the model schedule and spill costs.  Print a summary
   to sched_dump.  */
static void
model_dump_pressure_summary (void)
{
  int pci, cl;

  fprintf (sched_dump, ";; Pressure summary:");
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      fprintf (sched_dump, " %s:%d", reg_class_names[cl],
	       model_before_pressure.limits[pci].pressure);
    }
  fprintf (sched_dump, "\n\n");
}
/* Initialize the SCHED_PRESSURE_MODEL information for the current
   scheduling region.  */
static void
model_start_schedule (basic_block bb)
{
  model_next_priority = 1;
  model_schedule.create (sched_max_luid);
  model_insns = XCNEWVEC (struct model_insn_info, sched_max_luid);

  gcc_assert (bb == BLOCK_FOR_INSN (NEXT_INSN (current_sched_info->prev_head)));
  initiate_reg_pressure_info (df_get_live_in (bb));

  model_analyze_insns ();
  model_init_pressure_group (&model_before_pressure);
  while (model_worklist)
    model_choose_insn ();
  gcc_assert (model_num_insns == (int) model_schedule.length ());
  if (sched_verbose >= 2)
    fprintf (sched_dump, "\n");

  model_record_final_pressures (&model_before_pressure);
  model_reset_queue_indices ();

  XDELETEVEC (model_insns);

  model_curr_point = 0;
  initiate_reg_pressure_info (df_get_live_in (bb));
  if (sched_verbose >= 1)
    model_dump_pressure_summary ();
}

/* Free the information associated with GROUP.  */
static void
model_finalize_pressure_group (struct model_pressure_group *group)
{
  XDELETEVEC (group->model);
}

/* Free the information created by model_start_schedule.  */
static void
model_end_schedule (void)
{
  model_finalize_pressure_group (&model_before_pressure);
  model_schedule.release ();
}
/* Prepare reg pressure scheduling for basic block BB.  */
static void
sched_pressure_start_bb (basic_block bb)
{
  /* Set the number of available registers for each class taking into account
     relative probability of the current basic block versus the function
     prologue and epilogue.
     * If the basic block executes much more often than the prologue/epilogue
     (e.g., inside a hot loop), then the cost of a spill in the prologue is
     close to nil, so the effective number of available registers is
     (ira_class_hard_regs_num[cl] - 0).
     * If the basic block executes as often as the prologue/epilogue,
     then a spill in the block is as costly as in the prologue, so the
     effective number of available registers is
     (ira_class_hard_regs_num[cl] - call_used_regs_num[cl]).
     Note that, all else being equal, we prefer to spill in the prologue,
     since that allows "extra" registers for other basic blocks of the
     function.
     * If the basic block is on the cold path of the function and executes
     rarely, then we should always prefer to spill in the block, rather than
     in the prologue/epilogue.  The effective number of available registers
     is (ira_class_hard_regs_num[cl] - call_used_regs_num[cl]).  */
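  /* A rough worked example of the rule above (illustrative numbers only,
     not taken from any target): with ira_class_hard_regs_num[cl] == 16
     and call_used_regs_num[cl] == 8, a block four times as frequent as
     the entry block gets 16 - (8 * entry_freq) / bb_freq == 16 - 2 == 14
     effective registers, while a block as frequent as (or colder than)
     the entry block gets 16 - 8 == 8.  */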
  int i;
  int entry_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
  int bb_freq = bb->frequency;

  if (entry_freq == 0)
    entry_freq = bb_freq = 1;

  if (bb_freq < entry_freq)
    bb_freq = entry_freq;

  for (i = 0; i < ira_pressure_classes_num; ++i)
    {
      enum reg_class cl = ira_pressure_classes[i];
      sched_class_regs_num[cl] = ira_class_hard_regs_num[cl];
      sched_class_regs_num[cl]
	-= (call_used_regs_num[cl] * entry_freq) / bb_freq;
    }

  if (sched_pressure == SCHED_PRESSURE_MODEL)
    model_start_schedule (bb);
}
/* A structure that holds local state for the loop in schedule_block.  */
struct sched_block_state
{
  /* True if no real insns have been scheduled in the current cycle.  */
  bool first_cycle_insn_p;
  /* True if a shadow insn has been scheduled in the current cycle, which
     means that no more normal insns can be issued.  */
  bool shadows_only_p;
  /* True if we're winding down a modulo schedule, which means that we only
     issue insns with INSN_EXACT_TICK set.  */
  bool modulo_epilogue;
  /* Initialized with the machine's issue rate every cycle, and updated
     by calls to the variable_issue hook.  */
  int can_issue_more;
};
3996 /* INSN is the "currently executing insn". Launch each insn which was
3997 waiting on INSN. READY is the ready list which contains the insns
3998 that are ready to fire. CLOCK is the current cycle. The function
3999 returns necessary cycle advance after issuing the insn (it is not
4000 zero for insns in a schedule group). */
4003 schedule_insn (rtx_insn
*insn
)
4005 sd_iterator_def sd_it
;
4010 if (sched_verbose
>= 1)
4012 struct reg_pressure_data
*pressure_info
;
4013 fprintf (sched_dump
, ";;\t%3i--> %s %-40s:",
4014 clock_var
, (*current_sched_info
->print_insn
) (insn
, 1),
4015 str_pattern_slim (PATTERN (insn
)));
4017 if (recog_memoized (insn
) < 0)
4018 fprintf (sched_dump
, "nothing");
4020 print_reservation (sched_dump
, insn
);
4021 pressure_info
= INSN_REG_PRESSURE (insn
);
4022 if (pressure_info
!= NULL
)
4024 fputc (':', sched_dump
);
4025 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
4026 fprintf (sched_dump
, "%s%s%+d(%d)",
4027 scheduled_insns
.length () > 1
4029 < INSN_LUID (scheduled_insns
[scheduled_insns
.length () - 2]) ? "@" : "",
4030 reg_class_names
[ira_pressure_classes
[i
]],
4031 pressure_info
[i
].set_increase
, pressure_info
[i
].change
);
4033 if (sched_pressure
== SCHED_PRESSURE_MODEL
4034 && model_curr_point
< model_num_insns
4035 && model_index (insn
) == model_curr_point
)
4036 fprintf (sched_dump
, ":model %d", model_curr_point
);
4037 fputc ('\n', sched_dump
);
4040 if (sched_pressure
== SCHED_PRESSURE_WEIGHTED
&& !DEBUG_INSN_P (insn
))
4041 update_reg_and_insn_max_reg_pressure (insn
);
4043 /* Scheduling instruction should have all its dependencies resolved and
4044 should have been removed from the ready list. */
4045 gcc_assert (sd_lists_empty_p (insn
, SD_LIST_HARD_BACK
));
4047 /* Reset debug insns invalidated by moving this insn. */
4048 if (MAY_HAVE_DEBUG_INSNS
&& !DEBUG_INSN_P (insn
))
4049 for (sd_it
= sd_iterator_start (insn
, SD_LIST_BACK
);
4050 sd_iterator_cond (&sd_it
, &dep
);)
4052 rtx_insn
*dbg
= DEP_PRO (dep
);
4053 struct reg_use_data
*use
, *next
;
4055 if (DEP_STATUS (dep
) & DEP_CANCELLED
)
4057 sd_iterator_next (&sd_it
);
4061 gcc_assert (DEBUG_INSN_P (dbg
));
4063 if (sched_verbose
>= 6)
4064 fprintf (sched_dump
, ";;\t\tresetting: debug insn %d\n",
4067 /* ??? Rather than resetting the debug insn, we might be able
4068 to emit a debug temp before the just-scheduled insn, but
4069 this would involve checking that the expression at the
4070 point of the debug insn is equivalent to the expression
4071 before the just-scheduled insn. They might not be: the
4072 expression in the debug insn may depend on other insns not
4073 yet scheduled that set MEMs, REGs or even other debug
4074 insns. It's not clear that attempting to preserve debug
4075 information in these cases is worth the effort, given how
4076 uncommon these resets are and the likelihood that the debug
4077 temps introduced won't survive the schedule change. */
4078 INSN_VAR_LOCATION_LOC (dbg
) = gen_rtx_UNKNOWN_VAR_LOC ();
4079 df_insn_rescan (dbg
);
4081 /* Unknown location doesn't use any registers. */
4082 for (use
= INSN_REG_USE_LIST (dbg
); use
!= NULL
; use
= next
)
4084 struct reg_use_data
*prev
= use
;
4086 /* Remove use from the cyclic next_regno_use chain first. */
4087 while (prev
->next_regno_use
!= use
)
4088 prev
= prev
->next_regno_use
;
4089 prev
->next_regno_use
= use
->next_regno_use
;
4090 next
= use
->next_insn_use
;
4093 INSN_REG_USE_LIST (dbg
) = NULL
;
4095 /* We delete rather than resolve these deps, otherwise we
4096 crash in sched_free_deps(), because forward deps are
4097 expected to be released before backward deps. */
4098 sd_delete_dep (sd_it
);
4101 gcc_assert (QUEUE_INDEX (insn
) == QUEUE_NOWHERE
);
4102 QUEUE_INDEX (insn
) = QUEUE_SCHEDULED
;
4104 if (sched_pressure
== SCHED_PRESSURE_MODEL
4105 && model_curr_point
< model_num_insns
4106 && NONDEBUG_INSN_P (insn
))
4108 if (model_index (insn
) == model_curr_point
)
4111 while (model_curr_point
< model_num_insns
4112 && (QUEUE_INDEX (MODEL_INSN (model_curr_point
))
4113 == QUEUE_SCHEDULED
));
4115 model_recompute (insn
);
4116 model_update_limit_points ();
4117 update_register_pressure (insn
);
4118 if (sched_verbose
>= 2)
4119 print_curr_reg_pressure ();
4122 gcc_assert (INSN_TICK (insn
) >= MIN_TICK
);
4123 if (INSN_TICK (insn
) > clock_var
)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if one of the following flags is set.  */
4126 gcc_assert (flag_sched_stalled_insns
|| sched_fusion
);
4128 /* ??? Probably, if INSN is scheduled prematurely, we should leave
4129 INSN_TICK untouched. This is a machine-dependent issue, actually. */
4130 INSN_TICK (insn
) = clock_var
;
4132 check_clobbered_conditions (insn
);
4134 /* Update dependent instructions. First, see if by scheduling this insn
4135 now we broke a dependence in a way that requires us to change another
4137 for (sd_it
= sd_iterator_start (insn
, SD_LIST_SPEC_BACK
);
4138 sd_iterator_cond (&sd_it
, &dep
); sd_iterator_next (&sd_it
))
4140 struct dep_replacement
*desc
= DEP_REPLACE (dep
);
4141 rtx_insn
*pro
= DEP_PRO (dep
);
4142 if (QUEUE_INDEX (pro
) != QUEUE_SCHEDULED
4143 && desc
!= NULL
&& desc
->insn
== pro
)
4144 apply_replacement (dep
, false);
4147 /* Go through and resolve forward dependencies. */
4148 for (sd_it
= sd_iterator_start (insn
, SD_LIST_FORW
);
4149 sd_iterator_cond (&sd_it
, &dep
);)
4151 rtx_insn
*next
= DEP_CON (dep
);
4152 bool cancelled
= (DEP_STATUS (dep
) & DEP_CANCELLED
) != 0;
4154 /* Resolve the dependence between INSN and NEXT.
4155 sd_resolve_dep () moves current dep to another list thus
4156 advancing the iterator. */
4157 sd_resolve_dep (sd_it
);
4161 if (must_restore_pattern_p (next
, dep
))
4162 restore_pattern (dep
, false);
4166 /* Don't bother trying to mark next as ready if insn is a debug
4167 insn. If insn is the last hard dependency, it will have
4168 already been discounted. */
4169 if (DEBUG_INSN_P (insn
) && !DEBUG_INSN_P (next
))
4172 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn
))
4176 effective_cost
= try_ready (next
);
4178 if (effective_cost
>= 0
4179 && SCHED_GROUP_P (next
)
4180 && advance
< effective_cost
)
4181 advance
= effective_cost
;
4184 /* Check always has only one forward dependence (to the first insn in
4185 the recovery block), therefore, this will be executed only once. */
4187 gcc_assert (sd_lists_empty_p (insn
, SD_LIST_FORW
));
4188 fix_recovery_deps (RECOVERY_BLOCK (insn
));
4192 /* Annotate the instruction with issue information -- TImode
4193 indicates that the instruction is expected not to be able
4194 to issue on the same cycle as the previous insn. A machine
4195 may use this information to decide how the instruction should
4198 && GET_CODE (PATTERN (insn
)) != USE
4199 && GET_CODE (PATTERN (insn
)) != CLOBBER
4200 && !DEBUG_INSN_P (insn
))
4202 if (reload_completed
)
4203 PUT_MODE (insn
, clock_var
> last_clock_var
? TImode
: VOIDmode
);
4204 last_clock_var
= clock_var
;
4207 if (nonscheduled_insns_begin
!= NULL_RTX
)
4208 /* Indicate to debug counters that INSN is scheduled. */
4209 nonscheduled_insns_begin
= insn
;
/* Functions for handling of notes.  */

/* Add note list that ends on FROM_END to the end of TO_ENDP.  */
void
concat_note_lists (rtx_insn *from_end, rtx_insn **to_endp)
{
  rtx_insn *from_start;

  /* It's easy when we have nothing to concat.  */
  if (from_end == NULL)
    return;

  /* It's also easy when the destination is empty.  */
  if (*to_endp == NULL)
    {
      *to_endp = from_end;
      return;
    }

  from_start = from_end;
  while (PREV_INSN (from_start) != NULL)
    from_start = PREV_INSN (from_start);

  SET_PREV_INSN (from_start) = *to_endp;
  SET_NEXT_INSN (*to_endp) = from_start;
  *to_endp = from_end;
}
4242 /* Delete notes between HEAD and TAIL and put them in the chain
4243 of notes ended by NOTE_LIST. */
4245 remove_notes (rtx_insn
*head
, rtx_insn
*tail
)
4247 rtx_insn
*next_tail
, *insn
, *next
;
4250 if (head
== tail
&& !INSN_P (head
))
4253 next_tail
= NEXT_INSN (tail
);
4254 for (insn
= head
; insn
!= next_tail
; insn
= next
)
4256 next
= NEXT_INSN (insn
);
4260 switch (NOTE_KIND (insn
))
4262 case NOTE_INSN_BASIC_BLOCK
:
4265 case NOTE_INSN_EPILOGUE_BEG
:
4269 add_reg_note (next
, REG_SAVE_NOTE
,
4270 GEN_INT (NOTE_INSN_EPILOGUE_BEG
));
4278 /* Add the note to list that ends at NOTE_LIST. */
4279 SET_PREV_INSN (insn
) = note_list
;
4280 SET_NEXT_INSN (insn
) = NULL_RTX
;
4282 SET_NEXT_INSN (note_list
) = insn
;
4287 gcc_assert ((sel_sched_p () || insn
!= tail
) && insn
!= head
);
/* A structure to record enough data to allow us to backtrack the scheduler to
   a previous state.  */
struct haifa_saved_data
{
  /* Next entry on the list.  */
  struct haifa_saved_data *next;

  /* Backtracking is associated with scheduling insns that have delay slots.
     DELAY_PAIR points to the structure that contains the insns involved, and
     the number of cycles between them.  */
  struct delay_pair *delay_pair;

  /* Data used by the frontend (e.g. sched-ebb or sched-rgn).  */
  void *fe_saved_data;
  /* Data used by the backend.  */
  void *be_saved_data;

  /* Copies of global state.  */
  int clock_var, last_clock_var;
  struct ready_list ready;
  state_t curr_state;

  rtx_insn *last_scheduled_insn;
  rtx last_nondebug_scheduled_insn;
  rtx_insn *nonscheduled_insns_begin;
  int cycle_issued_insns;

  /* Copies of state used in the inner loop of schedule_block.  */
  struct sched_block_state sched_block;

  /* We don't need to save q_ptr, as its value is arbitrary and we can set it
     to 0 when restoring.  */
  int q_size;
  rtx_insn_list **insn_queue;

  /* Describe pattern replacements that occurred since this backtrack point
     was placed.  */
  vec<dep_t> replacement_deps;
  vec<int> replace_apply;

  /* A copy of the next-cycle replacement vectors at the time of the backtrack
     point.  */
  vec<dep_t> next_cycle_deps;
  vec<int> next_cycle_apply;
};

/* A record, in reverse order, of all scheduled insns which have delay slots
   and may require backtracking.  */
static struct haifa_saved_data *backtrack_queue;
/* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
   to SET_P.  */
static void
mark_backtrack_feeds (rtx insn, int set_p)
{
  sd_iterator_def sd_it;
  dep_t dep;

  FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
    {
      FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
    }
}
4354 /* Save the current scheduler state so that we can backtrack to it
4355 later if necessary. PAIR gives the insns that make it necessary to
4356 save this point. SCHED_BLOCK is the local state of schedule_block
4357 that need to be saved. */
4359 save_backtrack_point (struct delay_pair
*pair
,
4360 struct sched_block_state sched_block
)
4363 struct haifa_saved_data
*save
= XNEW (struct haifa_saved_data
);
4365 save
->curr_state
= xmalloc (dfa_state_size
);
4366 memcpy (save
->curr_state
, curr_state
, dfa_state_size
);
4368 save
->ready
.first
= ready
.first
;
4369 save
->ready
.n_ready
= ready
.n_ready
;
4370 save
->ready
.n_debug
= ready
.n_debug
;
4371 save
->ready
.veclen
= ready
.veclen
;
4372 save
->ready
.vec
= XNEWVEC (rtx_insn
*, ready
.veclen
);
4373 memcpy (save
->ready
.vec
, ready
.vec
, ready
.veclen
* sizeof (rtx
));
4375 save
->insn_queue
= XNEWVEC (rtx_insn_list
*, max_insn_queue_index
+ 1);
4376 save
->q_size
= q_size
;
4377 for (i
= 0; i
<= max_insn_queue_index
; i
++)
4379 int q
= NEXT_Q_AFTER (q_ptr
, i
);
4380 save
->insn_queue
[i
] = copy_INSN_LIST (insn_queue
[q
]);
4383 save
->clock_var
= clock_var
;
4384 save
->last_clock_var
= last_clock_var
;
4385 save
->cycle_issued_insns
= cycle_issued_insns
;
4386 save
->last_scheduled_insn
= last_scheduled_insn
;
4387 save
->last_nondebug_scheduled_insn
= last_nondebug_scheduled_insn
;
4388 save
->nonscheduled_insns_begin
= nonscheduled_insns_begin
;
4390 save
->sched_block
= sched_block
;
4392 save
->replacement_deps
.create (0);
4393 save
->replace_apply
.create (0);
4394 save
->next_cycle_deps
= next_cycle_replace_deps
.copy ();
4395 save
->next_cycle_apply
= next_cycle_apply
.copy ();
4397 if (current_sched_info
->save_state
)
4398 save
->fe_saved_data
= (*current_sched_info
->save_state
) ();
4400 if (targetm
.sched
.alloc_sched_context
)
4402 save
->be_saved_data
= targetm
.sched
.alloc_sched_context ();
4403 targetm
.sched
.init_sched_context (save
->be_saved_data
, false);
4406 save
->be_saved_data
= NULL
;
4408 save
->delay_pair
= pair
;
4410 save
->next
= backtrack_queue
;
4411 backtrack_queue
= save
;
4415 mark_backtrack_feeds (pair
->i2
, 1);
4416 INSN_TICK (pair
->i2
) = INVALID_TICK
;
4417 INSN_EXACT_TICK (pair
->i2
) = clock_var
+ pair_delay (pair
);
4418 SHADOW_P (pair
->i2
) = pair
->stages
== 0;
4419 pair
= pair
->next_same_i1
;
4423 /* Walk the ready list and all queues. If any insns have unresolved backwards
4424 dependencies, these must be cancelled deps, broken by predication. Set or
4425 clear (depending on SET) the DEP_CANCELLED bit in DEP_STATUS. */
4428 toggle_cancelled_flags (bool set
)
4431 sd_iterator_def sd_it
;
4434 if (ready
.n_ready
> 0)
4436 rtx_insn
**first
= ready_lastpos (&ready
);
4437 for (i
= 0; i
< ready
.n_ready
; i
++)
4438 FOR_EACH_DEP (first
[i
], SD_LIST_BACK
, sd_it
, dep
)
4439 if (!DEBUG_INSN_P (DEP_PRO (dep
)))
4442 DEP_STATUS (dep
) |= DEP_CANCELLED
;
4444 DEP_STATUS (dep
) &= ~DEP_CANCELLED
;
4447 for (i
= 0; i
<= max_insn_queue_index
; i
++)
4449 int q
= NEXT_Q_AFTER (q_ptr
, i
);
4450 rtx_insn_list
*link
;
4451 for (link
= insn_queue
[q
]; link
; link
= link
->next ())
4453 rtx_insn
*insn
= link
->insn ();
4454 FOR_EACH_DEP (insn
, SD_LIST_BACK
, sd_it
, dep
)
4455 if (!DEBUG_INSN_P (DEP_PRO (dep
)))
4458 DEP_STATUS (dep
) |= DEP_CANCELLED
;
4460 DEP_STATUS (dep
) &= ~DEP_CANCELLED
;
4466 /* Undo the replacements that have occurred after backtrack point SAVE
4469 undo_replacements_for_backtrack (struct haifa_saved_data
*save
)
4471 while (!save
->replacement_deps
.is_empty ())
4473 dep_t dep
= save
->replacement_deps
.pop ();
4474 int apply_p
= save
->replace_apply
.pop ();
4477 restore_pattern (dep
, true);
4479 apply_replacement (dep
, true);
4481 save
->replacement_deps
.release ();
4482 save
->replace_apply
.release ();
4485 /* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
4486 Restore their dependencies to an unresolved state, and mark them as
4490 unschedule_insns_until (rtx insn
)
4492 auto_vec
<rtx_insn
*> recompute_vec
;
4494 /* Make two passes over the insns to be unscheduled. First, we clear out
4495 dependencies and other trivial bookkeeping. */
4499 sd_iterator_def sd_it
;
4502 last
= scheduled_insns
.pop ();
4504 /* This will be changed by restore_backtrack_point if the insn is in
4506 QUEUE_INDEX (last
) = QUEUE_NOWHERE
;
4508 INSN_TICK (last
) = INVALID_TICK
;
4510 if (modulo_ii
> 0 && INSN_UID (last
) < modulo_iter0_max_uid
)
4511 modulo_insns_scheduled
--;
4513 for (sd_it
= sd_iterator_start (last
, SD_LIST_RES_FORW
);
4514 sd_iterator_cond (&sd_it
, &dep
);)
4516 rtx_insn
*con
= DEP_CON (dep
);
4517 sd_unresolve_dep (sd_it
);
4518 if (!MUST_RECOMPUTE_SPEC_P (con
))
4520 MUST_RECOMPUTE_SPEC_P (con
) = 1;
4521 recompute_vec
.safe_push (con
);
4529 /* A second pass, to update ready and speculation status for insns
4530 depending on the unscheduled ones. The first pass must have
4531 popped the scheduled_insns vector up to the point where we
4532 restart scheduling, as recompute_todo_spec requires it to be
4534 while (!recompute_vec
.is_empty ())
4538 con
= recompute_vec
.pop ();
4539 MUST_RECOMPUTE_SPEC_P (con
) = 0;
4540 if (!sd_lists_empty_p (con
, SD_LIST_HARD_BACK
))
4542 TODO_SPEC (con
) = HARD_DEP
;
4543 INSN_TICK (con
) = INVALID_TICK
;
4544 if (PREDICATED_PAT (con
) != NULL_RTX
)
4545 haifa_change_pattern (con
, ORIG_PAT (con
));
4547 else if (QUEUE_INDEX (con
) != QUEUE_SCHEDULED
)
4548 TODO_SPEC (con
) = recompute_todo_spec (con
, true);
4552 /* Restore scheduler state from the topmost entry on the backtracking queue.
4553 PSCHED_BLOCK_P points to the local data of schedule_block that we must
4554 overwrite with the saved data.
4555 The caller must already have called unschedule_insns_until. */
4558 restore_last_backtrack_point (struct sched_block_state
*psched_block
)
4561 struct haifa_saved_data
*save
= backtrack_queue
;
4563 backtrack_queue
= save
->next
;
4565 if (current_sched_info
->restore_state
)
4566 (*current_sched_info
->restore_state
) (save
->fe_saved_data
);
4568 if (targetm
.sched
.alloc_sched_context
)
4570 targetm
.sched
.set_sched_context (save
->be_saved_data
);
4571 targetm
.sched
.free_sched_context (save
->be_saved_data
);
4574 /* Do this first since it clobbers INSN_TICK of the involved
4576 undo_replacements_for_backtrack (save
);
4578 /* Clear the QUEUE_INDEX of everything in the ready list or one
4580 if (ready
.n_ready
> 0)
4582 rtx_insn
**first
= ready_lastpos (&ready
);
4583 for (i
= 0; i
< ready
.n_ready
; i
++)
4585 rtx_insn
*insn
= first
[i
];
4586 QUEUE_INDEX (insn
) = QUEUE_NOWHERE
;
4587 INSN_TICK (insn
) = INVALID_TICK
;
4590 for (i
= 0; i
<= max_insn_queue_index
; i
++)
4592 int q
= NEXT_Q_AFTER (q_ptr
, i
);
4594 for (rtx_insn_list
*link
= insn_queue
[q
]; link
; link
= link
->next ())
4596 rtx_insn
*x
= link
->insn ();
4597 QUEUE_INDEX (x
) = QUEUE_NOWHERE
;
4598 INSN_TICK (x
) = INVALID_TICK
;
4600 free_INSN_LIST_list (&insn_queue
[q
]);
4604 ready
= save
->ready
;
4606 if (ready
.n_ready
> 0)
4608 rtx_insn
**first
= ready_lastpos (&ready
);
4609 for (i
= 0; i
< ready
.n_ready
; i
++)
4611 rtx_insn
*insn
= first
[i
];
4612 QUEUE_INDEX (insn
) = QUEUE_READY
;
4613 TODO_SPEC (insn
) = recompute_todo_spec (insn
, true);
4614 INSN_TICK (insn
) = save
->clock_var
;
4619 q_size
= save
->q_size
;
4620 for (i
= 0; i
<= max_insn_queue_index
; i
++)
4622 int q
= NEXT_Q_AFTER (q_ptr
, i
);
4624 insn_queue
[q
] = save
->insn_queue
[q
];
4626 for (rtx_insn_list
*link
= insn_queue
[q
]; link
; link
= link
->next ())
4628 rtx_insn
*x
= link
->insn ();
4629 QUEUE_INDEX (x
) = i
;
4630 TODO_SPEC (x
) = recompute_todo_spec (x
, true);
4631 INSN_TICK (x
) = save
->clock_var
+ i
;
4634 free (save
->insn_queue
);
4636 toggle_cancelled_flags (true);
4638 clock_var
= save
->clock_var
;
4639 last_clock_var
= save
->last_clock_var
;
4640 cycle_issued_insns
= save
->cycle_issued_insns
;
4641 last_scheduled_insn
= save
->last_scheduled_insn
;
4642 last_nondebug_scheduled_insn
= save
->last_nondebug_scheduled_insn
;
4643 nonscheduled_insns_begin
= save
->nonscheduled_insns_begin
;
4645 *psched_block
= save
->sched_block
;
4647 memcpy (curr_state
, save
->curr_state
, dfa_state_size
);
4648 free (save
->curr_state
);
4650 mark_backtrack_feeds (save
->delay_pair
->i2
, 0);
4652 gcc_assert (next_cycle_replace_deps
.is_empty ());
4653 next_cycle_replace_deps
= save
->next_cycle_deps
.copy ();
4654 next_cycle_apply
= save
->next_cycle_apply
.copy ();
4658 for (save
= backtrack_queue
; save
; save
= save
->next
)
4660 mark_backtrack_feeds (save
->delay_pair
->i2
, 1);
4664 /* Discard all data associated with the topmost entry in the backtrack
4665 queue. If RESET_TICK is false, we just want to free the data. If true,
4666 we are doing this because we discovered a reason to backtrack. In the
4667 latter case, also reset the INSN_TICK for the shadow insn. */
4669 free_topmost_backtrack_point (bool reset_tick
)
4671 struct haifa_saved_data
*save
= backtrack_queue
;
4674 backtrack_queue
= save
->next
;
4678 struct delay_pair
*pair
= save
->delay_pair
;
4681 INSN_TICK (pair
->i2
) = INVALID_TICK
;
4682 INSN_EXACT_TICK (pair
->i2
) = INVALID_TICK
;
4683 pair
= pair
->next_same_i1
;
4685 undo_replacements_for_backtrack (save
);
4689 save
->replacement_deps
.release ();
4690 save
->replace_apply
.release ();
4693 if (targetm
.sched
.free_sched_context
)
4694 targetm
.sched
.free_sched_context (save
->be_saved_data
);
4695 if (current_sched_info
->restore_state
)
4696 free (save
->fe_saved_data
);
4697 for (i
= 0; i
<= max_insn_queue_index
; i
++)
4698 free_INSN_LIST_list (&save
->insn_queue
[i
]);
4699 free (save
->insn_queue
);
4700 free (save
->curr_state
);
4701 free (save
->ready
.vec
);
4705 /* Free the entire backtrack queue. */
4707 free_backtrack_queue (void)
4709 while (backtrack_queue
)
4710 free_topmost_backtrack_point (false);
4713 /* Apply a replacement described by DESC. If IMMEDIATELY is false, we
4714 may have to postpone the replacement until the start of the next cycle,
4715 at which point we will be called again with IMMEDIATELY true. This is
4716 only done for machines which have instruction packets with explicit
4717 parallelism however. */
4719 apply_replacement (dep_t dep
, bool immediately
)
4721 struct dep_replacement
*desc
= DEP_REPLACE (dep
);
4722 if (!immediately
&& targetm
.sched
.exposed_pipeline
&& reload_completed
)
4724 next_cycle_replace_deps
.safe_push (dep
);
4725 next_cycle_apply
.safe_push (1);
4731 if (QUEUE_INDEX (desc
->insn
) == QUEUE_SCHEDULED
)
4734 if (sched_verbose
>= 5)
4735 fprintf (sched_dump
, "applying replacement for insn %d\n",
4736 INSN_UID (desc
->insn
));
4738 success
= validate_change (desc
->insn
, desc
->loc
, desc
->newval
, 0);
4739 gcc_assert (success
);
4741 update_insn_after_change (desc
->insn
);
4742 if ((TODO_SPEC (desc
->insn
) & (HARD_DEP
| DEP_POSTPONED
)) == 0)
4743 fix_tick_ready (desc
->insn
);
4745 if (backtrack_queue
!= NULL
)
4747 backtrack_queue
->replacement_deps
.safe_push (dep
);
4748 backtrack_queue
->replace_apply
.safe_push (1);
4753 /* We have determined that a pattern involved in DEP must be restored.
4754 If IMMEDIATELY is false, we may have to postpone the replacement
4755 until the start of the next cycle, at which point we will be called
4756 again with IMMEDIATELY true. */
4758 restore_pattern (dep_t dep
, bool immediately
)
4760 rtx_insn
*next
= DEP_CON (dep
);
4761 int tick
= INSN_TICK (next
);
4763 /* If we already scheduled the insn, the modified version is
4765 if (QUEUE_INDEX (next
) == QUEUE_SCHEDULED
)
4768 if (!immediately
&& targetm
.sched
.exposed_pipeline
&& reload_completed
)
4770 next_cycle_replace_deps
.safe_push (dep
);
4771 next_cycle_apply
.safe_push (0);
4776 if (DEP_TYPE (dep
) == REG_DEP_CONTROL
)
4778 if (sched_verbose
>= 5)
4779 fprintf (sched_dump
, "restoring pattern for insn %d\n",
4781 haifa_change_pattern (next
, ORIG_PAT (next
));
4785 struct dep_replacement
*desc
= DEP_REPLACE (dep
);
4788 if (sched_verbose
>= 5)
4789 fprintf (sched_dump
, "restoring pattern for insn %d\n",
4790 INSN_UID (desc
->insn
));
4791 tick
= INSN_TICK (desc
->insn
);
4793 success
= validate_change (desc
->insn
, desc
->loc
, desc
->orig
, 0);
4794 gcc_assert (success
);
4795 update_insn_after_change (desc
->insn
);
4796 if (backtrack_queue
!= NULL
)
4798 backtrack_queue
->replacement_deps
.safe_push (dep
);
4799 backtrack_queue
->replace_apply
.safe_push (0);
4802 INSN_TICK (next
) = tick
;
4803 if (TODO_SPEC (next
) == DEP_POSTPONED
)
4806 if (sd_lists_empty_p (next
, SD_LIST_BACK
))
4807 TODO_SPEC (next
) = 0;
4808 else if (!sd_lists_empty_p (next
, SD_LIST_HARD_BACK
))
4809 TODO_SPEC (next
) = HARD_DEP
;
/* Perform pattern replacements that were queued up until the next
   cycle.  */
static void
perform_replacements_new_cycle (void)
{
  int i;
  dep_t dep;

  FOR_EACH_VEC_ELT (next_cycle_replace_deps, i, dep)
    {
      int apply_p = next_cycle_apply[i];
      if (apply_p)
	apply_replacement (dep, true);
      else
	restore_pattern (dep, true);
    }
  next_cycle_replace_deps.truncate (0);
  next_cycle_apply.truncate (0);
}
/* Compute INSN_TICK_ESTIMATE for INSN.  PROCESSED is a bitmap of
   instructions we've previously encountered, a set bit prevents
   recursion.  BUDGET is a limit on how far ahead we look, it is
   reduced on recursive calls.  Return true if we produced a good
   estimate, or false if we exceeded the budget.  */
static bool
estimate_insn_tick (bitmap processed, rtx_insn *insn, int budget)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int earliest = INSN_TICK (insn);

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      int t;

      if (DEP_STATUS (dep) & DEP_CANCELLED)
	continue;

      if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
	gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
      else
	{
	  int cost = dep_cost (dep);
	  if (cost >= budget)
	    return false;
	  if (!bitmap_bit_p (processed, INSN_LUID (pro)))
	    {
	      if (!estimate_insn_tick (processed, pro, budget - cost))
		return false;
	    }
	  gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
	  t = INSN_TICK_ESTIMATE (pro) + cost;
	  if (earliest == INVALID_TICK || t > earliest)
	    earliest = t;
	}
    }

  bitmap_set_bit (processed, INSN_LUID (insn));
  INSN_TICK_ESTIMATE (insn) = earliest;
  return true;
}
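/* A hypothetical illustration of the BUDGET cutoff (not part of the
   original code): starting with a budget of 3, a backward chain of two
   unscheduled predecessors whose dependences each cost 2 cycles makes
   the estimate fail, because the recursive call is made with the reduced
   budget 3 - 2 == 1, which cannot cover the second 2-cycle dependence,
   so estimate_insn_tick returns false.  */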
/* Examine the pair of insns in P, and estimate (optimistically, assuming
   infinite resources) the cycle in which the delayed shadow can be issued.
   Return the number of cycles that must pass before the real insn can be
   issued in order to meet this constraint.  */
static int
estimate_shadow_tick (struct delay_pair *p)
{
  bitmap_head processed;
  int t;
  bool cutoff;

  bitmap_initialize (&processed, 0);

  cutoff = !estimate_insn_tick (&processed, p->i2,
				max_insn_queue_index + pair_delay (p));
  bitmap_clear (&processed);
  if (cutoff)
    return max_insn_queue_index;
  t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
  if (t > 0)
    return t;
  return 0;
}
4897 /* If INSN has no unresolved backwards dependencies, add it to the schedule and
4898 recursively resolve all its forward dependencies. */
4900 resolve_dependencies (rtx_insn
*insn
)
4902 sd_iterator_def sd_it
;
4905 /* Don't use sd_lists_empty_p; it ignores debug insns. */
4906 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (insn
)) != NULL
4907 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (insn
)) != NULL
)
4910 if (sched_verbose
>= 4)
4911 fprintf (sched_dump
, ";;\tquickly resolving %d\n", INSN_UID (insn
));
4913 if (QUEUE_INDEX (insn
) >= 0)
4914 queue_remove (insn
);
4916 scheduled_insns
.safe_push (insn
);
4918 /* Update dependent instructions. */
4919 for (sd_it
= sd_iterator_start (insn
, SD_LIST_FORW
);
4920 sd_iterator_cond (&sd_it
, &dep
);)
4922 rtx_insn
*next
= DEP_CON (dep
);
4924 if (sched_verbose
>= 4)
4925 fprintf (sched_dump
, ";;\t\tdep %d against %d\n", INSN_UID (insn
),
4928 /* Resolve the dependence between INSN and NEXT.
4929 sd_resolve_dep () moves current dep to another list thus
4930 advancing the iterator. */
4931 sd_resolve_dep (sd_it
);
4933 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn
))
4935 resolve_dependencies (next
);
4938 /* Check always has only one forward dependence (to the first insn in
4939 the recovery block), therefore, this will be executed only once. */
4941 gcc_assert (sd_lists_empty_p (insn
, SD_LIST_FORW
));
4947 /* Return the head and tail pointers of ebb starting at BEG and ending
4950 get_ebb_head_tail (basic_block beg
, basic_block end
,
4951 rtx_insn
**headp
, rtx_insn
**tailp
)
4953 rtx_insn
*beg_head
= BB_HEAD (beg
);
4954 rtx_insn
* beg_tail
= BB_END (beg
);
4955 rtx_insn
* end_head
= BB_HEAD (end
);
4956 rtx_insn
* end_tail
= BB_END (end
);
4958 /* Don't include any notes or labels at the beginning of the BEG
4959 basic block, or notes at the end of the END basic blocks. */
4961 if (LABEL_P (beg_head
))
4962 beg_head
= NEXT_INSN (beg_head
);
4964 while (beg_head
!= beg_tail
)
4965 if (NOTE_P (beg_head
))
4966 beg_head
= NEXT_INSN (beg_head
);
4967 else if (DEBUG_INSN_P (beg_head
))
4969 rtx_insn
* note
, *next
;
4971 for (note
= NEXT_INSN (beg_head
);
4975 next
= NEXT_INSN (note
);
4978 if (sched_verbose
>= 9)
4979 fprintf (sched_dump
, "reorder %i\n", INSN_UID (note
));
4981 reorder_insns_nobb (note
, note
, PREV_INSN (beg_head
));
4983 if (BLOCK_FOR_INSN (note
) != beg
)
4984 df_insn_change_bb (note
, beg
);
4986 else if (!DEBUG_INSN_P (note
))
4998 end_head
= beg_head
;
4999 else if (LABEL_P (end_head
))
5000 end_head
= NEXT_INSN (end_head
);
5002 while (end_head
!= end_tail
)
5003 if (NOTE_P (end_tail
))
5004 end_tail
= PREV_INSN (end_tail
);
5005 else if (DEBUG_INSN_P (end_tail
))
5007 rtx_insn
* note
, *prev
;
5009 for (note
= PREV_INSN (end_tail
);
5013 prev
= PREV_INSN (note
);
5016 if (sched_verbose
>= 9)
5017 fprintf (sched_dump
, "reorder %i\n", INSN_UID (note
));
5019 reorder_insns_nobb (note
, note
, end_tail
);
5021 if (end_tail
== BB_END (end
))
5022 BB_END (end
) = note
;
5024 if (BLOCK_FOR_INSN (note
) != end
)
5025 df_insn_change_bb (note
, end
);
5027 else if (!DEBUG_INSN_P (note
))
/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */
int
no_real_insns_p (const rtx_insn *head, const rtx_insn *tail)
{
  while (head != NEXT_INSN (tail))
    {
      if (!NOTE_P (head) && !LABEL_P (head))
	return 0;
      head = NEXT_INSN (head);
    }
  return 1;
}
5053 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
5054 previously found among the insns. Insert them just before HEAD. */
5056 restore_other_notes (rtx_insn
*head
, basic_block head_bb
)
5060 rtx_insn
*note_head
= note_list
;
5063 head_bb
= BLOCK_FOR_INSN (head
);
5065 head
= NEXT_INSN (bb_note (head_bb
));
5067 while (PREV_INSN (note_head
))
5069 set_block_for_insn (note_head
, head_bb
);
5070 note_head
= PREV_INSN (note_head
);
5072 /* In the above cycle we've missed this note. */
5073 set_block_for_insn (note_head
, head_bb
);
5075 SET_PREV_INSN (note_head
) = PREV_INSN (head
);
5076 SET_NEXT_INSN (PREV_INSN (head
)) = note_head
;
5077 SET_PREV_INSN (head
) = note_list
;
5078 SET_NEXT_INSN (note_list
) = head
;
5080 if (BLOCK_FOR_INSN (head
) != head_bb
)
5081 BB_END (head_bb
) = note_list
;
5089 /* When we know we are going to discard the schedule due to a failed attempt
5090 at modulo scheduling, undo all replacements. */
5092 undo_all_replacements (void)
5097 FOR_EACH_VEC_ELT (scheduled_insns
, i
, insn
)
5099 sd_iterator_def sd_it
;
5102 /* See if we must undo a replacement. */
5103 for (sd_it
= sd_iterator_start (insn
, SD_LIST_RES_FORW
);
5104 sd_iterator_cond (&sd_it
, &dep
); sd_iterator_next (&sd_it
))
5106 struct dep_replacement
*desc
= DEP_REPLACE (dep
);
5108 validate_change (desc
->insn
, desc
->loc
, desc
->orig
, 0);
/* Return first non-scheduled insn in the current scheduling block.
   This is mostly used for debug-counter purposes.  */
static rtx_insn *
first_nonscheduled_insn (void)
{
  rtx_insn *insn = (nonscheduled_insns_begin != NULL_RTX
		    ? nonscheduled_insns_begin
		    : current_sched_info->prev_head);

  do
    {
      insn = next_nonnote_nondebug_insn (insn);
    }
  while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);

  return insn;
}
5131 /* Move insns that became ready to fire from queue to ready list. */
5134 queue_to_ready (struct ready_list
*ready
)
5137 rtx_insn_list
*link
;
5140 q_ptr
= NEXT_Q (q_ptr
);
5142 if (dbg_cnt (sched_insn
) == false)
5143 /* If debug counter is activated do not requeue the first
5144 nonscheduled insn. */
5145 skip_insn
= first_nonscheduled_insn ();
5147 skip_insn
= NULL_RTX
;
5149 /* Add all pending insns that can be scheduled without stalls to the
5151 for (link
= insn_queue
[q_ptr
]; link
; link
= link
->next ())
5153 insn
= link
->insn ();
5156 if (sched_verbose
>= 2)
5157 fprintf (sched_dump
, ";;\t\tQ-->Ready: insn %s: ",
5158 (*current_sched_info
->print_insn
) (insn
, 0));
5160 /* If the ready list is full, delay the insn for 1 cycle.
5161 See the comment in schedule_block for the rationale. */
5162 if (!reload_completed
5163 && (ready
->n_ready
- ready
->n_debug
> MAX_SCHED_READY_INSNS
5164 || (sched_pressure
== SCHED_PRESSURE_MODEL
5165 /* Limit pressure recalculations to MAX_SCHED_READY_INSNS
5166 instructions too. */
5167 && model_index (insn
) > (model_curr_point
5168 + MAX_SCHED_READY_INSNS
)))
5169 && !(sched_pressure
== SCHED_PRESSURE_MODEL
5170 && model_curr_point
< model_num_insns
5171 /* Always allow the next model instruction to issue. */
5172 && model_index (insn
) == model_curr_point
)
5173 && !SCHED_GROUP_P (insn
)
5174 && insn
!= skip_insn
)
5176 if (sched_verbose
>= 2)
5177 fprintf (sched_dump
, "keeping in queue, ready full\n");
5178 queue_insn (insn
, 1, "ready full");
5182 ready_add (ready
, insn
, false);
5183 if (sched_verbose
>= 2)
5184 fprintf (sched_dump
, "moving to ready without stalls\n");
5187 free_INSN_LIST_list (&insn_queue
[q_ptr
]);
5189 /* If there are no ready insns, stall until one is ready and add all
5190 of the pending insns at that point to the ready list. */
5191 if (ready
->n_ready
== 0)
5195 for (stalls
= 1; stalls
<= max_insn_queue_index
; stalls
++)
5197 if ((link
= insn_queue
[NEXT_Q_AFTER (q_ptr
, stalls
)]))
5199 for (; link
; link
= link
->next ())
5201 insn
= link
->insn ();
5204 if (sched_verbose
>= 2)
5205 fprintf (sched_dump
, ";;\t\tQ-->Ready: insn %s: ",
5206 (*current_sched_info
->print_insn
) (insn
, 0));
5208 ready_add (ready
, insn
, false);
5209 if (sched_verbose
>= 2)
5210 fprintf (sched_dump
, "moving to ready with %d stalls\n", stalls
);
5212 free_INSN_LIST_list (&insn_queue
[NEXT_Q_AFTER (q_ptr
, stalls
)]);
5214 advance_one_cycle ();
5219 advance_one_cycle ();
5222 q_ptr
= NEXT_Q_AFTER (q_ptr
, stalls
);
5223 clock_var
+= stalls
;
5224 if (sched_verbose
>= 2)
5225 fprintf (sched_dump
, ";;\tAdvancing clock by %d cycle[s] to %d\n",
/* Used by early_queue_to_ready.  Determines whether it is "ok" to
   prematurely move INSN from the queue to the ready list.  Currently,
   if a target defines the hook 'is_costly_dependence', this function
   uses the hook to check whether there exist any dependences which are
   considered costly by the target, between INSN and other insns that
   have already been scheduled.  Dependences are checked up to Y cycles
   back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
   controlling this value.
   (Other considerations could be taken into account instead, or in
   addition, depending on user flags and target hooks.)  */
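/* Illustration only (not from the original sources): with
   -fsched-stalled-insns-dep=3, an insn still sitting in the queue is
   checked for target-costly dependences against the insns scheduled in
   each of the last three cycles before it may be moved to the ready
   list prematurely.  */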
5242 ok_for_early_queue_removal (rtx insn
)
5244 if (targetm
.sched
.is_costly_dependence
)
5248 int i
= scheduled_insns
.length ();
5249 for (n_cycles
= flag_sched_stalled_insns_dep
; n_cycles
; n_cycles
--)
5255 prev_insn
= scheduled_insns
[i
];
5257 if (!NOTE_P (prev_insn
))
5261 dep
= sd_find_dep_between (prev_insn
, insn
, true);
5265 cost
= dep_cost (dep
);
5267 if (targetm
.sched
.is_costly_dependence (dep
, cost
,
5268 flag_sched_stalled_insns_dep
- n_cycles
))
5273 if (GET_MODE (prev_insn
) == TImode
) /* end of dispatch group */
5286 /* Remove insns from the queue, before they become "ready" with respect
5287 to FU latency considerations. */
5290 early_queue_to_ready (state_t state
, struct ready_list
*ready
)
5293 rtx_insn_list
*link
;
5294 rtx_insn_list
*next_link
;
5295 rtx_insn_list
*prev_link
;
5298 state_t temp_state
= alloca (dfa_state_size
);
5300 int insns_removed
= 0;
5303 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
5306 X == 0: There is no limit on how many queued insns can be removed
5307 prematurely. (flag_sched_stalled_insns = -1).
5309 X >= 1: Only X queued insns can be removed prematurely in each
5310 invocation. (flag_sched_stalled_insns = X).
5312 Otherwise: Early queue removal is disabled.
5313 (flag_sched_stalled_insns = 0)
5316 if (! flag_sched_stalled_insns
)
5319 for (stalls
= 0; stalls
<= max_insn_queue_index
; stalls
++)
5321 if ((link
= insn_queue
[NEXT_Q_AFTER (q_ptr
, stalls
)]))
5323 if (sched_verbose
> 6)
5324 fprintf (sched_dump
, ";; look at index %d + %d\n", q_ptr
, stalls
);
5329 next_link
= link
->next ();
5330 insn
= link
->insn ();
5331 if (insn
&& sched_verbose
> 6)
5332 print_rtl_single (sched_dump
, insn
);
5334 memcpy (temp_state
, state
, dfa_state_size
);
5335 if (recog_memoized (insn
) < 0)
5336 /* non-negative to indicate that it's not ready
5337 to avoid infinite Q->R->Q->R... */
5340 cost
= state_transition (temp_state
, insn
);
5342 if (sched_verbose
>= 6)
5343 fprintf (sched_dump
, "transition cost = %d\n", cost
);
5345 move_to_ready
= false;
5348 move_to_ready
= ok_for_early_queue_removal (insn
);
5349 if (move_to_ready
== true)
5351 /* move from Q to R */
5353 ready_add (ready
, insn
, false);
5356 XEXP (prev_link
, 1) = next_link
;
5358 insn_queue
[NEXT_Q_AFTER (q_ptr
, stalls
)] = next_link
;
5360 free_INSN_LIST_node (link
);
5362 if (sched_verbose
>= 2)
5363 fprintf (sched_dump
, ";;\t\tEarly Q-->Ready: insn %s\n",
5364 (*current_sched_info
->print_insn
) (insn
, 0));
5367 if (insns_removed
== flag_sched_stalled_insns
)
5368 /* Remove no more than flag_sched_stalled_insns insns
5369 from Q at a time. */
5370 return insns_removed
;
5374 if (move_to_ready
== false)
5381 } /* for stalls.. */
5383 return insns_removed
;
5387 /* Print the ready list for debugging purposes.
5388 If READY_TRY is non-zero then only print insns that max_issue
5391 debug_ready_list_1 (struct ready_list
*ready
, signed char *ready_try
)
5396 if (ready
->n_ready
== 0)
5398 fprintf (sched_dump
, "\n");
5402 p
= ready_lastpos (ready
);
5403 for (i
= 0; i
< ready
->n_ready
; i
++)
5405 if (ready_try
!= NULL
&& ready_try
[ready
->n_ready
- i
- 1])
5408 fprintf (sched_dump
, " %s:%d",
5409 (*current_sched_info
->print_insn
) (p
[i
], 0),
5411 if (sched_pressure
!= SCHED_PRESSURE_NONE
)
5412 fprintf (sched_dump
, "(cost=%d",
5413 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p
[i
]));
5414 fprintf (sched_dump
, ":prio=%d", INSN_PRIORITY (p
[i
]));
5415 if (INSN_TICK (p
[i
]) > clock_var
)
5416 fprintf (sched_dump
, ":delay=%d", INSN_TICK (p
[i
]) - clock_var
);
5417 if (sched_pressure
== SCHED_PRESSURE_MODEL
)
5418 fprintf (sched_dump
, ":idx=%d",
5419 model_index (p
[i
]));
5420 if (sched_pressure
!= SCHED_PRESSURE_NONE
)
5421 fprintf (sched_dump
, ")");
5423 fprintf (sched_dump
, "\n");
5426 /* Print the ready list. Callable from debugger. */
5428 debug_ready_list (struct ready_list
*ready
)
5430 debug_ready_list_1 (ready
, NULL
);
5433 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
5434 NOTEs. This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
5435 replaces the epilogue note in the correct basic block. */
5437 reemit_notes (rtx_insn
*insn
)
5440 rtx_insn
*last
= insn
;
5442 for (note
= REG_NOTES (insn
); note
; note
= XEXP (note
, 1))
5444 if (REG_NOTE_KIND (note
) == REG_SAVE_NOTE
)
5446 enum insn_note note_type
= (enum insn_note
) INTVAL (XEXP (note
, 0));
5448 last
= emit_note_before (note_type
, last
);
5449 remove_note (insn
, note
);
5454 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
5456 move_insn (rtx_insn
*insn
, rtx_insn
*last
, rtx nt
)
5458 if (PREV_INSN (insn
) != last
)
5464 bb
= BLOCK_FOR_INSN (insn
);
5466 /* BB_HEAD is either LABEL or NOTE. */
5467 gcc_assert (BB_HEAD (bb
) != insn
);
5469 if (BB_END (bb
) == insn
)
5470 /* If this is last instruction in BB, move end marker one
5473 /* Jumps are always placed at the end of basic block. */
5474 jump_p
= control_flow_insn_p (insn
);
5477 || ((common_sched_info
->sched_pass_id
== SCHED_RGN_PASS
)
5478 && IS_SPECULATION_BRANCHY_CHECK_P (insn
))
5479 || (common_sched_info
->sched_pass_id
5480 == SCHED_EBB_PASS
));
5482 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn
)) == bb
);
5484 BB_END (bb
) = PREV_INSN (insn
);
5487 gcc_assert (BB_END (bb
) != last
);
5490 /* We move the block note along with jump. */
5494 note
= NEXT_INSN (insn
);
5495 while (NOTE_NOT_BB_P (note
) && note
!= nt
)
5496 note
= NEXT_INSN (note
);
5500 || BARRIER_P (note
)))
5501 note
= NEXT_INSN (note
);
5503 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note
));
5508 SET_NEXT_INSN (PREV_INSN (insn
)) = NEXT_INSN (note
);
5509 SET_PREV_INSN (NEXT_INSN (note
)) = PREV_INSN (insn
);
5511 SET_NEXT_INSN (note
) = NEXT_INSN (last
);
5512 SET_PREV_INSN (NEXT_INSN (last
)) = note
;
5514 SET_NEXT_INSN (last
) = insn
;
5515 SET_PREV_INSN (insn
) = last
;
5517 bb
= BLOCK_FOR_INSN (last
);
5521 fix_jump_move (insn
);
5523 if (BLOCK_FOR_INSN (insn
) != bb
)
5524 move_block_after_check (insn
);
5526 gcc_assert (BB_END (bb
) == last
);
5529 df_insn_change_bb (insn
, bb
);
5531 /* Update BB_END, if needed. */
5532 if (BB_END (bb
) == last
)
5536 SCHED_GROUP_P (insn
) = 0;
/* Return true if scheduling INSN will finish current clock cycle.  */
static bool
insn_finishes_cycle_p (rtx_insn *insn)
{
  if (SCHED_GROUP_P (insn))
    /* After issuing INSN, rest of the sched_group will be forced to issue
       in order.  Don't make any plans for the rest of cycle.  */
    return true;

  /* Finishing the block will, apparently, finish the cycle.  */
  if (current_sched_info->insn_finishes_block_p
      && current_sched_info->insn_finishes_block_p (insn))
    return true;

  return false;
}
/* Functions to model the cache auto-prefetcher.

   Some CPUs have a cache auto-prefetcher, which /seems/ to initiate
   memory prefetches if it sees instructions with consecutive memory
   accesses in the instruction stream.  Details of such hardware units
   are not published, so we can only guess what exactly is going on there.
   In the scheduler, we model an abstract auto-prefetcher.  If there are
   memory insns in the ready list (or the queue) that have the same memory
   base, but different offsets, then we delay the insns with larger offsets
   until insns with smaller offsets get scheduled.  If
   PARAM_SCHED_AUTOPREF_QUEUE_DEPTH is "1", then we only look at the ready
   list; if it is N>1, then we also look through N-1 queue entries.
   If the param is N>=0, then rank_for_schedule will consider
   auto-prefetching among its heuristics.
   A param value of "-1" disables modelling of the auto-prefetcher.  */
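/* A small illustration of the model described above (example only): if
   the ready list contains two loads whose addresses decompose to
   (base r1, offset 0) and (base r1, offset 8), the insn using offset 8
   is delayed until the offset-0 insn has been scheduled, so the accesses
   appear in the instruction stream in ascending offset order.  */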
/* Initialize autoprefetcher model data for INSN.  */
static void
autopref_multipass_init (const rtx_insn *insn, int write)
{
  autopref_multipass_data_t data = &INSN_AUTOPREF_MULTIPASS_DATA (insn)[write];

  gcc_assert (data->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED);
  data->base = NULL_RTX;
  data->offset = 0;
  /* Set insn entry initialized, but not relevant for auto-prefetcher.  */
  data->status = AUTOPREF_MULTIPASS_DATA_IRRELEVANT;

  rtx set = single_set (insn);
  if (set == NULL_RTX)
    return;

  rtx mem = write ? SET_DEST (set) : SET_SRC (set);
  if (!MEM_P (mem))
    return;

  struct address_info info;
  decompose_mem_address (&info, mem);

  /* TODO: Currently only (base+const) addressing is supported.  */
  if (info.base == NULL || !REG_P (*info.base)
      || (info.disp != NULL && !CONST_INT_P (*info.disp)))
    return;

  /* This insn is relevant for auto-prefetcher.  */
  data->base = *info.base;
  data->offset = info.disp ? INTVAL (*info.disp) : 0;
  data->status = AUTOPREF_MULTIPASS_DATA_NORMAL;
}
/* Helper function for rank_for_schedule sorting.  */
static int
autopref_rank_for_schedule (const rtx_insn *insn1, const rtx_insn *insn2)
{
  for (int write = 0; write < 2; ++write)
    {
      autopref_multipass_data_t data1
	= &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
      autopref_multipass_data_t data2
	= &INSN_AUTOPREF_MULTIPASS_DATA (insn2)[write];

      if (data1->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
	autopref_multipass_init (insn1, write);
      if (data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
	continue;

      if (data2->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
	autopref_multipass_init (insn2, write);
      if (data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
	continue;

      if (!rtx_equal_p (data1->base, data2->base))
	continue;

      return data1->offset - data2->offset;
    }

  return 0;
}
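/* For instance (illustrative): when two insns access memory through the
   same base register, the offset difference returned above makes
   rank_for_schedule prefer the access with the smaller offset, e.g. a
   load from (base+4) ahead of one from (base+8); insns with different
   bases, or with no recognized (base+const) access, compare equal here
   and fall through to the other heuristics.  */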
5636 /* True if header of debug dump was printed. */
5637 static bool autopref_multipass_dfa_lookahead_guard_started_dump_p
;
5639 /* Helper for autopref_multipass_dfa_lookahead_guard.
5640 Return "1" if INSN1 should be delayed in favor of INSN2. */
5642 autopref_multipass_dfa_lookahead_guard_1 (const rtx_insn
*insn1
,
5643 const rtx_insn
*insn2
, int write
)
5645 autopref_multipass_data_t data1
5646 = &INSN_AUTOPREF_MULTIPASS_DATA (insn1
)[write
];
5647 autopref_multipass_data_t data2
5648 = &INSN_AUTOPREF_MULTIPASS_DATA (insn2
)[write
];
5650 if (data2
->status
== AUTOPREF_MULTIPASS_DATA_UNINITIALIZED
)
5651 autopref_multipass_init (insn2
, write
);
5652 if (data2
->status
== AUTOPREF_MULTIPASS_DATA_IRRELEVANT
)
5655 if (rtx_equal_p (data1
->base
, data2
->base
)
5656 && data1
->offset
> data2
->offset
)
5658 if (sched_verbose
>= 2)
5660 if (!autopref_multipass_dfa_lookahead_guard_started_dump_p
)
5662 fprintf (sched_dump
,
5663 ";;\t\tnot trying in max_issue due to autoprefetch "
5665 autopref_multipass_dfa_lookahead_guard_started_dump_p
= true;
5668 fprintf (sched_dump
, " %d(%d)", INSN_UID (insn1
), INSN_UID (insn2
));
5679 We could have also hooked autoprefetcher model into
5680 first_cycle_multipass_backtrack / first_cycle_multipass_issue hooks
5681 to enable intelligent selection of "[r1+0]=r2; [r1+4]=r3" on the same cycle
5682 (e.g., once "[r1+0]=r2" is issued in max_issue(), "[r1+4]=r3" gets
5683 unblocked). We don't bother about this yet because target of interest
5684 (ARM Cortex-A15) can issue only 1 memory operation per cycle. */
5686 /* Implementation of first_cycle_multipass_dfa_lookahead_guard hook.
5687 Return "1" if INSN1 should not be considered in max_issue due to
5688 auto-prefetcher considerations. */
5690 autopref_multipass_dfa_lookahead_guard (rtx_insn
*insn1
, int ready_index
)
5694 if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH
) <= 0)
5697 if (sched_verbose
>= 2 && ready_index
== 0)
5698 autopref_multipass_dfa_lookahead_guard_started_dump_p
= false;
5700 for (int write
= 0; write
< 2; ++write
)
5702 autopref_multipass_data_t data1
5703 = &INSN_AUTOPREF_MULTIPASS_DATA (insn1
)[write
];
5705 if (data1
->status
== AUTOPREF_MULTIPASS_DATA_UNINITIALIZED
)
5706 autopref_multipass_init (insn1
, write
);
5707 if (data1
->status
== AUTOPREF_MULTIPASS_DATA_IRRELEVANT
)
5710 if (ready_index
== 0
5711 && data1
->status
== AUTOPREF_MULTIPASS_DATA_DONT_DELAY
)
	/* We allow only a single delay on privileged instructions.
	   Doing otherwise would cause an infinite loop.  */
5715 if (sched_verbose
>= 2)
5717 if (!autopref_multipass_dfa_lookahead_guard_started_dump_p
)
5719 fprintf (sched_dump
,
5720 ";;\t\tnot trying in max_issue due to autoprefetch "
5722 autopref_multipass_dfa_lookahead_guard_started_dump_p
= true;
5725 fprintf (sched_dump
, " *%d*", INSN_UID (insn1
));
5730 for (int i2
= 0; i2
< ready
.n_ready
; ++i2
)
5732 rtx_insn
*insn2
= get_ready_element (i2
);
5735 r
= autopref_multipass_dfa_lookahead_guard_1 (insn1
, insn2
, write
);
5738 if (ready_index
== 0)
5741 data1
->status
= AUTOPREF_MULTIPASS_DATA_DONT_DELAY
;
5747 if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH
) == 1)
5750 /* Everything from the current queue slot should have been moved to
5752 gcc_assert (insn_queue
[NEXT_Q_AFTER (q_ptr
, 0)] == NULL_RTX
);
5754 int n_stalls
= PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH
) - 1;
5755 if (n_stalls
> max_insn_queue_index
)
5756 n_stalls
= max_insn_queue_index
;
5758 for (int stalls
= 1; stalls
<= n_stalls
; ++stalls
)
5760 for (rtx_insn_list
*link
= insn_queue
[NEXT_Q_AFTER (q_ptr
, stalls
)];
5762 link
= link
->next ())
5764 rtx_insn
*insn2
= link
->insn ();
5765 r
= autopref_multipass_dfa_lookahead_guard_1 (insn1
, insn2
,
5769 /* Queue INSN1 until INSN2 can issue. */
5771 if (ready_index
== 0)
5772 data1
->status
= AUTOPREF_MULTIPASS_DATA_DONT_DELAY
;
5780 if (sched_verbose
>= 2
5781 && autopref_multipass_dfa_lookahead_guard_started_dump_p
5782 && (ready_index
== ready
.n_ready
- 1 || r
< 0))
5783 /* This does not /always/ trigger. We don't output EOL if the last
5784 insn is not recognized (INSN_CODE < 0) and lookahead_guard is not
5785 called. We can live with this. */
5786 fprintf (sched_dump
, "\n");
/* Define type for target data used in multipass scheduling.  */
#ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
# define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
#endif
typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;

/* The following structure describes an entry of the stack of choices.  */
struct choice_entry
{
  /* Ordinal number of the issued insn in the ready queue.  */
  int index;
  /* The number of remaining insns whose issue we should still try.  */
  int rest;
  /* The number of issued essential insns.  */
  int n;
  /* State after issuing the insn.  */
  state_t state;
  /* Target-specific data.  */
  first_cycle_multipass_data_t target_data;
};

/* The following array is used to implement a stack of choices used in
   function max_issue.  */
static struct choice_entry *choice_stack;

/* This holds the value of the target dfa_lookahead hook.  */
int dfa_lookahead;

/* The following variable holds the maximal number of tries of issuing
   insns for the first cycle multipass insn scheduling.  We define it as
   constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not need this
   constraint if all real insns (with non-negative codes) had
   reservations, because in that case the algorithm complexity is
   O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
   might be incomplete and such an insn might occur.  For such
   descriptions, the complexity of the algorithm (without the constraint)
   could reach DFA_LOOKAHEAD ** N, where N is the queue length.  */
static int max_lookahead_tries;
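/* Worked example of the bound above (illustrative figures): with
   dfa_lookahead == 4 and issue_rate == 2, max_issue initializes
   max_lookahead_tries to 100 * 4 * 4 == 1600, i.e.
   constant * DFA_LOOKAHEAD ** ISSUE_RATE as described in the comment.  */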
/* The following function returns the maximal (or close to maximal) number
   of insns which can be issued on the same cycle, one of which is the
   insn with the best rank (the first insn in READY).  To find this number,
   the function tries different samples of ready insns.  READY is the
   current queue `ready'.  The global array READY_TRY reflects which insns
   have already been issued in this try.  The function stops immediately
   once it finds a solution in which all instructions can be issued.
   INDEX will contain the index of the best insn in READY.  The following
   function is used only for first cycle multipass scheduling.

   This function expects recognized insns only.  All USEs,
   CLOBBERs, etc must be filtered elsewhere.  */
5845 max_issue (struct ready_list
*ready
, int privileged_n
, state_t state
,
5846 bool first_cycle_insn_p
, int *index
)
5848 int n
, i
, all
, n_ready
, best
, delay
, tries_num
;
5850 struct choice_entry
*top
;
5856 n_ready
= ready
->n_ready
;
5857 gcc_assert (dfa_lookahead
>= 1 && privileged_n
>= 0
5858 && privileged_n
<= n_ready
);
5860 /* Init MAX_LOOKAHEAD_TRIES. */
5861 if (max_lookahead_tries
== 0)
5863 max_lookahead_tries
= 100;
5864 for (i
= 0; i
< issue_rate
; i
++)
5865 max_lookahead_tries
*= dfa_lookahead
;
5868 /* Init max_points. */
5869 more_issue
= issue_rate
- cycle_issued_insns
;
5870 gcc_assert (more_issue
>= 0);
5872 /* The number of the issued insns in the best solution. */
5877 /* Set initial state of the search. */
5878 memcpy (top
->state
, state
, dfa_state_size
);
5879 top
->rest
= dfa_lookahead
;
5881 if (targetm
.sched
.first_cycle_multipass_begin
)
5882 targetm
.sched
.first_cycle_multipass_begin (&top
->target_data
,
5884 first_cycle_insn_p
);
5886 /* Count the number of the insns to search among. */
5887 for (all
= i
= 0; i
< n_ready
; i
++)
5891 if (sched_verbose
>= 2)
5893 fprintf (sched_dump
, ";;\t\tmax_issue among %d insns:", all
);
5894 debug_ready_list_1 (ready
, ready_try
);
5897 /* I is the index of the insn to try next. */
5902 if (/* If we've reached a dead end or searched enough of what we have
5905 /* or have nothing else to try... */
5907 /* or should not issue more. */
5908 || top
->n
>= more_issue
)
5910 /* ??? (... || i == n_ready). */
5911 gcc_assert (i
<= n_ready
);
5913 /* We should not issue more than issue_rate instructions. */
5914 gcc_assert (top
->n
<= more_issue
);
5916 if (top
== choice_stack
)
5919 if (best
< top
- choice_stack
)
5924 /* Try to find issued privileged insn. */
5925 while (n
&& !ready_try
[--n
])
5929 if (/* If all insns are equally good... */
5931 /* Or a privileged insn will be issued. */
5933 /* Then we have a solution. */
5935 best
= top
- choice_stack
;
5936 /* This is the index of the insn issued first in this
5938 *index
= choice_stack
[1].index
;
5939 if (top
->n
== more_issue
|| best
== all
)
5944 /* Set ready-list index to point to the last insn
5945 ('i++' below will advance it to the next insn). */
5951 if (targetm
.sched
.first_cycle_multipass_backtrack
)
5952 targetm
.sched
.first_cycle_multipass_backtrack (&top
->target_data
,
5953 ready_try
, n_ready
);
5956 memcpy (state
, top
->state
, dfa_state_size
);
5958 else if (!ready_try
[i
])
5961 if (tries_num
> max_lookahead_tries
)
5963 insn
= ready_element (ready
, i
);
5964 delay
= state_transition (state
, insn
);
5967 if (state_dead_lock_p (state
)
5968 || insn_finishes_cycle_p (insn
))
5969 /* We won't issue any more instructions in the next
5976 if (memcmp (top
->state
, state
, dfa_state_size
) != 0)
5979 /* Advance to the next choice_entry. */
5981 /* Initialize it. */
5982 top
->rest
= dfa_lookahead
;
5985 memcpy (top
->state
, state
, dfa_state_size
);
5988 if (targetm
.sched
.first_cycle_multipass_issue
)
5989 targetm
.sched
.first_cycle_multipass_issue (&top
->target_data
,
5999 /* Increase ready-list index. */
6003 if (targetm
.sched
.first_cycle_multipass_end
)
6004 targetm
.sched
.first_cycle_multipass_end (best
!= 0
6005 ? &choice_stack
[1].target_data
6008 /* Restore the original state of the DFA. */
6009 memcpy (state
, choice_stack
->state
, dfa_state_size
);
/* The following function chooses an insn from READY and modifies
   READY.  This function is used only for first cycle multipass
   scheduling.
   Return:
   -1 if the cycle should be advanced,
   0 if INSN_PTR is set to point to the desirable insn,
   1 if choose_ready () should be restarted without advancing the cycle.
   (A usage sketch follows the function body.)  */
static int
choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
	      rtx_insn **insn_ptr)
6025 if (dbg_cnt (sched_insn
) == false)
6027 if (nonscheduled_insns_begin
== NULL_RTX
)
6028 nonscheduled_insns_begin
= current_sched_info
->prev_head
;
6030 rtx_insn
*insn
= first_nonscheduled_insn ();
6032 if (QUEUE_INDEX (insn
) == QUEUE_READY
)
6033 /* INSN is in the ready_list. */
6035 ready_remove_insn (insn
);
6040 /* INSN is in the queue. Advance cycle to move it to the ready list. */
6041 gcc_assert (QUEUE_INDEX (insn
) >= 0);
6045 if (dfa_lookahead
<= 0 || SCHED_GROUP_P (ready_element (ready
, 0))
6046 || DEBUG_INSN_P (ready_element (ready
, 0)))
6048 if (targetm
.sched
.dispatch (NULL
, IS_DISPATCH_ON
))
6049 *insn_ptr
= ready_remove_first_dispatch (ready
);
6051 *insn_ptr
= ready_remove_first (ready
);
6057 /* Try to choose the best insn. */
6061 insn
= ready_element (ready
, 0);
6062 if (INSN_CODE (insn
) < 0)
6064 *insn_ptr
= ready_remove_first (ready
);
6068 /* Filter the search space. */
6069 for (i
= 0; i
< ready
->n_ready
; i
++)
6073 insn
= ready_element (ready
, i
);
6075 /* If this insn is recognizable we should have already
6076 recognized it earlier.
6077 ??? Not very clear where this is supposed to be done.
6079 gcc_checking_assert (INSN_CODE (insn
) >= 0
6080 || recog_memoized (insn
) < 0);
6081 if (INSN_CODE (insn
) < 0)
6083 /* Non-recognized insns at position 0 are handled above. */
6089 if (targetm
.sched
.first_cycle_multipass_dfa_lookahead_guard
)
6092 = (targetm
.sched
.first_cycle_multipass_dfa_lookahead_guard
6095 if (ready_try
[i
] < 0)
6096 /* Queue instruction for several cycles.
6097 We need to restart choose_ready as we have changed
6100 change_queue_index (insn
, -ready_try
[i
]);
6104 /* Make sure that we didn't end up with 0'th insn filtered out.
6105 Don't be tempted to make life easier for backends and just
6106 requeue 0'th insn if (ready_try[0] == 0) and restart
6107 choose_ready. Backends should be very considerate about
6108 requeueing instructions -- especially the highest priority
6109 one at position 0. */
6110 gcc_assert (ready_try
[i
] == 0 || i
> 0);
6115 gcc_assert (ready_try
[i
] == 0);
6116 /* INSN made it through the scrutiny of filters! */
6119 if (max_issue (ready
, 1, curr_state
, first_cycle_insn_p
, &index
) == 0)
6121 *insn_ptr
= ready_remove_first (ready
);
6122 if (sched_verbose
>= 4)
6123 fprintf (sched_dump
, ";;\t\tChosen insn (but can't issue) : %s \n",
6124 (*current_sched_info
->print_insn
) (*insn_ptr
, 0));
6129 if (sched_verbose
>= 4)
6130 fprintf (sched_dump
, ";;\t\tChosen insn : %s\n",
6131 (*current_sched_info
->print_insn
)
6132 (ready_element (ready
, index
), 0));
6134 *insn_ptr
= ready_remove (ready
, index
);
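
/* Illustrative sketch only: how a caller can act on the three return values
   documented before choose_ready.  sketch_pick_next_insn is a hypothetical
   helper, not a scheduler interface; the authoritative caller is the main
   loop in schedule_block below.  */
static rtx_insn *
sketch_pick_next_insn (struct ready_list *ready, bool first_cycle_insn_p)
{
  rtx_insn *insn = NULL;
  for (;;)
    {
      int res = choose_ready (ready, first_cycle_insn_p, &insn);
      if (res < 0)
	return NULL;	/* -1: nothing can issue; advance the cycle.  */
      if (res == 0)
	return insn;	/* 0: INSN was removed from READY and can be issued.  */
      /* 1: the ready list changed (e.g. an insn was requeued); choose
	 again without advancing the cycle.  */
    }
}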
/* This function is called when we have successfully scheduled a
   block.  It uses the schedule stored in the scheduled_insns vector
   to rearrange the RTL.  PREV_HEAD is used as the anchor to which we
   append the scheduled insns; TAIL is the insn after the scheduled
   block.  TARGET_BB is the argument passed to schedule_block.  */

static void
commit_schedule (rtx_insn *prev_head, rtx_insn *tail, basic_block *target_bb)
6152 last_scheduled_insn
= prev_head
;
6154 scheduled_insns
.iterate (i
, &insn
);
6157 if (control_flow_insn_p (last_scheduled_insn
)
6158 || current_sched_info
->advance_target_bb (*target_bb
, insn
))
6160 *target_bb
= current_sched_info
->advance_target_bb (*target_bb
, 0);
6166 x
= next_real_insn (last_scheduled_insn
);
6168 dump_new_block_header (1, *target_bb
, x
, tail
);
6171 last_scheduled_insn
= bb_note (*target_bb
);
6174 if (current_sched_info
->begin_move_insn
)
6175 (*current_sched_info
->begin_move_insn
) (insn
, last_scheduled_insn
);
6176 move_insn (insn
, last_scheduled_insn
,
6177 current_sched_info
->next_tail
);
6178 if (!DEBUG_INSN_P (insn
))
6179 reemit_notes (insn
);
6180 last_scheduled_insn
= insn
;
6183 scheduled_insns
.truncate (0);
/* Examine all insns on the ready list and queue those which can't be
   issued in this cycle.  TEMP_STATE is temporary scheduler state we
   can use as scratch space.  If FIRST_CYCLE_INSN_P is true, no insns
   have been issued for the current cycle, which means it is valid to
   issue an asm statement.

   If SHADOWS_ONLY_P is true, we eliminate all real insns and only
   leave those for which SHADOW_P is true.  If MODULO_EPILOGUE_P is true,
   we only leave insns which have an INSN_EXACT_TICK.  */

static void
prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
		  bool shadows_only_p, bool modulo_epilogue_p)
6201 bool sched_group_found
= false;
6202 int min_cost_group
= 1;
6207 for (i
= 0; i
< ready
.n_ready
; i
++)
6209 rtx_insn
*insn
= ready_element (&ready
, i
);
6210 if (SCHED_GROUP_P (insn
))
6212 sched_group_found
= true;
6217 /* Make two passes if there's a SCHED_GROUP_P insn; make sure to handle
6218 such an insn first and note its cost, then schedule all other insns
6219 for one cycle later. */
6220 for (pass
= sched_group_found
? 0 : 1; pass
< 2; )
6222 int n
= ready
.n_ready
;
6223 for (i
= 0; i
< n
; i
++)
6225 rtx_insn
*insn
= ready_element (&ready
, i
);
6227 const char *reason
= "resource conflict";
6229 if (DEBUG_INSN_P (insn
))
6232 if (sched_group_found
&& !SCHED_GROUP_P (insn
))
6236 cost
= min_cost_group
;
6237 reason
= "not in sched group";
6239 else if (modulo_epilogue_p
6240 && INSN_EXACT_TICK (insn
) == INVALID_TICK
)
6242 cost
= max_insn_queue_index
;
6243 reason
= "not an epilogue insn";
6245 else if (shadows_only_p
&& !SHADOW_P (insn
))
6248 reason
= "not a shadow";
6250 else if (recog_memoized (insn
) < 0)
6252 if (!first_cycle_insn_p
6253 && (GET_CODE (PATTERN (insn
)) == ASM_INPUT
6254 || asm_noperands (PATTERN (insn
)) >= 0))
6258 else if (sched_pressure
!= SCHED_PRESSURE_NONE
)
6260 if (sched_pressure
== SCHED_PRESSURE_MODEL
6261 && INSN_TICK (insn
) <= clock_var
)
6263 memcpy (temp_state
, curr_state
, dfa_state_size
);
6264 if (state_transition (temp_state
, insn
) >= 0)
6265 INSN_TICK (insn
) = clock_var
+ 1;
6275 struct delay_pair
*delay_entry
;
6277 = delay_htab
->find_with_hash (insn
,
6278 htab_hash_pointer (insn
));
6279 while (delay_entry
&& delay_cost
== 0)
6281 delay_cost
= estimate_shadow_tick (delay_entry
);
6282 if (delay_cost
> max_insn_queue_index
)
6283 delay_cost
= max_insn_queue_index
;
6284 delay_entry
= delay_entry
->next_same_i1
;
6288 memcpy (temp_state
, curr_state
, dfa_state_size
);
6289 cost
= state_transition (temp_state
, insn
);
6294 if (cost
< delay_cost
)
6297 reason
= "shadow tick";
6302 if (SCHED_GROUP_P (insn
) && cost
> min_cost_group
)
6303 min_cost_group
= cost
;
6304 ready_remove (&ready
, i
);
6305 /* Normally we'd want to queue INSN for COST cycles. However,
6306 if SCHED_GROUP_P is set, then we must ensure that nothing
6307 else comes between INSN and its predecessor. If there is
6308 some other insn ready to fire on the next cycle, then that
6309 invariant would be broken.
6311 So when SCHED_GROUP_P is set, just queue this insn for a
6313 queue_insn (insn
, SCHED_GROUP_P (insn
) ? 1 : cost
, reason
);
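
/* Illustrative sketch only: the basic "does this insn fit in the current
   cycle?" test used above.  A scratch copy of the DFA state is advanced
   with state_transition; a negative result means the insn can be issued in
   this cycle, otherwise it must be queued for a later one.
   sketch_fits_this_cycle is a hypothetical name.  */
static bool
sketch_fits_this_cycle (state_t scratch, state_t current, rtx_insn *insn)
{
  memcpy (scratch, current, dfa_state_size);
  return state_transition (scratch, insn) < 0;
}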
/* Called when we detect that the schedule is impossible.  We examine the
   backtrack queue to find the earliest insn that caused this condition.  */

static struct haifa_saved_data *
verify_shadows (void)
{
  struct haifa_saved_data *save, *earliest_fail = NULL;
6330 for (save
= backtrack_queue
; save
; save
= save
->next
)
6333 struct delay_pair
*pair
= save
->delay_pair
;
6334 rtx_insn
*i1
= pair
->i1
;
6336 for (; pair
; pair
= pair
->next_same_i1
)
6338 rtx_insn
*i2
= pair
->i2
;
6340 if (QUEUE_INDEX (i2
) == QUEUE_SCHEDULED
)
6343 t
= INSN_TICK (i1
) + pair_delay (pair
);
6346 if (sched_verbose
>= 2)
6347 fprintf (sched_dump
,
6348 ";;\t\tfailed delay requirements for %d/%d (%d->%d)"
6350 INSN_UID (pair
->i1
), INSN_UID (pair
->i2
),
6351 INSN_TICK (pair
->i1
), INSN_EXACT_TICK (pair
->i2
));
6352 earliest_fail
= save
;
6355 if (QUEUE_INDEX (i2
) >= 0)
6357 int queued_for
= INSN_TICK (i2
);
6361 if (sched_verbose
>= 2)
6362 fprintf (sched_dump
,
6363 ";;\t\tfailed delay requirements for %d/%d"
6364 " (%d->%d), queued too late\n",
6365 INSN_UID (pair
->i1
), INSN_UID (pair
->i2
),
6366 INSN_TICK (pair
->i1
), INSN_EXACT_TICK (pair
->i2
));
6367 earliest_fail
= save
;
6374 return earliest_fail
;
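
/* Illustrative sketch only: the delay requirement checked for each recorded
   pair (I1, I2) above, with the delay passed in directly instead of being
   looked up via pair_delay.  sketch_pair_delay_ok is a hypothetical name.
   The shadow I2 must not land earlier than I1's tick plus the required
   delay; a violation makes the corresponding backtrack point a candidate
   for EARLIEST_FAIL.  */
static bool
sketch_pair_delay_ok (rtx_insn *i1, rtx_insn *i2, int required_delay)
{
  return INSN_TICK (i2) >= INSN_TICK (i1) + required_delay;
}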
/* Print instructions together with useful scheduling information between
   HEAD and TAIL (inclusive).  */

static void
dump_insn_stream (rtx_insn *head, rtx_insn *tail)
6382 fprintf (sched_dump
, ";;\t| insn | prio |\n");
6384 rtx_insn
*next_tail
= NEXT_INSN (tail
);
6385 for (rtx_insn
*insn
= head
; insn
!= next_tail
; insn
= NEXT_INSN (insn
))
6387 int priority
= NOTE_P (insn
) ? 0 : INSN_PRIORITY (insn
);
6388 const char *pattern
= (NOTE_P (insn
)
6390 : str_pattern_slim (PATTERN (insn
)));
6392 fprintf (sched_dump
, ";;\t| %4d | %4d | %-30s ",
6393 INSN_UID (insn
), priority
, pattern
);
6395 if (sched_verbose
>= 4)
6397 if (NOTE_P (insn
) || recog_memoized (insn
) < 0)
6398 fprintf (sched_dump
, "nothing");
6400 print_reservation (sched_dump
, insn
);
6402 fprintf (sched_dump
, "\n");
/* Use forward list scheduling to rearrange insns of block pointed to by
   TARGET_BB, possibly bringing insns from subsequent blocks in the same
   region.  (A simplified sketch of this loop follows the function body.)  */

bool
schedule_block (basic_block *target_bb, state_t init_state)
6414 bool success
= modulo_ii
== 0;
6415 struct sched_block_state ls
;
6416 state_t temp_state
= NULL
; /* It is used for multipass scheduling. */
6417 int sort_p
, advance
, start_clock_var
;
6419 /* Head/tail info for this block. */
6420 rtx_insn
*prev_head
= current_sched_info
->prev_head
;
6421 rtx_insn
*next_tail
= current_sched_info
->next_tail
;
6422 rtx_insn
*head
= NEXT_INSN (prev_head
);
6423 rtx_insn
*tail
= PREV_INSN (next_tail
);
6425 if ((current_sched_info
->flags
& DONT_BREAK_DEPENDENCIES
) == 0
6426 && sched_pressure
!= SCHED_PRESSURE_MODEL
&& !sched_fusion
)
6427 find_modifiable_mems (head
, tail
);
6429 /* We used to have code to avoid getting parameters moved from hard
6430 argument registers into pseudos.
6432 However, it was removed when it proved to be of marginal benefit
6433 and caused problems because schedule_block and compute_forward_dependences
6434 had different notions of what the "head" insn was. */
6436 gcc_assert (head
!= tail
|| INSN_P (head
));
6438 haifa_recovery_bb_recently_added_p
= false;
6440 backtrack_queue
= NULL
;
6445 dump_new_block_header (0, *target_bb
, head
, tail
);
6447 if (sched_verbose
>= 2)
6449 dump_insn_stream (head
, tail
);
6450 memset (&rank_for_schedule_stats
, 0,
6451 sizeof (rank_for_schedule_stats
));
6455 if (init_state
== NULL
)
6456 state_reset (curr_state
);
6458 memcpy (curr_state
, init_state
, dfa_state_size
);
6460 /* Clear the ready list. */
6461 ready
.first
= ready
.veclen
- 1;
6465 /* It is used for first cycle multipass scheduling. */
6466 temp_state
= alloca (dfa_state_size
);
6468 if (targetm
.sched
.init
)
6469 targetm
.sched
.init (sched_dump
, sched_verbose
, ready
.veclen
);
6471 /* We start inserting insns after PREV_HEAD. */
6472 last_scheduled_insn
= prev_head
;
6473 last_nondebug_scheduled_insn
= NULL_RTX
;
6474 nonscheduled_insns_begin
= NULL
;
6476 gcc_assert ((NOTE_P (last_scheduled_insn
)
6477 || DEBUG_INSN_P (last_scheduled_insn
))
6478 && BLOCK_FOR_INSN (last_scheduled_insn
) == *target_bb
);
6480 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
6485 insn_queue
= XALLOCAVEC (rtx_insn_list
*, max_insn_queue_index
+ 1);
6486 memset (insn_queue
, 0, (max_insn_queue_index
+ 1) * sizeof (rtx
));
6488 /* Start just before the beginning of time. */
6491 /* We need queue and ready lists and clock_var be initialized
6492 in try_ready () (which is called through init_ready_list ()). */
6493 (*current_sched_info
->init_ready_list
) ();
6496 sched_pressure_start_bb (*target_bb
);
6498 /* The algorithm is O(n^2) in the number of ready insns at any given
6499 time in the worst case. Before reload we are more likely to have
6500 big lists so truncate them to a reasonable size. */
6501 if (!reload_completed
6502 && ready
.n_ready
- ready
.n_debug
> MAX_SCHED_READY_INSNS
)
6504 ready_sort_debug (&ready
);
6505 ready_sort_real (&ready
);
6507 /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
6508 If there are debug insns, we know they're first. */
6509 for (i
= MAX_SCHED_READY_INSNS
+ ready
.n_debug
; i
< ready
.n_ready
; i
++)
6510 if (!SCHED_GROUP_P (ready_element (&ready
, i
)))
6513 if (sched_verbose
>= 2)
6515 fprintf (sched_dump
,
6516 ";;\t\tReady list on entry: %d insns: ", ready
.n_ready
);
6517 debug_ready_list (&ready
);
6518 fprintf (sched_dump
,
6519 ";;\t\t before reload => truncated to %d insns\n", i
);
6522 /* Delay all insns past it for 1 cycle. If debug counter is
6523 activated make an exception for the insn right after
6524 nonscheduled_insns_begin. */
6526 rtx_insn
*skip_insn
;
6528 if (dbg_cnt (sched_insn
) == false)
6529 skip_insn
= first_nonscheduled_insn ();
6533 while (i
< ready
.n_ready
)
6537 insn
= ready_remove (&ready
, i
);
6539 if (insn
!= skip_insn
)
6540 queue_insn (insn
, 1, "list truncated");
6543 ready_add (&ready
, skip_insn
, true);
6547 /* Now we can restore basic block notes and maintain precise cfg. */
6548 restore_bb_notes (*target_bb
);
6550 last_clock_var
= -1;
6554 gcc_assert (scheduled_insns
.length () == 0);
6556 must_backtrack
= false;
6557 modulo_insns_scheduled
= 0;
6559 ls
.modulo_epilogue
= false;
6560 ls
.first_cycle_insn_p
= true;
6562 /* Loop until all the insns in BB are scheduled. */
6563 while ((*current_sched_info
->schedule_more_p
) ())
6565 perform_replacements_new_cycle ();
6568 start_clock_var
= clock_var
;
6572 advance_one_cycle ();
6574 /* Add to the ready list all pending insns that can be issued now.
6575 If there are no ready insns, increment clock until one
6576 is ready and add all pending insns at that point to the ready
6578 queue_to_ready (&ready
);
6580 gcc_assert (ready
.n_ready
);
6582 if (sched_verbose
>= 2)
6584 fprintf (sched_dump
, ";;\t\tReady list after queue_to_ready:");
6585 debug_ready_list (&ready
);
6587 advance
-= clock_var
- start_clock_var
;
6589 while (advance
> 0);
6591 if (ls
.modulo_epilogue
)
6593 int stage
= clock_var
/ modulo_ii
;
6594 if (stage
> modulo_last_stage
* 2 + 2)
6596 if (sched_verbose
>= 2)
6597 fprintf (sched_dump
,
6598 ";;\t\tmodulo scheduled succeeded at II %d\n",
6604 else if (modulo_ii
> 0)
6606 int stage
= clock_var
/ modulo_ii
;
6607 if (stage
> modulo_max_stages
)
6609 if (sched_verbose
>= 2)
6610 fprintf (sched_dump
,
6611 ";;\t\tfailing schedule due to excessive stages\n");
6614 if (modulo_n_insns
== modulo_insns_scheduled
6615 && stage
> modulo_last_stage
)
6617 if (sched_verbose
>= 2)
6618 fprintf (sched_dump
,
6619 ";;\t\tfound kernel after %d stages, II %d\n",
6621 ls
.modulo_epilogue
= true;
6625 prune_ready_list (temp_state
, true, false, ls
.modulo_epilogue
);
6626 if (ready
.n_ready
== 0)
6631 ls
.shadows_only_p
= false;
6632 cycle_issued_insns
= 0;
6633 ls
.can_issue_more
= issue_rate
;
6640 if (sort_p
&& ready
.n_ready
> 0)
6642 /* Sort the ready list based on priority. This must be
6643 done every iteration through the loop, as schedule_insn
6644 may have readied additional insns that will not be
6645 sorted correctly. */
6646 ready_sort (&ready
);
6648 if (sched_verbose
>= 2)
6650 fprintf (sched_dump
,
6651 ";;\t\tReady list after ready_sort: ");
6652 debug_ready_list (&ready
);
      /* We don't want md sched reorder to even see debug insns, so put
	 them out right away.  */
6658 if (ready
.n_ready
&& DEBUG_INSN_P (ready_element (&ready
, 0))
6659 && (*current_sched_info
->schedule_more_p
) ())
6661 while (ready
.n_ready
&& DEBUG_INSN_P (ready_element (&ready
, 0)))
6663 rtx_insn
*insn
= ready_remove_first (&ready
);
6664 gcc_assert (DEBUG_INSN_P (insn
));
6665 (*current_sched_info
->begin_schedule_ready
) (insn
);
6666 scheduled_insns
.safe_push (insn
);
6667 last_scheduled_insn
= insn
;
6668 advance
= schedule_insn (insn
);
6669 gcc_assert (advance
== 0);
6670 if (ready
.n_ready
> 0)
6671 ready_sort (&ready
);
6675 if (ls
.first_cycle_insn_p
&& !ready
.n_ready
)
6678 resume_after_backtrack
:
6679 /* Allow the target to reorder the list, typically for
6680 better instruction bundling. */
6682 && (ready
.n_ready
== 0
6683 || !SCHED_GROUP_P (ready_element (&ready
, 0))))
6685 if (ls
.first_cycle_insn_p
&& targetm
.sched
.reorder
)
6687 = targetm
.sched
.reorder (sched_dump
, sched_verbose
,
6688 ready_lastpos (&ready
),
6689 &ready
.n_ready
, clock_var
);
6690 else if (!ls
.first_cycle_insn_p
&& targetm
.sched
.reorder2
)
6692 = targetm
.sched
.reorder2 (sched_dump
, sched_verbose
,
6694 ? ready_lastpos (&ready
) : NULL
,
6695 &ready
.n_ready
, clock_var
);
6698 restart_choose_ready
:
6699 if (sched_verbose
>= 2)
6701 fprintf (sched_dump
, ";;\tReady list (t = %3d): ",
6703 debug_ready_list (&ready
);
6704 if (sched_pressure
== SCHED_PRESSURE_WEIGHTED
)
6705 print_curr_reg_pressure ();
6708 if (ready
.n_ready
== 0
6709 && ls
.can_issue_more
6710 && reload_completed
)
6712 /* Allow scheduling insns directly from the queue in case
6713 there's nothing better to do (ready list is empty) but
6714 there are still vacant dispatch slots in the current cycle. */
6715 if (sched_verbose
>= 6)
6716 fprintf (sched_dump
,";;\t\tSecond chance\n");
6717 memcpy (temp_state
, curr_state
, dfa_state_size
);
6718 if (early_queue_to_ready (temp_state
, &ready
))
6719 ready_sort (&ready
);
6722 if (ready
.n_ready
== 0
6723 || !ls
.can_issue_more
6724 || state_dead_lock_p (curr_state
)
6725 || !(*current_sched_info
->schedule_more_p
) ())
6728 /* Select and remove the insn from the ready list. */
6734 res
= choose_ready (&ready
, ls
.first_cycle_insn_p
, &insn
);
6740 goto restart_choose_ready
;
6742 gcc_assert (insn
!= NULL_RTX
);
6745 insn
= ready_remove_first (&ready
);
6747 if (sched_pressure
!= SCHED_PRESSURE_NONE
6748 && INSN_TICK (insn
) > clock_var
)
6750 ready_add (&ready
, insn
, true);
6755 if (targetm
.sched
.dfa_new_cycle
6756 && targetm
.sched
.dfa_new_cycle (sched_dump
, sched_verbose
,
6757 insn
, last_clock_var
,
6758 clock_var
, &sort_p
))
6759 /* SORT_P is used by the target to override sorting
6760 of the ready list. This is needed when the target
6761 has modified its internal structures expecting that
6762 the insn will be issued next. As we need the insn
6763 to have the highest priority (so it will be returned by
6764 the ready_remove_first call above), we invoke
6765 ready_add (&ready, insn, true).
6766 But, still, there is one issue: INSN can be later
6767 discarded by scheduler's front end through
6768 current_sched_info->can_schedule_ready_p, hence, won't
6771 ready_add (&ready
, insn
, true);
6777 if (current_sched_info
->can_schedule_ready_p
6778 && ! (*current_sched_info
->can_schedule_ready_p
) (insn
))
6779 /* We normally get here only if we don't want to move
6780 insn from the split block. */
6782 TODO_SPEC (insn
) = DEP_POSTPONED
;
6783 goto restart_choose_ready
;
6788 /* If this insn is the first part of a delay-slot pair, record a
6790 struct delay_pair
*delay_entry
;
6792 = delay_htab
->find_with_hash (insn
, htab_hash_pointer (insn
));
6795 save_backtrack_point (delay_entry
, ls
);
6796 if (sched_verbose
>= 2)
6797 fprintf (sched_dump
, ";;\t\tsaving backtrack point\n");
6801 /* DECISION is made. */
6803 if (modulo_ii
> 0 && INSN_UID (insn
) < modulo_iter0_max_uid
)
6805 modulo_insns_scheduled
++;
6806 modulo_last_stage
= clock_var
/ modulo_ii
;
6808 if (TODO_SPEC (insn
) & SPECULATIVE
)
6809 generate_recovery_code (insn
);
6811 if (targetm
.sched
.dispatch (NULL
, IS_DISPATCH_ON
))
6812 targetm
.sched
.dispatch_do (insn
, ADD_TO_DISPATCH_WINDOW
);
6814 /* Update counters, etc in the scheduler's front end. */
6815 (*current_sched_info
->begin_schedule_ready
) (insn
);
6816 scheduled_insns
.safe_push (insn
);
6817 gcc_assert (NONDEBUG_INSN_P (insn
));
6818 last_nondebug_scheduled_insn
= last_scheduled_insn
= insn
;
6820 if (recog_memoized (insn
) >= 0)
6822 memcpy (temp_state
, curr_state
, dfa_state_size
);
6823 cost
= state_transition (curr_state
, insn
);
6824 if (sched_pressure
!= SCHED_PRESSURE_WEIGHTED
&& !sched_fusion
)
6825 gcc_assert (cost
< 0);
6826 if (memcmp (temp_state
, curr_state
, dfa_state_size
) != 0)
6827 cycle_issued_insns
++;
6831 asm_p
= (GET_CODE (PATTERN (insn
)) == ASM_INPUT
6832 || asm_noperands (PATTERN (insn
)) >= 0);
6834 if (targetm
.sched
.variable_issue
)
6836 targetm
.sched
.variable_issue (sched_dump
, sched_verbose
,
6837 insn
, ls
.can_issue_more
);
6838 /* A naked CLOBBER or USE generates no instruction, so do
6839 not count them against the issue rate. */
6840 else if (GET_CODE (PATTERN (insn
)) != USE
6841 && GET_CODE (PATTERN (insn
)) != CLOBBER
)
6842 ls
.can_issue_more
--;
6843 advance
= schedule_insn (insn
);
6845 if (SHADOW_P (insn
))
6846 ls
.shadows_only_p
= true;
6848 /* After issuing an asm insn we should start a new cycle. */
6849 if (advance
== 0 && asm_p
)
6858 ls
.first_cycle_insn_p
= false;
6859 if (ready
.n_ready
> 0)
6860 prune_ready_list (temp_state
, false, ls
.shadows_only_p
,
6861 ls
.modulo_epilogue
);
6865 if (!must_backtrack
)
6866 for (i
= 0; i
< ready
.n_ready
; i
++)
6868 rtx_insn
*insn
= ready_element (&ready
, i
);
6869 if (INSN_EXACT_TICK (insn
) == clock_var
)
6871 must_backtrack
= true;
6876 if (must_backtrack
&& modulo_ii
> 0)
6878 if (modulo_backtracks_left
== 0)
6880 modulo_backtracks_left
--;
6882 while (must_backtrack
)
6884 struct haifa_saved_data
*failed
;
6885 rtx_insn
*failed_insn
;
6887 must_backtrack
= false;
6888 failed
= verify_shadows ();
6889 gcc_assert (failed
);
6891 failed_insn
= failed
->delay_pair
->i1
;
6892 /* Clear these queues. */
6893 perform_replacements_new_cycle ();
6894 toggle_cancelled_flags (false);
6895 unschedule_insns_until (failed_insn
);
6896 while (failed
!= backtrack_queue
)
6897 free_topmost_backtrack_point (true);
6898 restore_last_backtrack_point (&ls
);
6899 if (sched_verbose
>= 2)
6900 fprintf (sched_dump
, ";;\t\trewind to cycle %d\n", clock_var
);
6901 /* Delay by at least a cycle. This could cause additional
6903 queue_insn (failed_insn
, 1, "backtracked");
6907 if (ready
.n_ready
> 0)
6908 goto resume_after_backtrack
;
6911 if (clock_var
== 0 && ls
.first_cycle_insn_p
)
6917 ls
.first_cycle_insn_p
= true;
6919 if (ls
.modulo_epilogue
)
6922 if (!ls
.first_cycle_insn_p
|| advance
)
6923 advance_one_cycle ();
6924 perform_replacements_new_cycle ();
6927 /* Once again, debug insn suckiness: they can be on the ready list
6928 even if they have unresolved dependencies. To make our view
6929 of the world consistent, remove such "ready" insns. */
6930 restart_debug_insn_loop
:
6931 for (i
= ready
.n_ready
- 1; i
>= 0; i
--)
6935 x
= ready_element (&ready
, i
);
6936 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (x
)) != NULL
6937 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (x
)) != NULL
)
6939 ready_remove (&ready
, i
);
6940 goto restart_debug_insn_loop
;
6943 for (i
= ready
.n_ready
- 1; i
>= 0; i
--)
6947 x
= ready_element (&ready
, i
);
6948 resolve_dependencies (x
);
6950 for (i
= 0; i
<= max_insn_queue_index
; i
++)
6952 rtx_insn_list
*link
;
6953 while ((link
= insn_queue
[i
]) != NULL
)
6955 rtx_insn
*x
= link
->insn ();
6956 insn_queue
[i
] = link
->next ();
6957 QUEUE_INDEX (x
) = QUEUE_NOWHERE
;
6958 free_INSN_LIST_node (link
);
6959 resolve_dependencies (x
);
6965 undo_all_replacements ();
6970 fprintf (sched_dump
, ";;\tReady list (final): ");
6971 debug_ready_list (&ready
);
6974 if (modulo_ii
== 0 && current_sched_info
->queue_must_finish_empty
)
6975 /* Sanity check -- queue must be empty now. Meaningless if region has
6977 gcc_assert (!q_size
&& !ready
.n_ready
&& !ready
.n_debug
);
6978 else if (modulo_ii
== 0)
6980 /* We must maintain QUEUE_INDEX between blocks in region. */
6981 for (i
= ready
.n_ready
- 1; i
>= 0; i
--)
6985 x
= ready_element (&ready
, i
);
6986 QUEUE_INDEX (x
) = QUEUE_NOWHERE
;
6987 TODO_SPEC (x
) = HARD_DEP
;
6991 for (i
= 0; i
<= max_insn_queue_index
; i
++)
6993 rtx_insn_list
*link
;
6994 for (link
= insn_queue
[i
]; link
; link
= link
->next ())
6999 QUEUE_INDEX (x
) = QUEUE_NOWHERE
;
7000 TODO_SPEC (x
) = HARD_DEP
;
7002 free_INSN_LIST_list (&insn_queue
[i
]);
7006 if (sched_pressure
== SCHED_PRESSURE_MODEL
)
7007 model_end_schedule ();
7011 commit_schedule (prev_head
, tail
, target_bb
);
7013 fprintf (sched_dump
, ";; total time = %d\n", clock_var
);
7016 last_scheduled_insn
= tail
;
7018 scheduled_insns
.truncate (0);
7020 if (!current_sched_info
->queue_must_finish_empty
7021 || haifa_recovery_bb_recently_added_p
)
7023 /* INSN_TICK (minimum clock tick at which the insn becomes
7024 ready) may be not correct for the insn in the subsequent
7025 blocks of the region. We should use a correct value of
7026 `clock_var' or modify INSN_TICK. It is better to keep
7027 clock_var value equal to 0 at the start of a basic block.
7028 Therefore we modify INSN_TICK here. */
7029 fix_inter_tick (NEXT_INSN (prev_head
), last_scheduled_insn
);
7032 if (targetm
.sched
.finish
)
7034 targetm
.sched
.finish (sched_dump
, sched_verbose
);
7035 /* Target might have added some instructions to the scheduled block
7036 in its md_finish () hook. These new insns don't have any data
7037 initialized and to identify them we extend h_i_d so that they'll
7039 sched_extend_luids ();
7042 /* Update head/tail boundaries. */
7043 head
= NEXT_INSN (prev_head
);
7044 tail
= last_scheduled_insn
;
7048 fprintf (sched_dump
, ";; new head = %d\n;; new tail = %d\n",
7049 INSN_UID (head
), INSN_UID (tail
));
7051 if (sched_verbose
>= 2)
7053 dump_insn_stream (head
, tail
);
7054 print_rank_for_schedule_stats (";; TOTAL ", &rank_for_schedule_stats
,
7058 fprintf (sched_dump
, "\n");
7061 head
= restore_other_notes (head
, NULL
);
7063 current_sched_info
->head
= head
;
7064 current_sched_info
->tail
= tail
;
7066 free_backtrack_queue ();
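
/* Illustrative sketch only: the skeleton of the forward list-scheduling
   loop implemented by schedule_block above, with DFA state checks,
   multipass lookahead, register pressure, backtracking, modulo scheduling
   and the target hooks stripped out.  This is a simplified paraphrase of
   the control flow, not a drop-in replacement.  */
static void
sketch_schedule_loop (void)
{
  while ((*current_sched_info->schedule_more_p) ())
    {
      /* Move insns whose stall time has elapsed from the queue onto the
	 ready list, advancing the clock if nothing is ready yet.  */
      queue_to_ready (&ready);
      gcc_assert (ready.n_ready);

      /* Keep the best candidates at the head of the ready list.  */
      ready_sort (&ready);

      /* Issue insns for this cycle until the issue rate is exhausted or
	 nothing else is ready.  */
      int can_issue_more = issue_rate;
      while (ready.n_ready && can_issue_more > 0)
	{
	  rtx_insn *insn = ready_remove_first (&ready);
	  (*current_sched_info->begin_schedule_ready) (insn);
	  scheduled_insns.safe_push (insn);
	  can_issue_more--;
	  /* Resolve forward dependencies and make successors ready.  */
	  schedule_insn (insn);
	}

      /* Start the next cycle.  */
      advance_one_cycle ();
    }
}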
/* Set_priorities: compute priority of each insn in the block.  */

int
set_priorities (rtx_insn *head, rtx_insn *tail)
7078 int sched_max_insns_priority
=
7079 current_sched_info
->sched_max_insns_priority
;
7080 rtx_insn
*prev_head
;
7082 if (head
== tail
&& ! INSN_P (head
))
7087 prev_head
= PREV_INSN (head
);
7088 for (insn
= tail
; insn
!= prev_head
; insn
= PREV_INSN (insn
))
7094 (void) priority (insn
);
7096 gcc_assert (INSN_PRIORITY_KNOWN (insn
));
7098 sched_max_insns_priority
= MAX (sched_max_insns_priority
,
7099 INSN_PRIORITY (insn
));
7102 current_sched_info
->sched_max_insns_priority
= sched_max_insns_priority
;
/* Set dump and sched_verbose for the desired debugging output.  If no
   dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
   For -fsched-verbose=N, N>=10, print everything to stderr.  */
void
setup_sched_dump (void)
7113 sched_verbose
= sched_verbose_param
;
7114 if (sched_verbose_param
== 0 && dump_file
)
7116 sched_dump
= ((sched_verbose_param
>= 10 || !dump_file
)
7117 ? stderr
: dump_file
);
/* Allocate data for register pressure sensitive scheduling.  */
static void
alloc_global_sched_pressure_data (void)
7124 if (sched_pressure
!= SCHED_PRESSURE_NONE
)
7126 int i
, max_regno
= max_reg_num ();
7128 if (sched_dump
!= NULL
)
7129 /* We need info about pseudos for rtl dumps about pseudo
7130 classes and costs. */
7131 regstat_init_n_sets_and_refs ();
7132 ira_set_pseudo_classes (true, sched_verbose
? sched_dump
: NULL
);
7133 sched_regno_pressure_class
7134 = (enum reg_class
*) xmalloc (max_regno
* sizeof (enum reg_class
));
7135 for (i
= 0; i
< max_regno
; i
++)
7136 sched_regno_pressure_class
[i
]
7137 = (i
< FIRST_PSEUDO_REGISTER
7138 ? ira_pressure_class_translate
[REGNO_REG_CLASS (i
)]
7139 : ira_pressure_class_translate
[reg_allocno_class (i
)]);
7140 curr_reg_live
= BITMAP_ALLOC (NULL
);
7141 if (sched_pressure
== SCHED_PRESSURE_WEIGHTED
)
7143 saved_reg_live
= BITMAP_ALLOC (NULL
);
7144 region_ref_regs
= BITMAP_ALLOC (NULL
);
7147 /* Calculate number of CALL_USED_REGS in register classes that
7148 we calculate register pressure for. */
7149 for (int c
= 0; c
< ira_pressure_classes_num
; ++c
)
7151 enum reg_class cl
= ira_pressure_classes
[c
];
7153 call_used_regs_num
[cl
] = 0;
7155 for (int i
= 0; i
< ira_class_hard_regs_num
[cl
]; ++i
)
7156 if (call_used_regs
[ira_class_hard_regs
[cl
][i
]])
7157 ++call_used_regs_num
[cl
];
/* Free data for register pressure sensitive scheduling.  Also called
   from schedule_region when stopping sched-pressure early.  */
void
free_global_sched_pressure_data (void)
7167 if (sched_pressure
!= SCHED_PRESSURE_NONE
)
7169 if (regstat_n_sets_and_refs
!= NULL
)
7170 regstat_free_n_sets_and_refs ();
7171 if (sched_pressure
== SCHED_PRESSURE_WEIGHTED
)
7173 BITMAP_FREE (region_ref_regs
);
7174 BITMAP_FREE (saved_reg_live
);
7176 BITMAP_FREE (curr_reg_live
);
7177 free (sched_regno_pressure_class
);
/* Initialize some global state for the scheduler.  This function works
   with the common data shared between all the schedulers.  It is called
   from the scheduler specific initialization routine.  */

void
sched_init (void)
7188 /* Disable speculative loads in their presence if cc0 defined. */
7190 flag_schedule_speculative_load
= 0;
7193 if (targetm
.sched
.dispatch (NULL
, IS_DISPATCH_ON
))
7194 targetm
.sched
.dispatch_do (NULL
, DISPATCH_INIT
);
7196 if (live_range_shrinkage_p
)
7197 sched_pressure
= SCHED_PRESSURE_WEIGHTED
;
7198 else if (flag_sched_pressure
7199 && !reload_completed
7200 && common_sched_info
->sched_pass_id
== SCHED_RGN_PASS
)
7201 sched_pressure
= ((enum sched_pressure_algorithm
)
7202 PARAM_VALUE (PARAM_SCHED_PRESSURE_ALGORITHM
));
7204 sched_pressure
= SCHED_PRESSURE_NONE
;
7206 if (sched_pressure
!= SCHED_PRESSURE_NONE
)
7207 ira_setup_eliminable_regset ();
7209 /* Initialize SPEC_INFO. */
7210 if (targetm
.sched
.set_sched_flags
)
7212 spec_info
= &spec_info_var
;
7213 targetm
.sched
.set_sched_flags (spec_info
);
7215 if (spec_info
->mask
!= 0)
7217 spec_info
->data_weakness_cutoff
=
7218 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF
) * MAX_DEP_WEAK
) / 100;
7219 spec_info
->control_weakness_cutoff
=
7220 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF
)
7221 * REG_BR_PROB_BASE
) / 100;
7224 /* So we won't read anything accidentally. */
7229 /* So we won't read anything accidentally. */
7232 /* Initialize issue_rate. */
7233 if (targetm
.sched
.issue_rate
)
7234 issue_rate
= targetm
.sched
.issue_rate ();
7238 if (targetm
.sched
.first_cycle_multipass_dfa_lookahead
7239 /* Don't use max_issue with reg_pressure scheduling. Multipass
7240 scheduling and reg_pressure scheduling undo each other's decisions. */
7241 && sched_pressure
== SCHED_PRESSURE_NONE
)
7242 dfa_lookahead
= targetm
.sched
.first_cycle_multipass_dfa_lookahead ();
7246 /* Set to "0" so that we recalculate. */
7247 max_lookahead_tries
= 0;
7249 if (targetm
.sched
.init_dfa_pre_cycle_insn
)
7250 targetm
.sched
.init_dfa_pre_cycle_insn ();
7252 if (targetm
.sched
.init_dfa_post_cycle_insn
)
7253 targetm
.sched
.init_dfa_post_cycle_insn ();
7256 dfa_state_size
= state_size ();
7258 init_alias_analysis ();
7261 df_set_flags (DF_LR_RUN_DCE
);
7262 df_note_add_problem ();
7264 /* More problems needed for interloop dep calculation in SMS. */
7265 if (common_sched_info
->sched_pass_id
== SCHED_SMS_PASS
)
7267 df_rd_add_problem ();
7268 df_chain_add_problem (DF_DU_CHAIN
+ DF_UD_CHAIN
);
7273 /* Do not run DCE after reload, as this can kill nops inserted
7275 if (reload_completed
)
7276 df_clear_flags (DF_LR_RUN_DCE
);
7278 regstat_compute_calls_crossed ();
7280 if (targetm
.sched
.init_global
)
7281 targetm
.sched
.init_global (sched_dump
, sched_verbose
, get_max_uid () + 1);
7283 alloc_global_sched_pressure_data ();
7285 curr_state
= xmalloc (dfa_state_size
);
7288 static void haifa_init_only_bb (basic_block
, basic_block
);
/* Initialize data structures specific to the Haifa scheduler.  */
void
haifa_sched_init (void)
7294 setup_sched_dump ();
7297 scheduled_insns
.create (0);
7299 if (spec_info
!= NULL
)
7301 sched_deps_info
->use_deps_list
= 1;
7302 sched_deps_info
->generate_spec_deps
= 1;
7305 /* Initialize luids, dependency caches, target and h_i_d for the
7309 bbs
.create (n_basic_blocks_for_fn (cfun
));
7314 FOR_EACH_BB_FN (bb
, cfun
)
7315 bbs
.quick_push (bb
);
7316 sched_init_luids (bbs
);
7317 sched_deps_init (true);
7318 sched_extend_target ();
7319 haifa_init_h_i_d (bbs
);
7324 sched_init_only_bb
= haifa_init_only_bb
;
7325 sched_split_block
= sched_split_block_1
;
7326 sched_create_empty_bb
= sched_create_empty_bb_1
;
7327 haifa_recovery_bb_ever_added_p
= false;
7329 nr_begin_data
= nr_begin_control
= nr_be_in_data
= nr_be_in_control
= 0;
7330 before_recovery
= 0;
/* Finish work with the data specific to the Haifa scheduler.  */
void
haifa_sched_finish (void)
7340 sched_create_empty_bb
= NULL
;
7341 sched_split_block
= NULL
;
7342 sched_init_only_bb
= NULL
;
7344 if (spec_info
&& spec_info
->dump
)
7346 char c
= reload_completed
? 'a' : 'b';
7348 fprintf (spec_info
->dump
,
7349 ";; %s:\n", current_function_name ());
7351 fprintf (spec_info
->dump
,
7352 ";; Procedure %cr-begin-data-spec motions == %d\n",
7354 fprintf (spec_info
->dump
,
7355 ";; Procedure %cr-be-in-data-spec motions == %d\n",
7357 fprintf (spec_info
->dump
,
7358 ";; Procedure %cr-begin-control-spec motions == %d\n",
7359 c
, nr_begin_control
);
7360 fprintf (spec_info
->dump
,
7361 ";; Procedure %cr-be-in-control-spec motions == %d\n",
7362 c
, nr_be_in_control
);
7365 scheduled_insns
.release ();
7367 /* Finalize h_i_d, dependency caches, and luids for the whole
7368 function. Target will be finalized in md_global_finish (). */
7369 sched_deps_finish ();
7370 sched_finish_luids ();
7371 current_sched_info
= NULL
;
/* Free global data used during insn scheduling.  This function works with
   the common data shared between the schedulers.  */

void
sched_finish (void)
7381 haifa_finish_h_i_d ();
7382 free_global_sched_pressure_data ();
7385 if (targetm
.sched
.finish_global
)
7386 targetm
.sched
.finish_global (sched_dump
, sched_verbose
);
7388 end_alias_analysis ();
7390 regstat_free_calls_crossed ();
/* Free all delay_pair structures that were recorded.  */
void
free_delay_pairs (void)
7401 delay_htab
->empty ();
7402 delay_htab_i2
->empty ();
/* Fix INSN_TICKs of the instructions in the current block as well as
   INSN_TICKs of their dependents.
   HEAD and TAIL are the begin and the end of the current scheduled block.  */
static void
fix_inter_tick (rtx_insn *head, rtx_insn *tail)
7412 /* Set of instructions with corrected INSN_TICK. */
7413 bitmap_head processed
;
  /* ??? It is doubtful whether we should assume that cycle advance happens on
     basic block boundaries.  Basically insns that are unconditionally ready
     at the start of the block are preferable to those which have
     a one cycle dependency on an insn from the previous block.  */
7418 int next_clock
= clock_var
+ 1;
7420 bitmap_initialize (&processed
, 0);
7422 /* Iterates over scheduled instructions and fix their INSN_TICKs and
7423 INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
7424 across different blocks. */
7425 for (tail
= NEXT_INSN (tail
); head
!= tail
; head
= NEXT_INSN (head
))
7430 sd_iterator_def sd_it
;
7433 tick
= INSN_TICK (head
);
7434 gcc_assert (tick
>= MIN_TICK
);
7436 /* Fix INSN_TICK of instruction from just scheduled block. */
7437 if (bitmap_set_bit (&processed
, INSN_LUID (head
)))
7441 if (tick
< MIN_TICK
)
7444 INSN_TICK (head
) = tick
;
7447 if (DEBUG_INSN_P (head
))
7450 FOR_EACH_DEP (head
, SD_LIST_RES_FORW
, sd_it
, dep
)
7454 next
= DEP_CON (dep
);
7455 tick
= INSN_TICK (next
);
7457 if (tick
!= INVALID_TICK
7458 /* If NEXT has its INSN_TICK calculated, fix it.
7459 If not - it will be properly calculated from
7460 scratch later in fix_tick_ready. */
7461 && bitmap_set_bit (&processed
, INSN_LUID (next
)))
7465 if (tick
< MIN_TICK
)
7468 if (tick
> INTER_TICK (next
))
7469 INTER_TICK (next
) = tick
;
7471 tick
= INTER_TICK (next
);
7473 INSN_TICK (next
) = tick
;
7478 bitmap_clear (&processed
);
/* Check if NEXT is ready to be added to the ready or queue list.
   If "yes", add it to the proper list.
   Returns:
      -1 - is not ready yet,
       0 - added to the ready list,
   0 < N - queued for N cycles.  */
int
try_ready (rtx_insn *next)
7490 ds_t old_ts
, new_ts
;
7492 old_ts
= TODO_SPEC (next
);
7494 gcc_assert (!(old_ts
& ~(SPECULATIVE
| HARD_DEP
| DEP_CONTROL
| DEP_POSTPONED
))
7495 && (old_ts
== HARD_DEP
7496 || old_ts
== DEP_POSTPONED
7497 || (old_ts
& SPECULATIVE
)
7498 || old_ts
== DEP_CONTROL
));
7500 new_ts
= recompute_todo_spec (next
, false);
7502 if (new_ts
& (HARD_DEP
| DEP_POSTPONED
))
7503 gcc_assert (new_ts
== old_ts
7504 && QUEUE_INDEX (next
) == QUEUE_NOWHERE
);
7505 else if (current_sched_info
->new_ready
)
7506 new_ts
= current_sched_info
->new_ready (next
, new_ts
);
7508 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
7509 have its original pattern or changed (speculative) one. This is due
7510 to changing ebb in region scheduling.
7511 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
7512 has speculative pattern.
7514 We can't assert (!(new_ts & HARD_DEP) || new_ts == old_ts) here because
7515 control-speculative NEXT could have been discarded by sched-rgn.c
7516 (the same case as when discarded by can_schedule_ready_p ()). */
7518 if ((new_ts
& SPECULATIVE
)
7519 /* If (old_ts == new_ts), then (old_ts & SPECULATIVE) and we don't
7520 need to change anything. */
7521 && new_ts
!= old_ts
)
7526 gcc_assert ((new_ts
& SPECULATIVE
) && !(new_ts
& ~SPECULATIVE
));
7528 res
= haifa_speculate_insn (next
, new_ts
, &new_pat
);
7533 /* It would be nice to change DEP_STATUS of all dependences,
7534 which have ((DEP_STATUS & SPECULATIVE) == new_ts) to HARD_DEP,
7535 so we won't reanalyze anything. */
7540 /* We follow the rule, that every speculative insn
7541 has non-null ORIG_PAT. */
7542 if (!ORIG_PAT (next
))
7543 ORIG_PAT (next
) = PATTERN (next
);
7547 if (!ORIG_PAT (next
))
	    /* If we are going to overwrite the original pattern of insn,
	       save it.  */
7550 ORIG_PAT (next
) = PATTERN (next
);
7552 res
= haifa_change_pattern (next
, new_pat
);
7561 /* We need to restore pattern only if (new_ts == 0), because otherwise it is
7562 either correct (new_ts & SPECULATIVE),
7563 or we simply don't care (new_ts & HARD_DEP). */
7565 gcc_assert (!ORIG_PAT (next
)
7566 || !IS_SPECULATION_BRANCHY_CHECK_P (next
));
7568 TODO_SPEC (next
) = new_ts
;
7570 if (new_ts
& (HARD_DEP
| DEP_POSTPONED
))
7572 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
7573 control-speculative NEXT could have been discarded by sched-rgn.c
7574 (the same case as when discarded by can_schedule_ready_p ()). */
7575 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
7577 change_queue_index (next
, QUEUE_NOWHERE
);
7581 else if (!(new_ts
& BEGIN_SPEC
)
7582 && ORIG_PAT (next
) && PREDICATED_PAT (next
) == NULL_RTX
7583 && !IS_SPECULATION_CHECK_P (next
))
7584 /* We should change pattern of every previously speculative
7585 instruction - and we determine if NEXT was speculative by using
7586 ORIG_PAT field. Except one case - speculation checks have ORIG_PAT
7587 pat too, so skip them. */
7589 bool success
= haifa_change_pattern (next
, ORIG_PAT (next
));
7590 gcc_assert (success
);
7591 ORIG_PAT (next
) = 0;
7594 if (sched_verbose
>= 2)
7596 fprintf (sched_dump
, ";;\t\tdependencies resolved: insn %s",
7597 (*current_sched_info
->print_insn
) (next
, 0));
7599 if (spec_info
&& spec_info
->dump
)
7601 if (new_ts
& BEGIN_DATA
)
7602 fprintf (spec_info
->dump
, "; data-spec;");
7603 if (new_ts
& BEGIN_CONTROL
)
7604 fprintf (spec_info
->dump
, "; control-spec;");
7605 if (new_ts
& BE_IN_CONTROL
)
7606 fprintf (spec_info
->dump
, "; in-control-spec;");
7608 if (TODO_SPEC (next
) & DEP_CONTROL
)
7609 fprintf (sched_dump
, " predicated");
7610 fprintf (sched_dump
, "\n");
7613 adjust_priority (next
);
7615 return fix_tick_ready (next
);
/* Calculate INSN_TICK of NEXT and add it to either ready or queue list.  */
static int
fix_tick_ready (rtx_insn *next)
7624 if (!DEBUG_INSN_P (next
) && !sd_lists_empty_p (next
, SD_LIST_RES_BACK
))
7627 sd_iterator_def sd_it
;
7630 tick
= INSN_TICK (next
);
7631 /* if tick is not equal to INVALID_TICK, then update
7632 INSN_TICK of NEXT with the most recent resolved dependence
7633 cost. Otherwise, recalculate from scratch. */
7634 full_p
= (tick
== INVALID_TICK
);
7636 FOR_EACH_DEP (next
, SD_LIST_RES_BACK
, sd_it
, dep
)
7638 rtx_insn
*pro
= DEP_PRO (dep
);
7641 gcc_assert (INSN_TICK (pro
) >= MIN_TICK
);
7643 tick1
= INSN_TICK (pro
) + dep_cost (dep
);
7654 INSN_TICK (next
) = tick
;
7656 delay
= tick
- clock_var
;
7657 if (delay
<= 0 || sched_pressure
!= SCHED_PRESSURE_NONE
|| sched_fusion
)
7658 delay
= QUEUE_READY
;
7660 change_queue_index (next
, delay
);
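
/* Illustrative sketch only: the tick computation performed by
   fix_tick_ready above, reduced to plain arrays.  sketch_earliest_tick is
   a hypothetical helper.  With two resolved producers scheduled at ticks 3
   and 5 with latencies 2 and 1, NEXT's tick is MAX (3 + 2, 5 + 1) = 6; if
   clock_var has already reached that tick the insn goes straight to the
   ready list, otherwise it is queued for tick - clock_var cycles.  */
static int
sketch_earliest_tick (const int *pro_tick, const int *pro_latency, int ndeps)
{
  int tick = 0;
  for (int i = 0; i < ndeps; i++)
    if (pro_tick[i] + pro_latency[i] > tick)
      tick = pro_tick[i] + pro_latency[i];
  return tick;
}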
/* Move NEXT to the proper queue list with (DELAY >= 1),
   or add it to the ready list (DELAY == QUEUE_READY),
   or remove it from the ready and queue lists entirely (DELAY == QUEUE_NOWHERE).  */
static void
change_queue_index (rtx_insn *next, int delay)
7671 int i
= QUEUE_INDEX (next
);
7673 gcc_assert (QUEUE_NOWHERE
<= delay
&& delay
<= max_insn_queue_index
7675 gcc_assert (i
!= QUEUE_SCHEDULED
);
7677 if ((delay
> 0 && NEXT_Q_AFTER (q_ptr
, delay
) == i
)
7678 || (delay
< 0 && delay
== i
))
7679 /* We have nothing to do. */
7682 /* Remove NEXT from wherever it is now. */
7683 if (i
== QUEUE_READY
)
7684 ready_remove_insn (next
);
7686 queue_remove (next
);
7688 /* Add it to the proper place. */
7689 if (delay
== QUEUE_READY
)
7690 ready_add (readyp
, next
, false);
7691 else if (delay
>= 1)
7692 queue_insn (next
, delay
, "change queue index");
7694 if (sched_verbose
>= 2)
7696 fprintf (sched_dump
, ";;\t\ttick updated: insn %s",
7697 (*current_sched_info
->print_insn
) (next
, 0));
7699 if (delay
== QUEUE_READY
)
7700 fprintf (sched_dump
, " into ready\n");
7701 else if (delay
>= 1)
7702 fprintf (sched_dump
, " into queue with cost=%d\n", delay
);
7704 fprintf (sched_dump
, " removed from ready or queue lists\n");
7708 static int sched_ready_n_insns
= -1;
/* Initialize per region data structures.  */
void
sched_extend_ready_list (int new_sched_ready_n_insns)
7716 if (sched_ready_n_insns
== -1)
7717 /* At the first call we need to initialize one more choice_stack
7721 sched_ready_n_insns
= 0;
7722 scheduled_insns
.reserve (new_sched_ready_n_insns
);
7725 i
= sched_ready_n_insns
+ 1;
7727 ready
.veclen
= new_sched_ready_n_insns
+ issue_rate
;
7728 ready
.vec
= XRESIZEVEC (rtx_insn
*, ready
.vec
, ready
.veclen
);
7730 gcc_assert (new_sched_ready_n_insns
>= sched_ready_n_insns
);
7732 ready_try
= (signed char *) xrecalloc (ready_try
, new_sched_ready_n_insns
,
7733 sched_ready_n_insns
,
7734 sizeof (*ready_try
));
7736 /* We allocate +1 element to save initial state in the choice_stack[0]
7738 choice_stack
= XRESIZEVEC (struct choice_entry
, choice_stack
,
7739 new_sched_ready_n_insns
+ 1);
7741 for (; i
<= new_sched_ready_n_insns
; i
++)
7743 choice_stack
[i
].state
= xmalloc (dfa_state_size
);
7745 if (targetm
.sched
.first_cycle_multipass_init
)
7746 targetm
.sched
.first_cycle_multipass_init (&(choice_stack
[i
]
7750 sched_ready_n_insns
= new_sched_ready_n_insns
;
/* Free per region data structures.  */
void
sched_finish_ready_list (void)
7766 for (i
= 0; i
<= sched_ready_n_insns
; i
++)
7768 if (targetm
.sched
.first_cycle_multipass_fini
)
7769 targetm
.sched
.first_cycle_multipass_fini (&(choice_stack
[i
]
7772 free (choice_stack
[i
].state
);
7774 free (choice_stack
);
7775 choice_stack
= NULL
;
7777 sched_ready_n_insns
= -1;
7781 haifa_luid_for_non_insn (rtx x
)
7783 gcc_assert (NOTE_P (x
) || LABEL_P (x
));
/* Generates recovery code for INSN.  */
static void
generate_recovery_code (rtx_insn *insn)
7792 if (TODO_SPEC (insn
) & BEGIN_SPEC
)
7793 begin_speculative_block (insn
);
    /* Here we have insn with no dependencies to
       instructions other than CHECK_SPEC ones.  */
7798 if (TODO_SPEC (insn
) & BE_IN_SPEC
)
7799 add_to_speculative_block (insn
);
   Tries to add speculative dependencies of type FS between instructions
   in deps_list L and TWIN.  */
static void
process_insn_forw_deps_be_in_spec (rtx insn, rtx_insn *twin, ds_t fs)
7808 sd_iterator_def sd_it
;
7811 FOR_EACH_DEP (insn
, SD_LIST_FORW
, sd_it
, dep
)
7816 consumer
= DEP_CON (dep
);
7818 ds
= DEP_STATUS (dep
);
7820 if (/* If we want to create speculative dep. */
7822 /* And we can do that because this is a true dep. */
7823 && (ds
& DEP_TYPES
) == DEP_TRUE
)
7825 gcc_assert (!(ds
& BE_IN_SPEC
));
7827 if (/* If this dep can be overcome with 'begin speculation'. */
7829 /* Then we have a choice: keep the dep 'begin speculative'
7830 or transform it into 'be in speculative'. */
7832 if (/* In try_ready we assert that if insn once became ready
7833 it can be removed from the ready (or queue) list only
7834 due to backend decision. Hence we can't let the
7835 probability of the speculative dep to decrease. */
7836 ds_weak (ds
) <= ds_weak (fs
))
7840 new_ds
= (ds
& ~BEGIN_SPEC
) | fs
;
7842 if (/* consumer can 'be in speculative'. */
7843 sched_insn_is_legitimate_for_speculation_p (consumer
,
7845 /* Transform it to be in speculative. */
7850 /* Mark the dep as 'be in speculative'. */
7855 dep_def _new_dep
, *new_dep
= &_new_dep
;
7857 init_dep_1 (new_dep
, twin
, consumer
, DEP_TYPE (dep
), ds
);
7858 sd_add_dep (new_dep
, false);
/* Generates recovery code for BEGIN speculative INSN.  */
static void
begin_speculative_block (rtx_insn *insn)
7867 if (TODO_SPEC (insn
) & BEGIN_DATA
)
7869 if (TODO_SPEC (insn
) & BEGIN_CONTROL
)
7872 create_check_block_twin (insn
, false);
7874 TODO_SPEC (insn
) &= ~BEGIN_SPEC
;
static void haifa_init_insn (rtx_insn *);

/* Generates recovery code for BE_IN speculative INSN.  */
static void
add_to_speculative_block (rtx_insn *insn)
7884 sd_iterator_def sd_it
;
7886 rtx_insn_list
*twins
= NULL
;
7887 rtx_vec_t priorities_roots
;
7889 ts
= TODO_SPEC (insn
);
7890 gcc_assert (!(ts
& ~BE_IN_SPEC
));
7892 if (ts
& BE_IN_DATA
)
7894 if (ts
& BE_IN_CONTROL
)
7897 TODO_SPEC (insn
) &= ~BE_IN_SPEC
;
7898 gcc_assert (!TODO_SPEC (insn
));
7900 DONE_SPEC (insn
) |= ts
;
7902 /* First we convert all simple checks to branchy. */
7903 for (sd_it
= sd_iterator_start (insn
, SD_LIST_SPEC_BACK
);
7904 sd_iterator_cond (&sd_it
, &dep
);)
7906 rtx_insn
*check
= DEP_PRO (dep
);
7908 if (IS_SPECULATION_SIMPLE_CHECK_P (check
))
7910 create_check_block_twin (check
, true);
7912 /* Restart search. */
7913 sd_it
= sd_iterator_start (insn
, SD_LIST_SPEC_BACK
);
7916 /* Continue search. */
7917 sd_iterator_next (&sd_it
);
7920 priorities_roots
.create (0);
7921 clear_priorities (insn
, &priorities_roots
);
7925 rtx_insn
*check
, *twin
;
7928 /* Get the first backward dependency of INSN. */
7929 sd_it
= sd_iterator_start (insn
, SD_LIST_SPEC_BACK
);
7930 if (!sd_iterator_cond (&sd_it
, &dep
))
7931 /* INSN has no backward dependencies left. */
7934 gcc_assert ((DEP_STATUS (dep
) & BEGIN_SPEC
) == 0
7935 && (DEP_STATUS (dep
) & BE_IN_SPEC
) != 0
7936 && (DEP_STATUS (dep
) & DEP_TYPES
) == DEP_TRUE
);
7938 check
= DEP_PRO (dep
);
7940 gcc_assert (!IS_SPECULATION_CHECK_P (check
) && !ORIG_PAT (check
)
7941 && QUEUE_INDEX (check
) == QUEUE_NOWHERE
);
7943 rec
= BLOCK_FOR_INSN (check
);
7945 twin
= emit_insn_before (copy_insn (PATTERN (insn
)), BB_END (rec
));
7946 haifa_init_insn (twin
);
7948 sd_copy_back_deps (twin
, insn
, true);
7950 if (sched_verbose
&& spec_info
->dump
)
7951 /* INSN_BB (insn) isn't determined for twin insns yet.
7952 So we can't use current_sched_info->print_insn. */
7953 fprintf (spec_info
->dump
, ";;\t\tGenerated twin insn : %d/rec%d\n",
7954 INSN_UID (twin
), rec
->index
);
7956 twins
= alloc_INSN_LIST (twin
, twins
);
7958 /* Add dependences between TWIN and all appropriate
7959 instructions from REC. */
7960 FOR_EACH_DEP (insn
, SD_LIST_SPEC_BACK
, sd_it
, dep
)
7962 rtx_insn
*pro
= DEP_PRO (dep
);
7964 gcc_assert (DEP_TYPE (dep
) == REG_DEP_TRUE
);
7966 /* INSN might have dependencies from the instructions from
7967 several recovery blocks. At this iteration we process those
7968 producers that reside in REC. */
7969 if (BLOCK_FOR_INSN (pro
) == rec
)
7971 dep_def _new_dep
, *new_dep
= &_new_dep
;
7973 init_dep (new_dep
, pro
, twin
, REG_DEP_TRUE
);
7974 sd_add_dep (new_dep
, false);
7978 process_insn_forw_deps_be_in_spec (insn
, twin
, ts
);
7980 /* Remove all dependencies between INSN and insns in REC. */
7981 for (sd_it
= sd_iterator_start (insn
, SD_LIST_SPEC_BACK
);
7982 sd_iterator_cond (&sd_it
, &dep
);)
7984 rtx_insn
*pro
= DEP_PRO (dep
);
7986 if (BLOCK_FOR_INSN (pro
) == rec
)
7987 sd_delete_dep (sd_it
);
7989 sd_iterator_next (&sd_it
);
7993 /* We couldn't have added the dependencies between INSN and TWINS earlier
7994 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
7998 rtx_insn_list
*next_node
;
8000 twin
= twins
->insn ();
8003 dep_def _new_dep
, *new_dep
= &_new_dep
;
8005 init_dep (new_dep
, insn
, twin
, REG_DEP_OUTPUT
);
8006 sd_add_dep (new_dep
, false);
8009 next_node
= twins
->next ();
8010 free_INSN_LIST_node (twins
);
8014 calc_priorities (priorities_roots
);
8015 priorities_roots
.release ();
/* Extends and fills with zeros (only the new part) the array pointed to by P.  */
void *
xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
8022 gcc_assert (new_nmemb
>= old_nmemb
);
8023 p
= XRESIZEVAR (void, p
, new_nmemb
* size
);
8024 memset (((char *) p
) + old_nmemb
* size
, 0, (new_nmemb
- old_nmemb
) * size
);
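
/* Illustrative usage sketch only: growing a zero-initialized flag array the
   way the ready_try buffer is grown in sched_extend_ready_list.  The old
   elements are preserved and only the newly added tail is cleared.
   sketch_grow_flags is a hypothetical name.  */
static signed char *
sketch_grow_flags (signed char *flags, size_t old_n, size_t new_n)
{
  return (signed char *) xrecalloc (flags, new_n, old_n, sizeof (*flags));
}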
   Find fallthru edge from PRED.  */
edge
find_fallthru_edge_from (basic_block pred)
8036 succ
= pred
->next_bb
;
8037 gcc_assert (succ
->prev_bb
== pred
);
8039 if (EDGE_COUNT (pred
->succs
) <= EDGE_COUNT (succ
->preds
))
8041 e
= find_fallthru_edge (pred
->succs
);
8045 gcc_assert (e
->dest
== succ
);
8051 e
= find_fallthru_edge (succ
->preds
);
8055 gcc_assert (e
->src
== pred
);
/* Extend per basic block data structures.  */
static void
sched_extend_bb (void)
8067 /* The following is done to keep current_sched_info->next_tail non null. */
8068 rtx_insn
*end
= BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun
)->prev_bb
);
8069 rtx_insn
*insn
= DEBUG_INSN_P (end
) ? prev_nondebug_insn (end
) : end
;
8070 if (NEXT_INSN (end
) == 0
8073 /* Don't emit a NOTE if it would end up before a BARRIER. */
8074 && !BARRIER_P (NEXT_INSN (end
))))
8076 rtx_note
*note
= emit_note_after (NOTE_INSN_DELETED
, end
);
8077 /* Make note appear outside BB. */
8078 set_block_for_insn (note
, NULL
);
8079 BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun
)->prev_bb
) = end
;
/* Init per basic block data structures.  */
void
sched_init_bbs (void)
/* Initialize BEFORE_RECOVERY variable.  */
static void
init_before_recovery (basic_block *before_recovery_ptr)
8097 last
= EXIT_BLOCK_PTR_FOR_FN (cfun
)->prev_bb
;
8098 e
= find_fallthru_edge_from (last
);
8102 /* We create two basic blocks:
8103 1. Single instruction block is inserted right after E->SRC
8105 2. Empty block right before EXIT_BLOCK.
8106 Between these two blocks recovery blocks will be emitted. */
8108 basic_block single
, empty
;
8112 /* If the fallthrough edge to exit we've found is from the block we've
8113 created before, don't do anything more. */
8114 if (last
== after_recovery
)
8117 adding_bb_to_current_region_p
= false;
8119 single
= sched_create_empty_bb (last
);
8120 empty
= sched_create_empty_bb (single
);
8122 /* Add new blocks to the root loop. */
8123 if (current_loops
!= NULL
)
8125 add_bb_to_loop (single
, (*current_loops
->larray
)[0]);
8126 add_bb_to_loop (empty
, (*current_loops
->larray
)[0]);
8129 single
->count
= last
->count
;
8130 empty
->count
= last
->count
;
8131 single
->frequency
= last
->frequency
;
8132 empty
->frequency
= last
->frequency
;
8133 BB_COPY_PARTITION (single
, last
);
8134 BB_COPY_PARTITION (empty
, last
);
8136 redirect_edge_succ (e
, single
);
8137 make_single_succ_edge (single
, empty
, 0);
8138 make_single_succ_edge (empty
, EXIT_BLOCK_PTR_FOR_FN (cfun
),
8141 label
= block_label (empty
);
8142 x
= emit_jump_insn_after (gen_jump (label
), BB_END (single
));
8143 JUMP_LABEL (x
) = label
;
8144 LABEL_NUSES (label
)++;
8145 haifa_init_insn (x
);
8147 emit_barrier_after (x
);
8149 sched_init_only_bb (empty
, NULL
);
8150 sched_init_only_bb (single
, NULL
);
8153 adding_bb_to_current_region_p
= true;
8154 before_recovery
= single
;
8155 after_recovery
= empty
;
8157 if (before_recovery_ptr
)
8158 *before_recovery_ptr
= before_recovery
;
8160 if (sched_verbose
>= 2 && spec_info
->dump
)
8161 fprintf (spec_info
->dump
,
8162 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
8163 last
->index
, single
->index
, empty
->index
);
8166 before_recovery
= last
;
/* Returns new recovery block.  */
basic_block
sched_create_recovery_block (basic_block *before_recovery_ptr)
8177 haifa_recovery_bb_recently_added_p
= true;
8178 haifa_recovery_bb_ever_added_p
= true;
8180 init_before_recovery (before_recovery_ptr
);
8182 barrier
= get_last_bb_insn (before_recovery
);
8183 gcc_assert (BARRIER_P (barrier
));
8185 label
= emit_label_after (gen_label_rtx (), barrier
);
8187 rec
= create_basic_block (label
, label
, before_recovery
);
8189 /* A recovery block always ends with an unconditional jump. */
8190 emit_barrier_after (BB_END (rec
));
8192 if (BB_PARTITION (before_recovery
) != BB_UNPARTITIONED
)
8193 BB_SET_PARTITION (rec
, BB_COLD_PARTITION
);
8195 if (sched_verbose
&& spec_info
->dump
)
8196 fprintf (spec_info
->dump
, ";;\t\tGenerated recovery block rec%d\n",
/* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
   and emit necessary jumps.  */
void
sched_create_recovery_edges (basic_block first_bb, basic_block rec,
			     basic_block second_bb)
8212 /* This is fixing of incoming edge. */
8213 /* ??? Which other flags should be specified? */
8214 if (BB_PARTITION (first_bb
) != BB_PARTITION (rec
))
8215 /* Partition type is the same, if it is "unpartitioned". */
8216 edge_flags
= EDGE_CROSSING
;
8220 make_edge (first_bb
, rec
, edge_flags
);
8221 label
= block_label (second_bb
);
8222 jump
= emit_jump_insn_after (gen_jump (label
), BB_END (rec
));
8223 JUMP_LABEL (jump
) = label
;
8224 LABEL_NUSES (label
)++;
8226 if (BB_PARTITION (second_bb
) != BB_PARTITION (rec
))
8227 /* Partition type is the same, if it is "unpartitioned". */
8229 /* Rewritten from cfgrtl.c. */
8230 if (flag_reorder_blocks_and_partition
8231 && targetm_common
.have_named_sections
)
8233 /* We don't need the same note for the check because
8234 any_condjump_p (check) == true. */
8235 CROSSING_JUMP_P (jump
) = 1;
8237 edge_flags
= EDGE_CROSSING
;
8242 make_single_succ_edge (rec
, second_bb
, edge_flags
);
8243 if (dom_info_available_p (CDI_DOMINATORS
))
8244 set_immediate_dominator (CDI_DOMINATORS
, rec
, first_bb
);
/* This function creates recovery code for INSN.  If MUTATE_P is nonzero,
   INSN is a simple check, that should be converted to branchy one.  */
static void
create_check_block_twin (rtx_insn *insn, bool mutate_p)
{
  basic_block rec;
  rtx_insn *label, *check, *twin;
  rtx check_pat;
  ds_t fs;
  sd_iterator_def sd_it;
  dep_t dep;
  dep_def _new_dep, *new_dep = &_new_dep;
  ds_t todo_spec;

  gcc_assert (ORIG_PAT (insn) != NULL_RTX);

  if (!mutate_p)
    todo_spec = TODO_SPEC (insn);
  else
    {
      gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
                  && (TODO_SPEC (insn) & SPECULATIVE) == 0);

      todo_spec = CHECK_SPEC (insn);
    }

  todo_spec &= SPECULATIVE;

  /* Create recovery block.  */
  if (mutate_p || targetm.sched.needs_block_p (todo_spec))
    {
      rec = sched_create_recovery_block (NULL);
      label = BB_HEAD (rec);
    }
  else
    {
      rec = EXIT_BLOCK_PTR_FOR_FN (cfun);
      label = NULL;
    }

  /* Emit CHECK.  */
  check_pat = targetm.sched.gen_spec_check (insn, label, todo_spec);

  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      /* To have mem_reg alive at the beginning of second_bb,
         we emit check BEFORE insn, so insn after splitting
         insn will be at the beginning of second_bb, which will
         provide us with the correct life information.  */
      check = emit_jump_insn_before (check_pat, insn);
      JUMP_LABEL (check) = label;
      LABEL_NUSES (label)++;
    }
  else
    check = emit_insn_before (check_pat, insn);

  /* Extend data structures.  */
  haifa_init_insn (check);

  /* CHECK is being added to current region.  Extend ready list.  */
  gcc_assert (sched_ready_n_insns != -1);
  sched_extend_ready_list (sched_ready_n_insns + 1);

  if (current_sched_info->add_remove_insn)
    current_sched_info->add_remove_insn (insn, 0);

  RECOVERY_BLOCK (check) = rec;

  if (sched_verbose && spec_info->dump)
    fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
             (*current_sched_info->print_insn) (check, 0));

  gcc_assert (ORIG_PAT (insn));

  /* Initialize TWIN (twin is a duplicate of original instruction
     in the recovery block).  */
  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      sd_iterator_def sd_it;
      dep_t dep;

      FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
        if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
          {
            struct _dep _dep2, *dep2 = &_dep2;

            init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);

            sd_add_dep (dep2, true);
          }

      twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
      haifa_init_insn (twin);

      if (sched_verbose && spec_info->dump)
        /* INSN_BB (insn) isn't determined for twin insns yet.
           So we can't use current_sched_info->print_insn.  */
        fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
                 INSN_UID (twin), rec->index);
    }
  else
    {
      ORIG_PAT (check) = ORIG_PAT (insn);
      HAS_INTERNAL_DEP (check) = 1;
      twin = check;
      /* ??? We probably should change all OUTPUT dependencies to
         (TRUE | OUTPUT).  */
    }

  /* Copy all resolved back dependencies of INSN to TWIN.  This will
     provide correct value for INSN_TICK (TWIN).  */
  sd_copy_back_deps (twin, insn, true);

  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    /* In case of branchy check, fix CFG.  */
    {
      basic_block first_bb, second_bb;
      rtx_insn *jump;

      first_bb = BLOCK_FOR_INSN (check);
      second_bb = sched_split_block (first_bb, check);

      sched_create_recovery_edges (first_bb, rec, second_bb);

      sched_init_only_bb (second_bb, first_bb);
      sched_init_only_bb (rec, EXIT_BLOCK_PTR_FOR_FN (cfun));

      jump = BB_END (rec);
      haifa_init_insn (jump);
    }

  /* Move backward dependences from INSN to CHECK and
     move forward dependences from INSN to TWIN.  */

  /* First, create dependencies between INSN's producers and CHECK & TWIN.  */
  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      ds_t ds;

      /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
         check --TRUE--> producer  ??? or ANTI ???
         twin  --TRUE--> producer
         twin  --ANTI--> check

         If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
         check --ANTI--> producer
         twin  --ANTI--> producer
         twin  --ANTI--> check

         If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
         check ~~TRUE~~> producer
         twin  ~~TRUE~~> producer
         twin  --ANTI--> check  */

      ds = DEP_STATUS (dep);

      if (ds & BEGIN_SPEC)
        {
          gcc_assert (!mutate_p);
          ds &= ~BEGIN_SPEC;
        }

      init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
      sd_add_dep (new_dep, false);

      if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
        {
          DEP_CON (new_dep) = twin;
          sd_add_dep (new_dep, false);
        }
    }

  /* Second, remove backward dependencies of INSN.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    {
      if ((DEP_STATUS (dep) & BEGIN_SPEC)
          || mutate_p)
        /* We can delete this dep because we overcome it with
           BEGIN_SPECULATION.  */
        sd_delete_dep (sd_it);
      else
        sd_iterator_next (&sd_it);
    }

  /* Future Speculations.  Determine what BE_IN speculations will be like.  */
  fs = 0;

  /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
     here.  */

  gcc_assert (!DONE_SPEC (insn));

  if (!mutate_p)
    {
      ds_t ts = TODO_SPEC (insn);

      DONE_SPEC (insn) = ts & BEGIN_SPEC;
      CHECK_SPEC (check) = ts & BEGIN_SPEC;

      /* Luckiness of future speculations solely depends upon initial
         BEGIN speculation.  */
      if (ts & BEGIN_DATA)
        fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
      if (ts & BEGIN_CONTROL)
        fs = set_dep_weak (fs, BE_IN_CONTROL,
                           get_dep_weak (ts, BEGIN_CONTROL));
    }
  else
    CHECK_SPEC (check) = CHECK_SPEC (insn);

  /* Future speculations: call the helper.  */
  process_insn_forw_deps_be_in_spec (insn, twin, fs);

  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      /* Which types of dependencies should we use here is,
         generally, machine-dependent question...  But, for now,
         we use ANTI dependence between the check and its twin.  */

      if (!mutate_p)
        {
          init_dep (new_dep, insn, check, REG_DEP_TRUE);
          sd_add_dep (new_dep, false);

          init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
          sd_add_dep (new_dep, false);
        }
      else
        {
          if (spec_info->dump)
            fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
                     (*current_sched_info->print_insn) (insn, 0));

          /* Remove all dependencies of the INSN.  */
          sd_it = sd_iterator_start (insn, (SD_LIST_FORW
                                            | SD_LIST_BACK
                                            | SD_LIST_RES_BACK));
          while (sd_iterator_cond (&sd_it, &dep))
            sd_delete_dep (sd_it);

          /* If former check (INSN) already was moved to the ready (or queue)
             list, add new check (CHECK) there too.  */
          if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
            try_ready (check);

          /* Remove old check from instruction stream and free its
             data.  */
          sched_remove_insn (insn);
        }

      init_dep (new_dep, check, twin, REG_DEP_ANTI);
      sd_add_dep (new_dep, false);
    }
  else
    {
      init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
      sd_add_dep (new_dep, false);
    }

  if (!mutate_p)
    /* Fix priorities.  If MUTATE_P is nonzero, this is not necessary,
       because it'll be done later in add_to_speculative_block.  */
    {
      rtx_vec_t priorities_roots = rtx_vec_t ();

      clear_priorities (twin, &priorities_roots);
      calc_priorities (priorities_roots);
      priorities_roots.release ();
    }
}

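/* After the recovery block has been filled in, dependencies that cross its
   boundary have to be cut so the surrounding region can be scheduled on its
   own; consumers that lose a producer this way are offered to try_ready
   again.  fix_recovery_deps below does that clean-up.  */
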
/* Removes dependency between instructions in the recovery block REC
   and usual region instructions.  It keeps inner dependences so it
   won't be necessary to recompute them.  */
static void
fix_recovery_deps (basic_block rec)
{
  rtx_insn *note, *insn, *jump;
  rtx_insn_list *ready_list = 0;
  bitmap_head in_ready;
  rtx_insn_list *link;

  bitmap_initialize (&in_ready, 0);

  /* NOTE - a basic block note.  */
  note = NEXT_INSN (BB_HEAD (rec));
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  insn = BB_END (rec);
  gcc_assert (JUMP_P (insn));
  insn = PREV_INSN (insn);

  do
    {
      sd_iterator_def sd_it;
      dep_t dep;

      for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
           sd_iterator_cond (&sd_it, &dep);)
        {
          rtx_insn *consumer = DEP_CON (dep);

          if (BLOCK_FOR_INSN (consumer) != rec)
            {
              sd_delete_dep (sd_it);

              if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
                ready_list = alloc_INSN_LIST (consumer, ready_list);
            }
          else
            {
              gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);

              sd_iterator_next (&sd_it);
            }
        }

      insn = PREV_INSN (insn);
    }
  while (insn != note);

  bitmap_clear (&in_ready);

  /* Try to add instructions to the ready or queue list.  */
  for (link = ready_list; link; link = link->next ())
    try_ready (link->insn ());
  free_INSN_LIST_list (&ready_list);

  /* Fixing jump's dependences.  */
  insn = BB_HEAD (rec);
  jump = BB_END (rec);

  gcc_assert (LABEL_P (insn));
  insn = NEXT_INSN (insn);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
  add_jump_dependencies (insn, jump);
}

/* Change pattern of INSN to NEW_PAT.  Invalidate cached haifa
   instruction data.  */
static bool
haifa_change_pattern (rtx_insn *insn, rtx new_pat)
{
  int t;

  t = validate_change (insn, &PATTERN (insn), new_pat, 0);
  if (!t)
    return false;

  update_insn_after_change (insn);
  return true;
}

/* -1 - can't speculate,
   0 - for speculation with REQUEST mode it is OK to use
   current instruction pattern,
   1 - need to change pattern for *NEW_PAT to be speculative.  */
int
sched_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
{
  gcc_assert (current_sched_info->flags & DO_SPECULATION
              && (request & SPECULATIVE)
              && sched_insn_is_legitimate_for_speculation_p (insn, request));

  if ((request & spec_info->mask) != request)
    return -1;

  if (request & BE_IN_SPEC
      && !(request & BEGIN_SPEC))
    return 0;

  return targetm.sched.speculate_insn (insn, request, new_pat);
}

static int
haifa_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
{
  gcc_assert (sched_deps_info->generate_spec_deps
              && !IS_SPECULATION_CHECK_P (insn));

  if (HAS_INTERNAL_DEP (insn)
      || SCHED_GROUP_P (insn))
    return -1;

  return sched_speculate_insn (insn, request, new_pat);
}

/* Print some information about block BB, which starts with HEAD and
   ends with TAIL, before scheduling it.
   I is zero, if scheduler is about to start with the fresh ebb.  */
static void
dump_new_block_header (int i, basic_block bb, rtx_insn *head, rtx_insn *tail)
{
  if (!i)
    fprintf (sched_dump,
             ";; ======================================================\n");
  else
    fprintf (sched_dump,
             ";; =====================ADVANCING TO=====================\n");
  fprintf (sched_dump,
           ";; -- basic block %d from %d to %d -- %s reload\n",
           bb->index, INSN_UID (head), INSN_UID (tail),
           (reload_completed ? "after" : "before"));
  fprintf (sched_dump,
           ";; ======================================================\n");
  fprintf (sched_dump, "\n");
}

/* Unlink basic block notes and labels and saves them, so they
   can be easily restored.  We unlink basic block notes in EBB to
   provide back-compatibility with the previous code, as target backends
   assume, that there'll be only instructions between
   current_sched_info->{head and tail}.  We restore these notes as soon
   as we can.
   FIRST (LAST) is the first (last) basic block in the ebb.
   NB: In usual case (FIRST == LAST) nothing is really done.  */
void
unlink_bb_notes (basic_block first, basic_block last)
{
  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  if (first == last)
    return;

  bb_header = XNEWVEC (rtx_insn *, last_basic_block_for_fn (cfun));

  /* Make a sentinel.  */
  if (last->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
    bb_header[last->next_bb->index] = 0;

  first = first->next_bb;
  do
    {
      rtx_insn *prev, *label, *note, *next;

      label = BB_HEAD (last);
      if (LABEL_P (label))
        note = NEXT_INSN (label);
      else
        note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      prev = PREV_INSN (label);
      next = NEXT_INSN (note);
      gcc_assert (prev && next);

      SET_NEXT_INSN (prev) = next;
      SET_PREV_INSN (next) = prev;

      bb_header[last->index] = label;

      if (last == first)
        break;

      last = last->prev_bb;
    }
  while (true);
}

/* Restore basic block notes.
   FIRST is the first basic block in the ebb.  */
static void
restore_bb_notes (basic_block first)
{
  if (!bb_header)
    return;

  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  first = first->next_bb;
  /* Remember: FIRST is actually a second basic block in the ebb.  */

  while (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
         && bb_header[first->index])
    {
      rtx_insn *prev, *label, *note, *next;

      label = bb_header[first->index];
      prev = PREV_INSN (label);
      next = NEXT_INSN (prev);

      if (LABEL_P (label))
        note = NEXT_INSN (label);
      else
        note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      bb_header[first->index] = 0;

      SET_NEXT_INSN (prev) = label;
      SET_NEXT_INSN (note) = next;
      SET_PREV_INSN (next) = note;

      first = first->next_bb;
    }

  free (bb_header);
  bb_header = 0;
}

/* Helper function.
   Fix CFG after both in- and inter-block movement of
   control_flow_insn_p JUMP.  */
static void
fix_jump_move (rtx_insn *jump)
{
  basic_block bb, jump_bb, jump_bb_next;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
              || IS_SPECULATION_BRANCHY_CHECK_P (jump));

  if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
    /* if jump_bb_next is not empty.  */
    BB_END (jump_bb) = BB_END (jump_bb_next);

  if (BB_END (bb) != PREV_INSN (jump))
    /* Then there are instruction after jump that should be placed
       to jump_bb_next.  */
    BB_END (jump_bb_next) = BB_END (bb);
  else
    /* Otherwise jump_bb_next is empty.  */
    BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));

  /* To make assertion in move_insn happy.  */
  BB_END (bb) = PREV_INSN (jump);

  update_bb_for_insn (jump_bb_next);
}

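/* Once a branchy check has been scheduled, its recovery block is relinked
   right after it.  move_block_after_check below rotates the successor
   vectors of BB, JUMP_BB and JUMP_BB_NEXT via move_succs so each edge ends
   up attached to the block that now owns it.  */
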
/* Fix CFG after interblock movement of control_flow_insn_p JUMP.  */
static void
move_block_after_check (rtx_insn *jump)
{
  basic_block bb, jump_bb, jump_bb_next;
  vec<edge, va_gc> *t;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  update_bb_for_insn (jump_bb);

  gcc_assert (IS_SPECULATION_CHECK_P (jump)
              || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));

  unlink_block (jump_bb_next);
  link_block (jump_bb_next, bb);

  t = bb->succs;
  bb->succs = 0;
  move_succs (&(jump_bb->succs), bb);
  move_succs (&(jump_bb_next->succs), jump_bb);
  move_succs (&t, jump_bb_next);

  df_mark_solutions_dirty ();

  common_sched_info->fix_recovery_cfg
    (bb->index, jump_bb->index, jump_bb_next->index);
}

/* Helper function for move_block_after_check.
   This function attaches edge vector pointed to by SUCCSP to
   block TO.  */
static void
move_succs (vec<edge, va_gc> **succsp, basic_block to)
{
  edge e;
  edge_iterator ei;

  gcc_assert (to->succs == 0);

  to->succs = *succsp;

  FOR_EACH_EDGE (e, ei, to->succs)
    e->src = to;

  *succsp = 0;
}

/* Remove INSN from the instruction stream.
   INSN should not have any dependencies.  */
static void
sched_remove_insn (rtx_insn *insn)
{
  sd_finish_insn (insn);

  change_queue_index (insn, QUEUE_NOWHERE);
  current_sched_info->add_remove_insn (insn, 1);
  delete_insn (insn);
}

/* Clear priorities of all instructions, that are forward dependent on INSN.
   Store in vector pointed to by ROOTS_PTR insns on which priority () should
   be invoked to initialize all cleared priorities.  */
static void
clear_priorities (rtx_insn *insn, rtx_vec_t *roots_ptr)
{
  sd_iterator_def sd_it;
  dep_t dep;
  bool insn_is_root_p = true;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);

      if (INSN_PRIORITY_STATUS (pro) >= 0
          && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
        {
          /* If DEP doesn't contribute to priority then INSN itself should
             be added to priority roots.  */
          if (contributes_to_priority_p (dep))
            insn_is_root_p = false;

          INSN_PRIORITY_STATUS (pro) = -1;
          clear_priorities (pro, roots_ptr);
        }
    }

  if (insn_is_root_p)
    roots_ptr->safe_push (insn);
}

/* Recompute priorities of instructions, whose priorities might have been
   changed.  ROOTS is a vector of instructions whose priority computation will
   trigger initialization of all cleared priorities.  */
static void
calc_priorities (rtx_vec_t roots)
{
  int i;
  rtx_insn *insn;

  FOR_EACH_VEC_ELT (roots, i, insn)
    priority (insn);
}

/* Add dependences between JUMP and other instructions in the recovery
   block.  INSN is the first insn in the recovery block.  */
static void
add_jump_dependencies (rtx_insn *insn, rtx_insn *jump)
{
  do
    {
      insn = NEXT_INSN (insn);
      if (insn == jump)
        break;

      if (dep_list_size (insn, SD_LIST_FORW) == 0)
        {
          dep_def _new_dep, *new_dep = &_new_dep;

          init_dep (new_dep, insn, jump, REG_DEP_ANTI);
          sd_add_dep (new_dep, false);
        }
    }
  while (1);

  gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
}

/* Extend data structures for logical insn UID.  */
void
sched_extend_luids (void)
{
  int new_luids_max_uid = get_max_uid () + 1;

  sched_luids.safe_grow_cleared (new_luids_max_uid);
}

/* Initialize LUID for INSN.  */
void
sched_init_insn_luid (rtx_insn *insn)
{
  int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
  int luid;

  if (i >= 0)
    {
      luid = sched_max_luid;
      sched_max_luid += i;
    }
  else
    luid = -1;

  SET_INSN_LUID (insn, luid);
}

/* Initialize luids for BBS.
   The hook common_sched_info->luid_for_non_insn () is used to determine
   if notes, labels, etc. need luids.  */
void
sched_init_luids (bb_vec_t bbs)
{
  int i;
  basic_block bb;

  sched_extend_luids ();
  FOR_EACH_VEC_ELT (bbs, i, bb)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
        sched_init_insn_luid (insn);
    }
}

/* Free LUIDs.  */
void
sched_finish_luids (void)
{
  sched_luids.release ();
  sched_max_luid = 1;
}

/* Return logical uid of INSN.  Helpful while debugging.  */
int
insn_luid (rtx_insn *insn)
{
  return INSN_LUID (insn);
}

/* Extend per insn data in the target.  */
void
sched_extend_target (void)
{
  if (targetm.sched.h_i_d_extended)
    targetm.sched.h_i_d_extended ();
}

/* Extend global scheduler structures (those, that live across calls to
   schedule_block) to include information about just emitted INSN.  */
static void
extend_h_i_d (void)
{
  int reserve = (get_max_uid () + 1 - h_i_d.length ());
  if (reserve > 0
      && ! h_i_d.space (reserve))
    {
      h_i_d.safe_grow_cleared (3 * get_max_uid () / 2);
      sched_extend_target ();
    }
}

/* Initialize h_i_d entry of the INSN with default values.
   Values, that are not explicitly initialized here, hold zero.  */
static void
init_h_i_d (rtx_insn *insn)
{
  if (INSN_LUID (insn) > 0)
    {
      INSN_COST (insn) = -1;
      QUEUE_INDEX (insn) = QUEUE_NOWHERE;
      INSN_TICK (insn) = INVALID_TICK;
      INSN_EXACT_TICK (insn) = INVALID_TICK;
      INTER_TICK (insn) = INVALID_TICK;
      TODO_SPEC (insn) = HARD_DEP;
      INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status
        = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
      INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status
        = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
    }
}

/* Initialize haifa_insn_data for BBS.  */
void
haifa_init_h_i_d (bb_vec_t bbs)
{
  int i;
  basic_block bb;

  extend_h_i_d ();
  FOR_EACH_VEC_ELT (bbs, i, bb)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
        init_h_i_d (insn);
    }
}

/* Finalize haifa_insn_data.  */
void
haifa_finish_h_i_d (void)
{
  int i;
  haifa_insn_data_t data;
  struct reg_use_data *use, *next;

  FOR_EACH_VEC_ELT (h_i_d, i, data)
    {
      free (data->max_reg_pressure);
      free (data->reg_pressure);
      for (use = data->reg_use_list; use != NULL; use = next)
        {
          next = use->next_insn_use;
          free (use);
        }
    }
  h_i_d.release ();
}

/* Init data for the new insn INSN.  */
static void
haifa_init_insn (rtx_insn *insn)
{
  gcc_assert (insn != NULL);

  sched_extend_luids ();
  sched_init_insn_luid (insn);
  sched_extend_target ();
  sched_deps_init (false);
  extend_h_i_d ();
  init_h_i_d (insn);

  if (adding_bb_to_current_region_p)
    {
      sd_init_insn (insn);

      /* Extend dependency caches by one element.  */
      extend_dependency_caches (1, false);
    }
  if (sched_pressure != SCHED_PRESSURE_NONE)
    init_insn_reg_pressure_info (insn);
}

/* Init data for the new basic block BB which comes after AFTER.  */
static void
haifa_init_only_bb (basic_block bb, basic_block after)
{
  gcc_assert (bb != NULL);

  sched_init_bbs ();

  if (common_sched_info->add_block)
    /* This changes only data structures of the front-end.  */
    common_sched_info->add_block (bb, after);
}

/* A generic version of sched_split_block ().  */
basic_block
sched_split_block_1 (basic_block first_bb, rtx after)
{
  edge e;

  e = split_block (first_bb, after);
  gcc_assert (e->src == first_bb);

  /* sched_split_block emits note if *check == BB_END.  Probably it
     is better to rip that note off.  */

  return e->dest;
}

/* A generic version of sched_create_empty_bb ().  */
basic_block
sched_create_empty_bb_1 (basic_block after)
{
  return create_empty_bb (after);
}

/* Insert PAT as an INSN into the schedule and update the necessary data
   structures to account for it.  */
rtx_insn *
sched_emit_insn (rtx pat)
{
  rtx_insn *insn = emit_insn_before (pat, first_nonscheduled_insn ());
  haifa_init_insn (insn);

  if (current_sched_info->add_remove_insn)
    current_sched_info->add_remove_insn (insn, 0);

  (*current_sched_info->begin_schedule_ready) (insn);
  scheduled_insns.safe_push (insn);

  last_scheduled_insn = insn;
  return insn;
}

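/* Selection order used below: take the first ready insn if the target says
   it fits the dispatch window; otherwise the first insn that does fit; on a
   dispatch-window violation fall back to the first insn; otherwise prefer a
   compare (IS_CMP); as a last resort take the first insn anyway.  */
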
/* This function returns a candidate satisfying dispatch constraints from
   the ready list.  */

static rtx_insn *
ready_remove_first_dispatch (struct ready_list *ready)
{
  int i;
  rtx_insn *insn = ready_element (ready, 0);

  if (ready->n_ready == 1
      || !INSN_P (insn)
      || INSN_CODE (insn) < 0
      || !active_insn_p (insn)
      || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
    return ready_remove_first (ready);

  for (i = 1; i < ready->n_ready; i++)
    {
      insn = ready_element (ready, i);

      if (!INSN_P (insn)
          || INSN_CODE (insn) < 0
          || !active_insn_p (insn))
        continue;

      if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
        {
          /* Return ith element of ready.  */
          insn = ready_remove (ready, i);
          return insn;
        }
    }

  if (targetm.sched.dispatch (NULL, DISPATCH_VIOLATION))
    return ready_remove_first (ready);

  for (i = 1; i < ready->n_ready; i++)
    {
      insn = ready_element (ready, i);

      if (!INSN_P (insn)
          || INSN_CODE (insn) < 0
          || !active_insn_p (insn))
        continue;

      /* Return i-th element of ready.  */
      if (targetm.sched.dispatch (insn, IS_CMP))
        return ready_remove (ready, i);
    }

  return ready_remove_first (ready);
}

/* Get the number of ready insns in the ready list.  */
int
number_in_ready (void)
{
  return ready.n_ready;
}

/* Return the I-th element of the ready list.  */
rtx_insn *
get_ready_element (int i)
{
  return ready_element (&ready, i);
}

#endif /* INSN_SCHEDULING */