/* Instruction scheduling pass.
   Copyright (C) 1992-2017 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for
   the normal instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.
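
   As an illustration only, the recurrence this backward walk computes
   can be sketched as follows (toy_priority and its parameters are
   hypothetical names, not the actual implementation; the real code is
   in priority () further below):

     static int
     toy_priority (int own_cost, const int *dep_cost,
                   const int *consumer_priority, int n_deps)
     {
       int best = own_cost;
       for (int i = 0; i < n_deps; i++)
         if (dep_cost[i] + consumer_priority[i] > best)
           best = dep_cost[i] + consumer_priority[i];
       return best;
     }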
   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.
   The following list shows the order in which we want to break ties
   among insns in the ready list (a comparator sketch follows the
   list):

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block over interblock motion, ties broken by
   4.  prefer useful over speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, or finally
   7.  choose the insn which has the most insns dependent on it.
   8.  choose insn with lowest UID.
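
   As an illustration only, this tie-breaking has the shape of a
   chained comparator.  The sketch below uses hypothetical names
   (toy_rank, path_to_bb_end, pressure_contribution, uid), not the
   actual rank_for_schedule; the first test is rule 1, the second is
   rule 2, and the final return is rule 8:

     static int
     toy_rank (const struct toy_insn *a, const struct toy_insn *b)
     {
       if (a->path_to_bb_end != b->path_to_bb_end)
         return b->path_to_bb_end - a->path_to_bb_end;
       if (a->pressure_contribution != b->pressure_contribution)
         return a->pressure_contribution - b->pressure_contribution;
       ...
       return a->uid - b->uid;
     }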
   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using insn backward dependences
   INSN_BACK_DEPS.  INSN_BACK_DEPS are translated into forward dependences
   INSN_FORW_DEPS for the purpose of forward list scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live registers constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "cfghooks.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "insn-attr.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "sched-int.h"
#include "common/common-target.h"
#include "params.h"
#include "dbgcnt.h"
#include "cfgloop.h"
#include "dumpfile.h"
#include "print-rtl.h"
#ifdef INSN_SCHEDULING

/* True if we do register pressure relief through live-range
   shrinkage.  */
static bool live_range_shrinkage_p;

/* Switch on live range shrinkage.  */
void
initialize_live_range_shrinkage (void)
{
  live_range_shrinkage_p = true;
}

/* Switch off live range shrinkage.  */
void
finish_live_range_shrinkage (void)
{
  live_range_shrinkage_p = false;
}

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */
int issue_rate;

/* This can be set to true by a backend if the scheduler should not
   enable a DCE pass.  */
bool sched_no_dce;

/* The current initiation interval used when modulo scheduling.  */
static int modulo_ii;

/* The maximum number of stages we are prepared to handle.  */
static int modulo_max_stages;

/* The number of insns that exist in each iteration of the loop.  We use this
   to detect when we've scheduled all insns from the first iteration.  */
static int modulo_n_insns;

/* The current count of insns in the first iteration of the loop that have
   already been scheduled.  */
static int modulo_insns_scheduled;

/* The maximum uid of insns from the first iteration of the loop.  */
static int modulo_iter0_max_uid;

/* The number of times we should attempt to backtrack when modulo scheduling.
   Decreased each time we have to backtrack.  */
static int modulo_backtracks_left;

/* The stage in which the last insn from the original loop was
   scheduled.  */
static int modulo_last_stage;

/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N=0: no debugging output.
   N=1: default value.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */
int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump.  */
FILE *sched_dump = 0;

/* This is a placeholder for the scheduler parameters common
   to all schedulers.  */
struct common_sched_info_def *common_sched_info;

#define INSN_TICK(INSN) (HID (INSN)->tick)
#define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
#define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
#define INTER_TICK(INSN) (HID (INSN)->inter_tick)
#define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
#define SHADOW_P(INSN) (HID (INSN)->shadow_p)
#define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
/* Cached cost of the instruction.  Use insn_sched_cost to get cost of the
   insn.  -1 here means that the field is not initialized.  */
#define INSN_COST(INSN) (HID (INSN)->cost)

/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)

/* Original order of insns in the ready list.
   Used to keep order of normal insns while separating DEBUG_INSNs.  */
#define INSN_RFS_DEBUG_ORIG_ORDER(INSN) (HID (INSN)->rfs_debug_orig_order)

/* The deciding reason for INSN's place in the ready list.  */
#define INSN_LAST_RFS_WIN(INSN) (HID (INSN)->last_rfs_win)

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
rtx_insn *note_list;

static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
spec_info_t spec_info = NULL;

/* True, if recovery block was added during scheduling of current block.
   Used to determine, if we need to fix INSN_TICKs.  */
static bool haifa_recovery_bb_recently_added_p;

/* True, if recovery block was added during this scheduling pass.
   Used to determine if we should have empty memory pools of dependencies
   after finishing current region.  */
bool haifa_recovery_bb_ever_added_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx_insn **bb_header = 0;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;

/* Basic block just before the EXIT_BLOCK and after recovery, if we have
   created it.  */
basic_block after_recovery;

/* FALSE if we add bb to another region, so we don't need to initialize it.  */
bool adding_bb_to_current_region_p = true;
/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
   unscheduled insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */
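
#if 0
/* For illustration only (not part of the pass): a minimal, self-contained
   sketch of the P/Q/R/S bookkeeping described above.  The enum and helper
   names below are hypothetical, not scheduler API.  */
enum toy_state { TOY_PENDING, TOY_QUEUED, TOY_READY, TOY_SCHEDULED };

struct toy_insn
{
  enum toy_state state;
  int unresolved_deps;	/* P -> {Q,R} once this drops to zero.  */
  int ready_tick;	/* Earliest cycle at which the insn may issue.  */
};

/* Called when one of INSN's producers is committed to the schedule
   (the P->R and P->Q transitions described above).  */
static void
toy_resolve_dep (struct toy_insn *insn, int clock)
{
  if (--insn->unresolved_deps == 0)
    insn->state = (insn->ready_tick <= clock ? TOY_READY : TOY_QUEUED);
}

/* Called once per cycle (the Q->R transition).  */
static void
toy_advance_clock (struct toy_insn *insn, int clock)
{
  if (insn->state == TOY_QUEUED && insn->ready_tick <= clock)
    insn->state = TOY_READY;
}
#endif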
/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than maximal time of instruction execution computed by genattr.c on
   the base maximal time of functional unit reservations and getting a
   result.  This is the longest time an insn may be queued.  */

static rtx_insn_list **insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)

#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in
   queue, nor in ready list.
   QUEUE_READY     - INSN is in ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */

#define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
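
#if 0
/* For illustration only: how the circular queue indexing above behaves.
   The local variables shadow the real q_ptr and max_insn_queue_index so
   the example is self-contained; toy_queue_example is a hypothetical
   name, not scheduler API.  */
static void
toy_queue_example (void)
{
  int max_insn_queue_index = 7;	/* A power of two minus one.  */
  int q_ptr = 5;

  /* Delaying an insn by 4 cycles files it in slot (5 + 4) & 7 == 1;
     the buffer wraps around, as in NEXT_Q_AFTER (q_ptr, 4).  */
  int slot = (q_ptr + 4) & max_insn_queue_index;

  /* Advancing the clock moves q_ptr forward with the same mask,
     exactly like NEXT_Q (q_ptr).  */
  q_ptr = (q_ptr + 1) & max_insn_queue_index;
  (void) slot;
}
#endif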
/* The following variable refers to all current and future
   reservations of the processor units.  */
state_t curr_state;

/* The following variable is the size of the memory representing all
   current and future reservations of the processor units.  */
size_t dfa_state_size;

/* The following array is used to find the best insn from ready when
   the automaton pipeline interface is used.  */
signed char *ready_try = NULL;

/* The ready list.  */
struct ready_list ready = {NULL, 0, 0, 0, 0};

/* The pointer to the ready list (to be removed).  */
static struct ready_list *readyp = &ready;

/* Scheduling clock.  */
static int clock_var;

/* Clock at which the previous instruction was issued.  */
static int last_clock_var;

/* Set to true if, when queuing a shadow insn, we discover that it would be
   scheduled too late.  */
static bool must_backtrack;

/* The following variable is the number of essential insns issued on
   the current cycle.  An insn is essential if it changes the
   processor's state.  */
int cycle_issued_insns;

/* This records the actual schedule.  It is built up during the main phase
   of schedule_block, and afterwards used to reorder the insns in the RTL.  */
static vec<rtx_insn *> scheduled_insns;

static int may_trap_exp (const_rtx, int);

/* Nonzero iff the address is comprised from at most 1 register.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)						\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))

/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  */

static int haifa_classify_insn (const_rtx);
static int haifa_luid_for_non_insn (rtx x);
/* Haifa version of sched_info hooks common to all headers.  */
const struct common_sched_info_def haifa_common_sched_info =
  {
    NULL, /* fix_recovery_cfg */
    NULL, /* add_block */
    NULL, /* estimate_number_of_insns */
    haifa_luid_for_non_insn, /* luid_for_non_insn */
    SCHED_PASS_UNKNOWN /* sched_pass_id */
  };

/* Mapping from instruction UID to its Logical UID.  */
vec<int> sched_luids;

/* Next LUID to assign to an instruction.  */
int sched_max_luid = 1;

/* Haifa Instruction Data.  */
vec<haifa_insn_data_def> h_i_d;

void (* sched_init_only_bb) (basic_block, basic_block);

/* Split block function.  Different schedulers might use different functions
   to keep their internal data consistent.  */
basic_block (* sched_split_block) (basic_block, rtx);

/* Create empty basic block after the specified block.  */
basic_block (* sched_create_empty_bb) (basic_block);

/* Return the number of cycles until INSN is expected to be ready.
   Return zero if it already is.  */
static int
insn_delay (rtx_insn *insn)
{
  return MAX (INSN_TICK (insn) - clock_var, 0);
}
static int
may_trap_exp (const_rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
	return TRAP_RISKY;
      else
	return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory:  a volatile load.  */
      if (MEM_VOLATILE_P (x))
	return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
	return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
	return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
	return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    {
	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
	      insn_class = WORST_CLASS (insn_class, tmp_class);
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = 0; j < XVECLEN (x, i); j++)
		{
		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
		  insn_class = WORST_CLASS (insn_class, tmp_class);
		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
		    break;
		}
	    }
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
      return insn_class;
    }
}
/* Classifies rtx X of an insn for the purpose of verifying that X can be
   executed speculatively (and consequently the insn can be moved
   speculatively), by examining X, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
   being either PFREE or PRISKY.  */

static int
haifa_classify_rtx (const_rtx x)
{
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (x) == PARALLEL)
    {
      int i, len = XVECLEN (x, 0);

      for (i = len - 1; i >= 0; i--)
	{
	  tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
	  insn_class = WORST_CLASS (insn_class, tmp_class);
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
    }
  else
    {
      code = GET_CODE (x);
      switch (code)
	{
	case CLOBBER:
	  /* Test if it is a 'store'.  */
	  tmp_class = may_trap_exp (XEXP (x, 0), 1);
	  break;
	case SET:
	  /* Test if it is a store.  */
	  tmp_class = may_trap_exp (SET_DEST (x), 1);
	  if (tmp_class == TRAP_RISKY)
	    break;
	  /* Test if it is a load.  */
	  tmp_class =
	    WORST_CLASS (tmp_class,
			 may_trap_exp (SET_SRC (x), 0));
	  break;
	case COND_EXEC:
	  tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
	  if (tmp_class == TRAP_RISKY)
	    break;
	  tmp_class = WORST_CLASS (tmp_class,
				   may_trap_exp (COND_EXEC_TEST (x), 0));
	  break;
	case TRAP_IF:
	  tmp_class = TRAP_RISKY;
	  break;
	default:;
	}
      insn_class = tmp_class;
    }

  return insn_class;
}

static int
haifa_classify_insn (const_rtx insn)
{
  return haifa_classify_rtx (PATTERN (insn));
}
/* After the scheduler initialization function has been called, this function
   can be called to enable modulo scheduling.  II is the initiation interval
   we should use; it affects the delays for delay_pairs that were recorded as
   separated by a given number of stages.

   MAX_STAGES provides us with a limit
   after which we give up scheduling; the caller must have unrolled at least
   as many copies of the loop body and recorded delay_pairs for them.

   INSNS is the number of real (non-debug) insns in one iteration of
   the loop.  MAX_UID can be used to test whether an insn belongs to
   the first iteration of the loop; all of them have a uid lower than
   MAX_UID.  */
void
set_modulo_params (int ii, int max_stages, int insns, int max_uid)
{
  modulo_ii = ii;
  modulo_max_stages = max_stages;
  modulo_n_insns = insns;
  modulo_iter0_max_uid = max_uid;
  modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS);
}
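
#if 0
/* For illustration only: a call sequence a backend doing modulo scheduling
   might use.  The numbers are made up; toy_setup_modulo is a hypothetical
   name, not target code.  */
static void
toy_setup_modulo (void)
{
  /* Announce an initiation interval of 4 cycles, give up after 3 stages,
     with 10 insns per loop iteration, all of whose uids are below 200.  */
  set_modulo_params (4, 3, 10, 200);
}
#endif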
/* A structure to record a pair of insns where the first one is a real
   insn that has delay slots, and the second is its delayed shadow.
   I1 is scheduled normally and will emit an assembly instruction,
   while I2 describes the side effect that takes place at the
   transition between cycles CYCLES and (CYCLES + 1) after I1.  */
struct delay_pair
{
  struct delay_pair *next_same_i1;
  rtx_insn *i1, *i2;
  int cycles;
  /* When doing modulo scheduling, a delay_pair can also be used to
     show that I1 and I2 are the same insn in a different stage.  If that
     is the case, STAGES will be nonzero.  */
  int stages;
};
/* Helpers for delay hashing.  */

struct delay_i1_hasher : nofree_ptr_hash <delay_pair>
{
  typedef void *compare_type;
  static inline hashval_t hash (const delay_pair *);
  static inline bool equal (const delay_pair *, const void *);
};

/* Returns a hash value for X, based on hashing just I1.  */

inline hashval_t
delay_i1_hasher::hash (const delay_pair *x)
{
  return htab_hash_pointer (x->i1);
}

/* Return true if I1 of pair X is the same as that of pair Y.  */

inline bool
delay_i1_hasher::equal (const delay_pair *x, const void *y)
{
  return x->i1 == y;
}

struct delay_i2_hasher : free_ptr_hash <delay_pair>
{
  typedef void *compare_type;
  static inline hashval_t hash (const delay_pair *);
  static inline bool equal (const delay_pair *, const void *);
};

/* Returns a hash value for X, based on hashing just I2.  */

inline hashval_t
delay_i2_hasher::hash (const delay_pair *x)
{
  return htab_hash_pointer (x->i2);
}

/* Return true if I2 of pair X is the same as that of pair Y.  */

inline bool
delay_i2_hasher::equal (const delay_pair *x, const void *y)
{
  return x->i2 == y;
}

/* Two hash tables to record delay_pairs, one indexed by I1 and the other
   indexed by I2.  */
static hash_table <delay_i1_hasher> *delay_htab;
static hash_table <delay_i2_hasher> *delay_htab_i2;
/* Called through htab_traverse.  Walk the hashtable using I2 as
   index, and delete all elements involving an UID higher than
   that pointed to by *DATA.  */
static int
haifa_htab_i2_traverse (delay_pair **slot, int *data)
{
  int maxuid = *data;
  struct delay_pair *p = *slot;
  if (INSN_UID (p->i2) >= maxuid || INSN_UID (p->i1) >= maxuid)
    {
      delay_htab_i2->clear_slot (slot);
    }
  return 1;
}

/* Called through htab_traverse.  Walk the hashtable using I1 as
   index, and delete all elements involving an UID higher than
   that pointed to by *DATA.  */
static int
haifa_htab_i1_traverse (delay_pair **pslot, int *data)
{
  int maxuid = *data;
  struct delay_pair *p, *first, **pprev;

  if (INSN_UID ((*pslot)->i1) >= maxuid)
    {
      delay_htab->clear_slot (pslot);
      return 1;
    }
  pprev = &first;
  for (p = *pslot; p; p = p->next_same_i1)
    {
      if (INSN_UID (p->i2) < maxuid)
	{
	  *pprev = p;
	  pprev = &p->next_same_i1;
	}
    }
  *pprev = NULL;
  if (first == NULL)
    delay_htab->clear_slot (pslot);
  else
    *pslot = first;
  return 1;
}

/* Discard all delay pairs which involve an insn with an UID higher
   than MAX_UID.  */
void
discard_delay_pairs_above (int max_uid)
{
  delay_htab->traverse <int *, haifa_htab_i1_traverse> (&max_uid);
  delay_htab_i2->traverse <int *, haifa_htab_i2_traverse> (&max_uid);
}
/* This function can be called by a port just before it starts the final
   scheduling pass.  It records the fact that an instruction with delay
   slots has been split into two insns, I1 and I2.  The first one will be
   scheduled normally and initiates the operation.  The second one is a
   shadow which must follow a specific number of cycles after I1; its only
   purpose is to show the side effect that occurs at that cycle in the RTL.
   If a JUMP_INSN or a CALL_INSN has been split, I1 should be a normal INSN,
   while I2 retains the original insn type.

   There are two ways in which the number of cycles can be specified,
   involving the CYCLES and STAGES arguments to this function.  If STAGES
   is zero, we just use the value of CYCLES.  Otherwise, STAGES is a factor
   which is multiplied by MODULO_II to give the number of cycles.  This is
   only useful if the caller also calls set_modulo_params to enable modulo
   scheduling.  */

void
record_delay_slot_pair (rtx_insn *i1, rtx_insn *i2, int cycles, int stages)
{
  struct delay_pair *p = XNEW (struct delay_pair);
  struct delay_pair **slot;

  p->i1 = i1;
  p->i2 = i2;
  p->cycles = cycles;
  p->stages = stages;

  if (!delay_htab)
    {
      delay_htab = new hash_table<delay_i1_hasher> (10);
      delay_htab_i2 = new hash_table<delay_i2_hasher> (10);
    }
  slot = delay_htab->find_slot_with_hash (i1, htab_hash_pointer (i1), INSERT);
  p->next_same_i1 = *slot;
  *slot = p;
  slot = delay_htab_i2->find_slot (p, INSERT);
  *slot = p;
}
753 and return the other insn if so. Return NULL otherwise. */
755 real_insn_for_shadow (rtx_insn
*insn
)
757 struct delay_pair
*pair
;
762 pair
= delay_htab_i2
->find_with_hash (insn
, htab_hash_pointer (insn
));
763 if (!pair
|| pair
->stages
> 0)
768 /* For a pair P of insns, return the fixed distance in cycles from the first
769 insn after which the second must be scheduled. */
771 pair_delay (struct delay_pair
*p
)
776 return p
->stages
* modulo_ii
;
779 /* Given an insn INSN, add a dependence on its delayed shadow if it
780 has one. Also try to find situations where shadows depend on each other
781 and add dependencies to the real insns to limit the amount of backtracking
784 add_delay_dependencies (rtx_insn
*insn
)
786 struct delay_pair
*pair
;
787 sd_iterator_def sd_it
;
793 pair
= delay_htab_i2
->find_with_hash (insn
, htab_hash_pointer (insn
));
796 add_dependence (insn
, pair
->i1
, REG_DEP_ANTI
);
800 FOR_EACH_DEP (pair
->i2
, SD_LIST_BACK
, sd_it
, dep
)
802 rtx_insn
*pro
= DEP_PRO (dep
);
803 struct delay_pair
*other_pair
804 = delay_htab_i2
->find_with_hash (pro
, htab_hash_pointer (pro
));
805 if (!other_pair
|| other_pair
->stages
)
807 if (pair_delay (other_pair
) >= pair_delay (pair
))
809 if (sched_verbose
>= 4)
811 fprintf (sched_dump
, ";;\tadding dependence %d <- %d\n",
812 INSN_UID (other_pair
->i1
),
813 INSN_UID (pair
->i1
));
814 fprintf (sched_dump
, ";;\tpair1 %d <- %d, cost %d\n",
818 fprintf (sched_dump
, ";;\tpair2 %d <- %d, cost %d\n",
819 INSN_UID (other_pair
->i1
),
820 INSN_UID (other_pair
->i2
),
821 pair_delay (other_pair
));
823 add_dependence (pair
->i1
, other_pair
->i1
, REG_DEP_ANTI
);
/* Forward declarations.  */

static int priority (rtx_insn *);
static int autopref_rank_for_schedule (const rtx_insn *, const rtx_insn *);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx_insn **, int);
static void queue_insn (rtx_insn *, int, const char *);
static int schedule_insn (rtx_insn *);
static void adjust_priority (rtx_insn *);
static void advance_one_cycle (void);
static void extend_h_i_d (void);

/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between two types of notes:

   (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (2) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */

static void ready_add (struct ready_list *, rtx_insn *, bool);
static rtx_insn *ready_remove_first (struct ready_list *);
static rtx_insn *ready_remove_first_dispatch (struct ready_list *ready);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx_insn *ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx_insn *);

static void fix_inter_tick (rtx_insn *, rtx_insn *);
static int fix_tick_ready (rtx_insn *);
static void change_queue_index (rtx_insn *, int);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void init_h_i_d (rtx_insn *);
static int haifa_speculate_insn (rtx_insn *, ds_t, rtx *);
static void generate_recovery_code (rtx_insn *);
static void process_insn_forw_deps_be_in_spec (rtx_insn *, rtx_insn *, ds_t);
static void begin_speculative_block (rtx_insn *);
static void add_to_speculative_block (rtx_insn *);
static void init_before_recovery (basic_block *);
static void create_check_block_twin (rtx_insn *, bool);
static void fix_recovery_deps (basic_block);
static bool haifa_change_pattern (rtx_insn *, rtx);
static void dump_new_block_header (int, basic_block, rtx_insn *, rtx_insn *);
static void restore_bb_notes (basic_block);
static void fix_jump_move (rtx_insn *);
static void move_block_after_check (rtx_insn *);
static void move_succs (vec<edge, va_gc> **, basic_block);
static void sched_remove_insn (rtx_insn *);
static void clear_priorities (rtx_insn *, rtx_vec_t *);
static void calc_priorities (rtx_vec_t);
static void add_jump_dependencies (rtx_insn *, rtx_insn *);

#endif /* INSN_SCHEDULING */

/* Point to state used for the current scheduling pass.  */
struct haifa_sched_info *current_sched_info;
#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else

/* Do register pressure sensitive insn scheduling if the flag is set
   up.  */
enum sched_pressure_algorithm sched_pressure;

/* Map regno -> its pressure class.  The map defined only when
   SCHED_PRESSURE != SCHED_PRESSURE_NONE.  */
enum reg_class *sched_regno_pressure_class;

/* The current register pressure.  Only the elements corresponding to
   pressure classes are defined.  */
static int curr_reg_pressure[N_REG_CLASSES];

/* Saved value of the previous array.  */
static int saved_reg_pressure[N_REG_CLASSES];

/* Registers living at given scheduling point.  */
static bitmap curr_reg_live;

/* Saved value of the previous bitmap.  */
static bitmap saved_reg_live;

/* Registers mentioned in the current region.  */
static bitmap region_ref_regs;

/* Temporary bitmap used for SCHED_PRESSURE_MODEL.  */
static bitmap tmp_bitmap;

/* Effective number of available registers of a given class (see comment
   in sched_pressure_start_bb).  */
static int sched_class_regs_num[N_REG_CLASSES];
/* Number of call_saved_regs and fixed_regs.  Helpers for calculating
   sched_class_regs_num.  */
static int call_saved_regs_num[N_REG_CLASSES];
static int fixed_regs_num[N_REG_CLASSES];

/* Initiate register pressure relative info for scheduling the current
   region.  Currently it is only clearing registers mentioned in the
   current region.  */
void
sched_init_region_reg_pressure_info (void)
{
  bitmap_clear (region_ref_regs);
}
/* PRESSURE[CL] describes the pressure on register class CL.  Update it
   for the birth (if BIRTH_P) or death (if !BIRTH_P) of register REGNO.
   LIVE tracks the set of live registers; if it is null, assume that
   every birth or death is genuine.  */
static void
mark_regno_birth_or_death (bitmap live, int *pressure, int regno, bool birth_p)
{
  enum reg_class pressure_class;

  pressure_class = sched_regno_pressure_class[regno];
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (pressure_class != NO_REGS)
	{
	  if (birth_p)
	    {
	      if (!live || bitmap_set_bit (live, regno))
		pressure[pressure_class]
		  += (ira_reg_class_max_nregs
		      [pressure_class][PSEUDO_REGNO_MODE (regno)]);
	    }
	  else
	    {
	      if (!live || bitmap_clear_bit (live, regno))
		pressure[pressure_class]
		  -= (ira_reg_class_max_nregs
		      [pressure_class][PSEUDO_REGNO_MODE (regno)]);
	    }
	}
    }
  else if (pressure_class != NO_REGS
	   && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
    {
      if (birth_p)
	{
	  if (!live || bitmap_set_bit (live, regno))
	    pressure[pressure_class]++;
	}
      else
	{
	  if (!live || bitmap_clear_bit (live, regno))
	    pressure[pressure_class]--;
	}
    }
}
/* Initiate current register pressure related info from living
   registers given by LIVE.  */
static void
initiate_reg_pressure_info (bitmap live)
{
  int i;
  unsigned int j;
  bitmap_iterator bi;

  for (i = 0; i < ira_pressure_classes_num; i++)
    curr_reg_pressure[ira_pressure_classes[i]] = 0;
  bitmap_clear (curr_reg_live);
  EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
    if (sched_pressure == SCHED_PRESSURE_MODEL
	|| current_nr_blocks == 1
	|| bitmap_bit_p (region_ref_regs, j))
      mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure, j, true);
}

/* Mark registers in X as mentioned in the current region.  */
static void
setup_ref_regs (rtx x)
{
  int i, j;
  const RTX_CODE code = GET_CODE (x);
  const char *fmt;

  if (REG_P (x))
    {
      bitmap_set_range (region_ref_regs, REGNO (x), REG_NREGS (x));
      return;
    }
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      setup_ref_regs (XEXP (x, i));
    else if (fmt[i] == 'E')
      {
	for (j = 0; j < XVECLEN (x, i); j++)
	  setup_ref_regs (XVECEXP (x, i, j));
      }
}

/* Initiate current register pressure related info at the start of
   basic block BB.  */
static void
initiate_bb_reg_pressure_info (basic_block bb)
{
  unsigned int i ATTRIBUTE_UNUSED;
  rtx_insn *insn;

  if (current_nr_blocks > 1)
    FOR_BB_INSNS (bb, insn)
      if (NONDEBUG_INSN_P (insn))
	setup_ref_regs (PATTERN (insn));
  initiate_reg_pressure_info (df_get_live_in (bb));
  if (bb_has_eh_pred (bb))
    for (i = 0; ; ++i)
      {
	unsigned int regno = EH_RETURN_DATA_REGNO (i);

	if (regno == INVALID_REGNUM)
	  break;
	if (! bitmap_bit_p (df_get_live_in (bb), regno))
	  mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
				     regno, true);
      }
}

/* Save current register pressure related info.  */
static void
save_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_pressure_classes_num; i++)
    saved_reg_pressure[ira_pressure_classes[i]]
      = curr_reg_pressure[ira_pressure_classes[i]];
  bitmap_copy (saved_reg_live, curr_reg_live);
}

/* Restore saved register pressure related info.  */
static void
restore_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_pressure_classes_num; i++)
    curr_reg_pressure[ira_pressure_classes[i]]
      = saved_reg_pressure[ira_pressure_classes[i]];
  bitmap_copy (curr_reg_live, saved_reg_live);
}

/* Return TRUE if the register is dying after its USE.  */
static bool
dying_use_p (struct reg_use_data *use)
{
  struct reg_use_data *next;

  for (next = use->next_regno_use; next != use; next = next->next_regno_use)
    if (NONDEBUG_INSN_P (next->insn)
	&& QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
      return false;

  return true;
}

/* Print info about the current register pressure and its excess for
   each pressure class.  */
static void
print_curr_reg_pressure (void)
{
  int i;
  enum reg_class cl;

  fprintf (sched_dump, ";;\t");
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      gcc_assert (curr_reg_pressure[cl] >= 0);
      fprintf (sched_dump, "  %s:%d(%d)", reg_class_names[cl],
	       curr_reg_pressure[cl],
	       curr_reg_pressure[cl] - sched_class_regs_num[cl]);
    }
  fprintf (sched_dump, "\n");
}
/* Determine if INSN has a condition that is clobbered if a register
   in SET_REGS is modified.  */
static bool
cond_clobbered_p (rtx_insn *insn, HARD_REG_SET set_regs)
{
  rtx pat = PATTERN (insn);
  gcc_assert (GET_CODE (pat) == COND_EXEC);
  if (TEST_HARD_REG_BIT (set_regs, REGNO (XEXP (COND_EXEC_TEST (pat), 0))))
    {
      sd_iterator_def sd_it;
      dep_t dep;

      haifa_change_pattern (insn, ORIG_PAT (insn));
      FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
	DEP_STATUS (dep) &= ~DEP_CANCELLED;
      TODO_SPEC (insn) = HARD_DEP;
      if (sched_verbose >= 2)
	fprintf (sched_dump,
		 ";;\t\tdequeue insn %s because of clobbered condition\n",
		 (*current_sched_info->print_insn) (insn, 0));
      return true;
    }

  return false;
}

/* This function should be called after modifying the pattern of INSN,
   to update scheduler data structures as needed.  */
static void
update_insn_after_change (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  dfa_clear_single_insn_cache (insn);

  sd_it = sd_iterator_start (insn,
			     SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK);
  while (sd_iterator_cond (&sd_it, &dep))
    {
      DEP_COST (dep) = UNKNOWN_DEP_COST;
      sd_iterator_next (&sd_it);
    }

  /* Invalidate INSN_COST, so it'll be recalculated.  */
  INSN_COST (insn) = -1;
  /* Invalidate INSN_TICK, so it'll be recalculated.  */
  INSN_TICK (insn) = INVALID_TICK;

  /* Invalidate autoprefetch data entry.  */
  INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status
    = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
  INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status
    = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
}

/* Two VECs, one to hold dependencies for which pattern replacements
   need to be applied or restored at the start of the next cycle, and
   another to hold an integer that is either one, to apply the
   corresponding replacement, or zero to restore it.  */
static vec<dep_t> next_cycle_replace_deps;
static vec<int> next_cycle_apply;

static void apply_replacement (dep_t, bool);
static void restore_pattern (dep_t, bool);
/* Look at the remaining dependencies for insn NEXT, and compute and return
   the TODO_SPEC value we should use for it.  This is called after one of
   NEXT's dependencies has been resolved.
   We also perform pattern replacements for predication, and for broken
   replacement dependencies.  The latter is only done if FOR_BACKTRACK is
   false.  */

static ds_t
recompute_todo_spec (rtx_insn *next, bool for_backtrack)
{
  ds_t new_ds;
  sd_iterator_def sd_it;
  dep_t dep, modify_dep = NULL;
  int n_spec = 0;
  int n_control = 0;
  int n_replace = 0;
  bool first_p = true;

  if (sd_lists_empty_p (next, SD_LIST_BACK))
    /* NEXT has all its dependencies resolved.  */
    return 0;

  if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
    return HARD_DEP;

  /* If NEXT is intended to sit adjacent to this instruction, we don't
     want to try to break any dependencies.  Treat it as a HARD_DEP.  */
  if (SCHED_GROUP_P (next))
    return HARD_DEP;

  /* Now we've got NEXT with speculative deps only.
     1. Look at the deps to see what we have to do.
     2. Check if we can do 'todo'.  */
  new_ds = 0;

  FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      ds_t ds = DEP_STATUS (dep) & SPECULATIVE;

      if (DEBUG_INSN_P (pro) && !DEBUG_INSN_P (next))
	continue;

      if (ds)
	{
	  n_spec++;
	  if (first_p)
	    {
	      first_p = false;

	      new_ds = ds;
	    }
	  else
	    new_ds = ds_merge (new_ds, ds);
	}
      else if (DEP_TYPE (dep) == REG_DEP_CONTROL)
	{
	  if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
	    {
	      n_control++;
	      modify_dep = dep;
	    }
	  DEP_STATUS (dep) &= ~DEP_CANCELLED;
	}
      else if (DEP_REPLACE (dep) != NULL)
	{
	  if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
	    {
	      n_replace++;
	      modify_dep = dep;
	    }
	  DEP_STATUS (dep) &= ~DEP_CANCELLED;
	}
    }

  if (n_replace > 0 && n_control == 0 && n_spec == 0)
    {
      if (!dbg_cnt (sched_breakdep))
	return HARD_DEP;
      FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
	{
	  struct dep_replacement *desc = DEP_REPLACE (dep);
	  if (desc != NULL)
	    {
	      if (desc->insn == next && !for_backtrack)
		{
		  gcc_assert (n_replace == 1);
		  apply_replacement (dep, true);
		}
	      DEP_STATUS (dep) |= DEP_CANCELLED;
	    }
	}
      return 0;
    }

  else if (n_control == 1 && n_replace == 0 && n_spec == 0)
    {
      rtx_insn *pro, *other;
      rtx new_pat;
      rtx cond = NULL_RTX;
      bool success;
      rtx_insn *prev = NULL;
      int i;
      unsigned regno;

      if ((current_sched_info->flags & DO_PREDICATION) == 0
	  || (ORIG_PAT (next) != NULL_RTX
	      && PREDICATED_PAT (next) == NULL_RTX))
	return HARD_DEP;

      pro = DEP_PRO (modify_dep);
      other = real_insn_for_shadow (pro);
      if (other != NULL_RTX)
	pro = other;

      cond = sched_get_reverse_condition_uncached (pro);
      regno = REGNO (XEXP (cond, 0));

      /* Find the last scheduled insn that modifies the condition register.
	 We can stop looking once we find the insn we depend on through the
	 REG_DEP_CONTROL; if the condition register isn't modified after it,
	 we know that it still has the right value.  */
      if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
	FOR_EACH_VEC_ELT_REVERSE (scheduled_insns, i, prev)
	  {
	    HARD_REG_SET t;

	    find_all_hard_reg_sets (prev, &t, true);
	    if (TEST_HARD_REG_BIT (t, regno))
	      return HARD_DEP;
	    if (prev == pro)
	      break;
	  }
      if (ORIG_PAT (next) == NULL_RTX)
	{
	  ORIG_PAT (next) = PATTERN (next);

	  new_pat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (next));
	  success = haifa_change_pattern (next, new_pat);
	  if (!success)
	    return HARD_DEP;
	  PREDICATED_PAT (next) = new_pat;
	}
      else if (PATTERN (next) != PREDICATED_PAT (next))
	{
	  bool success = haifa_change_pattern (next,
					       PREDICATED_PAT (next));
	  gcc_assert (success);
	}
      DEP_STATUS (modify_dep) |= DEP_CANCELLED;
      return DEP_CONTROL;
    }

  if (PREDICATED_PAT (next) != NULL_RTX)
    {
      int tick = INSN_TICK (next);
      bool success = haifa_change_pattern (next,
					   ORIG_PAT (next));
      INSN_TICK (next) = tick;
      gcc_assert (success);
    }

  /* We can't handle the case where there are both speculative and control
     dependencies, so we return HARD_DEP in such a case.  Also fail if
     we have speculative dependencies with not enough points, or more than
     one control dependency.  */
  if ((n_spec > 0 && (n_control > 0 || n_replace > 0))
      || (n_spec > 0
	  /* Too few points?  */
	  && ds_weak (new_ds) < spec_info->data_weakness_cutoff)
      || (n_control > 0 && n_replace > 0))
    return HARD_DEP;

  return new_ds;
}
/* Pointer to the last instruction scheduled.  */
static rtx_insn *last_scheduled_insn;

/* Pointer to the last nondebug instruction scheduled within the
   block, or the prev_head of the scheduling block.  Used by
   rank_for_schedule, so that insns independent of the last scheduled
   insn will be preferred over dependent instructions.  */
static rtx_insn *last_nondebug_scheduled_insn;

/* Pointer that iterates through the list of unscheduled insns if we
   have a dbg_cnt enabled.  It always points at an insn prior to the
   first unscheduled one.  */
static rtx_insn *nonscheduled_insns_begin;

/* Compute cost of executing INSN.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
insn_sched_cost (rtx_insn *insn)
{
  int cost;

  if (sel_sched_p ())
    {
      if (recog_memoized (insn) < 0)
	return 0;

      cost = insn_default_latency (insn);
      if (cost < 0)
	cost = 0;

      return cost;
    }

  cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
	 understand.  We can't pass these directly to
	 result_ready_cost or insn_default_latency because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
	{
	  INSN_COST (insn) = 0;
	  return 0;
	}
      else
	{
	  cost = insn_default_latency (insn);
	  if (cost < 0)
	    cost = 0;

	  INSN_COST (insn) = cost;
	}
    }

  return cost;
}
/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.
   ??? We also use this function to call recog_memoized on all insns.  */
int
dep_cost_1 (dep_t link, dw_t dw)
{
  rtx_insn *insn = DEP_PRO (link);
  rtx_insn *used = DEP_CON (link);
  int cost;

  if (DEP_COST (link) != UNKNOWN_DEP_COST)
    return DEP_COST (link);

  if (delay_htab)
    {
      struct delay_pair *delay_entry;
      delay_entry
	= delay_htab_i2->find_with_hash (used, htab_hash_pointer (used));
      if (delay_entry)
	{
	  if (delay_entry->i1 == insn)
	    {
	      DEP_COST (link) = pair_delay (delay_entry);
	      return DEP_COST (link);
	    }
	}
    }

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  We don't care about the
     dependence cost when only decreasing register pressure.  */
  if (recog_memoized (used) < 0)
    {
      cost = 0;
      recog_memoized (insn);
    }
  else
    {
      enum reg_note dep_type = DEP_TYPE (link);

      cost = insn_sched_cost (insn);

      if (INSN_CODE (insn) >= 0)
	{
	  if (dep_type == REG_DEP_ANTI)
	    cost = 0;
	  else if (dep_type == REG_DEP_OUTPUT)
	    {
	      cost = (insn_default_latency (insn)
		      - insn_default_latency (used));
	      if (cost <= 0)
		cost = 1;
	    }
	  else if (bypass_p (insn))
	    cost = insn_latency (insn, used);
	}

      if (targetm.sched.adjust_cost)
	cost = targetm.sched.adjust_cost (used, (int) dep_type, insn, cost,
					  dw);

      if (cost < 0)
	cost = 0;
    }

  DEP_COST (link) = cost;
  return cost;
}

/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
dep_cost (dep_t link)
{
  return dep_cost_1 (link, 0);
}
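
#if 0
/* For illustration only: the latency rules dep_cost_1 applies above,
   restated on plain integers.  PRO_LATENCY and CON_LATENCY stand for
   insn_default_latency of producer and consumer; toy_dep_cost is a
   hypothetical name, not scheduler API.  */
static int
toy_dep_cost (int is_anti, int is_output, int pro_latency, int con_latency)
{
  if (is_anti)
    return 0;			/* Anti dependence: no result to wait for.  */
  if (is_output)
    {
      /* Output dependence: the second write only has to land after
	 the first one; never less than one cycle.  */
      int cost = pro_latency - con_latency;
      return cost <= 0 ? 1 : cost;
    }
  return pro_latency;		/* True dependence: full latency.  */
}
#endif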
/* Use this sel-sched.c friendly function in reorder2 instead of increasing
   INSN_PRIORITY explicitly.  */
void
increase_insn_priority (rtx_insn *insn, int amount)
{
  if (!sel_sched_p ())
    {
      /* We're dealing with haifa-sched.c INSN_PRIORITY.  */
      if (INSN_PRIORITY_KNOWN (insn))
	INSN_PRIORITY (insn) += amount;
    }
  else
    {
      /* In sel-sched.c INSN_PRIORITY is not kept up to date.
	 Use EXPR_PRIORITY instead.  */
      sel_add_to_insn_priority (insn, amount);
    }
}

/* Return 'true' if DEP should be included in priority calculations.  */
static bool
contributes_to_priority_p (dep_t dep)
{
  if (DEBUG_INSN_P (DEP_CON (dep))
      || DEBUG_INSN_P (DEP_PRO (dep)))
    return false;

  /* Critical path is meaningful in block boundaries only.  */
  if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
						    DEP_PRO (dep)))
    return false;

  if (DEP_REPLACE (dep) != NULL)
    return false;

  /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
     then speculative instructions will less likely be
     scheduled.  That is because the priority of
     their producers will increase, and, thus, the
     producers will more likely be scheduled, thus,
     resolving the dependence.  */
  if (sched_deps_info->generate_spec_deps
      && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
      && (DEP_STATUS (dep) & SPECULATIVE))
    return false;

  return true;
}

/* Compute the number of nondebug deps in list LIST for INSN.  */
static int
dep_list_size (rtx_insn *insn, sd_list_types_def list)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int dbgcount = 0, nodbgcount = 0;

  if (!MAY_HAVE_DEBUG_INSNS)
    return sd_lists_size (insn, list);

  FOR_EACH_DEP (insn, list, sd_it, dep)
    {
      if (DEBUG_INSN_P (DEP_CON (dep)))
	dbgcount++;
      else if (!DEBUG_INSN_P (DEP_PRO (dep)))
	nodbgcount++;
    }

  gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, list));

  return nodbgcount;
}

bool sched_fusion;
/* Compute the priority number for INSN.  */
static int
priority (rtx_insn *insn)
{
  if (! INSN_P (insn))
    return 0;

  /* We should not be interested in priority of an already scheduled insn.  */
  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  if (!INSN_PRIORITY_KNOWN (insn))
    {
      int this_priority = -1;

      if (sched_fusion)
	{
	  int this_fusion_priority;

	  targetm.sched.fusion_priority (insn, FUSION_MAX_PRIORITY,
					 &this_fusion_priority, &this_priority);
	  INSN_FUSION_PRIORITY (insn) = this_fusion_priority;
	}
      else if (dep_list_size (insn, SD_LIST_FORW) == 0)
	/* ??? We should set INSN_PRIORITY to insn_sched_cost when an insn
	   has some forward deps but all of them are ignored by
	   contributes_to_priority hook.  At the moment we set priority of
	   such insn to 0.  */
	this_priority = insn_sched_cost (insn);
      else
	{
	  rtx_insn *prev_first, *twin;
	  basic_block rec;

	  /* For recovery check instructions we calculate priority slightly
	     different than that of normal instructions.  Instead of walking
	     through INSN_FORW_DEPS (check) list, we walk through
	     INSN_FORW_DEPS list of each instruction in the corresponding
	     recovery block.  */

	  /* Selective scheduling does not define RECOVERY_BLOCK macro.  */
	  rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
	  if (!rec || rec == EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      prev_first = PREV_INSN (insn);
	      twin = insn;
	    }
	  else
	    {
	      prev_first = NEXT_INSN (BB_HEAD (rec));
	      twin = PREV_INSN (BB_END (rec));
	    }

	  do
	    {
	      sd_iterator_def sd_it;
	      dep_t dep;

	      FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
		{
		  rtx_insn *next;
		  int next_priority;

		  next = DEP_CON (dep);

		  if (BLOCK_FOR_INSN (next) != rec)
		    {
		      int cost;

		      if (!contributes_to_priority_p (dep))
			continue;

		      if (twin == insn)
			cost = dep_cost (dep);
		      else
			{
			  struct _dep _dep1, *dep1 = &_dep1;

			  init_dep (dep1, insn, next, REG_DEP_ANTI);

			  cost = dep_cost (dep1);
			}

		      next_priority = cost + priority (next);

		      if (next_priority > this_priority)
			this_priority = next_priority;
		    }
		}

	      twin = PREV_INSN (twin);
	    }
	  while (twin != prev_first);
	}

      if (this_priority < 0)
	{
	  gcc_assert (this_priority == -1);

	  this_priority = insn_sched_cost (insn);
	}

      INSN_PRIORITY (insn) = this_priority;
      INSN_PRIORITY_STATUS (insn) = 1;
    }

  return INSN_PRIORITY (insn);
}
/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

/* For each pressure class CL, set DEATH[CL] to the number of registers
   in that class that die in INSN.  */

static void
calculate_reg_deaths (rtx_insn *insn, int *death)
{
  int i;
  struct reg_use_data *use;

  for (i = 0; i < ira_pressure_classes_num; i++)
    death[ira_pressure_classes[i]] = 0;
  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (dying_use_p (use))
      mark_regno_birth_or_death (0, death, use->regno, true);
}

/* Setup info about the current register pressure impact of scheduling
   INSN at the current scheduling point.  */
static void
setup_insn_reg_pressure_info (rtx_insn *insn)
{
  int i, change, before, after, hard_regno;
  int excess_cost_change;
  machine_mode mode;
  enum reg_class cl;
  struct reg_pressure_data *pressure_info;
  int *max_reg_pressure;
  static int death[N_REG_CLASSES];

  gcc_checking_assert (!DEBUG_INSN_P (insn));

  excess_cost_change = 0;
  calculate_reg_deaths (insn, death);
  pressure_info = INSN_REG_PRESSURE (insn);
  max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
  gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      gcc_assert (curr_reg_pressure[cl] >= 0);
      change = (int) pressure_info[i].set_increase - death[cl];
      before = MAX (0, max_reg_pressure[i] - sched_class_regs_num[cl]);
      after = MAX (0, max_reg_pressure[i] + change
		   - sched_class_regs_num[cl]);
      hard_regno = ira_class_hard_regs[cl][0];
      gcc_assert (hard_regno >= 0);
      mode = reg_raw_mode[hard_regno];
      excess_cost_change += ((after - before)
			     * (ira_memory_move_cost[mode][cl][0]
				+ ira_memory_move_cost[mode][cl][1]));
    }
  INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
}
/* This is the first page of code related to SCHED_PRESSURE_MODEL.
   It tries to make the scheduler take register pressure into account
   without introducing too many unnecessary stalls.  It hooks into the
   main scheduling algorithm at several points:

    - Before scheduling starts, model_start_schedule constructs a
      "model schedule" for the current block.  This model schedule is
      chosen solely to keep register pressure down.  It does not take the
      target's pipeline or the original instruction order into account,
      except as a tie-breaker.  It also doesn't work to a particular
      pressure limit.

      This model schedule gives us an idea of what pressure can be
      achieved for the block and gives us an example of a schedule that
      keeps to that pressure.  It also makes the final schedule less
      dependent on the original instruction order.  This is important
      because the original order can either be "wide" (many values live
      at once, such as in user-scheduled code) or "narrow" (few values
      live at once, such as after loop unrolling, where several
      iterations are executed sequentially).

      We do not apply this model schedule to the rtx stream.  We simply
      record it in model_schedule.  We also compute the maximum pressure,
      MP, that was seen during this schedule.

    - Instructions are added to the ready queue even if they require
      a stall.  The length of the stall is instead computed as:

	 MAX (INSN_TICK (INSN) - clock_var, 0)

      (= insn_delay).  This allows rank_for_schedule to choose between
      introducing a deliberate stall or increasing pressure.

    - Before sorting the ready queue, model_set_excess_costs assigns
      a pressure-based cost to each ready instruction in the queue.
      This is the instruction's INSN_REG_PRESSURE_EXCESS_COST_CHANGE
      (ECC for short) and is effectively measured in cycles.

    - rank_for_schedule ranks instructions based on:

	 ECC (insn) + insn_delay (insn)

      So, for example, an instruction X1 with an ECC of 1 that can issue
      now will win over an instruction X0 with an ECC of zero that would
      introduce a stall of one cycle.  However, an instruction X2 with an
      ECC of 2 that can issue now will lose to both X0 and X1.

    - When an instruction is scheduled, model_recompute updates the model
      schedule with the new pressures (some of which might now exceed the
      original maximum pressure MP).  model_update_limit_points then searches
      for the new point of maximum pressure, if not already known.  */
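
#if 0
/* For illustration only: the X0/X1/X2 example above in terms of the
   quantity that rank_for_schedule compares; lower is better.
   toy_ecc_example is a hypothetical name, not scheduler API.  */
static void
toy_ecc_example (void)
{
  int x0 = 0 + 1;	/* ECC 0, one-cycle stall: rank 1.  */
  int x1 = 1 + 0;	/* ECC 1, issues now: rank 1; the text above
			   prefers X1 on this tie since it does not
			   stall.  */
  int x2 = 2 + 0;	/* ECC 2, issues now: rank 2, loses to both.  */
  (void) x0; (void) x1; (void) x2;
}
#endif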
1809 /* Used to separate high-verbosity debug information for SCHED_PRESSURE_MODEL
1810 from surrounding debug information. */
1812 ";;\t\t+------------------------------------------------------\n"
1814 /* Information about the pressure on a particular register class at a
1815 particular point of the model schedule. */
1816 struct model_pressure_data
{
1817 /* The pressure at this point of the model schedule, or -1 if the
1818 point is associated with an instruction that has already been
1822 /* The maximum pressure during or after this point of the model schedule. */
1826 /* Per-instruction information that is used while building the model
1827 schedule. Here, "schedule" refers to the model schedule rather
1828 than the main schedule. */
1829 struct model_insn_info
{
1830 /* The instruction itself. */
1833 /* If this instruction is in model_worklist, these fields link to the
1834 previous (higher-priority) and next (lower-priority) instructions
1836 struct model_insn_info
*prev
;
1837 struct model_insn_info
*next
;
1839 /* While constructing the schedule, QUEUE_INDEX describes whether an
1840 instruction has already been added to the schedule (QUEUE_SCHEDULED),
1841 is in model_worklist (QUEUE_READY), or neither (QUEUE_NOWHERE).
1842 old_queue records the value that QUEUE_INDEX had before scheduling
1843 started, so that we can restore it once the schedule is complete. */
1846 /* The relative importance of an unscheduled instruction. Higher
1847 values indicate greater importance. */
1848 unsigned int model_priority
;
1850 /* The length of the longest path of satisfied true dependencies
1851 that leads to this instruction. */
1854 /* The length of the longest path of dependencies of any kind
1855 that leads from this instruction. */
1858 /* The number of predecessor nodes that must still be scheduled. */
1859 int unscheduled_preds
;
1862 /* Information about the pressure limit for a particular register class.
1863 This structure is used when applying a model schedule to the main
1865 struct model_pressure_limit
{
1866 /* The maximum register pressure seen in the original model schedule. */
1869 /* The maximum register pressure seen in the current model schedule
1870 (which excludes instructions that have already been scheduled). */
1873 /* The point of the current model schedule at which PRESSURE is first
1874 reached. It is set to -1 if the value needs to be recomputed. */
1878 /* Describes a particular way of measuring register pressure. */
1879 struct model_pressure_group
{
1880 /* Index PCI describes the maximum pressure on ira_pressure_classes[PCI]. */
1881 struct model_pressure_limit limits
[N_REG_CLASSES
];
1883 /* Index (POINT * ira_num_pressure_classes + PCI) describes the pressure
1884 on register class ira_pressure_classes[PCI] at point POINT of the
1885 current model schedule. A POINT of model_num_insns describes the
1886 pressure at the end of the schedule. */
1887 struct model_pressure_data
*model
;
1890 /* Index POINT gives the instruction at point POINT of the model schedule.
1891 This array doesn't change during main scheduling. */
1892 static vec
<rtx_insn
*> model_schedule
;
1894 /* The list of instructions in the model worklist, sorted in order of
1895 decreasing priority. */
1896 static struct model_insn_info
*model_worklist
;
1898 /* Index I describes the instruction with INSN_LUID I. */
1899 static struct model_insn_info
*model_insns
;
1901 /* The number of instructions in the model schedule. */
1902 static int model_num_insns
;
1904 /* The index of the first instruction in model_schedule that hasn't yet been
1905 added to the main schedule, or model_num_insns if all of them have. */
1906 static int model_curr_point
;
1908 /* Describes the pressure before each instruction in the model schedule. */
1909 static struct model_pressure_group model_before_pressure
;
1911 /* The first unused model_priority value (as used in model_insn_info). */
1912 static unsigned int model_next_priority
;
/* The model_pressure_data for ira_pressure_classes[PCI] in GROUP
   at point POINT of the model schedule.  */
#define MODEL_PRESSURE_DATA(GROUP, POINT, PCI) \
  (&(GROUP)->model[(POINT) * ira_pressure_classes_num + (PCI)])

/* The maximum pressure on ira_pressure_classes[PCI] in GROUP at or
   after point POINT of the model schedule.  */
#define MODEL_MAX_PRESSURE(GROUP, POINT, PCI) \
  (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->max_pressure)

/* The pressure on ira_pressure_classes[PCI] in GROUP at point POINT
   of the model schedule.  */
#define MODEL_REF_PRESSURE(GROUP, POINT, PCI) \
  (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->ref_pressure)

/* Information about INSN that is used when creating the model schedule.  */
#define MODEL_INSN_INFO(INSN) \
  (&model_insns[INSN_LUID (INSN)])

/* The instruction at point POINT of the model schedule.  */
#define MODEL_INSN(POINT) \
  (model_schedule[POINT])

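/* An illustrative sketch, not part of the pass: the GROUP->model array
   flattens a conceptual (point x pressure class) table into one vector in
   row-major order, which is the arithmetic MODEL_PRESSURE_DATA performs.
   The guarded helper below mirrors that indexing for a hypothetical table;
   the name flat_index is invented for the example.  */
#if 0
static int
flat_index (int point, int pci, int num_classes)
{
  /* All pressure classes for a given POINT are contiguous, so moving to
     the next POINT skips NUM_CLASSES entries.  */
  return point * num_classes + pci;
}
#endif
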
/* Return INSN's index in the model schedule, or model_num_insns if it
   doesn't belong to that schedule.  */

static int
model_index (rtx_insn *insn)
{
  if (INSN_MODEL_INDEX (insn) == 0)
    return model_num_insns;
  return INSN_MODEL_INDEX (insn) - 1;
}

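/* An illustrative sketch, not part of the pass: INSN_MODEL_INDEX stores the
   schedule index biased by 1, so that the zero-initialized default means
   "not in the model schedule".  The guarded helpers show the same convention
   on a plain variable; the names are invented for the example.  */
#if 0
static int encoded_index;	/* 0 is reserved for "no index".  */

static void
encode_model_index (int point)
{
  encoded_index = point + 1;	/* Bias by 1; 0 stays reserved.  */
}

static int
decode_model_index (int num_insns)
{
  /* Mirrors model_index above: unset (0) decodes to NUM_INSNS.  */
  return encoded_index == 0 ? num_insns : encoded_index - 1;
}
#endif
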
/* Make sure that GROUP->limits is up-to-date for the current point
   of the model schedule.  */

static void
model_update_limit_points_in_group (struct model_pressure_group *group)
{
  int pci, max_pressure, point;

  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      /* We may have passed the final point at which the pressure in
	 group->limits[pci].pressure was reached.  Update the limit if so.  */
      max_pressure = MODEL_MAX_PRESSURE (group, model_curr_point, pci);
      group->limits[pci].pressure = max_pressure;

      /* Find the point at which MAX_PRESSURE is first reached.  We need
	 to search in three cases:

	 - We've already moved past the previous pressure point.
	   In this case we search forward from model_curr_point.

	 - We scheduled the previous point of maximum pressure ahead of
	   its position in the model schedule, but doing so didn't bring
	   the pressure point earlier.  In this case we search forward
	   from that previous pressure point.

	 - Scheduling an instruction early caused the maximum pressure
	   to decrease.  In this case we will have set the pressure
	   point to -1, and we search forward from model_curr_point.  */
      point = MAX (group->limits[pci].point, model_curr_point);
      while (point < model_num_insns
	     && MODEL_REF_PRESSURE (group, point, pci) < max_pressure)
	point++;
      group->limits[pci].point = point;

      gcc_assert (MODEL_REF_PRESSURE (group, point, pci) == max_pressure);
      gcc_assert (MODEL_MAX_PRESSURE (group, point, pci) == max_pressure);
    }
}

/* Make sure that all register-pressure limits are up-to-date for the
   current position in the model schedule.  */

static void
model_update_limit_points (void)
{
  model_update_limit_points_in_group (&model_before_pressure);
}

/* Return the model_index of the last unscheduled use in chain USE
   outside of USE's instruction.  Return -1 if there are no other uses,
   or model_num_insns if the register is live at the end of the block.  */

static int
model_last_use_except (struct reg_use_data *use)
{
  struct reg_use_data *next;
  int last, index;

  last = -1;
  for (next = use->next_regno_use; next != use; next = next->next_regno_use)
    if (NONDEBUG_INSN_P (next->insn)
	&& QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
      {
	index = model_index (next->insn);
	if (index == model_num_insns)
	  return model_num_insns;
	if (last < index)
	  last = index;
      }
  return last;
}

/* An instruction with model_index POINT has just been scheduled, and it
   adds DELTA to the pressure on ira_pressure_classes[PCI] after POINT - 1.
   Update MODEL_REF_PRESSURE (GROUP, POINT, PCI) and
   MODEL_MAX_PRESSURE (GROUP, POINT, PCI) accordingly.  */

static void
model_start_update_pressure (struct model_pressure_group *group,
			     int point, int pci, int delta)
{
  int next_max_pressure;

  if (point == model_num_insns)
    {
      /* The instruction wasn't part of the model schedule; it was moved
	 from a different block.  Update the pressure for the end of
	 the model schedule.  */
      MODEL_REF_PRESSURE (group, point, pci) += delta;
      MODEL_MAX_PRESSURE (group, point, pci) += delta;
    }
  else
    {
      /* Record that this instruction has been scheduled.  Nothing now
	 changes between POINT and POINT + 1, so get the maximum pressure
	 from the latter.  If the maximum pressure decreases, the new
	 pressure point may be before POINT.  */
      MODEL_REF_PRESSURE (group, point, pci) = -1;
      next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
      if (MODEL_MAX_PRESSURE (group, point, pci) > next_max_pressure)
	{
	  MODEL_MAX_PRESSURE (group, point, pci) = next_max_pressure;
	  if (group->limits[pci].point == point)
	    group->limits[pci].point = -1;
	}
    }
}

/* Record that scheduling a later instruction has changed the pressure
   at point POINT of the model schedule by DELTA (which might be 0).
   Update GROUP accordingly.  Return nonzero if these changes might
   trigger changes to previous points as well.  */

static int
model_update_pressure (struct model_pressure_group *group,
		       int point, int pci, int delta)
{
  int ref_pressure, max_pressure, next_max_pressure;

  /* If POINT hasn't yet been scheduled, update its pressure.  */
  ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
  if (ref_pressure >= 0 && delta != 0)
    {
      ref_pressure += delta;
      MODEL_REF_PRESSURE (group, point, pci) = ref_pressure;

      /* Check whether the maximum pressure in the overall schedule
	 has increased.  (This means that the MODEL_MAX_PRESSURE of
	 every point <= POINT will need to increase too; see below.)  */
      if (group->limits[pci].pressure < ref_pressure)
	group->limits[pci].pressure = ref_pressure;

      /* If we are at maximum pressure, and the maximum pressure
	 point was previously unknown or later than POINT,
	 bring it forward.  */
      if (group->limits[pci].pressure == ref_pressure
	  && !IN_RANGE (group->limits[pci].point, 0, point))
	group->limits[pci].point = point;

      /* If POINT used to be the point of maximum pressure, but isn't
	 any longer, we need to recalculate it using a forward walk.  */
      if (group->limits[pci].pressure > ref_pressure
	  && group->limits[pci].point == point)
	group->limits[pci].point = -1;
    }

  /* Update the maximum pressure at POINT.  Changes here might also
     affect the maximum pressure at POINT - 1.  */
  next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
  max_pressure = MAX (ref_pressure, next_max_pressure);
  if (MODEL_MAX_PRESSURE (group, point, pci) != max_pressure)
    {
      MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
      return 1;
    }
  return 0;
}

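/* An illustrative sketch, not part of the pass: MODEL_MAX_PRESSURE is
   maintained as the suffix maximum of MODEL_REF_PRESSURE over all points
   >= POINT, which is why a change at POINT only needs to propagate towards
   POINT - 1 when the maximum at POINT itself changes.  The guarded helper
   rebuilds that invariant from scratch for a hypothetical array of
   reference pressures; the names are invented for the example.  */
#if 0
static void
rebuild_suffix_max (const int *ref, int *max_out, int n)
{
  int running = 0;
  for (int point = n - 1; point >= 0; point--)
    {
      if (point == n - 1 || ref[point] > running)
	running = ref[point];
      /* Maximum pressure at or after POINT.  */
      max_out[point] = running;
    }
}
#endif
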
/* INSN has just been scheduled.  Update the model schedule accordingly.  */

static void
model_recompute (rtx_insn *insn)
{
  struct {
    int last_use;
    int regno;
  } uses[FIRST_PSEUDO_REGISTER + MAX_RECOG_OPERANDS];
  struct reg_use_data *use;
  struct reg_pressure_data *reg_pressure;
  int delta[N_REG_CLASSES];
  int pci, point, mix, new_last, cl, ref_pressure, queue;
  unsigned int i, num_uses, num_pending_births;
  bool print_p;

  /* The destinations of INSN were previously live from POINT onwards, but are
     now live from model_curr_point onwards.  Set up DELTA accordingly.  */
  point = model_index (insn);
  reg_pressure = INSN_REG_PRESSURE (insn);
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      delta[cl] = reg_pressure[pci].set_increase;
    }

  /* Record which registers previously died at POINT, but which now die
     before POINT.  Adjust DELTA so that it represents the effect of
     this change after POINT - 1.  Set NUM_PENDING_BIRTHS to the number of
     registers that will be born in the range [model_curr_point, POINT).  */
  num_uses = 0;
  num_pending_births = 0;
  bitmap_clear (tmp_bitmap);
  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    {
      new_last = model_last_use_except (use);
      if (new_last < point && bitmap_set_bit (tmp_bitmap, use->regno))
	{
	  gcc_assert (num_uses < ARRAY_SIZE (uses));
	  uses[num_uses].last_use = new_last;
	  uses[num_uses].regno = use->regno;
	  /* This register is no longer live after POINT - 1.  */
	  mark_regno_birth_or_death (NULL, delta, use->regno, false);
	  num_uses++;
	  if (new_last >= 0)
	    num_pending_births++;
	}
    }

  /* Update the MODEL_REF_PRESSURE and MODEL_MAX_PRESSURE for POINT.
     Also set each group pressure limit for POINT.  */
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      model_start_update_pressure (&model_before_pressure,
				   point, pci, delta[cl]);
    }

  /* Walk the model schedule backwards, starting immediately before POINT.  */
  print_p = false;
  if (point != model_curr_point)
    do
      {
	point--;
	insn = MODEL_INSN (point);
	queue = QUEUE_INDEX (insn);

	if (queue != QUEUE_SCHEDULED)
	  {
	    /* DELTA describes the effect of the move on the register pressure
	       after POINT.  Make it describe the effect on the pressure
	       before POINT.  */
	    i = 0;
	    while (i < num_uses)
	      {
		if (uses[i].last_use == point)
		  {
		    /* This register is now live again.  */
		    mark_regno_birth_or_death (NULL, delta,
					       uses[i].regno, true);

		    /* Remove this use from the array.  */
		    uses[i] = uses[num_uses - 1];
		    num_uses--;
		    num_pending_births--;
		  }
		else
		  i++;
	      }

	    if (sched_verbose >= 5)
	      {
		if (!print_p)
		  {
		    fprintf (sched_dump, MODEL_BAR);
		    fprintf (sched_dump, ";;\t\t| New pressure for model"
			     " schedule\n");
		    fprintf (sched_dump, MODEL_BAR);
		    print_p = true;
		  }

		fprintf (sched_dump, ";;\t\t| %3d %4d %-30s ",
			 point, INSN_UID (insn),
			 str_pattern_slim (PATTERN (insn)));
		for (pci = 0; pci < ira_pressure_classes_num; pci++)
		  {
		    cl = ira_pressure_classes[pci];
		    ref_pressure = MODEL_REF_PRESSURE (&model_before_pressure,
						       point, pci);
		    fprintf (sched_dump, " %s:[%d->%d]",
			     reg_class_names[ira_pressure_classes[pci]],
			     ref_pressure, ref_pressure + delta[cl]);
		  }
		fprintf (sched_dump, "\n");
	      }
	  }

	/* Adjust the pressure at POINT.  Set MIX to nonzero if POINT - 1
	   might have changed as well.  */
	mix = num_pending_births;
	for (pci = 0; pci < ira_pressure_classes_num; pci++)
	  {
	    cl = ira_pressure_classes[pci];
	    mix |= delta[cl];
	    mix |= model_update_pressure (&model_before_pressure,
					  point, pci, delta[cl]);
	  }
      }
    while (mix && point > model_curr_point);

  if (print_p)
    fprintf (sched_dump, MODEL_BAR);
}

/* After DEP, which was cancelled, has been resolved for insn NEXT,
   check whether the insn's pattern needs restoring.  */

static bool
must_restore_pattern_p (rtx_insn *next, dep_t dep)
{
  if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
    return false;

  if (DEP_TYPE (dep) == REG_DEP_CONTROL)
    {
      gcc_assert (ORIG_PAT (next) != NULL_RTX);
      gcc_assert (next == DEP_CON (dep));
    }
  else
    {
      struct dep_replacement *desc = DEP_REPLACE (dep);
      if (desc->insn != next)
	{
	  gcc_assert (*desc->loc == desc->orig);
	  return false;
	}
    }
  return true;
}

/* model_spill_cost (CL, P, P') returns the cost of increasing the
   pressure on CL from P to P'.  We use this to calculate a "base ECC",
   baseECC (CL, X), for each pressure class CL and each instruction X.
   Supposing X changes the pressure on CL from P to P', and that the
   maximum pressure on CL in the current model schedule is MP', then:

   * if X occurs before or at the next point of maximum pressure in
     the model schedule and P' > MP', then:

       baseECC (CL, X) = model_spill_cost (CL, MP, P')

     The idea is that the pressure after scheduling a fixed set of
     instructions -- in this case, the set up to and including the
     next maximum pressure point -- is going to be the same regardless
     of the order; we simply want to keep the intermediate pressure
     under control.  Thus X has a cost of zero unless scheduling it
     now would exceed MP'.

     If all increases in the set are by the same amount, no zero-cost
     instruction will ever cause the pressure to exceed MP'.  However,
     if X is instead moved past an instruction X' with pressure in the
     range (MP' - (P' - P), MP'), the pressure at X' will increase
     beyond MP'.  Since baseECC is very much a heuristic anyway,
     it doesn't seem worth the overhead of tracking cases like these.

     The cost of exceeding MP' is always based on the original maximum
     pressure MP.  This is so that going 2 registers over the original
     limit has the same cost regardless of whether it comes from two
     separate +1 deltas or from a single +2 delta.

   * if X occurs after the next point of maximum pressure in the model
     schedule and P' > P, then:

       baseECC (CL, X) = model_spill_cost (CL, MP, MP' + (P' - P))

     That is, if we move X forward across a point of maximum pressure,
     and if X increases the pressure by P' - P, then we conservatively
     assume that scheduling X next would increase the maximum pressure
     by P' - P.  Again, the cost of doing this is based on the original
     maximum pressure MP, for the same reason as above.

   * if P' < P, P > MP, and X occurs at or after the next point of
     maximum pressure, then:

       baseECC (CL, X) = -model_spill_cost (CL, MAX (MP, P'), P)

     That is, if we have already exceeded the original maximum pressure MP,
     and if X might reduce the maximum pressure again -- or at least push
     it further back, and thus allow more scheduling freedom -- it is given
     a negative cost to reflect the improvement.

   * otherwise,

       baseECC (CL, X) = 0

     In this case, X is not expected to affect the maximum pressure MP',
     so it has zero cost.

   We then create a combined value baseECC (X) that is the sum of
   baseECC (CL, X) for each pressure class CL.

   baseECC (X) could itself be used as the ECC value described above.
   However, this is often too conservative, in the sense that it
   tends to make high-priority instructions that increase pressure
   wait too long in cases where introducing a spill would be better.
   For this reason the final ECC is a priority-adjusted form of
   baseECC (X).  Specifically, we calculate:

     P (X) = INSN_PRIORITY (X) - insn_delay (X) - baseECC (X)
     baseP = MAX { P (X) | baseECC (X) <= 0 }

   Then:

     ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0)

   Thus an instruction's effect on pressure is ignored if it has a high
   enough priority relative to the ones that don't increase pressure.
   Negative values of baseECC (X) do not increase the priority of X
   itself, but they do make it harder for other instructions to
   increase the pressure further.

   This pressure cost is deliberately timid.  The intention has been
   to choose a heuristic that rarely interferes with the normal list
   scheduler in cases where that scheduler would produce good code.
   We simply want to curb some of its worst excesses.  */

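/* An illustrative sketch, not part of the pass: the priority adjustment
   above reads as straight-line code.  The guarded helper computes ECC (X)
   from a precomputed baseECC (X) and baseP; the parameter names are
   invented for the example.  Compare model_set_excess_costs below, which
   implements the same formulas over the whole ready queue.  */
#if 0
static int
ecc_from_base (int priority, int delay, int base_ecc, int base_p)
{
  int p = priority - delay - base_ecc;	/* P (X) above.  */
  int ecc = base_ecc;
  if (ecc > base_p - p)			/* MIN (baseP - P (X), baseECC (X)).  */
    ecc = base_p - p;
  return ecc > 0 ? ecc : 0;		/* MAX (..., 0).  */
}
#endif
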
/* Return the cost of increasing the pressure in class CL from FROM to TO.

   Here we use the very simplistic cost model that every register above
   sched_class_regs_num[CL] has a spill cost of 1.  We could use other
   measures instead, such as one based on MEMORY_MOVE_COST.  However:

   (1) In order for an instruction to be scheduled, the higher cost
       would need to be justified in a single saving of that many stalls.
       This is overly pessimistic, because the benefit of spilling is
       often to avoid a sequence of several short stalls rather than
       a single long one.

   (2) The cost is still arbitrary.  Because we are not allocating
       registers during scheduling, we have no way of knowing for
       sure how many memory accesses will be required by each spill,
       where the spills will be placed within the block, or even
       which block(s) will contain the spills.

   So a higher cost than 1 is often too conservative in practice,
   forcing blocks to contain unnecessary stalls instead of spill code.
   The simple cost below seems to be the best compromise.  It reduces
   the interference with the normal list scheduler, which helps make
   it more suitable for a default-on option.  */

static int
model_spill_cost (int cl, int from, int to)
{
  from = MAX (from, sched_class_regs_num[cl]);
  return MAX (to, from) - from;
}

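/* A worked example, not from the sources: if sched_class_regs_num[CL] is 4,
   model_spill_cost (CL, 3, 6) clamps FROM up to 4 and returns 6 - 4 = 2,
   i.e. two registers' worth of pressure beyond the limit, while
   model_spill_cost (CL, 2, 4) returns 0 because the increase stays within
   the limit.  */
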
/* Return baseECC (ira_pressure_classes[PCI], POINT), given that
   P = curr_reg_pressure[ira_pressure_classes[PCI]] and that
   P' = P + DELTA.  */

static int
model_excess_group_cost (struct model_pressure_group *group,
			 int point, int pci, int delta)
{
  int pressure, cl;

  cl = ira_pressure_classes[pci];
  if (delta < 0 && point >= group->limits[pci].point)
    {
      pressure = MAX (group->limits[pci].orig_pressure,
		      curr_reg_pressure[cl] + delta);
      return -model_spill_cost (cl, pressure, curr_reg_pressure[cl]);
    }

  if (delta > 0)
    {
      if (point > group->limits[pci].point)
	pressure = group->limits[pci].pressure + delta;
      else
	pressure = curr_reg_pressure[cl] + delta;

      if (pressure > group->limits[pci].pressure)
	return model_spill_cost (cl, group->limits[pci].orig_pressure,
				 pressure);
    }

  return 0;
}

/* Return baseECC (MODEL_INSN (INSN)).  Dump the costs to sched_dump
   if PRINT_P.  */

static int
model_excess_cost (rtx_insn *insn, bool print_p)
{
  int point, pci, cl, cost, this_cost, delta;
  struct reg_pressure_data *insn_reg_pressure;
  int insn_death[N_REG_CLASSES];

  calculate_reg_deaths (insn, insn_death);
  point = model_index (insn);
  insn_reg_pressure = INSN_REG_PRESSURE (insn);
  cost = 0;

  if (print_p)
    fprintf (sched_dump, ";;\t\t| %3d %4d | %4d %+3d |", point,
	     INSN_UID (insn), INSN_PRIORITY (insn), insn_delay (insn));

  /* Sum up the individual costs for each register class.  */
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      delta = insn_reg_pressure[pci].set_increase - insn_death[cl];
      this_cost = model_excess_group_cost (&model_before_pressure,
					   point, pci, delta);
      cost += this_cost;
      if (print_p)
	fprintf (sched_dump, " %s:[%d base cost %d]",
		 reg_class_names[cl], delta, this_cost);
    }

  if (print_p)
    fprintf (sched_dump, "\n");

  return cost;
}

/* Dump the next points of maximum pressure for GROUP.  */

static void
model_dump_pressure_points (struct model_pressure_group *group)
{
  int pci, cl;

  fprintf (sched_dump, ";;\t\t|  pressure points");
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      fprintf (sched_dump, " %s:[%d->%d at ", reg_class_names[cl],
	       curr_reg_pressure[cl], group->limits[pci].pressure);
      if (group->limits[pci].point < model_num_insns)
	fprintf (sched_dump, "%d:%d]", group->limits[pci].point,
		 INSN_UID (MODEL_INSN (group->limits[pci].point)));
      else
	fprintf (sched_dump, "end]");
    }
  fprintf (sched_dump, "\n");
}

/* Set INSN_REG_PRESSURE_EXCESS_COST_CHANGE for INSNS[0...COUNT-1].  */

static void
model_set_excess_costs (rtx_insn **insns, int count)
{
  int i, cost, priority_base, priority;
  bool print_p;

  /* Record the baseECC value for each instruction in the model schedule,
     except that negative costs are converted to zero ones now rather than
     later.  Do not assign a cost to debug instructions, since they must
     not change code-generation decisions.  Experiments suggest we also
     get better results by not assigning a cost to instructions from
     a different block.

     Set PRIORITY_BASE to baseP in the block comment above.  This is the
     maximum priority of the "cheap" instructions, which should always
     include the next model instruction.  */
  priority_base = 0;
  print_p = false;
  for (i = 0; i < count; i++)
    if (INSN_MODEL_INDEX (insns[i]))
      {
	if (sched_verbose >= 6 && !print_p)
	  {
	    fprintf (sched_dump, MODEL_BAR);
	    fprintf (sched_dump, ";;\t\t| Pressure costs for ready queue\n");
	    model_dump_pressure_points (&model_before_pressure);
	    fprintf (sched_dump, MODEL_BAR);
	    print_p = true;
	  }
	cost = model_excess_cost (insns[i], print_p);
	if (cost <= 0)
	  {
	    priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]) - cost;
	    priority_base = MAX (priority_base, priority);
	    cost = 0;
	  }
	INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = cost;
      }
  if (print_p)
    fprintf (sched_dump, MODEL_BAR);

  /* Use MAX (baseECC, 0) and baseP to calculate ECC for each
     instruction.  */
  for (i = 0; i < count; i++)
    {
      cost = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]);
      priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]);
      if (cost > 0 && priority > priority_base)
	{
	  cost += priority_base - priority;
	  INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = MAX (cost, 0);
	}
    }
}

/* Enum of rank_for_schedule heuristic decisions.  */
enum rfs_decision {
  RFS_LIVE_RANGE_SHRINK1, RFS_LIVE_RANGE_SHRINK2,
  RFS_SCHED_GROUP, RFS_PRESSURE_DELAY, RFS_PRESSURE_TICK,
  RFS_FEEDS_BACKTRACK_INSN, RFS_PRIORITY, RFS_SPECULATION,
  RFS_SCHED_RANK, RFS_LAST_INSN, RFS_PRESSURE_INDEX,
  RFS_DEP_COUNT, RFS_TIE, RFS_FUSION, RFS_N };

/* Corresponding strings for print outs.  */
static const char *rfs_str[RFS_N] = {
  "RFS_LIVE_RANGE_SHRINK1", "RFS_LIVE_RANGE_SHRINK2",
  "RFS_SCHED_GROUP", "RFS_PRESSURE_DELAY", "RFS_PRESSURE_TICK",
  "RFS_FEEDS_BACKTRACK_INSN", "RFS_PRIORITY", "RFS_SPECULATION",
  "RFS_SCHED_RANK", "RFS_LAST_INSN", "RFS_PRESSURE_INDEX",
  "RFS_DEP_COUNT", "RFS_TIE", "RFS_FUSION" };

/* Statistical breakdown of rank_for_schedule decisions.  */
struct rank_for_schedule_stats_t { unsigned stats[RFS_N]; };
static rank_for_schedule_stats_t rank_for_schedule_stats;

/* Return the result of comparing insns TMP and TMP2 and update
   Rank_For_Schedule statistics.  */
static int
rfs_result (enum rfs_decision decision, int result, rtx tmp, rtx tmp2)
{
  ++rank_for_schedule_stats.stats[decision];
  if (result < 0)
    INSN_LAST_RFS_WIN (tmp) = decision;
  else if (result > 0)
    INSN_LAST_RFS_WIN (tmp2) = decision;
  return result;
}

/* Sorting predicate to move DEBUG_INSNs to the top of ready list, while
   keeping normal insns in original order.  */

static int
rank_for_schedule_debug (const void *x, const void *y)
{
  rtx_insn *tmp = *(rtx_insn * const *) y;
  rtx_insn *tmp2 = *(rtx_insn * const *) x;

  /* Schedule debug insns as early as possible.  */
  if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
    return -1;
  else if (!DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
    return 1;
  else if (DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
    return INSN_LUID (tmp) - INSN_LUID (tmp2);
  else
    return INSN_RFS_DEBUG_ORIG_ORDER (tmp2) - INSN_RFS_DEBUG_ORIG_ORDER (tmp);
}

/* Returns a positive value if x is preferred; returns a negative value if
   y is preferred.  Should never return 0, since that will make the sort
   unstable.  */

static int
rank_for_schedule (const void *x, const void *y)
{
  rtx_insn *tmp = *(rtx_insn * const *) y;
  rtx_insn *tmp2 = *(rtx_insn * const *) x;
  int tmp_class, tmp2_class;
  int val, priority_val, info_val, diff;

  if (live_range_shrinkage_p)
    {
      /* Don't use SCHED_PRESSURE_MODEL -- it results in much worse
	 code.  */
      gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
      if ((INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp) < 0
	   || INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2) < 0)
	  && (diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
		      - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2))) != 0)
	return rfs_result (RFS_LIVE_RANGE_SHRINK1, diff, tmp, tmp2);
      /* Sort by INSN_LUID (original insn order), so that we make the
	 sort stable.  This minimizes instruction movement, thus
	 minimizing sched's effect on debugging and cross-jumping.  */
      return rfs_result (RFS_LIVE_RANGE_SHRINK2,
			 INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
    }

  /* The insn in a schedule group should be issued first.  */
  if (flag_sched_group_heuristic &&
      SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
    return rfs_result (RFS_SCHED_GROUP, SCHED_GROUP_P (tmp2) ? 1 : -1,
		       tmp, tmp2);

  /* Make sure that priority of TMP and TMP2 are initialized.  */
  gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));

  if (sched_fusion)
    {
      /* The instruction that has the same fusion priority as the last
	 instruction is the instruction we picked next.  If that is not
	 the case, we sort ready list firstly by fusion priority, then
	 by priority, and at last by INSN_LUID.  */
      int a = INSN_FUSION_PRIORITY (tmp);
      int b = INSN_FUSION_PRIORITY (tmp2);
      int last = -1;

      if (last_nondebug_scheduled_insn
	  && !NOTE_P (last_nondebug_scheduled_insn)
	  && BLOCK_FOR_INSN (tmp)
	     == BLOCK_FOR_INSN (last_nondebug_scheduled_insn))
	last = INSN_FUSION_PRIORITY (last_nondebug_scheduled_insn);

      if (a != last && b != last)
	{
	  if (a == b)
	    {
	      a = INSN_PRIORITY (tmp);
	      b = INSN_PRIORITY (tmp2);
	    }
	  if (a != b)
	    return rfs_result (RFS_FUSION, b - a, tmp, tmp2);
	  else
	    return rfs_result (RFS_FUSION,
			       INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
	}
      else if (a == b)
	{
	  gcc_assert (last_nondebug_scheduled_insn
		      && !NOTE_P (last_nondebug_scheduled_insn));
	  last = INSN_PRIORITY (last_nondebug_scheduled_insn);

	  a = abs (INSN_PRIORITY (tmp) - last);
	  b = abs (INSN_PRIORITY (tmp2) - last);
	  if (a != b)
	    return rfs_result (RFS_FUSION, a - b, tmp, tmp2);
	  else
	    return rfs_result (RFS_FUSION,
			       INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
	}
      else if (b == last)
	return rfs_result (RFS_FUSION, -1, tmp, tmp2);
      else
	return rfs_result (RFS_FUSION, 1, tmp, tmp2);
    }

  if (sched_pressure != SCHED_PRESSURE_NONE)
    {
      /* Prefer insn whose scheduling results in the smallest register
	 pressure excess.  */
      if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
		   + insn_delay (tmp)
		   - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
		   - insn_delay (tmp2))))
	return rfs_result (RFS_PRESSURE_DELAY, diff, tmp, tmp2);
    }

  if (sched_pressure != SCHED_PRESSURE_NONE
      && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var)
      && INSN_TICK (tmp2) != INSN_TICK (tmp))
    {
      diff = INSN_TICK (tmp) - INSN_TICK (tmp2);
      return rfs_result (RFS_PRESSURE_TICK, diff, tmp, tmp2);
    }

  /* If we are doing backtracking in this schedule, prefer insns that
     have forward dependencies with negative cost against an insn that
     was already scheduled.  */
  if (current_sched_info->flags & DO_BACKTRACKING)
    {
      priority_val = FEEDS_BACKTRACK_INSN (tmp2) - FEEDS_BACKTRACK_INSN (tmp);
      if (priority_val)
	return rfs_result (RFS_FEEDS_BACKTRACK_INSN, priority_val, tmp, tmp2);
    }

  /* Prefer insn with higher priority.  */
  priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);

  if (flag_sched_critical_path_heuristic && priority_val)
    return rfs_result (RFS_PRIORITY, priority_val, tmp, tmp2);

  if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) >= 0)
    {
      int autopref = autopref_rank_for_schedule (tmp, tmp2);
      if (autopref != 0)
	return autopref;
    }

  /* Prefer speculative insn with greater dependencies weakness.  */
  if (flag_sched_spec_insn_heuristic && spec_info)
    {
      ds_t ds1, ds2;
      dw_t dw1, dw2;
      int dw;

      ds1 = TODO_SPEC (tmp) & SPECULATIVE;
      if (ds1)
	dw1 = ds_weak (ds1);
      else
	dw1 = NO_DEP_WEAK;

      ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
      if (ds2)
	dw2 = ds_weak (ds2);
      else
	dw2 = NO_DEP_WEAK;

      dw = dw2 - dw1;
      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
	return rfs_result (RFS_SPECULATION, dw, tmp, tmp2);
    }

  info_val = (*current_sched_info->rank) (tmp, tmp2);
  if (flag_sched_rank_heuristic && info_val)
    return rfs_result (RFS_SCHED_RANK, info_val, tmp, tmp2);

  /* Compare insns based on their relation to the last scheduled
     non-debug insn.  */
  if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
    {
      dep_t dep1;
      dep_t dep2;
      rtx_insn *last = last_nondebug_scheduled_insn;

      /* Classify the instructions into three classes:
	 1) Data dependent on last schedule insn.
	 2) Anti/Output dependent on last scheduled insn.
	 3) Independent of last scheduled insn, or has latency of one.
	 Choose the insn from the highest numbered class if different.  */
      dep1 = sd_find_dep_between (last, tmp, true);

      if (dep1 == NULL || dep_cost (dep1) == 1)
	tmp_class = 3;
      else if (/* Data dependence.  */
	       DEP_TYPE (dep1) == REG_DEP_TRUE)
	tmp_class = 1;
      else
	tmp_class = 2;

      dep2 = sd_find_dep_between (last, tmp2, true);

      if (dep2 == NULL || dep_cost (dep2) == 1)
	tmp2_class = 3;
      else if (/* Data dependence.  */
	       DEP_TYPE (dep2) == REG_DEP_TRUE)
	tmp2_class = 1;
      else
	tmp2_class = 2;

      if ((val = tmp2_class - tmp_class))
	return rfs_result (RFS_LAST_INSN, val, tmp, tmp2);
    }

  /* Prefer instructions that occur earlier in the model schedule.  */
  if (sched_pressure == SCHED_PRESSURE_MODEL
      && INSN_BB (tmp) == target_bb && INSN_BB (tmp2) == target_bb)
    {
      diff = model_index (tmp) - model_index (tmp2);
      gcc_assert (diff != 0);
      return rfs_result (RFS_PRESSURE_INDEX, diff, tmp, tmp2);
    }

  /* Prefer the insn which has more later insns that depend on it.
     This gives the scheduler more freedom when scheduling later
     instructions at the expense of added register pressure.  */

  val = (dep_list_size (tmp2, SD_LIST_FORW)
	 - dep_list_size (tmp, SD_LIST_FORW));

  if (flag_sched_dep_count_heuristic && val != 0)
    return rfs_result (RFS_DEP_COUNT, val, tmp, tmp2);

  /* If insns are equally good, sort by INSN_LUID (original insn order),
     so that we make the sort stable.  This minimizes instruction movement,
     thus minimizing sched's effect on debugging and cross-jumping.  */
  return rfs_result (RFS_TIE, INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
}

/* Resort the array A in which only element at index N may be out of order.  */

HAIFA_INLINE static void
swap_sort (rtx_insn **a, int n)
{
  rtx_insn *insn = a[n - 1];
  int i = n - 2;

  while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
    {
      a[i + 1] = a[i];
      i -= 1;
    }
  a[i + 1] = insn;
}

/* Add INSN to the insn queue so that it can be executed at least
   N_CYCLES after the currently executing insn.  Preserve insns
   chain for debugging purposes.  REASON will be printed in debugging
   output.  */

HAIFA_INLINE static void
queue_insn (rtx_insn *insn, int n_cycles, const char *reason)
{
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
  rtx_insn_list *link = alloc_INSN_LIST (insn, insn_queue[next_q]);
  int new_tick;

  gcc_assert (n_cycles <= max_insn_queue_index);
  gcc_assert (!DEBUG_INSN_P (insn));

  insn_queue[next_q] = link;
  q_size += 1;

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
	       (*current_sched_info->print_insn) (insn, 0));

      fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
    }

  QUEUE_INDEX (insn) = next_q;

  if (current_sched_info->flags & DO_BACKTRACKING)
    {
      new_tick = clock_var + n_cycles;
      if (INSN_TICK (insn) == INVALID_TICK || INSN_TICK (insn) < new_tick)
	INSN_TICK (insn) = new_tick;

      if (INSN_EXACT_TICK (insn) != INVALID_TICK
	  && INSN_EXACT_TICK (insn) < clock_var + n_cycles)
	{
	  must_backtrack = true;
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, ";;\t\tcausing a backtrack.\n");
	}
    }
}

/* Remove INSN from queue.  */
static void
queue_remove (rtx_insn *insn)
{
  gcc_assert (QUEUE_INDEX (insn) >= 0);
  remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
  q_size--;
  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
}

/* Return a pointer to the bottom of the ready list, i.e. the insn
   with the lowest priority.  */

rtx_insn **
ready_lastpos (struct ready_list *ready)
{
  gcc_assert (ready->n_ready >= 1);
  return ready->vec + ready->first - ready->n_ready + 1;
}

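/* An illustrative layout, not from the sources: with veclen == 5,
   first == 4 and n_ready == 3, the vector holds

       index:    0       1      2     3     4
       vec:    unused  unused   C     B     A

   where A has the highest priority (ready_element (ready, 0) below) and C
   is the "bottom" element returned above (ready_lastpos (ready)[0]).  */
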
/* Add an element INSN to the ready list so that it ends up with the
   lowest/highest priority depending on FIRST_P.  */

HAIFA_INLINE static void
ready_add (struct ready_list *ready, rtx_insn *insn, bool first_p)
{
  if (!first_p)
    {
      if (ready->first == ready->n_ready)
	{
	  memmove (ready->vec + ready->veclen - ready->n_ready,
		   ready_lastpos (ready),
		   ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 1;
	}
      ready->vec[ready->first - ready->n_ready] = insn;
    }
  else
    {
      if (ready->first == ready->veclen - 1)
	{
	  if (ready->n_ready)
	    /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
	    memmove (ready->vec + ready->veclen - ready->n_ready - 1,
		     ready_lastpos (ready),
		     ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 2;
	}
      ready->vec[++(ready->first)] = insn;
    }

  ready->n_ready++;
  if (DEBUG_INSN_P (insn))
    ready->n_debug++;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
  QUEUE_INDEX (insn) = QUEUE_READY;

  if (INSN_EXACT_TICK (insn) != INVALID_TICK
      && INSN_EXACT_TICK (insn) < clock_var)
    {
      must_backtrack = true;
    }
}

/* Remove the element with the highest priority from the ready list and
   return it.  */

HAIFA_INLINE static rtx_insn *
ready_remove_first (struct ready_list *ready)
{
  rtx_insn *t;

  gcc_assert (ready->n_ready);
  t = ready->vec[ready->first--];
  ready->n_ready--;
  if (DEBUG_INSN_P (t))
    ready->n_debug--;
  /* If the queue becomes empty, reset it.  */
  if (ready->n_ready == 0)
    ready->first = ready->veclen - 1;

  gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
  QUEUE_INDEX (t) = QUEUE_NOWHERE;

  return t;
}

/* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we will try to choose the ready insn which
   permits starting the maximum number of insns on the same cycle.  */

/* Return a pointer to the element INDEX from the ready.  INDEX for
   insn with the highest priority is 0, and the lowest priority has
   N_READY - 1.  */

rtx_insn *
ready_element (struct ready_list *ready, int index)
{
  gcc_assert (ready->n_ready && index < ready->n_ready);

  return ready->vec[ready->first - index];
}

/* Remove the element INDEX from the ready list and return it.  INDEX
   for insn with the highest priority is 0, and the lowest priority
   has N_READY - 1.  */

HAIFA_INLINE static rtx_insn *
ready_remove (struct ready_list *ready, int index)
{
  rtx_insn *t;
  int i;

  if (index == 0)
    return ready_remove_first (ready);
  gcc_assert (ready->n_ready && index < ready->n_ready);
  t = ready->vec[ready->first - index];
  ready->n_ready--;
  if (DEBUG_INSN_P (t))
    ready->n_debug--;
  for (i = index; i < ready->n_ready; i++)
    ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
  QUEUE_INDEX (t) = QUEUE_NOWHERE;
  return t;
}

/* Remove INSN from the ready list.  */
static void
ready_remove_insn (rtx_insn *insn)
{
  int i;

  for (i = 0; i < readyp->n_ready; i++)
    if (ready_element (readyp, i) == insn)
      {
	ready_remove (readyp, i);
	return;
      }
  gcc_unreachable ();
}

/* Calculate the difference of the two statistics sets WAS and NOW.
   The result is returned in WAS.  */
static void
rank_for_schedule_stats_diff (rank_for_schedule_stats_t *was,
			      const rank_for_schedule_stats_t *now)
{
  for (int i = 0; i < RFS_N; ++i)
    was->stats[i] = now->stats[i] - was->stats[i];
}

/* Print rank_for_schedule statistics.  */
static void
print_rank_for_schedule_stats (const char *prefix,
			       const rank_for_schedule_stats_t *stats,
			       struct ready_list *ready)
{
  for (int i = 0; i < RFS_N; ++i)
    if (stats->stats[i])
      {
	fprintf (sched_dump, "%s%20s: %u", prefix, rfs_str[i],
		 stats->stats[i]);

	if (ready != NULL)
	  /* Print out insns that won due to RFS_<I>.  */
	  {
	    rtx_insn **p = ready_lastpos (ready);

	    fprintf (sched_dump, ":");
	    /* Start with 1 since least-priority insn didn't have any wins.  */
	    for (int j = 1; j < ready->n_ready; ++j)
	      if (INSN_LAST_RFS_WIN (p[j]) == i)
		fprintf (sched_dump, " %s",
			 (*current_sched_info->print_insn) (p[j], 0));
	  }
	fprintf (sched_dump, "\n");
      }
}

/* Separate DEBUG_INSNS from normal insns.  DEBUG_INSNs go to the end
   of the ready list.  */
static void
ready_sort_debug (struct ready_list *ready)
{
  int i;
  rtx_insn **first = ready_lastpos (ready);

  for (i = 0; i < ready->n_ready; ++i)
    if (!DEBUG_INSN_P (first[i]))
      INSN_RFS_DEBUG_ORIG_ORDER (first[i]) = i;

  qsort (first, ready->n_ready, sizeof (rtx), rank_for_schedule_debug);
}

/* Sort non-debug insns in the ready list READY by ascending priority.
   Assumes that all debug insns are separated from the real insns.  */
static void
ready_sort_real (struct ready_list *ready)
{
  int i;
  rtx_insn **first = ready_lastpos (ready);
  int n_ready_real = ready->n_ready - ready->n_debug;

  if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
    for (i = 0; i < n_ready_real; ++i)
      setup_insn_reg_pressure_info (first[i]);
  else if (sched_pressure == SCHED_PRESSURE_MODEL
	   && model_curr_point < model_num_insns)
    model_set_excess_costs (first, n_ready_real);

  rank_for_schedule_stats_t stats1;
  if (sched_verbose >= 4)
    stats1 = rank_for_schedule_stats;

  if (n_ready_real == 2)
    swap_sort (first, n_ready_real);
  else if (n_ready_real > 2)
    /* HACK: Disable qsort checking for now (PR82396).  */
    (qsort) (first, n_ready_real, sizeof (rtx), rank_for_schedule);

  if (sched_verbose >= 4)
    {
      rank_for_schedule_stats_diff (&stats1, &rank_for_schedule_stats);
      print_rank_for_schedule_stats (";;\t\t", &stats1, ready);
    }
}

/* Sort the ready list READY by ascending priority.  */
static void
ready_sort (struct ready_list *ready)
{
  if (ready->n_debug > 0)
    ready_sort_debug (ready);

  ready_sort_real (ready);
}

/* PREV is an insn that is ready to execute.  Adjust its priority if that
   will help shorten or lengthen register lifetimes as appropriate.  Also
   provide a hook for the target to tweak itself.  */

HAIFA_INLINE static void
adjust_priority (rtx_insn *prev)
{
  /* ??? There used to be code here to try and estimate how an insn
     affected register lifetimes, but it did it by looking at REG_DEAD
     notes, which we removed in schedule_region.  Nor did it try to
     take into account register pressure or anything useful like that.

     Revisit when we have a machine model to work with and not before.  */

  if (targetm.sched.adjust_priority)
    INSN_PRIORITY (prev) =
      targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
}

/* Advance DFA state STATE on one cycle.  */
void
advance_state (state_t state)
{
  if (targetm.sched.dfa_pre_advance_cycle)
    targetm.sched.dfa_pre_advance_cycle ();

  if (targetm.sched.dfa_pre_cycle_insn)
    state_transition (state,
		      targetm.sched.dfa_pre_cycle_insn ());

  state_transition (state, NULL);

  if (targetm.sched.dfa_post_cycle_insn)
    state_transition (state,
		      targetm.sched.dfa_post_cycle_insn ());

  if (targetm.sched.dfa_post_advance_cycle)
    targetm.sched.dfa_post_advance_cycle ();
}

/* Advance time on one cycle.  */
HAIFA_INLINE static void
advance_one_cycle (void)
{
  advance_state (curr_state);
  if (sched_verbose >= 4)
    fprintf (sched_dump, ";;\tAdvance the current state.\n");
}

/* Update register pressure after scheduling INSN.  */
static void
update_register_pressure (rtx_insn *insn)
{
  struct reg_use_data *use;
  struct reg_set_data *set;

  gcc_checking_assert (!DEBUG_INSN_P (insn));

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (dying_use_p (use))
      mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
				 use->regno, false);
  for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
    mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
			       set->regno, true);
}

/* Set up or update (if UPDATE_P) max register pressure (see its
   meaning in sched-int.h::_haifa_insn_data) for all current BB insns
   after insn AFTER.  */
static void
setup_insn_max_reg_pressure (rtx_insn *after, bool update_p)
{
  int i, p;
  bool eq_p;
  rtx_insn *insn;
  static int max_reg_pressure[N_REG_CLASSES];

  save_reg_pressure ();
  for (i = 0; i < ira_pressure_classes_num; i++)
    max_reg_pressure[ira_pressure_classes[i]]
      = curr_reg_pressure[ira_pressure_classes[i]];
  for (insn = NEXT_INSN (after);
       insn != NULL_RTX && ! BARRIER_P (insn)
       && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
       insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn))
      {
	eq_p = true;
	for (i = 0; i < ira_pressure_classes_num; i++)
	  {
	    p = max_reg_pressure[ira_pressure_classes[i]];
	    if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
	      {
		eq_p = false;
		INSN_MAX_REG_PRESSURE (insn)[i]
		  = max_reg_pressure[ira_pressure_classes[i]];
	      }
	  }
	if (update_p && eq_p)
	  break;
	update_register_pressure (insn);
	for (i = 0; i < ira_pressure_classes_num; i++)
	  if (max_reg_pressure[ira_pressure_classes[i]]
	      < curr_reg_pressure[ira_pressure_classes[i]])
	    max_reg_pressure[ira_pressure_classes[i]]
	      = curr_reg_pressure[ira_pressure_classes[i]];
      }
  restore_reg_pressure ();
}

/* Update the current register pressure after scheduling INSN.  Also
   update the max register pressure for the unscheduled insns of the
   current BB.  */
static void
update_reg_and_insn_max_reg_pressure (rtx_insn *insn)
{
  int i;
  int before[N_REG_CLASSES];

  for (i = 0; i < ira_pressure_classes_num; i++)
    before[i] = curr_reg_pressure[ira_pressure_classes[i]];
  update_register_pressure (insn);
  for (i = 0; i < ira_pressure_classes_num; i++)
    if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
      break;
  if (i < ira_pressure_classes_num)
    setup_insn_max_reg_pressure (insn, true);
}

/* Set up register pressure at the beginning of basic block BB, whose
   insns start after insn AFTER.  Also set up the max register pressure
   for all insns of the basic block.  */
void
sched_setup_bb_reg_pressure_info (basic_block bb, rtx_insn *after)
{
  gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
  initiate_bb_reg_pressure_info (bb);
  setup_insn_max_reg_pressure (after, false);
}

/* If doing predication while scheduling, verify whether INSN, which
   has just been scheduled, clobbers the conditions of any
   instructions that must be predicated in order to break their
   dependencies.  If so, remove them from the queues so that they will
   only be scheduled once their control dependency is resolved.  */

static void
check_clobbered_conditions (rtx_insn *insn)
{
  HARD_REG_SET t;
  int i;

  if ((current_sched_info->flags & DO_PREDICATION) == 0)
    return;

  find_all_hard_reg_sets (insn, &t, true);

 restart:
  for (i = 0; i < ready.n_ready; i++)
    {
      rtx_insn *x = ready_element (&ready, i);
      if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
	{
	  ready_remove_insn (x);
	  goto restart;
	}
    }
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      rtx_insn_list *link;
      int q = NEXT_Q_AFTER (q_ptr, i);

    restart_queue:
      for (link = insn_queue[q]; link; link = link->next ())
	{
	  rtx_insn *x = link->insn ();
	  if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
	    {
	      queue_remove (x);
	      goto restart_queue;
	    }
	}
    }
}

/* Return (in order):

   - positive if INSN adversely affects the pressure on one
     register class

   - negative if INSN reduces the pressure on one register class

   - 0 if INSN doesn't affect the pressure on any register class.  */

static int
model_classify_pressure (struct model_insn_info *insn)
{
  struct reg_pressure_data *reg_pressure;
  int death[N_REG_CLASSES];
  int pci, cl, sum;

  calculate_reg_deaths (insn->insn, death);
  reg_pressure = INSN_REG_PRESSURE (insn->insn);
  sum = 0;
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      if (death[cl] < reg_pressure[pci].set_increase)
	return 1;
      sum += reg_pressure[pci].set_increase - death[cl];
    }
  return sum;
}

/* Return true if INSN1 should come before INSN2 in the model schedule.  */

static int
model_order_p (struct model_insn_info *insn1, struct model_insn_info *insn2)
{
  unsigned int height1, height2;
  unsigned int priority1, priority2;

  /* Prefer instructions with a higher model priority.  */
  if (insn1->model_priority != insn2->model_priority)
    return insn1->model_priority > insn2->model_priority;

  /* Combine the length of the longest path of satisfied true dependencies
     that leads to each instruction (depth) with the length of the longest
     path of any dependencies that leads from the instruction (alap).
     Prefer instructions with the greatest combined length.  If the combined
     lengths are equal, prefer instructions with the greatest depth.

     The idea is that, if we have a set S of "equal" instructions that each
     have ALAP value X, and we pick one such instruction I, any true-dependent
     successors of I that have ALAP value X - 1 should be preferred over S.
     This encourages the schedule to be "narrow" rather than "wide".
     However, if I is a low-priority instruction that we decided to
     schedule because of its model_classify_pressure, and if there
     is a set of higher-priority instructions T, the aforementioned
     successors of I should not have the edge over T.  */
  height1 = insn1->depth + insn1->alap;
  height2 = insn2->depth + insn2->alap;
  if (height1 != height2)
    return height1 > height2;
  if (insn1->depth != insn2->depth)
    return insn1->depth > insn2->depth;

  /* We have no real preference between INSN1 and INSN2 as far as attempts
     to reduce pressure go.  Prefer instructions with higher priorities.  */
  priority1 = INSN_PRIORITY (insn1->insn);
  priority2 = INSN_PRIORITY (insn2->insn);
  if (priority1 != priority2)
    return priority1 > priority2;

  /* Use the original rtl sequence as a tie-breaker.  */
  return insn1 < insn2;
}

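/* A worked example, not from the sources: suppose INSN1 has depth 2 and
   alap 3 while INSN2 has depth 4 and alap 1.  Both have height 5, so the
   tie falls through to depth and the deeper INSN2 is preferred: it lies
   further along an already-started dependence chain, which keeps the
   schedule "narrow".  */
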
/* Add INSN to the model worklist immediately after PREV.  Add it to the
   beginning of the list if PREV is null.  */

static void
model_add_to_worklist_at (struct model_insn_info *insn,
			  struct model_insn_info *prev)
{
  gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_NOWHERE);
  QUEUE_INDEX (insn->insn) = QUEUE_READY;

  insn->prev = prev;
  if (prev)
    {
      insn->next = prev->next;
      prev->next = insn;
    }
  else
    {
      insn->next = model_worklist;
      model_worklist = insn;
    }
  if (insn->next)
    insn->next->prev = insn;
}

/* Remove INSN from the model worklist.  */

static void
model_remove_from_worklist (struct model_insn_info *insn)
{
  gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_READY);
  QUEUE_INDEX (insn->insn) = QUEUE_NOWHERE;

  if (insn->prev)
    insn->prev->next = insn->next;
  else
    model_worklist = insn->next;
  if (insn->next)
    insn->next->prev = insn->prev;
}

/* Add INSN to the model worklist.  Start looking for a suitable position
   between neighbors PREV and NEXT, testing at most MAX_SCHED_READY_INSNS
   insns either side.  A null PREV indicates the beginning of the list and
   a null NEXT indicates the end.  */

static void
model_add_to_worklist (struct model_insn_info *insn,
		       struct model_insn_info *prev,
		       struct model_insn_info *next)
{
  int count;

  count = MAX_SCHED_READY_INSNS;
  if (count > 0 && prev && model_order_p (insn, prev))
    do
      {
	count--;
	next = prev;
	prev = prev->prev;
      }
    while (count > 0 && prev && model_order_p (insn, prev));
  else
    while (count > 0 && next && model_order_p (next, insn))
      {
	count--;
	prev = next;
	next = next->next;
      }
  model_add_to_worklist_at (insn, prev);
}

/* INSN may now have a higher priority (in the model_order_p sense)
   than before.  Move it up the worklist if necessary.  */

static void
model_promote_insn (struct model_insn_info *insn)
{
  struct model_insn_info *prev;
  int count;

  prev = insn->prev;
  count = MAX_SCHED_READY_INSNS;
  while (count > 0 && prev && model_order_p (insn, prev))
    {
      count--;
      prev = prev->prev;
    }
  if (prev != insn->prev)
    {
      model_remove_from_worklist (insn);
      model_add_to_worklist_at (insn, prev);
    }
}

/* Add INSN to the end of the model schedule.  */

static void
model_add_to_schedule (rtx_insn *insn)
{
  unsigned int point;

  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  point = model_schedule.length ();
  model_schedule.quick_push (insn);
  INSN_MODEL_INDEX (insn) = point + 1;
}

/* Analyze the instructions that are to be scheduled, setting up
   MODEL_INSN_INFO (...) and model_num_insns accordingly.  Add ready
   instructions to model_worklist.  */

static void
model_analyze_insns (void)
{
  rtx_insn *start, *end, *iter;
  sd_iterator_def sd_it;
  dep_t dep;
  struct model_insn_info *insn, *con;

  model_num_insns = 0;
  start = PREV_INSN (current_sched_info->next_tail);
  end = current_sched_info->prev_head;
  for (iter = start; iter != end; iter = PREV_INSN (iter))
    if (NONDEBUG_INSN_P (iter))
      {
	insn = MODEL_INSN_INFO (iter);
	insn->insn = iter;
	FOR_EACH_DEP (iter, SD_LIST_FORW, sd_it, dep)
	  {
	    con = MODEL_INSN_INFO (DEP_CON (dep));
	    if (con->insn && insn->alap < con->alap + 1)
	      insn->alap = con->alap + 1;
	  }

	insn->old_queue = QUEUE_INDEX (iter);
	QUEUE_INDEX (iter) = QUEUE_NOWHERE;

	insn->unscheduled_preds = dep_list_size (iter, SD_LIST_HARD_BACK);
	if (insn->unscheduled_preds == 0)
	  model_add_to_worklist (insn, NULL, model_worklist);

	model_num_insns++;
      }
}

/* The global state describes the register pressure at the start of the
   model schedule.  Initialize GROUP accordingly.  */

static void
model_init_pressure_group (struct model_pressure_group *group)
{
  int pci, cl;

  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      group->limits[pci].pressure = curr_reg_pressure[cl];
      group->limits[pci].point = 0;
    }
  /* Use index model_num_insns to record the state after the last
     instruction in the model schedule.  */
  group->model = XNEWVEC (struct model_pressure_data,
			  (model_num_insns + 1) * ira_pressure_classes_num);
}

/* Record that MODEL_REF_PRESSURE (GROUP, POINT, PCI) is PRESSURE.
   Update the maximum pressure for the whole schedule.  */

static void
model_record_pressure (struct model_pressure_group *group,
		       int point, int pci, int pressure)
{
  MODEL_REF_PRESSURE (group, point, pci) = pressure;
  if (group->limits[pci].pressure < pressure)
    {
      group->limits[pci].pressure = pressure;
      group->limits[pci].point = point;
    }
}

/* INSN has just been added to the end of the model schedule.  Record its
   register-pressure information.  */

static void
model_record_pressures (struct model_insn_info *insn)
{
  struct reg_pressure_data *reg_pressure;
  int point, pci, cl, delta;
  int death[N_REG_CLASSES];

  point = model_index (insn->insn);
  if (sched_verbose >= 2)
    {
      if (point == 0)
	{
	  fprintf (sched_dump, "\n;;\tModel schedule:\n;;\n");
	  fprintf (sched_dump, ";;\t| idx insn | mpri hght dpth prio |\n");
	}
      fprintf (sched_dump, ";;\t| %3d %4d | %4d %4d %4d %4d | %-30s ",
	       point, INSN_UID (insn->insn), insn->model_priority,
	       insn->depth + insn->alap, insn->depth,
	       INSN_PRIORITY (insn->insn),
	       str_pattern_slim (PATTERN (insn->insn)));
    }
  calculate_reg_deaths (insn->insn, death);
  reg_pressure = INSN_REG_PRESSURE (insn->insn);
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      delta = reg_pressure[pci].set_increase - death[cl];
      if (sched_verbose >= 2)
	fprintf (sched_dump, " %s:[%d,%+d]", reg_class_names[cl],
		 curr_reg_pressure[cl], delta);
      model_record_pressure (&model_before_pressure, point, pci,
			     curr_reg_pressure[cl]);
    }
  if (sched_verbose >= 2)
    fprintf (sched_dump, "\n");
}

/* All instructions have been added to the model schedule.  Record the
   final register pressure in GROUP and set up all MODEL_MAX_PRESSUREs.  */

static void
model_record_final_pressures (struct model_pressure_group *group)
{
  int point, pci, max_pressure, ref_pressure, cl;

  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      /* Record the final pressure for this class.  */
      cl = ira_pressure_classes[pci];
      point = model_num_insns;
      ref_pressure = curr_reg_pressure[cl];
      model_record_pressure (group, point, pci, ref_pressure);

      /* Record the original maximum pressure.  */
      group->limits[pci].orig_pressure = group->limits[pci].pressure;

      /* Update the MODEL_MAX_PRESSURE for every point of the schedule.  */
      max_pressure = ref_pressure;
      MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
      while (point > 0)
	{
	  point--;
	  ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
	  max_pressure = MAX (max_pressure, ref_pressure);
	  MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
	}
    }
}

/* Update all successors of INSN, given that INSN has just been scheduled.  */

static void
model_add_successors_to_worklist (struct model_insn_info *insn)
{
  sd_iterator_def sd_it;
  struct model_insn_info *con;
  dep_t dep;

  FOR_EACH_DEP (insn->insn, SD_LIST_FORW, sd_it, dep)
    {
      con = MODEL_INSN_INFO (DEP_CON (dep));
      /* Ignore debug instructions, and instructions from other blocks.  */
      if (con->insn)
	{
	  con->unscheduled_preds--;

	  /* Update the depth field of each true-dependent successor.
	     Increasing the depth gives them a higher priority than
	     before.  */
	  if (DEP_TYPE (dep) == REG_DEP_TRUE && con->depth < insn->depth + 1)
	    {
	      con->depth = insn->depth + 1;
	      if (QUEUE_INDEX (con->insn) == QUEUE_READY)
		model_promote_insn (con);
	    }

	  /* If this is a true dependency, or if there are no remaining
	     dependencies for CON (meaning that CON only had non-true
	     dependencies), make sure that CON is on the worklist.
	     We don't bother otherwise because it would tend to fill the
	     worklist with a lot of low-priority instructions that are not
	     yet ready to issue.  */
	  if ((con->depth > 0 || con->unscheduled_preds == 0)
	      && QUEUE_INDEX (con->insn) == QUEUE_NOWHERE)
	    model_add_to_worklist (con, insn, insn->next);
	}
    }
}

/* Give INSN a higher priority than any current instruction, then give
   unscheduled predecessors of INSN a higher priority still.  If any of
   those predecessors are not on the model worklist, do the same for its
   predecessors, and so on.  */

static void
model_promote_predecessors (struct model_insn_info *insn)
{
  struct model_insn_info *pro, *first;
  sd_iterator_def sd_it;
  dep_t dep;

  if (sched_verbose >= 7)
    fprintf (sched_dump, ";;\t+--- priority of %d = %d, priority of",
	     INSN_UID (insn->insn), model_next_priority);
  insn->model_priority = model_next_priority++;
  model_remove_from_worklist (insn);
  model_add_to_worklist_at (insn, NULL);

  first = NULL;
  for (;;)
    {
      FOR_EACH_DEP (insn->insn, SD_LIST_HARD_BACK, sd_it, dep)
	{
	  pro = MODEL_INSN_INFO (DEP_PRO (dep));
	  /* The first test is to ignore debug instructions, and instructions
	     from other blocks.  */
	  if (pro->insn
	      && pro->model_priority != model_next_priority
	      && QUEUE_INDEX (pro->insn) != QUEUE_SCHEDULED)
	    {
	      pro->model_priority = model_next_priority;
	      if (sched_verbose >= 7)
		fprintf (sched_dump, " %d", INSN_UID (pro->insn));
	      if (QUEUE_INDEX (pro->insn) == QUEUE_READY)
		{
		  /* PRO is already in the worklist, but it now has
		     a higher priority than before.  Move it to the
		     appropriate place.  */
		  model_remove_from_worklist (pro);
		  model_add_to_worklist (pro, NULL, model_worklist);
		}
	      else
		{
		  /* PRO isn't in the worklist.  Recursively process
		     its predecessors until we find one that is.  */
		  pro->next = first;
		  first = pro;
		}
	    }
	}
      if (!first)
	break;
      insn = first;
      first = insn->next;
    }
  if (sched_verbose >= 7)
    fprintf (sched_dump, " = %d\n", model_next_priority);
  model_next_priority++;
}

/* Pick one instruction from model_worklist and process it.  */

static void
model_choose_insn (void)
{
  struct model_insn_info *insn, *fallback;
  int count;

  if (sched_verbose >= 7)
    {
      fprintf (sched_dump, ";;\t+--- worklist:\n");
      insn = model_worklist;
      count = MAX_SCHED_READY_INSNS;
      while (count > 0 && insn)
	{
	  fprintf (sched_dump, ";;\t+---   %d [%d, %d, %d, %d]\n",
		   INSN_UID (insn->insn), insn->model_priority,
		   insn->depth + insn->alap, insn->depth,
		   INSN_PRIORITY (insn->insn));
	  count--;
	  insn = insn->next;
	}
    }

  /* Look for a ready instruction whose model_classify_pressure is zero
     or negative, picking the highest-priority one.  Adding such an
     instruction to the schedule now should do no harm, and may actually
     help.

     Failing that, see whether there is an instruction with the highest
     extant model_priority that is not yet ready, but which would reduce
     pressure if it became ready.  This is designed to catch cases like:

       (set (mem (reg R1)) (reg R2))

     where the instruction is the last remaining use of R1 and where the
     value of R2 is not yet available (or vice versa).  The death of R1
     means that this instruction already reduces pressure.  It is of
     course possible that the computation of R2 involves other registers
     that are hard to kill, but such cases are rare enough for this
     heuristic to be a win in general.

     Failing that, just pick the highest-priority instruction in the
     worklist.  */
  count = MAX_SCHED_READY_INSNS;
  insn = model_worklist;
  fallback = 0;
  for (;;)
    {
      if (count == 0 || !insn)
	{
	  insn = fallback ? fallback : model_worklist;
	  break;
	}
      if (insn->unscheduled_preds)
	{
	  if (model_worklist->model_priority == insn->model_priority
	      && !fallback
	      && model_classify_pressure (insn) < 0)
	    fallback = insn;
	}
      else
	{
	  if (model_classify_pressure (insn) <= 0)
	    break;
	}
      count--;
      insn = insn->next;
    }

  if (sched_verbose >= 7 && insn != model_worklist)
    {
      if (insn->unscheduled_preds)
	fprintf (sched_dump, ";;\t+--- promoting insn %d, with dependencies\n",
		 INSN_UID (insn->insn));
      else
	fprintf (sched_dump, ";;\t+--- promoting insn %d, which is ready\n",
		 INSN_UID (insn->insn));
    }

  if (insn->unscheduled_preds)
    /* INSN isn't yet ready to issue.  Give all its predecessors the
       highest priority.  */
    model_promote_predecessors (insn);
  else
    {
      /* INSN is ready.  Add it to the end of model_schedule and
	 process its successors.  */
      model_add_successors_to_worklist (insn);
      model_remove_from_worklist (insn);
      model_add_to_schedule (insn->insn);
      model_record_pressures (insn);
      update_register_pressure (insn->insn);
    }
}

3816 /* Restore all QUEUE_INDEXs to the values that they had before
3817 model_start_schedule was called. */
3820 model_reset_queue_indices (void)
3825 FOR_EACH_VEC_ELT (model_schedule
, i
, insn
)
3826 QUEUE_INDEX (insn
) = MODEL_INSN_INFO (insn
)->old_queue
;
3829 /* We have calculated the model schedule and spill costs. Print a summary
3833 model_dump_pressure_summary (void)
3837 fprintf (sched_dump
, ";; Pressure summary:");
3838 for (pci
= 0; pci
< ira_pressure_classes_num
; pci
++)
3840 cl
= ira_pressure_classes
[pci
];
3841 fprintf (sched_dump
, " %s:%d", reg_class_names
[cl
],
3842 model_before_pressure
.limits
[pci
].pressure
);
3844 fprintf (sched_dump
, "\n\n");
3847 /* Initialize the SCHED_PRESSURE_MODEL information for the current
3848 scheduling region. */
3851 model_start_schedule (basic_block bb
)
3853 model_next_priority
= 1;
3854 model_schedule
.create (sched_max_luid
);
3855 model_insns
= XCNEWVEC (struct model_insn_info
, sched_max_luid
);
3857 gcc_assert (bb
== BLOCK_FOR_INSN (NEXT_INSN (current_sched_info
->prev_head
)));
3858 initiate_reg_pressure_info (df_get_live_in (bb
));
3860 model_analyze_insns ();
3861 model_init_pressure_group (&model_before_pressure
);
3862 while (model_worklist
)
3863 model_choose_insn ();
3864 gcc_assert (model_num_insns
== (int) model_schedule
.length ());
3865 if (sched_verbose
>= 2)
3866 fprintf (sched_dump
, "\n");
3868 model_record_final_pressures (&model_before_pressure
);
3869 model_reset_queue_indices ();
3871 XDELETEVEC (model_insns
);
3873 model_curr_point
= 0;
3874 initiate_reg_pressure_info (df_get_live_in (bb
));
3875 if (sched_verbose
>= 1)
3876 model_dump_pressure_summary ();
3879 /* Free the information associated with GROUP. */
3882 model_finalize_pressure_group (struct model_pressure_group
*group
)
3884 XDELETEVEC (group
->model
);
3887 /* Free the information created by model_start_schedule. */
3890 model_end_schedule (void)
3892 model_finalize_pressure_group (&model_before_pressure
);
3893 model_schedule
.release ();
3896 /* Prepare reg pressure scheduling for basic block BB. */
3898 sched_pressure_start_bb (basic_block bb
)
3900 /* Set the number of available registers for each class taking into account
3901 relative probability of current basic block versus function prologue and
3903 * If the basic block executes much more often than the prologue/epilogue
3904 (e.g., inside a hot loop), then cost of spill in the prologue is close to
3905 nil, so the effective number of available registers is
3906 (ira_class_hard_regs_num[cl] - fixed_regs_num[cl] - 0).
3907 * If the basic block executes as often as the prologue/epilogue,
3908 then spill in the block is as costly as in the prologue, so the effective
3909 number of available registers is
3910 (ira_class_hard_regs_num[cl] - fixed_regs_num[cl]
3911 - call_saved_regs_num[cl]).
3912 Note that all-else-equal, we prefer to spill in the prologue, since that
3913 allows "extra" registers for other basic blocks of the function.
3914 * If the basic block is on the cold path of the function and executes
3915 rarely, then we should always prefer to spill in the block, rather than
3916 in the prologue/epilogue. The effective number of available register is
3917 (ira_class_hard_regs_num[cl] - fixed_regs_num[cl]
3918 - call_saved_regs_num[cl]). */
3921 int entry_freq
= ENTRY_BLOCK_PTR_FOR_FN (cfun
)->frequency
;
3922 int bb_freq
= bb
->frequency
;
3926 if (entry_freq
== 0)
3927 entry_freq
= bb_freq
= 1;
3929 if (bb_freq
< entry_freq
)
3930 bb_freq
= entry_freq
;
3932 for (i
= 0; i
< ira_pressure_classes_num
; ++i
)
3934 enum reg_class cl
= ira_pressure_classes
[i
];
3935 sched_class_regs_num
[cl
] = ira_class_hard_regs_num
[cl
]
3936 - fixed_regs_num
[cl
];
3937 sched_class_regs_num
[cl
]
3938 -= (call_saved_regs_num
[cl
] * entry_freq
) / bb_freq
;
3942 if (sched_pressure
== SCHED_PRESSURE_MODEL
)
3943 model_start_schedule (bb
);
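/* A worked example of the register-budget formula above, using made-up
   numbers (24 hard regs in the class, 4 fixed, 8 call-saved; not derived
   from any real target).  Standalone and excluded from the build.  */
#if 0
#include <stdio.h>

int
main (void)
{
  const int hard_regs = 24, fixed = 4, call_saved = 8;
  /* (entry_freq, bb_freq) pairs: entry-like block, warm block, hot loop.  */
  const int freqs[3][2] = { {1000, 1000}, {1000, 2000}, {1000, 100000} };

  for (int i = 0; i < 3; i++)
    {
      int entry_freq = freqs[i][0];
      int bb_freq = freqs[i][1];
      if (bb_freq < entry_freq)
        bb_freq = entry_freq;
      int regs = hard_regs - fixed - (call_saved * entry_freq) / bb_freq;
      /* Prints 12, 16 and 20: the hotter the block relative to the
         prologue/epilogue, the more call-saved registers it may use.  */
      printf ("entry=%d bb=%d -> %d regs\n", entry_freq, bb_freq, regs);
    }
  return 0;
}
#endif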
/* A structure that holds local state for the loop in schedule_block.  */
struct sched_block_state
{
  /* True if no real insns have been scheduled in the current cycle.  */
  bool first_cycle_insn_p;
  /* True if a shadow insn has been scheduled in the current cycle, which
     means that no more normal insns can be issued.  */
  bool shadows_only_p;
  /* True if we're winding down a modulo schedule, which means that we only
     issue insns with INSN_EXACT_TICK set.  */
  bool modulo_epilogue;
  /* Initialized with the machine's issue rate every cycle, and updated
     by calls to the variable_issue hook.  */
  int can_issue_more;
};

/* INSN is the "currently executing insn".  Launch each insn which was
   waiting on INSN.  READY is the ready list which contains the insns
   that are ready to fire.  CLOCK is the current cycle.  The function
   returns necessary cycle advance after issuing the insn (it is not
   zero for insns in a schedule group).  */

static int
schedule_insn (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int i;
  int advance = 0;

  if (sched_verbose >= 1)
    {
      struct reg_pressure_data *pressure_info;
      fprintf (sched_dump, ";;\t%3i--> %s %-40s:",
               clock_var, (*current_sched_info->print_insn) (insn, 1),
               str_pattern_slim (PATTERN (insn)));

      if (recog_memoized (insn) < 0)
        fprintf (sched_dump, "nothing");
      else
        print_reservation (sched_dump, insn);
      pressure_info = INSN_REG_PRESSURE (insn);
      if (pressure_info != NULL)
        {
          fputc (':', sched_dump);
          for (i = 0; i < ira_pressure_classes_num; i++)
            fprintf (sched_dump, "%s%s%+d(%d)",
                     scheduled_insns.length () > 1
                     && INSN_LUID (insn)
                     < INSN_LUID (scheduled_insns[scheduled_insns.length () - 2]) ? "@" : "",
                     reg_class_names[ira_pressure_classes[i]],
                     pressure_info[i].set_increase, pressure_info[i].change);
        }
      if (sched_pressure == SCHED_PRESSURE_MODEL
          && model_curr_point < model_num_insns
          && model_index (insn) == model_curr_point)
        fprintf (sched_dump, ":model %d", model_curr_point);
      fputc ('\n', sched_dump);
    }

  if (sched_pressure == SCHED_PRESSURE_WEIGHTED && !DEBUG_INSN_P (insn))
    update_reg_and_insn_max_reg_pressure (insn);

  /* Scheduling instruction should have all its dependencies resolved and
     should have been removed from the ready list.  */
  gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));

  /* Reset debug insns invalidated by moving this insn.  */
  if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
    for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
         sd_iterator_cond (&sd_it, &dep);)
      {
        rtx_insn *dbg = DEP_PRO (dep);
        struct reg_use_data *use, *next;

        if (DEP_STATUS (dep) & DEP_CANCELLED)
          {
            sd_iterator_next (&sd_it);
            continue;
          }

        gcc_assert (DEBUG_INSN_P (dbg));

        if (sched_verbose >= 6)
          fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
                   INSN_UID (dbg));

        /* ??? Rather than resetting the debug insn, we might be able
           to emit a debug temp before the just-scheduled insn, but
           this would involve checking that the expression at the
           point of the debug insn is equivalent to the expression
           before the just-scheduled insn.  They might not be: the
           expression in the debug insn may depend on other insns not
           yet scheduled that set MEMs, REGs or even other debug
           insns.  It's not clear that attempting to preserve debug
           information in these cases is worth the effort, given how
           uncommon these resets are and the likelihood that the debug
           temps introduced won't survive the schedule change.  */
        INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
        df_insn_rescan (dbg);

        /* Unknown location doesn't use any registers.  */
        for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
          {
            struct reg_use_data *prev = use;

            /* Remove use from the cyclic next_regno_use chain first.  */
            while (prev->next_regno_use != use)
              prev = prev->next_regno_use;
            prev->next_regno_use = use->next_regno_use;
            next = use->next_insn_use;
            free (use);
          }
        INSN_REG_USE_LIST (dbg) = NULL;

        /* We delete rather than resolve these deps, otherwise we
           crash in sched_free_deps(), because forward deps are
           expected to be released before backward deps.  */
        sd_delete_dep (sd_it);
      }

  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  if (sched_pressure == SCHED_PRESSURE_MODEL
      && model_curr_point < model_num_insns
      && NONDEBUG_INSN_P (insn))
    {
      if (model_index (insn) == model_curr_point)
        do
          model_curr_point++;
        while (model_curr_point < model_num_insns
               && (QUEUE_INDEX (MODEL_INSN (model_curr_point))
                   == QUEUE_SCHEDULED));
      else
        model_recompute (insn);
      model_update_limit_points ();
      update_register_pressure (insn);
      if (sched_verbose >= 2)
        print_curr_reg_pressure ();
    }

  gcc_assert (INSN_TICK (insn) >= MIN_TICK);
  if (INSN_TICK (insn) > clock_var)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if following flags are set.  */
    gcc_assert (flag_sched_stalled_insns || sched_fusion);

  /* ??? Probably, if INSN is scheduled prematurely, we should leave
     INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
  INSN_TICK (insn) = clock_var;

  check_clobbered_conditions (insn);

  /* Update dependent instructions.  First, see if by scheduling this insn
     now we broke a dependence in a way that requires us to change another
     insn.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
       sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
    {
      struct dep_replacement *desc = DEP_REPLACE (dep);
      rtx_insn *pro = DEP_PRO (dep);
      if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED
          && desc != NULL && desc->insn == pro)
        apply_replacement (dep, false);
    }

  /* Go through and resolve forward dependencies.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
       sd_iterator_cond (&sd_it, &dep);)
    {
      rtx_insn *next = DEP_CON (dep);
      bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;

      /* Resolve the dependence between INSN and NEXT.
         sd_resolve_dep () moves current dep to another list thus
         advancing the iterator.  */
      sd_resolve_dep (sd_it);

      if (cancelled)
        {
          if (must_restore_pattern_p (next, dep))
            restore_pattern (dep, false);
          continue;
        }

      /* Don't bother trying to mark next as ready if insn is a debug
         insn.  If insn is the last hard dependency, it will have
         already been discounted.  */
      if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
        continue;

      if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
        {
          int effective_cost;

          effective_cost = try_ready (next);

          if (effective_cost >= 0
              && SCHED_GROUP_P (next)
              && advance < effective_cost)
            advance = effective_cost;
        }
      else
        /* Check always has only one forward dependence (to the first insn in
           the recovery block), therefore, this will be executed only once.  */
        {
          gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
          fix_recovery_deps (RECOVERY_BLOCK (insn));
        }
    }

  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (issue_rate > 1
      && GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER
      && !DEBUG_INSN_P (insn))
    {
      if (reload_completed)
        PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
      last_clock_var = clock_var;
    }

  if (nonscheduled_insns_begin != NULL_RTX)
    /* Indicate to debug counters that INSN is scheduled.  */
    nonscheduled_insns_begin = insn;

  return advance;
}

/* Functions for handling of notes.  */

/* Add note list that ends on FROM_END to the end of TO_ENDP.  */

void
concat_note_lists (rtx_insn *from_end, rtx_insn **to_endp)
{
  rtx_insn *from_start;

  /* It's easy when we have nothing to concat.  */
  if (from_end == NULL)
    return;

  /* It's also easy when destination is empty.  */
  if (*to_endp == NULL)
    {
      *to_endp = from_end;
      return;
    }

  from_start = from_end;
  while (PREV_INSN (from_start) != NULL)
    from_start = PREV_INSN (from_start);

  SET_PREV_INSN (from_start) = *to_endp;
  SET_NEXT_INSN (*to_endp) = from_start;
  *to_endp = from_end;
}
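/* A self-contained sketch of the splice performed by concat_note_lists,
   on a toy doubly-linked list.  Both lists are identified by their *last*
   element, as note lists are here.  Toy types only; excluded from the
   build.  */
#if 0
#include <stddef.h>

struct dnode
{
  struct dnode *prev, *next;
};

static void
concat_by_end (struct dnode *from_end, struct dnode **to_endp)
{
  struct dnode *from_start;

  if (from_end == NULL)
    return;                     /* Nothing to concat.  */
  if (*to_endp == NULL)
    {
      *to_endp = from_end;      /* Destination was empty.  */
      return;
    }

  /* Walk back to the head of the source list...  */
  from_start = from_end;
  while (from_start->prev != NULL)
    from_start = from_start->prev;

  /* ...and stitch it onto the destination's tail.  */
  from_start->prev = *to_endp;
  (*to_endp)->next = from_start;
  *to_endp = from_end;
}
#endif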
/* Delete notes between HEAD and TAIL and put them in the chain
   of notes ended by NOTE_LIST.  */

void
remove_notes (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *next_tail, *insn, *next;

  note_list = NULL;
  if (head == tail && !INSN_P (head))
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = next)
    {
      next = NEXT_INSN (insn);
      if (!NOTE_P (insn))
        continue;

      switch (NOTE_KIND (insn))
        {
        case NOTE_INSN_BASIC_BLOCK:
          continue;

        case NOTE_INSN_EPILOGUE_BEG:
          if (insn != tail)
            {
              remove_insn (insn);
              add_reg_note (next, REG_SAVE_NOTE,
                            GEN_INT (NOTE_INSN_EPILOGUE_BEG));
              break;
            }
          /* FALLTHRU */

        default:
          remove_insn (insn);

          /* Add the note to list that ends at NOTE_LIST.  */
          SET_PREV_INSN (insn) = note_list;
          SET_NEXT_INSN (insn) = NULL_RTX;
          if (note_list)
            SET_NEXT_INSN (note_list) = insn;
          note_list = insn;
          break;
        }

      gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
    }
}

/* A structure to record enough data to allow us to backtrack the scheduler to
   a previous state.  */
struct haifa_saved_data
{
  /* Next entry on the list.  */
  struct haifa_saved_data *next;

  /* Backtracking is associated with scheduling insns that have delay slots.
     DELAY_PAIR points to the structure that contains the insns involved, and
     the number of cycles between them.  */
  struct delay_pair *delay_pair;

  /* Data used by the frontend (e.g. sched-ebb or sched-rgn).  */
  void *fe_saved_data;
  /* Data used by the backend.  */
  void *be_saved_data;

  /* Copies of global state.  */
  int clock_var, last_clock_var;
  struct ready_list ready;
  state_t curr_state;

  rtx_insn *last_scheduled_insn;
  rtx_insn *last_nondebug_scheduled_insn;
  rtx_insn *nonscheduled_insns_begin;
  int cycle_issued_insns;

  /* Copies of state used in the inner loop of schedule_block.  */
  struct sched_block_state sched_block;

  /* We don't need to save q_ptr, as its value is arbitrary and we can set it
     to 0 when restoring.  */
  int q_size;
  rtx_insn_list **insn_queue;

  /* Describe pattern replacements that occurred since this backtrack point
     was queued.  */
  vec<dep_t> replacement_deps;
  vec<int> replace_apply;

  /* A copy of the next-cycle replacement vectors at the time of the backtrack
     point.  */
  vec<dep_t> next_cycle_deps;
  vec<int> next_cycle_apply;
};

/* A record, in reverse order, of all scheduled insns which have delay slots
   and may require backtracking.  */
static struct haifa_saved_data *backtrack_queue;
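/* A minimal sketch of the save/restore discipline used by the backtracking
   code below: each save point deep-copies the mutable state and is pushed
   on a LIFO chain; restoring pops the newest entry.  Toy state only, and
   not part of the pass; excluded from the build.  */
#if 0
#include <stdlib.h>
#include <string.h>

struct toy_state { int clock; int queue[8]; };
static struct toy_state current;

struct toy_save
{
  struct toy_save *next;
  struct toy_state snapshot;
};
static struct toy_save *save_stack;

static void
toy_save_point (void)
{
  struct toy_save *s = (struct toy_save *) malloc (sizeof *s);
  memcpy (&s->snapshot, &current, sizeof current);   /* Deep copy.  */
  s->next = save_stack;
  save_stack = s;
}

static void
toy_restore_last (void)
{
  struct toy_save *s = save_stack;
  save_stack = s->next;
  memcpy (&current, &s->snapshot, sizeof current);
  free (s);
}
#endif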
/* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
   to SET_P.  */
static void
mark_backtrack_feeds (rtx_insn *insn, int set_p)
{
  sd_iterator_def sd_it;
  dep_t dep;
  FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
    {
      FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
    }
}

/* Save the current scheduler state so that we can backtrack to it
   later if necessary.  PAIR gives the insns that make it necessary to
   save this point.  SCHED_BLOCK is the local state of schedule_block
   that needs to be saved.  */
static void
save_backtrack_point (struct delay_pair *pair,
                      struct sched_block_state sched_block)
{
  int i;
  struct haifa_saved_data *save = XNEW (struct haifa_saved_data);

  save->curr_state = xmalloc (dfa_state_size);
  memcpy (save->curr_state, curr_state, dfa_state_size);

  save->ready.first = ready.first;
  save->ready.n_ready = ready.n_ready;
  save->ready.n_debug = ready.n_debug;
  save->ready.veclen = ready.veclen;
  save->ready.vec = XNEWVEC (rtx_insn *, ready.veclen);
  memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));

  save->insn_queue = XNEWVEC (rtx_insn_list *, max_insn_queue_index + 1);
  save->q_size = q_size;
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      int q = NEXT_Q_AFTER (q_ptr, i);
      save->insn_queue[i] = copy_INSN_LIST (insn_queue[q]);
    }

  save->clock_var = clock_var;
  save->last_clock_var = last_clock_var;
  save->cycle_issued_insns = cycle_issued_insns;
  save->last_scheduled_insn = last_scheduled_insn;
  save->last_nondebug_scheduled_insn = last_nondebug_scheduled_insn;
  save->nonscheduled_insns_begin = nonscheduled_insns_begin;

  save->sched_block = sched_block;

  save->replacement_deps.create (0);
  save->replace_apply.create (0);
  save->next_cycle_deps = next_cycle_replace_deps.copy ();
  save->next_cycle_apply = next_cycle_apply.copy ();

  if (current_sched_info->save_state)
    save->fe_saved_data = (*current_sched_info->save_state) ();

  if (targetm.sched.alloc_sched_context)
    {
      save->be_saved_data = targetm.sched.alloc_sched_context ();
      targetm.sched.init_sched_context (save->be_saved_data, false);
    }
  else
    save->be_saved_data = NULL;

  save->delay_pair = pair;

  save->next = backtrack_queue;
  backtrack_queue = save;

  while (pair)
    {
      mark_backtrack_feeds (pair->i2, 1);
      INSN_TICK (pair->i2) = INVALID_TICK;
      INSN_EXACT_TICK (pair->i2) = clock_var + pair_delay (pair);
      SHADOW_P (pair->i2) = pair->stages == 0;
      pair = pair->next_same_i1;
    }
}

/* Walk the ready list and all queues.  If any insns have unresolved backwards
   dependencies, these must be cancelled deps, broken by predication.  Set or
   clear (depending on SET) the DEP_CANCELLED bit in DEP_STATUS.  */

static void
toggle_cancelled_flags (bool set)
{
  int i;
  sd_iterator_def sd_it;
  dep_t dep;

  if (ready.n_ready > 0)
    {
      rtx_insn **first = ready_lastpos (&ready);
      for (i = 0; i < ready.n_ready; i++)
        FOR_EACH_DEP (first[i], SD_LIST_BACK, sd_it, dep)
          if (!DEBUG_INSN_P (DEP_PRO (dep)))
            {
              if (set)
                DEP_STATUS (dep) |= DEP_CANCELLED;
              else
                DEP_STATUS (dep) &= ~DEP_CANCELLED;
            }
    }
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      int q = NEXT_Q_AFTER (q_ptr, i);
      rtx_insn_list *link;
      for (link = insn_queue[q]; link; link = link->next ())
        {
          rtx_insn *insn = link->insn ();
          FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
            if (!DEBUG_INSN_P (DEP_PRO (dep)))
              {
                if (set)
                  DEP_STATUS (dep) |= DEP_CANCELLED;
                else
                  DEP_STATUS (dep) &= ~DEP_CANCELLED;
              }
        }
    }
}

/* Undo the replacements that have occurred after backtrack point SAVE
   was placed.  */
static void
undo_replacements_for_backtrack (struct haifa_saved_data *save)
{
  while (!save->replacement_deps.is_empty ())
    {
      dep_t dep = save->replacement_deps.pop ();
      int apply_p = save->replace_apply.pop ();

      if (apply_p)
        restore_pattern (dep, true);
      else
        apply_replacement (dep, true);
    }
  save->replacement_deps.release ();
  save->replace_apply.release ();
}

/* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
   Restore their dependencies to an unresolved state, and mark them as
   queued nowhere.  */

static void
unschedule_insns_until (rtx_insn *insn)
{
  auto_vec<rtx_insn *> recompute_vec;

  /* Make two passes over the insns to be unscheduled.  First, we clear out
     dependencies and other trivial bookkeeping.  */
  for (;;)
    {
      rtx_insn *last;
      sd_iterator_def sd_it;
      dep_t dep;

      last = scheduled_insns.pop ();

      /* This will be changed by restore_backtrack_point if the insn is in
         any queue.  */
      QUEUE_INDEX (last) = QUEUE_NOWHERE;
      if (last != insn)
        INSN_TICK (last) = INVALID_TICK;

      if (modulo_ii > 0 && INSN_UID (last) < modulo_iter0_max_uid)
        modulo_insns_scheduled--;

      for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
           sd_iterator_cond (&sd_it, &dep);)
        {
          rtx_insn *con = DEP_CON (dep);
          sd_unresolve_dep (sd_it);
          if (!MUST_RECOMPUTE_SPEC_P (con))
            {
              MUST_RECOMPUTE_SPEC_P (con) = 1;
              recompute_vec.safe_push (con);
            }
        }

      if (last == insn)
        break;
    }

  /* A second pass, to update ready and speculation status for insns
     depending on the unscheduled ones.  The first pass must have
     popped the scheduled_insns vector up to the point where we
     restart scheduling, as recompute_todo_spec requires it to be
     up-to-date.  */
  while (!recompute_vec.is_empty ())
    {
      rtx_insn *con;

      con = recompute_vec.pop ();
      MUST_RECOMPUTE_SPEC_P (con) = 0;
      if (!sd_lists_empty_p (con, SD_LIST_HARD_BACK))
        {
          TODO_SPEC (con) = HARD_DEP;
          INSN_TICK (con) = INVALID_TICK;
          if (PREDICATED_PAT (con) != NULL_RTX)
            haifa_change_pattern (con, ORIG_PAT (con));
        }
      else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
        TODO_SPEC (con) = recompute_todo_spec (con, true);
    }
}

/* Restore scheduler state from the topmost entry on the backtracking queue.
   PSCHED_BLOCK_P points to the local data of schedule_block that we must
   overwrite with the saved data.
   The caller must already have called unschedule_insns_until.  */

static void
restore_last_backtrack_point (struct sched_block_state *psched_block)
{
  int i;
  struct haifa_saved_data *save = backtrack_queue;

  backtrack_queue = save->next;

  if (current_sched_info->restore_state)
    (*current_sched_info->restore_state) (save->fe_saved_data);

  if (targetm.sched.alloc_sched_context)
    {
      targetm.sched.set_sched_context (save->be_saved_data);
      targetm.sched.free_sched_context (save->be_saved_data);
    }

  /* Do this first since it clobbers INSN_TICK of the involved
     instructions.  */
  undo_replacements_for_backtrack (save);

  /* Clear the QUEUE_INDEX of everything in the ready list or one
     of the queues.  */
  if (ready.n_ready > 0)
    {
      rtx_insn **first = ready_lastpos (&ready);
      for (i = 0; i < ready.n_ready; i++)
        {
          rtx_insn *insn = first[i];
          QUEUE_INDEX (insn) = QUEUE_NOWHERE;
          INSN_TICK (insn) = INVALID_TICK;
        }
    }
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      int q = NEXT_Q_AFTER (q_ptr, i);

      for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
        {
          rtx_insn *x = link->insn ();
          QUEUE_INDEX (x) = QUEUE_NOWHERE;
          INSN_TICK (x) = INVALID_TICK;
        }
      free_INSN_LIST_list (&insn_queue[q]);
    }

  free (ready.vec);
  ready = save->ready;

  if (ready.n_ready > 0)
    {
      rtx_insn **first = ready_lastpos (&ready);
      for (i = 0; i < ready.n_ready; i++)
        {
          rtx_insn *insn = first[i];
          QUEUE_INDEX (insn) = QUEUE_READY;
          TODO_SPEC (insn) = recompute_todo_spec (insn, true);
          INSN_TICK (insn) = save->clock_var;
        }
    }

  q_ptr = 0;
  q_size = save->q_size;
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      int q = NEXT_Q_AFTER (q_ptr, i);

      insn_queue[q] = save->insn_queue[q];

      for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
        {
          rtx_insn *x = link->insn ();
          QUEUE_INDEX (x) = i;
          TODO_SPEC (x) = recompute_todo_spec (x, true);
          INSN_TICK (x) = save->clock_var + i;
        }
    }
  free (save->insn_queue);

  toggle_cancelled_flags (true);

  clock_var = save->clock_var;
  last_clock_var = save->last_clock_var;
  cycle_issued_insns = save->cycle_issued_insns;
  last_scheduled_insn = save->last_scheduled_insn;
  last_nondebug_scheduled_insn = save->last_nondebug_scheduled_insn;
  nonscheduled_insns_begin = save->nonscheduled_insns_begin;

  *psched_block = save->sched_block;

  memcpy (curr_state, save->curr_state, dfa_state_size);
  free (save->curr_state);

  mark_backtrack_feeds (save->delay_pair->i2, 0);

  gcc_assert (next_cycle_replace_deps.is_empty ());
  next_cycle_replace_deps = save->next_cycle_deps.copy ();
  next_cycle_apply = save->next_cycle_apply.copy ();

  free (save);

  for (save = backtrack_queue; save; save = save->next)
    {
      mark_backtrack_feeds (save->delay_pair->i2, 1);
    }
}

/* Discard all data associated with the topmost entry in the backtrack
   queue.  If RESET_TICK is false, we just want to free the data.  If true,
   we are doing this because we discovered a reason to backtrack.  In the
   latter case, also reset the INSN_TICK for the shadow insn.  */
static void
free_topmost_backtrack_point (bool reset_tick)
{
  struct haifa_saved_data *save = backtrack_queue;
  int i;

  backtrack_queue = save->next;

  if (reset_tick)
    {
      struct delay_pair *pair = save->delay_pair;
      while (pair)
        {
          INSN_TICK (pair->i2) = INVALID_TICK;
          INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
          pair = pair->next_same_i1;
        }
      undo_replacements_for_backtrack (save);
    }
  else
    {
      save->replacement_deps.release ();
      save->replace_apply.release ();
    }

  if (targetm.sched.free_sched_context)
    targetm.sched.free_sched_context (save->be_saved_data);
  if (current_sched_info->restore_state)
    free (save->fe_saved_data);
  for (i = 0; i <= max_insn_queue_index; i++)
    free_INSN_LIST_list (&save->insn_queue[i]);
  free (save->insn_queue);
  free (save->curr_state);
  free (save->ready.vec);
  free (save);
}

/* Free the entire backtrack queue.  */
static void
free_backtrack_queue (void)
{
  while (backtrack_queue)
    free_topmost_backtrack_point (false);
}

/* Apply a replacement described by DESC.  If IMMEDIATELY is false, we
   may have to postpone the replacement until the start of the next cycle,
   at which point we will be called again with IMMEDIATELY true.  This is
   only done for machines which have instruction packets with explicit
   parallelism however.  */
static void
apply_replacement (dep_t dep, bool immediately)
{
  struct dep_replacement *desc = DEP_REPLACE (dep);
  if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
    {
      next_cycle_replace_deps.safe_push (dep);
      next_cycle_apply.safe_push (1);
    }
  else
    {
      bool success;

      if (QUEUE_INDEX (desc->insn) == QUEUE_SCHEDULED)
        return;

      if (sched_verbose >= 5)
        fprintf (sched_dump, "applying replacement for insn %d\n",
                 INSN_UID (desc->insn));

      success = validate_change (desc->insn, desc->loc, desc->newval, 0);
      gcc_assert (success);

      update_insn_after_change (desc->insn);
      if ((TODO_SPEC (desc->insn) & (HARD_DEP | DEP_POSTPONED)) == 0)
        fix_tick_ready (desc->insn);

      if (backtrack_queue != NULL)
        {
          backtrack_queue->replacement_deps.safe_push (dep);
          backtrack_queue->replace_apply.safe_push (1);
        }
    }
}

/* We have determined that a pattern involved in DEP must be restored.
   If IMMEDIATELY is false, we may have to postpone the replacement
   until the start of the next cycle, at which point we will be called
   again with IMMEDIATELY true.  */
static void
restore_pattern (dep_t dep, bool immediately)
{
  rtx_insn *next = DEP_CON (dep);
  int tick = INSN_TICK (next);

  /* If we already scheduled the insn, the modified version is
     correct.  */
  if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
    return;

  if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
    {
      next_cycle_replace_deps.safe_push (dep);
      next_cycle_apply.safe_push (0);
      return;
    }

  if (DEP_TYPE (dep) == REG_DEP_CONTROL)
    {
      if (sched_verbose >= 5)
        fprintf (sched_dump, "restoring pattern for insn %d\n",
                 INSN_UID (next));
      haifa_change_pattern (next, ORIG_PAT (next));
    }
  else
    {
      struct dep_replacement *desc = DEP_REPLACE (dep);
      bool success;

      if (sched_verbose >= 5)
        fprintf (sched_dump, "restoring pattern for insn %d\n",
                 INSN_UID (desc->insn));
      tick = INSN_TICK (desc->insn);

      success = validate_change (desc->insn, desc->loc, desc->orig, 0);
      gcc_assert (success);
      update_insn_after_change (desc->insn);
      if (backtrack_queue != NULL)
        {
          backtrack_queue->replacement_deps.safe_push (dep);
          backtrack_queue->replace_apply.safe_push (0);
        }
    }
  INSN_TICK (next) = tick;
  if (TODO_SPEC (next) == DEP_POSTPONED)
    return;

  if (sd_lists_empty_p (next, SD_LIST_BACK))
    TODO_SPEC (next) = 0;
  else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
    TODO_SPEC (next) = HARD_DEP;
}

/* Perform pattern replacements that were queued up until the next
   cycle.  */
static void
perform_replacements_new_cycle (void)
{
  int i;
  dep_t dep;
  FOR_EACH_VEC_ELT (next_cycle_replace_deps, i, dep)
    {
      int apply_p = next_cycle_apply[i];
      if (apply_p)
        apply_replacement (dep, true);
      else
        restore_pattern (dep, true);
    }
  next_cycle_replace_deps.truncate (0);
  next_cycle_apply.truncate (0);
}

/* Compute INSN_TICK_ESTIMATE for INSN.  PROCESSED is a bitmap of
   instructions we've previously encountered, a set bit prevents
   recursion.  BUDGET is a limit on how far ahead we look, it is
   reduced on recursive calls.  Return true if we produced a good
   estimate, or false if we exceeded the budget.  */
static bool
estimate_insn_tick (bitmap processed, rtx_insn *insn, int budget)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int earliest = INSN_TICK (insn);

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      int t;

      if (DEP_STATUS (dep) & DEP_CANCELLED)
        continue;

      if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
        gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
      else
        {
          int cost = dep_cost (dep);
          if (cost >= budget)
            return false;
          if (!bitmap_bit_p (processed, INSN_LUID (pro)))
            {
              if (!estimate_insn_tick (processed, pro, budget - cost))
                return false;
            }
          gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
          t = INSN_TICK_ESTIMATE (pro) + cost;
          if (earliest == INVALID_TICK || t > earliest)
            earliest = t;
        }
    }
  bitmap_set_bit (processed, INSN_LUID (insn));
  INSN_TICK_ESTIMATE (insn) = earliest;
  return true;
}
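/* A self-contained sketch of the budgeted longest-path estimate computed
   by estimate_insn_tick, on a toy DAG.  Types, fields and the flat
   `processed' flag are hypothetical stand-ins for the rtl and bitmap
   machinery; excluded from the build.  */
#if 0
#include <stdbool.h>

#define TOY_INVALID (-1)

struct toy_insn
{
  int tick_estimate;
  int n_preds;
  struct toy_insn **preds;
  int *pred_cost;              /* Latency of each incoming dep.  */
  bool processed;
};

static bool
toy_estimate_tick (struct toy_insn *insn, int budget)
{
  int earliest = 0;            /* A root can issue at cycle 0.  */

  for (int i = 0; i < insn->n_preds; i++)
    {
      struct toy_insn *pro = insn->preds[i];
      int cost = insn->pred_cost[i];

      if (cost >= budget)
        return false;          /* Looked too far ahead; give up.  */
      if (!pro->processed
          && !toy_estimate_tick (pro, budget - cost))
        return false;

      int t = pro->tick_estimate + cost;
      if (earliest == TOY_INVALID || t > earliest)
        earliest = t;
    }

  insn->processed = true;
  insn->tick_estimate = earliest;
  return true;
}
#endif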
/* Examine the pair of insns in P, and estimate (optimistically, assuming
   infinite resources) the cycle in which the delayed shadow can be issued.
   Return the number of cycles that must pass before the real insn can be
   issued in order to meet this constraint.  */
static int
estimate_shadow_tick (struct delay_pair *p)
{
  auto_bitmap processed;
  int t;
  bool cutoff;

  cutoff = !estimate_insn_tick (processed, p->i2,
                                max_insn_queue_index + pair_delay (p));
  if (cutoff)
    return max_insn_queue_index;
  t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
  if (t > 0)
    return t;
  else
    return 0;
}

/* If INSN has no unresolved backwards dependencies, add it to the schedule and
   recursively resolve all its forward dependencies.  */
static void
resolve_dependencies (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* Don't use sd_lists_empty_p; it ignores debug insns.  */
  if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (insn)) != NULL
      || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (insn)) != NULL)
    return;

  if (sched_verbose >= 4)
    fprintf (sched_dump, ";;\tquickly resolving %d\n", INSN_UID (insn));

  if (QUEUE_INDEX (insn) >= 0)
    queue_remove (insn);

  scheduled_insns.safe_push (insn);

  /* Update dependent instructions.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
       sd_iterator_cond (&sd_it, &dep);)
    {
      rtx_insn *next = DEP_CON (dep);

      if (sched_verbose >= 4)
        fprintf (sched_dump, ";;\t\tdep %d against %d\n", INSN_UID (insn),
                 INSN_UID (next));

      /* Resolve the dependence between INSN and NEXT.
         sd_resolve_dep () moves current dep to another list thus
         advancing the iterator.  */
      sd_resolve_dep (sd_it);

      if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
        {
          resolve_dependencies (next);
        }
      else
        /* Check always has only one forward dependence (to the first insn in
           the recovery block), therefore, this will be executed only once.  */
        {
          gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
        }
    }
}

/* Return the head and tail pointers of ebb starting at BEG and ending
   at END.  */
void
get_ebb_head_tail (basic_block beg, basic_block end,
                   rtx_insn **headp, rtx_insn **tailp)
{
  rtx_insn *beg_head = BB_HEAD (beg);
  rtx_insn * beg_tail = BB_END (beg);
  rtx_insn * end_head = BB_HEAD (end);
  rtx_insn * end_tail = BB_END (end);

  /* Don't include any notes or labels at the beginning of the BEG
     basic block, or notes at the end of the END basic blocks.  */

  if (LABEL_P (beg_head))
    beg_head = NEXT_INSN (beg_head);

  while (beg_head != beg_tail)
    if (NOTE_P (beg_head))
      beg_head = NEXT_INSN (beg_head);
    else if (DEBUG_INSN_P (beg_head))
      {
        rtx_insn * note, *next;

        for (note = NEXT_INSN (beg_head);
             note != beg_tail;
             note = next)
          {
            next = NEXT_INSN (note);
            if (NOTE_P (note))
              {
                if (sched_verbose >= 9)
                  fprintf (sched_dump, "reorder %i\n", INSN_UID (note));

                reorder_insns_nobb (note, note, PREV_INSN (beg_head));

                if (BLOCK_FOR_INSN (note) != beg)
                  df_insn_change_bb (note, beg);
              }
            else if (!DEBUG_INSN_P (note))
              break;
          }

        break;
      }
    else
      break;

  *headp = beg_head;

  if (beg == end)
    end_head = beg_head;
  else if (LABEL_P (end_head))
    end_head = NEXT_INSN (end_head);

  while (end_head != end_tail)
    if (NOTE_P (end_tail))
      end_tail = PREV_INSN (end_tail);
    else if (DEBUG_INSN_P (end_tail))
      {
        rtx_insn * note, *prev;

        for (note = PREV_INSN (end_tail);
             note != end_head;
             note = prev)
          {
            prev = PREV_INSN (note);
            if (NOTE_P (note))
              {
                if (sched_verbose >= 9)
                  fprintf (sched_dump, "reorder %i\n", INSN_UID (note));

                reorder_insns_nobb (note, note, end_tail);

                if (end_tail == BB_END (end))
                  BB_END (end) = note;

                if (BLOCK_FOR_INSN (note) != end)
                  df_insn_change_bb (note, end);
              }
            else if (!DEBUG_INSN_P (note))
              break;
          }

        break;
      }
    else
      break;

  *tailp = end_tail;
}

/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */

int
no_real_insns_p (const rtx_insn *head, const rtx_insn *tail)
{
  while (head != NEXT_INSN (tail))
    {
      if (!NOTE_P (head) && !LABEL_P (head))
        return 0;
      head = NEXT_INSN (head);
    }
  return 1;
}

/* Restore-other-notes: NOTE_LIST is the end of a chain of notes
   previously found among the insns.  Insert them just before HEAD.  */
rtx_insn *
restore_other_notes (rtx_insn *head, basic_block head_bb)
{
  if (note_list != 0)
    {
      rtx_insn *note_head = note_list;

      if (head)
        head_bb = BLOCK_FOR_INSN (head);
      else
        head = NEXT_INSN (bb_note (head_bb));

      while (PREV_INSN (note_head))
        {
          set_block_for_insn (note_head, head_bb);
          note_head = PREV_INSN (note_head);
        }
      /* In the above cycle we've missed this note.  */
      set_block_for_insn (note_head, head_bb);

      SET_PREV_INSN (note_head) = PREV_INSN (head);
      SET_NEXT_INSN (PREV_INSN (head)) = note_head;
      SET_PREV_INSN (head) = note_list;
      SET_NEXT_INSN (note_list) = head;

      if (BLOCK_FOR_INSN (head) != head_bb)
        BB_END (head_bb) = note_list;

      head = note_head;
    }

  return head;
}

/* When we know we are going to discard the schedule due to a failed attempt
   at modulo scheduling, undo all replacements.  */
static void
undo_all_replacements (void)
{
  rtx_insn *insn;
  int i;

  FOR_EACH_VEC_ELT (scheduled_insns, i, insn)
    {
      sd_iterator_def sd_it;
      dep_t dep;

      /* See if we must undo a replacement.  */
      for (sd_it = sd_iterator_start (insn, SD_LIST_RES_FORW);
           sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
        {
          struct dep_replacement *desc = DEP_REPLACE (dep);
          if (desc != NULL)
            validate_change (desc->insn, desc->loc, desc->orig, 0);
        }
    }
}

/* Return first non-scheduled insn in the current scheduling block.
   This is mostly used for debug-counter purposes.  */
static rtx_insn *
first_nonscheduled_insn (void)
{
  rtx_insn *insn = (nonscheduled_insns_begin != NULL_RTX
                    ? nonscheduled_insns_begin
                    : current_sched_info->prev_head);

  do
    {
      insn = next_nonnote_nondebug_insn (insn);
    }
  while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);

  return insn;
}

/* Move insns that became ready to fire from queue to ready list.  */

static void
queue_to_ready (struct ready_list *ready)
{
  rtx_insn *insn;
  rtx_insn_list *link;
  rtx_insn *skip_insn;

  q_ptr = NEXT_Q (q_ptr);

  if (dbg_cnt (sched_insn) == false)
    /* If debug counter is activated do not requeue the first
       nonscheduled insn.  */
    skip_insn = first_nonscheduled_insn ();
  else
    skip_insn = NULL;

  /* Add all pending insns that can be scheduled without stalls to the
     ready list.  */
  for (link = insn_queue[q_ptr]; link; link = link->next ())
    {
      insn = link->insn ();
      q_size -= 1;

      if (sched_verbose >= 2)
        fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
                 (*current_sched_info->print_insn) (insn, 0));

      /* If the ready list is full, delay the insn for 1 cycle.
         See the comment in schedule_block for the rationale.  */
      if (!reload_completed
          && (ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
              || (sched_pressure == SCHED_PRESSURE_MODEL
                  /* Limit pressure recalculations to MAX_SCHED_READY_INSNS
                     instructions too.  */
                  && model_index (insn) > (model_curr_point
                                           + MAX_SCHED_READY_INSNS)))
          && !(sched_pressure == SCHED_PRESSURE_MODEL
               && model_curr_point < model_num_insns
               /* Always allow the next model instruction to issue.  */
               && model_index (insn) == model_curr_point)
          && !SCHED_GROUP_P (insn)
          && insn != skip_insn)
        {
          if (sched_verbose >= 2)
            fprintf (sched_dump, "keeping in queue, ready full\n");
          queue_insn (insn, 1, "ready full");
        }
      else
        {
          ready_add (ready, insn, false);
          if (sched_verbose >= 2)
            fprintf (sched_dump, "moving to ready without stalls\n");
        }
    }
  free_INSN_LIST_list (&insn_queue[q_ptr]);

  /* If there are no ready insns, stall until one is ready and add all
     of the pending insns at that point to the ready list.  */
  if (ready->n_ready == 0)
    {
      int stalls;

      for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
        {
          if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
            {
              for (; link; link = link->next ())
                {
                  insn = link->insn ();
                  q_size -= 1;

                  if (sched_verbose >= 2)
                    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
                             (*current_sched_info->print_insn) (insn, 0));

                  ready_add (ready, insn, false);
                  if (sched_verbose >= 2)
                    fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
                }
              free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);

              advance_one_cycle ();

              break;
            }

          advance_one_cycle ();
        }

      q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
      clock_var += stalls;
      if (sched_verbose >= 2)
        fprintf (sched_dump, ";;\tAdvancing clock by %d cycle[s] to %d\n",
                 stalls, clock_var);
    }
}
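/* A self-contained sketch of the circular bucket queue behind insn_queue,
   q_ptr and NEXT_Q_AFTER: bucket (head + delay) % N holds everything due
   in DELAY cycles, and advancing a cycle just moves the head.  Toy payload
   and names; excluded from the build.  */
#if 0
#define N_BUCKETS 8             /* Plays the role of max_insn_queue_index + 1.  */

struct toy_link { int uid; struct toy_link *next; };

static struct toy_link *buckets[N_BUCKETS];
static int head;                /* Plays the role of q_ptr.  */

/* Queue L to become ready in DELAY cycles (0 < DELAY < N_BUCKETS).  */
static void
toy_queue (struct toy_link *l, int delay)
{
  int q = (head + delay) % N_BUCKETS;   /* NEXT_Q_AFTER (head, delay).  */
  l->next = buckets[q];
  buckets[q] = l;
}

/* Advance one cycle and return the list of newly ready entries.  */
static struct toy_link *
toy_advance_cycle (void)
{
  head = (head + 1) % N_BUCKETS;        /* NEXT_Q (head).  */
  struct toy_link *ready = buckets[head];
  buckets[head] = 0;
  return ready;
}
#endif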
/* Used by early_queue_to_ready.  Determines whether it is "ok" to
   prematurely move INSN from the queue to the ready list.  Currently,
   if a target defines the hook 'is_costly_dependence', this function
   uses the hook to check whether there exist any dependences which are
   considered costly by the target, between INSN and other insns that
   have already been scheduled.  Dependences are checked up to Y cycles
   back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows
   controlling this value.
   (Other considerations could be taken into account instead (or in
   addition) depending on user flags and target hooks.  */

static bool
ok_for_early_queue_removal (rtx_insn *insn)
{
  if (targetm.sched.is_costly_dependence)
    {
      int n_cycles;
      int i = scheduled_insns.length ();
      for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
        {
          while (i-- > 0)
            {
              int cost;

              rtx_insn *prev_insn = scheduled_insns[i];

              if (!NOTE_P (prev_insn))
                {
                  dep_t dep;

                  dep = sd_find_dep_between (prev_insn, insn, true);

                  if (dep != NULL)
                    {
                      cost = dep_cost (dep);

                      if (targetm.sched.is_costly_dependence (dep, cost,
                                flag_sched_stalled_insns_dep - n_cycles))
                        return false;
                    }
                }

              if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
                break;
            }

          if (i == 0)
            break;
        }
    }

  return true;
}

/* Remove insns from the queue, before they become "ready" with respect
   to FU latency considerations.  */

static int
early_queue_to_ready (state_t state, struct ready_list *ready)
{
  rtx_insn *insn;
  rtx_insn_list *link;
  rtx_insn_list *next_link;
  rtx_insn_list *prev_link;
  bool move_to_ready;
  int cost;
  state_t temp_state = alloca (dfa_state_size);
  int stalls;
  int insns_removed = 0;

  /* Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
     function:

     X == 0: There is no limit on how many queued insns can be removed
             prematurely.  (flag_sched_stalled_insns = -1).

     X >= 1: Only X queued insns can be removed prematurely in each
             invocation.  (flag_sched_stalled_insns = X).

     Otherwise: Early queue removal is disabled.
             (flag_sched_stalled_insns = 0)  */

  if (! flag_sched_stalled_insns)
    return 0;

  for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
    {
      if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
        {
          if (sched_verbose > 6)
            fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);

          prev_link = 0;
          while (link)
            {
              next_link = link->next ();
              insn = link->insn ();
              if (insn && sched_verbose > 6)
                print_rtl_single (sched_dump, insn);

              memcpy (temp_state, state, dfa_state_size);
              if (recog_memoized (insn) < 0)
                /* non-negative to indicate that it's not ready
                   to avoid infinite Q->R->Q->R...  */
                cost = 0;
              else
                cost = state_transition (temp_state, insn);

              if (sched_verbose >= 6)
                fprintf (sched_dump, "transition cost = %d\n", cost);

              move_to_ready = false;
              if (cost < 0)
                {
                  move_to_ready = ok_for_early_queue_removal (insn);
                  if (move_to_ready == true)
                    {
                      /* move from Q to R */
                      q_size -= 1;
                      ready_add (ready, insn, false);

                      if (prev_link)
                        XEXP (prev_link, 1) = next_link;
                      else
                        insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;

                      free_INSN_LIST_node (link);

                      if (sched_verbose >= 2)
                        fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
                                 (*current_sched_info->print_insn) (insn, 0));

                      insns_removed++;
                      if (insns_removed == flag_sched_stalled_insns)
                        /* Remove no more than flag_sched_stalled_insns insns
                           from Q at a time.  */
                        return insns_removed;
                    }
                }

              if (move_to_ready == false)
                prev_link = link;

              link = next_link;
            } /* while link */
        } /* if link */
    } /* for stalls.. */

  return insns_removed;
}

/* Print the ready list for debugging purposes.
   If READY_TRY is non-zero then only print insns that max_issue
   will consider.  */
static void
debug_ready_list_1 (struct ready_list *ready, signed char *ready_try)
{
  rtx_insn **p;
  int i;

  if (ready->n_ready == 0)
    {
      fprintf (sched_dump, "\n");
      return;
    }

  p = ready_lastpos (ready);
  for (i = 0; i < ready->n_ready; i++)
    {
      if (ready_try != NULL && ready_try[ready->n_ready - i - 1])
        continue;

      fprintf (sched_dump, " %s:%d",
               (*current_sched_info->print_insn) (p[i], 0),
               INSN_LUID (p[i]));
      if (sched_pressure != SCHED_PRESSURE_NONE)
        fprintf (sched_dump, "(cost=%d",
                 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
      fprintf (sched_dump, ":prio=%d", INSN_PRIORITY (p[i]));
      if (INSN_TICK (p[i]) > clock_var)
        fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
      if (sched_pressure == SCHED_PRESSURE_MODEL)
        fprintf (sched_dump, ":idx=%d",
                 model_index (p[i]));
      if (sched_pressure != SCHED_PRESSURE_NONE)
        fprintf (sched_dump, ")");
    }
  fprintf (sched_dump, "\n");
}

/* Print the ready list.  Callable from debugger.  */
void
debug_ready_list (struct ready_list *ready)
{
  debug_ready_list_1 (ready, NULL);
}

/* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
   NOTEs.  This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
   replaces the epilogue note in the correct basic block.  */
static void
reemit_notes (rtx_insn *insn)
{
  rtx note;
  rtx_insn *last = insn;

  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    {
      if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
        {
          enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));

          last = emit_note_before (note_type, last);
          remove_note (insn, note);
        }
    }
}

/* Move INSN.  Reemit notes if needed.  Update CFG, if needed.  */
static void
move_insn (rtx_insn *insn, rtx_insn *last, rtx nt)
{
  if (PREV_INSN (insn) != last)
    {
      basic_block bb;
      rtx_insn *note;
      int jump_p = 0;

      bb = BLOCK_FOR_INSN (insn);

      /* BB_HEAD is either LABEL or NOTE.  */
      gcc_assert (BB_HEAD (bb) != insn);

      if (BB_END (bb) == insn)
        /* If this is last instruction in BB, move end marker one
           instruction up.  */
        {
          /* Jumps are always placed at the end of basic block.  */
          jump_p = control_flow_insn_p (insn);

          gcc_assert (!jump_p
                      || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
                          && IS_SPECULATION_BRANCHY_CHECK_P (insn))
                      || (common_sched_info->sched_pass_id
                          == SCHED_EBB_PASS));

          gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);

          BB_END (bb) = PREV_INSN (insn);
        }

      gcc_assert (BB_END (bb) != last);

      if (jump_p)
        /* We move the block note along with jump.  */
        {
          gcc_assert (nt);

          note = NEXT_INSN (insn);
          while (NOTE_NOT_BB_P (note) && note != nt)
            note = NEXT_INSN (note);

          if (note != nt
              && (LABEL_P (note)
                  || BARRIER_P (note)))
            note = NEXT_INSN (note);

          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
        }
      else
        note = insn;

      SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
      SET_PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);

      SET_NEXT_INSN (note) = NEXT_INSN (last);
      SET_PREV_INSN (NEXT_INSN (last)) = note;

      SET_NEXT_INSN (last) = insn;
      SET_PREV_INSN (insn) = last;

      bb = BLOCK_FOR_INSN (last);

      if (jump_p)
        {
          fix_jump_move (insn);

          if (BLOCK_FOR_INSN (insn) != bb)
            move_block_after_check (insn);

          gcc_assert (BB_END (bb) == last);
        }

      df_insn_change_bb (insn, bb);

      /* Update BB_END, if needed.  */
      if (BB_END (bb) == last)
        BB_END (bb) = insn;
    }

  SCHED_GROUP_P (insn) = 0;
}

/* Return true if scheduling INSN will finish current clock cycle.  */
static bool
insn_finishes_cycle_p (rtx_insn *insn)
{
  if (SCHED_GROUP_P (insn))
    /* After issuing INSN, rest of the sched_group will be forced to issue
       in order.  Don't make any plans for the rest of cycle.  */
    return true;

  /* Finishing the block will, apparently, finish the cycle.  */
  if (current_sched_info->insn_finishes_block_p
      && current_sched_info->insn_finishes_block_p (insn))
    return true;

  return false;
}

/* Helper for autopref_multipass_init.  Given a SET in PAT and whether
   we're expecting a memory WRITE or not, check that the insn is relevant to
   the autoprefetcher modelling code.  Return true iff that is the case.
   If it is relevant, record the base register of the memory op in BASE and
   the offset in OFFSET.  */

static bool
analyze_set_insn_for_autopref (rtx pat, bool write, rtx *base, int *offset)
{
  if (GET_CODE (pat) != SET)
    return false;

  rtx mem = write ? SET_DEST (pat) : SET_SRC (pat);
  if (!MEM_P (mem))
    return false;

  struct address_info info;
  decompose_mem_address (&info, mem);

  /* TODO: Currently only (base+const) addressing is supported.  */
  if (info.base == NULL || !REG_P (*info.base)
      || (info.disp != NULL && !CONST_INT_P (*info.disp)))
    return false;

  *base = *info.base;
  *offset = info.disp ? INTVAL (*info.disp) : 0;
  return true;
}

/* Functions to model cache auto-prefetcher.

   Some of the CPUs have cache auto-prefetcher, which /seems/ to initiate
   memory prefetches if it sees instructions with consecutive memory accesses
   in the instruction stream.  Details of such hardware units are not published,
   so we can only guess what exactly is going on there.
   In the scheduler, we model an abstract auto-prefetcher.  If there are memory
   insns in the ready list (or the queue) that have the same memory base, but
   different offsets, then we delay the insns with larger offsets until insns
   with smaller offsets get scheduled.  If PARAM_SCHED_AUTOPREF_QUEUE_DEPTH
   is "1", then we look at the ready list; if it is N>1, then we also look
   through N-1 queue entries.
   If the param is N>=0, then rank_for_schedule will consider auto-prefetching
   among its heuristics.
   Param value of "-1" disables modelling of the auto-prefetcher.  */

/* Initialize autoprefetcher model data for INSN.  */
static void
autopref_multipass_init (const rtx_insn *insn, int write)
{
  autopref_multipass_data_t data = &INSN_AUTOPREF_MULTIPASS_DATA (insn)[write];

  gcc_assert (data->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED);
  data->base = NULL_RTX;
  data->min_offset = 0;
  data->max_offset = 0;
  data->multi_mem_insn_p = false;
  /* Set insn entry initialized, but not relevant for auto-prefetcher.  */
  data->status = AUTOPREF_MULTIPASS_DATA_IRRELEVANT;

  rtx pat = PATTERN (insn);

  /* We have a multi-set insn like a load-multiple or store-multiple.
     We care about these as long as all the memory ops inside the PARALLEL
     have the same base register.  We care about the minimum and maximum
     offsets from that base but don't check for the order of those offsets
     within the PARALLEL insn itself.  */
  if (GET_CODE (pat) == PARALLEL)
    {
      int n_elems = XVECLEN (pat, 0);

      int i;
      rtx prev_base = NULL_RTX;
      int min_offset = 0;
      int max_offset = 0;

      for (i = 0; i < n_elems; i++)
        {
          rtx set = XVECEXP (pat, 0, i);
          if (GET_CODE (set) != SET)
            return;

          rtx base = NULL_RTX;
          int offset = 0;
          if (!analyze_set_insn_for_autopref (set, write, &base, &offset))
            return;

          if (i == 0)
            {
              prev_base = base;
              min_offset = offset;
              max_offset = offset;
            }
          /* Ensure that all memory operations in the PARALLEL use the same
             base register.  */
          else if (REGNO (base) != REGNO (prev_base))
            return;
          else
            {
              min_offset = MIN (min_offset, offset);
              max_offset = MAX (max_offset, offset);
            }
        }

      /* If we reached here then we have a valid PARALLEL of multiple memory
         ops with prev_base as the base and min_offset and max_offset
         containing the offsets range.  */
      gcc_assert (prev_base);
      data->base = prev_base;
      data->min_offset = min_offset;
      data->max_offset = max_offset;
      data->multi_mem_insn_p = true;
      data->status = AUTOPREF_MULTIPASS_DATA_NORMAL;

      return;
    }

  /* Otherwise this is a single set memory operation.  */
  rtx set = single_set (insn);
  if (set == NULL_RTX)
    return;

  if (!analyze_set_insn_for_autopref (set, write, &data->base,
                                      &data->min_offset))
    return;

  /* This insn is relevant for the auto-prefetcher.
     The base and offset fields will have been filled in the
     analyze_set_insn_for_autopref call above.  */
  data->status = AUTOPREF_MULTIPASS_DATA_NORMAL;
}

/* Helper for autopref_rank_for_schedule.  Given the data of two
   insns relevant to the auto-prefetcher modelling code DATA1 and DATA2
   return their comparison result.  Return 0 if there is no sensible
   ranking order for the two insns.  */

static int
autopref_rank_data (autopref_multipass_data_t data1,
                    autopref_multipass_data_t data2)
{
  /* Simple case when both insns are simple single memory ops.  */
  if (!data1->multi_mem_insn_p && !data2->multi_mem_insn_p)
    return data1->min_offset - data2->min_offset;

  /* Two load/store multiple insns.  Return 0 if the offset ranges
     overlap and the difference between the minimum offsets otherwise.  */
  else if (data1->multi_mem_insn_p && data2->multi_mem_insn_p)
    {
      int min1 = data1->min_offset;
      int max1 = data1->max_offset;
      int min2 = data2->min_offset;
      int max2 = data2->max_offset;

      if (max1 < min2 || min1 > max2)
        return min1 - min2;

      return 0;
    }

  /* The other two cases are a pair of a load/store multiple and
     a simple memory op.  Return 0 if the single op's offset is within the
     range of the multi-op insn and the difference between the single offset
     and the minimum offset of the multi-set insn otherwise.  */
  else if (data1->multi_mem_insn_p && !data2->multi_mem_insn_p)
    {
      int max1 = data1->max_offset;
      int min1 = data1->min_offset;

      if (data2->min_offset >= min1
          && data2->min_offset <= max1)
        return 0;

      return min1 - data2->min_offset;
    }
  else
    {
      int max2 = data2->max_offset;
      int min2 = data2->min_offset;

      if (data1->min_offset >= min2
          && data1->min_offset <= max2)
        return 0;

      return data1->min_offset - min2;
    }
}
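/* A worked example of the ranking rules above, using made-up offsets.
   A negative result means the first access should go first.  Standalone
   toy version of the comparison; excluded from the build.  */
#if 0
#include <stdio.h>

struct toy_range { int min, max; int multi; };

static int
toy_rank (struct toy_range a, struct toy_range b)
{
  if (!a.multi && !b.multi)
    return a.min - b.min;
  if (a.multi && b.multi)
    return (a.max < b.min || a.min > b.max) ? a.min - b.min : 0;
  if (a.multi)          /* A is a multi-op, B a single op.  */
    return (b.min >= a.min && b.min <= a.max) ? 0 : a.min - b.min;
  return (a.min >= b.min && a.min <= b.max) ? 0 : a.min - b.min;
}

int
main (void)
{
  struct toy_range load_r1_0 = { 0, 0, 0 };     /* [r1 + 0]        */
  struct toy_range load_r1_8 = { 8, 8, 0 };     /* [r1 + 8]        */
  struct toy_range ldm_r1_0_12 = { 0, 12, 1 };  /* [r1+0]..[r1+12] */

  printf ("%d\n", toy_rank (load_r1_0, load_r1_8));    /* -8: offset 0 first  */
  printf ("%d\n", toy_rank (load_r1_8, ldm_r1_0_12));  /* 0: ranges overlap   */
  return 0;
}
#endif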
5707 /* Helper function for rank_for_schedule sorting. */
5709 autopref_rank_for_schedule (const rtx_insn
*insn1
, const rtx_insn
*insn2
)
5712 for (int write
= 0; write
< 2 && !r
; ++write
)
5714 autopref_multipass_data_t data1
5715 = &INSN_AUTOPREF_MULTIPASS_DATA (insn1
)[write
];
5716 autopref_multipass_data_t data2
5717 = &INSN_AUTOPREF_MULTIPASS_DATA (insn2
)[write
];
5719 if (data1
->status
== AUTOPREF_MULTIPASS_DATA_UNINITIALIZED
)
5720 autopref_multipass_init (insn1
, write
);
5722 if (data2
->status
== AUTOPREF_MULTIPASS_DATA_UNINITIALIZED
)
5723 autopref_multipass_init (insn2
, write
);
5725 int irrel1
= data1
->status
== AUTOPREF_MULTIPASS_DATA_IRRELEVANT
;
5726 int irrel2
= data2
->status
== AUTOPREF_MULTIPASS_DATA_IRRELEVANT
;
5728 if (!irrel1
&& !irrel2
)
5729 r
= autopref_rank_data (data1
, data2
);
5731 r
= irrel2
- irrel1
;
5737 /* True if header of debug dump was printed. */
5738 static bool autopref_multipass_dfa_lookahead_guard_started_dump_p
;
5740 /* Helper for autopref_multipass_dfa_lookahead_guard.
5741 Return "1" if INSN1 should be delayed in favor of INSN2. */
5743 autopref_multipass_dfa_lookahead_guard_1 (const rtx_insn
*insn1
,
5744 const rtx_insn
*insn2
, int write
)
5746 autopref_multipass_data_t data1
5747 = &INSN_AUTOPREF_MULTIPASS_DATA (insn1
)[write
];
5748 autopref_multipass_data_t data2
5749 = &INSN_AUTOPREF_MULTIPASS_DATA (insn2
)[write
];
5751 if (data2
->status
== AUTOPREF_MULTIPASS_DATA_UNINITIALIZED
)
5752 autopref_multipass_init (insn2
, write
);
5753 if (data2
->status
== AUTOPREF_MULTIPASS_DATA_IRRELEVANT
)
5756 if (rtx_equal_p (data1
->base
, data2
->base
)
5757 && autopref_rank_data (data1
, data2
) > 0)
5759 if (sched_verbose
>= 2)
5761 if (!autopref_multipass_dfa_lookahead_guard_started_dump_p
)
5763 fprintf (sched_dump
,
5764 ";;\t\tnot trying in max_issue due to autoprefetch "
5766 autopref_multipass_dfa_lookahead_guard_started_dump_p
= true;
5769 fprintf (sched_dump
, " %d(%d)", INSN_UID (insn1
), INSN_UID (insn2
));
5780 We could have also hooked autoprefetcher model into
5781 first_cycle_multipass_backtrack / first_cycle_multipass_issue hooks
5782 to enable intelligent selection of "[r1+0]=r2; [r1+4]=r3" on the same cycle
5783 (e.g., once "[r1+0]=r2" is issued in max_issue(), "[r1+4]=r3" gets
5784 unblocked). We don't bother about this yet because target of interest
5785 (ARM Cortex-A15) can issue only 1 memory operation per cycle. */
5787 /* Implementation of first_cycle_multipass_dfa_lookahead_guard hook.
5788 Return "1" if INSN1 should not be considered in max_issue due to
5789 auto-prefetcher considerations. */
5791 autopref_multipass_dfa_lookahead_guard (rtx_insn
*insn1
, int ready_index
)
5795 /* Exit early if the param forbids this or if we're not entering here through
5796 normal haifa scheduling. This can happen if selective scheduling is
5797 explicitly enabled. */
5798 if (!insn_queue
|| PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH
) <= 0)
5801 if (sched_verbose
>= 2 && ready_index
== 0)
5802 autopref_multipass_dfa_lookahead_guard_started_dump_p
= false;
5804 for (int write
= 0; write
< 2; ++write
)
5806 autopref_multipass_data_t data1
5807 = &INSN_AUTOPREF_MULTIPASS_DATA (insn1
)[write
];
5809 if (data1
->status
== AUTOPREF_MULTIPASS_DATA_UNINITIALIZED
)
5810 autopref_multipass_init (insn1
, write
);
5811 if (data1
->status
== AUTOPREF_MULTIPASS_DATA_IRRELEVANT
)
5814 if (ready_index
== 0
5815 && data1
->status
== AUTOPREF_MULTIPASS_DATA_DONT_DELAY
)
5816 /* We allow only a single delay on priviledged instructions.
5817 Doing otherwise would cause infinite loop. */
5819 if (sched_verbose
>= 2)
5821 if (!autopref_multipass_dfa_lookahead_guard_started_dump_p
)
5823 fprintf (sched_dump
,
5824 ";;\t\tnot trying in max_issue due to autoprefetch "
5826 autopref_multipass_dfa_lookahead_guard_started_dump_p
= true;
5829 fprintf (sched_dump
, " *%d*", INSN_UID (insn1
));
5834 for (int i2
= 0; i2
< ready
.n_ready
; ++i2
)
5836 rtx_insn
*insn2
= get_ready_element (i2
);
5839 r
= autopref_multipass_dfa_lookahead_guard_1 (insn1
, insn2
, write
);
5842 if (ready_index
== 0)
5845 data1
->status
= AUTOPREF_MULTIPASS_DATA_DONT_DELAY
;
5851 if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH
) == 1)
5854 /* Everything from the current queue slot should have been moved to
5856 gcc_assert (insn_queue
[NEXT_Q_AFTER (q_ptr
, 0)] == NULL_RTX
);
5858 int n_stalls
= PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH
) - 1;
5859 if (n_stalls
> max_insn_queue_index
)
5860 n_stalls
= max_insn_queue_index
;
5862 for (int stalls
= 1; stalls
<= n_stalls
; ++stalls
)
5864 for (rtx_insn_list
*link
= insn_queue
[NEXT_Q_AFTER (q_ptr
, stalls
)];
5866 link
= link
->next ())
5868 rtx_insn
*insn2
= link
->insn ();
5869 r
= autopref_multipass_dfa_lookahead_guard_1 (insn1
, insn2
,
5873 /* Queue INSN1 until INSN2 can issue. */
5875 if (ready_index
== 0)
5876 data1
->status
= AUTOPREF_MULTIPASS_DATA_DONT_DELAY
;
5884 if (sched_verbose
>= 2
5885 && autopref_multipass_dfa_lookahead_guard_started_dump_p
5886 && (ready_index
== ready
.n_ready
- 1 || r
< 0))
5887 /* This does not /always/ trigger. We don't output EOL if the last
5888 insn is not recognized (INSN_CODE < 0) and lookahead_guard is not
5889 called. We can live with this. */
5890 fprintf (sched_dump
, "\n");
/* Define type for target data used in multipass scheduling.  */
#ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
# define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
#endif
typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;

/* The following structure describes an entry of the stack of choices.  */
struct choice_entry
{
  /* Ordinal number of the issued insn in the ready queue.  */
  int index;
  /* The number of the rest insns whose issues we should try.  */
  int rest;
  /* The number of issued essential insns.  */
  int n;
  /* State after issuing the insn.  */
  state_t state;
  /* Target-specific data.  */
  first_cycle_multipass_data_t target_data;
};

/* The following array is used to implement a stack of choices used in
   function max_issue.  */
static struct choice_entry *choice_stack;
/* This holds the value of the target dfa_lookahead hook.  */
int dfa_lookahead;

/* The following variable is the maximal number of tries of issuing
   insns for the first cycle multipass insn scheduling.  We define
   this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
   need this constraint if all real insns (with non-negative codes)
   had reservations because in this case the algorithm complexity is
   O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
   might be incomplete and such an insn might occur.  For such
   descriptions, the complexity of the algorithm (without the constraint)
   could reach DFA_LOOKAHEAD ** N, where N is the queue length.  */
static int max_lookahead_tries;
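
/* For example (illustrative numbers, not any target's defaults): with
   issue_rate == 2 and dfa_lookahead == 4, the initialization loop in
   max_issue below caps the search at 100 * 4 * 4 == 1600 lookahead tries
   per call.  */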
/* The following function returns the maximal (or close to maximal) number
   of insns which can be issued on the same cycle, one of which is the
   insn with the best rank (the first insn in READY).  To achieve this,
   the function tries different samples of ready insns.  READY is the
   current queue `ready'.  The global array READY_TRY reflects which
   insns are already issued in this try.  The function stops immediately
   if it has found a solution in which all insns can be issued.
   INDEX will contain the index of the best insn in READY.  This
   function is used only for first cycle multipass scheduling.

   This function expects recognized insns only.  All USEs,
   CLOBBERs, etc must be filtered elsewhere.  */
int
max_issue (struct ready_list *ready, int privileged_n, state_t state,
           bool first_cycle_insn_p, int *index)
{
  int n, i, all, n_ready, best, delay, tries_num;
  int more_issue;
  struct choice_entry *top;
  rtx_insn *insn;

  n_ready = ready->n_ready;
  gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
              && privileged_n <= n_ready);

  /* Init MAX_LOOKAHEAD_TRIES.  */
  if (max_lookahead_tries == 0)
    {
      max_lookahead_tries = 100;
      for (i = 0; i < issue_rate; i++)
        max_lookahead_tries *= dfa_lookahead;
    }

  /* Init max_points.  */
  more_issue = issue_rate - cycle_issued_insns;
  gcc_assert (more_issue >= 0);

  /* The number of the issued insns in the best solution.  */
  best = 0;

  top = choice_stack;

  /* Set initial state of the search.  */
  memcpy (top->state, state, dfa_state_size);
  top->rest = dfa_lookahead;
  top->n = 0;
  if (targetm.sched.first_cycle_multipass_begin)
    targetm.sched.first_cycle_multipass_begin (&top->target_data,
                                               ready_try, n_ready,
                                               first_cycle_insn_p);

  /* Count the number of the insns to search among.  */
  for (all = i = 0; i < n_ready; i++)
    if (!ready_try[i])
      all++;

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tmax_issue among %d insns:", all);
      debug_ready_list_1 (ready, ready_try);
    }

  /* I is the index of the insn to try next.  */
  i = 0;
  tries_num = 0;
  for (;;)
    {
      if (/* If we've reached a dead end or searched enough of what we have
             been asked...  */
          top->rest == 0
          /* or have nothing else to try...  */
          || i >= n_ready
          /* or should not issue more.  */
          || top->n >= more_issue)
        {
          /* ??? (... || i == n_ready).  */
          gcc_assert (i <= n_ready);

          /* We should not issue more than issue_rate instructions.  */
          gcc_assert (top->n <= more_issue);

          if (top == choice_stack)
            break;

          if (best < top - choice_stack)
            {
              if (privileged_n)
                {
                  n = privileged_n;
                  /* Try to find issued privileged insn.  */
                  while (n && !ready_try[--n])
                    ;
                }

              if (/* If all insns are equally good...  */
                  privileged_n == 0
                  /* Or a privileged insn will be issued.  */
                  || ready_try[n])
                /* Then we have a solution.  */
                {
                  best = top - choice_stack;
                  /* This is the index of the insn issued first in this
                     solution.  */
                  *index = choice_stack[1].index;
                  if (top->n == more_issue || best == all)
                    break;
                }
            }

          /* Set ready-list index to point to the last insn
             ('i++' below will advance it to the next insn).  */
          i = top->index;

          /* Backtrack.  */
          ready_try[i] = 0;

          if (targetm.sched.first_cycle_multipass_backtrack)
            targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
                                                           ready_try, n_ready);

          top--;
          memcpy (state, top->state, dfa_state_size);
        }
      else if (!ready_try[i])
        {
          tries_num++;
          if (tries_num > max_lookahead_tries)
            break;
          insn = ready_element (ready, i);
          delay = state_transition (state, insn);
          if (delay < 0)
            {
              if (state_dead_lock_p (state)
                  || insn_finishes_cycle_p (insn))
                /* We won't issue any more instructions in the next
                   choice_state.  */
                top->rest = 0;
              else
                top->rest--;

              n = top->n;
              if (memcmp (top->state, state, dfa_state_size) != 0)
                n++;

              /* Advance to the next choice_entry.  */
              top++;
              /* Initialize it.  */
              top->rest = dfa_lookahead;
              top->index = i;
              top->n = n;
              memcpy (top->state, state, dfa_state_size);
              ready_try[i] = 1;

              if (targetm.sched.first_cycle_multipass_issue)
                targetm.sched.first_cycle_multipass_issue (&top->target_data,
                                                           ready_try, n_ready,
                                                           insn,
                                                           &((top - 1)
                                                             ->target_data));

              i = -1;
            }
        }

      /* Increase ready-list index.  */
      i++;
    }

  if (targetm.sched.first_cycle_multipass_end)
    targetm.sched.first_cycle_multipass_end (best != 0
                                             ? &choice_stack[1].target_data
                                             : NULL);

  /* Restore the original state of the DFA.  */
  memcpy (state, choice_stack->state, dfa_state_size);

  return best;
}
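
/* An illustrative walk through the search above (names and numbers
   invented): suppose more_issue == 2 and the ready list holds insns A, B
   and C.  The search first "issues" A on top of choice_stack and advances
   the DFA state; if B can then issue in the same cycle, a solution (A, B)
   of size 2 is recorded and the search stops early because top->n reached
   more_issue.  If instead the DFA rejects both B and C after A, the search
   backtracks: A's ready_try slot is cleared, the saved DFA state is
   restored, and the walk restarts from B.  Whichever solution wins, *INDEX
   reports the ready-list position of its first insn.  */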
/* The following function chooses an insn from READY and modifies
   READY.  It is used only for first cycle multipass scheduling.
   Return:
   -1 if the cycle should be advanced,
   0 if INSN_PTR is set to point to the desirable insn,
   1 if choose_ready () should be restarted without advancing the cycle.  */
static int
choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
              rtx_insn **insn_ptr)
{
  if (dbg_cnt (sched_insn) == false)
    {
      if (nonscheduled_insns_begin == NULL_RTX)
        nonscheduled_insns_begin = current_sched_info->prev_head;

      rtx_insn *insn = first_nonscheduled_insn ();

      if (QUEUE_INDEX (insn) == QUEUE_READY)
        /* INSN is in the ready_list.  */
        {
          ready_remove_insn (insn);
          *insn_ptr = insn;
          return 0;
        }

      /* INSN is in the queue.  Advance cycle to move it to the ready list.  */
      gcc_assert (QUEUE_INDEX (insn) >= 0);
      return -1;
    }

  if (dfa_lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
      || DEBUG_INSN_P (ready_element (ready, 0)))
    {
      if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
        *insn_ptr = ready_remove_first_dispatch (ready);
      else
        *insn_ptr = ready_remove_first (ready);

      return 0;
    }
  else
    {
      /* Try to choose the best insn.  */
      int index = 0, i;
      rtx_insn *insn;

      insn = ready_element (ready, 0);
      if (INSN_CODE (insn) < 0)
        {
          *insn_ptr = ready_remove_first (ready);
          return 0;
        }

      /* Filter the search space.  */
      for (i = 0; i < ready->n_ready; i++)
        {
          ready_try[i] = 0;

          insn = ready_element (ready, i);

          /* If this insn is recognizable we should have already
             recognized it earlier.
             ??? Not very clear where this is supposed to be done.  */
          gcc_checking_assert (INSN_CODE (insn) >= 0
                               || recog_memoized (insn) < 0);
          if (INSN_CODE (insn) < 0)
            {
              /* Non-recognized insns at position 0 are handled above.  */
              gcc_assert (i > 0);
              ready_try[i] = 1;
              continue;
            }

          if (targetm.sched.first_cycle_multipass_dfa_lookahead_guard)
            {
              ready_try[i]
                = (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
                   (insn, i));

              if (ready_try[i] < 0)
                /* Queue instruction for several cycles.
                   We need to restart choose_ready as we have changed
                   the ready list.  */
                {
                  change_queue_index (insn, -ready_try[i]);
                  return 1;
                }

              /* Make sure that we didn't end up with 0'th insn filtered out.
                 Don't be tempted to make life easier for backends and just
                 requeue 0'th insn if (ready_try[0] == 0) and restart
                 choose_ready.  Backends should be very considerate about
                 requeueing instructions -- especially the highest priority
                 one at position 0.  */
              gcc_assert (ready_try[i] == 0 || i > 0);
              if (ready_try[i])
                continue;
            }

          gcc_assert (ready_try[i] == 0);
          /* INSN made it through the scrutiny of filters!  */
        }

      if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
        {
          *insn_ptr = ready_remove_first (ready);
          if (sched_verbose >= 4)
            fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
                     (*current_sched_info->print_insn) (*insn_ptr, 0));
          return 0;
        }
      else
        {
          if (sched_verbose >= 4)
            fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
                     (*current_sched_info->print_insn)
                     (ready_element (ready, index), 0));
          *insn_ptr = ready_remove (ready, index);
          return 0;
        }
    }
}
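
/* A sketch (not compiled) of the calling protocol, mirroring the main loop
   of schedule_block further down in this file: res < 0 finishes the current
   cycle, res > 0 restarts selection, and res == 0 yields the chosen insn.

     insn = NULL;
     res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);
     if (res < 0)
       break;
     if (res > 0)
       goto restart_choose_ready;
     gcc_assert (insn != NULL_RTX);  */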
/* This function is called when we have successfully scheduled a
   block.  It uses the schedule stored in the scheduled_insns vector
   to rearrange the RTL.  PREV_HEAD is used as the anchor to which we
   append the scheduled insns; TAIL is the insn after the scheduled
   block.  TARGET_BB is the argument passed to schedule_block.  */

static void
commit_schedule (rtx_insn *prev_head, rtx_insn *tail, basic_block *target_bb)
{
  unsigned int i;
  rtx_insn *insn;

  last_scheduled_insn = prev_head;
  for (i = 0;
       scheduled_insns.iterate (i, &insn);
       i++)
    {
      if (control_flow_insn_p (last_scheduled_insn)
          || current_sched_info->advance_target_bb (*target_bb, insn))
        {
          *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);

          if (sched_verbose)
            {
              rtx_insn *x;

              x = next_real_insn (last_scheduled_insn);
              dump_new_block_header (1, *target_bb, x, tail);
            }

          last_scheduled_insn = bb_note (*target_bb);
        }

      if (current_sched_info->begin_move_insn)
        (*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
      move_insn (insn, last_scheduled_insn,
                 current_sched_info->next_tail);
      if (!DEBUG_INSN_P (insn))
        reemit_notes (insn);
      last_scheduled_insn = insn;
    }

  scheduled_insns.truncate (0);
}
/* Examine all insns on the ready list and queue those which can't be
   issued in this cycle.  TEMP_STATE is temporary scheduler state we
   can use as scratch space.  If FIRST_CYCLE_INSN_P is true, no insns
   have been issued for the current cycle, which means it is valid to
   issue an asm statement.

   If SHADOWS_ONLY_P is true, we eliminate all real insns and only
   leave those for which SHADOW_P is true.  If MODULO_EPILOGUE is true,
   we only leave insns which have an INSN_EXACT_TICK.  */

static void
prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
                  bool shadows_only_p, bool modulo_epilogue_p)
{
  int i, pass;
  bool sched_group_found = false;
  int min_cost_group = 0;

  if (sched_fusion)
    return;

  for (i = 0; i < ready.n_ready; i++)
    {
      rtx_insn *insn = ready_element (&ready, i);
      if (SCHED_GROUP_P (insn))
        {
          sched_group_found = true;
          break;
        }
    }

  /* Make two passes if there's a SCHED_GROUP_P insn; make sure to handle
     such an insn first and note its cost.  If at least one SCHED_GROUP_P insn
     gets queued, then all other insns get queued for one cycle later.  */
  for (pass = sched_group_found ? 0 : 1; pass < 2; )
    {
      int n = ready.n_ready;
      for (i = 0; i < n; i++)
        {
          rtx_insn *insn = ready_element (&ready, i);
          int cost = 0;
          const char *reason = "resource conflict";

          if (DEBUG_INSN_P (insn))
            continue;

          if (sched_group_found && !SCHED_GROUP_P (insn)
              && ((pass == 0) || (min_cost_group >= 1)))
            {
              if (modulo_epilogue_p)
                continue;
              cost = min_cost_group;
              reason = "not in sched group";
            }
          else if (modulo_epilogue_p
                   && INSN_EXACT_TICK (insn) == INVALID_TICK)
            {
              cost = max_insn_queue_index;
              reason = "not an epilogue insn";
            }
          else if (shadows_only_p && !SHADOW_P (insn))
            {
              cost = 1;
              reason = "not a shadow";
            }
          else if (recog_memoized (insn) < 0)
            {
              if (!first_cycle_insn_p
                  && (GET_CODE (PATTERN (insn)) == ASM_INPUT
                      || asm_noperands (PATTERN (insn)) >= 0))
                cost = 1;
              reason = "asm";
            }
          else if (sched_pressure != SCHED_PRESSURE_NONE)
            {
              if (sched_pressure == SCHED_PRESSURE_MODEL
                  && INSN_TICK (insn) <= clock_var)
                {
                  memcpy (temp_state, curr_state, dfa_state_size);
                  if (state_transition (temp_state, insn) >= 0)
                    INSN_TICK (insn) = clock_var + 1;
                }
              cost = 0;
            }
          else
            {
              int delay_cost = 0;

              if (delay_htab)
                {
                  struct delay_pair *delay_entry;
                  delay_entry
                    = delay_htab->find_with_hash (insn,
                                                  htab_hash_pointer (insn));
                  while (delay_entry && delay_cost == 0)
                    {
                      delay_cost = estimate_shadow_tick (delay_entry);
                      if (delay_cost > max_insn_queue_index)
                        delay_cost = max_insn_queue_index;
                      delay_entry = delay_entry->next_same_i1;
                    }
                }

              memcpy (temp_state, curr_state, dfa_state_size);
              cost = state_transition (temp_state, insn);
              if (cost < 0)
                cost = 0;
              else if (cost == 0)
                cost = 1;
              if (cost < delay_cost)
                {
                  cost = delay_cost;
                  reason = "shadow tick";
                }
            }
          if (cost >= 1)
            {
              if (SCHED_GROUP_P (insn) && cost > min_cost_group)
                min_cost_group = cost;
              ready_remove (&ready, i);
              /* Normally we'd want to queue INSN for COST cycles.  However,
                 if SCHED_GROUP_P is set, then we must ensure that nothing
                 else comes between INSN and its predecessor.  If there is
                 some other insn ready to fire on the next cycle, then that
                 invariant would be broken.

                 So when SCHED_GROUP_P is set, just queue this insn for a
                 single cycle.  */
              queue_insn (insn, SCHED_GROUP_P (insn) ? 1 : cost, reason);
              if (i + 1 < n)
                break;
            }
        }
      if (i == n)
        pass++;
    }
}
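
/* Illustration of the cost folding reconstructed above (values invented):
   state_transition returns a negative value when INSN can issue in the
   current DFA state, which maps to cost 0 so INSN stays on the ready list;
   a return of 0 maps to cost 1, queueing INSN for one cycle; a positive
   return is a stall count and is used as the cost directly.  A delay-pair
   estimate can raise the cost further through the "shadow tick" path.  */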
/* Called when we detect that the schedule is impossible.  We examine the
   backtrack queue to find the earliest insn that caused this condition.  */

static struct haifa_saved_data *
verify_shadows (void)
{
  struct haifa_saved_data *save, *earliest_fail = NULL;
  for (save = backtrack_queue; save; save = save->next)
    {
      int t;
      struct delay_pair *pair = save->delay_pair;
      rtx_insn *i1 = pair->i1;

      for (; pair; pair = pair->next_same_i1)
        {
          rtx_insn *i2 = pair->i2;

          if (QUEUE_INDEX (i2) == QUEUE_SCHEDULED)
            continue;

          t = INSN_TICK (i1) + pair_delay (pair);
          if (t < clock_var)
            {
              if (sched_verbose >= 2)
                fprintf (sched_dump,
                         ";;\t\tfailed delay requirements for %d/%d (%d->%d)"
                         ", not ready\n",
                         INSN_UID (pair->i1), INSN_UID (pair->i2),
                         INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
              earliest_fail = save;
              break;
            }
          if (QUEUE_INDEX (i2) >= 0)
            {
              int queued_for = INSN_TICK (i2);

              if (t < queued_for)
                {
                  if (sched_verbose >= 2)
                    fprintf (sched_dump,
                             ";;\t\tfailed delay requirements for %d/%d"
                             " (%d->%d), queued too late\n",
                             INSN_UID (pair->i1), INSN_UID (pair->i2),
                             INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
                  earliest_fail = save;
                  break;
                }
            }
        }
    }

  return earliest_fail;
}
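
/* A worked example of the invariant checked here (numbers invented): if I1
   was scheduled at tick 10 and pair_delay (pair) is 3, its shadow I2 must
   issue at tick 13.  If the clock has already advanced past 13 while I2 is
   unscheduled, or I2 sits in the queue for a tick later than 13, the delay
   requirement can no longer be met, and the save point recorded when I1
   was scheduled becomes a candidate for the backtrack in schedule_block.  */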
/* Print instructions together with useful scheduling information between
   HEAD and TAIL (inclusive).  */

static void
dump_insn_stream (rtx_insn *head, rtx_insn *tail)
{
  fprintf (sched_dump, ";;\t| insn | prio |\n");

  rtx_insn *next_tail = NEXT_INSN (tail);
  for (rtx_insn *insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      int priority = NOTE_P (insn) ? 0 : INSN_PRIORITY (insn);
      const char *pattern = (NOTE_P (insn)
                             ? "note"
                             : str_pattern_slim (PATTERN (insn)));

      fprintf (sched_dump, ";;\t| %4d | %4d | %-30s ",
               INSN_UID (insn), priority, pattern);

      if (sched_verbose >= 4)
        {
          if (NOTE_P (insn) || LABEL_P (insn) || recog_memoized (insn) < 0)
            fprintf (sched_dump, "nothing");
          else
            print_reservation (sched_dump, insn);
        }
      fprintf (sched_dump, "\n");
    }
}
/* Use forward list scheduling to rearrange insns of block pointed to by
   TARGET_BB, possibly bringing insns from subsequent blocks in the same
   region.  */

bool
schedule_block (basic_block *target_bb, state_t init_state)
{
  int i;
  bool success = modulo_ii == 0;
  struct sched_block_state ls;
  state_t temp_state = NULL;  /* It is used for multipass scheduling.  */
  int sort_p, advance, start_clock_var;

  /* Head/tail info for this block.  */
  rtx_insn *prev_head = current_sched_info->prev_head;
  rtx_insn *next_tail = current_sched_info->next_tail;
  rtx_insn *head = NEXT_INSN (prev_head);
  rtx_insn *tail = PREV_INSN (next_tail);

  if ((current_sched_info->flags & DONT_BREAK_DEPENDENCIES) == 0
      && sched_pressure != SCHED_PRESSURE_MODEL && !sched_fusion)
    find_modifiable_mems (head, tail);

  /* We used to have code to avoid getting parameters moved from hard
     argument registers into pseudos.

     However, it was removed when it proved to be of marginal benefit
     and caused problems because schedule_block and compute_forward_dependences
     had different notions of what the "head" insn was.  */

  gcc_assert (head != tail || INSN_P (head));

  haifa_recovery_bb_recently_added_p = false;

  backtrack_queue = NULL;

  /* Debug info.  */
  if (sched_verbose)
    {
      dump_new_block_header (0, *target_bb, head, tail);

      if (sched_verbose >= 2)
        {
          dump_insn_stream (head, tail);
          memset (&rank_for_schedule_stats, 0,
                  sizeof (rank_for_schedule_stats));
        }
    }

  if (init_state == NULL)
    state_reset (curr_state);
  else
    memcpy (curr_state, init_state, dfa_state_size);

  /* Clear the ready list.  */
  ready.first = ready.veclen - 1;
  ready.n_ready = 0;
  ready.n_debug = 0;

  /* It is used for first cycle multipass scheduling.  */
  temp_state = alloca (dfa_state_size);

  if (targetm.sched.init)
    targetm.sched.init (sched_dump, sched_verbose, ready.veclen);

  /* We start inserting insns after PREV_HEAD.  */
  last_scheduled_insn = prev_head;
  last_nondebug_scheduled_insn = NULL;
  nonscheduled_insns_begin = NULL;

  gcc_assert ((NOTE_P (last_scheduled_insn)
               || DEBUG_INSN_P (last_scheduled_insn))
              && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);

  /* Initialize INSN_QUEUE.  Q_SIZE is the total number of insns in the
     queue.  */
  q_ptr = 0;
  q_size = 0;

  insn_queue = XALLOCAVEC (rtx_insn_list *, max_insn_queue_index + 1);
  memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));

  /* Start just before the beginning of time.  */
  clock_var = -1;

  /* We need queue and ready lists and clock_var be initialized
     in try_ready () (which is called through init_ready_list ()).  */
  (*current_sched_info->init_ready_list) ();

  if (sched_pressure)
    sched_pressure_start_bb (*target_bb);

  /* The algorithm is O(n^2) in the number of ready insns at any given
     time in the worst case.  Before reload we are more likely to have
     big lists so truncate them to a reasonable size.  */
  if (!reload_completed
      && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
    {
      ready_sort_debug (&ready);
      ready_sort_real (&ready);

      /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
         If there are debug insns, we know they're first.  */
      for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
        if (!SCHED_GROUP_P (ready_element (&ready, i)))
          break;

      if (sched_verbose >= 2)
        {
          fprintf (sched_dump,
                   ";;\t\tReady list on entry: %d insns: ", ready.n_ready);
          debug_ready_list (&ready);
          fprintf (sched_dump,
                   ";;\t\t before reload => truncated to %d insns\n", i);
        }

      /* Delay all insns past it for 1 cycle.  If debug counter is
         activated make an exception for the insn right after
         nonscheduled_insns_begin.  */
      {
        rtx_insn *skip_insn;

        if (dbg_cnt (sched_insn) == false)
          skip_insn = first_nonscheduled_insn ();
        else
          skip_insn = NULL;

        while (i < ready.n_ready)
          {
            rtx_insn *insn;

            insn = ready_remove (&ready, i);

            if (insn != skip_insn)
              queue_insn (insn, 1, "list truncated");
          }
        if (skip_insn)
          ready_add (&ready, skip_insn, true);
      }
    }

  /* Now we can restore basic block notes and maintain precise cfg.  */
  restore_bb_notes (*target_bb);

  last_clock_var = -1;

  advance = 0;

  gcc_assert (scheduled_insns.length () == 0);
  sort_p = TRUE;
  must_backtrack = false;
  modulo_insns_scheduled = 0;

  ls.modulo_epilogue = false;
  ls.first_cycle_insn_p = true;

  /* Loop until all the insns in BB are scheduled.  */
  while ((*current_sched_info->schedule_more_p) ())
    {
      perform_replacements_new_cycle ();
      do
        {
          start_clock_var = clock_var;

          clock_var++;

          advance_one_cycle ();

          /* Add to the ready list all pending insns that can be issued now.
             If there are no ready insns, increment clock until one
             is ready and add all pending insns at that point to the ready
             list.  */
          queue_to_ready (&ready);

          gcc_assert (ready.n_ready);

          if (sched_verbose >= 2)
            {
              fprintf (sched_dump, ";;\t\tReady list after queue_to_ready:");
              debug_ready_list (&ready);
            }
          advance -= clock_var - start_clock_var;
        }
      while (advance > 0);

      if (ls.modulo_epilogue)
        {
          int stage = clock_var / modulo_ii;
          if (stage > modulo_last_stage * 2 + 2)
            {
              if (sched_verbose >= 2)
                fprintf (sched_dump,
                         ";;\t\tmodulo scheduled succeeded at II %d\n",
                         modulo_ii);
              success = true;
              goto end_schedule;
            }
        }
      else if (modulo_ii > 0)
        {
          int stage = clock_var / modulo_ii;
          if (stage > modulo_max_stages)
            {
              if (sched_verbose >= 2)
                fprintf (sched_dump,
                         ";;\t\tfailing schedule due to excessive stages\n");
              goto end_schedule;
            }
          if (modulo_n_insns == modulo_insns_scheduled
              && stage > modulo_last_stage)
            {
              if (sched_verbose >= 2)
                fprintf (sched_dump,
                         ";;\t\tfound kernel after %d stages, II %d\n",
                         stage, modulo_ii);
              ls.modulo_epilogue = true;
            }
        }

      prune_ready_list (temp_state, true, false, ls.modulo_epilogue);
      if (ready.n_ready == 0)
        continue;
      if (must_backtrack)
        goto do_backtrack;

      ls.shadows_only_p = false;
      cycle_issued_insns = 0;
      ls.can_issue_more = issue_rate;
      for (;;)
        {
          rtx_insn *insn;
          int cost;
          bool asm_p;

          if (sort_p && ready.n_ready > 0)
            {
              /* Sort the ready list based on priority.  This must be
                 done every iteration through the loop, as schedule_insn
                 may have readied additional insns that will not be
                 sorted correctly.  */
              ready_sort (&ready);

              if (sched_verbose >= 2)
                {
                  fprintf (sched_dump,
                           ";;\t\tReady list after ready_sort: ");
                  debug_ready_list (&ready);
                }
            }

          /* We don't want md sched reorder to even see debug isns, so put
             them out right away.  */
          if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
              && (*current_sched_info->schedule_more_p) ())
            {
              while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
                {
                  rtx_insn *insn = ready_remove_first (&ready);
                  gcc_assert (DEBUG_INSN_P (insn));
                  (*current_sched_info->begin_schedule_ready) (insn);
                  scheduled_insns.safe_push (insn);
                  last_scheduled_insn = insn;
                  advance = schedule_insn (insn);
                  gcc_assert (advance == 0);
                  if (ready.n_ready > 0)
                    ready_sort (&ready);
                }
            }

          if (ls.first_cycle_insn_p && !ready.n_ready)
            break;

        resume_after_backtrack:
          /* Allow the target to reorder the list, typically for
             better instruction bundling.  */
          if (sort_p
              && (ready.n_ready == 0
                  || !SCHED_GROUP_P (ready_element (&ready, 0))))
            {
              if (ls.first_cycle_insn_p && targetm.sched.reorder)
                ls.can_issue_more
                  = targetm.sched.reorder (sched_dump, sched_verbose,
                                           ready_lastpos (&ready),
                                           &ready.n_ready, clock_var);
              else if (!ls.first_cycle_insn_p && targetm.sched.reorder2)
                ls.can_issue_more
                  = targetm.sched.reorder2 (sched_dump, sched_verbose,
                                            ready.n_ready
                                            ? ready_lastpos (&ready) : NULL,
                                            &ready.n_ready, clock_var);
            }

        restart_choose_ready:
          if (sched_verbose >= 2)
            {
              fprintf (sched_dump, ";;\tReady list (t = %3d): ",
                       clock_var);
              debug_ready_list (&ready);
              if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
                print_curr_reg_pressure ();
            }

          if (ready.n_ready == 0
              && ls.can_issue_more
              && reload_completed)
            {
              /* Allow scheduling insns directly from the queue in case
                 there's nothing better to do (ready list is empty) but
                 there are still vacant dispatch slots in the current cycle.  */
              if (sched_verbose >= 6)
                fprintf (sched_dump,";;\t\tSecond chance\n");
              memcpy (temp_state, curr_state, dfa_state_size);
              if (early_queue_to_ready (temp_state, &ready))
                ready_sort (&ready);
            }

          if (ready.n_ready == 0
              || !ls.can_issue_more
              || state_dead_lock_p (curr_state)
              || !(*current_sched_info->schedule_more_p) ())
            break;

          /* Select and remove the insn from the ready list.  */
          if (sort_p)
            {
              int res;

              insn = NULL;
              res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);

              if (res < 0)
                /* Finish cycle.  */
                break;
              if (res > 0)
                goto restart_choose_ready;

              gcc_assert (insn != NULL_RTX);
            }
          else
            insn = ready_remove_first (&ready);

          if (sched_pressure != SCHED_PRESSURE_NONE
              && INSN_TICK (insn) > clock_var)
            {
              ready_add (&ready, insn, true);
              advance = 1;
              break;
            }

          if (targetm.sched.dfa_new_cycle
              && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
                                              insn, last_clock_var,
                                              clock_var, &sort_p))
            /* SORT_P is used by the target to override sorting
               of the ready list.  This is needed when the target
               has modified its internal structures expecting that
               the insn will be issued next.  As we need the insn
               to have the highest priority (so it will be returned by
               the ready_remove_first call above), we invoke
               ready_add (&ready, insn, true).
               But, still, there is one issue: INSN can be later
               discarded by scheduler's front end through
               current_sched_info->can_schedule_ready_p, hence, won't
               be issued next.  */
            {
              ready_add (&ready, insn, true);
              break;
            }

          sort_p = TRUE;

          if (current_sched_info->can_schedule_ready_p
              && ! (*current_sched_info->can_schedule_ready_p) (insn))
            /* We normally get here only if we don't want to move
               insn from the split block.  */
            {
              TODO_SPEC (insn) = DEP_POSTPONED;
              goto restart_choose_ready;
            }

          if (delay_htab)
            {
              /* If this insn is the first part of a delay-slot pair, record a
                 backtrack point.  */
              struct delay_pair *delay_entry;
              delay_entry
                = delay_htab->find_with_hash (insn, htab_hash_pointer (insn));
              if (delay_entry)
                {
                  save_backtrack_point (delay_entry, ls);
                  if (sched_verbose >= 2)
                    fprintf (sched_dump, ";;\t\tsaving backtrack point\n");
                }
            }

          /* DECISION is made.  */

          if (modulo_ii > 0 && INSN_UID (insn) < modulo_iter0_max_uid)
            {
              modulo_insns_scheduled++;
              modulo_last_stage = clock_var / modulo_ii;
            }
          if (TODO_SPEC (insn) & SPECULATIVE)
            generate_recovery_code (insn);

          if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
            targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);

          /* Update counters, etc in the scheduler's front end.  */
          (*current_sched_info->begin_schedule_ready) (insn);
          scheduled_insns.safe_push (insn);
          gcc_assert (NONDEBUG_INSN_P (insn));
          last_nondebug_scheduled_insn = last_scheduled_insn = insn;

          if (recog_memoized (insn) >= 0)
            {
              memcpy (temp_state, curr_state, dfa_state_size);
              cost = state_transition (curr_state, insn);
              if (sched_pressure != SCHED_PRESSURE_WEIGHTED && !sched_fusion)
                gcc_assert (cost < 0);
              if (memcmp (temp_state, curr_state, dfa_state_size) != 0)
                cycle_issued_insns++;
              asm_p = false;
            }
          else
            asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
                     || asm_noperands (PATTERN (insn)) >= 0);

          if (targetm.sched.variable_issue)
            ls.can_issue_more =
              targetm.sched.variable_issue (sched_dump, sched_verbose,
                                            insn, ls.can_issue_more);
          /* A naked CLOBBER or USE generates no instruction, so do
             not count them against the issue rate.  */
          else if (GET_CODE (PATTERN (insn)) != USE
                   && GET_CODE (PATTERN (insn)) != CLOBBER)
            ls.can_issue_more--;
          advance = schedule_insn (insn);

          if (SHADOW_P (insn))
            ls.shadows_only_p = true;

          /* After issuing an asm insn we should start a new cycle.  */
          if (advance == 0 && asm_p)
            advance = 1;

          if (must_backtrack)
            break;

          if (advance != 0)
            break;

          ls.first_cycle_insn_p = false;
          if (ready.n_ready > 0)
            prune_ready_list (temp_state, false, ls.shadows_only_p,
                              ls.modulo_epilogue);
        }

    do_backtrack:
      if (!must_backtrack)
        for (i = 0; i < ready.n_ready; i++)
          {
            rtx_insn *insn = ready_element (&ready, i);
            if (INSN_EXACT_TICK (insn) == clock_var)
              {
                must_backtrack = true;
                clock_var++;
                break;
              }
          }
      if (must_backtrack && modulo_ii > 0)
        {
          if (modulo_backtracks_left == 0)
            goto end_schedule;
          modulo_backtracks_left--;
        }
      while (must_backtrack)
        {
          struct haifa_saved_data *failed;
          rtx_insn *failed_insn;

          must_backtrack = false;
          failed = verify_shadows ();
          gcc_assert (failed);

          failed_insn = failed->delay_pair->i1;
          /* Clear these queues.  */
          perform_replacements_new_cycle ();
          toggle_cancelled_flags (false);
          unschedule_insns_until (failed_insn);
          while (failed != backtrack_queue)
            free_topmost_backtrack_point (true);
          restore_last_backtrack_point (&ls);
          if (sched_verbose >= 2)
            fprintf (sched_dump, ";;\t\trewind to cycle %d\n", clock_var);
          /* Delay by at least a cycle.  This could cause additional
             backtracking.  */
          queue_insn (failed_insn, 1, "backtracked");
          advance = 0;
          if (must_backtrack)
            continue;
          if (ready.n_ready > 0)
            goto resume_after_backtrack;
          else
            {
              if (clock_var == 0 && ls.first_cycle_insn_p)
                goto end_schedule;
              advance = 1;
              break;
            }
        }
      ls.first_cycle_insn_p = true;
    }
  if (ls.modulo_epilogue)
    success = true;
 end_schedule:
  if (!ls.first_cycle_insn_p || advance)
    advance_one_cycle ();
  perform_replacements_new_cycle ();
  if (modulo_ii > 0)
    {
      /* Once again, debug insn suckiness: they can be on the ready list
         even if they have unresolved dependencies.  To make our view
         of the world consistent, remove such "ready" insns.  */
    restart_debug_insn_loop:
      for (i = ready.n_ready - 1; i >= 0; i--)
        {
          rtx_insn *x;

          x = ready_element (&ready, i);
          if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (x)) != NULL
              || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (x)) != NULL)
            {
              ready_remove (&ready, i);
              goto restart_debug_insn_loop;
            }
        }
      for (i = ready.n_ready - 1; i >= 0; i--)
        {
          rtx_insn *x;

          x = ready_element (&ready, i);
          resolve_dependencies (x);
        }
      for (i = 0; i <= max_insn_queue_index; i++)
        {
          rtx_insn_list *link;
          while ((link = insn_queue[i]) != NULL)
            {
              rtx_insn *x = link->insn ();
              insn_queue[i] = link->next ();
              QUEUE_INDEX (x) = QUEUE_NOWHERE;
              free_INSN_LIST_node (link);
              resolve_dependencies (x);
            }
        }
    }

  if (!success)
    undo_all_replacements ();

  /* Debug info.  */
  if (sched_verbose)
    {
      fprintf (sched_dump, ";;\tReady list (final): ");
      debug_ready_list (&ready);
    }

  if (modulo_ii == 0 && current_sched_info->queue_must_finish_empty)
    /* Sanity check -- queue must be empty now.  Meaningless if region has
       multiple bbs.  */
    gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
  else if (modulo_ii == 0)
    {
      /* We must maintain QUEUE_INDEX between blocks in region.  */
      for (i = ready.n_ready - 1; i >= 0; i--)
        {
          rtx_insn *x;

          x = ready_element (&ready, i);
          QUEUE_INDEX (x) = QUEUE_NOWHERE;
          TODO_SPEC (x) = HARD_DEP;
        }

      if (q_size)
        for (i = 0; i <= max_insn_queue_index; i++)
          {
            rtx_insn_list *link;
            for (link = insn_queue[i]; link; link = link->next ())
              {
                rtx_insn *x;

                x = link->insn ();
                QUEUE_INDEX (x) = QUEUE_NOWHERE;
                TODO_SPEC (x) = HARD_DEP;
              }
            free_INSN_LIST_list (&insn_queue[i]);
          }
    }

  if (sched_pressure == SCHED_PRESSURE_MODEL)
    model_end_schedule ();

  if (success)
    {
      commit_schedule (prev_head, tail, target_bb);
      if (sched_verbose)
        fprintf (sched_dump, ";; total time = %d\n", clock_var);
    }
  else
    last_scheduled_insn = tail;

  scheduled_insns.truncate (0);

  if (!current_sched_info->queue_must_finish_empty
      || haifa_recovery_bb_recently_added_p)
    {
      /* INSN_TICK (minimum clock tick at which the insn becomes
         ready) may be not correct for the insn in the subsequent
         blocks of the region.  We should use a correct value of
         `clock_var' or modify INSN_TICK.  It is better to keep
         clock_var value equal to 0 at the start of a basic block.
         Therefore we modify INSN_TICK here.  */
      fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
    }

  if (targetm.sched.finish)
    {
      targetm.sched.finish (sched_dump, sched_verbose);
      /* Target might have added some instructions to the scheduled block
         in its md_finish () hook.  These new insns don't have any data
         initialized and to identify them we extend h_i_d so that they'll
         get zero luids.  */
      sched_extend_luids ();
    }

  /* Update head/tail boundaries.  */
  head = NEXT_INSN (prev_head);
  tail = last_scheduled_insn;

  if (sched_verbose)
    {
      fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n",
               INSN_UID (head), INSN_UID (tail));

      if (sched_verbose >= 2)
        {
          dump_insn_stream (head, tail);
          print_rank_for_schedule_stats (";; TOTAL ", &rank_for_schedule_stats,
                                         NULL);
        }

      fprintf (sched_dump, "\n");
    }

  head = restore_other_notes (head, NULL);

  current_sched_info->head = head;
  current_sched_info->tail = tail;

  free_backtrack_queue ();

  return success;
}
/* Set_priorities: compute priority of each insn in the block.  */

int
set_priorities (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn;
  int n_insn;
  int sched_max_insns_priority =
        current_sched_info->sched_max_insns_priority;
  rtx_insn *prev_head;

  if (head == tail && ! INSN_P (head))
    gcc_unreachable ();

  n_insn = 0;

  prev_head = PREV_INSN (head);
  for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
    {
      if (!INSN_P (insn))
        continue;

      n_insn++;
      (void) priority (insn);

      gcc_assert (INSN_PRIORITY_KNOWN (insn));

      sched_max_insns_priority = MAX (sched_max_insns_priority,
                                      INSN_PRIORITY (insn));
    }

  current_sched_info->sched_max_insns_priority = sched_max_insns_priority;

  return n_insn;
}
/* Set sched_dump and sched_verbose for the desired debugging output.  */
void
setup_sched_dump (void)
{
  sched_verbose = sched_verbose_param;
  sched_dump = dump_file;
}
/* Allocate data for register pressure sensitive scheduling.  */
static void
alloc_global_sched_pressure_data (void)
{
  if (sched_pressure != SCHED_PRESSURE_NONE)
    {
      int i, max_regno = max_reg_num ();

      if (sched_dump != NULL)
        /* We need info about pseudos for rtl dumps about pseudo
           classes and costs.  */
        regstat_init_n_sets_and_refs ();
      ira_set_pseudo_classes (true, sched_verbose ? sched_dump : NULL);
      sched_regno_pressure_class
        = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
      for (i = 0; i < max_regno; i++)
        sched_regno_pressure_class[i]
          = (i < FIRST_PSEUDO_REGISTER
             ? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
             : ira_pressure_class_translate[reg_allocno_class (i)]);
      curr_reg_live = BITMAP_ALLOC (NULL);
      if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
        {
          saved_reg_live = BITMAP_ALLOC (NULL);
          region_ref_regs = BITMAP_ALLOC (NULL);
        }
      if (sched_pressure == SCHED_PRESSURE_MODEL)
        tmp_bitmap = BITMAP_ALLOC (NULL);

      /* Calculate number of CALL_SAVED_REGS and FIXED_REGS in register classes
         that we calculate register pressure for.  */
      for (int c = 0; c < ira_pressure_classes_num; ++c)
        {
          enum reg_class cl = ira_pressure_classes[c];

          call_saved_regs_num[cl] = 0;
          fixed_regs_num[cl] = 0;

          for (int i = 0; i < ira_class_hard_regs_num[cl]; ++i)
            if (!call_used_regs[ira_class_hard_regs[cl][i]])
              ++call_saved_regs_num[cl];
            else if (fixed_regs[ira_class_hard_regs[cl][i]])
              ++fixed_regs_num[cl];
        }
    }
}
/* Free data for register pressure sensitive scheduling.  Also called
   from schedule_region when stopping sched-pressure early.  */
void
free_global_sched_pressure_data (void)
{
  if (sched_pressure != SCHED_PRESSURE_NONE)
    {
      if (regstat_n_sets_and_refs != NULL)
        regstat_free_n_sets_and_refs ();
      if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
        {
          BITMAP_FREE (region_ref_regs);
          BITMAP_FREE (saved_reg_live);
        }
      if (sched_pressure == SCHED_PRESSURE_MODEL)
        BITMAP_FREE (tmp_bitmap);
      BITMAP_FREE (curr_reg_live);
      free (sched_regno_pressure_class);
    }
}
/* Initialize some global state for the scheduler.  This function works
   with the common data shared between all the schedulers.  It is called
   from the scheduler specific initialization routine.  */

void
sched_init (void)
{
  /* Disable speculative loads in their presence if cc0 defined.  */
  if (HAVE_cc0)
    flag_schedule_speculative_load = 0;

  if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
    targetm.sched.dispatch_do (NULL, DISPATCH_INIT);

  if (live_range_shrinkage_p)
    sched_pressure = SCHED_PRESSURE_WEIGHTED;
  else if (flag_sched_pressure
           && !reload_completed
           && common_sched_info->sched_pass_id == SCHED_RGN_PASS)
    sched_pressure = ((enum sched_pressure_algorithm)
                      PARAM_VALUE (PARAM_SCHED_PRESSURE_ALGORITHM));
  else
    sched_pressure = SCHED_PRESSURE_NONE;

  if (sched_pressure != SCHED_PRESSURE_NONE)
    ira_setup_eliminable_regset ();

  /* Initialize SPEC_INFO.  */
  if (targetm.sched.set_sched_flags)
    {
      spec_info = &spec_info_var;
      targetm.sched.set_sched_flags (spec_info);

      if (spec_info->mask != 0)
        {
          spec_info->data_weakness_cutoff =
            (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
          spec_info->control_weakness_cutoff =
            (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
             * REG_BR_PROB_BASE) / 100;
        }
      else
        /* So we won't read anything accidentally.  */
        spec_info = NULL;
    }
  else
    /* So we won't read anything accidentally.  */
    spec_info = 0;

  /* Initialize issue_rate.  */
  if (targetm.sched.issue_rate)
    issue_rate = targetm.sched.issue_rate ();
  else
    issue_rate = 1;

  if (targetm.sched.first_cycle_multipass_dfa_lookahead
      /* Don't use max_issue with reg_pressure scheduling.  Multipass
         scheduling and reg_pressure scheduling undo each other's decisions.  */
      && sched_pressure == SCHED_PRESSURE_NONE)
    dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
  else
    dfa_lookahead = 0;

  /* Set to "0" so that we recalculate.  */
  max_lookahead_tries = 0;

  if (targetm.sched.init_dfa_pre_cycle_insn)
    targetm.sched.init_dfa_pre_cycle_insn ();

  if (targetm.sched.init_dfa_post_cycle_insn)
    targetm.sched.init_dfa_post_cycle_insn ();

  dfa_start ();
  dfa_state_size = state_size ();

  init_alias_analysis ();

  if (!sched_no_dce)
    df_set_flags (DF_LR_RUN_DCE);
  df_note_add_problem ();

  /* More problems needed for interloop dep calculation in SMS.  */
  if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
    {
      df_rd_add_problem ();
      df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
    }

  df_analyze ();

  /* Do not run DCE after reload, as this can kill nops inserted
     by bundling.  */
  if (reload_completed)
    df_clear_flags (DF_LR_RUN_DCE);

  regstat_compute_calls_crossed ();

  if (targetm.sched.init_global)
    targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);

  alloc_global_sched_pressure_data ();

  curr_state = xmalloc (dfa_state_size);
}
static void haifa_init_only_bb (basic_block, basic_block);

/* Initialize data structures specific to the Haifa scheduler.  */
void
haifa_sched_init (void)
{
  setup_sched_dump ();
  sched_init ();

  scheduled_insns.create (0);

  if (spec_info != NULL)
    {
      sched_deps_info->use_deps_list = 1;
      sched_deps_info->generate_spec_deps = 1;
    }

  /* Initialize luids, dependency caches, target and h_i_d for the
     whole function.  */
  {
    sched_init_bbs ();

    auto_vec<basic_block> bbs (n_basic_blocks_for_fn (cfun));
    basic_block bb;
    FOR_EACH_BB_FN (bb, cfun)
      bbs.quick_push (bb);
    sched_init_luids (bbs);
    sched_deps_init (true);
    sched_extend_target ();
    haifa_init_h_i_d (bbs);
  }

  sched_init_only_bb = haifa_init_only_bb;
  sched_split_block = sched_split_block_1;
  sched_create_empty_bb = sched_create_empty_bb_1;
  haifa_recovery_bb_ever_added_p = false;

  nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
  before_recovery = 0;
  after_recovery = 0;

  modulo_ii = 0;
}
/* Finish work with the data specific to the Haifa scheduler.  */
void
haifa_sched_finish (void)
{
  sched_create_empty_bb = NULL;
  sched_split_block = NULL;
  sched_init_only_bb = NULL;

  if (spec_info && spec_info->dump)
    {
      char c = reload_completed ? 'a' : 'b';

      fprintf (spec_info->dump,
               ";; %s:\n", current_function_name ());

      fprintf (spec_info->dump,
               ";; Procedure %cr-begin-data-spec motions == %d\n",
               c, nr_begin_data);
      fprintf (spec_info->dump,
               ";; Procedure %cr-be-in-data-spec motions == %d\n",
               c, nr_be_in_data);
      fprintf (spec_info->dump,
               ";; Procedure %cr-begin-control-spec motions == %d\n",
               c, nr_begin_control);
      fprintf (spec_info->dump,
               ";; Procedure %cr-be-in-control-spec motions == %d\n",
               c, nr_be_in_control);
    }

  scheduled_insns.release ();

  /* Finalize h_i_d, dependency caches, and luids for the whole
     function.  Target will be finalized in md_global_finish ().  */
  sched_deps_finish ();
  sched_finish_luids ();
  current_sched_info = NULL;

  sched_finish ();
}
/* Free global data used during insn scheduling.  This function works with
   the common data shared between the schedulers.  */

void
sched_finish (void)
{
  haifa_finish_h_i_d ();
  free_global_sched_pressure_data ();
  free (curr_state);

  if (targetm.sched.finish_global)
    targetm.sched.finish_global (sched_dump, sched_verbose);

  end_alias_analysis ();

  regstat_free_calls_crossed ();

  dfa_finish ();
}
/* Free all delay_pair structures that were recorded.  */
void
free_delay_pairs (void)
{
  if (delay_htab)
    {
      delay_htab->empty ();
      delay_htab_i2->empty ();
    }
}
/* Fix INSN_TICKs of the instructions in the current block as well as
   INSN_TICKs of their dependents.
   HEAD and TAIL are the begin and the end of the current scheduled block.  */
static void
fix_inter_tick (rtx_insn *head, rtx_insn *tail)
{
  /* Set of instructions with corrected INSN_TICK.  */
  auto_bitmap processed;
  /* ??? It is doubtful if we should assume that cycle advance happens on
     basic block boundaries.  Basically insns that are unconditionally ready
     on the start of the block are preferable to those which have
     a one cycle dependency on an insn from the previous block.  */
  int next_clock = clock_var + 1;

  /* Iterate over the scheduled instructions and fix their INSN_TICKs and
     the INSN_TICKs of dependent instructions, so that INSN_TICKs are
     consistent across different blocks.  */
  for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
    {
      if (INSN_P (head))
        {
          int tick;
          sd_iterator_def sd_it;
          dep_t dep;

          tick = INSN_TICK (head);
          gcc_assert (tick >= MIN_TICK);

          /* Fix INSN_TICK of instruction from just scheduled block.  */
          if (bitmap_set_bit (processed, INSN_LUID (head)))
            {
              tick -= next_clock;

              if (tick < MIN_TICK)
                tick = MIN_TICK;

              INSN_TICK (head) = tick;
            }

          if (DEBUG_INSN_P (head))
            continue;

          FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
            {
              rtx_insn *next;

              next = DEP_CON (dep);
              tick = INSN_TICK (next);

              if (tick != INVALID_TICK
                  /* If NEXT has its INSN_TICK calculated, fix it.
                     If not - it will be properly calculated from
                     scratch later in fix_tick_ready.  */
                  && bitmap_set_bit (processed, INSN_LUID (next)))
                {
                  tick -= next_clock;

                  if (tick < MIN_TICK)
                    tick = MIN_TICK;

                  if (tick > INTER_TICK (next))
                    INTER_TICK (next) = tick;
                  else
                    tick = INTER_TICK (next);

                  INSN_TICK (next) = tick;
                }
            }
        }
    }
}
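
/* A worked example of the rebasing above (numbers invented): if the block
   just scheduled ended with clock_var == 7, NEXT_CLOCK is 8, and a
   dependent insn whose INSN_TICK was computed as 10 in this block's time
   frame is rebased to 10 - 8 == 2, i.e. it becomes ready two cycles into
   the next block, whose clock starts again from zero.  */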
/* Check if NEXT is ready to be added to the ready or queue list.
   If "yes", add it to the proper list.
   Returns:
      -1 - is not ready yet,
       0 - added to the ready list,
   0 < N - queued for N cycles.  */
int
try_ready (rtx_insn *next)
{
  ds_t old_ts, new_ts;

  old_ts = TODO_SPEC (next);

  gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP | DEP_CONTROL | DEP_POSTPONED))
              && (old_ts == HARD_DEP
                  || old_ts == DEP_POSTPONED
                  || (old_ts & SPECULATIVE)
                  || old_ts == DEP_CONTROL));

  new_ts = recompute_todo_spec (next, false);

  if (new_ts & (HARD_DEP | DEP_POSTPONED))
    gcc_assert (new_ts == old_ts
                && QUEUE_INDEX (next) == QUEUE_NOWHERE);
  else if (current_sched_info->new_ready)
    new_ts = current_sched_info->new_ready (next, new_ts);

  /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
     have its original pattern or changed (speculative) one.  This is due
     to changing ebb in region scheduling.
     * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
     has speculative pattern.

     We can't assert (!(new_ts & HARD_DEP) || new_ts == old_ts) here because
     control-speculative NEXT could have been discarded by sched-rgn.c
     (the same case as when discarded by can_schedule_ready_p ()).  */

  if ((new_ts & SPECULATIVE)
      /* If (old_ts == new_ts), then (old_ts & SPECULATIVE) and we don't
         need to change anything.  */
      && new_ts != old_ts)
    {
      int res;
      rtx new_pat;

      gcc_assert ((new_ts & SPECULATIVE) && !(new_ts & ~SPECULATIVE));

      res = haifa_speculate_insn (next, new_ts, &new_pat);

      switch (res)
        {
        case -1:
          /* It would be nice to change DEP_STATUS of all dependences,
             which have ((DEP_STATUS & SPECULATIVE) == new_ts) to HARD_DEP,
             so we won't reanalyze anything.  */
          new_ts = HARD_DEP;
          break;

        case 0:
          /* We follow the rule that every speculative insn
             has non-null ORIG_PAT.  */
          if (!ORIG_PAT (next))
            ORIG_PAT (next) = PATTERN (next);
          break;

        case 1:
          if (!ORIG_PAT (next))
            /* If we are going to overwrite the original pattern of insn,
               save it.  */
            ORIG_PAT (next) = PATTERN (next);

          res = haifa_change_pattern (next, new_pat);
          gcc_assert (res);
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* We need to restore pattern only if (new_ts == 0), because otherwise it is
     either correct (new_ts & SPECULATIVE),
     or we simply don't care (new_ts & HARD_DEP).  */

  gcc_assert (!ORIG_PAT (next)
              || !IS_SPECULATION_BRANCHY_CHECK_P (next));

  TODO_SPEC (next) = new_ts;

  if (new_ts & (HARD_DEP | DEP_POSTPONED))
    {
      /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
         control-speculative NEXT could have been discarded by sched-rgn.c
         (the same case as when discarded by can_schedule_ready_p ()).  */
      /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/

      change_queue_index (next, QUEUE_NOWHERE);

      return -1;
    }
  else if (!(new_ts & BEGIN_SPEC)
           && ORIG_PAT (next) && PREDICATED_PAT (next) == NULL_RTX
           && !IS_SPECULATION_CHECK_P (next))
    /* We should change pattern of every previously speculative
       instruction - and we determine if NEXT was speculative by using
       ORIG_PAT field.  Except one case - speculation checks have ORIG_PAT
       pat too, so skip them.  */
    {
      bool success = haifa_change_pattern (next, ORIG_PAT (next));
      gcc_assert (success);
      ORIG_PAT (next) = 0;
    }

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
               (*current_sched_info->print_insn) (next, 0));

      if (spec_info && spec_info->dump)
        {
          if (new_ts & BEGIN_DATA)
            fprintf (spec_info->dump, "; data-spec;");
          if (new_ts & BEGIN_CONTROL)
            fprintf (spec_info->dump, "; control-spec;");
          if (new_ts & BE_IN_CONTROL)
            fprintf (spec_info->dump, "; in-control-spec;");
        }
      if (TODO_SPEC (next) & DEP_CONTROL)
        fprintf (sched_dump, " predicated");
      fprintf (sched_dump, "\n");
    }

  adjust_priority (next);

  return fix_tick_ready (next);
}
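
/* Illustrative use of the return protocol (hypothetical caller, not from
   this file): after "int n = try_ready (next);", n < 0 means NEXT still has
   unresolved dependencies, n == 0 means NEXT went onto the ready list, and
   n > 0 means NEXT was queued for N more cycles.  The value comes straight
   from fix_tick_ready below via change_queue_index.  */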
/* Calculate INSN_TICK of NEXT and add it to either ready or queue list.  */
static int
fix_tick_ready (rtx_insn *next)
{
  int tick, delay;

  if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
    {
      int full_p;
      sd_iterator_def sd_it;
      dep_t dep;

      tick = INSN_TICK (next);
      /* if tick is not equal to INVALID_TICK, then update
         INSN_TICK of NEXT with the most recent resolved dependence
         cost.  Otherwise, recalculate from scratch.  */
      full_p = (tick == INVALID_TICK);

      FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
        {
          rtx_insn *pro = DEP_PRO (dep);
          int tick1;

          gcc_assert (INSN_TICK (pro) >= MIN_TICK);

          tick1 = INSN_TICK (pro) + dep_cost (dep);
          if (tick1 > tick)
            {
              tick = tick1;
              if (!full_p)
                break;
            }
        }
    }
  else
    tick = -1;

  INSN_TICK (next) = tick;

  delay = tick - clock_var;
  if (delay <= 0 || sched_pressure != SCHED_PRESSURE_NONE || sched_fusion)
    delay = QUEUE_READY;

  change_queue_index (next, delay);

  return delay;
}
/* Move NEXT to the proper queue list with (DELAY >= 1),
   or add it to the ready list (DELAY == QUEUE_READY),
   or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE).  */
static void
change_queue_index (rtx_insn *next, int delay)
{
  int i = QUEUE_INDEX (next);

  gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
              && delay != 0);
  gcc_assert (i != QUEUE_SCHEDULED);

  if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
      || (delay < 0 && delay == i))
    /* We have nothing to do.  */
    return;

  /* Remove NEXT from wherever it is now.  */
  if (i == QUEUE_READY)
    ready_remove_insn (next);
  else if (i >= 0)
    queue_remove (next);

  /* Add it to the proper place.  */
  if (delay == QUEUE_READY)
    ready_add (readyp, next, false);
  else if (delay >= 1)
    queue_insn (next, delay, "change queue index");

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\ttick updated: insn %s",
               (*current_sched_info->print_insn) (next, 0));

      if (delay == QUEUE_READY)
        fprintf (sched_dump, " into ready\n");
      else if (delay >= 1)
        fprintf (sched_dump, " into queue with cost=%d\n", delay);
      else
        fprintf (sched_dump, " removed from ready or queue lists\n");
    }
}
static int sched_ready_n_insns = -1;

/* Initialize per region data structures.  */
void
sched_extend_ready_list (int new_sched_ready_n_insns)
{
  int i;

  if (sched_ready_n_insns == -1)
    /* At the first call we need to initialize one more choice_stack
       entry.  */
    {
      i = 0;
      sched_ready_n_insns = 0;
      scheduled_insns.reserve (new_sched_ready_n_insns);
    }
  else
    i = sched_ready_n_insns + 1;

  ready.veclen = new_sched_ready_n_insns + issue_rate;
  ready.vec = XRESIZEVEC (rtx_insn *, ready.vec, ready.veclen);

  gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);

  ready_try = (signed char *) xrecalloc (ready_try, new_sched_ready_n_insns,
                                         sched_ready_n_insns,
                                         sizeof (*ready_try));

  /* We allocate +1 element to save initial state in the choice_stack[0]
     entry.  */
  choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
                             new_sched_ready_n_insns + 1);

  for (; i <= new_sched_ready_n_insns; i++)
    {
      choice_stack[i].state = xmalloc (dfa_state_size);

      if (targetm.sched.first_cycle_multipass_init)
        targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
                                                    .target_data));
    }

  sched_ready_n_insns = new_sched_ready_n_insns;
}

/* Free per region data structures.  */
void
sched_finish_ready_list (void)
{
  int i;

  free (ready.vec);
  ready.vec = NULL;
  ready.veclen = 0;

  free (ready_try);
  ready_try = NULL;

  for (i = 0; i <= sched_ready_n_insns; i++)
    {
      if (targetm.sched.first_cycle_multipass_fini)
        targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
                                                    .target_data));

      free (choice_stack[i].state);
    }
  free (choice_stack);
  choice_stack = NULL;

  sched_ready_n_insns = -1;
}
static int
haifa_luid_for_non_insn (rtx x)
{
  gcc_assert (NOTE_P (x) || LABEL_P (x));

  return 0;
}
/* Generates recovery code for INSN.  */
static void
generate_recovery_code (rtx_insn *insn)
{
  if (TODO_SPEC (insn) & BEGIN_SPEC)
    begin_speculative_block (insn);

  /* Here we have insn with no dependencies to
     instructions other than CHECK_SPEC ones.  */

  if (TODO_SPEC (insn) & BE_IN_SPEC)
    add_to_speculative_block (insn);
}
/* Helper function.
   Tries to add speculative dependencies of type FS between INSN's
   consumers and TWIN.  */
static void
process_insn_forw_deps_be_in_spec (rtx_insn *insn, rtx_insn *twin, ds_t fs)
{
  sd_iterator_def sd_it;
  dep_t dep;

  FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
    {
      ds_t ds;
      rtx_insn *consumer;

      consumer = DEP_CON (dep);

      ds = DEP_STATUS (dep);

      if (/* If we want to create speculative dep.  */
          fs
          /* And we can do that because this is a true dep.  */
          && (ds & DEP_TYPES) == DEP_TRUE)
        {
          gcc_assert (!(ds & BE_IN_SPEC));

          if (/* If this dep can be overcome with 'begin speculation'.  */
              ds & BEGIN_SPEC)
            /* Then we have a choice: keep the dep 'begin speculative'
               or transform it into 'be in speculative'.  */
            {
              if (/* In try_ready we assert that if insn once became ready
                     it can be removed from the ready (or queue) list only
                     due to backend decision.  Hence we can't let the
                     probability of the speculative dep decrease.  */
                  ds_weak (ds) <= ds_weak (fs))
                {
                  ds_t new_ds;

                  new_ds = (ds & ~BEGIN_SPEC) | fs;

                  if (/* consumer can 'be in speculative'.  */
                      sched_insn_is_legitimate_for_speculation_p (consumer,
                                                                  new_ds))
                    /* Transform it to be in speculative.  */
                    ds = new_ds;
                }
            }
          else
            /* Mark the dep as 'be in speculative'.  */
            ds |= fs;
        }

      {
        dep_def _new_dep, *new_dep = &_new_dep;

        init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
        sd_add_dep (new_dep, false);
      }
    }
}
/* Generates recovery code for BEGIN speculative INSN.  */
static void
begin_speculative_block (rtx_insn *insn)
{
  if (TODO_SPEC (insn) & BEGIN_DATA)
    nr_begin_data++;
  if (TODO_SPEC (insn) & BEGIN_CONTROL)
    nr_begin_control++;

  create_check_block_twin (insn, false);

  TODO_SPEC (insn) &= ~BEGIN_SPEC;
}
static void haifa_init_insn (rtx_insn *);

/* Generates recovery code for BE_IN speculative INSN.  */
static void
add_to_speculative_block (rtx_insn *insn)
{
  ds_t ts;
  sd_iterator_def sd_it;
  dep_t dep;
  auto_vec<rtx_insn *, 10> twins;

  ts = TODO_SPEC (insn);
  gcc_assert (!(ts & ~BE_IN_SPEC));

  if (ts & BE_IN_DATA)
    nr_be_in_data++;
  if (ts & BE_IN_CONTROL)
    nr_be_in_control++;

  TODO_SPEC (insn) &= ~BE_IN_SPEC;
  gcc_assert (!TODO_SPEC (insn));

  DONE_SPEC (insn) |= ts;

  /* First we convert all simple checks to branchy.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    {
      rtx_insn *check = DEP_PRO (dep);

      if (IS_SPECULATION_SIMPLE_CHECK_P (check))
        {
          create_check_block_twin (check, true);

          /* Restart search.  */
          sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
        }
      else
        /* Continue search.  */
        sd_iterator_next (&sd_it);
    }

  auto_vec<rtx_insn *> priorities_roots;
  clear_priorities (insn, &priorities_roots);

  while (1)
    {
      rtx_insn *check, *twin;
      basic_block rec;

      /* Get the first backward dependency of INSN.  */
      sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
      if (!sd_iterator_cond (&sd_it, &dep))
        /* INSN has no backward dependencies left.  */
        break;

      gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
                  && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
                  && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);

      check = DEP_PRO (dep);

      gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
                  && QUEUE_INDEX (check) == QUEUE_NOWHERE);

      rec = BLOCK_FOR_INSN (check);

      twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
      haifa_init_insn (twin);

      sd_copy_back_deps (twin, insn, true);

      if (sched_verbose && spec_info->dump)
        /* INSN_BB (insn) isn't determined for twin insns yet.
           So we can't use current_sched_info->print_insn.  */
        fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
                 INSN_UID (twin), rec->index);

      twins.safe_push (twin);

      /* Add dependences between TWIN and all appropriate
         instructions from REC.  */
      FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
        {
          rtx_insn *pro = DEP_PRO (dep);

          gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);

          /* INSN might have dependencies from the instructions from
             several recovery blocks.  At this iteration we process those
             producers that reside in REC.  */
          if (BLOCK_FOR_INSN (pro) == rec)
            {
              dep_def _new_dep, *new_dep = &_new_dep;

              init_dep (new_dep, pro, twin, REG_DEP_TRUE);
              sd_add_dep (new_dep, false);
            }
        }

      process_insn_forw_deps_be_in_spec (insn, twin, ts);

      /* Remove all dependencies between INSN and insns in REC.  */
      for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
           sd_iterator_cond (&sd_it, &dep);)
        {
          rtx_insn *pro = DEP_PRO (dep);

          if (BLOCK_FOR_INSN (pro) == rec)
            sd_delete_dep (sd_it);
          else
            sd_iterator_next (&sd_it);
        }
    }

  /* We couldn't have added the dependencies between INSN and TWINS earlier
     because that would make TWINS appear in the INSN_BACK_DEPS (INSN).  */
  unsigned int i;
  rtx_insn *twin;
  FOR_EACH_VEC_ELT_REVERSE (twins, i, twin)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
      sd_add_dep (new_dep, false);
    }

  calc_priorities (priorities_roots);
}
/* Extends and fills with zeros (only the new part) array pointed to by P.  */
void *
xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
{
  gcc_assert (new_nmemb >= old_nmemb);
  p = XRESIZEVAR (void, p, new_nmemb * size);
  memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
  return p;
}
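
/* Usage example (mirroring the ready_try call in sched_extend_ready_list
   above, with invented sizes): growing a table from 4 to 16 elements while
   zeroing the 12 new slots:

     tbl = (signed char *) xrecalloc (tbl, 16, 4, sizeof (*tbl));  */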
/* Helper function.
   Find fallthru edge from PRED.  */
edge
find_fallthru_edge_from (basic_block pred)
{
  edge e;
  basic_block succ;

  succ = pred->next_bb;
  gcc_assert (succ->prev_bb == pred);

  if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
    {
      e = find_fallthru_edge (pred->succs);

      if (e)
        {
          gcc_assert (e->dest == succ);
          return e;
        }
    }
  else
    {
      e = find_fallthru_edge (succ->preds);

      if (e)
        {
          gcc_assert (e->src == pred);
          return e;
        }
    }

  return NULL;
}
/* Extend per basic block data structures.  */
static void
sched_extend_bb (void)
{
  /* The following is done to keep current_sched_info->next_tail non null.  */
  rtx_insn *end = BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
  rtx_insn *insn = DEBUG_INSN_P (end) ? prev_nondebug_insn (end) : end;
  if (NEXT_INSN (end) == 0
      || (!NOTE_P (insn)
          && !LABEL_P (insn)
          /* Don't emit a NOTE if it would end up before a BARRIER.  */
          && !BARRIER_P (NEXT_INSN (end))))
    {
      rtx_note *note = emit_note_after (NOTE_INSN_DELETED, end);
      /* Make note appear outside BB.  */
      set_block_for_insn (note, NULL);
      BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb) = end;
    }
}
/* Init per basic block data structures.  */
void
sched_init_bbs (void)
{
  sched_extend_bb ();
}
/* Initialize BEFORE_RECOVERY variable.  */
static void
init_before_recovery (basic_block *before_recovery_ptr)
{
  basic_block last;
  edge e;

  last = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
  e = find_fallthru_edge_from (last);

  if (e)
    {
      /* We create two basic blocks:
	 1. Single instruction block is inserted right after E->SRC
	 and before E->DEST.
	 2. Empty block right before EXIT_BLOCK.
	 Between these two blocks recovery blocks will be emitted.  */

      basic_block single, empty;

      /* If the fallthrough edge to exit we've found is from the block we've
	 created before, don't do anything more.  */
      if (last == after_recovery)
	return;

      adding_bb_to_current_region_p = false;

      single = sched_create_empty_bb (last);
      empty = sched_create_empty_bb (single);

      /* Add new blocks to the root loop.  */
      if (current_loops != NULL)
	{
	  add_bb_to_loop (single, (*current_loops->larray)[0]);
	  add_bb_to_loop (empty, (*current_loops->larray)[0]);
	}

      single->count = last->count;
      empty->count = last->count;
      single->frequency = last->frequency;
      empty->frequency = last->frequency;
      BB_COPY_PARTITION (single, last);
      BB_COPY_PARTITION (empty, last);

      redirect_edge_succ (e, single);
      make_single_succ_edge (single, empty, 0);
      make_single_succ_edge (empty, EXIT_BLOCK_PTR_FOR_FN (cfun),
			     EDGE_FALLTHRU);

      rtx_code_label *label = block_label (empty);
      rtx_jump_insn *x = emit_jump_insn_after (targetm.gen_jump (label),
					       BB_END (single));
      JUMP_LABEL (x) = label;
      LABEL_NUSES (label)++;
      haifa_init_insn (x);

      emit_barrier_after (x);

      sched_init_only_bb (empty, NULL);
      sched_init_only_bb (single, NULL);
      sched_extend_bb ();

      adding_bb_to_current_region_p = true;
      before_recovery = single;
      after_recovery = empty;

      if (before_recovery_ptr)
	*before_recovery_ptr = before_recovery;

      if (sched_verbose >= 2 && spec_info->dump)
	fprintf (spec_info->dump,
		 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
		 last->index, single->index, empty->index);
    }
  else
    before_recovery = last;
}
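
/* [Editorial note -- commentary added in this edited copy.]  After the
   fixup above, the tail of the function looks like

       last ->> single -> empty ->> EXIT

   (->> are fallthru edges, -> the jump emitted in SINGLE), matching the
   ";;\t\tFixed fallthru to EXIT" dump line; recovery blocks are later
   emitted between SINGLE and EMPTY.  */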
/* Returns new recovery block.  */
basic_block
sched_create_recovery_block (basic_block *before_recovery_ptr)
{
  rtx_insn *barrier;
  basic_block rec;

  haifa_recovery_bb_recently_added_p = true;
  haifa_recovery_bb_ever_added_p = true;

  init_before_recovery (before_recovery_ptr);

  barrier = get_last_bb_insn (before_recovery);
  gcc_assert (BARRIER_P (barrier));

  rtx_insn *label = emit_label_after (gen_label_rtx (), barrier);

  rec = create_basic_block (label, label, before_recovery);

  /* A recovery block always ends with an unconditional jump.  */
  emit_barrier_after (BB_END (rec));

  if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
    BB_SET_PARTITION (rec, BB_COLD_PARTITION);

  if (sched_verbose && spec_info->dump)
    fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
	     rec->index);

  return rec;
}
/* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
   and emit necessary jumps.  */
void
sched_create_recovery_edges (basic_block first_bb, basic_block rec,
			     basic_block second_bb)
{
  int edge_flags;

  /* This is fixing of incoming edge.  */
  /* ??? Which other flags should be specified?  */
  if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
    /* Partition type is the same, if it is "unpartitioned".  */
    edge_flags = EDGE_CROSSING;
  else
    edge_flags = 0;

  edge e2 = single_succ_edge (first_bb);
  edge e = make_edge (first_bb, rec, edge_flags);

  /* TODO: The actual probability can be determined and is computed as
     'todo_spec' variable in create_check_block_twin and
     in sel-sched.c `check_ds' in create_speculation_check.  */
  e->probability = profile_probability::very_unlikely ();
  e->count = first_bb->count.apply_probability (e->probability);
  rec->count = e->count;
  rec->frequency = EDGE_FREQUENCY (e);
  e2->probability = e->probability.invert ();
  e2->count = first_bb->count - e->count;

  rtx_code_label *label = block_label (second_bb);
  rtx_jump_insn *jump = emit_jump_insn_after (targetm.gen_jump (label),
					      BB_END (rec));
  JUMP_LABEL (jump) = label;
  LABEL_NUSES (label)++;

  if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
    /* Partition type is the same, if it is "unpartitioned".  */
    {
      /* Rewritten from cfgrtl.c.  */
      if (crtl->has_bb_partition && targetm_common.have_named_sections)
	{
	  /* We don't need the same note for the check because
	     any_condjump_p (check) == true.  */
	  CROSSING_JUMP_P (jump) = 1;
	}
      edge_flags = EDGE_CROSSING;
    }
  else
    edge_flags = 0;

  make_single_succ_edge (rec, second_bb, edge_flags);
  if (dom_info_available_p (CDI_DOMINATORS))
    set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
}
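
/* [Editorial note -- commentary added in this edited copy.]  The edges
   created above form a diamond:

       first_bb --(fallthru, likely)--------> second_bb
       first_bb --(very unlikely)--> rec ---> second_bb

   i.e. the check at the end of FIRST_BB either falls through to
   SECOND_BB or branches to the recovery block REC, which ends with an
   unconditional jump back to SECOND_BB.  */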
/* This function creates recovery code for INSN.  If MUTATE_P is nonzero,
   INSN is a simple check, that should be converted to branchy one.  */
static void
create_check_block_twin (rtx_insn *insn, bool mutate_p)
{
  basic_block rec;
  rtx_insn *label, *check, *twin;
  rtx check_pat;
  ds_t fs;
  sd_iterator_def sd_it;
  dep_t dep;
  dep_def _new_dep, *new_dep = &_new_dep;
  ds_t todo_spec;

  gcc_assert (ORIG_PAT (insn) != NULL_RTX);

  if (!mutate_p)
    todo_spec = TODO_SPEC (insn);
  else
    {
      gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
		  && (TODO_SPEC (insn) & SPECULATIVE) == 0);

      todo_spec = CHECK_SPEC (insn);
    }

  todo_spec &= SPECULATIVE;

  /* Create recovery block.  */
  if (mutate_p || targetm.sched.needs_block_p (todo_spec))
    {
      rec = sched_create_recovery_block (NULL);
      label = BB_HEAD (rec);
    }
  else
    {
      rec = EXIT_BLOCK_PTR_FOR_FN (cfun);
      label = NULL;
    }

  /* Emit CHECK.  */
  check_pat = targetm.sched.gen_spec_check (insn, label, todo_spec);

  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      /* To have mem_reg alive at the beginning of second_bb,
	 we emit check BEFORE insn, so insn after splitting
	 insn will be at the beginning of second_bb, which will
	 provide us with the correct life information.  */
      check = emit_jump_insn_before (check_pat, insn);
      JUMP_LABEL (check) = label;
      LABEL_NUSES (label)++;
    }
  else
    check = emit_insn_before (check_pat, insn);

  /* Extend data structures.  */
  haifa_init_insn (check);

  /* CHECK is being added to current region.  Extend ready list.  */
  gcc_assert (sched_ready_n_insns != -1);
  sched_extend_ready_list (sched_ready_n_insns + 1);

  if (current_sched_info->add_remove_insn)
    current_sched_info->add_remove_insn (insn, 0);

  RECOVERY_BLOCK (check) = rec;

  if (sched_verbose && spec_info->dump)
    fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
	     (*current_sched_info->print_insn) (check, 0));
  gcc_assert (ORIG_PAT (insn));

  /* Initialize TWIN (twin is a duplicate of original instruction
     in the recovery block).  */
  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      sd_iterator_def sd_it;
      dep_t dep;

      FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
	if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
	  {
	    struct _dep _dep2, *dep2 = &_dep2;

	    init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);

	    sd_add_dep (dep2, true);
	  }

      twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
      haifa_init_insn (twin);

      if (sched_verbose && spec_info->dump)
	/* INSN_BB (insn) isn't determined for twin insns yet.
	   So we can't use current_sched_info->print_insn.  */
	fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
		 INSN_UID (twin), rec->index);
    }
  else
    {
      ORIG_PAT (check) = ORIG_PAT (insn);
      HAS_INTERNAL_DEP (check) = 1;
      twin = check;
      /* ??? We probably should change all OUTPUT dependencies to
	 (TRUE | OUTPUT).  */
    }

  /* Copy all resolved back dependencies of INSN to TWIN.  This will
     provide correct value for INSN_TICK (TWIN).  */
  sd_copy_back_deps (twin, insn, true);

  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    /* In case of branchy check, fix CFG.  */
    {
      basic_block first_bb, second_bb;
      rtx_insn *jump;

      first_bb = BLOCK_FOR_INSN (check);
      second_bb = sched_split_block (first_bb, check);

      sched_create_recovery_edges (first_bb, rec, second_bb);

      sched_init_only_bb (second_bb, first_bb);
      sched_init_only_bb (rec, EXIT_BLOCK_PTR_FOR_FN (cfun));

      jump = BB_END (rec);
      haifa_init_insn (jump);
    }
  /* Move backward dependences from INSN to CHECK and
     move forward dependences from INSN to TWIN.  */

  /* First, create dependencies between INSN's producers and CHECK & TWIN.  */
  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      ds_t ds;

      /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
	 check --TRUE--> producer  ??? or ANTI ???
	 twin  --TRUE--> producer
	 twin  --ANTI--> check

	 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
	 check --ANTI--> producer
	 twin  --ANTI--> producer
	 twin  --ANTI--> check

	 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
	 check ~~TRUE~~> producer
	 twin  ~~TRUE~~> producer
	 twin  --ANTI--> check  */

      ds = DEP_STATUS (dep);

      if (ds & BEGIN_SPEC)
	{
	  gcc_assert (!mutate_p);
	  ds &= ~BEGIN_SPEC;
	}

      init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
      sd_add_dep (new_dep, false);

      if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
	{
	  DEP_CON (new_dep) = twin;
	  sd_add_dep (new_dep, false);
	}
    }

  /* Second, remove backward dependencies of INSN.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    {
      if ((DEP_STATUS (dep) & BEGIN_SPEC)
	  || mutate_p)
	/* We can delete this dep because we overcome it with
	   BEGIN_SPECULATION.  */
	sd_delete_dep (sd_it);
      else
	sd_iterator_next (&sd_it);
    }
  /* Future Speculations.  Determine what BE_IN speculations will be like.  */
  fs = 0;

  /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
     here.  */

  gcc_assert (!DONE_SPEC (insn));

  if (!mutate_p)
    {
      ds_t ts = TODO_SPEC (insn);

      DONE_SPEC (insn) = ts & BEGIN_SPEC;
      CHECK_SPEC (check) = ts & BEGIN_SPEC;

      /* Luckiness of future speculations solely depends upon initial
	 BEGIN speculation.  */
      if (ts & BEGIN_DATA)
	fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
      if (ts & BEGIN_CONTROL)
	fs = set_dep_weak (fs, BE_IN_CONTROL,
			   get_dep_weak (ts, BEGIN_CONTROL));
    }
  else
    CHECK_SPEC (check) = CHECK_SPEC (insn);

  /* Future speculations: call the helper.  */
  process_insn_forw_deps_be_in_spec (insn, twin, fs);
  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      /* Which types of dependencies should we use here is,
	 generally, machine-dependent question...  But, for now,
	 we use ANTI...  */

      if (!mutate_p)
	{
	  init_dep (new_dep, insn, check, REG_DEP_TRUE);
	  sd_add_dep (new_dep, false);

	  init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
	  sd_add_dep (new_dep, false);
	}
      else
	{
	  if (spec_info->dump)
	    fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
		     (*current_sched_info->print_insn) (insn, 0));

	  /* Remove all dependencies of the INSN.  */
	  {
	    sd_it = sd_iterator_start (insn, (SD_LIST_FORW
					      | SD_LIST_BACK
					      | SD_LIST_RES_BACK));
	    while (sd_iterator_cond (&sd_it, &dep))
	      sd_delete_dep (sd_it);
	  }

	  /* If former check (INSN) already was moved to the ready (or queue)
	     list, add new check (CHECK) there too.  */
	  if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
	    try_ready (check);

	  /* Remove old check from instruction stream and free its
	     data.  */
	  sched_remove_insn (insn);
	}

      init_dep (new_dep, check, twin, REG_DEP_ANTI);
      sd_add_dep (new_dep, false);
    }
  else
    {
      init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
      sd_add_dep (new_dep, false);
    }

  if (!mutate_p)
    /* Fix priorities.  If MUTATE_P is nonzero, this is not necessary,
       because it'll be done later in add_to_speculative_block.  */
    {
      auto_vec<rtx_insn *> priorities_roots;

      clear_priorities (twin, &priorities_roots);
      calc_priorities (priorities_roots);
    }
}
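
/* [Editorial note -- commentary added in this edited copy.]  Schematically,
   for a branchy check the function above rewrites

       bb:  ... insn ...
   as
       first_bb:  ... check          (branches to rec on mis-speculation)
       rec:       twin; jump second_bb
       second_bb: ...

   with CHECK inheriting INSN's backward dependencies, TWIN its forward
   ones, and an anti dependence of TWIN on CHECK tying them together.  */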
/* Remove dependencies between instructions in the recovery block REC
   and usual region instructions.  It keeps inner dependences so it
   won't be necessary to recompute them.  */
static void
fix_recovery_deps (basic_block rec)
{
  rtx_insn *note, *insn, *jump;
  auto_vec<rtx_insn *, 10> ready_list;
  auto_bitmap in_ready;

  /* NOTE - a basic block note.  */
  note = NEXT_INSN (BB_HEAD (rec));
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  insn = BB_END (rec);
  gcc_assert (JUMP_P (insn));
  insn = PREV_INSN (insn);

  do
    {
      sd_iterator_def sd_it;
      dep_t dep;

      for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
	   sd_iterator_cond (&sd_it, &dep);)
	{
	  rtx_insn *consumer = DEP_CON (dep);

	  if (BLOCK_FOR_INSN (consumer) != rec)
	    {
	      sd_delete_dep (sd_it);

	      if (bitmap_set_bit (in_ready, INSN_LUID (consumer)))
		ready_list.safe_push (consumer);
	    }
	  else
	    {
	      gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);

	      sd_iterator_next (&sd_it);
	    }
	}

      insn = PREV_INSN (insn);
    }
  while (insn != note);

  /* Try to add instructions to the ready or queue list.  */
  unsigned int i;
  rtx_insn *temp;
  FOR_EACH_VEC_ELT_REVERSE (ready_list, i, temp)
    try_ready (temp);

  /* Fixing jump's dependences.  */
  insn = BB_HEAD (rec);
  jump = BB_END (rec);

  gcc_assert (LABEL_P (insn));
  insn = NEXT_INSN (insn);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
  add_jump_dependencies (insn, jump);
}
/* Change pattern of INSN to NEW_PAT.  Invalidate cached haifa
   instruction data.  */
static void
haifa_change_pattern (rtx_insn *insn, rtx new_pat)
{
  int t;

  t = validate_change (insn, &PATTERN (insn), new_pat, 0);
  gcc_assert (t);

  update_insn_after_change (insn);
}
/* -1 - can't speculate,
   0 - for speculation with REQUEST mode it is OK to use
   current instruction pattern,
   1 - need to change pattern for *NEW_PAT to be speculative.  */
int
sched_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
{
  gcc_assert (current_sched_info->flags & DO_SPECULATION
	      && (request & SPECULATIVE)
	      && sched_insn_is_legitimate_for_speculation_p (insn, request));

  if ((request & spec_info->mask) != request)
    return -1;

  if (request & BE_IN_SPEC
      && !(request & BEGIN_SPEC))
    return 0;

  return targetm.sched.speculate_insn (insn, request, new_pat);
}
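
/* [Editorial note -- example added in this edited copy.]  The tri-state
   result is consumed along the lines of try_ready earlier in this file;
   a sketch with a hypothetical local NEW_PAT:

       rtx new_pat;
       switch (sched_speculate_insn (insn, request, &new_pat))
	 {
	 case -1:	... cannot speculate; keep the hard dependence ...
	   break;
	 case 0:	... current pattern is already fine ...
	   break;
	 case 1:
	   haifa_change_pattern (insn, new_pat);
	   break;
	 }  */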
static int
haifa_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
{
  gcc_assert (sched_deps_info->generate_spec_deps
	      && !IS_SPECULATION_CHECK_P (insn));

  if (HAS_INTERNAL_DEP (insn)
      || SCHED_GROUP_P (insn))
    return -1;

  return sched_speculate_insn (insn, request, new_pat);
}
/* Print some information about block BB, which starts with HEAD and
   ends with TAIL, before scheduling it.
   I is zero if the scheduler is about to start with a fresh ebb.  */
static void
dump_new_block_header (int i, basic_block bb, rtx_insn *head, rtx_insn *tail)
{
  if (!i)
    fprintf (sched_dump,
	     ";; ======================================================\n");
  else
    fprintf (sched_dump,
	     ";; =====================ADVANCING TO=====================\n");
  fprintf (sched_dump,
	   ";; -- basic block %d from %d to %d -- %s reload\n",
	   bb->index, INSN_UID (head), INSN_UID (tail),
	   (reload_completed ? "after" : "before"));
  fprintf (sched_dump,
	   ";; ======================================================\n");
  fprintf (sched_dump, "\n");
}
/* Unlink basic block notes and labels and save them, so they
   can be easily restored.  We unlink basic block notes in EBB to
   provide back-compatibility with the previous code, as target backends
   assume that there'll be only instructions between
   current_sched_info->{head and tail}.  We restore these notes as soon
   as we can.
   FIRST (LAST) is the first (last) basic block in the ebb.
   NB: In usual case (FIRST == LAST) nothing is really done.  */
void
unlink_bb_notes (basic_block first, basic_block last)
{
  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  if (first == last)
    return;

  bb_header = XNEWVEC (rtx_insn *, last_basic_block_for_fn (cfun));

  /* Make a sentinel.  */
  if (last->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
    bb_header[last->next_bb->index] = 0;

  first = first->next_bb;
  do
    {
      rtx_insn *prev, *label, *note, *next;

      label = BB_HEAD (last);
      if (LABEL_P (label))
	note = NEXT_INSN (label);
      else
	note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      prev = PREV_INSN (label);
      next = NEXT_INSN (note);
      gcc_assert (prev && next);

      SET_NEXT_INSN (prev) = next;
      SET_PREV_INSN (next) = prev;

      bb_header[last->index] = label;

      if (last == first)
	break;

      last = last->prev_bb;
    }
  while (true);
}
/* Restore basic block notes.
   FIRST is the first basic block in the ebb.  */
void
restore_bb_notes (basic_block first)
{
  if (!bb_header)
    return;

  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  first = first->next_bb;
  /* Remember: FIRST is actually a second basic block in the ebb.  */

  while (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
	 && bb_header[first->index])
    {
      rtx_insn *prev, *label, *note, *next;

      label = bb_header[first->index];
      prev = PREV_INSN (label);
      next = NEXT_INSN (prev);

      if (LABEL_P (label))
	note = NEXT_INSN (label);
      else
	note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      bb_header[first->index] = 0;

      SET_NEXT_INSN (prev) = label;
      SET_NEXT_INSN (note) = next;
      SET_PREV_INSN (next) = note;

      first = first->next_bb;
    }

  free (bb_header);
  bb_header = 0;
}
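
/* [Editorial note -- commentary added in this edited copy.]  unlink_bb_notes
   splices the [label, note] pair out of the insn chain,

       prev -> label -> note -> next   becomes   prev -> next,

   saving LABEL in bb_header; restore_bb_notes reverses the splice, so
   between the two calls targets only ever see real insns between
   current_sched_info's head and tail.  */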
/* Helper function.
   Fix CFG after both in- and inter-block movement of
   control_flow_insn_p JUMP.  */
static void
fix_jump_move (rtx_insn *jump)
{
  basic_block bb, jump_bb, jump_bb_next;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
	      || IS_SPECULATION_BRANCHY_CHECK_P (jump));

  if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
    /* if jump_bb_next is not empty.  */
    BB_END (jump_bb) = BB_END (jump_bb_next);

  if (BB_END (bb) != PREV_INSN (jump))
    /* Then there are instructions after jump that should be placed
       to jump_bb_next.  */
    BB_END (jump_bb_next) = BB_END (bb);
  else
    /* Otherwise jump_bb_next is empty.  */
    BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));

  /* To make assertion in move_insn happy.  */
  BB_END (bb) = PREV_INSN (jump);

  update_bb_for_insn (jump_bb_next);
}
/* Fix CFG after interblock movement of control_flow_insn_p JUMP.  */
static void
move_block_after_check (rtx_insn *jump)
{
  basic_block bb, jump_bb, jump_bb_next;
  vec<edge, va_gc> *t;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  update_bb_for_insn (jump_bb);

  gcc_assert (IS_SPECULATION_CHECK_P (jump)
	      || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));

  unlink_block (jump_bb_next);
  link_block (jump_bb_next, bb);

  t = bb->succs;
  bb->succs = 0;
  move_succs (&(jump_bb->succs), bb);
  move_succs (&(jump_bb_next->succs), jump_bb);
  move_succs (&t, jump_bb_next);

  df_mark_solutions_dirty ();

  common_sched_info->fix_recovery_cfg
    (bb->index, jump_bb->index, jump_bb_next->index);
}
/* Helper function for move_block_after_check.
   This function attaches the edge vector pointed to by SUCCSP to
   block TO.  */
static void
move_succs (vec<edge, va_gc> **succsp, basic_block to)
{
  edge e;
  edge_iterator ei;

  gcc_assert (to->succs == 0);

  to->succs = *succsp;

  FOR_EACH_EDGE (e, ei, to->succs)
    e->src = to;

  *succsp = 0;
}
/* Remove INSN from the instruction stream.
   INSN shouldn't have any dependencies.  */
static void
sched_remove_insn (rtx_insn *insn)
{
  sd_finish_insn (insn);

  change_queue_index (insn, QUEUE_NOWHERE);
  current_sched_info->add_remove_insn (insn, 1);
  delete_insn (insn);
}
/* Clear priorities of all instructions that are forward dependent on INSN.
   Store in vector pointed to by ROOTS_PTR insns on which priority () should
   be invoked to initialize all cleared priorities.  */
static void
clear_priorities (rtx_insn *insn, rtx_vec_t *roots_ptr)
{
  sd_iterator_def sd_it;
  dep_t dep;
  bool insn_is_root_p = true;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);

      if (INSN_PRIORITY_STATUS (pro) >= 0
	  && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
	{
	  /* If DEP doesn't contribute to priority then INSN itself should
	     be added to priority roots.  */
	  if (contributes_to_priority_p (dep))
	    insn_is_root_p = false;

	  INSN_PRIORITY_STATUS (pro) = -1;
	  clear_priorities (pro, roots_ptr);
	}
    }

  if (insn_is_root_p)
    roots_ptr->safe_push (insn);
}
/* Recompute priorities of instructions whose priorities might have been
   changed.  ROOTS is a vector of instructions whose priority computation will
   trigger initialization of all cleared priorities.  */
static void
calc_priorities (rtx_vec_t roots)
{
  int i;
  rtx_insn *insn;

  FOR_EACH_VEC_ELT (roots, i, insn)
    priority (insn);
}
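
/* [Editorial note -- commentary added in this edited copy.]  The two
   functions above are used as a pair, exactly as at the end of
   create_check_block_twin:

       auto_vec<rtx_insn *> priorities_roots;
       clear_priorities (twin, &priorities_roots);
       calc_priorities (priorities_roots);  */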
/* Add dependences between JUMP and other instructions in the recovery
   block.  INSN is the first insn in the recovery block.  */
static void
add_jump_dependencies (rtx_insn *insn, rtx_insn *jump)
{
  do
    {
      insn = NEXT_INSN (insn);
      if (insn == jump)
	break;

      if (dep_list_size (insn, SD_LIST_FORW) == 0)
	{
	  dep_def _new_dep, *new_dep = &_new_dep;

	  init_dep (new_dep, insn, jump, REG_DEP_ANTI);
	  sd_add_dep (new_dep, false);
	}
    }
  while (1);

  gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
}
/* Extend data structures for logical insn UID.  */
void
sched_extend_luids (void)
{
  int new_luids_max_uid = get_max_uid () + 1;

  sched_luids.safe_grow_cleared (new_luids_max_uid);
}
/* Initialize LUID for INSN.  */
void
sched_init_insn_luid (rtx_insn *insn)
{
  int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
  int luid;

  if (i >= 0)
    {
      luid = sched_max_luid;
      sched_max_luid += i;
    }
  else
    luid = -1;

  SET_INSN_LUID (insn, luid);
}
/* Initialize luids for BBS.
   The hook common_sched_info->luid_for_non_insn () is used to determine
   if notes, labels, etc. need luids.  */
void
sched_init_luids (bb_vec_t bbs)
{
  int i;
  basic_block bb;

  sched_extend_luids ();
  FOR_EACH_VEC_ELT (bbs, i, bb)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
	sched_init_insn_luid (insn);
    }
}
/* Free LUIDs.  */
void
sched_finish_luids (void)
{
  sched_luids.release ();
  sched_max_luid = 1;
}
/* Return logical uid of INSN.  Helpful while debugging.  */
int
insn_luid (rtx_insn *insn)
{
  return INSN_LUID (insn);
}
/* Extend per insn data in the target.  */
void
sched_extend_target (void)
{
  if (targetm.sched.h_i_d_extended)
    targetm.sched.h_i_d_extended ();
}
/* Extend global scheduler structures (those, that live across calls to
   schedule_block) to include information about just emitted INSN.  */
static void
extend_h_i_d (void)
{
  int reserve = (get_max_uid () + 1 - h_i_d.length ());
  if (reserve > 0
      && ! h_i_d.space (reserve))
    {
      h_i_d.safe_grow_cleared (3 * get_max_uid () / 2);
      sched_extend_target ();
    }
}
/* Initialize h_i_d entry of the INSN with default values.
   Values that are not explicitly initialized here hold zero.  */
static void
init_h_i_d (rtx_insn *insn)
{
  if (INSN_LUID (insn) > 0)
    {
      INSN_COST (insn) = -1;
      QUEUE_INDEX (insn) = QUEUE_NOWHERE;
      INSN_TICK (insn) = INVALID_TICK;
      INSN_EXACT_TICK (insn) = INVALID_TICK;
      INTER_TICK (insn) = INVALID_TICK;
      TODO_SPEC (insn) = HARD_DEP;
      INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status
	= AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
      INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status
	= AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
    }
}
/* Initialize haifa_insn_data for BBS.  */
void
haifa_init_h_i_d (bb_vec_t bbs)
{
  int i;
  basic_block bb;

  extend_h_i_d ();
  FOR_EACH_VEC_ELT (bbs, i, bb)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
	init_h_i_d (insn);
    }
}
/* Finalize haifa_insn_data.  */
void
haifa_finish_h_i_d (void)
{
  int i;
  haifa_insn_data_t data;
  reg_use_data *use, *next_use;
  reg_set_data *set, *next_set;

  FOR_EACH_VEC_ELT (h_i_d, i, data)
    {
      free (data->max_reg_pressure);
      free (data->reg_pressure);
      for (use = data->reg_use_list; use != NULL; use = next_use)
	{
	  next_use = use->next_insn_use;
	  free (use);
	}
      for (set = data->reg_set_list; set != NULL; set = next_set)
	{
	  next_set = set->next_insn_set;
	  free (set);
	}
    }
  h_i_d.release ();
}
/* Init data for the new insn INSN.  */
static void
haifa_init_insn (rtx_insn *insn)
{
  gcc_assert (insn != NULL);

  sched_extend_luids ();
  sched_init_insn_luid (insn);
  sched_extend_target ();
  sched_deps_init (false);
  extend_h_i_d ();
  init_h_i_d (insn);

  if (adding_bb_to_current_region_p)
    {
      sd_init_insn (insn);

      /* Extend dependency caches by one element.  */
      extend_dependency_caches (1, false);
    }
  if (sched_pressure != SCHED_PRESSURE_NONE)
    init_insn_reg_pressure_info (insn);
}
/* Init data for the new basic block BB which comes after AFTER.  */
static void
haifa_init_only_bb (basic_block bb, basic_block after)
{
  gcc_assert (bb != NULL);

  sched_init_bbs ();

  if (common_sched_info->add_block)
    /* This changes only data structures of the front-end.  */
    common_sched_info->add_block (bb, after);
}
/* A generic version of sched_split_block ().  */
basic_block
sched_split_block_1 (basic_block first_bb, rtx after)
{
  edge e;

  e = split_block (first_bb, after);
  gcc_assert (e->src == first_bb);

  /* sched_split_block emits note if *check == BB_END.  Probably it
     is better to rip that note off.  */

  return e->dest;
}
/* A generic version of sched_create_empty_bb ().  */
basic_block
sched_create_empty_bb_1 (basic_block after)
{
  return create_empty_bb (after);
}
/* Insert PAT as an INSN into the schedule and update the necessary data
   structures to account for it.  */
rtx_insn *
sched_emit_insn (rtx pat)
{
  rtx_insn *insn = emit_insn_before (pat, first_nonscheduled_insn ());
  haifa_init_insn (insn);

  if (current_sched_info->add_remove_insn)
    current_sched_info->add_remove_insn (insn, 0);

  (*current_sched_info->begin_schedule_ready) (insn);
  scheduled_insns.safe_push (insn);

  last_scheduled_insn = insn;
  return insn;
}
/* This function returns a candidate satisfying dispatch constraints from
   the ready list.  */
static rtx_insn *
ready_remove_first_dispatch (struct ready_list *ready)
{
  int i;
  rtx_insn *insn = ready_element (ready, 0);

  if (ready->n_ready == 1
      || !INSN_P (insn)
      || INSN_CODE (insn) < 0
      || !active_insn_p (insn)
      || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
    return ready_remove_first (ready);

  for (i = 1; i < ready->n_ready; i++)
    {
      insn = ready_element (ready, i);

      if (!INSN_P (insn)
	  || INSN_CODE (insn) < 0
	  || !active_insn_p (insn))
	continue;

      if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
	{
	  /* Return ith element of ready.  */
	  insn = ready_remove (ready, i);
	  return insn;
	}
    }

  if (targetm.sched.dispatch (NULL, DISPATCH_VIOLATION))
    return ready_remove_first (ready);

  for (i = 1; i < ready->n_ready; i++)
    {
      insn = ready_element (ready, i);

      if (!INSN_P (insn)
	  || INSN_CODE (insn) < 0
	  || !active_insn_p (insn))
	continue;

      /* Return i-th element of ready.  */
      if (targetm.sched.dispatch (insn, IS_CMP))
	return ready_remove (ready, i);
    }

  return ready_remove_first (ready);
}
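
/* [Editorial note -- commentary added in this edited copy.]  The selection
   policy above is: take the head insn if it fits the dispatch window;
   otherwise pick the first ready insn that does fit; after a dispatch
   window violation take the head; else prefer a compare (IS_CMP), and
   finally fall back to the head insn anyway.  */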
/* Get number of ready insns in the ready list.  */
int
number_in_ready (void)
{
  return ready.n_ready;
}
/* Return the I-th element of the ready list.  */
rtx_insn *
get_ready_element (int i)
{
  return ready_element (&ready, i);
}